Introduction#

  • Create a network stack
  • Create an Amazon EKS cluster

Network stack#

Let's create a network stack for launching an Amazon EKS cluster:

  • At least two subnets in two Availability Zones
  • A security group for communication between the control plane and the data plane
  • A role for the control plane to call AWS APIs on your behalf
  • A role for the node group instances, with managed policies attached
AWSTemplateFormatVersion: '2010-09-09'
#------------------------------------------------------
# Mappings
#------------------------------------------------------
Mappings:
  CidrMappings:
    public-subnet-1:
      CIDR: 10.0.0.0/24
    public-subnet-2:
      CIDR: 10.0.1.0/24
    public-subnet-3:
      CIDR: 10.0.2.0/24
#------------------------------------------------------
# Parameters
#------------------------------------------------------
Parameters:
  CidrBlock:
    Type: String
    Description: CidrBlock
    Default: 10.0.0.0/16
#------------------------------------------------------
# Resources: VPC, Subnets, NAT, Routes
#------------------------------------------------------
Resources:
  VPC:
    Type: AWS::EC2::VPC
    Properties:
      CidrBlock: !Ref CidrBlock
      EnableDnsSupport: true
      EnableDnsHostnames: true
      Tags:
        - Key: Name
          Value: !Sub ${AWS::StackName}-vpc
  InternetGateway:
    Type: AWS::EC2::InternetGateway
    Properties:
      Tags:
        - Key: Name
          Value: !Sub ${AWS::StackName}-ig
  AttachGateway:
    Type: AWS::EC2::VPCGatewayAttachment
    Properties:
      VpcId: !Ref VPC
      InternetGatewayId: !Ref InternetGateway
  PublicRouteTable:
    Type: AWS::EC2::RouteTable
    Properties:
      VpcId: !Ref VPC
      Tags:
        - Key: Name
          Value: !Sub ${AWS::StackName}-public-rt
  RouteInternetGateway:
    Type: AWS::EC2::Route
    DependsOn: AttachGateway
    Properties:
      RouteTableId: !Ref PublicRouteTable
      DestinationCidrBlock: 0.0.0.0/0
      GatewayId: !Ref InternetGateway
  PublicSubnet1:
    Type: AWS::EC2::Subnet
    Properties:
      MapPublicIpOnLaunch: true
      AvailabilityZone:
        Fn::Select:
          - 0
          - Fn::GetAZs:
              Ref: AWS::Region
      VpcId: !Ref VPC
      CidrBlock:
        Fn::FindInMap:
          - CidrMappings
          - public-subnet-1
          - CIDR
      Tags:
        - Key: Name
          Value: !Sub ${AWS::StackName}-public-subnet-1
  PublicSubnet2:
    Type: AWS::EC2::Subnet
    Properties:
      MapPublicIpOnLaunch: true
      AvailabilityZone:
        Fn::Select:
          - 1
          - Fn::GetAZs:
              Ref: AWS::Region
      VpcId: !Ref VPC
      CidrBlock:
        Fn::FindInMap:
          - CidrMappings
          - public-subnet-2
          - CIDR
      Tags:
        - Key: Name
          Value: !Sub ${AWS::StackName}-public-subnet-2
  PublicSubnet3:
    Type: AWS::EC2::Subnet
    Properties:
      MapPublicIpOnLaunch: true
      AvailabilityZone:
        Fn::Select:
          - 2
          - Fn::GetAZs:
              Ref: AWS::Region
      VpcId: !Ref VPC
      CidrBlock:
        Fn::FindInMap:
          - CidrMappings
          - public-subnet-3
          - CIDR
      Tags:
        - Key: Name
          Value: !Sub ${AWS::StackName}-public-subnet-3
  PublicSubnet1RouteTableAssociation:
    Type: AWS::EC2::SubnetRouteTableAssociation
    Properties:
      SubnetId: !Ref PublicSubnet1
      RouteTableId: !Ref PublicRouteTable
  PublicSubnet2RouteTableAssociation:
    Type: AWS::EC2::SubnetRouteTableAssociation
    Properties:
      SubnetId: !Ref PublicSubnet2
      RouteTableId: !Ref PublicRouteTable
  PublicSubnet3RouteTableAssociation:
    Type: AWS::EC2::SubnetRouteTableAssociation
    Properties:
      SubnetId: !Ref PublicSubnet3
      RouteTableId: !Ref PublicRouteTable
  ControlPlaneSecurityGroup:
    Type: AWS::EC2::SecurityGroup
    Properties:
      GroupDescription: Communication between the control plane and worker nodegroups
      VpcId: !Ref VPC
      GroupName: !Sub ${AWS::StackName}-eks-cluster-sg
  ControlPlaneSecurityGroupIngress:
    Type: AWS::EC2::SecurityGroupIngress
    Properties:
      GroupId: !Ref ControlPlaneSecurityGroup
      IpProtocol: -1
      SourceSecurityGroupId: !Ref ControlPlaneSecurityGroup
      SourceSecurityGroupOwnerId: !Ref AWS::AccountId
#------------------------------------------------------
# Export:
#------------------------------------------------------
Outputs:
  VPC:
    Value: !Ref VPC
    Export:
      Name: !Sub ${AWS::StackName}-vpc
  PublicSubnet1:
    Value: !Ref PublicSubnet1
    Export:
      Name: !Sub ${AWS::StackName}-public-subnet-1
  PublicSubnet2:
    Value: !Ref PublicSubnet2
    Export:
      Name: !Sub ${AWS::StackName}-public-subnet-2
  PublicSubnet3:
    Value: !Ref PublicSubnet3
    Export:
      Name: !Sub ${AWS::StackName}-public-subnet-3
  PublicRouteTable:
    Value: !Ref PublicRouteTable
    Export:
      Name: !Sub ${AWS::StackName}-public-route-table
  InternetGateway:
    Value: !Ref InternetGateway
    Export:
      Name: !Sub ${AWS::StackName}-ig
  ControlPlaneSecurityGroup:
    Value: !Ref ControlPlaneSecurityGroup
    Export:
      Name: !Sub ${AWS::StackName}-eks-cluster-sg

EKS cluster#

Let's create an EKS cluster:

  • Version 1.30
  • Instance type t3.medium
AWSTemplateFormatVersion: '2010-09-09'
#------------------------------------------------------
# Parameters
#------------------------------------------------------
Parameters:
  NetworkStackName:
    Type: String
    Default: 'network-stack'
  EKSClusterVersion:
    Type: String
    Default: '1.30'
  NodeGroupInstanceType:
    Type: String
    Default: 't3.medium'
#------------------------------------------------------
# Resources: EKS Cluster
#------------------------------------------------------
Resources:
  EKSClusterRole:
    Type: AWS::IAM::Role
    Properties:
      AssumeRolePolicyDocument:
        Version: '2012-10-17'
        Statement:
          - Effect: Allow
            Principal:
              Service:
                - eks.amazonaws.com
            Action:
              - sts:AssumeRole
      ManagedPolicyArns:
        - !Sub arn:${AWS::Partition}:iam::aws:policy/AmazonEKSClusterPolicy
        - !Sub arn:${AWS::Partition}:iam::aws:policy/AmazonEKSVPCResourceController
  ControlPlane:
    Type: AWS::EKS::Cluster
    Properties:
      Name: !Sub ${AWS::StackName}-eks-cluster
      ResourcesVpcConfig:
        SecurityGroupIds:
          - !ImportValue
            Fn::Sub: ${NetworkStackName}-eks-cluster-sg
        SubnetIds:
          - !ImportValue
            Fn::Sub: ${NetworkStackName}-public-subnet-1
          - !ImportValue
            Fn::Sub: ${NetworkStackName}-public-subnet-2
          - !ImportValue
            Fn::Sub: ${NetworkStackName}-public-subnet-3
      Version: !Ref EKSClusterVersion
      RoleArn: !GetAtt EKSClusterRole.Arn
  ManagedNodeGroupRole:
    Type: AWS::IAM::Role
    Properties:
      AssumeRolePolicyDocument:
        Version: '2012-10-17'
        Statement:
          - Effect: Allow
            Principal:
              Service:
                - ec2.amazonaws.com
            Action:
              - sts:AssumeRole
      ManagedPolicyArns:
        - !Sub arn:${AWS::Partition}:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly
        - !Sub arn:${AWS::Partition}:iam::aws:policy/AmazonEKSWorkerNodePolicy
        - !Sub arn:${AWS::Partition}:iam::aws:policy/AmazonEKS_CNI_Policy
        - !Sub arn:${AWS::Partition}:iam::aws:policy/AmazonSSMManagedInstanceCore
  ManagedNodeGroup:
    Type: AWS::EKS::Nodegroup
    Properties:
      AmiType: AL2_x86_64
      ClusterName: !Ref ControlPlane
      InstanceTypes:
        - !Ref NodeGroupInstanceType
      Labels:
        alpha.eksctl.io/cluster-name: !Ref ControlPlane
      NodeRole: !GetAtt ManagedNodeGroupRole.Arn
      ScalingConfig:
        DesiredSize: 3
        MaxSize: 3
        MinSize: 3
      Subnets:
        - !ImportValue
          Fn::Sub: ${NetworkStackName}-public-subnet-1
        - !ImportValue
          Fn::Sub: ${NetworkStackName}-public-subnet-2
        - !ImportValue
          Fn::Sub: ${NetworkStackName}-public-subnet-3
      Tags:
        Name: !Sub ${AWS::StackName}-eks-nodegroup

Deploy#

Here is a simple script to deploy the CloudFormation stacks:

aws cloudformation validate-template \
  --template-body file://1-network.yaml

aws cloudformation create-stack \
  --stack-name network-stack \
  --template-body file://1-network.yaml \
  --capabilities CAPABILITY_NAMED_IAM

aws cloudformation update-stack \
  --stack-name network-stack \
  --template-body file://1-network.yaml \
  --capabilities CAPABILITY_NAMED_IAM

aws cloudformation create-stack \
  --stack-name eks-stack \
  --template-body file://2-eks.yaml \
  --capabilities CAPABILITY_NAMED_IAM

# aws eks update-kubeconfig --name eks-stack-eks-cluster
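
Stack creation takes several minutes. A simple way to block until each stack finishes before moving on:

aws cloudformation wait stack-create-complete --stack-name network-stack
aws cloudformation wait stack-create-complete --stack-name eks-stack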

Update Kubeconfig#

aws eks update-kubeconfig --name eks-stack-eks-cluster
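
A quick check that kubectl can now reach the cluster; the node count should match the node group's desired size of 3:

kubectl get nodes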

Get the service account:

kubectl -n kube-system get serviceaccount/ebs-csi-controller-sa -o yaml

Create an OIDC identity provider:

eksctl utils associate-iam-oidc-provider \
  --cluster=eks-stack-eks-cluster \
  --approve

Create a trust policy:

{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Effect": "Allow",
      "Principal": {
        "Federated": "arn:aws:iam::633688584000:oidc-provider/oidc.eks.us-west-2.amazonaws.com/id/9D29659801172AADAE5B5A48CA2FE5BA"
      },
      "Action": "sts:AssumeRoleWithWebIdentity",
      "Condition": {
        "StringEquals": {
          "oidc.eks.us-west-2.amazonaws.com/id/9D29659801172AADAE5B5A48CA2FE5BA:aud": "sts.amazonaws.com",
          "oidc.eks.us-west-2.amazonaws.com/id/9D29659801172AADAE5B5A48CA2FE5BA:sub": "system:serviceaccount:kube-system:ebs-csi-controller-sa"
        }
      }
    }
  ]
}
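
With the JSON above saved as trusted-policy.json, here is a sketch of creating the role with the AWS CLI; the role name AmazonEKS_EBS_CSI_DriverRole is an assumption that matches the annotation shown later in the EBS CSI section:

# create the role with the trust policy above
aws iam create-role \
  --role-name AmazonEKS_EBS_CSI_DriverRole \
  --assume-role-policy-document file://trusted-policy.json

# attach the managed policy for the EBS CSI driver
aws iam attach-role-policy \
  --role-name AmazonEKS_EBS_CSI_DriverRole \
  --policy-arn arn:aws:iam::aws:policy/service-role/AmazonEBSCSIDriverPolicy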

First App#

Let's run a busybox pod.

kubectl run busybox --image=busybox --rm -it --command -- /bin/sh

Let's run an nginx image.
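
For example, a minimal run (the pod name nginx is an arbitrary choice):

kubectl run nginx --image=nginx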

Let's run an AWS CLI image.

kubectl run awscli --image=public.ecr.aws/aws-cli/aws-cli:latest -it --command -- /bin/sh

We can also run the image locally to inspect it.

docker run -it --entrypoint /bin/sh public.ecr.aws/aws-cli/aws-cli:latest

AWS CLI#

Let's run an AWS CLI image.

kubectl run awscli --image=public.ecr.aws/aws-cli/aws-cli:latest -it --command -- /bin/sh

Then check the caller identity.

aws sts get-caller-identity

Create a deployment for awscli.
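
A minimal sketch; the sleep loop mirrors the awscli.yaml manifest in the Applications section and is only there to keep the containers alive:

kubectl create deployment awscli \
  --image=public.ecr.aws/aws-cli/aws-cli:latest \
  -- /bin/sh -c 'while true; do echo hello; sleep 10; done'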

Shell into a running pod.

kubectl exec --stdin --tty awscli -- /bin/sh

Pod Identity#

  • Install the Amazon EKS Pod Identity Agent add-on
  • Create an IAM role named RoleForPodDemo
  • Create a service account named pod-identity-demo
  • Create a pod identity association

First, let's install the add-on from the console.

pod-identity-add-on
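
The list above also calls for an IAM role named RoleForPodDemo. Here is a sketch with the AWS CLI: the trust policy lets the EKS Pod Identity service principal assume the role, and attaching AmazonS3FullAccess is an assumption so the bucket commands at the end of this section work.

# trust policy for EKS Pod Identity
cat > pod-identity-trust.json <<'EOF'
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Effect": "Allow",
      "Principal": { "Service": "pods.eks.amazonaws.com" },
      "Action": ["sts:AssumeRole", "sts:TagSession"]
    }
  ]
}
EOF

aws iam create-role \
  --role-name RoleForPodDemo \
  --assume-role-policy-document file://pod-identity-trust.json

# AmazonS3FullAccess is an assumption for the S3 demo below
aws iam attach-role-policy \
  --role-name RoleForPodDemo \
  --policy-arn arn:aws:iam::aws:policy/AmazonS3FullAccess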

Second, create a service account in the cluster.

kubectl create serviceaccount pod-identity-demo -n default

Deploy awscli pod with the service account.

kubectl apply -f yaml/hello.yaml

Here is the hello.yaml.

apiVersion: v1
kind: Pod
metadata:
  creationTimestamp: null
  labels:
    run: awscli
  name: awscli
spec:
  serviceAccountName: pod-identity-demo
  containers:
    - image: public.ecr.aws/aws-cli/aws-cli:latest
      name: awscli
      # keep the container alive so we can exec into it; without
      # this the aws-cli entrypoint prints help and exits
      command: ['/bin/sh']
      args: ['-c', 'while true; do echo hello; sleep 10; done']
      resources: {}
  dnsPolicy: ClusterFirst
  restartPolicy: Always
status: {}

Third, create a pod identity association from the console.

pod-identity-association
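
The console step can also be done with the AWS CLI; a sketch (replace <ACCOUNT_ID> with your account ID):

aws eks create-pod-identity-association \
  --cluster-name eks-stack-eks-cluster \
  --namespace default \
  --service-account pod-identity-demo \
  --role-arn arn:aws:iam::<ACCOUNT_ID>:role/RoleForPodDemo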

Finally, let's test it. Shell into the running pod:

kubectl exec --stdin --tty awscli -- /bin/sh
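
Inside the pod, the caller identity should now show the assumed RoleForPodDemo role:

aws sts get-caller-identity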

Create a bucket:

aws s3api create-bucket --bucket haimtran-demo-02062024 --region us-west-2 --create-bucket-configuration LocationConstraint=us-west-2

Or use this command:

aws s3 mb s3://haimtran-demo-03062024

Container Insights#

  • Create an IAM role for the service account used by the CloudWatch agent
  • Install the Amazon CloudWatch Observability EKS add-on

First, let's create an IAM role that will be used by the cloudwatch-agent service account. You can use eksctl or the standard (manual) method.

eksctl utils associate-iam-oidc-provider --cluster eks-stack-eks-cluster --approve

eksctl create iamserviceaccount \
  --name cloudwatch-agent \
  --namespace amazon-cloudwatch \
  --cluster eks-stack-eks-cluster \
  --role-name role-for-cw-agent-add-on \
  --attach-policy-arn arn:aws:iam::aws:policy/CloudWatchAgentServerPolicy \
  --role-only \
  --approve

The eksctl command above creates a CloudFormation stack under the hood. It attaches the AWS managed policy arn:aws:iam::aws:policy/CloudWatchAgentServerPolicy.

And the following trust policy.

trusted-policy.json
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Effect": "Allow",
      "Principal": {
        "Federated": "arn:aws:iam::094847457777:oidc-provider/oidc.eks.us-west-2.amazonaws.com/id/C048A79AB478F38A58EF0C5B4915934D"
      },
      "Action": "sts:AssumeRoleWithWebIdentity",
      "Condition": {
        "StringEquals": {
          "oidc.eks.us-west-2.amazonaws.com/id/C048A79AB478F38A58EF0C5B4915934D:sub": "system:serviceaccount:amazon-cloudwatch:cloudwatch-agent",
          "oidc.eks.us-west-2.amazonaws.com/id/C048A79AB478F38A58EF0C5B4915934D:aud": "sts.amazonaws.com"
        }
      }
    }
  ]
}

Second, install the Amazon CloudWatch Observability add-on from the console. It will create a service account named cloudwatch-agent in the amazon-cloudwatch namespace.

cw-observability-add-on
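
If you prefer the CLI to the console, a sketch of installing the add-on with the role created above (replace <ACCOUNT_ID> with your account ID):

aws eks create-addon \
  --cluster-name eks-stack-eks-cluster \
  --addon-name amazon-cloudwatch-observability \
  --service-account-role-arn arn:aws:iam::<ACCOUNT_ID>:role/role-for-cw-agent-add-on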

Describe the service account to see the annotated role.

kubectl describe serviceaccounts amazon-cloudwatch-observability-controller-manager -n amazon-cloudwatch

And see the role ARN annotation.

Name: amazon-cloudwatch-observability-controller-manager
Namespace: amazon-cloudwatch
Labels: app.kubernetes.io/instance=amazon-cloudwatch-observability
app.kubernetes.io/managed-by=EKS
app.kubernetes.io/name=amazon-cloudwatch-observability
app.kubernetes.io/version=1.0.0
Annotations: eks.amazonaws.com/role-arn: arn:aws:iam::094847457777:role/role-for-cw-agent-add-on

Finally, go to CloudWatch Container Insights and filter by namespace to see pod metrics and logs.

EBS CSI#

  • Create an IAM role for the CSI driver.
  • Install the EBS CSI add-on.

Let's create a role that will be used by the driver and annotated on its service account. Attach the AWS managed policy arn:aws:iam::aws:policy/service-role/AmazonEBSCSIDriverPolicy and the following trust policy.
trusted-policy.json
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Effect": "Allow",
      "Principal": {
        "Federated": "arn:aws:iam::094847457777:oidc-provider/oidc.eks.us-west-2.amazonaws.com/id/C048A79AB478F38A58EF0C5B4915934D"
      },
      "Action": "sts:AssumeRoleWithWebIdentity",
      "Condition": {
        "StringEquals": {
          "oidc.eks.us-west-2.amazonaws.com/id/C048A79AB478F38A58EF0C5B4915934D:aud": "sts.amazonaws.com",
          "oidc.eks.us-west-2.amazonaws.com/id/C048A79AB478F38A58EF0C5B4915934D:sub": "system:serviceaccount:kube-system:ebs-csi-controller-sa"
        }
      }
    }
  ]
}

Then let's install the add-on.

ebs-csi-add-on
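
Again, a CLI sketch as an alternative to the console; the role name matches the annotation shown below (replace <ACCOUNT_ID> with your account ID):

aws eks create-addon \
  --cluster-name eks-stack-eks-cluster \
  --addon-name aws-ebs-csi-driver \
  --service-account-role-arn arn:aws:iam::<ACCOUNT_ID>:role/AmazonEKS_EBS_CSI_DriverRole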

Describe the service account and see the annotated role.

kubectl describe serviceaccount ebs-csi-controller-sa -n kube-system

Annotated role.

Name: ebs-csi-controller-sa
Namespace: kube-system
Labels: app.kubernetes.io/component=csi-driver
app.kubernetes.io/managed-by=EKS
app.kubernetes.io/name=aws-ebs-csi-driver
app.kubernetes.io/version=1.31.0
Annotations: eks.amazonaws.com/role-arn: arn:aws:iam::094847457777:role/AmazonEKS_EBS_CSI_DriverRole

Applications#

awscli.yaml
apiVersion: v1
kind: Pod
metadata:
  creationTimestamp: null
  labels:
    run: awscli
  name: awscli
spec:
  serviceAccountName: pod-identity-demo
  containers:
    - image: public.ecr.aws/aws-cli/aws-cli:latest
      name: awscli
      command: ['/bin/sh']
      args: ['-c', 'while true; do echo hello; sleep 10;done']
book-service.yaml
apiVersion: v1
kind: Service
metadata:
  name: go-app-service
spec:
  ports:
    - port: 80
      targetPort: 3000
      name: http
  selector:
    app: go-app
  type: LoadBalancer
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: go-app-deployment
spec:
  replicas: 2
  selector:
    matchLabels:
      app: go-app
  template:
    metadata:
      labels:
        app: go-app
    spec:
      containers:
        - image: 633688584000.dkr.ecr.us-west-2.amazonaws.com/go-app:latest
          name: go-app
          ports:
            - containerPort: 3000
          resources:
            limits:
              cpu: 100m
            requests:
              cpu: 100m
---
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
  name: go-app-hpa
  namespace: default
spec:
  maxReplicas: 10
  metrics:
    - resource:
        name: cpu
        target:
          averageUtilization: 15
          type: Utilization
      type: Resource
  minReplicas: 2
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: go-app-deployment
hello-service.yaml
apiVersion: v1
kind: Service
metadata:
  name: cdk8s-app-service-c8a84b3e
spec:
  ports:
    - port: 80
      targetPort: 8080
  selector:
    app: hello-cdk8s
  type: LoadBalancer
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: cdk8s-app-deployment-c8f953f2
spec:
  replicas: 2
  selector:
    matchLabels:
      app: hello-cdk8s
  template:
    metadata:
      labels:
        app: hello-cdk8s
    spec:
      containers:
        - image: 'paulbouwer/hello-kubernetes:1.7'
          name: hello-kubernetes
          ports:
            - containerPort: 8080
          resources:
            limits:
              cpu: 100m
            requests:
              cpu: 100m
metric-server.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    k8s-app: metrics-server
  name: metrics-server
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    k8s-app: metrics-server
    rbac.authorization.k8s.io/aggregate-to-admin: 'true'
    rbac.authorization.k8s.io/aggregate-to-edit: 'true'
    rbac.authorization.k8s.io/aggregate-to-view: 'true'
  name: system:aggregated-metrics-reader
rules:
  - apiGroups:
      - metrics.k8s.io
    resources:
      - pods
      - nodes
    verbs:
      - get
      - list
      - watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    k8s-app: metrics-server
  name: system:metrics-server
rules:
  - apiGroups:
      - ''
    resources:
      - nodes/metrics
    verbs:
      - get
  - apiGroups:
      - ''
    resources:
      - pods
      - nodes
    verbs:
      - get
      - list
      - watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  labels:
    k8s-app: metrics-server
  name: metrics-server-auth-reader
  namespace: kube-system
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: extension-apiserver-authentication-reader
subjects:
  - kind: ServiceAccount
    name: metrics-server
    namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  labels:
    k8s-app: metrics-server
  name: metrics-server:system:auth-delegator
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:auth-delegator
subjects:
  - kind: ServiceAccount
    name: metrics-server
    namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  labels:
    k8s-app: metrics-server
  name: system:metrics-server
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:metrics-server
subjects:
  - kind: ServiceAccount
    name: metrics-server
    namespace: kube-system
---
apiVersion: v1
kind: Service
metadata:
  labels:
    k8s-app: metrics-server
  name: metrics-server
  namespace: kube-system
spec:
  ports:
    - name: https
      port: 443
      protocol: TCP
      targetPort: https
  selector:
    k8s-app: metrics-server
---
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    k8s-app: metrics-server
  name: metrics-server
  namespace: kube-system
spec:
  selector:
    matchLabels:
      k8s-app: metrics-server
  strategy:
    rollingUpdate:
      maxUnavailable: 0
  template:
    metadata:
      labels:
        k8s-app: metrics-server
    spec:
      containers:
        - args:
            - --cert-dir=/tmp
            - --secure-port=4443
            - --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname
            - --kubelet-use-node-status-port
            - --metric-resolution=15s
          image: registry.k8s.io/metrics-server/metrics-server:v0.6.3
          imagePullPolicy: IfNotPresent
          livenessProbe:
            failureThreshold: 3
            httpGet:
              path: /livez
              port: https
              scheme: HTTPS
            periodSeconds: 10
          name: metrics-server
          ports:
            - containerPort: 4443
              name: https
              protocol: TCP
          readinessProbe:
            failureThreshold: 3
            httpGet:
              path: /readyz
              port: https
              scheme: HTTPS
            initialDelaySeconds: 20
            periodSeconds: 10
          resources:
            requests:
              cpu: 100m
              memory: 200Mi
          securityContext:
            allowPrivilegeEscalation: false
            readOnlyRootFilesystem: true
            runAsNonRoot: true
            runAsUser: 1000
          volumeMounts:
            - mountPath: /tmp
              name: tmp-dir
      nodeSelector:
        kubernetes.io/os: linux
      priorityClassName: system-cluster-critical
      serviceAccountName: metrics-server
      volumes:
        - emptyDir: {}
          name: tmp-dir
---
apiVersion: apiregistration.k8s.io/v1
kind: APIService
metadata:
  labels:
    k8s-app: metrics-server
  name: v1beta1.metrics.k8s.io
spec:
  group: metrics.k8s.io
  groupPriorityMinimum: 100
  insecureSkipTLSVerify: true
  service:
    name: metrics-server
    namespace: kube-system
  version: v1beta1
  versionPriority: 100

Reference#