Terraform 使用说明

Terraform 安装

Amazon Linux 2023 安装 Terraform

sudo dnf install -y yum-utils

sudo yum-config-manager --add-repo https://rpm.releases.hashicorp.com/AmazonLinux/hashicorp.repo

sudo dnf install -y terraform
  • 安装完成后,terraform 命令即可使用。
$ terraform version
Terraform v1.15.2
on linux_amd64

Terraform 配置 AWS EKS 集群

在 Terraform 节点服务器上安装 AWS CLI 并配置 AWS 凭证。

$ aws sts get-caller-identity
{
"UserId": "AIDA2H52UFCAOSDMJBUX6",
"Account": "<AWS_ACCOUNT_ID>",
"Arn": "arn:aws:iam::<AWS_ACCOUNT_ID>:user/ops"
}

AWS EKS 集群 Terraform 项目结构

$ tree 
.
├── main.tf # Terraform 主入口,调用其他模块
├── variables.tf # 定义变量,用于在 Terraform 中使用动态值
├── providers.tf # 定义 AWS 提供商
├── versions.tf # 定义 AWS 提供商的版本
├── vpc.tf # 定义 VPC 模块,用于创建 AWS VPC
├── eks.tf # 定义 EKS 模块,用于创建 AWS EKS 集群
├── aws-alb.tf # 定义 ALB 模块,用于创建 AWS ALB
├── efs.tf # 定义 EFS 模块,用于创建 AWS EFS 文件系统
└── outputs.tf # 定义输出,用于在 Terraform 中使用资源 ID

定义变量

variables.tf 文件中定义变量,用于在 Terraform 中使用动态值。

模块(module)的名称必须是一个固定的字符串(静态标识符),绝对不能包含变量插值 ${...}

variables.tf

# Region where all resources are created.
variable "aws_region" {
  type        = string
  description = "AWS region to deploy into"
  default     = "ap-east-1"
}

# Name of the EKS cluster; also used as a prefix for related resources.
variable "eks_cluster_name" {
  type        = string
  description = "Name of the EKS cluster"
  default     = "eks-6992-hk-uat"
}

# Kubernetes control-plane version for the EKS cluster.
variable "eks_cluster_version" {
  type        = string
  description = "Kubernetes version for the EKS cluster"
  default     = "1.35"
}

定义 AWS 提供商

providers.tf 文件中定义 AWS 提供商,指定 AWS 区域。

providers.tf
# AWS provider for all aws_* resources; region comes from variables.tf.
provider "aws" {
region = var.aws_region
}

# Helm provider used to install in-cluster charts (ALB controller, EFS CSI).
# Connection details come from the EKS module outputs, so charts can be
# deployed in the same apply that creates the cluster.
provider "helm" {
kubernetes {
host = module.eks-6992-hk-uat.cluster_endpoint
cluster_ca_certificate = base64decode(module.eks-6992-hk-uat.cluster_certificate_authority_data)

# Fetch a short-lived cluster auth token via the AWS CLI.
exec {
api_version = "client.authentication.k8s.io/v1beta1"
args = ["eks", "get-token", "--cluster-name", module.eks-6992-hk-uat.cluster_name]
command = "aws"
}
}
}

# Kubernetes provider for native K8s resources (e.g. the StorageClass in efs.tf).
# Uses the same endpoint/CA/exec-token authentication as the helm provider.
provider "kubernetes" {
host = module.eks-6992-hk-uat.cluster_endpoint

cluster_ca_certificate = base64decode(
module.eks-6992-hk-uat.cluster_certificate_authority_data
)

exec {
api_version = "client.authentication.k8s.io/v1beta1"

command = "aws"

args = [
"eks",
"get-token",
"--cluster-name",
module.eks-6992-hk-uat.cluster_name
]
}
}


锁定版本

versions.tf 文件中定义 AWS 提供商的版本。

versions.tf

# Pin Terraform core and provider versions so runs are reproducible.
terraform {
required_version = ">= 1.15" # minimum Terraform CLI version

required_providers { # constrain provider major versions
aws = {
source = "hashicorp/aws"
version = "~> 5.50"
}
helm = {
source = "hashicorp/helm"
version = "~> 2.15"
}
kubernetes = {
source = "hashicorp/kubernetes"
version = "~> 2.29"
}
}
}

定义 VPC 模块

vpc.tf 文件中定义 VPC 模块,用于创建 AWS VPC。

vpc.tf
# VPC for the EKS cluster: 3 AZs, 3 private + 3 public subnets, one shared NAT gateway.
module "eks-6992-hk-uat-vpc" {
source = "terraform-aws-modules/vpc/aws"
version = "5.8.1"

name = "${var.eks_cluster_name}-vpc" # VPC name

cidr = "10.0.0.0/16"

azs = [ # span three availability zones for high availability
"${var.aws_region}a",
"${var.aws_region}b",
"${var.aws_region}c"
]

private_subnets = [ # private subnets: worker nodes run here
"10.0.1.0/24",
"10.0.2.0/24",
"10.0.3.0/24"
]

public_subnets = [ # public subnets: ALB / NAT gateway must live here
"10.0.101.0/24",
"10.0.102.0/24",
"10.0.103.0/24"
]

enable_nat_gateway = true # NAT gateway gives private-subnet nodes outbound internet access

single_nat_gateway = true # one NAT gateway for all AZs instead of one per AZ (cost saving)

enable_dns_hostnames = true # DNS hostnames for instances in the VPC
enable_dns_support = true # DNS resolution inside the VPC

# Key: subnet tags required for EKS/ALB subnet auto-discovery. They tell the
# load balancer controller which subnets host internet-facing vs internal LBs.
public_subnet_tags = {
"kubernetes.io/role/elb" = 1
}
private_subnet_tags = {
"kubernetes.io/role/internal-elb" = 1
}
}


定义 EKS 模块

eks.tf 文件中定义 EKS 模块,用于创建 AWS EKS 集群。

以下配置为 EKS Auto Mode 配置,建议使用。EKS Auto Mode 是 AWS 提供的“完全代管”体验,它将 Karpenter、EBS 驱动、CNI 等全部内置并由 AWS 直接管理。
EKS Auto Mode 无法直接通过 API 从“普通模式”无缝切换到“Auto Mode”,需要在创建集群指定“Auto Mode”。所以,建议在创建集群时指定“Auto Mode”,而不是在后续通过 API 切换。

eks.tf
# EKS cluster running in Auto Mode: AWS manages compute (built-in Karpenter),
# the EBS/CNI add-ons and node lifecycle, so no node groups are declared here.
# NOTE(review): Auto Mode must be chosen at cluster creation — it cannot be
# switched on later via the API (see the prose above this block).
module "eks-6992-hk-uat" {
source = "terraform-aws-modules/eks/aws"
version = "~> 20.28"

cluster_name = var.eks_cluster_name # EKS cluster name
cluster_version = var.eks_cluster_version # Kubernetes control-plane version

vpc_id = module.eks-6992-hk-uat-vpc.vpc_id # VPC to create the cluster in
subnet_ids = module.eks-6992-hk-uat-vpc.private_subnets # worker nodes run in the private subnets


cluster_endpoint_public_access = true # expose the API server endpoint publicly

# Managed-node-group alternative: uses an AWS Auto Scaling Group and requires
# Cluster Autoscaler in-cluster to raise the ASG desired_size when Pods cannot
# be scheduled. Kept for reference only — superseded by EKS Auto Mode
# (released late 2024), AWS's fully managed experience where Karpenter, the
# EBS driver and CNI are built in and managed by AWS.
# eks_managed_node_groups = {
# default = {
# instance_types = ["t3.large"]
# min_size = 1
# max_size = 5
# desired_size = 2
# disk_size = 100 # node EBS volume size in GiB
# }
# }
# Key: enable EKS Auto Mode compute.
cluster_compute_config = {
enabled = true
node_pools = ["general-purpose", "system"]
}


# Required: Auto Mode manages add-ons itself, so skip self-managed addon bootstrap.
bootstrap_self_managed_addons = false

# Grant the identity running Terraform admin access to the cluster, so the
# helm/kubernetes providers can deploy charts immediately after creation.
enable_cluster_creator_admin_permissions = true

# Disable control-plane CloudWatch log export to save cost.
cluster_enabled_log_types = []
}

定义 AWS ALB Controller 模块

aws-alb.tf 文件中定义 AWS ALB Controller 模块,用于创建 AWS ALB Controller。

aws-alb.tf
# IAM role for the AWS Load Balancer Controller service account (IRSA):
# trusts the cluster's OIDC provider and attaches the controller's IAM policy.
module "alb_controller_irsa_role" {
source = "terraform-aws-modules/iam/aws//modules/iam-role-for-service-accounts-eks"
version = "~> 5.39"

role_name = "${var.eks_cluster_name}-alb-controller"

# Attach the managed policy required by the load balancer controller.
attach_load_balancer_controller_policy = true

oidc_providers = {
main = {
provider_arn = module.eks-6992-hk-uat.oidc_provider_arn

# Only this service account may assume the role.
namespace_service_accounts = [
"kube-system:aws-load-balancer-controller"
]
}
}
}

# 2. Deploy the AWS Load Balancer Controller via Helm.

resource "helm_release" "aws_load_balancer_controller" {
name = "aws-load-balancer-controller"
repository = "https://aws.github.io/eks-charts"
chart = "aws-load-balancer-controller"
namespace = "kube-system"
version = "1.14.0" # check for the latest chart version

set {
name = "clusterName"
value = module.eks-6992-hk-uat.cluster_name
}
# --- Pass VPC ID and region explicitly so the controller does not have to
# --- discover them itself (fixes "VPC ID lookup failed" errors). ---
set {
name = "vpcId"
value = module.eks-6992-hk-uat-vpc.vpc_id
}

set {
name = "region"
value = var.aws_region
}
# ----------------------------------------------

set {
name = "serviceAccount.create"
value = "true"
}

set {
name = "serviceAccount.name"
value = "aws-load-balancer-controller"
}

# Bind the IRSA role to the controller's service account (dots escaped for Helm).
set {
name = "serviceAccount.annotations.eks\\.amazonaws\\.com/role-arn"
value = module.alb_controller_irsa_role.iam_role_arn
}
}


定义 EFS 模块

efs.tf 文件中定义 EFS 模块,用于创建 AWS EFS 文件系统。

efs.tf
# Encrypted EFS file system backing dynamically provisioned RWX volumes.
resource "aws_efs_file_system" "eks_efs" {
creation_token = "${var.eks_cluster_name}-efs"

performance_mode = "generalPurpose"
throughput_mode = "bursting"

encrypted = true

tags = {
Name = "${var.eks_cluster_name}-efs"
}
}

# EFS requires one mount target per private subnet (one per AZ).
resource "aws_efs_mount_target" "private" {
count = length(module.eks-6992-hk-uat-vpc.private_subnets)

file_system_id = aws_efs_file_system.eks_efs.id
subnet_id = module.eks-6992-hk-uat-vpc.private_subnets[count.index]

security_groups = [
aws_security_group.efs.id
]
}

# EFS security group: allow NFS (2049/tcp) from anywhere inside the VPC.
resource "aws_security_group" "efs" {
name_prefix = "${var.eks_cluster_name}-efs-sg"

vpc_id = module.eks-6992-hk-uat-vpc.vpc_id

ingress {
from_port = 2049
to_port = 2049
protocol = "tcp"

# Reference the VPC module's CIDR output instead of repeating the literal
# "10.0.0.0/16" from vpc.tf, so this rule cannot drift if the VPC CIDR changes.
cidr_blocks = [module.eks-6992-hk-uat-vpc.vpc_cidr_block]
}

egress {
from_port = 0
to_port = 0
protocol = "-1"

cidr_blocks = ["0.0.0.0/0"]
}
}

# IAM role (IRSA) for the EFS CSI driver controller service account.
module "efs_csi_irsa_role" {
source = "terraform-aws-modules/iam/aws//modules/iam-role-for-service-accounts-eks"
version = "~> 5.39"

role_name = "${var.eks_cluster_name}-efs-csi"

# Attach the managed EFS CSI driver policy.
attach_efs_csi_policy = true

oidc_providers = {
main = {
provider_arn = module.eks-6992-hk-uat.oidc_provider_arn

# Only the CSI controller service account may assume the role.
namespace_service_accounts = [
"kube-system:efs-csi-controller-sa"
]
}
}
}

# Install the AWS EFS CSI driver via Helm.
# NOTE(review): no chart "version" is pinned here, unlike the ALB controller
# release above, so each apply may pull the latest chart — consider pinning.
resource "helm_release" "aws_efs_csi_driver" {
name = "aws-efs-csi-driver"

repository = "https://kubernetes-sigs.github.io/aws-efs-csi-driver/"
chart = "aws-efs-csi-driver"

namespace = "kube-system"


set {
name = "controller.serviceAccount.create"
value = "true"
}

set {
name = "controller.serviceAccount.name"
value = "efs-csi-controller-sa"
}

# Bind the IRSA role to the controller service account (dots escaped for Helm).
set {
name = "controller.serviceAccount.annotations.eks\\.amazonaws\\.com/role-arn"

value = module.efs_csi_irsa_role.iam_role_arn
}
}

# StorageClass for dynamic EFS provisioning via per-volume access points.
resource "kubernetes_storage_class" "efs" {
metadata {
name = "efs-sc"
}

storage_provisioner = "efs.csi.aws.com"

parameters = {
# Dynamic provisioning mode: one EFS access point per PersistentVolume.
provisioningMode = "efs-ap"

fileSystemId = aws_efs_file_system.eks_efs.id

# Permissions of the per-volume root directory.
directoryPerms = "700"

# POSIX GID range assigned to access points.
gidRangeStart = "1000"
gidRangeEnd = "2000"

# All dynamically provisioned directories live under this path on the file system.
basePath = "/dynamic_provisioning"
}

# Keep the data when the PVC is deleted.
reclaim_policy = "Retain"

# Mount targets exist in every private subnet (see aws_efs_mount_target),
# so immediate (non-topology-aware) binding is safe here.
volume_binding_mode = "Immediate"
}

初始化并应用 Terraform 项目

执行以下命令初始化 Terraform 项目

$ terraform init
Initializing modules...
Downloading registry.terraform.io/terraform-aws-modules/vpc/aws 5.8.1 for eks-6992-hk-uat-vpc...
- eks-6992-hk-uat-vpc in .terraform/modules/eks-6992-hk-uat-vpc
...
Initializing the backend...


Terraform has created a lock file .terraform.lock.hcl to record the provider
selections it made above. Include this file in your version control repository
so that Terraform can guarantee to make the same selections by default when
you run "terraform init" in the future.

Terraform has been successfully initialized!

使用命令 terraform plan 查看 Terraform 计划,可以检查是否有报错,并确认是否符合预期。

$ terraform plan

│ Error: Reference to undeclared module

│ on eks.tf line 8, in module "eks-6992-hk-uat":
│ 8: vpc_id = module.vpc.vpc_id # VPC ID,用于创建 EKS 集群的 VPC ID

│ No module call named "vpc" is declared in the root module.


│ Error: Reference to undeclared module

│ on eks.tf line 9, in module "eks-6992-hk-uat":
│ 9: subnet_ids = module.vpc.private_subnets # Private Subnets,节点实例和工作节点实例位于 Private Subnets 中的可用区

│ No module call named "vpc" is declared in the root module.


语法无误的情况下,可以查看 Terraform 计划,确认是否符合预期。

$ terraform plan
# 读取已有数据,包括 AWS 身份标识符(Caller Identity)以及 Terraform 模块的内容
module.eks-6992-hk-uat.module.kms.data.aws_caller_identity.current[0]: Reading...
module.eks-6992-hk-uat.data.aws_caller_identity.current[0]: Reading...
module.eks-6992-hk-uat.module.eks_managed_node_group["default"].data.aws_partition.current: Reading...
module.eks-6992-hk-uat.module.kms.data.aws_partition.current[0]: Reading...
module.eks-6992-hk-uat.data.aws_partition.current[0]: Reading...
module.eks-6992-hk-uat.module.eks_managed_node_group["default"].data.aws_caller_identity.current: Reading...
module.eks-6992-hk-uat.module.eks_managed_node_group["default"].data.aws_partition.current: Read complete after 0s [id=aws]
module.eks-6992-hk-uat.data.aws_caller_identity.current[0]: Read complete after 0s [id=<AWS_ID>]
module.eks-6992-hk-uat.module.eks_managed_node_group["default"].data.aws_iam_policy_document.assume_role_policy[0]: Reading...
module.eks-6992-hk-uat.data.aws_partition.current[0]: Read complete after 0s [id=aws]
module.eks-6992-hk-uat.data.aws_iam_policy_document.assume_role_policy[0]: Reading...
module.eks-6992-hk-uat.module.kms.data.aws_partition.current[0]: Read complete after 0s [id=aws]
module.eks-6992-hk-uat.module.kms.data.aws_caller_identity.current[0]: Read complete after 0s [id=<AWS_ID>]
module.eks-6992-hk-uat.module.eks_managed_node_group["default"].data.aws_iam_policy_document.assume_role_policy[0]: Read complete after 0s [id=2560088296]
module.eks-6992-hk-uat.module.eks_managed_node_group["default"].data.aws_caller_identity.current: Read complete after 0s [id=<AWS_ID>]
module.eks-6992-hk-uat.data.aws_iam_session_context.current[0]: Reading...
module.eks-6992-hk-uat.data.aws_iam_policy_document.assume_role_policy[0]: Read complete after 0s [id=2830595799]
module.eks-6992-hk-uat.data.aws_iam_policy_document.custom[0]: Reading...
module.eks-6992-hk-uat.data.aws_iam_session_context.current[0]: Read complete after 0s [id=arn:aws:iam::<AWS_ID>:user/ops]
module.eks-6992-hk-uat.data.aws_iam_policy_document.custom[0]: Read complete after 0s [id=513122117]

Terraform used the selected providers to generate the following execution plan. Resource actions are indicated with the following symbols:
+ create
<= read (data resources)

# 真正要执行的操作计划
Terraform will perform the following actions:

# module.eks-6992-hk-uat.data.tls_certificate.this[0] will be read during apply
# (config refers to values not yet known)
<= data "tls_certificate" "this" {
+ certificates = (known after apply)
+ id = (known after apply)
+ url = (known after apply)
}

# module.eks-6992-hk-uat.aws_eks_cluster.this[0] will be created
+ resource "aws_eks_cluster" "this" {
+ arn = (known after apply)
+ bootstrap_self_managed_addons = true
+ certificate_authority = (known after apply)
+ cluster_id = (known after apply)
...
+ tags = {
+ "terraform-aws-modules" = "eks"
}
+ tags_all = {
+ "terraform-aws-modules" = "eks"
}
+ version = "1.35"

+ access_config {
+ authentication_mode = "API_AND_CONFIG_MAP"
+ bootstrap_cluster_creator_admin_permissions = false
}

+ encryption_config {
+ resources = [
+ "secrets",
...
}
}

# module.eks-6992-hk-uat.aws_iam_openid_connect_provider.oidc_provider[0] will be created
+ resource "aws_iam_openid_connect_provider" "oidc_provider" {
+ arn = (known after apply)
...
}

# module.eks-6992-hk-uat.aws_iam_policy.cluster_encryption[0] will be created
+ resource "aws_iam_policy" "cluster_encryption" {
...
}

# module.eks-6992-hk-uat.aws_iam_policy.custom[0] will be created
+ resource "aws_iam_policy" "custom" {
+ arn = (known after apply)
+ attachment_count = (known after apply)
+ id = (known after apply)
+ name = (known after apply)
+ name_prefix = "eks-6992-hk-uat-cluster-"
+ path = "/"
+ policy = jsonencode(
{
...
}


# module.eks-6992-hk-uat.aws_iam_role_policy_attachment.this["AmazonEKSVPCResourceController"] will be created
+ resource "aws_iam_role_policy_attachment" "this" {
...
}

# module.eks-6992-hk-uat.aws_security_group.cluster[0] will be created
+ resource "aws_security_group" "cluster" {
...
+ revoke_rules_on_delete = false
+ tags = {
+ "Name" = "eks-6992-hk-uat-cluster"
}
+ tags_all = {
+ "Name" = "eks-6992-hk-uat-cluster"
}
+ vpc_id = (known after apply)
}

# module.eks-6992-hk-uat.aws_security_group.node[0] will be created
+ resource "aws_security_group" "node" {
...
+ revoke_rules_on_delete = false
+ tags = {
+ "Name" = "eks-6992-hk-uat-node"
+ "kubernetes.io/cluster/eks-6992-hk-uat" = "owned"
}
+ tags_all = {
+ "Name" = "eks-6992-hk-uat-node"
+ "kubernetes.io/cluster/eks-6992-hk-uat" = "owned"
}
+ vpc_id = (known after apply)
}


# module.eks-6992-hk-uat-vpc.aws_eip.nat[0] will be created
+ resource "aws_eip" "nat" {
...
}
+ tags_all = {
+ "Name" = "eks-6992-hk-uat-vpc-ap-east-1-1a"
}
+ vpc = (known after apply)
}




# module.eks-6992-hk-uat.module.kms.data.aws_iam_policy_document.this[0] will be read during apply
# (config refers to values not yet known)
<= data "aws_iam_policy_document" "this" {
...

Plan: 58 to add, 0 to change, 0 to destroy.

───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────

Note: You didn't use the -out option to save this plan, so Terraform can't guarantee to take exactly these actions if you run "terraform apply" now.

执行 terraform apply 来应用计划中的操作。如果执行过程中有错误,根据错误提示解决即可。

$ terraform apply
...
Plan: 58 to add, 0 to change, 0 to destroy.

Do you want to perform these actions?
Terraform will perform the actions described above.
Only 'yes' will be accepted to approve.

Enter a value: yes
module.eks-6992-hk-uat-vpc.aws_vpc.this[0]: Creating...
module.eks-6992-hk-uat.aws_cloudwatch_log_group.this[0]: Creating...
module.eks-6992-hk-uat.aws_iam_policy.custom[0]: Creating...
module.eks-6992-hk-uat.module.eks_managed_node_group["default"].aws_iam_role.this[0]: Creating...
module.eks-6992-hk-uat.aws_iam_role.this[0]: Creating...
...

部署成功后,查看 EKS 集群

$ aws eks list-clusters
{
"clusters": [
"eks-6992-hk-uat"
]
}

为集群创建或更新 kubeconfig 文件用于和集群通信

$ aws eks update-kubeconfig --name eks-6992-hk-uat

$ kubectl get nodes
NAME STATUS ROLES AGE VERSION
i-057a6a23baca521b9 Ready <none> 34m v1.35.2-eks-f69f56f
i-079349c2cf9fbaf68 Ready <none> 34m v1.35.2-eks-f69f56f

$ kubectl get pods -A
NAMESPACE NAME READY STATUS RESTARTS AGE
kube-system aws-load-balancer-controller-dbb7579b9-7mppr 1/1 Running 0 96m
kube-system aws-load-balancer-controller-dbb7579b9-vrqwd 1/1 Running 0 96m
kube-system efs-csi-controller-7b8bdc5484-jpmm2 3/3 Running 0 7m42s
kube-system efs-csi-controller-7b8bdc5484-ktr2f 3/3 Running 0 7m42s
kube-system efs-csi-node-6lnmm 3/3 Running 0 7m42s
kube-system efs-csi-node-xd87k 3/3 Running 0 7m42s


$ kubectl get csidriver
NAME ATTACHREQUIRED PODINFOONMOUNT STORAGECAPACITY TOKENREQUESTS REQUIRESREPUBLISH MODES AGE
ebs.csi.eks.amazonaws.com true false false <unset> false Persistent 3h48m
efs.csi.aws.com false false false <unset> false Persistent 4m45s

$ kubectl get storageclass
NAME PROVISIONER RECLAIMPOLICY VOLUMEBINDINGMODE ALLOWVOLUMEEXPANSION AGE
efs-sc efs.csi.aws.com Retain Immediate true 3m8s
gp2 kubernetes.io/aws-ebs Delete WaitForFirstConsumer false 3h49m

参考以下配置创建 Nginx Pod 测试 ALB Controller 是否工作正常

alb-test.yaml
# Test workload for the ALB controller: Deployment + ClusterIP Service + Ingress.
# (Indentation restored — the original paste had lost all YAML nesting.)
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-test
spec:
  replicas: 2
  selector:
    matchLabels:
      app: nginx-test
  template:
    metadata:
      labels:
        app: nginx-test
    spec:
      containers:
        - name: nginx
          image: nginx:1.27
          ports:
            - containerPort: 80
---
apiVersion: v1
kind: Service
metadata:
  name: nginx-test
spec:
  selector:
    app: nginx-test
  ports:
    - port: 80
      targetPort: 80
  type: ClusterIP
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: nginx-test
  annotations:
    # Legacy class annotation (deprecated since K8s 1.18); kept here alongside
    # spec.ingressClassName for compatibility with older controller versions.
    kubernetes.io/ingress.class: alb
    # internet-facing: public ALB
    alb.ingress.kubernetes.io/scheme: internet-facing
    # IP target mode (required for EKS Auto Mode)
    alb.ingress.kubernetes.io/target-type: ip
    # ALB listener ports
    alb.ingress.kubernetes.io/listen-ports: '[{"HTTP":80}]'
spec:
  ingressClassName: alb
  rules:
    - http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: nginx-test
                port:
                  number: 80


部署后检查,等待 ALB 部署成功后,访问 ALB 地址,查看是否能正常访问 Nginx Pod 页面

$ kubectl get all,ingress -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
pod/nginx-test-56548c9dbc-4krsc 1/1 Running 0 4m46s 10.0.2.192 i-079349c2cf9fbaf68 <none> <none>
pod/nginx-test-56548c9dbc-z84sk 1/1 Running 0 4m46s 10.0.2.112 i-057a6a23baca521b9 <none> <none>

NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE SELECTOR
service/kubernetes ClusterIP 172.20.0.1 <none> 443/TCP 178m <none>
service/nginx-test ClusterIP 172.20.2.227 <none> 80/TCP 4m46s app=nginx-test

NAME READY UP-TO-DATE AVAILABLE AGE CONTAINERS IMAGES SELECTOR
deployment.apps/nginx-test 2/2 2 2 4m46s nginx nginx:1.27 app=nginx-test

NAME DESIRED CURRENT READY AGE CONTAINERS IMAGES SELECTOR
replicaset.apps/nginx-test-56548c9dbc 2 2 2 4m46s nginx nginx:1.27 app=nginx-test,pod-template-hash=56548c9dbc

NAME CLASS HOSTS ADDRESS PORTS AGE
ingress.networking.k8s.io/nginx-test alb * k8s-default-nginxtes-a59dc38148-1861994834.ap-east-1.elb.amazonaws.com 80 4m46s

创建 Pod 测试 EFS 和 PVC 挂载是否正常

efs-test.yaml
# Test for EFS dynamic provisioning: RWX PVC bound to efs-sc, mounted by a Pod.
# (Indentation restored — the original paste had lost all YAML nesting.)
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: efs-pvc
spec:
  accessModes:
    - ReadWriteMany
  storageClassName: efs-sc
  resources:
    requests:
      storage: 5Gi
---
apiVersion: v1
kind: Pod
metadata:
  name: efs-test
spec:
  containers:
    - name: app
      image: nginx
      volumeMounts:
        - mountPath: /data
          name: efs-volume
  volumes:
    - name: efs-volume
      persistentVolumeClaim:
        claimName: efs-pvc

正常部署后,检查 Pod 以及对应的 PVC 是否正常运行,进入 Pod 测试读写是否正常

$ kubectl get pod 
NAME READY STATUS RESTARTS AGE
efs-test 1/1 Running 0 113s

$ kubectl get pvc
NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS VOLUMEATTRIBUTESCLASS AGE
efs-pvc Bound pvc-dc224e47-0365-46d9-9c5b-06e5cb1fb95b 5Gi RWX efs-sc <unset> 2m8s