Mirror of https://gitea.com/mcereda/oam.git
feat(pulumi): show how to create eks clusters in pulumi

examples/pulumi/aws eks cluster/.gitignore (new file, vendored, 2 lines)
@@ -0,0 +1,2 @@
```
/bin/
/node_modules/
```

examples/pulumi/aws eks cluster/Pulumi.any.yaml (new file, 7 lines)
@@ -0,0 +1,7 @@
```yaml
encryptionsalt: v1:iA3XmpOvico=:v1:XCz1yi4Ve/38v0D/:9v7XaYgqAGnZ4FJqGIaNT9uaFFec5Q==
config:
  aws:defaultTags:
    tags:
      ManagedBy: pulumi
      PulumiProject: eks-cluster
  aws:region: eu-west-1
```

examples/pulumi/aws eks cluster/Pulumi.yaml (new file, 7 lines)
@@ -0,0 +1,7 @@
```yaml
name: aws-eks-cluster
runtime: nodejs
description: A minimal EKS cluster in AWS
config:
  pulumi:tags:
    value:
      pulumi:template: aws-typescript
```

examples/pulumi/aws eks cluster/index.ts (new file, 299 lines)
@@ -0,0 +1,299 @@
```ts
/**
 * Follows https://docs.aws.amazon.com/eks/latest/userguide/getting-started-console.html.
 * Multiple methods of creating EKS clusters at some point try to create 2
 * CloudFormation stacks.
 * The fock, AWS? (╯°Д°)╯︵/(.□ . \)
 *
 * Minimum resource requirements
 * -----------------------------------------------------------------------------
 * - IAM service roles:
 *   - 1 to manage the cluster, with 'AmazonEKSClusterPolicy' policy
 *   - 1 to manage EC2 worker nodes (if using them), with policies
 *     - 'AmazonEKS_CNI_Policy'
 *     - 'AmazonEKSWorkerNodePolicy'
 *     - 'AmazonEC2ContainerRegistryReadOnly'
 *     - 'AmazonSSMManagedInstanceCore' (optional, for troubleshooting)
 *   - 1 to manage fargate resources (if using them), with
 *     'AmazonEKSFargatePodExecutionRolePolicy' policy
 * - The control plane
 * - 1 executor for pods (EC2, managed EC2 or Fargate)
 * - 1 access entry with assigned EKS access policy (if using APIs for
 *   authentication)
 **/

import * as pulumi from "@pulumi/pulumi";
import * as aws from "@pulumi/aws";

const callerIdentity = aws.getCallerIdentity({});
const subnetIds = [
    "subnet-0123456789abcdef0",  // private, eu-west-1a
    "subnet-123456789abcdef01",  // private, eu-west-1b
];


/**
 * Custom Cluster Service Role
 * -----------------------------------------------------------------------------
 * Required to use other AWS resources (the KMS key for encryption).
 * Intended to be only used by this cluster.
 * Creation took 1s on average.
 * Deletion took 1s on average.
 **/

const cluster_assumeRole_policy = JSON.stringify({
    Version: "2012-10-17",
    Statement: [{
        Effect: "Allow",
        Action: "sts:AssumeRole",
        Principal: {
            Service: "eks.amazonaws.com",
        },
    }],
});

const cluster_serviceRole_role = new aws.iam.Role("cluster-serviceRole-role", {
    description: "Allows EKS to manage clusters on behalf of the user.",
    tags: {
        Description: "EKS cluster service role",
        EksComponent: "Cluster service role",
        RoleType: "ServiceRole",
    },

    assumeRolePolicy: cluster_assumeRole_policy,
});

const cluster_serviceRole_rolePolicyAttachment = new aws.iam.RolePolicyAttachment("cluster-serviceRole-rolePolicyAttachment", {
    role: cluster_serviceRole_role.name,
    policyArn: "arn:aws:iam::aws:policy/AmazonEKSClusterPolicy",
});


/**
 * Cluster.
 * Better defined as the control plane and its dedicated nodes.
 * -----------------------------------------------------------------------------
 * Gotchas:
 * - 'vpcConfig.clusterSecurityGroupId' cannot be customized.
 *   Should one try and set it, one will get the following error message:
 *   'error: aws:eks/cluster:Cluster resource 'cluster' has a problem: Value for
 *   unconfigurable attribute. Can't configure a value for
 *   "vpc_config.0.cluster_security_group_id": its value will be decided
 *   automatically based on the result of applying this configuration.'
 * Creation took 426s on average (382, 402, 454, 423, 371, 523, 422).
 * Deletion took 167s on average (70, 125, 149, 167, 320, 167).
 **/

const k8s_version = "1.29";  // https://docs.aws.amazon.com/eks/latest/userguide/kubernetes-versions.html

const cluster = new aws.eks.Cluster("cluster", {
    tags: {
        Description: "Test EKS cluster",
        EksComponent: "Control Plane and associated nodes",
    },

    vpcConfig: {
        subnetIds: subnetIds,
        endpointPrivateAccess: true,
    },
    version: k8s_version,
    roleArn: cluster_serviceRole_role.arn,
    accessConfig: {
        authenticationMode: "API",
    },
    enabledClusterLogTypes: [
        // https://docs.aws.amazon.com/eks/latest/userguide/control-plane-logs.html
        // "api",
        // "audit",
        // "authenticator",
        // "controllerManager",
        // "scheduler",
    ],
});


/**
 * Access management.
 * ------------------
 * Refer to https://docs.aws.amazon.com/eks/latest/userguide/access-entries.html.
 * Creation took 1s on average.
 * Deletion took 1s on average.
 **/

const cluster_admin_accessEntry = new aws.eks.AccessEntry("cluster-admin-accessEntry", {
    clusterName: cluster.name,

    principalArn: callerIdentity.then(callerIdentity => callerIdentity.arn),
    kubernetesGroups: [
        // No 'system:…', 'amazon:…', 'aws:…', 'eks:…' nor 'iam:…'.
        // See reference page.
    ],
});

const cluster_admin_accessPolicyAssociation = new aws.eks.AccessPolicyAssociation("cluster-admin-accessPolicyAssociation", {
    clusterName: cluster.name,
    policyArn: "arn:aws:eks::aws:cluster-access-policy/AmazonEKSClusterAdminPolicy",
    principalArn: callerIdentity.then(callerIdentity => callerIdentity.arn),
    accessScope: {
        type: "cluster",
    },
});


/**
 * Custom EC2 Worker Node Service Role
 * -----------------------------------------------------------------------------
 * Required to use EC2 instances as worker nodes.
 * Intended to be only used by this cluster.
 * Creation took 1s on average.
 * Deletion took 1s on average.
 **/

const node_assumeRole_policy = JSON.stringify({
    Version: "2012-10-17",
    Statement: [{
        Effect: "Allow",
        Action: "sts:AssumeRole",
        Principal: {
            Service: "ec2.amazonaws.com",
        },
    }],
});

const node_service_role = new aws.iam.Role("node-service-role", {
    description: "Allows EKS to manage EC2 instances on behalf of the user.",
    tags: {
        Description: "EC2 node service role",
        EksComponent: "EC2 node service role",
        RoleType: "ServiceRole",
    },

    assumeRolePolicy: node_assumeRole_policy,
});

const node_service_rolePolicyAttachment_cni = new aws.iam.RolePolicyAttachment("node-service-rolePolicyAttachment-cni", {
    role: node_service_role.name,
    policyArn: "arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy",
});
const node_service_rolePolicyAttachment_ecr = new aws.iam.RolePolicyAttachment("node-service-rolePolicyAttachment-ecr", {
    role: node_service_role.name,
    policyArn: "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly",
});
const node_service_rolePolicyAttachment_workerNode = new aws.iam.RolePolicyAttachment("node-service-rolePolicyAttachment-workerNode", {
    role: node_service_role.name,
    policyArn: "arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy",
});

// SSM and troubleshooting
const node_service_rolePolicyAttachment_ssm = new aws.iam.RolePolicyAttachment("node-service-rolePolicyAttachment-ssm", {
    role: node_service_role.name,
    policyArn: "arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore",
});


/**
 * EC2 worker nodes
 * -----------------------------------------------------------------------------
 * Creation took 102s on average (86, 117) with dedicated (non-SPOT) instances.
 * Deletion took 286s on average (153, 136, 136, 136, 502, 502, 431).
 **/

const nodeGroup = new aws.eks.NodeGroup("nodeGroup", {
    clusterName: cluster.name,
    tags: {
        Description: "EC2 node group",
        EksComponent: "EC2 node group",
    },

    nodeRoleArn: node_service_role.arn,
    subnetIds: cluster.vpcConfig.subnetIds,
    capacityType: "SPOT",
    instanceTypes: [
        // https://docs.aws.amazon.com/eks/latest/userguide/choosing-instance-type.html
        // https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html#AvailableInstanceTypes
        "t3a.medium",
        "t3.medium",
    ],
    scalingConfig: {
        minSize: 1,
        maxSize: 3,
        desiredSize: 2,
    },
    updateConfig: {
        // either the number OR the percentage
        // maxUnavailable: 1,
        maxUnavailablePercentage: 50,
    },
});


/**
 * Custom Fargate Service Role
 * -----------------------------------------------------------------------------
 * Required to use Fargate as worker node.
 * Intended to be only used by this cluster.
 * Creation took 1s on average.
 * Deletion took 1s on average.
 **/

const regionOutput = aws.getRegionOutput({});
const sourceArnOutput = pulumi.all([
    callerIdentity.then(callerIdentity => callerIdentity.accountId),
    cluster.name,
    regionOutput.apply(region => region.id),
]).apply(([
    accountId,
    clusterName,
    regionId,
]) => `arn:aws:eks:${regionId}:${accountId}:fargateprofile/${clusterName}/*`);

const fargate_serviceRole_role = new aws.iam.Role("fargate-service-role", {
    description: "Allows EKS to manage fargate pods on behalf of the user.",
    tags: {
        Description: "EC2 fargate service role",
        EksComponent: "EC2 fargate service role",
        RoleType: "ServiceRole",
    },

    assumeRolePolicy: sourceArnOutput.apply(sourceArn => JSON.stringify({
        Version: "2012-10-17",
        Statement: [{
            Effect: "Allow",
            Action: "sts:AssumeRole",
            Principal: {
                Service: "eks-fargate-pods.amazonaws.com",
            },
            Condition: {
                ArnLike: {
                    "aws:SourceArn": sourceArn,
                },
            },
        }],
    })),
});

const fargate_serviceRole_rolePolicyAttachment = new aws.iam.RolePolicyAttachment("fargate-serviceRole-rolePolicyAttachment", {
    role: fargate_serviceRole_role.name,
    policyArn: "arn:aws:iam::aws:policy/AmazonEKSFargatePodExecutionRolePolicy",
});


/**
 * Fargate profile
 * -----------------------------------------------------------------------------
 * Creation took 98s on average (93, 76, 123).
 * Deletion took 105s on average (82, 95, 146, 95).
 **/

const fargateProfile = new aws.eks.FargateProfile("fargateProfile", {
    clusterName: cluster.name,
    tags: {
        Description: "Fargate profile",
        EksComponent: "Fargate profile",
    },

    podExecutionRoleArn: fargate_serviceRole_role.arn,
    subnetIds: cluster.vpcConfig.subnetIds,
    selectors: [
        { namespace: "default" },
        { namespace: "kube-system" },
    ],
});
```

examples/pulumi/aws eks cluster/package-lock.json (generated, new file, 5303 lines)

File diff suppressed because it is too large.

examples/pulumi/aws eks cluster/package.json (new file, 12 lines)
@@ -0,0 +1,12 @@
```json
{
    "name": "aws-eks-cluster",
    "main": "index.ts",
    "devDependencies": {
        "@types/node": "^18"
    },
    "dependencies": {
        "@pulumi/pulumi": "^3.0.0",
        "@pulumi/aws": "^6.0.0",
        "@pulumi/awsx": "^2.0.2"
    }
}
```

examples/pulumi/aws eks cluster/tsconfig.json (new file, 18 lines)
@@ -0,0 +1,18 @@
```json
{
    "compilerOptions": {
        "strict": true,
        "outDir": "bin",
        "target": "es2016",
        "module": "commonjs",
        "moduleResolution": "node",
        "sourceMap": true,
        "experimentalDecorators": true,
        "pretty": true,
        "noFallthroughCasesInSwitch": true,
        "noImplicitReturns": true,
        "forceConsistentCasingInFileNames": true
    },
    "files": [
        "index.ts"
    ]
}
```
@@ -16,62 +16,88 @@
## TL;DR

When one creates a [_cluster_][amazon eks clusters], one really only creates the cluster's control plane and the
dedicated nodes underneath it.<br/>
Worker nodes can consist of any combination of [self-managed nodes], [managed node groups] and [Fargate], and depend on
the control plane.

EKS automatically installs some [self-managed add-ons][amazon eks add-ons] like the AWS VPC CNI plugin, `kube-proxy` and
CoreDNS.<br/>
Disable them in the cluster's definition, as sketched below.

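A minimal sketch of opting out of those default add-ons at creation time, assuming the `bootstrapSelfManagedAddons`
input available in recent `@pulumi/aws` 6.x releases; the role and subnets stand in for the ones defined in `index.ts`
above:

```ts
import * as aws from "@pulumi/aws";

// Assumed to exist, as in 'index.ts' above.
declare const cluster_serviceRole_role: aws.iam.Role;
declare const subnetIds: string[];

const cluster = new aws.eks.Cluster("cluster", {
    roleArn: cluster_serviceRole_role.arn,
    vpcConfig: { subnetIds: subnetIds },
    // Assumption: available in recent '@pulumi/aws' 6.x releases.
    // Skips the default VPC CNI, kube-proxy and CoreDNS add-ons so they can be
    // managed explicitly, e.g. with 'aws.eks.Addon' resources.
    bootstrapSelfManagedAddons: false,
});
```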

Upon cluster creation, EKS
[automatically creates a security group][amazon eks security group requirements and considerations] and applies it to
both the control plane and nodes.<br/>
This security group can be neither avoided nor customized in the cluster's definition (e.g. using IaC tools like
[Pulumi] or [Terraform]):

> ```txt
> error: aws:eks/cluster:Cluster resource 'cluster' has a problem: Value for unconfigurable attribute. Can't configure a value for "vpc_config.0.cluster_security_group_id": its value will be decided automatically based on the result of applying this configuration.
> ```

For some reason, giving resources a tag like `aws:eks:cluster-name=value` succeeds, but has no effect (it is not really
applied).

By default, the IAM principal creating the cluster is the only one able to make calls to the cluster's API server.<br/>
To let other IAM principals have access to the cluster, one needs to add them to it. See [access management].

<details>
<summary>Usage</summary>

```sh
# Create clusters.
aws eks create-cluster \
  --name 'DeepThought' \
  --role-arn 'arn:aws:iam::000011112222:role/aws-service-role/eks.amazonaws.com/AWSServiceRoleForAmazonEKS' \
  --resources-vpc-config 'subnetIds=subnet-11112222333344445,subnet-66667777888899990'
aws eks create-cluster … --access-config 'authenticationMode=API'

# Check cluster's authentication mode.
aws eks describe-cluster --name 'DeepThought' --query 'cluster.accessConfig.authenticationMode' --output 'text'

# Change encryption configuration.
aws eks associate-encryption-config \
  --cluster-name 'DeepThought' \
  --encryption-config '[{
    "provider": { "keyArn": "arn:aws:kms:eu-west-1:000011112222:key/33334444-5555-6666-7777-88889999aaaa" },
    "resources": [ "secrets" ]
  }]'

# Create access entries to use IAM for authentication.
aws eks create-access-entry --cluster-name 'DeepThought' \
  --principal-arn 'arn:aws:iam::000011112222:role/Admin'
aws eks create-access-entry … --principal-arn 'arn:aws:iam::000011112222:user/bob'

# List available access policies.
aws eks list-access-policies

# Associate policies to access entries.
aws eks associate-access-policy --cluster-name 'DeepThought' \
  --principal-arn 'arn:aws:iam::000011112222:role/Admin' \
  --policy-arn 'arn:aws:eks::aws:cluster-access-policy/AmazonEKSClusterAdminPolicy' \
  --access-scope '{ "type": "cluster" }'

# Connect to clusters.
aws eks update-kubeconfig --name 'DeepThought' && kubectl cluster-info


# Create EC2 node groups.
aws eks create-nodegroup \
  --cluster-name 'DeepThought' \
  --nodegroup-name 'alpha' \
  --scaling-config 'minSize=1,maxSize=3,desiredSize=1' \
  --node-role-arn 'arn:aws:iam::000011112222:role/DeepThinkerNodeRole' \
  --subnets 'subnet-11112222333344445' 'subnet-66667777888899990'

# Create Fargate profiles.
aws eks create-fargate-profile \
  --cluster-name 'DeepThought' \
  --fargate-profile-name 'alpha' \
  --pod-execution-role-arn 'arn:aws:iam::000011112222:role/DeepThinkerFargate' \
  --subnets 'subnet-11112222333344445' 'subnet-66667777888899990' \
  --selectors 'namespace=string'
```

</details>
@@ -83,20 +109,30 @@ To let other IAM principals have access to the cluster, one needs to add them to

## Requirements

- \[suggestion] 1 (one) custom _Cluster Service Role_ with the `AmazonEKSClusterPolicy` IAM policy attached or similar
  custom permissions.

  <details style="margin-bottom: 1em;">

  Kubernetes clusters managed by EKS make calls to other AWS services on the user's behalf to manage the resources that
  the cluster uses.<br/>
  For a cluster to be allowed to make those calls, it **requires** the aforementioned permissions.

  To create clusters which would **not** require access to any other AWS resource, one can assign the cluster the
  `AWSServiceRoleForAmazonEKS` service-linked role directly <sup>[1][service-linked role permissions for amazon eks],
  [2][amazon eks cluster iam role]</sup>; see the sketch at the end of this list.

  > Amazon EKS uses the service-linked role named `AWSServiceRoleForAmazonEKS` - The role allows Amazon EKS to manage
  > clusters in your account. The attached policies allow the role to manage the following resources: network
  > interfaces, security groups, logs, and VPCs.
  >
  > ---
  >
  > Prior to October 3, 2023, [AmazonEKSClusterPolicy] was required on the IAM role for each cluster.
  >
  > Prior to April 16, 2020, [AmazonEKSServicePolicy] was also required and the suggested name was `eksServiceRole`.
  > With the `AWSServiceRoleForAmazonEKS` service-linked role, that policy is no longer required for clusters created on
  > or after April 16, 2020.

  <div class="tip" style="
    background-color: rgba(0,255,0,0.0625);
@@ -106,26 +142,39 @@ To let other IAM principals have access to the cluster, one needs to add them to
    ">
  <header style="font-weight: bold; margin-bottom: 0.5em">Pro tip</header>

  Should one want to use more advanced features like [encryption with managed keys][secrets encryption through kms], the
  role will need access to the referenced resources.<br/>
  In this case it would probably be better to create a custom role instead of assigning permissions to the built-in one.

  </div>

  </details>

- \[suggestion] 1+ (one or more) custom service role(s) for the pod executors, with the required policies attached or
  similar permissions.

  The reasons and required permissions vary depending on the type of executor.<br/>
  It would probably be better to create a custom role instead of assigning permissions to the built-in one.

  See the corresponding section under [Create worker nodes].

- 1+ (one or more) executor(s) for pods.<br/>
  See the [Create worker nodes] section.

- \[if using APIs for authentication] 1+ (one or more) access entry (/entries) with an EKS access policy assigned.

- _Private_ clusters have [more special requirements][private cluster requirements] of their own.

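As referenced in the first requirement above, a hedged sketch of creating that service-linked role with Pulumi, in case
it does not exist in the account yet (AWS normally creates it automatically with the first cluster):

```ts
import * as aws from "@pulumi/aws";

// Service-linked role allowing EKS to manage clusters in the account.
// If AWS already created it automatically, import the existing role into the
// stack instead of creating a new one.
const eksServiceLinkedRole = new aws.iam.ServiceLinkedRole("eks", {
    awsServiceName: "eks.amazonaws.com",
});
```
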
## Creation procedure

The Internet is full of guides and abstractions which do not work, are confusing, or rely on other code.<br/>
Some create Cloudformation stacks in the process. Follow the
[Getting started guide][getting started with amazon eks - aws management console and aws cli] to avoid issues.

This is what worked:

1. Create a VPC, if one does not already have one, with public and private subnets that meet
   [EKS' requirements][amazon eks vpc and subnet requirements and considerations].

   [Example in Cloudformation](https://s3.us-west-2.amazonaws.com/amazon-eks/cloudformation/2020-10-29/amazon-eks-vpc-private-subnets.yaml)

@@ -159,7 +208,7 @@ Some create Cloudformation stacks in the process.

   </details>

   <details style="margin-bottom: 1em;">
   <summary>Example in Pulumi</summary>

   ```ts
@@ -186,9 +235,9 @@ Some create Cloudformation stacks in the process.
   ```

   </details>
   <br/>

1. Create the cluster.<br/>
   Make sure you give it the correct cluster service role.

   <details>
   <summary>Example in CLI</summary>
@@ -202,7 +251,7 @@ Some create Cloudformation stacks in the process.

   </details>

   <details style="margin-bottom: 1em;">
   <summary>Example in Pulumi</summary>

   ```ts
@@ -220,7 +269,6 @@ Some create Cloudformation stacks in the process.
   ```

   </details>
   <br/>

1. [Give access to users][access management].
1. Connect to the cluster.
@@ -235,11 +283,12 @@ Some create Cloudformation stacks in the process.

   ```

1. [Create some worker nodes][create worker nodes].
1. TODO
1. Profit!

## Create worker nodes

See [step 3](https://docs.aws.amazon.com/eks/latest/userguide/getting-started-console.html#eks-launch-workers) of the
[getting started guide][getting started with amazon eks - aws management console and aws cli].

### Create managed node groups
@@ -247,21 +296,25 @@ See [Choosing an Amazon EC2 instance type] and [Managed node groups] for more in

Additional requirements:

- \[suggestion] 1 (one) custom _Node Service Role_ with the `AmazonEKSWorkerNodePolicy`,
  `AmazonEC2ContainerRegistryReadOnly` and `AmazonEKS_CNI_Policy` policies attached or similar permissions.

  The EKS nodes' `kubelet` makes calls to the AWS APIs on one's behalf.<br/>
  Nodes receive permissions for these API calls through an IAM instance profile and associated policies.

  For a node to be allowed to make those calls, it **requires** the aforementioned permissions.

- When deploying a managed node group in **private** subnets, one must ensure that it can access Amazon ECR for pulling
  container images.<br/>
  Do this by connecting a NAT gateway to the route table of the subnet, or by adding the following AWS PrivateLink VPC
  endpoints (see the sketch after this list):

  - Amazon ECR API endpoint interface: `com.amazonaws.{region}.ecr.api`.
  - Amazon ECR Docker registry API endpoint interface: `com.amazonaws.{region}.ecr.dkr`.
  - Amazon S3 gateway endpoint: `com.amazonaws.{region}.s3`.

- If the nodes are to be created in private subnets, the cluster
  [**must** provide its private API server endpoint][private cluster requirements].<br/>
  Set the cluster's `vpc_config.0.endpoint_private_access` attribute to `true`.

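A sketch of the PrivateLink endpoints listed above, assuming the VPC, private subnets and route tables already exist;
`vpcId`, `privateSubnetIds` and `privateRouteTableIds` are hypothetical placeholders, and the interface endpoints'
security groups are left out:

```ts
import * as aws from "@pulumi/aws";

// Hypothetical placeholders for pre-existing network resources.
declare const vpcId: string;
declare const privateSubnetIds: string[];
declare const privateRouteTableIds: string[];

const region = "eu-west-1";

// Interface endpoints for the ECR APIs.
for (const service of ["ecr.api", "ecr.dkr"]) {
    new aws.ec2.VpcEndpoint(`${service.replace(".", "-")}-endpoint`, {
        vpcId: vpcId,
        serviceName: `com.amazonaws.${region}.${service}`,
        vpcEndpointType: "Interface",
        subnetIds: privateSubnetIds,
        privateDnsEnabled: true,
    });
}

// Gateway endpoint for S3, used by ECR to serve image layers.
new aws.ec2.VpcEndpoint("s3-endpoint", {
    vpcId: vpcId,
    serviceName: `com.amazonaws.${region}.s3`,
    vpcEndpointType: "Gateway",
    routeTableIds: privateRouteTableIds,
});
```
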
Procedure:
@@ -304,7 +357,7 @@ Procedure:

   </details>

   <details style="margin-bottom: 1em;">
   <summary>Example in Pulumi</summary>

   ```ts
@@ -333,7 +386,6 @@ Procedure:
   ```

   </details>
   <br/>

1. Create the desired node groups.

@@ -375,12 +427,14 @@ Procedure:

Additional requirements:

- \[suggestion] 1 (one) custom _Fargate Service Role_ with the `AmazonEKSFargatePodExecutionRolePolicy` policy attached
  or similar permissions.

  To create pods on Fargate, the components running on Fargate must make calls to the AWS APIs on one's behalf.<br/>
  This is so that they can take actions such as pulling container images from ECR or routing logs to other AWS services.

  For a cluster to be allowed to make those calls, it **requires** a Fargate profile assigned, and this profile
  must use a role with:

  - The `AmazonEKSFargatePodExecutionRolePolicy` policy attached to it, or
  - Comparable permissions.

@@ -426,7 +480,7 @@ Procedure:

   </details>

   <details style="margin-bottom: 1em;">
   <summary>Example in Pulumi</summary>

   ```ts
@@ -458,7 +512,6 @@ Procedure:
   ```

   </details>
   <br/>

1. Create the desired Fargate profiles.

@@ -497,6 +550,8 @@ Procedure:

## Access management

The current default authentication method is through the `aws-auth` configmap in the `kube-system` namespace.

By default, the IAM principal creating the cluster is the only one able to make calls to the cluster's API server.<br/>
To let other IAM principals have access to the cluster, one needs to add them to it.

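With Pulumi, the authentication mode is chosen on the cluster resource itself; a minimal sketch, reusing the role and
subnets defined in `index.ts` above:

```ts
import * as aws from "@pulumi/aws";

// Assumed to exist, as in 'index.ts' above.
declare const cluster_serviceRole_role: aws.iam.Role;
declare const subnetIds: string[];

const cluster = new aws.eks.Cluster("cluster", {
    roleArn: cluster_serviceRole_role.arn,
    vpcConfig: { subnetIds: subnetIds },
    accessConfig: {
        // "CONFIG_MAP" (the current default), "API_AND_CONFIG_MAP" or "API".
        authenticationMode: "API_AND_CONFIG_MAP",
    },
});
```
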
@@ -508,6 +563,43 @@ See the following to allow others:

- [How do I resolve the error "You must be logged in to the server (Unauthorized)" when I connect to the Amazon EKS API server?]
- [Identity and Access Management]
- [Using IAM Groups to manage Kubernetes cluster access]
- [Simplified Amazon EKS Access - NEW Cluster Access Management Controls]

When a cluster's authentication mode includes the APIs:

```sh
# Check cluster's authentication mode.
$ aws eks describe-cluster --name 'thisIsBananas' --query 'cluster.accessConfig.authenticationMode' --output 'text'
API_AND_CONFIG_MAP
```

One can use access entries to allow IAM users and roles to connect to it:

```sh
# Create access entries to use IAM for authentication.
aws eks create-access-entry --cluster-name 'DeepThought' \
  --principal-arn 'arn:aws:iam::000011112222:role/Admin'
aws eks create-access-entry … --principal-arn 'arn:aws:iam::000011112222:user/bob'
```

> In case the configmap is also used, the APIs take precedence over it.

Mind that, to allow operations inside the cluster, every access entry must be assigned an EKS access policy:

```sh
# List available access policies.
aws eks list-access-policies

# Associate policies to access entries.
aws eks associate-access-policy --cluster-name 'DeepThought' \
  --principal-arn 'arn:aws:iam::000011112222:role/Admin' \
  --policy-arn 'arn:aws:eks::aws:cluster-access-policy/AmazonEKSClusterAdminPolicy' \
  --access-scope '{ "type": "cluster" }'
aws eks associate-access-policy --cluster-name 'DeepThought' \
  --principal-arn 'arn:aws:iam::000011112222:user/bob' \
  --policy-arn 'arn:aws:eks::aws:cluster-access-policy/AmazonEKSViewPolicy' \
  --access-scope '{ "type": "namespace", "namespaces": [ "bob" ] }'
```

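The same flow in Pulumi, sketching the namespace-scoped association for user `bob` (`index.ts` above shows the
cluster-scoped variant for the caller's identity):

```ts
import * as aws from "@pulumi/aws";

// Assumed to exist, as in 'index.ts' above.
declare const cluster: aws.eks.Cluster;

const bobPrincipalArn = "arn:aws:iam::000011112222:user/bob";

// The access entry lets the principal connect to the cluster...
const bob_accessEntry = new aws.eks.AccessEntry("bob-accessEntry", {
    clusterName: cluster.name,
    principalArn: bobPrincipalArn,
});

// ...and the policy association defines what it can do once inside.
const bob_accessPolicyAssociation = new aws.eks.AccessPolicyAssociation("bob-accessPolicyAssociation", {
    clusterName: cluster.name,
    principalArn: bobPrincipalArn,
    policyArn: "arn:aws:eks::aws:cluster-access-policy/AmazonEKSViewPolicy",
    accessScope: {
        type: "namespace",
        namespaces: ["bob"],
    },
});
```
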
## Secrets encryption through KMS
@@ -515,7 +607,8 @@ See [Enabling secret encryption on an existing cluster].

TL;DR:

1. Make sure the role used in the cluster has access to the used key with `kms:DescribeKey` and `kms:CreateGrant`
   permissions.
1. Configure the cluster:

   <details>
@@ -555,7 +648,8 @@ See [Amazon EKS troubleshooting].

Use the [AWSSupport-TroubleshootEKSWorkerNode](https://docs.aws.amazon.com/systems-manager-automation-runbooks/latest/userguide/automation-awssupport-troubleshooteksworkernode.html) runbook.

> For the automation to work, worker nodes **must** have permission to access Systems Manager and have Systems Manager
> running.<br/>
> Grant this permission by attaching the [`AmazonSSMManagedInstanceCore`](https://docs.aws.amazon.com/systems-manager/latest/userguide/setup-instance-profile.html#instance-profile-policies-overview) policy to the node role.

Procedure:
@@ -605,6 +699,7 @@ Debug: see [Identify common issues].

- [Choosing an Amazon EC2 instance type]
- [Private cluster requirements]
- [De-mystifying cluster networking for Amazon EKS worker nodes]
- [Simplified Amazon EKS Access - NEW Cluster Access Management Controls]

<!--
  References
@@ -651,7 +746,8 @@ Debug: see [Identify common issues].

[required permissions to view eks resources]: https://docs.aws.amazon.com/eks/latest/userguide/view-kubernetes-resources.html#view-kubernetes-resources-permissions
[self-managed nodes]: https://docs.aws.amazon.com/eks/latest/userguide/worker.html
[service-linked role permissions for amazon eks]: https://docs.aws.amazon.com/eks/latest/userguide/using-service-linked-roles-eks.html#service-linked-role-permissions-eks
[simplified amazon eks access - new cluster access management controls]: https://www.youtube.com/watch?v=ae25cbV5Lxo
[using iam groups to manage kubernetes cluster access]: https://archive.eksworkshop.com/beginner/091_iam-groups/
[using service-linked roles for amazon eks]: https://docs.aws.amazon.com/eks/latest/userguide/using-service-linked-roles.html

<!-- Others -->