The aws:eks/cluster:Cluster resource, part of the Pulumi AWS provider, provisions the EKS control plane: the API server, scheduler, and controller manager that orchestrate Kubernetes workloads. This guide focuses on four capabilities: standard cluster creation with API authentication, EKS Auto Mode for automated infrastructure, hybrid nodes for on-premises workloads, and local clusters on AWS Outposts.
EKS clusters require IAM roles with specific managed policies, VPC subnets across multiple availability zones, and explicit dependsOn declarations to ensure proper resource ordering. The examples are intentionally small. Combine them with your own node groups, Fargate profiles, and add-ons.
Create a standard EKS cluster with API authentication
Most deployments start with a standard cluster that provides the Kubernetes control plane while you manage compute separately through node groups or Fargate profiles.
import * as pulumi from "@pulumi/pulumi";
import * as aws from "@pulumi/aws";
// IAM role the EKS control plane assumes. The trust policy allows the EKS
// service (eks.amazonaws.com) to assume the role and tag its STS sessions.
const cluster = new aws.iam.Role("cluster", {
name: "eks-cluster-example",
assumeRolePolicy: JSON.stringify({
Version: "2012-10-17",
Statement: [{
Action: [
"sts:AssumeRole",
"sts:TagSession",
],
Effect: "Allow",
Principal: {
Service: "eks.amazonaws.com",
},
}],
}),
});
// Attach the AWS-managed cluster policy so the control plane can manage
// AWS resources on your behalf.
const clusterAmazonEKSClusterPolicy = new aws.iam.RolePolicyAttachment("cluster_AmazonEKSClusterPolicy", {
policyArn: "arn:aws:iam::aws:policy/AmazonEKSClusterPolicy",
role: cluster.name,
});
// The EKS control plane. az1/az2/az3 are subnet resources assumed to be
// defined elsewhere in your program (one per availability zone).
const example = new aws.eks.Cluster("example", {
name: "example",
accessConfig: {
// "API" enables EKS access entries for fine-grained authentication.
authenticationMode: "API",
},
roleArn: cluster.arn,
version: "1.31",
vpcConfig: {
subnetIds: [
az1.id,
az2.id,
az3.id,
],
},
}, {
// Ensure the policy attachment completes before cluster creation,
// preventing permission errors during provisioning.
dependsOn: [clusterAmazonEKSClusterPolicy],
});
import pulumi
import json
import pulumi_aws as aws
# IAM role the EKS control plane assumes. The trust policy allows the EKS
# service (eks.amazonaws.com) to assume the role and tag its STS sessions.
cluster = aws.iam.Role("cluster",
name="eks-cluster-example",
assume_role_policy=json.dumps({
"Version": "2012-10-17",
"Statement": [{
"Action": [
"sts:AssumeRole",
"sts:TagSession",
],
"Effect": "Allow",
"Principal": {
"Service": "eks.amazonaws.com",
},
}],
}))
# Attach the AWS-managed cluster policy so the control plane can manage
# AWS resources on your behalf.
cluster_amazon_eks_cluster_policy = aws.iam.RolePolicyAttachment("cluster_AmazonEKSClusterPolicy",
policy_arn="arn:aws:iam::aws:policy/AmazonEKSClusterPolicy",
role=cluster.name)
# The EKS control plane. az1/az2/az3 are subnet values assumed to be
# defined elsewhere in your program (one per availability zone).
example = aws.eks.Cluster("example",
name="example",
access_config={
# "API" enables EKS access entries for fine-grained authentication.
"authentication_mode": "API",
},
role_arn=cluster.arn,
version="1.31",
vpc_config={
"subnet_ids": [
az1["id"],
az2["id"],
az3["id"],
],
},
# depends_on ensures the policy attachment completes before cluster
# creation, preventing permission errors during provisioning.
opts = pulumi.ResourceOptions(depends_on=[cluster_amazon_eks_cluster_policy]))
package main
import (
"encoding/json"
"github.com/pulumi/pulumi-aws/sdk/v7/go/aws/eks"
"github.com/pulumi/pulumi-aws/sdk/v7/go/aws/iam"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
// Trust policy allowing the EKS service to assume the cluster role
// and tag its STS sessions.
tmpJSON0, err := json.Marshal(map[string]interface{}{
"Version": "2012-10-17",
"Statement": []map[string]interface{}{
map[string]interface{}{
"Action": []string{
"sts:AssumeRole",
"sts:TagSession",
},
"Effect": "Allow",
"Principal": map[string]interface{}{
"Service": "eks.amazonaws.com",
},
},
},
})
if err != nil {
return err
}
json0 := string(tmpJSON0)
// IAM role the EKS control plane assumes.
cluster, err := iam.NewRole(ctx, "cluster", &iam.RoleArgs{
Name: pulumi.String("eks-cluster-example"),
AssumeRolePolicy: pulumi.String(json0),
})
if err != nil {
return err
}
// Attach the AWS-managed cluster policy to the role.
clusterAmazonEKSClusterPolicy, err := iam.NewRolePolicyAttachment(ctx, "cluster_AmazonEKSClusterPolicy", &iam.RolePolicyAttachmentArgs{
PolicyArn: pulumi.String("arn:aws:iam::aws:policy/AmazonEKSClusterPolicy"),
Role: cluster.Name,
})
if err != nil {
return err
}
// The EKS control plane. az1/az2/az3 are subnet resources assumed to be
// defined elsewhere in the program (one per availability zone).
_, err = eks.NewCluster(ctx, "example", &eks.ClusterArgs{
Name: pulumi.String("example"),
AccessConfig: &eks.ClusterAccessConfigArgs{
// "API" enables EKS access entries for fine-grained authentication.
AuthenticationMode: pulumi.String("API"),
},
RoleArn: cluster.Arn,
Version: pulumi.String("1.31"),
VpcConfig: &eks.ClusterVpcConfigArgs{
SubnetIds: pulumi.StringArray{
az1.Id,
az2.Id,
az3.Id,
},
},
// DependsOn ensures the policy attachment completes before cluster
// creation, preventing permission errors during provisioning.
}, pulumi.DependsOn([]pulumi.Resource{
clusterAmazonEKSClusterPolicy,
}))
if err != nil {
return err
}
return nil
})
}
using System.Collections.Generic;
using System.Linq;
using System.Text.Json;
using Pulumi;
using Aws = Pulumi.Aws;
return await Deployment.RunAsync(() =>
{
// IAM role the EKS control plane assumes. The trust policy allows the
// EKS service to assume the role and tag its STS sessions.
var cluster = new Aws.Iam.Role("cluster", new()
{
Name = "eks-cluster-example",
AssumeRolePolicy = JsonSerializer.Serialize(new Dictionary<string, object?>
{
["Version"] = "2012-10-17",
["Statement"] = new[]
{
new Dictionary<string, object?>
{
["Action"] = new[]
{
"sts:AssumeRole",
"sts:TagSession",
},
["Effect"] = "Allow",
["Principal"] = new Dictionary<string, object?>
{
["Service"] = "eks.amazonaws.com",
},
},
},
}),
});
// Attach the AWS-managed cluster policy to the role.
var clusterAmazonEKSClusterPolicy = new Aws.Iam.RolePolicyAttachment("cluster_AmazonEKSClusterPolicy", new()
{
PolicyArn = "arn:aws:iam::aws:policy/AmazonEKSClusterPolicy",
Role = cluster.Name,
});
// The EKS control plane. az1/az2/az3 are subnet resources assumed to be
// defined elsewhere in the program (one per availability zone).
var example = new Aws.Eks.Cluster("example", new()
{
Name = "example",
AccessConfig = new Aws.Eks.Inputs.ClusterAccessConfigArgs
{
// "API" enables EKS access entries for fine-grained authentication.
AuthenticationMode = "API",
},
RoleArn = cluster.Arn,
Version = "1.31",
VpcConfig = new Aws.Eks.Inputs.ClusterVpcConfigArgs
{
SubnetIds = new[]
{
az1.Id,
az2.Id,
az3.Id,
},
},
}, new CustomResourceOptions
{
// Ensure the policy attachment completes before cluster creation.
DependsOn =
{
clusterAmazonEKSClusterPolicy,
},
});
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.aws.iam.Role;
import com.pulumi.aws.iam.RoleArgs;
import com.pulumi.aws.iam.RolePolicyAttachment;
import com.pulumi.aws.iam.RolePolicyAttachmentArgs;
import com.pulumi.aws.eks.Cluster;
import com.pulumi.aws.eks.ClusterArgs;
import com.pulumi.aws.eks.inputs.ClusterAccessConfigArgs;
import com.pulumi.aws.eks.inputs.ClusterVpcConfigArgs;
import static com.pulumi.codegen.internal.Serialization.*;
import com.pulumi.resources.CustomResourceOptions;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
// IAM role the EKS control plane assumes. The trust policy allows the
// EKS service to assume the role and tag its STS sessions.
var cluster = new Role("cluster", RoleArgs.builder()
.name("eks-cluster-example")
.assumeRolePolicy(serializeJson(
jsonObject(
jsonProperty("Version", "2012-10-17"),
jsonProperty("Statement", jsonArray(jsonObject(
jsonProperty("Action", jsonArray(
"sts:AssumeRole",
"sts:TagSession"
)),
jsonProperty("Effect", "Allow"),
jsonProperty("Principal", jsonObject(
jsonProperty("Service", "eks.amazonaws.com")
))
)))
)))
.build());
// Attach the AWS-managed cluster policy to the role.
var clusterAmazonEKSClusterPolicy = new RolePolicyAttachment("clusterAmazonEKSClusterPolicy", RolePolicyAttachmentArgs.builder()
.policyArn("arn:aws:iam::aws:policy/AmazonEKSClusterPolicy")
.role(cluster.name())
.build());
// The EKS control plane. az1/az2/az3 are subnet resources assumed to be
// defined elsewhere in the program (one per availability zone).
var example = new Cluster("example", ClusterArgs.builder()
.name("example")
.accessConfig(ClusterAccessConfigArgs.builder()
// "API" enables EKS access entries for fine-grained authentication.
.authenticationMode("API")
.build())
.roleArn(cluster.arn())
.version("1.31")
.vpcConfig(ClusterVpcConfigArgs.builder()
.subnetIds(
az1.id(),
az2.id(),
az3.id())
.build())
// Ensure the policy attachment completes before cluster creation.
.build(), CustomResourceOptions.builder()
.dependsOn(clusterAmazonEKSClusterPolicy)
.build());
}
}
resources:
# The EKS control plane. az1/az2/az3 are subnet resources assumed to be
# declared elsewhere in the stack (one per availability zone).
example:
type: aws:eks:Cluster
properties:
name: example
accessConfig:
# "API" enables EKS access entries for fine-grained authentication.
authenticationMode: API
roleArn: ${cluster.arn}
version: '1.31'
vpcConfig:
subnetIds:
- ${az1.id}
- ${az2.id}
- ${az3.id}
options:
# Ensure the policy attachment completes before cluster creation.
dependsOn:
- ${clusterAmazonEKSClusterPolicy}
# IAM role the EKS control plane assumes; trust policy allows the EKS
# service to assume the role and tag its STS sessions.
cluster:
type: aws:iam:Role
properties:
name: eks-cluster-example
assumeRolePolicy:
fn::toJSON:
Version: 2012-10-17
Statement:
- Action:
- sts:AssumeRole
- sts:TagSession
Effect: Allow
Principal:
Service: eks.amazonaws.com
# Attach the AWS-managed cluster policy to the role.
clusterAmazonEKSClusterPolicy:
type: aws:iam:RolePolicyAttachment
name: cluster_AmazonEKSClusterPolicy
properties:
policyArn: arn:aws:iam::aws:policy/AmazonEKSClusterPolicy
role: ${cluster.name}
The roleArn grants the control plane permissions to manage AWS resources on your behalf. The vpcConfig places the cluster in your VPC subnets, which must span multiple availability zones for high availability. The accessConfig sets authenticationMode to “API”, enabling EKS access entries for fine-grained authentication. The dependsOn ensures the IAM policy attachment completes before cluster creation, preventing permission errors.
Enable EKS Auto Mode for automated infrastructure
EKS Auto Mode eliminates manual node group management by automatically provisioning compute, storage, and load balancing based on pod requirements.
import * as pulumi from "@pulumi/pulumi";
import * as aws from "@pulumi/aws";
// IAM role for Auto Mode worker nodes; EC2 instances assume this role.
const node = new aws.iam.Role("node", {
name: "eks-auto-node-example",
assumeRolePolicy: JSON.stringify({
Version: "2012-10-17",
Statement: [{
Action: ["sts:AssumeRole"],
Effect: "Allow",
Principal: {
Service: "ec2.amazonaws.com",
},
}],
}),
});
// IAM role the EKS control plane assumes.
const cluster = new aws.iam.Role("cluster", {
name: "eks-cluster-example",
assumeRolePolicy: JSON.stringify({
Version: "2012-10-17",
Statement: [{
Action: [
"sts:AssumeRole",
"sts:TagSession",
],
Effect: "Allow",
Principal: {
Service: "eks.amazonaws.com",
},
}],
}),
});
// Auto Mode requires five AWS-managed policies on the cluster role:
// Cluster, Compute, BlockStorage, LoadBalancing, and Networking.
const clusterAmazonEKSClusterPolicy = new aws.iam.RolePolicyAttachment("cluster_AmazonEKSClusterPolicy", {
policyArn: "arn:aws:iam::aws:policy/AmazonEKSClusterPolicy",
role: cluster.name,
});
const clusterAmazonEKSComputePolicy = new aws.iam.RolePolicyAttachment("cluster_AmazonEKSComputePolicy", {
policyArn: "arn:aws:iam::aws:policy/AmazonEKSComputePolicy",
role: cluster.name,
});
const clusterAmazonEKSBlockStoragePolicy = new aws.iam.RolePolicyAttachment("cluster_AmazonEKSBlockStoragePolicy", {
policyArn: "arn:aws:iam::aws:policy/AmazonEKSBlockStoragePolicy",
role: cluster.name,
});
const clusterAmazonEKSLoadBalancingPolicy = new aws.iam.RolePolicyAttachment("cluster_AmazonEKSLoadBalancingPolicy", {
policyArn: "arn:aws:iam::aws:policy/AmazonEKSLoadBalancingPolicy",
role: cluster.name,
});
const clusterAmazonEKSNetworkingPolicy = new aws.iam.RolePolicyAttachment("cluster_AmazonEKSNetworkingPolicy", {
policyArn: "arn:aws:iam::aws:policy/AmazonEKSNetworkingPolicy",
role: cluster.name,
});
// EKS cluster with Auto Mode: computeConfig, elasticLoadBalancing, and
// blockStorage must all be enabled together. az1/az2/az3 are subnets
// assumed to be defined elsewhere in your program.
const example = new aws.eks.Cluster("example", {
name: "example",
accessConfig: {
authenticationMode: "API",
},
roleArn: cluster.arn,
version: "1.31",
// Disable self-managed add-on bootstrap; Auto Mode manages these.
bootstrapSelfManagedAddons: false,
computeConfig: {
enabled: true,
nodePools: ["general-purpose"],
nodeRoleArn: node.arn,
},
kubernetesNetworkConfig: {
elasticLoadBalancing: {
enabled: true,
},
},
storageConfig: {
blockStorage: {
enabled: true,
},
},
vpcConfig: {
endpointPrivateAccess: true,
endpointPublicAccess: true,
subnetIds: [
az1.id,
az2.id,
az3.id,
],
},
}, {
// Wait for all five policy attachments before creating the cluster.
dependsOn: [
clusterAmazonEKSClusterPolicy,
clusterAmazonEKSComputePolicy,
clusterAmazonEKSBlockStoragePolicy,
clusterAmazonEKSLoadBalancingPolicy,
clusterAmazonEKSNetworkingPolicy,
],
});
// Node role policies: join the cluster and pull images from ECR.
const nodeAmazonEKSWorkerNodeMinimalPolicy = new aws.iam.RolePolicyAttachment("node_AmazonEKSWorkerNodeMinimalPolicy", {
policyArn: "arn:aws:iam::aws:policy/AmazonEKSWorkerNodeMinimalPolicy",
role: node.name,
});
const nodeAmazonEC2ContainerRegistryPullOnly = new aws.iam.RolePolicyAttachment("node_AmazonEC2ContainerRegistryPullOnly", {
policyArn: "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryPullOnly",
role: node.name,
});
import pulumi
import json
import pulumi_aws as aws
# IAM role for Auto Mode worker nodes; EC2 instances assume this role.
node = aws.iam.Role("node",
name="eks-auto-node-example",
assume_role_policy=json.dumps({
"Version": "2012-10-17",
"Statement": [{
"Action": ["sts:AssumeRole"],
"Effect": "Allow",
"Principal": {
"Service": "ec2.amazonaws.com",
},
}],
}))
# IAM role the EKS control plane assumes.
cluster = aws.iam.Role("cluster",
name="eks-cluster-example",
assume_role_policy=json.dumps({
"Version": "2012-10-17",
"Statement": [{
"Action": [
"sts:AssumeRole",
"sts:TagSession",
],
"Effect": "Allow",
"Principal": {
"Service": "eks.amazonaws.com",
},
}],
}))
# Auto Mode requires five AWS-managed policies on the cluster role:
# Cluster, Compute, BlockStorage, LoadBalancing, and Networking.
cluster_amazon_eks_cluster_policy = aws.iam.RolePolicyAttachment("cluster_AmazonEKSClusterPolicy",
policy_arn="arn:aws:iam::aws:policy/AmazonEKSClusterPolicy",
role=cluster.name)
cluster_amazon_eks_compute_policy = aws.iam.RolePolicyAttachment("cluster_AmazonEKSComputePolicy",
policy_arn="arn:aws:iam::aws:policy/AmazonEKSComputePolicy",
role=cluster.name)
cluster_amazon_eks_block_storage_policy = aws.iam.RolePolicyAttachment("cluster_AmazonEKSBlockStoragePolicy",
policy_arn="arn:aws:iam::aws:policy/AmazonEKSBlockStoragePolicy",
role=cluster.name)
cluster_amazon_eks_load_balancing_policy = aws.iam.RolePolicyAttachment("cluster_AmazonEKSLoadBalancingPolicy",
policy_arn="arn:aws:iam::aws:policy/AmazonEKSLoadBalancingPolicy",
role=cluster.name)
cluster_amazon_eks_networking_policy = aws.iam.RolePolicyAttachment("cluster_AmazonEKSNetworkingPolicy",
policy_arn="arn:aws:iam::aws:policy/AmazonEKSNetworkingPolicy",
role=cluster.name)
# EKS cluster with Auto Mode: compute_config, elastic_load_balancing, and
# block_storage must all be enabled together. az1/az2/az3 are subnets
# assumed to be defined elsewhere in your program.
example = aws.eks.Cluster("example",
name="example",
access_config={
"authentication_mode": "API",
},
role_arn=cluster.arn,
version="1.31",
# Disable self-managed add-on bootstrap; Auto Mode manages these.
bootstrap_self_managed_addons=False,
compute_config={
"enabled": True,
"node_pools": ["general-purpose"],
"node_role_arn": node.arn,
},
kubernetes_network_config={
"elastic_load_balancing": {
"enabled": True,
},
},
storage_config={
"block_storage": {
"enabled": True,
},
},
vpc_config={
"endpoint_private_access": True,
"endpoint_public_access": True,
"subnet_ids": [
az1["id"],
az2["id"],
az3["id"],
],
},
# Wait for all five policy attachments before creating the cluster.
opts = pulumi.ResourceOptions(depends_on=[
cluster_amazon_eks_cluster_policy,
cluster_amazon_eks_compute_policy,
cluster_amazon_eks_block_storage_policy,
cluster_amazon_eks_load_balancing_policy,
cluster_amazon_eks_networking_policy,
]))
# Node role policies: join the cluster and pull images from ECR.
node_amazon_eks_worker_node_minimal_policy = aws.iam.RolePolicyAttachment("node_AmazonEKSWorkerNodeMinimalPolicy",
policy_arn="arn:aws:iam::aws:policy/AmazonEKSWorkerNodeMinimalPolicy",
role=node.name)
node_amazon_ec2_container_registry_pull_only = aws.iam.RolePolicyAttachment("node_AmazonEC2ContainerRegistryPullOnly",
policy_arn="arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryPullOnly",
role=node.name)
package main
import (
"encoding/json"
"github.com/pulumi/pulumi-aws/sdk/v7/go/aws/eks"
"github.com/pulumi/pulumi-aws/sdk/v7/go/aws/iam"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
// Trust policy for Auto Mode worker nodes (assumed by EC2).
tmpJSON0, err := json.Marshal(map[string]interface{}{
"Version": "2012-10-17",
"Statement": []map[string]interface{}{
map[string]interface{}{
"Action": []string{
"sts:AssumeRole",
},
"Effect": "Allow",
"Principal": map[string]interface{}{
"Service": "ec2.amazonaws.com",
},
},
},
})
if err != nil {
return err
}
json0 := string(tmpJSON0)
node, err := iam.NewRole(ctx, "node", &iam.RoleArgs{
Name: pulumi.String("eks-auto-node-example"),
AssumeRolePolicy: pulumi.String(json0),
})
if err != nil {
return err
}
// Trust policy for the cluster role (assumed by the EKS service).
tmpJSON1, err := json.Marshal(map[string]interface{}{
"Version": "2012-10-17",
"Statement": []map[string]interface{}{
map[string]interface{}{
"Action": []string{
"sts:AssumeRole",
"sts:TagSession",
},
"Effect": "Allow",
"Principal": map[string]interface{}{
"Service": "eks.amazonaws.com",
},
},
},
})
if err != nil {
return err
}
json1 := string(tmpJSON1)
cluster, err := iam.NewRole(ctx, "cluster", &iam.RoleArgs{
Name: pulumi.String("eks-cluster-example"),
AssumeRolePolicy: pulumi.String(json1),
})
if err != nil {
return err
}
// Auto Mode requires five AWS-managed policies on the cluster role:
// Cluster, Compute, BlockStorage, LoadBalancing, and Networking.
clusterAmazonEKSClusterPolicy, err := iam.NewRolePolicyAttachment(ctx, "cluster_AmazonEKSClusterPolicy", &iam.RolePolicyAttachmentArgs{
PolicyArn: pulumi.String("arn:aws:iam::aws:policy/AmazonEKSClusterPolicy"),
Role: cluster.Name,
})
if err != nil {
return err
}
clusterAmazonEKSComputePolicy, err := iam.NewRolePolicyAttachment(ctx, "cluster_AmazonEKSComputePolicy", &iam.RolePolicyAttachmentArgs{
PolicyArn: pulumi.String("arn:aws:iam::aws:policy/AmazonEKSComputePolicy"),
Role: cluster.Name,
})
if err != nil {
return err
}
clusterAmazonEKSBlockStoragePolicy, err := iam.NewRolePolicyAttachment(ctx, "cluster_AmazonEKSBlockStoragePolicy", &iam.RolePolicyAttachmentArgs{
PolicyArn: pulumi.String("arn:aws:iam::aws:policy/AmazonEKSBlockStoragePolicy"),
Role: cluster.Name,
})
if err != nil {
return err
}
clusterAmazonEKSLoadBalancingPolicy, err := iam.NewRolePolicyAttachment(ctx, "cluster_AmazonEKSLoadBalancingPolicy", &iam.RolePolicyAttachmentArgs{
PolicyArn: pulumi.String("arn:aws:iam::aws:policy/AmazonEKSLoadBalancingPolicy"),
Role: cluster.Name,
})
if err != nil {
return err
}
clusterAmazonEKSNetworkingPolicy, err := iam.NewRolePolicyAttachment(ctx, "cluster_AmazonEKSNetworkingPolicy", &iam.RolePolicyAttachmentArgs{
PolicyArn: pulumi.String("arn:aws:iam::aws:policy/AmazonEKSNetworkingPolicy"),
Role: cluster.Name,
})
if err != nil {
return err
}
// EKS cluster with Auto Mode: ComputeConfig, ElasticLoadBalancing, and
// BlockStorage must all be enabled together. az1/az2/az3 are subnets
// assumed to be defined elsewhere in the program.
_, err = eks.NewCluster(ctx, "example", &eks.ClusterArgs{
Name: pulumi.String("example"),
AccessConfig: &eks.ClusterAccessConfigArgs{
AuthenticationMode: pulumi.String("API"),
},
RoleArn: cluster.Arn,
Version: pulumi.String("1.31"),
// Disable self-managed add-on bootstrap; Auto Mode manages these.
BootstrapSelfManagedAddons: pulumi.Bool(false),
ComputeConfig: &eks.ClusterComputeConfigArgs{
Enabled: pulumi.Bool(true),
NodePools: pulumi.StringArray{
pulumi.String("general-purpose"),
},
NodeRoleArn: node.Arn,
},
KubernetesNetworkConfig: &eks.ClusterKubernetesNetworkConfigArgs{
ElasticLoadBalancing: &eks.ClusterKubernetesNetworkConfigElasticLoadBalancingArgs{
Enabled: pulumi.Bool(true),
},
},
StorageConfig: &eks.ClusterStorageConfigArgs{
BlockStorage: &eks.ClusterStorageConfigBlockStorageArgs{
Enabled: pulumi.Bool(true),
},
},
VpcConfig: &eks.ClusterVpcConfigArgs{
EndpointPrivateAccess: pulumi.Bool(true),
EndpointPublicAccess: pulumi.Bool(true),
SubnetIds: pulumi.StringArray{
az1.Id,
az2.Id,
az3.Id,
},
},
// Wait for all five policy attachments before creating the cluster.
}, pulumi.DependsOn([]pulumi.Resource{
clusterAmazonEKSClusterPolicy,
clusterAmazonEKSComputePolicy,
clusterAmazonEKSBlockStoragePolicy,
clusterAmazonEKSLoadBalancingPolicy,
clusterAmazonEKSNetworkingPolicy,
}))
if err != nil {
return err
}
// Node role policies: join the cluster and pull images from ECR.
_, err = iam.NewRolePolicyAttachment(ctx, "node_AmazonEKSWorkerNodeMinimalPolicy", &iam.RolePolicyAttachmentArgs{
PolicyArn: pulumi.String("arn:aws:iam::aws:policy/AmazonEKSWorkerNodeMinimalPolicy"),
Role: node.Name,
})
if err != nil {
return err
}
_, err = iam.NewRolePolicyAttachment(ctx, "node_AmazonEC2ContainerRegistryPullOnly", &iam.RolePolicyAttachmentArgs{
PolicyArn: pulumi.String("arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryPullOnly"),
Role: node.Name,
})
if err != nil {
return err
}
return nil
})
}
using System.Collections.Generic;
using System.Linq;
using System.Text.Json;
using Pulumi;
using Aws = Pulumi.Aws;
return await Deployment.RunAsync(() =>
{
// IAM role for Auto Mode worker nodes; EC2 instances assume this role.
var node = new Aws.Iam.Role("node", new()
{
Name = "eks-auto-node-example",
AssumeRolePolicy = JsonSerializer.Serialize(new Dictionary<string, object?>
{
["Version"] = "2012-10-17",
["Statement"] = new[]
{
new Dictionary<string, object?>
{
["Action"] = new[]
{
"sts:AssumeRole",
},
["Effect"] = "Allow",
["Principal"] = new Dictionary<string, object?>
{
["Service"] = "ec2.amazonaws.com",
},
},
},
}),
});
// IAM role the EKS control plane assumes.
var cluster = new Aws.Iam.Role("cluster", new()
{
Name = "eks-cluster-example",
AssumeRolePolicy = JsonSerializer.Serialize(new Dictionary<string, object?>
{
["Version"] = "2012-10-17",
["Statement"] = new[]
{
new Dictionary<string, object?>
{
["Action"] = new[]
{
"sts:AssumeRole",
"sts:TagSession",
},
["Effect"] = "Allow",
["Principal"] = new Dictionary<string, object?>
{
["Service"] = "eks.amazonaws.com",
},
},
},
}),
});
// Auto Mode requires five AWS-managed policies on the cluster role:
// Cluster, Compute, BlockStorage, LoadBalancing, and Networking.
var clusterAmazonEKSClusterPolicy = new Aws.Iam.RolePolicyAttachment("cluster_AmazonEKSClusterPolicy", new()
{
PolicyArn = "arn:aws:iam::aws:policy/AmazonEKSClusterPolicy",
Role = cluster.Name,
});
var clusterAmazonEKSComputePolicy = new Aws.Iam.RolePolicyAttachment("cluster_AmazonEKSComputePolicy", new()
{
PolicyArn = "arn:aws:iam::aws:policy/AmazonEKSComputePolicy",
Role = cluster.Name,
});
var clusterAmazonEKSBlockStoragePolicy = new Aws.Iam.RolePolicyAttachment("cluster_AmazonEKSBlockStoragePolicy", new()
{
PolicyArn = "arn:aws:iam::aws:policy/AmazonEKSBlockStoragePolicy",
Role = cluster.Name,
});
var clusterAmazonEKSLoadBalancingPolicy = new Aws.Iam.RolePolicyAttachment("cluster_AmazonEKSLoadBalancingPolicy", new()
{
PolicyArn = "arn:aws:iam::aws:policy/AmazonEKSLoadBalancingPolicy",
Role = cluster.Name,
});
var clusterAmazonEKSNetworkingPolicy = new Aws.Iam.RolePolicyAttachment("cluster_AmazonEKSNetworkingPolicy", new()
{
PolicyArn = "arn:aws:iam::aws:policy/AmazonEKSNetworkingPolicy",
Role = cluster.Name,
});
// EKS cluster with Auto Mode: ComputeConfig, ElasticLoadBalancing, and
// BlockStorage must all be enabled together. az1/az2/az3 are subnets
// assumed to be defined elsewhere in the program.
var example = new Aws.Eks.Cluster("example", new()
{
Name = "example",
AccessConfig = new Aws.Eks.Inputs.ClusterAccessConfigArgs
{
AuthenticationMode = "API",
},
RoleArn = cluster.Arn,
Version = "1.31",
// Disable self-managed add-on bootstrap; Auto Mode manages these.
BootstrapSelfManagedAddons = false,
ComputeConfig = new Aws.Eks.Inputs.ClusterComputeConfigArgs
{
Enabled = true,
NodePools = new[]
{
"general-purpose",
},
NodeRoleArn = node.Arn,
},
KubernetesNetworkConfig = new Aws.Eks.Inputs.ClusterKubernetesNetworkConfigArgs
{
ElasticLoadBalancing = new Aws.Eks.Inputs.ClusterKubernetesNetworkConfigElasticLoadBalancingArgs
{
Enabled = true,
},
},
StorageConfig = new Aws.Eks.Inputs.ClusterStorageConfigArgs
{
BlockStorage = new Aws.Eks.Inputs.ClusterStorageConfigBlockStorageArgs
{
Enabled = true,
},
},
VpcConfig = new Aws.Eks.Inputs.ClusterVpcConfigArgs
{
EndpointPrivateAccess = true,
EndpointPublicAccess = true,
SubnetIds = new[]
{
az1.Id,
az2.Id,
az3.Id,
},
},
}, new CustomResourceOptions
{
// Wait for all five policy attachments before creating the cluster.
DependsOn =
{
clusterAmazonEKSClusterPolicy,
clusterAmazonEKSComputePolicy,
clusterAmazonEKSBlockStoragePolicy,
clusterAmazonEKSLoadBalancingPolicy,
clusterAmazonEKSNetworkingPolicy,
},
});
// Node role policies: join the cluster and pull images from ECR.
var nodeAmazonEKSWorkerNodeMinimalPolicy = new Aws.Iam.RolePolicyAttachment("node_AmazonEKSWorkerNodeMinimalPolicy", new()
{
PolicyArn = "arn:aws:iam::aws:policy/AmazonEKSWorkerNodeMinimalPolicy",
Role = node.Name,
});
var nodeAmazonEC2ContainerRegistryPullOnly = new Aws.Iam.RolePolicyAttachment("node_AmazonEC2ContainerRegistryPullOnly", new()
{
PolicyArn = "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryPullOnly",
Role = node.Name,
});
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.aws.iam.Role;
import com.pulumi.aws.iam.RoleArgs;
import com.pulumi.aws.iam.RolePolicyAttachment;
import com.pulumi.aws.iam.RolePolicyAttachmentArgs;
import com.pulumi.aws.eks.Cluster;
import com.pulumi.aws.eks.ClusterArgs;
import com.pulumi.aws.eks.inputs.ClusterAccessConfigArgs;
import com.pulumi.aws.eks.inputs.ClusterComputeConfigArgs;
import com.pulumi.aws.eks.inputs.ClusterKubernetesNetworkConfigArgs;
import com.pulumi.aws.eks.inputs.ClusterKubernetesNetworkConfigElasticLoadBalancingArgs;
import com.pulumi.aws.eks.inputs.ClusterStorageConfigArgs;
import com.pulumi.aws.eks.inputs.ClusterStorageConfigBlockStorageArgs;
import com.pulumi.aws.eks.inputs.ClusterVpcConfigArgs;
import static com.pulumi.codegen.internal.Serialization.*;
import com.pulumi.resources.CustomResourceOptions;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
// IAM role for Auto Mode worker nodes; EC2 instances assume this role.
var node = new Role("node", RoleArgs.builder()
.name("eks-auto-node-example")
.assumeRolePolicy(serializeJson(
jsonObject(
jsonProperty("Version", "2012-10-17"),
jsonProperty("Statement", jsonArray(jsonObject(
jsonProperty("Action", jsonArray("sts:AssumeRole")),
jsonProperty("Effect", "Allow"),
jsonProperty("Principal", jsonObject(
jsonProperty("Service", "ec2.amazonaws.com")
))
)))
)))
.build());
// IAM role the EKS control plane assumes.
var cluster = new Role("cluster", RoleArgs.builder()
.name("eks-cluster-example")
.assumeRolePolicy(serializeJson(
jsonObject(
jsonProperty("Version", "2012-10-17"),
jsonProperty("Statement", jsonArray(jsonObject(
jsonProperty("Action", jsonArray(
"sts:AssumeRole",
"sts:TagSession"
)),
jsonProperty("Effect", "Allow"),
jsonProperty("Principal", jsonObject(
jsonProperty("Service", "eks.amazonaws.com")
))
)))
)))
.build());
// Auto Mode requires five AWS-managed policies on the cluster role:
// Cluster, Compute, BlockStorage, LoadBalancing, and Networking.
var clusterAmazonEKSClusterPolicy = new RolePolicyAttachment("clusterAmazonEKSClusterPolicy", RolePolicyAttachmentArgs.builder()
.policyArn("arn:aws:iam::aws:policy/AmazonEKSClusterPolicy")
.role(cluster.name())
.build());
var clusterAmazonEKSComputePolicy = new RolePolicyAttachment("clusterAmazonEKSComputePolicy", RolePolicyAttachmentArgs.builder()
.policyArn("arn:aws:iam::aws:policy/AmazonEKSComputePolicy")
.role(cluster.name())
.build());
var clusterAmazonEKSBlockStoragePolicy = new RolePolicyAttachment("clusterAmazonEKSBlockStoragePolicy", RolePolicyAttachmentArgs.builder()
.policyArn("arn:aws:iam::aws:policy/AmazonEKSBlockStoragePolicy")
.role(cluster.name())
.build());
var clusterAmazonEKSLoadBalancingPolicy = new RolePolicyAttachment("clusterAmazonEKSLoadBalancingPolicy", RolePolicyAttachmentArgs.builder()
.policyArn("arn:aws:iam::aws:policy/AmazonEKSLoadBalancingPolicy")
.role(cluster.name())
.build());
var clusterAmazonEKSNetworkingPolicy = new RolePolicyAttachment("clusterAmazonEKSNetworkingPolicy", RolePolicyAttachmentArgs.builder()
.policyArn("arn:aws:iam::aws:policy/AmazonEKSNetworkingPolicy")
.role(cluster.name())
.build());
// EKS cluster with Auto Mode: computeConfig, elasticLoadBalancing, and
// blockStorage must all be enabled together. az1/az2/az3 are subnets
// assumed to be defined elsewhere in the program.
var example = new Cluster("example", ClusterArgs.builder()
.name("example")
.accessConfig(ClusterAccessConfigArgs.builder()
.authenticationMode("API")
.build())
.roleArn(cluster.arn())
.version("1.31")
// Disable self-managed add-on bootstrap; Auto Mode manages these.
.bootstrapSelfManagedAddons(false)
.computeConfig(ClusterComputeConfigArgs.builder()
.enabled(true)
.nodePools("general-purpose")
.nodeRoleArn(node.arn())
.build())
.kubernetesNetworkConfig(ClusterKubernetesNetworkConfigArgs.builder()
.elasticLoadBalancing(ClusterKubernetesNetworkConfigElasticLoadBalancingArgs.builder()
.enabled(true)
.build())
.build())
.storageConfig(ClusterStorageConfigArgs.builder()
.blockStorage(ClusterStorageConfigBlockStorageArgs.builder()
.enabled(true)
.build())
.build())
.vpcConfig(ClusterVpcConfigArgs.builder()
.endpointPrivateAccess(true)
.endpointPublicAccess(true)
.subnetIds(
az1.id(),
az2.id(),
az3.id())
.build())
// Wait for all five policy attachments before creating the cluster.
.build(), CustomResourceOptions.builder()
.dependsOn(
clusterAmazonEKSClusterPolicy,
clusterAmazonEKSComputePolicy,
clusterAmazonEKSBlockStoragePolicy,
clusterAmazonEKSLoadBalancingPolicy,
clusterAmazonEKSNetworkingPolicy)
.build());
// Node role policies: join the cluster and pull images from ECR.
var nodeAmazonEKSWorkerNodeMinimalPolicy = new RolePolicyAttachment("nodeAmazonEKSWorkerNodeMinimalPolicy", RolePolicyAttachmentArgs.builder()
.policyArn("arn:aws:iam::aws:policy/AmazonEKSWorkerNodeMinimalPolicy")
.role(node.name())
.build());
var nodeAmazonEC2ContainerRegistryPullOnly = new RolePolicyAttachment("nodeAmazonEC2ContainerRegistryPullOnly", RolePolicyAttachmentArgs.builder()
.policyArn("arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryPullOnly")
.role(node.name())
.build());
}
}
resources:
# EKS cluster with Auto Mode: computeConfig, elasticLoadBalancing, and
# blockStorage must all be enabled together. az1/az2/az3 are subnet
# resources assumed to be declared elsewhere in the stack.
example:
type: aws:eks:Cluster
properties:
name: example
accessConfig:
authenticationMode: API
roleArn: ${cluster.arn}
version: '1.31'
# Disable self-managed add-on bootstrap; Auto Mode manages these.
bootstrapSelfManagedAddons: false
computeConfig:
enabled: true
nodePools:
- general-purpose
nodeRoleArn: ${node.arn}
kubernetesNetworkConfig:
elasticLoadBalancing:
enabled: true
storageConfig:
blockStorage:
enabled: true
vpcConfig:
endpointPrivateAccess: true
endpointPublicAccess: true
subnetIds:
- ${az1.id}
- ${az2.id}
- ${az3.id}
options:
# Wait for all five policy attachments before creating the cluster.
dependsOn:
- ${clusterAmazonEKSClusterPolicy}
- ${clusterAmazonEKSComputePolicy}
- ${clusterAmazonEKSBlockStoragePolicy}
- ${clusterAmazonEKSLoadBalancingPolicy}
- ${clusterAmazonEKSNetworkingPolicy}
# IAM role for Auto Mode worker nodes; EC2 instances assume this role.
node:
type: aws:iam:Role
properties:
name: eks-auto-node-example
assumeRolePolicy:
fn::toJSON:
Version: 2012-10-17
Statement:
- Action:
- sts:AssumeRole
Effect: Allow
Principal:
Service: ec2.amazonaws.com
# Node role policies: join the cluster and pull images from ECR.
nodeAmazonEKSWorkerNodeMinimalPolicy:
type: aws:iam:RolePolicyAttachment
name: node_AmazonEKSWorkerNodeMinimalPolicy
properties:
policyArn: arn:aws:iam::aws:policy/AmazonEKSWorkerNodeMinimalPolicy
role: ${node.name}
nodeAmazonEC2ContainerRegistryPullOnly:
type: aws:iam:RolePolicyAttachment
name: node_AmazonEC2ContainerRegistryPullOnly
properties:
policyArn: arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryPullOnly
role: ${node.name}
# IAM role the EKS control plane assumes.
cluster:
type: aws:iam:Role
properties:
name: eks-cluster-example
assumeRolePolicy:
fn::toJSON:
Version: 2012-10-17
Statement:
- Action:
- sts:AssumeRole
- sts:TagSession
Effect: Allow
Principal:
Service: eks.amazonaws.com
# Auto Mode requires five AWS-managed policies on the cluster role:
# Cluster, Compute, BlockStorage, LoadBalancing, and Networking.
clusterAmazonEKSClusterPolicy:
type: aws:iam:RolePolicyAttachment
name: cluster_AmazonEKSClusterPolicy
properties:
policyArn: arn:aws:iam::aws:policy/AmazonEKSClusterPolicy
role: ${cluster.name}
clusterAmazonEKSComputePolicy:
type: aws:iam:RolePolicyAttachment
name: cluster_AmazonEKSComputePolicy
properties:
policyArn: arn:aws:iam::aws:policy/AmazonEKSComputePolicy
role: ${cluster.name}
clusterAmazonEKSBlockStoragePolicy:
type: aws:iam:RolePolicyAttachment
name: cluster_AmazonEKSBlockStoragePolicy
properties:
policyArn: arn:aws:iam::aws:policy/AmazonEKSBlockStoragePolicy
role: ${cluster.name}
clusterAmazonEKSLoadBalancingPolicy:
type: aws:iam:RolePolicyAttachment
name: cluster_AmazonEKSLoadBalancingPolicy
properties:
policyArn: arn:aws:iam::aws:policy/AmazonEKSLoadBalancingPolicy
role: ${cluster.name}
clusterAmazonEKSNetworkingPolicy:
type: aws:iam:RolePolicyAttachment
name: cluster_AmazonEKSNetworkingPolicy
properties:
policyArn: arn:aws:iam::aws:policy/AmazonEKSNetworkingPolicy
role: ${cluster.name}
When computeConfig.enabled is true, EKS automatically provisions EC2 instances from the specified nodePools as pods are scheduled. The nodeRoleArn grants worker nodes permissions to join the cluster and pull container images. EKS Auto Mode requires three features enabled together: computeConfig, kubernetesNetworkConfig.elasticLoadBalancing, and storageConfig.blockStorage. Setting bootstrapSelfManagedAddons to false prevents conflicts with Auto Mode’s managed infrastructure. The cluster role needs five AWS-managed policies (Cluster, Compute, BlockStorage, LoadBalancing, Networking) instead of just the standard cluster policy.
Configure hybrid nodes for on-premises workloads
EKS Hybrid Nodes extend Kubernetes clusters to on-premises infrastructure, running pods on servers outside AWS while maintaining centralized control plane management.
import * as pulumi from "@pulumi/pulumi";
import * as aws from "@pulumi/aws";
// IAM role the EKS control plane assumes.
const cluster = new aws.iam.Role("cluster", {
name: "eks-cluster-example",
assumeRolePolicy: JSON.stringify({
Version: "2012-10-17",
Statement: [{
Action: [
"sts:AssumeRole",
"sts:TagSession",
],
Effect: "Allow",
Principal: {
Service: "eks.amazonaws.com",
},
}],
}),
});
// Attach the AWS-managed cluster policy to the role.
const clusterAmazonEKSClusterPolicy = new aws.iam.RolePolicyAttachment("cluster_AmazonEKSClusterPolicy", {
policyArn: "arn:aws:iam::aws:policy/AmazonEKSClusterPolicy",
role: cluster.name,
});
// EKS cluster with hybrid nodes: remoteNetworkConfig declares the
// on-premises CIDR ranges for nodes and pods so the control plane can
// route to them. az1/az2/az3 are subnets assumed defined elsewhere.
const example = new aws.eks.Cluster("example", {
name: "example",
accessConfig: {
authenticationMode: "API",
},
roleArn: cluster.arn,
version: "1.31",
remoteNetworkConfig: {
// CIDR range of the on-premises node network.
remoteNodeNetworks: {
cidrs: ["172.16.0.0/18"],
},
// CIDR range of the on-premises pod network.
remotePodNetworks: {
cidrs: ["172.16.64.0/18"],
},
},
vpcConfig: {
endpointPrivateAccess: true,
endpointPublicAccess: true,
subnetIds: [
az1.id,
az2.id,
az3.id,
],
},
}, {
// Ensure the policy attachment completes before cluster creation.
dependsOn: [clusterAmazonEKSClusterPolicy],
});
import pulumi
import json
import pulumi_aws as aws
# IAM role the EKS control plane assumes.
cluster = aws.iam.Role("cluster",
name="eks-cluster-example",
assume_role_policy=json.dumps({
"Version": "2012-10-17",
"Statement": [{
"Action": [
"sts:AssumeRole",
"sts:TagSession",
],
"Effect": "Allow",
"Principal": {
"Service": "eks.amazonaws.com",
},
}],
}))
# Attach the AWS-managed cluster policy to the role.
cluster_amazon_eks_cluster_policy = aws.iam.RolePolicyAttachment("cluster_AmazonEKSClusterPolicy",
policy_arn="arn:aws:iam::aws:policy/AmazonEKSClusterPolicy",
role=cluster.name)
# EKS cluster with hybrid nodes: remote_network_config declares the
# on-premises CIDR ranges for nodes and pods so the control plane can
# route to them. az1/az2/az3 are subnets assumed defined elsewhere.
example = aws.eks.Cluster("example",
name="example",
access_config={
"authentication_mode": "API",
},
role_arn=cluster.arn,
version="1.31",
remote_network_config={
# CIDR range of the on-premises node network.
"remote_node_networks": {
"cidrs": ["172.16.0.0/18"],
},
# CIDR range of the on-premises pod network.
"remote_pod_networks": {
"cidrs": ["172.16.64.0/18"],
},
},
vpc_config={
"endpoint_private_access": True,
"endpoint_public_access": True,
"subnet_ids": [
az1["id"],
az2["id"],
az3["id"],
],
},
# Ensure the policy attachment completes before cluster creation.
opts = pulumi.ResourceOptions(depends_on=[cluster_amazon_eks_cluster_policy]))
package main
import (
"encoding/json"
"github.com/pulumi/pulumi-aws/sdk/v7/go/aws/eks"
"github.com/pulumi/pulumi-aws/sdk/v7/go/aws/iam"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
// Trust policy allowing the EKS service to assume the cluster role.
tmpJSON0, err := json.Marshal(map[string]interface{}{
"Version": "2012-10-17",
"Statement": []map[string]interface{}{
map[string]interface{}{
"Action": []string{
"sts:AssumeRole",
"sts:TagSession",
},
"Effect": "Allow",
"Principal": map[string]interface{}{
"Service": "eks.amazonaws.com",
},
},
},
})
if err != nil {
return err
}
json0 := string(tmpJSON0)
// IAM role the EKS control plane assumes.
cluster, err := iam.NewRole(ctx, "cluster", &iam.RoleArgs{
Name: pulumi.String("eks-cluster-example"),
AssumeRolePolicy: pulumi.String(json0),
})
if err != nil {
return err
}
// Attach the AWS-managed cluster policy to the role.
clusterAmazonEKSClusterPolicy, err := iam.NewRolePolicyAttachment(ctx, "cluster_AmazonEKSClusterPolicy", &iam.RolePolicyAttachmentArgs{
PolicyArn: pulumi.String("arn:aws:iam::aws:policy/AmazonEKSClusterPolicy"),
Role: cluster.Name,
})
if err != nil {
return err
}
// EKS cluster with hybrid nodes: RemoteNetworkConfig declares the
// on-premises CIDR ranges for nodes and pods so the control plane can
// route to them. az1/az2/az3 are subnets assumed defined elsewhere.
_, err = eks.NewCluster(ctx, "example", &eks.ClusterArgs{
Name: pulumi.String("example"),
AccessConfig: &eks.ClusterAccessConfigArgs{
AuthenticationMode: pulumi.String("API"),
},
RoleArn: cluster.Arn,
Version: pulumi.String("1.31"),
RemoteNetworkConfig: &eks.ClusterRemoteNetworkConfigArgs{
// CIDR range of the on-premises node network.
RemoteNodeNetworks: &eks.ClusterRemoteNetworkConfigRemoteNodeNetworksArgs{
Cidrs: pulumi.StringArray{
pulumi.String("172.16.0.0/18"),
},
},
// CIDR range of the on-premises pod network.
RemotePodNetworks: &eks.ClusterRemoteNetworkConfigRemotePodNetworksArgs{
Cidrs: pulumi.StringArray{
pulumi.String("172.16.64.0/18"),
},
},
},
VpcConfig: &eks.ClusterVpcConfigArgs{
EndpointPrivateAccess: pulumi.Bool(true),
EndpointPublicAccess: pulumi.Bool(true),
SubnetIds: pulumi.StringArray{
az1.Id,
az2.Id,
az3.Id,
},
},
// Ensure the policy attachment completes before cluster creation.
}, pulumi.DependsOn([]pulumi.Resource{
clusterAmazonEKSClusterPolicy,
}))
if err != nil {
return err
}
return nil
})
}
using System.Collections.Generic;
using System.Linq;
using System.Text.Json;
using Pulumi;
using Aws = Pulumi.Aws;
return await Deployment.RunAsync(() =>
{
var cluster = new Aws.Iam.Role("cluster", new()
{
Name = "eks-cluster-example",
AssumeRolePolicy = JsonSerializer.Serialize(new Dictionary<string, object?>
{
["Version"] = "2012-10-17",
["Statement"] = new[]
{
new Dictionary<string, object?>
{
["Action"] = new[]
{
"sts:AssumeRole",
"sts:TagSession",
},
["Effect"] = "Allow",
["Principal"] = new Dictionary<string, object?>
{
["Service"] = "eks.amazonaws.com",
},
},
},
}),
});
var clusterAmazonEKSClusterPolicy = new Aws.Iam.RolePolicyAttachment("cluster_AmazonEKSClusterPolicy", new()
{
PolicyArn = "arn:aws:iam::aws:policy/AmazonEKSClusterPolicy",
Role = cluster.Name,
});
var example = new Aws.Eks.Cluster("example", new()
{
Name = "example",
AccessConfig = new Aws.Eks.Inputs.ClusterAccessConfigArgs
{
AuthenticationMode = "API",
},
RoleArn = cluster.Arn,
Version = "1.31",
RemoteNetworkConfig = new Aws.Eks.Inputs.ClusterRemoteNetworkConfigArgs
{
RemoteNodeNetworks = new Aws.Eks.Inputs.ClusterRemoteNetworkConfigRemoteNodeNetworksArgs
{
Cidrs = new[]
{
"172.16.0.0/18",
},
},
RemotePodNetworks = new Aws.Eks.Inputs.ClusterRemoteNetworkConfigRemotePodNetworksArgs
{
Cidrs = new[]
{
"172.16.64.0/18",
},
},
},
VpcConfig = new Aws.Eks.Inputs.ClusterVpcConfigArgs
{
EndpointPrivateAccess = true,
EndpointPublicAccess = true,
SubnetIds = new[]
{
az1.Id,
az2.Id,
az3.Id,
},
},
}, new CustomResourceOptions
{
DependsOn =
{
clusterAmazonEKSClusterPolicy,
},
});
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.aws.iam.Role;
import com.pulumi.aws.iam.RoleArgs;
import com.pulumi.aws.iam.RolePolicyAttachment;
import com.pulumi.aws.iam.RolePolicyAttachmentArgs;
import com.pulumi.aws.eks.Cluster;
import com.pulumi.aws.eks.ClusterArgs;
import com.pulumi.aws.eks.inputs.ClusterAccessConfigArgs;
import com.pulumi.aws.eks.inputs.ClusterRemoteNetworkConfigArgs;
import com.pulumi.aws.eks.inputs.ClusterRemoteNetworkConfigRemoteNodeNetworksArgs;
import com.pulumi.aws.eks.inputs.ClusterRemoteNetworkConfigRemotePodNetworksArgs;
import com.pulumi.aws.eks.inputs.ClusterVpcConfigArgs;
import static com.pulumi.codegen.internal.Serialization.*;
import com.pulumi.resources.CustomResourceOptions;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
var cluster = new Role("cluster", RoleArgs.builder()
.name("eks-cluster-example")
.assumeRolePolicy(serializeJson(
jsonObject(
jsonProperty("Version", "2012-10-17"),
jsonProperty("Statement", jsonArray(jsonObject(
jsonProperty("Action", jsonArray(
"sts:AssumeRole",
"sts:TagSession"
)),
jsonProperty("Effect", "Allow"),
jsonProperty("Principal", jsonObject(
jsonProperty("Service", "eks.amazonaws.com")
))
)))
)))
.build());
var clusterAmazonEKSClusterPolicy = new RolePolicyAttachment("clusterAmazonEKSClusterPolicy", RolePolicyAttachmentArgs.builder()
.policyArn("arn:aws:iam::aws:policy/AmazonEKSClusterPolicy")
.role(cluster.name())
.build());
var example = new Cluster("example", ClusterArgs.builder()
.name("example")
.accessConfig(ClusterAccessConfigArgs.builder()
.authenticationMode("API")
.build())
.roleArn(cluster.arn())
.version("1.31")
.remoteNetworkConfig(ClusterRemoteNetworkConfigArgs.builder()
.remoteNodeNetworks(ClusterRemoteNetworkConfigRemoteNodeNetworksArgs.builder()
.cidrs("172.16.0.0/18")
.build())
.remotePodNetworks(ClusterRemoteNetworkConfigRemotePodNetworksArgs.builder()
.cidrs("172.16.64.0/18")
.build())
.build())
.vpcConfig(ClusterVpcConfigArgs.builder()
.endpointPrivateAccess(true)
.endpointPublicAccess(true)
.subnetIds(
az1.id(),
az2.id(),
az3.id())
.build())
.build(), CustomResourceOptions.builder()
.dependsOn(clusterAmazonEKSClusterPolicy)
.build());
}
}
resources:
example:
type: aws:eks:Cluster
properties:
name: example
accessConfig:
authenticationMode: API
roleArn: ${cluster.arn}
version: '1.31'
remoteNetworkConfig:
remoteNodeNetworks:
cidrs:
- 172.16.0.0/18
remotePodNetworks:
cidrs:
- 172.16.64.0/18
vpcConfig:
endpointPrivateAccess: true
endpointPublicAccess: true
subnetIds:
- ${az1.id}
- ${az2.id}
- ${az3.id}
options:
dependsOn:
- ${clusterAmazonEKSClusterPolicy}
cluster:
type: aws:iam:Role
properties:
name: eks-cluster-example
assumeRolePolicy:
fn::toJSON:
Version: 2012-10-17
Statement:
- Action:
- sts:AssumeRole
- sts:TagSession
Effect: Allow
Principal:
Service: eks.amazonaws.com
clusterAmazonEKSClusterPolicy:
type: aws:iam:RolePolicyAttachment
name: cluster_AmazonEKSClusterPolicy
properties:
policyArn: arn:aws:iam::aws:policy/AmazonEKSClusterPolicy
role: ${cluster.name}
The remoteNetworkConfig defines CIDR ranges for on-premises nodes and pods. The remoteNodeNetworks.cidrs specify where your on-premises servers reside, while remotePodNetworks.cidrs define the IP space for pods running on those servers. These ranges must not overlap with your VPC or existing networks. The vpcConfig still requires endpointPrivateAccess and endpointPublicAccess settings to control how on-premises nodes reach the control plane.
Deploy a local cluster on AWS Outposts
AWS Outposts brings EKS to on-premises data centers, running the entire Kubernetes control plane locally for low-latency access and data residency requirements.
import * as pulumi from "@pulumi/pulumi";
import * as aws from "@pulumi/aws";
const example = aws.outposts.getOutpost({
name: "example",
});
const cluster = new aws.iam.Role("cluster", {
name: "eks-cluster-example",
assumeRolePolicy: JSON.stringify({
Version: "2012-10-17",
Statement: [{
Action: [
"sts:AssumeRole",
"sts:TagSession",
],
Effect: "Allow",
Principal: {
Service: [
"eks.amazonaws.com",
"ec2.amazonaws.com",
],
},
}],
}),
});
const clusterAmazonEKSLocalOutpostClusterPolicy = new aws.iam.RolePolicyAttachment("cluster_AmazonEKSLocalOutpostClusterPolicy", {
policyArn: "arn:aws:iam::aws:policy/AmazonEKSLocalOutpostClusterPolicy",
role: cluster.name,
});
const exampleCluster = new aws.eks.Cluster("example", {
name: "example",
accessConfig: {
authenticationMode: "CONFIG_MAP",
},
roleArn: cluster.arn,
version: "1.31",
vpcConfig: {
endpointPrivateAccess: true,
endpointPublicAccess: false,
subnetIds: [
az1.id,
az2.id,
az3.id,
],
},
outpostConfig: {
controlPlaneInstanceType: "m5.large",
outpostArns: [example.then(example => example.arn)],
},
}, {
dependsOn: [clusterAmazonEKSLocalOutpostClusterPolicy],
});
import pulumi
import json
import pulumi_aws as aws
example = aws.outposts.get_outpost(name="example")
cluster = aws.iam.Role("cluster",
name="eks-cluster-example",
assume_role_policy=json.dumps({
"Version": "2012-10-17",
"Statement": [{
"Action": [
"sts:AssumeRole",
"sts:TagSession",
],
"Effect": "Allow",
"Principal": {
"Service": [
"eks.amazonaws.com",
"ec2.amazonaws.com",
],
},
}],
}))
cluster_amazon_eks_local_outpost_cluster_policy = aws.iam.RolePolicyAttachment("cluster_AmazonEKSLocalOutpostClusterPolicy",
policy_arn="arn:aws:iam::aws:policy/AmazonEKSLocalOutpostClusterPolicy",
role=cluster.name)
example_cluster = aws.eks.Cluster("example",
name="example",
access_config={
"authentication_mode": "CONFIG_MAP",
},
role_arn=cluster.arn,
version="1.31",
vpc_config={
"endpoint_private_access": True,
"endpoint_public_access": False,
"subnet_ids": [
az1["id"],
az2["id"],
az3["id"],
],
},
outpost_config={
"control_plane_instance_type": "m5.large",
"outpost_arns": [example.arn],
},
opts = pulumi.ResourceOptions(depends_on=[cluster_amazon_eks_local_outpost_cluster_policy]))
package main
import (
"encoding/json"
"github.com/pulumi/pulumi-aws/sdk/v7/go/aws/eks"
"github.com/pulumi/pulumi-aws/sdk/v7/go/aws/iam"
"github.com/pulumi/pulumi-aws/sdk/v7/go/aws/outposts"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
example, err := outposts.GetOutpost(ctx, &outposts.GetOutpostArgs{
Name: pulumi.StringRef("example"),
}, nil)
if err != nil {
return err
}
tmpJSON0, err := json.Marshal(map[string]interface{}{
"Version": "2012-10-17",
"Statement": []map[string]interface{}{
map[string]interface{}{
"Action": []string{
"sts:AssumeRole",
"sts:TagSession",
},
"Effect": "Allow",
"Principal": map[string]interface{}{
"Service": []string{
"eks.amazonaws.com",
"ec2.amazonaws.com",
},
},
},
},
})
if err != nil {
return err
}
json0 := string(tmpJSON0)
cluster, err := iam.NewRole(ctx, "cluster", &iam.RoleArgs{
Name: pulumi.String("eks-cluster-example"),
AssumeRolePolicy: pulumi.String(json0),
})
if err != nil {
return err
}
clusterAmazonEKSLocalOutpostClusterPolicy, err := iam.NewRolePolicyAttachment(ctx, "cluster_AmazonEKSLocalOutpostClusterPolicy", &iam.RolePolicyAttachmentArgs{
PolicyArn: pulumi.String("arn:aws:iam::aws:policy/AmazonEKSLocalOutpostClusterPolicy"),
Role: cluster.Name,
})
if err != nil {
return err
}
_, err = eks.NewCluster(ctx, "example", &eks.ClusterArgs{
Name: pulumi.String("example"),
AccessConfig: &eks.ClusterAccessConfigArgs{
AuthenticationMode: pulumi.String("CONFIG_MAP"),
},
RoleArn: cluster.Arn,
Version: pulumi.String("1.31"),
VpcConfig: &eks.ClusterVpcConfigArgs{
EndpointPrivateAccess: pulumi.Bool(true),
EndpointPublicAccess: pulumi.Bool(false),
SubnetIds: pulumi.StringArray{
az1.Id,
az2.Id,
az3.Id,
},
},
OutpostConfig: &eks.ClusterOutpostConfigArgs{
ControlPlaneInstanceType: pulumi.String("m5.large"),
OutpostArns: pulumi.StringArray{
pulumi.String(example.Arn),
},
},
}, pulumi.DependsOn([]pulumi.Resource{
clusterAmazonEKSLocalOutpostClusterPolicy,
}))
if err != nil {
return err
}
return nil
})
}
using System.Collections.Generic;
using System.Linq;
using System.Text.Json;
using Pulumi;
using Aws = Pulumi.Aws;
return await Deployment.RunAsync(() =>
{
var example = Aws.Outposts.GetOutpost.Invoke(new()
{
Name = "example",
});
var cluster = new Aws.Iam.Role("cluster", new()
{
Name = "eks-cluster-example",
AssumeRolePolicy = JsonSerializer.Serialize(new Dictionary<string, object?>
{
["Version"] = "2012-10-17",
["Statement"] = new[]
{
new Dictionary<string, object?>
{
["Action"] = new[]
{
"sts:AssumeRole",
"sts:TagSession",
},
["Effect"] = "Allow",
["Principal"] = new Dictionary<string, object?>
{
["Service"] = new[]
{
"eks.amazonaws.com",
"ec2.amazonaws.com",
},
},
},
},
}),
});
var clusterAmazonEKSLocalOutpostClusterPolicy = new Aws.Iam.RolePolicyAttachment("cluster_AmazonEKSLocalOutpostClusterPolicy", new()
{
PolicyArn = "arn:aws:iam::aws:policy/AmazonEKSLocalOutpostClusterPolicy",
Role = cluster.Name,
});
var exampleCluster = new Aws.Eks.Cluster("example", new()
{
Name = "example",
AccessConfig = new Aws.Eks.Inputs.ClusterAccessConfigArgs
{
AuthenticationMode = "CONFIG_MAP",
},
RoleArn = cluster.Arn,
Version = "1.31",
VpcConfig = new Aws.Eks.Inputs.ClusterVpcConfigArgs
{
EndpointPrivateAccess = true,
EndpointPublicAccess = false,
SubnetIds = new[]
{
az1.Id,
az2.Id,
az3.Id,
},
},
OutpostConfig = new Aws.Eks.Inputs.ClusterOutpostConfigArgs
{
ControlPlaneInstanceType = "m5.large",
OutpostArns = new[]
{
example.Apply(getOutpostResult => getOutpostResult.Arn),
},
},
}, new CustomResourceOptions
{
DependsOn =
{
clusterAmazonEKSLocalOutpostClusterPolicy,
},
});
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.aws.outposts.OutpostsFunctions;
import com.pulumi.aws.outposts.inputs.GetOutpostArgs;
import com.pulumi.aws.iam.Role;
import com.pulumi.aws.iam.RoleArgs;
import com.pulumi.aws.iam.RolePolicyAttachment;
import com.pulumi.aws.iam.RolePolicyAttachmentArgs;
import com.pulumi.aws.eks.Cluster;
import com.pulumi.aws.eks.ClusterArgs;
import com.pulumi.aws.eks.inputs.ClusterAccessConfigArgs;
import com.pulumi.aws.eks.inputs.ClusterVpcConfigArgs;
import com.pulumi.aws.eks.inputs.ClusterOutpostConfigArgs;
import static com.pulumi.codegen.internal.Serialization.*;
import com.pulumi.resources.CustomResourceOptions;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
final var example = OutpostsFunctions.getOutpost(GetOutpostArgs.builder()
.name("example")
.build());
var cluster = new Role("cluster", RoleArgs.builder()
.name("eks-cluster-example")
.assumeRolePolicy(serializeJson(
jsonObject(
jsonProperty("Version", "2012-10-17"),
jsonProperty("Statement", jsonArray(jsonObject(
jsonProperty("Action", jsonArray(
"sts:AssumeRole",
"sts:TagSession"
)),
jsonProperty("Effect", "Allow"),
jsonProperty("Principal", jsonObject(
jsonProperty("Service", jsonArray(
"eks.amazonaws.com",
"ec2.amazonaws.com"
))
))
)))
)))
.build());
var clusterAmazonEKSLocalOutpostClusterPolicy = new RolePolicyAttachment("clusterAmazonEKSLocalOutpostClusterPolicy", RolePolicyAttachmentArgs.builder()
.policyArn("arn:aws:iam::aws:policy/AmazonEKSLocalOutpostClusterPolicy")
.role(cluster.name())
.build());
var exampleCluster = new Cluster("exampleCluster", ClusterArgs.builder()
.name("example")
.accessConfig(ClusterAccessConfigArgs.builder()
.authenticationMode("CONFIG_MAP")
.build())
.roleArn(cluster.arn())
.version("1.31")
.vpcConfig(ClusterVpcConfigArgs.builder()
.endpointPrivateAccess(true)
.endpointPublicAccess(false)
.subnetIds(
az1.id(),
az2.id(),
az3.id())
.build())
.outpostConfig(ClusterOutpostConfigArgs.builder()
.controlPlaneInstanceType("m5.large")
.outpostArns(example.arn())
.build())
.build(), CustomResourceOptions.builder()
.dependsOn(clusterAmazonEKSLocalOutpostClusterPolicy)
.build());
}
}
resources:
exampleCluster:
type: aws:eks:Cluster
name: example
properties:
name: example
accessConfig:
authenticationMode: CONFIG_MAP
roleArn: ${cluster.arn}
version: '1.31'
vpcConfig:
endpointPrivateAccess: true
endpointPublicAccess: false
subnetIds:
- ${az1.id}
- ${az2.id}
- ${az3.id}
outpostConfig:
controlPlaneInstanceType: m5.large
outpostArns:
- ${example.arn}
options:
dependsOn:
- ${clusterAmazonEKSLocalOutpostClusterPolicy}
cluster:
type: aws:iam:Role
properties:
name: eks-cluster-example
assumeRolePolicy:
fn::toJSON:
Version: 2012-10-17
Statement:
- Action:
- sts:AssumeRole
- sts:TagSession
Effect: Allow
Principal:
Service:
- eks.amazonaws.com
- ec2.amazonaws.com
clusterAmazonEKSLocalOutpostClusterPolicy:
type: aws:iam:RolePolicyAttachment
name: cluster_AmazonEKSLocalOutpostClusterPolicy
properties:
policyArn: arn:aws:iam::aws:policy/AmazonEKSLocalOutpostClusterPolicy
role: ${cluster.name}
variables:
example:
fn::invoke:
function: aws:outposts:getOutpost
arguments:
name: example
The outpostConfig specifies where the control plane runs. The controlPlaneInstanceType determines the EC2 instance type for control plane components, and outpostArns lists the Outposts where the cluster deploys. Unlike standard clusters, Outpost clusters use authenticationMode “CONFIG_MAP” and require endpointPublicAccess set to false since the control plane runs locally. The IAM role needs trust relationships for both eks.amazonaws.com and ec2.amazonaws.com, and uses AmazonEKSLocalOutpostClusterPolicy instead of the standard cluster policy.
Beyond these examples
These snippets focus on specific cluster-level features: standard clusters with API authentication, EKS Auto Mode for automated infrastructure, and hybrid nodes and Outposts deployment. They’re intentionally minimal rather than full Kubernetes deployments.
The examples rely on pre-existing infrastructure such as IAM roles with appropriate trust policies and managed policy attachments, VPC subnets across multiple availability zones, and AWS Outposts for the local cluster example. They focus on configuring the control plane rather than provisioning everything around it.
To keep things focused, common cluster patterns are omitted, including:
- Encryption configuration (encryptionConfig with KMS keys)
- Control plane logging (enabledClusterLogTypes)
- Node groups and Fargate profiles
- Add-ons and cluster extensions
- Zonal shift and upgrade policies
These omissions are intentional: the goal is to illustrate how each cluster feature is wired, not provide drop-in Kubernetes platforms. See the EKS Cluster resource reference for all available configuration options.
Let's deploy AWS EKS Clusters
Get started with Pulumi Cloud, then follow our quick setup guide to deploy this infrastructure.
Try Pulumi Cloud for FREE
Frequently Asked Questions
IAM & Permissions
Without dependsOn on aws.iam.RolePolicy or aws.iam.RolePolicyAttachment resources, EKS cannot delete managed EC2 infrastructure like Security Groups during cluster deletion, causing failures.
EKS Auto Mode
To enable EKS Auto Mode, set the following to true: computeConfig.enabled, kubernetesNetworkConfig.elasticLoadBalancing.enabled, and storageConfig.blockStorage.enabled. Also set bootstrapSelfManagedAddons to false. All three config flags must be set consistently (all true to enable, all false to disable).
EKS Auto Mode requires all three settings (computeConfig.enabled, kubernetesNetworkConfig.elasticLoadBalancing.enabled, storageConfig.blockStorage.enabled) to be set to the same value. You cannot enable them independently.
Cluster Configuration
The cluster's Kubernetes version is controlled by the version property value. If not specified initially, the latest version at creation is used with no automatic upgrades except those triggered by EKS. Downgrades are not supported.
bootstrapSelfManagedAddons defaults to true. This property is immutable; changing it forces a new cluster to be created.
Cluster names must match the pattern ^[0-9A-Za-z][A-Za-z0-9\-_]*$. The name is immutable after creation.
To protect a cluster from accidental removal, set deletionProtection to true. When enabled, the cluster cannot be deleted unless deletion protection is first disabled. Defaults to false.
Specialized Deployments
Hybrid nodes use remoteNetworkConfig with remoteNodeNetworks.cidrs and remotePodNetworks.cidrs to specify CIDR blocks for nodes and pods running outside AWS.
Local clusters on AWS Outposts use outpostConfig with controlPlaneInstanceType and outpostArns. Set vpcConfig.endpointPrivateAccess to true and endpointPublicAccess to false, and use authenticationMode: CONFIG_MAP in accessConfig.
Every cluster requires vpcConfig with appropriate subnet IDs. Refer to AWS documentation on Cluster VPC Considerations and Cluster Security Group Considerations for detailed requirements.
Using a different cloud?
Explore containers guides for other cloud providers: