Create AWS EKS Node Groups

The aws:eks/nodeGroup:NodeGroup resource, part of the Pulumi AWS provider, provisions and manages an EKS managed node group, which runs Kubernetes worker nodes in an EKS-managed Auto Scaling Group. This guide focuses on three capabilities: node group creation with scaling and update policies, IAM role configuration, and multi-AZ subnet placement.

Node groups depend on an existing EKS cluster, an IAM role with specific AWS-managed policies, and VPC subnets where worker nodes will run. The examples are intentionally small. Combine them with your own cluster, networking, and access configuration.

Attach required IAM policies to the node role

Worker nodes need an IAM role with three AWS-managed policies: one for cluster operations, one for pod networking, and one for pulling container images from ECR.

import * as pulumi from "@pulumi/pulumi";
import * as aws from "@pulumi/aws";

const example = new aws.iam.Role("example", {
    name: "eks-node-group-example",
    assumeRolePolicy: JSON.stringify({
        Statement: [{
            Action: "sts:AssumeRole",
            Effect: "Allow",
            Principal: {
                Service: "ec2.amazonaws.com",
            },
        }],
        Version: "2012-10-17",
    }),
});
const example_AmazonEKSWorkerNodePolicy = new aws.iam.RolePolicyAttachment("example-AmazonEKSWorkerNodePolicy", {
    policyArn: "arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy",
    role: example.name,
});
const example_AmazonEKSCNIPolicy = new aws.iam.RolePolicyAttachment("example-AmazonEKS_CNI_Policy", {
    policyArn: "arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy",
    role: example.name,
});
const example_AmazonEC2ContainerRegistryReadOnly = new aws.iam.RolePolicyAttachment("example-AmazonEC2ContainerRegistryReadOnly", {
    policyArn: "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly",
    role: example.name,
});
import pulumi
import json
import pulumi_aws as aws

example = aws.iam.Role("example",
    name="eks-node-group-example",
    assume_role_policy=json.dumps({
        "Statement": [{
            "Action": "sts:AssumeRole",
            "Effect": "Allow",
            "Principal": {
                "Service": "ec2.amazonaws.com",
            },
        }],
        "Version": "2012-10-17",
    }))
example__amazon_eks_worker_node_policy = aws.iam.RolePolicyAttachment("example-AmazonEKSWorkerNodePolicy",
    policy_arn="arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy",
    role=example.name)
example__amazon_ekscni_policy = aws.iam.RolePolicyAttachment("example-AmazonEKS_CNI_Policy",
    policy_arn="arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy",
    role=example.name)
example__amazon_ec2_container_registry_read_only = aws.iam.RolePolicyAttachment("example-AmazonEC2ContainerRegistryReadOnly",
    policy_arn="arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly",
    role=example.name)
package main

import (
	"encoding/json"

	"github.com/pulumi/pulumi-aws/sdk/v7/go/aws/iam"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		tmpJSON0, err := json.Marshal(map[string]interface{}{
			"Statement": []map[string]interface{}{
				map[string]interface{}{
					"Action": "sts:AssumeRole",
					"Effect": "Allow",
					"Principal": map[string]interface{}{
						"Service": "ec2.amazonaws.com",
					},
				},
			},
			"Version": "2012-10-17",
		})
		if err != nil {
			return err
		}
		json0 := string(tmpJSON0)
		example, err := iam.NewRole(ctx, "example", &iam.RoleArgs{
			Name:             pulumi.String("eks-node-group-example"),
			AssumeRolePolicy: pulumi.String(json0),
		})
		if err != nil {
			return err
		}
		_, err = iam.NewRolePolicyAttachment(ctx, "example-AmazonEKSWorkerNodePolicy", &iam.RolePolicyAttachmentArgs{
			PolicyArn: pulumi.String("arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy"),
			Role:      example.Name,
		})
		if err != nil {
			return err
		}
		_, err = iam.NewRolePolicyAttachment(ctx, "example-AmazonEKS_CNI_Policy", &iam.RolePolicyAttachmentArgs{
			PolicyArn: pulumi.String("arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy"),
			Role:      example.Name,
		})
		if err != nil {
			return err
		}
		_, err = iam.NewRolePolicyAttachment(ctx, "example-AmazonEC2ContainerRegistryReadOnly", &iam.RolePolicyAttachmentArgs{
			PolicyArn: pulumi.String("arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly"),
			Role:      example.Name,
		})
		if err != nil {
			return err
		}
		return nil
	})
}
using System.Collections.Generic;
using System.Linq;
using System.Text.Json;
using Pulumi;
using Aws = Pulumi.Aws;

return await Deployment.RunAsync(() => 
{
    var example = new Aws.Iam.Role("example", new()
    {
        Name = "eks-node-group-example",
        AssumeRolePolicy = JsonSerializer.Serialize(new Dictionary<string, object?>
        {
            ["Statement"] = new[]
            {
                new Dictionary<string, object?>
                {
                    ["Action"] = "sts:AssumeRole",
                    ["Effect"] = "Allow",
                    ["Principal"] = new Dictionary<string, object?>
                    {
                        ["Service"] = "ec2.amazonaws.com",
                    },
                },
            },
            ["Version"] = "2012-10-17",
        }),
    });

    var example_AmazonEKSWorkerNodePolicy = new Aws.Iam.RolePolicyAttachment("example-AmazonEKSWorkerNodePolicy", new()
    {
        PolicyArn = "arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy",
        Role = example.Name,
    });

    var example_AmazonEKSCNIPolicy = new Aws.Iam.RolePolicyAttachment("example-AmazonEKS_CNI_Policy", new()
    {
        PolicyArn = "arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy",
        Role = example.Name,
    });

    var example_AmazonEC2ContainerRegistryReadOnly = new Aws.Iam.RolePolicyAttachment("example-AmazonEC2ContainerRegistryReadOnly", new()
    {
        PolicyArn = "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly",
        Role = example.Name,
    });

});
package generated_program;

import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.aws.iam.Role;
import com.pulumi.aws.iam.RoleArgs;
import com.pulumi.aws.iam.RolePolicyAttachment;
import com.pulumi.aws.iam.RolePolicyAttachmentArgs;
import static com.pulumi.codegen.internal.Serialization.*;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;

public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        var example = new Role("example", RoleArgs.builder()
            .name("eks-node-group-example")
            .assumeRolePolicy(serializeJson(
                jsonObject(
                    jsonProperty("Statement", jsonArray(jsonObject(
                        jsonProperty("Action", "sts:AssumeRole"),
                        jsonProperty("Effect", "Allow"),
                        jsonProperty("Principal", jsonObject(
                            jsonProperty("Service", "ec2.amazonaws.com")
                        ))
                    ))),
                    jsonProperty("Version", "2012-10-17")
                )))
            .build());

        var example_AmazonEKSWorkerNodePolicy = new RolePolicyAttachment("example-AmazonEKSWorkerNodePolicy", RolePolicyAttachmentArgs.builder()
            .policyArn("arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy")
            .role(example.name())
            .build());

        var example_AmazonEKSCNIPolicy = new RolePolicyAttachment("example-AmazonEKSCNIPolicy", RolePolicyAttachmentArgs.builder()
            .policyArn("arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy")
            .role(example.name())
            .build());

        var example_AmazonEC2ContainerRegistryReadOnly = new RolePolicyAttachment("example-AmazonEC2ContainerRegistryReadOnly", RolePolicyAttachmentArgs.builder()
            .policyArn("arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly")
            .role(example.name())
            .build());

    }
}
resources:
  example:
    type: aws:iam:Role
    properties:
      name: eks-node-group-example
      assumeRolePolicy:
        fn::toJSON:
          Statement:
            - Action: sts:AssumeRole
              Effect: Allow
              Principal:
                Service: ec2.amazonaws.com
          Version: 2012-10-17
  example-AmazonEKSWorkerNodePolicy:
    type: aws:iam:RolePolicyAttachment
    properties:
      policyArn: arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy
      role: ${example.name}
  example-AmazonEKSCNIPolicy:
    type: aws:iam:RolePolicyAttachment
    name: example-AmazonEKS_CNI_Policy
    properties:
      policyArn: arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy
      role: ${example.name}
  example-AmazonEC2ContainerRegistryReadOnly:
    type: aws:iam:RolePolicyAttachment
    properties:
      policyArn: arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly
      role: ${example.name}

The assumeRolePolicy grants EC2 the ability to assume this role. The three RolePolicyAttachment resources attach AWS-managed policies: AmazonEKSWorkerNodePolicy allows nodes to register with the cluster, AmazonEKS_CNI_Policy enables the VPC CNI plugin to manage pod networking, and AmazonEC2ContainerRegistryReadOnly permits pulling images from ECR.
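
If the node group lives in a separate program or stack, a minimal TypeScript sketch, assuming the example role above, is to export the role ARN as a stack output so the node group can consume it (for instance through a StackReference):

// Expose the node role ARN for use as the node group's nodeRoleArn.
export const nodeRoleArn = example.arn;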

Distribute nodes across availability zones

High-availability clusters spread worker nodes across multiple zones to survive zone failures.

import * as pulumi from "@pulumi/pulumi";
import * as aws from "@pulumi/aws";
import * as std from "@pulumi/std";

const available = aws.getAvailabilityZones({
    state: "available",
});
const example: aws.ec2.Subnet[] = [];
for (let i = 0; i < 2; i++) {
    example.push(new aws.ec2.Subnet(`example-${i}`, {
        availabilityZone: available.then(available => available.names[i]),
        cidrBlock: std.cidrsubnet({
            input: exampleAwsVpc.cidrBlock,
            newbits: 8,
            netnum: i,
        }).then(invoke => invoke.result),
        vpcId: exampleAwsVpc.id,
    }));
}
import pulumi
import pulumi_aws as aws
import pulumi_std as std

available = aws.get_availability_zones(state="available")
example = []
for range in [{"value": i} for i in range(0, 2)]:
    example.append(aws.ec2.Subnet(f"example-{range['value']}",
        availability_zone=available.names[range["value"]],
        cidr_block=std.cidrsubnet(input=example_aws_vpc["cidrBlock"],
            newbits=8,
            netnum=range["value"]).result,
        vpc_id=example_aws_vpc["id"]))
package main

import (
	"fmt"

	"github.com/pulumi/pulumi-aws/sdk/v7/go/aws"
	"github.com/pulumi/pulumi-aws/sdk/v7/go/aws/ec2"
	"github.com/pulumi/pulumi-std/sdk/go/std"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		available, err := aws.GetAvailabilityZones(ctx, &aws.GetAvailabilityZonesArgs{
			State: pulumi.StringRef("available"),
		}, nil)
		if err != nil {
			return err
		}
		var example []*ec2.Subnet
		for index := 0; index < 2; index++ {
			key0 := index
			val0 := index
			invokeCidrsubnet, err := std.Cidrsubnet(ctx, &std.CidrsubnetArgs{
				Input:   exampleAwsVpc.CidrBlock,
				Newbits: 8,
				Netnum:  val0,
			}, nil)
			if err != nil {
				return err
			}
			__res, err := ec2.NewSubnet(ctx, fmt.Sprintf("example-%v", key0), &ec2.SubnetArgs{
				AvailabilityZone: pulumi.String(available.Names[val0]),
				CidrBlock:        pulumi.String(invokeCidrsubnet.Result),
				VpcId:            pulumi.Any(exampleAwsVpc.Id),
			})
			if err != nil {
				return err
			}
			example = append(example, __res)
		}
		return nil
	})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Aws = Pulumi.Aws;
using Std = Pulumi.Std;

return await Deployment.RunAsync(() => 
{
    var available = Aws.GetAvailabilityZones.Invoke(new()
    {
        State = "available",
    });

    var example = new List<Aws.Ec2.Subnet>();
    for (var rangeIndex = 0; rangeIndex < 2; rangeIndex++)
    {
        var range = new { Value = rangeIndex };
        example.Add(new Aws.Ec2.Subnet($"example-{range.Value}", new()
        {
            AvailabilityZone = available.Apply(getAvailabilityZonesResult => getAvailabilityZonesResult.Names[range.Value]),
            CidrBlock = Std.Cidrsubnet.Invoke(new()
            {
                Input = exampleAwsVpc.CidrBlock,
                Newbits = 8,
                Netnum = range.Value,
            }).Apply(invoke => invoke.Result),
            VpcId = exampleAwsVpc.Id,
        }));
    }
});
package generated_program;

import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.aws.AwsFunctions;
import com.pulumi.aws.inputs.GetAvailabilityZonesArgs;
import com.pulumi.aws.ec2.Subnet;
import com.pulumi.aws.ec2.SubnetArgs;
import com.pulumi.std.StdFunctions;
import com.pulumi.std.inputs.CidrsubnetArgs;
import com.pulumi.codegen.internal.KeyedValue;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;

public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        final var available = AwsFunctions.getAvailabilityZones(GetAvailabilityZonesArgs.builder()
            .state("available")
            .build());

        for (var i = 0; i < 2; i++) {
            final var index = i;
            new Subnet("example-" + index, SubnetArgs.builder()
                .availabilityZone(available.applyValue(zones -> zones.names().get(index)))
                .cidrBlock(StdFunctions.cidrsubnet(CidrsubnetArgs.builder()
                    .input(exampleAwsVpc.cidrBlock())
                    .newbits(8)
                    .netnum(index)
                    .build()).applyValue(invoke -> invoke.result()))
                .vpcId(exampleAwsVpc.id())
                .build());
        }
    }
}

The getAvailabilityZones function retrieves available zones in your region. The loop creates one subnet per zone, using cidrsubnet to carve non-overlapping CIDR blocks from the VPC’s address space. Node groups reference these subnet IDs to distribute instances across zones.
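
For context, here is a minimal TypeScript sketch of the VPC these snippets assume (the exampleAwsVpc placeholder). With a /16 VPC and newbits of 8, netnum values 0 and 1 produce the non-overlapping /24 blocks 10.0.0.0/24 and 10.0.1.0/24.

import * as aws from "@pulumi/aws";

// Hypothetical VPC backing the exampleAwsVpc references in the subnet loop.
const exampleAwsVpc = new aws.ec2.Vpc("example", {
    cidrBlock: "10.0.0.0/16",
});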

Create a node group with scaling and update policies

Once you have an IAM role and subnets, you can provision the node group itself.

import * as pulumi from "@pulumi/pulumi";
import * as aws from "@pulumi/aws";

const example = new aws.eks.NodeGroup("example", {
    clusterName: exampleAwsEksCluster.name,
    nodeGroupName: "example",
    nodeRoleArn: exampleAwsIamRole.arn,
    subnetIds: exampleAwsSubnet.map(__item => __item.id),
    scalingConfig: {
        desiredSize: 1,
        maxSize: 2,
        minSize: 1,
    },
    updateConfig: {
        maxUnavailable: 1,
    },
}, {
    dependsOn: [
        example_AmazonEKSWorkerNodePolicy,
        example_AmazonEKSCNIPolicy,
        example_AmazonEC2ContainerRegistryReadOnly,
    ],
});
import pulumi
import pulumi_aws as aws

example = aws.eks.NodeGroup("example",
    cluster_name=example_aws_eks_cluster["name"],
    node_group_name="example",
    node_role_arn=example_aws_iam_role["arn"],
    subnet_ids=[__item["id"] for __item in example_aws_subnet],
    scaling_config={
        "desired_size": 1,
        "max_size": 2,
        "min_size": 1,
    },
    update_config={
        "max_unavailable": 1,
    },
    opts = pulumi.ResourceOptions(depends_on=[
            example__amazon_eks_worker_node_policy,
            example__amazon_ekscni_policy,
            example__amazon_ec2_container_registry_read_only,
        ]))
package main

import (
	"github.com/pulumi/pulumi-aws/sdk/v7/go/aws/eks"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		var splat0 []interface{}
		for _, val0 := range exampleAwsSubnet {
			splat0 = append(splat0, val0.Id)
		}
		_, err := eks.NewNodeGroup(ctx, "example", &eks.NodeGroupArgs{
			ClusterName:   pulumi.Any(exampleAwsEksCluster.Name),
			NodeGroupName: pulumi.String("example"),
			NodeRoleArn:   pulumi.Any(exampleAwsIamRole.Arn),
			SubnetIds:     toPulumiArray(splat0),
			ScalingConfig: &eks.NodeGroupScalingConfigArgs{
				DesiredSize: pulumi.Int(1),
				MaxSize:     pulumi.Int(2),
				MinSize:     pulumi.Int(1),
			},
			UpdateConfig: &eks.NodeGroupUpdateConfigArgs{
				MaxUnavailable: pulumi.Int(1),
			},
		}, pulumi.DependsOn([]pulumi.Resource{
			example_AmazonEKSWorkerNodePolicy,
			example_AmazonEKSCNIPolicy,
			example_AmazonEC2ContainerRegistryReadOnly,
		}))
		if err != nil {
			return err
		}
		return nil
	})
}

func toPulumiArray(arr []interface{}) pulumi.Array {
	var pulumiArr pulumi.Array
	for _, v := range arr {
		pulumiArr = append(pulumiArr, pulumi.Any(v))
	}
	return pulumiArr
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Aws = Pulumi.Aws;

return await Deployment.RunAsync(() => 
{
    var example = new Aws.Eks.NodeGroup("example", new()
    {
        ClusterName = exampleAwsEksCluster.Name,
        NodeGroupName = "example",
        NodeRoleArn = exampleAwsIamRole.Arn,
        SubnetIds = exampleAwsSubnet.Select(__item => __item.Id).ToList(),
        ScalingConfig = new Aws.Eks.Inputs.NodeGroupScalingConfigArgs
        {
            DesiredSize = 1,
            MaxSize = 2,
            MinSize = 1,
        },
        UpdateConfig = new Aws.Eks.Inputs.NodeGroupUpdateConfigArgs
        {
            MaxUnavailable = 1,
        },
    }, new CustomResourceOptions
    {
        DependsOn =
        {
            example_AmazonEKSWorkerNodePolicy,
            example_AmazonEKSCNIPolicy,
            example_AmazonEC2ContainerRegistryReadOnly,
        },
    });

});
package generated_program;

import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.aws.eks.NodeGroup;
import com.pulumi.aws.eks.NodeGroupArgs;
import com.pulumi.aws.eks.inputs.NodeGroupScalingConfigArgs;
import com.pulumi.aws.eks.inputs.NodeGroupUpdateConfigArgs;
import com.pulumi.resources.CustomResourceOptions;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;

public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        var example = new NodeGroup("example", NodeGroupArgs.builder()
            .clusterName(exampleAwsEksCluster.name())
            .nodeGroupName("example")
            .nodeRoleArn(exampleAwsIamRole.arn())
            .subnetIds(exampleAwsSubnet.stream().map(element -> element.id()).collect(toList()))
            .scalingConfig(NodeGroupScalingConfigArgs.builder()
                .desiredSize(1)
                .maxSize(2)
                .minSize(1)
                .build())
            .updateConfig(NodeGroupUpdateConfigArgs.builder()
                .maxUnavailable(1)
                .build())
            .build(), CustomResourceOptions.builder()
                .dependsOn(                
                    example_AmazonEKSWorkerNodePolicy,
                    example_AmazonEKSCNIPolicy,
                    example_AmazonEC2ContainerRegistryReadOnly)
                .build());

    }
}

The scalingConfig block sets the Auto Scaling Group’s capacity: desiredSize controls the initial count, while minSize and maxSize define scaling boundaries. The updateConfig block controls rolling updates: maxUnavailable limits how many nodes can be replaced simultaneously. The dependsOn ensures IAM policies are attached before nodes attempt to join the cluster.
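
Because a managed node group is backed by an Auto Scaling Group, the resource's resources output exposes the generated ASG. A hedged TypeScript sketch, assuming the example node group above and the current shape of that output, exports the ASG name:

// Export the name of the Auto Scaling Group that EKS created for this node group.
export const nodeGroupAsgName = example.resources.apply(r => r[0]?.autoscalingGroups?.[0]?.name);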

Beyond these examples

These snippets focus on specific node group features: managed node group provisioning, IAM role configuration, and multi-AZ subnet placement. They’re intentionally minimal rather than full cluster deployments.

The examples assume pre-existing infrastructure such as an EKS cluster and a VPC with an assigned CIDR block. They focus on configuring the node group rather than provisioning everything around it.

To keep things focused, common node group patterns are omitted, including:

  • Launch templates for custom AMIs or user data
  • Spot instances (capacityType)
  • Kubernetes labels and taints
  • Remote access configuration (SSH keys)

These omissions are intentional: the goal is to illustrate how each node group feature is wired, not provide drop-in cluster modules. See the EKS NodeGroup resource reference for all available configuration options.

Frequently Asked Questions

Configuration & Setup
What IAM policies does my node role need?
Your node role requires three AWS-managed policies: AmazonEKSWorkerNodePolicy, AmazonEKS_CNI_Policy, and AmazonEC2ContainerRegistryReadOnly. The example attaches them with RolePolicyAttachment resources and lists those attachments in the node group's dependsOn so they're in place before node group creation.
How do I name my node group?
Use either nodeGroupName or nodeGroupNamePrefix, but not both. The name can’t exceed 63 characters and must start with a letter or digit, but can include hyphens and underscores for remaining characters.
What subnets should I use for my node group?
Specify EC2 subnet IDs via subnetIds. The example shows creating subnets across multiple availability zones using CIDR subnet calculations. Note that subnetIds is immutable after creation.
Immutability & Updates
What can't I change after creating my node group?
These properties are immutable: amiType, capacityType, clusterName, diskSize, instanceTypes, nodeGroupName, nodeGroupNamePrefix, nodeRoleArn, subnetIds, and remoteAccess. Changes to any of these require recreating the node group.
Why does my node group show a perpetual diff on desiredSize?
External autoscalers (like Cluster Autoscaler) modify desiredSize, causing Pulumi to detect drift. Use ignoreChanges on scalingConfig.desiredSize to allow external management of the desired count.
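
A minimal TypeScript sketch of that option, reusing the placeholders from the node group example above:

import * as aws from "@pulumi/aws";

const example = new aws.eks.NodeGroup("example", {
    clusterName: exampleAwsEksCluster.name,
    nodeRoleArn: exampleAwsIamRole.arn,
    subnetIds: exampleAwsSubnet.map(s => s.id),
    scalingConfig: { desiredSize: 1, maxSize: 2, minSize: 1 },
}, {
    // Let an external autoscaler own the desired count without showing drift.
    ignoreChanges: ["scalingConfig.desiredSize"],
});
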
What if pods can't drain during a version update?
Set forceUpdateVersion to true to force the version update when existing pods can’t be drained due to pod disruption budget issues.
What scaling configuration is required?
You must configure scalingConfig with desiredSize, maxSize, and minSize for the Auto Scaling Group. The example shows setting these to 1, 2, and 1 respectively.
Launch Configuration
Can I use both launchTemplate and remoteAccess?
No, launchTemplate and remoteAccess are mutually exclusive. Choose launchTemplate for advanced configuration or remoteAccess for simple SSH access.
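
A hedged TypeScript sketch of the remoteAccess form, reusing the doc's placeholders (the key pair name is hypothetical); to use launchTemplate instead, drop remoteAccess and pass a launchTemplate block with an id or name plus a version:

const sshEnabled = new aws.eks.NodeGroup("ssh-enabled", {
    clusterName: exampleAwsEksCluster.name,
    nodeRoleArn: exampleAwsIamRole.arn,
    subnetIds: exampleAwsSubnet.map(s => s.id),
    scalingConfig: { desiredSize: 1, maxSize: 2, minSize: 1 },
    remoteAccess: {
        ec2SshKey: "my-key-pair",
    },
});
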
What's the default instance type for node groups?
Node groups default to ["t3.medium"] if you don’t specify instanceTypes. Note that instanceTypes is immutable after creation.
What's the default disk size for worker nodes?
Disk size defaults to 50 GiB for Windows node groups and 20 GiB for all other node groups. The diskSize property is immutable after creation.
Limits & Constraints
How many taints can I apply to a node group?
You can apply a maximum of 50 Kubernetes taints per node group.
