The aws:batch/jobDefinition:JobDefinition resource, part of the Pulumi AWS provider, defines the blueprint for AWS Batch jobs: container images, compute requirements, and execution parameters. This guide focuses on three capabilities: container and multinode job types, EKS and Fargate platform capabilities, and multi-container orchestration.
Job definitions reference IAM roles for execution permissions, Docker images from accessible registries, and optionally EKS clusters or CloudWatch log groups. The examples are intentionally small. Combine them with your own IAM policies, compute environments, and job queues.
Define a container job with resource requirements
Most Batch workloads start with a container-based job definition that specifies the Docker image, command, and compute resources.
import * as pulumi from "@pulumi/pulumi";
import * as aws from "@pulumi/aws";

// Single-container Batch job definition. containerProperties must be a JSON
// string (hence JSON.stringify) describing the image, command, compute
// resources, volumes, environment, mount points, and ulimits.
const test = new aws.batch.JobDefinition("test", {
    name: "my_test_batch_job_definition",
    type: "container", // single-container job (vs. "multinode")
    containerProperties: JSON.stringify({
        // Command executed inside the container when the job starts.
        command: [
            "ls",
            "-la",
        ],
        image: "busybox",
        // VCPU/MEMORY values are strings; MEMORY is in MiB.
        resourceRequirements: [
            {
                type: "VCPU",
                value: "0.25",
            },
            {
                type: "MEMORY",
                value: "512",
            },
        ],
        // Host-path volume, mounted read-write at /tmp via mountPoints below.
        volumes: [{
            host: {
                sourcePath: "/tmp",
            },
            name: "tmp",
        }],
        environment: [{
            name: "VARNAME",
            value: "VARVAL",
        }],
        mountPoints: [{
            sourceVolume: "tmp",
            containerPath: "/tmp",
            readOnly: false,
        }],
        // nofile = max open file descriptors inside the container.
        ulimits: [{
            hardLimit: 1024,
            name: "nofile",
            softLimit: 1024,
        }],
    }),
});
import pulumi
import json
import pulumi_aws as aws

# Single-container Batch job definition. container_properties must be a JSON
# string (hence json.dumps) describing the image, command, compute resources,
# volumes, environment, mount points, and ulimits.
test = aws.batch.JobDefinition("test",
    name="my_test_batch_job_definition",
    type="container",  # single-container job (vs. "multinode")
    container_properties=json.dumps({
        # Command executed inside the container when the job starts.
        "command": [
            "ls",
            "-la",
        ],
        "image": "busybox",
        # VCPU/MEMORY values are strings; MEMORY is in MiB.
        "resourceRequirements": [
            {
                "type": "VCPU",
                "value": "0.25",
            },
            {
                "type": "MEMORY",
                "value": "512",
            },
        ],
        # Host-path volume, mounted read-write at /tmp via mountPoints below.
        "volumes": [{
            "host": {
                "sourcePath": "/tmp",
            },
            "name": "tmp",
        }],
        "environment": [{
            "name": "VARNAME",
            "value": "VARVAL",
        }],
        "mountPoints": [{
            "sourceVolume": "tmp",
            "containerPath": "/tmp",
            "readOnly": False,
        }],
        # nofile = max open file descriptors inside the container.
        "ulimits": [{
            "hardLimit": 1024,
            "name": "nofile",
            "softLimit": 1024,
        }],
    }))
package main

import (
	"encoding/json"

	"github.com/pulumi/pulumi-aws/sdk/v7/go/aws/batch"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		// Build the containerProperties document up front; the provider
		// expects it as a JSON string, so we marshal it and pass it below.
		tmpJSON0, err := json.Marshal(map[string]interface{}{
			// Command executed inside the container when the job starts.
			"command": []string{
				"ls",
				"-la",
			},
			"image": "busybox",
			// VCPU/MEMORY values are strings; MEMORY is in MiB.
			"resourceRequirements": []map[string]interface{}{
				map[string]interface{}{
					"type":  "VCPU",
					"value": "0.25",
				},
				map[string]interface{}{
					"type":  "MEMORY",
					"value": "512",
				},
			},
			// Host-path volume, mounted read-write at /tmp via mountPoints.
			"volumes": []map[string]interface{}{
				map[string]interface{}{
					"host": map[string]interface{}{
						"sourcePath": "/tmp",
					},
					"name": "tmp",
				},
			},
			"environment": []map[string]interface{}{
				map[string]interface{}{
					"name":  "VARNAME",
					"value": "VARVAL",
				},
			},
			"mountPoints": []map[string]interface{}{
				map[string]interface{}{
					"sourceVolume":  "tmp",
					"containerPath": "/tmp",
					"readOnly":      false,
				},
			},
			// nofile = max open file descriptors inside the container.
			"ulimits": []map[string]interface{}{
				map[string]interface{}{
					"hardLimit": 1024,
					"name":      "nofile",
					"softLimit": 1024,
				},
			},
		})
		if err != nil {
			return err
		}
		json0 := string(tmpJSON0)
		_, err = batch.NewJobDefinition(ctx, "test", &batch.JobDefinitionArgs{
			Name:                pulumi.String("my_test_batch_job_definition"),
			Type:                pulumi.String("container"), // single-container job
			ContainerProperties: pulumi.String(json0),
		})
		if err != nil {
			return err
		}
		return nil
	})
}
using System.Collections.Generic;
using System.Linq;
using System.Text.Json;
using Pulumi;
using Aws = Pulumi.Aws;

return await Deployment.RunAsync(() =>
{
    // Single-container Batch job definition; ContainerProperties is the JSON
    // document (serialized to a string) describing the container to run.
    var test = new Aws.Batch.JobDefinition("test", new()
    {
        Name = "my_test_batch_job_definition",
        Type = "container", // single-container job (vs. "multinode")
        ContainerProperties = JsonSerializer.Serialize(new Dictionary<string, object?>
        {
            // Command executed inside the container when the job starts.
            ["command"] = new[]
            {
                "ls",
                "-la",
            },
            ["image"] = "busybox",
            // VCPU/MEMORY values are strings; MEMORY is in MiB.
            ["resourceRequirements"] = new[]
            {
                new Dictionary<string, object?>
                {
                    ["type"] = "VCPU",
                    ["value"] = "0.25",
                },
                new Dictionary<string, object?>
                {
                    ["type"] = "MEMORY",
                    ["value"] = "512",
                },
            },
            // Host-path volume, mounted read-write at /tmp via mountPoints.
            ["volumes"] = new[]
            {
                new Dictionary<string, object?>
                {
                    ["host"] = new Dictionary<string, object?>
                    {
                        ["sourcePath"] = "/tmp",
                    },
                    ["name"] = "tmp",
                },
            },
            ["environment"] = new[]
            {
                new Dictionary<string, object?>
                {
                    ["name"] = "VARNAME",
                    ["value"] = "VARVAL",
                },
            },
            ["mountPoints"] = new[]
            {
                new Dictionary<string, object?>
                {
                    ["sourceVolume"] = "tmp",
                    ["containerPath"] = "/tmp",
                    ["readOnly"] = false,
                },
            },
            // nofile = max open file descriptors inside the container.
            ["ulimits"] = new[]
            {
                new Dictionary<string, object?>
                {
                    ["hardLimit"] = 1024,
                    ["name"] = "nofile",
                    ["softLimit"] = 1024,
                },
            },
        }),
    });
});
package generated_program;

import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.aws.batch.JobDefinition;
import com.pulumi.aws.batch.JobDefinitionArgs;
import static com.pulumi.codegen.internal.Serialization.*;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;

public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    // Registers a single-container Batch job definition; containerProperties
    // is serialized to the JSON string the provider expects.
    public static void stack(Context ctx) {
        var test = new JobDefinition("test", JobDefinitionArgs.builder()
            .name("my_test_batch_job_definition")
            .type("container") // single-container job (vs. "multinode")
            .containerProperties(serializeJson(
                jsonObject(
                    // Command executed inside the container at job start.
                    jsonProperty("command", jsonArray(
                        "ls",
                        "-la"
                    )),
                    jsonProperty("image", "busybox"),
                    // VCPU/MEMORY values are strings; MEMORY is in MiB.
                    jsonProperty("resourceRequirements", jsonArray(
                        jsonObject(
                            jsonProperty("type", "VCPU"),
                            jsonProperty("value", "0.25")
                        ),
                        jsonObject(
                            jsonProperty("type", "MEMORY"),
                            jsonProperty("value", "512")
                        )
                    )),
                    // Host-path volume, mounted via mountPoints below.
                    jsonProperty("volumes", jsonArray(jsonObject(
                        jsonProperty("host", jsonObject(
                            jsonProperty("sourcePath", "/tmp")
                        )),
                        jsonProperty("name", "tmp")
                    ))),
                    jsonProperty("environment", jsonArray(jsonObject(
                        jsonProperty("name", "VARNAME"),
                        jsonProperty("value", "VARVAL")
                    ))),
                    jsonProperty("mountPoints", jsonArray(jsonObject(
                        jsonProperty("sourceVolume", "tmp"),
                        jsonProperty("containerPath", "/tmp"),
                        jsonProperty("readOnly", false)
                    ))),
                    // nofile = max open file descriptors in the container.
                    jsonProperty("ulimits", jsonArray(jsonObject(
                        jsonProperty("hardLimit", 1024),
                        jsonProperty("name", "nofile"),
                        jsonProperty("softLimit", 1024)
                    )))
                )))
            .build());
    }
}
# Single-container Batch job definition; fn::toJSON renders the
# containerProperties document as the JSON string the provider expects.
resources:
  test:
    type: aws:batch:JobDefinition
    properties:
      name: my_test_batch_job_definition
      type: container # single-container job (vs. multinode)
      containerProperties:
        fn::toJSON:
          # Command executed inside the container when the job starts.
          command:
            - ls
            - -la
          image: busybox
          # VCPU/MEMORY values are strings; MEMORY is in MiB.
          resourceRequirements:
            - type: VCPU
              value: '0.25'
            - type: MEMORY
              value: '512'
          # Host-path volume, mounted read-write at /tmp via mountPoints.
          volumes:
            - host:
                sourcePath: /tmp
              name: tmp
          environment:
            - name: VARNAME
              value: VARVAL
          mountPoints:
            - sourceVolume: tmp
              containerPath: /tmp
              readOnly: false
          # nofile = max open file descriptors inside the container.
          ulimits:
            - hardLimit: 1024
              name: nofile
              softLimit: 1024
The type property must be “container” for single-container jobs. The containerProperties JSON document defines the image, command, and resourceRequirements (VCPU and MEMORY). Batch schedules the job on compute environments that can satisfy these resource requirements.
Distribute work across multiple nodes
Tightly coupled parallel workloads like MPI applications coordinate across multiple compute nodes with different roles.
import * as pulumi from "@pulumi/pulumi";
import * as aws from "@pulumi/aws";

// Multi-node parallel job: node 0 coordinates, and each nodeRangeProperties
// entry configures the containers for a range of nodes (targetNodes).
const test = new aws.batch.JobDefinition("test", {
    name: "tf_test_batch_job_definition_multinode",
    type: "multinode",
    nodeProperties: JSON.stringify({
        mainNode: 0, // index of the coordinator node
        nodeRangeProperties: [
            {
                container: {
                    command: [
                        "ls",
                        "-la",
                    ],
                    image: "busybox",
                    memory: 128, // MiB
                    vcpus: 1,
                },
                targetNodes: "0:", // node 0 and beyond
            },
            {
                container: {
                    command: [
                        "echo",
                        "test",
                    ],
                    image: "busybox",
                    memory: 128,
                    vcpus: 1,
                },
                targetNodes: "1:", // node 1 and beyond
            },
        ],
        numNodes: 2, // total node count for the job
    }),
});
import pulumi
import json
import pulumi_aws as aws

# Multi-node parallel job: node 0 coordinates, and each nodeRangeProperties
# entry configures the containers for a range of nodes (targetNodes).
test = aws.batch.JobDefinition("test",
    name="tf_test_batch_job_definition_multinode",
    type="multinode",
    node_properties=json.dumps({
        "mainNode": 0,  # index of the coordinator node
        "nodeRangeProperties": [
            {
                "container": {
                    "command": [
                        "ls",
                        "-la",
                    ],
                    "image": "busybox",
                    "memory": 128,  # MiB
                    "vcpus": 1,
                },
                "targetNodes": "0:",  # node 0 and beyond
            },
            {
                "container": {
                    "command": [
                        "echo",
                        "test",
                    ],
                    "image": "busybox",
                    "memory": 128,
                    "vcpus": 1,
                },
                "targetNodes": "1:",  # node 1 and beyond
            },
        ],
        "numNodes": 2,  # total node count for the job
    }))
package main

import (
	"encoding/json"

	"github.com/pulumi/pulumi-aws/sdk/v7/go/aws/batch"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		// Multi-node parallel job document: node 0 coordinates, and each
		// nodeRangeProperties entry configures a range of nodes.
		tmpJSON0, err := json.Marshal(map[string]interface{}{
			"mainNode": 0, // index of the coordinator node
			"nodeRangeProperties": []map[string]interface{}{
				map[string]interface{}{
					"container": map[string]interface{}{
						"command": []string{
							"ls",
							"-la",
						},
						"image":  "busybox",
						"memory": 128, // MiB
						"vcpus":  1,
					},
					"targetNodes": "0:", // node 0 and beyond
				},
				map[string]interface{}{
					"container": map[string]interface{}{
						"command": []string{
							"echo",
							"test",
						},
						"image":  "busybox",
						"memory": 128,
						"vcpus":  1,
					},
					"targetNodes": "1:", // node 1 and beyond
				},
			},
			"numNodes": 2, // total node count for the job
		})
		if err != nil {
			return err
		}
		json0 := string(tmpJSON0)
		_, err = batch.NewJobDefinition(ctx, "test", &batch.JobDefinitionArgs{
			Name:           pulumi.String("tf_test_batch_job_definition_multinode"),
			Type:           pulumi.String("multinode"),
			NodeProperties: pulumi.String(json0),
		})
		if err != nil {
			return err
		}
		return nil
	})
}
using System.Collections.Generic;
using System.Linq;
using System.Text.Json;
using Pulumi;
using Aws = Pulumi.Aws;

return await Deployment.RunAsync(() =>
{
    // Multi-node parallel job: node 0 coordinates, and each
    // nodeRangeProperties entry configures a range of nodes (targetNodes).
    var test = new Aws.Batch.JobDefinition("test", new()
    {
        Name = "tf_test_batch_job_definition_multinode",
        Type = "multinode",
        NodeProperties = JsonSerializer.Serialize(new Dictionary<string, object?>
        {
            ["mainNode"] = 0, // index of the coordinator node
            ["nodeRangeProperties"] = new[]
            {
                new Dictionary<string, object?>
                {
                    ["container"] = new Dictionary<string, object?>
                    {
                        ["command"] = new[]
                        {
                            "ls",
                            "-la",
                        },
                        ["image"] = "busybox",
                        ["memory"] = 128, // MiB
                        ["vcpus"] = 1,
                    },
                    ["targetNodes"] = "0:", // node 0 and beyond
                },
                new Dictionary<string, object?>
                {
                    ["container"] = new Dictionary<string, object?>
                    {
                        ["command"] = new[]
                        {
                            "echo",
                            "test",
                        },
                        ["image"] = "busybox",
                        ["memory"] = 128,
                        ["vcpus"] = 1,
                    },
                    ["targetNodes"] = "1:", // node 1 and beyond
                },
            },
            ["numNodes"] = 2, // total node count for the job
        }),
    });
});
package generated_program;

import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.aws.batch.JobDefinition;
import com.pulumi.aws.batch.JobDefinitionArgs;
import static com.pulumi.codegen.internal.Serialization.*;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;

public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    // Registers a multi-node parallel Batch job: node 0 coordinates, and each
    // nodeRangeProperties entry configures a range of nodes (targetNodes).
    public static void stack(Context ctx) {
        var test = new JobDefinition("test", JobDefinitionArgs.builder()
            .name("tf_test_batch_job_definition_multinode")
            .type("multinode")
            .nodeProperties(serializeJson(
                jsonObject(
                    jsonProperty("mainNode", 0), // coordinator node index
                    jsonProperty("nodeRangeProperties", jsonArray(
                        jsonObject(
                            jsonProperty("container", jsonObject(
                                jsonProperty("command", jsonArray(
                                    "ls",
                                    "-la"
                                )),
                                jsonProperty("image", "busybox"),
                                jsonProperty("memory", 128), // MiB
                                jsonProperty("vcpus", 1)
                            )),
                            jsonProperty("targetNodes", "0:") // node 0 and beyond
                        ),
                        jsonObject(
                            jsonProperty("container", jsonObject(
                                jsonProperty("command", jsonArray(
                                    "echo",
                                    "test"
                                )),
                                jsonProperty("image", "busybox"),
                                jsonProperty("memory", 128),
                                jsonProperty("vcpus", 1)
                            )),
                            jsonProperty("targetNodes", "1:") // node 1 and beyond
                        )
                    )),
                    jsonProperty("numNodes", 2) // total node count
                )))
            .build());
    }
}
# Multi-node parallel job: node 0 coordinates, and each nodeRangeProperties
# entry configures a range of nodes (targetNodes).
resources:
  test:
    type: aws:batch:JobDefinition
    properties:
      name: tf_test_batch_job_definition_multinode
      type: multinode
      nodeProperties:
        fn::toJSON:
          mainNode: 0 # index of the coordinator node
          nodeRangeProperties:
            - container:
                command:
                  - ls
                  - -la
                image: busybox
                memory: 128 # MiB
                vcpus: 1
              targetNodes: '0:' # node 0 and beyond
            - container:
                command:
                  - echo
                  - test
                image: busybox
                memory: 128
                vcpus: 1
              targetNodes: '1:' # node 1 and beyond
          numNodes: 2 # total node count for the job
The type property becomes “multinode” for distributed jobs. The nodeProperties JSON document defines mainNode (the coordinator), numNodes (total count), and nodeRangeProperties (different configurations for different node groups). The targetNodes syntax “0:” means “node 0 and beyond” while “1:” means “node 1 and beyond”.
Run jobs on EKS clusters with pod configuration
Teams with existing Kubernetes infrastructure can run Batch jobs as EKS pods, leveraging Kubernetes-native resource management.
import * as pulumi from "@pulumi/pulumi";
import * as aws from "@pulumi/aws";

// Batch job that runs as a Kubernetes pod on EKS: eksProperties replaces
// containerProperties, and resources use Kubernetes quantity syntax.
const test = new aws.batch.JobDefinition("test", {
    // Fixed: removed the stray leading space — Batch job definition names may
    // only contain letters, numbers, hyphens, and underscores.
    name: "tf_test_batch_job_definition_eks",
    type: "container",
    eksProperties: {
        podProperties: {
            hostNetwork: true, // pod shares the node's network namespace
            containers: [{
                image: "public.ecr.aws/amazonlinux/amazonlinux:1",
                commands: [
                    "sleep",
                    "60",
                ],
                resources: {
                    // Kubernetes-style quantities ("1" CPU, "1024Mi" memory).
                    limits: {
                        cpu: "1",
                        memory: "1024Mi",
                    },
                },
            }],
            metadata: {
                // Labels applied to the pod Batch launches.
                labels: {
                    environment: "test",
                },
            },
        },
    },
});
import pulumi
import pulumi_aws as aws

# Batch job that runs as a Kubernetes pod on EKS: eks_properties replaces
# container_properties, and resources use Kubernetes quantity syntax.
test = aws.batch.JobDefinition("test",
    # Fixed: removed the stray leading space — Batch job definition names may
    # only contain letters, numbers, hyphens, and underscores.
    name="tf_test_batch_job_definition_eks",
    type="container",
    eks_properties={
        "pod_properties": {
            "host_network": True,  # pod shares the node's network namespace
            "containers": [{
                "image": "public.ecr.aws/amazonlinux/amazonlinux:1",
                "commands": [
                    "sleep",
                    "60",
                ],
                "resources": {
                    # Kubernetes-style quantities ("1" CPU, "1024Mi" memory).
                    "limits": {
                        "cpu": "1",
                        "memory": "1024Mi",
                    },
                },
            }],
            "metadata": {
                # Labels applied to the pod Batch launches.
                "labels": {
                    "environment": "test",
                },
            },
        },
    })
package main
import (
"github.com/pulumi/pulumi-aws/sdk/v7/go/aws/batch"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
_, err := batch.NewJobDefinition(ctx, "test", &batch.JobDefinitionArgs{
Name: pulumi.String(" tf_test_batch_job_definition_eks"),
Type: pulumi.String("container"),
EksProperties: &batch.JobDefinitionEksPropertiesArgs{
PodProperties: &batch.JobDefinitionEksPropertiesPodPropertiesArgs{
HostNetwork: pulumi.Bool(true),
Containers: batch.JobDefinitionEksPropertiesPodPropertiesContainerArray{
&batch.JobDefinitionEksPropertiesPodPropertiesContainerArgs{
Image: pulumi.String("public.ecr.aws/amazonlinux/amazonlinux:1"),
Commands: pulumi.StringArray{
pulumi.String("sleep"),
pulumi.String("60"),
},
Resources: &batch.JobDefinitionEksPropertiesPodPropertiesContainerResourcesArgs{
Limits: pulumi.StringMap{
"cpu": pulumi.String("1"),
"memory": pulumi.String("1024Mi"),
},
},
},
},
Metadata: &batch.JobDefinitionEksPropertiesPodPropertiesMetadataArgs{
Labels: pulumi.StringMap{
"environment": pulumi.String("test"),
},
},
},
},
})
if err != nil {
return err
}
return nil
})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Aws = Pulumi.Aws;

return await Deployment.RunAsync(() =>
{
    // Batch job that runs as a Kubernetes pod on EKS: EksProperties replaces
    // ContainerProperties, and resources use Kubernetes quantity syntax.
    var test = new Aws.Batch.JobDefinition("test", new()
    {
        // Fixed: removed the stray leading space — Batch job definition names
        // may only contain letters, numbers, hyphens, and underscores.
        Name = "tf_test_batch_job_definition_eks",
        Type = "container",
        EksProperties = new Aws.Batch.Inputs.JobDefinitionEksPropertiesArgs
        {
            PodProperties = new Aws.Batch.Inputs.JobDefinitionEksPropertiesPodPropertiesArgs
            {
                HostNetwork = true, // pod shares the node's network namespace
                Containers = new[]
                {
                    new Aws.Batch.Inputs.JobDefinitionEksPropertiesPodPropertiesContainerArgs
                    {
                        Image = "public.ecr.aws/amazonlinux/amazonlinux:1",
                        Commands = new[]
                        {
                            "sleep",
                            "60",
                        },
                        // Kubernetes-style quantities ("1" CPU, "1024Mi").
                        Resources = new Aws.Batch.Inputs.JobDefinitionEksPropertiesPodPropertiesContainerResourcesArgs
                        {
                            Limits =
                            {
                                { "cpu", "1" },
                                { "memory", "1024Mi" },
                            },
                        },
                    },
                },
                // Labels applied to the pod Batch launches.
                Metadata = new Aws.Batch.Inputs.JobDefinitionEksPropertiesPodPropertiesMetadataArgs
                {
                    Labels =
                    {
                        { "environment", "test" },
                    },
                },
            },
        },
    });
});
package generated_program;

import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.aws.batch.JobDefinition;
import com.pulumi.aws.batch.JobDefinitionArgs;
import com.pulumi.aws.batch.inputs.JobDefinitionEksPropertiesArgs;
import com.pulumi.aws.batch.inputs.JobDefinitionEksPropertiesPodPropertiesArgs;
// Fixed: these two input types are used below but were missing from the
// imports, so the example did not compile.
import com.pulumi.aws.batch.inputs.JobDefinitionEksPropertiesPodPropertiesContainerArgs;
import com.pulumi.aws.batch.inputs.JobDefinitionEksPropertiesPodPropertiesContainerResourcesArgs;
import com.pulumi.aws.batch.inputs.JobDefinitionEksPropertiesPodPropertiesMetadataArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;

public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    // Registers a Batch job that runs as a Kubernetes pod on an EKS cluster.
    public static void stack(Context ctx) {
        var test = new JobDefinition("test", JobDefinitionArgs.builder()
            // Fixed: removed the stray leading space — Batch job definition
            // names may only contain letters, numbers, hyphens, underscores.
            .name("tf_test_batch_job_definition_eks")
            .type("container")
            .eksProperties(JobDefinitionEksPropertiesArgs.builder()
                .podProperties(JobDefinitionEksPropertiesPodPropertiesArgs.builder()
                    .hostNetwork(true) // pod shares the node's network namespace
                    .containers(JobDefinitionEksPropertiesPodPropertiesContainerArgs.builder()
                        .image("public.ecr.aws/amazonlinux/amazonlinux:1")
                        .commands(
                            "sleep",
                            "60")
                        // Kubernetes-style quantities ("1" CPU, "1024Mi").
                        .resources(JobDefinitionEksPropertiesPodPropertiesContainerResourcesArgs.builder()
                            .limits(Map.ofEntries(
                                Map.entry("cpu", "1"),
                                Map.entry("memory", "1024Mi")
                            ))
                            .build())
                        .build())
                    // Labels applied to the pod Batch launches.
                    .metadata(JobDefinitionEksPropertiesPodPropertiesMetadataArgs.builder()
                        .labels(Map.of("environment", "test"))
                        .build())
                    .build())
                .build())
            .build());
    }
}
# Batch job that runs as a Kubernetes pod on EKS: eksProperties replaces
# containerProperties, and resources use Kubernetes quantity syntax.
resources:
  test:
    type: aws:batch:JobDefinition
    properties:
      # Fixed: removed the stray leading space — Batch job definition names
      # may only contain letters, numbers, hyphens, and underscores.
      name: tf_test_batch_job_definition_eks
      type: container
      eksProperties:
        podProperties:
          hostNetwork: true # pod shares the node's network namespace
          containers:
            - image: public.ecr.aws/amazonlinux/amazonlinux:1
              commands:
                - sleep
                - '60'
              resources:
                limits: # Kubernetes-style quantities
                  cpu: '1'
                  memory: 1024Mi
          metadata:
            labels: # labels applied to the pod Batch launches
              environment: test
The eksProperties property replaces containerProperties when running on EKS. The podProperties define Kubernetes pod specifications including containers, resource limits (using Kubernetes syntax like “1024Mi”), and networking options like hostNetwork.
Run serverless containers on Fargate
Fargate eliminates the need to manage EC2 instances by running containers as serverless tasks.
import * as pulumi from "@pulumi/pulumi";
import * as aws from "@pulumi/aws";

// Trust policy letting ECS tasks (which back Fargate Batch jobs) assume the
// execution role.
const assumeRolePolicy = aws.iam.getPolicyDocument({
    statements: [{
        actions: ["sts:AssumeRole"],
        principals: [{
            type: "Service",
            identifiers: ["ecs-tasks.amazonaws.com"],
        }],
    }],
});
// Execution role Fargate uses to pull images and write logs.
const ecsTaskExecutionRole = new aws.iam.Role("ecs_task_execution_role", {
    name: "my_test_batch_exec_role",
    assumeRolePolicy: assumeRolePolicy.then(assumeRolePolicy => assumeRolePolicy.json),
});
const ecsTaskExecutionRolePolicy = new aws.iam.RolePolicyAttachment("ecs_task_execution_role_policy", {
    role: ecsTaskExecutionRole.name,
    policyArn: "arn:aws:iam::aws:policy/service-role/AmazonECSTaskExecutionRolePolicy",
});
const test = new aws.batch.JobDefinition("test", {
    name: "my_test_batch_job_definition",
    type: "container",
    platformCapabilities: ["FARGATE"], // run serverlessly on Fargate
    // pulumi.jsonStringify (not JSON.stringify) because the document embeds
    // an Output value (the role ARN below).
    containerProperties: pulumi.jsonStringify({
        command: [
            "echo",
            "test",
        ],
        image: "busybox",
        jobRoleArn: "arn:aws:iam::123456789012:role/AWSBatchS3ReadOnly",
        fargatePlatformConfiguration: {
            platformVersion: "LATEST",
        },
        // Fargate accepts only specific VCPU/MEMORY combinations.
        resourceRequirements: [
            {
                type: "VCPU",
                value: "0.25",
            },
            {
                type: "MEMORY",
                value: "512",
            },
        ],
        executionRoleArn: ecsTaskExecutionRole.arn,
    }),
});
import pulumi
import json
import pulumi_aws as aws

# Trust policy letting ECS tasks (which back Fargate Batch jobs) assume the
# execution role.
assume_role_policy = aws.iam.get_policy_document(statements=[{
    "actions": ["sts:AssumeRole"],
    "principals": [{
        "type": "Service",
        "identifiers": ["ecs-tasks.amazonaws.com"],
    }],
}])
# Execution role Fargate uses to pull images and write logs.
ecs_task_execution_role = aws.iam.Role("ecs_task_execution_role",
    name="my_test_batch_exec_role",
    assume_role_policy=assume_role_policy.json)
ecs_task_execution_role_policy = aws.iam.RolePolicyAttachment("ecs_task_execution_role_policy",
    role=ecs_task_execution_role.name,
    policy_arn="arn:aws:iam::aws:policy/service-role/AmazonECSTaskExecutionRolePolicy")
test = aws.batch.JobDefinition("test",
    name="my_test_batch_job_definition",
    type="container",
    platform_capabilities=["FARGATE"],  # run serverlessly on Fargate
    # pulumi.Output.json_dumps (not json.dumps) because the document embeds an
    # Output value (the role ARN below).
    container_properties=pulumi.Output.json_dumps({
        "command": [
            "echo",
            "test",
        ],
        "image": "busybox",
        "jobRoleArn": "arn:aws:iam::123456789012:role/AWSBatchS3ReadOnly",
        "fargatePlatformConfiguration": {
            "platformVersion": "LATEST",
        },
        # Fargate accepts only specific VCPU/MEMORY combinations.
        "resourceRequirements": [
            {
                "type": "VCPU",
                "value": "0.25",
            },
            {
                "type": "MEMORY",
                "value": "512",
            },
        ],
        "executionRoleArn": ecs_task_execution_role.arn,
    }))
package main

import (
	"encoding/json"

	"github.com/pulumi/pulumi-aws/sdk/v7/go/aws/batch"
	"github.com/pulumi/pulumi-aws/sdk/v7/go/aws/iam"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		// Trust policy letting ECS tasks (which back Fargate Batch jobs)
		// assume the execution role.
		assumeRolePolicy, err := iam.GetPolicyDocument(ctx, &iam.GetPolicyDocumentArgs{
			Statements: []iam.GetPolicyDocumentStatement{
				{
					Actions: []string{
						"sts:AssumeRole",
					},
					Principals: []iam.GetPolicyDocumentStatementPrincipal{
						{
							Type: "Service",
							Identifiers: []string{
								"ecs-tasks.amazonaws.com",
							},
						},
					},
				},
			},
		}, nil)
		if err != nil {
			return err
		}
		// Execution role Fargate uses to pull images and write logs.
		ecsTaskExecutionRole, err := iam.NewRole(ctx, "ecs_task_execution_role", &iam.RoleArgs{
			Name:             pulumi.String("my_test_batch_exec_role"),
			AssumeRolePolicy: pulumi.String(assumeRolePolicy.Json),
		})
		if err != nil {
			return err
		}
		_, err = iam.NewRolePolicyAttachment(ctx, "ecs_task_execution_role_policy", &iam.RolePolicyAttachmentArgs{
			Role:      ecsTaskExecutionRole.Name,
			PolicyArn: pulumi.String("arn:aws:iam::aws:policy/service-role/AmazonECSTaskExecutionRolePolicy"),
		})
		if err != nil {
			return err
		}
		_, err = batch.NewJobDefinition(ctx, "test", &batch.JobDefinitionArgs{
			Name: pulumi.String("my_test_batch_job_definition"),
			Type: pulumi.String("container"),
			PlatformCapabilities: pulumi.StringArray{
				pulumi.String("FARGATE"), // run serverlessly on Fargate
			},
			// The document embeds the role ARN (an Output), so marshal it
			// inside ApplyT once the ARN is resolved.
			ContainerProperties: ecsTaskExecutionRole.Arn.ApplyT(func(arn string) (pulumi.String, error) {
				var _zero pulumi.String
				tmpJSON0, err := json.Marshal(map[string]interface{}{
					"command": []string{
						"echo",
						"test",
					},
					"image":      "busybox",
					"jobRoleArn": "arn:aws:iam::123456789012:role/AWSBatchS3ReadOnly",
					"fargatePlatformConfiguration": map[string]interface{}{
						"platformVersion": "LATEST",
					},
					// Fargate accepts only specific VCPU/MEMORY combinations.
					"resourceRequirements": []map[string]interface{}{
						map[string]interface{}{
							"type":  "VCPU",
							"value": "0.25",
						},
						map[string]interface{}{
							"type":  "MEMORY",
							"value": "512",
						},
					},
					"executionRoleArn": arn,
				})
				if err != nil {
					return _zero, err
				}
				json0 := string(tmpJSON0)
				return pulumi.String(json0), nil
			}).(pulumi.StringOutput),
		})
		if err != nil {
			return err
		}
		return nil
	})
}
using System.Collections.Generic;
using System.Linq;
using System.Text.Json;
using Pulumi;
using Aws = Pulumi.Aws;

return await Deployment.RunAsync(() =>
{
    // Trust policy letting ECS tasks (which back Fargate Batch jobs) assume
    // the execution role.
    var assumeRolePolicy = Aws.Iam.GetPolicyDocument.Invoke(new()
    {
        Statements = new[]
        {
            new Aws.Iam.Inputs.GetPolicyDocumentStatementInputArgs
            {
                Actions = new[]
                {
                    "sts:AssumeRole",
                },
                Principals = new[]
                {
                    new Aws.Iam.Inputs.GetPolicyDocumentStatementPrincipalInputArgs
                    {
                        Type = "Service",
                        Identifiers = new[]
                        {
                            "ecs-tasks.amazonaws.com",
                        },
                    },
                },
            },
        },
    });
    // Execution role Fargate uses to pull images and write logs.
    var ecsTaskExecutionRole = new Aws.Iam.Role("ecs_task_execution_role", new()
    {
        Name = "my_test_batch_exec_role",
        AssumeRolePolicy = assumeRolePolicy.Apply(getPolicyDocumentResult => getPolicyDocumentResult.Json),
    });
    var ecsTaskExecutionRolePolicy = new Aws.Iam.RolePolicyAttachment("ecs_task_execution_role_policy", new()
    {
        Role = ecsTaskExecutionRole.Name,
        PolicyArn = "arn:aws:iam::aws:policy/service-role/AmazonECSTaskExecutionRolePolicy",
    });
    var test = new Aws.Batch.JobDefinition("test", new()
    {
        Name = "my_test_batch_job_definition",
        Type = "container",
        PlatformCapabilities = new[]
        {
            "FARGATE", // run serverlessly on Fargate
        },
        // Output.JsonSerialize (not JsonSerializer.Serialize) because the
        // document embeds an Output value (the role ARN below).
        ContainerProperties = Output.JsonSerialize(Output.Create(new Dictionary<string, object?>
        {
            ["command"] = new[]
            {
                "echo",
                "test",
            },
            ["image"] = "busybox",
            ["jobRoleArn"] = "arn:aws:iam::123456789012:role/AWSBatchS3ReadOnly",
            ["fargatePlatformConfiguration"] = new Dictionary<string, object?>
            {
                ["platformVersion"] = "LATEST",
            },
            // Fargate accepts only specific VCPU/MEMORY combinations.
            ["resourceRequirements"] = new[]
            {
                new Dictionary<string, object?>
                {
                    ["type"] = "VCPU",
                    ["value"] = "0.25",
                },
                new Dictionary<string, object?>
                {
                    ["type"] = "MEMORY",
                    ["value"] = "512",
                },
            },
            ["executionRoleArn"] = ecsTaskExecutionRole.Arn,
        })),
    });
});
package generated_program;

import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.aws.iam.IamFunctions;
import com.pulumi.aws.iam.inputs.GetPolicyDocumentArgs;
// Fixed: these two input types are used below but were missing from the
// imports, so the example did not compile.
import com.pulumi.aws.iam.inputs.GetPolicyDocumentStatementArgs;
import com.pulumi.aws.iam.inputs.GetPolicyDocumentStatementPrincipalArgs;
import com.pulumi.aws.iam.Role;
import com.pulumi.aws.iam.RoleArgs;
import com.pulumi.aws.iam.RolePolicyAttachment;
import com.pulumi.aws.iam.RolePolicyAttachmentArgs;
import com.pulumi.aws.batch.JobDefinition;
import com.pulumi.aws.batch.JobDefinitionArgs;
import static com.pulumi.codegen.internal.Serialization.*;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;

public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    // Registers a Fargate Batch job definition plus the execution role it
    // needs to pull images and write logs.
    public static void stack(Context ctx) {
        // Trust policy letting ECS tasks (which back Fargate Batch jobs)
        // assume the execution role.
        final var assumeRolePolicy = IamFunctions.getPolicyDocument(GetPolicyDocumentArgs.builder()
            .statements(GetPolicyDocumentStatementArgs.builder()
                .actions("sts:AssumeRole")
                .principals(GetPolicyDocumentStatementPrincipalArgs.builder()
                    .type("Service")
                    .identifiers("ecs-tasks.amazonaws.com")
                    .build())
                .build())
            .build());
        var ecsTaskExecutionRole = new Role("ecsTaskExecutionRole", RoleArgs.builder()
            .name("my_test_batch_exec_role")
            .assumeRolePolicy(assumeRolePolicy.json())
            .build());
        var ecsTaskExecutionRolePolicy = new RolePolicyAttachment("ecsTaskExecutionRolePolicy", RolePolicyAttachmentArgs.builder()
            .role(ecsTaskExecutionRole.name())
            .policyArn("arn:aws:iam::aws:policy/service-role/AmazonECSTaskExecutionRolePolicy")
            .build());
        var test = new JobDefinition("test", JobDefinitionArgs.builder()
            .name("my_test_batch_job_definition")
            .type("container")
            .platformCapabilities("FARGATE") // run serverlessly on Fargate
            // Serialize inside applyValue because the document embeds an
            // Output value (the role ARN).
            .containerProperties(ecsTaskExecutionRole.arn().applyValue(_arn -> serializeJson(
                jsonObject(
                    jsonProperty("command", jsonArray(
                        "echo",
                        "test"
                    )),
                    jsonProperty("image", "busybox"),
                    jsonProperty("jobRoleArn", "arn:aws:iam::123456789012:role/AWSBatchS3ReadOnly"),
                    jsonProperty("fargatePlatformConfiguration", jsonObject(
                        jsonProperty("platformVersion", "LATEST")
                    )),
                    // Fargate accepts only specific VCPU/MEMORY combinations.
                    jsonProperty("resourceRequirements", jsonArray(
                        jsonObject(
                            jsonProperty("type", "VCPU"),
                            jsonProperty("value", "0.25")
                        ),
                        jsonObject(
                            jsonProperty("type", "MEMORY"),
                            jsonProperty("value", "512")
                        )
                    )),
                    jsonProperty("executionRoleArn", _arn)
                ))))
            .build());
    }
}
# Fargate Batch job definition plus the execution role Fargate needs to pull
# images and write logs.
resources:
  ecsTaskExecutionRole:
    type: aws:iam:Role
    name: ecs_task_execution_role
    properties:
      name: my_test_batch_exec_role
      assumeRolePolicy: ${assumeRolePolicy.json}
  ecsTaskExecutionRolePolicy:
    type: aws:iam:RolePolicyAttachment
    name: ecs_task_execution_role_policy
    properties:
      role: ${ecsTaskExecutionRole.name}
      policyArn: arn:aws:iam::aws:policy/service-role/AmazonECSTaskExecutionRolePolicy
  test:
    type: aws:batch:JobDefinition
    properties:
      name: my_test_batch_job_definition
      type: container
      platformCapabilities: # run serverlessly on Fargate
        - FARGATE
      containerProperties:
        fn::toJSON:
          command:
            - echo
            - test
          image: busybox
          jobRoleArn: arn:aws:iam::123456789012:role/AWSBatchS3ReadOnly
          fargatePlatformConfiguration:
            platformVersion: LATEST
          # Fargate accepts only specific VCPU/MEMORY combinations.
          resourceRequirements:
            - type: VCPU
              value: '0.25'
            - type: MEMORY
              value: '512'
          executionRoleArn: ${ecsTaskExecutionRole.arn}
variables:
  # Trust policy letting ECS tasks (which back Fargate Batch jobs) assume
  # the execution role.
  assumeRolePolicy:
    fn::invoke:
      function: aws:iam:getPolicyDocument
      arguments:
        statements:
          - actions:
              - sts:AssumeRole
            principals:
              - type: Service
                identifiers:
                  - ecs-tasks.amazonaws.com
The platformCapabilities property set to [“FARGATE”] signals serverless execution. Fargate requires an executionRoleArn for pulling images and writing logs, plus a fargatePlatformConfiguration to specify the platform version. Resource requirements must use Fargate-compatible values (0.25, 0.5, 1, 2, 4 vCPU; 512 MiB to 30 GiB memory, and only specific vCPU/memory combinations are accepted).
Orchestrate multi-container tasks with dependencies
Complex workflows often require multiple containers that start in a specific order, with some containers depending on others.
import * as pulumi from "@pulumi/pulumi";
import * as aws from "@pulumi/aws";

// Multi-container Fargate job: ecsProperties defines a task with two
// containers; container_a waits for container_b to COMPLETE before starting.
// NOTE: `ecsTaskExecutionRole` is the IAM execution role from the Fargate
// example above and must be declared in the same program.
const test = new aws.batch.JobDefinition("test", {
    name: "my_test_batch_job_definition",
    type: "container",
    platformCapabilities: ["FARGATE"],
    // Fixed: use pulumi.jsonStringify instead of JSON.stringify — the
    // document embeds an Output (the role ARN), which plain JSON.stringify
    // cannot serialize correctly.
    ecsProperties: pulumi.jsonStringify({
        taskProperties: [{
            executionRoleArn: ecsTaskExecutionRole.arn,
            containers: [
                {
                    image: "public.ecr.aws/amazonlinux/amazonlinux:1",
                    command: [
                        "sleep",
                        "60",
                    ],
                    // Start only after container_b has run to completion.
                    dependsOn: [{
                        containerName: "container_b",
                        condition: "COMPLETE",
                    }],
                    secrets: [{
                        name: "TEST",
                        valueFrom: "DUMMY",
                    }],
                    environment: [{
                        name: "test",
                        value: "Environment Variable",
                    }],
                    essential: true, // task fails if this container fails
                    logConfiguration: {
                        logDriver: "awslogs",
                        options: {
                            "awslogs-group": "tf_test_batch_job",
                            "awslogs-region": "us-west-2",
                            "awslogs-stream-prefix": "ecs",
                        },
                    },
                    name: "container_a",
                    privileged: false,
                    readonlyRootFilesystem: false,
                    resourceRequirements: [
                        {
                            value: "1.0",
                            type: "VCPU",
                        },
                        {
                            value: "2048",
                            type: "MEMORY",
                        },
                    ],
                },
                {
                    image: "public.ecr.aws/amazonlinux/amazonlinux:1",
                    command: [
                        "sleep",
                        "360",
                    ],
                    name: "container_b",
                    essential: false, // task continues if this container exits
                    resourceRequirements: [
                        {
                            value: "1.0",
                            type: "VCPU",
                        },
                        {
                            value: "2048",
                            type: "MEMORY",
                        },
                    ],
                },
            ],
        }],
    }),
});
import pulumi
import json
import pulumi_aws as aws

# Multi-container Fargate job: ecs_properties defines a task with two
# containers; container_a waits for container_b to COMPLETE before starting.
# NOTE: `ecs_task_execution_role` is the IAM execution role from the Fargate
# example above and must be declared in the same program.
test = aws.batch.JobDefinition("test",
    name="my_test_batch_job_definition",
    type="container",
    platform_capabilities=["FARGATE"],
    # Fixed: use pulumi.Output.json_dumps instead of json.dumps — the document
    # embeds an Output (the role ARN), which plain json.dumps cannot
    # serialize correctly.
    ecs_properties=pulumi.Output.json_dumps({
        "taskProperties": [{
            "executionRoleArn": ecs_task_execution_role.arn,
            "containers": [
                {
                    "image": "public.ecr.aws/amazonlinux/amazonlinux:1",
                    "command": [
                        "sleep",
                        "60",
                    ],
                    # Start only after container_b has run to completion.
                    "dependsOn": [{
                        "containerName": "container_b",
                        "condition": "COMPLETE",
                    }],
                    "secrets": [{
                        "name": "TEST",
                        "valueFrom": "DUMMY",
                    }],
                    "environment": [{
                        "name": "test",
                        "value": "Environment Variable",
                    }],
                    "essential": True,  # task fails if this container fails
                    "logConfiguration": {
                        "logDriver": "awslogs",
                        "options": {
                            "awslogs-group": "tf_test_batch_job",
                            "awslogs-region": "us-west-2",
                            "awslogs-stream-prefix": "ecs",
                        },
                    },
                    "name": "container_a",
                    "privileged": False,
                    "readonlyRootFilesystem": False,
                    "resourceRequirements": [
                        {
                            "value": "1.0",
                            "type": "VCPU",
                        },
                        {
                            "value": "2048",
                            "type": "MEMORY",
                        },
                    ],
                },
                {
                    "image": "public.ecr.aws/amazonlinux/amazonlinux:1",
                    "command": [
                        "sleep",
                        "360",
                    ],
                    "name": "container_b",
                    "essential": False,  # task continues if this container exits
                    "resourceRequirements": [
                        {
                            "value": "1.0",
                            "type": "VCPU",
                        },
                        {
                            "value": "2048",
                            "type": "MEMORY",
                        },
                    ],
                },
            ],
        }],
    }))
package main

import (
	"encoding/json"

	"github.com/pulumi/pulumi-aws/sdk/v7/go/aws/batch"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		// Multi-container Fargate job document: one task with two containers;
		// container_a waits for container_b to COMPLETE before starting.
		// NOTE(review): ecsTaskExecutionRole is not declared in this snippet —
		// it is the execution role from the Fargate example and must exist in
		// the same program. Also, ecsTaskExecutionRole.Arn is a pulumi Output;
		// passing it straight into json.Marshal serializes the Output struct,
		// not the resolved ARN — marshal inside Arn.ApplyT (as the Fargate
		// example does) when adapting this code.
		tmpJSON0, err := json.Marshal(map[string]interface{}{
			"taskProperties": []map[string]interface{}{
				map[string]interface{}{
					"executionRoleArn": ecsTaskExecutionRole.Arn,
					"containers": []interface{}{
						map[string]interface{}{
							"image": "public.ecr.aws/amazonlinux/amazonlinux:1",
							"command": []string{
								"sleep",
								"60",
							},
							// Start only after container_b has completed.
							"dependsOn": []map[string]interface{}{
								map[string]interface{}{
									"containerName": "container_b",
									"condition":     "COMPLETE",
								},
							},
							"secrets": []map[string]interface{}{
								map[string]interface{}{
									"name":      "TEST",
									"valueFrom": "DUMMY",
								},
							},
							"environment": []map[string]interface{}{
								map[string]interface{}{
									"name":  "test",
									"value": "Environment Variable",
								},
							},
							"essential": true, // task fails if this container fails
							"logConfiguration": map[string]interface{}{
								"logDriver": "awslogs",
								"options": map[string]interface{}{
									"awslogs-group":         "tf_test_batch_job",
									"awslogs-region":        "us-west-2",
									"awslogs-stream-prefix": "ecs",
								},
							},
							"name":                   "container_a",
							"privileged":             false,
							"readonlyRootFilesystem": false,
							"resourceRequirements": []map[string]interface{}{
								map[string]interface{}{
									"value": "1.0",
									"type":  "VCPU",
								},
								map[string]interface{}{
									"value": "2048",
									"type":  "MEMORY",
								},
							},
						},
						map[string]interface{}{
							"image": "public.ecr.aws/amazonlinux/amazonlinux:1",
							"command": []string{
								"sleep",
								"360",
							},
							"name":      "container_b",
							"essential": false, // task continues if this container exits
							"resourceRequirements": []map[string]interface{}{
								map[string]interface{}{
									"value": "1.0",
									"type":  "VCPU",
								},
								map[string]interface{}{
									"value": "2048",
									"type":  "MEMORY",
								},
							},
						},
					},
				},
			},
		})
		if err != nil {
			return err
		}
		json0 := string(tmpJSON0)
		_, err = batch.NewJobDefinition(ctx, "test", &batch.JobDefinitionArgs{
			Name: pulumi.String("my_test_batch_job_definition"),
			Type: pulumi.String("container"),
			PlatformCapabilities: pulumi.StringArray{
				pulumi.String("FARGATE"),
			},
			EcsProperties: pulumi.String(json0),
		})
		if err != nil {
			return err
		}
		return nil
	})
}
using System.Collections.Generic;
using System.Linq;
using System.Text.Json;
using Pulumi;
using Aws = Pulumi.Aws;

return await Deployment.RunAsync(() =>
{
    // Multi-container Fargate job: EcsProperties defines a task with two
    // containers; container_a waits for container_b to COMPLETE.
    // NOTE: ecsTaskExecutionRole is the IAM execution role from the Fargate
    // example above and must be declared in the same program.
    var test = new Aws.Batch.JobDefinition("test", new()
    {
        Name = "my_test_batch_job_definition",
        Type = "container",
        PlatformCapabilities = new[]
        {
            "FARGATE",
        },
        // Fixed: use Output.JsonSerialize instead of JsonSerializer.Serialize
        // — the document embeds an Output (the role ARN), which a plain
        // serializer cannot resolve.
        EcsProperties = Output.JsonSerialize(Output.Create(new Dictionary<string, object?>
        {
            ["taskProperties"] = new[]
            {
                new Dictionary<string, object?>
                {
                    ["executionRoleArn"] = ecsTaskExecutionRole.Arn,
                    ["containers"] = new[]
                    {
                        new Dictionary<string, object?>
                        {
                            ["image"] = "public.ecr.aws/amazonlinux/amazonlinux:1",
                            ["command"] = new[]
                            {
                                "sleep",
                                "60",
                            },
                            // Start only after container_b has completed.
                            ["dependsOn"] = new[]
                            {
                                new Dictionary<string, object?>
                                {
                                    ["containerName"] = "container_b",
                                    ["condition"] = "COMPLETE",
                                },
                            },
                            ["secrets"] = new[]
                            {
                                new Dictionary<string, object?>
                                {
                                    ["name"] = "TEST",
                                    ["valueFrom"] = "DUMMY",
                                },
                            },
                            ["environment"] = new[]
                            {
                                new Dictionary<string, object?>
                                {
                                    ["name"] = "test",
                                    ["value"] = "Environment Variable",
                                },
                            },
                            ["essential"] = true, // task fails if this container fails
                            ["logConfiguration"] = new Dictionary<string, object?>
                            {
                                ["logDriver"] = "awslogs",
                                ["options"] = new Dictionary<string, object?>
                                {
                                    ["awslogs-group"] = "tf_test_batch_job",
                                    ["awslogs-region"] = "us-west-2",
                                    ["awslogs-stream-prefix"] = "ecs",
                                },
                            },
                            ["name"] = "container_a",
                            ["privileged"] = false,
                            ["readonlyRootFilesystem"] = false,
                            ["resourceRequirements"] = new[]
                            {
                                new Dictionary<string, object?>
                                {
                                    ["value"] = "1.0",
                                    ["type"] = "VCPU",
                                },
                                new Dictionary<string, object?>
                                {
                                    ["value"] = "2048",
                                    ["type"] = "MEMORY",
                                },
                            },
                        },
                        new Dictionary<string, object?>
                        {
                            ["image"] = "public.ecr.aws/amazonlinux/amazonlinux:1",
                            ["command"] = new[]
                            {
                                "sleep",
                                "360",
                            },
                            ["name"] = "container_b",
                            ["essential"] = false, // task continues if this container exits
                            ["resourceRequirements"] = new[]
                            {
                                new Dictionary<string, object?>
                                {
                                    ["value"] = "1.0",
                                    ["type"] = "VCPU",
                                },
                                new Dictionary<string, object?>
                                {
                                    ["value"] = "2048",
                                    ["type"] = "MEMORY",
                                },
                            },
                        },
                    },
                },
            },
        })),
    });
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.aws.batch.JobDefinition;
import com.pulumi.aws.batch.JobDefinitionArgs;
import static com.pulumi.codegen.internal.Serialization.*;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
var test = new JobDefinition("test", JobDefinitionArgs.builder()
.name("my_test_batch_job_definition")
.type("container")
.platformCapabilities("FARGATE")
.ecsProperties(serializeJson(
jsonObject(
jsonProperty("taskProperties", jsonArray(jsonObject(
jsonProperty("executionRoleArn", ecsTaskExecutionRole.arn()),
jsonProperty("containers", jsonArray(
jsonObject(
jsonProperty("image", "public.ecr.aws/amazonlinux/amazonlinux:1"),
jsonProperty("command", jsonArray(
"sleep",
"60"
)),
jsonProperty("dependsOn", jsonArray(jsonObject(
jsonProperty("containerName", "container_b"),
jsonProperty("condition", "COMPLETE")
))),
jsonProperty("secrets", jsonArray(jsonObject(
jsonProperty("name", "TEST"),
jsonProperty("valueFrom", "DUMMY")
))),
jsonProperty("environment", jsonArray(jsonObject(
jsonProperty("name", "test"),
jsonProperty("value", "Environment Variable")
))),
jsonProperty("essential", true),
jsonProperty("logConfiguration", jsonObject(
jsonProperty("logDriver", "awslogs"),
jsonProperty("options", jsonObject(
jsonProperty("awslogs-group", "tf_test_batch_job"),
jsonProperty("awslogs-region", "us-west-2"),
jsonProperty("awslogs-stream-prefix", "ecs")
))
)),
jsonProperty("name", "container_a"),
jsonProperty("privileged", false),
jsonProperty("readonlyRootFilesystem", false),
jsonProperty("resourceRequirements", jsonArray(
jsonObject(
jsonProperty("value", "1.0"),
jsonProperty("type", "VCPU")
),
jsonObject(
jsonProperty("value", "2048"),
jsonProperty("type", "MEMORY")
)
))
),
jsonObject(
jsonProperty("image", "public.ecr.aws/amazonlinux/amazonlinux:1"),
jsonProperty("command", jsonArray(
"sleep",
"360"
)),
jsonProperty("name", "container_b"),
jsonProperty("essential", false),
jsonProperty("resourceRequirements", jsonArray(
jsonObject(
jsonProperty("value", "1.0"),
jsonProperty("type", "VCPU")
),
jsonObject(
jsonProperty("value", "2048"),
jsonProperty("type", "MEMORY")
)
))
)
))
)))
)))
.build());
}
}
resources:
test:
type: aws:batch:JobDefinition
properties:
name: my_test_batch_job_definition
type: container
platformCapabilities:
- FARGATE
ecsProperties:
fn::toJSON:
taskProperties:
- executionRoleArn: ${ecsTaskExecutionRole.arn}
containers:
- image: public.ecr.aws/amazonlinux/amazonlinux:1
command:
- sleep
- '60'
dependsOn:
- containerName: container_b
condition: COMPLETE
secrets:
- name: TEST
valueFrom: DUMMY
environment:
- name: test
value: Environment Variable
essential: true
logConfiguration:
logDriver: awslogs
options:
awslogs-group: tf_test_batch_job
awslogs-region: us-west-2
awslogs-stream-prefix: ecs
name: container_a
privileged: false
readonlyRootFilesystem: false
resourceRequirements:
- value: '1.0'
type: VCPU
- value: '2048'
type: MEMORY
- image: public.ecr.aws/amazonlinux/amazonlinux:1
command:
- sleep
- '360'
name: container_b
essential: false
resourceRequirements:
- value: '1.0'
type: VCPU
- value: '2048'
type: MEMORY
The ecsProperties property enables multi-container orchestration. The taskProperties array defines containers with dependsOn relationships (container_a waits for container_b to COMPLETE). The essential property controls whether container failure causes the entire task to fail. Each container gets its own resource requirements and logging configuration.
Beyond these examples
These snippets focus on specific job definition features: container and multinode job types, EKS and Fargate platform capabilities, and multi-container orchestration with dependencies. They’re intentionally minimal rather than full batch processing pipelines.
The examples may reference pre-existing infrastructure such as IAM execution roles and job roles, EKS clusters (for EKS jobs), CloudWatch log groups, and Docker images in accessible registries. They focus on configuring the job definition rather than provisioning the surrounding Batch infrastructure.
To keep things focused, common job definition patterns are omitted, including:
- Retry strategies for failed jobs (retryStrategy)
- Job timeouts and termination (timeout)
- Fair share scheduling priority (schedulingPriority)
- Parameter substitution (parameters)
- Tag propagation to ECS tasks (propagateTags)
These omissions are intentional: the goal is to illustrate how each job definition feature is wired, not provide drop-in batch processing modules. See the Batch Job Definition resource reference for all available configuration options.
Let's create AWS Batch Job Definitions
Get started with Pulumi Cloud, then follow our quick setup guide to deploy this infrastructure.
Try Pulumi Cloud for FREE.
Frequently Asked Questions
Job Definition Types & Configuration
AWS Batch job definitions support container type jobs. Use containerProperties for standard container configurations, ecsProperties for ECS-specific features like multiple containers with dependencies, or eksProperties for running jobs on EKS clusters. Use multinode for multi-node parallel jobs that coordinate across nodes (requires nodeProperties), and container for single-container jobs or ECS/EKS-based workloads (requires one of containerProperties, ecsProperties, or eksProperties). Note that containerProperties, ecsProperties, and eksProperties are only valid when type is container; if you're using type: "multinode", you must use nodeProperties instead. The name property is immutable: changing the name requires creating a new job definition.
Platform & Compute
To run jobs on Fargate, set platformCapabilities to ["FARGATE"] and include executionRoleArn in your container properties. The platform defaults to EC2 if not specified.
Updates & Revisions
When a new revision is registered (with the default deregisterOnNewRevision: true), the previous revision is deregistered and marked INACTIVE. Set deregisterOnNewRevision: false to keep old revisions ACTIVE. The arn output includes the revision number, while arnPrefix is the ARN without the revision. Use arnPrefix when you need a stable reference across revisions.
Scheduling & Limits
You can configure one retryStrategy per job definition. You can also configure one timeout per job definition; AWS Batch terminates jobs that exceed this duration.