The aws:appautoscaling/policy:Policy resource, part of the Pulumi AWS provider, defines Application Auto Scaling policies that adjust capacity for AWS services based on metrics, thresholds, or forecasts. This guide focuses on three capabilities: target tracking policies with predefined and custom metrics, step scaling for threshold-based capacity changes, and predictive scaling for forecasted demand patterns.
Policies depend on appautoscaling.Target resources that define the scalable dimension and capacity bounds. Step scaling also requires CloudWatch alarms to trigger adjustments. The examples are intentionally small. Combine them with your own targets, alarms, and service infrastructure.
Scale DynamoDB read capacity with target tracking
DynamoDB tables often need to scale read capacity automatically as query patterns change throughout the day. Target tracking policies adjust capacity to maintain a specific utilization percentage.
import * as pulumi from "@pulumi/pulumi";
import * as aws from "@pulumi/aws";
const dynamodbTableReadTarget = new aws.appautoscaling.Target("dynamodb_table_read_target", {
maxCapacity: 100,
minCapacity: 5,
resourceId: "table/tableName",
scalableDimension: "dynamodb:table:ReadCapacityUnits",
serviceNamespace: "dynamodb",
});
const dynamodbTableReadPolicy = new aws.appautoscaling.Policy("dynamodb_table_read_policy", {
name: pulumi.interpolate`DynamoDBReadCapacityUtilization:${dynamodbTableReadTarget.resourceId}`,
policyType: "TargetTrackingScaling",
resourceId: dynamodbTableReadTarget.resourceId,
scalableDimension: dynamodbTableReadTarget.scalableDimension,
serviceNamespace: dynamodbTableReadTarget.serviceNamespace,
targetTrackingScalingPolicyConfiguration: {
predefinedMetricSpecification: {
predefinedMetricType: "DynamoDBReadCapacityUtilization",
},
targetValue: 70,
},
});
import pulumi
import pulumi_aws as aws
dynamodb_table_read_target = aws.appautoscaling.Target("dynamodb_table_read_target",
max_capacity=100,
min_capacity=5,
resource_id="table/tableName",
scalable_dimension="dynamodb:table:ReadCapacityUnits",
service_namespace="dynamodb")
dynamodb_table_read_policy = aws.appautoscaling.Policy("dynamodb_table_read_policy",
name=dynamodb_table_read_target.resource_id.apply(lambda resource_id: f"DynamoDBReadCapacityUtilization:{resource_id}"),
policy_type="TargetTrackingScaling",
resource_id=dynamodb_table_read_target.resource_id,
scalable_dimension=dynamodb_table_read_target.scalable_dimension,
service_namespace=dynamodb_table_read_target.service_namespace,
target_tracking_scaling_policy_configuration={
"predefined_metric_specification": {
"predefined_metric_type": "DynamoDBReadCapacityUtilization",
},
"target_value": 70,
})
package main
import (
"fmt"
"github.com/pulumi/pulumi-aws/sdk/v7/go/aws/appautoscaling"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
dynamodbTableReadTarget, err := appautoscaling.NewTarget(ctx, "dynamodb_table_read_target", &appautoscaling.TargetArgs{
MaxCapacity: pulumi.Int(100),
MinCapacity: pulumi.Int(5),
ResourceId: pulumi.String("table/tableName"),
ScalableDimension: pulumi.String("dynamodb:table:ReadCapacityUnits"),
ServiceNamespace: pulumi.String("dynamodb"),
})
if err != nil {
return err
}
_, err = appautoscaling.NewPolicy(ctx, "dynamodb_table_read_policy", &appautoscaling.PolicyArgs{
Name: dynamodbTableReadTarget.ResourceId.ApplyT(func(resourceId string) (string, error) {
return fmt.Sprintf("DynamoDBReadCapacityUtilization:%v", resourceId), nil
}).(pulumi.StringOutput),
PolicyType: pulumi.String("TargetTrackingScaling"),
ResourceId: dynamodbTableReadTarget.ResourceId,
ScalableDimension: dynamodbTableReadTarget.ScalableDimension,
ServiceNamespace: dynamodbTableReadTarget.ServiceNamespace,
TargetTrackingScalingPolicyConfiguration: &appautoscaling.PolicyTargetTrackingScalingPolicyConfigurationArgs{
PredefinedMetricSpecification: &appautoscaling.PolicyTargetTrackingScalingPolicyConfigurationPredefinedMetricSpecificationArgs{
PredefinedMetricType: pulumi.String("DynamoDBReadCapacityUtilization"),
},
TargetValue: pulumi.Float64(70),
},
})
if err != nil {
return err
}
return nil
})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Aws = Pulumi.Aws;
return await Deployment.RunAsync(() =>
{
var dynamodbTableReadTarget = new Aws.AppAutoScaling.Target("dynamodb_table_read_target", new()
{
MaxCapacity = 100,
MinCapacity = 5,
ResourceId = "table/tableName",
ScalableDimension = "dynamodb:table:ReadCapacityUnits",
ServiceNamespace = "dynamodb",
});
var dynamodbTableReadPolicy = new Aws.AppAutoScaling.Policy("dynamodb_table_read_policy", new()
{
Name = dynamodbTableReadTarget.ResourceId.Apply(resourceId => $"DynamoDBReadCapacityUtilization:{resourceId}"),
PolicyType = "TargetTrackingScaling",
ResourceId = dynamodbTableReadTarget.ResourceId,
ScalableDimension = dynamodbTableReadTarget.ScalableDimension,
ServiceNamespace = dynamodbTableReadTarget.ServiceNamespace,
TargetTrackingScalingPolicyConfiguration = new Aws.AppAutoScaling.Inputs.PolicyTargetTrackingScalingPolicyConfigurationArgs
{
PredefinedMetricSpecification = new Aws.AppAutoScaling.Inputs.PolicyTargetTrackingScalingPolicyConfigurationPredefinedMetricSpecificationArgs
{
PredefinedMetricType = "DynamoDBReadCapacityUtilization",
},
TargetValue = 70,
},
});
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.aws.appautoscaling.Target;
import com.pulumi.aws.appautoscaling.TargetArgs;
import com.pulumi.aws.appautoscaling.Policy;
import com.pulumi.aws.appautoscaling.PolicyArgs;
import com.pulumi.aws.appautoscaling.inputs.PolicyTargetTrackingScalingPolicyConfigurationArgs;
import com.pulumi.aws.appautoscaling.inputs.PolicyTargetTrackingScalingPolicyConfigurationPredefinedMetricSpecificationArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
var dynamodbTableReadTarget = new Target("dynamodbTableReadTarget", TargetArgs.builder()
.maxCapacity(100)
.minCapacity(5)
.resourceId("table/tableName")
.scalableDimension("dynamodb:table:ReadCapacityUnits")
.serviceNamespace("dynamodb")
.build());
var dynamodbTableReadPolicy = new Policy("dynamodbTableReadPolicy", PolicyArgs.builder()
.name(dynamodbTableReadTarget.resourceId().applyValue(_resourceId -> String.format("DynamoDBReadCapacityUtilization:%s", _resourceId)))
.policyType("TargetTrackingScaling")
.resourceId(dynamodbTableReadTarget.resourceId())
.scalableDimension(dynamodbTableReadTarget.scalableDimension())
.serviceNamespace(dynamodbTableReadTarget.serviceNamespace())
.targetTrackingScalingPolicyConfiguration(PolicyTargetTrackingScalingPolicyConfigurationArgs.builder()
.predefinedMetricSpecification(PolicyTargetTrackingScalingPolicyConfigurationPredefinedMetricSpecificationArgs.builder()
.predefinedMetricType("DynamoDBReadCapacityUtilization")
.build())
.targetValue(70.0)
.build())
.build());
}
}
resources:
dynamodbTableReadTarget:
type: aws:appautoscaling:Target
name: dynamodb_table_read_target
properties:
maxCapacity: 100
minCapacity: 5
resourceId: table/tableName
scalableDimension: dynamodb:table:ReadCapacityUnits
serviceNamespace: dynamodb
dynamodbTableReadPolicy:
type: aws:appautoscaling:Policy
name: dynamodb_table_read_policy
properties:
name: DynamoDBReadCapacityUtilization:${dynamodbTableReadTarget.resourceId}
policyType: TargetTrackingScaling
resourceId: ${dynamodbTableReadTarget.resourceId}
scalableDimension: ${dynamodbTableReadTarget.scalableDimension}
serviceNamespace: ${dynamodbTableReadTarget.serviceNamespace}
targetTrackingScalingPolicyConfiguration:
predefinedMetricSpecification:
predefinedMetricType: DynamoDBReadCapacityUtilization
targetValue: 70
The policy monitors DynamoDB read capacity utilization and adds or removes capacity units to maintain the targetValue of 70%. The predefinedMetricSpecification uses AWS-managed metrics; the policy automatically creates CloudWatch alarms to track utilization. The policyType of “TargetTrackingScaling” tells Application Auto Scaling to adjust capacity continuously rather than in discrete steps.
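Write capacity scales independently through its own target and policy pair. Below is a minimal TypeScript sketch of a companion write-capacity policy, assuming the same table name and capacity bounds as the example above; it is an illustration, not part of the original example.
import * as pulumi from "@pulumi/pulumi";
import * as aws from "@pulumi/aws";
// Companion scalable target for the table's write capacity units (assumed bounds).
const dynamodbTableWriteTarget = new aws.appautoscaling.Target("dynamodb_table_write_target", {
    maxCapacity: 100,
    minCapacity: 5,
    resourceId: "table/tableName",
    scalableDimension: "dynamodb:table:WriteCapacityUnits",
    serviceNamespace: "dynamodb",
});
// Target tracking on the predefined write-utilization metric.
const dynamodbTableWritePolicy = new aws.appautoscaling.Policy("dynamodb_table_write_policy", {
    name: pulumi.interpolate`DynamoDBWriteCapacityUtilization:${dynamodbTableWriteTarget.resourceId}`,
    policyType: "TargetTrackingScaling",
    resourceId: dynamodbTableWriteTarget.resourceId,
    scalableDimension: dynamodbTableWriteTarget.scalableDimension,
    serviceNamespace: dynamodbTableWriteTarget.serviceNamespace,
    targetTrackingScalingPolicyConfiguration: {
        predefinedMetricSpecification: {
            predefinedMetricType: "DynamoDBWriteCapacityUtilization",
        },
        targetValue: 70,
    },
});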
Scale down ECS tasks with step scaling
ECS services running batch workloads or background jobs often need to scale down quickly when demand drops. Step scaling policies trigger immediate capacity changes based on CloudWatch alarm thresholds.
import * as pulumi from "@pulumi/pulumi";
import * as aws from "@pulumi/aws";
const ecsTarget = new aws.appautoscaling.Target("ecs_target", {
maxCapacity: 4,
minCapacity: 1,
resourceId: "service/clusterName/serviceName",
scalableDimension: "ecs:service:DesiredCount",
serviceNamespace: "ecs",
});
const ecsPolicy = new aws.appautoscaling.Policy("ecs_policy", {
name: "scale-down",
policyType: "StepScaling",
resourceId: ecsTarget.resourceId,
scalableDimension: ecsTarget.scalableDimension,
serviceNamespace: ecsTarget.serviceNamespace,
stepScalingPolicyConfiguration: {
adjustmentType: "ChangeInCapacity",
cooldown: 60,
metricAggregationType: "Maximum",
stepAdjustments: [{
metricIntervalUpperBound: "0",
scalingAdjustment: -1,
}],
},
});
import pulumi
import pulumi_aws as aws
ecs_target = aws.appautoscaling.Target("ecs_target",
max_capacity=4,
min_capacity=1,
resource_id="service/clusterName/serviceName",
scalable_dimension="ecs:service:DesiredCount",
service_namespace="ecs")
ecs_policy = aws.appautoscaling.Policy("ecs_policy",
name="scale-down",
policy_type="StepScaling",
resource_id=ecs_target.resource_id,
scalable_dimension=ecs_target.scalable_dimension,
service_namespace=ecs_target.service_namespace,
step_scaling_policy_configuration={
"adjustment_type": "ChangeInCapacity",
"cooldown": 60,
"metric_aggregation_type": "Maximum",
"step_adjustments": [{
"metric_interval_upper_bound": "0",
"scaling_adjustment": -1,
}],
})
package main
import (
"github.com/pulumi/pulumi-aws/sdk/v7/go/aws/appautoscaling"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
ecsTarget, err := appautoscaling.NewTarget(ctx, "ecs_target", &appautoscaling.TargetArgs{
MaxCapacity: pulumi.Int(4),
MinCapacity: pulumi.Int(1),
ResourceId: pulumi.String("service/clusterName/serviceName"),
ScalableDimension: pulumi.String("ecs:service:DesiredCount"),
ServiceNamespace: pulumi.String("ecs"),
})
if err != nil {
return err
}
_, err = appautoscaling.NewPolicy(ctx, "ecs_policy", &appautoscaling.PolicyArgs{
Name: pulumi.String("scale-down"),
PolicyType: pulumi.String("StepScaling"),
ResourceId: ecsTarget.ResourceId,
ScalableDimension: ecsTarget.ScalableDimension,
ServiceNamespace: ecsTarget.ServiceNamespace,
StepScalingPolicyConfiguration: &appautoscaling.PolicyStepScalingPolicyConfigurationArgs{
AdjustmentType: pulumi.String("ChangeInCapacity"),
Cooldown: pulumi.Int(60),
MetricAggregationType: pulumi.String("Maximum"),
StepAdjustments: appautoscaling.PolicyStepScalingPolicyConfigurationStepAdjustmentArray{
&appautoscaling.PolicyStepScalingPolicyConfigurationStepAdjustmentArgs{
MetricIntervalUpperBound: pulumi.String("0"),
ScalingAdjustment: pulumi.Int(-1),
},
},
},
})
if err != nil {
return err
}
return nil
})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Aws = Pulumi.Aws;
return await Deployment.RunAsync(() =>
{
var ecsTarget = new Aws.AppAutoScaling.Target("ecs_target", new()
{
MaxCapacity = 4,
MinCapacity = 1,
ResourceId = "service/clusterName/serviceName",
ScalableDimension = "ecs:service:DesiredCount",
ServiceNamespace = "ecs",
});
var ecsPolicy = new Aws.AppAutoScaling.Policy("ecs_policy", new()
{
Name = "scale-down",
PolicyType = "StepScaling",
ResourceId = ecsTarget.ResourceId,
ScalableDimension = ecsTarget.ScalableDimension,
ServiceNamespace = ecsTarget.ServiceNamespace,
StepScalingPolicyConfiguration = new Aws.AppAutoScaling.Inputs.PolicyStepScalingPolicyConfigurationArgs
{
AdjustmentType = "ChangeInCapacity",
Cooldown = 60,
MetricAggregationType = "Maximum",
StepAdjustments = new[]
{
new Aws.AppAutoScaling.Inputs.PolicyStepScalingPolicyConfigurationStepAdjustmentArgs
{
MetricIntervalUpperBound = "0",
ScalingAdjustment = -1,
},
},
},
});
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.aws.appautoscaling.Target;
import com.pulumi.aws.appautoscaling.TargetArgs;
import com.pulumi.aws.appautoscaling.Policy;
import com.pulumi.aws.appautoscaling.PolicyArgs;
import com.pulumi.aws.appautoscaling.inputs.PolicyStepScalingPolicyConfigurationArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
var ecsTarget = new Target("ecsTarget", TargetArgs.builder()
.maxCapacity(4)
.minCapacity(1)
.resourceId("service/clusterName/serviceName")
.scalableDimension("ecs:service:DesiredCount")
.serviceNamespace("ecs")
.build());
var ecsPolicy = new Policy("ecsPolicy", PolicyArgs.builder()
.name("scale-down")
.policyType("StepScaling")
.resourceId(ecsTarget.resourceId())
.scalableDimension(ecsTarget.scalableDimension())
.serviceNamespace(ecsTarget.serviceNamespace())
.stepScalingPolicyConfiguration(PolicyStepScalingPolicyConfigurationArgs.builder()
.adjustmentType("ChangeInCapacity")
.cooldown(60)
.metricAggregationType("Maximum")
.stepAdjustments(PolicyStepScalingPolicyConfigurationStepAdjustmentArgs.builder()
.metricIntervalUpperBound("0")
.scalingAdjustment(-1)
.build())
.build())
.build());
}
}
resources:
ecsTarget:
type: aws:appautoscaling:Target
name: ecs_target
properties:
maxCapacity: 4
minCapacity: 1
resourceId: service/clusterName/serviceName
scalableDimension: ecs:service:DesiredCount
serviceNamespace: ecs
ecsPolicy:
type: aws:appautoscaling:Policy
name: ecs_policy
properties:
name: scale-down
policyType: StepScaling
resourceId: ${ecsTarget.resourceId}
scalableDimension: ${ecsTarget.scalableDimension}
serviceNamespace: ${ecsTarget.serviceNamespace}
stepScalingPolicyConfiguration:
adjustmentType: ChangeInCapacity
cooldown: 60
metricAggregationType: Maximum
stepAdjustments:
- metricIntervalUpperBound: 0
scalingAdjustment: -1
When the associated CloudWatch alarm fires, the policy removes one task (scalingAdjustment: -1). The stepAdjustments array defines ranges relative to the alarm threshold; a metricIntervalUpperBound of “0” applies this step whenever the metric is at or below the threshold. The adjustmentType of “ChangeInCapacity” adds or removes a fixed number of tasks rather than setting an absolute count.
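Step scaling policies only run when a CloudWatch alarm invokes them; the alarm is not created automatically. The following TypeScript sketch extends the example above; the metric, threshold, and names are assumptions rather than values from the original example.
// Alarm on low ECS service CPU; when it fires, the step scaling policy runs.
const scaleDownAlarm = new aws.cloudwatch.MetricAlarm("scale_down_alarm", {
    name: "ecs-service-cpu-low",              // assumption: alarm name
    namespace: "AWS/ECS",
    metricName: "CPUUtilization",
    dimensions: {
        ClusterName: "clusterName",
        ServiceName: "serviceName",
    },
    statistic: "Maximum",
    period: 60,
    evaluationPeriods: 2,
    comparisonOperator: "LessThanThreshold",
    threshold: 20,                             // assumption: scale in below 20% CPU
    alarmActions: [ecsPolicy.arn],             // wire the alarm to the step scaling policy
});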
Scale Aurora read replicas based on CPU
Aurora clusters distribute read traffic across replicas to handle query load. When CPU utilization rises, adding replicas spreads the load and maintains response times.
import * as pulumi from "@pulumi/pulumi";
import * as aws from "@pulumi/aws";
const replicas = new aws.appautoscaling.Target("replicas", {
serviceNamespace: "rds",
scalableDimension: "rds:cluster:ReadReplicaCount",
resourceId: pulumi.interpolate`cluster:${example.id}`,
minCapacity: 1,
maxCapacity: 15,
});
const replicasPolicy = new aws.appautoscaling.Policy("replicas", {
name: "cpu-auto-scaling",
serviceNamespace: replicas.serviceNamespace,
scalableDimension: replicas.scalableDimension,
resourceId: replicas.resourceId,
policyType: "TargetTrackingScaling",
targetTrackingScalingPolicyConfiguration: {
predefinedMetricSpecification: {
predefinedMetricType: "RDSReaderAverageCPUUtilization",
},
targetValue: 75,
scaleInCooldown: 300,
scaleOutCooldown: 300,
},
});
import pulumi
import pulumi_aws as aws
replicas = aws.appautoscaling.Target("replicas",
service_namespace="rds",
scalable_dimension="rds:cluster:ReadReplicaCount",
resource_id=f"cluster:{example['id']}",
min_capacity=1,
max_capacity=15)
replicas_policy = aws.appautoscaling.Policy("replicas",
name="cpu-auto-scaling",
service_namespace=replicas.service_namespace,
scalable_dimension=replicas.scalable_dimension,
resource_id=replicas.resource_id,
policy_type="TargetTrackingScaling",
target_tracking_scaling_policy_configuration={
"predefined_metric_specification": {
"predefined_metric_type": "RDSReaderAverageCPUUtilization",
},
"target_value": 75,
"scale_in_cooldown": 300,
"scale_out_cooldown": 300,
})
package main
import (
"fmt"
"github.com/pulumi/pulumi-aws/sdk/v7/go/aws/appautoscaling"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
replicas, err := appautoscaling.NewTarget(ctx, "replicas", &appautoscaling.TargetArgs{
ServiceNamespace: pulumi.String("rds"),
ScalableDimension: pulumi.String("rds:cluster:ReadReplicaCount"),
ResourceId: pulumi.Sprintf("cluster:%v", example.Id),
MinCapacity: pulumi.Int(1),
MaxCapacity: pulumi.Int(15),
})
if err != nil {
return err
}
_, err = appautoscaling.NewPolicy(ctx, "replicas", &appautoscaling.PolicyArgs{
Name: pulumi.String("cpu-auto-scaling"),
ServiceNamespace: replicas.ServiceNamespace,
ScalableDimension: replicas.ScalableDimension,
ResourceId: replicas.ResourceId,
PolicyType: pulumi.String("TargetTrackingScaling"),
TargetTrackingScalingPolicyConfiguration: &appautoscaling.PolicyTargetTrackingScalingPolicyConfigurationArgs{
PredefinedMetricSpecification: &appautoscaling.PolicyTargetTrackingScalingPolicyConfigurationPredefinedMetricSpecificationArgs{
PredefinedMetricType: pulumi.String("RDSReaderAverageCPUUtilization"),
},
TargetValue: pulumi.Float64(75),
ScaleInCooldown: pulumi.Int(300),
ScaleOutCooldown: pulumi.Int(300),
},
})
if err != nil {
return err
}
return nil
})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Aws = Pulumi.Aws;
return await Deployment.RunAsync(() =>
{
var replicas = new Aws.AppAutoScaling.Target("replicas", new()
{
ServiceNamespace = "rds",
ScalableDimension = "rds:cluster:ReadReplicaCount",
ResourceId = $"cluster:{example.Id}",
MinCapacity = 1,
MaxCapacity = 15,
});
var replicasPolicy = new Aws.AppAutoScaling.Policy("replicas", new()
{
Name = "cpu-auto-scaling",
ServiceNamespace = replicas.ServiceNamespace,
ScalableDimension = replicas.ScalableDimension,
ResourceId = replicas.ResourceId,
PolicyType = "TargetTrackingScaling",
TargetTrackingScalingPolicyConfiguration = new Aws.AppAutoScaling.Inputs.PolicyTargetTrackingScalingPolicyConfigurationArgs
{
PredefinedMetricSpecification = new Aws.AppAutoScaling.Inputs.PolicyTargetTrackingScalingPolicyConfigurationPredefinedMetricSpecificationArgs
{
PredefinedMetricType = "RDSReaderAverageCPUUtilization",
},
TargetValue = 75,
ScaleInCooldown = 300,
ScaleOutCooldown = 300,
},
});
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.aws.appautoscaling.Target;
import com.pulumi.aws.appautoscaling.TargetArgs;
import com.pulumi.aws.appautoscaling.Policy;
import com.pulumi.aws.appautoscaling.PolicyArgs;
import com.pulumi.aws.appautoscaling.inputs.PolicyTargetTrackingScalingPolicyConfigurationArgs;
import com.pulumi.aws.appautoscaling.inputs.PolicyTargetTrackingScalingPolicyConfigurationPredefinedMetricSpecificationArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
var replicas = new Target("replicas", TargetArgs.builder()
.serviceNamespace("rds")
.scalableDimension("rds:cluster:ReadReplicaCount")
.resourceId(String.format("cluster:%s", example.id()))
.minCapacity(1)
.maxCapacity(15)
.build());
var replicasPolicy = new Policy("replicasPolicy", PolicyArgs.builder()
.name("cpu-auto-scaling")
.serviceNamespace(replicas.serviceNamespace())
.scalableDimension(replicas.scalableDimension())
.resourceId(replicas.resourceId())
.policyType("TargetTrackingScaling")
.targetTrackingScalingPolicyConfiguration(PolicyTargetTrackingScalingPolicyConfigurationArgs.builder()
.predefinedMetricSpecification(PolicyTargetTrackingScalingPolicyConfigurationPredefinedMetricSpecificationArgs.builder()
.predefinedMetricType("RDSReaderAverageCPUUtilization")
.build())
.targetValue(75.0)
.scaleInCooldown(300)
.scaleOutCooldown(300)
.build())
.build());
}
}
resources:
replicas:
type: aws:appautoscaling:Target
properties:
serviceNamespace: rds
scalableDimension: rds:cluster:ReadReplicaCount
resourceId: cluster:${example.id}
minCapacity: 1
maxCapacity: 15
replicasPolicy:
type: aws:appautoscaling:Policy
name: replicas
properties:
name: cpu-auto-scaling
serviceNamespace: ${replicas.serviceNamespace}
scalableDimension: ${replicas.scalableDimension}
resourceId: ${replicas.resourceId}
policyType: TargetTrackingScaling
targetTrackingScalingPolicyConfiguration:
predefinedMetricSpecification:
predefinedMetricType: RDSReaderAverageCPUUtilization
targetValue: 75
scaleInCooldown: 300
scaleOutCooldown: 300
The policy tracks RDSReaderAverageCPUUtilization across all read replicas and adjusts replica count to maintain 75% CPU. The scaleInCooldown and scaleOutCooldown properties (both 300 seconds) prevent rapid scaling oscillations by enforcing wait periods between adjustments. The serviceNamespace of “rds” and scalableDimension of “rds:cluster:ReadReplicaCount” tell Application Auto Scaling which Aurora capacity dimension to adjust.
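The snippets above reference an existing Aurora cluster as example without defining it. One way to resolve that reference in the TypeScript version is a data-source lookup by cluster identifier; this is a sketch that extends the TypeScript example above, and the cluster name is an assumption.
// Look up an existing Aurora cluster (assumed identifier).
const example = aws.rds.getClusterOutput({
    clusterIdentifier: "my-aurora-cluster",
});
// Application Auto Scaling expects resourceId in the form "cluster:<cluster identifier>",
// which is what the target above builds from example.id.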
Calculate backlog per task with metric math
Queue-based applications need to scale based on the ratio of pending work to active workers. Metric math expressions combine multiple CloudWatch metrics into a derived scaling signal.
import * as pulumi from "@pulumi/pulumi";
import * as aws from "@pulumi/aws";
const ecsTarget = new aws.appautoscaling.Target("ecs_target", {
maxCapacity: 4,
minCapacity: 1,
resourceId: "service/clusterName/serviceName",
scalableDimension: "ecs:service:DesiredCount",
serviceNamespace: "ecs",
});
const example = new aws.appautoscaling.Policy("example", {
name: "foo",
policyType: "TargetTrackingScaling",
resourceId: ecsTarget.resourceId,
scalableDimension: ecsTarget.scalableDimension,
serviceNamespace: ecsTarget.serviceNamespace,
targetTrackingScalingPolicyConfiguration: {
targetValue: 100,
customizedMetricSpecification: {
metrics: [
{
label: "Get the queue size (the number of messages waiting to be processed)",
id: "m1",
metricStat: {
metric: {
metricName: "ApproximateNumberOfMessagesVisible",
namespace: "AWS/SQS",
dimensions: [{
name: "QueueName",
value: "my-queue",
}],
},
stat: "Sum",
},
returnData: false,
},
{
label: "Get the ECS running task count (the number of currently running tasks)",
id: "m2",
metricStat: {
metric: {
metricName: "RunningTaskCount",
namespace: "ECS/ContainerInsights",
dimensions: [
{
name: "ClusterName",
value: "default",
},
{
name: "ServiceName",
value: "web-app",
},
],
},
stat: "Average",
},
returnData: false,
},
{
label: "Calculate the backlog per instance",
id: "e1",
expression: "m1 / m2",
returnData: true,
},
],
},
},
});
import pulumi
import pulumi_aws as aws
ecs_target = aws.appautoscaling.Target("ecs_target",
max_capacity=4,
min_capacity=1,
resource_id="service/clusterName/serviceName",
scalable_dimension="ecs:service:DesiredCount",
service_namespace="ecs")
example = aws.appautoscaling.Policy("example",
name="foo",
policy_type="TargetTrackingScaling",
resource_id=ecs_target.resource_id,
scalable_dimension=ecs_target.scalable_dimension,
service_namespace=ecs_target.service_namespace,
target_tracking_scaling_policy_configuration={
"target_value": 100,
"customized_metric_specification": {
"metrics": [
{
"label": "Get the queue size (the number of messages waiting to be processed)",
"id": "m1",
"metric_stat": {
"metric": {
"metric_name": "ApproximateNumberOfMessagesVisible",
"namespace": "AWS/SQS",
"dimensions": [{
"name": "QueueName",
"value": "my-queue",
}],
},
"stat": "Sum",
},
"return_data": False,
},
{
"label": "Get the ECS running task count (the number of currently running tasks)",
"id": "m2",
"metric_stat": {
"metric": {
"metric_name": "RunningTaskCount",
"namespace": "ECS/ContainerInsights",
"dimensions": [
{
"name": "ClusterName",
"value": "default",
},
{
"name": "ServiceName",
"value": "web-app",
},
],
},
"stat": "Average",
},
"return_data": False,
},
{
"label": "Calculate the backlog per instance",
"id": "e1",
"expression": "m1 / m2",
"return_data": True,
},
],
},
})
package main
import (
"github.com/pulumi/pulumi-aws/sdk/v7/go/aws/appautoscaling"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
ecsTarget, err := appautoscaling.NewTarget(ctx, "ecs_target", &appautoscaling.TargetArgs{
MaxCapacity: pulumi.Int(4),
MinCapacity: pulumi.Int(1),
ResourceId: pulumi.String("service/clusterName/serviceName"),
ScalableDimension: pulumi.String("ecs:service:DesiredCount"),
ServiceNamespace: pulumi.String("ecs"),
})
if err != nil {
return err
}
_, err = appautoscaling.NewPolicy(ctx, "example", &appautoscaling.PolicyArgs{
Name: pulumi.String("foo"),
PolicyType: pulumi.String("TargetTrackingScaling"),
ResourceId: ecsTarget.ResourceId,
ScalableDimension: ecsTarget.ScalableDimension,
ServiceNamespace: ecsTarget.ServiceNamespace,
TargetTrackingScalingPolicyConfiguration: &appautoscaling.PolicyTargetTrackingScalingPolicyConfigurationArgs{
TargetValue: pulumi.Float64(100),
CustomizedMetricSpecification: &appautoscaling.PolicyTargetTrackingScalingPolicyConfigurationCustomizedMetricSpecificationArgs{
Metrics: appautoscaling.PolicyTargetTrackingScalingPolicyConfigurationCustomizedMetricSpecificationMetricArray{
&appautoscaling.PolicyTargetTrackingScalingPolicyConfigurationCustomizedMetricSpecificationMetricArgs{
Label: pulumi.String("Get the queue size (the number of messages waiting to be processed)"),
Id: pulumi.String("m1"),
MetricStat: &appautoscaling.PolicyTargetTrackingScalingPolicyConfigurationCustomizedMetricSpecificationMetricMetricStatArgs{
Metric: &appautoscaling.PolicyTargetTrackingScalingPolicyConfigurationCustomizedMetricSpecificationMetricMetricStatMetricArgs{
MetricName: pulumi.String("ApproximateNumberOfMessagesVisible"),
Namespace: pulumi.String("AWS/SQS"),
Dimensions: appautoscaling.PolicyTargetTrackingScalingPolicyConfigurationCustomizedMetricSpecificationMetricMetricStatMetricDimensionArray{
&appautoscaling.PolicyTargetTrackingScalingPolicyConfigurationCustomizedMetricSpecificationMetricMetricStatMetricDimensionArgs{
Name: pulumi.String("QueueName"),
Value: pulumi.String("my-queue"),
},
},
},
Stat: pulumi.String("Sum"),
},
ReturnData: pulumi.Bool(false),
},
&appautoscaling.PolicyTargetTrackingScalingPolicyConfigurationCustomizedMetricSpecificationMetricArgs{
Label: pulumi.String("Get the ECS running task count (the number of currently running tasks)"),
Id: pulumi.String("m2"),
MetricStat: &appautoscaling.PolicyTargetTrackingScalingPolicyConfigurationCustomizedMetricSpecificationMetricMetricStatArgs{
Metric: &appautoscaling.PolicyTargetTrackingScalingPolicyConfigurationCustomizedMetricSpecificationMetricMetricStatMetricArgs{
MetricName: pulumi.String("RunningTaskCount"),
Namespace: pulumi.String("ECS/ContainerInsights"),
Dimensions: appautoscaling.PolicyTargetTrackingScalingPolicyConfigurationCustomizedMetricSpecificationMetricMetricStatMetricDimensionArray{
&appautoscaling.PolicyTargetTrackingScalingPolicyConfigurationCustomizedMetricSpecificationMetricMetricStatMetricDimensionArgs{
Name: pulumi.String("ClusterName"),
Value: pulumi.String("default"),
},
&appautoscaling.PolicyTargetTrackingScalingPolicyConfigurationCustomizedMetricSpecificationMetricMetricStatMetricDimensionArgs{
Name: pulumi.String("ServiceName"),
Value: pulumi.String("web-app"),
},
},
},
Stat: pulumi.String("Average"),
},
ReturnData: pulumi.Bool(false),
},
&appautoscaling.PolicyTargetTrackingScalingPolicyConfigurationCustomizedMetricSpecificationMetricArgs{
Label: pulumi.String("Calculate the backlog per instance"),
Id: pulumi.String("e1"),
Expression: pulumi.String("m1 / m2"),
ReturnData: pulumi.Bool(true),
},
},
},
},
})
if err != nil {
return err
}
return nil
})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Aws = Pulumi.Aws;
return await Deployment.RunAsync(() =>
{
var ecsTarget = new Aws.AppAutoScaling.Target("ecs_target", new()
{
MaxCapacity = 4,
MinCapacity = 1,
ResourceId = "service/clusterName/serviceName",
ScalableDimension = "ecs:service:DesiredCount",
ServiceNamespace = "ecs",
});
var example = new Aws.AppAutoScaling.Policy("example", new()
{
Name = "foo",
PolicyType = "TargetTrackingScaling",
ResourceId = ecsTarget.ResourceId,
ScalableDimension = ecsTarget.ScalableDimension,
ServiceNamespace = ecsTarget.ServiceNamespace,
TargetTrackingScalingPolicyConfiguration = new Aws.AppAutoScaling.Inputs.PolicyTargetTrackingScalingPolicyConfigurationArgs
{
TargetValue = 100,
CustomizedMetricSpecification = new Aws.AppAutoScaling.Inputs.PolicyTargetTrackingScalingPolicyConfigurationCustomizedMetricSpecificationArgs
{
Metrics = new[]
{
new Aws.AppAutoScaling.Inputs.PolicyTargetTrackingScalingPolicyConfigurationCustomizedMetricSpecificationMetricArgs
{
Label = "Get the queue size (the number of messages waiting to be processed)",
Id = "m1",
MetricStat = new Aws.AppAutoScaling.Inputs.PolicyTargetTrackingScalingPolicyConfigurationCustomizedMetricSpecificationMetricMetricStatArgs
{
Metric = new Aws.AppAutoScaling.Inputs.PolicyTargetTrackingScalingPolicyConfigurationCustomizedMetricSpecificationMetricMetricStatMetricArgs
{
MetricName = "ApproximateNumberOfMessagesVisible",
Namespace = "AWS/SQS",
Dimensions = new[]
{
new Aws.AppAutoScaling.Inputs.PolicyTargetTrackingScalingPolicyConfigurationCustomizedMetricSpecificationMetricMetricStatMetricDimensionArgs
{
Name = "QueueName",
Value = "my-queue",
},
},
},
Stat = "Sum",
},
ReturnData = false,
},
new Aws.AppAutoScaling.Inputs.PolicyTargetTrackingScalingPolicyConfigurationCustomizedMetricSpecificationMetricArgs
{
Label = "Get the ECS running task count (the number of currently running tasks)",
Id = "m2",
MetricStat = new Aws.AppAutoScaling.Inputs.PolicyTargetTrackingScalingPolicyConfigurationCustomizedMetricSpecificationMetricMetricStatArgs
{
Metric = new Aws.AppAutoScaling.Inputs.PolicyTargetTrackingScalingPolicyConfigurationCustomizedMetricSpecificationMetricMetricStatMetricArgs
{
MetricName = "RunningTaskCount",
Namespace = "ECS/ContainerInsights",
Dimensions = new[]
{
new Aws.AppAutoScaling.Inputs.PolicyTargetTrackingScalingPolicyConfigurationCustomizedMetricSpecificationMetricMetricStatMetricDimensionArgs
{
Name = "ClusterName",
Value = "default",
},
new Aws.AppAutoScaling.Inputs.PolicyTargetTrackingScalingPolicyConfigurationCustomizedMetricSpecificationMetricMetricStatMetricDimensionArgs
{
Name = "ServiceName",
Value = "web-app",
},
},
},
Stat = "Average",
},
ReturnData = false,
},
new Aws.AppAutoScaling.Inputs.PolicyTargetTrackingScalingPolicyConfigurationCustomizedMetricSpecificationMetricArgs
{
Label = "Calculate the backlog per instance",
Id = "e1",
Expression = "m1 / m2",
ReturnData = true,
},
},
},
},
});
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.aws.appautoscaling.Target;
import com.pulumi.aws.appautoscaling.TargetArgs;
import com.pulumi.aws.appautoscaling.Policy;
import com.pulumi.aws.appautoscaling.PolicyArgs;
import com.pulumi.aws.appautoscaling.inputs.PolicyTargetTrackingScalingPolicyConfigurationArgs;
import com.pulumi.aws.appautoscaling.inputs.PolicyTargetTrackingScalingPolicyConfigurationCustomizedMetricSpecificationArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
var ecsTarget = new Target("ecsTarget", TargetArgs.builder()
.maxCapacity(4)
.minCapacity(1)
.resourceId("service/clusterName/serviceName")
.scalableDimension("ecs:service:DesiredCount")
.serviceNamespace("ecs")
.build());
var example = new Policy("example", PolicyArgs.builder()
.name("foo")
.policyType("TargetTrackingScaling")
.resourceId(ecsTarget.resourceId())
.scalableDimension(ecsTarget.scalableDimension())
.serviceNamespace(ecsTarget.serviceNamespace())
.targetTrackingScalingPolicyConfiguration(PolicyTargetTrackingScalingPolicyConfigurationArgs.builder()
.targetValue(100.0)
.customizedMetricSpecification(PolicyTargetTrackingScalingPolicyConfigurationCustomizedMetricSpecificationArgs.builder()
.metrics(
PolicyTargetTrackingScalingPolicyConfigurationCustomizedMetricSpecificationMetricArgs.builder()
.label("Get the queue size (the number of messages waiting to be processed)")
.id("m1")
.metricStat(PolicyTargetTrackingScalingPolicyConfigurationCustomizedMetricSpecificationMetricMetricStatArgs.builder()
.metric(PolicyTargetTrackingScalingPolicyConfigurationCustomizedMetricSpecificationMetricMetricStatMetricArgs.builder()
.metricName("ApproximateNumberOfMessagesVisible")
.namespace("AWS/SQS")
.dimensions(PolicyTargetTrackingScalingPolicyConfigurationCustomizedMetricSpecificationMetricMetricStatMetricDimensionArgs.builder()
.name("QueueName")
.value("my-queue")
.build())
.build())
.stat("Sum")
.build())
.returnData(false)
.build(),
PolicyTargetTrackingScalingPolicyConfigurationCustomizedMetricSpecificationMetricArgs.builder()
.label("Get the ECS running task count (the number of currently running tasks)")
.id("m2")
.metricStat(PolicyTargetTrackingScalingPolicyConfigurationCustomizedMetricSpecificationMetricMetricStatArgs.builder()
.metric(PolicyTargetTrackingScalingPolicyConfigurationCustomizedMetricSpecificationMetricMetricStatMetricArgs.builder()
.metricName("RunningTaskCount")
.namespace("ECS/ContainerInsights")
.dimensions(
PolicyTargetTrackingScalingPolicyConfigurationCustomizedMetricSpecificationMetricMetricStatMetricDimensionArgs.builder()
.name("ClusterName")
.value("default")
.build(),
PolicyTargetTrackingScalingPolicyConfigurationCustomizedMetricSpecificationMetricMetricStatMetricDimensionArgs.builder()
.name("ServiceName")
.value("web-app")
.build())
.build())
.stat("Average")
.build())
.returnData(false)
.build(),
PolicyTargetTrackingScalingPolicyConfigurationCustomizedMetricSpecificationMetricArgs.builder()
.label("Calculate the backlog per instance")
.id("e1")
.expression("m1 / m2")
.returnData(true)
.build())
.build())
.build())
.build());
}
}
resources:
ecsTarget:
type: aws:appautoscaling:Target
name: ecs_target
properties:
maxCapacity: 4
minCapacity: 1
resourceId: service/clusterName/serviceName
scalableDimension: ecs:service:DesiredCount
serviceNamespace: ecs
example:
type: aws:appautoscaling:Policy
properties:
name: foo
policyType: TargetTrackingScaling
resourceId: ${ecsTarget.resourceId}
scalableDimension: ${ecsTarget.scalableDimension}
serviceNamespace: ${ecsTarget.serviceNamespace}
targetTrackingScalingPolicyConfiguration:
targetValue: 100
customizedMetricSpecification:
metrics:
- label: Get the queue size (the number of messages waiting to be processed)
id: m1
metricStat:
metric:
metricName: ApproximateNumberOfMessagesVisible
namespace: AWS/SQS
dimensions:
- name: QueueName
value: my-queue
stat: Sum
returnData: false
- label: Get the ECS running task count (the number of currently running tasks)
id: m2
metricStat:
metric:
metricName: RunningTaskCount
namespace: ECS/ContainerInsights
dimensions:
- name: ClusterName
value: default
- name: ServiceName
value: web-app
stat: Average
returnData: false
- label: Calculate the backlog per instance
id: e1
expression: m1 / m2
returnData: true
The customizedMetricSpecification defines three metrics: SQS queue depth (m1), ECS running task count (m2), and a calculated expression (e1) that divides queue size by task count. Only the expression has returnData set to true, making it the scaling signal. The policy scales to maintain 100 messages per task by adjusting ECS task count.
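A common way to choose the targetValue is to work backward from an acceptable queue latency. The arithmetic below is a rough sketch; both input numbers are assumptions rather than values taken from the example.
// Backlog per task the service can tolerate, assuming each task works through messages sequentially:
//   target backlog per task = acceptable latency (s) / average processing time per message (s)
const acceptableLatencySeconds = 600;  // assumption: a message may wait up to 10 minutes
const secondsPerMessage = 6;           // assumption: ~6 seconds of work per message
const targetBacklogPerTask = acceptableLatencySeconds / secondsPerMessage; // 100, the targetValue above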
Use predictive scaling for ECS memory patterns
Applications with predictable daily or weekly traffic patterns can benefit from predictive scaling, which forecasts demand and scales proactively rather than reactively.
import * as pulumi from "@pulumi/pulumi";
import * as aws from "@pulumi/aws";
const example = new aws.appautoscaling.Policy("example", {
name: "example-policy",
resourceId: exampleAwsAppautoscalingTarget.resourceId,
scalableDimension: exampleAwsAppautoscalingTarget.scalableDimension,
serviceNamespace: exampleAwsAppautoscalingTarget.serviceNamespace,
policyType: "PredictiveScaling",
predictiveScalingPolicyConfiguration: {
metricSpecifications: [{
targetValue: "40",
predefinedMetricPairSpecification: {
predefinedMetricType: "ECSServiceMemoryUtilization",
},
}],
},
});
import pulumi
import pulumi_aws as aws
example = aws.appautoscaling.Policy("example",
name="example-policy",
resource_id=example_aws_appautoscaling_target["resourceId"],
scalable_dimension=example_aws_appautoscaling_target["scalableDimension"],
service_namespace=example_aws_appautoscaling_target["serviceNamespace"],
policy_type="PredictiveScaling",
predictive_scaling_policy_configuration={
"metric_specifications": [{
"target_value": "40",
"predefined_metric_pair_specification": {
"predefined_metric_type": "ECSServiceMemoryUtilization",
},
}],
})
package main
import (
"github.com/pulumi/pulumi-aws/sdk/v7/go/aws/appautoscaling"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
_, err := appautoscaling.NewPolicy(ctx, "example", &appautoscaling.PolicyArgs{
Name: pulumi.String("example-policy"),
ResourceId: pulumi.Any(exampleAwsAppautoscalingTarget.ResourceId),
ScalableDimension: pulumi.Any(exampleAwsAppautoscalingTarget.ScalableDimension),
ServiceNamespace: pulumi.Any(exampleAwsAppautoscalingTarget.ServiceNamespace),
PolicyType: pulumi.String("PredictiveScaling"),
PredictiveScalingPolicyConfiguration: &appautoscaling.PolicyPredictiveScalingPolicyConfigurationArgs{
MetricSpecifications: appautoscaling.PolicyPredictiveScalingPolicyConfigurationMetricSpecificationArray{
&appautoscaling.PolicyPredictiveScalingPolicyConfigurationMetricSpecificationArgs{
TargetValue: pulumi.String("40"),
PredefinedMetricPairSpecification: &appautoscaling.PolicyPredictiveScalingPolicyConfigurationMetricSpecificationPredefinedMetricPairSpecificationArgs{
PredefinedMetricType: pulumi.String("ECSServiceMemoryUtilization"),
},
},
},
},
})
if err != nil {
return err
}
return nil
})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Aws = Pulumi.Aws;
return await Deployment.RunAsync(() =>
{
var example = new Aws.AppAutoScaling.Policy("example", new()
{
Name = "example-policy",
ResourceId = exampleAwsAppautoscalingTarget.ResourceId,
ScalableDimension = exampleAwsAppautoscalingTarget.ScalableDimension,
ServiceNamespace = exampleAwsAppautoscalingTarget.ServiceNamespace,
PolicyType = "PredictiveScaling",
PredictiveScalingPolicyConfiguration = new Aws.AppAutoScaling.Inputs.PolicyPredictiveScalingPolicyConfigurationArgs
{
MetricSpecifications = new[]
{
new Aws.AppAutoScaling.Inputs.PolicyPredictiveScalingPolicyConfigurationMetricSpecificationArgs
{
TargetValue = "40",
PredefinedMetricPairSpecification = new Aws.AppAutoScaling.Inputs.PolicyPredictiveScalingPolicyConfigurationMetricSpecificationPredefinedMetricPairSpecificationArgs
{
PredefinedMetricType = "ECSServiceMemoryUtilization",
},
},
},
},
});
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.aws.appautoscaling.Policy;
import com.pulumi.aws.appautoscaling.PolicyArgs;
import com.pulumi.aws.appautoscaling.inputs.PolicyPredictiveScalingPolicyConfigurationArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
var example = new Policy("example", PolicyArgs.builder()
.name("example-policy")
.resourceId(exampleAwsAppautoscalingTarget.resourceId())
.scalableDimension(exampleAwsAppautoscalingTarget.scalableDimension())
.serviceNamespace(exampleAwsAppautoscalingTarget.serviceNamespace())
.policyType("PredictiveScaling")
.predictiveScalingPolicyConfiguration(PolicyPredictiveScalingPolicyConfigurationArgs.builder()
.metricSpecifications(PolicyPredictiveScalingPolicyConfigurationMetricSpecificationArgs.builder()
.targetValue("40")
.predefinedMetricPairSpecification(PolicyPredictiveScalingPolicyConfigurationMetricSpecificationPredefinedMetricPairSpecificationArgs.builder()
.predefinedMetricType("ECSServiceMemoryUtilization")
.build())
.build())
.build())
.build());
}
}
resources:
example:
type: aws:appautoscaling:Policy
properties:
name: example-policy
resourceId: ${exampleAwsAppautoscalingTarget.resourceId}
scalableDimension: ${exampleAwsAppautoscalingTarget.scalableDimension}
serviceNamespace: ${exampleAwsAppautoscalingTarget.serviceNamespace}
policyType: PredictiveScaling
predictiveScalingPolicyConfiguration:
metricSpecifications:
- targetValue: 40
predefinedMetricPairSpecification:
predefinedMetricType: ECSServiceMemoryUtilization
The predictiveScalingPolicyConfiguration uses historical ECS memory utilization data to forecast future demand. The predefinedMetricPairSpecification of “ECSServiceMemoryUtilization” tells AWS to analyze both load and capacity metrics. The policy adds tasks ahead of forecasted demand so that memory utilization stays near the targetValue of 40%, reducing latency spikes during predictable traffic increases.
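The snippets reference a pre-existing exampleAwsAppautoscalingTarget rather than creating one. Below is a minimal TypeScript sketch of such a target for an ECS service; the capacity bounds and resource names are assumptions.
import * as aws from "@pulumi/aws";
// Scalable target the predictive policy above could attach to (assumed names and bounds).
const exampleAwsAppautoscalingTarget = new aws.appautoscaling.Target("example", {
    maxCapacity: 10,
    minCapacity: 1,
    resourceId: "service/clusterName/serviceName",
    scalableDimension: "ecs:service:DesiredCount",
    serviceNamespace: "ecs",
});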
Beyond these examples
These snippets focus on specific policy-level features: target tracking with predefined and custom metrics, step scaling for threshold-based adjustments, and predictive scaling for forecasted demand. They’re intentionally minimal rather than full auto-scaling solutions.
The examples reference pre-existing infrastructure such as appautoscaling.Target resources defining capacity bounds, CloudWatch alarms (for step scaling), and DynamoDB tables, ECS services, or Aurora clusters. They focus on configuring the policy rather than provisioning the underlying services.
To keep things focused, common policy patterns are omitted, including:
- CloudWatch alarm creation and configuration
- Cooldown periods and adjustment magnitude tuning
- Custom metric specifications for non-standard signals
- Policy attachment to multiple targets
These omissions are intentional: the goal is to illustrate how each policy type is wired, not to provide drop-in auto-scaling modules. See the Application Auto Scaling Policy resource reference for all available configuration options.
Let's configure AWS Application Auto Scaling Policies
Get started with Pulumi Cloud, then follow our quick setup guide to deploy this infrastructure.
Frequently Asked Questions
Policy Types & Configuration
- Supported policy types are StepScaling (the default), TargetTrackingScaling, and PredictiveScaling. Each requires a corresponding configuration block.
- Match the configuration block to the policyType: stepScalingPolicyConfiguration for StepScaling, targetTrackingScalingPolicyConfiguration for TargetTrackingScaling, and predictiveScalingPolicyConfiguration for PredictiveScaling.
Immutability & Updates
- The name, resourceId, scalableDimension, and serviceNamespace properties are immutable. Changing any of these forces resource replacement.
Metrics & Scaling Behavior
- For target tracking, set scaleInCooldown and scaleOutCooldown in targetTrackingScalingPolicyConfiguration. For step scaling, set cooldown in stepScalingPolicyConfiguration.
- Use customizedMetricSpecification with a metrics array containing metricStat entries and expression entries to calculate custom scaling metrics.
- Predefined metrics reference AWS-managed metrics (such as DynamoDBReadCapacityUtilization), while customized metrics let you define your own CloudWatch metrics or metric math expressions.
Service Integration
- Set the serviceNamespace value (dynamodb, ecs, rds, kafka) to match your target service.