Google Cloud (GCP) Classic

v6.24.0 published on Tuesday, May 17, 2022 by Pulumi

Autoscaler

Represents an Autoscaler resource.

Autoscalers allow you to automatically scale virtual machine instances in managed instance groups according to an autoscaling policy that you define.

To get more information about Autoscaler, see:

Example Usage

Autoscaler Single Instance

using Pulumi;
using Gcp = Pulumi.Gcp;

class MyStack : Stack
{
    public MyStack()
    {
        var debian9 = Output.Create(Gcp.Compute.GetImage.InvokeAsync(new Gcp.Compute.GetImageArgs
        {
            Family = "debian-9",
            Project = "debian-cloud",
        }));
        var defaultInstanceTemplate = new Gcp.Compute.InstanceTemplate("defaultInstanceTemplate", new Gcp.Compute.InstanceTemplateArgs
        {
            MachineType = "e2-medium",
            CanIpForward = false,
            Tags = 
            {
                "foo",
                "bar",
            },
            Disks = 
            {
                new Gcp.Compute.Inputs.InstanceTemplateDiskArgs
                {
                    SourceImage = debian9.Apply(debian9 => debian9.Id),
                },
            },
            NetworkInterfaces = 
            {
                new Gcp.Compute.Inputs.InstanceTemplateNetworkInterfaceArgs
                {
                    Network = "default",
                },
            },
            Metadata = 
            {
                { "foo", "bar" },
            },
            ServiceAccount = new Gcp.Compute.Inputs.InstanceTemplateServiceAccountArgs
            {
                Scopes = 
                {
                    "userinfo-email",
                    "compute-ro",
                    "storage-ro",
                },
            },
        }, new CustomResourceOptions
        {
            Provider = google_beta,
        });
        var defaultTargetPool = new Gcp.Compute.TargetPool("defaultTargetPool", new Gcp.Compute.TargetPoolArgs
        {
        }, new CustomResourceOptions
        {
            Provider = google_beta,
        });
        var defaultInstanceGroupManager = new Gcp.Compute.InstanceGroupManager("defaultInstanceGroupManager", new Gcp.Compute.InstanceGroupManagerArgs
        {
            Zone = "us-central1-f",
            Versions = 
            {
                new Gcp.Compute.Inputs.InstanceGroupManagerVersionArgs
                {
                    InstanceTemplate = defaultInstanceTemplate.Id,
                    Name = "primary",
                },
            },
            TargetPools = 
            {
                defaultTargetPool.Id,
            },
            BaseInstanceName = "autoscaler-sample",
        }, new CustomResourceOptions
        {
            Provider = google_beta,
        });
        var defaultAutoscaler = new Gcp.Compute.Autoscaler("defaultAutoscaler", new Gcp.Compute.AutoscalerArgs
        {
            Zone = "us-central1-f",
            Target = defaultInstanceGroupManager.Id,
            AutoscalingPolicy = new Gcp.Compute.Inputs.AutoscalerAutoscalingPolicyArgs
            {
                MaxReplicas = 5,
                MinReplicas = 1,
                CooldownPeriod = 60,
                Metrics = 
                {
                    new Gcp.Compute.Inputs.AutoscalerAutoscalingPolicyMetricArgs
                    {
                        Name = "pubsub.googleapis.com/subscription/num_undelivered_messages",
                        Filter = "resource.type = pubsub_subscription AND resource.label.subscription_id = our-subscription",
                        SingleInstanceAssignment = 65535,
                    },
                },
            },
        }, new CustomResourceOptions
        {
            Provider = google_beta,
        });
    }

}
package main

import (
	"github.com/pulumi/pulumi-gcp/sdk/v6/go/gcp/compute"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		debian9, err := compute.LookupImage(ctx, &compute.LookupImageArgs{
			Family:  pulumi.StringRef("debian-9"),
			Project: pulumi.StringRef("debian-cloud"),
		}, nil)
		if err != nil {
			return err
		}
		defaultInstanceTemplate, err := compute.NewInstanceTemplate(ctx, "defaultInstanceTemplate", &compute.InstanceTemplateArgs{
			MachineType:  pulumi.String("e2-medium"),
			CanIpForward: pulumi.Bool(false),
			Tags: pulumi.StringArray{
				pulumi.String("foo"),
				pulumi.String("bar"),
			},
			Disks: compute.InstanceTemplateDiskArray{
				&compute.InstanceTemplateDiskArgs{
					SourceImage: pulumi.String(debian9.Id),
				},
			},
			NetworkInterfaces: compute.InstanceTemplateNetworkInterfaceArray{
				&compute.InstanceTemplateNetworkInterfaceArgs{
					Network: pulumi.String("default"),
				},
			},
			Metadata: pulumi.AnyMap{
				"foo": pulumi.Any("bar"),
			},
			ServiceAccount: &compute.InstanceTemplateServiceAccountArgs{
				Scopes: pulumi.StringArray{
					pulumi.String("userinfo-email"),
					pulumi.String("compute-ro"),
					pulumi.String("storage-ro"),
				},
			},
		}, pulumi.Provider(google_beta))
		if err != nil {
			return err
		}
		defaultTargetPool, err := compute.NewTargetPool(ctx, "defaultTargetPool", nil, pulumi.Provider(google_beta))
		if err != nil {
			return err
		}
		defaultInstanceGroupManager, err := compute.NewInstanceGroupManager(ctx, "defaultInstanceGroupManager", &compute.InstanceGroupManagerArgs{
			Zone: pulumi.String("us-central1-f"),
			Versions: compute.InstanceGroupManagerVersionArray{
				&compute.InstanceGroupManagerVersionArgs{
					InstanceTemplate: defaultInstanceTemplate.ID(),
					Name:             pulumi.String("primary"),
				},
			},
			TargetPools: pulumi.StringArray{
				defaultTargetPool.ID(),
			},
			BaseInstanceName: pulumi.String("autoscaler-sample"),
		}, pulumi.Provider(google_beta))
		if err != nil {
			return err
		}
		_, err = compute.NewAutoscaler(ctx, "defaultAutoscaler", &compute.AutoscalerArgs{
			Zone:   pulumi.String("us-central1-f"),
			Target: defaultInstanceGroupManager.ID(),
			AutoscalingPolicy: &compute.AutoscalerAutoscalingPolicyArgs{
				MaxReplicas:    pulumi.Int(5),
				MinReplicas:    pulumi.Int(1),
				CooldownPeriod: pulumi.Int(60),
				Metrics: compute.AutoscalerAutoscalingPolicyMetricArray{
					&compute.AutoscalerAutoscalingPolicyMetricArgs{
						Name:                     pulumi.String("pubsub.googleapis.com/subscription/num_undelivered_messages"),
						Filter:                   pulumi.String("resource.type = pubsub_subscription AND resource.label.subscription_id = our-subscription"),
						SingleInstanceAssignment: pulumi.Float64(65535),
					},
				},
			},
		}, pulumi.Provider(google_beta))
		if err != nil {
			return err
		}
		return nil
	})
}
package generated_program;

import java.util.*;
import java.io.*;
import java.nio.*;
import com.pulumi.*;

public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        final var debian9 = Output.of(ComputeFunctions.getImage(GetImageArgs.builder()
            .family("debian-9")
            .project("debian-cloud")
            .build()));

        var defaultInstanceTemplate = new InstanceTemplate("defaultInstanceTemplate", InstanceTemplateArgs.builder()        
            .machineType("e2-medium")
            .canIpForward(false)
            .tags(            
                "foo",
                "bar")
            .disks(InstanceTemplateDisk.builder()
                .sourceImage(debian9.apply(getImageResult -> getImageResult.getId()))
                .build())
            .networkInterfaces(InstanceTemplateNetworkInterface.builder()
                .network("default")
                .build())
            .metadata(Map.of("foo", "bar"))
            .serviceAccount(InstanceTemplateServiceAccount.builder()
                .scopes(                
                    "userinfo-email",
                    "compute-ro",
                    "storage-ro")
                .build())
            .build());

        var defaultTargetPool = new TargetPool("defaultTargetPool");

        var defaultInstanceGroupManager = new InstanceGroupManager("defaultInstanceGroupManager", InstanceGroupManagerArgs.builder()        
            .zone("us-central1-f")
            .versions(InstanceGroupManagerVersion.builder()
                .instanceTemplate(defaultInstanceTemplate.getId())
                .name("primary")
                .build())
            .targetPools(defaultTargetPool.getId())
            .baseInstanceName("autoscaler-sample")
            .build());

        var defaultAutoscaler = new Autoscaler("defaultAutoscaler", AutoscalerArgs.builder()        
            .zone("us-central1-f")
            .target(defaultInstanceGroupManager.getId())
            .autoscalingPolicy(AutoscalerAutoscalingPolicy.builder()
                .maxReplicas(5)
                .minReplicas(1)
                .cooldownPeriod(60)
                .metrics(AutoscalerAutoscalingPolicyMetric.builder()
                    .name("pubsub.googleapis.com/subscription/num_undelivered_messages")
                    .filter("resource.type = pubsub_subscription AND resource.label.subscription_id = our-subscription")
                    .singleInstanceAssignment(65535)
                    .build())
                .build())
            .build());

        }
}
import pulumi
import pulumi_gcp as gcp

debian9 = gcp.compute.get_image(family="debian-9",
    project="debian-cloud")
default_instance_template = gcp.compute.InstanceTemplate("defaultInstanceTemplate",
    machine_type="e2-medium",
    can_ip_forward=False,
    tags=[
        "foo",
        "bar",
    ],
    disks=[gcp.compute.InstanceTemplateDiskArgs(
        source_image=debian9.id,
    )],
    network_interfaces=[gcp.compute.InstanceTemplateNetworkInterfaceArgs(
        network="default",
    )],
    metadata={
        "foo": "bar",
    },
    service_account=gcp.compute.InstanceTemplateServiceAccountArgs(
        scopes=[
            "userinfo-email",
            "compute-ro",
            "storage-ro",
        ],
    ),
    opts=pulumi.ResourceOptions(provider=google_beta))
default_target_pool = gcp.compute.TargetPool("defaultTargetPool", opts=pulumi.ResourceOptions(provider=google_beta))
default_instance_group_manager = gcp.compute.InstanceGroupManager("defaultInstanceGroupManager",
    zone="us-central1-f",
    versions=[gcp.compute.InstanceGroupManagerVersionArgs(
        instance_template=default_instance_template.id,
        name="primary",
    )],
    target_pools=[default_target_pool.id],
    base_instance_name="autoscaler-sample",
    opts=pulumi.ResourceOptions(provider=google_beta))
default_autoscaler = gcp.compute.Autoscaler("defaultAutoscaler",
    zone="us-central1-f",
    target=default_instance_group_manager.id,
    autoscaling_policy=gcp.compute.AutoscalerAutoscalingPolicyArgs(
        max_replicas=5,
        min_replicas=1,
        cooldown_period=60,
        metrics=[gcp.compute.AutoscalerAutoscalingPolicyMetricArgs(
            name="pubsub.googleapis.com/subscription/num_undelivered_messages",
            filter="resource.type = pubsub_subscription AND resource.label.subscription_id = our-subscription",
            single_instance_assignment=65535,
        )],
    ),
    opts=pulumi.ResourceOptions(provider=google_beta))
import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";

const debian9 = gcp.compute.getImage({
    family: "debian-9",
    project: "debian-cloud",
});
const defaultInstanceTemplate = new gcp.compute.InstanceTemplate("defaultInstanceTemplate", {
    machineType: "e2-medium",
    canIpForward: false,
    tags: [
        "foo",
        "bar",
    ],
    disks: [{
        sourceImage: debian9.then(debian9 => debian9.id),
    }],
    networkInterfaces: [{
        network: "default",
    }],
    metadata: {
        foo: "bar",
    },
    serviceAccount: {
        scopes: [
            "userinfo-email",
            "compute-ro",
            "storage-ro",
        ],
    },
}, {
    provider: google_beta,
});
const defaultTargetPool = new gcp.compute.TargetPool("defaultTargetPool", {}, {
    provider: google_beta,
});
const defaultInstanceGroupManager = new gcp.compute.InstanceGroupManager("defaultInstanceGroupManager", {
    zone: "us-central1-f",
    versions: [{
        instanceTemplate: defaultInstanceTemplate.id,
        name: "primary",
    }],
    targetPools: [defaultTargetPool.id],
    baseInstanceName: "autoscaler-sample",
}, {
    provider: google_beta,
});
const defaultAutoscaler = new gcp.compute.Autoscaler("defaultAutoscaler", {
    zone: "us-central1-f",
    target: defaultInstanceGroupManager.id,
    autoscalingPolicy: {
        maxReplicas: 5,
        minReplicas: 1,
        cooldownPeriod: 60,
        metrics: [{
            name: "pubsub.googleapis.com/subscription/num_undelivered_messages",
            filter: "resource.type = pubsub_subscription AND resource.label.subscription_id = our-subscription",
            singleInstanceAssignment: 65535,
        }],
    },
}, {
    provider: google_beta,
});
resources:
  defaultAutoscaler:
    type: gcp:compute:Autoscaler
    properties:
      zone: us-central1-f
      target: ${defaultInstanceGroupManager.id}
      autoscalingPolicy:
        maxReplicas: 5
        minReplicas: 1
        cooldownPeriod: 60
        metrics:
          - name: pubsub.googleapis.com/subscription/num_undelivered_messages
            filter: resource.type = pubsub_subscription AND resource.label.subscription_id = our-subscription
            singleInstanceAssignment: 65535
    options:
      provider: ${["google-beta"]}
  defaultInstanceTemplate:
    type: gcp:compute:InstanceTemplate
    properties:
      machineType: e2-medium
      canIpForward: false
      tags:
        - foo
        - bar
      disks:
        - sourceImage: ${debian9.id}
      networkInterfaces:
        - network: default
      metadata:
        foo: bar
      serviceAccount:
        scopes:
          - userinfo-email
          - compute-ro
          - storage-ro
    options:
      provider: ${["google-beta"]}
  defaultTargetPool:
    type: gcp:compute:TargetPool
    options:
      provider: ${["google-beta"]}
  defaultInstanceGroupManager:
    type: gcp:compute:InstanceGroupManager
    properties:
      zone: us-central1-f
      versions:
        - instanceTemplate: ${defaultInstanceTemplate.id}
          name: primary
      targetPools:
        - ${defaultTargetPool.id}
      baseInstanceName: autoscaler-sample
    options:
      provider: ${["google-beta"]}
variables:
  debian9:
    Fn::Invoke:
      Function: gcp:compute:getImage
      Arguments:
        family: debian-9
        project: debian-cloud
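
The examples above pass resource options with an explicit google_beta provider; that provider instance is assumed to be defined elsewhere in the program. A minimal TypeScript sketch of such a provider follows (the provider name and project value are illustrative assumptions, not part of the original example):

import * as gcp from "@pulumi/gcp";

// Hypothetical explicit provider instance referenced as `google_beta` above.
// The project value is a placeholder and would normally come from stack config.
const google_beta = new gcp.Provider("google-beta", {
    project: "my-project-id",
});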

Autoscaler Basic

using Pulumi;
using Gcp = Pulumi.Gcp;

class MyStack : Stack
{
    public MyStack()
    {
        var debian9 = Output.Create(Gcp.Compute.GetImage.InvokeAsync(new Gcp.Compute.GetImageArgs
        {
            Family = "debian-9",
            Project = "debian-cloud",
        }));
        var foobarInstanceTemplate = new Gcp.Compute.InstanceTemplate("foobarInstanceTemplate", new Gcp.Compute.InstanceTemplateArgs
        {
            MachineType = "e2-medium",
            CanIpForward = false,
            Tags = 
            {
                "foo",
                "bar",
            },
            Disks = 
            {
                new Gcp.Compute.Inputs.InstanceTemplateDiskArgs
                {
                    SourceImage = debian9.Apply(debian9 => debian9.Id),
                },
            },
            NetworkInterfaces = 
            {
                new Gcp.Compute.Inputs.InstanceTemplateNetworkInterfaceArgs
                {
                    Network = "default",
                },
            },
            Metadata = 
            {
                { "foo", "bar" },
            },
            ServiceAccount = new Gcp.Compute.Inputs.InstanceTemplateServiceAccountArgs
            {
                Scopes = 
                {
                    "userinfo-email",
                    "compute-ro",
                    "storage-ro",
                },
            },
        });
        var foobarTargetPool = new Gcp.Compute.TargetPool("foobarTargetPool", new Gcp.Compute.TargetPoolArgs
        {
        });
        var foobarInstanceGroupManager = new Gcp.Compute.InstanceGroupManager("foobarInstanceGroupManager", new Gcp.Compute.InstanceGroupManagerArgs
        {
            Zone = "us-central1-f",
            Versions = 
            {
                new Gcp.Compute.Inputs.InstanceGroupManagerVersionArgs
                {
                    InstanceTemplate = foobarInstanceTemplate.Id,
                    Name = "primary",
                },
            },
            TargetPools = 
            {
                foobarTargetPool.Id,
            },
            BaseInstanceName = "foobar",
        });
        var foobarAutoscaler = new Gcp.Compute.Autoscaler("foobarAutoscaler", new Gcp.Compute.AutoscalerArgs
        {
            Zone = "us-central1-f",
            Target = foobarInstanceGroupManager.Id,
            AutoscalingPolicy = new Gcp.Compute.Inputs.AutoscalerAutoscalingPolicyArgs
            {
                MaxReplicas = 5,
                MinReplicas = 1,
                CooldownPeriod = 60,
                CpuUtilization = new Gcp.Compute.Inputs.AutoscalerAutoscalingPolicyCpuUtilizationArgs
                {
                    Target = 0.5,
                },
            },
        });
    }

}
package main

import (
	"github.com/pulumi/pulumi-gcp/sdk/v6/go/gcp/compute"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		debian9, err := compute.LookupImage(ctx, &compute.LookupImageArgs{
			Family:  pulumi.StringRef("debian-9"),
			Project: pulumi.StringRef("debian-cloud"),
		}, nil)
		if err != nil {
			return err
		}
		foobarInstanceTemplate, err := compute.NewInstanceTemplate(ctx, "foobarInstanceTemplate", &compute.InstanceTemplateArgs{
			MachineType:  pulumi.String("e2-medium"),
			CanIpForward: pulumi.Bool(false),
			Tags: pulumi.StringArray{
				pulumi.String("foo"),
				pulumi.String("bar"),
			},
			Disks: compute.InstanceTemplateDiskArray{
				&compute.InstanceTemplateDiskArgs{
					SourceImage: pulumi.String(debian9.Id),
				},
			},
			NetworkInterfaces: compute.InstanceTemplateNetworkInterfaceArray{
				&compute.InstanceTemplateNetworkInterfaceArgs{
					Network: pulumi.String("default"),
				},
			},
			Metadata: pulumi.AnyMap{
				"foo": pulumi.Any("bar"),
			},
			ServiceAccount: &compute.InstanceTemplateServiceAccountArgs{
				Scopes: pulumi.StringArray{
					pulumi.String("userinfo-email"),
					pulumi.String("compute-ro"),
					pulumi.String("storage-ro"),
				},
			},
		})
		if err != nil {
			return err
		}
		foobarTargetPool, err := compute.NewTargetPool(ctx, "foobarTargetPool", nil)
		if err != nil {
			return err
		}
		foobarInstanceGroupManager, err := compute.NewInstanceGroupManager(ctx, "foobarInstanceGroupManager", &compute.InstanceGroupManagerArgs{
			Zone: pulumi.String("us-central1-f"),
			Versions: compute.InstanceGroupManagerVersionArray{
				&compute.InstanceGroupManagerVersionArgs{
					InstanceTemplate: foobarInstanceTemplate.ID(),
					Name:             pulumi.String("primary"),
				},
			},
			TargetPools: pulumi.StringArray{
				foobarTargetPool.ID(),
			},
			BaseInstanceName: pulumi.String("foobar"),
		})
		if err != nil {
			return err
		}
		_, err = compute.NewAutoscaler(ctx, "foobarAutoscaler", &compute.AutoscalerArgs{
			Zone:   pulumi.String("us-central1-f"),
			Target: foobarInstanceGroupManager.ID(),
			AutoscalingPolicy: &compute.AutoscalerAutoscalingPolicyArgs{
				MaxReplicas:    pulumi.Int(5),
				MinReplicas:    pulumi.Int(1),
				CooldownPeriod: pulumi.Int(60),
				CpuUtilization: &compute.AutoscalerAutoscalingPolicyCpuUtilizationArgs{
					Target: pulumi.Float64(0.5),
				},
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}
package generated_program;

import java.util.*;
import java.io.*;
import java.nio.*;
import com.pulumi.*;

public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        final var debian9 = Output.of(ComputeFunctions.getImage(GetImageArgs.builder()
            .family("debian-9")
            .project("debian-cloud")
            .build()));

        var foobarInstanceTemplate = new InstanceTemplate("foobarInstanceTemplate", InstanceTemplateArgs.builder()        
            .machineType("e2-medium")
            .canIpForward(false)
            .tags(            
                "foo",
                "bar")
            .disks(InstanceTemplateDisk.builder()
                .sourceImage(debian9.apply(getImageResult -> getImageResult.getId()))
                .build())
            .networkInterfaces(InstanceTemplateNetworkInterface.builder()
                .network("default")
                .build())
            .metadata(Map.of("foo", "bar"))
            .serviceAccount(InstanceTemplateServiceAccount.builder()
                .scopes(                
                    "userinfo-email",
                    "compute-ro",
                    "storage-ro")
                .build())
            .build());

        var foobarTargetPool = new TargetPool("foobarTargetPool");

        var foobarInstanceGroupManager = new InstanceGroupManager("foobarInstanceGroupManager", InstanceGroupManagerArgs.builder()        
            .zone("us-central1-f")
            .versions(InstanceGroupManagerVersion.builder()
                .instanceTemplate(foobarInstanceTemplate.getId())
                .name("primary")
                .build())
            .targetPools(foobarTargetPool.getId())
            .baseInstanceName("foobar")
            .build());

        var foobarAutoscaler = new Autoscaler("foobarAutoscaler", AutoscalerArgs.builder()        
            .zone("us-central1-f")
            .target(foobarInstanceGroupManager.getId())
            .autoscalingPolicy(AutoscalerAutoscalingPolicy.builder()
                .maxReplicas(5)
                .minReplicas(1)
                .cooldownPeriod(60)
                .cpuUtilization(AutoscalerAutoscalingPolicyCpuUtilization.builder()
                    .target(0.5)
                    .build())
                .build())
            .build());

        }
}
import pulumi
import pulumi_gcp as gcp

debian9 = gcp.compute.get_image(family="debian-9",
    project="debian-cloud")
foobar_instance_template = gcp.compute.InstanceTemplate("foobarInstanceTemplate",
    machine_type="e2-medium",
    can_ip_forward=False,
    tags=[
        "foo",
        "bar",
    ],
    disks=[gcp.compute.InstanceTemplateDiskArgs(
        source_image=debian9.id,
    )],
    network_interfaces=[gcp.compute.InstanceTemplateNetworkInterfaceArgs(
        network="default",
    )],
    metadata={
        "foo": "bar",
    },
    service_account=gcp.compute.InstanceTemplateServiceAccountArgs(
        scopes=[
            "userinfo-email",
            "compute-ro",
            "storage-ro",
        ],
    ))
foobar_target_pool = gcp.compute.TargetPool("foobarTargetPool")
foobar_instance_group_manager = gcp.compute.InstanceGroupManager("foobarInstanceGroupManager",
    zone="us-central1-f",
    versions=[gcp.compute.InstanceGroupManagerVersionArgs(
        instance_template=foobar_instance_template.id,
        name="primary",
    )],
    target_pools=[foobar_target_pool.id],
    base_instance_name="foobar")
foobar_autoscaler = gcp.compute.Autoscaler("foobarAutoscaler",
    zone="us-central1-f",
    target=foobar_instance_group_manager.id,
    autoscaling_policy=gcp.compute.AutoscalerAutoscalingPolicyArgs(
        max_replicas=5,
        min_replicas=1,
        cooldown_period=60,
        cpu_utilization=gcp.compute.AutoscalerAutoscalingPolicyCpuUtilizationArgs(
            target=0.5,
        ),
    ))
import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";

const debian9 = gcp.compute.getImage({
    family: "debian-9",
    project: "debian-cloud",
});
const foobarInstanceTemplate = new gcp.compute.InstanceTemplate("foobarInstanceTemplate", {
    machineType: "e2-medium",
    canIpForward: false,
    tags: [
        "foo",
        "bar",
    ],
    disks: [{
        sourceImage: debian9.then(debian9 => debian9.id),
    }],
    networkInterfaces: [{
        network: "default",
    }],
    metadata: {
        foo: "bar",
    },
    serviceAccount: {
        scopes: [
            "userinfo-email",
            "compute-ro",
            "storage-ro",
        ],
    },
});
const foobarTargetPool = new gcp.compute.TargetPool("foobarTargetPool", {});
const foobarInstanceGroupManager = new gcp.compute.InstanceGroupManager("foobarInstanceGroupManager", {
    zone: "us-central1-f",
    versions: [{
        instanceTemplate: foobarInstanceTemplate.id,
        name: "primary",
    }],
    targetPools: [foobarTargetPool.id],
    baseInstanceName: "foobar",
});
const foobarAutoscaler = new gcp.compute.Autoscaler("foobarAutoscaler", {
    zone: "us-central1-f",
    target: foobarInstanceGroupManager.id,
    autoscalingPolicy: {
        maxReplicas: 5,
        minReplicas: 1,
        cooldownPeriod: 60,
        cpuUtilization: {
            target: 0.5,
        },
    },
});
resources:
  foobarAutoscaler:
    type: gcp:compute:Autoscaler
    properties:
      zone: us-central1-f
      target: ${foobarInstanceGroupManager.id}
      autoscalingPolicy:
        maxReplicas: 5
        minReplicas: 1
        cooldownPeriod: 60
        cpuUtilization:
          target: 0.5
  foobarInstanceTemplate:
    type: gcp:compute:InstanceTemplate
    properties:
      machineType: e2-medium
      canIpForward: false
      tags:
        - foo
        - bar
      disks:
        - sourceImage: ${debian9.id}
      networkInterfaces:
        - network: default
      metadata:
        foo: bar
      serviceAccount:
        scopes:
          - userinfo-email
          - compute-ro
          - storage-ro
  foobarTargetPool:
    type: gcp:compute:TargetPool
  foobarInstanceGroupManager:
    type: gcp:compute:InstanceGroupManager
    properties:
      zone: us-central1-f
      versions:
        - instanceTemplate: ${foobarInstanceTemplate.id}
          name: primary
      targetPools:
        - ${foobarTargetPool.id}
      baseInstanceName: foobar
variables:
  debian9:
    Fn::Invoke:
      Function: gcp:compute:getImage
      Arguments:
        family: debian-9
        project: debian-cloud

Create an Autoscaler Resource

new Autoscaler(name: string, args: AutoscalerArgs, opts?: CustomResourceOptions);
@overload
def Autoscaler(resource_name: str,
               opts: Optional[ResourceOptions] = None,
               autoscaling_policy: Optional[AutoscalerAutoscalingPolicyArgs] = None,
               description: Optional[str] = None,
               name: Optional[str] = None,
               project: Optional[str] = None,
               target: Optional[str] = None,
               zone: Optional[str] = None)
@overload
def Autoscaler(resource_name: str,
               args: AutoscalerArgs,
               opts: Optional[ResourceOptions] = None)
func NewAutoscaler(ctx *Context, name string, args AutoscalerArgs, opts ...ResourceOption) (*Autoscaler, error)
public Autoscaler(string name, AutoscalerArgs args, CustomResourceOptions? opts = null)
public Autoscaler(String name, AutoscalerArgs args)
public Autoscaler(String name, AutoscalerArgs args, CustomResourceOptions options)
type: gcp:compute:Autoscaler
properties: # The arguments to resource properties.
options: # Bag of options to control resource's behavior.

name string
The unique name of the resource.
args AutoscalerArgs
The arguments to resource properties.
opts CustomResourceOptions
Bag of options to control resource's behavior.
resource_name str
The unique name of the resource.
args AutoscalerArgs
The arguments to resource properties.
opts ResourceOptions
Bag of options to control resource's behavior.
ctx Context
Context object for the current deployment.
name string
The unique name of the resource.
args AutoscalerArgs
The arguments to resource properties.
opts ResourceOption
Bag of options to control resource's behavior.
name string
The unique name of the resource.
args AutoscalerArgs
The arguments to resource properties.
opts CustomResourceOptions
Bag of options to control resource's behavior.
name String
The unique name of the resource.
args AutoscalerArgs
The arguments to resource properties.
options CustomResourceOptions
Bag of options to control resource's behavior.
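
As an illustration of the signatures above, a minimal TypeScript constructor call might look like the following sketch; the resource name and the target URL are placeholder assumptions, and in a real program target would usually reference an instance group manager created in the same stack:

import * as gcp from "@pulumi/gcp";

// name: unique resource name; args: the resource properties; opts are omitted here.
const example = new gcp.compute.Autoscaler("example-autoscaler", {
    zone: "us-central1-f",
    // Placeholder URL; in practice this is usually `someInstanceGroupManager.id`.
    target: "https://www.googleapis.com/compute/v1/projects/my-project/zones/us-central1-f/instanceGroupManagers/example-igm",
    autoscalingPolicy: {
        minReplicas: 1,
        maxReplicas: 3,
        cpuUtilization: {
            target: 0.6,
        },
    },
});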

Autoscaler Resource Properties

To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.

Inputs

The Autoscaler resource accepts the following input properties:

AutoscalingPolicy AutoscalerAutoscalingPolicyArgs

The configuration parameters for the autoscaling algorithm. You can define one or more of the policies for an autoscaler: cpuUtilization, customMetricUtilizations, and loadBalancingUtilization. If none of these are specified, the default will be to autoscale based on cpuUtilization to 0.6 or 60%. Structure is documented below.

Target string

URL of the managed instance group that this autoscaler will scale.

Description string

An optional description of this resource.

Name string

The identifier for this object. Format specified above.

Project string

The ID of the project in which the resource belongs. If it is not provided, the provider project is used.

Zone string

URL of the zone where the instance group resides.

AutoscalingPolicy AutoscalerAutoscalingPolicyArgs

The configuration parameters for the autoscaling algorithm. You can define one or more of the policies for an autoscaler: cpuUtilization, customMetricUtilizations, and loadBalancingUtilization. If none of these are specified, the default will be to autoscale based on cpuUtilization to 0.6 or 60%. Structure is documented below.

Target string

URL of the managed instance group that this autoscaler will scale.

Description string

An optional description of this resource.

Name string

The identifier for this object. Format specified above.

Project string

The ID of the project in which the resource belongs. If it is not provided, the provider project is used.

Zone string

URL of the zone where the instance group resides.

autoscalingPolicy AutoscalerAutoscalingPolicyArgs

The configuration parameters for the autoscaling algorithm. You can define one or more of the policies for an autoscaler: cpuUtilization, customMetricUtilizations, and loadBalancingUtilization. If none of these are specified, the default will be to autoscale based on cpuUtilization to 0.6 or 60%. Structure is documented below.

target String

URL of the managed instance group that this autoscaler will scale.

description String

An optional description of this resource.

name String

The identifier for this object. Format specified above.

project String

The ID of the project in which the resource belongs. If it is not provided, the provider project is used.

zone String

URL of the zone where the instance group resides.

autoscalingPolicy AutoscalerAutoscalingPolicyArgs

The configuration parameters for the autoscaling algorithm. You can define one or more of the policies for an autoscaler: cpuUtilization, customMetricUtilizations, and loadBalancingUtilization. If none of these are specified, the default will be to autoscale based on cpuUtilization to 0.6 or 60%. Structure is documented below.

target string

URL of the managed instance group that this autoscaler will scale.

description string

An optional description of this resource.

name string

The identifier for this object. Format specified above.

project string

The ID of the project in which the resource belongs. If it is not provided, the provider project is used.

zone string

URL of the zone where the instance group resides.

autoscaling_policy AutoscalerAutoscalingPolicyArgs

The configuration parameters for the autoscaling algorithm. You can define one or more of the policies for an autoscaler: cpuUtilization, customMetricUtilizations, and loadBalancingUtilization. If none of these are specified, the default will be to autoscale based on cpuUtilization to 0.6 or 60%. Structure is documented below.

target str

URL of the managed instance group that this autoscaler will scale.

description str

An optional description of this resource.

name str

The identifier for this object. Format specified above.

project str

The ID of the project in which the resource belongs. If it is not provided, the provider project is used.

zone str

URL of the zone where the instance group resides.

autoscalingPolicy Property Map

The configuration parameters for the autoscaling algorithm. You can define one or more of the policies for an autoscaler: cpuUtilization, customMetricUtilizations, and loadBalancingUtilization. If none of these are specified, the default will be to autoscale based on cpuUtilization to 0.6 or 60%. Structure is documented below.

target String

URL of the managed instance group that this autoscaler will scale.

description String

An optional description of this resource.

name String

The identifier for this object. Format specified above.

project String

The ID of the project in which the resource belongs. If it is not provided, the provider project is used.

zone String

URL of the zone where the instance group resides.
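
For example, per the autoscalingPolicy description above, a policy that sets only replica bounds relies on the default CPU utilization target of 0.6 (60%). A minimal TypeScript sketch, assuming a placeholder instance group manager URL:

import * as gcp from "@pulumi/gcp";

// No cpuUtilization, metrics, or loadBalancingUtilization block is set, so the
// autoscaler falls back to targeting 60% average CPU utilization.
const minimal = new gcp.compute.Autoscaler("minimal-autoscaler", {
    zone: "us-central1-f",
    // Placeholder URL; in practice this is usually the id of an InstanceGroupManager.
    target: "https://www.googleapis.com/compute/v1/projects/my-project/zones/us-central1-f/instanceGroupManagers/example-igm",
    autoscalingPolicy: {
        minReplicas: 1,
        maxReplicas: 5,
    },
});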

Outputs

All input properties are implicitly available as output properties. Additionally, the Autoscaler resource produces the following output properties:

CreationTimestamp string

Creation timestamp in RFC3339 text format.

Id string

The provider-assigned unique ID for this managed resource.

SelfLink string

The URI of the created resource.

CreationTimestamp string

Creation timestamp in RFC3339 text format.

Id string

The provider-assigned unique ID for this managed resource.

SelfLink string

The URI of the created resource.

creationTimestamp String

Creation timestamp in RFC3339 text format.

id String

The provider-assigned unique ID for this managed resource.

selfLink String

The URI of the created resource.

creationTimestamp string

Creation timestamp in RFC3339 text format.

id string

The provider-assigned unique ID for this managed resource.

selfLink string

The URI of the created resource.

creation_timestamp str

Creation timestamp in RFC3339 text format.

id str

The provider-assigned unique ID for this managed resource.

self_link str

The URI of the created resource.

creationTimestamp String

Creation timestamp in RFC3339 text format.

id String

The provider-assigned unique ID for this managed resource.

selfLink String

The URI of the created resource.
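
For instance, the output properties above can be exported as stack outputs from a TypeScript program (foobarAutoscaler refers to the resource created in the earlier example):

// Stack outputs exposing the provider-assigned output properties.
export const autoscalerSelfLink = foobarAutoscaler.selfLink;
export const autoscalerCreatedAt = foobarAutoscaler.creationTimestamp;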

Look up an Existing Autoscaler Resource

Get an existing Autoscaler resource’s state with the given name, ID, and optional extra properties used to qualify the lookup.

public static get(name: string, id: Input<ID>, state?: AutoscalerState, opts?: CustomResourceOptions): Autoscaler
@staticmethod
def get(resource_name: str,
        id: str,
        opts: Optional[ResourceOptions] = None,
        autoscaling_policy: Optional[AutoscalerAutoscalingPolicyArgs] = None,
        creation_timestamp: Optional[str] = None,
        description: Optional[str] = None,
        name: Optional[str] = None,
        project: Optional[str] = None,
        self_link: Optional[str] = None,
        target: Optional[str] = None,
        zone: Optional[str] = None) -> Autoscaler
func GetAutoscaler(ctx *Context, name string, id IDInput, state *AutoscalerState, opts ...ResourceOption) (*Autoscaler, error)
public static Autoscaler Get(string name, Input<string> id, AutoscalerState? state, CustomResourceOptions? opts = null)
public static Autoscaler get(String name, Output<String> id, AutoscalerState state, CustomResourceOptions options)
Resource lookup is not supported in YAML
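
For example, in TypeScript an existing autoscaler can be looked up by its fully qualified resource ID; the project, zone, and autoscaler names below are placeholder assumptions:

import * as gcp from "@pulumi/gcp";

// Looks up an existing Autoscaler by logical name and provider ID; no new resource is created.
const existing = gcp.compute.Autoscaler.get(
    "existing-autoscaler",
    "projects/my-project/zones/us-central1-f/autoscalers/my-autoscaler");

export const existingTarget = existing.target;
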
name
The unique name of the resulting resource.
id
The unique provider ID of the resource to lookup.
state
Any extra arguments used during the lookup.
opts
A bag of options that control this resource's behavior.
resource_name
The unique name of the resulting resource.
id
The unique provider ID of the resource to lookup.
name
The unique name of the resulting resource.
id
The unique provider ID of the resource to lookup.
state
Any extra arguments used during the lookup.
opts
A bag of options that control this resource's behavior.
name
The unique name of the resulting resource.
id
The unique provider ID of the resource to lookup.
state
Any extra arguments used during the lookup.
opts
A bag of options that control this resource's behavior.
name
The unique name of the resulting resource.
id
The unique provider ID of the resource to lookup.
state
Any extra arguments used during the lookup.
opts
A bag of options that control this resource's behavior.
The following state arguments are supported:
AutoscalingPolicy AutoscalerAutoscalingPolicyArgs

The configuration parameters for the autoscaling algorithm. You can define one or more of the policies for an autoscaler: cpuUtilization, customMetricUtilizations, and loadBalancingUtilization. If none of these are specified, the default will be to autoscale based on cpuUtilization to 0.6 or 60%. Structure is documented below.

CreationTimestamp string

Creation timestamp in RFC3339 text format.

Description string

An optional description of this resource.

Name string

The identifier for this object. Format specified above.

Project string

The ID of the project in which the resource belongs. If it is not provided, the provider project is used.

SelfLink string

The URI of the created resource.

Target string

URL of the managed instance group that this autoscaler will scale.

Zone string

URL of the zone where the instance group resides.

AutoscalingPolicy AutoscalerAutoscalingPolicyArgs

The configuration parameters for the autoscaling algorithm. You can define one or more of the policies for an autoscaler: cpuUtilization, customMetricUtilizations, and loadBalancingUtilization. If none of these are specified, the default will be to autoscale based on cpuUtilization to 0.6 or 60%. Structure is documented below.

CreationTimestamp string

Creation timestamp in RFC3339 text format.

Description string

An optional description of this resource.

Name string

The identifier for this object. Format specified above.

Project string

The ID of the project in which the resource belongs. If it is not provided, the provider project is used.

SelfLink string

The URI of the created resource.

Target string

URL of the managed instance group that this autoscaler will scale.

Zone string

URL of the zone where the instance group resides.

autoscalingPolicy AutoscalerAutoscalingPolicyArgs

The configuration parameters for the autoscaling algorithm. You can define one or more of the policies for an autoscaler: cpuUtilization, customMetricUtilizations, and loadBalancingUtilization. If none of these are specified, the default will be to autoscale based on cpuUtilization to 0.6 or 60%. Structure is documented below.

creationTimestamp String

Creation timestamp in RFC3339 text format.

description String

An optional description of this resource.

name String

The identifier for this object. Format specified above.

project String

The ID of the project in which the resource belongs. If it is not provided, the provider project is used.

selfLink String

The URI of the created resource.

target String

URL of the managed instance group that this autoscaler will scale.

zone String

URL of the zone where the instance group resides.

autoscalingPolicy AutoscalerAutoscalingPolicyArgs

The configuration parameters for the autoscaling algorithm. You can define one or more of the policies for an autoscaler: cpuUtilization, customMetricUtilizations, and loadBalancingUtilization. If none of these are specified, the default will be to autoscale based on cpuUtilization to 0.6 or 60%. Structure is documented below.

creationTimestamp string

Creation timestamp in RFC3339 text format.

description string

An optional description of this resource.

name string

The identifier for this object. Format specified above.

project string

The ID of the project in which the resource belongs. If it is not provided, the provider project is used.

selfLink string

The URI of the created resource.

target string

URL of the managed instance group that this autoscaler will scale.

zone string

URL of the zone where the instance group resides.

autoscaling_policy AutoscalerAutoscalingPolicyArgs

The configuration parameters for the autoscaling algorithm. You can define one or more of the policies for an autoscaler: cpuUtilization, customMetricUtilizations, and loadBalancingUtilization. If none of these are specified, the default will be to autoscale based on cpuUtilization to 0.6 or 60%. Structure is documented below.

creation_timestamp str

Creation timestamp in RFC3339 text format.

description str

An optional description of this resource.

name str

The identifier for this object. Format specified above.

project str

The ID of the project in which the resource belongs. If it is not provided, the provider project is used.

self_link str

The URI of the created resource.

target str

URL of the managed instance group that this autoscaler will scale.

zone str

URL of the zone where the instance group resides.

autoscalingPolicy Property Map

The configuration parameters for the autoscaling algorithm. You can define one or more of the policies for an autoscaler: cpuUtilization, customMetricUtilizations, and loadBalancingUtilization. If none of these are specified, the default will be to autoscale based on cpuUtilization to 0.6 or 60%. Structure is documented below.

creationTimestamp String

Creation timestamp in RFC3339 text format.

description String

An optional description of this resource.

name String

The identifier for this object. Format specified above.

project String

The ID of the project in which the resource belongs. If it is not provided, the provider project is used.

selfLink String

The URI of the created resource.

target String

URL of the managed instance group that this autoscaler will scale.

zone String

URL of the zone where the instance group resides.

Supporting Types

AutoscalerAutoscalingPolicy

MaxReplicas int

The maximum number of instances that the autoscaler can scale up to. This is required when creating or updating an autoscaler. The maximum number of replicas should not be lower than the minimum number of replicas.

MinReplicas int

The minimum number of replicas that the autoscaler can scale down to. This cannot be less than 0. If not provided, the autoscaler will choose a default value depending on the maximum number of instances allowed.

CooldownPeriod int

The number of seconds that the autoscaler should wait before it starts collecting information from a new instance. This prevents the autoscaler from collecting information when the instance is initializing, during which the collected usage would not be reliable. The default time the autoscaler waits is 60 seconds. Virtual machine initialization times might vary because of numerous factors. We recommend that you test how long an instance may take to initialize. To do this, create an instance and time the startup process.

CpuUtilization AutoscalerAutoscalingPolicyCpuUtilization

Defines the CPU utilization policy that allows the autoscaler to scale based on the average CPU utilization of a managed instance group. Structure is documented below.

LoadBalancingUtilization AutoscalerAutoscalingPolicyLoadBalancingUtilization

Configuration parameters of autoscaling based on a load balancer. Structure is documented below.

Metrics List<AutoscalerAutoscalingPolicyMetric>

Configuration parameters of autoscaling based on a custom metric. Structure is documented below.

Mode string

Defines operating mode for this policy. Default value is ON. Possible values are OFF, ONLY_UP, and ON.

ScaleDownControl AutoscalerAutoscalingPolicyScaleDownControl

Defines scale down controls to reduce the risk of response latency and outages due to abrupt scale-in events. Structure is documented below.

ScaleInControl AutoscalerAutoscalingPolicyScaleInControl

Defines scale in controls to reduce the risk of response latency and outages due to abrupt scale-in events. Structure is documented below.

ScalingSchedules List<AutoscalerAutoscalingPolicyScalingSchedule>

Scaling schedules defined for an autoscaler. Multiple schedules can be set on an autoscaler and they can overlap. Structure is documented below.

MaxReplicas int

The maximum number of instances that the autoscaler can scale up to. This is required when creating or updating an autoscaler. The maximum number of replicas should not be lower than the minimum number of replicas.

MinReplicas int

The minimum number of replicas that the autoscaler can scale down to. This cannot be less than 0. If not provided, the autoscaler will choose a default value depending on the maximum number of instances allowed.

CooldownPeriod int

The number of seconds that the autoscaler should wait before it starts collecting information from a new instance. This prevents the autoscaler from collecting information when the instance is initializing, during which the collected usage would not be reliable. The default time the autoscaler waits is 60 seconds. Virtual machine initialization times might vary because of numerous factors. We recommend that you test how long an instance may take to initialize. To do this, create an instance and time the startup process.

CpuUtilization AutoscalerAutoscalingPolicyCpuUtilization

Defines the CPU utilization policy that allows the autoscaler to scale based on the average CPU utilization of a managed instance group. Structure is documented below.

LoadBalancingUtilization AutoscalerAutoscalingPolicyLoadBalancingUtilization

Configuration parameters of autoscaling based on a load balancer. Structure is documented below.

Metrics []AutoscalerAutoscalingPolicyMetric

Configuration parameters of autoscaling based on a custom metric. Structure is documented below.

Mode string

Defines operating mode for this policy. Default value is ON. Possible values are OFF, ONLY_UP, and ON.

ScaleDownControl AutoscalerAutoscalingPolicyScaleDownControl

Defines scale down controls to reduce the risk of response latency and outages due to abrupt scale-in events. Structure is documented below.

ScaleInControl AutoscalerAutoscalingPolicyScaleInControl

Defines scale in controls to reduce the risk of response latency and outages due to abrupt scale-in events. Structure is documented below.

ScalingSchedules []AutoscalerAutoscalingPolicyScalingSchedule

Scaling schedules defined for an autoscaler. Multiple schedules can be set on an autoscaler and they can overlap. Structure is documented below.

maxReplicas Integer

The maximum number of instances that the autoscaler can scale up to. This is required when creating or updating an autoscaler. The maximum number of replicas should not be lower than the minimum number of replicas.

minReplicas Integer

The minimum number of replicas that the autoscaler can scale down to. This cannot be less than 0. If not provided, the autoscaler will choose a default value depending on the maximum number of instances allowed.

cooldownPeriod Integer

The number of seconds that the autoscaler should wait before it starts collecting information from a new instance. This prevents the autoscaler from collecting information when the instance is initializing, during which the collected usage would not be reliable. The default time the autoscaler waits is 60 seconds. Virtual machine initialization times might vary because of numerous factors. We recommend that you test how long an instance may take to initialize. To do this, create an instance and time the startup process.

cpuUtilization AutoscalerAutoscalingPolicyCpuUtilization

Defines the CPU utilization policy that allows the autoscaler to scale based on the average CPU utilization of a managed instance group. Structure is documented below.

loadBalancingUtilization AutoscalerAutoscalingPolicyLoadBalancingUtilization

Configuration parameters of autoscaling based on a load balancer. Structure is documented below.

metrics List<AutoscalerAutoscalingPolicyMetric>

Configuration parameters of autoscaling based on a custom metric. Structure is documented below.

mode String

Defines operating mode for this policy. Default value is ON. Possible values are OFF, ONLY_UP, and ON.

scaleDownControl AutoscalerAutoscalingPolicyScaleDownControl

Defines scale down controls to reduce the risk of response latency and outages due to abrupt scale-in events. Structure is documented below.

scaleInControl AutoscalerAutoscalingPolicyScaleInControl

Defines scale in controls to reduce the risk of response latency and outages due to abrupt scale-in events. Structure is documented below.

scalingSchedules List<AutoscalerAutoscalingPolicyScalingSchedule>

Scaling schedules defined for an autoscaler. Multiple schedules can be set on an autoscaler and they can overlap. Structure is documented below.

maxReplicas number

The maximum number of instances that the autoscaler can scale up to. This is required when creating or updating an autoscaler. The maximum number of replicas should not be lower than the minimum number of replicas.

minReplicas number

The minimum number of replicas that the autoscaler can scale down to. This cannot be less than 0. If not provided, the autoscaler will choose a default value depending on the maximum number of instances allowed.

cooldownPeriod number

The number of seconds that the autoscaler should wait before it starts collecting information from a new instance. This prevents the autoscaler from collecting information when the instance is initializing, during which the collected usage would not be reliable. The default time the autoscaler waits is 60 seconds. Virtual machine initialization times might vary because of numerous factors. We recommend that you test how long an instance may take to initialize. To do this, create an instance and time the startup process.

cpuUtilization AutoscalerAutoscalingPolicyCpuUtilization

Defines the CPU utilization policy that allows the autoscaler to scale based on the average CPU utilization of a managed instance group. Structure is documented below.

loadBalancingUtilization AutoscalerAutoscalingPolicyLoadBalancingUtilization

Configuration parameters of autoscaling based on a load balancer. Structure is documented below.

metrics AutoscalerAutoscalingPolicyMetric[]

Configuration parameters of autoscaling based on a custom metric. Structure is documented below.

mode string

Defines operating mode for this policy. Default value is ON. Possible values are OFF, ONLY_UP, and ON.

scaleDownControl AutoscalerAutoscalingPolicyScaleDownControl

Defines scale down controls to reduce the risk of response latency and outages due to abrupt scale-in events. Structure is documented below.

scaleInControl AutoscalerAutoscalingPolicyScaleInControl

Defines scale in controls to reduce the risk of response latency and outages due to abrupt scale-in events. Structure is documented below.

scalingSchedules AutoscalerAutoscalingPolicyScalingSchedule[]

Scaling schedules defined for an autoscaler. Multiple schedules can be set on an autoscaler and they can overlap. Structure is documented below.

max_replicas int

The maximum number of instances that the autoscaler can scale up to. This is required when creating or updating an autoscaler. The maximum number of replicas should not be lower than the minimum number of replicas.

min_replicas int

The minimum number of replicas that the autoscaler can scale down to. This cannot be less than 0. If not provided, the autoscaler will choose a default value depending on the maximum number of instances allowed.

cooldown_period int

The number of seconds that the autoscaler should wait before it starts collecting information from a new instance. This prevents the autoscaler from collecting information when the instance is initializing, during which the collected usage would not be reliable. The default time the autoscaler waits is 60 seconds. Virtual machine initialization times might vary because of numerous factors. We recommend that you test how long an instance may take to initialize. To do this, create an instance and time the startup process.

cpu_utilization AutoscalerAutoscalingPolicyCpuUtilization

Defines the CPU utilization policy that allows the autoscaler to scale based on the average CPU utilization of a managed instance group. Structure is documented below.

load_balancing_utilization AutoscalerAutoscalingPolicyLoadBalancingUtilization

Configuration parameters of autoscaling based on a load balancer. Structure is documented below.

metrics Sequence[AutoscalerAutoscalingPolicyMetric]

Configuration parameters of autoscaling based on a custom metric. Structure is documented below.

mode str

Defines operating mode for this policy. Default value is ON. Possible values are OFF, ONLY_UP, and ON.

scale_down_control AutoscalerAutoscalingPolicyScaleDownControl

Defines scale down controls to reduce the risk of response latency and outages due to abrupt scale-in events. Structure is documented below.

scale_in_control AutoscalerAutoscalingPolicyScaleInControl

Defines scale in controls to reduce the risk of response latency and outages due to abrupt scale-in events. Structure is documented below.

scaling_schedules Sequence[AutoscalerAutoscalingPolicyScalingSchedule]

Scaling schedules defined for an autoscaler. Multiple schedules can be set on an autoscaler and they can overlap. Structure is documented below.

maxReplicas Number

The maximum number of instances that the autoscaler can scale up to. This is required when creating or updating an autoscaler. The maximum number of replicas should not be lower than the minimum number of replicas.

minReplicas Number

The minimum number of replicas that the autoscaler can scale down to. This cannot be less than 0. If not provided, the autoscaler will choose a default value depending on the maximum number of instances allowed.

cooldownPeriod Number

The number of seconds that the autoscaler should wait before it starts collecting information from a new instance. This prevents the autoscaler from collecting information when the instance is initializing, during which the collected usage would not be reliable. The default time the autoscaler waits is 60 seconds. Virtual machine initialization times might vary because of numerous factors. We recommend that you test how long an instance may take to initialize. To do this, create an instance and time the startup process.

cpuUtilization Property Map

Defines the CPU utilization policy that allows the autoscaler to scale based on the average CPU utilization of a managed instance group. Structure is documented below.

loadBalancingUtilization Property Map

Configuration parameters of autoscaling based on a load balancer. Structure is documented below.

metrics List<Property Map>

Configuration parameters of autoscaling based on a custom metric. Structure is documented below.

mode String

Defines operating mode for this policy. Default value is ON. Possible values are OFF, ONLY_UP, and ON.

scaleDownControl Property Map

Defines scale down controls to reduce the risk of response latency and outages due to abrupt scale-in events. Structure is documented below.

scaleInControl Property Map

Defines scale in controls to reduce the risk of response latency and outages due to abrupt scale-in events. Structure is documented below.

scalingSchedules List

Scaling schedules defined for an autoscaler. Multiple schedules can be set on an autoscaler and they can overlap. Structure is documented below.
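
For instance, the mode field can be used to temporarily stop scale-in while still allowing scale-out. A minimal C# sketch (hedged: the instance group manager referenced as igm is assumed to be declared elsewhere in the same stack):

var scaleOutOnly = new Gcp.Compute.Autoscaler("scaleOutOnly", new Gcp.Compute.AutoscalerArgs
{
    Zone = "us-central1-f",
    Target = igm.Id, // assumed: an InstanceGroupManager declared elsewhere
    AutoscalingPolicy = new Gcp.Compute.Inputs.AutoscalerAutoscalingPolicyArgs
    {
        MinReplicas = 2,
        MaxReplicas = 8,
        CooldownPeriod = 60,
        Mode = "ONLY_UP", // scale out as needed, never remove instances
    },
});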

AutoscalerAutoscalingPolicyCpuUtilization

Target double

The target CPU utilization that the autoscaler should maintain. Must be a float value in the range (0, 1]. If not specified, the default is 0.8.

PredictiveMethod string

Indicates whether predictive autoscaling based on CPU metric is enabled. Valid values are:

  • NONE (default). No predictive method is used. The autoscaler scales the group to meet current demand based on real-time metrics.
  • OPTIMIZE_AVAILABILITY. Predictive autoscaling improves availability by monitoring daily and weekly load patterns and scaling out ahead of anticipated demand.
Target float64

The target CPU utilization that the autoscaler should maintain. Must be a float value in the range (0, 1]. If not specified, the default is 0.8.

PredictiveMethod string

Indicates whether predictive autoscaling based on CPU metric is enabled. Valid values are:

  • NONE (default). No predictive method is used. The autoscaler scales the group to meet current demand based on real-time metrics.
  • OPTIMIZE_AVAILABILITY. Predictive autoscaling improves availability by monitoring daily and weekly load patterns and scaling out ahead of anticipated demand.
target Double

The target CPU utilization that the autoscaler should maintain. Must be a float value in the range (0, 1]. If not specified, the default is 0.8.

predictiveMethod String

Indicates whether predictive autoscaling based on CPU metric is enabled. Valid values are:

  • NONE (default). No predictive method is used. The autoscaler scales the group to meet current demand based on real-time metrics.
  • OPTIMIZE_AVAILABILITY. Predictive autoscaling improves availability by monitoring daily and weekly load patterns and scaling out ahead of anticipated demand.
target number

The target CPU utilization that the autoscaler should maintain. Must be a float value in the range (0, 1]. If not specified, the default is 0.8.

predictiveMethod string

Indicates whether predictive autoscaling based on CPU metric is enabled. Valid values are:

  • NONE (default). No predictive method is used. The autoscaler scales the group to meet current demand based on real-time metrics.
  • OPTIMIZE_AVAILABILITY. Predictive autoscaling improves availability by monitoring daily and weekly load patterns and scaling out ahead of anticipated demand.
target float

The target CPU utilization that the autoscaler should maintain. Must be a float value in the range (0, 1]. If not specified, the default is 0.8.

predictive_method str

Indicates whether predictive autoscaling based on CPU metric is enabled. Valid values are:

  • NONE (default). No predictive method is used. The autoscaler scales the group to meet current demand based on real-time metrics.
  • OPTIMIZE_AVAILABILITY. Predictive autoscaling improves availability by monitoring daily and weekly load patterns and scaling out ahead of anticipated demand.
target Number

The target CPU utilization that the autoscaler should maintain. Must be a float value in the range (0, 1]. If not specified, the default is 0.8.

predictiveMethod String

Indicates whether predictive autoscaling based on CPU metric is enabled. Valid values are:

  • NONE (default). No predictive method is used. The autoscaler scales the group to meet current demand based on real-time metrics.
  • OPTIMIZE_AVAILABILITY. Predictive autoscaling improves availability by monitoring daily and weekly load patterns and scaling out ahead of anticipated demand.
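
As a hedged illustration of the two fields above, the following C# sketch keeps average CPU near 60% and enables predictive autoscaling (igm is assumed to be declared elsewhere; the input type name follows the schema naming above):

var cpuAutoscaler = new Gcp.Compute.Autoscaler("cpuAutoscaler", new Gcp.Compute.AutoscalerArgs
{
    Zone = "us-central1-f",
    Target = igm.Id, // assumed: an InstanceGroupManager declared elsewhere
    AutoscalingPolicy = new Gcp.Compute.Inputs.AutoscalerAutoscalingPolicyArgs
    {
        MinReplicas = 1,
        MaxReplicas = 10,
        CooldownPeriod = 90,
        CpuUtilization = new Gcp.Compute.Inputs.AutoscalerAutoscalingPolicyCpuUtilizationArgs
        {
            Target = 0.6, // maintain roughly 60% average CPU utilization
            PredictiveMethod = "OPTIMIZE_AVAILABILITY", // scale out ahead of forecast demand
        },
    },
});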

AutoscalerAutoscalingPolicyLoadBalancingUtilization

Target double

Fraction of backend capacity utilization (set in HTTP(s) load balancing configuration) that autoscaler should maintain. Must be a positive float value. If not defined, the default is 0.8.

Target float64

Fraction of backend capacity utilization (set in HTTP(s) load balancing configuration) that autoscaler should maintain. Must be a positive float value. If not defined, the default is 0.8.

target Double

Fraction of backend capacity utilization (set in HTTP(s) load balancing configuration) that autoscaler should maintain. Must be a positive float value. If not defined, the default is 0.8.

target number

Fraction of backend capacity utilization (set in HTTP(s) load balancing configuration) that autoscaler should maintain. Must be a positive float value. If not defined, the default is 0.8.

target float

Fraction of backend capacity utilization (set in HTTP(s) load balancing configuration) that autoscaler should maintain. Must be a positive float value. If not defined, the default is 0.8.

target Number

Fraction of backend capacity utilization (set in HTTP(s) load balancing configuration) that autoscaler should maintain. Must be a positive float value. If not defined, the default is 0.8.
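
A hedged C# sketch of scaling on load-balancing serving capacity; the group is assumed to back an HTTP(S) load balancer, and igm is assumed to be declared elsewhere:

var lbAutoscaler = new Gcp.Compute.Autoscaler("lbAutoscaler", new Gcp.Compute.AutoscalerArgs
{
    Zone = "us-central1-f",
    Target = igm.Id, // assumed: an InstanceGroupManager behind an HTTP(S) load balancer backend
    AutoscalingPolicy = new Gcp.Compute.Inputs.AutoscalerAutoscalingPolicyArgs
    {
        MinReplicas = 1,
        MaxReplicas = 20,
        LoadBalancingUtilization = new Gcp.Compute.Inputs.AutoscalerAutoscalingPolicyLoadBalancingUtilizationArgs
        {
            Target = 0.7, // keep backend capacity utilization around 70%
        },
    },
});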

AutoscalerAutoscalingPolicyMetric

Name string

The identifier for this object. Format specified above.

Filter string

A filter string to be used as the filter string for a Stackdriver Monitoring TimeSeries.list API call. This filter is used to select a specific TimeSeries for the purpose of autoscaling and to determine whether the metric is exporting per-instance or per-group data. You can only use the AND operator for joining selectors. You can only use direct equality comparison operator (=) without any functions for each selector. You can specify the metric in both the filter string and in the metric field. However, if specified in both places, the metric must be identical. The monitored resource type determines what kind of values are expected for the metric. If it is a gce_instance, the autoscaler expects the metric to include a separate TimeSeries for each instance in a group. In such a case, you cannot filter on resource labels. If the resource type is any other value, the autoscaler expects this metric to contain values that apply to the entire autoscaled instance group and resource label filtering can be performed to point autoscaler at the correct TimeSeries to scale upon. This is called a per-group metric for the purpose of autoscaling. If not specified, the type defaults to gce_instance. You should provide a filter that is selective enough to pick just one TimeSeries for the autoscaled group or for each of the instances (if you are using gce_instance resource type). If multiple TimeSeries are returned upon the query execution, the autoscaler will sum their respective values to obtain its scaling value.

SingleInstanceAssignment double

If scaling is based on a per-group metric value that represents the total amount of work to be done or resource usage, set this value to an amount assigned for a single instance of the scaled group. The autoscaler will keep the number of instances proportional to the value of this metric, the metric itself should not change value due to group resizing. For example, a good metric to use with the target is pubsub.googleapis.com/subscription/num_undelivered_messages or a custom metric exporting the total number of requests coming to your instances. A bad example would be a metric exporting an average or median latency, since this value can't include a chunk assignable to a single instance, it could be better used with utilization_target instead.

Target double

The target value of the metric that the autoscaler should maintain. This must be a positive value. A utilization metric scales the number of virtual machines handling requests to increase or decrease proportionally to the metric.

Type string

Defines how target utilization value is expressed for a Stackdriver Monitoring metric. Possible values are GAUGE, DELTA_PER_SECOND, and DELTA_PER_MINUTE.

Name string

The identifier for this object. Format specified above.

Filter string

A filter string to be used as the filter string for a Stackdriver Monitoring TimeSeries.list API call. This filter is used to select a specific TimeSeries for the purpose of autoscaling and to determine whether the metric is exporting per-instance or per-group data. You can only use the AND operator for joining selectors. You can only use direct equality comparison operator (=) without any functions for each selector. You can specify the metric in both the filter string and in the metric field. However, if specified in both places, the metric must be identical. The monitored resource type determines what kind of values are expected for the metric. If it is a gce_instance, the autoscaler expects the metric to include a separate TimeSeries for each instance in a group. In such a case, you cannot filter on resource labels. If the resource type is any other value, the autoscaler expects this metric to contain values that apply to the entire autoscaled instance group and resource label filtering can be performed to point autoscaler at the correct TimeSeries to scale upon. This is called a per-group metric for the purpose of autoscaling. If not specified, the type defaults to gce_instance. You should provide a filter that is selective enough to pick just one TimeSeries for the autoscaled group or for each of the instances (if you are using gce_instance resource type). If multiple TimeSeries are returned upon the query execution, the autoscaler will sum their respective values to obtain its scaling value.

SingleInstanceAssignment float64

If scaling is based on a per-group metric value that represents the total amount of work to be done or resource usage, set this value to an amount assigned for a single instance of the scaled group. The autoscaler will keep the number of instances proportional to the value of this metric, the metric itself should not change value due to group resizing. For example, a good metric to use with the target is pubsub.googleapis.com/subscription/num_undelivered_messages or a custom metric exporting the total number of requests coming to your instances. A bad example would be a metric exporting an average or median latency, since this value can't include a chunk assignable to a single instance, it could be better used with utilization_target instead.

Target float64

The target value of the metric that the autoscaler should maintain. This must be a positive value. A utilization metric scales the number of virtual machines handling requests to increase or decrease proportionally to the metric.

Type string

Defines how target utilization value is expressed for a Stackdriver Monitoring metric. Possible values are GAUGE, DELTA_PER_SECOND, and DELTA_PER_MINUTE.

name String

The identifier for this object. Format specified above.

filter String

A filter string to be used as the filter string for a Stackdriver Monitoring TimeSeries.list API call. This filter is used to select a specific TimeSeries for the purpose of autoscaling and to determine whether the metric is exporting per-instance or per-group data. You can only use the AND operator for joining selectors. You can only use direct equality comparison operator (=) without any functions for each selector. You can specify the metric in both the filter string and in the metric field. However, if specified in both places, the metric must be identical. The monitored resource type determines what kind of values are expected for the metric. If it is a gce_instance, the autoscaler expects the metric to include a separate TimeSeries for each instance in a group. In such a case, you cannot filter on resource labels. If the resource type is any other value, the autoscaler expects this metric to contain values that apply to the entire autoscaled instance group and resource label filtering can be performed to point autoscaler at the correct TimeSeries to scale upon. This is called a per-group metric for the purpose of autoscaling. If not specified, the type defaults to gce_instance. You should provide a filter that is selective enough to pick just one TimeSeries for the autoscaled group or for each of the instances (if you are using gce_instance resource type). If multiple TimeSeries are returned upon the query execution, the autoscaler will sum their respective values to obtain its scaling value.

singleInstanceAssignment Double

If scaling is based on a per-group metric value that represents the total amount of work to be done or resource usage, set this value to an amount assigned for a single instance of the scaled group. The autoscaler will keep the number of instances proportional to the value of this metric, the metric itself should not change value due to group resizing. For example, a good metric to use with the target is pubsub.googleapis.com/subscription/num_undelivered_messages or a custom metric exporting the total number of requests coming to your instances. A bad example would be a metric exporting an average or median latency, since this value can't include a chunk assignable to a single instance, it could be better used with utilization_target instead.

target Double

The target value of the metric that the autoscaler should maintain. This must be a positive value. A utilization metric scales the number of virtual machines handling requests to increase or decrease proportionally to the metric.

type String

Defines how target utilization value is expressed for a Stackdriver Monitoring metric. Possible values are GAUGE, DELTA_PER_SECOND, and DELTA_PER_MINUTE.

name string

The identifier for this object. Format specified above.

filter string

A filter string to be used as the filter string for a Stackdriver Monitoring TimeSeries.list API call. This filter is used to select a specific TimeSeries for the purpose of autoscaling and to determine whether the metric is exporting per-instance or per-group data. You can only use the AND operator for joining selectors. You can only use direct equality comparison operator (=) without any functions for each selector. You can specify the metric in both the filter string and in the metric field. However, if specified in both places, the metric must be identical. The monitored resource type determines what kind of values are expected for the metric. If it is a gce_instance, the autoscaler expects the metric to include a separate TimeSeries for each instance in a group. In such a case, you cannot filter on resource labels. If the resource type is any other value, the autoscaler expects this metric to contain values that apply to the entire autoscaled instance group and resource label filtering can be performed to point autoscaler at the correct TimeSeries to scale upon. This is called a per-group metric for the purpose of autoscaling. If not specified, the type defaults to gce_instance. You should provide a filter that is selective enough to pick just one TimeSeries for the autoscaled group or for each of the instances (if you are using gce_instance resource type). If multiple TimeSeries are returned upon the query execution, the autoscaler will sum their respective values to obtain its scaling value.

singleInstanceAssignment number

If scaling is based on a per-group metric value that represents the total amount of work to be done or resource usage, set this value to an amount assigned for a single instance of the scaled group. The autoscaler will keep the number of instances proportional to the value of this metric, the metric itself should not change value due to group resizing. For example, a good metric to use with the target is pubsub.googleapis.com/subscription/num_undelivered_messages or a custom metric exporting the total number of requests coming to your instances. A bad example would be a metric exporting an average or median latency, since this value can't include a chunk assignable to a single instance, it could be better used with utilization_target instead.

target number

The target value of the metric that the autoscaler should maintain. This must be a positive value. A utilization metric scales the number of virtual machines handling requests to increase or decrease proportionally to the metric.

type string

Defines how target utilization value is expressed for a Stackdriver Monitoring metric. Possible values are GAUGE, DELTA_PER_SECOND, and DELTA_PER_MINUTE.

name str

The identifier for this object. Format specified above.

filter str

A filter string to be used as the filter string for a Stackdriver Monitoring TimeSeries.list API call. This filter is used to select a specific TimeSeries for the purpose of autoscaling and to determine whether the metric is exporting per-instance or per-group data. You can only use the AND operator for joining selectors. You can only use direct equality comparison operator (=) without any functions for each selector. You can specify the metric in both the filter string and in the metric field. However, if specified in both places, the metric must be identical. The monitored resource type determines what kind of values are expected for the metric. If it is a gce_instance, the autoscaler expects the metric to include a separate TimeSeries for each instance in a group. In such a case, you cannot filter on resource labels. If the resource type is any other value, the autoscaler expects this metric to contain values that apply to the entire autoscaled instance group and resource label filtering can be performed to point autoscaler at the correct TimeSeries to scale upon. This is called a per-group metric for the purpose of autoscaling. If not specified, the type defaults to gce_instance. You should provide a filter that is selective enough to pick just one TimeSeries for the autoscaled group or for each of the instances (if you are using gce_instance resource type). If multiple TimeSeries are returned upon the query execution, the autoscaler will sum their respective values to obtain its scaling value.

single_instance_assignment float

If scaling is based on a per-group metric value that represents the total amount of work to be done or resource usage, set this value to an amount assigned for a single instance of the scaled group. The autoscaler will keep the number of instances proportional to the value of this metric, the metric itself should not change value due to group resizing. For example, a good metric to use with the target is pubsub.googleapis.com/subscription/num_undelivered_messages or a custom metric exporting the total number of requests coming to your instances. A bad example would be a metric exporting an average or median latency, since this value can't include a chunk assignable to a single instance, it could be better used with utilization_target instead.

target float

The target value of the metric that the autoscaler should maintain. This must be a positive value. A utilization metric scales the number of virtual machines handling requests to increase or decrease proportionally to the metric.

type str

Defines how target utilization value is expressed for a Stackdriver Monitoring metric. Possible values are GAUGE, DELTA_PER_SECOND, and DELTA_PER_MINUTE.

name String

The identifier for this object. Format specified above.

filter String

A filter string to be used as the filter string for a Stackdriver Monitoring TimeSeries.list API call. This filter is used to select a specific TimeSeries for the purpose of autoscaling and to determine whether the metric is exporting per-instance or per-group data. You can only use the AND operator for joining selectors. You can only use direct equality comparison operator (=) without any functions for each selector. You can specify the metric in both the filter string and in the metric field. However, if specified in both places, the metric must be identical. The monitored resource type determines what kind of values are expected for the metric. If it is a gce_instance, the autoscaler expects the metric to include a separate TimeSeries for each instance in a group. In such a case, you cannot filter on resource labels. If the resource type is any other value, the autoscaler expects this metric to contain values that apply to the entire autoscaled instance group and resource label filtering can be performed to point autoscaler at the correct TimeSeries to scale upon. This is called a per-group metric for the purpose of autoscaling. If not specified, the type defaults to gce_instance. You should provide a filter that is selective enough to pick just one TimeSeries for the autoscaled group or for each of the instances (if you are using gce_instance resource type). If multiple TimeSeries are returned upon the query execution, the autoscaler will sum their respective values to obtain its scaling value.

singleInstanceAssignment Number

If scaling is based on a per-group metric value that represents the total amount of work to be done or resource usage, set this value to an amount assigned for a single instance of the scaled group. The autoscaler will keep the number of instances proportional to the value of this metric, the metric itself should not change value due to group resizing. For example, a good metric to use with the target is pubsub.googleapis.com/subscription/num_undelivered_messages or a custom metric exporting the total number of requests coming to your instances. A bad example would be a metric exporting an average or median latency, since this value can't include a chunk assignable to a single instance, it could be better used with utilization_target instead.

target Number

The target value of the metric that the autoscaler should maintain. This must be a positive value. A utilization metric scales the number of virtual machines handling requests to increase or decrease proportionally to the metric.

type String

Defines how target utilization value is expressed for a Stackdriver Monitoring metric. Possible values are GAUGE, DELTA_PER_SECOND, and DELTA_PER_MINUTE.
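
A hedged C# sketch of a per-instance custom metric with a GAUGE target; the metric name is purely illustrative, and igm is assumed to be declared elsewhere:

var queueAutoscaler = new Gcp.Compute.Autoscaler("queueAutoscaler", new Gcp.Compute.AutoscalerArgs
{
    Zone = "us-central1-f",
    Target = igm.Id, // assumed: an InstanceGroupManager declared elsewhere
    AutoscalingPolicy = new Gcp.Compute.Inputs.AutoscalerAutoscalingPolicyArgs
    {
        MinReplicas = 1,
        MaxReplicas = 10,
        Metrics = 
        {
            new Gcp.Compute.Inputs.AutoscalerAutoscalingPolicyMetricArgs
            {
                Name = "custom.googleapis.com/my_app/queue_depth", // hypothetical metric
                Target = 100, // aim for about 100 queued items per instance
                Type = "GAUGE",
            },
        },
    },
});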

AutoscalerAutoscalingPolicyScaleDownControl

MaxScaledDownReplicas AutoscalerAutoscalingPolicyScaleDownControlMaxScaledDownReplicas

A nested object resource. Structure is documented below.

TimeWindowSec int

How long back autoscaling should look when computing recommendations to include directives regarding slower scale down, as described above.

MaxScaledDownReplicas AutoscalerAutoscalingPolicyScaleDownControlMaxScaledDownReplicas

A nested object resource. Structure is documented below.

TimeWindowSec int

How long back autoscaling should look when computing recommendations to include directives regarding slower scale down, as described above.

maxScaledDownReplicas AutoscalerAutoscalingPolicyScaleDownControlMaxScaledDownReplicas

A nested object resource. Structure is documented below.

timeWindowSec Integer

How long back autoscaling should look when computing recommendations to include directives regarding slower scale down, as described above.

maxScaledDownReplicas AutoscalerAutoscalingPolicyScaleDownControlMaxScaledDownReplicas

A nested object resource. Structure is documented below.

timeWindowSec number

How long back autoscaling should look when computing recommendations to include directives regarding slower scale down, as described above.

max_scaled_down_replicas AutoscalerAutoscalingPolicyScaleDownControlMaxScaledDownReplicas

A nested object resource. Structure is documented below.

time_window_sec int

How long back autoscaling should look when computing recommendations to include directives regarding slower scale down, as described above.

maxScaledDownReplicas Property Map

A nested object resource. Structure is documented below.

timeWindowSec Number

How long back autoscaling should look when computing recommendations to include directives regarding slower scale down, as described above.

AutoscalerAutoscalingPolicyScaleDownControlMaxScaledDownReplicas

Fixed int

Specifies a fixed number of VM instances. This must be a positive integer.

Percent int

Specifies a percentage of instances between 0 and 100%, inclusive. For example, specify 80 for 80%.

Fixed int

Specifies a fixed number of VM instances. This must be a positive integer.

Percent int

Specifies a percentage of instances between 0 and 100%, inclusive. For example, specify 80 for 80%.

fixed Integer

Specifies a fixed number of VM instances. This must be a positive integer.

percent Integer

Specifies a percentage of instances between 0 and 100%, inclusive. For example, specify 80 for 80%.

fixed number

Specifies a fixed number of VM instances. This must be a positive integer.

percent number

Specifies a percentage of instances between 0 and 100%, inclusive. For example, specify 80 for 80%.

fixed int

Specifies a fixed number of VM instances. This must be a positive integer.

percent int

Specifies a percentage of instances between 0 and 100%, inclusive. For example, specify 80 for 80%.

fixed Number

Specifies a fixed number of VM instances. This must be a positive integer.

percent Number

Specifies a percentage of instances between 0 and 100%, inclusive. For example, specify 80 for 80%.
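
A hedged C# sketch of capping abrupt scale-down to at most two instances over any ten-minute window (igm and google_beta are assumed to be declared elsewhere; scale-down controls may require the beta provider):

var cautiousAutoscaler = new Gcp.Compute.Autoscaler("cautiousAutoscaler", new Gcp.Compute.AutoscalerArgs
{
    Zone = "us-central1-f",
    Target = igm.Id, // assumed: an InstanceGroupManager declared elsewhere
    AutoscalingPolicy = new Gcp.Compute.Inputs.AutoscalerAutoscalingPolicyArgs
    {
        MinReplicas = 3,
        MaxReplicas = 30,
        CpuUtilization = new Gcp.Compute.Inputs.AutoscalerAutoscalingPolicyCpuUtilizationArgs
        {
            Target = 0.6,
        },
        ScaleDownControl = new Gcp.Compute.Inputs.AutoscalerAutoscalingPolicyScaleDownControlArgs
        {
            TimeWindowSec = 600, // look back over a 10-minute window
            MaxScaledDownReplicas = new Gcp.Compute.Inputs.AutoscalerAutoscalingPolicyScaleDownControlMaxScaledDownReplicasArgs
            {
                Fixed = 2, // remove at most 2 instances per window
            },
        },
    },
}, new CustomResourceOptions
{
    Provider = google_beta, // assumed: a google-beta provider instance declared elsewhere
});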

AutoscalerAutoscalingPolicyScaleInControl

MaxScaledInReplicas AutoscalerAutoscalingPolicyScaleInControlMaxScaledInReplicas

A nested object resource. Structure is documented below.

TimeWindowSec int

How long back autoscaling should look when computing recommendations to include directives regarding slower scale down, as described above.

MaxScaledInReplicas AutoscalerAutoscalingPolicyScaleInControlMaxScaledInReplicas

A nested object resource. Structure is documented below.

TimeWindowSec int

How long back autoscaling should look when computing recommendations to include directives regarding slower scale down, as described above.

maxScaledInReplicas AutoscalerAutoscalingPolicyScaleInControlMaxScaledInReplicas

A nested object resource. Structure is documented below.

timeWindowSec Integer

How long back autoscaling should look when computing recommendations to include directives regarding slower scale down, as described above.

maxScaledInReplicas AutoscalerAutoscalingPolicyScaleInControlMaxScaledInReplicas

A nested object resource. Structure is documented below.

timeWindowSec number

How long back autoscaling should look when computing recommendations to include directives regarding slower scale down, as described above.

max_scaled_in_replicas AutoscalerAutoscalingPolicyScaleInControlMaxScaledInReplicas

A nested object resource. Structure is documented below.

time_window_sec int

How long back autoscaling should look when computing recommendations to include directives regarding slower scale down, as described above.

maxScaledInReplicas Property Map

A nested object resource. Structure is documented below.

timeWindowSec Number

How long back autoscaling should look when computing recommendations to include directives regarding slower scale down, as described above.

AutoscalerAutoscalingPolicyScaleInControlMaxScaledInReplicas

Fixed int

Specifies a fixed number of VM instances. This must be a positive integer.

Percent int

Specifies a percentage of instances between 0 and 100%, inclusive. For example, specify 80 for 80%.

Fixed int

Specifies a fixed number of VM instances. This must be a positive integer.

Percent int

Specifies a percentage of instances between 0 and 100%, inclusive. For example, specify 80 for 80%.

fixed Integer

Specifies a fixed number of VM instances. This must be a positive integer.

percent Integer

Specifies a percentage of instances between 0 and 100%, inclusive. For example, specify 80 for 80%.

fixed number

Specifies a fixed number of VM instances. This must be a positive integer.

percent number

Specifies a percentage of instances between 0 and 100%, inclusive. For example, specify 80 for 80%.

fixed int

Specifies a fixed number of VM instances. This must be a positive integer.

percent int

Specifies a percentage of instances between 0 and 100%, inclusive. For example, specify 80 for 80%.

fixed Number

Specifies a fixed number of VM instances. This must be a positive integer.

percent Number

Specifies a percentage of instances between 0 and 100%, inclusive. For example, specify 80 for 80%.
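
A hedged C# sketch of limiting scale-in to 10% of the group per five-minute window (igm is assumed to be declared elsewhere):

var gradualScaleIn = new Gcp.Compute.Autoscaler("gradualScaleIn", new Gcp.Compute.AutoscalerArgs
{
    Zone = "us-central1-f",
    Target = igm.Id, // assumed: an InstanceGroupManager declared elsewhere
    AutoscalingPolicy = new Gcp.Compute.Inputs.AutoscalerAutoscalingPolicyArgs
    {
        MinReplicas = 5,
        MaxReplicas = 50,
        CpuUtilization = new Gcp.Compute.Inputs.AutoscalerAutoscalingPolicyCpuUtilizationArgs
        {
            Target = 0.65,
        },
        ScaleInControl = new Gcp.Compute.Inputs.AutoscalerAutoscalingPolicyScaleInControlArgs
        {
            TimeWindowSec = 300, // evaluate scale-in over 5-minute windows
            MaxScaledInReplicas = new Gcp.Compute.Inputs.AutoscalerAutoscalingPolicyScaleInControlMaxScaledInReplicasArgs
            {
                Percent = 10, // shrink the group by at most 10% per window
            },
        },
    },
});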

AutoscalerAutoscalingPolicyScalingSchedule

DurationSec int

The duration of time intervals (in seconds) for which this scaling schedule will be running. The minimum allowed value is 300.

MinRequiredReplicas int

Minimum number of VM instances that autoscaler will recommend in time intervals starting according to schedule.

Name string

The identifier for this object. Format specified above.

Schedule string

The start timestamps of time intervals when this scaling schedule should provide a scaling signal. This field uses the extended cron format (with an optional year field).

Description string

An optional description of this resource.

Disabled bool

A boolean value that specifies if a scaling schedule can influence autoscaler recommendations. If set to true, then a scaling schedule has no effect.

TimeZone string

The time zone to be used when interpreting the schedule. The value of this field must be a time zone name from the tz database: http://en.wikipedia.org/wiki/Tz_database.

DurationSec int

The duration of time intervals (in seconds) for which this scaling schedule will be running. The minimum allowed value is 300.

MinRequiredReplicas int

Minimum number of VM instances that autoscaler will recommend in time intervals starting according to schedule.

Name string

The identifier for this object. Format specified above.

Schedule string

The start timestamps of time intervals when this scaling schedule should provide a scaling signal. This field uses the extended cron format (with an optional year field).

Description string

An optional description of this resource.

Disabled bool

A boolean value that specifies if a scaling schedule can influence autoscaler recommendations. If set to true, then a scaling schedule has no effect.

TimeZone string

The time zone to be used when interpreting the schedule. The value of this field must be a time zone name from the tz database: http://en.wikipedia.org/wiki/Tz_database.

durationSec Integer

The duration of time intervals (in seconds) for which this scaling schedule will be running. The minimum allowed value is 300.

minRequiredReplicas Integer

Minimum number of VM instances that autoscaler will recommend in time intervals starting according to schedule.

name String

The identifier for this object. Format specified above.

schedule String

The start timestamps of time intervals when this scaling schedule should provide a scaling signal. This field uses the extended cron format (with an optional year field).

description String

An optional description of this resource.

disabled Boolean

A boolean value that specifies if a scaling schedule can influence autoscaler recommendations. If set to true, then a scaling schedule has no effect.

timeZone String

The time zone to be used when interpreting the schedule. The value of this field must be a time zone name from the tz database: http://en.wikipedia.org/wiki/Tz_database.

durationSec number

The duration of time intervals (in seconds) for which this scaling schedule will be running. The minimum allowed value is 300.

minRequiredReplicas number

Minimum number of VM instances that autoscaler will recommend in time intervals starting according to schedule.

name string

The identifier for this object. Format specified above.

schedule string

The start timestamps of time intervals when this scaling schedule should provide a scaling signal. This field uses the extended cron format (with an optional year field).

description string

An optional description of this resource.

disabled boolean

A boolean value that specifies if a scaling schedule can influence autoscaler recommendations. If set to true, then a scaling schedule has no effect.

timeZone string

The time zone to be used when interpreting the schedule. The value of this field must be a time zone name from the tz database: http://en.wikipedia.org/wiki/Tz_database.

duration_sec int

The duration of time intervals (in seconds) for which this scaling schedule will be running. The minimum allowed value is 300.

min_required_replicas int

Minimum number of VM instances that autoscaler will recommend in time intervals starting according to schedule.

name str

The identifier for this object. Format specified above.

schedule str

The start timestamps of time intervals when this scaling schedule should provide a scaling signal. This field uses the extended cron format (with an optional year field).

description str

An optional description of this resource.

disabled bool

A boolean value that specifies if a scaling schedule can influence autoscaler recommendations. If set to true, then a scaling schedule has no effect.

time_zone str

The time zone to be used when interpreting the schedule. The value of this field must be a time zone name from the tz database: http://en.wikipedia.org/wiki/Tz_database.

durationSec Number

The duration of time intervals (in seconds) for which this scaling schedule will be running. The minimum allowed value is 300.

minRequiredReplicas Number

Minimum number of VM instances that autoscaler will recommend in time intervals starting according to schedule.

name String

The identifier for this object. Format specified above.

schedule String

The start timestamps of time intervals when this scaling schedule should provide a scaling signal. This field uses the extended cron format (with an optional year field).

description String

An optional description of this resource.

disabled Boolean

A boolean value that specifies if a scaling schedule can influence autoscaler recommendations. If set to true, then a scaling schedule has no effect.

timeZone String

The time zone to be used when interpreting the schedule. The value of this field must be a time zone name from the tz database: http://en.wikipedia.org/wiki/Tz_database.
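
A hedged C# sketch of a recurring schedule that guarantees at least five instances during weekday business hours (all values are illustrative; igm is assumed to be declared elsewhere):

var scheduledAutoscaler = new Gcp.Compute.Autoscaler("scheduledAutoscaler", new Gcp.Compute.AutoscalerArgs
{
    Zone = "us-central1-f",
    Target = igm.Id, // assumed: an InstanceGroupManager declared elsewhere
    AutoscalingPolicy = new Gcp.Compute.Inputs.AutoscalerAutoscalingPolicyArgs
    {
        MinReplicas = 1,
        MaxReplicas = 20,
        ScalingSchedules = 
        {
            new Gcp.Compute.Inputs.AutoscalerAutoscalingPolicyScalingScheduleArgs
            {
                Name = "weekday-business-hours",
                Schedule = "0 8 * * MON-FRI", // extended cron: 08:00 every weekday
                DurationSec = 36000, // hold the floor for 10 hours
                MinRequiredReplicas = 5,
                TimeZone = "Europe/Berlin",
                Description = "Pre-scale for weekday business hours",
            },
        },
    },
});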

Import

Autoscaler can be imported using any of these accepted formats:

 $ pulumi import gcp:compute/autoscaler:Autoscaler default projects/{{project}}/zones/{{zone}}/autoscalers/{{name}}
 $ pulumi import gcp:compute/autoscaler:Autoscaler default {{project}}/{{zone}}/{{name}}
 $ pulumi import gcp:compute/autoscaler:Autoscaler default {{zone}}/{{name}}
 $ pulumi import gcp:compute/autoscaler:Autoscaler default {{name}}
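
For example, using the zone/name form with purely illustrative values:

 $ pulumi import gcp:compute/autoscaler:Autoscaler default us-central1-f/my-autoscaler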

Package Details

Repository
https://github.com/pulumi/pulumi-gcp
License
Apache-2.0
Notes

This Pulumi package is based on the google-beta Terraform Provider.