1. Packages
  2. Google Cloud (GCP) Classic
  3. API Docs
  4. vertex
  5. AiDeploymentResourcePool
Google Cloud Classic v7.20.0 published on Wednesday, Apr 24, 2024 by Pulumi

gcp.vertex.AiDeploymentResourcePool

Explore with Pulumi AI

gcp logo
Google Cloud Classic v7.20.0 published on Wednesday, Apr 24, 2024 by Pulumi

    ‘DeploymentResourcePool can be shared by multiple deployed models, whose underlying specification consists of dedicated resources.’

    To get more information about DeploymentResourcePool, see the Vertex AI REST API documentation (projects.locations.deploymentResourcePools).

    Example Usage

    Vertex Ai Deployment Resource Pool

    import * as pulumi from "@pulumi/pulumi";
    import * as gcp from "@pulumi/gcp";
    
    const deploymentResourcePool = new gcp.vertex.AiDeploymentResourcePool("deployment_resource_pool", {
        region: "us-central1",
        name: "example-deployment-resource-pool",
        dedicatedResources: {
            machineSpec: {
                machineType: "n1-standard-4",
                acceleratorType: "NVIDIA_TESLA_K80",
                acceleratorCount: 1,
            },
            minReplicaCount: 1,
            maxReplicaCount: 2,
            autoscalingMetricSpecs: [{
                metricName: "aiplatform.googleapis.com/prediction/online/accelerator/duty_cycle",
                target: 60,
            }],
        },
    });
    
    import pulumi
    import pulumi_gcp as gcp
    
    deployment_resource_pool = gcp.vertex.AiDeploymentResourcePool("deployment_resource_pool",
        region="us-central1",
        name="example-deployment-resource-pool",
        dedicated_resources=gcp.vertex.AiDeploymentResourcePoolDedicatedResourcesArgs(
            machine_spec=gcp.vertex.AiDeploymentResourcePoolDedicatedResourcesMachineSpecArgs(
                machine_type="n1-standard-4",
                accelerator_type="NVIDIA_TESLA_K80",
                accelerator_count=1,
            ),
            min_replica_count=1,
            max_replica_count=2,
            autoscaling_metric_specs=[gcp.vertex.AiDeploymentResourcePoolDedicatedResourcesAutoscalingMetricSpecArgs(
                metric_name="aiplatform.googleapis.com/prediction/online/accelerator/duty_cycle",
                target=60,
            )],
        ))
    
    package main
    
    import (
    	"github.com/pulumi/pulumi-gcp/sdk/v7/go/gcp/vertex"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		_, err := vertex.NewAiDeploymentResourcePool(ctx, "deployment_resource_pool", &vertex.AiDeploymentResourcePoolArgs{
    			Region: pulumi.String("us-central1"),
    			Name:   pulumi.String("example-deployment-resource-pool"),
    			DedicatedResources: &vertex.AiDeploymentResourcePoolDedicatedResourcesArgs{
    				MachineSpec: &vertex.AiDeploymentResourcePoolDedicatedResourcesMachineSpecArgs{
    					MachineType:      pulumi.String("n1-standard-4"),
    					AcceleratorType:  pulumi.String("NVIDIA_TESLA_K80"),
    					AcceleratorCount: pulumi.Int(1),
    				},
    				MinReplicaCount: pulumi.Int(1),
    				MaxReplicaCount: pulumi.Int(2),
    				AutoscalingMetricSpecs: vertex.AiDeploymentResourcePoolDedicatedResourcesAutoscalingMetricSpecArray{
    					&vertex.AiDeploymentResourcePoolDedicatedResourcesAutoscalingMetricSpecArgs{
    						MetricName: pulumi.String("aiplatform.googleapis.com/prediction/online/accelerator/duty_cycle"),
    						Target:     pulumi.Int(60),
    					},
    				},
    			},
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using Gcp = Pulumi.Gcp;
    
    return await Deployment.RunAsync(() => 
    {
        var deploymentResourcePool = new Gcp.Vertex.AiDeploymentResourcePool("deployment_resource_pool", new()
        {
            Region = "us-central1",
            Name = "example-deployment-resource-pool",
            DedicatedResources = new Gcp.Vertex.Inputs.AiDeploymentResourcePoolDedicatedResourcesArgs
            {
                MachineSpec = new Gcp.Vertex.Inputs.AiDeploymentResourcePoolDedicatedResourcesMachineSpecArgs
                {
                    MachineType = "n1-standard-4",
                    AcceleratorType = "NVIDIA_TESLA_K80",
                    AcceleratorCount = 1,
                },
                MinReplicaCount = 1,
                MaxReplicaCount = 2,
                AutoscalingMetricSpecs = new[]
                {
                    new Gcp.Vertex.Inputs.AiDeploymentResourcePoolDedicatedResourcesAutoscalingMetricSpecArgs
                    {
                        MetricName = "aiplatform.googleapis.com/prediction/online/accelerator/duty_cycle",
                        Target = 60,
                    },
                },
            },
        });
    
    });
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.gcp.vertex.AiDeploymentResourcePool;
    import com.pulumi.gcp.vertex.AiDeploymentResourcePoolArgs;
    import com.pulumi.gcp.vertex.inputs.AiDeploymentResourcePoolDedicatedResourcesArgs;
    import com.pulumi.gcp.vertex.inputs.AiDeploymentResourcePoolDedicatedResourcesMachineSpecArgs;
    import com.pulumi.gcp.vertex.inputs.AiDeploymentResourcePoolDedicatedResourcesAutoscalingMetricSpecArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            var deploymentResourcePool = new AiDeploymentResourcePool("deploymentResourcePool", AiDeploymentResourcePoolArgs.builder()        
                .region("us-central1")
                .name("example-deployment-resource-pool")
                .dedicatedResources(AiDeploymentResourcePoolDedicatedResourcesArgs.builder()
                    .machineSpec(AiDeploymentResourcePoolDedicatedResourcesMachineSpecArgs.builder()
                        .machineType("n1-standard-4")
                        .acceleratorType("NVIDIA_TESLA_K80")
                        .acceleratorCount(1)
                        .build())
                    .minReplicaCount(1)
                    .maxReplicaCount(2)
                    .autoscalingMetricSpecs(AiDeploymentResourcePoolDedicatedResourcesAutoscalingMetricSpecArgs.builder()
                        .metricName("aiplatform.googleapis.com/prediction/online/accelerator/duty_cycle")
                        .target(60)
                        .build())
                    .build())
                .build());
    
        }
    }
    
    resources:
      deploymentResourcePool:
        type: gcp:vertex:AiDeploymentResourcePool
        name: deployment_resource_pool
        properties:
          region: us-central1
          name: example-deployment-resource-pool
          dedicatedResources:
            machineSpec:
              machineType: n1-standard-4
              acceleratorType: NVIDIA_TESLA_K80
              acceleratorCount: 1
            minReplicaCount: 1
            maxReplicaCount: 2
            autoscalingMetricSpecs:
              - metricName: aiplatform.googleapis.com/prediction/online/accelerator/duty_cycle
                target: 60
    

    Create AiDeploymentResourcePool Resource

    Resources are created with functions called constructors. To learn more about declaring and configuring resources, see Resources.

    Constructor syntax

    new AiDeploymentResourcePool(name: string, args?: AiDeploymentResourcePoolArgs, opts?: CustomResourceOptions);
    @overload
    def AiDeploymentResourcePool(resource_name: str,
                                 args: Optional[AiDeploymentResourcePoolArgs] = None,
                                 opts: Optional[ResourceOptions] = None)
    
    @overload
    def AiDeploymentResourcePool(resource_name: str,
                                 opts: Optional[ResourceOptions] = None,
                                 dedicated_resources: Optional[AiDeploymentResourcePoolDedicatedResourcesArgs] = None,
                                 name: Optional[str] = None,
                                 project: Optional[str] = None,
                                 region: Optional[str] = None)
    func NewAiDeploymentResourcePool(ctx *Context, name string, args *AiDeploymentResourcePoolArgs, opts ...ResourceOption) (*AiDeploymentResourcePool, error)
    public AiDeploymentResourcePool(string name, AiDeploymentResourcePoolArgs? args = null, CustomResourceOptions? opts = null)
    public AiDeploymentResourcePool(String name, AiDeploymentResourcePoolArgs args)
    public AiDeploymentResourcePool(String name, AiDeploymentResourcePoolArgs args, CustomResourceOptions options)
    
    type: gcp:vertex:AiDeploymentResourcePool
    properties: # The arguments to resource properties.
    options: # Bag of options to control resource's behavior.
    
    

    Parameters

    name string
    The unique name of the resource.
    args AiDeploymentResourcePoolArgs
    The arguments to resource properties.
    opts CustomResourceOptions
    Bag of options to control resource's behavior.
    resource_name str
    The unique name of the resource.
    args AiDeploymentResourcePoolArgs
    The arguments to resource properties.
    opts ResourceOptions
    Bag of options to control resource's behavior.
    ctx Context
    Context object for the current deployment.
    name string
    The unique name of the resource.
    args AiDeploymentResourcePoolArgs
    The arguments to resource properties.
    opts ResourceOption
    Bag of options to control resource's behavior.
    name string
    The unique name of the resource.
    args AiDeploymentResourcePoolArgs
    The arguments to resource properties.
    opts CustomResourceOptions
    Bag of options to control resource's behavior.
    name String
    The unique name of the resource.
    args AiDeploymentResourcePoolArgs
    The arguments to resource properties.
    options CustomResourceOptions
    Bag of options to control resource's behavior.

    Example

    The following reference example uses placeholder values for all input properties.

    var aiDeploymentResourcePoolResource = new Gcp.Vertex.AiDeploymentResourcePool("aiDeploymentResourcePoolResource", new()
    {
        DedicatedResources = new Gcp.Vertex.Inputs.AiDeploymentResourcePoolDedicatedResourcesArgs
        {
            MachineSpec = new Gcp.Vertex.Inputs.AiDeploymentResourcePoolDedicatedResourcesMachineSpecArgs
            {
                AcceleratorCount = 0,
                AcceleratorType = "string",
                MachineType = "string",
            },
            MinReplicaCount = 0,
            AutoscalingMetricSpecs = new[]
            {
                new Gcp.Vertex.Inputs.AiDeploymentResourcePoolDedicatedResourcesAutoscalingMetricSpecArgs
                {
                    MetricName = "string",
                    Target = 0,
                },
            },
            MaxReplicaCount = 0,
        },
        Name = "string",
        Project = "string",
        Region = "string",
    });
    
    example, err := vertex.NewAiDeploymentResourcePool(ctx, "aiDeploymentResourcePoolResource", &vertex.AiDeploymentResourcePoolArgs{
    	DedicatedResources: &vertex.AiDeploymentResourcePoolDedicatedResourcesArgs{
    		MachineSpec: &vertex.AiDeploymentResourcePoolDedicatedResourcesMachineSpecArgs{
    			AcceleratorCount: pulumi.Int(0),
    			AcceleratorType:  pulumi.String("string"),
    			MachineType:      pulumi.String("string"),
    		},
    		MinReplicaCount: pulumi.Int(0),
    		AutoscalingMetricSpecs: vertex.AiDeploymentResourcePoolDedicatedResourcesAutoscalingMetricSpecArray{
    			&vertex.AiDeploymentResourcePoolDedicatedResourcesAutoscalingMetricSpecArgs{
    				MetricName: pulumi.String("string"),
    				Target:     pulumi.Int(0),
    			},
    		},
    		MaxReplicaCount: pulumi.Int(0),
    	},
    	Name:    pulumi.String("string"),
    	Project: pulumi.String("string"),
    	Region:  pulumi.String("string"),
    })
    
    var aiDeploymentResourcePoolResource = new AiDeploymentResourcePool("aiDeploymentResourcePoolResource", AiDeploymentResourcePoolArgs.builder()        
        .dedicatedResources(AiDeploymentResourcePoolDedicatedResourcesArgs.builder()
            .machineSpec(AiDeploymentResourcePoolDedicatedResourcesMachineSpecArgs.builder()
                .acceleratorCount(0)
                .acceleratorType("string")
                .machineType("string")
                .build())
            .minReplicaCount(0)
            .autoscalingMetricSpecs(AiDeploymentResourcePoolDedicatedResourcesAutoscalingMetricSpecArgs.builder()
                .metricName("string")
                .target(0)
                .build())
            .maxReplicaCount(0)
            .build())
        .name("string")
        .project("string")
        .region("string")
        .build());
    
    ai_deployment_resource_pool_resource = gcp.vertex.AiDeploymentResourcePool("aiDeploymentResourcePoolResource",
        dedicated_resources=gcp.vertex.AiDeploymentResourcePoolDedicatedResourcesArgs(
            machine_spec=gcp.vertex.AiDeploymentResourcePoolDedicatedResourcesMachineSpecArgs(
                accelerator_count=0,
                accelerator_type="string",
                machine_type="string",
            ),
            min_replica_count=0,
            autoscaling_metric_specs=[gcp.vertex.AiDeploymentResourcePoolDedicatedResourcesAutoscalingMetricSpecArgs(
                metric_name="string",
                target=0,
            )],
            max_replica_count=0,
        ),
        name="string",
        project="string",
        region="string")
    
    const aiDeploymentResourcePoolResource = new gcp.vertex.AiDeploymentResourcePool("aiDeploymentResourcePoolResource", {
        dedicatedResources: {
            machineSpec: {
                acceleratorCount: 0,
                acceleratorType: "string",
                machineType: "string",
            },
            minReplicaCount: 0,
            autoscalingMetricSpecs: [{
                metricName: "string",
                target: 0,
            }],
            maxReplicaCount: 0,
        },
        name: "string",
        project: "string",
        region: "string",
    });
    
    type: gcp:vertex:AiDeploymentResourcePool
    properties:
        dedicatedResources:
            autoscalingMetricSpecs:
                - metricName: string
                  target: 0
            machineSpec:
                acceleratorCount: 0
                acceleratorType: string
                machineType: string
            maxReplicaCount: 0
            minReplicaCount: 0
        name: string
        project: string
        region: string
    

    AiDeploymentResourcePool Resource Properties

    To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.

    Inputs

    The AiDeploymentResourcePool resource accepts the following input properties:

    DedicatedResources AiDeploymentResourcePoolDedicatedResources
    The underlying dedicated resources that the deployment resource pool uses. Structure is documented below.
    Name string
    The resource name of deployment resource pool. The maximum length is 63 characters, and valid characters are /^[a-z]([a-z0-9-]{0,61}[a-z0-9])?$/.
    Project string
    The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
    Region string
    The region of deployment resource pool. e.g. us-central1
    DedicatedResources AiDeploymentResourcePoolDedicatedResourcesArgs
    The underlying dedicated resources that the deployment resource pool uses. Structure is documented below.
    Name string
    The resource name of deployment resource pool. The maximum length is 63 characters, and valid characters are /^[a-z]([a-z0-9-]{0,61}[a-z0-9])?$/.
    Project string
    The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
    Region string
    The region of deployment resource pool. e.g. us-central1
    dedicatedResources AiDeploymentResourcePoolDedicatedResources
    The underlying dedicated resources that the deployment resource pool uses. Structure is documented below.
    name String
    The resource name of deployment resource pool. The maximum length is 63 characters, and valid characters are /^[a-z]([a-z0-9-]{0,61}[a-z0-9])?$/.
    project String
    The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
    region String
    The region of deployment resource pool. e.g. us-central1
    dedicatedResources AiDeploymentResourcePoolDedicatedResources
    The underlying dedicated resources that the deployment resource pool uses. Structure is documented below.
    name string
    The resource name of deployment resource pool. The maximum length is 63 characters, and valid characters are /^[a-z]([a-z0-9-]{0,61}[a-z0-9])?$/.
    project string
    The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
    region string
    The region of deployment resource pool. e.g. us-central1
    dedicated_resources AiDeploymentResourcePoolDedicatedResourcesArgs
    The underlying dedicated resources that the deployment resource pool uses. Structure is documented below.
    name str
    The resource name of deployment resource pool. The maximum length is 63 characters, and valid characters are /^[a-z]([a-z0-9-]{0,61}[a-z0-9])?$/.
    project str
    The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
    region str
    The region of deployment resource pool. e.g. us-central1
    dedicatedResources Property Map
    The underlying dedicated resources that the deployment resource pool uses. Structure is documented below.
    name String
    The resource name of deployment resource pool. The maximum length is 63 characters, and valid characters are /^[a-z]([a-z0-9-]{0,61}[a-z0-9])?$/.
    project String
    The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
    region String
    The region of deployment resource pool. e.g. us-central1

    Outputs

    All input properties are implicitly available as output properties. Additionally, the AiDeploymentResourcePool resource produces the following output properties:

    CreateTime string
    A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits.
    Id string
    The provider-assigned unique ID for this managed resource.
    CreateTime string
    A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits.
    Id string
    The provider-assigned unique ID for this managed resource.
    createTime String
    A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits.
    id String
    The provider-assigned unique ID for this managed resource.
    createTime string
    A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits.
    id string
    The provider-assigned unique ID for this managed resource.
    create_time str
    A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits.
    id str
    The provider-assigned unique ID for this managed resource.
    createTime String
    A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits.
    id String
    The provider-assigned unique ID for this managed resource.

    Look up Existing AiDeploymentResourcePool Resource

    Get an existing AiDeploymentResourcePool resource’s state with the given name, ID, and optional extra properties used to qualify the lookup.

    public static get(name: string, id: Input<ID>, state?: AiDeploymentResourcePoolState, opts?: CustomResourceOptions): AiDeploymentResourcePool
    @staticmethod
    def get(resource_name: str,
            id: str,
            opts: Optional[ResourceOptions] = None,
            create_time: Optional[str] = None,
            dedicated_resources: Optional[AiDeploymentResourcePoolDedicatedResourcesArgs] = None,
            name: Optional[str] = None,
            project: Optional[str] = None,
            region: Optional[str] = None) -> AiDeploymentResourcePool
    func GetAiDeploymentResourcePool(ctx *Context, name string, id IDInput, state *AiDeploymentResourcePoolState, opts ...ResourceOption) (*AiDeploymentResourcePool, error)
    public static AiDeploymentResourcePool Get(string name, Input<string> id, AiDeploymentResourcePoolState? state, CustomResourceOptions? opts = null)
    public static AiDeploymentResourcePool get(String name, Output<String> id, AiDeploymentResourcePoolState state, CustomResourceOptions options)
    Resource lookup is not supported in YAML
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    resource_name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    The following state arguments are supported:
    CreateTime string
    A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits.
    DedicatedResources AiDeploymentResourcePoolDedicatedResources
    The underlying dedicated resources that the deployment resource pool uses. Structure is documented below.
    Name string
    The resource name of deployment resource pool. The maximum length is 63 characters, and valid characters are /^[a-z]([a-z0-9-]{0,61}[a-z0-9])?$/.
    Project string
    The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
    Region string
    The region of deployment resource pool. e.g. us-central1
    CreateTime string
    A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits.
    DedicatedResources AiDeploymentResourcePoolDedicatedResourcesArgs
    The underlying dedicated resources that the deployment resource pool uses. Structure is documented below.
    Name string
    The resource name of deployment resource pool. The maximum length is 63 characters, and valid characters are /^[a-z]([a-z0-9-]{0,61}[a-z0-9])?$/.
    Project string
    The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
    Region string
    The region of deployment resource pool. e.g. us-central1
    createTime String
    A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits.
    dedicatedResources AiDeploymentResourcePoolDedicatedResources
    The underlying dedicated resources that the deployment resource pool uses. Structure is documented below.
    name String
    The resource name of deployment resource pool. The maximum length is 63 characters, and valid characters are /^[a-z]([a-z0-9-]{0,61}[a-z0-9])?$/.
    project String
    The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
    region String
    The region of deployment resource pool. e.g. us-central1
    createTime string
    A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits.
    dedicatedResources AiDeploymentResourcePoolDedicatedResources
    The underlying dedicated resources that the deployment resource pool uses. Structure is documented below.
    name string
    The resource name of deployment resource pool. The maximum length is 63 characters, and valid characters are /^[a-z]([a-z0-9-]{0,61}[a-z0-9])?$/.
    project string
    The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
    region string
    The region of deployment resource pool. e.g. us-central1
    create_time str
    A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits.
    dedicated_resources AiDeploymentResourcePoolDedicatedResourcesArgs
    The underlying dedicated resources that the deployment resource pool uses. Structure is documented below.
    name str
    The resource name of deployment resource pool. The maximum length is 63 characters, and valid characters are /^[a-z]([a-z0-9-]{0,61}[a-z0-9])?$/.
    project str
    The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
    region str
    The region of deployment resource pool. e.g. us-central1
    createTime String
    A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits.
    dedicatedResources Property Map
    The underlying dedicated resources that the deployment resource pool uses. Structure is documented below.
    name String
    The resource name of deployment resource pool. The maximum length is 63 characters, and valid characters are /^[a-z]([a-z0-9-]{0,61}[a-z0-9])?$/.
    project String
    The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
    region String
    The region of deployment resource pool. e.g. us-central1

    Supporting Types

    AiDeploymentResourcePoolDedicatedResources, AiDeploymentResourcePoolDedicatedResourcesArgs

    MachineSpec AiDeploymentResourcePoolDedicatedResourcesMachineSpec
    The specification of a single machine used by the prediction Structure is documented below.
    MinReplicaCount int
    The minimum number of machine replicas this DeployedModel will be always deployed on. This value must be greater than or equal to 1. If traffic against the DeployedModel increases, it may dynamically be deployed onto more replicas, and as traffic decreases, some of these extra replicas may be freed.
    AutoscalingMetricSpecs List<AiDeploymentResourcePoolDedicatedResourcesAutoscalingMetricSpec>
    A list of the metric specifications that overrides a resource utilization metric. Structure is documented below.
    MaxReplicaCount int
    The maximum number of replicas this DeployedModel may be deployed on when the traffic against it increases. If the requested value is too large, the deployment will error, but if deployment succeeds then the ability to scale the model to that many replicas is guaranteed (barring service outages). If traffic against the DeployedModel increases beyond what its replicas at maximum may handle, a portion of the traffic will be dropped. If this value is not provided, will use min_replica_count as the default value. The value of this field impacts the charge against Vertex CPU and GPU quotas. Specifically, you will be charged for max_replica_count * number of cores in the selected machine type) and (max_replica_count * number of GPUs per replica in the selected machine type).
    MachineSpec AiDeploymentResourcePoolDedicatedResourcesMachineSpec
    The specification of a single machine used by the prediction Structure is documented below.
    MinReplicaCount int
    The minimum number of machine replicas this DeployedModel will be always deployed on. This value must be greater than or equal to 1. If traffic against the DeployedModel increases, it may dynamically be deployed onto more replicas, and as traffic decreases, some of these extra replicas may be freed.
    AutoscalingMetricSpecs []AiDeploymentResourcePoolDedicatedResourcesAutoscalingMetricSpec
    A list of the metric specifications that overrides a resource utilization metric. Structure is documented below.
    MaxReplicaCount int
    The maximum number of replicas this DeployedModel may be deployed on when the traffic against it increases. If the requested value is too large, the deployment will error, but if deployment succeeds then the ability to scale the model to that many replicas is guaranteed (barring service outages). If traffic against the DeployedModel increases beyond what its replicas at maximum may handle, a portion of the traffic will be dropped. If this value is not provided, will use min_replica_count as the default value. The value of this field impacts the charge against Vertex CPU and GPU quotas. Specifically, you will be charged for max_replica_count * number of cores in the selected machine type) and (max_replica_count * number of GPUs per replica in the selected machine type).
    machineSpec AiDeploymentResourcePoolDedicatedResourcesMachineSpec
    The specification of a single machine used by the prediction Structure is documented below.
    minReplicaCount Integer
    The minimum number of machine replicas this DeployedModel will be always deployed on. This value must be greater than or equal to 1. If traffic against the DeployedModel increases, it may dynamically be deployed onto more replicas, and as traffic decreases, some of these extra replicas may be freed.
    autoscalingMetricSpecs List<AiDeploymentResourcePoolDedicatedResourcesAutoscalingMetricSpec>
    A list of the metric specifications that overrides a resource utilization metric. Structure is documented below.
    maxReplicaCount Integer
    The maximum number of replicas this DeployedModel may be deployed on when the traffic against it increases. If the requested value is too large, the deployment will error, but if deployment succeeds then the ability to scale the model to that many replicas is guaranteed (barring service outages). If traffic against the DeployedModel increases beyond what its replicas at maximum may handle, a portion of the traffic will be dropped. If this value is not provided, will use min_replica_count as the default value. The value of this field impacts the charge against Vertex CPU and GPU quotas. Specifically, you will be charged for max_replica_count * number of cores in the selected machine type) and (max_replica_count * number of GPUs per replica in the selected machine type).
    machineSpec AiDeploymentResourcePoolDedicatedResourcesMachineSpec
    The specification of a single machine used by the prediction Structure is documented below.
    minReplicaCount number
    The minimum number of machine replicas this DeployedModel will be always deployed on. This value must be greater than or equal to 1. If traffic against the DeployedModel increases, it may dynamically be deployed onto more replicas, and as traffic decreases, some of these extra replicas may be freed.
    autoscalingMetricSpecs AiDeploymentResourcePoolDedicatedResourcesAutoscalingMetricSpec[]
    A list of the metric specifications that overrides a resource utilization metric. Structure is documented below.
    maxReplicaCount number
    The maximum number of replicas this DeployedModel may be deployed on when the traffic against it increases. If the requested value is too large, the deployment will error, but if deployment succeeds then the ability to scale the model to that many replicas is guaranteed (barring service outages). If traffic against the DeployedModel increases beyond what its replicas at maximum may handle, a portion of the traffic will be dropped. If this value is not provided, will use min_replica_count as the default value. The value of this field impacts the charge against Vertex CPU and GPU quotas. Specifically, you will be charged for max_replica_count * number of cores in the selected machine type) and (max_replica_count * number of GPUs per replica in the selected machine type).
    machine_spec AiDeploymentResourcePoolDedicatedResourcesMachineSpec
    The specification of a single machine used by the prediction. Structure is documented below.
    min_replica_count int
    The minimum number of machine replicas this DeployedModel will always be deployed on. This value must be greater than or equal to 1. If traffic against the DeployedModel increases, it may dynamically be deployed onto more replicas, and as traffic decreases, some of these extra replicas may be freed.
    autoscaling_metric_specs Sequence[AiDeploymentResourcePoolDedicatedResourcesAutoscalingMetricSpec]
    A list of the metric specifications that override a resource utilization metric. Structure is documented below.
    max_replica_count int
    The maximum number of replicas this DeployedModel may be deployed on when the traffic against it increases. If the requested value is too large, the deployment will error, but if deployment succeeds then the ability to scale the model to that many replicas is guaranteed (barring service outages). If traffic against the DeployedModel increases beyond what its replicas at maximum may handle, a portion of the traffic will be dropped. If this value is not provided, will use min_replica_count as the default value. The value of this field impacts the charge against Vertex CPU and GPU quotas. Specifically, you will be charged for (max_replica_count * number of cores in the selected machine type) and (max_replica_count * number of GPUs per replica in the selected machine type).
    machineSpec Property Map
    The specification of a single machine used by the prediction. Structure is documented below.
    minReplicaCount Number
    The minimum number of machine replicas this DeployedModel will always be deployed on. This value must be greater than or equal to 1. If traffic against the DeployedModel increases, it may dynamically be deployed onto more replicas, and as traffic decreases, some of these extra replicas may be freed.
    autoscalingMetricSpecs List<Property Map>
    A list of the metric specifications that override a resource utilization metric. Structure is documented below.
    maxReplicaCount Number
    The maximum number of replicas this DeployedModel may be deployed on when the traffic against it increases. If the requested value is too large, the deployment will error, but if deployment succeeds then the ability to scale the model to that many replicas is guaranteed (barring service outages). If traffic against the DeployedModel increases beyond what its replicas at maximum may handle, a portion of the traffic will be dropped. If this value is not provided, will use min_replica_count as the default value. The value of this field impacts the charge against Vertex CPU and GPU quotas. Specifically, you will be charged for (max_replica_count * number of cores in the selected machine type) and (max_replica_count * number of GPUs per replica in the selected machine type).

    AiDeploymentResourcePoolDedicatedResourcesAutoscalingMetricSpec, AiDeploymentResourcePoolDedicatedResourcesAutoscalingMetricSpecArgs

    MetricName string
    The resource metric name. Supported metrics: For Online Prediction: * aiplatform.googleapis.com/prediction/online/accelerator/duty_cycle * aiplatform.googleapis.com/prediction/online/cpu/utilization
    Target int
    The target resource utilization in percentage (1% - 100%) for the given metric; once the real usage deviates from the target by a certain percentage, the machine replicas change. The default value is 60 (representing 60%) if not provided.
    MetricName string
    The resource metric name. Supported metrics: For Online Prediction: * aiplatform.googleapis.com/prediction/online/accelerator/duty_cycle * aiplatform.googleapis.com/prediction/online/cpu/utilization
    Target int
    The target resource utilization in percentage (1% - 100%) for the given metric; once the real usage deviates from the target by a certain percentage, the machine replicas change. The default value is 60 (representing 60%) if not provided.
    metricName String
    The resource metric name. Supported metrics: For Online Prediction: * aiplatform.googleapis.com/prediction/online/accelerator/duty_cycle * aiplatform.googleapis.com/prediction/online/cpu/utilization
    target Integer
    The target resource utilization in percentage (1% - 100%) for the given metric; once the real usage deviates from the target by a certain percentage, the machine replicas change. The default value is 60 (representing 60%) if not provided.
    metricName string
    The resource metric name. Supported metrics: For Online Prediction: * aiplatform.googleapis.com/prediction/online/accelerator/duty_cycle * aiplatform.googleapis.com/prediction/online/cpu/utilization
    target number
    The target resource utilization in percentage (1% - 100%) for the given metric; once the real usage deviates from the target by a certain percentage, the machine replicas change. The default value is 60 (representing 60%) if not provided.
    metric_name str
    The resource metric name. Supported metrics: For Online Prediction: * aiplatform.googleapis.com/prediction/online/accelerator/duty_cycle * aiplatform.googleapis.com/prediction/online/cpu/utilization
    target int
    The target resource utilization in percentage (1% - 100%) for the given metric; once the real usage deviates from the target by a certain percentage, the machine replicas change. The default value is 60 (representing 60%) if not provided.
    metricName String
    The resource metric name. Supported metrics: For Online Prediction: * aiplatform.googleapis.com/prediction/online/accelerator/duty_cycle * aiplatform.googleapis.com/prediction/online/cpu/utilization
    target Number
    The target resource utilization in percentage (1% - 100%) for the given metric; once the real usage deviates from the target by a certain percentage, the machine replicas change. The default value is 60 (representing 60%) if not provided.

    AiDeploymentResourcePoolDedicatedResourcesMachineSpec, AiDeploymentResourcePoolDedicatedResourcesMachineSpecArgs

    AcceleratorCount int
    The number of accelerators to attach to the machine.
    AcceleratorType string
    The type of accelerator(s) that may be attached to the machine as per accelerator_count. See possible values here.
    MachineType string
    The type of the machine. See the list of machine types supported for prediction.
    AcceleratorCount int
    The number of accelerators to attach to the machine.
    AcceleratorType string
    The type of accelerator(s) that may be attached to the machine as per accelerator_count. See possible values here.
    MachineType string
    The type of the machine. See the list of machine types supported for prediction.
    acceleratorCount Integer
    The number of accelerators to attach to the machine.
    acceleratorType String
    The type of accelerator(s) that may be attached to the machine as per accelerator_count. See possible values here.
    machineType String
    The type of the machine. See the list of machine types supported for prediction.
    acceleratorCount number
    The number of accelerators to attach to the machine.
    acceleratorType string
    The type of accelerator(s) that may be attached to the machine as per accelerator_count. See possible values here.
    machineType string
    The type of the machine. See the list of machine types supported for prediction.
    accelerator_count int
    The number of accelerators to attach to the machine.
    accelerator_type str
    The type of accelerator(s) that may be attached to the machine as per accelerator_count. See possible values here.
    machine_type str
    The type of the machine. See the list of machine types supported for prediction.
    acceleratorCount Number
    The number of accelerators to attach to the machine.
    acceleratorType String
    The type of accelerator(s) that may be attached to the machine as per accelerator_count. See possible values here.
    machineType String
    The type of the machine. See the list of machine types supported for prediction.

    Import

    DeploymentResourcePool can be imported using any of these accepted formats:

    • projects/{{project}}/locations/{{region}}/deploymentResourcePools/{{name}}

    • {{project}}/{{region}}/{{name}}

    • {{region}}/{{name}}

    • {{name}}

    When using the pulumi import command, DeploymentResourcePool can be imported using one of the formats above. For example:

    $ pulumi import gcp:vertex/aiDeploymentResourcePool:AiDeploymentResourcePool default projects/{{project}}/locations/{{region}}/deploymentResourcePools/{{name}}
    
    $ pulumi import gcp:vertex/aiDeploymentResourcePool:AiDeploymentResourcePool default {{project}}/{{region}}/{{name}}
    
    $ pulumi import gcp:vertex/aiDeploymentResourcePool:AiDeploymentResourcePool default {{region}}/{{name}}
    
    $ pulumi import gcp:vertex/aiDeploymentResourcePool:AiDeploymentResourcePool default {{name}}
    

    To learn more about importing existing cloud resources, see Importing resources.

    Package Details

    Repository
    Google Cloud (GCP) Classic pulumi/pulumi-gcp
    License
    Apache-2.0
    Notes
    This Pulumi package is based on the google-beta Terraform Provider.
    gcp logo
    Google Cloud Classic v7.20.0 published on Wednesday, Apr 24, 2024 by Pulumi