aws logo
AWS Classic v5.41.0, May 15 23

aws.sagemaker.Model

Explore with Pulumi AI

Provides a SageMaker model resource.

Inference Execution Config

  • mode - (Required) How containers in a multi-container endpoint are run. The following values are valid: Serial and Direct.

Example Usage

Basic usage

using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Aws = Pulumi.Aws;

return await Deployment.RunAsync(() => 
{
    // Trust policy that allows the SageMaker service to assume the role.
    var sagemakerTrustPolicy = Aws.Iam.GetPolicyDocument.Invoke(new()
    {
        Statements = new[]
        {
            new Aws.Iam.Inputs.GetPolicyDocumentStatementInputArgs
            {
                Actions = new[] { "sts:AssumeRole" },
                Principals = new[]
                {
                    new Aws.Iam.Inputs.GetPolicyDocumentStatementPrincipalInputArgs
                    {
                        Type = "Service",
                        Identifiers = new[] { "sagemaker.amazonaws.com" },
                    },
                },
            },
        },
    });

    // Execution role SageMaker assumes to access model artifacts and images.
    var exampleRole = new Aws.Iam.Role("exampleRole", new()
    {
        AssumeRolePolicy = sagemakerTrustPolicy.Apply(doc => doc.Json),
    });

    // AWS-managed prebuilt ECR image for the built-in k-means algorithm.
    var kmeansImage = Aws.Sagemaker.GetPrebuiltEcrImage.Invoke(new()
    {
        RepositoryName = "kmeans",
    });

    // SageMaker model wired to the execution role and the container image.
    var exampleModel = new Aws.Sagemaker.Model("exampleModel", new()
    {
        ExecutionRoleArn = exampleRole.Arn,
        PrimaryContainer = new Aws.Sagemaker.Inputs.ModelPrimaryContainerArgs
        {
            Image = kmeansImage.Apply(img => img.RegistryPath),
        },
    });

});
package main

import (
	"github.com/pulumi/pulumi-aws/sdk/v5/go/aws/iam"
	"github.com/pulumi/pulumi-aws/sdk/v5/go/aws/sagemaker"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		assumeRole, err := iam.GetPolicyDocument(ctx, &iam.GetPolicyDocumentArgs{
			Statements: []iam.GetPolicyDocumentStatement{
				{
					Actions: []string{
						"sts:AssumeRole",
					},
					Principals: []iam.GetPolicyDocumentStatementPrincipal{
						{
							Type: "Service",
							Identifiers: []string{
								"sagemaker.amazonaws.com",
							},
						},
					},
				},
			},
		}, nil)
		if err != nil {
			return err
		}
		exampleRole, err := iam.NewRole(ctx, "exampleRole", &iam.RoleArgs{
			AssumeRolePolicy: *pulumi.String(assumeRole.Json),
		})
		if err != nil {
			return err
		}
		test, err := sagemaker.GetPrebuiltEcrImage(ctx, &sagemaker.GetPrebuiltEcrImageArgs{
			RepositoryName: "kmeans",
		}, nil)
		if err != nil {
			return err
		}
		_, err = sagemaker.NewModel(ctx, "exampleModel", &sagemaker.ModelArgs{
			ExecutionRoleArn: exampleRole.Arn,
			PrimaryContainer: &sagemaker.ModelPrimaryContainerArgs{
				Image: *pulumi.String(test.RegistryPath),
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}
package generated_program;

import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.aws.iam.IamFunctions;
import com.pulumi.aws.iam.inputs.GetPolicyDocumentArgs;
import com.pulumi.aws.iam.inputs.GetPolicyDocumentStatementArgs;
import com.pulumi.aws.iam.inputs.GetPolicyDocumentStatementPrincipalArgs;
import com.pulumi.aws.iam.Role;
import com.pulumi.aws.iam.RoleArgs;
import com.pulumi.aws.sagemaker.SagemakerFunctions;
import com.pulumi.aws.sagemaker.inputs.GetPrebuiltEcrImageArgs;
import com.pulumi.aws.sagemaker.Model;
import com.pulumi.aws.sagemaker.ModelArgs;
import com.pulumi.aws.sagemaker.inputs.ModelPrimaryContainerArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;

public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        // Trust policy that allows the SageMaker service to assume the role.
        // The original example referenced GetPolicyDocumentStatementArgs and
        // GetPolicyDocumentStatementPrincipalArgs without importing them; the
        // two imports were added above so the program compiles.
        final var assumeRole = IamFunctions.getPolicyDocument(GetPolicyDocumentArgs.builder()
            .statements(GetPolicyDocumentStatementArgs.builder()
                .actions("sts:AssumeRole")
                .principals(GetPolicyDocumentStatementPrincipalArgs.builder()
                    .type("Service")
                    .identifiers("sagemaker.amazonaws.com")
                    .build())
                .build())
            .build());

        // Execution role SageMaker assumes to access artifacts and images.
        var exampleRole = new Role("exampleRole", RoleArgs.builder()        
            .assumeRolePolicy(assumeRole.applyValue(getPolicyDocumentResult -> getPolicyDocumentResult.json()))
            .build());

        // AWS-managed prebuilt ECR image for the built-in k-means algorithm.
        final var test = SagemakerFunctions.getPrebuiltEcrImage(GetPrebuiltEcrImageArgs.builder()
            .repositoryName("kmeans")
            .build());

        // SageMaker model wired to the execution role and the container image.
        var exampleModel = new Model("exampleModel", ModelArgs.builder()        
            .executionRoleArn(exampleRole.arn())
            .primaryContainer(ModelPrimaryContainerArgs.builder()
                .image(test.applyValue(getPrebuiltEcrImageResult -> getPrebuiltEcrImageResult.registryPath()))
                .build())
            .build());

    }
}
import pulumi
import pulumi_aws as aws

# Trust policy document allowing the SageMaker service to assume the role.
sagemaker_trust_policy = aws.iam.get_policy_document(
    statements=[
        aws.iam.GetPolicyDocumentStatementArgs(
            actions=["sts:AssumeRole"],
            principals=[
                aws.iam.GetPolicyDocumentStatementPrincipalArgs(
                    type="Service",
                    identifiers=["sagemaker.amazonaws.com"],
                )
            ],
        )
    ]
)

# Execution role SageMaker assumes to access model artifacts and images.
example_role = aws.iam.Role(
    "exampleRole",
    assume_role_policy=sagemaker_trust_policy.json,
)

# AWS-managed prebuilt ECR image for the built-in k-means algorithm.
kmeans_image = aws.sagemaker.get_prebuilt_ecr_image(repository_name="kmeans")

# SageMaker model wired to the execution role and the container image.
example_model = aws.sagemaker.Model(
    "exampleModel",
    execution_role_arn=example_role.arn,
    primary_container=aws.sagemaker.ModelPrimaryContainerArgs(
        image=kmeans_image.registry_path,
    ),
)
import * as pulumi from "@pulumi/pulumi";
import * as aws from "@pulumi/aws";

// Trust policy that allows the SageMaker service to assume the role.
const sagemakerTrust = aws.iam.getPolicyDocument({
    statements: [{
        actions: ["sts:AssumeRole"],
        principals: [{
            type: "Service",
            identifiers: ["sagemaker.amazonaws.com"],
        }],
    }],
});

// Execution role SageMaker assumes to access model artifacts and images.
const exampleRole = new aws.iam.Role("exampleRole", {
    assumeRolePolicy: sagemakerTrust.then(doc => doc.json),
});

// AWS-managed prebuilt ECR image for the built-in k-means algorithm.
const kmeansImage = aws.sagemaker.getPrebuiltEcrImage({repositoryName: "kmeans"});

// SageMaker model wired to the execution role and the container image.
const exampleModel = new aws.sagemaker.Model("exampleModel", {
    executionRoleArn: exampleRole.arn,
    primaryContainer: {
        image: kmeansImage.then(img => img.registryPath),
    },
});
# Pulumi YAML example: a SageMaker model with an IAM execution role and the
# AWS-managed prebuilt k-means container image.
resources:
  exampleModel:
    type: aws:sagemaker:Model
    properties:
      executionRoleArn: ${exampleRole.arn}
      primaryContainer:
        # Registry path of the prebuilt image resolved by the `test` invoke.
        image: ${test.registryPath}
  exampleRole:
    type: aws:iam:Role
    properties:
      # Trust policy JSON produced by the `assumeRole` invoke below.
      assumeRolePolicy: ${assumeRole.json}
variables:
  assumeRole:
    # Trust policy allowing the SageMaker service to assume the role.
    fn::invoke:
      Function: aws:iam:getPolicyDocument
      Arguments:
        statements:
          - actions:
              - sts:AssumeRole
            principals:
              - type: Service
                identifiers:
                  - sagemaker.amazonaws.com
  test:
    # Look up the AWS-managed prebuilt ECR image for the k-means algorithm.
    fn::invoke:
      Function: aws:sagemaker:getPrebuiltEcrImage
      Arguments:
        repositoryName: kmeans

Create Model Resource

new Model(name: string, args: ModelArgs, opts?: CustomResourceOptions);
@overload
def Model(resource_name: str,
          opts: Optional[ResourceOptions] = None,
          containers: Optional[Sequence[ModelContainerArgs]] = None,
          enable_network_isolation: Optional[bool] = None,
          execution_role_arn: Optional[str] = None,
          inference_execution_config: Optional[ModelInferenceExecutionConfigArgs] = None,
          name: Optional[str] = None,
          primary_container: Optional[ModelPrimaryContainerArgs] = None,
          tags: Optional[Mapping[str, str]] = None,
          vpc_config: Optional[ModelVpcConfigArgs] = None)
@overload
def Model(resource_name: str,
          args: ModelArgs,
          opts: Optional[ResourceOptions] = None)
func NewModel(ctx *Context, name string, args ModelArgs, opts ...ResourceOption) (*Model, error)
public Model(string name, ModelArgs args, CustomResourceOptions? opts = null)
public Model(String name, ModelArgs args)
public Model(String name, ModelArgs args, CustomResourceOptions options)
type: aws:sagemaker:Model
properties: # The arguments to resource properties.
options: # Bag of options to control resource's behavior.

name string
The unique name of the resource.
args ModelArgs
The arguments to resource properties.
opts CustomResourceOptions
Bag of options to control resource's behavior.
resource_name str
The unique name of the resource.
args ModelArgs
The arguments to resource properties.
opts ResourceOptions
Bag of options to control resource's behavior.
ctx Context
Context object for the current deployment.
name string
The unique name of the resource.
args ModelArgs
The arguments to resource properties.
opts ResourceOption
Bag of options to control resource's behavior.
name string
The unique name of the resource.
args ModelArgs
The arguments to resource properties.
opts CustomResourceOptions
Bag of options to control resource's behavior.
name String
The unique name of the resource.
args ModelArgs
The arguments to resource properties.
options CustomResourceOptions
Bag of options to control resource's behavior.

Model Resource Properties

To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.

Inputs

The Model resource accepts the following input properties:

ExecutionRoleArn string

A role that SageMaker can assume to access model artifacts and docker images for deployment.

Containers List<Pulumi.Aws.Sagemaker.Inputs.ModelContainerArgs>

Specifies containers in the inference pipeline. If not specified, the primary_container argument is required. Fields are documented below.

EnableNetworkIsolation bool

Isolates the model container. No inbound or outbound network calls can be made to or from the model container.

InferenceExecutionConfig Pulumi.Aws.Sagemaker.Inputs.ModelInferenceExecutionConfigArgs

Specifies details of how containers in a multi-container endpoint are called. see Inference Execution Config.

Name string

The name of the model (must be unique). If omitted, this provider will assign a random, unique name.

PrimaryContainer Pulumi.Aws.Sagemaker.Inputs.ModelPrimaryContainerArgs

The primary docker image containing inference code that is used when the model is deployed for predictions. If not specified, the container argument is required. Fields are documented below.

Tags Dictionary<string, string>

A map of tags to assign to the resource. If configured with a provider default_tags configuration block present, tags with matching keys will overwrite those defined at the provider-level.

VpcConfig Pulumi.Aws.Sagemaker.Inputs.ModelVpcConfigArgs

Specifies the VPC that you want your model to connect to. VpcConfig is used in hosting services and in batch transform.

ExecutionRoleArn string

A role that SageMaker can assume to access model artifacts and docker images for deployment.

Containers []ModelContainerArgs

Specifies containers in the inference pipeline. If not specified, the primary_container argument is required. Fields are documented below.

EnableNetworkIsolation bool

Isolates the model container. No inbound or outbound network calls can be made to or from the model container.

InferenceExecutionConfig ModelInferenceExecutionConfigArgs

Specifies details of how containers in a multi-container endpoint are called. see Inference Execution Config.

Name string

The name of the model (must be unique). If omitted, this provider will assign a random, unique name.

PrimaryContainer ModelPrimaryContainerArgs

The primary docker image containing inference code that is used when the model is deployed for predictions. If not specified, the container argument is required. Fields are documented below.

Tags map[string]string

A map of tags to assign to the resource. If configured with a provider default_tags configuration block present, tags with matching keys will overwrite those defined at the provider-level.

VpcConfig ModelVpcConfigArgs

Specifies the VPC that you want your model to connect to. VpcConfig is used in hosting services and in batch transform.

executionRoleArn String

A role that SageMaker can assume to access model artifacts and docker images for deployment.

containers List<ModelContainerArgs>

Specifies containers in the inference pipeline. If not specified, the primary_container argument is required. Fields are documented below.

enableNetworkIsolation Boolean

Isolates the model container. No inbound or outbound network calls can be made to or from the model container.

inferenceExecutionConfig ModelInferenceExecutionConfigArgs

Specifies details of how containers in a multi-container endpoint are called. see Inference Execution Config.

name String

The name of the model (must be unique). If omitted, this provider will assign a random, unique name.

primaryContainer ModelPrimaryContainerArgs

The primary docker image containing inference code that is used when the model is deployed for predictions. If not specified, the container argument is required. Fields are documented below.

tags Map<String,String>

A map of tags to assign to the resource. If configured with a provider default_tags configuration block present, tags with matching keys will overwrite those defined at the provider-level.

vpcConfig ModelVpcConfigArgs

Specifies the VPC that you want your model to connect to. VpcConfig is used in hosting services and in batch transform.

executionRoleArn string

A role that SageMaker can assume to access model artifacts and docker images for deployment.

containers ModelContainerArgs[]

Specifies containers in the inference pipeline. If not specified, the primary_container argument is required. Fields are documented below.

enableNetworkIsolation boolean

Isolates the model container. No inbound or outbound network calls can be made to or from the model container.

inferenceExecutionConfig ModelInferenceExecutionConfigArgs

Specifies details of how containers in a multi-container endpoint are called. see Inference Execution Config.

name string

The name of the model (must be unique). If omitted, this provider will assign a random, unique name.

primaryContainer ModelPrimaryContainerArgs

The primary docker image containing inference code that is used when the model is deployed for predictions. If not specified, the container argument is required. Fields are documented below.

tags {[key: string]: string}

A map of tags to assign to the resource. If configured with a provider default_tags configuration block present, tags with matching keys will overwrite those defined at the provider-level.

vpcConfig ModelVpcConfigArgs

Specifies the VPC that you want your model to connect to. VpcConfig is used in hosting services and in batch transform.

execution_role_arn str

A role that SageMaker can assume to access model artifacts and docker images for deployment.

containers Sequence[ModelContainerArgs]

Specifies containers in the inference pipeline. If not specified, the primary_container argument is required. Fields are documented below.

enable_network_isolation bool

Isolates the model container. No inbound or outbound network calls can be made to or from the model container.

inference_execution_config ModelInferenceExecutionConfigArgs

Specifies details of how containers in a multi-container endpoint are called. see Inference Execution Config.

name str

The name of the model (must be unique). If omitted, this provider will assign a random, unique name.

primary_container ModelPrimaryContainerArgs

The primary docker image containing inference code that is used when the model is deployed for predictions. If not specified, the container argument is required. Fields are documented below.

tags Mapping[str, str]

A map of tags to assign to the resource. If configured with a provider default_tags configuration block present, tags with matching keys will overwrite those defined at the provider-level.

vpc_config ModelVpcConfigArgs

Specifies the VPC that you want your model to connect to. VpcConfig is used in hosting services and in batch transform.

executionRoleArn String

A role that SageMaker can assume to access model artifacts and docker images for deployment.

containers List<Property Map>

Specifies containers in the inference pipeline. If not specified, the primary_container argument is required. Fields are documented below.

enableNetworkIsolation Boolean

Isolates the model container. No inbound or outbound network calls can be made to or from the model container.

inferenceExecutionConfig Property Map

Specifies details of how containers in a multi-container endpoint are called. see Inference Execution Config.

name String

The name of the model (must be unique). If omitted, this provider will assign a random, unique name.

primaryContainer Property Map

The primary docker image containing inference code that is used when the model is deployed for predictions. If not specified, the container argument is required. Fields are documented below.

tags Map<String>

A map of tags to assign to the resource. If configured with a provider default_tags configuration block present, tags with matching keys will overwrite those defined at the provider-level.

vpcConfig Property Map

Specifies the VPC that you want your model to connect to. VpcConfig is used in hosting services and in batch transform.

Outputs

All input properties are implicitly available as output properties. Additionally, the Model resource produces the following output properties:

Arn string

The Amazon Resource Name (ARN) assigned by AWS to this model.

Id string

The provider-assigned unique ID for this managed resource.

TagsAll Dictionary<string, string>

A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block.

Arn string

The Amazon Resource Name (ARN) assigned by AWS to this model.

Id string

The provider-assigned unique ID for this managed resource.

TagsAll map[string]string

A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block.

arn String

The Amazon Resource Name (ARN) assigned by AWS to this model.

id String

The provider-assigned unique ID for this managed resource.

tagsAll Map<String,String>

A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block.

arn string

The Amazon Resource Name (ARN) assigned by AWS to this model.

id string

The provider-assigned unique ID for this managed resource.

tagsAll {[key: string]: string}

A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block.

arn str

The Amazon Resource Name (ARN) assigned by AWS to this model.

id str

The provider-assigned unique ID for this managed resource.

tags_all Mapping[str, str]

A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block.

arn String

The Amazon Resource Name (ARN) assigned by AWS to this model.

id String

The provider-assigned unique ID for this managed resource.

tagsAll Map<String>

A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block.

Look up Existing Model Resource

Get an existing Model resource’s state with the given name, ID, and optional extra properties used to qualify the lookup.

public static get(name: string, id: Input<ID>, state?: ModelState, opts?: CustomResourceOptions): Model
@staticmethod
def get(resource_name: str,
        id: str,
        opts: Optional[ResourceOptions] = None,
        arn: Optional[str] = None,
        containers: Optional[Sequence[ModelContainerArgs]] = None,
        enable_network_isolation: Optional[bool] = None,
        execution_role_arn: Optional[str] = None,
        inference_execution_config: Optional[ModelInferenceExecutionConfigArgs] = None,
        name: Optional[str] = None,
        primary_container: Optional[ModelPrimaryContainerArgs] = None,
        tags: Optional[Mapping[str, str]] = None,
        tags_all: Optional[Mapping[str, str]] = None,
        vpc_config: Optional[ModelVpcConfigArgs] = None) -> Model
func GetModel(ctx *Context, name string, id IDInput, state *ModelState, opts ...ResourceOption) (*Model, error)
public static Model Get(string name, Input<string> id, ModelState? state, CustomResourceOptions? opts = null)
public static Model get(String name, Output<String> id, ModelState state, CustomResourceOptions options)
Resource lookup is not supported in YAML
name
The unique name of the resulting resource.
id
The unique provider ID of the resource to lookup.
state
Any extra arguments used during the lookup.
opts
A bag of options that control this resource's behavior.
resource_name
The unique name of the resulting resource.
id
The unique provider ID of the resource to lookup.
name
The unique name of the resulting resource.
id
The unique provider ID of the resource to lookup.
state
Any extra arguments used during the lookup.
opts
A bag of options that control this resource's behavior.
name
The unique name of the resulting resource.
id
The unique provider ID of the resource to lookup.
state
Any extra arguments used during the lookup.
opts
A bag of options that control this resource's behavior.
name
The unique name of the resulting resource.
id
The unique provider ID of the resource to lookup.
state
Any extra arguments used during the lookup.
opts
A bag of options that control this resource's behavior.
The following state arguments are supported:
Arn string

The Amazon Resource Name (ARN) assigned by AWS to this model.

Containers List<Pulumi.Aws.Sagemaker.Inputs.ModelContainerArgs>

Specifies containers in the inference pipeline. If not specified, the primary_container argument is required. Fields are documented below.

EnableNetworkIsolation bool

Isolates the model container. No inbound or outbound network calls can be made to or from the model container.

ExecutionRoleArn string

A role that SageMaker can assume to access model artifacts and docker images for deployment.

InferenceExecutionConfig Pulumi.Aws.Sagemaker.Inputs.ModelInferenceExecutionConfigArgs

Specifies details of how containers in a multi-container endpoint are called. see Inference Execution Config.

Name string

The name of the model (must be unique). If omitted, this provider will assign a random, unique name.

PrimaryContainer Pulumi.Aws.Sagemaker.Inputs.ModelPrimaryContainerArgs

The primary docker image containing inference code that is used when the model is deployed for predictions. If not specified, the container argument is required. Fields are documented below.

Tags Dictionary<string, string>

A map of tags to assign to the resource. If configured with a provider default_tags configuration block present, tags with matching keys will overwrite those defined at the provider-level.

TagsAll Dictionary<string, string>

A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block.

VpcConfig Pulumi.Aws.Sagemaker.Inputs.ModelVpcConfigArgs

Specifies the VPC that you want your model to connect to. VpcConfig is used in hosting services and in batch transform.

Arn string

The Amazon Resource Name (ARN) assigned by AWS to this model.

Containers []ModelContainerArgs

Specifies containers in the inference pipeline. If not specified, the primary_container argument is required. Fields are documented below.

EnableNetworkIsolation bool

Isolates the model container. No inbound or outbound network calls can be made to or from the model container.

ExecutionRoleArn string

A role that SageMaker can assume to access model artifacts and docker images for deployment.

InferenceExecutionConfig ModelInferenceExecutionConfigArgs

Specifies details of how containers in a multi-container endpoint are called. see Inference Execution Config.

Name string

The name of the model (must be unique). If omitted, this provider will assign a random, unique name.

PrimaryContainer ModelPrimaryContainerArgs

The primary docker image containing inference code that is used when the model is deployed for predictions. If not specified, the container argument is required. Fields are documented below.

Tags map[string]string

A map of tags to assign to the resource. If configured with a provider default_tags configuration block present, tags with matching keys will overwrite those defined at the provider-level.

TagsAll map[string]string

A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block.

VpcConfig ModelVpcConfigArgs

Specifies the VPC that you want your model to connect to. VpcConfig is used in hosting services and in batch transform.

arn String

The Amazon Resource Name (ARN) assigned by AWS to this model.

containers List<ModelContainerArgs>

Specifies containers in the inference pipeline. If not specified, the primary_container argument is required. Fields are documented below.

enableNetworkIsolation Boolean

Isolates the model container. No inbound or outbound network calls can be made to or from the model container.

executionRoleArn String

A role that SageMaker can assume to access model artifacts and docker images for deployment.

inferenceExecutionConfig ModelInferenceExecutionConfigArgs

Specifies details of how containers in a multi-container endpoint are called. see Inference Execution Config.

name String

The name of the model (must be unique). If omitted, this provider will assign a random, unique name.

primaryContainer ModelPrimaryContainerArgs

The primary docker image containing inference code that is used when the model is deployed for predictions. If not specified, the container argument is required. Fields are documented below.

tags Map<String,String>

A map of tags to assign to the resource. If configured with a provider default_tags configuration block present, tags with matching keys will overwrite those defined at the provider-level.

tagsAll Map<String,String>

A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block.

vpcConfig ModelVpcConfigArgs

Specifies the VPC that you want your model to connect to. VpcConfig is used in hosting services and in batch transform.

arn string

The Amazon Resource Name (ARN) assigned by AWS to this model.

containers ModelContainerArgs[]

Specifies containers in the inference pipeline. If not specified, the primary_container argument is required. Fields are documented below.

enableNetworkIsolation boolean

Isolates the model container. No inbound or outbound network calls can be made to or from the model container.

executionRoleArn string

A role that SageMaker can assume to access model artifacts and docker images for deployment.

inferenceExecutionConfig ModelInferenceExecutionConfigArgs

Specifies details of how containers in a multi-container endpoint are called. see Inference Execution Config.

name string

The name of the model (must be unique). If omitted, this provider will assign a random, unique name.

primaryContainer ModelPrimaryContainerArgs

The primary docker image containing inference code that is used when the model is deployed for predictions. If not specified, the container argument is required. Fields are documented below.

tags {[key: string]: string}

A map of tags to assign to the resource. If configured with a provider default_tags configuration block present, tags with matching keys will overwrite those defined at the provider-level.

tagsAll {[key: string]: string}

A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block.

vpcConfig ModelVpcConfigArgs

Specifies the VPC that you want your model to connect to. VpcConfig is used in hosting services and in batch transform.

arn str

The Amazon Resource Name (ARN) assigned by AWS to this model.

containers Sequence[ModelContainerArgs]

Specifies containers in the inference pipeline. If not specified, the primary_container argument is required. Fields are documented below.

enable_network_isolation bool

Isolates the model container. No inbound or outbound network calls can be made to or from the model container.

execution_role_arn str

A role that SageMaker can assume to access model artifacts and docker images for deployment.

inference_execution_config ModelInferenceExecutionConfigArgs

Specifies details of how containers in a multi-container endpoint are called. see Inference Execution Config.

name str

The name of the model (must be unique). If omitted, this provider will assign a random, unique name.

primary_container ModelPrimaryContainerArgs

The primary docker image containing inference code that is used when the model is deployed for predictions. If not specified, the container argument is required. Fields are documented below.

tags Mapping[str, str]

A map of tags to assign to the resource. If configured with a provider default_tags configuration block present, tags with matching keys will overwrite those defined at the provider-level.

tags_all Mapping[str, str]

A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block.

vpc_config ModelVpcConfigArgs

Specifies the VPC that you want your model to connect to. VpcConfig is used in hosting services and in batch transform.

arn String

The Amazon Resource Name (ARN) assigned by AWS to this model.

containers List<Property Map>

Specifies containers in the inference pipeline. If not specified, the primary_container argument is required. Fields are documented below.

enableNetworkIsolation Boolean

Isolates the model container. No inbound or outbound network calls can be made to or from the model container.

executionRoleArn String

A role that SageMaker can assume to access model artifacts and docker images for deployment.

inferenceExecutionConfig Property Map

Specifies details of how containers in a multi-container endpoint are called. see Inference Execution Config.

name String

The name of the model (must be unique). If omitted, this provider will assign a random, unique name.

primaryContainer Property Map

The primary docker image containing inference code that is used when the model is deployed for predictions. If not specified, the container argument is required. Fields are documented below.

tags Map<String>

A map of tags to assign to the resource. If configured with a provider default_tags configuration block present, tags with matching keys will overwrite those defined at the provider-level.

tagsAll Map<String>

A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block.

vpcConfig Property Map

Specifies the VPC that you want your model to connect to. VpcConfig is used in hosting services and in batch transform.

Supporting Types

ModelContainer

Image string

The registry path where the inference code image is stored in Amazon ECR.

ContainerHostname string

The DNS host name for the container.

Environment Dictionary<string, string>

Environment variables for the Docker container. A list of key value pairs.

ImageConfig Pulumi.Aws.Sagemaker.Inputs.ModelContainerImageConfig

Specifies whether the model container is in Amazon ECR or a private Docker registry accessible from your Amazon Virtual Private Cloud (VPC). For more information see Using a Private Docker Registry for Real-Time Inference Containers. see Image Config.

Mode string

Whether the container hosts a single model (SingleModel) or multiple models (MultiModel). The default value is SingleModel.

ModelDataUrl string

The URL for the S3 location where model artifacts are stored.

Image string

The registry path where the inference code image is stored in Amazon ECR.

ContainerHostname string

The DNS host name for the container.

Environment map[string]string

Environment variables for the Docker container. A list of key value pairs.

ImageConfig ModelContainerImageConfig

Specifies whether the model container is in Amazon ECR or a private Docker registry accessible from your Amazon Virtual Private Cloud (VPC). For more information see Using a Private Docker Registry for Real-Time Inference Containers. see Image Config.

Mode string

The container hosts value SingleModel/MultiModel. The default value is SingleModel.

ModelDataUrl string

The URL for the S3 location where model artifacts are stored.

image String

The registry path where the inference code image is stored in Amazon ECR.

containerHostname String

The DNS host name for the container.

environment Map<String,String>

Environment variables for the Docker container. A list of key value pairs.

imageConfig ModelContainerImageConfig

Specifies whether the model container is in Amazon ECR or a private Docker registry accessible from your Amazon Virtual Private Cloud (VPC). For more information see Using a Private Docker Registry for Real-Time Inference Containers. see Image Config.

mode String

The container hosts value SingleModel/MultiModel. The default value is SingleModel.

modelDataUrl String

The URL for the S3 location where model artifacts are stored.

image string

The registry path where the inference code image is stored in Amazon ECR.

containerHostname string

The DNS host name for the container.

environment {[key: string]: string}

Environment variables for the Docker container. A list of key value pairs.

imageConfig ModelContainerImageConfig

Specifies whether the model container is in Amazon ECR or a private Docker registry accessible from your Amazon Virtual Private Cloud (VPC). For more information see Using a Private Docker Registry for Real-Time Inference Containers. see Image Config.

mode string

The container hosts value SingleModel/MultiModel. The default value is SingleModel.

modelDataUrl string

The URL for the S3 location where model artifacts are stored.

image str

The registry path where the inference code image is stored in Amazon ECR.

container_hostname str

The DNS host name for the container.

environment Mapping[str, str]

Environment variables for the Docker container. A list of key value pairs.

image_config ModelContainerImageConfig

Specifies whether the model container is in Amazon ECR or a private Docker registry accessible from your Amazon Virtual Private Cloud (VPC). For more information see Using a Private Docker Registry for Real-Time Inference Containers. see Image Config.

mode str

The container hosts value SingleModel/MultiModel. The default value is SingleModel.

model_data_url str

The URL for the S3 location where model artifacts are stored.

image String

The registry path where the inference code image is stored in Amazon ECR.

containerHostname String

The DNS host name for the container.

environment Map<String>

Environment variables for the Docker container. A list of key value pairs.

imageConfig Property Map

Specifies whether the model container is in Amazon ECR or a private Docker registry accessible from your Amazon Virtual Private Cloud (VPC). For more information see Using a Private Docker Registry for Real-Time Inference Containers. see Image Config.

mode String

The container hosts value SingleModel/MultiModel. The default value is SingleModel.

modelDataUrl String

The URL for the S3 location where model artifacts are stored.

ModelContainerImageConfig

RepositoryAccessMode string

Specifies whether the model container is in Amazon ECR or a private Docker registry accessible from your Amazon Virtual Private Cloud (VPC). Allowed values are: Platform and Vpc.

RepositoryAuthConfig Pulumi.Aws.Sagemaker.Inputs.ModelContainerImageConfigRepositoryAuthConfig

Specifies an authentication configuration for the private docker registry where your model image is hosted. Specify a value for this property only if you specified Vpc as the value for the RepositoryAccessMode field, and the private Docker registry where the model image is hosted requires authentication. see Repository Auth Config.

RepositoryAccessMode string

Specifies whether the model container is in Amazon ECR or a private Docker registry accessible from your Amazon Virtual Private Cloud (VPC). Allowed values are: Platform and Vpc.

RepositoryAuthConfig ModelContainerImageConfigRepositoryAuthConfig

Specifies an authentication configuration for the private docker registry where your model image is hosted. Specify a value for this property only if you specified Vpc as the value for the RepositoryAccessMode field, and the private Docker registry where the model image is hosted requires authentication. see Repository Auth Config.

repositoryAccessMode String

Specifies whether the model container is in Amazon ECR or a private Docker registry accessible from your Amazon Virtual Private Cloud (VPC). Allowed values are: Platform and Vpc.

repositoryAuthConfig ModelContainerImageConfigRepositoryAuthConfig

Specifies an authentication configuration for the private docker registry where your model image is hosted. Specify a value for this property only if you specified Vpc as the value for the RepositoryAccessMode field, and the private Docker registry where the model image is hosted requires authentication. see Repository Auth Config.

repositoryAccessMode string

Specifies whether the model container is in Amazon ECR or a private Docker registry accessible from your Amazon Virtual Private Cloud (VPC). Allowed values are: Platform and Vpc.

repositoryAuthConfig ModelContainerImageConfigRepositoryAuthConfig

Specifies an authentication configuration for the private docker registry where your model image is hosted. Specify a value for this property only if you specified Vpc as the value for the RepositoryAccessMode field, and the private Docker registry where the model image is hosted requires authentication. see Repository Auth Config.

repository_access_mode str

Specifies whether the model container is in Amazon ECR or a private Docker registry accessible from your Amazon Virtual Private Cloud (VPC). Allowed values are: Platform and Vpc.

repository_auth_config ModelContainerImageConfigRepositoryAuthConfig

Specifies an authentication configuration for the private docker registry where your model image is hosted. Specify a value for this property only if you specified Vpc as the value for the RepositoryAccessMode field, and the private Docker registry where the model image is hosted requires authentication. see Repository Auth Config.

repositoryAccessMode String

Specifies whether the model container is in Amazon ECR or a private Docker registry accessible from your Amazon Virtual Private Cloud (VPC). Allowed values are: Platform and Vpc.

repositoryAuthConfig Property Map

Specifies an authentication configuration for the private docker registry where your model image is hosted. Specify a value for this property only if you specified Vpc as the value for the RepositoryAccessMode field, and the private Docker registry where the model image is hosted requires authentication. see Repository Auth Config.

ModelContainerImageConfigRepositoryAuthConfig

RepositoryCredentialsProviderArn string

The Amazon Resource Name (ARN) of an AWS Lambda function that provides credentials to authenticate to the private Docker registry where your model image is hosted. For information about how to create an AWS Lambda function, see Create a Lambda function with the console in the AWS Lambda Developer Guide.

RepositoryCredentialsProviderArn string

The Amazon Resource Name (ARN) of an AWS Lambda function that provides credentials to authenticate to the private Docker registry where your model image is hosted. For information about how to create an AWS Lambda function, see Create a Lambda function with the console in the AWS Lambda Developer Guide.

repositoryCredentialsProviderArn String

The Amazon Resource Name (ARN) of an AWS Lambda function that provides credentials to authenticate to the private Docker registry where your model image is hosted. For information about how to create an AWS Lambda function, see Create a Lambda function with the console in the AWS Lambda Developer Guide.

repositoryCredentialsProviderArn string

The Amazon Resource Name (ARN) of an AWS Lambda function that provides credentials to authenticate to the private Docker registry where your model image is hosted. For information about how to create an AWS Lambda function, see Create a Lambda function with the console in the AWS Lambda Developer Guide.

repository_credentials_provider_arn str

The Amazon Resource Name (ARN) of an AWS Lambda function that provides credentials to authenticate to the private Docker registry where your model image is hosted. For information about how to create an AWS Lambda function, see Create a Lambda function with the console in the AWS Lambda Developer Guide.

repositoryCredentialsProviderArn String

The Amazon Resource Name (ARN) of an AWS Lambda function that provides credentials to authenticate to the private Docker registry where your model image is hosted. For information about how to create an AWS Lambda function, see Create a Lambda function with the console in the AWS Lambda Developer Guide.

ModelInferenceExecutionConfig

Mode string

How containers in a multi-container endpoint are run. The following values are valid: Serial and Direct.

Mode string

How containers in a multi-container endpoint are run. The following values are valid: Serial and Direct.

mode String

How containers in a multi-container endpoint are run. The following values are valid: Serial and Direct.

mode string

How containers in a multi-container endpoint are run. The following values are valid: Serial and Direct.

mode str

How containers in a multi-container endpoint are run. The following values are valid: Serial and Direct.

mode String

How containers in a multi-container endpoint are run. The following values are valid: Serial and Direct.

ModelPrimaryContainer

Image string

The registry path where the inference code image is stored in Amazon ECR.

ContainerHostname string

The DNS host name for the container.

Environment Dictionary<string, string>

Environment variables for the Docker container. A list of key value pairs.

ImageConfig Pulumi.Aws.Sagemaker.Inputs.ModelPrimaryContainerImageConfig

Specifies whether the model container is in Amazon ECR or a private Docker registry accessible from your Amazon Virtual Private Cloud (VPC). For more information see Using a Private Docker Registry for Real-Time Inference Containers. see Image Config.

Mode string

The container hosts value SingleModel/MultiModel. The default value is SingleModel.

ModelDataUrl string

The URL for the S3 location where model artifacts are stored.

Image string

The registry path where the inference code image is stored in Amazon ECR.

ContainerHostname string

The DNS host name for the container.

Environment map[string]string

Environment variables for the Docker container. A list of key value pairs.

ImageConfig ModelPrimaryContainerImageConfig

Specifies whether the model container is in Amazon ECR or a private Docker registry accessible from your Amazon Virtual Private Cloud (VPC). For more information see Using a Private Docker Registry for Real-Time Inference Containers. see Image Config.

Mode string

The container hosts value SingleModel/MultiModel. The default value is SingleModel.

ModelDataUrl string

The URL for the S3 location where model artifacts are stored.

image String

The registry path where the inference code image is stored in Amazon ECR.

containerHostname String

The DNS host name for the container.

environment Map<String,String>

Environment variables for the Docker container. A list of key value pairs.

imageConfig ModelPrimaryContainerImageConfig

Specifies whether the model container is in Amazon ECR or a private Docker registry accessible from your Amazon Virtual Private Cloud (VPC). For more information see Using a Private Docker Registry for Real-Time Inference Containers. see Image Config.

mode String

The container hosts value SingleModel/MultiModel. The default value is SingleModel.

modelDataUrl String

The URL for the S3 location where model artifacts are stored.

image string

The registry path where the inference code image is stored in Amazon ECR.

containerHostname string

The DNS host name for the container.

environment {[key: string]: string}

Environment variables for the Docker container. A list of key value pairs.

imageConfig ModelPrimaryContainerImageConfig

Specifies whether the model container is in Amazon ECR or a private Docker registry accessible from your Amazon Virtual Private Cloud (VPC). For more information see Using a Private Docker Registry for Real-Time Inference Containers. see Image Config.

mode string

The container hosts value SingleModel/MultiModel. The default value is SingleModel.

modelDataUrl string

The URL for the S3 location where model artifacts are stored.

image str

The registry path where the inference code image is stored in Amazon ECR.

container_hostname str

The DNS host name for the container.

environment Mapping[str, str]

Environment variables for the Docker container. A list of key value pairs.

image_config ModelPrimaryContainerImageConfig

Specifies whether the model container is in Amazon ECR or a private Docker registry accessible from your Amazon Virtual Private Cloud (VPC). For more information see Using a Private Docker Registry for Real-Time Inference Containers. see Image Config.

mode str

The container hosts value SingleModel/MultiModel. The default value is SingleModel.

model_data_url str

The URL for the S3 location where model artifacts are stored.

image String

The registry path where the inference code image is stored in Amazon ECR.

containerHostname String

The DNS host name for the container.

environment Map<String>

Environment variables for the Docker container. A list of key value pairs.

imageConfig Property Map

Specifies whether the model container is in Amazon ECR or a private Docker registry accessible from your Amazon Virtual Private Cloud (VPC). For more information see Using a Private Docker Registry for Real-Time Inference Containers. see Image Config.

mode String

The container hosts value SingleModel/MultiModel. The default value is SingleModel.

modelDataUrl String

The URL for the S3 location where model artifacts are stored.

ModelPrimaryContainerImageConfig

RepositoryAccessMode string

Specifies whether the model container is in Amazon ECR or a private Docker registry accessible from your Amazon Virtual Private Cloud (VPC). Allowed values are: Platform and Vpc.

RepositoryAuthConfig Pulumi.Aws.Sagemaker.Inputs.ModelPrimaryContainerImageConfigRepositoryAuthConfig

Specifies an authentication configuration for the private docker registry where your model image is hosted. Specify a value for this property only if you specified Vpc as the value for the RepositoryAccessMode field, and the private Docker registry where the model image is hosted requires authentication. see Repository Auth Config.

RepositoryAccessMode string

Specifies whether the model container is in Amazon ECR or a private Docker registry accessible from your Amazon Virtual Private Cloud (VPC). Allowed values are: Platform and Vpc.

RepositoryAuthConfig ModelPrimaryContainerImageConfigRepositoryAuthConfig

Specifies an authentication configuration for the private docker registry where your model image is hosted. Specify a value for this property only if you specified Vpc as the value for the RepositoryAccessMode field, and the private Docker registry where the model image is hosted requires authentication. see Repository Auth Config.

repositoryAccessMode String

Specifies whether the model container is in Amazon ECR or a private Docker registry accessible from your Amazon Virtual Private Cloud (VPC). Allowed values are: Platform and Vpc.

repositoryAuthConfig ModelPrimaryContainerImageConfigRepositoryAuthConfig

Specifies an authentication configuration for the private docker registry where your model image is hosted. Specify a value for this property only if you specified Vpc as the value for the RepositoryAccessMode field, and the private Docker registry where the model image is hosted requires authentication. see Repository Auth Config.

repositoryAccessMode string

Specifies whether the model container is in Amazon ECR or a private Docker registry accessible from your Amazon Virtual Private Cloud (VPC). Allowed values are: Platform and Vpc.

repositoryAuthConfig ModelPrimaryContainerImageConfigRepositoryAuthConfig

Specifies an authentication configuration for the private docker registry where your model image is hosted. Specify a value for this property only if you specified Vpc as the value for the RepositoryAccessMode field, and the private Docker registry where the model image is hosted requires authentication. see Repository Auth Config.

repository_access_mode str

Specifies whether the model container is in Amazon ECR or a private Docker registry accessible from your Amazon Virtual Private Cloud (VPC). Allowed values are: Platform and Vpc.

repository_auth_config ModelPrimaryContainerImageConfigRepositoryAuthConfig

Specifies an authentication configuration for the private docker registry where your model image is hosted. Specify a value for this property only if you specified Vpc as the value for the RepositoryAccessMode field, and the private Docker registry where the model image is hosted requires authentication. see Repository Auth Config.

repositoryAccessMode String

Specifies whether the model container is in Amazon ECR or a private Docker registry accessible from your Amazon Virtual Private Cloud (VPC). Allowed values are: Platform and Vpc.

repositoryAuthConfig Property Map

Specifies an authentication configuration for the private docker registry where your model image is hosted. Specify a value for this property only if you specified Vpc as the value for the RepositoryAccessMode field, and the private Docker registry where the model image is hosted requires authentication. see Repository Auth Config.

ModelPrimaryContainerImageConfigRepositoryAuthConfig

RepositoryCredentialsProviderArn string

The Amazon Resource Name (ARN) of an AWS Lambda function that provides credentials to authenticate to the private Docker registry where your model image is hosted. For information about how to create an AWS Lambda function, see Create a Lambda function with the console in the AWS Lambda Developer Guide.

RepositoryCredentialsProviderArn string

The Amazon Resource Name (ARN) of an AWS Lambda function that provides credentials to authenticate to the private Docker registry where your model image is hosted. For information about how to create an AWS Lambda function, see Create a Lambda function with the console in the AWS Lambda Developer Guide.

repositoryCredentialsProviderArn String

The Amazon Resource Name (ARN) of an AWS Lambda function that provides credentials to authenticate to the private Docker registry where your model image is hosted. For information about how to create an AWS Lambda function, see Create a Lambda function with the console in the AWS Lambda Developer Guide.

repositoryCredentialsProviderArn string

The Amazon Resource Name (ARN) of an AWS Lambda function that provides credentials to authenticate to the private Docker registry where your model image is hosted. For information about how to create an AWS Lambda function, see Create a Lambda function with the console in the AWS Lambda Developer Guide.

repository_credentials_provider_arn str

The Amazon Resource Name (ARN) of an AWS Lambda function that provides credentials to authenticate to the private Docker registry where your model image is hosted. For information about how to create an AWS Lambda function, see Create a Lambda function with the console in the AWS Lambda Developer Guide.

repositoryCredentialsProviderArn String

The Amazon Resource Name (ARN) of an AWS Lambda function that provides credentials to authenticate to the private Docker registry where your model image is hosted. For information about how to create an AWS Lambda function, see Create a Lambda function with the console in the AWS Lambda Developer Guide.

ModelVpcConfig

SecurityGroupIds List<string>
Subnets List<string>
SecurityGroupIds []string
Subnets []string
securityGroupIds List<String>
subnets List<String>
securityGroupIds string[]
subnets string[]
security_group_ids Sequence[str]
subnets Sequence[str]
securityGroupIds List<String>
subnets List<String>

Import

Models can be imported using the name, e.g.,

 $ pulumi import aws:sagemaker/model:Model test_model model-foo

Package Details

Repository
AWS Classic pulumi/pulumi-aws
License
Apache-2.0
Notes

This Pulumi package is based on the aws Terraform Provider.