1. Packages
  2. AWS
  3. API Docs
  4. sagemaker
  5. Model
AWS v6.63.0 published on Wednesday, Dec 4, 2024 by Pulumi

aws.sagemaker.Model

Explore with Pulumi AI

aws logo
AWS v6.63.0 published on Wednesday, Dec 4, 2024 by Pulumi

    Provides a SageMaker model resource.

    Example Usage

    Basic usage:

    import * as pulumi from "@pulumi/pulumi";
    import * as aws from "@pulumi/aws";
    
    const assumeRole = aws.iam.getPolicyDocument({
        statements: [{
            actions: ["sts:AssumeRole"],
            principals: [{
                type: "Service",
                identifiers: ["sagemaker.amazonaws.com"],
            }],
        }],
    });
    const exampleRole = new aws.iam.Role("example", {assumeRolePolicy: assumeRole.then(assumeRole => assumeRole.json)});
    const test = aws.sagemaker.getPrebuiltEcrImage({
        repositoryName: "kmeans",
    });
    const example = new aws.sagemaker.Model("example", {
        name: "my-model",
        executionRoleArn: exampleRole.arn,
        primaryContainer: {
            image: test.then(test => test.registryPath),
        },
    });
    
    import pulumi
    import pulumi_aws as aws
    
    assume_role = aws.iam.get_policy_document(statements=[{
        "actions": ["sts:AssumeRole"],
        "principals": [{
            "type": "Service",
            "identifiers": ["sagemaker.amazonaws.com"],
        }],
    }])
    example_role = aws.iam.Role("example", assume_role_policy=assume_role.json)
    test = aws.sagemaker.get_prebuilt_ecr_image(repository_name="kmeans")
    example = aws.sagemaker.Model("example",
        name="my-model",
        execution_role_arn=example_role.arn,
        primary_container={
            "image": test.registry_path,
        })
    
    package main
    
    import (
    	"github.com/pulumi/pulumi-aws/sdk/v6/go/aws/iam"
    	"github.com/pulumi/pulumi-aws/sdk/v6/go/aws/sagemaker"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		assumeRole, err := iam.GetPolicyDocument(ctx, &iam.GetPolicyDocumentArgs{
    			Statements: []iam.GetPolicyDocumentStatement{
    				{
    					Actions: []string{
    						"sts:AssumeRole",
    					},
    					Principals: []iam.GetPolicyDocumentStatementPrincipal{
    						{
    							Type: "Service",
    							Identifiers: []string{
    								"sagemaker.amazonaws.com",
    							},
    						},
    					},
    				},
    			},
    		}, nil)
    		if err != nil {
    			return err
    		}
    		exampleRole, err := iam.NewRole(ctx, "example", &iam.RoleArgs{
    			AssumeRolePolicy: pulumi.String(assumeRole.Json),
    		})
    		if err != nil {
    			return err
    		}
    		test, err := sagemaker.GetPrebuiltEcrImage(ctx, &sagemaker.GetPrebuiltEcrImageArgs{
    			RepositoryName: "kmeans",
    		}, nil)
    		if err != nil {
    			return err
    		}
    		_, err = sagemaker.NewModel(ctx, "example", &sagemaker.ModelArgs{
    			Name:             pulumi.String("my-model"),
    			ExecutionRoleArn: exampleRole.Arn,
    			PrimaryContainer: &sagemaker.ModelPrimaryContainerArgs{
    				Image: pulumi.String(test.RegistryPath),
    			},
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using Aws = Pulumi.Aws;
    
    return await Deployment.RunAsync(() => 
    {
        var assumeRole = Aws.Iam.GetPolicyDocument.Invoke(new()
        {
            Statements = new[]
            {
                new Aws.Iam.Inputs.GetPolicyDocumentStatementInputArgs
                {
                    Actions = new[]
                    {
                        "sts:AssumeRole",
                    },
                    Principals = new[]
                    {
                        new Aws.Iam.Inputs.GetPolicyDocumentStatementPrincipalInputArgs
                        {
                            Type = "Service",
                            Identifiers = new[]
                            {
                                "sagemaker.amazonaws.com",
                            },
                        },
                    },
                },
            },
        });
    
        var exampleRole = new Aws.Iam.Role("example", new()
        {
            AssumeRolePolicy = assumeRole.Apply(getPolicyDocumentResult => getPolicyDocumentResult.Json),
        });
    
        var test = Aws.Sagemaker.GetPrebuiltEcrImage.Invoke(new()
        {
            RepositoryName = "kmeans",
        });
    
        var example = new Aws.Sagemaker.Model("example", new()
        {
            Name = "my-model",
            ExecutionRoleArn = exampleRole.Arn,
            PrimaryContainer = new Aws.Sagemaker.Inputs.ModelPrimaryContainerArgs
            {
                Image = test.Apply(getPrebuiltEcrImageResult => getPrebuiltEcrImageResult.RegistryPath),
            },
        });
    
    });
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.aws.iam.IamFunctions;
    import com.pulumi.aws.iam.inputs.GetPolicyDocumentArgs;
    import com.pulumi.aws.iam.Role;
    import com.pulumi.aws.iam.RoleArgs;
    import com.pulumi.aws.sagemaker.SagemakerFunctions;
    import com.pulumi.aws.sagemaker.inputs.GetPrebuiltEcrImageArgs;
    import com.pulumi.aws.sagemaker.Model;
    import com.pulumi.aws.sagemaker.ModelArgs;
    import com.pulumi.aws.sagemaker.inputs.ModelPrimaryContainerArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            final var assumeRole = IamFunctions.getPolicyDocument(GetPolicyDocumentArgs.builder()
                .statements(GetPolicyDocumentStatementArgs.builder()
                    .actions("sts:AssumeRole")
                    .principals(GetPolicyDocumentStatementPrincipalArgs.builder()
                        .type("Service")
                        .identifiers("sagemaker.amazonaws.com")
                        .build())
                    .build())
                .build());
    
            var exampleRole = new Role("exampleRole", RoleArgs.builder()
                .assumeRolePolicy(assumeRole.applyValue(getPolicyDocumentResult -> getPolicyDocumentResult.json()))
                .build());
    
            final var test = SagemakerFunctions.getPrebuiltEcrImage(GetPrebuiltEcrImageArgs.builder()
                .repositoryName("kmeans")
                .build());
    
            var example = new Model("example", ModelArgs.builder()
                .name("my-model")
                .executionRoleArn(exampleRole.arn())
                .primaryContainer(ModelPrimaryContainerArgs.builder()
                    .image(test.applyValue(getPrebuiltEcrImageResult -> getPrebuiltEcrImageResult.registryPath()))
                    .build())
                .build());
    
        }
    }
    
    resources:
      example:
        type: aws:sagemaker:Model
        properties:
          name: my-model
          executionRoleArn: ${exampleRole.arn}
          primaryContainer:
            image: ${test.registryPath}
      exampleRole:
        type: aws:iam:Role
        name: example
        properties:
          assumeRolePolicy: ${assumeRole.json}
    variables:
      assumeRole:
        fn::invoke:
          Function: aws:iam:getPolicyDocument
          Arguments:
            statements:
              - actions:
                  - sts:AssumeRole
                principals:
                  - type: Service
                    identifiers:
                      - sagemaker.amazonaws.com
      test:
        fn::invoke:
          Function: aws:sagemaker:getPrebuiltEcrImage
          Arguments:
            repositoryName: kmeans
    

    Inference Execution Config

    • mode - (Required) How containers in a multi-container endpoint are run. The following values are valid: Serial and Direct.

    Create Model Resource

    Resources are created with functions called constructors. To learn more about declaring and configuring resources, see Resources.

    Constructor syntax

    new Model(name: string, args: ModelArgs, opts?: CustomResourceOptions);
    @overload
    def Model(resource_name: str,
              args: ModelArgs,
              opts: Optional[ResourceOptions] = None)
    
    @overload
    def Model(resource_name: str,
              opts: Optional[ResourceOptions] = None,
              execution_role_arn: Optional[str] = None,
              containers: Optional[Sequence[ModelContainerArgs]] = None,
              enable_network_isolation: Optional[bool] = None,
              inference_execution_config: Optional[ModelInferenceExecutionConfigArgs] = None,
              name: Optional[str] = None,
              primary_container: Optional[ModelPrimaryContainerArgs] = None,
              tags: Optional[Mapping[str, str]] = None,
              vpc_config: Optional[ModelVpcConfigArgs] = None)
    func NewModel(ctx *Context, name string, args ModelArgs, opts ...ResourceOption) (*Model, error)
    public Model(string name, ModelArgs args, CustomResourceOptions? opts = null)
    public Model(String name, ModelArgs args)
    public Model(String name, ModelArgs args, CustomResourceOptions options)
    
    type: aws:sagemaker:Model
    properties: # The arguments to resource properties.
    options: # Bag of options to control resource's behavior.
    
    

    Parameters

    name string
    The unique name of the resource.
    args ModelArgs
    The arguments to resource properties.
    opts CustomResourceOptions
    Bag of options to control resource's behavior.
    resource_name str
    The unique name of the resource.
    args ModelArgs
    The arguments to resource properties.
    opts ResourceOptions
    Bag of options to control resource's behavior.
    ctx Context
    Context object for the current deployment.
    name string
    The unique name of the resource.
    args ModelArgs
    The arguments to resource properties.
    opts ResourceOption
    Bag of options to control resource's behavior.
    name string
    The unique name of the resource.
    args ModelArgs
    The arguments to resource properties.
    opts CustomResourceOptions
    Bag of options to control resource's behavior.
    name String
    The unique name of the resource.
    args ModelArgs
    The arguments to resource properties.
    options CustomResourceOptions
    Bag of options to control resource's behavior.

    Constructor example

    The following reference example uses placeholder values for all input properties.

    var examplemodelResourceResourceFromSagemakermodel = new Aws.Sagemaker.Model("examplemodelResourceResourceFromSagemakermodel", new()
    {
        ExecutionRoleArn = "string",
        Containers = new[]
        {
            new Aws.Sagemaker.Inputs.ModelContainerArgs
            {
                ContainerHostname = "string",
                Environment = 
                {
                    { "string", "string" },
                },
                Image = "string",
                ImageConfig = new Aws.Sagemaker.Inputs.ModelContainerImageConfigArgs
                {
                    RepositoryAccessMode = "string",
                    RepositoryAuthConfig = new Aws.Sagemaker.Inputs.ModelContainerImageConfigRepositoryAuthConfigArgs
                    {
                        RepositoryCredentialsProviderArn = "string",
                    },
                },
                InferenceSpecificationName = "string",
                Mode = "string",
                ModelDataSource = new Aws.Sagemaker.Inputs.ModelContainerModelDataSourceArgs
                {
                    S3DataSources = new[]
                    {
                        new Aws.Sagemaker.Inputs.ModelContainerModelDataSourceS3DataSourceArgs
                        {
                            CompressionType = "string",
                            S3DataType = "string",
                            S3Uri = "string",
                            ModelAccessConfig = new Aws.Sagemaker.Inputs.ModelContainerModelDataSourceS3DataSourceModelAccessConfigArgs
                            {
                                AcceptEula = false,
                            },
                        },
                    },
                },
                ModelDataUrl = "string",
                ModelPackageName = "string",
                MultiModelConfig = new Aws.Sagemaker.Inputs.ModelContainerMultiModelConfigArgs
                {
                    ModelCacheSetting = "string",
                },
            },
        },
        EnableNetworkIsolation = false,
        InferenceExecutionConfig = new Aws.Sagemaker.Inputs.ModelInferenceExecutionConfigArgs
        {
            Mode = "string",
        },
        Name = "string",
        PrimaryContainer = new Aws.Sagemaker.Inputs.ModelPrimaryContainerArgs
        {
            ContainerHostname = "string",
            Environment = 
            {
                { "string", "string" },
            },
            Image = "string",
            ImageConfig = new Aws.Sagemaker.Inputs.ModelPrimaryContainerImageConfigArgs
            {
                RepositoryAccessMode = "string",
                RepositoryAuthConfig = new Aws.Sagemaker.Inputs.ModelPrimaryContainerImageConfigRepositoryAuthConfigArgs
                {
                    RepositoryCredentialsProviderArn = "string",
                },
            },
            InferenceSpecificationName = "string",
            Mode = "string",
            ModelDataSource = new Aws.Sagemaker.Inputs.ModelPrimaryContainerModelDataSourceArgs
            {
                S3DataSources = new[]
                {
                    new Aws.Sagemaker.Inputs.ModelPrimaryContainerModelDataSourceS3DataSourceArgs
                    {
                        CompressionType = "string",
                        S3DataType = "string",
                        S3Uri = "string",
                        ModelAccessConfig = new Aws.Sagemaker.Inputs.ModelPrimaryContainerModelDataSourceS3DataSourceModelAccessConfigArgs
                        {
                            AcceptEula = false,
                        },
                    },
                },
            },
            ModelDataUrl = "string",
            ModelPackageName = "string",
            MultiModelConfig = new Aws.Sagemaker.Inputs.ModelPrimaryContainerMultiModelConfigArgs
            {
                ModelCacheSetting = "string",
            },
        },
        Tags = 
        {
            { "string", "string" },
        },
        VpcConfig = new Aws.Sagemaker.Inputs.ModelVpcConfigArgs
        {
            SecurityGroupIds = new[]
            {
                "string",
            },
            Subnets = new[]
            {
                "string",
            },
        },
    });
    
    example, err := sagemaker.NewModel(ctx, "examplemodelResourceResourceFromSagemakermodel", &sagemaker.ModelArgs{
    	ExecutionRoleArn: pulumi.String("string"),
    	Containers: sagemaker.ModelContainerArray{
    		&sagemaker.ModelContainerArgs{
    			ContainerHostname: pulumi.String("string"),
    			Environment: pulumi.StringMap{
    				"string": pulumi.String("string"),
    			},
    			Image: pulumi.String("string"),
    			ImageConfig: &sagemaker.ModelContainerImageConfigArgs{
    				RepositoryAccessMode: pulumi.String("string"),
    				RepositoryAuthConfig: &sagemaker.ModelContainerImageConfigRepositoryAuthConfigArgs{
    					RepositoryCredentialsProviderArn: pulumi.String("string"),
    				},
    			},
    			InferenceSpecificationName: pulumi.String("string"),
    			Mode:                       pulumi.String("string"),
    			ModelDataSource: &sagemaker.ModelContainerModelDataSourceArgs{
    				S3DataSources: sagemaker.ModelContainerModelDataSourceS3DataSourceArray{
    					&sagemaker.ModelContainerModelDataSourceS3DataSourceArgs{
    						CompressionType: pulumi.String("string"),
    						S3DataType:      pulumi.String("string"),
    						S3Uri:           pulumi.String("string"),
    						ModelAccessConfig: &sagemaker.ModelContainerModelDataSourceS3DataSourceModelAccessConfigArgs{
    							AcceptEula: pulumi.Bool(false),
    						},
    					},
    				},
    			},
    			ModelDataUrl:     pulumi.String("string"),
    			ModelPackageName: pulumi.String("string"),
    			MultiModelConfig: &sagemaker.ModelContainerMultiModelConfigArgs{
    				ModelCacheSetting: pulumi.String("string"),
    			},
    		},
    	},
    	EnableNetworkIsolation: pulumi.Bool(false),
    	InferenceExecutionConfig: &sagemaker.ModelInferenceExecutionConfigArgs{
    		Mode: pulumi.String("string"),
    	},
    	Name: pulumi.String("string"),
    	PrimaryContainer: &sagemaker.ModelPrimaryContainerArgs{
    		ContainerHostname: pulumi.String("string"),
    		Environment: pulumi.StringMap{
    			"string": pulumi.String("string"),
    		},
    		Image: pulumi.String("string"),
    		ImageConfig: &sagemaker.ModelPrimaryContainerImageConfigArgs{
    			RepositoryAccessMode: pulumi.String("string"),
    			RepositoryAuthConfig: &sagemaker.ModelPrimaryContainerImageConfigRepositoryAuthConfigArgs{
    				RepositoryCredentialsProviderArn: pulumi.String("string"),
    			},
    		},
    		InferenceSpecificationName: pulumi.String("string"),
    		Mode:                       pulumi.String("string"),
    		ModelDataSource: &sagemaker.ModelPrimaryContainerModelDataSourceArgs{
    			S3DataSources: sagemaker.ModelPrimaryContainerModelDataSourceS3DataSourceArray{
    				&sagemaker.ModelPrimaryContainerModelDataSourceS3DataSourceArgs{
    					CompressionType: pulumi.String("string"),
    					S3DataType:      pulumi.String("string"),
    					S3Uri:           pulumi.String("string"),
    					ModelAccessConfig: &sagemaker.ModelPrimaryContainerModelDataSourceS3DataSourceModelAccessConfigArgs{
    						AcceptEula: pulumi.Bool(false),
    					},
    				},
    			},
    		},
    		ModelDataUrl:     pulumi.String("string"),
    		ModelPackageName: pulumi.String("string"),
    		MultiModelConfig: &sagemaker.ModelPrimaryContainerMultiModelConfigArgs{
    			ModelCacheSetting: pulumi.String("string"),
    		},
    	},
    	Tags: pulumi.StringMap{
    		"string": pulumi.String("string"),
    	},
    	VpcConfig: &sagemaker.ModelVpcConfigArgs{
    		SecurityGroupIds: pulumi.StringArray{
    			pulumi.String("string"),
    		},
    		Subnets: pulumi.StringArray{
    			pulumi.String("string"),
    		},
    	},
    })
    
    var examplemodelResourceResourceFromSagemakermodel = new Model("examplemodelResourceResourceFromSagemakermodel", ModelArgs.builder()
        .executionRoleArn("string")
        .containers(ModelContainerArgs.builder()
            .containerHostname("string")
            .environment(Map.of("string", "string"))
            .image("string")
            .imageConfig(ModelContainerImageConfigArgs.builder()
                .repositoryAccessMode("string")
                .repositoryAuthConfig(ModelContainerImageConfigRepositoryAuthConfigArgs.builder()
                    .repositoryCredentialsProviderArn("string")
                    .build())
                .build())
            .inferenceSpecificationName("string")
            .mode("string")
            .modelDataSource(ModelContainerModelDataSourceArgs.builder()
                .s3DataSources(ModelContainerModelDataSourceS3DataSourceArgs.builder()
                    .compressionType("string")
                    .s3DataType("string")
                    .s3Uri("string")
                    .modelAccessConfig(ModelContainerModelDataSourceS3DataSourceModelAccessConfigArgs.builder()
                        .acceptEula(false)
                        .build())
                    .build())
                .build())
            .modelDataUrl("string")
            .modelPackageName("string")
            .multiModelConfig(ModelContainerMultiModelConfigArgs.builder()
                .modelCacheSetting("string")
                .build())
            .build())
        .enableNetworkIsolation(false)
        .inferenceExecutionConfig(ModelInferenceExecutionConfigArgs.builder()
            .mode("string")
            .build())
        .name("string")
        .primaryContainer(ModelPrimaryContainerArgs.builder()
            .containerHostname("string")
            .environment(Map.of("string", "string"))
            .image("string")
            .imageConfig(ModelPrimaryContainerImageConfigArgs.builder()
                .repositoryAccessMode("string")
                .repositoryAuthConfig(ModelPrimaryContainerImageConfigRepositoryAuthConfigArgs.builder()
                    .repositoryCredentialsProviderArn("string")
                    .build())
                .build())
            .inferenceSpecificationName("string")
            .mode("string")
            .modelDataSource(ModelPrimaryContainerModelDataSourceArgs.builder()
                .s3DataSources(ModelPrimaryContainerModelDataSourceS3DataSourceArgs.builder()
                    .compressionType("string")
                    .s3DataType("string")
                    .s3Uri("string")
                    .modelAccessConfig(ModelPrimaryContainerModelDataSourceS3DataSourceModelAccessConfigArgs.builder()
                        .acceptEula(false)
                        .build())
                    .build())
                .build())
            .modelDataUrl("string")
            .modelPackageName("string")
            .multiModelConfig(ModelPrimaryContainerMultiModelConfigArgs.builder()
                .modelCacheSetting("string")
                .build())
            .build())
        .tags(Map.of("string", "string"))
        .vpcConfig(ModelVpcConfigArgs.builder()
            .securityGroupIds("string")
            .subnets("string")
            .build())
        .build());
    
    examplemodel_resource_resource_from_sagemakermodel = aws.sagemaker.Model("examplemodelResourceResourceFromSagemakermodel",
        execution_role_arn="string",
        containers=[{
            "container_hostname": "string",
            "environment": {
                "string": "string",
            },
            "image": "string",
            "image_config": {
                "repository_access_mode": "string",
                "repository_auth_config": {
                    "repository_credentials_provider_arn": "string",
                },
            },
            "inference_specification_name": "string",
            "mode": "string",
            "model_data_source": {
                "s3_data_sources": [{
                    "compression_type": "string",
                    "s3_data_type": "string",
                    "s3_uri": "string",
                    "model_access_config": {
                        "accept_eula": False,
                    },
                }],
            },
            "model_data_url": "string",
            "model_package_name": "string",
            "multi_model_config": {
                "model_cache_setting": "string",
            },
        }],
        enable_network_isolation=False,
        inference_execution_config={
            "mode": "string",
        },
        name="string",
        primary_container={
            "container_hostname": "string",
            "environment": {
                "string": "string",
            },
            "image": "string",
            "image_config": {
                "repository_access_mode": "string",
                "repository_auth_config": {
                    "repository_credentials_provider_arn": "string",
                },
            },
            "inference_specification_name": "string",
            "mode": "string",
            "model_data_source": {
                "s3_data_sources": [{
                    "compression_type": "string",
                    "s3_data_type": "string",
                    "s3_uri": "string",
                    "model_access_config": {
                        "accept_eula": False,
                    },
                }],
            },
            "model_data_url": "string",
            "model_package_name": "string",
            "multi_model_config": {
                "model_cache_setting": "string",
            },
        },
        tags={
            "string": "string",
        },
        vpc_config={
            "security_group_ids": ["string"],
            "subnets": ["string"],
        })
    
    const examplemodelResourceResourceFromSagemakermodel = new aws.sagemaker.Model("examplemodelResourceResourceFromSagemakermodel", {
        executionRoleArn: "string",
        containers: [{
            containerHostname: "string",
            environment: {
                string: "string",
            },
            image: "string",
            imageConfig: {
                repositoryAccessMode: "string",
                repositoryAuthConfig: {
                    repositoryCredentialsProviderArn: "string",
                },
            },
            inferenceSpecificationName: "string",
            mode: "string",
            modelDataSource: {
                s3DataSources: [{
                    compressionType: "string",
                    s3DataType: "string",
                    s3Uri: "string",
                    modelAccessConfig: {
                        acceptEula: false,
                    },
                }],
            },
            modelDataUrl: "string",
            modelPackageName: "string",
            multiModelConfig: {
                modelCacheSetting: "string",
            },
        }],
        enableNetworkIsolation: false,
        inferenceExecutionConfig: {
            mode: "string",
        },
        name: "string",
        primaryContainer: {
            containerHostname: "string",
            environment: {
                string: "string",
            },
            image: "string",
            imageConfig: {
                repositoryAccessMode: "string",
                repositoryAuthConfig: {
                    repositoryCredentialsProviderArn: "string",
                },
            },
            inferenceSpecificationName: "string",
            mode: "string",
            modelDataSource: {
                s3DataSources: [{
                    compressionType: "string",
                    s3DataType: "string",
                    s3Uri: "string",
                    modelAccessConfig: {
                        acceptEula: false,
                    },
                }],
            },
            modelDataUrl: "string",
            modelPackageName: "string",
            multiModelConfig: {
                modelCacheSetting: "string",
            },
        },
        tags: {
            string: "string",
        },
        vpcConfig: {
            securityGroupIds: ["string"],
            subnets: ["string"],
        },
    });
    
    type: aws:sagemaker:Model
    properties:
        containers:
            - containerHostname: string
              environment:
                string: string
              image: string
              imageConfig:
                repositoryAccessMode: string
                repositoryAuthConfig:
                    repositoryCredentialsProviderArn: string
              inferenceSpecificationName: string
              mode: string
              modelDataSource:
                s3DataSources:
                    - compressionType: string
                      modelAccessConfig:
                        acceptEula: false
                      s3DataType: string
                      s3Uri: string
              modelDataUrl: string
              modelPackageName: string
              multiModelConfig:
                modelCacheSetting: string
        enableNetworkIsolation: false
        executionRoleArn: string
        inferenceExecutionConfig:
            mode: string
        name: string
        primaryContainer:
            containerHostname: string
            environment:
                string: string
            image: string
            imageConfig:
                repositoryAccessMode: string
                repositoryAuthConfig:
                    repositoryCredentialsProviderArn: string
            inferenceSpecificationName: string
            mode: string
            modelDataSource:
                s3DataSources:
                    - compressionType: string
                      modelAccessConfig:
                        acceptEula: false
                      s3DataType: string
                      s3Uri: string
            modelDataUrl: string
            modelPackageName: string
            multiModelConfig:
                modelCacheSetting: string
        tags:
            string: string
        vpcConfig:
            securityGroupIds:
                - string
            subnets:
                - string
    

    Model Resource Properties

    To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.

    Inputs

    In Python, inputs that are objects can be passed either as argument classes or as dictionary literals.

    The Model resource accepts the following input properties:

    ExecutionRoleArn string
    A role that SageMaker can assume to access model artifacts and docker images for deployment.
    Containers List<ModelContainer>
    Specifies containers in the inference pipeline. If not specified, the primary_container argument is required. Fields are documented below.
    EnableNetworkIsolation bool
    Isolates the model container. No inbound or outbound network calls can be made to or from the model container.
    InferenceExecutionConfig ModelInferenceExecutionConfig
    Specifies details of how containers in a multi-container endpoint are called. see Inference Execution Config.
    Name string
    The name of the model (must be unique). If omitted, this provider will assign a random, unique name.
    PrimaryContainer ModelPrimaryContainer
    The primary docker image containing inference code that is used when the model is deployed for predictions. If not specified, the containers argument is required. Fields are documented below.
    Tags Dictionary<string, string>
    A map of tags to assign to the resource. If configured with a provider default_tags configuration block present, tags with matching keys will overwrite those defined at the provider-level.
    VpcConfig ModelVpcConfig
    Specifies the VPC that you want your model to connect to. VpcConfig is used in hosting services and in batch transform.
    ExecutionRoleArn string
    A role that SageMaker can assume to access model artifacts and docker images for deployment.
    Containers []ModelContainerArgs
    Specifies containers in the inference pipeline. If not specified, the primary_container argument is required. Fields are documented below.
    EnableNetworkIsolation bool
    Isolates the model container. No inbound or outbound network calls can be made to or from the model container.
    InferenceExecutionConfig ModelInferenceExecutionConfigArgs
    Specifies details of how containers in a multi-container endpoint are called. see Inference Execution Config.
    Name string
    The name of the model (must be unique). If omitted, this provider will assign a random, unique name.
    PrimaryContainer ModelPrimaryContainerArgs
    The primary docker image containing inference code that is used when the model is deployed for predictions. If not specified, the containers argument is required. Fields are documented below.
    Tags map[string]string
    A map of tags to assign to the resource. If configured with a provider default_tags configuration block present, tags with matching keys will overwrite those defined at the provider-level.
    VpcConfig ModelVpcConfigArgs
    Specifies the VPC that you want your model to connect to. VpcConfig is used in hosting services and in batch transform.
    executionRoleArn String
    A role that SageMaker can assume to access model artifacts and docker images for deployment.
    containers List<ModelContainer>
    Specifies containers in the inference pipeline. If not specified, the primary_container argument is required. Fields are documented below.
    enableNetworkIsolation Boolean
    Isolates the model container. No inbound or outbound network calls can be made to or from the model container.
    inferenceExecutionConfig ModelInferenceExecutionConfig
    Specifies details of how containers in a multi-container endpoint are called. see Inference Execution Config.
    name String
    The name of the model (must be unique). If omitted, this provider will assign a random, unique name.
    primaryContainer ModelPrimaryContainer
    The primary docker image containing inference code that is used when the model is deployed for predictions. If not specified, the container argument is required. Fields are documented below.
    tags Map<String,String>
    A map of tags to assign to the resource. If configured with a provider default_tags configuration block present, tags with matching keys will overwrite those defined at the provider-level.
    vpcConfig ModelVpcConfig
    Specifies the VPC that you want your model to connect to. VpcConfig is used in hosting services and in batch transform.
    executionRoleArn string
    A role that SageMaker can assume to access model artifacts and docker images for deployment.
    containers ModelContainer[]
    Specifies containers in the inference pipeline. If not specified, the primary_container argument is required. Fields are documented below.
    enableNetworkIsolation boolean
    Isolates the model container. No inbound or outbound network calls can be made to or from the model container.
    inferenceExecutionConfig ModelInferenceExecutionConfig
    Specifies details of how containers in a multi-container endpoint are called. see Inference Execution Config.
    name string
    The name of the model (must be unique). If omitted, this provider will assign a random, unique name.
    primaryContainer ModelPrimaryContainer
    The primary docker image containing inference code that is used when the model is deployed for predictions. If not specified, the container argument is required. Fields are documented below.
    tags {[key: string]: string}
    A map of tags to assign to the resource. If configured with a provider default_tags configuration block present, tags with matching keys will overwrite those defined at the provider-level.
    vpcConfig ModelVpcConfig
    Specifies the VPC that you want your model to connect to. VpcConfig is used in hosting services and in batch transform.
    execution_role_arn str
    A role that SageMaker can assume to access model artifacts and docker images for deployment.
    containers Sequence[ModelContainerArgs]
    Specifies containers in the inference pipeline. If not specified, the primary_container argument is required. Fields are documented below.
    enable_network_isolation bool
    Isolates the model container. No inbound or outbound network calls can be made to or from the model container.
    inference_execution_config ModelInferenceExecutionConfigArgs
    Specifies details of how containers in a multi-container endpoint are called. see Inference Execution Config.
    name str
    The name of the model (must be unique). If omitted, this provider will assign a random, unique name.
    primary_container ModelPrimaryContainerArgs
    The primary docker image containing inference code that is used when the model is deployed for predictions. If not specified, the container argument is required. Fields are documented below.
    tags Mapping[str, str]
    A map of tags to assign to the resource. If configured with a provider default_tags configuration block present, tags with matching keys will overwrite those defined at the provider-level.
    vpc_config ModelVpcConfigArgs
    Specifies the VPC that you want your model to connect to. VpcConfig is used in hosting services and in batch transform.
    executionRoleArn String
    A role that SageMaker can assume to access model artifacts and docker images for deployment.
    containers List<Property Map>
    Specifies containers in the inference pipeline. If not specified, the primary_container argument is required. Fields are documented below.
    enableNetworkIsolation Boolean
    Isolates the model container. No inbound or outbound network calls can be made to or from the model container.
    inferenceExecutionConfig Property Map
    Specifies details of how containers in a multi-container endpoint are called. see Inference Execution Config.
    name String
    The name of the model (must be unique). If omitted, this provider will assign a random, unique name.
    primaryContainer Property Map
    The primary docker image containing inference code that is used when the model is deployed for predictions. If not specified, the container argument is required. Fields are documented below.
    tags Map<String>
    A map of tags to assign to the resource. If configured with a provider default_tags configuration block present, tags with matching keys will overwrite those defined at the provider-level.
    vpcConfig Property Map
    Specifies the VPC that you want your model to connect to. VpcConfig is used in hosting services and in batch transform.

    Outputs

    All input properties are implicitly available as output properties. Additionally, the Model resource produces the following output properties:

    Arn string
    The Amazon Resource Name (ARN) assigned by AWS to this model.
    Id string
    The provider-assigned unique ID for this managed resource.
    TagsAll Dictionary<string, string>
    A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block.

    Deprecated: Please use tags instead.

    Arn string
    The Amazon Resource Name (ARN) assigned by AWS to this model.
    Id string
    The provider-assigned unique ID for this managed resource.
    TagsAll map[string]string
    A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block.

    Deprecated: Please use tags instead.

    arn String
    The Amazon Resource Name (ARN) assigned by AWS to this model.
    id String
    The provider-assigned unique ID for this managed resource.
    tagsAll Map<String,String>
    A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block.

    Deprecated: Please use tags instead.

    arn string
    The Amazon Resource Name (ARN) assigned by AWS to this model.
    id string
    The provider-assigned unique ID for this managed resource.
    tagsAll {[key: string]: string}
    A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block.

    Deprecated: Please use tags instead.

    arn str
    The Amazon Resource Name (ARN) assigned by AWS to this model.
    id str
    The provider-assigned unique ID for this managed resource.
    tags_all Mapping[str, str]
    A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block.

    Deprecated: Please use tags instead.

    arn String
    The Amazon Resource Name (ARN) assigned by AWS to this model.
    id String
    The provider-assigned unique ID for this managed resource.
    tagsAll Map<String>
    A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block.

    Deprecated: Please use tags instead.

    Look up Existing Model Resource

    Get an existing Model resource’s state with the given name, ID, and optional extra properties used to qualify the lookup.

    public static get(name: string, id: Input<ID>, state?: ModelState, opts?: CustomResourceOptions): Model
    @staticmethod
    def get(resource_name: str,
            id: str,
            opts: Optional[ResourceOptions] = None,
            arn: Optional[str] = None,
            containers: Optional[Sequence[ModelContainerArgs]] = None,
            enable_network_isolation: Optional[bool] = None,
            execution_role_arn: Optional[str] = None,
            inference_execution_config: Optional[ModelInferenceExecutionConfigArgs] = None,
            name: Optional[str] = None,
            primary_container: Optional[ModelPrimaryContainerArgs] = None,
            tags: Optional[Mapping[str, str]] = None,
            tags_all: Optional[Mapping[str, str]] = None,
            vpc_config: Optional[ModelVpcConfigArgs] = None) -> Model
    func GetModel(ctx *Context, name string, id IDInput, state *ModelState, opts ...ResourceOption) (*Model, error)
    public static Model Get(string name, Input<string> id, ModelState? state, CustomResourceOptions? opts = null)
    public static Model get(String name, Output<String> id, ModelState state, CustomResourceOptions options)
    Resource lookup is not supported in YAML
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    resource_name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    The following state arguments are supported:
    Arn string
    The Amazon Resource Name (ARN) assigned by AWS to this model.
    Containers List<ModelContainer>
    Specifies containers in the inference pipeline. If not specified, the primary_container argument is required. Fields are documented below.
    EnableNetworkIsolation bool
    Isolates the model container. No inbound or outbound network calls can be made to or from the model container.
    ExecutionRoleArn string
    A role that SageMaker can assume to access model artifacts and docker images for deployment.
    InferenceExecutionConfig ModelInferenceExecutionConfig
    Specifies details of how containers in a multi-container endpoint are called. see Inference Execution Config.
    Name string
    The name of the model (must be unique). If omitted, this provider will assign a random, unique name.
    PrimaryContainer ModelPrimaryContainer
    The primary docker image containing inference code that is used when the model is deployed for predictions. If not specified, the container argument is required. Fields are documented below.
    Tags Dictionary<string, string>
    A map of tags to assign to the resource. If configured with a provider default_tags configuration block present, tags with matching keys will overwrite those defined at the provider-level.
    TagsAll Dictionary<string, string>
    A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block.

    Deprecated: Please use tags instead.

    VpcConfig ModelVpcConfig
    Specifies the VPC that you want your model to connect to. VpcConfig is used in hosting services and in batch transform.
    Arn string
    The Amazon Resource Name (ARN) assigned by AWS to this model.
    Containers []ModelContainerArgs
    Specifies containers in the inference pipeline. If not specified, the primary_container argument is required. Fields are documented below.
    EnableNetworkIsolation bool
    Isolates the model container. No inbound or outbound network calls can be made to or from the model container.
    ExecutionRoleArn string
    A role that SageMaker can assume to access model artifacts and docker images for deployment.
    InferenceExecutionConfig ModelInferenceExecutionConfigArgs
    Specifies details of how containers in a multi-container endpoint are called. see Inference Execution Config.
    Name string
    The name of the model (must be unique). If omitted, this provider will assign a random, unique name.
    PrimaryContainer ModelPrimaryContainerArgs
    The primary docker image containing inference code that is used when the model is deployed for predictions. If not specified, the container argument is required. Fields are documented below.
    Tags map[string]string
    A map of tags to assign to the resource. If configured with a provider default_tags configuration block present, tags with matching keys will overwrite those defined at the provider-level.
    TagsAll map[string]string
    A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block.

    Deprecated: Please use tags instead.

    VpcConfig ModelVpcConfigArgs
    Specifies the VPC that you want your model to connect to. VpcConfig is used in hosting services and in batch transform.
    arn String
    The Amazon Resource Name (ARN) assigned by AWS to this model.
    containers List<ModelContainer>
    Specifies containers in the inference pipeline. If not specified, the primary_container argument is required. Fields are documented below.
    enableNetworkIsolation Boolean
    Isolates the model container. No inbound or outbound network calls can be made to or from the model container.
    executionRoleArn String
    A role that SageMaker can assume to access model artifacts and docker images for deployment.
    inferenceExecutionConfig ModelInferenceExecutionConfig
    Specifies details of how containers in a multi-container endpoint are called. see Inference Execution Config.
    name String
    The name of the model (must be unique). If omitted, this provider will assign a random, unique name.
    primaryContainer ModelPrimaryContainer
    The primary docker image containing inference code that is used when the model is deployed for predictions. If not specified, the container argument is required. Fields are documented below.
    tags Map<String,String>
    A map of tags to assign to the resource. If configured with a provider default_tags configuration block present, tags with matching keys will overwrite those defined at the provider-level.
    tagsAll Map<String,String>
    A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block.

    Deprecated: Please use tags instead.

    vpcConfig ModelVpcConfig
    Specifies the VPC that you want your model to connect to. VpcConfig is used in hosting services and in batch transform.
    arn string
    The Amazon Resource Name (ARN) assigned by AWS to this model.
    containers ModelContainer[]
    Specifies containers in the inference pipeline. If not specified, the primary_container argument is required. Fields are documented below.
    enableNetworkIsolation boolean
    Isolates the model container. No inbound or outbound network calls can be made to or from the model container.
    executionRoleArn string
    A role that SageMaker can assume to access model artifacts and docker images for deployment.
    inferenceExecutionConfig ModelInferenceExecutionConfig
    Specifies details of how containers in a multi-container endpoint are called. see Inference Execution Config.
    name string
    The name of the model (must be unique). If omitted, this provider will assign a random, unique name.
    primaryContainer ModelPrimaryContainer
    The primary docker image containing inference code that is used when the model is deployed for predictions. If not specified, the container argument is required. Fields are documented below.
    tags {[key: string]: string}
    A map of tags to assign to the resource. If configured with a provider default_tags configuration block present, tags with matching keys will overwrite those defined at the provider-level.
    tagsAll {[key: string]: string}
    A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block.

    Deprecated: Please use tags instead.

    vpcConfig ModelVpcConfig
    Specifies the VPC that you want your model to connect to. VpcConfig is used in hosting services and in batch transform.
    arn str
    The Amazon Resource Name (ARN) assigned by AWS to this model.
    containers Sequence[ModelContainerArgs]
    Specifies containers in the inference pipeline. If not specified, the primary_container argument is required. Fields are documented below.
    enable_network_isolation bool
    Isolates the model container. No inbound or outbound network calls can be made to or from the model container.
    execution_role_arn str
    A role that SageMaker can assume to access model artifacts and docker images for deployment.
    inference_execution_config ModelInferenceExecutionConfigArgs
    Specifies details of how containers in a multi-container endpoint are called. see Inference Execution Config.
    name str
    The name of the model (must be unique). If omitted, this provider will assign a random, unique name.
    primary_container ModelPrimaryContainerArgs
    The primary docker image containing inference code that is used when the model is deployed for predictions. If not specified, the container argument is required. Fields are documented below.
    tags Mapping[str, str]
    A map of tags to assign to the resource. If configured with a provider default_tags configuration block present, tags with matching keys will overwrite those defined at the provider-level.
    tags_all Mapping[str, str]
    A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block.

    Deprecated: Please use tags instead.

    vpc_config ModelVpcConfigArgs
    Specifies the VPC that you want your model to connect to. VpcConfig is used in hosting services and in batch transform.
    arn String
    The Amazon Resource Name (ARN) assigned by AWS to this model.
    containers List<Property Map>
    Specifies containers in the inference pipeline. If not specified, the primary_container argument is required. Fields are documented below.
    enableNetworkIsolation Boolean
    Isolates the model container. No inbound or outbound network calls can be made to or from the model container.
    executionRoleArn String
    A role that SageMaker can assume to access model artifacts and docker images for deployment.
    inferenceExecutionConfig Property Map
    Specifies details of how containers in a multi-container endpoint are called. see Inference Execution Config.
    name String
    The name of the model (must be unique). If omitted, this provider will assign a random, unique name.
    primaryContainer Property Map
    The primary docker image containing inference code that is used when the model is deployed for predictions. If not specified, the container argument is required. Fields are documented below.
    tags Map<String>
    A map of tags to assign to the resource. If configured with a provider default_tags configuration block present, tags with matching keys will overwrite those defined at the provider-level.
    tagsAll Map<String>
    A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block.

    Deprecated: Please use tags instead.

    vpcConfig Property Map
    Specifies the VPC that you want your model to connect to. VpcConfig is used in hosting services and in batch transform.

    Supporting Types

    ModelContainer, ModelContainerArgs

    ContainerHostname string
    The DNS host name for the container.
    Environment Dictionary<string, string>
    Environment variables for the Docker container. A list of key value pairs.
    Image string
    The registry path where the inference code image is stored in Amazon ECR.
    ImageConfig ModelContainerImageConfig
    Specifies whether the model container is in Amazon ECR or a private Docker registry accessible from your Amazon Virtual Private Cloud (VPC). For more information see Using a Private Docker Registry for Real-Time Inference Containers. see Image Config.
    InferenceSpecificationName string
    The inference specification name in the model package version.
    Mode string
    The hosting mode for the container. Valid values are SingleModel and MultiModel. The default value is SingleModel.
    ModelDataSource ModelContainerModelDataSource
    The location of model data to deploy. Use this for uncompressed model deployment. For information about how to deploy an uncompressed model, see Deploying uncompressed models in the AWS SageMaker Developer Guide.
    ModelDataUrl string
    The URL for the S3 location where model artifacts are stored.
    ModelPackageName string
    The Amazon Resource Name (ARN) of the model package to use to create the model.
    MultiModelConfig ModelContainerMultiModelConfig
    Specifies additional configuration for multi-model endpoints. see Multi Model Config.
    ContainerHostname string
    The DNS host name for the container.
    Environment map[string]string
    Environment variables for the Docker container. A list of key value pairs.
    Image string
    The registry path where the inference code image is stored in Amazon ECR.
    ImageConfig ModelContainerImageConfig
    Specifies whether the model container is in Amazon ECR or a private Docker registry accessible from your Amazon Virtual Private Cloud (VPC). For more information see Using a Private Docker Registry for Real-Time Inference Containers. see Image Config.
    InferenceSpecificationName string
    The inference specification name in the model package version.
    Mode string
    The hosting mode for the container. Valid values are SingleModel and MultiModel. The default value is SingleModel.
    ModelDataSource ModelContainerModelDataSource
    The location of model data to deploy. Use this for uncompressed model deployment. For information about how to deploy an uncompressed model, see Deploying uncompressed models in the AWS SageMaker Developer Guide.
    ModelDataUrl string
    The URL for the S3 location where model artifacts are stored.
    ModelPackageName string
    The Amazon Resource Name (ARN) of the model package to use to create the model.
    MultiModelConfig ModelContainerMultiModelConfig
    Specifies additional configuration for multi-model endpoints. see Multi Model Config.
    containerHostname String
    The DNS host name for the container.
    environment Map<String,String>
    Environment variables for the Docker container. A list of key value pairs.
    image String
    The registry path where the inference code image is stored in Amazon ECR.
    imageConfig ModelContainerImageConfig
    Specifies whether the model container is in Amazon ECR or a private Docker registry accessible from your Amazon Virtual Private Cloud (VPC). For more information see Using a Private Docker Registry for Real-Time Inference Containers. see Image Config.
    inferenceSpecificationName String
    The inference specification name in the model package version.
    mode String
    The hosting mode for the container. Valid values are SingleModel and MultiModel. The default value is SingleModel.
    modelDataSource ModelContainerModelDataSource
    The location of model data to deploy. Use this for uncompressed model deployment. For information about how to deploy an uncompressed model, see Deploying uncompressed models in the AWS SageMaker Developer Guide.
    modelDataUrl String
    The URL for the S3 location where model artifacts are stored.
    modelPackageName String
    The Amazon Resource Name (ARN) of the model package to use to create the model.
    multiModelConfig ModelContainerMultiModelConfig
    Specifies additional configuration for multi-model endpoints. see Multi Model Config.
    containerHostname string
    The DNS host name for the container.
    environment {[key: string]: string}
    Environment variables for the Docker container. A list of key value pairs.
    image string
    The registry path where the inference code image is stored in Amazon ECR.
    imageConfig ModelContainerImageConfig
    Specifies whether the model container is in Amazon ECR or a private Docker registry accessible from your Amazon Virtual Private Cloud (VPC). For more information see Using a Private Docker Registry for Real-Time Inference Containers. see Image Config.
    inferenceSpecificationName string
    The inference specification name in the model package version.
    mode string
    The hosting mode for the container. Valid values are SingleModel and MultiModel. The default value is SingleModel.
    modelDataSource ModelContainerModelDataSource
    The location of model data to deploy. Use this for uncompressed model deployment. For information about how to deploy an uncompressed model, see Deploying uncompressed models in the AWS SageMaker Developer Guide.
    modelDataUrl string
    The URL for the S3 location where model artifacts are stored.
    modelPackageName string
    The Amazon Resource Name (ARN) of the model package to use to create the model.
    multiModelConfig ModelContainerMultiModelConfig
    Specifies additional configuration for multi-model endpoints. see Multi Model Config.
    container_hostname str
    The DNS host name for the container.
    environment Mapping[str, str]
    Environment variables for the Docker container. A list of key value pairs.
    image str
    The registry path where the inference code image is stored in Amazon ECR.
    image_config ModelContainerImageConfig
    Specifies whether the model container is in Amazon ECR or a private Docker registry accessible from your Amazon Virtual Private Cloud (VPC). For more information see Using a Private Docker Registry for Real-Time Inference Containers. see Image Config.
    inference_specification_name str
    The inference specification name in the model package version.
    mode str
    The hosting mode for the container. Valid values are SingleModel and MultiModel. The default value is SingleModel.
    model_data_source ModelContainerModelDataSource
    The location of model data to deploy. Use this for uncompressed model deployment. For information about how to deploy an uncompressed model, see Deploying uncompressed models in the AWS SageMaker Developer Guide.
    model_data_url str
    The URL for the S3 location where model artifacts are stored.
    model_package_name str
    The Amazon Resource Name (ARN) of the model package to use to create the model.
    multi_model_config ModelContainerMultiModelConfig
    Specifies additional configuration for multi-model endpoints. see Multi Model Config.
    containerHostname String
    The DNS host name for the container.
    environment Map<String>
    Environment variables for the Docker container. A list of key value pairs.
    image String
    The registry path where the inference code image is stored in Amazon ECR.
    imageConfig Property Map
    Specifies whether the model container is in Amazon ECR or a private Docker registry accessible from your Amazon Virtual Private Cloud (VPC). For more information see Using a Private Docker Registry for Real-Time Inference Containers. see Image Config.
    inferenceSpecificationName String
    The inference specification name in the model package version.
    mode String
    The hosting mode for the container. Valid values are SingleModel and MultiModel. The default value is SingleModel.
    modelDataSource Property Map
    The location of model data to deploy. Use this for uncompressed model deployment. For information about how to deploy an uncompressed model, see Deploying uncompressed models in the AWS SageMaker Developer Guide.
    modelDataUrl String
    The URL for the S3 location where model artifacts are stored.
    modelPackageName String
    The Amazon Resource Name (ARN) of the model package to use to create the model.
    multiModelConfig Property Map
    Specifies additional configuration for multi-model endpoints. see Multi Model Config.

    ModelContainerImageConfig, ModelContainerImageConfigArgs

    RepositoryAccessMode string
    Specifies whether the model container is in Amazon ECR or a private Docker registry accessible from your Amazon Virtual Private Cloud (VPC). Allowed values are: Platform and Vpc.
    RepositoryAuthConfig ModelContainerImageConfigRepositoryAuthConfig
    Specifies an authentication configuration for the private docker registry where your model image is hosted. Specify a value for this property only if you specified Vpc as the value for the RepositoryAccessMode field, and the private Docker registry where the model image is hosted requires authentication. see Repository Auth Config.
    RepositoryAccessMode string
    Specifies whether the model container is in Amazon ECR or a private Docker registry accessible from your Amazon Virtual Private Cloud (VPC). Allowed values are: Platform and Vpc.
    RepositoryAuthConfig ModelContainerImageConfigRepositoryAuthConfig
    Specifies an authentication configuration for the private docker registry where your model image is hosted. Specify a value for this property only if you specified Vpc as the value for the RepositoryAccessMode field, and the private Docker registry where the model image is hosted requires authentication. see Repository Auth Config.
    repositoryAccessMode String
    Specifies whether the model container is in Amazon ECR or a private Docker registry accessible from your Amazon Virtual Private Cloud (VPC). Allowed values are: Platform and Vpc.
    repositoryAuthConfig ModelContainerImageConfigRepositoryAuthConfig
    Specifies an authentication configuration for the private docker registry where your model image is hosted. Specify a value for this property only if you specified Vpc as the value for the RepositoryAccessMode field, and the private Docker registry where the model image is hosted requires authentication. see Repository Auth Config.
    repositoryAccessMode string
    Specifies whether the model container is in Amazon ECR or a private Docker registry accessible from your Amazon Virtual Private Cloud (VPC). Allowed values are: Platform and Vpc.
    repositoryAuthConfig ModelContainerImageConfigRepositoryAuthConfig
    Specifies an authentication configuration for the private docker registry where your model image is hosted. Specify a value for this property only if you specified Vpc as the value for the RepositoryAccessMode field, and the private Docker registry where the model image is hosted requires authentication. see Repository Auth Config.
    repository_access_mode str
    Specifies whether the model container is in Amazon ECR or a private Docker registry accessible from your Amazon Virtual Private Cloud (VPC). Allowed values are: Platform and Vpc.
    repository_auth_config ModelContainerImageConfigRepositoryAuthConfig
    Specifies an authentication configuration for the private docker registry where your model image is hosted. Specify a value for this property only if you specified Vpc as the value for the RepositoryAccessMode field, and the private Docker registry where the model image is hosted requires authentication. see Repository Auth Config.
    repositoryAccessMode String
    Specifies whether the model container is in Amazon ECR or a private Docker registry accessible from your Amazon Virtual Private Cloud (VPC). Allowed values are: Platform and Vpc.
    repositoryAuthConfig Property Map
    Specifies an authentication configuration for the private Docker registry where your model image is hosted. Specify a value for this property only if you specified Vpc as the value for the RepositoryAccessMode field, and the private Docker registry where the model image is hosted requires authentication. See Repository Auth Config.

    ModelContainerImageConfigRepositoryAuthConfig, ModelContainerImageConfigRepositoryAuthConfigArgs

    RepositoryCredentialsProviderArn string
    The Amazon Resource Name (ARN) of an AWS Lambda function that provides credentials to authenticate to the private Docker registry where your model image is hosted. For information about how to create an AWS Lambda function, see Create a Lambda function with the console in the AWS Lambda Developer Guide.
    RepositoryCredentialsProviderArn string
    The Amazon Resource Name (ARN) of an AWS Lambda function that provides credentials to authenticate to the private Docker registry where your model image is hosted. For information about how to create an AWS Lambda function, see Create a Lambda function with the console in the AWS Lambda Developer Guide.
    repositoryCredentialsProviderArn String
    The Amazon Resource Name (ARN) of an AWS Lambda function that provides credentials to authenticate to the private Docker registry where your model image is hosted. For information about how to create an AWS Lambda function, see Create a Lambda function with the console in the AWS Lambda Developer Guide.
    repositoryCredentialsProviderArn string
    The Amazon Resource Name (ARN) of an AWS Lambda function that provides credentials to authenticate to the private Docker registry where your model image is hosted. For information about how to create an AWS Lambda function, see Create a Lambda function with the console in the AWS Lambda Developer Guide.
    repository_credentials_provider_arn str
    The Amazon Resource Name (ARN) of an AWS Lambda function that provides credentials to authenticate to the private Docker registry where your model image is hosted. For information about how to create an AWS Lambda function, see Create a Lambda function with the console in the AWS Lambda Developer Guide.
    repositoryCredentialsProviderArn String
    The Amazon Resource Name (ARN) of an AWS Lambda function that provides credentials to authenticate to the private Docker registry where your model image is hosted. For information about how to create an AWS Lambda function, see Create a Lambda function with the console in the AWS Lambda Developer Guide.

    ModelContainerModelDataSource, ModelContainerModelDataSourceArgs

    S3DataSources List<ModelContainerModelDataSourceS3DataSource>
    The S3 location of model data to deploy.
    S3DataSources []ModelContainerModelDataSourceS3DataSource
    The S3 location of model data to deploy.
    s3DataSources List<ModelContainerModelDataSourceS3DataSource>
    The S3 location of model data to deploy.
    s3DataSources ModelContainerModelDataSourceS3DataSource[]
    The S3 location of model data to deploy.
    s3DataSources List<Property Map>
    The S3 location of model data to deploy.

    ModelContainerModelDataSourceS3DataSource, ModelContainerModelDataSourceS3DataSourceArgs

    CompressionType string
    How the model data is prepared. Allowed values are: None and Gzip.
    S3DataType string
    The type of model data to deploy. Allowed values are: S3Object and S3Prefix.
    S3Uri string
    The S3 path of model data to deploy.
    ModelAccessConfig ModelContainerModelDataSourceS3DataSourceModelAccessConfig
    Specifies the access configuration file for the ML model. You can explicitly accept the model end-user license agreement (EULA) within the `model_access_config` configuration block. See Model Access Config.
    CompressionType string
    How the model data is prepared. Allowed values are: None and Gzip.
    S3DataType string
    The type of model data to deploy. Allowed values are: S3Object and S3Prefix.
    S3Uri string
    The S3 path of model data to deploy.
    ModelAccessConfig ModelContainerModelDataSourceS3DataSourceModelAccessConfig
    Specifies the access configuration file for the ML model. You can explicitly accept the model end-user license agreement (EULA) within the `model_access_config` configuration block. See Model Access Config.
    compressionType String
    How the model data is prepared. Allowed values are: None and Gzip.
    s3DataType String
    The type of model data to deploy. Allowed values are: S3Object and S3Prefix.
    s3Uri String
    The S3 path of model data to deploy.
    modelAccessConfig ModelContainerModelDataSourceS3DataSourceModelAccessConfig
    Specifies the access configuration file for the ML model. You can explicitly accept the model end-user license agreement (EULA) within the `model_access_config` configuration block. See Model Access Config.
    compressionType string
    How the model data is prepared. Allowed values are: None and Gzip.
    s3DataType string
    The type of model data to deploy. Allowed values are: S3Object and S3Prefix.
    s3Uri string
    The S3 path of model data to deploy.
    modelAccessConfig ModelContainerModelDataSourceS3DataSourceModelAccessConfig
    Specifies the access configuration file for the ML model. You can explicitly accept the model end-user license agreement (EULA) within the `model_access_config` configuration block. See Model Access Config.
    compression_type str
    How the model data is prepared. Allowed values are: None and Gzip.
    s3_data_type str
    The type of model data to deploy. Allowed values are: S3Object and S3Prefix.
    s3_uri str
    The S3 path of model data to deploy.
    model_access_config ModelContainerModelDataSourceS3DataSourceModelAccessConfig
    Specifies the access configuration file for the ML model. You can explicitly accept the model end-user license agreement (EULA) within the `model_access_config` configuration block. See Model Access Config.
    compressionType String
    How the model data is prepared. Allowed values are: None and Gzip.
    s3DataType String
    The type of model data to deploy. Allowed values are: S3Object and S3Prefix.
    s3Uri String
    The S3 path of model data to deploy.
    modelAccessConfig Property Map
    Specifies the access configuration file for the ML model. You can explicitly accept the model end-user license agreement (EULA) within the `model_access_config` configuration block. See Model Access Config.

    ModelContainerModelDataSourceS3DataSourceModelAccessConfig, ModelContainerModelDataSourceS3DataSourceModelAccessConfigArgs

    AcceptEula bool
    Specifies agreement to the model end-user license agreement (EULA). The AcceptEula value must be explicitly defined as true in order to accept the EULA that this model requires. You are responsible for reviewing and complying with any applicable license terms and making sure they are acceptable for your use case before downloading or using a model.
    AcceptEula bool
    Specifies agreement to the model end-user license agreement (EULA). The AcceptEula value must be explicitly defined as true in order to accept the EULA that this model requires. You are responsible for reviewing and complying with any applicable license terms and making sure they are acceptable for your use case before downloading or using a model.
    acceptEula Boolean
    Specifies agreement to the model end-user license agreement (EULA). The AcceptEula value must be explicitly defined as true in order to accept the EULA that this model requires. You are responsible for reviewing and complying with any applicable license terms and making sure they are acceptable for your use case before downloading or using a model.
    acceptEula boolean
    Specifies agreement to the model end-user license agreement (EULA). The AcceptEula value must be explicitly defined as true in order to accept the EULA that this model requires. You are responsible for reviewing and complying with any applicable license terms and making sure they are acceptable for your use case before downloading or using a model.
    accept_eula bool
    Specifies agreement to the model end-user license agreement (EULA). The AcceptEula value must be explicitly defined as true in order to accept the EULA that this model requires. You are responsible for reviewing and complying with any applicable license terms and making sure they are acceptable for your use case before downloading or using a model.
    acceptEula Boolean
    Specifies agreement to the model end-user license agreement (EULA). The AcceptEula value must be explicitly defined as true in order to accept the EULA that this model requires. You are responsible for reviewing and complying with any applicable license terms and making sure they are acceptable for your use case before downloading or using a model.

    ModelContainerMultiModelConfig, ModelContainerMultiModelConfigArgs

    ModelCacheSetting string
    Whether to cache models for a multi-model endpoint. By default, multi-model endpoints cache models so that a model does not have to be loaded into memory each time it is invoked. Some use cases do not benefit from model caching. For example, if an endpoint hosts a large number of models that are each invoked infrequently, the endpoint might perform better if you disable model caching. To disable model caching, set the value of this parameter to Disabled. Allowed values are: Enabled and Disabled.
    ModelCacheSetting string
    Whether to cache models for a multi-model endpoint. By default, multi-model endpoints cache models so that a model does not have to be loaded into memory each time it is invoked. Some use cases do not benefit from model caching. For example, if an endpoint hosts a large number of models that are each invoked infrequently, the endpoint might perform better if you disable model caching. To disable model caching, set the value of this parameter to Disabled. Allowed values are: Enabled and Disabled.
    modelCacheSetting String
    Whether to cache models for a multi-model endpoint. By default, multi-model endpoints cache models so that a model does not have to be loaded into memory each time it is invoked. Some use cases do not benefit from model caching. For example, if an endpoint hosts a large number of models that are each invoked infrequently, the endpoint might perform better if you disable model caching. To disable model caching, set the value of this parameter to Disabled. Allowed values are: Enabled and Disabled.
    modelCacheSetting string
    Whether to cache models for a multi-model endpoint. By default, multi-model endpoints cache models so that a model does not have to be loaded into memory each time it is invoked. Some use cases do not benefit from model caching. For example, if an endpoint hosts a large number of models that are each invoked infrequently, the endpoint might perform better if you disable model caching. To disable model caching, set the value of this parameter to Disabled. Allowed values are: Enabled and Disabled.
    model_cache_setting str
    Whether to cache models for a multi-model endpoint. By default, multi-model endpoints cache models so that a model does not have to be loaded into memory each time it is invoked. Some use cases do not benefit from model caching. For example, if an endpoint hosts a large number of models that are each invoked infrequently, the endpoint might perform better if you disable model caching. To disable model caching, set the value of this parameter to Disabled. Allowed values are: Enabled and Disabled.
    modelCacheSetting String
    Whether to cache models for a multi-model endpoint. By default, multi-model endpoints cache models so that a model does not have to be loaded into memory each time it is invoked. Some use cases do not benefit from model caching. For example, if an endpoint hosts a large number of models that are each invoked infrequently, the endpoint might perform better if you disable model caching. To disable model caching, set the value of this parameter to Disabled. Allowed values are: Enabled and Disabled.

    ModelInferenceExecutionConfig, ModelInferenceExecutionConfigArgs

    Mode string
    Specifies whether the container hosts a single model (SingleModel) or multiple models (MultiModel). The default value is SingleModel.
    Mode string
    Specifies whether the container hosts a single model (SingleModel) or multiple models (MultiModel). The default value is SingleModel.
    mode String
    Specifies whether the container hosts a single model (SingleModel) or multiple models (MultiModel). The default value is SingleModel.
    mode string
    Specifies whether the container hosts a single model (SingleModel) or multiple models (MultiModel). The default value is SingleModel.
    mode str
    Specifies whether the container hosts a single model (SingleModel) or multiple models (MultiModel). The default value is SingleModel.
    mode String
    Specifies whether the container hosts a single model (SingleModel) or multiple models (MultiModel). The default value is SingleModel.

    ModelPrimaryContainer, ModelPrimaryContainerArgs

    ContainerHostname string
    The DNS host name for the container.
    Environment Dictionary<string, string>
    Environment variables for the Docker container. A list of key value pairs.
    Image string
    The registry path where the inference code image is stored in Amazon ECR.
    ImageConfig ModelPrimaryContainerImageConfig
    Specifies whether the model container is in Amazon ECR or a private Docker registry accessible from your Amazon Virtual Private Cloud (VPC). For more information see Using a Private Docker Registry for Real-Time Inference Containers. see Image Config.
    InferenceSpecificationName string
    The inference specification name in the model package version.
    Mode string
    Specifies whether the container hosts a single model (SingleModel) or multiple models (MultiModel). The default value is SingleModel.
    ModelDataSource ModelPrimaryContainerModelDataSource
    The location of model data to deploy. Use this for uncompressed model deployment. For information about how to deploy an uncompressed model, see Deploying uncompressed models in the AWS SageMaker Developer Guide.
    ModelDataUrl string
    The URL for the S3 location where model artifacts are stored.
    ModelPackageName string
    The Amazon Resource Name (ARN) of the model package to use to create the model.
    MultiModelConfig ModelPrimaryContainerMultiModelConfig
    Specifies additional configuration for multi-model endpoints. see Multi Model Config.
    ContainerHostname string
    The DNS host name for the container.
    Environment map[string]string
    Environment variables for the Docker container. A list of key value pairs.
    Image string
    The registry path where the inference code image is stored in Amazon ECR.
    ImageConfig ModelPrimaryContainerImageConfig
    Specifies whether the model container is in Amazon ECR or a private Docker registry accessible from your Amazon Virtual Private Cloud (VPC). For more information see Using a Private Docker Registry for Real-Time Inference Containers. see Image Config.
    InferenceSpecificationName string
    The inference specification name in the model package version.
    Mode string
    Specifies whether the container hosts a single model (SingleModel) or multiple models (MultiModel). The default value is SingleModel.
    ModelDataSource ModelPrimaryContainerModelDataSource
    The location of model data to deploy. Use this for uncompressed model deployment. For information about how to deploy an uncompressed model, see Deploying uncompressed models in the AWS SageMaker Developer Guide.
    ModelDataUrl string
    The URL for the S3 location where model artifacts are stored.
    ModelPackageName string
    The Amazon Resource Name (ARN) of the model package to use to create the model.
    MultiModelConfig ModelPrimaryContainerMultiModelConfig
    Specifies additional configuration for multi-model endpoints. see Multi Model Config.
    containerHostname String
    The DNS host name for the container.
    environment Map<String,String>
    Environment variables for the Docker container. A list of key value pairs.
    image String
    The registry path where the inference code image is stored in Amazon ECR.
    imageConfig ModelPrimaryContainerImageConfig
    Specifies whether the model container is in Amazon ECR or a private Docker registry accessible from your Amazon Virtual Private Cloud (VPC). For more information see Using a Private Docker Registry for Real-Time Inference Containers. see Image Config.
    inferenceSpecificationName String
    The inference specification name in the model package version.
    mode String
    Specifies whether the container hosts a single model (SingleModel) or multiple models (MultiModel). The default value is SingleModel.
    modelDataSource ModelPrimaryContainerModelDataSource
    The location of model data to deploy. Use this for uncompressed model deployment. For information about how to deploy an uncompressed model, see Deploying uncompressed models in the AWS SageMaker Developer Guide.
    modelDataUrl String
    The URL for the S3 location where model artifacts are stored.
    modelPackageName String
    The Amazon Resource Name (ARN) of the model package to use to create the model.
    multiModelConfig ModelPrimaryContainerMultiModelConfig
    Specifies additional configuration for multi-model endpoints. see Multi Model Config.
    containerHostname string
    The DNS host name for the container.
    environment {[key: string]: string}
    Environment variables for the Docker container. A list of key value pairs.
    image string
    The registry path where the inference code image is stored in Amazon ECR.
    imageConfig ModelPrimaryContainerImageConfig
    Specifies whether the model container is in Amazon ECR or a private Docker registry accessible from your Amazon Virtual Private Cloud (VPC). For more information see Using a Private Docker Registry for Real-Time Inference Containers. see Image Config.
    inferenceSpecificationName string
    The inference specification name in the model package version.
    mode string
    Specifies whether the container hosts a single model (SingleModel) or multiple models (MultiModel). The default value is SingleModel.
    modelDataSource ModelPrimaryContainerModelDataSource
    The location of model data to deploy. Use this for uncompressed model deployment. For information about how to deploy an uncompressed model, see Deploying uncompressed models in the AWS SageMaker Developer Guide.
    modelDataUrl string
    The URL for the S3 location where model artifacts are stored.
    modelPackageName string
    The Amazon Resource Name (ARN) of the model package to use to create the model.
    multiModelConfig ModelPrimaryContainerMultiModelConfig
    Specifies additional configuration for multi-model endpoints. see Multi Model Config.
    container_hostname str
    The DNS host name for the container.
    environment Mapping[str, str]
    Environment variables for the Docker container. A list of key value pairs.
    image str
    The registry path where the inference code image is stored in Amazon ECR.
    image_config ModelPrimaryContainerImageConfig
    Specifies whether the model container is in Amazon ECR or a private Docker registry accessible from your Amazon Virtual Private Cloud (VPC). For more information see Using a Private Docker Registry for Real-Time Inference Containers. see Image Config.
    inference_specification_name str
    The inference specification name in the model package version.
    mode str
    Specifies whether the container hosts a single model (SingleModel) or multiple models (MultiModel). The default value is SingleModel.
    model_data_source ModelPrimaryContainerModelDataSource
    The location of model data to deploy. Use this for uncompressed model deployment. For information about how to deploy an uncompressed model, see Deploying uncompressed models in the AWS SageMaker Developer Guide.
    model_data_url str
    The URL for the S3 location where model artifacts are stored.
    model_package_name str
    The Amazon Resource Name (ARN) of the model package to use to create the model.
    multi_model_config ModelPrimaryContainerMultiModelConfig
    Specifies additional configuration for multi-model endpoints. see Multi Model Config.
    containerHostname String
    The DNS host name for the container.
    environment Map<String>
    Environment variables for the Docker container. A list of key value pairs.
    image String
    The registry path where the inference code image is stored in Amazon ECR.
    imageConfig Property Map
    Specifies whether the model container is in Amazon ECR or a private Docker registry accessible from your Amazon Virtual Private Cloud (VPC). For more information see Using a Private Docker Registry for Real-Time Inference Containers. see Image Config.
    inferenceSpecificationName String
    The inference specification name in the model package version.
    mode String
    Specifies whether the container hosts a single model (SingleModel) or multiple models (MultiModel). The default value is SingleModel.
    modelDataSource Property Map
    The location of model data to deploy. Use this for uncompressed model deployment. For information about how to deploy an uncompressed model, see Deploying uncompressed models in the AWS SageMaker Developer Guide.
    modelDataUrl String
    The URL for the S3 location where model artifacts are stored.
    modelPackageName String
    The Amazon Resource Name (ARN) of the model package to use to create the model.
    multiModelConfig Property Map
    Specifies additional configuration for multi-model endpoints. see Multi Model Config.

    ModelPrimaryContainerImageConfig, ModelPrimaryContainerImageConfigArgs

    RepositoryAccessMode string
    Specifies whether the model container is in Amazon ECR or a private Docker registry accessible from your Amazon Virtual Private Cloud (VPC). Allowed values are: Platform and Vpc.
    RepositoryAuthConfig ModelPrimaryContainerImageConfigRepositoryAuthConfig
    Specifies an authentication configuration for the private Docker registry where your model image is hosted. Specify a value for this property only if you specified Vpc as the value for the RepositoryAccessMode field, and the private Docker registry where the model image is hosted requires authentication. See Repository Auth Config.
    RepositoryAccessMode string
    Specifies whether the model container is in Amazon ECR or a private Docker registry accessible from your Amazon Virtual Private Cloud (VPC). Allowed values are: Platform and Vpc.
    RepositoryAuthConfig ModelPrimaryContainerImageConfigRepositoryAuthConfig
    Specifies an authentication configuration for the private Docker registry where your model image is hosted. Specify a value for this property only if you specified Vpc as the value for the RepositoryAccessMode field, and the private Docker registry where the model image is hosted requires authentication. See Repository Auth Config.
    repositoryAccessMode String
    Specifies whether the model container is in Amazon ECR or a private Docker registry accessible from your Amazon Virtual Private Cloud (VPC). Allowed values are: Platform and Vpc.
    repositoryAuthConfig ModelPrimaryContainerImageConfigRepositoryAuthConfig
    Specifies an authentication configuration for the private Docker registry where your model image is hosted. Specify a value for this property only if you specified Vpc as the value for the RepositoryAccessMode field, and the private Docker registry where the model image is hosted requires authentication. See Repository Auth Config.
    repositoryAccessMode string
    Specifies whether the model container is in Amazon ECR or a private Docker registry accessible from your Amazon Virtual Private Cloud (VPC). Allowed values are: Platform and Vpc.
    repositoryAuthConfig ModelPrimaryContainerImageConfigRepositoryAuthConfig
    Specifies an authentication configuration for the private Docker registry where your model image is hosted. Specify a value for this property only if you specified Vpc as the value for the RepositoryAccessMode field, and the private Docker registry where the model image is hosted requires authentication. See Repository Auth Config.
    repository_access_mode str
    Specifies whether the model container is in Amazon ECR or a private Docker registry accessible from your Amazon Virtual Private Cloud (VPC). Allowed values are: Platform and Vpc.
    repository_auth_config ModelPrimaryContainerImageConfigRepositoryAuthConfig
    Specifies an authentication configuration for the private Docker registry where your model image is hosted. Specify a value for this property only if you specified Vpc as the value for the RepositoryAccessMode field, and the private Docker registry where the model image is hosted requires authentication. See Repository Auth Config.
    repositoryAccessMode String
    Specifies whether the model container is in Amazon ECR or a private Docker registry accessible from your Amazon Virtual Private Cloud (VPC). Allowed values are: Platform and Vpc.
    repositoryAuthConfig Property Map
    Specifies an authentication configuration for the private Docker registry where your model image is hosted. Specify a value for this property only if you specified Vpc as the value for the RepositoryAccessMode field, and the private Docker registry where the model image is hosted requires authentication. See Repository Auth Config.

    ModelPrimaryContainerImageConfigRepositoryAuthConfig, ModelPrimaryContainerImageConfigRepositoryAuthConfigArgs

    RepositoryCredentialsProviderArn string
    The Amazon Resource Name (ARN) of an AWS Lambda function that provides credentials to authenticate to the private Docker registry where your model image is hosted. For information about how to create an AWS Lambda function, see Create a Lambda function with the console in the AWS Lambda Developer Guide.
    RepositoryCredentialsProviderArn string
    The Amazon Resource Name (ARN) of an AWS Lambda function that provides credentials to authenticate to the private Docker registry where your model image is hosted. For information about how to create an AWS Lambda function, see Create a Lambda function with the console in the AWS Lambda Developer Guide.
    repositoryCredentialsProviderArn String
    The Amazon Resource Name (ARN) of an AWS Lambda function that provides credentials to authenticate to the private Docker registry where your model image is hosted. For information about how to create an AWS Lambda function, see Create a Lambda function with the console in the AWS Lambda Developer Guide.
    repositoryCredentialsProviderArn string
    The Amazon Resource Name (ARN) of an AWS Lambda function that provides credentials to authenticate to the private Docker registry where your model image is hosted. For information about how to create an AWS Lambda function, see Create a Lambda function with the console in the AWS Lambda Developer Guide.
    repository_credentials_provider_arn str
    The Amazon Resource Name (ARN) of an AWS Lambda function that provides credentials to authenticate to the private Docker registry where your model image is hosted. For information about how to create an AWS Lambda function, see Create a Lambda function with the console in the AWS Lambda Developer Guide.
    repositoryCredentialsProviderArn String
    The Amazon Resource Name (ARN) of an AWS Lambda function that provides credentials to authenticate to the private Docker registry where your model image is hosted. For information about how to create an AWS Lambda function, see Create a Lambda function with the console in the AWS Lambda Developer Guide.

    ModelPrimaryContainerModelDataSource, ModelPrimaryContainerModelDataSourceArgs

    s3DataSources List<Property Map>
    The S3 location of model data to deploy.

    ModelPrimaryContainerModelDataSourceS3DataSource, ModelPrimaryContainerModelDataSourceS3DataSourceArgs

    CompressionType string
    How the model data is prepared. Allowed values are: None and Gzip.
    S3DataType string
    The type of model data to deploy. Allowed values are: S3Object and S3Prefix.
    S3Uri string
    The S3 path of model data to deploy.
    ModelAccessConfig ModelPrimaryContainerModelDataSourceS3DataSourceModelAccessConfig
    Specifies the access configuration file for the ML model. You can explicitly accept the model end-user license agreement (EULA) within the `model_access_config` configuration block. See Model Access Config.
    CompressionType string
    How the model data is prepared. Allowed values are: None and Gzip.
    S3DataType string
    The type of model data to deploy. Allowed values are: S3Object and S3Prefix.
    S3Uri string
    The S3 path of model data to deploy.
    ModelAccessConfig ModelPrimaryContainerModelDataSourceS3DataSourceModelAccessConfig
    Specifies the access configuration file for the ML model. You can explicitly accept the model end-user license agreement (EULA) within the `model_access_config` configuration block. See Model Access Config.
    compressionType String
    How the model data is prepared. Allowed values are: None and Gzip.
    s3DataType String
    The type of model data to deploy. Allowed values are: S3Object and S3Prefix.
    s3Uri String
    The S3 path of model data to deploy.
    modelAccessConfig ModelPrimaryContainerModelDataSourceS3DataSourceModelAccessConfig
    Specifies the access configuration file for the ML model. You can explicitly accept the model end-user license agreement (EULA) within the `model_access_config` configuration block. See Model Access Config.
    compressionType string
    How the model data is prepared. Allowed values are: None and Gzip.
    s3DataType string
    The type of model data to deploy. Allowed values are: S3Object and S3Prefix.
    s3Uri string
    The S3 path of model data to deploy.
    modelAccessConfig ModelPrimaryContainerModelDataSourceS3DataSourceModelAccessConfig
    Specifies the access configuration file for the ML model. You can explicitly accept the model end-user license agreement (EULA) within the `model_access_config` configuration block. See Model Access Config.
    compression_type str
    How the model data is prepared. Allowed values are: None and Gzip.
    s3_data_type str
    The type of model data to deploy. Allowed values are: S3Object and S3Prefix.
    s3_uri str
    The S3 path of model data to deploy.
    model_access_config ModelPrimaryContainerModelDataSourceS3DataSourceModelAccessConfig
    Specifies the access configuration file for the ML model. You can explicitly accept the model end-user license agreement (EULA) within the `model_access_config` configuration block. See Model Access Config.
    compressionType String
    How the model data is prepared. Allowed values are: None and Gzip.
    s3DataType String
    The type of model data to deploy. Allowed values are: S3Object and S3Prefix.
    s3Uri String
    The S3 path of model data to deploy.
    modelAccessConfig Property Map
    Specifies the access configuration file for the ML model. You can explicitly accept the model end-user license agreement (EULA) within the `model_access_config` configuration block. See Model Access Config.

    ModelPrimaryContainerModelDataSourceS3DataSourceModelAccessConfig, ModelPrimaryContainerModelDataSourceS3DataSourceModelAccessConfigArgs

    AcceptEula bool
    Specifies agreement to the model end-user license agreement (EULA). The AcceptEula value must be explicitly defined as true in order to accept the EULA that this model requires. You are responsible for reviewing and complying with any applicable license terms and making sure they are acceptable for your use case before downloading or using a model.
    AcceptEula bool
    Specifies agreement to the model end-user license agreement (EULA). The AcceptEula value must be explicitly defined as true in order to accept the EULA that this model requires. You are responsible for reviewing and complying with any applicable license terms and making sure they are acceptable for your use case before downloading or using a model.
    acceptEula Boolean
    Specifies agreement to the model end-user license agreement (EULA). The AcceptEula value must be explicitly defined as true in order to accept the EULA that this model requires. You are responsible for reviewing and complying with any applicable license terms and making sure they are acceptable for your use case before downloading or using a model.
    acceptEula boolean
    Specifies agreement to the model end-user license agreement (EULA). The AcceptEula value must be explicitly defined as true in order to accept the EULA that this model requires. You are responsible for reviewing and complying with any applicable license terms and making sure they are acceptable for your use case before downloading or using a model.
    accept_eula bool
    Specifies agreement to the model end-user license agreement (EULA). The AcceptEula value must be explicitly defined as true in order to accept the EULA that this model requires. You are responsible for reviewing and complying with any applicable license terms and making sure they are acceptable for your use case before downloading or using a model.
    acceptEula Boolean
    Specifies agreement to the model end-user license agreement (EULA). The AcceptEula value must be explicitly defined as true in order to accept the EULA that this model requires. You are responsible for reviewing and complying with any applicable license terms and making sure they are acceptable for your use case before downloading or using a model.

    ModelPrimaryContainerMultiModelConfig, ModelPrimaryContainerMultiModelConfigArgs

    ModelCacheSetting string
    Whether to cache models for a multi-model endpoint. By default, multi-model endpoints cache models so that a model does not have to be loaded into memory each time it is invoked. Some use cases do not benefit from model caching. For example, if an endpoint hosts a large number of models that are each invoked infrequently, the endpoint might perform better if you disable model caching. To disable model caching, set the value of this parameter to Disabled. Allowed values are: Enabled and Disabled.
    ModelCacheSetting string
    Whether to cache models for a multi-model endpoint. By default, multi-model endpoints cache models so that a model does not have to be loaded into memory each time it is invoked. Some use cases do not benefit from model caching. For example, if an endpoint hosts a large number of models that are each invoked infrequently, the endpoint might perform better if you disable model caching. To disable model caching, set the value of this parameter to Disabled. Allowed values are: Enabled and Disabled.
    modelCacheSetting String
    Whether to cache models for a multi-model endpoint. By default, multi-model endpoints cache models so that a model does not have to be loaded into memory each time it is invoked. Some use cases do not benefit from model caching. For example, if an endpoint hosts a large number of models that are each invoked infrequently, the endpoint might perform better if you disable model caching. To disable model caching, set the value of this parameter to Disabled. Allowed values are: Enabled and Disabled.
    modelCacheSetting string
    Whether to cache models for a multi-model endpoint. By default, multi-model endpoints cache models so that a model does not have to be loaded into memory each time it is invoked. Some use cases do not benefit from model caching. For example, if an endpoint hosts a large number of models that are each invoked infrequently, the endpoint might perform better if you disable model caching. To disable model caching, set the value of this parameter to Disabled. Allowed values are: Enabled and Disabled.
    model_cache_setting str
    Whether to cache models for a multi-model endpoint. By default, multi-model endpoints cache models so that a model does not have to be loaded into memory each time it is invoked. Some use cases do not benefit from model caching. For example, if an endpoint hosts a large number of models that are each invoked infrequently, the endpoint might perform better if you disable model caching. To disable model caching, set the value of this parameter to Disabled. Allowed values are: Enabled and Disabled.
    modelCacheSetting String
    Whether to cache models for a multi-model endpoint. By default, multi-model endpoints cache models so that a model does not have to be loaded into memory each time it is invoked. Some use cases do not benefit from model caching. For example, if an endpoint hosts a large number of models that are each invoked infrequently, the endpoint might perform better if you disable model caching. To disable model caching, set the value of this parameter to Disabled. Allowed values are: Enabled and Disabled.

    ModelVpcConfig, ModelVpcConfigArgs

    SecurityGroupIds List<string>
    Subnets List<string>
    SecurityGroupIds []string
    Subnets []string
    securityGroupIds List<String>
    subnets List<String>
    securityGroupIds string[]
    subnets string[]
    security_group_ids Sequence[str]
    subnets Sequence[str]
    securityGroupIds List<String>
    subnets List<String>

    Import

    Using pulumi import, import models using the name. For example:

    $ pulumi import aws:sagemaker/model:Model test_model model-foo
    

    To learn more about importing existing cloud resources, see Importing resources.

    Package Details

    Repository
    AWS Classic pulumi/pulumi-aws
    License
    Apache-2.0
    Notes
    This Pulumi package is based on the aws Terraform Provider.
    aws logo
    AWS v6.63.0 published on Wednesday, Dec 4, 2024 by Pulumi