Provides a SageMaker AI monitoring schedule resource.
Example Usage
Basic usage:
import * as pulumi from "@pulumi/pulumi";
import * as aws from "@pulumi/aws";
const test = new aws.sagemaker.MonitoringSchedule("test", {
name: "my-monitoring-schedule",
monitoringScheduleConfig: {
monitoringJobDefinitionName: testAwsSagemakerDataQualityJobDefinition.name,
monitoringType: "DataQuality",
},
});
import pulumi
import pulumi_aws as aws
test = aws.sagemaker.MonitoringSchedule("test",
name="my-monitoring-schedule",
monitoring_schedule_config={
"monitoring_job_definition_name": test_aws_sagemaker_data_quality_job_definition["name"],
"monitoring_type": "DataQuality",
})
package main
import (
"github.com/pulumi/pulumi-aws/sdk/v7/go/aws/sagemaker"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
_, err := sagemaker.NewMonitoringSchedule(ctx, "test", &sagemaker.MonitoringScheduleArgs{
Name: pulumi.String("my-monitoring-schedule"),
MonitoringScheduleConfig: &sagemaker.MonitoringScheduleMonitoringScheduleConfigArgs{
MonitoringJobDefinitionName: pulumi.Any(testAwsSagemakerDataQualityJobDefinition.Name),
MonitoringType: pulumi.String("DataQuality"),
},
})
if err != nil {
return err
}
return nil
})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Aws = Pulumi.Aws;
return await Deployment.RunAsync(() =>
{
var test = new Aws.Sagemaker.MonitoringSchedule("test", new()
{
Name = "my-monitoring-schedule",
MonitoringScheduleConfig = new Aws.Sagemaker.Inputs.MonitoringScheduleMonitoringScheduleConfigArgs
{
MonitoringJobDefinitionName = testAwsSagemakerDataQualityJobDefinition.Name,
MonitoringType = "DataQuality",
},
});
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.aws.sagemaker.MonitoringSchedule;
import com.pulumi.aws.sagemaker.MonitoringScheduleArgs;
import com.pulumi.aws.sagemaker.inputs.MonitoringScheduleMonitoringScheduleConfigArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
var test = new MonitoringSchedule("test", MonitoringScheduleArgs.builder()
.name("my-monitoring-schedule")
.monitoringScheduleConfig(MonitoringScheduleMonitoringScheduleConfigArgs.builder()
.monitoringJobDefinitionName(testAwsSagemakerDataQualityJobDefinition.name())
.monitoringType("DataQuality")
.build())
.build());
}
}
resources:
test:
type: aws:sagemaker:MonitoringSchedule
properties:
name: my-monitoring-schedule
monitoringScheduleConfig:
monitoringJobDefinitionName: ${testAwsSagemakerDataQualityJobDefinition.name}
monitoringType: DataQuality
Create MonitoringSchedule Resource
Resources are created with functions called constructors. To learn more about declaring and configuring resources, see Resources.
Constructor syntax
new MonitoringSchedule(name: string, args: MonitoringScheduleArgs, opts?: CustomResourceOptions);
@overload
def MonitoringSchedule(resource_name: str,
args: MonitoringScheduleArgs,
opts: Optional[ResourceOptions] = None)
@overload
def MonitoringSchedule(resource_name: str,
opts: Optional[ResourceOptions] = None,
monitoring_schedule_config: Optional[MonitoringScheduleMonitoringScheduleConfigArgs] = None,
name: Optional[str] = None,
region: Optional[str] = None,
tags: Optional[Mapping[str, str]] = None)
func NewMonitoringSchedule(ctx *Context, name string, args MonitoringScheduleArgs, opts ...ResourceOption) (*MonitoringSchedule, error)
public MonitoringSchedule(string name, MonitoringScheduleArgs args, CustomResourceOptions? opts = null)
public MonitoringSchedule(String name, MonitoringScheduleArgs args)
public MonitoringSchedule(String name, MonitoringScheduleArgs args, CustomResourceOptions options)
type: aws:sagemaker:MonitoringSchedule
properties: # The arguments to resource properties.
options: # Bag of options to control resource's behavior.
Parameters
- name string
- The unique name of the resource.
- args MonitoringScheduleArgs
- The arguments to resource properties.
- opts CustomResourceOptions
- Bag of options to control resource's behavior.
- resource_name str
- The unique name of the resource.
- args MonitoringScheduleArgs
- The arguments to resource properties.
- opts ResourceOptions
- Bag of options to control resource's behavior.
- ctx Context
- Context object for the current deployment.
- name string
- The unique name of the resource.
- args MonitoringScheduleArgs
- The arguments to resource properties.
- opts ResourceOption
- Bag of options to control resource's behavior.
- name string
- The unique name of the resource.
- args MonitoringScheduleArgs
- The arguments to resource properties.
- opts CustomResourceOptions
- Bag of options to control resource's behavior.
- name String
- The unique name of the resource.
- args MonitoringScheduleArgs
- The arguments to resource properties.
- options CustomResourceOptions
- Bag of options to control resource's behavior.
Constructor example
The following reference example uses placeholder values for all input properties.
var monitoringScheduleResource = new Aws.Sagemaker.MonitoringSchedule("monitoringScheduleResource", new()
{
MonitoringScheduleConfig = new Aws.Sagemaker.Inputs.MonitoringScheduleMonitoringScheduleConfigArgs
{
MonitoringType = "string",
MonitoringJobDefinition = new Aws.Sagemaker.Inputs.MonitoringScheduleMonitoringScheduleConfigMonitoringJobDefinitionArgs
{
MonitoringAppSpecification = new Aws.Sagemaker.Inputs.MonitoringScheduleMonitoringScheduleConfigMonitoringJobDefinitionMonitoringAppSpecificationArgs
{
ImageUri = "string",
ContainerArguments = new[]
{
"string",
},
ContainerEntrypoints = new[]
{
"string",
},
PostAnalyticsProcessorSourceUri = "string",
RecordPreprocessorSourceUri = "string",
},
MonitoringInputs = new Aws.Sagemaker.Inputs.MonitoringScheduleMonitoringScheduleConfigMonitoringJobDefinitionMonitoringInputsArgs
{
BatchTransformInput = new Aws.Sagemaker.Inputs.MonitoringScheduleMonitoringScheduleConfigMonitoringJobDefinitionMonitoringInputsBatchTransformInputArgs
{
DataCapturedDestinationS3Uri = "string",
DatasetFormat = new Aws.Sagemaker.Inputs.MonitoringScheduleMonitoringScheduleConfigMonitoringJobDefinitionMonitoringInputsBatchTransformInputDatasetFormatArgs
{
Csv = new Aws.Sagemaker.Inputs.MonitoringScheduleMonitoringScheduleConfigMonitoringJobDefinitionMonitoringInputsBatchTransformInputDatasetFormatCsvArgs
{
Header = false,
},
Json = new Aws.Sagemaker.Inputs.MonitoringScheduleMonitoringScheduleConfigMonitoringJobDefinitionMonitoringInputsBatchTransformInputDatasetFormatJsonArgs
{
Line = false,
},
},
LocalPath = "string",
EndTimeOffset = "string",
ExcludeFeaturesAttribute = "string",
FeaturesAttribute = "string",
InferenceAttribute = "string",
ProbabilityAttribute = "string",
ProbabilityThresholdAttribute = 0,
S3DataDistributionType = "string",
S3InputMode = "string",
StartTimeOffset = "string",
},
EndpointInput = new Aws.Sagemaker.Inputs.MonitoringScheduleMonitoringScheduleConfigMonitoringJobDefinitionMonitoringInputsEndpointInputArgs
{
EndpointName = "string",
LocalPath = "string",
EndTimeOffset = "string",
ExcludeFeaturesAttribute = "string",
FeaturesAttribute = "string",
InferenceAttribute = "string",
ProbabilityAttribute = "string",
ProbabilityThresholdAttribute = 0,
S3DataDistributionType = "string",
S3InputMode = "string",
StartTimeOffset = "string",
},
},
MonitoringOutputConfig = new Aws.Sagemaker.Inputs.MonitoringScheduleMonitoringScheduleConfigMonitoringJobDefinitionMonitoringOutputConfigArgs
{
MonitoringOutputs = new Aws.Sagemaker.Inputs.MonitoringScheduleMonitoringScheduleConfigMonitoringJobDefinitionMonitoringOutputConfigMonitoringOutputsArgs
{
S3Output = new Aws.Sagemaker.Inputs.MonitoringScheduleMonitoringScheduleConfigMonitoringJobDefinitionMonitoringOutputConfigMonitoringOutputsS3OutputArgs
{
LocalPath = "string",
S3Uri = "string",
S3UploadMode = "string",
},
},
KmsKeyId = "string",
},
MonitoringResources = new Aws.Sagemaker.Inputs.MonitoringScheduleMonitoringScheduleConfigMonitoringJobDefinitionMonitoringResourcesArgs
{
ClusterConfig = new Aws.Sagemaker.Inputs.MonitoringScheduleMonitoringScheduleConfigMonitoringJobDefinitionMonitoringResourcesClusterConfigArgs
{
InstanceCount = 0,
InstanceType = "string",
VolumeSizeInGb = 0,
VolumeKmsKeyId = "string",
},
},
RoleArn = "string",
Baseline = new Aws.Sagemaker.Inputs.MonitoringScheduleMonitoringScheduleConfigMonitoringJobDefinitionBaselineArgs
{
BaseliningJobName = "string",
ConstraintsResource = new Aws.Sagemaker.Inputs.MonitoringScheduleMonitoringScheduleConfigMonitoringJobDefinitionBaselineConstraintsResourceArgs
{
S3Uri = "string",
},
StatisticsResource = new Aws.Sagemaker.Inputs.MonitoringScheduleMonitoringScheduleConfigMonitoringJobDefinitionBaselineStatisticsResourceArgs
{
S3Uri = "string",
},
},
Environment =
{
{ "string", "string" },
},
NetworkConfig = new Aws.Sagemaker.Inputs.MonitoringScheduleMonitoringScheduleConfigMonitoringJobDefinitionNetworkConfigArgs
{
EnableInterContainerTrafficEncryption = false,
EnableNetworkIsolation = false,
VpcConfig = new Aws.Sagemaker.Inputs.MonitoringScheduleMonitoringScheduleConfigMonitoringJobDefinitionNetworkConfigVpcConfigArgs
{
SecurityGroupIds = new[]
{
"string",
},
Subnets = new[]
{
"string",
},
},
},
StoppingConditions = new[]
{
new Aws.Sagemaker.Inputs.MonitoringScheduleMonitoringScheduleConfigMonitoringJobDefinitionStoppingConditionArgs
{
MaxRuntimeInSeconds = 0,
},
},
},
MonitoringJobDefinitionName = "string",
ScheduleConfig = new Aws.Sagemaker.Inputs.MonitoringScheduleMonitoringScheduleConfigScheduleConfigArgs
{
ScheduleExpression = "string",
},
},
Name = "string",
Region = "string",
Tags =
{
{ "string", "string" },
},
});
example, err := sagemaker.NewMonitoringSchedule(ctx, "monitoringScheduleResource", &sagemaker.MonitoringScheduleArgs{
MonitoringScheduleConfig: &sagemaker.MonitoringScheduleMonitoringScheduleConfigArgs{
MonitoringType: pulumi.String("string"),
MonitoringJobDefinition: &sagemaker.MonitoringScheduleMonitoringScheduleConfigMonitoringJobDefinitionArgs{
MonitoringAppSpecification: &sagemaker.MonitoringScheduleMonitoringScheduleConfigMonitoringJobDefinitionMonitoringAppSpecificationArgs{
ImageUri: pulumi.String("string"),
ContainerArguments: pulumi.StringArray{
pulumi.String("string"),
},
ContainerEntrypoints: pulumi.StringArray{
pulumi.String("string"),
},
PostAnalyticsProcessorSourceUri: pulumi.String("string"),
RecordPreprocessorSourceUri: pulumi.String("string"),
},
MonitoringInputs: &sagemaker.MonitoringScheduleMonitoringScheduleConfigMonitoringJobDefinitionMonitoringInputsArgs{
BatchTransformInput: &sagemaker.MonitoringScheduleMonitoringScheduleConfigMonitoringJobDefinitionMonitoringInputsBatchTransformInputArgs{
DataCapturedDestinationS3Uri: pulumi.String("string"),
DatasetFormat: &sagemaker.MonitoringScheduleMonitoringScheduleConfigMonitoringJobDefinitionMonitoringInputsBatchTransformInputDatasetFormatArgs{
Csv: &sagemaker.MonitoringScheduleMonitoringScheduleConfigMonitoringJobDefinitionMonitoringInputsBatchTransformInputDatasetFormatCsvArgs{
Header: pulumi.Bool(false),
},
Json: &sagemaker.MonitoringScheduleMonitoringScheduleConfigMonitoringJobDefinitionMonitoringInputsBatchTransformInputDatasetFormatJsonArgs{
Line: pulumi.Bool(false),
},
},
LocalPath: pulumi.String("string"),
EndTimeOffset: pulumi.String("string"),
ExcludeFeaturesAttribute: pulumi.String("string"),
FeaturesAttribute: pulumi.String("string"),
InferenceAttribute: pulumi.String("string"),
ProbabilityAttribute: pulumi.String("string"),
ProbabilityThresholdAttribute: pulumi.Float64(0),
S3DataDistributionType: pulumi.String("string"),
S3InputMode: pulumi.String("string"),
StartTimeOffset: pulumi.String("string"),
},
EndpointInput: &sagemaker.MonitoringScheduleMonitoringScheduleConfigMonitoringJobDefinitionMonitoringInputsEndpointInputArgs{
EndpointName: pulumi.String("string"),
LocalPath: pulumi.String("string"),
EndTimeOffset: pulumi.String("string"),
ExcludeFeaturesAttribute: pulumi.String("string"),
FeaturesAttribute: pulumi.String("string"),
InferenceAttribute: pulumi.String("string"),
ProbabilityAttribute: pulumi.String("string"),
ProbabilityThresholdAttribute: pulumi.Float64(0),
S3DataDistributionType: pulumi.String("string"),
S3InputMode: pulumi.String("string"),
StartTimeOffset: pulumi.String("string"),
},
},
MonitoringOutputConfig: &sagemaker.MonitoringScheduleMonitoringScheduleConfigMonitoringJobDefinitionMonitoringOutputConfigArgs{
MonitoringOutputs: &sagemaker.MonitoringScheduleMonitoringScheduleConfigMonitoringJobDefinitionMonitoringOutputConfigMonitoringOutputsArgs{
S3Output: sagemaker.MonitoringScheduleMonitoringScheduleConfigMonitoringJobDefinitionMonitoringOutputConfigMonitoringOutputsS3OutputArgs{
LocalPath: pulumi.String("string"),
S3Uri: pulumi.String("string"),
S3UploadMode: pulumi.String("string"),
},
},
KmsKeyId: pulumi.String("string"),
},
MonitoringResources: &sagemaker.MonitoringScheduleMonitoringScheduleConfigMonitoringJobDefinitionMonitoringResourcesArgs{
ClusterConfig: &sagemaker.MonitoringScheduleMonitoringScheduleConfigMonitoringJobDefinitionMonitoringResourcesClusterConfigArgs{
InstanceCount: pulumi.Int(0),
InstanceType: pulumi.String("string"),
VolumeSizeInGb: pulumi.Int(0),
VolumeKmsKeyId: pulumi.String("string"),
},
},
RoleArn: pulumi.String("string"),
Baseline: &sagemaker.MonitoringScheduleMonitoringScheduleConfigMonitoringJobDefinitionBaselineArgs{
BaseliningJobName: pulumi.String("string"),
ConstraintsResource: &sagemaker.MonitoringScheduleMonitoringScheduleConfigMonitoringJobDefinitionBaselineConstraintsResourceArgs{
S3Uri: pulumi.String("string"),
},
StatisticsResource: &sagemaker.MonitoringScheduleMonitoringScheduleConfigMonitoringJobDefinitionBaselineStatisticsResourceArgs{
S3Uri: pulumi.String("string"),
},
},
Environment: pulumi.StringMap{
"string": pulumi.String("string"),
},
NetworkConfig: &sagemaker.MonitoringScheduleMonitoringScheduleConfigMonitoringJobDefinitionNetworkConfigArgs{
EnableInterContainerTrafficEncryption: pulumi.Bool(false),
EnableNetworkIsolation: pulumi.Bool(false),
VpcConfig: &sagemaker.MonitoringScheduleMonitoringScheduleConfigMonitoringJobDefinitionNetworkConfigVpcConfigArgs{
SecurityGroupIds: pulumi.StringArray{
pulumi.String("string"),
},
Subnets: pulumi.StringArray{
pulumi.String("string"),
},
},
},
StoppingConditions: sagemaker.MonitoringScheduleMonitoringScheduleConfigMonitoringJobDefinitionStoppingConditionArray{
&sagemaker.MonitoringScheduleMonitoringScheduleConfigMonitoringJobDefinitionStoppingConditionArgs{
MaxRuntimeInSeconds: pulumi.Int(0),
},
},
},
MonitoringJobDefinitionName: pulumi.String("string"),
ScheduleConfig: &sagemaker.MonitoringScheduleMonitoringScheduleConfigScheduleConfigArgs{
ScheduleExpression: pulumi.String("string"),
},
},
Name: pulumi.String("string"),
Region: pulumi.String("string"),
Tags: pulumi.StringMap{
"string": pulumi.String("string"),
},
})
var monitoringScheduleResource = new MonitoringSchedule("monitoringScheduleResource", MonitoringScheduleArgs.builder()
.monitoringScheduleConfig(MonitoringScheduleMonitoringScheduleConfigArgs.builder()
.monitoringType("string")
.monitoringJobDefinition(MonitoringScheduleMonitoringScheduleConfigMonitoringJobDefinitionArgs.builder()
.monitoringAppSpecification(MonitoringScheduleMonitoringScheduleConfigMonitoringJobDefinitionMonitoringAppSpecificationArgs.builder()
.imageUri("string")
.containerArguments("string")
.containerEntrypoints("string")
.postAnalyticsProcessorSourceUri("string")
.recordPreprocessorSourceUri("string")
.build())
.monitoringInputs(MonitoringScheduleMonitoringScheduleConfigMonitoringJobDefinitionMonitoringInputsArgs.builder()
.batchTransformInput(MonitoringScheduleMonitoringScheduleConfigMonitoringJobDefinitionMonitoringInputsBatchTransformInputArgs.builder()
.dataCapturedDestinationS3Uri("string")
.datasetFormat(MonitoringScheduleMonitoringScheduleConfigMonitoringJobDefinitionMonitoringInputsBatchTransformInputDatasetFormatArgs.builder()
.csv(MonitoringScheduleMonitoringScheduleConfigMonitoringJobDefinitionMonitoringInputsBatchTransformInputDatasetFormatCsvArgs.builder()
.header(false)
.build())
.json(MonitoringScheduleMonitoringScheduleConfigMonitoringJobDefinitionMonitoringInputsBatchTransformInputDatasetFormatJsonArgs.builder()
.line(false)
.build())
.build())
.localPath("string")
.endTimeOffset("string")
.excludeFeaturesAttribute("string")
.featuresAttribute("string")
.inferenceAttribute("string")
.probabilityAttribute("string")
.probabilityThresholdAttribute(0.0)
.s3DataDistributionType("string")
.s3InputMode("string")
.startTimeOffset("string")
.build())
.endpointInput(MonitoringScheduleMonitoringScheduleConfigMonitoringJobDefinitionMonitoringInputsEndpointInputArgs.builder()
.endpointName("string")
.localPath("string")
.endTimeOffset("string")
.excludeFeaturesAttribute("string")
.featuresAttribute("string")
.inferenceAttribute("string")
.probabilityAttribute("string")
.probabilityThresholdAttribute(0.0)
.s3DataDistributionType("string")
.s3InputMode("string")
.startTimeOffset("string")
.build())
.build())
.monitoringOutputConfig(MonitoringScheduleMonitoringScheduleConfigMonitoringJobDefinitionMonitoringOutputConfigArgs.builder()
.monitoringOutputs(MonitoringScheduleMonitoringScheduleConfigMonitoringJobDefinitionMonitoringOutputConfigMonitoringOutputsArgs.builder()
.s3Output(MonitoringScheduleMonitoringScheduleConfigMonitoringJobDefinitionMonitoringOutputConfigMonitoringOutputsS3OutputArgs.builder()
.localPath("string")
.s3Uri("string")
.s3UploadMode("string")
.build())
.build())
.kmsKeyId("string")
.build())
.monitoringResources(MonitoringScheduleMonitoringScheduleConfigMonitoringJobDefinitionMonitoringResourcesArgs.builder()
.clusterConfig(MonitoringScheduleMonitoringScheduleConfigMonitoringJobDefinitionMonitoringResourcesClusterConfigArgs.builder()
.instanceCount(0)
.instanceType("string")
.volumeSizeInGb(0)
.volumeKmsKeyId("string")
.build())
.build())
.roleArn("string")
.baseline(MonitoringScheduleMonitoringScheduleConfigMonitoringJobDefinitionBaselineArgs.builder()
.baseliningJobName("string")
.constraintsResource(MonitoringScheduleMonitoringScheduleConfigMonitoringJobDefinitionBaselineConstraintsResourceArgs.builder()
.s3Uri("string")
.build())
.statisticsResource(MonitoringScheduleMonitoringScheduleConfigMonitoringJobDefinitionBaselineStatisticsResourceArgs.builder()
.s3Uri("string")
.build())
.build())
.environment(Map.of("string", "string"))
.networkConfig(MonitoringScheduleMonitoringScheduleConfigMonitoringJobDefinitionNetworkConfigArgs.builder()
.enableInterContainerTrafficEncryption(false)
.enableNetworkIsolation(false)
.vpcConfig(MonitoringScheduleMonitoringScheduleConfigMonitoringJobDefinitionNetworkConfigVpcConfigArgs.builder()
.securityGroupIds("string")
.subnets("string")
.build())
.build())
.stoppingConditions(MonitoringScheduleMonitoringScheduleConfigMonitoringJobDefinitionStoppingConditionArgs.builder()
.maxRuntimeInSeconds(0)
.build())
.build())
.monitoringJobDefinitionName("string")
.scheduleConfig(MonitoringScheduleMonitoringScheduleConfigScheduleConfigArgs.builder()
.scheduleExpression("string")
.build())
.build())
.name("string")
.region("string")
.tags(Map.of("string", "string"))
.build());
monitoring_schedule_resource = aws.sagemaker.MonitoringSchedule("monitoringScheduleResource",
monitoring_schedule_config={
"monitoring_type": "string",
"monitoring_job_definition": {
"monitoring_app_specification": {
"image_uri": "string",
"container_arguments": ["string"],
"container_entrypoints": ["string"],
"post_analytics_processor_source_uri": "string",
"record_preprocessor_source_uri": "string",
},
"monitoring_inputs": {
"batch_transform_input": {
"data_captured_destination_s3_uri": "string",
"dataset_format": {
"csv": {
"header": False,
},
"json": {
"line": False,
},
},
"local_path": "string",
"end_time_offset": "string",
"exclude_features_attribute": "string",
"features_attribute": "string",
"inference_attribute": "string",
"probability_attribute": "string",
"probability_threshold_attribute": 0,
"s3_data_distribution_type": "string",
"s3_input_mode": "string",
"start_time_offset": "string",
},
"endpoint_input": {
"endpoint_name": "string",
"local_path": "string",
"end_time_offset": "string",
"exclude_features_attribute": "string",
"features_attribute": "string",
"inference_attribute": "string",
"probability_attribute": "string",
"probability_threshold_attribute": 0,
"s3_data_distribution_type": "string",
"s3_input_mode": "string",
"start_time_offset": "string",
},
},
"monitoring_output_config": {
"monitoring_outputs": {
"s3_output": {
"local_path": "string",
"s3_uri": "string",
"s3_upload_mode": "string",
},
},
"kms_key_id": "string",
},
"monitoring_resources": {
"cluster_config": {
"instance_count": 0,
"instance_type": "string",
"volume_size_in_gb": 0,
"volume_kms_key_id": "string",
},
},
"role_arn": "string",
"baseline": {
"baselining_job_name": "string",
"constraints_resource": {
"s3_uri": "string",
},
"statistics_resource": {
"s3_uri": "string",
},
},
"environment": {
"string": "string",
},
"network_config": {
"enable_inter_container_traffic_encryption": False,
"enable_network_isolation": False,
"vpc_config": {
"security_group_ids": ["string"],
"subnets": ["string"],
},
},
"stopping_conditions": [{
"max_runtime_in_seconds": 0,
}],
},
"monitoring_job_definition_name": "string",
"schedule_config": {
"schedule_expression": "string",
},
},
name="string",
region="string",
tags={
"string": "string",
})
const monitoringScheduleResource = new aws.sagemaker.MonitoringSchedule("monitoringScheduleResource", {
monitoringScheduleConfig: {
monitoringType: "string",
monitoringJobDefinition: {
monitoringAppSpecification: {
imageUri: "string",
containerArguments: ["string"],
containerEntrypoints: ["string"],
postAnalyticsProcessorSourceUri: "string",
recordPreprocessorSourceUri: "string",
},
monitoringInputs: {
batchTransformInput: {
dataCapturedDestinationS3Uri: "string",
datasetFormat: {
csv: {
header: false,
},
json: {
line: false,
},
},
localPath: "string",
endTimeOffset: "string",
excludeFeaturesAttribute: "string",
featuresAttribute: "string",
inferenceAttribute: "string",
probabilityAttribute: "string",
probabilityThresholdAttribute: 0,
s3DataDistributionType: "string",
s3InputMode: "string",
startTimeOffset: "string",
},
endpointInput: {
endpointName: "string",
localPath: "string",
endTimeOffset: "string",
excludeFeaturesAttribute: "string",
featuresAttribute: "string",
inferenceAttribute: "string",
probabilityAttribute: "string",
probabilityThresholdAttribute: 0,
s3DataDistributionType: "string",
s3InputMode: "string",
startTimeOffset: "string",
},
},
monitoringOutputConfig: {
monitoringOutputs: {
s3Output: {
localPath: "string",
s3Uri: "string",
s3UploadMode: "string",
},
},
kmsKeyId: "string",
},
monitoringResources: {
clusterConfig: {
instanceCount: 0,
instanceType: "string",
volumeSizeInGb: 0,
volumeKmsKeyId: "string",
},
},
roleArn: "string",
baseline: {
baseliningJobName: "string",
constraintsResource: {
s3Uri: "string",
},
statisticsResource: {
s3Uri: "string",
},
},
environment: {
string: "string",
},
networkConfig: {
enableInterContainerTrafficEncryption: false,
enableNetworkIsolation: false,
vpcConfig: {
securityGroupIds: ["string"],
subnets: ["string"],
},
},
stoppingConditions: [{
maxRuntimeInSeconds: 0,
}],
},
monitoringJobDefinitionName: "string",
scheduleConfig: {
scheduleExpression: "string",
},
},
name: "string",
region: "string",
tags: {
string: "string",
},
});
type: aws:sagemaker:MonitoringSchedule
properties:
monitoringScheduleConfig:
monitoringJobDefinition:
baseline:
baseliningJobName: string
constraintsResource:
s3Uri: string
statisticsResource:
s3Uri: string
environment:
string: string
monitoringAppSpecification:
containerArguments:
- string
containerEntrypoints:
- string
imageUri: string
postAnalyticsProcessorSourceUri: string
recordPreprocessorSourceUri: string
monitoringInputs:
batchTransformInput:
dataCapturedDestinationS3Uri: string
datasetFormat:
csv:
header: false
json:
line: false
endTimeOffset: string
excludeFeaturesAttribute: string
featuresAttribute: string
inferenceAttribute: string
localPath: string
probabilityAttribute: string
probabilityThresholdAttribute: 0
s3DataDistributionType: string
s3InputMode: string
startTimeOffset: string
endpointInput:
endTimeOffset: string
endpointName: string
excludeFeaturesAttribute: string
featuresAttribute: string
inferenceAttribute: string
localPath: string
probabilityAttribute: string
probabilityThresholdAttribute: 0
s3DataDistributionType: string
s3InputMode: string
startTimeOffset: string
monitoringOutputConfig:
kmsKeyId: string
monitoringOutputs:
s3Output:
localPath: string
s3UploadMode: string
s3Uri: string
monitoringResources:
clusterConfig:
instanceCount: 0
instanceType: string
volumeKmsKeyId: string
volumeSizeInGb: 0
networkConfig:
enableInterContainerTrafficEncryption: false
enableNetworkIsolation: false
vpcConfig:
securityGroupIds:
- string
subnets:
- string
roleArn: string
stoppingConditions:
- maxRuntimeInSeconds: 0
monitoringJobDefinitionName: string
monitoringType: string
scheduleConfig:
scheduleExpression: string
name: string
region: string
tags:
string: string
MonitoringSchedule Resource Properties
To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.
Inputs
In Python, inputs that are objects can be passed either as argument classes or as dictionary literals.
The MonitoringSchedule resource accepts the following input properties:
- MonitoringScheduleConfig MonitoringScheduleMonitoringScheduleConfig
- The configuration object that specifies the monitoring schedule and defines the monitoring job. Fields are documented below.
- Name string
- The name of the monitoring schedule. The name must be unique within an AWS Region within an AWS account. If omitted, the provider will assign a random, unique name.
- Region string
- Region where this resource will be managed. Defaults to the Region set in the provider configuration.
- Tags Dictionary&lt;string, string&gt;
- A mapping of tags to assign to the resource. If configured with a provider
default_tags configuration block present, tags with matching keys will overwrite those defined at the provider-level.
- MonitoringScheduleConfig MonitoringScheduleMonitoringScheduleConfigArgs
- The configuration object that specifies the monitoring schedule and defines the monitoring job. Fields are documented below.
- Name string
- The name of the monitoring schedule. The name must be unique within an AWS Region within an AWS account. If omitted, the provider will assign a random, unique name.
- Region string
- Region where this resource will be managed. Defaults to the Region set in the provider configuration.
- Tags map[string]string
- A mapping of tags to assign to the resource. If configured with a provider
default_tags configuration block present, tags with matching keys will overwrite those defined at the provider-level.
- monitoringScheduleConfig MonitoringScheduleMonitoringScheduleConfig
- The configuration object that specifies the monitoring schedule and defines the monitoring job. Fields are documented below.
- name String
- The name of the monitoring schedule. The name must be unique within an AWS Region within an AWS account. If omitted, the provider will assign a random, unique name.
- region String
- Region where this resource will be managed. Defaults to the Region set in the provider configuration.
- tags Map&lt;String,String&gt;
- A mapping of tags to assign to the resource. If configured with a provider
default_tags configuration block present, tags with matching keys will overwrite those defined at the provider-level.
- monitoringScheduleConfig MonitoringScheduleMonitoringScheduleConfig
- The configuration object that specifies the monitoring schedule and defines the monitoring job. Fields are documented below.
- name string
- The name of the monitoring schedule. The name must be unique within an AWS Region within an AWS account. If omitted, the provider will assign a random, unique name.
- region string
- Region where this resource will be managed. Defaults to the Region set in the provider configuration.
- tags {[key: string]: string}
- A mapping of tags to assign to the resource. If configured with a provider
default_tags configuration block present, tags with matching keys will overwrite those defined at the provider-level.
- monitoring_schedule_config MonitoringScheduleMonitoringScheduleConfigArgs
- The configuration object that specifies the monitoring schedule and defines the monitoring job. Fields are documented below.
- name str
- The name of the monitoring schedule. The name must be unique within an AWS Region within an AWS account. If omitted, the provider will assign a random, unique name.
- region str
- Region where this resource will be managed. Defaults to the Region set in the provider configuration.
- tags Mapping[str, str]
- A mapping of tags to assign to the resource. If configured with a provider
default_tags configuration block present, tags with matching keys will overwrite those defined at the provider-level.
- monitoringScheduleConfig Property Map
- The configuration object that specifies the monitoring schedule and defines the monitoring job. Fields are documented below.
- name String
- The name of the monitoring schedule. The name must be unique within an AWS Region within an AWS account. If omitted, the provider will assign a random, unique name.
- region String
- Region where this resource will be managed. Defaults to the Region set in the provider configuration.
- tags Map&lt;String&gt;
- A mapping of tags to assign to the resource. If configured with a provider
default_tags configuration block present, tags with matching keys will overwrite those defined at the provider-level.
Outputs
All input properties are implicitly available as output properties. Additionally, the MonitoringSchedule resource produces the following output properties:
Look up Existing MonitoringSchedule Resource
Get an existing MonitoringSchedule resource’s state with the given name, ID, and optional extra properties used to qualify the lookup.
public static get(name: string, id: Input&lt;ID&gt;, state?: MonitoringScheduleState, opts?: CustomResourceOptions): MonitoringSchedule
@staticmethod
def get(resource_name: str,
id: str,
opts: Optional[ResourceOptions] = None,
arn: Optional[str] = None,
monitoring_schedule_config: Optional[MonitoringScheduleMonitoringScheduleConfigArgs] = None,
name: Optional[str] = None,
region: Optional[str] = None,
tags: Optional[Mapping[str, str]] = None,
tags_all: Optional[Mapping[str, str]] = None) -> MonitoringSchedule
func GetMonitoringSchedule(ctx *Context, name string, id IDInput, state *MonitoringScheduleState, opts ...ResourceOption) (*MonitoringSchedule, error)
public static MonitoringSchedule Get(string name, Input<string> id, MonitoringScheduleState? state, CustomResourceOptions? opts = null)
public static MonitoringSchedule get(String name, Output<String> id, MonitoringScheduleState state, CustomResourceOptions options)
resources:
  _:
    type: aws:sagemaker:MonitoringSchedule
    get:
      id: ${id}
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- resource_name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- Arn string
- The Amazon Resource Name (ARN) assigned by AWS to this monitoring schedule.
- MonitoringScheduleConfig MonitoringScheduleMonitoringScheduleConfig
- The configuration object that specifies the monitoring schedule and defines the monitoring job. Fields are documented below.
- Name string
- The name of the monitoring schedule. The name must be unique within an AWS Region within an AWS account. If omitted, the provider will assign a random, unique name.
- Region string
- Region where this resource will be managed. Defaults to the Region set in the provider configuration.
- Tags Dictionary<string, string>
- A mapping of tags to assign to the resource. If configured with a provider default_tags configuration block present, tags with matching keys will overwrite those defined at the provider-level.
- TagsAll Dictionary<string, string>
- A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block.
- Arn string
- The Amazon Resource Name (ARN) assigned by AWS to this monitoring schedule.
- MonitoringScheduleConfig MonitoringScheduleMonitoringScheduleConfigArgs
- The configuration object that specifies the monitoring schedule and defines the monitoring job. Fields are documented below.
- Name string
- The name of the monitoring schedule. The name must be unique within an AWS Region within an AWS account. If omitted, the provider will assign a random, unique name.
- Region string
- Region where this resource will be managed. Defaults to the Region set in the provider configuration.
- Tags map[string]string
- A mapping of tags to assign to the resource. If configured with a provider default_tags configuration block present, tags with matching keys will overwrite those defined at the provider-level.
- TagsAll map[string]string
- A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block.
- arn String
- The Amazon Resource Name (ARN) assigned by AWS to this monitoring schedule.
- monitoringScheduleConfig MonitoringScheduleMonitoringScheduleConfig
- The configuration object that specifies the monitoring schedule and defines the monitoring job. Fields are documented below.
- name String
- The name of the monitoring schedule. The name must be unique within an AWS Region within an AWS account. If omitted, the provider will assign a random, unique name.
- region String
- Region where this resource will be managed. Defaults to the Region set in the provider configuration.
- tags Map<String,String>
- A mapping of tags to assign to the resource. If configured with a provider default_tags configuration block present, tags with matching keys will overwrite those defined at the provider-level.
- tagsAll Map<String,String>
- A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block.
- arn string
- The Amazon Resource Name (ARN) assigned by AWS to this monitoring schedule.
- monitoringScheduleConfig MonitoringScheduleMonitoringScheduleConfig
- The configuration object that specifies the monitoring schedule and defines the monitoring job. Fields are documented below.
- name string
- The name of the monitoring schedule. The name must be unique within an AWS Region within an AWS account. If omitted, the provider will assign a random, unique name.
- region string
- Region where this resource will be managed. Defaults to the Region set in the provider configuration.
- tags {[key: string]: string}
- A mapping of tags to assign to the resource. If configured with a provider default_tags configuration block present, tags with matching keys will overwrite those defined at the provider-level.
- tagsAll {[key: string]: string}
- A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block.
- arn str
- The Amazon Resource Name (ARN) assigned by AWS to this monitoring schedule.
- monitoring_schedule_config MonitoringScheduleMonitoringScheduleConfigArgs
- The configuration object that specifies the monitoring schedule and defines the monitoring job. Fields are documented below.
- name str
- The name of the monitoring schedule. The name must be unique within an AWS Region within an AWS account. If omitted, the provider will assign a random, unique name.
- region str
- Region where this resource will be managed. Defaults to the Region set in the provider configuration.
- tags Mapping[str, str]
- A mapping of tags to assign to the resource. If configured with a provider default_tags configuration block present, tags with matching keys will overwrite those defined at the provider-level.
- tags_all Mapping[str, str]
- A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block.
- arn String
- The Amazon Resource Name (ARN) assigned by AWS to this monitoring schedule.
- monitoringScheduleConfig Property Map
- The configuration object that specifies the monitoring schedule and defines the monitoring job. Fields are documented below.
- name String
- The name of the monitoring schedule. The name must be unique within an AWS Region within an AWS account. If omitted, the provider will assign a random, unique name.
- region String
- Region where this resource will be managed. Defaults to the Region set in the provider configuration.
- tags Map<String>
- A mapping of tags to assign to the resource. If configured with a provider default_tags configuration block present, tags with matching keys will overwrite those defined at the provider-level.
- tagsAll Map<String>
- A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block.
Supporting Types
MonitoringScheduleMonitoringScheduleConfig, MonitoringScheduleMonitoringScheduleConfigArgs
- MonitoringType string
- The type of the monitoring job definition to schedule. Valid values are DataQuality, ModelQuality, ModelBias or ModelExplainability.
- MonitoringJobDefinition MonitoringScheduleMonitoringScheduleConfigMonitoringJobDefinition
- Defines the monitoring job. Fields are documented below.
- MonitoringJobDefinitionName string
- The name of the monitoring job definition to schedule.
- ScheduleConfig MonitoringScheduleMonitoringScheduleConfigScheduleConfig
- Configures the monitoring schedule. Fields are documented below.
- MonitoringType string
- The type of the monitoring job definition to schedule. Valid values are DataQuality, ModelQuality, ModelBias or ModelExplainability.
- MonitoringJobDefinition MonitoringScheduleMonitoringScheduleConfigMonitoringJobDefinition
- Defines the monitoring job. Fields are documented below.
- MonitoringJobDefinitionName string
- The name of the monitoring job definition to schedule.
- ScheduleConfig MonitoringScheduleMonitoringScheduleConfigScheduleConfig
- Configures the monitoring schedule. Fields are documented below.
- monitoringType String
- The type of the monitoring job definition to schedule. Valid values are DataQuality, ModelQuality, ModelBias or ModelExplainability.
- monitoringJobDefinition MonitoringScheduleMonitoringScheduleConfigMonitoringJobDefinition
- Defines the monitoring job. Fields are documented below.
- monitoringJobDefinitionName String
- The name of the monitoring job definition to schedule.
- scheduleConfig MonitoringScheduleMonitoringScheduleConfigScheduleConfig
- Configures the monitoring schedule. Fields are documented below.
- monitoringType string
- The type of the monitoring job definition to schedule. Valid values are DataQuality, ModelQuality, ModelBias or ModelExplainability.
- monitoringJobDefinition MonitoringScheduleMonitoringScheduleConfigMonitoringJobDefinition
- Defines the monitoring job. Fields are documented below.
- monitoringJobDefinitionName string
- The name of the monitoring job definition to schedule.
- scheduleConfig MonitoringScheduleMonitoringScheduleConfigScheduleConfig
- Configures the monitoring schedule. Fields are documented below.
- monitoring_type str
- The type of the monitoring job definition to schedule. Valid values are DataQuality, ModelQuality, ModelBias or ModelExplainability.
- monitoring_job_definition MonitoringScheduleMonitoringScheduleConfigMonitoringJobDefinition
- Defines the monitoring job. Fields are documented below.
- monitoring_job_definition_name str
- The name of the monitoring job definition to schedule.
- schedule_config MonitoringScheduleMonitoringScheduleConfigScheduleConfig
- Configures the monitoring schedule. Fields are documented below.
- monitoringType String
- The type of the monitoring job definition to schedule. Valid values are DataQuality, ModelQuality, ModelBias or ModelExplainability.
- monitoringJobDefinition Property Map
- Defines the monitoring job. Fields are documented below.
- monitoringJobDefinitionName String
- The name of the monitoring job definition to schedule.
- scheduleConfig Property Map
- Configures the monitoring schedule. Fields are documented below.
MonitoringScheduleMonitoringScheduleConfigMonitoringJobDefinition, MonitoringScheduleMonitoringScheduleConfigMonitoringJobDefinitionArgs
- MonitoringAppSpecification MonitoringScheduleMonitoringScheduleConfigMonitoringJobDefinitionMonitoringAppSpecification
- Configures the monitoring job to run a specified Docker container image. Fields are documented below.
- MonitoringInputs MonitoringScheduleMonitoringScheduleConfigMonitoringJobDefinitionMonitoringInputs
- Inputs for the monitoring job. Fields are documented below.
- MonitoringOutputConfig MonitoringScheduleMonitoringScheduleConfigMonitoringJobDefinitionMonitoringOutputConfig
- Outputs from the monitoring job to be uploaded to Amazon S3. Fields are documented below.
- MonitoringResources MonitoringScheduleMonitoringScheduleConfigMonitoringJobDefinitionMonitoringResources
- Identifies the resources, ML compute instances, and ML storage volumes to deploy for a monitoring job. Fields are documented below.
- RoleArn string
- ARN of an IAM role that Amazon SageMaker AI can assume to perform tasks on your behalf.
- Baseline MonitoringScheduleMonitoringScheduleConfigMonitoringJobDefinitionBaseline
- Baseline configuration used to validate that the data conforms to the specified constraints and statistics. Fields are documented below.
- Environment Dictionary<string, string>
- Map of environment variables in the Docker container.
- NetworkConfig MonitoringScheduleMonitoringScheduleConfigMonitoringJobDefinitionNetworkConfig
- Networking options for the monitoring job. Fields are documented below.
- StoppingConditions List<MonitoringScheduleMonitoringScheduleConfigMonitoringJobDefinitionStoppingCondition>
- How long the monitoring job is allowed to run. Fields are documented below.
- MonitoringAppSpecification MonitoringScheduleMonitoringScheduleConfigMonitoringJobDefinitionMonitoringAppSpecification
- Configures the monitoring job to run a specified Docker container image. Fields are documented below.
- MonitoringInputs MonitoringScheduleMonitoringScheduleConfigMonitoringJobDefinitionMonitoringInputs
- Inputs for the monitoring job. Fields are documented below.
- MonitoringOutputConfig MonitoringScheduleMonitoringScheduleConfigMonitoringJobDefinitionMonitoringOutputConfig
- Outputs from the monitoring job to be uploaded to Amazon S3. Fields are documented below.
- MonitoringResources MonitoringScheduleMonitoringScheduleConfigMonitoringJobDefinitionMonitoringResources
- Identifies the resources, ML compute instances, and ML storage volumes to deploy for a monitoring job. Fields are documented below.
- RoleArn string
- ARN of an IAM role that Amazon SageMaker AI can assume to perform tasks on your behalf.
- Baseline MonitoringScheduleMonitoringScheduleConfigMonitoringJobDefinitionBaseline
- Baseline configuration used to validate that the data conforms to the specified constraints and statistics. Fields are documented below.
- Environment map[string]string
- Map of environment variables in the Docker container.
- NetworkConfig MonitoringScheduleMonitoringScheduleConfigMonitoringJobDefinitionNetworkConfig
- Networking options for the monitoring job. Fields are documented below.
- StoppingConditions []MonitoringScheduleMonitoringScheduleConfigMonitoringJobDefinitionStoppingCondition
- How long the monitoring job is allowed to run. Fields are documented below.
- monitoringAppSpecification MonitoringScheduleMonitoringScheduleConfigMonitoringJobDefinitionMonitoringAppSpecification
- Configures the monitoring job to run a specified Docker container image. Fields are documented below.
- monitoringInputs MonitoringScheduleMonitoringScheduleConfigMonitoringJobDefinitionMonitoringInputs
- Inputs for the monitoring job. Fields are documented below.
- monitoringOutputConfig MonitoringScheduleMonitoringScheduleConfigMonitoringJobDefinitionMonitoringOutputConfig
- Outputs from the monitoring job to be uploaded to Amazon S3. Fields are documented below.
- monitoringResources MonitoringScheduleMonitoringScheduleConfigMonitoringJobDefinitionMonitoringResources
- Identifies the resources, ML compute instances, and ML storage volumes to deploy for a monitoring job. Fields are documented below.
- roleArn String
- ARN of an IAM role that Amazon SageMaker AI can assume to perform tasks on your behalf.
- baseline MonitoringScheduleMonitoringScheduleConfigMonitoringJobDefinitionBaseline
- Baseline configuration used to validate that the data conforms to the specified constraints and statistics. Fields are documented below.
- environment Map<String,String>
- Map of environment variables in the Docker container.
- networkConfig MonitoringScheduleMonitoringScheduleConfigMonitoringJobDefinitionNetworkConfig
- Networking options for the monitoring job. Fields are documented below.
- stoppingConditions List<MonitoringScheduleMonitoringScheduleConfigMonitoringJobDefinitionStoppingCondition>
- How long the monitoring job is allowed to run. Fields are documented below.
- monitoringAppSpecification MonitoringScheduleMonitoringScheduleConfigMonitoringJobDefinitionMonitoringAppSpecification
- Configures the monitoring job to run a specified Docker container image. Fields are documented below.
- monitoringInputs MonitoringScheduleMonitoringScheduleConfigMonitoringJobDefinitionMonitoringInputs
- Inputs for the monitoring job. Fields are documented below.
- monitoringOutputConfig MonitoringScheduleMonitoringScheduleConfigMonitoringJobDefinitionMonitoringOutputConfig
- Outputs from the monitoring job to be uploaded to Amazon S3. Fields are documented below.
- monitoringResources MonitoringScheduleMonitoringScheduleConfigMonitoringJobDefinitionMonitoringResources
- Identifies the resources, ML compute instances, and ML storage volumes to deploy for a monitoring job. Fields are documented below.
- roleArn string
- ARN of an IAM role that Amazon SageMaker AI can assume to perform tasks on your behalf.
- baseline MonitoringScheduleMonitoringScheduleConfigMonitoringJobDefinitionBaseline
- Baseline configuration used to validate that the data conforms to the specified constraints and statistics. Fields are documented below.
- environment {[key: string]: string}
- Map of environment variables in the Docker container.
- networkConfig MonitoringScheduleMonitoringScheduleConfigMonitoringJobDefinitionNetworkConfig
- Networking options for the monitoring job. Fields are documented below.
- stoppingConditions MonitoringScheduleMonitoringScheduleConfigMonitoringJobDefinitionStoppingCondition[]
- How long the monitoring job is allowed to run. Fields are documented below.
- monitoring_app_specification MonitoringScheduleMonitoringScheduleConfigMonitoringJobDefinitionMonitoringAppSpecification
- Configures the monitoring job to run a specified Docker container image. Fields are documented below.
- monitoring_inputs MonitoringScheduleMonitoringScheduleConfigMonitoringJobDefinitionMonitoringInputs
- Inputs for the monitoring job. Fields are documented below.
- monitoring_output_config MonitoringScheduleMonitoringScheduleConfigMonitoringJobDefinitionMonitoringOutputConfig
- Outputs from the monitoring job to be uploaded to Amazon S3. Fields are documented below.
- monitoring_resources MonitoringScheduleMonitoringScheduleConfigMonitoringJobDefinitionMonitoringResources
- Identifies the resources, ML compute instances, and ML storage volumes to deploy for a monitoring job. Fields are documented below.
- role_arn str
- ARN of an IAM role that Amazon SageMaker AI can assume to perform tasks on your behalf.
- baseline MonitoringScheduleMonitoringScheduleConfigMonitoringJobDefinitionBaseline
- Baseline configuration used to validate that the data conforms to the specified constraints and statistics. Fields are documented below.
- environment Mapping[str, str]
- Map of environment variables in the Docker container.
- network_config MonitoringScheduleMonitoringScheduleConfigMonitoringJobDefinitionNetworkConfig
- Networking options for the monitoring job. Fields are documented below.
- stopping_conditions Sequence[MonitoringScheduleMonitoringScheduleConfigMonitoringJobDefinitionStoppingCondition]
- How long the monitoring job is allowed to run. Fields are documented below.
- monitoringAppSpecification Property Map
- Configures the monitoring job to run a specified Docker container image. Fields are documented below.
- monitoringInputs Property Map
- Inputs for the monitoring job. Fields are documented below.
- monitoringOutputConfig Property Map
- Outputs from the monitoring job to be uploaded to Amazon S3. Fields are documented below.
- monitoringResources Property Map
- Identifies the resources, ML compute instances, and ML storage volumes to deploy for a monitoring job. Fields are documented below.
- roleArn String
- ARN of an IAM role that Amazon SageMaker AI can assume to perform tasks on your behalf.
- baseline Property Map
- Baseline configuration used to validate that the data conforms to the specified constraints and statistics. Fields are documented below.
- environment Map<String>
- Map of environment variables in the Docker container.
- networkConfig Property Map
- Networking options for the monitoring job. Fields are documented below.
- stoppingConditions List<Property Map>
- How long the monitoring job is allowed to run. Fields are documented below.
MonitoringScheduleMonitoringScheduleConfigMonitoringJobDefinitionBaseline, MonitoringScheduleMonitoringScheduleConfigMonitoringJobDefinitionBaselineArgs
MonitoringScheduleMonitoringScheduleConfigMonitoringJobDefinitionBaselineConstraintsResource, MonitoringScheduleMonitoringScheduleConfigMonitoringJobDefinitionBaselineConstraintsResourceArgs
- S3Uri string
- URI that identifies the Amazon S3 storage location where Amazon SageMaker AI saves the results of a monitoring job.
- S3Uri string
- URI that identifies the Amazon S3 storage location where Amazon SageMaker AI saves the results of a monitoring job.
- s3Uri String
- URI that identifies the Amazon S3 storage location where Amazon SageMaker AI saves the results of a monitoring job.
- s3Uri string
- URI that identifies the Amazon S3 storage location where Amazon SageMaker AI saves the results of a monitoring job.
- s3_uri str
- URI that identifies the Amazon S3 storage location where Amazon SageMaker AI saves the results of a monitoring job.
- s3Uri String
- URI that identifies the Amazon S3 storage location where Amazon SageMaker AI saves the results of a monitoring job.
MonitoringScheduleMonitoringScheduleConfigMonitoringJobDefinitionBaselineStatisticsResource, MonitoringScheduleMonitoringScheduleConfigMonitoringJobDefinitionBaselineStatisticsResourceArgs
- S3Uri string
- URI that identifies the Amazon S3 storage location where Amazon SageMaker AI saves the results of a monitoring job.
- S3Uri string
- URI that identifies the Amazon S3 storage location where Amazon SageMaker AI saves the results of a monitoring job.
- s3Uri String
- URI that identifies the Amazon S3 storage location where Amazon SageMaker AI saves the results of a monitoring job.
- s3Uri string
- URI that identifies the Amazon S3 storage location where Amazon SageMaker AI saves the results of a monitoring job.
- s3_uri str
- URI that identifies the Amazon S3 storage location where Amazon SageMaker AI saves the results of a monitoring job.
- s3Uri String
- URI that identifies the Amazon S3 storage location where Amazon SageMaker AI saves the results of a monitoring job.
MonitoringScheduleMonitoringScheduleConfigMonitoringJobDefinitionMonitoringAppSpecification, MonitoringScheduleMonitoringScheduleConfigMonitoringJobDefinitionMonitoringAppSpecificationArgs
- ImageUri string
- Container image to be run by the monitoring job.
- ContainerArguments List<string>
- List of arguments for the container used to run the monitoring job.
- ContainerEntrypoints List<string>
- Entrypoint for the container used to run the monitoring job.
- PostAnalyticsProcessorSourceUri string
- Script that is called after analysis has been performed.
- RecordPreprocessorSourceUri string
- Script that is called per row prior to running analysis.
- ImageUri string
- Container image to be run by the monitoring job.
- ContainerArguments []string
- List of arguments for the container used to run the monitoring job.
- ContainerEntrypoints []string
- Entrypoint for the container used to run the monitoring job.
- PostAnalyticsProcessorSourceUri string
- Script that is called after analysis has been performed.
- RecordPreprocessorSourceUri string
- Script that is called per row prior to running analysis.
- imageUri String
- Container image to be run by the monitoring job.
- containerArguments List<String>
- List of arguments for the container used to run the monitoring job.
- containerEntrypoints List<String>
- Entrypoint for the container used to run the monitoring job.
- postAnalyticsProcessorSourceUri String
- Script that is called after analysis has been performed.
- recordPreprocessorSourceUri String
- Script that is called per row prior to running analysis.
- imageUri string
- Container image to be run by the monitoring job.
- containerArguments string[]
- List of arguments for the container used to run the monitoring job.
- containerEntrypoints string[]
- Entrypoint for the container used to run the monitoring job.
- postAnalyticsProcessorSourceUri string
- Script that is called after analysis has been performed.
- recordPreprocessorSourceUri string
- Script that is called per row prior to running analysis.
- image_uri str
- Container image to be run by the monitoring job.
- container_arguments Sequence[str]
- List of arguments for the container used to run the monitoring job.
- container_entrypoints Sequence[str]
- Entrypoint for the container used to run the monitoring job.
- post_analytics_processor_source_uri str
- Script that is called after analysis has been performed.
- record_preprocessor_source_uri str
- Script that is called per row prior to running analysis.
- imageUri String
- Container image to be run by the monitoring job.
- containerArguments List<String>
- List of arguments for the container used to run the monitoring job.
- containerEntrypoints List<String>
- Entrypoint for the container used to run the monitoring job.
- postAnalyticsProcessorSourceUri String
- Script that is called after analysis has been performed.
- recordPreprocessorSourceUri String
- Script that is called per row prior to running analysis.
MonitoringScheduleMonitoringScheduleConfigMonitoringJobDefinitionMonitoringInputs, MonitoringScheduleMonitoringScheduleConfigMonitoringJobDefinitionMonitoringInputsArgs
- BatchTransformInput MonitoringScheduleMonitoringScheduleConfigMonitoringJobDefinitionMonitoringInputsBatchTransformInput
- Input object for the batch transform job. Fields are documented below.
- EndpointInput MonitoringScheduleMonitoringScheduleConfigMonitoringJobDefinitionMonitoringInputsEndpointInput
- Endpoint for a monitoring job. Fields are documented below.
- BatchTransformInput MonitoringScheduleMonitoringScheduleConfigMonitoringJobDefinitionMonitoringInputsBatchTransformInput
- Input object for the batch transform job. Fields are documented below.
- EndpointInput MonitoringScheduleMonitoringScheduleConfigMonitoringJobDefinitionMonitoringInputsEndpointInput
- Endpoint for a monitoring job. Fields are documented below.
- batchTransformInput MonitoringScheduleMonitoringScheduleConfigMonitoringJobDefinitionMonitoringInputsBatchTransformInput
- Input object for the batch transform job. Fields are documented below.
- endpointInput MonitoringScheduleMonitoringScheduleConfigMonitoringJobDefinitionMonitoringInputsEndpointInput
- Endpoint for a monitoring job. Fields are documented below.
- batchTransformInput MonitoringScheduleMonitoringScheduleConfigMonitoringJobDefinitionMonitoringInputsBatchTransformInput
- Input object for the batch transform job. Fields are documented below.
- endpointInput MonitoringScheduleMonitoringScheduleConfigMonitoringJobDefinitionMonitoringInputsEndpointInput
- Endpoint for a monitoring job. Fields are documented below.
- batch_transform_input MonitoringScheduleMonitoringScheduleConfigMonitoringJobDefinitionMonitoringInputsBatchTransformInput
- Input object for the batch transform job. Fields are documented below.
- endpoint_input MonitoringScheduleMonitoringScheduleConfigMonitoringJobDefinitionMonitoringInputsEndpointInput
- Endpoint for a monitoring job. Fields are documented below.
- batchTransformInput Property Map
- Input object for the batch transform job. Fields are documented below.
- endpointInput Property Map
- Endpoint for a monitoring job. Fields are documented below.
MonitoringScheduleMonitoringScheduleConfigMonitoringJobDefinitionMonitoringInputsBatchTransformInput, MonitoringScheduleMonitoringScheduleConfigMonitoringJobDefinitionMonitoringInputsBatchTransformInputArgs
- DataCapturedDestinationS3Uri string
- Amazon S3 location being used to capture the data.
- DatasetFormat MonitoringScheduleMonitoringScheduleConfigMonitoringJobDefinitionMonitoringInputsBatchTransformInputDatasetFormat
- Dataset format for the batch transform job. Fields are documented below.
- LocalPath string
- Path to the filesystem where the batch transform data is available to the container.
- EndTimeOffset string
- Monitoring jobs subtract this time from the end time.
- ExcludeFeaturesAttribute string
- Attributes of the input data to exclude from the analysis.
- FeaturesAttribute string
- Attributes of the input data that are the input features.
- InferenceAttribute string
- Attribute of the input data that represents the ground truth label.
- ProbabilityAttribute string
- In a classification problem, the attribute that represents the class probability.
- ProbabilityThresholdAttribute double
- Threshold for the class probability to be evaluated as a positive result.
- S3DataDistributionType string
- Whether input data distributed in Amazon S3 is fully replicated or sharded by an S3 key. Valid values: FullyReplicated, ShardedByS3Key.
- S3InputMode string
- Input mode for transferring data for the monitoring job. Valid values: Pipe, File.
- StartTimeOffset string
- Monitoring jobs subtract this time from the start time.
- DataCapturedDestinationS3Uri string
- Amazon S3 location being used to capture the data.
- DatasetFormat MonitoringScheduleMonitoringScheduleConfigMonitoringJobDefinitionMonitoringInputsBatchTransformInputDatasetFormat
- Dataset format for the batch transform job. Fields are documented below.
- LocalPath string
- Path to the filesystem where the batch transform data is available to the container.
- EndTimeOffset string
- Monitoring jobs subtract this time from the end time.
- ExcludeFeaturesAttribute string
- Attributes of the input data to exclude from the analysis.
- FeaturesAttribute string
- Attributes of the input data that are the input features.
- InferenceAttribute string
- Attribute of the input data that represents the ground truth label.
- ProbabilityAttribute string
- In a classification problem, the attribute that represents the class probability.
- ProbabilityThresholdAttribute float64
- Threshold for the class probability to be evaluated as a positive result.
- S3DataDistributionType string
- Whether input data distributed in Amazon S3 is fully replicated or sharded by an S3 key. Valid values: FullyReplicated, ShardedByS3Key.
- S3InputMode string
- Input mode for transferring data for the monitoring job. Valid values: Pipe, File.
- StartTimeOffset string
- Monitoring jobs subtract this time from the start time.
- dataCapturedDestinationS3Uri String
- Amazon S3 location being used to capture the data.
- datasetFormat MonitoringScheduleMonitoringScheduleConfigMonitoringJobDefinitionMonitoringInputsBatchTransformInputDatasetFormat
- Dataset format for the batch transform job. Fields are documented below.
- localPath String
- Path to the filesystem where the batch transform data is available to the container.
- endTimeOffset String
- Monitoring jobs subtract this time from the end time.
- excludeFeaturesAttribute String
- Attributes of the input data to exclude from the analysis.
- featuresAttribute String
- Attributes of the input data that are the input features.
- inferenceAttribute String
- Attribute of the input data that represents the ground truth label.
- probabilityAttribute String
- In a classification problem, the attribute that represents the class probability.
- probabilityThresholdAttribute Double
- Threshold for the class probability to be evaluated as a positive result.
- s3DataDistributionType String
- Whether input data distributed in Amazon S3 is fully replicated or sharded by an S3 key. Valid values: FullyReplicated, ShardedByS3Key.
- s3InputMode String
- Input mode for transferring data for the monitoring job. Valid values: Pipe, File.
- startTimeOffset String
- Monitoring jobs subtract this time from the start time.
- data
Captured stringDestination S3Uri - Amazon S3 location being used to capture the data.
- dataset
Format MonitoringSchedule Monitoring Schedule Config Monitoring Job Definition Monitoring Inputs Batch Transform Input Dataset Format - Dataset format for the batch transform job. Fields are documented below.
- local
Path string - Path to the filesystem where the batch transform data is available to the container.
- end
Time stringOffset - Monitoring jobs subtract this time from the end time.
- exclude
Features stringAttribute - Attributes of the input data to exclude from the analysis.
- features
Attribute string - Attributes of the input data that are the input features.
- inference
Attribute string - Attribute of the input data that represents the ground truth label.
- probability
Attribute string - In a classification problem, the attribute that represents the class probability.
- probability
Threshold numberAttribute - Threshold for the class probability to be evaluated as a positive result.
- s3Data
Distribution stringType - Whether input data distributed in Amazon S3 is fully replicated or sharded by an S3 key. Valid values:
FullyReplicated,ShardedByS3Key. - s3Input
Mode string - Input mode for transferring data for the monitoring job. Valid values:
Pipe,File. - start
Time stringOffset - Monitoring jobs subtract this time from the start time.
- data_
captured_ strdestination_ s3_ uri - Amazon S3 location being used to capture the data.
- dataset_
format MonitoringSchedule Monitoring Schedule Config Monitoring Job Definition Monitoring Inputs Batch Transform Input Dataset Format - Dataset format for the batch transform job. Fields are documented below.
- local_
path str - Path to the filesystem where the batch transform data is available to the container.
- end_
time_ stroffset - Monitoring jobs subtract this time from the end time.
- exclude_
features_ strattribute - Attributes of the input data to exclude from the analysis.
- features_
attribute str - Attributes of the input data that are the input features.
- inference_
attribute str - Attribute of the input data that represents the ground truth label.
- probability_
attribute str - In a classification problem, the attribute that represents the class probability.
- probability_
threshold_ floatattribute - Threshold for the class probability to be evaluated as a positive result.
- s3_
data_ strdistribution_ type - Whether input data distributed in Amazon S3 is fully replicated or sharded by an S3 key. Valid values:
FullyReplicated,ShardedByS3Key. - s3_
input_ strmode - Input mode for transferring data for the monitoring job. Valid values:
Pipe,File. - start_
time_ stroffset - Monitoring jobs subtract this time from the start time.
- data
Captured StringDestination S3Uri - Amazon S3 location being used to capture the data.
- dataset
Format Property Map - Dataset format for the batch transform job. Fields are documented below.
- local
Path String - Path to the filesystem where the batch transform data is available to the container.
- end
Time StringOffset - Monitoring jobs subtract this time from the end time.
- exclude
Features StringAttribute - Attributes of the input data to exclude from the analysis.
- features
Attribute String - Attributes of the input data that are the input features.
- inference
Attribute String - Attribute of the input data that represents the ground truth label.
- probability
Attribute String - In a classification problem, the attribute that represents the class probability.
- probability
Threshold NumberAttribute - Threshold for the class probability to be evaluated as a positive result.
- s3Data
Distribution StringType - Whether input data distributed in Amazon S3 is fully replicated or sharded by an S3 key. Valid values:
FullyReplicated,ShardedByS3Key. - s3Input
Mode String - Input mode for transferring data for the monitoring job. Valid values:
Pipe,File. - start
Time StringOffset - Monitoring jobs subtract this time from the start time.
MonitoringScheduleMonitoringScheduleConfigMonitoringJobDefinitionMonitoringInputsBatchTransformInputDatasetFormat, MonitoringScheduleMonitoringScheduleConfigMonitoringJobDefinitionMonitoringInputsBatchTransformInputDatasetFormatArgs
- Csv
Monitoring
Schedule Monitoring Schedule Config Monitoring Job Definition Monitoring Inputs Batch Transform Input Dataset Format Csv - CSV dataset used in the monitoring job. Fields are documented below.
- Json
Monitoring
Schedule Monitoring Schedule Config Monitoring Job Definition Monitoring Inputs Batch Transform Input Dataset Format Json - JSON dataset used in the monitoring job. Fields are documented below.
- Csv
Monitoring
Schedule Monitoring Schedule Config Monitoring Job Definition Monitoring Inputs Batch Transform Input Dataset Format Csv - CSV dataset used in the monitoring job. Fields are documented below.
- Json
Monitoring
Schedule Monitoring Schedule Config Monitoring Job Definition Monitoring Inputs Batch Transform Input Dataset Format Json - JSON dataset used in the monitoring job. Fields are documented below.
- csv
Monitoring
Schedule Monitoring Schedule Config Monitoring Job Definition Monitoring Inputs Batch Transform Input Dataset Format Csv - CSV dataset used in the monitoring job. Fields are documented below.
- json
Monitoring
Schedule Monitoring Schedule Config Monitoring Job Definition Monitoring Inputs Batch Transform Input Dataset Format Json - JSON dataset used in the monitoring job. Fields are documented below.
- csv
Monitoring
Schedule Monitoring Schedule Config Monitoring Job Definition Monitoring Inputs Batch Transform Input Dataset Format Csv - CSV dataset used in the monitoring job. Fields are documented below.
- json
Monitoring
Schedule Monitoring Schedule Config Monitoring Job Definition Monitoring Inputs Batch Transform Input Dataset Format Json - JSON dataset used in the monitoring job. Fields are documented below.
- csv
Monitoring
Schedule Monitoring Schedule Config Monitoring Job Definition Monitoring Inputs Batch Transform Input Dataset Format Csv - CSV dataset used in the monitoring job. Fields are documented below.
- json
Monitoring
Schedule Monitoring Schedule Config Monitoring Job Definition Monitoring Inputs Batch Transform Input Dataset Format Json - JSON dataset used in the monitoring job. Fields are documented below.
- csv Property Map
- CSV dataset used in the monitoring job. Fields are documented below.
- json Property Map
- JSON dataset used in the monitoring job. Fields are documented below.
MonitoringScheduleMonitoringScheduleConfigMonitoringJobDefinitionMonitoringInputsBatchTransformInputDatasetFormatCsv, MonitoringScheduleMonitoringScheduleConfigMonitoringJobDefinitionMonitoringInputsBatchTransformInputDatasetFormatCsvArgs
- Header bool
- Indicates if the CSV data has a header.
- Header bool
- Indicates if the CSV data has a header.
- header Boolean
- Indicates if the CSV data has a header.
- header boolean
- Indicates if the CSV data has a header.
- header bool
- Indicates if the CSV data has a header.
- header Boolean
- Indicates if the CSV data has a header.
MonitoringScheduleMonitoringScheduleConfigMonitoringJobDefinitionMonitoringInputsBatchTransformInputDatasetFormatJson, MonitoringScheduleMonitoringScheduleConfigMonitoringJobDefinitionMonitoringInputsBatchTransformInputDatasetFormatJsonArgs
- Line bool
- Indicates if the file should be read as a JSON object per line.
- Line bool
- Indicates if the file should be read as a JSON object per line.
- line Boolean
- Indicates if the file should be read as a JSON object per line.
- line boolean
- Indicates if the file should be read as a JSON object per line.
- line bool
- Indicates if the file should be read as a JSON object per line.
- line Boolean
- Indicates if the file should be read as a JSON object per line.
MonitoringScheduleMonitoringScheduleConfigMonitoringJobDefinitionMonitoringInputsEndpointInput, MonitoringScheduleMonitoringScheduleConfigMonitoringJobDefinitionMonitoringInputsEndpointInputArgs
- Endpoint
Name string - Endpoint in customer's account which has enabled
DataCaptureConfig. - Local
Path string - Path to the filesystem where the endpoint data is available to the container.
- End
Time stringOffset - Monitoring jobs subtract this time from the end time.
- Exclude
Features stringAttribute - Attributes of the input data to exclude from the analysis.
- Features
Attribute string - Attributes of the input data that are the input features.
- Inference
Attribute string - Attribute of the input data that represents the ground truth label.
- Probability
Attribute string - In a classification problem, the attribute that represents the class probability.
- Probability
Threshold doubleAttribute - Threshold for the class probability to be evaluated as a positive result.
- S3Data
Distribution stringType - Whether input data distributed in Amazon S3 is fully replicated or sharded by an S3 key. Valid values:
FullyReplicated,ShardedByS3Key. - S3Input
Mode string - Input mode for transferring data for the monitoring job. Valid values:
Pipe,File. - Start
Time stringOffset - Monitoring jobs subtract this time from the start time.
- Endpoint
Name string - Endpoint in customer's account which has enabled
DataCaptureConfig. - Local
Path string - Path to the filesystem where the endpoint data is available to the container.
- End
Time stringOffset - Monitoring jobs subtract this time from the end time.
- Exclude
Features stringAttribute - Attributes of the input data to exclude from the analysis.
- Features
Attribute string - Attributes of the input data that are the input features.
- Inference
Attribute string - Attribute of the input data that represents the ground truth label.
- Probability
Attribute string - In a classification problem, the attribute that represents the class probability.
- Probability
Threshold float64Attribute - Threshold for the class probability to be evaluated as a positive result.
- S3Data
Distribution stringType - Whether input data distributed in Amazon S3 is fully replicated or sharded by an S3 key. Valid values:
FullyReplicated,ShardedByS3Key. - S3Input
Mode string - Input mode for transferring data for the monitoring job. Valid values:
Pipe,File. - Start
Time stringOffset - Monitoring jobs subtract this time from the start time.
- endpoint
Name String - Endpoint in customer's account which has enabled
DataCaptureConfig. - local
Path String - Path to the filesystem where the endpoint data is available to the container.
- end
Time StringOffset - Monitoring jobs subtract this time from the end time.
- exclude
Features StringAttribute - Attributes of the input data to exclude from the analysis.
- features
Attribute String - Attributes of the input data that are the input features.
- inference
Attribute String - Attribute of the input data that represents the ground truth label.
- probability
Attribute String - In a classification problem, the attribute that represents the class probability.
- probability
Threshold DoubleAttribute - Threshold for the class probability to be evaluated as a positive result.
- s3Data
Distribution StringType - Whether input data distributed in Amazon S3 is fully replicated or sharded by an S3 key. Valid values:
FullyReplicated,ShardedByS3Key. - s3Input
Mode String - Input mode for transferring data for the monitoring job. Valid values:
Pipe,File. - start
Time StringOffset - Monitoring jobs subtract this time from the start time.
- endpoint
Name string - Endpoint in customer's account which has enabled
DataCaptureConfig. - local
Path string - Path to the filesystem where the endpoint data is available to the container.
- end
Time stringOffset - Monitoring jobs subtract this time from the end time.
- exclude
Features stringAttribute - Attributes of the input data to exclude from the analysis.
- features
Attribute string - Attributes of the input data that are the input features.
- inference
Attribute string - Attribute of the input data that represents the ground truth label.
- probability
Attribute string - In a classification problem, the attribute that represents the class probability.
- probability
Threshold numberAttribute - Threshold for the class probability to be evaluated as a positive result.
- s3Data
Distribution stringType - Whether input data distributed in Amazon S3 is fully replicated or sharded by an S3 key. Valid values:
FullyReplicated,ShardedByS3Key. - s3Input
Mode string - Input mode for transferring data for the monitoring job. Valid values:
Pipe,File. - start
Time stringOffset - Monitoring jobs subtract this time from the start time.
- endpoint_
name str - Endpoint in customer's account which has enabled
DataCaptureConfig. - local_
path str - Path to the filesystem where the endpoint data is available to the container.
- end_
time_ stroffset - Monitoring jobs subtract this time from the end time.
- exclude_
features_ strattribute - Attributes of the input data to exclude from the analysis.
- features_
attribute str - Attributes of the input data that are the input features.
- inference_
attribute str - Attribute of the input data that represents the ground truth label.
- probability_
attribute str - In a classification problem, the attribute that represents the class probability.
- probability_
threshold_ floatattribute - Threshold for the class probability to be evaluated as a positive result.
- s3_
data_ strdistribution_ type - Whether input data distributed in Amazon S3 is fully replicated or sharded by an S3 key. Valid values:
FullyReplicated,ShardedByS3Key. - s3_
input_ strmode - Input mode for transferring data for the monitoring job. Valid values:
Pipe,File. - start_
time_ stroffset - Monitoring jobs subtract this time from the start time.
- endpoint
Name String - Endpoint in customer's account which has enabled
DataCaptureConfig. - local
Path String - Path to the filesystem where the endpoint data is available to the container.
- end
Time StringOffset - Monitoring jobs subtract this time from the end time.
- exclude
Features StringAttribute - Attributes of the input data to exclude from the analysis.
- features
Attribute String - Attributes of the input data that are the input features.
- inference
Attribute String - Attribute of the input data that represents the ground truth label.
- probability
Attribute String - In a classification problem, the attribute that represents the class probability.
- probability
Threshold NumberAttribute - Threshold for the class probability to be evaluated as a positive result.
- s3Data
Distribution StringType - Whether input data distributed in Amazon S3 is fully replicated or sharded by an S3 key. Valid values:
FullyReplicated,ShardedByS3Key. - s3Input
Mode String - Input mode for transferring data for the monitoring job. Valid values:
Pipe,File. - start
Time StringOffset - Monitoring jobs subtract this time from the start time.
MonitoringScheduleMonitoringScheduleConfigMonitoringJobDefinitionMonitoringOutputConfig, MonitoringScheduleMonitoringScheduleConfigMonitoringJobDefinitionMonitoringOutputConfigArgs
- Monitoring
Outputs MonitoringSchedule Monitoring Schedule Config Monitoring Job Definition Monitoring Output Config Monitoring Outputs - Monitoring outputs for monitoring jobs. Fields are documented below.
- Kms
Key stringId - AWS KMS key that Amazon SageMaker AI uses to encrypt the model artifacts at rest using Amazon S3 server-side encryption.
- Monitoring
Outputs MonitoringSchedule Monitoring Schedule Config Monitoring Job Definition Monitoring Output Config Monitoring Outputs - Monitoring outputs for monitoring jobs. Fields are documented below.
- Kms
Key stringId - AWS KMS key that Amazon SageMaker AI uses to encrypt the model artifacts at rest using Amazon S3 server-side encryption.
- monitoring
Outputs MonitoringSchedule Monitoring Schedule Config Monitoring Job Definition Monitoring Output Config Monitoring Outputs - Monitoring outputs for monitoring jobs. Fields are documented below.
- kms
Key StringId - AWS KMS key that Amazon SageMaker AI uses to encrypt the model artifacts at rest using Amazon S3 server-side encryption.
- monitoring
Outputs MonitoringSchedule Monitoring Schedule Config Monitoring Job Definition Monitoring Output Config Monitoring Outputs - Monitoring outputs for monitoring jobs. Fields are documented below.
- kms
Key stringId - AWS KMS key that Amazon SageMaker AI uses to encrypt the model artifacts at rest using Amazon S3 server-side encryption.
- monitoring_
outputs MonitoringSchedule Monitoring Schedule Config Monitoring Job Definition Monitoring Output Config Monitoring Outputs - Monitoring outputs for monitoring jobs. Fields are documented below.
- kms_
key_ strid - AWS KMS key that Amazon SageMaker AI uses to encrypt the model artifacts at rest using Amazon S3 server-side encryption.
- monitoring
Outputs Property Map - Monitoring outputs for monitoring jobs. Fields are documented below.
- kms
Key StringId - AWS KMS key that Amazon SageMaker AI uses to encrypt the model artifacts at rest using Amazon S3 server-side encryption.
MonitoringScheduleMonitoringScheduleConfigMonitoringJobDefinitionMonitoringOutputConfigMonitoringOutputs, MonitoringScheduleMonitoringScheduleConfigMonitoringJobDefinitionMonitoringOutputConfigMonitoringOutputsArgs
- S3Output
Monitoring
Schedule Monitoring Schedule Config Monitoring Job Definition Monitoring Output Config Monitoring Outputs S3Output - Amazon S3 storage location where the results of a monitoring job are saved. Fields are documented below.
- S3Output
Monitoring
Schedule Monitoring Schedule Config Monitoring Job Definition Monitoring Output Config Monitoring Outputs S3Output - Amazon S3 storage location where the results of a monitoring job are saved. Fields are documented below.
- s3Output
Monitoring
Schedule Monitoring Schedule Config Monitoring Job Definition Monitoring Output Config Monitoring Outputs S3Output - Amazon S3 storage location where the results of a monitoring job are saved. Fields are documented below.
- s3Output
Monitoring
Schedule Monitoring Schedule Config Monitoring Job Definition Monitoring Output Config Monitoring Outputs S3Output - Amazon S3 storage location where the results of a monitoring job are saved. Fields are documented below.
- s3_
output MonitoringSchedule Monitoring Schedule Config Monitoring Job Definition Monitoring Output Config Monitoring Outputs S3Output - Amazon S3 storage location where the results of a monitoring job are saved. Fields are documented below.
- s3Output Property Map
- Amazon S3 storage location where the results of a monitoring job are saved. Fields are documented below.
MonitoringScheduleMonitoringScheduleConfigMonitoringJobDefinitionMonitoringOutputConfigMonitoringOutputsS3Output, MonitoringScheduleMonitoringScheduleConfigMonitoringJobDefinitionMonitoringOutputConfigMonitoringOutputsS3OutputArgs
- Local
Path string - Local path to the Amazon S3 storage location where Amazon SageMaker AI saves the results of a monitoring job.
- S3Uri string
- URI that identifies the Amazon S3 storage location where Amazon SageMaker AI saves the results of a monitoring job.
- S3Upload
Mode string - Whether to upload the results of the monitoring job continuously or after the job completes. Valid values:
Continuous,EndOfJob.
- Local
Path string - Local path to the Amazon S3 storage location where Amazon SageMaker AI saves the results of a monitoring job.
- S3Uri string
- URI that identifies the Amazon S3 storage location where Amazon SageMaker AI saves the results of a monitoring job.
- S3Upload
Mode string - Whether to upload the results of the monitoring job continuously or after the job completes. Valid values:
Continuous,EndOfJob.
- local
Path String - Local path to the Amazon S3 storage location where Amazon SageMaker AI saves the results of a monitoring job.
- s3Uri String
- URI that identifies the Amazon S3 storage location where Amazon SageMaker AI saves the results of a monitoring job.
- s3Upload
Mode String - Whether to upload the results of the monitoring job continuously or after the job completes. Valid values:
Continuous,EndOfJob.
- local
Path string - Local path to the Amazon S3 storage location where Amazon SageMaker AI saves the results of a monitoring job.
- s3Uri string
- URI that identifies the Amazon S3 storage location where Amazon SageMaker AI saves the results of a monitoring job.
- s3Upload
Mode string - Whether to upload the results of the monitoring job continuously or after the job completes. Valid values:
Continuous,EndOfJob.
- local_
path str - Local path to the Amazon S3 storage location where Amazon SageMaker AI saves the results of a monitoring job.
- s3_
uri str - URI that identifies the Amazon S3 storage location where Amazon SageMaker AI saves the results of a monitoring job.
- s3_
upload_ strmode - Whether to upload the results of the monitoring job continuously or after the job completes. Valid values:
Continuous,EndOfJob.
- local
Path String - Local path to the Amazon S3 storage location where Amazon SageMaker AI saves the results of a monitoring job.
- s3Uri String
- URI that identifies the Amazon S3 storage location where Amazon SageMaker AI saves the results of a monitoring job.
- s3Upload
Mode String - Whether to upload the results of the monitoring job continuously or after the job completes. Valid values:
Continuous,EndOfJob.
MonitoringScheduleMonitoringScheduleConfigMonitoringJobDefinitionMonitoringResources, MonitoringScheduleMonitoringScheduleConfigMonitoringJobDefinitionMonitoringResourcesArgs
- Cluster
Config MonitoringSchedule Monitoring Schedule Config Monitoring Job Definition Monitoring Resources Cluster Config - Configuration for the cluster resources used to run the processing job. Fields are documented below.
- Cluster
Config MonitoringSchedule Monitoring Schedule Config Monitoring Job Definition Monitoring Resources Cluster Config - Configuration for the cluster resources used to run the processing job. Fields are documented below.
- cluster
Config MonitoringSchedule Monitoring Schedule Config Monitoring Job Definition Monitoring Resources Cluster Config - Configuration for the cluster resources used to run the processing job. Fields are documented below.
- cluster
Config MonitoringSchedule Monitoring Schedule Config Monitoring Job Definition Monitoring Resources Cluster Config - Configuration for the cluster resources used to run the processing job. Fields are documented below.
- cluster_
config MonitoringSchedule Monitoring Schedule Config Monitoring Job Definition Monitoring Resources Cluster Config - Configuration for the cluster resources used to run the processing job. Fields are documented below.
- cluster
Config Property Map - Configuration for the cluster resources used to run the processing job. Fields are documented below.
MonitoringScheduleMonitoringScheduleConfigMonitoringJobDefinitionMonitoringResourcesClusterConfig, MonitoringScheduleMonitoringScheduleConfigMonitoringJobDefinitionMonitoringResourcesClusterConfigArgs
- Instance
Count int - Number of ML compute instances to use in the model monitoring job.
- Instance
Type string - ML compute instance type for the processing job.
- Volume
Size intIn Gb - Size of the ML storage volume, in gigabytes, to provision.
- Volume
Kms stringKey Id - AWS KMS key that Amazon SageMaker AI uses to encrypt data on the storage volume attached to the ML compute instance(s) that run the model monitoring job.
- Instance
Count int - Number of ML compute instances to use in the model monitoring job.
- Instance
Type string - ML compute instance type for the processing job.
- Volume
Size intIn Gb - Size of the ML storage volume, in gigabytes, to provision.
- Volume
Kms stringKey Id - AWS KMS key that Amazon SageMaker AI uses to encrypt data on the storage volume attached to the ML compute instance(s) that run the model monitoring job.
- instance
Count Integer - Number of ML compute instances to use in the model monitoring job.
- instance
Type String - ML compute instance type for the processing job.
- volume
Size IntegerIn Gb - Size of the ML storage volume, in gigabytes, to provision.
- volume
Kms StringKey Id - AWS KMS key that Amazon SageMaker AI uses to encrypt data on the storage volume attached to the ML compute instance(s) that run the model monitoring job.
- instance
Count number - Number of ML compute instances to use in the model monitoring job.
- instance
Type string - ML compute instance type for the processing job.
- volume
Size numberIn Gb - Size of the ML storage volume, in gigabytes, to provision.
- volume
Kms stringKey Id - AWS KMS key that Amazon SageMaker AI uses to encrypt data on the storage volume attached to the ML compute instance(s) that run the model monitoring job.
- instance_
count int - Number of ML compute instances to use in the model monitoring job.
- instance_
type str - ML compute instance type for the processing job.
- volume_
size_ intin_ gb - Size of the ML storage volume, in gigabytes, to provision.
- volume_
kms_ strkey_ id - AWS KMS key that Amazon SageMaker AI uses to encrypt data on the storage volume attached to the ML compute instance(s) that run the model monitoring job.
- instance
Count Number - Number of ML compute instances to use in the model monitoring job.
- instance
Type String - ML compute instance type for the processing job.
- volume
Size NumberIn Gb - Size of the ML storage volume, in gigabytes, to provision.
- volume
Kms StringKey Id - AWS KMS key that Amazon SageMaker AI uses to encrypt data on the storage volume attached to the ML compute instance(s) that run the model monitoring job.
MonitoringScheduleMonitoringScheduleConfigMonitoringJobDefinitionNetworkConfig, MonitoringScheduleMonitoringScheduleConfigMonitoringJobDefinitionNetworkConfigArgs
- Enable
Inter boolContainer Traffic Encryption - Whether to encrypt all communications between distributed processing jobs.
- Enable
Network boolIsolation - Whether to allow inbound and outbound network calls to and from the containers used for the processing job.
- Vpc
Config MonitoringSchedule Monitoring Schedule Config Monitoring Job Definition Network Config Vpc Config - VPC that SageMaker jobs, hosted models, and compute resources have access to. Fields are documented below.
- Enable
Inter boolContainer Traffic Encryption - Whether to encrypt all communications between distributed processing jobs.
- Enable
Network boolIsolation - Whether to allow inbound and outbound network calls to and from the containers used for the processing job.
- Vpc
Config MonitoringSchedule Monitoring Schedule Config Monitoring Job Definition Network Config Vpc Config - VPC that SageMaker jobs, hosted models, and compute resources have access to. Fields are documented below.
- enable
Inter BooleanContainer Traffic Encryption - Whether to encrypt all communications between distributed processing jobs.
- enable
Network BooleanIsolation - Whether to allow inbound and outbound network calls to and from the containers used for the processing job.
- vpc
Config MonitoringSchedule Monitoring Schedule Config Monitoring Job Definition Network Config Vpc Config - VPC that SageMaker jobs, hosted models, and compute resources have access to. Fields are documented below.
- enable
Inter booleanContainer Traffic Encryption - Whether to encrypt all communications between distributed processing jobs.
- enable
Network booleanIsolation - Whether to allow inbound and outbound network calls to and from the containers used for the processing job.
- vpc
Config MonitoringSchedule Monitoring Schedule Config Monitoring Job Definition Network Config Vpc Config - VPC that SageMaker jobs, hosted models, and compute resources have access to. Fields are documented below.
- enable_
inter_ boolcontainer_ traffic_ encryption - Whether to encrypt all communications between distributed processing jobs.
- enable_
network_ boolisolation - Whether to allow inbound and outbound network calls to and from the containers used for the processing job.
- vpc_
config MonitoringSchedule Monitoring Schedule Config Monitoring Job Definition Network Config Vpc Config - VPC that SageMaker jobs, hosted models, and compute resources have access to. Fields are documented below.
- enable
Inter BooleanContainer Traffic Encryption - Whether to encrypt all communications between distributed processing jobs.
- enable
Network BooleanIsolation - Whether to allow inbound and outbound network calls to and from the containers used for the processing job.
- vpc
Config Property Map - VPC that SageMaker jobs, hosted models, and compute resources have access to. Fields are documented below.
MonitoringScheduleMonitoringScheduleConfigMonitoringJobDefinitionNetworkConfigVpcConfig, MonitoringScheduleMonitoringScheduleConfigMonitoringJobDefinitionNetworkConfigVpcConfigArgs
- Security
Group List<string>Ids - VPC security group IDs.
- Subnets List<string>
- Subnet IDs.
- Security
Group []stringIds - VPC security group IDs.
- Subnets []string
- Subnet IDs.
- security
Group List<String>Ids - VPC security group IDs.
- subnets List<String>
- Subnet IDs.
- security
Group string[]Ids - VPC security group IDs.
- subnets string[]
- Subnet IDs.
- security_
group_ Sequence[str]ids - VPC security group IDs.
- subnets Sequence[str]
- Subnet IDs.
- security
Group List<String>Ids - VPC security group IDs.
- subnets List<String>
- Subnet IDs.
MonitoringScheduleMonitoringScheduleConfigMonitoringJobDefinitionStoppingCondition, MonitoringScheduleMonitoringScheduleConfigMonitoringJobDefinitionStoppingConditionArgs
- Max
Runtime intIn Seconds - Maximum runtime allowed in seconds.
- Max
Runtime intIn Seconds - Maximum runtime allowed in seconds.
- max
Runtime IntegerIn Seconds - Maximum runtime allowed in seconds.
- max
Runtime numberIn Seconds - Maximum runtime allowed in seconds.
- max_
runtime_ intin_ seconds - Maximum runtime allowed in seconds.
- max
Runtime NumberIn Seconds - Maximum runtime allowed in seconds.
MonitoringScheduleMonitoringScheduleConfigScheduleConfig, MonitoringScheduleMonitoringScheduleConfigScheduleConfigArgs
- Schedule
Expression string - A cron expression that describes details about the monitoring schedule. For example, an hourly schedule would be
cron(0 * ? * * *).
- Schedule
Expression string - A cron expression that describes details about the monitoring schedule. For example, an hourly schedule would be
cron(0 * ? * * *).
- schedule
Expression String - A cron expression that describes details about the monitoring schedule. For example, an hourly schedule would be
cron(0 * ? * * *).
- schedule
Expression string - A cron expression that describes details about the monitoring schedule. For example, an hourly schedule would be
cron(0 * ? * * *).
- schedule_
expression str - A cron expression that describes details about the monitoring schedule. For example, an hourly schedule would be
cron(0 * ? * * *).
- schedule
Expression String - A cron expression that describes details about the monitoring schedule. For example, an hourly schedule would be
cron(0 * ? * * *).
Import
Using pulumi import, import monitoring schedules using the name. For example:
$ pulumi import aws:sagemaker/monitoringSchedule:MonitoringSchedule test_monitoring_schedule monitoring-schedule-foo
To learn more about importing existing cloud resources, see Importing resources.
Package Details
- Repository
- AWS Classic pulumi/pulumi-aws
- License
- Apache-2.0
- Notes
- This Pulumi package is based on the
awsTerraform Provider.
