
azure-native.machinelearningservices.Job

This is the latest version of Azure Native. Use the Azure Native v1 docs if using the v1 version of this package.
Azure Native v2.42.1 published on Wednesday, May 22, 2024 by Pulumi

    Azure Resource Manager resource envelope. Azure REST API version: 2023-04-01. Prior API version in Azure Native 1.x: 2021-03-01-preview.

    Other available API versions: 2021-03-01-preview, 2022-02-01-preview, 2023-04-01-preview, 2023-06-01-preview, 2023-08-01-preview, 2023-10-01, 2024-01-01-preview, 2024-04-01, 2024-04-01-preview.
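
    For example, one of the listed API versions can be targeted explicitly through the corresponding versioned submodule of the SDK instead of the default resource. A minimal Python sketch (the exact versioned submodule name, shown here as v20240401, is an assumption to verify against the installed SDK):

    import pulumi
    import pulumi_azure_native as azure_native

    # Assumption: the 2024-04-01 API version is exposed as the versioned
    # submodule machinelearningservices.v20240401 in the v2 SDK.
    job = azure_native.machinelearningservices.v20240401.Job("job",
        resource_group_name="test-rg",
        workspace_name="my-aml-workspace",
        job_base_properties=azure_native.machinelearningservices.v20240401.CommandJobArgs(
            job_type="Command",
            command="echo hello",
            environment_id="string",
        ))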

    Example Usage

    CreateOrUpdate AutoML Job.

    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using AzureNative = Pulumi.AzureNative;
    
    return await Deployment.RunAsync(() => 
    {
        var job = new AzureNative.MachineLearningServices.Job("job", new()
        {
            Id = "string",
            JobBaseProperties = new AzureNative.MachineLearningServices.Inputs.AutoMLJobArgs
            {
                ComputeId = "string",
                Description = "string",
                DisplayName = "string",
                EnvironmentId = "string",
                EnvironmentVariables = 
                {
                    { "string", "string" },
                },
                ExperimentName = "string",
                Identity = new AzureNative.MachineLearningServices.Inputs.AmlTokenArgs
                {
                    IdentityType = "AMLToken",
                },
                IsArchived = false,
                JobType = "AutoML",
                Outputs = 
                {
                    { "string", new AzureNative.MachineLearningServices.Inputs.UriFileJobOutputArgs
                    {
                        Description = "string",
                        JobOutputType = "uri_file",
                        Mode = AzureNative.MachineLearningServices.OutputDeliveryMode.ReadWriteMount,
                        Uri = "string",
                    } },
                },
                Properties = 
                {
                    { "string", "string" },
                },
                Resources = new AzureNative.MachineLearningServices.Inputs.JobResourceConfigurationArgs
                {
                    InstanceCount = 1,
                    InstanceType = "string",
                    Properties = 
                    {
                        { "string", new Dictionary<string, object?>
                        {
                            { "9bec0ab0-c62f-4fa9-a97c-7b24bbcc90ad", null },
                        } },
                    },
                },
                Services = 
                {
                    { "string", new AzureNative.MachineLearningServices.Inputs.JobServiceArgs
                    {
                        Endpoint = "string",
                        JobServiceType = "string",
                        Port = 1,
                        Properties = 
                        {
                            { "string", "string" },
                        },
                    } },
                },
                Tags = 
                {
                    { "string", "string" },
                },
                TaskDetails = new AzureNative.MachineLearningServices.Inputs.ImageClassificationArgs
                {
                    LimitSettings = new AzureNative.MachineLearningServices.Inputs.ImageLimitSettingsArgs
                    {
                        MaxTrials = 2,
                    },
                    ModelSettings = new AzureNative.MachineLearningServices.Inputs.ImageModelSettingsClassificationArgs
                    {
                        ValidationCropSize = 2,
                    },
                    SearchSpace = new[]
                    {
                        new AzureNative.MachineLearningServices.Inputs.ImageModelDistributionSettingsClassificationArgs
                        {
                            ValidationCropSize = "choice(2, 360)",
                        },
                    },
                    TargetColumnName = "string",
                    TaskType = "ImageClassification",
                    TrainingData = new AzureNative.MachineLearningServices.Inputs.MLTableJobInputArgs
                    {
                        JobInputType = "mltable",
                        Uri = "string",
                    },
                },
            },
            ResourceGroupName = "test-rg",
            WorkspaceName = "my-aml-workspace",
        });
    
    });
    
    package main

    import (
        "github.com/pulumi/pulumi-azure-native-sdk/machinelearningservices/v2"
        "github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )

    func main() {
        pulumi.Run(func(ctx *pulumi.Context) error {
            _, err := machinelearningservices.NewJob(ctx, "job", &machinelearningservices.JobArgs{
                Id: pulumi.String("string"),
                JobBaseProperties: machinelearningservices.AutoMLJob{
                    ComputeId: "string",
                    Description: "string",
                    DisplayName: "string",
                    EnvironmentId: "string",
                    EnvironmentVariables: map[string]interface{}{
                        "string": "string",
                    },
                    ExperimentName: "string",
                    Identity: machinelearningservices.AmlToken{
                        IdentityType: "AMLToken",
                    },
                    IsArchived: false,
                    JobType: "AutoML",
                    Outputs: map[string]interface{}{
                        "string": machinelearningservices.UriFileJobOutput{
                            Description: "string",
                            JobOutputType: "uri_file",
                            Mode: machinelearningservices.OutputDeliveryModeReadWriteMount,
                            Uri: "string",
                        },
                    },
                    Properties: map[string]interface{}{
                        "string": "string",
                    },
                    Resources: machinelearningservices.JobResourceConfiguration{
                        InstanceCount: 1,
                        InstanceType: "string",
                        Properties: map[string]interface{}{
                            "string": map[string]interface{}{
                                "9bec0ab0-c62f-4fa9-a97c-7b24bbcc90ad": nil,
                            },
                        },
                    },
                    Services: map[string]interface{}{
                        "string": machinelearningservices.JobService{
                            Endpoint: "string",
                            JobServiceType: "string",
                            Port: 1,
                            Properties: map[string]interface{}{
                                "string": "string",
                            },
                        },
                    },
                    Tags: map[string]interface{}{
                        "string": "string",
                    },
                    TaskDetails: machinelearningservices.ImageClassification{
                        LimitSettings: machinelearningservices.ImageLimitSettings{
                            MaxTrials: 2,
                        },
                        ModelSettings: machinelearningservices.ImageModelSettingsClassification{
                            ValidationCropSize: 2,
                        },
                        SearchSpace: []machinelearningservices.ImageModelDistributionSettingsClassification{
                            {
                                ValidationCropSize: "choice(2, 360)",
                            },
                        },
                        TargetColumnName: "string",
                        TaskType: "ImageClassification",
                        TrainingData: machinelearningservices.MLTableJobInput{
                            JobInputType: "mltable",
                            Uri: "string",
                        },
                    },
                },
                ResourceGroupName: pulumi.String("test-rg"),
                WorkspaceName: pulumi.String("my-aml-workspace"),
            })
            if err != nil {
                return err
            }
            return nil
        })
    }
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.azurenative.machinelearningservices.Job;
    import com.pulumi.azurenative.machinelearningservices.JobArgs;
    import com.pulumi.azurenative.machinelearningservices.inputs.AutoMLJobArgs;
    import com.pulumi.azurenative.machinelearningservices.inputs.AmlTokenArgs;
    import com.pulumi.azurenative.machinelearningservices.inputs.JobResourceConfigurationArgs;
    import com.pulumi.azurenative.machinelearningservices.inputs.ImageClassificationArgs;
    import com.pulumi.azurenative.machinelearningservices.inputs.ImageLimitSettingsArgs;
    import com.pulumi.azurenative.machinelearningservices.inputs.ImageModelSettingsClassificationArgs;
    import com.pulumi.azurenative.machinelearningservices.inputs.ImageModelDistributionSettingsClassificationArgs;
    import com.pulumi.azurenative.machinelearningservices.inputs.MLTableJobInputArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            var job = new Job("job", JobArgs.builder()
                .id("string")
                .jobBaseProperties(AutoMLJobArgs.builder()
                    .computeId("string")
                    .description("string")
                    .displayName("string")
                    .environmentId("string")
                    .environmentVariables(Map.of("string", "string"))
                    .experimentName("string")
                    .identity(AmlTokenArgs.builder()
                        .identityType("AMLToken")
                        .build())
                    .isArchived(false)
                    .jobType("AutoML")
                    .outputs(Map.of("string", Map.ofEntries(
                        Map.entry("description", "string"),
                        Map.entry("jobOutputType", "uri_file"),
                        Map.entry("mode", "ReadWriteMount"),
                        Map.entry("uri", "string")
                    )))
                    .properties(Map.of("string", "string"))
                    .resources(JobResourceConfigurationArgs.builder()
                        .instanceCount(1)
                        .instanceType("string")
                        .properties(Map.of("string", Map.of("9bec0ab0-c62f-4fa9-a97c-7b24bbcc90ad", null)))
                        .build())
                    .services(Map.of("string", Map.ofEntries(
                        Map.entry("endpoint", "string"),
                        Map.entry("jobServiceType", "string"),
                        Map.entry("port", 1),
                        Map.entry("properties", Map.of("string", "string"))
                    )))
                    .tags(Map.of("string", "string"))
                    .taskDetails(ImageClassificationArgs.builder()
                        .limitSettings(ImageLimitSettingsArgs.builder()
                            .maxTrials(2)
                            .build())
                        .modelSettings(ImageModelSettingsClassificationArgs.builder()
                            .validationCropSize(2)
                            .build())
                        .searchSpace(ImageModelDistributionSettingsClassificationArgs.builder()
                            .validationCropSize("choice(2, 360)")
                            .build())
                        .targetColumnName("string")
                        .taskType("ImageClassification")
                        .trainingData(MLTableJobInputArgs.builder()
                            .jobInputType("mltable")
                            .uri("string")
                            .build())
                        .build())
                    .build())
                .resourceGroupName("test-rg")
                .workspaceName("my-aml-workspace")
                .build());
    
        }
    }
    
    import pulumi
    import pulumi_azure_native as azure_native
    
    job = azure_native.machinelearningservices.Job("job",
        id="string",
        job_base_properties=azure_native.machinelearningservices.AutoMLJobArgs(
            compute_id="string",
            description="string",
            display_name="string",
            environment_id="string",
            environment_variables={
                "string": "string",
            },
            experiment_name="string",
            identity=azure_native.machinelearningservices.AmlTokenArgs(
                identity_type="AMLToken",
            ),
            is_archived=False,
            job_type="AutoML",
            outputs={
                "string": azure_native.machinelearningservices.UriFileJobOutputArgs(
                    description="string",
                    job_output_type="uri_file",
                    mode=azure_native.machinelearningservices.OutputDeliveryMode.READ_WRITE_MOUNT,
                    uri="string",
                ),
            },
            properties={
                "string": "string",
            },
            resources=azure_native.machinelearningservices.JobResourceConfigurationArgs(
                instance_count=1,
                instance_type="string",
                properties={
                    "string": {
                        "9bec0ab0-c62f-4fa9-a97c-7b24bbcc90ad": None,
                    },
                },
            ),
            services={
                "string": azure_native.machinelearningservices.JobServiceArgs(
                    endpoint="string",
                    job_service_type="string",
                    port=1,
                    properties={
                        "string": "string",
                    },
                ),
            },
            tags={
                "string": "string",
            },
            task_details=azure_native.machinelearningservices.ImageClassificationArgs(
                limit_settings=azure_native.machinelearningservices.ImageLimitSettingsArgs(
                    max_trials=2,
                ),
                model_settings=azure_native.machinelearningservices.ImageModelSettingsClassificationArgs(
                    validation_crop_size=2,
                ),
                search_space=[azure_native.machinelearningservices.ImageModelDistributionSettingsClassificationArgs(
                    validation_crop_size="choice(2, 360)",
                )],
                target_column_name="string",
                task_type="ImageClassification",
                training_data=azure_native.machinelearningservices.MLTableJobInputArgs(
                    job_input_type="mltable",
                    uri="string",
                ),
            ),
        ),
        resource_group_name="test-rg",
        workspace_name="my-aml-workspace")
    
    import * as pulumi from "@pulumi/pulumi";
    import * as azure_native from "@pulumi/azure-native";
    
    const job = new azure_native.machinelearningservices.Job("job", {
        id: "string",
        jobBaseProperties: {
            computeId: "string",
            description: "string",
            displayName: "string",
            environmentId: "string",
            environmentVariables: {
                string: "string",
            },
            experimentName: "string",
            identity: {
                identityType: "AMLToken",
            },
            isArchived: false,
            jobType: "AutoML",
            outputs: {
                string: {
                    description: "string",
                    jobOutputType: "uri_file",
                    mode: azure_native.machinelearningservices.OutputDeliveryMode.ReadWriteMount,
                    uri: "string",
                },
            },
            properties: {
                string: "string",
            },
            resources: {
                instanceCount: 1,
                instanceType: "string",
                properties: {
                    string: {
                        "9bec0ab0-c62f-4fa9-a97c-7b24bbcc90ad": undefined,
                    },
                },
            },
            services: {
                string: {
                    endpoint: "string",
                    jobServiceType: "string",
                    port: 1,
                    properties: {
                        string: "string",
                    },
                },
            },
            tags: {
                string: "string",
            },
            taskDetails: {
                limitSettings: {
                    maxTrials: 2,
                },
                modelSettings: {
                    validationCropSize: 2,
                },
                searchSpace: [{
                    validationCropSize: "choice(2, 360)",
                }],
                targetColumnName: "string",
                taskType: "ImageClassification",
                trainingData: {
                    jobInputType: "mltable",
                    uri: "string",
                },
            },
        },
        resourceGroupName: "test-rg",
        workspaceName: "my-aml-workspace",
    });
    
    resources:
      job:
        type: azure-native:machinelearningservices:Job
        properties:
          id: string
          jobBaseProperties:
            computeId: string
            description: string
            displayName: string
            environmentId: string
            environmentVariables:
              string: string
            experimentName: string
            identity:
              identityType: AMLToken
            isArchived: false
            jobType: AutoML
            outputs:
              string:
                description: string
                jobOutputType: uri_file
                mode: ReadWriteMount
                uri: string
            properties:
              string: string
            resources:
              instanceCount: 1
              instanceType: string
              properties:
                string:
                  9bec0ab0-c62f-4fa9-a97c-7b24bbcc90ad: null
            services:
              string:
                endpoint: string
                jobServiceType: string
                port: 1
                properties:
                  string: string
            tags:
              string: string
            taskDetails:
              limitSettings:
                maxTrials: 2
              modelSettings:
                validationCropSize: 2
              searchSpace:
                - validationCropSize: choice(2, 360)
              targetColumnName: string
              taskType: ImageClassification
              trainingData:
                jobInputType: mltable
                uri: string
          resourceGroupName: test-rg
          workspaceName: my-aml-workspace
    

    CreateOrUpdate Command Job.

    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using AzureNative = Pulumi.AzureNative;
    
    return await Deployment.RunAsync(() => 
    {
        var job = new AzureNative.MachineLearningServices.Job("job", new()
        {
            Id = "string",
            JobBaseProperties = new AzureNative.MachineLearningServices.Inputs.CommandJobArgs
            {
                CodeId = "string",
                Command = "string",
                ComputeId = "string",
                Description = "string",
                DisplayName = "string",
                Distribution = new AzureNative.MachineLearningServices.Inputs.TensorFlowArgs
                {
                    DistributionType = "TensorFlow",
                    ParameterServerCount = 1,
                    WorkerCount = 1,
                },
                EnvironmentId = "string",
                EnvironmentVariables = 
                {
                    { "string", "string" },
                },
                ExperimentName = "string",
                Identity = new AzureNative.MachineLearningServices.Inputs.AmlTokenArgs
                {
                    IdentityType = "AMLToken",
                },
                Inputs = 
                {
                    { "string", new AzureNative.MachineLearningServices.Inputs.LiteralJobInputArgs
                    {
                        Description = "string",
                        JobInputType = "literal",
                        Value = "string",
                    } },
                },
                JobType = "Command",
                Limits = new AzureNative.MachineLearningServices.Inputs.CommandJobLimitsArgs
                {
                    JobLimitsType = "Command",
                    Timeout = "PT5M",
                },
                Outputs = 
                {
                    { "string", new AzureNative.MachineLearningServices.Inputs.UriFileJobOutputArgs
                    {
                        Description = "string",
                        JobOutputType = "uri_file",
                        Mode = AzureNative.MachineLearningServices.OutputDeliveryMode.ReadWriteMount,
                        Uri = "string",
                    } },
                },
                Properties = 
                {
                    { "string", "string" },
                },
                Resources = new AzureNative.MachineLearningServices.Inputs.JobResourceConfigurationArgs
                {
                    InstanceCount = 1,
                    InstanceType = "string",
                    Properties = 
                    {
                        { "string", new Dictionary<string, object?>
                        {
                            { "e6b6493e-7d5e-4db3-be1e-306ec641327e", null },
                        } },
                    },
                },
                Services = 
                {
                    { "string", new AzureNative.MachineLearningServices.Inputs.JobServiceArgs
                    {
                        Endpoint = "string",
                        JobServiceType = "string",
                        Port = 1,
                        Properties = 
                        {
                            { "string", "string" },
                        },
                    } },
                },
                Tags = 
                {
                    { "string", "string" },
                },
            },
            ResourceGroupName = "test-rg",
            WorkspaceName = "my-aml-workspace",
        });
    
    });
    
    package main

    import (
        "github.com/pulumi/pulumi-azure-native-sdk/machinelearningservices/v2"
        "github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )

    func main() {
        pulumi.Run(func(ctx *pulumi.Context) error {
            _, err := machinelearningservices.NewJob(ctx, "job", &machinelearningservices.JobArgs{
                Id: pulumi.String("string"),
                JobBaseProperties: machinelearningservices.CommandJob{
                    CodeId: "string",
                    Command: "string",
                    ComputeId: "string",
                    Description: "string",
                    DisplayName: "string",
                    Distribution: machinelearningservices.TensorFlow{
                        DistributionType: "TensorFlow",
                        ParameterServerCount: 1,
                        WorkerCount: 1,
                    },
                    EnvironmentId: "string",
                    EnvironmentVariables: map[string]interface{}{
                        "string": "string",
                    },
                    ExperimentName: "string",
                    Identity: machinelearningservices.AmlToken{
                        IdentityType: "AMLToken",
                    },
                    Inputs: map[string]interface{}{
                        "string": machinelearningservices.LiteralJobInput{
                            Description: "string",
                            JobInputType: "literal",
                            Value: "string",
                        },
                    },
                    JobType: "Command",
                    Limits: machinelearningservices.CommandJobLimits{
                        JobLimitsType: "Command",
                        Timeout: "PT5M",
                    },
                    Outputs: map[string]interface{}{
                        "string": machinelearningservices.UriFileJobOutput{
                            Description: "string",
                            JobOutputType: "uri_file",
                            Mode: machinelearningservices.OutputDeliveryModeReadWriteMount,
                            Uri: "string",
                        },
                    },
                    Properties: map[string]interface{}{
                        "string": "string",
                    },
                    Resources: machinelearningservices.JobResourceConfiguration{
                        InstanceCount: 1,
                        InstanceType: "string",
                        Properties: map[string]interface{}{
                            "string": map[string]interface{}{
                                "e6b6493e-7d5e-4db3-be1e-306ec641327e": nil,
                            },
                        },
                    },
                    Services: map[string]interface{}{
                        "string": machinelearningservices.JobService{
                            Endpoint: "string",
                            JobServiceType: "string",
                            Port: 1,
                            Properties: map[string]interface{}{
                                "string": "string",
                            },
                        },
                    },
                    Tags: map[string]interface{}{
                        "string": "string",
                    },
                },
                ResourceGroupName: pulumi.String("test-rg"),
                WorkspaceName: pulumi.String("my-aml-workspace"),
            })
            if err != nil {
                return err
            }
            return nil
        })
    }
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.azurenative.machinelearningservices.Job;
    import com.pulumi.azurenative.machinelearningservices.JobArgs;
    import com.pulumi.azurenative.machinelearningservices.inputs.CommandJobArgs;
    import com.pulumi.azurenative.machinelearningservices.inputs.TensorFlowArgs;
    import com.pulumi.azurenative.machinelearningservices.inputs.AmlTokenArgs;
    import com.pulumi.azurenative.machinelearningservices.inputs.CommandJobLimitsArgs;
    import com.pulumi.azurenative.machinelearningservices.inputs.JobResourceConfigurationArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            var job = new Job("job", JobArgs.builder()
                .id("string")
                .jobBaseProperties(CommandJobArgs.builder()
                    .codeId("string")
                    .command("string")
                    .computeId("string")
                    .description("string")
                    .displayName("string")
                    .distribution(TensorFlowArgs.builder()
                        .distributionType("TensorFlow")
                        .parameterServerCount(1)
                        .workerCount(1)
                        .build())
                    .environmentId("string")
                    .environmentVariables(Map.of("string", "string"))
                    .experimentName("string")
                    .identity(AmlTokenArgs.builder()
                        .identityType("AMLToken")
                        .build())
                    .inputs(Map.of("string", Map.ofEntries(
                        Map.entry("description", "string"),
                        Map.entry("jobInputType", "literal"),
                        Map.entry("value", "string")
                    )))
                    .jobType("Command")
                    .limits(CommandJobLimitsArgs.builder()
                        .jobLimitsType("Command")
                        .timeout("PT5M")
                        .build())
                    .outputs(Map.of("string", Map.ofEntries(
                        Map.entry("description", "string"),
                        Map.entry("jobOutputType", "uri_file"),
                        Map.entry("mode", "ReadWriteMount"),
                        Map.entry("uri", "string")
                    )))
                    .properties(Map.of("string", "string"))
                    .resources(JobResourceConfigurationArgs.builder()
                        .instanceCount(1)
                        .instanceType("string")
                        .properties(Map.of("string", Map.of("e6b6493e-7d5e-4db3-be1e-306ec641327e", null)))
                        .build())
                    .services(Map.of("string", Map.ofEntries(
                        Map.entry("endpoint", "string"),
                        Map.entry("jobServiceType", "string"),
                        Map.entry("port", 1),
                        Map.entry("properties", Map.of("string", "string"))
                    )))
                    .tags(Map.of("string", "string"))
                    .build())
                .resourceGroupName("test-rg")
                .workspaceName("my-aml-workspace")
                .build());
    
        }
    }
    
    import pulumi
    import pulumi_azure_native as azure_native
    
    job = azure_native.machinelearningservices.Job("job",
        id="string",
        job_base_properties=azure_native.machinelearningservices.CommandJobArgs(
            code_id="string",
            command="string",
            compute_id="string",
            description="string",
            display_name="string",
            distribution=azure_native.machinelearningservices.TensorFlowArgs(
                distribution_type="TensorFlow",
                parameter_server_count=1,
                worker_count=1,
            ),
            environment_id="string",
            environment_variables={
                "string": "string",
            },
            experiment_name="string",
            identity=azure_native.machinelearningservices.AmlTokenArgs(
                identity_type="AMLToken",
            ),
            inputs={
                "string": azure_native.machinelearningservices.LiteralJobInputArgs(
                    description="string",
                    job_input_type="literal",
                    value="string",
                ),
            },
            job_type="Command",
            limits=azure_native.machinelearningservices.CommandJobLimitsArgs(
                job_limits_type="Command",
                timeout="PT5M",
            ),
            outputs={
                "string": azure_native.machinelearningservices.UriFileJobOutputArgs(
                    description="string",
                    job_output_type="uri_file",
                    mode=azure_native.machinelearningservices.OutputDeliveryMode.READ_WRITE_MOUNT,
                    uri="string",
                ),
            },
            properties={
                "string": "string",
            },
            resources=azure_native.machinelearningservices.JobResourceConfigurationArgs(
                instance_count=1,
                instance_type="string",
                properties={
                    "string": {
                        "e6b6493e-7d5e-4db3-be1e-306ec641327e": None,
                    },
                },
            ),
            services={
                "string": azure_native.machinelearningservices.JobServiceArgs(
                    endpoint="string",
                    job_service_type="string",
                    port=1,
                    properties={
                        "string": "string",
                    },
                ),
            },
            tags={
                "string": "string",
            },
        ),
        resource_group_name="test-rg",
        workspace_name="my-aml-workspace")
    
    import * as pulumi from "@pulumi/pulumi";
    import * as azure_native from "@pulumi/azure-native";
    
    const job = new azure_native.machinelearningservices.Job("job", {
        id: "string",
        jobBaseProperties: {
            codeId: "string",
            command: "string",
            computeId: "string",
            description: "string",
            displayName: "string",
            distribution: {
                distributionType: "TensorFlow",
                parameterServerCount: 1,
                workerCount: 1,
            },
            environmentId: "string",
            environmentVariables: {
                string: "string",
            },
            experimentName: "string",
            identity: {
                identityType: "AMLToken",
            },
            inputs: {
                string: {
                    description: "string",
                    jobInputType: "literal",
                    value: "string",
                },
            },
            jobType: "Command",
            limits: {
                jobLimitsType: "Command",
                timeout: "PT5M",
            },
            outputs: {
                string: {
                    description: "string",
                    jobOutputType: "uri_file",
                    mode: azure_native.machinelearningservices.OutputDeliveryMode.ReadWriteMount,
                    uri: "string",
                },
            },
            properties: {
                string: "string",
            },
            resources: {
                instanceCount: 1,
                instanceType: "string",
                properties: {
                    string: {
                        "e6b6493e-7d5e-4db3-be1e-306ec641327e": undefined,
                    },
                },
            },
            services: {
                string: {
                    endpoint: "string",
                    jobServiceType: "string",
                    port: 1,
                    properties: {
                        string: "string",
                    },
                },
            },
            tags: {
                string: "string",
            },
        },
        resourceGroupName: "test-rg",
        workspaceName: "my-aml-workspace",
    });
    
    resources:
      job:
        type: azure-native:machinelearningservices:Job
        properties:
          id: string
          jobBaseProperties:
            codeId: string
            command: string
            computeId: string
            description: string
            displayName: string
            distribution:
              distributionType: TensorFlow
              parameterServerCount: 1
              workerCount: 1
            environmentId: string
            environmentVariables:
              string: string
            experimentName: string
            identity:
              identityType: AMLToken
            inputs:
              string:
                description: string
                jobInputType: literal
                value: string
            jobType: Command
            limits:
              jobLimitsType: Command
              timeout: PT5M
            outputs:
              string:
                description: string
                jobOutputType: uri_file
                mode: ReadWriteMount
                uri: string
            properties:
              string: string
            resources:
              instanceCount: 1
              instanceType: string
              properties:
                string:
                  e6b6493e-7d5e-4db3-be1e-306ec641327e: null
            services:
              string:
                endpoint: string
                jobServiceType: string
                port: 1
                properties:
                  string: string
            tags:
              string: string
          resourceGroupName: test-rg
          workspaceName: my-aml-workspace
    

    CreateOrUpdate Pipeline Job.

    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using AzureNative = Pulumi.AzureNative;
    
    return await Deployment.RunAsync(() => 
    {
        var job = new AzureNative.MachineLearningServices.Job("job", new()
        {
            Id = "string",
            JobBaseProperties = new AzureNative.MachineLearningServices.Inputs.PipelineJobArgs
            {
                ComputeId = "string",
                Description = "string",
                DisplayName = "string",
                ExperimentName = "string",
                Inputs = 
                {
                    { "string", new AzureNative.MachineLearningServices.Inputs.LiteralJobInputArgs
                    {
                        Description = "string",
                        JobInputType = "literal",
                        Value = "string",
                    } },
                },
                JobType = "Pipeline",
                Outputs = 
                {
                    { "string", new AzureNative.MachineLearningServices.Inputs.UriFileJobOutputArgs
                    {
                        Description = "string",
                        JobOutputType = "uri_file",
                        Mode = AzureNative.MachineLearningServices.OutputDeliveryMode.Upload,
                        Uri = "string",
                    } },
                },
                Properties = 
                {
                    { "string", "string" },
                },
                Services = 
                {
                    { "string", new AzureNative.MachineLearningServices.Inputs.JobServiceArgs
                    {
                        Endpoint = "string",
                        JobServiceType = "string",
                        Port = 1,
                        Properties = 
                        {
                            { "string", "string" },
                        },
                    } },
                },
                Settings = null,
                Tags = 
                {
                    { "string", "string" },
                },
            },
            ResourceGroupName = "test-rg",
            WorkspaceName = "my-aml-workspace",
        });
    
    });
    
    package main

    import (
        "github.com/pulumi/pulumi-azure-native-sdk/machinelearningservices/v2"
        "github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )

    func main() {
        pulumi.Run(func(ctx *pulumi.Context) error {
            _, err := machinelearningservices.NewJob(ctx, "job", &machinelearningservices.JobArgs{
                Id: pulumi.String("string"),
                JobBaseProperties: machinelearningservices.PipelineJob{
                    ComputeId: "string",
                    Description: "string",
                    DisplayName: "string",
                    ExperimentName: "string",
                    Inputs: map[string]interface{}{
                        "string": machinelearningservices.LiteralJobInput{
                            Description: "string",
                            JobInputType: "literal",
                            Value: "string",
                        },
                    },
                    JobType: "Pipeline",
                    Outputs: map[string]interface{}{
                        "string": machinelearningservices.UriFileJobOutput{
                            Description: "string",
                            JobOutputType: "uri_file",
                            Mode: machinelearningservices.OutputDeliveryModeUpload,
                            Uri: "string",
                        },
                    },
                    Properties: map[string]interface{}{
                        "string": "string",
                    },
                    Services: map[string]interface{}{
                        "string": machinelearningservices.JobService{
                            Endpoint: "string",
                            JobServiceType: "string",
                            Port: 1,
                            Properties: map[string]interface{}{
                                "string": "string",
                            },
                        },
                    },
                    Settings: nil,
                    Tags: map[string]interface{}{
                        "string": "string",
                    },
                },
                ResourceGroupName: pulumi.String("test-rg"),
                WorkspaceName: pulumi.String("my-aml-workspace"),
            })
            if err != nil {
                return err
            }
            return nil
        })
    }
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.azurenative.machinelearningservices.Job;
    import com.pulumi.azurenative.machinelearningservices.JobArgs;
    import com.pulumi.azurenative.machinelearningservices.inputs.PipelineJobArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            var job = new Job("job", JobArgs.builder()
                .id("string")
                .jobBaseProperties(PipelineJobArgs.builder()
                    .computeId("string")
                    .description("string")
                    .displayName("string")
                    .experimentName("string")
                    .inputs(Map.of("string", Map.ofEntries(
                        Map.entry("description", "string"),
                        Map.entry("jobInputType", "literal"),
                        Map.entry("value", "string")
                    )))
                    .jobType("Pipeline")
                    .outputs(Map.of("string", Map.ofEntries(
                        Map.entry("description", "string"),
                        Map.entry("jobOutputType", "uri_file"),
                        Map.entry("mode", "Upload"),
                        Map.entry("uri", "string")
                    )))
                    .properties(Map.of("string", "string"))
                    .services(Map.of("string", Map.ofEntries(
                        Map.entry("endpoint", "string"),
                        Map.entry("jobServiceType", "string"),
                        Map.entry("port", 1),
                        Map.entry("properties", Map.of("string", "string"))
                    )))
                    .settings(Map.of())
                    .tags(Map.of("string", "string"))
                    .build())
                .resourceGroupName("test-rg")
                .workspaceName("my-aml-workspace")
                .build());
    
        }
    }
    
    import pulumi
    import pulumi_azure_native as azure_native
    
    job = azure_native.machinelearningservices.Job("job",
        id="string",
        job_base_properties=azure_native.machinelearningservices.PipelineJobArgs(
            compute_id="string",
            description="string",
            display_name="string",
            experiment_name="string",
            inputs={
                "string": azure_native.machinelearningservices.LiteralJobInputArgs(
                    description="string",
                    job_input_type="literal",
                    value="string",
                ),
            },
            job_type="Pipeline",
            outputs={
                "string": azure_native.machinelearningservices.UriFileJobOutputArgs(
                    description="string",
                    job_output_type="uri_file",
                    mode=azure_native.machinelearningservices.OutputDeliveryMode.UPLOAD,
                    uri="string",
                ),
            },
            properties={
                "string": "string",
            },
            services={
                "string": azure_native.machinelearningservices.JobServiceArgs(
                    endpoint="string",
                    job_service_type="string",
                    port=1,
                    properties={
                        "string": "string",
                    },
                ),
            },
            settings={},
            tags={
                "string": "string",
            },
        ),
        resource_group_name="test-rg",
        workspace_name="my-aml-workspace")
    
    import * as pulumi from "@pulumi/pulumi";
    import * as azure_native from "@pulumi/azure-native";
    
    const job = new azure_native.machinelearningservices.Job("job", {
        id: "string",
        jobBaseProperties: {
            computeId: "string",
            description: "string",
            displayName: "string",
            experimentName: "string",
            inputs: {
                string: {
                    description: "string",
                    jobInputType: "literal",
                    value: "string",
                },
            },
            jobType: "Pipeline",
            outputs: {
                string: {
                    description: "string",
                    jobOutputType: "uri_file",
                    mode: azure_native.machinelearningservices.OutputDeliveryMode.Upload,
                    uri: "string",
                },
            },
            properties: {
                string: "string",
            },
            services: {
                string: {
                    endpoint: "string",
                    jobServiceType: "string",
                    port: 1,
                    properties: {
                        string: "string",
                    },
                },
            },
            settings: {},
            tags: {
                string: "string",
            },
        },
        resourceGroupName: "test-rg",
        workspaceName: "my-aml-workspace",
    });
    
    resources:
      job:
        type: azure-native:machinelearningservices:Job
        properties:
          id: string
          jobBaseProperties:
            computeId: string
            description: string
            displayName: string
            experimentName: string
            inputs:
              string:
                description: string
                jobInputType: literal
                value: string
            jobType: Pipeline
            outputs:
              string:
                description: string
                jobOutputType: uri_file
                mode: Upload
                uri: string
            properties:
              string: string
            services:
              string:
                endpoint: string
                jobServiceType: string
                port: 1
                properties:
                  string: string
            settings: {}
            tags:
              string: string
          resourceGroupName: test-rg
          workspaceName: my-aml-workspace
    

    CreateOrUpdate Sweep Job.

    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using AzureNative = Pulumi.AzureNative;
    
    return await Deployment.RunAsync(() => 
    {
        var job = new AzureNative.MachineLearningServices.Job("job", new()
        {
            Id = "string",
            JobBaseProperties = new AzureNative.MachineLearningServices.Inputs.SweepJobArgs
            {
                ComputeId = "string",
                Description = "string",
                DisplayName = "string",
                EarlyTermination = new AzureNative.MachineLearningServices.Inputs.MedianStoppingPolicyArgs
                {
                    DelayEvaluation = 1,
                    EvaluationInterval = 1,
                    PolicyType = "MedianStopping",
                },
                ExperimentName = "string",
                JobType = "Sweep",
                Limits = new AzureNative.MachineLearningServices.Inputs.SweepJobLimitsArgs
                {
                    JobLimitsType = "Sweep",
                    MaxConcurrentTrials = 1,
                    MaxTotalTrials = 1,
                    TrialTimeout = "PT1S",
                },
                Objective = new AzureNative.MachineLearningServices.Inputs.ObjectiveArgs
                {
                    Goal = AzureNative.MachineLearningServices.Goal.Minimize,
                    PrimaryMetric = "string",
                },
                Properties = 
                {
                    { "string", "string" },
                },
                SamplingAlgorithm = new AzureNative.MachineLearningServices.Inputs.GridSamplingAlgorithmArgs
                {
                    SamplingAlgorithmType = "Grid",
                },
                SearchSpace = 
                {
                    { "string", null },
                },
                Services = 
                {
                    { "string", new AzureNative.MachineLearningServices.Inputs.JobServiceArgs
                    {
                        Endpoint = "string",
                        JobServiceType = "string",
                        Port = 1,
                        Properties = 
                        {
                            { "string", "string" },
                        },
                    } },
                },
                Tags = 
                {
                    { "string", "string" },
                },
                Trial = new AzureNative.MachineLearningServices.Inputs.TrialComponentArgs
                {
                    CodeId = "string",
                    Command = "string",
                    Distribution = new AzureNative.MachineLearningServices.Inputs.MpiArgs
                    {
                        DistributionType = "Mpi",
                        ProcessCountPerInstance = 1,
                    },
                    EnvironmentId = "string",
                    EnvironmentVariables = 
                    {
                        { "string", "string" },
                    },
                    Resources = new AzureNative.MachineLearningServices.Inputs.JobResourceConfigurationArgs
                    {
                        InstanceCount = 1,
                        InstanceType = "string",
                        Properties = 
                        {
                            { "string", new Dictionary<string, object?>
                            {
                                { "e6b6493e-7d5e-4db3-be1e-306ec641327e", null },
                            } },
                        },
                    },
                },
            },
            ResourceGroupName = "test-rg",
            WorkspaceName = "my-aml-workspace",
        });
    
    });
    
    package main

    import (
        "github.com/pulumi/pulumi-azure-native-sdk/machinelearningservices/v2"
        "github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )

    func main() {
        pulumi.Run(func(ctx *pulumi.Context) error {
            _, err := machinelearningservices.NewJob(ctx, "job", &machinelearningservices.JobArgs{
                Id: pulumi.String("string"),
                JobBaseProperties: machinelearningservices.SweepJob{
                    ComputeId: "string",
                    Description: "string",
                    DisplayName: "string",
                    EarlyTermination: machinelearningservices.MedianStoppingPolicy{
                        DelayEvaluation: 1,
                        EvaluationInterval: 1,
                        PolicyType: "MedianStopping",
                    },
                    ExperimentName: "string",
                    JobType: "Sweep",
                    Limits: machinelearningservices.SweepJobLimits{
                        JobLimitsType: "Sweep",
                        MaxConcurrentTrials: 1,
                        MaxTotalTrials: 1,
                        TrialTimeout: "PT1S",
                    },
                    Objective: machinelearningservices.Objective{
                        Goal: machinelearningservices.GoalMinimize,
                        PrimaryMetric: "string",
                    },
                    Properties: map[string]interface{}{
                        "string": "string",
                    },
                    SamplingAlgorithm: machinelearningservices.GridSamplingAlgorithm{
                        SamplingAlgorithmType: "Grid",
                    },
                    SearchSpace: map[string]interface{}{
                        "string": nil,
                    },
                    Services: map[string]interface{}{
                        "string": machinelearningservices.JobService{
                            Endpoint: "string",
                            JobServiceType: "string",
                            Port: 1,
                            Properties: map[string]interface{}{
                                "string": "string",
                            },
                        },
                    },
                    Tags: map[string]interface{}{
                        "string": "string",
                    },
                    Trial: machinelearningservices.TrialComponent{
                        CodeId: "string",
                        Command: "string",
                        Distribution: machinelearningservices.Mpi{
                            DistributionType: "Mpi",
                            ProcessCountPerInstance: 1,
                        },
                        EnvironmentId: "string",
                        EnvironmentVariables: map[string]interface{}{
                            "string": "string",
                        },
                        Resources: machinelearningservices.JobResourceConfiguration{
                            InstanceCount: 1,
                            InstanceType: "string",
                            Properties: map[string]interface{}{
                                "string": map[string]interface{}{
                                    "e6b6493e-7d5e-4db3-be1e-306ec641327e": nil,
                                },
                            },
                        },
                    },
                },
                ResourceGroupName: pulumi.String("test-rg"),
                WorkspaceName: pulumi.String("my-aml-workspace"),
            })
            if err != nil {
                return err
            }
            return nil
        })
    }
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.azurenative.machinelearningservices.Job;
    import com.pulumi.azurenative.machinelearningservices.JobArgs;
    import com.pulumi.azurenative.machinelearningservices.inputs.SweepJobArgs;
    import com.pulumi.azurenative.machinelearningservices.inputs.MedianStoppingPolicyArgs;
    import com.pulumi.azurenative.machinelearningservices.inputs.SweepJobLimitsArgs;
    import com.pulumi.azurenative.machinelearningservices.inputs.ObjectiveArgs;
    import com.pulumi.azurenative.machinelearningservices.inputs.GridSamplingAlgorithmArgs;
    import com.pulumi.azurenative.machinelearningservices.inputs.TrialComponentArgs;
    import com.pulumi.azurenative.machinelearningservices.inputs.MpiArgs;
    import com.pulumi.azurenative.machinelearningservices.inputs.JobResourceConfigurationArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            var job = new Job("job", JobArgs.builder()
                .id("string")
                .jobBaseProperties(SweepJobArgs.builder()
                    .computeId("string")
                    .description("string")
                    .displayName("string")
                    .earlyTermination(MedianStoppingPolicyArgs.builder()
                        .delayEvaluation(1)
                        .evaluationInterval(1)
                        .policyType("MedianStopping")
                        .build())
                    .experimentName("string")
                    .jobType("Sweep")
                    .limits(SweepJobLimitsArgs.builder()
                        .jobLimitsType("Sweep")
                        .maxConcurrentTrials(1)
                        .maxTotalTrials(1)
                        .trialTimeout("PT1S")
                        .build())
                    .objective(ObjectiveArgs.builder()
                        .goal("Minimize")
                        .primaryMetric("string")
                        .build())
                    .properties(Map.of("string", "string"))
                    .samplingAlgorithm(GridSamplingAlgorithmArgs.builder()
                        .samplingAlgorithmType("Grid")
                        .build())
                    .searchSpace(Map.of("string", Map.of()))
                    .services(Map.of("string", Map.ofEntries(
                        Map.entry("endpoint", "string"),
                        Map.entry("jobServiceType", "string"),
                        Map.entry("port", 1),
                        Map.entry("properties", Map.of("string", "string"))
                    )))
                    .tags(Map.of("string", "string"))
                    .trial(TrialComponentArgs.builder()
                        .codeId("string")
                        .command("string")
                        .distribution(MpiArgs.builder()
                            .distributionType("Mpi")
                            .processCountPerInstance(1)
                            .build())
                        .environmentId("string")
                        .environmentVariables(Map.of("string", "string"))
                        .resources(JobResourceConfigurationArgs.builder()
                            .instanceCount(1)
                            .instanceType("string")
                            .properties(Map.of("string", Map.of("e6b6493e-7d5e-4db3-be1e-306ec641327e", null)))
                            .build())
                        .build())
                    .build())
                .resourceGroupName("test-rg")
                .workspaceName("my-aml-workspace")
                .build());
    
        }
    }
    
    import pulumi
    import pulumi_azure_native as azure_native
    
    job = azure_native.machinelearningservices.Job("job",
        id="string",
        job_base_properties=azure_native.machinelearningservices.SweepJobArgs(
            compute_id="string",
            description="string",
            display_name="string",
            early_termination=azure_native.machinelearningservices.MedianStoppingPolicyArgs(
                delay_evaluation=1,
                evaluation_interval=1,
                policy_type="MedianStopping",
            ),
            experiment_name="string",
            job_type="Sweep",
            limits=azure_native.machinelearningservices.SweepJobLimitsArgs(
                job_limits_type="Sweep",
                max_concurrent_trials=1,
                max_total_trials=1,
                trial_timeout="PT1S",
            ),
            objective=azure_native.machinelearningservices.ObjectiveArgs(
                goal=azure_native.machinelearningservices.Goal.MINIMIZE,
                primary_metric="string",
            ),
            properties={
                "string": "string",
            },
            sampling_algorithm=azure_native.machinelearningservices.GridSamplingAlgorithmArgs(
                sampling_algorithm_type="Grid",
            ),
            search_space={
                "string": {},
            },
            services={
                "string": azure_native.machinelearningservices.JobServiceArgs(
                    endpoint="string",
                    job_service_type="string",
                    port=1,
                    properties={
                        "string": "string",
                    },
                ),
            },
            tags={
                "string": "string",
            },
            trial=azure_native.machinelearningservices.TrialComponentArgs(
                code_id="string",
                command="string",
                distribution=azure_native.machinelearningservices.MpiArgs(
                    distribution_type="Mpi",
                    process_count_per_instance=1,
                ),
                environment_id="string",
                environment_variables={
                    "string": "string",
                },
                resources=azure_native.machinelearningservices.JobResourceConfigurationArgs(
                    instance_count=1,
                    instance_type="string",
                    properties={
                        "string": {
                            "e6b6493e-7d5e-4db3-be1e-306ec641327e": None,
                        },
                    },
                ),
            ),
        ),
        resource_group_name="test-rg",
        workspace_name="my-aml-workspace")
    
    import * as pulumi from "@pulumi/pulumi";
    import * as azure_native from "@pulumi/azure-native";
    
    const job = new azure_native.machinelearningservices.Job("job", {
        id: "string",
        jobBaseProperties: {
            computeId: "string",
            description: "string",
            displayName: "string",
            earlyTermination: {
                delayEvaluation: 1,
                evaluationInterval: 1,
                policyType: "MedianStopping",
            },
            experimentName: "string",
            jobType: "Sweep",
            limits: {
                jobLimitsType: "Sweep",
                maxConcurrentTrials: 1,
                maxTotalTrials: 1,
                trialTimeout: "PT1S",
            },
            objective: {
                goal: azure_native.machinelearningservices.Goal.Minimize,
                primaryMetric: "string",
            },
            properties: {
                string: "string",
            },
            samplingAlgorithm: {
                samplingAlgorithmType: "Grid",
            },
            searchSpace: {
                string: {},
            },
            services: {
                string: {
                    endpoint: "string",
                    jobServiceType: "string",
                    port: 1,
                    properties: {
                        string: "string",
                    },
                },
            },
            tags: {
                string: "string",
            },
            trial: {
                codeId: "string",
                command: "string",
                distribution: {
                    distributionType: "Mpi",
                    processCountPerInstance: 1,
                },
                environmentId: "string",
                environmentVariables: {
                    string: "string",
                },
                resources: {
                    instanceCount: 1,
                    instanceType: "string",
                    properties: {
                        string: {
                            "e6b6493e-7d5e-4db3-be1e-306ec641327e": undefined,
                        },
                    },
                },
            },
        },
        resourceGroupName: "test-rg",
        workspaceName: "my-aml-workspace",
    });
    
    resources:
      job:
        type: azure-native:machinelearningservices:Job
        properties:
          id: string
          jobBaseProperties:
            computeId: string
            description: string
            displayName: string
            earlyTermination:
              delayEvaluation: 1
              evaluationInterval: 1
              policyType: MedianStopping
            experimentName: string
            jobType: Sweep
            limits:
              jobLimitsType: Sweep
              maxConcurrentTrials: 1
              maxTotalTrials: 1
              trialTimeout: PT1S
            objective:
              goal: Minimize
              primaryMetric: string
            properties:
              string: string
            samplingAlgorithm:
              samplingAlgorithmType: Grid
            searchSpace:
              string: {}
            services:
              string:
                endpoint: string
                jobServiceType: string
                port: 1
                properties:
                  string: string
            tags:
              string: string
            trial:
              codeId: string
              command: string
              distribution:
                distributionType: Mpi
                processCountPerInstance: 1
              environmentId: string
              environmentVariables:
                string: string
              resources:
                instanceCount: 1
                instanceType: string
                properties:
                  string:
                    e6b6493e-7d5e-4db3-be1e-306ec641327e: null
          resourceGroupName: test-rg
          workspaceName: my-aml-workspace
    

    Create Job Resource

    Resources are created with functions called constructors. To learn more about declaring and configuring resources, see Resources.

    Constructor syntax

    new Job(name: string, args: JobArgs, opts?: CustomResourceOptions);
    @overload
    def Job(resource_name: str,
            args: JobArgs,
            opts: Optional[ResourceOptions] = None)
    
    @overload
    def Job(resource_name: str,
            opts: Optional[ResourceOptions] = None,
            job_base_properties: Optional[Union[AutoMLJobArgs, CommandJobArgs, PipelineJobArgs, SweepJobArgs]] = None,
            resource_group_name: Optional[str] = None,
            workspace_name: Optional[str] = None,
            id: Optional[str] = None)
    func NewJob(ctx *Context, name string, args JobArgs, opts ...ResourceOption) (*Job, error)
    public Job(string name, JobArgs args, CustomResourceOptions? opts = null)
    public Job(String name, JobArgs args)
    public Job(String name, JobArgs args, CustomResourceOptions options)
    
    type: azure-native:machinelearningservices:Job
    properties: # The arguments to resource properties.
    options: # Bag of options to control resource's behavior.
    
    

    Parameters

    name string
    The unique name of the resource.
    args JobArgs
    The arguments to resource properties.
    opts CustomResourceOptions
    Bag of options to control resource's behavior.
    resource_name str
    The unique name of the resource.
    args JobArgs
    The arguments to resource properties.
    opts ResourceOptions
    Bag of options to control resource's behavior.
    ctx Context
    Context object for the current deployment.
    name string
    The unique name of the resource.
    args JobArgs
    The arguments to resource properties.
    opts ResourceOption
    Bag of options to control resource's behavior.
    name string
    The unique name of the resource.
    args JobArgs
    The arguments to resource properties.
    opts CustomResourceOptions
    Bag of options to control resource's behavior.
    name String
    The unique name of the resource.
    args JobArgs
    The arguments to resource properties.
    options CustomResourceOptions
    Bag of options to control resource's behavior.
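
    The opts / options parameter accepts standard resource options such as protect or dependsOn. The following is a minimal, hedged sketch in Python (placeholder values; the command shown is hypothetical) of passing ResourceOptions alongside the resource arguments:

    import pulumi
    import pulumi_azure_native as azure_native

    # Sketch only: a Command job protected from accidental deletion via resource options.
    # The CommandJobArgs values below are placeholders, not a recommended configuration.
    job_with_opts = azure_native.machinelearningservices.Job("jobWithOpts",
        resource_group_name="test-rg",
        workspace_name="my-aml-workspace",
        job_base_properties=azure_native.machinelearningservices.CommandJobArgs(
            job_type="Command",
            command="python train.py",  # hypothetical entry point
            environment_id="string",
        ),
        opts=pulumi.ResourceOptions(protect=True))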

    Example

    The following reference example uses placeholder values for all input properties.

    var examplejobResourceResourceFromMachinelearningservices = new AzureNative.MachineLearningServices.Job("examplejobResourceResourceFromMachinelearningservices", new()
    {
        JobBaseProperties = new AzureNative.MachineLearningServices.Inputs.AutoMLJobArgs
        {
            JobType = "AutoML",
            TaskDetails = new AzureNative.MachineLearningServices.Inputs.ClassificationArgs
            {
                TaskType = "Classification",
                TrainingData = new AzureNative.MachineLearningServices.Inputs.MLTableJobInputArgs
                {
                    JobInputType = "mltable",
                    Uri = "string",
                    Description = "string",
                    Mode = "string",
                },
                NCrossValidations = new AzureNative.MachineLearningServices.Inputs.AutoNCrossValidationsArgs
                {
                    Mode = "Auto",
                },
                TestData = new AzureNative.MachineLearningServices.Inputs.MLTableJobInputArgs
                {
                    JobInputType = "mltable",
                    Uri = "string",
                    Description = "string",
                    Mode = "string",
                },
                CvSplitColumnNames = new[]
                {
                    "string",
                },
                PositiveLabel = "string",
                PrimaryMetric = "string",
                TargetColumnName = "string",
                LimitSettings = new AzureNative.MachineLearningServices.Inputs.TableVerticalLimitSettingsArgs
                {
                    EnableEarlyTermination = false,
                    ExitScore = 0,
                    MaxConcurrentTrials = 0,
                    MaxCoresPerTrial = 0,
                    MaxTrials = 0,
                    Timeout = "string",
                    TrialTimeout = "string",
                },
                LogVerbosity = "string",
                TestDataSize = 0,
                FeaturizationSettings = new AzureNative.MachineLearningServices.Inputs.TableVerticalFeaturizationSettingsArgs
                {
                    BlockedTransformers = new[]
                    {
                        "string",
                    },
                    ColumnNameAndTypes = 
                    {
                        { "string", "string" },
                    },
                    DatasetLanguage = "string",
                    EnableDnnFeaturization = false,
                    Mode = "string",
                    TransformerParams = 
                    {
                        { "string", new[]
                        {
                            new AzureNative.MachineLearningServices.Inputs.ColumnTransformerArgs
                            {
                                Fields = new[]
                                {
                                    "string",
                                },
                                Parameters = "any",
                            },
                        } },
                    },
                },
                TrainingSettings = new AzureNative.MachineLearningServices.Inputs.ClassificationTrainingSettingsArgs
                {
                    AllowedTrainingAlgorithms = new[]
                    {
                        "string",
                    },
                    BlockedTrainingAlgorithms = new[]
                    {
                        "string",
                    },
                    EnableDnnTraining = false,
                    EnableModelExplainability = false,
                    EnableOnnxCompatibleModels = false,
                    EnableStackEnsemble = false,
                    EnableVoteEnsemble = false,
                    EnsembleModelDownloadTimeout = "string",
                    StackEnsembleSettings = new AzureNative.MachineLearningServices.Inputs.StackEnsembleSettingsArgs
                    {
                        StackMetaLearnerKWargs = "any",
                        StackMetaLearnerTrainPercentage = 0,
                        StackMetaLearnerType = "string",
                    },
                },
                ValidationData = new AzureNative.MachineLearningServices.Inputs.MLTableJobInputArgs
                {
                    JobInputType = "mltable",
                    Uri = "string",
                    Description = "string",
                    Mode = "string",
                },
                ValidationDataSize = 0,
                WeightColumnName = "string",
            },
            IsArchived = false,
            Description = "string",
            EnvironmentId = "string",
            EnvironmentVariables = 
            {
                { "string", "string" },
            },
            ExperimentName = "string",
            Identity = new AzureNative.MachineLearningServices.Inputs.AmlTokenArgs
            {
                IdentityType = "AMLToken",
            },
            ComponentId = "string",
            DisplayName = "string",
            Outputs = 
            {
                { "string", new AzureNative.MachineLearningServices.Inputs.CustomModelJobOutputArgs
                {
                    JobOutputType = "custom_model",
                    Description = "string",
                    Mode = "string",
                    Uri = "string",
                } },
            },
            Properties = 
            {
                { "string", "string" },
            },
            Resources = new AzureNative.MachineLearningServices.Inputs.JobResourceConfigurationArgs
            {
                DockerArgs = "string",
                InstanceCount = 0,
                InstanceType = "string",
                Properties = 
                {
                    { "string", "any" },
                },
                ShmSize = "string",
            },
            Services = 
            {
                { "string", new AzureNative.MachineLearningServices.Inputs.JobServiceArgs
                {
                    Endpoint = "string",
                    JobServiceType = "string",
                    Nodes = new AzureNative.MachineLearningServices.Inputs.AllNodesArgs
                    {
                        NodesValueType = "All",
                    },
                    Port = 0,
                    Properties = 
                    {
                        { "string", "string" },
                    },
                } },
            },
            Tags = 
            {
                { "string", "string" },
            },
            ComputeId = "string",
        },
        ResourceGroupName = "string",
        WorkspaceName = "string",
        Id = "string",
    });
    
    example, err := machinelearningservices.NewJob(ctx, "examplejobResourceResourceFromMachinelearningservices", &machinelearningservices.JobArgs{
        JobBaseProperties: machinelearningservices.AutoMLJob{
            JobType: "AutoML",
            TaskDetails: machinelearningservices.Classification{
                TaskType: "Classification",
                TrainingData: machinelearningservices.MLTableJobInput{
                    JobInputType: "mltable",
                    Uri: "string",
                    Description: "string",
                    Mode: "string",
                },
                NCrossValidations: machinelearningservices.AutoNCrossValidations{
                    Mode: "Auto",
                },
                TestData: machinelearningservices.MLTableJobInput{
                    JobInputType: "mltable",
                    Uri: "string",
                    Description: "string",
                    Mode: "string",
                },
                CvSplitColumnNames: []string{
                    "string",
                },
                PositiveLabel: "string",
                PrimaryMetric: "string",
                TargetColumnName: "string",
                LimitSettings: machinelearningservices.TableVerticalLimitSettings{
                    EnableEarlyTermination: false,
                    ExitScore: 0,
                    MaxConcurrentTrials: 0,
                    MaxCoresPerTrial: 0,
                    MaxTrials: 0,
                    Timeout: "string",
                    TrialTimeout: "string",
                },
                LogVerbosity: "string",
                TestDataSize: 0,
                FeaturizationSettings: machinelearningservices.TableVerticalFeaturizationSettings{
                    BlockedTransformers: []machinelearningservices.BlockedTransformers{
                        "string",
                    },
                    ColumnNameAndTypes: map[string]interface{}{
                        "string": "string",
                    },
                    DatasetLanguage: "string",
                    EnableDnnFeaturization: false,
                    Mode: "string",
                    TransformerParams: map[string][]machinelearningservices.ColumnTransformer{
                        "string": []machinelearningservices.ColumnTransformer{
                            {
                                Fields: []string{
                                    "string",
                                },
                                Parameters: "any",
                            },
                        },
                    },
                },
                TrainingSettings: machinelearningservices.ClassificationTrainingSettings{
                    AllowedTrainingAlgorithms: []machinelearningservices.ClassificationModels{
                        "string",
                    },
                    BlockedTrainingAlgorithms: []machinelearningservices.ClassificationModels{
                        "string",
                    },
                    EnableDnnTraining: false,
                    EnableModelExplainability: false,
                    EnableOnnxCompatibleModels: false,
                    EnableStackEnsemble: false,
                    EnableVoteEnsemble: false,
                    EnsembleModelDownloadTimeout: "string",
                    StackEnsembleSettings: machinelearningservices.StackEnsembleSettings{
                        StackMetaLearnerKWargs: "any",
                        StackMetaLearnerTrainPercentage: 0,
                        StackMetaLearnerType: "string",
                    },
                },
                ValidationData: machinelearningservices.MLTableJobInput{
                    JobInputType: "mltable",
                    Uri: "string",
                    Description: "string",
                    Mode: "string",
                },
                ValidationDataSize: 0,
                WeightColumnName: "string",
            },
            IsArchived: false,
            Description: "string",
            EnvironmentId: "string",
            EnvironmentVariables: map[string]interface{}{
                "string": "string",
            },
            ExperimentName: "string",
            Identity: machinelearningservices.AmlToken{
                IdentityType: "AMLToken",
            },
            ComponentId: "string",
            DisplayName: "string",
            Outputs: map[string]interface{}{
                "string": machinelearningservices.CustomModelJobOutput{
                    JobOutputType: "custom_model",
                    Description: "string",
                    Mode: "string",
                    Uri: "string",
                },
            },
            Properties: map[string]interface{}{
                "string": "string",
            },
            Resources: machinelearningservices.JobResourceConfiguration{
                DockerArgs: "string",
                InstanceCount: 0,
                InstanceType: "string",
                Properties: map[string]interface{}{
                    "string": "any",
                },
                ShmSize: "string",
            },
            Services: map[string]machinelearningservices.JobService{
                "string": machinelearningservices.JobService{
                    Endpoint: "string",
                    JobServiceType: "string",
                    Nodes: machinelearningservices.AllNodes{
                        NodesValueType: "All",
                    },
                    Port: 0,
                    Properties: map[string]interface{}{
                        "string": "string",
                    },
                },
            },
            Tags: map[string]interface{}{
                "string": "string",
            },
            ComputeId: "string",
        },
        ResourceGroupName: pulumi.String("string"),
        WorkspaceName: pulumi.String("string"),
        Id: pulumi.String("string"),
    })
    
    var examplejobResourceResourceFromMachinelearningservices = new Job("examplejobResourceResourceFromMachinelearningservices", JobArgs.builder()        
        .jobBaseProperties(AutoMLJobArgs.builder()
            .jobType("AutoML")
            .taskDetails(ClassificationArgs.builder()
                .taskType("Classification")
                .trainingData(MLTableJobInputArgs.builder()
                    .jobInputType("mltable")
                    .uri("string")
                    .description("string")
                    .mode("string")
                    .build())
                .nCrossValidations(AutoNCrossValidationsArgs.builder()
                    .mode("Auto")
                    .build())
                .testData(MLTableJobInputArgs.builder()
                    .jobInputType("mltable")
                    .uri("string")
                    .description("string")
                    .mode("string")
                    .build())
                .cvSplitColumnNames("string")
                .positiveLabel("string")
                .primaryMetric("string")
                .targetColumnName("string")
                .limitSettings(TableVerticalLimitSettingsArgs.builder()
                    .enableEarlyTermination(false)
                    .exitScore(0)
                    .maxConcurrentTrials(0)
                    .maxCoresPerTrial(0)
                    .maxTrials(0)
                    .timeout("string")
                    .trialTimeout("string")
                    .build())
                .logVerbosity("string")
                .testDataSize(0)
                .featurizationSettings(TableVerticalFeaturizationSettingsArgs.builder()
                    .blockedTransformers("string")
                    .columnNameAndTypes(Map.of("string", "string"))
                    .datasetLanguage("string")
                    .enableDnnFeaturization(false)
                    .mode("string")
                    .transformerParams(Map.of("string", Map.ofEntries(
                        Map.entry("fields", "string"),
                        Map.entry("parameters", "any")
                    )))
                    .build())
                .trainingSettings(ClassificationTrainingSettingsArgs.builder()
                    .allowedTrainingAlgorithms("string")
                    .blockedTrainingAlgorithms("string")
                    .enableDnnTraining(false)
                    .enableModelExplainability(false)
                    .enableOnnxCompatibleModels(false)
                    .enableStackEnsemble(false)
                    .enableVoteEnsemble(false)
                    .ensembleModelDownloadTimeout("string")
                    .stackEnsembleSettings(StackEnsembleSettingsArgs.builder()
                        .stackMetaLearnerKWargs("any")
                        .stackMetaLearnerTrainPercentage(0)
                        .stackMetaLearnerType("string")
                        .build())
                    .build())
                .validationData(MLTableJobInputArgs.builder()
                    .jobInputType("mltable")
                    .uri("string")
                    .description("string")
                    .mode("string")
                    .build())
                .validationDataSize(0)
                .weightColumnName("string")
                .build())
            .isArchived(false)
            .description("string")
            .environmentId("string")
            .environmentVariables(Map.of("string", "string"))
            .experimentName("string")
            .identity(AmlTokenArgs.builder()
                .identityType("AMLToken")
                .build())
            .componentId("string")
            .displayName("string")
            .outputs(Map.of("string", Map.ofEntries(
                Map.entry("jobOutputType", "custom_model"),
                Map.entry("description", "string"),
                Map.entry("mode", "string"),
                Map.entry("uri", "string")
            )))
            .properties(Map.of("string", "string"))
            .resources(JobResourceConfigurationArgs.builder()
                .dockerArgs("string")
                .instanceCount(0)
                .instanceType("string")
                .properties(Map.of("string", "any"))
                .shmSize("string")
                .build())
            .services(Map.of("string", Map.ofEntries(
                Map.entry("endpoint", "string"),
                Map.entry("jobServiceType", "string"),
                Map.entry("nodes", Map.of("nodesValueType", "All")),
                Map.entry("port", 0),
                Map.entry("properties", Map.of("string", "string"))
            )))
            .tags(Map.of("string", "string"))
            .computeId("string")
            .build())
        .resourceGroupName("string")
        .workspaceName("string")
        .id("string")
        .build());
    
    examplejob_resource_resource_from_machinelearningservices = azure_native.machinelearningservices.Job("examplejobResourceResourceFromMachinelearningservices",
        job_base_properties=azure_native.machinelearningservices.AutoMLJobArgs(
            job_type="AutoML",
            task_details=azure_native.machinelearningservices.ClassificationArgs(
                task_type="Classification",
                training_data=azure_native.machinelearningservices.MLTableJobInputArgs(
                    job_input_type="mltable",
                    uri="string",
                    description="string",
                    mode="string",
                ),
                n_cross_validations=azure_native.machinelearningservices.AutoNCrossValidationsArgs(
                    mode="Auto",
                ),
                test_data=azure_native.machinelearningservices.MLTableJobInputArgs(
                    job_input_type="mltable",
                    uri="string",
                    description="string",
                    mode="string",
                ),
                cv_split_column_names=["string"],
                positive_label="string",
                primary_metric="string",
                target_column_name="string",
                limit_settings=azure_native.machinelearningservices.TableVerticalLimitSettingsArgs(
                    enable_early_termination=False,
                    exit_score=0,
                    max_concurrent_trials=0,
                    max_cores_per_trial=0,
                    max_trials=0,
                    timeout="string",
                    trial_timeout="string",
                ),
                log_verbosity="string",
                test_data_size=0,
                featurization_settings=azure_native.machinelearningservices.TableVerticalFeaturizationSettingsArgs(
                    blocked_transformers=["string"],
                    column_name_and_types={
                        "string": "string",
                    },
                    dataset_language="string",
                    enable_dnn_featurization=False,
                    mode="string",
                    transformer_params={
                        "string": [azure_native.machinelearningservices.ColumnTransformerArgs(
                            fields=["string"],
                            parameters="any",
                        )],
                    },
                ),
                training_settings=azure_native.machinelearningservices.ClassificationTrainingSettingsArgs(
                    allowed_training_algorithms=["string"],
                    blocked_training_algorithms=["string"],
                    enable_dnn_training=False,
                    enable_model_explainability=False,
                    enable_onnx_compatible_models=False,
                    enable_stack_ensemble=False,
                    enable_vote_ensemble=False,
                    ensemble_model_download_timeout="string",
                    stack_ensemble_settings=azure_native.machinelearningservices.StackEnsembleSettingsArgs(
                        stack_meta_learner_k_wargs="any",
                        stack_meta_learner_train_percentage=0,
                        stack_meta_learner_type="string",
                    ),
                ),
                validation_data=azure_native.machinelearningservices.MLTableJobInputArgs(
                    job_input_type="mltable",
                    uri="string",
                    description="string",
                    mode="string",
                ),
                validation_data_size=0,
                weight_column_name="string",
            ),
            is_archived=False,
            description="string",
            environment_id="string",
            environment_variables={
                "string": "string",
            },
            experiment_name="string",
            identity=azure_native.machinelearningservices.AmlTokenArgs(
                identity_type="AMLToken",
            ),
            component_id="string",
            display_name="string",
            outputs={
                "string": azure_native.machinelearningservices.CustomModelJobOutputArgs(
                    job_output_type="custom_model",
                    description="string",
                    mode="string",
                    uri="string",
                ),
            },
            properties={
                "string": "string",
            },
            resources=azure_native.machinelearningservices.JobResourceConfigurationArgs(
                docker_args="string",
                instance_count=0,
                instance_type="string",
                properties={
                    "string": "any",
                },
                shm_size="string",
            ),
            services={
                "string": azure_native.machinelearningservices.JobServiceArgs(
                    endpoint="string",
                    job_service_type="string",
                    nodes=azure_native.machinelearningservices.AllNodesArgs(
                        nodes_value_type="All",
                    ),
                    port=0,
                    properties={
                        "string": "string",
                    },
                ),
            },
            tags={
                "string": "string",
            },
            compute_id="string",
        ),
        resource_group_name="string",
        workspace_name="string",
        id="string")
    
    const examplejobResourceResourceFromMachinelearningservices = new azure_native.machinelearningservices.Job("examplejobResourceResourceFromMachinelearningservices", {
        jobBaseProperties: {
            jobType: "AutoML",
            taskDetails: {
                taskType: "Classification",
                trainingData: {
                    jobInputType: "mltable",
                    uri: "string",
                    description: "string",
                    mode: "string",
                },
                nCrossValidations: {
                    mode: "Auto",
                },
                testData: {
                    jobInputType: "mltable",
                    uri: "string",
                    description: "string",
                    mode: "string",
                },
                cvSplitColumnNames: ["string"],
                positiveLabel: "string",
                primaryMetric: "string",
                targetColumnName: "string",
                limitSettings: {
                    enableEarlyTermination: false,
                    exitScore: 0,
                    maxConcurrentTrials: 0,
                    maxCoresPerTrial: 0,
                    maxTrials: 0,
                    timeout: "string",
                    trialTimeout: "string",
                },
                logVerbosity: "string",
                testDataSize: 0,
                featurizationSettings: {
                    blockedTransformers: ["string"],
                    columnNameAndTypes: {
                        string: "string",
                    },
                    datasetLanguage: "string",
                    enableDnnFeaturization: false,
                    mode: "string",
                    transformerParams: {
                        string: [{
                            fields: ["string"],
                            parameters: "any",
                        }],
                    },
                },
                trainingSettings: {
                    allowedTrainingAlgorithms: ["string"],
                    blockedTrainingAlgorithms: ["string"],
                    enableDnnTraining: false,
                    enableModelExplainability: false,
                    enableOnnxCompatibleModels: false,
                    enableStackEnsemble: false,
                    enableVoteEnsemble: false,
                    ensembleModelDownloadTimeout: "string",
                    stackEnsembleSettings: {
                        stackMetaLearnerKWargs: "any",
                        stackMetaLearnerTrainPercentage: 0,
                        stackMetaLearnerType: "string",
                    },
                },
                validationData: {
                    jobInputType: "mltable",
                    uri: "string",
                    description: "string",
                    mode: "string",
                },
                validationDataSize: 0,
                weightColumnName: "string",
            },
            isArchived: false,
            description: "string",
            environmentId: "string",
            environmentVariables: {
                string: "string",
            },
            experimentName: "string",
            identity: {
                identityType: "AMLToken",
            },
            componentId: "string",
            displayName: "string",
            outputs: {
                string: {
                    jobOutputType: "custom_model",
                    description: "string",
                    mode: "string",
                    uri: "string",
                },
            },
            properties: {
                string: "string",
            },
            resources: {
                dockerArgs: "string",
                instanceCount: 0,
                instanceType: "string",
                properties: {
                    string: "any",
                },
                shmSize: "string",
            },
            services: {
                string: {
                    endpoint: "string",
                    jobServiceType: "string",
                    nodes: {
                        nodesValueType: "All",
                    },
                    port: 0,
                    properties: {
                        string: "string",
                    },
                },
            },
            tags: {
                string: "string",
            },
            computeId: "string",
        },
        resourceGroupName: "string",
        workspaceName: "string",
        id: "string",
    });
    
    type: azure-native:machinelearningservices:Job
    properties:
        id: string
        jobBaseProperties:
            componentId: string
            computeId: string
            description: string
            displayName: string
            environmentId: string
            environmentVariables:
                string: string
            experimentName: string
            identity:
                identityType: AMLToken
            isArchived: false
            jobType: AutoML
            outputs:
                string:
                    description: string
                    jobOutputType: custom_model
                    mode: string
                    uri: string
            properties:
                string: string
            resources:
                dockerArgs: string
                instanceCount: 0
                instanceType: string
                properties:
                    string: any
                shmSize: string
            services:
                string:
                    endpoint: string
                    jobServiceType: string
                    nodes:
                        nodesValueType: All
                    port: 0
                    properties:
                        string: string
            tags:
                string: string
            taskDetails:
                cvSplitColumnNames:
                    - string
                featurizationSettings:
                    blockedTransformers:
                        - string
                    columnNameAndTypes:
                        string: string
                    datasetLanguage: string
                    enableDnnFeaturization: false
                    mode: string
                    transformerParams:
                        string:
                            - fields:
                                - string
                              parameters: any
                limitSettings:
                    enableEarlyTermination: false
                    exitScore: 0
                    maxConcurrentTrials: 0
                    maxCoresPerTrial: 0
                    maxTrials: 0
                    timeout: string
                    trialTimeout: string
                logVerbosity: string
                nCrossValidations:
                    mode: Auto
                positiveLabel: string
                primaryMetric: string
                targetColumnName: string
                taskType: Classification
                testData:
                    description: string
                    jobInputType: mltable
                    mode: string
                    uri: string
                testDataSize: 0
                trainingData:
                    description: string
                    jobInputType: mltable
                    mode: string
                    uri: string
                trainingSettings:
                    allowedTrainingAlgorithms:
                        - string
                    blockedTrainingAlgorithms:
                        - string
                    enableDnnTraining: false
                    enableModelExplainability: false
                    enableOnnxCompatibleModels: false
                    enableStackEnsemble: false
                    enableVoteEnsemble: false
                    ensembleModelDownloadTimeout: string
                    stackEnsembleSettings:
                        stackMetaLearnerKWargs: any
                        stackMetaLearnerTrainPercentage: 0
                        stackMetaLearnerType: string
                validationData:
                    description: string
                    jobInputType: mltable
                    mode: string
                    uri: string
                validationDataSize: 0
                weightColumnName: string
        resourceGroupName: string
        workspaceName: string
    

    Job Resource Properties

    To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.

    Inputs

    The Job resource accepts the following input properties:

    JobBaseProperties Pulumi.AzureNative.MachineLearningServices.Inputs.AutoMLJob | Pulumi.AzureNative.MachineLearningServices.Inputs.CommandJob | Pulumi.AzureNative.MachineLearningServices.Inputs.PipelineJob | Pulumi.AzureNative.MachineLearningServices.Inputs.SweepJob
    [Required] Additional attributes of the entity.
    ResourceGroupName string
    The name of the resource group. The name is case insensitive.
    WorkspaceName string
    Name of Azure Machine Learning workspace.
    Id string
    The name and identifier for the Job. This is case-sensitive.
    JobBaseProperties AutoMLJobArgs | CommandJobArgs | PipelineJobArgs | SweepJobArgs
    [Required] Additional attributes of the entity.
    ResourceGroupName string
    The name of the resource group. The name is case insensitive.
    WorkspaceName string
    Name of Azure Machine Learning workspace.
    Id string
    The name and identifier for the Job. This is case-sensitive.
    jobBaseProperties AutoMLJob | CommandJob | PipelineJob | SweepJob
    [Required] Additional attributes of the entity.
    resourceGroupName String
    The name of the resource group. The name is case insensitive.
    workspaceName String
    Name of Azure Machine Learning workspace.
    id String
    The name and identifier for the Job. This is case-sensitive.
    jobBaseProperties AutoMLJob | CommandJob | PipelineJob | SweepJob
    [Required] Additional attributes of the entity.
    resourceGroupName string
    The name of the resource group. The name is case insensitive.
    workspaceName string
    Name of Azure Machine Learning workspace.
    id string
    The name and identifier for the Job. This is case-sensitive.
    job_base_properties AutoMLJobArgs | CommandJobArgs | PipelineJobArgs | SweepJobArgs
    [Required] Additional attributes of the entity.
    resource_group_name str
    The name of the resource group. The name is case insensitive.
    workspace_name str
    Name of Azure Machine Learning workspace.
    id str
    The name and identifier for the Job. This is case-sensitive.
    jobBaseProperties Property Map | Property Map | Property Map | Property Map
    [Required] Additional attributes of the entity.
    resourceGroupName String
    The name of the resource group. The name is case insensitive.
    workspaceName String
    Name of Azure Machine Learning workspace.
    id String
    The name and identifier for the Job. This is case-sensitive.
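
    jobBaseProperties is a tagged union: the jobType value ("AutoML", "Command", "Pipeline", or "Sweep") selects which of the four argument shapes is being supplied. The AutoML and Sweep shapes appear in the examples above; the following is a minimal, hedged Python sketch of the Pipeline variant (field values are placeholders, and the child-job definitions under jobs are service-defined JSON, so only an empty placeholder is given):

    import pulumi_azure_native as azure_native

    # Sketch only: the Pipeline variant of jobBaseProperties. The job_type
    # discriminator selects PipelineJobArgs; inputs use the same JobInput union
    # as other job kinds (here a literal input).
    pipeline_props = azure_native.machinelearningservices.PipelineJobArgs(
        job_type="Pipeline",
        experiment_name="string",
        inputs={
            "learning_rate": azure_native.machinelearningservices.LiteralJobInputArgs(
                job_input_type="literal",
                value="0.01",
            ),
        },
        jobs={},  # placeholder: raw definitions of the pipeline's child jobs
    )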

    Outputs

    All input properties are implicitly available as output properties. Additionally, the Job resource produces the following output properties:

    Id string
    The provider-assigned unique ID for this managed resource.
    Name string
    The name of the resource
    SystemData Pulumi.AzureNative.MachineLearningServices.Outputs.SystemDataResponse
    Azure Resource Manager metadata containing createdBy and modifiedBy information.
    Type string
    The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
    Id string
    The provider-assigned unique ID for this managed resource.
    Name string
    The name of the resource
    SystemData SystemDataResponse
    Azure Resource Manager metadata containing createdBy and modifiedBy information.
    Type string
    The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
    id String
    The provider-assigned unique ID for this managed resource.
    name String
    The name of the resource
    systemData SystemDataResponse
    Azure Resource Manager metadata containing createdBy and modifiedBy information.
    type String
    The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
    id string
    The provider-assigned unique ID for this managed resource.
    name string
    The name of the resource
    systemData SystemDataResponse
    Azure Resource Manager metadata containing createdBy and modifiedBy information.
    type string
    The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
    id str
    The provider-assigned unique ID for this managed resource.
    name str
    The name of the resource
    system_data SystemDataResponse
    Azure Resource Manager metadata containing createdBy and modifiedBy information.
    type str
    The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
    id String
    The provider-assigned unique ID for this managed resource.
    name String
    The name of the resource
    systemData Property Map
    Azure Resource Manager metadata containing createdBy and modifiedBy information.
    type String
    The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
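
    Because these are outputs, their values are only known after deployment; read them with apply or export them as stack outputs. A brief Python sketch, assuming job is the Job resource declared in the Python example earlier on this page:

    import pulumi

    # Export selected output properties of the Job resource declared above.
    pulumi.export("jobName", job.name)
    pulumi.export("jobResourceType", job.type)
    pulumi.export("jobCreatedBy", job.system_data.apply(lambda sd: sd.created_by))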

    Supporting Types

    AllNodes, AllNodesArgs

    AllNodesResponse, AllNodesResponseArgs

    AmlToken, AmlTokenArgs

    AmlTokenResponse, AmlTokenResponseArgs

    AutoForecastHorizon, AutoForecastHorizonArgs

    AutoForecastHorizonResponse, AutoForecastHorizonResponseArgs

    AutoMLJob, AutoMLJobArgs

    TaskDetails Pulumi.AzureNative.MachineLearningServices.Inputs.Classification | Pulumi.AzureNative.MachineLearningServices.Inputs.Forecasting | Pulumi.AzureNative.MachineLearningServices.Inputs.ImageClassification | Pulumi.AzureNative.MachineLearningServices.Inputs.ImageClassificationMultilabel | Pulumi.AzureNative.MachineLearningServices.Inputs.ImageInstanceSegmentation | Pulumi.AzureNative.MachineLearningServices.Inputs.ImageObjectDetection | Pulumi.AzureNative.MachineLearningServices.Inputs.Regression | Pulumi.AzureNative.MachineLearningServices.Inputs.TextClassification | Pulumi.AzureNative.MachineLearningServices.Inputs.TextClassificationMultilabel | Pulumi.AzureNative.MachineLearningServices.Inputs.TextNer
    [Required] This represents the scenario, which can be one of Tables, NLP, or Image.
    ComponentId string
    ARM resource ID of the component resource.
    ComputeId string
    ARM resource ID of the compute resource.
    Description string
    The asset description text.
    DisplayName string
    Display name of job.
    EnvironmentId string
    The ARM resource ID of the Environment specification for the job. This value is optional; if not provided, AutoML defaults it to the production AutoML curated environment version when running the job.
    EnvironmentVariables Dictionary<string, string>
    Environment variables included in the job.
    ExperimentName string
    The name of the experiment the job belongs to. If not set, the job is placed in the "Default" experiment.
    Identity Pulumi.AzureNative.MachineLearningServices.Inputs.AmlToken | Pulumi.AzureNative.MachineLearningServices.Inputs.ManagedIdentity | Pulumi.AzureNative.MachineLearningServices.Inputs.UserIdentity
    Identity configuration. If set, this should be one of AmlToken, ManagedIdentity, UserIdentity or null. Defaults to AmlToken if null.
    IsArchived bool
    Is the asset archived?
    Outputs Dictionary<string, object>
    Mapping of output data bindings used in the job.
    Properties Dictionary<string, string>
    The asset property dictionary.
    Resources Pulumi.AzureNative.MachineLearningServices.Inputs.JobResourceConfiguration
    Compute Resource configuration for the job.
    Services Dictionary<string, Pulumi.AzureNative.MachineLearningServices.Inputs.JobService>
    List of JobEndpoints. For local jobs, a job endpoint will have an endpoint value of FileStreamObject.
    Tags Dictionary<string, string>
    Tag dictionary. Tags can be added, removed, and updated.
    TaskDetails Classification | Forecasting | ImageClassification | ImageClassificationMultilabel | ImageInstanceSegmentation | ImageObjectDetection | Regression | TextClassification | TextClassificationMultilabel | TextNer
    [Required] This represents the scenario, which can be one of Tables, NLP, or Image.
    ComponentId string
    ARM resource ID of the component resource.
    ComputeId string
    ARM resource ID of the compute resource.
    Description string
    The asset description text.
    DisplayName string
    Display name of job.
    EnvironmentId string
    The ARM resource ID of the Environment specification for the job. This value is optional; if not provided, AutoML defaults it to the production AutoML curated environment version when running the job.
    EnvironmentVariables map[string]string
    Environment variables included in the job.
    ExperimentName string
    The name of the experiment the job belongs to. If not set, the job is placed in the "Default" experiment.
    Identity AmlToken | ManagedIdentity | UserIdentity
    Identity configuration. If set, this should be one of AmlToken, ManagedIdentity, UserIdentity or null. Defaults to AmlToken if null.
    IsArchived bool
    Is the asset archived?
    Outputs map[string]interface{}
    Mapping of output data bindings used in the job.
    Properties map[string]string
    The asset property dictionary.
    Resources JobResourceConfiguration
    Compute Resource configuration for the job.
    Services map[string]JobService
    List of JobEndpoints. For local jobs, a job endpoint will have an endpoint value of FileStreamObject.
    Tags map[string]string
    Tag dictionary. Tags can be added, removed, and updated.
    taskDetails Classification | Forecasting | ImageClassification | ImageClassificationMultilabel | ImageInstanceSegmentation | ImageObjectDetection | Regression | TextClassification | TextClassificationMultilabel | TextNer
    [Required] This represents the scenario, which can be one of Tables, NLP, or Image.
    componentId String
    ARM resource ID of the component resource.
    computeId String
    ARM resource ID of the compute resource.
    description String
    The asset description text.
    displayName String
    Display name of job.
    environmentId String
    The ARM resource ID of the Environment specification for the job. This value is optional; if not provided, AutoML defaults it to the production AutoML curated environment version when running the job.
    environmentVariables Map<String,String>
    Environment variables included in the job.
    experimentName String
    The name of the experiment the job belongs to. If not set, the job is placed in the "Default" experiment.
    identity AmlToken | ManagedIdentity | UserIdentity
    Identity configuration. If set, this should be one of AmlToken, ManagedIdentity, UserIdentity or null. Defaults to AmlToken if null.
    isArchived Boolean
    Is the asset archived?
    outputs Map<String,Object>
    Mapping of output data bindings used in the job.
    properties Map<String,String>
    The asset property dictionary.
    resources JobResourceConfiguration
    Compute Resource configuration for the job.
    services Map<String,JobService>
    List of JobEndpoints. For local jobs, a job endpoint will have an endpoint value of FileStreamObject.
    tags Map<String,String>
    Tag dictionary. Tags can be added, removed, and updated.
    taskDetails Classification | Forecasting | ImageClassification | ImageClassificationMultilabel | ImageInstanceSegmentation | ImageObjectDetection | Regression | TextClassification | TextClassificationMultilabel | TextNer
    [Required] This represents the scenario, which can be one of Tables, NLP, or Image.
    componentId string
    ARM resource ID of the component resource.
    computeId string
    ARM resource ID of the compute resource.
    description string
    The asset description text.
    displayName string
    Display name of job.
    environmentId string
    The ARM resource ID of the Environment specification for the job. This value is optional; if not provided, AutoML defaults it to the production AutoML curated environment version when running the job.
    environmentVariables {[key: string]: string}
    Environment variables included in the job.
    experimentName string
    The name of the experiment the job belongs to. If not set, the job is placed in the "Default" experiment.
    identity AmlToken | ManagedIdentity | UserIdentity
    Identity configuration. If set, this should be one of AmlToken, ManagedIdentity, UserIdentity or null. Defaults to AmlToken if null.
    isArchived boolean
    Is the asset archived?
    outputs {[key: string]: CustomModelJobOutput | MLFlowModelJobOutput | MLTableJobOutput | TritonModelJobOutput | UriFileJobOutput | UriFolderJobOutput}
    Mapping of output data bindings used in the job.
    properties {[key: string]: string}
    The asset property dictionary.
    resources JobResourceConfiguration
    Compute Resource configuration for the job.
    services {[key: string]: JobService}
    List of JobEndpoints. For local jobs, a job endpoint will have an endpoint value of FileStreamObject.
    tags {[key: string]: string}
    Tag dictionary. Tags can be added, removed, and updated.
    task_details Classification | Forecasting | ImageClassification | ImageClassificationMultilabel | ImageInstanceSegmentation | ImageObjectDetection | Regression | TextClassification | TextClassificationMultilabel | TextNer
    [Required] This represents the scenario, which can be one of Tables, NLP, or Image.
    component_id str
    ARM resource ID of the component resource.
    compute_id str
    ARM resource ID of the compute resource.
    description str
    The asset description text.
    display_name str
    Display name of job.
    environment_id str
    The ARM resource ID of the Environment specification for the job. This value is optional; if not provided, AutoML defaults it to the production AutoML curated environment version when running the job.
    environment_variables Mapping[str, str]
    Environment variables included in the job.
    experiment_name str
    The name of the experiment the job belongs to. If not set, the job is placed in the "Default" experiment.
    identity AmlToken | ManagedIdentity | UserIdentity
    Identity configuration. If set, this should be one of AmlToken, ManagedIdentity, UserIdentity or null. Defaults to AmlToken if null.
    is_archived bool
    Is the asset archived?
    outputs Mapping[str, Union[CustomModelJobOutput, MLFlowModelJobOutput, MLTableJobOutput, TritonModelJobOutput, UriFileJobOutput, UriFolderJobOutput]]
    Mapping of output data bindings used in the job.
    properties Mapping[str, str]
    The asset property dictionary.
    resources JobResourceConfiguration
    Compute Resource configuration for the job.
    services Mapping[str, JobService]
    List of JobEndpoints. For local jobs, a job endpoint will have an endpoint value of FileStreamObject.
    tags Mapping[str, str]
    Tag dictionary. Tags can be added, removed, and updated.
    taskDetails Property Map | Property Map | Property Map | Property Map | Property Map | Property Map | Property Map | Property Map | Property Map | Property Map
    [Required] This represents the scenario, which can be one of Tables, NLP, or Image.
    componentId String
    ARM resource ID of the component resource.
    computeId String
    ARM resource ID of the compute resource.
    description String
    The asset description text.
    displayName String
    Display name of job.
    environmentId String
    The ARM resource ID of the Environment specification for the job. This value is optional; if not provided, AutoML defaults it to the production AutoML curated environment version when running the job.
    environmentVariables Map<String>
    Environment variables included in the job.
    experimentName String
    The name of the experiment the job belongs to. If not set, the job is placed in the "Default" experiment.
    identity Property Map | Property Map | Property Map
    Identity configuration. If set, this should be one of AmlToken, ManagedIdentity, UserIdentity or null. Defaults to AmlToken if null.
    isArchived Boolean
    Is the asset archived?
    outputs Map<Property Map | Property Map | Property Map | Property Map | Property Map | Property Map>
    Mapping of output data bindings used in the job.
    properties Map<String>
    The asset property dictionary.
    resources Property Map
    Compute Resource configuration for the job.
    services Map<Property Map>
    List of JobEndpoints. For local jobs, a job endpoint will have an endpoint value of FileStreamObject.
    tags Map<String>
    Tag dictionary. Tags can be added, removed, and updated.
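
    Several AutoMLJob fields are themselves unions selected by a discriminator: taskDetails by taskType, identity by identityType, and each outputs entry by jobOutputType. A minimal, hedged Python sketch (placeholder values; the datastore URI is hypothetical) illustrating those discriminators with a Classification task, a user identity, and an MLflow model output:

    import pulumi_azure_native as azure_native

    # Sketch only: discriminator fields pick the union member in each position.
    automl_props = azure_native.machinelearningservices.AutoMLJobArgs(
        job_type="AutoML",
        task_details=azure_native.machinelearningservices.ClassificationArgs(
            task_type="Classification",
            training_data=azure_native.machinelearningservices.MLTableJobInputArgs(
                job_input_type="mltable",
                uri="azureml://datastores/workspaceblobstore/paths/training/",  # hypothetical
            ),
            target_column_name="label",
        ),
        identity=azure_native.machinelearningservices.UserIdentityArgs(
            identity_type="UserIdentity",
        ),
        outputs={
            "best_model": azure_native.machinelearningservices.MLFlowModelJobOutputArgs(
                job_output_type="mlflow_model",
            ),
        },
    )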

    AutoMLJobResponse, AutoMLJobResponseArgs

    Status string
    Status of the job.
    TaskDetails Pulumi.AzureNative.MachineLearningServices.Inputs.ClassificationResponse | Pulumi.AzureNative.MachineLearningServices.Inputs.ForecastingResponse | Pulumi.AzureNative.MachineLearningServices.Inputs.ImageClassificationResponse | Pulumi.AzureNative.MachineLearningServices.Inputs.ImageClassificationMultilabelResponse | Pulumi.AzureNative.MachineLearningServices.Inputs.ImageInstanceSegmentationResponse | Pulumi.AzureNative.MachineLearningServices.Inputs.ImageObjectDetectionResponse | Pulumi.AzureNative.MachineLearningServices.Inputs.RegressionResponse | Pulumi.AzureNative.MachineLearningServices.Inputs.TextClassificationResponse | Pulumi.AzureNative.MachineLearningServices.Inputs.TextClassificationMultilabelResponse | Pulumi.AzureNative.MachineLearningServices.Inputs.TextNerResponse
    [Required] This represents the scenario, which can be one of Tables, NLP, or Image.
    ComponentId string
    ARM resource ID of the component resource.
    ComputeId string
    ARM resource ID of the compute resource.
    Description string
    The asset description text.
    DisplayName string
    Display name of job.
    EnvironmentId string
    The ARM resource ID of the Environment specification for the job. This value is optional; if not provided, AutoML defaults it to the production AutoML curated environment version when running the job.
    EnvironmentVariables Dictionary<string, string>
    Environment variables included in the job.
    ExperimentName string
    The name of the experiment the job belongs to. If not set, the job is placed in the "Default" experiment.
    Identity Pulumi.AzureNative.MachineLearningServices.Inputs.AmlTokenResponse | Pulumi.AzureNative.MachineLearningServices.Inputs.ManagedIdentityResponse | Pulumi.AzureNative.MachineLearningServices.Inputs.UserIdentityResponse
    Identity configuration. If set, this should be one of AmlToken, ManagedIdentity, UserIdentity or null. Defaults to AmlToken if null.
    IsArchived bool
    Is the asset archived?
    Outputs Dictionary<string, object>
    Mapping of output data bindings used in the job.
    Properties Dictionary<string, string>
    The asset property dictionary.
    Resources Pulumi.AzureNative.MachineLearningServices.Inputs.JobResourceConfigurationResponse
    Compute Resource configuration for the job.
    Services Dictionary<string, Pulumi.AzureNative.MachineLearningServices.Inputs.JobServiceResponse>
    List of JobEndpoints. For local jobs, a job endpoint will have an endpoint value of FileStreamObject.
    Tags Dictionary<string, string>
    Tag dictionary. Tags can be added, removed, and updated.
    Status string
    Status of the job.
    TaskDetails ClassificationResponse | ForecastingResponse | ImageClassificationResponse | ImageClassificationMultilabelResponse | ImageInstanceSegmentationResponse | ImageObjectDetectionResponse | RegressionResponse | TextClassificationResponse | TextClassificationMultilabelResponse | TextNerResponse
    [Required] This represents the scenario, which can be one of Tables, NLP, or Image.
    ComponentId string
    ARM resource ID of the component resource.
    ComputeId string
    ARM resource ID of the compute resource.
    Description string
    The asset description text.
    DisplayName string
    Display name of job.
    EnvironmentId string
    The ARM resource ID of the Environment specification for the job. This value is optional; if not provided, AutoML defaults it to the production AutoML curated environment version when running the job.
    EnvironmentVariables map[string]string
    Environment variables included in the job.
    ExperimentName string
    The name of the experiment the job belongs to. If not set, the job is placed in the "Default" experiment.
    Identity AmlTokenResponse | ManagedIdentityResponse | UserIdentityResponse
    Identity configuration. If set, this should be one of AmlToken, ManagedIdentity, UserIdentity or null. Defaults to AmlToken if null.
    IsArchived bool
    Is the asset archived?
    Outputs map[string]interface{}
    Mapping of output data bindings used in the job.
    Properties map[string]string
    The asset property dictionary.
    Resources JobResourceConfigurationResponse
    Compute Resource configuration for the job.
    Services map[string]JobServiceResponse
    List of JobEndpoints. For local jobs, a job endpoint will have an endpoint value of FileStreamObject.
    Tags map[string]string
    Tag dictionary. Tags can be added, removed, and updated.
    status String
    Status of the job.
    taskDetails ClassificationResponse | ForecastingResponse | ImageClassificationResponse | ImageClassificationMultilabelResponse | ImageInstanceSegmentationResponse | ImageObjectDetectionResponse | RegressionResponse | TextClassificationResponse | TextClassificationMultilabelResponse | TextNerResponse
    [Required] This represents the scenario, which can be one of Tables, NLP, or Image.
    componentId String
    ARM resource ID of the component resource.
    computeId String
    ARM resource ID of the compute resource.
    description String
    The asset description text.
    displayName String
    Display name of job.
    environmentId String
    The ARM resource ID of the Environment specification for the job. This value is optional; if it is not provided, AutoML defaults to the Production AutoML curated environment version when running the job.
    environmentVariables Map<String,String>
    Environment variables included in the job.
    experimentName String
    The name of the experiment the job belongs to. If not set, the job is placed in the "Default" experiment.
    identity AmlTokenResponse | ManagedIdentityResponse | UserIdentityResponse
    Identity configuration. If set, this should be one of AmlToken, ManagedIdentity, UserIdentity or null. Defaults to AmlToken if null.
    isArchived Boolean
    Is the asset archived?
    outputs Map<String,Object>
    Mapping of output data bindings used in the job.
    properties Map<String,String>
    The asset property dictionary.
    resources JobResourceConfigurationResponse
    Compute Resource configuration for the job.
    services Map<String,JobServiceResponse>
    List of JobEndpoints. For local jobs, a job endpoint will have an endpoint value of FileStreamObject.
    tags Map<String,String>
    Tag dictionary. Tags can be added, removed, and updated.
    status string
    Status of the job.
    taskDetails ClassificationResponse | ForecastingResponse | ImageClassificationResponse | ImageClassificationMultilabelResponse | ImageInstanceSegmentationResponse | ImageObjectDetectionResponse | RegressionResponse | TextClassificationResponse | TextClassificationMultilabelResponse | TextNerResponse
    [Required] This represents the scenario, which can be one of Tables, NLP, or Image.
    componentId string
    ARM resource ID of the component resource.
    computeId string
    ARM resource ID of the compute resource.
    description string
    The asset description text.
    displayName string
    Display name of job.
    environmentId string
    The ARM resource ID of the Environment specification for the job. This value is optional; if it is not provided, AutoML defaults to the Production AutoML curated environment version when running the job.
    environmentVariables {[key: string]: string}
    Environment variables included in the job.
    experimentName string
    The name of the experiment the job belongs to. If not set, the job is placed in the "Default" experiment.
    identity AmlTokenResponse | ManagedIdentityResponse | UserIdentityResponse
    Identity configuration. If set, this should be one of AmlToken, ManagedIdentity, UserIdentity or null. Defaults to AmlToken if null.
    isArchived boolean
    Is the asset archived?
    outputs {[key: string]: CustomModelJobOutputResponse | MLFlowModelJobOutputResponse | MLTableJobOutputResponse | TritonModelJobOutputResponse | UriFileJobOutputResponse | UriFolderJobOutputResponse}
    Mapping of output data bindings used in the job.
    properties {[key: string]: string}
    The asset property dictionary.
    resources JobResourceConfigurationResponse
    Compute Resource configuration for the job.
    services {[key: string]: JobServiceResponse}
    List of JobEndpoints. For local jobs, a job endpoint will have an endpoint value of FileStreamObject.
    tags {[key: string]: string}
    Tag dictionary. Tags can be added, removed, and updated.
    status str
    Status of the job.
    task_details ClassificationResponse | ForecastingResponse | ImageClassificationResponse | ImageClassificationMultilabelResponse | ImageInstanceSegmentationResponse | ImageObjectDetectionResponse | RegressionResponse | TextClassificationResponse | TextClassificationMultilabelResponse | TextNerResponse
    [Required] This represents the scenario, which can be one of Tables, NLP, or Image.
    component_id str
    ARM resource ID of the component resource.
    compute_id str
    ARM resource ID of the compute resource.
    description str
    The asset description text.
    display_name str
    Display name of job.
    environment_id str
    The ARM resource ID of the Environment specification for the job. This value is optional; if it is not provided, AutoML defaults to the Production AutoML curated environment version when running the job.
    environment_variables Mapping[str, str]
    Environment variables included in the job.
    experiment_name str
    The name of the experiment the job belongs to. If not set, the job is placed in the "Default" experiment.
    identity AmlTokenResponse | ManagedIdentityResponse | UserIdentityResponse
    Identity configuration. If set, this should be one of AmlToken, ManagedIdentity, UserIdentity or null. Defaults to AmlToken if null.
    is_archived bool
    Is the asset archived?
    outputs Mapping[str, Union[CustomModelJobOutputResponse, MLFlowModelJobOutputResponse, MLTableJobOutputResponse, TritonModelJobOutputResponse, UriFileJobOutputResponse, UriFolderJobOutputResponse]]
    Mapping of output data bindings used in the job.
    properties Mapping[str, str]
    The asset property dictionary.
    resources JobResourceConfigurationResponse
    Compute Resource configuration for the job.
    services Mapping[str, JobServiceResponse]
    List of JobEndpoints. For local jobs, a job endpoint will have an endpoint value of FileStreamObject.
    tags Mapping[str, str]
    Tag dictionary. Tags can be added, removed, and updated.
    status String
    Status of the job.
    taskDetails Property Map | Property Map | Property Map | Property Map | Property Map | Property Map | Property Map | Property Map | Property Map | Property Map
    [Required] This represents the scenario, which can be one of Tables, NLP, or Image.
    componentId String
    ARM resource ID of the component resource.
    computeId String
    ARM resource ID of the compute resource.
    description String
    The asset description text.
    displayName String
    Display name of job.
    environmentId String
    The ARM resource ID of the Environment specification for the job. This value is optional; if it is not provided, AutoML defaults to the Production AutoML curated environment version when running the job.
    environmentVariables Map<String>
    Environment variables included in the job.
    experimentName String
    The name of the experiment the job belongs to. If not set, the job is placed in the "Default" experiment.
    identity Property Map | Property Map | Property Map
    Identity configuration. If set, this should be one of AmlToken, ManagedIdentity, UserIdentity or null. Defaults to AmlToken if null.
    isArchived Boolean
    Is the asset archived?
    outputs Map<Property Map | Property Map | Property Map | Property Map | Property Map | Property Map>
    Mapping of output data bindings used in the job.
    properties Map<String>
    The asset property dictionary.
    resources Property Map
    Compute Resource configuration for the job.
    services Map<Property Map>
    List of JobEndpoints. For local jobs, a job endpoint will have an endpoint value of FileStreamObject.
    tags Map<String>
    Tag dictionary. Tags can be added, removed, and updated.
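
    The properties listed above correspond to the AutoML job payload passed as the Job resource's job_base_properties input. The following is a minimal Pulumi Python sketch, shown for orientation only: the resource group, workspace, compute ID, VM size, and data URI are placeholders, and the task details are reduced to a small classification task.

    import pulumi_azure_native as azure_native

    ml = azure_native.machinelearningservices

    job = ml.Job("automlJob",
        # Placeholders; substitute your own resource group and workspace.
        resource_group_name="my-rg",
        workspace_name="my-workspace",
        job_base_properties=ml.AutoMLJobArgs(
            job_type="AutoML",
            compute_id="/subscriptions/.../computes/cpu-cluster",  # placeholder ARM ID
            experiment_name="my-experiment",  # job lands in the "Default" experiment when omitted
            identity=ml.AmlTokenArgs(identity_type="AMLToken"),  # AmlToken is also the default
            environment_variables={"MY_FLAG": "1"},
            outputs={
                "best_model": ml.UriFileJobOutputArgs(
                    job_output_type="uri_file",
                    mode=ml.OutputDeliveryMode.READ_WRITE_MOUNT,
                ),
            },
            resources=ml.JobResourceConfigurationArgs(
                instance_count=1,
                instance_type="Standard_DS3_v2",  # placeholder VM size
            ),
            services={
                "custom_endpoint": ml.JobServiceArgs(
                    job_service_type="Custom",  # illustrative service type
                    port=8080,
                ),
            },
            tags={"team": "data-science"},
            task_details=ml.ClassificationArgs(
                task_type="Classification",
                target_column_name="label",
                training_data=ml.MLTableJobInputArgs(
                    job_input_type="mltable",
                    uri="azureml://datastores/workspaceblobstore/paths/training-mltable/",  # placeholder
                ),
            ),
        ))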

    AutoNCrossValidations, AutoNCrossValidationsArgs

    AutoNCrossValidationsResponse, AutoNCrossValidationsResponseArgs

    AutoSeasonality, AutoSeasonalityArgs

    AutoSeasonalityResponse, AutoSeasonalityResponseArgs

    AutoTargetLags, AutoTargetLagsArgs

    AutoTargetLagsResponse, AutoTargetLagsResponseArgs

    AutoTargetRollingWindowSize, AutoTargetRollingWindowSizeArgs

    AutoTargetRollingWindowSizeResponse, AutoTargetRollingWindowSizeResponseArgs

    BanditPolicy, BanditPolicyArgs

    DelayEvaluation int
    Number of intervals by which to delay the first evaluation.
    EvaluationInterval int
    Interval (number of runs) between policy evaluations.
    SlackAmount double
    Absolute distance allowed from the best performing run.
    SlackFactor double
    Ratio of the allowed distance from the best performing run.
    DelayEvaluation int
    Number of intervals by which to delay the first evaluation.
    EvaluationInterval int
    Interval (number of runs) between policy evaluations.
    SlackAmount float64
    Absolute distance allowed from the best performing run.
    SlackFactor float64
    Ratio of the allowed distance from the best performing run.
    delayEvaluation Integer
    Number of intervals by which to delay the first evaluation.
    evaluationInterval Integer
    Interval (number of runs) between policy evaluations.
    slackAmount Double
    Absolute distance allowed from the best performing run.
    slackFactor Double
    Ratio of the allowed distance from the best performing run.
    delayEvaluation number
    Number of intervals by which to delay the first evaluation.
    evaluationInterval number
    Interval (number of runs) between policy evaluations.
    slackAmount number
    Absolute distance allowed from the best performing run.
    slackFactor number
    Ratio of the allowed distance from the best performing run.
    delay_evaluation int
    Number of intervals by which to delay the first evaluation.
    evaluation_interval int
    Interval (number of runs) between policy evaluations.
    slack_amount float
    Absolute distance allowed from the best performing run.
    slack_factor float
    Ratio of the allowed distance from the best performing run.
    delayEvaluation Number
    Number of intervals by which to delay the first evaluation.
    evaluationInterval Number
    Interval (number of runs) between policy evaluations.
    slackAmount Number
    Absolute distance allowed from the best performing run.
    slackFactor Number
    Ratio of the allowed distance from the best performing run.
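
    For orientation, the sketch below constructs these bandit-policy inputs in Pulumi Python. A bandit policy is typically attached as the early-termination policy of a sweep job (a property that is not part of this excerpt), so the variable here is only illustrative; normally either slack_factor or slack_amount is specified, not both, and the policy_type discriminator is assumed to be required, mirroring the other discriminated types in this resource.

    import pulumi_azure_native as azure_native

    ml = azure_native.machinelearningservices

    # Stop runs whose primary metric falls more than 20% (slack_factor) short of the
    # best run so far, evaluating every interval once 5 intervals have elapsed.
    early_termination = ml.BanditPolicyArgs(
        policy_type="Bandit",
        slack_factor=0.2,
        evaluation_interval=1,
        delay_evaluation=5,
    )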

    BanditPolicyResponse, BanditPolicyResponseArgs

    DelayEvaluation int
    Number of intervals by which to delay the first evaluation.
    EvaluationInterval int
    Interval (number of runs) between policy evaluations.
    SlackAmount double
    Absolute distance allowed from the best performing run.
    SlackFactor double
    Ratio of the allowed distance from the best performing run.
    DelayEvaluation int
    Number of intervals by which to delay the first evaluation.
    EvaluationInterval int
    Interval (number of runs) between policy evaluations.
    SlackAmount float64
    Absolute distance allowed from the best performing run.
    SlackFactor float64
    Ratio of the allowed distance from the best performing run.
    delayEvaluation Integer
    Number of intervals by which to delay the first evaluation.
    evaluationInterval Integer
    Interval (number of runs) between policy evaluations.
    slackAmount Double
    Absolute distance allowed from the best performing run.
    slackFactor Double
    Ratio of the allowed distance from the best performing run.
    delayEvaluation number
    Number of intervals by which to delay the first evaluation.
    evaluationInterval number
    Interval (number of runs) between policy evaluations.
    slackAmount number
    Absolute distance allowed from the best performing run.
    slackFactor number
    Ratio of the allowed distance from the best performing run.
    delay_evaluation int
    Number of intervals by which to delay the first evaluation.
    evaluation_interval int
    Interval (number of runs) between policy evaluations.
    slack_amount float
    Absolute distance allowed from the best performing run.
    slack_factor float
    Ratio of the allowed distance from the best performing run.
    delayEvaluation Number
    Number of intervals by which to delay the first evaluation.
    evaluationInterval Number
    Interval (number of runs) between policy evaluations.
    slackAmount Number
    Absolute distance allowed from the best performing run.
    slackFactor Number
    Ratio of the allowed distance from the best performing run.

    BayesianSamplingAlgorithm, BayesianSamplingAlgorithmArgs

    BayesianSamplingAlgorithmResponse, BayesianSamplingAlgorithmResponseArgs

    BlockedTransformers, BlockedTransformersArgs

    TextTargetEncoder
    TextTargetEncoderTarget encoding for text data.
    OneHotEncoder
    OneHotEncoderOne-hot encoding creates a binary feature transformation.
    CatTargetEncoder
    CatTargetEncoderTarget encoding for categorical data.
    TfIdf
    TfIdfTf-Idf stands for term-frequency times inverse document-frequency. This is a common term-weighting scheme for identifying information in documents.
    WoETargetEncoder
    WoETargetEncoderWeight of Evidence encoding is a technique used to encode categorical variables. It uses the natural log of the P(1)/P(0) to create weights.
    LabelEncoder
    LabelEncoderLabel encoder converts labels/categorical variables into numerical form.
    WordEmbedding
    WordEmbeddingWord embedding helps represent words or phrases as a vector, or a series of numbers.
    NaiveBayes
    NaiveBayesNaive Bayes is a classifier that is used for classification of discrete features that are categorically distributed.
    CountVectorizer
    CountVectorizerCount Vectorizer converts a collection of text documents to a matrix of token counts.
    HashOneHotEncoder
    HashOneHotEncoderHashing One Hot Encoder can turn categorical variables into a limited number of new features. This is often used for high-cardinality categorical features.
    BlockedTransformersTextTargetEncoder
    TextTargetEncoderTarget encoding for text data.
    BlockedTransformersOneHotEncoder
    OneHotEncoderOne-hot encoding creates a binary feature transformation.
    BlockedTransformersCatTargetEncoder
    CatTargetEncoderTarget encoding for categorical data.
    BlockedTransformersTfIdf
    TfIdfTf-Idf stands for term-frequency times inverse document-frequency. This is a common term-weighting scheme for identifying information in documents.
    BlockedTransformersWoETargetEncoder
    WoETargetEncoderWeight of Evidence encoding is a technique used to encode categorical variables. It uses the natural log of the P(1)/P(0) to create weights.
    BlockedTransformersLabelEncoder
    LabelEncoderLabel encoder converts labels/categorical variables into numerical form.
    BlockedTransformersWordEmbedding
    WordEmbeddingWord embedding helps represent words or phrases as a vector, or a series of numbers.
    BlockedTransformersNaiveBayes
    NaiveBayesNaive Bayes is a classifier that is used for classification of discrete features that are categorically distributed.
    BlockedTransformersCountVectorizer
    CountVectorizerCount Vectorizer converts a collection of text documents to a matrix of token counts.
    BlockedTransformersHashOneHotEncoder
    HashOneHotEncoderHashing One Hot Encoder can turn categorical variables into a limited number of new features. This is often used for high-cardinality categorical features.
    TextTargetEncoder
    TextTargetEncoderTarget encoding for text data.
    OneHotEncoder
    OneHotEncoderOne-hot encoding creates a binary feature transformation.
    CatTargetEncoder
    CatTargetEncoderTarget encoding for categorical data.
    TfIdf
    TfIdfTf-Idf stands for term-frequency times inverse document-frequency. This is a common term-weighting scheme for identifying information in documents.
    WoETargetEncoder
    WoETargetEncoderWeight of Evidence encoding is a technique used to encode categorical variables. It uses the natural log of the P(1)/P(0) to create weights.
    LabelEncoder
    LabelEncoderLabel encoder converts labels/categorical variables into numerical form.
    WordEmbedding
    WordEmbeddingWord embedding helps represent words or phrases as a vector, or a series of numbers.
    NaiveBayes
    NaiveBayesNaive Bayes is a classifier that is used for classification of discrete features that are categorically distributed.
    CountVectorizer
    CountVectorizerCount Vectorizer converts a collection of text documents to a matrix of token counts.
    HashOneHotEncoder
    HashOneHotEncoderHashing One Hot Encoder can turn categorical variables into a limited number of new features. This is often used for high-cardinality categorical features.
    TextTargetEncoder
    TextTargetEncoderTarget encoding for text data.
    OneHotEncoder
    OneHotEncoderOne-hot encoding creates a binary feature transformation.
    CatTargetEncoder
    CatTargetEncoderTarget encoding for categorical data.
    TfIdf
    TfIdfTf-Idf stands for term-frequency times inverse document-frequency. This is a common term-weighting scheme for identifying information in documents.
    WoETargetEncoder
    WoETargetEncoderWeight of Evidence encoding is a technique used to encode categorical variables. It uses the natural log of the P(1)/P(0) to create weights.
    LabelEncoder
    LabelEncoderLabel encoder converts labels/categorical variables into numerical form.
    WordEmbedding
    WordEmbeddingWord embedding helps represent words or phrases as a vector, or a series of numbers.
    NaiveBayes
    NaiveBayesNaive Bayes is a classifier that is used for classification of discrete features that are categorically distributed.
    CountVectorizer
    CountVectorizerCount Vectorizer converts a collection of text documents to a matrix of token counts.
    HashOneHotEncoder
    HashOneHotEncoderHashing One Hot Encoder can turn categorical variables into a limited number of new features. This is often used for high-cardinality categorical features.
    TEXT_TARGET_ENCODER
    TextTargetEncoderTarget encoding for text data.
    ONE_HOT_ENCODER
    OneHotEncoderOne-hot encoding creates a binary feature transformation.
    CAT_TARGET_ENCODER
    CatTargetEncoderTarget encoding for categorical data.
    TF_IDF
    TfIdfTf-Idf stands for term-frequency times inverse document-frequency. This is a common term-weighting scheme for identifying information in documents.
    WO_E_TARGET_ENCODER
    WoETargetEncoderWeight of Evidence encoding is a technique used to encode categorical variables. It uses the natural log of the P(1)/P(0) to create weights.
    LABEL_ENCODER
    LabelEncoderLabel encoder converts labels/categorical variables into numerical form.
    WORD_EMBEDDING
    WordEmbeddingWord embedding helps represent words or phrases as a vector, or a series of numbers.
    NAIVE_BAYES
    NaiveBayesNaive Bayes is a classifier that is used for classification of discrete features that are categorically distributed.
    COUNT_VECTORIZER
    CountVectorizerCount Vectorizer converts a collection of text documents to a matrix of token counts.
    HASH_ONE_HOT_ENCODER
    HashOneHotEncoderHashing One Hot Encoder can turn categorical variables into a limited number of new features. This is often used for high-cardinality categorical features.
    "TextTargetEncoder"
    TextTargetEncoderTarget encoding for text data.
    "OneHotEncoder"
    OneHotEncoderOne-hot encoding creates a binary feature transformation.
    "CatTargetEncoder"
    CatTargetEncoderTarget encoding for categorical data.
    "TfIdf"
    TfIdfTf-Idf stands for term-frequency times inverse document-frequency. This is a common term-weighting scheme for identifying information in documents.
    "WoETargetEncoder"
    WoETargetEncoderWeight of Evidence encoding is a technique used to encode categorical variables. It uses the natural log of the P(1)/P(0) to create weights.
    "LabelEncoder"
    LabelEncoderLabel encoder converts labels/categorical variables into numerical form.
    "WordEmbedding"
    WordEmbeddingWord embedding helps represent words or phrases as a vector, or a series of numbers.
    "NaiveBayes"
    NaiveBayesNaive Bayes is a classifier that is used for classification of discrete features that are categorically distributed.
    "CountVectorizer"
    CountVectorizerCount Vectorizer converts a collection of text documents to a matrix of token counts.
    "HashOneHotEncoder"
    HashOneHotEncoderHashing One Hot Encoder can turn categorical variables into a limited number of new features. This is often used for high-cardinality categorical features.
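
    These transformer names are what an AutoML tables task uses to exclude specific featurizers. The Pulumi Python sketch below assumes, based on the Azure ML REST API, that the table featurization settings type exposes a blocked_transformers list; only the featurization fragment is shown.

    import pulumi_azure_native as azure_native

    ml = azure_native.machinelearningservices

    # blocked_transformers is an assumed property name on TableVerticalFeaturizationSettingsArgs;
    # the values are the BlockedTransformers constants documented above.
    featurization = ml.TableVerticalFeaturizationSettingsArgs(
        blocked_transformers=[
            ml.BlockedTransformers.TF_IDF,
            ml.BlockedTransformers.WORD_EMBEDDING,
        ],
    )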

    Classification, ClassificationArgs

    TrainingData Pulumi.AzureNative.MachineLearningServices.Inputs.MLTableJobInput
    [Required] Training data input.
    CvSplitColumnNames List<string>
    Columns to use for CVSplit data.
    FeaturizationSettings Pulumi.AzureNative.MachineLearningServices.Inputs.TableVerticalFeaturizationSettings
    Featurization inputs needed for AutoML job.
    LimitSettings Pulumi.AzureNative.MachineLearningServices.Inputs.TableVerticalLimitSettings
    Execution constraints for AutoMLJob.
    LogVerbosity string | Pulumi.AzureNative.MachineLearningServices.LogVerbosity
    Log verbosity for the job.
    NCrossValidations Pulumi.AzureNative.MachineLearningServices.Inputs.AutoNCrossValidations | Pulumi.AzureNative.MachineLearningServices.Inputs.CustomNCrossValidations
    Number of cross-validation folds to be applied to the training dataset when a validation dataset is not provided.
    PositiveLabel string
    Positive label for binary metrics calculation.
    PrimaryMetric string | Pulumi.AzureNative.MachineLearningServices.ClassificationPrimaryMetrics
    Primary metric for the task.
    TargetColumnName string
    Target column name: the column containing prediction values. Also known as the label column name in the context of classification tasks.
    TestData Pulumi.AzureNative.MachineLearningServices.Inputs.MLTableJobInput
    Test data input.
    TestDataSize double
    The fraction of the test dataset that needs to be set aside for validation purposes. Values between (0.0, 1.0). Applied when a validation dataset is not provided.
    TrainingSettings Pulumi.AzureNative.MachineLearningServices.Inputs.ClassificationTrainingSettings
    Inputs for training phase for an AutoML Job.
    ValidationData Pulumi.AzureNative.MachineLearningServices.Inputs.MLTableJobInput
    Validation data inputs.
    ValidationDataSize double
    The fraction of the training dataset that needs to be set aside for validation purposes. Values between (0.0, 1.0). Applied when a validation dataset is not provided.
    WeightColumnName string
    The name of the sample weight column. Automated ML supports a weighted column as an input, causing rows in the data to be weighted up or down.
    TrainingData MLTableJobInput
    [Required] Training data input.
    CvSplitColumnNames []string
    Columns to use for CVSplit data.
    FeaturizationSettings TableVerticalFeaturizationSettings
    Featurization inputs needed for AutoML job.
    LimitSettings TableVerticalLimitSettings
    Execution constraints for AutoMLJob.
    LogVerbosity string | LogVerbosity
    Log verbosity for the job.
    NCrossValidations AutoNCrossValidations | CustomNCrossValidations
    Number of cross-validation folds to be applied to the training dataset when a validation dataset is not provided.
    PositiveLabel string
    Positive label for binary metrics calculation.
    PrimaryMetric string | ClassificationPrimaryMetrics
    Primary metric for the task.
    TargetColumnName string
    Target column name: the column containing prediction values. Also known as the label column name in the context of classification tasks.
    TestData MLTableJobInput
    Test data input.
    TestDataSize float64
    The fraction of the test dataset that needs to be set aside for validation purposes. Values between (0.0, 1.0). Applied when a validation dataset is not provided.
    TrainingSettings ClassificationTrainingSettings
    Inputs for training phase for an AutoML Job.
    ValidationData MLTableJobInput
    Validation data inputs.
    ValidationDataSize float64
    The fraction of the training dataset that needs to be set aside for validation purposes. Values between (0.0, 1.0). Applied when a validation dataset is not provided.
    WeightColumnName string
    The name of the sample weight column. Automated ML supports a weighted column as an input, causing rows in the data to be weighted up or down.
    trainingData MLTableJobInput
    [Required] Training data input.
    cvSplitColumnNames List<String>
    Columns to use for CVSplit data.
    featurizationSettings TableVerticalFeaturizationSettings
    Featurization inputs needed for AutoML job.
    limitSettings TableVerticalLimitSettings
    Execution constraints for AutoMLJob.
    logVerbosity String | LogVerbosity
    Log verbosity for the job.
    nCrossValidations AutoNCrossValidations | CustomNCrossValidations
    Number of cross-validation folds to be applied to the training dataset when a validation dataset is not provided.
    positiveLabel String
    Positive label for binary metrics calculation.
    primaryMetric String | ClassificationPrimaryMetrics
    Primary metric for the task.
    targetColumnName String
    Target column name: the column containing prediction values. Also known as the label column name in the context of classification tasks.
    testData MLTableJobInput
    Test data input.
    testDataSize Double
    The fraction of the test dataset that needs to be set aside for validation purposes. Values between (0.0, 1.0). Applied when a validation dataset is not provided.
    trainingSettings ClassificationTrainingSettings
    Inputs for training phase for an AutoML Job.
    validationData MLTableJobInput
    Validation data inputs.
    validationDataSize Double
    The fraction of the training dataset that needs to be set aside for validation purposes. Values between (0.0, 1.0). Applied when a validation dataset is not provided.
    weightColumnName String
    The name of the sample weight column. Automated ML supports a weighted column as an input, causing rows in the data to be weighted up or down.
    trainingData MLTableJobInput
    [Required] Training data input.
    cvSplitColumnNames string[]
    Columns to use for CVSplit data.
    featurizationSettings TableVerticalFeaturizationSettings
    Featurization inputs needed for AutoML job.
    limitSettings TableVerticalLimitSettings
    Execution constraints for AutoMLJob.
    logVerbosity string | LogVerbosity
    Log verbosity for the job.
    nCrossValidations AutoNCrossValidations | CustomNCrossValidations
    Number of cross-validation folds to be applied to the training dataset when a validation dataset is not provided.
    positiveLabel string
    Positive label for binary metrics calculation.
    primaryMetric string | ClassificationPrimaryMetrics
    Primary metric for the task.
    targetColumnName string
    Target column name: the column containing prediction values. Also known as the label column name in the context of classification tasks.
    testData MLTableJobInput
    Test data input.
    testDataSize number
    The fraction of the test dataset that needs to be set aside for validation purposes. Values between (0.0, 1.0). Applied when a validation dataset is not provided.
    trainingSettings ClassificationTrainingSettings
    Inputs for training phase for an AutoML Job.
    validationData MLTableJobInput
    Validation data inputs.
    validationDataSize number
    The fraction of the training dataset that needs to be set aside for validation purposes. Values between (0.0, 1.0). Applied when a validation dataset is not provided.
    weightColumnName string
    The name of the sample weight column. Automated ML supports a weighted column as an input, causing rows in the data to be weighted up or down.
    training_data MLTableJobInput
    [Required] Training data input.
    cv_split_column_names Sequence[str]
    Columns to use for CVSplit data.
    featurization_settings TableVerticalFeaturizationSettings
    Featurization inputs needed for AutoML job.
    limit_settings TableVerticalLimitSettings
    Execution constraints for AutoMLJob.
    log_verbosity str | LogVerbosity
    Log verbosity for the job.
    n_cross_validations AutoNCrossValidations | CustomNCrossValidations
    Number of cross-validation folds to be applied to the training dataset when a validation dataset is not provided.
    positive_label str
    Positive label for binary metrics calculation.
    primary_metric str | ClassificationPrimaryMetrics
    Primary metric for the task.
    target_column_name str
    Target column name: the column containing prediction values. Also known as the label column name in the context of classification tasks.
    test_data MLTableJobInput
    Test data input.
    test_data_size float
    The fraction of the test dataset that needs to be set aside for validation purposes. Values between (0.0, 1.0). Applied when a validation dataset is not provided.
    training_settings ClassificationTrainingSettings
    Inputs for training phase for an AutoML Job.
    validation_data MLTableJobInput
    Validation data inputs.
    validation_data_size float
    The fraction of the training dataset that needs to be set aside for validation purposes. Values between (0.0, 1.0). Applied when a validation dataset is not provided.
    weight_column_name str
    The name of the sample weight column. Automated ML supports a weighted column as an input, causing rows in the data to be weighted up or down.
    trainingData Property Map
    [Required] Training data input.
    cvSplitColumnNames List<String>
    Columns to use for CVSplit data.
    featurizationSettings Property Map
    Featurization inputs needed for AutoML job.
    limitSettings Property Map
    Execution constraints for AutoMLJob.
    logVerbosity String | "NotSet" | "Debug" | "Info" | "Warning" | "Error" | "Critical"
    Log verbosity for the job.
    nCrossValidations Property Map | Property Map
    Number of cross-validation folds to be applied to the training dataset when a validation dataset is not provided.
    positiveLabel String
    Positive label for binary metrics calculation.
    primaryMetric String | "AUCWeighted" | "Accuracy" | "NormMacroRecall" | "AveragePrecisionScoreWeighted" | "PrecisionScoreWeighted"
    Primary metric for the task.
    targetColumnName String
    Target column name: the column containing prediction values. Also known as the label column name in the context of classification tasks.
    testData Property Map
    Test data input.
    testDataSize Number
    The fraction of the test dataset that needs to be set aside for validation purposes. Values between (0.0, 1.0). Applied when a validation dataset is not provided.
    trainingSettings Property Map
    Inputs for training phase for an AutoML Job.
    validationData Property Map
    Validation data inputs.
    validationDataSize Number
    The fraction of the training dataset that needs to be set aside for validation purposes. Values between (0.0, 1.0). Applied when a validation dataset is not provided.
    weightColumnName String
    The name of the sample weight column. Automated ML supports a weighted column as an input, causing rows in the data to be weighted up or down.
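
    Tying the Classification inputs above together, the following is a hedged Pulumi Python sketch of a task_details payload for an AutoML classification job. The MLTable URI is a placeholder, only properties listed above are set, and the task_type discriminator is assumed to be required, as with the other discriminated types in this resource.

    import pulumi_azure_native as azure_native

    ml = azure_native.machinelearningservices

    classification_task = ml.ClassificationArgs(
        task_type="Classification",
        target_column_name="label",    # the label column to predict
        positive_label="true",         # used for binary metrics calculation
        primary_metric=ml.ClassificationPrimaryMetrics.AUC_WEIGHTED,
        log_verbosity="Info",
        validation_data_size=0.2,      # hold out 20% of training data when no validation set is given
        training_data=ml.MLTableJobInputArgs(
            job_input_type="mltable",
            uri="azureml://datastores/workspaceblobstore/paths/training-mltable/",  # placeholder
        ),
    )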

    ClassificationModels, ClassificationModelsArgs

    LogisticRegression
    LogisticRegressionLogistic regression is a fundamental classification technique. It belongs to the group of linear classifiers and is somewhat similar to polynomial and linear regression. Logistic regression is fast and relatively uncomplicated, and it's convenient for you to interpret the results. Although it's essentially a method for binary classification, it can also be applied to multiclass problems.
    SGD
    SGDSGD: Stochastic gradient descent is an optimization algorithm often used in machine learning applications to find the model parameters that correspond to the best fit between predicted and actual outputs.
    MultinomialNaiveBayes
    MultinomialNaiveBayesThe multinomial Naive Bayes classifier is suitable for classification with discrete features (e.g., word counts for text classification). The multinomial distribution normally requires integer feature counts. However, in practice, fractional counts such as tf-idf may also work.
    BernoulliNaiveBayes
    BernoulliNaiveBayesNaive Bayes classifier for multivariate Bernoulli models.
    SVM
    SVMA support vector machine (SVM) is a supervised machine learning model that uses classification algorithms for two-group classification problems. After an SVM model is given sets of labeled training data for each category, it can categorize new text.
    LinearSVM
    LinearSVMA support vector machine (SVM) is a supervised machine learning model that uses classification algorithms for two-group classification problems. After an SVM model is given sets of labeled training data for each category, it can categorize new text. Linear SVM performs best when the input data is linear, i.e., the data can be easily classified by drawing a straight line between classified values on a plotted graph.
    KNN
    KNNK-nearest neighbors (KNN) uses 'feature similarity' to predict the values of new data points: a new data point is assigned a value based on how closely it matches the points in the training set.
    DecisionTree
    DecisionTreeDecision Trees are a non-parametric supervised learning method used for both classification and regression tasks. The goal is to create a model that predicts the value of a target variable by learning simple decision rules inferred from the data features.
    RandomForest
    RandomForestRandom forest is a supervised learning algorithm. The "forest" it builds is an ensemble of decision trees, usually trained with the "bagging" method. The general idea of the bagging method is that a combination of learning models improves the overall result.
    ExtremeRandomTrees
    ExtremeRandomTreesExtreme Trees is an ensemble machine learning algorithm that combines the predictions from many decision trees. It is related to the widely used random forest algorithm.
    LightGBM
    LightGBMLightGBM is a gradient boosting framework that uses tree based learning algorithms.
    GradientBoosting
    GradientBoostingThe technique of converting weak learners into a strong learner is called boosting. The gradient boosting algorithm works on this principle.
    XGBoostClassifier
    XGBoostClassifierXGBoost: Extreme Gradient Boosting Algorithm. This algorithm is used for structured data where target column values can be divided into distinct class values.
    ClassificationModelsLogisticRegression
    LogisticRegressionLogistic regression is a fundamental classification technique. It belongs to the group of linear classifiers and is somewhat similar to polynomial and linear regression. Logistic regression is fast and relatively uncomplicated, and it's convenient for you to interpret the results. Although it's essentially a method for binary classification, it can also be applied to multiclass problems.
    ClassificationModelsSGD
    SGDSGD: Stochastic gradient descent is an optimization algorithm often used in machine learning applications to find the model parameters that correspond to the best fit between predicted and actual outputs.
    ClassificationModelsMultinomialNaiveBayes
    MultinomialNaiveBayesThe multinomial Naive Bayes classifier is suitable for classification with discrete features (e.g., word counts for text classification). The multinomial distribution normally requires integer feature counts. However, in practice, fractional counts such as tf-idf may also work.
    ClassificationModelsBernoulliNaiveBayes
    BernoulliNaiveBayesNaive Bayes classifier for multivariate Bernoulli models.
    ClassificationModelsSVM
    SVMA support vector machine (SVM) is a supervised machine learning model that uses classification algorithms for two-group classification problems. After an SVM model is given sets of labeled training data for each category, it can categorize new text.
    ClassificationModelsLinearSVM
    LinearSVMA support vector machine (SVM) is a supervised machine learning model that uses classification algorithms for two-group classification problems. After an SVM model is given sets of labeled training data for each category, it can categorize new text. Linear SVM performs best when the input data is linear, i.e., the data can be easily classified by drawing a straight line between classified values on a plotted graph.
    ClassificationModelsKNN
    KNNK-nearest neighbors (KNN) uses 'feature similarity' to predict the values of new data points: a new data point is assigned a value based on how closely it matches the points in the training set.
    ClassificationModelsDecisionTree
    DecisionTreeDecision Trees are a non-parametric supervised learning method used for both classification and regression tasks. The goal is to create a model that predicts the value of a target variable by learning simple decision rules inferred from the data features.
    ClassificationModelsRandomForest
    RandomForestRandom forest is a supervised learning algorithm. The "forest" it builds is an ensemble of decision trees, usually trained with the "bagging" method. The general idea of the bagging method is that a combination of learning models improves the overall result.
    ClassificationModelsExtremeRandomTrees
    ExtremeRandomTreesExtreme Trees is an ensemble machine learning algorithm that combines the predictions from many decision trees. It is related to the widely used random forest algorithm.
    ClassificationModelsLightGBM
    LightGBMLightGBM is a gradient boosting framework that uses tree based learning algorithms.
    ClassificationModelsGradientBoosting
    GradientBoostingThe technique of converting weak learners into a strong learner is called boosting. The gradient boosting algorithm works on this principle.
    ClassificationModelsXGBoostClassifier
    XGBoostClassifierXGBoost: Extreme Gradient Boosting Algorithm. This algorithm is used for structured data where target column values can be divided into distinct class values.
    LogisticRegression
    LogisticRegressionLogistic regression is a fundamental classification technique. It belongs to the group of linear classifiers and is somewhat similar to polynomial and linear regression. Logistic regression is fast and relatively uncomplicated, and it's convenient for you to interpret the results. Although it's essentially a method for binary classification, it can also be applied to multiclass problems.
    SGD
    SGDSGD: Stochastic gradient descent is an optimization algorithm often used in machine learning applications to find the model parameters that correspond to the best fit between predicted and actual outputs.
    MultinomialNaiveBayes
    MultinomialNaiveBayesThe multinomial Naive Bayes classifier is suitable for classification with discrete features (e.g., word counts for text classification). The multinomial distribution normally requires integer feature counts. However, in practice, fractional counts such as tf-idf may also work.
    BernoulliNaiveBayes
    BernoulliNaiveBayesNaive Bayes classifier for multivariate Bernoulli models.
    SVM
    SVMA support vector machine (SVM) is a supervised machine learning model that uses classification algorithms for two-group classification problems. After an SVM model is given sets of labeled training data for each category, it can categorize new text.
    LinearSVM
    LinearSVMA support vector machine (SVM) is a supervised machine learning model that uses classification algorithms for two-group classification problems. After an SVM model is given sets of labeled training data for each category, it can categorize new text. Linear SVM performs best when the input data is linear, i.e., the data can be easily classified by drawing a straight line between classified values on a plotted graph.
    KNN
    KNNK-nearest neighbors (KNN) uses 'feature similarity' to predict the values of new data points: a new data point is assigned a value based on how closely it matches the points in the training set.
    DecisionTree
    DecisionTreeDecision Trees are a non-parametric supervised learning method used for both classification and regression tasks. The goal is to create a model that predicts the value of a target variable by learning simple decision rules inferred from the data features.
    RandomForest
    RandomForestRandom forest is a supervised learning algorithm. The "forest" it builds is an ensemble of decision trees, usually trained with the "bagging" method. The general idea of the bagging method is that a combination of learning models improves the overall result.
    ExtremeRandomTrees
    ExtremeRandomTreesExtreme Trees is an ensemble machine learning algorithm that combines the predictions from many decision trees. It is related to the widely used random forest algorithm.
    LightGBM
    LightGBMLightGBM is a gradient boosting framework that uses tree based learning algorithms.
    GradientBoosting
    GradientBoostingThe technique of converting weak learners into a strong learner is called boosting. The gradient boosting algorithm works on this principle.
    XGBoostClassifier
    XGBoostClassifierXGBoost: Extreme Gradient Boosting Algorithm. This algorithm is used for structured data where target column values can be divided into distinct class values.
    LogisticRegression
    LogisticRegressionLogistic regression is a fundamental classification technique. It belongs to the group of linear classifiers and is somewhat similar to polynomial and linear regression. Logistic regression is fast and relatively uncomplicated, and it's convenient for you to interpret the results. Although it's essentially a method for binary classification, it can also be applied to multiclass problems.
    SGD
    SGDSGD: Stochastic gradient descent is an optimization algorithm often used in machine learning applications to find the model parameters that correspond to the best fit between predicted and actual outputs.
    MultinomialNaiveBayes
    MultinomialNaiveBayesThe multinomial Naive Bayes classifier is suitable for classification with discrete features (e.g., word counts for text classification). The multinomial distribution normally requires integer feature counts. However, in practice, fractional counts such as tf-idf may also work.
    BernoulliNaiveBayes
    BernoulliNaiveBayesNaive Bayes classifier for multivariate Bernoulli models.
    SVM
    SVMA support vector machine (SVM) is a supervised machine learning model that uses classification algorithms for two-group classification problems. After an SVM model is given sets of labeled training data for each category, it can categorize new text.
    LinearSVM
    LinearSVMA support vector machine (SVM) is a supervised machine learning model that uses classification algorithms for two-group classification problems. After an SVM model is given sets of labeled training data for each category, it can categorize new text. Linear SVM performs best when the input data is linear, i.e., the data can be easily classified by drawing a straight line between classified values on a plotted graph.
    KNN
    KNNK-nearest neighbors (KNN) uses 'feature similarity' to predict the values of new data points: a new data point is assigned a value based on how closely it matches the points in the training set.
    DecisionTree
    DecisionTreeDecision Trees are a non-parametric supervised learning method used for both classification and regression tasks. The goal is to create a model that predicts the value of a target variable by learning simple decision rules inferred from the data features.
    RandomForest
    RandomForestRandom forest is a supervised learning algorithm. The "forest" it builds is an ensemble of decision trees, usually trained with the "bagging" method. The general idea of the bagging method is that a combination of learning models improves the overall result.
    ExtremeRandomTrees
    ExtremeRandomTreesExtreme Trees is an ensemble machine learning algorithm that combines the predictions from many decision trees. It is related to the widely used random forest algorithm.
    LightGBM
    LightGBMLightGBM is a gradient boosting framework that uses tree based learning algorithms.
    GradientBoosting
    GradientBoostingThe technique of converting weak learners into a strong learner is called boosting. The gradient boosting algorithm works on this principle.
    XGBoostClassifier
    XGBoostClassifierXGBoost: Extreme Gradient Boosting Algorithm. This algorithm is used for structured data where target column values can be divided into distinct class values.
    LOGISTIC_REGRESSION
    LogisticRegressionLogistic regression is a fundamental classification technique. It belongs to the group of linear classifiers and is somewhat similar to polynomial and linear regression. Logistic regression is fast and relatively uncomplicated, and it's convenient for you to interpret the results. Although it's essentially a method for binary classification, it can also be applied to multiclass problems.
    SGD
    SGDSGD: Stochastic gradient descent is an optimization algorithm often used in machine learning applications to find the model parameters that correspond to the best fit between predicted and actual outputs.
    MULTINOMIAL_NAIVE_BAYES
    MultinomialNaiveBayesThe multinomial Naive Bayes classifier is suitable for classification with discrete features (e.g., word counts for text classification). The multinomial distribution normally requires integer feature counts. However, in practice, fractional counts such as tf-idf may also work.
    BERNOULLI_NAIVE_BAYES
    BernoulliNaiveBayesNaive Bayes classifier for multivariate Bernoulli models.
    SVM
    SVMA support vector machine (SVM) is a supervised machine learning model that uses classification algorithms for two-group classification problems. After an SVM model is given sets of labeled training data for each category, it can categorize new text.
    LINEAR_SVM
    LinearSVMA support vector machine (SVM) is a supervised machine learning model that uses classification algorithms for two-group classification problems. After an SVM model is given sets of labeled training data for each category, it can categorize new text. Linear SVM performs best when the input data is linear, i.e., the data can be easily classified by drawing a straight line between classified values on a plotted graph.
    KNN
    KNNK-nearest neighbors (KNN) uses 'feature similarity' to predict the values of new data points: a new data point is assigned a value based on how closely it matches the points in the training set.
    DECISION_TREE
    DecisionTreeDecision Trees are a non-parametric supervised learning method used for both classification and regression tasks. The goal is to create a model that predicts the value of a target variable by learning simple decision rules inferred from the data features.
    RANDOM_FOREST
    RandomForestRandom forest is a supervised learning algorithm. The "forest" it builds is an ensemble of decision trees, usually trained with the "bagging" method. The general idea of the bagging method is that a combination of learning models improves the overall result.
    EXTREME_RANDOM_TREES
    ExtremeRandomTreesExtreme Trees is an ensemble machine learning algorithm that combines the predictions from many decision trees. It is related to the widely used random forest algorithm.
    LIGHT_GBM
    LightGBMLightGBM is a gradient boosting framework that uses tree based learning algorithms.
    GRADIENT_BOOSTING
    GradientBoostingThe technique of converting weak learners into a strong learner is called boosting. The gradient boosting algorithm works on this principle.
    XG_BOOST_CLASSIFIER
    XGBoostClassifierXGBoost: Extreme Gradient Boosting Algorithm. This algorithm is used for structured data where target column values can be divided into distinct class values.
    "LogisticRegression"
    LogisticRegressionLogistic regression is a fundamental classification technique. It belongs to the group of linear classifiers and is somewhat similar to polynomial and linear regression. Logistic regression is fast and relatively uncomplicated, and it's convenient for you to interpret the results. Although it's essentially a method for binary classification, it can also be applied to multiclass problems.
    "SGD"
    SGDSGD: Stochastic gradient descent is an optimization algorithm often used in machine learning applications to find the model parameters that correspond to the best fit between predicted and actual outputs.
    "MultinomialNaiveBayes"
    MultinomialNaiveBayesThe multinomial Naive Bayes classifier is suitable for classification with discrete features (e.g., word counts for text classification). The multinomial distribution normally requires integer feature counts. However, in practice, fractional counts such as tf-idf may also work.
    "BernoulliNaiveBayes"
    BernoulliNaiveBayesNaive Bayes classifier for multivariate Bernoulli models.
    "SVM"
    SVMA support vector machine (SVM) is a supervised machine learning model that uses classification algorithms for two-group classification problems. After an SVM model is given sets of labeled training data for each category, it can categorize new text.
    "LinearSVM"
    LinearSVMA support vector machine (SVM) is a supervised machine learning model that uses classification algorithms for two-group classification problems. After an SVM model is given sets of labeled training data for each category, it can categorize new text. Linear SVM performs best when the input data is linear, i.e., the data can be easily classified by drawing a straight line between classified values on a plotted graph.
    "KNN"
    KNNK-nearest neighbors (KNN) uses 'feature similarity' to predict the values of new data points: a new data point is assigned a value based on how closely it matches the points in the training set.
    "DecisionTree"
    DecisionTreeDecision Trees are a non-parametric supervised learning method used for both classification and regression tasks. The goal is to create a model that predicts the value of a target variable by learning simple decision rules inferred from the data features.
    "RandomForest"
    RandomForestRandom forest is a supervised learning algorithm. The "forest" it builds is an ensemble of decision trees, usually trained with the "bagging" method. The general idea of the bagging method is that a combination of learning models improves the overall result.
    "ExtremeRandomTrees"
    ExtremeRandomTreesExtreme Trees is an ensemble machine learning algorithm that combines the predictions from many decision trees. It is related to the widely used random forest algorithm.
    "LightGBM"
    LightGBMLightGBM is a gradient boosting framework that uses tree based learning algorithms.
    "GradientBoosting"
    GradientBoostingThe technique of converting weak learners into a strong learner is called boosting. The gradient boosting algorithm works on this principle.
    "XGBoostClassifier"
    XGBoostClassifierXGBoost: Extreme Gradient Boosting Algorithm. This algorithm is used for structured data where target column values can be divided into distinct class values.
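
    These model names are the values used to restrict which learners AutoML may try. The Pulumi Python sketch below assumes, based on the Azure ML REST API, that the classification training settings type exposes an allowed_training_algorithms list (with a complementary blocked_training_algorithms list); typically only one of the two is set.

    import pulumi_azure_native as azure_native

    ml = azure_native.machinelearningservices

    # allowed_training_algorithms is an assumed property name on ClassificationTrainingSettingsArgs;
    # the values are the ClassificationModels constants documented above.
    training_settings = ml.ClassificationTrainingSettingsArgs(
        allowed_training_algorithms=[
            ml.ClassificationModels.LIGHT_GBM,
            ml.ClassificationModels.XG_BOOST_CLASSIFIER,
        ],
    )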

    ClassificationMultilabelPrimaryMetrics, ClassificationMultilabelPrimaryMetricsArgs

    AUCWeighted
    AUCWeightedAUC is the Area under the curve. This metric represents the arithmetic mean of the score for each class, weighted by the number of true instances in each class.
    Accuracy
    AccuracyAccuracy is the ratio of predictions that exactly match the true class labels.
    NormMacroRecall
    NormMacroRecallNormalized macro recall is recall macro-averaged and normalized, so that random performance has a score of 0, and perfect performance has a score of 1.
    AveragePrecisionScoreWeighted
    AveragePrecisionScoreWeightedThe arithmetic mean of the average precision score for each class, weighted by the number of true instances in each class.
    PrecisionScoreWeighted
    PrecisionScoreWeightedThe arithmetic mean of precision for each class, weighted by the number of true instances in each class.
    IOU
    IOUIntersection Over Union. Intersection of predictions divided by union of predictions.
    ClassificationMultilabelPrimaryMetricsAUCWeighted
    AUCWeightedAUC is the Area under the curve. This metric represents the arithmetic mean of the score for each class, weighted by the number of true instances in each class.
    ClassificationMultilabelPrimaryMetricsAccuracy
    AccuracyAccuracy is the ratio of predictions that exactly match the true class labels.
    ClassificationMultilabelPrimaryMetricsNormMacroRecall
    NormMacroRecallNormalized macro recall is recall macro-averaged and normalized, so that random performance has a score of 0, and perfect performance has a score of 1.
    ClassificationMultilabelPrimaryMetricsAveragePrecisionScoreWeighted
    AveragePrecisionScoreWeightedThe arithmetic mean of the average precision score for each class, weighted by the number of true instances in each class.
    ClassificationMultilabelPrimaryMetricsPrecisionScoreWeighted
    PrecisionScoreWeightedThe arithmetic mean of precision for each class, weighted by the number of true instances in each class.
    ClassificationMultilabelPrimaryMetricsIOU
    IOUIntersection Over Union. Intersection of predictions divided by union of predictions.
    AUCWeighted
    AUCWeightedAUC is the Area under the curve. This metric represents the arithmetic mean of the score for each class, weighted by the number of true instances in each class.
    Accuracy
    AccuracyAccuracy is the ratio of predictions that exactly match the true class labels.
    NormMacroRecall
    NormMacroRecallNormalized macro recall is recall macro-averaged and normalized, so that random performance has a score of 0, and perfect performance has a score of 1.
    AveragePrecisionScoreWeighted
    AveragePrecisionScoreWeightedThe arithmetic mean of the average precision score for each class, weighted by the number of true instances in each class.
    PrecisionScoreWeighted
    PrecisionScoreWeightedThe arithmetic mean of precision for each class, weighted by the number of true instances in each class.
    IOU
    IOUIntersection Over Union. Intersection of predictions divided by union of predictions.
    AUCWeighted
    AUCWeightedAUC is the Area under the curve. This metric represents the arithmetic mean of the score for each class, weighted by the number of true instances in each class.
    Accuracy
    AccuracyAccuracy is the ratio of predictions that exactly match the true class labels.
    NormMacroRecall
    NormMacroRecallNormalized macro recall is recall macro-averaged and normalized, so that random performance has a score of 0, and perfect performance has a score of 1.
    AveragePrecisionScoreWeighted
    AveragePrecisionScoreWeightedThe arithmetic mean of the average precision score for each class, weighted by the number of true instances in each class.
    PrecisionScoreWeighted
    PrecisionScoreWeightedThe arithmetic mean of precision for each class, weighted by the number of true instances in each class.
    IOU
    IOUIntersection Over Union. Intersection of predictions divided by union of predictions.
    AUC_WEIGHTED
    AUCWeightedAUC is the Area under the curve. This metric represents the arithmetic mean of the score for each class, weighted by the number of true instances in each class.
    ACCURACY
    AccuracyAccuracy is the ratio of predictions that exactly match the true class labels.
    NORM_MACRO_RECALL
    NormMacroRecallNormalized macro recall is recall macro-averaged and normalized, so that random performance has a score of 0, and perfect performance has a score of 1.
    AVERAGE_PRECISION_SCORE_WEIGHTED
    AveragePrecisionScoreWeightedThe arithmetic mean of the average precision score for each class, weighted by the number of true instances in each class.
    PRECISION_SCORE_WEIGHTED
    PrecisionScoreWeightedThe arithmetic mean of precision for each class, weighted by the number of true instances in each class.
    IOU
    IOUIntersection Over Union. Intersection of predictions divided by union of predictions.
    "AUCWeighted"
    AUCWeightedAUC is the Area under the curve. This metric represents the arithmetic mean of the score for each class, weighted by the number of true instances in each class.
    "Accuracy"
    AccuracyAccuracy is the ratio of predictions that exactly match the true class labels.
    "NormMacroRecall"
    NormMacroRecallNormalized macro recall is recall macro-averaged and normalized, so that random performance has a score of 0, and perfect performance has a score of 1.
    "AveragePrecisionScoreWeighted"
    AveragePrecisionScoreWeightedThe arithmetic mean of the average precision score for each class, weighted by the number of true instances in each class.
    "PrecisionScoreWeighted"
    PrecisionScoreWeightedThe arithmetic mean of precision for each class, weighted by number of true instances in each class.
    "IOU"
    IOUIntersection Over Union. Intersection of predictions divided by union of predictions.

    ClassificationPrimaryMetrics, ClassificationPrimaryMetricsArgs

    AUCWeighted
    AUC is the Area under the curve. This metric represents the arithmetic mean of the score for each class, weighted by the number of true instances in each class.
    Accuracy
    Accuracy is the ratio of predictions that exactly match the true class labels.
    NormMacroRecall
    Normalized macro recall is recall macro-averaged and normalized, so that random performance has a score of 0, and perfect performance has a score of 1.
    AveragePrecisionScoreWeighted
    The arithmetic mean of the average precision score for each class, weighted by the number of true instances in each class.
    PrecisionScoreWeighted
    The arithmetic mean of precision for each class, weighted by the number of true instances in each class.
    ClassificationPrimaryMetricsAUCWeighted
    AUC is the Area under the curve. This metric represents the arithmetic mean of the score for each class, weighted by the number of true instances in each class.
    ClassificationPrimaryMetricsAccuracy
    Accuracy is the ratio of predictions that exactly match the true class labels.
    ClassificationPrimaryMetricsNormMacroRecall
    Normalized macro recall is recall macro-averaged and normalized, so that random performance has a score of 0, and perfect performance has a score of 1.
    ClassificationPrimaryMetricsAveragePrecisionScoreWeighted
    The arithmetic mean of the average precision score for each class, weighted by the number of true instances in each class.
    ClassificationPrimaryMetricsPrecisionScoreWeighted
    The arithmetic mean of precision for each class, weighted by the number of true instances in each class.
    AUCWeighted
    AUC is the Area under the curve. This metric represents the arithmetic mean of the score for each class, weighted by the number of true instances in each class.
    Accuracy
    Accuracy is the ratio of predictions that exactly match the true class labels.
    NormMacroRecall
    Normalized macro recall is recall macro-averaged and normalized, so that random performance has a score of 0, and perfect performance has a score of 1.
    AveragePrecisionScoreWeighted
    The arithmetic mean of the average precision score for each class, weighted by the number of true instances in each class.
    PrecisionScoreWeighted
    The arithmetic mean of precision for each class, weighted by the number of true instances in each class.
    AUCWeighted
    AUC is the Area under the curve. This metric represents the arithmetic mean of the score for each class, weighted by the number of true instances in each class.
    Accuracy
    Accuracy is the ratio of predictions that exactly match the true class labels.
    NormMacroRecall
    Normalized macro recall is recall macro-averaged and normalized, so that random performance has a score of 0, and perfect performance has a score of 1.
    AveragePrecisionScoreWeighted
    The arithmetic mean of the average precision score for each class, weighted by the number of true instances in each class.
    PrecisionScoreWeighted
    The arithmetic mean of precision for each class, weighted by the number of true instances in each class.
    AUC_WEIGHTED
    AUC is the Area under the curve. This metric represents the arithmetic mean of the score for each class, weighted by the number of true instances in each class.
    ACCURACY
    Accuracy is the ratio of predictions that exactly match the true class labels.
    NORM_MACRO_RECALL
    Normalized macro recall is recall macro-averaged and normalized, so that random performance has a score of 0, and perfect performance has a score of 1.
    AVERAGE_PRECISION_SCORE_WEIGHTED
    The arithmetic mean of the average precision score for each class, weighted by the number of true instances in each class.
    PRECISION_SCORE_WEIGHTED
    The arithmetic mean of precision for each class, weighted by the number of true instances in each class.
    "AUCWeighted"
    AUC is the Area under the curve. This metric represents the arithmetic mean of the score for each class, weighted by the number of true instances in each class.
    "Accuracy"
    Accuracy is the ratio of predictions that exactly match the true class labels.
    "NormMacroRecall"
    Normalized macro recall is recall macro-averaged and normalized, so that random performance has a score of 0, and perfect performance has a score of 1.
    "AveragePrecisionScoreWeighted"
    The arithmetic mean of the average precision score for each class, weighted by the number of true instances in each class.
    "PrecisionScoreWeighted"
    The arithmetic mean of precision for each class, weighted by the number of true instances in each class.
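
    Whichever SDK language is used, the primary metric is sent as one of the string values above. A minimal TypeScript sketch; the enum export path is assumed to follow the same pattern as the package's other enums:

    import * as azure_native from "@pulumi/azure-native";

    // Either the SDK enum member or the raw string value can be used for a
    // classification task's primary metric; both resolve to "AUCWeighted".
    const primaryMetric: string =
        azure_native.machinelearningservices.ClassificationPrimaryMetrics.AUCWeighted;
    // const primaryMetric = "Accuracy";   // the plain string form works as well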

    ClassificationResponse, ClassificationResponseArgs

    TrainingData Pulumi.AzureNative.MachineLearningServices.Inputs.MLTableJobInputResponse
    [Required] Training data input.
    CvSplitColumnNames List<string>
    Columns to use for CVSplit data.
    FeaturizationSettings Pulumi.AzureNative.MachineLearningServices.Inputs.TableVerticalFeaturizationSettingsResponse
    Featurization inputs needed for AutoML job.
    LimitSettings Pulumi.AzureNative.MachineLearningServices.Inputs.TableVerticalLimitSettingsResponse
    Execution constraints for AutoMLJob.
    LogVerbosity string
    Log verbosity for the job.
    NCrossValidations Pulumi.AzureNative.MachineLearningServices.Inputs.AutoNCrossValidationsResponse | Pulumi.AzureNative.MachineLearningServices.Inputs.CustomNCrossValidationsResponse
    Number of cross validation folds to be applied on training dataset when validation dataset is not provided.
    PositiveLabel string
    Positive label for binary metrics calculation.
    PrimaryMetric string
    Primary metric for the task.
    TargetColumnName string
    Target column name: this is the prediction values column, also known as the label column name in the context of classification tasks.
    TestData Pulumi.AzureNative.MachineLearningServices.Inputs.MLTableJobInputResponse
    Test data input.
    TestDataSize double
    The fraction of the test dataset that needs to be set aside for validation purposes. Values between (0.0, 1.0). Applied when the validation dataset is not provided.
    TrainingSettings Pulumi.AzureNative.MachineLearningServices.Inputs.ClassificationTrainingSettingsResponse
    Inputs for the training phase of an AutoML job.
    ValidationData Pulumi.AzureNative.MachineLearningServices.Inputs.MLTableJobInputResponse
    Validation data inputs.
    ValidationDataSize double
    The fraction of the training dataset that needs to be set aside for validation purposes. Values between (0.0, 1.0). Applied when the validation dataset is not provided.
    WeightColumnName string
    The name of the sample weight column. Automated ML supports a weighted column as an input, causing rows in the data to be weighted up or down.
    TrainingData MLTableJobInputResponse
    [Required] Training data input.
    CvSplitColumnNames []string
    Columns to use for CVSplit data.
    FeaturizationSettings TableVerticalFeaturizationSettingsResponse
    Featurization inputs needed for AutoML job.
    LimitSettings TableVerticalLimitSettingsResponse
    Execution constraints for AutoMLJob.
    LogVerbosity string
    Log verbosity for the job.
    NCrossValidations AutoNCrossValidationsResponse | CustomNCrossValidationsResponse
    Number of cross validation folds to be applied on training dataset when validation dataset is not provided.
    PositiveLabel string
    Positive label for binary metrics calculation.
    PrimaryMetric string
    Primary metric for the task.
    TargetColumnName string
    Target column name: this is the prediction values column, also known as the label column name in the context of classification tasks.
    TestData MLTableJobInputResponse
    Test data input.
    TestDataSize float64
    The fraction of the test dataset that needs to be set aside for validation purposes. Values between (0.0, 1.0). Applied when the validation dataset is not provided.
    TrainingSettings ClassificationTrainingSettingsResponse
    Inputs for the training phase of an AutoML job.
    ValidationData MLTableJobInputResponse
    Validation data inputs.
    ValidationDataSize float64
    The fraction of the training dataset that needs to be set aside for validation purposes. Values between (0.0, 1.0). Applied when the validation dataset is not provided.
    WeightColumnName string
    The name of the sample weight column. Automated ML supports a weighted column as an input, causing rows in the data to be weighted up or down.
    trainingData MLTableJobInputResponse
    [Required] Training data input.
    cvSplitColumnNames List<String>
    Columns to use for CVSplit data.
    featurizationSettings TableVerticalFeaturizationSettingsResponse
    Featurization inputs needed for AutoML job.
    limitSettings TableVerticalLimitSettingsResponse
    Execution constraints for AutoMLJob.
    logVerbosity String
    Log verbosity for the job.
    nCrossValidations AutoNCrossValidationsResponse | CustomNCrossValidationsResponse
    Number of cross validation folds to be applied on training dataset when validation dataset is not provided.
    positiveLabel String
    Positive label for binary metrics calculation.
    primaryMetric String
    Primary metric for the task.
    targetColumnName String
    Target column name: this is the prediction values column, also known as the label column name in the context of classification tasks.
    testData MLTableJobInputResponse
    Test data input.
    testDataSize Double
    The fraction of the test dataset that needs to be set aside for validation purposes. Values between (0.0, 1.0). Applied when the validation dataset is not provided.
    trainingSettings ClassificationTrainingSettingsResponse
    Inputs for the training phase of an AutoML job.
    validationData MLTableJobInputResponse
    Validation data inputs.
    validationDataSize Double
    The fraction of the training dataset that needs to be set aside for validation purposes. Values between (0.0, 1.0). Applied when the validation dataset is not provided.
    weightColumnName String
    The name of the sample weight column. Automated ML supports a weighted column as an input, causing rows in the data to be weighted up or down.
    trainingData MLTableJobInputResponse
    [Required] Training data input.
    cvSplitColumnNames string[]
    Columns to use for CVSplit data.
    featurizationSettings TableVerticalFeaturizationSettingsResponse
    Featurization inputs needed for AutoML job.
    limitSettings TableVerticalLimitSettingsResponse
    Execution constraints for AutoMLJob.
    logVerbosity string
    Log verbosity for the job.
    nCrossValidations AutoNCrossValidationsResponse | CustomNCrossValidationsResponse
    Number of cross validation folds to be applied on training dataset when validation dataset is not provided.
    positiveLabel string
    Positive label for binary metrics calculation.
    primaryMetric string
    Primary metric for the task.
    targetColumnName string
    Target column name: this is the prediction values column, also known as the label column name in the context of classification tasks.
    testData MLTableJobInputResponse
    Test data input.
    testDataSize number
    The fraction of the test dataset that needs to be set aside for validation purposes. Values between (0.0, 1.0). Applied when the validation dataset is not provided.
    trainingSettings ClassificationTrainingSettingsResponse
    Inputs for the training phase of an AutoML job.
    validationData MLTableJobInputResponse
    Validation data inputs.
    validationDataSize number
    The fraction of the training dataset that needs to be set aside for validation purposes. Values between (0.0, 1.0). Applied when the validation dataset is not provided.
    weightColumnName string
    The name of the sample weight column. Automated ML supports a weighted column as an input, causing rows in the data to be weighted up or down.
    training_data MLTableJobInputResponse
    [Required] Training data input.
    cv_split_column_names Sequence[str]
    Columns to use for CVSplit data.
    featurization_settings TableVerticalFeaturizationSettingsResponse
    Featurization inputs needed for AutoML job.
    limit_settings TableVerticalLimitSettingsResponse
    Execution constraints for AutoMLJob.
    log_verbosity str
    Log verbosity for the job.
    n_cross_validations AutoNCrossValidationsResponse | CustomNCrossValidationsResponse
    Number of cross validation folds to be applied on training dataset when validation dataset is not provided.
    positive_label str
    Positive label for binary metrics calculation.
    primary_metric str
    Primary metric for the task.
    target_column_name str
    Target column name: this is the prediction values column, also known as the label column name in the context of classification tasks.
    test_data MLTableJobInputResponse
    Test data input.
    test_data_size float
    The fraction of the test dataset that needs to be set aside for validation purposes. Values between (0.0, 1.0). Applied when the validation dataset is not provided.
    training_settings ClassificationTrainingSettingsResponse
    Inputs for the training phase of an AutoML job.
    validation_data MLTableJobInputResponse
    Validation data inputs.
    validation_data_size float
    The fraction of the training dataset that needs to be set aside for validation purposes. Values between (0.0, 1.0). Applied when the validation dataset is not provided.
    weight_column_name str
    The name of the sample weight column. Automated ML supports a weighted column as an input, causing rows in the data to be weighted up or down.
    trainingData Property Map
    [Required] Training data input.
    cvSplitColumnNames List<String>
    Columns to use for CVSplit data.
    featurizationSettings Property Map
    Featurization inputs needed for AutoML job.
    limitSettings Property Map
    Execution constraints for AutoMLJob.
    logVerbosity String
    Log verbosity for the job.
    nCrossValidations Property Map | Property Map
    Number of cross validation folds to be applied on training dataset when validation dataset is not provided.
    positiveLabel String
    Positive label for binary metrics calculation.
    primaryMetric String
    Primary metric for the task.
    targetColumnName String
    Target column name: this is the prediction values column, also known as the label column name in the context of classification tasks.
    testData Property Map
    Test data input.
    testDataSize Number
    The fraction of the test dataset that needs to be set aside for validation purposes. Values between (0.0, 1.0). Applied when the validation dataset is not provided.
    trainingSettings Property Map
    Inputs for the training phase of an AutoML job.
    validationData Property Map
    Validation data inputs.
    validationDataSize Number
    The fraction of the training dataset that needs to be set aside for validation purposes. Values between (0.0, 1.0). Applied when the validation dataset is not provided.
    weightColumnName String
    The name of the sample weight column. Automated ML supports a weighted column as an input, causing rows in the data to be weighted up or down.
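
    The input-side counterpart of this response type is the Classification task supplied as an AutoML job's task details. A minimal, hypothetical TypeScript sketch using only field names from the listing above; the taskType and jobInputType discriminator values, the datastore path, and the column name are assumptions or placeholders:

    // Hypothetical classification task configuration; not a definitive
    // implementation. Discriminator values and URIs are assumed/placeholder.
    const classificationTask = {
        taskType: "Classification",          // assumed discriminator value
        trainingData: {
            jobInputType: "mltable",         // assumed MLTable input discriminator
            uri: "azureml://datastores/workspaceblobstore/paths/train/",  // placeholder
        },
        targetColumnName: "label",           // placeholder label column
        primaryMetric: "AUCWeighted",
        validationDataSize: 0.2,             // hold out 20% of the training data
        nCrossValidations: { mode: "Auto" }, // assumed AutoNCrossValidations shape
    };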

    ClassificationTrainingSettings, ClassificationTrainingSettingsArgs

    AllowedTrainingAlgorithms List<Union<string, Pulumi.AzureNative.MachineLearningServices.ClassificationModels>>
    Allowed models for classification task.
    BlockedTrainingAlgorithms List<Union<string, Pulumi.AzureNative.MachineLearningServices.ClassificationModels>>
    Blocked models for classification task.
    EnableDnnTraining bool
    Enable recommendation of DNN models.
    EnableModelExplainability bool
    Flag to turn on explainability on best model.
    EnableOnnxCompatibleModels bool
    Flag for enabling onnx compatible models.
    EnableStackEnsemble bool
    Enable stack ensemble run.
    EnableVoteEnsemble bool
    Enable voting ensemble run.
    EnsembleModelDownloadTimeout string
    During VotingEnsemble and StackEnsemble model generation, multiple fitted models from the previous child runs are downloaded. Configure this parameter with a value higher than 300 seconds if more time is needed.
    StackEnsembleSettings Pulumi.AzureNative.MachineLearningServices.Inputs.StackEnsembleSettings
    Stack ensemble settings for stack ensemble run.
    AllowedTrainingAlgorithms []string
    Allowed models for classification task.
    BlockedTrainingAlgorithms []string
    Blocked models for classification task.
    EnableDnnTraining bool
    Enable recommendation of DNN models.
    EnableModelExplainability bool
    Flag to turn on explainability on best model.
    EnableOnnxCompatibleModels bool
    Flag for enabling onnx compatible models.
    EnableStackEnsemble bool
    Enable stack ensemble run.
    EnableVoteEnsemble bool
    Enable voting ensemble run.
    EnsembleModelDownloadTimeout string
    During VotingEnsemble and StackEnsemble model generation, multiple fitted models from the previous child runs are downloaded. Configure this parameter with a value higher than 300 seconds if more time is needed.
    StackEnsembleSettings StackEnsembleSettings
    Stack ensemble settings for stack ensemble run.
    allowedTrainingAlgorithms List<Either<String,ClassificationModels>>
    Allowed models for classification task.
    blockedTrainingAlgorithms List<Either<String,ClassificationModels>>
    Blocked models for classification task.
    enableDnnTraining Boolean
    Enable recommendation of DNN models.
    enableModelExplainability Boolean
    Flag to turn on explainability on best model.
    enableOnnxCompatibleModels Boolean
    Flag for enabling onnx compatible models.
    enableStackEnsemble Boolean
    Enable stack ensemble run.
    enableVoteEnsemble Boolean
    Enable voting ensemble run.
    ensembleModelDownloadTimeout String
    During VotingEnsemble and StackEnsemble model generation, multiple fitted models from the previous child runs are downloaded. Configure this parameter with a value higher than 300 seconds if more time is needed.
    stackEnsembleSettings StackEnsembleSettings
    Stack ensemble settings for stack ensemble run.
    allowedTrainingAlgorithms (string | ClassificationModels)[]
    Allowed models for classification task.
    blockedTrainingAlgorithms (string | ClassificationModels)[]
    Blocked models for classification task.
    enableDnnTraining boolean
    Enable recommendation of DNN models.
    enableModelExplainability boolean
    Flag to turn on explainability on best model.
    enableOnnxCompatibleModels boolean
    Flag for enabling onnx compatible models.
    enableStackEnsemble boolean
    Enable stack ensemble run.
    enableVoteEnsemble boolean
    Enable voting ensemble run.
    ensembleModelDownloadTimeout string
    During VotingEnsemble and StackEnsemble model generation, multiple fitted models from the previous child runs are downloaded. Configure this parameter with a value higher than 300 seconds if more time is needed.
    stackEnsembleSettings StackEnsembleSettings
    Stack ensemble settings for stack ensemble run.
    allowed_training_algorithms Sequence[Union[str, ClassificationModels]]
    Allowed models for classification task.
    blocked_training_algorithms Sequence[Union[str, ClassificationModels]]
    Blocked models for classification task.
    enable_dnn_training bool
    Enable recommendation of DNN models.
    enable_model_explainability bool
    Flag to turn on explainability on best model.
    enable_onnx_compatible_models bool
    Flag for enabling onnx compatible models.
    enable_stack_ensemble bool
    Enable stack ensemble run.
    enable_vote_ensemble bool
    Enable voting ensemble run.
    ensemble_model_download_timeout str
    During VotingEnsemble and StackEnsemble model generation, multiple fitted models from the previous child runs are downloaded. Configure this parameter with a value higher than 300 seconds if more time is needed.
    stack_ensemble_settings StackEnsembleSettings
    Stack ensemble settings for stack ensemble run.
    allowedTrainingAlgorithms List<String | "LogisticRegression" | "SGD" | "MultinomialNaiveBayes" | "BernoulliNaiveBayes" | "SVM" | "LinearSVM" | "KNN" | "DecisionTree" | "RandomForest" | "ExtremeRandomTrees" | "LightGBM" | "GradientBoosting" | "XGBoostClassifier">
    Allowed models for classification task.
    blockedTrainingAlgorithms List<String | "LogisticRegression" | "SGD" | "MultinomialNaiveBayes" | "BernoulliNaiveBayes" | "SVM" | "LinearSVM" | "KNN" | "DecisionTree" | "RandomForest" | "ExtremeRandomTrees" | "LightGBM" | "GradientBoosting" | "XGBoostClassifier">
    Blocked models for classification task.
    enableDnnTraining Boolean
    Enable recommendation of DNN models.
    enableModelExplainability Boolean
    Flag to turn on explainability on best model.
    enableOnnxCompatibleModels Boolean
    Flag for enabling onnx compatible models.
    enableStackEnsemble Boolean
    Enable stack ensemble run.
    enableVoteEnsemble Boolean
    Enable voting ensemble run.
    ensembleModelDownloadTimeout String
    During VotingEnsemble and StackEnsemble model generation, multiple fitted models from the previous child runs are downloaded. Configure this parameter with a value higher than 300 seconds if more time is needed.
    stackEnsembleSettings Property Map
    Stack ensemble settings for stack ensemble run.
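
    As a rough TypeScript sketch, a training-settings value for a classification task might look like the following; the flags and model names come from the listing above, while the specific values and the ISO 8601 form of the download timeout are illustrative assumptions:

    // Hypothetical classification training settings; values are illustrative.
    const classificationTrainingSettings = {
        allowedTrainingAlgorithms: ["LightGBM", "XGBoostClassifier", "LogisticRegression"],
        blockedTrainingAlgorithms: ["KNN"],
        enableModelExplainability: true,      // explain the best model
        enableStackEnsemble: true,
        enableVoteEnsemble: true,
        ensembleModelDownloadTimeout: "PT5M", // assumed ISO 8601 duration (5 minutes)
    };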

    ClassificationTrainingSettingsResponse, ClassificationTrainingSettingsResponseArgs

    AllowedTrainingAlgorithms List<string>
    Allowed models for classification task.
    BlockedTrainingAlgorithms List<string>
    Blocked models for classification task.
    EnableDnnTraining bool
    Enable recommendation of DNN models.
    EnableModelExplainability bool
    Flag to turn on explainability on best model.
    EnableOnnxCompatibleModels bool
    Flag for enabling onnx compatible models.
    EnableStackEnsemble bool
    Enable stack ensemble run.
    EnableVoteEnsemble bool
    Enable voting ensemble run.
    EnsembleModelDownloadTimeout string
    During VotingEnsemble and StackEnsemble model generation, multiple fitted models from the previous child runs are downloaded. Configure this parameter with a value higher than 300 seconds if more time is needed.
    StackEnsembleSettings Pulumi.AzureNative.MachineLearningServices.Inputs.StackEnsembleSettingsResponse
    Stack ensemble settings for stack ensemble run.
    AllowedTrainingAlgorithms []string
    Allowed models for classification task.
    BlockedTrainingAlgorithms []string
    Blocked models for classification task.
    EnableDnnTraining bool
    Enable recommendation of DNN models.
    EnableModelExplainability bool
    Flag to turn on explainability on best model.
    EnableOnnxCompatibleModels bool
    Flag for enabling onnx compatible models.
    EnableStackEnsemble bool
    Enable stack ensemble run.
    EnableVoteEnsemble bool
    Enable voting ensemble run.
    EnsembleModelDownloadTimeout string
    During VotingEnsemble and StackEnsemble model generation, multiple fitted models from the previous child runs are downloaded. Configure this parameter with a value higher than 300 seconds if more time is needed.
    StackEnsembleSettings StackEnsembleSettingsResponse
    Stack ensemble settings for stack ensemble run.
    allowedTrainingAlgorithms List<String>
    Allowed models for classification task.
    blockedTrainingAlgorithms List<String>
    Blocked models for classification task.
    enableDnnTraining Boolean
    Enable recommendation of DNN models.
    enableModelExplainability Boolean
    Flag to turn on explainability on best model.
    enableOnnxCompatibleModels Boolean
    Flag for enabling onnx compatible models.
    enableStackEnsemble Boolean
    Enable stack ensemble run.
    enableVoteEnsemble Boolean
    Enable voting ensemble run.
    ensembleModelDownloadTimeout String
    During VotingEnsemble and StackEnsemble model generation, multiple fitted models from the previous child runs are downloaded. Configure this parameter with a value higher than 300 seconds if more time is needed.
    stackEnsembleSettings StackEnsembleSettingsResponse
    Stack ensemble settings for stack ensemble run.
    allowedTrainingAlgorithms string[]
    Allowed models for classification task.
    blockedTrainingAlgorithms string[]
    Blocked models for classification task.
    enableDnnTraining boolean
    Enable recommendation of DNN models.
    enableModelExplainability boolean
    Flag to turn on explainability on best model.
    enableOnnxCompatibleModels boolean
    Flag for enabling onnx compatible models.
    enableStackEnsemble boolean
    Enable stack ensemble run.
    enableVoteEnsemble boolean
    Enable voting ensemble run.
    ensembleModelDownloadTimeout string
    During VotingEnsemble and StackEnsemble model generation, multiple fitted models from the previous child runs are downloaded. Configure this parameter with a value higher than 300 seconds if more time is needed.
    stackEnsembleSettings StackEnsembleSettingsResponse
    Stack ensemble settings for stack ensemble run.
    allowed_training_algorithms Sequence[str]
    Allowed models for classification task.
    blocked_training_algorithms Sequence[str]
    Blocked models for classification task.
    enable_dnn_training bool
    Enable recommendation of DNN models.
    enable_model_explainability bool
    Flag to turn on explainability on best model.
    enable_onnx_compatible_models bool
    Flag for enabling onnx compatible models.
    enable_stack_ensemble bool
    Enable stack ensemble run.
    enable_vote_ensemble bool
    Enable voting ensemble run.
    ensemble_model_download_timeout str
    During VotingEnsemble and StackEnsemble model generation, multiple fitted models from the previous child runs are downloaded. Configure this parameter with a value higher than 300 seconds if more time is needed.
    stack_ensemble_settings StackEnsembleSettingsResponse
    Stack ensemble settings for stack ensemble run.
    allowedTrainingAlgorithms List<String>
    Allowed models for classification task.
    blockedTrainingAlgorithms List<String>
    Blocked models for classification task.
    enableDnnTraining Boolean
    Enable recommendation of DNN models.
    enableModelExplainability Boolean
    Flag to turn on explainability on best model.
    enableOnnxCompatibleModels Boolean
    Flag for enabling onnx compatible models.
    enableStackEnsemble Boolean
    Enable stack ensemble run.
    enableVoteEnsemble Boolean
    Enable voting ensemble run.
    ensembleModelDownloadTimeout String
    During VotingEnsemble and StackEnsemble model generation, multiple fitted models from the previous child runs are downloaded. Configure this parameter with a value higher than 300 seconds if more time is needed.
    stackEnsembleSettings Property Map
    Stack ensemble settings for stack ensemble run.

    ColumnTransformer, ColumnTransformerArgs

    Fields List<string>
    Fields to apply the transformer logic on.
    Parameters object
    Different properties to be passed to the transformer. The expected input is a dictionary of key/value pairs in JSON format.
    Fields []string
    Fields to apply the transformer logic on.
    Parameters interface{}
    Different properties to be passed to the transformer. The expected input is a dictionary of key/value pairs in JSON format.
    fields List<String>
    Fields to apply the transformer logic on.
    parameters Object
    Different properties to be passed to the transformer. The expected input is a dictionary of key/value pairs in JSON format.
    fields string[]
    Fields to apply the transformer logic on.
    parameters any
    Different properties to be passed to the transformer. The expected input is a dictionary of key/value pairs in JSON format.
    fields Sequence[str]
    Fields to apply the transformer logic on.
    parameters Any
    Different properties to be passed to the transformer. The expected input is a dictionary of key/value pairs in JSON format.
    fields List<String>
    Fields to apply the transformer logic on.
    parameters Any
    Different properties to be passed to the transformer. The expected input is a dictionary of key/value pairs in JSON format.
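
    A small TypeScript sketch of a ColumnTransformer value; the column names and parameter keys are purely illustrative:

    // Hypothetical column transformer: apply transformer logic to two columns,
    // passing extra options as a JSON-style dictionary of key/value pairs.
    const columnTransformer = {
        fields: ["age", "income"],          // placeholder column names
        parameters: { strategy: "median" }, // illustrative parameters
    };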

    ColumnTransformerResponse, ColumnTransformerResponseArgs

    Fields List<string>
    Fields to apply the transformer logic on.
    Parameters object
    Different properties to be passed to the transformer. The expected input is a dictionary of key/value pairs in JSON format.
    Fields []string
    Fields to apply the transformer logic on.
    Parameters interface{}
    Different properties to be passed to the transformer. The expected input is a dictionary of key/value pairs in JSON format.
    fields List<String>
    Fields to apply the transformer logic on.
    parameters Object
    Different properties to be passed to the transformer. The expected input is a dictionary of key/value pairs in JSON format.
    fields string[]
    Fields to apply the transformer logic on.
    parameters any
    Different properties to be passed to the transformer. The expected input is a dictionary of key/value pairs in JSON format.
    fields Sequence[str]
    Fields to apply the transformer logic on.
    parameters Any
    Different properties to be passed to the transformer. The expected input is a dictionary of key/value pairs in JSON format.
    fields List<String>
    Fields to apply the transformer logic on.
    parameters Any
    Different properties to be passed to the transformer. The expected input is a dictionary of key/value pairs in JSON format.

    CommandJob, CommandJobArgs

    Command string
    [Required] The command to execute on startup of the job, e.g. "python train.py".
    EnvironmentId string
    [Required] The ARM resource ID of the Environment specification for the job.
    CodeId string
    ARM resource ID of the code asset.
    ComponentId string
    ARM resource ID of the component resource.
    ComputeId string
    ARM resource ID of the compute resource.
    Description string
    The asset description text.
    DisplayName string
    Display name of job.
    Distribution Pulumi.AzureNative.MachineLearningServices.Inputs.Mpi | Pulumi.AzureNative.MachineLearningServices.Inputs.PyTorch | Pulumi.AzureNative.MachineLearningServices.Inputs.TensorFlow
    Distribution configuration of the job. If set, this should be one of Mpi, Tensorflow, PyTorch, or null.
    EnvironmentVariables Dictionary<string, string>
    Environment variables included in the job.
    ExperimentName string
    The name of the experiment the job belongs to. If not set, the job is placed in the "Default" experiment.
    Identity Pulumi.AzureNative.MachineLearningServices.Inputs.AmlToken | Pulumi.AzureNative.MachineLearningServices.Inputs.ManagedIdentity | Pulumi.AzureNative.MachineLearningServices.Inputs.UserIdentity
    Identity configuration. If set, this should be one of AmlToken, ManagedIdentity, UserIdentity or null. Defaults to AmlToken if null.
    Inputs Dictionary<string, object>
    Mapping of input data bindings used in the job.
    IsArchived bool
    Is the asset archived?
    Limits Pulumi.AzureNative.MachineLearningServices.Inputs.CommandJobLimits
    Command Job limit.
    Outputs Dictionary<string, object>
    Mapping of output data bindings used in the job.
    Properties Dictionary<string, string>
    The asset property dictionary.
    Resources Pulumi.AzureNative.MachineLearningServices.Inputs.JobResourceConfiguration
    Compute Resource configuration for the job.
    Services Dictionary<string, Pulumi.AzureNative.MachineLearningServices.Inputs.JobService>
    List of JobEndpoints. For local jobs, a job endpoint will have an endpoint value of FileStreamObject.
    Tags Dictionary<string, string>
    Tag dictionary. Tags can be added, removed, and updated.
    Command string
    [Required] The command to execute on startup of the job, e.g. "python train.py".
    EnvironmentId string
    [Required] The ARM resource ID of the Environment specification for the job.
    CodeId string
    ARM resource ID of the code asset.
    ComponentId string
    ARM resource ID of the component resource.
    ComputeId string
    ARM resource ID of the compute resource.
    Description string
    The asset description text.
    DisplayName string
    Display name of job.
    Distribution Mpi | PyTorch | TensorFlow
    Distribution configuration of the job. If set, this should be one of Mpi, Tensorflow, PyTorch, or null.
    EnvironmentVariables map[string]string
    Environment variables included in the job.
    ExperimentName string
    The name of the experiment the job belongs to. If not set, the job is placed in the "Default" experiment.
    Identity AmlToken | ManagedIdentity | UserIdentity
    Identity configuration. If set, this should be one of AmlToken, ManagedIdentity, UserIdentity or null. Defaults to AmlToken if null.
    Inputs map[string]interface{}
    Mapping of input data bindings used in the job.
    IsArchived bool
    Is the asset archived?
    Limits CommandJobLimits
    Command Job limit.
    Outputs map[string]interface{}
    Mapping of output data bindings used in the job.
    Properties map[string]string
    The asset property dictionary.
    Resources JobResourceConfiguration
    Compute Resource configuration for the job.
    Services map[string]JobService
    List of JobEndpoints. For local jobs, a job endpoint will have an endpoint value of FileStreamObject.
    Tags map[string]string
    Tag dictionary. Tags can be added, removed, and updated.
    command String
    [Required] The command to execute on startup of the job, e.g. "python train.py".
    environmentId String
    [Required] The ARM resource ID of the Environment specification for the job.
    codeId String
    ARM resource ID of the code asset.
    componentId String
    ARM resource ID of the component resource.
    computeId String
    ARM resource ID of the compute resource.
    description String
    The asset description text.
    displayName String
    Display name of job.
    distribution Mpi | PyTorch | TensorFlow
    Distribution configuration of the job. If set, this should be one of Mpi, Tensorflow, PyTorch, or null.
    environmentVariables Map<String,String>
    Environment variables included in the job.
    experimentName String
    The name of the experiment the job belongs to. If not set, the job is placed in the "Default" experiment.
    identity AmlToken | ManagedIdentity | UserIdentity
    Identity configuration. If set, this should be one of AmlToken, ManagedIdentity, UserIdentity or null. Defaults to AmlToken if null.
    inputs Map<String,Object>
    Mapping of input data bindings used in the job.
    isArchived Boolean
    Is the asset archived?
    limits CommandJobLimits
    Command Job limit.
    outputs Map<String,Object>
    Mapping of output data bindings used in the job.
    properties Map<String,String>
    The asset property dictionary.
    resources JobResourceConfiguration
    Compute Resource configuration for the job.
    services Map<String,JobService>
    List of JobEndpoints. For local jobs, a job endpoint will have an endpoint value of FileStreamObject.
    tags Map<String,String>
    Tag dictionary. Tags can be added, removed, and updated.
    command string
    [Required] The command to execute on startup of the job, e.g. "python train.py".
    environmentId string
    [Required] The ARM resource ID of the Environment specification for the job.
    codeId string
    ARM resource ID of the code asset.
    componentId string
    ARM resource ID of the component resource.
    computeId string
    ARM resource ID of the compute resource.
    description string
    The asset description text.
    displayName string
    Display name of job.
    distribution Mpi | PyTorch | TensorFlow
    Distribution configuration of the job. If set, this should be one of Mpi, Tensorflow, PyTorch, or null.
    environmentVariables {[key: string]: string}
    Environment variables included in the job.
    experimentName string
    The name of the experiment the job belongs to. If not set, the job is placed in the "Default" experiment.
    identity AmlToken | ManagedIdentity | UserIdentity
    Identity configuration. If set, this should be one of AmlToken, ManagedIdentity, UserIdentity or null. Defaults to AmlToken if null.
    inputs {[key: string]: CustomModelJobInput | LiteralJobInput | MLFlowModelJobInput | MLTableJobInput | TritonModelJobInput | UriFileJobInput | UriFolderJobInput}
    Mapping of input data bindings used in the job.
    isArchived boolean
    Is the asset archived?
    limits CommandJobLimits
    Command Job limit.
    outputs {[key: string]: CustomModelJobOutput | MLFlowModelJobOutput | MLTableJobOutput | TritonModelJobOutput | UriFileJobOutput | UriFolderJobOutput}
    Mapping of output data bindings used in the job.
    properties {[key: string]: string}
    The asset property dictionary.
    resources JobResourceConfiguration
    Compute Resource configuration for the job.
    services {[key: string]: JobService}
    List of JobEndpoints. For local jobs, a job endpoint will have an endpoint value of FileStreamObject.
    tags {[key: string]: string}
    Tag dictionary. Tags can be added, removed, and updated.
    command str
    [Required] The command to execute on startup of the job, e.g. "python train.py".
    environment_id str
    [Required] The ARM resource ID of the Environment specification for the job.
    code_id str
    ARM resource ID of the code asset.
    component_id str
    ARM resource ID of the component resource.
    compute_id str
    ARM resource ID of the compute resource.
    description str
    The asset description text.
    display_name str
    Display name of job.
    distribution Mpi | PyTorch | TensorFlow
    Distribution configuration of the job. If set, this should be one of Mpi, Tensorflow, PyTorch, or null.
    environment_variables Mapping[str, str]
    Environment variables included in the job.
    experiment_name str
    The name of the experiment the job belongs to. If not set, the job is placed in the "Default" experiment.
    identity AmlToken | ManagedIdentity | UserIdentity
    Identity configuration. If set, this should be one of AmlToken, ManagedIdentity, UserIdentity or null. Defaults to AmlToken if null.
    inputs Mapping[str, Union[CustomModelJobInput, LiteralJobInput, MLFlowModelJobInput, MLTableJobInput, TritonModelJobInput, UriFileJobInput, UriFolderJobInput]]
    Mapping of input data bindings used in the job.
    is_archived bool
    Is the asset archived?
    limits CommandJobLimits
    Command Job limit.
    outputs Mapping[str, Union[CustomModelJobOutput, MLFlowModelJobOutput, MLTableJobOutput, TritonModelJobOutput, UriFileJobOutput, UriFolderJobOutput]]
    Mapping of output data bindings used in the job.
    properties Mapping[str, str]
    The asset property dictionary.
    resources JobResourceConfiguration
    Compute Resource configuration for the job.
    services Mapping[str, JobService]
    List of JobEndpoints. For local jobs, a job endpoint will have an endpoint value of FileStreamObject.
    tags Mapping[str, str]
    Tag dictionary. Tags can be added, removed, and updated.
    command String
    [Required] The command to execute on startup of the job, e.g. "python train.py".
    environmentId String
    [Required] The ARM resource ID of the Environment specification for the job.
    codeId String
    ARM resource ID of the code asset.
    componentId String
    ARM resource ID of the component resource.
    computeId String
    ARM resource ID of the compute resource.
    description String
    The asset description text.
    displayName String
    Display name of job.
    distribution Property Map | Property Map | Property Map
    Distribution configuration of the job. If set, this should be one of Mpi, Tensorflow, PyTorch, or null.
    environmentVariables Map<String>
    Environment variables included in the job.
    experimentName String
    The name of the experiment the job belongs to. If not set, the job is placed in the "Default" experiment.
    identity Property Map | Property Map | Property Map
    Identity configuration. If set, this should be one of AmlToken, ManagedIdentity, UserIdentity or null. Defaults to AmlToken if null.
    inputs Map<Property Map | Property Map | Property Map | Property Map | Property Map | Property Map | Property Map>
    Mapping of input data bindings used in the job.
    isArchived Boolean
    Is the asset archived?
    limits Property Map
    Command Job limit.
    outputs Map<Property Map | Property Map | Property Map | Property Map | Property Map | Property Map>
    Mapping of output data bindings used in the job.
    properties Map<String>
    The asset property dictionary.
    resources Property Map
    Compute Resource configuration for the job.
    services Map<Property Map>
    List of JobEndpoints. For local jobs, a job endpoint will have an endpoint value of FileStreamObject.
    tags Map<String>
    Tag dictionary. Tags can be added, removed, and updated.
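
    As a hedged TypeScript sketch, a command job's properties could be assembled as follows; the jobType and jobInputType discriminator values, the ARM IDs, and the instance type are assumptions or placeholders rather than a definitive implementation:

    // Hypothetical command job properties using field names from the listing above.
    const commandJobProperties = {
        jobType: "Command",                  // assumed discriminator value
        command: "python train.py --epochs 10",
        environmentId: "/subscriptions/.../environments/my-env/versions/1",  // placeholder ARM ID
        computeId: "/subscriptions/.../computes/cpu-cluster",                // placeholder ARM ID
        experimentName: "command-job-demo",
        environmentVariables: { MY_FLAG: "1" },
        inputs: {
            trainingFolder: {
                jobInputType: "uri_folder",  // assumed folder-input discriminator
                uri: "azureml://datastores/workspaceblobstore/paths/data/",  // placeholder
            },
        },
        limits: { timeout: "PT2H" },         // see CommandJobLimits below
        resources: { instanceCount: 1, instanceType: "Standard_DS3_v2" },    // illustrative
    };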

    CommandJobLimits, CommandJobLimitsArgs

    Timeout string
    The max run duration in ISO 8601 format, after which the job will be cancelled. Only supports duration with precision as low as Seconds.
    Timeout string
    The max run duration in ISO 8601 format, after which the job will be cancelled. Only supports duration with precision as low as Seconds.
    timeout String
    The max run duration in ISO 8601 format, after which the job will be cancelled. Only supports duration with precision as low as Seconds.
    timeout string
    The max run duration in ISO 8601 format, after which the job will be cancelled. Only supports duration with precision as low as Seconds.
    timeout str
    The max run duration in ISO 8601 format, after which the job will be cancelled. Only supports duration with precision as low as Seconds.
    timeout String
    The max run duration in ISO 8601 format, after which the job will be cancelled. Only supports duration with precision as low as Seconds.
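
    The timeout is an ISO 8601 duration string; a few illustrative values:

    // ISO 8601 duration examples for the timeout field.
    const thirtyMinutes = "PT30M";
    const twoHours = "PT2H";
    const oneDay = "P1D";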

    CommandJobLimitsResponse, CommandJobLimitsResponseArgs

    Timeout string
    The max run duration in ISO 8601 format, after which the job will be cancelled. Only supports duration with precision as low as Seconds.
    Timeout string
    The max run duration in ISO 8601 format, after which the job will be cancelled. Only supports duration with precision as low as Seconds.
    timeout String
    The max run duration in ISO 8601 format, after which the job will be cancelled. Only supports duration with precision as low as Seconds.
    timeout string
    The max run duration in ISO 8601 format, after which the job will be cancelled. Only supports duration with precision as low as Seconds.
    timeout str
    The max run duration in ISO 8601 format, after which the job will be cancelled. Only supports duration with precision as low as Seconds.
    timeout String
    The max run duration in ISO 8601 format, after which the job will be cancelled. Only supports duration with precision as low as Seconds.

    CommandJobResponse, CommandJobResponseArgs

    Command string
    [Required] The command to execute on startup of the job, e.g. "python train.py".
    EnvironmentId string
    [Required] The ARM resource ID of the Environment specification for the job.
    Parameters object
    Input parameters.
    Status string
    Status of the job.
    CodeId string
    ARM resource ID of the code asset.
    ComponentId string
    ARM resource ID of the component resource.
    ComputeId string
    ARM resource ID of the compute resource.
    Description string
    The asset description text.
    DisplayName string
    Display name of job.
    Distribution Pulumi.AzureNative.MachineLearningServices.Inputs.MpiResponse | Pulumi.AzureNative.MachineLearningServices.Inputs.PyTorchResponse | Pulumi.AzureNative.MachineLearningServices.Inputs.TensorFlowResponse
    Distribution configuration of the job. If set, this should be one of Mpi, Tensorflow, PyTorch, or null.
    EnvironmentVariables Dictionary<string, string>
    Environment variables included in the job.
    ExperimentName string
    The name of the experiment the job belongs to. If not set, the job is placed in the "Default" experiment.
    Identity Pulumi.AzureNative.MachineLearningServices.Inputs.AmlTokenResponse | Pulumi.AzureNative.MachineLearningServices.Inputs.ManagedIdentityResponse | Pulumi.AzureNative.MachineLearningServices.Inputs.UserIdentityResponse
    Identity configuration. If set, this should be one of AmlToken, ManagedIdentity, UserIdentity or null. Defaults to AmlToken if null.
    Inputs Dictionary<string, object>
    Mapping of input data bindings used in the job.
    IsArchived bool
    Is the asset archived?
    Limits Pulumi.AzureNative.MachineLearningServices.Inputs.CommandJobLimitsResponse
    Command Job limit.
    Outputs Dictionary<string, object>
    Mapping of output data bindings used in the job.
    Properties Dictionary<string, string>
    The asset property dictionary.
    Resources Pulumi.AzureNative.MachineLearningServices.Inputs.JobResourceConfigurationResponse
    Compute Resource configuration for the job.
    Services Dictionary<string, Pulumi.AzureNative.MachineLearningServices.Inputs.JobServiceResponse>
    List of JobEndpoints. For local jobs, a job endpoint will have an endpoint value of FileStreamObject.
    Tags Dictionary<string, string>
    Tag dictionary. Tags can be added, removed, and updated.
    Command string
    [Required] The command to execute on startup of the job, e.g. "python train.py".
    EnvironmentId string
    [Required] The ARM resource ID of the Environment specification for the job.
    Parameters interface{}
    Input parameters.
    Status string
    Status of the job.
    CodeId string
    ARM resource ID of the code asset.
    ComponentId string
    ARM resource ID of the component resource.
    ComputeId string
    ARM resource ID of the compute resource.
    Description string
    The asset description text.
    DisplayName string
    Display name of job.
    Distribution MpiResponse | PyTorchResponse | TensorFlowResponse
    Distribution configuration of the job. If set, this should be one of Mpi, Tensorflow, PyTorch, or null.
    EnvironmentVariables map[string]string
    Environment variables included in the job.
    ExperimentName string
    The name of the experiment the job belongs to. If not set, the job is placed in the "Default" experiment.
    Identity AmlTokenResponse | ManagedIdentityResponse | UserIdentityResponse
    Identity configuration. If set, this should be one of AmlToken, ManagedIdentity, UserIdentity or null. Defaults to AmlToken if null.
    Inputs map[string]interface{}
    Mapping of input data bindings used in the job.
    IsArchived bool
    Is the asset archived?
    Limits CommandJobLimitsResponse
    Command Job limit.
    Outputs map[string]interface{}
    Mapping of output data bindings used in the job.
    Properties map[string]string
    The asset property dictionary.
    Resources JobResourceConfigurationResponse
    Compute Resource configuration for the job.
    Services map[string]JobServiceResponse
    List of JobEndpoints. For local jobs, a job endpoint will have an endpoint value of FileStreamObject.
    Tags map[string]string
    Tag dictionary. Tags can be added, removed, and updated.
    command String
    [Required] The command to execute on startup of the job, e.g. "python train.py".
    environmentId String
    [Required] The ARM resource ID of the Environment specification for the job.
    parameters Object
    Input parameters.
    status String
    Status of the job.
    codeId String
    ARM resource ID of the code asset.
    componentId String
    ARM resource ID of the component resource.
    computeId String
    ARM resource ID of the compute resource.
    description String
    The asset description text.
    displayName String
    Display name of job.
    distribution MpiResponse | PyTorchResponse | TensorFlowResponse
    Distribution configuration of the job. If set, this should be one of Mpi, Tensorflow, PyTorch, or null.
    environmentVariables Map<String,String>
    Environment variables included in the job.
    experimentName String
    The name of the experiment the job belongs to. If not set, the job is placed in the "Default" experiment.
    identity AmlTokenResponse | ManagedIdentityResponse | UserIdentityResponse
    Identity configuration. If set, this should be one of AmlToken, ManagedIdentity, UserIdentity or null. Defaults to AmlToken if null.
    inputs Map<String,Object>
    Mapping of input data bindings used in the job.
    isArchived Boolean
    Is the asset archived?
    limits CommandJobLimitsResponse
    Command Job limit.
    outputs Map<String,Object>
    Mapping of output data bindings used in the job.
    properties Map<String,String>
    The asset property dictionary.
    resources JobResourceConfigurationResponse
    Compute Resource configuration for the job.
    services Map<String,JobServiceResponse>
    List of JobEndpoints. For local jobs, a job endpoint will have an endpoint value of FileStreamObject.
    tags Map<String,String>
    Tag dictionary. Tags can be added, removed, and updated.
    command string
    [Required] The command to execute on startup of the job, e.g. "python train.py".
    environmentId string
    [Required] The ARM resource ID of the Environment specification for the job.
    parameters any
    Input parameters.
    status string
    Status of the job.
    codeId string
    ARM resource ID of the code asset.
    componentId string
    ARM resource ID of the component resource.
    computeId string
    ARM resource ID of the compute resource.
    description string
    The asset description text.
    displayName string
    Display name of job.
    distribution MpiResponse | PyTorchResponse | TensorFlowResponse
    Distribution configuration of the job. If set, this should be one of Mpi, Tensorflow, PyTorch, or null.
    environmentVariables {[key: string]: string}
    Environment variables included in the job.
    experimentName string
    The name of the experiment the job belongs to. If not set, the job is placed in the "Default" experiment.
    identity AmlTokenResponse | ManagedIdentityResponse | UserIdentityResponse
    Identity configuration. If set, this should be one of AmlToken, ManagedIdentity, UserIdentity or null. Defaults to AmlToken if null.
    inputs {[key: string]: CustomModelJobInputResponse | LiteralJobInputResponse | MLFlowModelJobInputResponse | MLTableJobInputResponse | TritonModelJobInputResponse | UriFileJobInputResponse | UriFolderJobInputResponse}
    Mapping of input data bindings used in the job.
    isArchived boolean
    Is the asset archived?
    limits CommandJobLimitsResponse
    Command Job limit.
    outputs {[key: string]: CustomModelJobOutputResponse | MLFlowModelJobOutputResponse | MLTableJobOutputResponse | TritonModelJobOutputResponse | UriFileJobOutputResponse | UriFolderJobOutputResponse}
    Mapping of output data bindings used in the job.
    properties {[key: string]: string}
    The asset property dictionary.
    resources JobResourceConfigurationResponse
    Compute Resource configuration for the job.
    services {[key: string]: JobServiceResponse}
    List of JobEndpoints. For local jobs, a job endpoint will have an endpoint value of FileStreamObject.
    tags {[key: string]: string}
    Tag dictionary. Tags can be added, removed, and updated.
    command str
    [Required] The command to execute on startup of the job, e.g. "python train.py".
    environment_id str
    [Required] The ARM resource ID of the Environment specification for the job.
    parameters Any
    Input parameters.
    status str
    Status of the job.
    code_id str
    ARM resource ID of the code asset.
    component_id str
    ARM resource ID of the component resource.
    compute_id str
    ARM resource ID of the compute resource.
    description str
    The asset description text.
    display_name str
    Display name of job.
    distribution MpiResponse | PyTorchResponse | TensorFlowResponse
    Distribution configuration of the job. If set, this should be one of Mpi, Tensorflow, PyTorch, or null.
    environment_variables Mapping[str, str]
    Environment variables included in the job.
    experiment_name str
    The name of the experiment the job belongs to. If not set, the job is placed in the "Default" experiment.
    identity AmlTokenResponse | ManagedIdentityResponse | UserIdentityResponse
    Identity configuration. If set, this should be one of AmlToken, ManagedIdentity, UserIdentity or null. Defaults to AmlToken if null.
    inputs Mapping[str, Union[CustomModelJobInputResponse, LiteralJobInputResponse, MLFlowModelJobInputResponse, MLTableJobInputResponse, TritonModelJobInputResponse, UriFileJobInputResponse, UriFolderJobInputResponse]]
    Mapping of input data bindings used in the job.
    is_archived bool
    Is the asset archived?
    limits CommandJobLimitsResponse
    Command Job limit.
    outputs Mapping[str, Union[CustomModelJobOutputResponse, MLFlowModelJobOutputResponse, MLTableJobOutputResponse, TritonModelJobOutputResponse, UriFileJobOutputResponse, UriFolderJobOutputResponse]]
    Mapping of output data bindings used in the job.
    properties Mapping[str, str]
    The asset property dictionary.
    resources JobResourceConfigurationResponse
    Compute Resource configuration for the job.
    services Mapping[str, JobServiceResponse]
    List of JobEndpoints. For local jobs, a job endpoint will have an endpoint value of FileStreamObject.
    tags Mapping[str, str]
    Tag dictionary. Tags can be added, removed, and updated.
    command String
    [Required] The command to execute on startup of the job, e.g. "python train.py".
    environmentId String
    [Required] The ARM resource ID of the Environment specification for the job.
    parameters Any
    Input parameters.
    status String
    Status of the job.
    codeId String
    ARM resource ID of the code asset.
    componentId String
    ARM resource ID of the component resource.
    computeId String
    ARM resource ID of the compute resource.
    description String
    The asset description text.
    displayName String
    Display name of job.
    distribution Property Map | Property Map | Property Map
    Distribution configuration of the job. If set, this should be one of Mpi, Tensorflow, PyTorch, or null.
    environmentVariables Map<String>
    Environment variables included in the job.
    experimentName String
    The name of the experiment the job belongs to. If not set, the job is placed in the "Default" experiment.
    identity Property Map | Property Map | Property Map
    Identity configuration. If set, this should be one of AmlToken, ManagedIdentity, UserIdentity or null. Defaults to AmlToken if null.
    inputs Map<Property Map | Property Map | Property Map | Property Map | Property Map | Property Map | Property Map>
    Mapping of input data bindings used in the job.
    isArchived Boolean
    Is the asset archived?
    limits Property Map
    Command Job limit.
    outputs Map<Property Map | Property Map | Property Map | Property Map | Property Map | Property Map>
    Mapping of output data bindings used in the job.
    properties Map<String>
    The asset property dictionary.
    resources Property Map
    Compute Resource configuration for the job.
    services Map<Property Map>
    List of JobEndpoints. For local jobs, a job endpoint will have an endpoint value of FileStreamObject.
    tags Map<String>
    Tag dictionary. Tags can be added, removed, and updated.

    CustomForecastHorizon, CustomForecastHorizonArgs

    Value int
    [Required] Forecast horizon value.
    Value int
    [Required] Forecast horizon value.
    value Integer
    [Required] Forecast horizon value.
    value number
    [Required] Forecast horizon value.
    value int
    [Required] Forecast horizon value.
    value Number
    [Required] Forecast horizon value.
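
    For illustration, a custom forecast horizon of 14 periods carries just the integer value shown above; the mode discriminator in the sketch below is an assumption, since it is not listed in this table:

    // Hypothetical custom forecast horizon: forecast 14 periods ahead.
    const forecastHorizon = {
        mode: "Custom",   // assumed discriminator value; not shown in the table above
        value: 14,
    };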

    CustomForecastHorizonResponse, CustomForecastHorizonResponseArgs

    Value int
    [Required] Forecast horizon value.
    Value int
    [Required] Forecast horizon value.
    value Integer
    [Required] Forecast horizon value.
    value number
    [Required] Forecast horizon value.
    value int
    [Required] Forecast horizon value.
    value Number
    [Required] Forecast horizon value.

    CustomModelJobInput, CustomModelJobInputArgs

    Uri string
    [Required] Input Asset URI.
    Description string
    Description for the input.
    Mode string | Pulumi.AzureNative.MachineLearningServices.InputDeliveryMode
    Input Asset Delivery Mode.
    Uri string
    [Required] Input Asset URI.
    Description string
    Description for the input.
    Mode string | InputDeliveryMode
    Input Asset Delivery Mode.
    uri String
    [Required] Input Asset URI.
    description String
    Description for the input.
    mode String | InputDeliveryMode
    Input Asset Delivery Mode.
    uri string
    [Required] Input Asset URI.
    description string
    Description for the input.
    mode string | InputDeliveryMode
    Input Asset Delivery Mode.
    uri str
    [Required] Input Asset URI.
    description str
    Description for the input.
    mode str | InputDeliveryMode
    Input Asset Delivery Mode.
    uri String
    [Required] Input Asset URI.
    description String
    Description for the input.
    mode String | "ReadOnlyMount" | "ReadWriteMount" | "Download" | "Direct" | "EvalMount" | "EvalDownload"
    Input Asset Delivery Mode.
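
    A sketch of one entry for a job's Inputs map using this type (C#); the asset URI is a placeholder, and depending on the SDK version the JobInputType discriminator may also need to be set explicitly (assumed to be "custom_model"):

    using AzureNative = Pulumi.AzureNative;

    // Mount a registered custom model read-only as a job input.
    var modelInput = new AzureNative.MachineLearningServices.Inputs.CustomModelJobInputArgs
    {
        Uri = "azureml://registries/example/models/my-model/versions/1", // placeholder asset URI
        Mode = AzureNative.MachineLearningServices.InputDeliveryMode.ReadOnlyMount,
        Description = "Model consumed by the job.",
    };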

    CustomModelJobInputResponse, CustomModelJobInputResponseArgs

    Uri string
    [Required] Input Asset URI.
    Description string
    Description for the input.
    Mode string
    Input Asset Delivery Mode.
    Uri string
    [Required] Input Asset URI.
    Description string
    Description for the input.
    Mode string
    Input Asset Delivery Mode.
    uri String
    [Required] Input Asset URI.
    description String
    Description for the input.
    mode String
    Input Asset Delivery Mode.
    uri string
    [Required] Input Asset URI.
    description string
    Description for the input.
    mode string
    Input Asset Delivery Mode.
    uri str
    [Required] Input Asset URI.
    description str
    Description for the input.
    mode str
    Input Asset Delivery Mode.
    uri String
    [Required] Input Asset URI.
    description String
    Description for the input.
    mode String
    Input Asset Delivery Mode.

    CustomModelJobOutput, CustomModelJobOutputArgs

    Description string
    Description for the output.
    Mode string | Pulumi.AzureNative.MachineLearningServices.OutputDeliveryMode
    Output Asset Delivery Mode.
    Uri string
    Output Asset URI.
    Description string
    Description for the output.
    Mode string | OutputDeliveryMode
    Output Asset Delivery Mode.
    Uri string
    Output Asset URI.
    description String
    Description for the output.
    mode String | OutputDeliveryMode
    Output Asset Delivery Mode.
    uri String
    Output Asset URI.
    description string
    Description for the output.
    mode string | OutputDeliveryMode
    Output Asset Delivery Mode.
    uri string
    Output Asset URI.
    description str
    Description for the output.
    mode str | OutputDeliveryMode
    Output Asset Delivery Mode.
    uri str
    Output Asset URI.
    description String
    Description for the output.
    mode String | "ReadWriteMount" | "Upload"
    Output Asset Delivery Mode.
    uri String
    Output Asset URI.
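
    The output side looks symmetrical, sketched here as one entry for a job's Outputs map (C#); the URI is a placeholder and, as with the input type, a JobOutputType discriminator (assumed to be "custom_model") may be required by the SDK:

    using AzureNative = Pulumi.AzureNative;

    // Write the model produced by the job back to a datastore path.
    var modelOutput = new AzureNative.MachineLearningServices.Inputs.CustomModelJobOutputArgs
    {
        Mode = AzureNative.MachineLearningServices.OutputDeliveryMode.ReadWriteMount,
        Uri = "azureml://datastores/workspaceblobstore/paths/models/", // placeholder output location
        Description = "Trained model produced by the job.",
    };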

    CustomModelJobOutputResponse, CustomModelJobOutputResponseArgs

    Description string
    Description for the output.
    Mode string
    Output Asset Delivery Mode.
    Uri string
    Output Asset URI.
    Description string
    Description for the output.
    Mode string
    Output Asset Delivery Mode.
    Uri string
    Output Asset URI.
    description String
    Description for the output.
    mode String
    Output Asset Delivery Mode.
    uri String
    Output Asset URI.
    description string
    Description for the output.
    mode string
    Output Asset Delivery Mode.
    uri string
    Output Asset URI.
    description str
    Description for the output.
    mode str
    Output Asset Delivery Mode.
    uri str
    Output Asset URI.
    description String
    Description for the output.
    mode String
    Output Asset Delivery Mode.
    uri String
    Output Asset URI.

    CustomNCrossValidations, CustomNCrossValidationsArgs

    Value int
    [Required] N-Cross validations value.
    Value int
    [Required] N-Cross validations value.
    value Integer
    [Required] N-Cross validations value.
    value number
    [Required] N-Cross validations value.
    value int
    [Required] N-Cross validations value.
    value Number
    [Required] N-Cross validations value.
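
    In C#, a fixed fold count is wrapped like this before being assigned to a tabular task's NCrossValidations property (a minimal fragment; the value is illustrative):

    using AzureNative = Pulumi.AzureNative;

    // Five explicit cross-validation folds instead of letting the service decide.
    var folds = new AzureNative.MachineLearningServices.Inputs.CustomNCrossValidationsArgs
    {
        Value = 5,
    };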

    CustomNCrossValidationsResponse, CustomNCrossValidationsResponseArgs

    Value int
    [Required] N-Cross validations value.
    Value int
    [Required] N-Cross validations value.
    value Integer
    [Required] N-Cross validations value.
    value number
    [Required] N-Cross validations value.
    value int
    [Required] N-Cross validations value.
    value Number
    [Required] N-Cross validations value.

    CustomSeasonality, CustomSeasonalityArgs

    Value int
    [Required] Seasonality value.
    Value int
    [Required] Seasonality value.
    value Integer
    [Required] Seasonality value.
    value number
    [Required] Seasonality value.
    value int
    [Required] Seasonality value.
    value Number
    [Required] Seasonality value.

    CustomSeasonalityResponse, CustomSeasonalityResponseArgs

    Value int
    [Required] Seasonality value.
    Value int
    [Required] Seasonality value.
    value Integer
    [Required] Seasonality value.
    value number
    [Required] Seasonality value.
    value int
    [Required] Seasonality value.
    value Number
    [Required] Seasonality value.

    CustomTargetLags, CustomTargetLagsArgs

    Values List<int>
    [Required] Set target lags values.
    Values []int
    [Required] Set target lags values.
    values List<Integer>
    [Required] Set target lags values.
    values number[]
    [Required] Set target lags values.
    values Sequence[int]
    [Required] Set target lags values.
    values List<Number>
    [Required] Set target lags values.

    CustomTargetLagsResponse, CustomTargetLagsResponseArgs

    Values List<int>
    [Required] Set target lags values.
    Values []int
    [Required] Set target lags values.
    values List<Integer>
    [Required] Set target lags values.
    values number[]
    [Required] Set target lags values.
    values Sequence[int]
    [Required] Set target lags values.
    values List<Number>
    [Required] Set target lags values.

    CustomTargetRollingWindowSize, CustomTargetRollingWindowSizeArgs

    Value int
    [Required] TargetRollingWindowSize value.
    Value int
    [Required] TargetRollingWindowSize value.
    value Integer
    [Required] TargetRollingWindowSize value.
    value number
    [Required] TargetRollingWindowSize value.
    value int
    [Required] TargetRollingWindowSize value.
    value Number
    [Required] TargetRollingWindowSize value.

    CustomTargetRollingWindowSizeResponse, CustomTargetRollingWindowSizeResponseArgs

    Value int
    [Required] TargetRollingWindowSize value.
    Value int
    [Required] TargetRollingWindowSize value.
    value Integer
    [Required] TargetRollingWindowSize value.
    value number
    [Required] TargetRollingWindowSize value.
    value int
    [Required] TargetRollingWindowSize value.
    value Number
    [Required] TargetRollingWindowSize value.
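
    The Custom* wrappers above (seasonality, target lags, rolling window size, plus the forecast horizon shown earlier) all feed the forecasting settings of a Forecasting task. A hedged C# sketch follows; the ForecastingSettingsArgs property names (TimeColumnName, ForecastHorizon, Seasonality, TargetLags, TargetRollingWindowSize) are assumptions taken from the corresponding REST API fields, since ForecastingSettings itself is not listed in this section.

    using AzureNative = Pulumi.AzureNative;

    // Illustrative time-series settings built from the fixed-value (Custom*) wrappers.
    var forecastingSettings = new AzureNative.MachineLearningServices.Inputs.ForecastingSettingsArgs
    {
        TimeColumnName = "timestamp", // placeholder column name
        ForecastHorizon = new AzureNative.MachineLearningServices.Inputs.CustomForecastHorizonArgs { Value = 14 },
        Seasonality = new AzureNative.MachineLearningServices.Inputs.CustomSeasonalityArgs { Value = 7 },
        TargetLags = new AzureNative.MachineLearningServices.Inputs.CustomTargetLagsArgs { Values = new[] { 1, 7 } },
        TargetRollingWindowSize = new AzureNative.MachineLearningServices.Inputs.CustomTargetRollingWindowSizeArgs { Value = 28 },
    };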

    FeatureLags, FeatureLagsArgs

    None
    None: No feature lags generated.
    Auto
    Auto: System auto-generates feature lags.
    FeatureLagsNone
    None: No feature lags generated.
    FeatureLagsAuto
    Auto: System auto-generates feature lags.
    None
    None: No feature lags generated.
    Auto
    Auto: System auto-generates feature lags.
    None
    None: No feature lags generated.
    Auto
    Auto: System auto-generates feature lags.
    NONE
    None: No feature lags generated.
    AUTO
    Auto: System auto-generates feature lags.
    "None"
    None: No feature lags generated.
    "Auto"
    Auto: System auto-generates feature lags.

    FeaturizationMode, FeaturizationModeArgs

    Auto
    Auto: Auto mode, system performs featurization without any custom featurization inputs.
    Custom
    Custom: Custom featurization.
    Off
    Off: Featurization off. 'Forecasting' task cannot use this value.
    FeaturizationModeAuto
    Auto: Auto mode, system performs featurization without any custom featurization inputs.
    FeaturizationModeCustom
    Custom: Custom featurization.
    FeaturizationModeOff
    Off: Featurization off. 'Forecasting' task cannot use this value.
    Auto
    Auto: Auto mode, system performs featurization without any custom featurization inputs.
    Custom
    Custom: Custom featurization.
    Off
    Off: Featurization off. 'Forecasting' task cannot use this value.
    Auto
    Auto: Auto mode, system performs featurization without any custom featurization inputs.
    Custom
    Custom: Custom featurization.
    Off
    Off: Featurization off. 'Forecasting' task cannot use this value.
    AUTO
    Auto: Auto mode, system performs featurization without any custom featurization inputs.
    CUSTOM
    Custom: Custom featurization.
    OFF
    Off: Featurization off. 'Forecasting' task cannot use this value.
    "Auto"
    Auto: Auto mode, system performs featurization without any custom featurization inputs.
    "Custom"
    Custom: Custom featurization.
    "Off"
    Off: Featurization off. 'Forecasting' task cannot use this value.
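
    Featurization mode is normally set on the task's featurization settings. A minimal, hedged C# fragment; the Mode property name on TableVerticalFeaturizationSettingsArgs is an assumption based on the underlying REST API, as that type is not listed in this section.

    using AzureNative = Pulumi.AzureNative;

    // Let the service handle featurization automatically; the raw string "Auto" works equally well.
    var featurization = new AzureNative.MachineLearningServices.Inputs.TableVerticalFeaturizationSettingsArgs
    {
        Mode = AzureNative.MachineLearningServices.FeaturizationMode.Auto,
    };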

    Forecasting, ForecastingArgs

    TrainingData Pulumi.AzureNative.MachineLearningServices.Inputs.MLTableJobInput
    [Required] Training data input.
    CvSplitColumnNames List<string>
    Columns to use for CVSplit data.
    FeaturizationSettings Pulumi.AzureNative.MachineLearningServices.Inputs.TableVerticalFeaturizationSettings
    Featurization inputs needed for AutoML job.
    ForecastingSettings Pulumi.AzureNative.MachineLearningServices.Inputs.ForecastingSettings
    Forecasting task specific inputs.
    LimitSettings Pulumi.AzureNative.MachineLearningServices.Inputs.TableVerticalLimitSettings
    Execution constraints for AutoMLJob.
    LogVerbosity string | Pulumi.AzureNative.MachineLearningServices.LogVerbosity
    Log verbosity for the job.
    NCrossValidations Pulumi.AzureNative.MachineLearningServices.Inputs.AutoNCrossValidations | Pulumi.AzureNative.MachineLearningServices.Inputs.CustomNCrossValidations
    Number of cross validation folds to be applied on training dataset when validation dataset is not provided.
    PrimaryMetric string | Pulumi.AzureNative.MachineLearningServices.ForecastingPrimaryMetrics
    Primary metric for forecasting task.
    TargetColumnName string
    Target column name: This is the prediction values column. Also known as the label column name in the context of classification tasks.
    TestData Pulumi.AzureNative.MachineLearningServices.Inputs.MLTableJobInput
    Test data input.
    TestDataSize double
    The fraction of the test dataset that needs to be set aside for validation purposes. Values between (0.0, 1.0). Applied when validation dataset is not provided.
    TrainingSettings Pulumi.AzureNative.MachineLearningServices.Inputs.ForecastingTrainingSettings
    Inputs for training phase for an AutoML Job.
    ValidationData Pulumi.AzureNative.MachineLearningServices.Inputs.MLTableJobInput
    Validation data inputs.
    ValidationDataSize double
    The fraction of the training dataset that needs to be set aside for validation purposes. Values between (0.0, 1.0). Applied when validation dataset is not provided.
    WeightColumnName string
    The name of the sample weight column. Automated ML supports a weighted column as an input, causing rows in the data to be weighted up or down.
    TrainingData MLTableJobInput
    [Required] Training data input.
    CvSplitColumnNames []string
    Columns to use for CVSplit data.
    FeaturizationSettings TableVerticalFeaturizationSettings
    Featurization inputs needed for AutoML job.
    ForecastingSettings ForecastingSettings
    Forecasting task specific inputs.
    LimitSettings TableVerticalLimitSettings
    Execution constraints for AutoMLJob.
    LogVerbosity string | LogVerbosity
    Log verbosity for the job.
    NCrossValidations AutoNCrossValidations | CustomNCrossValidations
    Number of cross validation folds to be applied on training dataset when validation dataset is not provided.
    PrimaryMetric string | ForecastingPrimaryMetrics
    Primary metric for forecasting task.
    TargetColumnName string
    Target column name: This is the prediction values column. Also known as the label column name in the context of classification tasks.
    TestData MLTableJobInput
    Test data input.
    TestDataSize float64
    The fraction of the test dataset that needs to be set aside for validation purposes. Values between (0.0, 1.0). Applied when validation dataset is not provided.
    TrainingSettings ForecastingTrainingSettings
    Inputs for training phase for an AutoML Job.
    ValidationData MLTableJobInput
    Validation data inputs.
    ValidationDataSize float64
    The fraction of the training dataset that needs to be set aside for validation purposes. Values between (0.0, 1.0). Applied when validation dataset is not provided.
    WeightColumnName string
    The name of the sample weight column. Automated ML supports a weighted column as an input, causing rows in the data to be weighted up or down.
    trainingData MLTableJobInput
    [Required] Training data input.
    cvSplitColumnNames List<String>
    Columns to use for CVSplit data.
    featurizationSettings TableVerticalFeaturizationSettings
    Featurization inputs needed for AutoML job.
    forecastingSettings ForecastingSettings
    Forecasting task specific inputs.
    limitSettings TableVerticalLimitSettings
    Execution constraints for AutoMLJob.
    logVerbosity String | LogVerbosity
    Log verbosity for the job.
    nCrossValidations AutoNCrossValidations | CustomNCrossValidations
    Number of cross validation folds to be applied on training dataset when validation dataset is not provided.
    primaryMetric String | ForecastingPrimaryMetrics
    Primary metric for forecasting task.
    targetColumnName String
    Target column name: This is the prediction values column. Also known as the label column name in the context of classification tasks.
    testData MLTableJobInput
    Test data input.
    testDataSize Double
    The fraction of the test dataset that needs to be set aside for validation purposes. Values between (0.0, 1.0). Applied when validation dataset is not provided.
    trainingSettings ForecastingTrainingSettings
    Inputs for training phase for an AutoML Job.
    validationData MLTableJobInput
    Validation data inputs.
    validationDataSize Double
    The fraction of the training dataset that needs to be set aside for validation purposes. Values between (0.0, 1.0). Applied when validation dataset is not provided.
    weightColumnName String
    The name of the sample weight column. Automated ML supports a weighted column as an input, causing rows in the data to be weighted up or down.
    trainingData MLTableJobInput
    [Required] Training data input.
    cvSplitColumnNames string[]
    Columns to use for CVSplit data.
    featurizationSettings TableVerticalFeaturizationSettings
    Featurization inputs needed for AutoML job.
    forecastingSettings ForecastingSettings
    Forecasting task specific inputs.
    limitSettings TableVerticalLimitSettings
    Execution constraints for AutoMLJob.
    logVerbosity string | LogVerbosity
    Log verbosity for the job.
    nCrossValidations AutoNCrossValidations | CustomNCrossValidations
    Number of cross validation folds to be applied on training dataset when validation dataset is not provided.
    primaryMetric string | ForecastingPrimaryMetrics
    Primary metric for forecasting task.
    targetColumnName string
    Target column name: This is the prediction values column. Also known as the label column name in the context of classification tasks.
    testData MLTableJobInput
    Test data input.
    testDataSize number
    The fraction of the test dataset that needs to be set aside for validation purposes. Values between (0.0, 1.0). Applied when validation dataset is not provided.
    trainingSettings ForecastingTrainingSettings
    Inputs for training phase for an AutoML Job.
    validationData MLTableJobInput
    Validation data inputs.
    validationDataSize number
    The fraction of the training dataset that needs to be set aside for validation purposes. Values between (0.0, 1.0). Applied when validation dataset is not provided.
    weightColumnName string
    The name of the sample weight column. Automated ML supports a weighted column as an input, causing rows in the data to be weighted up or down.
    training_data MLTableJobInput
    [Required] Training data input.
    cv_split_column_names Sequence[str]
    Columns to use for CVSplit data.
    featurization_settings TableVerticalFeaturizationSettings
    Featurization inputs needed for AutoML job.
    forecasting_settings ForecastingSettings
    Forecasting task specific inputs.
    limit_settings TableVerticalLimitSettings
    Execution constraints for AutoMLJob.
    log_verbosity str | LogVerbosity
    Log verbosity for the job.
    n_cross_validations AutoNCrossValidations | CustomNCrossValidations
    Number of cross validation folds to be applied on training dataset when validation dataset is not provided.
    primary_metric str | ForecastingPrimaryMetrics
    Primary metric for forecasting task.
    target_column_name str
    Target column name: This is the prediction values column. Also known as the label column name in the context of classification tasks.
    test_data MLTableJobInput
    Test data input.
    test_data_size float
    The fraction of the test dataset that needs to be set aside for validation purposes. Values between (0.0, 1.0). Applied when validation dataset is not provided.
    training_settings ForecastingTrainingSettings
    Inputs for training phase for an AutoML Job.
    validation_data MLTableJobInput
    Validation data inputs.
    validation_data_size float
    The fraction of the training dataset that needs to be set aside for validation purposes. Values between (0.0, 1.0). Applied when validation dataset is not provided.
    weight_column_name str
    The name of the sample weight column. Automated ML supports a weighted column as an input, causing rows in the data to be weighted up or down.
    trainingData Property Map
    [Required] Training data input.
    cvSplitColumnNames List<String>
    Columns to use for CVSplit data.
    featurizationSettings Property Map
    Featurization inputs needed for AutoML job.
    forecastingSettings Property Map
    Forecasting task specific inputs.
    limitSettings Property Map
    Execution constraints for AutoMLJob.
    logVerbosity String | "NotSet" | "Debug" | "Info" | "Warning" | "Error" | "Critical"
    Log verbosity for the job.
    nCrossValidations Property Map | Property Map
    Number of cross validation folds to be applied on training dataset when validation dataset is not provided.
    primaryMetric String | "SpearmanCorrelation" | "NormalizedRootMeanSquaredError" | "R2Score" | "NormalizedMeanAbsoluteError"
    Primary metric for forecasting task.
    targetColumnName String
    Target column name: This is the prediction values column. Also known as the label column name in the context of classification tasks.
    testData Property Map
    Test data input.
    testDataSize Number
    The fraction of the test dataset that needs to be set aside for validation purposes. Values between (0.0, 1.0). Applied when validation dataset is not provided.
    trainingSettings Property Map
    Inputs for training phase for an AutoML Job.
    validationData Property Map
    Validation data inputs.
    validationDataSize Number
    The fraction of the training dataset that needs to be set aside for validation purposes. Values between (0.0, 1.0). Applied when validation dataset is not provided.
    weightColumnName String
    The name of the sample weight column. Automated ML supports a weighted column as an input, causing rows in the data to be weighted up or down.
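
    Putting the pieces together, a Forecasting task is supplied as an AutoML job's TaskDetails. The C# sketch below sticks to properties listed above; the TaskType and JobInputType discriminator values ("Forecasting", "mltable") follow the usual discriminator pattern and are assumptions here, and the resource group, workspace, compute, and data URIs are placeholders.

    using Pulumi;
    using AzureNative = Pulumi.AzureNative;

    return await Deployment.RunAsync(() =>
    {
        // Minimal AutoML forecasting job sketch; identifiers and URIs are placeholders.
        var forecastingJob = new AzureNative.MachineLearningServices.Job("forecastingJob", new()
        {
            ResourceGroupName = "example-rg",
            WorkspaceName = "example-workspace",
            Id = "example-forecasting-job",
            JobBaseProperties = new AzureNative.MachineLearningServices.Inputs.AutoMLJobArgs
            {
                JobType = "AutoML",
                ComputeId = "/subscriptions/.../computes/cpu-cluster", // placeholder compute ARM ID
                ExperimentName = "demand-forecasting",
                TaskDetails = new AzureNative.MachineLearningServices.Inputs.ForecastingArgs
                {
                    TaskType = "Forecasting",    // assumed discriminator value
                    TargetColumnName = "demand", // placeholder target column
                    PrimaryMetric = AzureNative.MachineLearningServices.ForecastingPrimaryMetrics.NormalizedRootMeanSquaredError,
                    TrainingData = new AzureNative.MachineLearningServices.Inputs.MLTableJobInputArgs
                    {
                        JobInputType = "mltable", // assumed discriminator value
                        Uri = "azureml://datastores/workspaceblobstore/paths/training-mltable/", // placeholder
                    },
                    NCrossValidations = new AzureNative.MachineLearningServices.Inputs.CustomNCrossValidationsArgs
                    {
                        Value = 5,
                    },
                    ForecastingSettings = new AzureNative.MachineLearningServices.Inputs.ForecastingSettingsArgs
                    {
                        TimeColumnName = "timestamp", // assumed property name; placeholder column
                        ForecastHorizon = new AzureNative.MachineLearningServices.Inputs.CustomForecastHorizonArgs { Value = 14 },
                    },
                },
            },
        });
    });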

    ForecastingModels, ForecastingModelsArgs

    AutoArima
    AutoArima: The Auto-Autoregressive Integrated Moving Average (ARIMA) model uses time-series data and statistical analysis to interpret the data and make future predictions. This model aims to explain data by using time series data on its past values and uses linear regression to make predictions.
    Prophet
    Prophet: Prophet is a procedure for forecasting time series data based on an additive model where non-linear trends are fit with yearly, weekly, and daily seasonality, plus holiday effects. It works best with time series that have strong seasonal effects and several seasons of historical data. Prophet is robust to missing data and shifts in the trend, and typically handles outliers well.
    Naive
    Naive: The Naive forecasting model makes predictions by carrying forward the latest target value for each time-series in the training data.
    SeasonalNaive
    SeasonalNaive: The Seasonal Naive forecasting model makes predictions by carrying forward the latest season of target values for each time-series in the training data.
    Average
    Average: The Average forecasting model makes predictions by carrying forward the average of the target values for each time-series in the training data.
    SeasonalAverage
    SeasonalAverage: The Seasonal Average forecasting model makes predictions by carrying forward the average value of the latest season of data for each time-series in the training data.
    ExponentialSmoothing
    ExponentialSmoothing: Exponential smoothing is a time series forecasting method for univariate data that can be extended to support data with a systematic trend or seasonal component.
    Arimax
    Arimax: An Autoregressive Integrated Moving Average with Explanatory Variable (ARIMAX) model can be viewed as a multiple regression model with one or more autoregressive (AR) terms and/or one or more moving average (MA) terms. This method is suitable for forecasting when data is stationary or non-stationary and multivariate with any type of data pattern, i.e., level/trend/seasonality/cyclicity.
    TCNForecaster
    TCNForecaster: Temporal Convolutional Networks Forecaster.
    ElasticNet
    ElasticNet: Elastic net is a popular type of regularized linear regression that combines two popular penalties, specifically the L1 and L2 penalty functions.
    GradientBoosting
    GradientBoosting: The technique of combining weak learners into a strong learner is called boosting; the gradient boosting algorithm works on this principle.
    DecisionTree
    DecisionTree: Decision Trees are a non-parametric supervised learning method used for both classification and regression tasks. The goal is to create a model that predicts the value of a target variable by learning simple decision rules inferred from the data features.
    KNN
    KNN: The K-nearest neighbors (KNN) algorithm uses 'feature similarity' to predict the values of new data points; a new data point is assigned a value based on how closely it matches the points in the training set.
    LassoLars
    LassoLars: Lasso model fit with Least Angle Regression, a.k.a. Lars. It is a linear model trained with an L1 prior as regularizer.
    SGD
    SGD: Stochastic gradient descent is an optimization algorithm often used in machine learning applications to find the model parameters that correspond to the best fit between predicted and actual outputs. It is an inexact but powerful technique.
    RandomForest
    RandomForest: Random forest is a supervised learning algorithm. The "forest" it builds is an ensemble of decision trees, usually trained with the “bagging” method. The general idea of the bagging method is that a combination of learning models increases the overall result.
    ExtremeRandomTrees
    ExtremeRandomTrees: Extreme Trees is an ensemble machine learning algorithm that combines the predictions from many decision trees. It is related to the widely used random forest algorithm.
    LightGBM
    LightGBM: LightGBM is a gradient boosting framework that uses tree-based learning algorithms.
    XGBoostRegressor
    XGBoostRegressor: Extreme Gradient Boosting Regressor is a supervised machine learning model that uses an ensemble of base learners.
    ForecastingModelsAutoArima
    AutoArima: The Auto-Autoregressive Integrated Moving Average (ARIMA) model uses time-series data and statistical analysis to interpret the data and make future predictions. This model aims to explain data by using time series data on its past values and uses linear regression to make predictions.
    ForecastingModelsProphet
    Prophet: Prophet is a procedure for forecasting time series data based on an additive model where non-linear trends are fit with yearly, weekly, and daily seasonality, plus holiday effects. It works best with time series that have strong seasonal effects and several seasons of historical data. Prophet is robust to missing data and shifts in the trend, and typically handles outliers well.
    ForecastingModelsNaive
    Naive: The Naive forecasting model makes predictions by carrying forward the latest target value for each time-series in the training data.
    ForecastingModelsSeasonalNaive
    SeasonalNaive: The Seasonal Naive forecasting model makes predictions by carrying forward the latest season of target values for each time-series in the training data.
    ForecastingModelsAverage
    Average: The Average forecasting model makes predictions by carrying forward the average of the target values for each time-series in the training data.
    ForecastingModelsSeasonalAverage
    SeasonalAverage: The Seasonal Average forecasting model makes predictions by carrying forward the average value of the latest season of data for each time-series in the training data.
    ForecastingModelsExponentialSmoothing
    ExponentialSmoothing: Exponential smoothing is a time series forecasting method for univariate data that can be extended to support data with a systematic trend or seasonal component.
    ForecastingModelsArimax
    Arimax: An Autoregressive Integrated Moving Average with Explanatory Variable (ARIMAX) model can be viewed as a multiple regression model with one or more autoregressive (AR) terms and/or one or more moving average (MA) terms. This method is suitable for forecasting when data is stationary or non-stationary and multivariate with any type of data pattern, i.e., level/trend/seasonality/cyclicity.
    ForecastingModelsTCNForecaster
    TCNForecaster: Temporal Convolutional Networks Forecaster.
    ForecastingModelsElasticNet
    ElasticNet: Elastic net is a popular type of regularized linear regression that combines two popular penalties, specifically the L1 and L2 penalty functions.
    ForecastingModelsGradientBoosting
    GradientBoosting: The technique of combining weak learners into a strong learner is called boosting; the gradient boosting algorithm works on this principle.
    ForecastingModelsDecisionTree
    DecisionTree: Decision Trees are a non-parametric supervised learning method used for both classification and regression tasks. The goal is to create a model that predicts the value of a target variable by learning simple decision rules inferred from the data features.
    ForecastingModelsKNN
    KNN: The K-nearest neighbors (KNN) algorithm uses 'feature similarity' to predict the values of new data points; a new data point is assigned a value based on how closely it matches the points in the training set.
    ForecastingModelsLassoLars
    LassoLars: Lasso model fit with Least Angle Regression, a.k.a. Lars. It is a linear model trained with an L1 prior as regularizer.
    ForecastingModelsSGD
    SGD: Stochastic gradient descent is an optimization algorithm often used in machine learning applications to find the model parameters that correspond to the best fit between predicted and actual outputs. It is an inexact but powerful technique.
    ForecastingModelsRandomForest
    RandomForest: Random forest is a supervised learning algorithm. The "forest" it builds is an ensemble of decision trees, usually trained with the “bagging” method. The general idea of the bagging method is that a combination of learning models increases the overall result.
    ForecastingModelsExtremeRandomTrees
    ExtremeRandomTrees: Extreme Trees is an ensemble machine learning algorithm that combines the predictions from many decision trees. It is related to the widely used random forest algorithm.
    ForecastingModelsLightGBM
    LightGBM: LightGBM is a gradient boosting framework that uses tree-based learning algorithms.
    ForecastingModelsXGBoostRegressor
    XGBoostRegressor: Extreme Gradient Boosting Regressor is a supervised machine learning model that uses an ensemble of base learners.
    AutoArima
    AutoArima: The Auto-Autoregressive Integrated Moving Average (ARIMA) model uses time-series data and statistical analysis to interpret the data and make future predictions. This model aims to explain data by using time series data on its past values and uses linear regression to make predictions.
    Prophet
    Prophet: Prophet is a procedure for forecasting time series data based on an additive model where non-linear trends are fit with yearly, weekly, and daily seasonality, plus holiday effects. It works best with time series that have strong seasonal effects and several seasons of historical data. Prophet is robust to missing data and shifts in the trend, and typically handles outliers well.
    Naive
    Naive: The Naive forecasting model makes predictions by carrying forward the latest target value for each time-series in the training data.
    SeasonalNaive
    SeasonalNaive: The Seasonal Naive forecasting model makes predictions by carrying forward the latest season of target values for each time-series in the training data.
    Average
    Average: The Average forecasting model makes predictions by carrying forward the average of the target values for each time-series in the training data.
    SeasonalAverage
    SeasonalAverage: The Seasonal Average forecasting model makes predictions by carrying forward the average value of the latest season of data for each time-series in the training data.
    ExponentialSmoothing
    ExponentialSmoothing: Exponential smoothing is a time series forecasting method for univariate data that can be extended to support data with a systematic trend or seasonal component.
    Arimax
    Arimax: An Autoregressive Integrated Moving Average with Explanatory Variable (ARIMAX) model can be viewed as a multiple regression model with one or more autoregressive (AR) terms and/or one or more moving average (MA) terms. This method is suitable for forecasting when data is stationary or non-stationary and multivariate with any type of data pattern, i.e., level/trend/seasonality/cyclicity.
    TCNForecaster
    TCNForecaster: Temporal Convolutional Networks Forecaster.
    ElasticNet
    ElasticNet: Elastic net is a popular type of regularized linear regression that combines two popular penalties, specifically the L1 and L2 penalty functions.
    GradientBoosting
    GradientBoosting: The technique of combining weak learners into a strong learner is called boosting; the gradient boosting algorithm works on this principle.
    DecisionTree
    DecisionTree: Decision Trees are a non-parametric supervised learning method used for both classification and regression tasks. The goal is to create a model that predicts the value of a target variable by learning simple decision rules inferred from the data features.
    KNN
    KNN: The K-nearest neighbors (KNN) algorithm uses 'feature similarity' to predict the values of new data points; a new data point is assigned a value based on how closely it matches the points in the training set.
    LassoLars
    LassoLars: Lasso model fit with Least Angle Regression, a.k.a. Lars. It is a linear model trained with an L1 prior as regularizer.
    SGD
    SGD: Stochastic gradient descent is an optimization algorithm often used in machine learning applications to find the model parameters that correspond to the best fit between predicted and actual outputs. It is an inexact but powerful technique.
    RandomForest
    RandomForest: Random forest is a supervised learning algorithm. The "forest" it builds is an ensemble of decision trees, usually trained with the “bagging” method. The general idea of the bagging method is that a combination of learning models increases the overall result.
    ExtremeRandomTrees
    ExtremeRandomTrees: Extreme Trees is an ensemble machine learning algorithm that combines the predictions from many decision trees. It is related to the widely used random forest algorithm.
    LightGBM
    LightGBM: LightGBM is a gradient boosting framework that uses tree-based learning algorithms.
    XGBoostRegressor
    XGBoostRegressor: Extreme Gradient Boosting Regressor is a supervised machine learning model that uses an ensemble of base learners.
    AutoArima
    AutoArima: The Auto-Autoregressive Integrated Moving Average (ARIMA) model uses time-series data and statistical analysis to interpret the data and make future predictions. This model aims to explain data by using time series data on its past values and uses linear regression to make predictions.
    Prophet
    Prophet: Prophet is a procedure for forecasting time series data based on an additive model where non-linear trends are fit with yearly, weekly, and daily seasonality, plus holiday effects. It works best with time series that have strong seasonal effects and several seasons of historical data. Prophet is robust to missing data and shifts in the trend, and typically handles outliers well.
    Naive
    Naive: The Naive forecasting model makes predictions by carrying forward the latest target value for each time-series in the training data.
    SeasonalNaive
    SeasonalNaive: The Seasonal Naive forecasting model makes predictions by carrying forward the latest season of target values for each time-series in the training data.
    Average
    Average: The Average forecasting model makes predictions by carrying forward the average of the target values for each time-series in the training data.
    SeasonalAverage
    SeasonalAverage: The Seasonal Average forecasting model makes predictions by carrying forward the average value of the latest season of data for each time-series in the training data.
    ExponentialSmoothing
    ExponentialSmoothing: Exponential smoothing is a time series forecasting method for univariate data that can be extended to support data with a systematic trend or seasonal component.
    Arimax
    Arimax: An Autoregressive Integrated Moving Average with Explanatory Variable (ARIMAX) model can be viewed as a multiple regression model with one or more autoregressive (AR) terms and/or one or more moving average (MA) terms. This method is suitable for forecasting when data is stationary or non-stationary and multivariate with any type of data pattern, i.e., level/trend/seasonality/cyclicity.
    TCNForecaster
    TCNForecaster: Temporal Convolutional Networks Forecaster.
    ElasticNet
    ElasticNet: Elastic net is a popular type of regularized linear regression that combines two popular penalties, specifically the L1 and L2 penalty functions.
    GradientBoosting
    GradientBoosting: The technique of combining weak learners into a strong learner is called boosting; the gradient boosting algorithm works on this principle.
    DecisionTree
    DecisionTree: Decision Trees are a non-parametric supervised learning method used for both classification and regression tasks. The goal is to create a model that predicts the value of a target variable by learning simple decision rules inferred from the data features.
    KNN
    KNN: The K-nearest neighbors (KNN) algorithm uses 'feature similarity' to predict the values of new data points; a new data point is assigned a value based on how closely it matches the points in the training set.
    LassoLars
    LassoLars: Lasso model fit with Least Angle Regression, a.k.a. Lars. It is a linear model trained with an L1 prior as regularizer.
    SGD
    SGD: Stochastic gradient descent is an optimization algorithm often used in machine learning applications to find the model parameters that correspond to the best fit between predicted and actual outputs. It is an inexact but powerful technique.
    RandomForest
    RandomForest: Random forest is a supervised learning algorithm. The "forest" it builds is an ensemble of decision trees, usually trained with the “bagging” method. The general idea of the bagging method is that a combination of learning models increases the overall result.
    ExtremeRandomTrees
    ExtremeRandomTrees: Extreme Trees is an ensemble machine learning algorithm that combines the predictions from many decision trees. It is related to the widely used random forest algorithm.
    LightGBM
    LightGBM: LightGBM is a gradient boosting framework that uses tree-based learning algorithms.
    XGBoostRegressor
    XGBoostRegressor: Extreme Gradient Boosting Regressor is a supervised machine learning model that uses an ensemble of base learners.
    AUTO_ARIMA
    AutoArima: The Auto-Autoregressive Integrated Moving Average (ARIMA) model uses time-series data and statistical analysis to interpret the data and make future predictions. This model aims to explain data by using time series data on its past values and uses linear regression to make predictions.
    PROPHET
    Prophet: Prophet is a procedure for forecasting time series data based on an additive model where non-linear trends are fit with yearly, weekly, and daily seasonality, plus holiday effects. It works best with time series that have strong seasonal effects and several seasons of historical data. Prophet is robust to missing data and shifts in the trend, and typically handles outliers well.
    NAIVE
    Naive: The Naive forecasting model makes predictions by carrying forward the latest target value for each time-series in the training data.
    SEASONAL_NAIVE
    SeasonalNaive: The Seasonal Naive forecasting model makes predictions by carrying forward the latest season of target values for each time-series in the training data.
    AVERAGE
    Average: The Average forecasting model makes predictions by carrying forward the average of the target values for each time-series in the training data.
    SEASONAL_AVERAGE
    SeasonalAverage: The Seasonal Average forecasting model makes predictions by carrying forward the average value of the latest season of data for each time-series in the training data.
    EXPONENTIAL_SMOOTHING
    ExponentialSmoothing: Exponential smoothing is a time series forecasting method for univariate data that can be extended to support data with a systematic trend or seasonal component.
    ARIMAX
    Arimax: An Autoregressive Integrated Moving Average with Explanatory Variable (ARIMAX) model can be viewed as a multiple regression model with one or more autoregressive (AR) terms and/or one or more moving average (MA) terms. This method is suitable for forecasting when data is stationary or non-stationary and multivariate with any type of data pattern, i.e., level/trend/seasonality/cyclicity.
    TCN_FORECASTER
    TCNForecaster: Temporal Convolutional Networks Forecaster.
    ELASTIC_NET
    ElasticNet: Elastic net is a popular type of regularized linear regression that combines two popular penalties, specifically the L1 and L2 penalty functions.
    GRADIENT_BOOSTING
    GradientBoosting: The technique of combining weak learners into a strong learner is called boosting; the gradient boosting algorithm works on this principle.
    DECISION_TREE
    DecisionTree: Decision Trees are a non-parametric supervised learning method used for both classification and regression tasks. The goal is to create a model that predicts the value of a target variable by learning simple decision rules inferred from the data features.
    KNN
    KNN: The K-nearest neighbors (KNN) algorithm uses 'feature similarity' to predict the values of new data points; a new data point is assigned a value based on how closely it matches the points in the training set.
    LASSO_LARS
    LassoLars: Lasso model fit with Least Angle Regression, a.k.a. Lars. It is a linear model trained with an L1 prior as regularizer.
    SGD
    SGD: Stochastic gradient descent is an optimization algorithm often used in machine learning applications to find the model parameters that correspond to the best fit between predicted and actual outputs. It is an inexact but powerful technique.
    RANDOM_FOREST
    RandomForest: Random forest is a supervised learning algorithm. The "forest" it builds is an ensemble of decision trees, usually trained with the “bagging” method. The general idea of the bagging method is that a combination of learning models increases the overall result.
    EXTREME_RANDOM_TREES
    ExtremeRandomTrees: Extreme Trees is an ensemble machine learning algorithm that combines the predictions from many decision trees. It is related to the widely used random forest algorithm.
    LIGHT_GBM
    LightGBM: LightGBM is a gradient boosting framework that uses tree-based learning algorithms.
    XG_BOOST_REGRESSOR
    XGBoostRegressor: Extreme Gradient Boosting Regressor is a supervised machine learning model that uses an ensemble of base learners.
    "AutoArima"
    AutoArima: The Auto-Autoregressive Integrated Moving Average (ARIMA) model uses time-series data and statistical analysis to interpret the data and make future predictions. This model aims to explain data by using time series data on its past values and uses linear regression to make predictions.
    "Prophet"
    Prophet: Prophet is a procedure for forecasting time series data based on an additive model where non-linear trends are fit with yearly, weekly, and daily seasonality, plus holiday effects. It works best with time series that have strong seasonal effects and several seasons of historical data. Prophet is robust to missing data and shifts in the trend, and typically handles outliers well.
    "Naive"
    Naive: The Naive forecasting model makes predictions by carrying forward the latest target value for each time-series in the training data.
    "SeasonalNaive"
    SeasonalNaive: The Seasonal Naive forecasting model makes predictions by carrying forward the latest season of target values for each time-series in the training data.
    "Average"
    Average: The Average forecasting model makes predictions by carrying forward the average of the target values for each time-series in the training data.
    "SeasonalAverage"
    SeasonalAverage: The Seasonal Average forecasting model makes predictions by carrying forward the average value of the latest season of data for each time-series in the training data.
    "ExponentialSmoothing"
    ExponentialSmoothing: Exponential smoothing is a time series forecasting method for univariate data that can be extended to support data with a systematic trend or seasonal component.
    "Arimax"
    Arimax: An Autoregressive Integrated Moving Average with Explanatory Variable (ARIMAX) model can be viewed as a multiple regression model with one or more autoregressive (AR) terms and/or one or more moving average (MA) terms. This method is suitable for forecasting when data is stationary or non-stationary and multivariate with any type of data pattern, i.e., level/trend/seasonality/cyclicity.
    "TCNForecaster"
    TCNForecaster: Temporal Convolutional Networks Forecaster.
    "ElasticNet"
    ElasticNet: Elastic net is a popular type of regularized linear regression that combines two popular penalties, specifically the L1 and L2 penalty functions.
    "GradientBoosting"
    GradientBoosting: The technique of combining weak learners into a strong learner is called boosting; the gradient boosting algorithm works on this principle.
    "DecisionTree"
    DecisionTree: Decision Trees are a non-parametric supervised learning method used for both classification and regression tasks. The goal is to create a model that predicts the value of a target variable by learning simple decision rules inferred from the data features.
    "KNN"
    KNN: The K-nearest neighbors (KNN) algorithm uses 'feature similarity' to predict the values of new data points; a new data point is assigned a value based on how closely it matches the points in the training set.
    "LassoLars"
    LassoLars: Lasso model fit with Least Angle Regression, a.k.a. Lars. It is a linear model trained with an L1 prior as regularizer.
    "SGD"
    SGD: Stochastic gradient descent is an optimization algorithm often used in machine learning applications to find the model parameters that correspond to the best fit between predicted and actual outputs. It is an inexact but powerful technique.
    "RandomForest"
    RandomForest: Random forest is a supervised learning algorithm. The "forest" it builds is an ensemble of decision trees, usually trained with the “bagging” method. The general idea of the bagging method is that a combination of learning models increases the overall result.
    "ExtremeRandomTrees"
    ExtremeRandomTrees: Extreme Trees is an ensemble machine learning algorithm that combines the predictions from many decision trees. It is related to the widely used random forest algorithm.
    "LightGBM"
    LightGBM: LightGBM is a gradient boosting framework that uses tree-based learning algorithms.
    "XGBoostRegressor"
    XGBoostRegressor: Extreme Gradient Boosting Regressor is a supervised machine learning model that uses an ensemble of base learners.
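
    In the strongly typed SDKs, each of these members wraps the canonical string value shown beside it, so the SDK constant and the raw API string are interchangeable wherever a string | ForecastingModels value is accepted (a trivial C# fragment):

    using AzureNative = Pulumi.AzureNative;

    // Equivalent ways of referring to the same forecasting model.
    var viaEnum = AzureNative.MachineLearningServices.ForecastingModels.TCNForecaster;
    var viaString = "TCNForecaster";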

    ForecastingPrimaryMetrics, ForecastingPrimaryMetricsArgs

    SpearmanCorrelation
    SpearmanCorrelation: The Spearman's rank coefficient of correlation is a non-parametric measure of rank correlation.
    NormalizedRootMeanSquaredError
    NormalizedRootMeanSquaredError: The Normalized Root Mean Squared Error (NRMSE) normalizes the RMSE, facilitating comparison between models with different scales.
    R2Score
    R2Score: The R2 score is one of the performance evaluation measures for forecasting-based machine learning models.
    NormalizedMeanAbsoluteError
    NormalizedMeanAbsoluteError: The Normalized Mean Absolute Error (NMAE) is a validation metric to compare the Mean Absolute Error (MAE) of (time) series with different scales.
    ForecastingPrimaryMetricsSpearmanCorrelation
    SpearmanCorrelation: The Spearman's rank coefficient of correlation is a non-parametric measure of rank correlation.
    ForecastingPrimaryMetricsNormalizedRootMeanSquaredError
    NormalizedRootMeanSquaredError: The Normalized Root Mean Squared Error (NRMSE) normalizes the RMSE, facilitating comparison between models with different scales.
    ForecastingPrimaryMetricsR2Score
    R2Score: The R2 score is one of the performance evaluation measures for forecasting-based machine learning models.
    ForecastingPrimaryMetricsNormalizedMeanAbsoluteError
    NormalizedMeanAbsoluteError: The Normalized Mean Absolute Error (NMAE) is a validation metric to compare the Mean Absolute Error (MAE) of (time) series with different scales.
    SpearmanCorrelation
    SpearmanCorrelation: The Spearman's rank coefficient of correlation is a non-parametric measure of rank correlation.
    NormalizedRootMeanSquaredError
    NormalizedRootMeanSquaredError: The Normalized Root Mean Squared Error (NRMSE) normalizes the RMSE, facilitating comparison between models with different scales.
    R2Score
    R2Score: The R2 score is one of the performance evaluation measures for forecasting-based machine learning models.
    NormalizedMeanAbsoluteError
    NormalizedMeanAbsoluteError: The Normalized Mean Absolute Error (NMAE) is a validation metric to compare the Mean Absolute Error (MAE) of (time) series with different scales.
    SpearmanCorrelation
    SpearmanCorrelation: The Spearman's rank coefficient of correlation is a non-parametric measure of rank correlation.
    NormalizedRootMeanSquaredError
    NormalizedRootMeanSquaredError: The Normalized Root Mean Squared Error (NRMSE) normalizes the RMSE, facilitating comparison between models with different scales.
    R2Score
    R2Score: The R2 score is one of the performance evaluation measures for forecasting-based machine learning models.
    NormalizedMeanAbsoluteError
    NormalizedMeanAbsoluteError: The Normalized Mean Absolute Error (NMAE) is a validation metric to compare the Mean Absolute Error (MAE) of (time) series with different scales.
    SPEARMAN_CORRELATION
    SpearmanCorrelation: The Spearman's rank coefficient of correlation is a non-parametric measure of rank correlation.
    NORMALIZED_ROOT_MEAN_SQUARED_ERROR
    NormalizedRootMeanSquaredError: The Normalized Root Mean Squared Error (NRMSE) normalizes the RMSE, facilitating comparison between models with different scales.
    R2_SCORE
    R2Score: The R2 score is one of the performance evaluation measures for forecasting-based machine learning models.
    NORMALIZED_MEAN_ABSOLUTE_ERROR
    NormalizedMeanAbsoluteError: The Normalized Mean Absolute Error (NMAE) is a validation metric to compare the Mean Absolute Error (MAE) of (time) series with different scales.
    "SpearmanCorrelation"
    SpearmanCorrelation: The Spearman's rank coefficient of correlation is a non-parametric measure of rank correlation.
    "NormalizedRootMeanSquaredError"
    NormalizedRootMeanSquaredError: The Normalized Root Mean Squared Error (NRMSE) normalizes the RMSE, facilitating comparison between models with different scales.
    "R2Score"
    R2Score: The R2 score is one of the performance evaluation measures for forecasting-based machine learning models.
    "NormalizedMeanAbsoluteError"
    NormalizedMeanAbsoluteError: The Normalized Mean Absolute Error (NMAE) is a validation metric to compare the Mean Absolute Error (MAE) of (time) series with different scales.

    ForecastingResponse, ForecastingResponseArgs

    TrainingData Pulumi.AzureNative.MachineLearningServices.Inputs.MLTableJobInputResponse
    [Required] Training data input.
    CvSplitColumnNames List<string>
    Columns to use for CVSplit data.
    FeaturizationSettings Pulumi.AzureNative.MachineLearningServices.Inputs.TableVerticalFeaturizationSettingsResponse
    Featurization inputs needed for AutoML job.
    ForecastingSettings Pulumi.AzureNative.MachineLearningServices.Inputs.ForecastingSettingsResponse
    Forecasting task specific inputs.
    LimitSettings Pulumi.AzureNative.MachineLearningServices.Inputs.TableVerticalLimitSettingsResponse
    Execution constraints for AutoMLJob.
    LogVerbosity string
    Log verbosity for the job.
    NCrossValidations Pulumi.AzureNative.MachineLearningServices.Inputs.AutoNCrossValidationsResponse | Pulumi.AzureNative.MachineLearningServices.Inputs.CustomNCrossValidationsResponse
    Number of cross validation folds to be applied on training dataset when validation dataset is not provided.
    PrimaryMetric string
    Primary metric for forecasting task.
    TargetColumnName string
    Target column name: This is the prediction values column. Also known as the label column name in the context of classification tasks.
    TestData Pulumi.AzureNative.MachineLearningServices.Inputs.MLTableJobInputResponse
    Test data input.
    TestDataSize double
    The fraction of the test dataset that needs to be set aside for validation purposes. Values between (0.0, 1.0). Applied when validation dataset is not provided.
    TrainingSettings Pulumi.AzureNative.MachineLearningServices.Inputs.ForecastingTrainingSettingsResponse
    Inputs for training phase for an AutoML Job.
    ValidationData Pulumi.AzureNative.MachineLearningServices.Inputs.MLTableJobInputResponse
    Validation data inputs.
    ValidationDataSize double
    The fraction of the training dataset that needs to be set aside for validation purposes. Values between (0.0, 1.0). Applied when validation dataset is not provided.
    WeightColumnName string
    The name of the sample weight column. Automated ML supports a weighted column as an input, causing rows in the data to be weighted up or down.
    TrainingData MLTableJobInputResponse
    [Required] Training data input.
    CvSplitColumnNames []string
    Columns to use for CVSplit data.
    FeaturizationSettings TableVerticalFeaturizationSettingsResponse
    Featurization inputs needed for AutoML job.
    ForecastingSettings ForecastingSettingsResponse
    Forecasting task specific inputs.
    LimitSettings TableVerticalLimitSettingsResponse
    Execution constraints for AutoMLJob.
    LogVerbosity string
    Log verbosity for the job.
    NCrossValidations AutoNCrossValidationsResponse | CustomNCrossValidationsResponse
    Number of cross validation folds to be applied on training dataset when validation dataset is not provided.
    PrimaryMetric string
    Primary metric for forecasting task.
    TargetColumnName string
    Target column name: This is the prediction values column. Also known as the label column name in the context of classification tasks.
    TestData MLTableJobInputResponse
    Test data input.
    TestDataSize float64
    The fraction of the test dataset that needs to be set aside for validation purposes. Values between (0.0, 1.0). Applied when validation dataset is not provided.
    TrainingSettings ForecastingTrainingSettingsResponse
    Inputs for training phase for an AutoML Job.
    ValidationData MLTableJobInputResponse
    Validation data inputs.
    ValidationDataSize float64
    The fraction of the training dataset that needs to be set aside for validation purposes. Values between (0.0, 1.0). Applied when validation dataset is not provided.
    WeightColumnName string
    The name of the sample weight column. Automated ML supports a weighted column as an input, causing rows in the data to be weighted up or down.
    trainingData MLTableJobInputResponse
    [Required] Training data input.
    cvSplitColumnNames List<String>
    Columns to use for CVSplit data.
    featurizationSettings TableVerticalFeaturizationSettingsResponse
    Featurization inputs needed for AutoML job.
    forecastingSettings ForecastingSettingsResponse
    Forecasting task specific inputs.
    limitSettings TableVerticalLimitSettingsResponse
    Execution constraints for AutoMLJob.
    logVerbosity String
    Log verbosity for the job.
    nCrossValidations AutoNCrossValidationsResponse | CustomNCrossValidationsResponse
    Number of cross validation folds to be applied on training dataset when validation dataset is not provided.
    primaryMetric String
    Primary metric for forecasting task.
    targetColumnName String
    Target column name: This is the prediction values column. Also known as the label column name in the context of classification tasks.
    testData MLTableJobInputResponse
    Test data input.
    testDataSize Double
    The fraction of the test dataset that needs to be set aside for validation purposes. Values between (0.0, 1.0). Applied when validation dataset is not provided.
    trainingSettings ForecastingTrainingSettingsResponse
    Inputs for training phase for an AutoML Job.
    validationData MLTableJobInputResponse
    Validation data inputs.
    validationDataSize Double
    The fraction of the training dataset that needs to be set aside for validation purposes. Values between (0.0, 1.0). Applied when validation dataset is not provided.
    weightColumnName String
    The name of the sample weight column. Automated ML supports a weighted column as an input, causing rows in the data to be weighted up or down.
    trainingData MLTableJobInputResponse
    [Required] Training data input.