1. Packages
  2. Azure Native
  3. API Docs
  4. containerservice
  5. AgentPool
This is the latest version of Azure Native. Use the Azure Native v1 docs if using the v1 version of this package.
Azure Native v2.19.0 published on Tuesday, Nov 21, 2023 by Pulumi

azure-native.containerservice.AgentPool

Explore with Pulumi AI

azure-native logo
This is the latest version of Azure Native. Use the Azure Native v1 docs if using the v1 version of this package.
Azure Native v2.19.0 published on Tuesday, Nov 21, 2023 by Pulumi

    Agent Pool. Azure REST API version: 2023-04-01. Prior API version in Azure Native 1.x: 2021-03-01.

    Other available API versions: 2019-02-01, 2019-04-01, 2020-06-01, 2021-02-01, 2021-08-01, 2022-04-02-preview, 2023-05-02-preview, 2023-06-01, 2023-06-02-preview, 2023-07-01, 2023-07-02-preview, 2023-08-01, 2023-08-02-preview, 2023-09-01, 2023-09-02-preview, 2023-10-01, 2023-10-02-preview.

    Example Usage

    Create Agent Pool using an agent pool snapshot

    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using AzureNative = Pulumi.AzureNative;
    
    return await Deployment.RunAsync(() => 
    {
        var agentPool = new AzureNative.ContainerService.AgentPool("agentPool", new()
        {
            AgentPoolName = "agentpool1",
            Count = 3,
            CreationData = new AzureNative.ContainerService.Inputs.CreationDataArgs
            {
                SourceResourceId = "/subscriptions/subid1/resourceGroups/rg1/providers/Microsoft.ContainerService/snapshots/snapshot1",
            },
            EnableFIPS = true,
            OrchestratorVersion = "",
            OsType = "Linux",
            ResourceGroupName = "rg1",
            ResourceName = "clustername1",
            VmSize = "Standard_DS2_v2",
        });
    
    });
    
    package main
    
    import (
    	"github.com/pulumi/pulumi-azure-native-sdk/containerservice/v2"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		_, err := containerservice.NewAgentPool(ctx, "agentPool", &containerservice.AgentPoolArgs{
    			AgentPoolName: pulumi.String("agentpool1"),
    			Count:         pulumi.Int(3),
    			CreationData: &containerservice.CreationDataArgs{
    				SourceResourceId: pulumi.String("/subscriptions/subid1/resourceGroups/rg1/providers/Microsoft.ContainerService/snapshots/snapshot1"),
    			},
    			EnableFIPS:          pulumi.Bool(true),
    			OrchestratorVersion: pulumi.String(""),
    			OsType:              pulumi.String("Linux"),
    			ResourceGroupName:   pulumi.String("rg1"),
    			ResourceName:        pulumi.String("clustername1"),
    			VmSize:              pulumi.String("Standard_DS2_v2"),
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.azurenative.containerservice.AgentPool;
    import com.pulumi.azurenative.containerservice.AgentPoolArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            var agentPool = new AgentPool("agentPool", AgentPoolArgs.builder()        
                .agentPoolName("agentpool1")
                .count(3)
                .creationData(Map.of("sourceResourceId", "/subscriptions/subid1/resourceGroups/rg1/providers/Microsoft.ContainerService/snapshots/snapshot1"))
                .enableFIPS(true)
                .orchestratorVersion("")
                .osType("Linux")
                .resourceGroupName("rg1")
                .resourceName("clustername1")
                .vmSize("Standard_DS2_v2")
                .build());
    
        }
    }
    
    import pulumi
    import pulumi_azure_native as azure_native
    
    agent_pool = azure_native.containerservice.AgentPool("agentPool",
        agent_pool_name="agentpool1",
        count=3,
        creation_data=azure_native.containerservice.CreationDataArgs(
            source_resource_id="/subscriptions/subid1/resourceGroups/rg1/providers/Microsoft.ContainerService/snapshots/snapshot1",
        ),
        enable_fips=True,
        orchestrator_version="",
        os_type="Linux",
        resource_group_name="rg1",
        resource_name_="clustername1",
        vm_size="Standard_DS2_v2")
    
    import * as pulumi from "@pulumi/pulumi";
    import * as azure_native from "@pulumi/azure-native";
    
    const agentPool = new azure_native.containerservice.AgentPool("agentPool", {
        agentPoolName: "agentpool1",
        count: 3,
        creationData: {
            sourceResourceId: "/subscriptions/subid1/resourceGroups/rg1/providers/Microsoft.ContainerService/snapshots/snapshot1",
        },
        enableFIPS: true,
        orchestratorVersion: "",
        osType: "Linux",
        resourceGroupName: "rg1",
        resourceName: "clustername1",
        vmSize: "Standard_DS2_v2",
    });
    
    resources:
      agentPool:
        type: azure-native:containerservice:AgentPool
        properties:
          agentPoolName: agentpool1
          count: 3
          creationData:
            sourceResourceId: /subscriptions/subid1/resourceGroups/rg1/providers/Microsoft.ContainerService/snapshots/snapshot1
          enableFIPS: true
          # Explicit empty string (a bare key would be YAML null); matches the "" used in the other language examples.
          orchestratorVersion: ""
          osType: Linux
          resourceGroupName: rg1
          resourceName: clustername1
          vmSize: Standard_DS2_v2
    

    Create Agent Pool with Dedicated Host Group

    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using AzureNative = Pulumi.AzureNative;
    
    return await Deployment.RunAsync(() => 
    {
        var agentPool = new AzureNative.ContainerService.AgentPool("agentPool", new()
        {
            AgentPoolName = "agentpool1",
            Count = 3,
            HostGroupID = "/subscriptions/subid1/resourcegroups/rg/providers/Microsoft.Compute/hostGroups/hostgroup1",
            OrchestratorVersion = "",
            OsType = "Linux",
            ResourceGroupName = "rg1",
            ResourceName = "clustername1",
            VmSize = "Standard_DS2_v2",
        });
    
    });
    
    package main
    
    import (
    	"github.com/pulumi/pulumi-azure-native-sdk/containerservice/v2"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		_, err := containerservice.NewAgentPool(ctx, "agentPool", &containerservice.AgentPoolArgs{
    			AgentPoolName:       pulumi.String("agentpool1"),
    			Count:               pulumi.Int(3),
    			HostGroupID:         pulumi.String("/subscriptions/subid1/resourcegroups/rg/providers/Microsoft.Compute/hostGroups/hostgroup1"),
    			OrchestratorVersion: pulumi.String(""),
    			OsType:              pulumi.String("Linux"),
    			ResourceGroupName:   pulumi.String("rg1"),
    			ResourceName:        pulumi.String("clustername1"),
    			VmSize:              pulumi.String("Standard_DS2_v2"),
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.azurenative.containerservice.AgentPool;
    import com.pulumi.azurenative.containerservice.AgentPoolArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            var agentPool = new AgentPool("agentPool", AgentPoolArgs.builder()        
                .agentPoolName("agentpool1")
                .count(3)
                .hostGroupID("/subscriptions/subid1/resourcegroups/rg/providers/Microsoft.Compute/hostGroups/hostgroup1")
                .orchestratorVersion("")
                .osType("Linux")
                .resourceGroupName("rg1")
                .resourceName("clustername1")
                .vmSize("Standard_DS2_v2")
                .build());
    
        }
    }
    
    import pulumi
    import pulumi_azure_native as azure_native
    
    agent_pool = azure_native.containerservice.AgentPool("agentPool",
        agent_pool_name="agentpool1",
        count=3,
        host_group_id="/subscriptions/subid1/resourcegroups/rg/providers/Microsoft.Compute/hostGroups/hostgroup1",
        orchestrator_version="",
        os_type="Linux",
        resource_group_name="rg1",
        resource_name_="clustername1",
        vm_size="Standard_DS2_v2")
    
    import * as pulumi from "@pulumi/pulumi";
    import * as azure_native from "@pulumi/azure-native";
    
    const agentPool = new azure_native.containerservice.AgentPool("agentPool", {
        agentPoolName: "agentpool1",
        count: 3,
        hostGroupID: "/subscriptions/subid1/resourcegroups/rg/providers/Microsoft.Compute/hostGroups/hostgroup1",
        orchestratorVersion: "",
        osType: "Linux",
        resourceGroupName: "rg1",
        resourceName: "clustername1",
        vmSize: "Standard_DS2_v2",
    });
    
    resources:
      agentPool:
        type: azure-native:containerservice:AgentPool
        properties:
          agentPoolName: agentpool1
          count: 3
          hostGroupID: /subscriptions/subid1/resourcegroups/rg/providers/Microsoft.Compute/hostGroups/hostgroup1
          # Explicit empty string (a bare key would be YAML null); matches the "" used in the other language examples.
          orchestratorVersion: ""
          osType: Linux
          resourceGroupName: rg1
          resourceName: clustername1
          vmSize: Standard_DS2_v2
    

    Create Agent Pool with EncryptionAtHost enabled

    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using AzureNative = Pulumi.AzureNative;
    
    return await Deployment.RunAsync(() => 
    {
        var agentPool = new AzureNative.ContainerService.AgentPool("agentPool", new()
        {
            AgentPoolName = "agentpool1",
            Count = 3,
            EnableEncryptionAtHost = true,
            OrchestratorVersion = "",
            OsType = "Linux",
            ResourceGroupName = "rg1",
            ResourceName = "clustername1",
            VmSize = "Standard_DS2_v2",
        });
    
    });
    
    package main
    
    import (
    	"github.com/pulumi/pulumi-azure-native-sdk/containerservice/v2"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		_, err := containerservice.NewAgentPool(ctx, "agentPool", &containerservice.AgentPoolArgs{
    			AgentPoolName:          pulumi.String("agentpool1"),
    			Count:                  pulumi.Int(3),
    			EnableEncryptionAtHost: pulumi.Bool(true),
    			OrchestratorVersion:    pulumi.String(""),
    			OsType:                 pulumi.String("Linux"),
    			ResourceGroupName:      pulumi.String("rg1"),
    			ResourceName:           pulumi.String("clustername1"),
    			VmSize:                 pulumi.String("Standard_DS2_v2"),
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.azurenative.containerservice.AgentPool;
    import com.pulumi.azurenative.containerservice.AgentPoolArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            var agentPool = new AgentPool("agentPool", AgentPoolArgs.builder()        
                .agentPoolName("agentpool1")
                .count(3)
                .enableEncryptionAtHost(true)
                .orchestratorVersion("")
                .osType("Linux")
                .resourceGroupName("rg1")
                .resourceName("clustername1")
                .vmSize("Standard_DS2_v2")
                .build());
    
        }
    }
    
    import pulumi
    import pulumi_azure_native as azure_native
    
    agent_pool = azure_native.containerservice.AgentPool("agentPool",
        agent_pool_name="agentpool1",
        count=3,
        enable_encryption_at_host=True,
        orchestrator_version="",
        os_type="Linux",
        resource_group_name="rg1",
        resource_name_="clustername1",
        vm_size="Standard_DS2_v2")
    
    import * as pulumi from "@pulumi/pulumi";
    import * as azure_native from "@pulumi/azure-native";
    
    const agentPool = new azure_native.containerservice.AgentPool("agentPool", {
        agentPoolName: "agentpool1",
        count: 3,
        enableEncryptionAtHost: true,
        orchestratorVersion: "",
        osType: "Linux",
        resourceGroupName: "rg1",
        resourceName: "clustername1",
        vmSize: "Standard_DS2_v2",
    });
    
    resources:
      agentPool:
        type: azure-native:containerservice:AgentPool
        properties:
          agentPoolName: agentpool1
          count: 3
          enableEncryptionAtHost: true
          # Explicit empty string (a bare key would be YAML null); matches the "" used in the other language examples.
          orchestratorVersion: ""
          osType: Linux
          resourceGroupName: rg1
          resourceName: clustername1
          vmSize: Standard_DS2_v2
    

    Create Agent Pool with Ephemeral OS Disk

    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using AzureNative = Pulumi.AzureNative;
    
    return await Deployment.RunAsync(() => 
    {
        var agentPool = new AzureNative.ContainerService.AgentPool("agentPool", new()
        {
            AgentPoolName = "agentpool1",
            Count = 3,
            OrchestratorVersion = "",
            OsDiskSizeGB = 64,
            OsDiskType = "Ephemeral",
            OsType = "Linux",
            ResourceGroupName = "rg1",
            ResourceName = "clustername1",
            VmSize = "Standard_DS2_v2",
        });
    
    });
    
    package main
    
    import (
    	"github.com/pulumi/pulumi-azure-native-sdk/containerservice/v2"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		_, err := containerservice.NewAgentPool(ctx, "agentPool", &containerservice.AgentPoolArgs{
    			AgentPoolName:       pulumi.String("agentpool1"),
    			Count:               pulumi.Int(3),
    			OrchestratorVersion: pulumi.String(""),
    			OsDiskSizeGB:        pulumi.Int(64),
    			OsDiskType:          pulumi.String("Ephemeral"),
    			OsType:              pulumi.String("Linux"),
    			ResourceGroupName:   pulumi.String("rg1"),
    			ResourceName:        pulumi.String("clustername1"),
    			VmSize:              pulumi.String("Standard_DS2_v2"),
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.azurenative.containerservice.AgentPool;
    import com.pulumi.azurenative.containerservice.AgentPoolArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            var agentPool = new AgentPool("agentPool", AgentPoolArgs.builder()        
                .agentPoolName("agentpool1")
                .count(3)
                .orchestratorVersion("")
                .osDiskSizeGB(64)
                .osDiskType("Ephemeral")
                .osType("Linux")
                .resourceGroupName("rg1")
                .resourceName("clustername1")
                .vmSize("Standard_DS2_v2")
                .build());
    
        }
    }
    
    import pulumi
    import pulumi_azure_native as azure_native
    
    agent_pool = azure_native.containerservice.AgentPool("agentPool",
        agent_pool_name="agentpool1",
        count=3,
        orchestrator_version="",
        os_disk_size_gb=64,
        os_disk_type="Ephemeral",
        os_type="Linux",
        resource_group_name="rg1",
        resource_name_="clustername1",
        vm_size="Standard_DS2_v2")
    
    import * as pulumi from "@pulumi/pulumi";
    import * as azure_native from "@pulumi/azure-native";
    
    const agentPool = new azure_native.containerservice.AgentPool("agentPool", {
        agentPoolName: "agentpool1",
        count: 3,
        orchestratorVersion: "",
        osDiskSizeGB: 64,
        osDiskType: "Ephemeral",
        osType: "Linux",
        resourceGroupName: "rg1",
        resourceName: "clustername1",
        vmSize: "Standard_DS2_v2",
    });
    
    resources:
      agentPool:
        type: azure-native:containerservice:AgentPool
        properties:
          agentPoolName: agentpool1
          count: 3
          # Explicit empty string (a bare key would be YAML null); matches the "" used in the other language examples.
          orchestratorVersion: ""
          osDiskSizeGB: 64
          osDiskType: Ephemeral
          osType: Linux
          resourceGroupName: rg1
          resourceName: clustername1
          vmSize: Standard_DS2_v2
    

    Create Agent Pool with FIPS enabled OS

    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using AzureNative = Pulumi.AzureNative;
    
    return await Deployment.RunAsync(() => 
    {
        var agentPool = new AzureNative.ContainerService.AgentPool("agentPool", new()
        {
            AgentPoolName = "agentpool1",
            Count = 3,
            EnableFIPS = true,
            OrchestratorVersion = "",
            OsType = "Linux",
            ResourceGroupName = "rg1",
            ResourceName = "clustername1",
            VmSize = "Standard_DS2_v2",
        });
    
    });
    
    package main
    
    import (
    	"github.com/pulumi/pulumi-azure-native-sdk/containerservice/v2"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		_, err := containerservice.NewAgentPool(ctx, "agentPool", &containerservice.AgentPoolArgs{
    			AgentPoolName:       pulumi.String("agentpool1"),
    			Count:               pulumi.Int(3),
    			EnableFIPS:          pulumi.Bool(true),
    			OrchestratorVersion: pulumi.String(""),
    			OsType:              pulumi.String("Linux"),
    			ResourceGroupName:   pulumi.String("rg1"),
    			ResourceName:        pulumi.String("clustername1"),
    			VmSize:              pulumi.String("Standard_DS2_v2"),
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.azurenative.containerservice.AgentPool;
    import com.pulumi.azurenative.containerservice.AgentPoolArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            var agentPool = new AgentPool("agentPool", AgentPoolArgs.builder()        
                .agentPoolName("agentpool1")
                .count(3)
                .enableFIPS(true)
                .orchestratorVersion("")
                .osType("Linux")
                .resourceGroupName("rg1")
                .resourceName("clustername1")
                .vmSize("Standard_DS2_v2")
                .build());
    
        }
    }
    
    import pulumi
    import pulumi_azure_native as azure_native
    
    agent_pool = azure_native.containerservice.AgentPool("agentPool",
        agent_pool_name="agentpool1",
        count=3,
        enable_fips=True,
        orchestrator_version="",
        os_type="Linux",
        resource_group_name="rg1",
        resource_name_="clustername1",
        vm_size="Standard_DS2_v2")
    
    import * as pulumi from "@pulumi/pulumi";
    import * as azure_native from "@pulumi/azure-native";
    
    const agentPool = new azure_native.containerservice.AgentPool("agentPool", {
        agentPoolName: "agentpool1",
        count: 3,
        enableFIPS: true,
        orchestratorVersion: "",
        osType: "Linux",
        resourceGroupName: "rg1",
        resourceName: "clustername1",
        vmSize: "Standard_DS2_v2",
    });
    
    resources:
      agentPool:
        type: azure-native:containerservice:AgentPool
        properties:
          agentPoolName: agentpool1
          count: 3
          enableFIPS: true
          # Explicit empty string (a bare key would be YAML null); matches the "" used in the other language examples.
          orchestratorVersion: ""
          osType: Linux
          resourceGroupName: rg1
          resourceName: clustername1
          vmSize: Standard_DS2_v2
    

    Create Agent Pool with GPUMIG

    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using AzureNative = Pulumi.AzureNative;
    
    return await Deployment.RunAsync(() => 
    {
        var agentPool = new AzureNative.ContainerService.AgentPool("agentPool", new()
        {
            AgentPoolName = "agentpool1",
            Count = 3,
            GpuInstanceProfile = "MIG2g",
            KubeletConfig = new AzureNative.ContainerService.Inputs.KubeletConfigArgs
            {
                AllowedUnsafeSysctls = new[]
                {
                    "kernel.msg*",
                    "net.core.somaxconn",
                },
                CpuCfsQuota = true,
                CpuCfsQuotaPeriod = "200ms",
                CpuManagerPolicy = "static",
                FailSwapOn = false,
                ImageGcHighThreshold = 90,
                ImageGcLowThreshold = 70,
                TopologyManagerPolicy = "best-effort",
            },
            LinuxOSConfig = new AzureNative.ContainerService.Inputs.LinuxOSConfigArgs
            {
                SwapFileSizeMB = 1500,
                Sysctls = new AzureNative.ContainerService.Inputs.SysctlConfigArgs
                {
                    KernelThreadsMax = 99999,
                    NetCoreWmemDefault = 12345,
                    NetIpv4IpLocalPortRange = "20000 60000",
                    NetIpv4TcpTwReuse = true,
                },
                TransparentHugePageDefrag = "madvise",
                TransparentHugePageEnabled = "always",
            },
            OrchestratorVersion = "",
            OsType = "Linux",
            ResourceGroupName = "rg1",
            ResourceName = "clustername1",
            VmSize = "Standard_ND96asr_v4",
        });
    
    });
    
    package main
    
    import (
    	"github.com/pulumi/pulumi-azure-native-sdk/containerservice/v2"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		_, err := containerservice.NewAgentPool(ctx, "agentPool", &containerservice.AgentPoolArgs{
    			AgentPoolName:      pulumi.String("agentpool1"),
    			Count:              pulumi.Int(3),
    			GpuInstanceProfile: pulumi.String("MIG2g"),
    			KubeletConfig: &containerservice.KubeletConfigArgs{
    				AllowedUnsafeSysctls: pulumi.StringArray{
    					pulumi.String("kernel.msg*"),
    					pulumi.String("net.core.somaxconn"),
    				},
    				CpuCfsQuota:           pulumi.Bool(true),
    				CpuCfsQuotaPeriod:     pulumi.String("200ms"),
    				CpuManagerPolicy:      pulumi.String("static"),
    				FailSwapOn:            pulumi.Bool(false),
    				ImageGcHighThreshold:  pulumi.Int(90),
    				ImageGcLowThreshold:   pulumi.Int(70),
    				TopologyManagerPolicy: pulumi.String("best-effort"),
    			},
    			LinuxOSConfig: containerservice.LinuxOSConfigResponse{
    				SwapFileSizeMB: pulumi.Int(1500),
    				Sysctls: &containerservice.SysctlConfigArgs{
    					KernelThreadsMax:        pulumi.Int(99999),
    					NetCoreWmemDefault:      pulumi.Int(12345),
    					NetIpv4IpLocalPortRange: pulumi.String("20000 60000"),
    					NetIpv4TcpTwReuse:       pulumi.Bool(true),
    				},
    				TransparentHugePageDefrag:  pulumi.String("madvise"),
    				TransparentHugePageEnabled: pulumi.String("always"),
    			},
    			OrchestratorVersion: pulumi.String(""),
    			OsType:              pulumi.String("Linux"),
    			ResourceGroupName:   pulumi.String("rg1"),
    			ResourceName:        pulumi.String("clustername1"),
    			VmSize:              pulumi.String("Standard_ND96asr_v4"),
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.azurenative.containerservice.AgentPool;
    import com.pulumi.azurenative.containerservice.AgentPoolArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            var agentPool = new AgentPool("agentPool", AgentPoolArgs.builder()
                .agentPoolName("agentpool1")
                .count(3)
                .gpuInstanceProfile("MIG2g")
                .kubeletConfig(Map.ofEntries(
                    // Map.entry takes exactly (key, value); wrap the list of sysctls in List.of(...)
                    Map.entry("allowedUnsafeSysctls", List.of(
                        "kernel.msg*",
                        "net.core.somaxconn")),
                    Map.entry("cpuCfsQuota", true),
                    Map.entry("cpuCfsQuotaPeriod", "200ms"),
                    Map.entry("cpuManagerPolicy", "static"),
                    Map.entry("failSwapOn", false),
                    Map.entry("imageGcHighThreshold", 90),
                    Map.entry("imageGcLowThreshold", 70),
                    Map.entry("topologyManagerPolicy", "best-effort")
                ))
                .linuxOSConfig(Map.ofEntries(
                    Map.entry("swapFileSizeMB", 1500),
                    Map.entry("sysctls", Map.ofEntries(
                        Map.entry("kernelThreadsMax", 99999),
                        Map.entry("netCoreWmemDefault", 12345),
                        Map.entry("netIpv4IpLocalPortRange", "20000 60000"),
                        Map.entry("netIpv4TcpTwReuse", true)
                    )),
                    Map.entry("transparentHugePageDefrag", "madvise"),
                    Map.entry("transparentHugePageEnabled", "always")
                ))
                .orchestratorVersion("")
                .osType("Linux")
                .resourceGroupName("rg1")
                .resourceName("clustername1")
                .vmSize("Standard_ND96asr_v4")
                .build());
    
        }
    }
    
    import pulumi
    import pulumi_azure_native as azure_native
    
    agent_pool = azure_native.containerservice.AgentPool("agentPool",
        agent_pool_name="agentpool1",
        count=3,
        gpu_instance_profile="MIG2g",
        kubelet_config=azure_native.containerservice.KubeletConfigArgs(
            allowed_unsafe_sysctls=[
                "kernel.msg*",
                "net.core.somaxconn",
            ],
            cpu_cfs_quota=True,
            cpu_cfs_quota_period="200ms",
            cpu_manager_policy="static",
            fail_swap_on=False,
            image_gc_high_threshold=90,
            image_gc_low_threshold=70,
            topology_manager_policy="best-effort",
        ),
        # Input type is LinuxOSConfigArgs (the *ResponseArgs type is output-only),
        # consistent with KubeletConfigArgs/SysctlConfigArgs used in this example.
        linux_os_config=azure_native.containerservice.LinuxOSConfigArgs(
            swap_file_size_mb=1500,
            sysctls=azure_native.containerservice.SysctlConfigArgs(
                kernel_threads_max=99999,
                net_core_wmem_default=12345,
                net_ipv4_ip_local_port_range="20000 60000",
                net_ipv4_tcp_tw_reuse=True,
            ),
            transparent_huge_page_defrag="madvise",
            transparent_huge_page_enabled="always",
        ),
        orchestrator_version="",
        os_type="Linux",
        resource_group_name="rg1",
        resource_name_="clustername1",
        vm_size="Standard_ND96asr_v4")
    
    import * as pulumi from "@pulumi/pulumi";
    import * as azure_native from "@pulumi/azure-native";
    
    const agentPool = new azure_native.containerservice.AgentPool("agentPool", {
        agentPoolName: "agentpool1",
        count: 3,
        gpuInstanceProfile: "MIG2g",
        kubeletConfig: {
            allowedUnsafeSysctls: [
                "kernel.msg*",
                "net.core.somaxconn",
            ],
            cpuCfsQuota: true,
            cpuCfsQuotaPeriod: "200ms",
            cpuManagerPolicy: "static",
            failSwapOn: false,
            imageGcHighThreshold: 90,
            imageGcLowThreshold: 70,
            topologyManagerPolicy: "best-effort",
        },
        linuxOSConfig: {
            swapFileSizeMB: 1500,
            sysctls: {
                kernelThreadsMax: 99999,
                netCoreWmemDefault: 12345,
                netIpv4IpLocalPortRange: "20000 60000",
                netIpv4TcpTwReuse: true,
            },
            transparentHugePageDefrag: "madvise",
            transparentHugePageEnabled: "always",
        },
        orchestratorVersion: "",
        osType: "Linux",
        resourceGroupName: "rg1",
        resourceName: "clustername1",
        vmSize: "Standard_ND96asr_v4",
    });
    
    resources:
      agentPool:
        type: azure-native:containerservice:AgentPool
        properties:
          agentPoolName: agentpool1
          count: 3
          gpuInstanceProfile: MIG2g
          kubeletConfig:
            allowedUnsafeSysctls:
              - kernel.msg*
              - net.core.somaxconn
            cpuCfsQuota: true
            cpuCfsQuotaPeriod: 200ms
            cpuManagerPolicy: static
            failSwapOn: false
            imageGcHighThreshold: 90
            imageGcLowThreshold: 70
            topologyManagerPolicy: best-effort
          linuxOSConfig:
            swapFileSizeMB: 1500
            sysctls:
              kernelThreadsMax: 99999
              netCoreWmemDefault: 12345
              netIpv4IpLocalPortRange: 20000 60000
              netIpv4TcpTwReuse: true
            transparentHugePageDefrag: madvise
            transparentHugePageEnabled: always
          # Explicit empty string (a bare key would be YAML null); matches the "" used in the other language examples.
          orchestratorVersion: ""
          osType: Linux
          resourceGroupName: rg1
          resourceName: clustername1
          vmSize: Standard_ND96asr_v4
    

    Create Agent Pool with Krustlet and the WASI runtime

    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using AzureNative = Pulumi.AzureNative;
    
    return await Deployment.RunAsync(() => 
    {
        var agentPool = new AzureNative.ContainerService.AgentPool("agentPool", new()
        {
            AgentPoolName = "agentpool1",
            Count = 3,
            Mode = "User",
            OrchestratorVersion = "",
            OsDiskSizeGB = 64,
            OsType = "Linux",
            ResourceGroupName = "rg1",
            ResourceName = "clustername1",
            VmSize = "Standard_DS2_v2",
            WorkloadRuntime = "WasmWasi",
        });
    
    });
    
    package main
    
    import (
    	"github.com/pulumi/pulumi-azure-native-sdk/containerservice/v2"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		_, err := containerservice.NewAgentPool(ctx, "agentPool", &containerservice.AgentPoolArgs{
    			AgentPoolName:       pulumi.String("agentpool1"),
    			Count:               pulumi.Int(3),
    			Mode:                pulumi.String("User"),
    			OrchestratorVersion: pulumi.String(""),
    			OsDiskSizeGB:        pulumi.Int(64),
    			OsType:              pulumi.String("Linux"),
    			ResourceGroupName:   pulumi.String("rg1"),
    			ResourceName:        pulumi.String("clustername1"),
    			VmSize:              pulumi.String("Standard_DS2_v2"),
    			WorkloadRuntime:     pulumi.String("WasmWasi"),
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.azurenative.containerservice.AgentPool;
    import com.pulumi.azurenative.containerservice.AgentPoolArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            var agentPool = new AgentPool("agentPool", AgentPoolArgs.builder()        
                .agentPoolName("agentpool1")
                .count(3)
                .mode("User")
                .orchestratorVersion("")
                .osDiskSizeGB(64)
                .osType("Linux")
                .resourceGroupName("rg1")
                .resourceName("clustername1")
                .vmSize("Standard_DS2_v2")
                .workloadRuntime("WasmWasi")
                .build());
    
        }
    }
    
    import pulumi
    import pulumi_azure_native as azure_native
    
    agent_pool = azure_native.containerservice.AgentPool("agentPool",
        agent_pool_name="agentpool1",
        count=3,
        mode="User",
        orchestrator_version="",
        os_disk_size_gb=64,
        os_type="Linux",
        resource_group_name="rg1",
        resource_name_="clustername1",
        vm_size="Standard_DS2_v2",
        workload_runtime="WasmWasi")
    
    import * as pulumi from "@pulumi/pulumi";
    import * as azure_native from "@pulumi/azure-native";
    
    const agentPool = new azure_native.containerservice.AgentPool("agentPool", {
        agentPoolName: "agentpool1",
        count: 3,
        mode: "User",
        orchestratorVersion: "",
        osDiskSizeGB: 64,
        osType: "Linux",
        resourceGroupName: "rg1",
        resourceName: "clustername1",
        vmSize: "Standard_DS2_v2",
        workloadRuntime: "WasmWasi",
    });
    
    resources:
      agentPool:
        type: azure-native:containerservice:AgentPool
        properties:
          agentPoolName: agentpool1
          count: 3
          mode: User
          orchestratorVersion: ""
          osDiskSizeGB: 64
          osType: Linux
          resourceGroupName: rg1
          resourceName: clustername1
          vmSize: Standard_DS2_v2
          workloadRuntime: WasmWasi
    

    Create Agent Pool with KubeletConfig and LinuxOSConfig

    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using AzureNative = Pulumi.AzureNative;
    
    return await Deployment.RunAsync(() => 
    {
        var agentPool = new AzureNative.ContainerService.AgentPool("agentPool", new()
        {
            AgentPoolName = "agentpool1",
            Count = 3,
            KubeletConfig = new AzureNative.ContainerService.Inputs.KubeletConfigArgs
            {
                AllowedUnsafeSysctls = new[]
                {
                    "kernel.msg*",
                    "net.core.somaxconn",
                },
                CpuCfsQuota = true,
                CpuCfsQuotaPeriod = "200ms",
                CpuManagerPolicy = "static",
                FailSwapOn = false,
                ImageGcHighThreshold = 90,
                ImageGcLowThreshold = 70,
                TopologyManagerPolicy = "best-effort",
            },
            LinuxOSConfig = new AzureNative.ContainerService.Inputs.LinuxOSConfigArgs
            {
                SwapFileSizeMB = 1500,
                Sysctls = new AzureNative.ContainerService.Inputs.SysctlConfigArgs
                {
                    KernelThreadsMax = 99999,
                    NetCoreWmemDefault = 12345,
                    NetIpv4IpLocalPortRange = "20000 60000",
                    NetIpv4TcpTwReuse = true,
                },
                TransparentHugePageDefrag = "madvise",
                TransparentHugePageEnabled = "always",
            },
            OrchestratorVersion = "",
            OsType = "Linux",
            ResourceGroupName = "rg1",
            ResourceName = "clustername1",
            VmSize = "Standard_DS2_v2",
        });
    
    });
    
    package main
    
    import (
    	"github.com/pulumi/pulumi-azure-native-sdk/containerservice/v2"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		_, err := containerservice.NewAgentPool(ctx, "agentPool", &containerservice.AgentPoolArgs{
    			AgentPoolName: pulumi.String("agentpool1"),
    			Count:         pulumi.Int(3),
    			KubeletConfig: &containerservice.KubeletConfigArgs{
    				AllowedUnsafeSysctls: pulumi.StringArray{
    					pulumi.String("kernel.msg*"),
    					pulumi.String("net.core.somaxconn"),
    				},
    				CpuCfsQuota:           pulumi.Bool(true),
    				CpuCfsQuotaPeriod:     pulumi.String("200ms"),
    				CpuManagerPolicy:      pulumi.String("static"),
    				FailSwapOn:            pulumi.Bool(false),
    				ImageGcHighThreshold:  pulumi.Int(90),
    				ImageGcLowThreshold:   pulumi.Int(70),
    				TopologyManagerPolicy: pulumi.String("best-effort"),
    			},
    			LinuxOSConfig: &containerservice.LinuxOSConfigArgs{
    				SwapFileSizeMB: pulumi.Int(1500),
    				Sysctls: &containerservice.SysctlConfigArgs{
    					KernelThreadsMax:        pulumi.Int(99999),
    					NetCoreWmemDefault:      pulumi.Int(12345),
    					NetIpv4IpLocalPortRange: pulumi.String("20000 60000"),
    					NetIpv4TcpTwReuse:       pulumi.Bool(true),
    				},
    				TransparentHugePageDefrag:  pulumi.String("madvise"),
    				TransparentHugePageEnabled: pulumi.String("always"),
    			},
    			OrchestratorVersion: pulumi.String(""),
    			OsType:              pulumi.String("Linux"),
    			ResourceGroupName:   pulumi.String("rg1"),
    			ResourceName:        pulumi.String("clustername1"),
    			VmSize:              pulumi.String("Standard_DS2_v2"),
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.azurenative.containerservice.AgentPool;
    import com.pulumi.azurenative.containerservice.AgentPoolArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            var agentPool = new AgentPool("agentPool", AgentPoolArgs.builder()        
                .agentPoolName("agentpool1")
                .count(3)
                .kubeletConfig(Map.ofEntries(
                Map.entry("allowedUnsafeSysctls", List.of(
                    "kernel.msg*",
                    "net.core.somaxconn")),
                    Map.entry("cpuCfsQuota", true),
                    Map.entry("cpuCfsQuotaPeriod", "200ms"),
                    Map.entry("cpuManagerPolicy", "static"),
                    Map.entry("failSwapOn", false),
                    Map.entry("imageGcHighThreshold", 90),
                    Map.entry("imageGcLowThreshold", 70),
                    Map.entry("topologyManagerPolicy", "best-effort")
                ))
                .linuxOSConfig(Map.ofEntries(
                    Map.entry("swapFileSizeMB", 1500),
                    Map.entry("sysctls", Map.ofEntries(
                        Map.entry("kernelThreadsMax", 99999),
                        Map.entry("netCoreWmemDefault", 12345),
                        Map.entry("netIpv4IpLocalPortRange", "20000 60000"),
                        Map.entry("netIpv4TcpTwReuse", true)
                    )),
                    Map.entry("transparentHugePageDefrag", "madvise"),
                    Map.entry("transparentHugePageEnabled", "always")
                ))
                .orchestratorVersion("")
                .osType("Linux")
                .resourceGroupName("rg1")
                .resourceName("clustername1")
                .vmSize("Standard_DS2_v2")
                .build());
    
        }
    }
    
    import pulumi
    import pulumi_azure_native as azure_native
    
    agent_pool = azure_native.containerservice.AgentPool("agentPool",
        agent_pool_name="agentpool1",
        count=3,
        kubelet_config=azure_native.containerservice.KubeletConfigArgs(
            allowed_unsafe_sysctls=[
                "kernel.msg*",
                "net.core.somaxconn",
            ],
            cpu_cfs_quota=True,
            cpu_cfs_quota_period="200ms",
            cpu_manager_policy="static",
            fail_swap_on=False,
            image_gc_high_threshold=90,
            image_gc_low_threshold=70,
            topology_manager_policy="best-effort",
        ),
        linux_os_config=azure_native.containerservice.LinuxOSConfigArgs(
            swap_file_size_mb=1500,
            sysctls=azure_native.containerservice.SysctlConfigArgs(
                kernel_threads_max=99999,
                net_core_wmem_default=12345,
                net_ipv4_ip_local_port_range="20000 60000",
                net_ipv4_tcp_tw_reuse=True,
            ),
            transparent_huge_page_defrag="madvise",
            transparent_huge_page_enabled="always",
        ),
        orchestrator_version="",
        os_type="Linux",
        resource_group_name="rg1",
        resource_name_="clustername1",
        vm_size="Standard_DS2_v2")
    
    import * as pulumi from "@pulumi/pulumi";
    import * as azure_native from "@pulumi/azure-native";
    
    const agentPool = new azure_native.containerservice.AgentPool("agentPool", {
        agentPoolName: "agentpool1",
        count: 3,
        kubeletConfig: {
            allowedUnsafeSysctls: [
                "kernel.msg*",
                "net.core.somaxconn",
            ],
            cpuCfsQuota: true,
            cpuCfsQuotaPeriod: "200ms",
            cpuManagerPolicy: "static",
            failSwapOn: false,
            imageGcHighThreshold: 90,
            imageGcLowThreshold: 70,
            topologyManagerPolicy: "best-effort",
        },
        linuxOSConfig: {
            swapFileSizeMB: 1500,
            sysctls: {
                kernelThreadsMax: 99999,
                netCoreWmemDefault: 12345,
                netIpv4IpLocalPortRange: "20000 60000",
                netIpv4TcpTwReuse: true,
            },
            transparentHugePageDefrag: "madvise",
            transparentHugePageEnabled: "always",
        },
        orchestratorVersion: "",
        osType: "Linux",
        resourceGroupName: "rg1",
        resourceName: "clustername1",
        vmSize: "Standard_DS2_v2",
    });
    
    resources:
      agentPool:
        type: azure-native:containerservice:AgentPool
        properties:
          agentPoolName: agentpool1
          count: 3
          kubeletConfig:
            allowedUnsafeSysctls:
              - kernel.msg*
              - net.core.somaxconn
            cpuCfsQuota: true
            cpuCfsQuotaPeriod: 200ms
            cpuManagerPolicy: static
            failSwapOn: false
            imageGcHighThreshold: 90
            imageGcLowThreshold: 70
            topologyManagerPolicy: best-effort
          linuxOSConfig:
            swapFileSizeMB: 1500
            sysctls:
              kernelThreadsMax: 99999
              netCoreWmemDefault: 12345
              netIpv4IpLocalPortRange: 20000 60000
              netIpv4TcpTwReuse: true
            transparentHugePageDefrag: madvise
            transparentHugePageEnabled: always
          orchestratorVersion: ""
          osType: Linux
          resourceGroupName: rg1
          resourceName: clustername1
          vmSize: Standard_DS2_v2
    

    Create Agent Pool with OSSKU

    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using AzureNative = Pulumi.AzureNative;
    
    return await Deployment.RunAsync(() => 
    {
        var agentPool = new AzureNative.ContainerService.AgentPool("agentPool", new()
        {
            AgentPoolName = "agentpool1",
            Count = 3,
            KubeletConfig = new AzureNative.ContainerService.Inputs.KubeletConfigArgs
            {
                AllowedUnsafeSysctls = new[]
                {
                    "kernel.msg*",
                    "net.core.somaxconn",
                },
                CpuCfsQuota = true,
                CpuCfsQuotaPeriod = "200ms",
                CpuManagerPolicy = "static",
                FailSwapOn = false,
                ImageGcHighThreshold = 90,
                ImageGcLowThreshold = 70,
                TopologyManagerPolicy = "best-effort",
            },
            LinuxOSConfig = new AzureNative.ContainerService.Inputs.LinuxOSConfigArgs
            {
                SwapFileSizeMB = 1500,
                Sysctls = new AzureNative.ContainerService.Inputs.SysctlConfigArgs
                {
                    KernelThreadsMax = 99999,
                    NetCoreWmemDefault = 12345,
                    NetIpv4IpLocalPortRange = "20000 60000",
                    NetIpv4TcpTwReuse = true,
                },
                TransparentHugePageDefrag = "madvise",
                TransparentHugePageEnabled = "always",
            },
            OrchestratorVersion = "",
            OsSKU = "AzureLinux",
            OsType = "Linux",
            ResourceGroupName = "rg1",
            ResourceName = "clustername1",
            VmSize = "Standard_DS2_v2",
        });
    
    });
    
    package main
    
    import (
    	"github.com/pulumi/pulumi-azure-native-sdk/containerservice/v2"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		_, err := containerservice.NewAgentPool(ctx, "agentPool", &containerservice.AgentPoolArgs{
    			AgentPoolName: pulumi.String("agentpool1"),
    			Count:         pulumi.Int(3),
    			KubeletConfig: &containerservice.KubeletConfigArgs{
    				AllowedUnsafeSysctls: pulumi.StringArray{
    					pulumi.String("kernel.msg*"),
    					pulumi.String("net.core.somaxconn"),
    				},
    				CpuCfsQuota:           pulumi.Bool(true),
    				CpuCfsQuotaPeriod:     pulumi.String("200ms"),
    				CpuManagerPolicy:      pulumi.String("static"),
    				FailSwapOn:            pulumi.Bool(false),
    				ImageGcHighThreshold:  pulumi.Int(90),
    				ImageGcLowThreshold:   pulumi.Int(70),
    				TopologyManagerPolicy: pulumi.String("best-effort"),
    			},
    			LinuxOSConfig: &containerservice.LinuxOSConfigArgs{
    				SwapFileSizeMB: pulumi.Int(1500),
    				Sysctls: &containerservice.SysctlConfigArgs{
    					KernelThreadsMax:        pulumi.Int(99999),
    					NetCoreWmemDefault:      pulumi.Int(12345),
    					NetIpv4IpLocalPortRange: pulumi.String("20000 60000"),
    					NetIpv4TcpTwReuse:       pulumi.Bool(true),
    				},
    				TransparentHugePageDefrag:  pulumi.String("madvise"),
    				TransparentHugePageEnabled: pulumi.String("always"),
    			},
    			OrchestratorVersion: pulumi.String(""),
    			OsSKU:               pulumi.String("AzureLinux"),
    			OsType:              pulumi.String("Linux"),
    			ResourceGroupName:   pulumi.String("rg1"),
    			ResourceName:        pulumi.String("clustername1"),
    			VmSize:              pulumi.String("Standard_DS2_v2"),
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.azurenative.containerservice.AgentPool;
    import com.pulumi.azurenative.containerservice.AgentPoolArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            var agentPool = new AgentPool("agentPool", AgentPoolArgs.builder()        
                .agentPoolName("agentpool1")
                .count(3)
                .kubeletConfig(Map.ofEntries(
                Map.entry("allowedUnsafeSysctls", List.of(
                    "kernel.msg*",
                    "net.core.somaxconn")),
                    Map.entry("cpuCfsQuota", true),
                    Map.entry("cpuCfsQuotaPeriod", "200ms"),
                    Map.entry("cpuManagerPolicy", "static"),
                    Map.entry("failSwapOn", false),
                    Map.entry("imageGcHighThreshold", 90),
                    Map.entry("imageGcLowThreshold", 70),
                    Map.entry("topologyManagerPolicy", "best-effort")
                ))
                .linuxOSConfig(Map.ofEntries(
                    Map.entry("swapFileSizeMB", 1500),
                    Map.entry("sysctls", Map.ofEntries(
                        Map.entry("kernelThreadsMax", 99999),
                        Map.entry("netCoreWmemDefault", 12345),
                        Map.entry("netIpv4IpLocalPortRange", "20000 60000"),
                        Map.entry("netIpv4TcpTwReuse", true)
                    )),
                    Map.entry("transparentHugePageDefrag", "madvise"),
                    Map.entry("transparentHugePageEnabled", "always")
                ))
                .orchestratorVersion("")
                .osSKU("AzureLinux")
                .osType("Linux")
                .resourceGroupName("rg1")
                .resourceName("clustername1")
                .vmSize("Standard_DS2_v2")
                .build());
    
        }
    }
    
    import pulumi
    import pulumi_azure_native as azure_native
    
    agent_pool = azure_native.containerservice.AgentPool("agentPool",
        agent_pool_name="agentpool1",
        count=3,
        kubelet_config=azure_native.containerservice.KubeletConfigArgs(
            allowed_unsafe_sysctls=[
                "kernel.msg*",
                "net.core.somaxconn",
            ],
            cpu_cfs_quota=True,
            cpu_cfs_quota_period="200ms",
            cpu_manager_policy="static",
            fail_swap_on=False,
            image_gc_high_threshold=90,
            image_gc_low_threshold=70,
            topology_manager_policy="best-effort",
        ),
        linux_os_config=azure_native.containerservice.LinuxOSConfigArgs(
            swap_file_size_mb=1500,
            sysctls=azure_native.containerservice.SysctlConfigArgs(
                kernel_threads_max=99999,
                net_core_wmem_default=12345,
                net_ipv4_ip_local_port_range="20000 60000",
                net_ipv4_tcp_tw_reuse=True,
            ),
            transparent_huge_page_defrag="madvise",
            transparent_huge_page_enabled="always",
        ),
        orchestrator_version="",
        os_sku="AzureLinux",
        os_type="Linux",
        resource_group_name="rg1",
        resource_name_="clustername1",
        vm_size="Standard_DS2_v2")
    
    import * as pulumi from "@pulumi/pulumi";
    import * as azure_native from "@pulumi/azure-native";
    
    const agentPool = new azure_native.containerservice.AgentPool("agentPool", {
        agentPoolName: "agentpool1",
        count: 3,
        kubeletConfig: {
            allowedUnsafeSysctls: [
                "kernel.msg*",
                "net.core.somaxconn",
            ],
            cpuCfsQuota: true,
            cpuCfsQuotaPeriod: "200ms",
            cpuManagerPolicy: "static",
            failSwapOn: false,
            imageGcHighThreshold: 90,
            imageGcLowThreshold: 70,
            topologyManagerPolicy: "best-effort",
        },
        linuxOSConfig: {
            swapFileSizeMB: 1500,
            sysctls: {
                kernelThreadsMax: 99999,
                netCoreWmemDefault: 12345,
                netIpv4IpLocalPortRange: "20000 60000",
                netIpv4TcpTwReuse: true,
            },
            transparentHugePageDefrag: "madvise",
            transparentHugePageEnabled: "always",
        },
        orchestratorVersion: "",
        osSKU: "AzureLinux",
        osType: "Linux",
        resourceGroupName: "rg1",
        resourceName: "clustername1",
        vmSize: "Standard_DS2_v2",
    });
    
    resources:
      agentPool:
        type: azure-native:containerservice:AgentPool
        properties:
          agentPoolName: agentpool1
          count: 3
          kubeletConfig:
            allowedUnsafeSysctls:
              - kernel.msg*
              - net.core.somaxconn
            cpuCfsQuota: true
            cpuCfsQuotaPeriod: 200ms
            cpuManagerPolicy: static
            failSwapOn: false
            imageGcHighThreshold: 90
            imageGcLowThreshold: 70
            topologyManagerPolicy: best-effort
          linuxOSConfig:
            swapFileSizeMB: 1500
            sysctls:
              kernelThreadsMax: 99999
              netCoreWmemDefault: 12345
              netIpv4IpLocalPortRange: 20000 60000
              netIpv4TcpTwReuse: true
            transparentHugePageDefrag: madvise
            transparentHugePageEnabled: always
          orchestratorVersion: ""
          osSKU: AzureLinux
          osType: Linux
          resourceGroupName: rg1
          resourceName: clustername1
          vmSize: Standard_DS2_v2
    

    Create Agent Pool with PPG

    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using AzureNative = Pulumi.AzureNative;
    
    return await Deployment.RunAsync(() => 
    {
        var agentPool = new AzureNative.ContainerService.AgentPool("agentPool", new()
        {
            AgentPoolName = "agentpool1",
            Count = 3,
            OrchestratorVersion = "",
            OsType = "Linux",
            ProximityPlacementGroupID = "/subscriptions/subid1/resourcegroups/rg1/providers/Microsoft.Compute/proximityPlacementGroups/ppg1",
            ResourceGroupName = "rg1",
            ResourceName = "clustername1",
            VmSize = "Standard_DS2_v2",
        });
    
    });
    
    package main
    
    import (
    	"github.com/pulumi/pulumi-azure-native-sdk/containerservice/v2"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		_, err := containerservice.NewAgentPool(ctx, "agentPool", &containerservice.AgentPoolArgs{
    			AgentPoolName:             pulumi.String("agentpool1"),
    			Count:                     pulumi.Int(3),
    			OrchestratorVersion:       pulumi.String(""),
    			OsType:                    pulumi.String("Linux"),
    			ProximityPlacementGroupID: pulumi.String("/subscriptions/subid1/resourcegroups/rg1/providers/Microsoft.Compute/proximityPlacementGroups/ppg1"),
    			ResourceGroupName:         pulumi.String("rg1"),
    			ResourceName:              pulumi.String("clustername1"),
    			VmSize:                    pulumi.String("Standard_DS2_v2"),
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.azurenative.containerservice.AgentPool;
    import com.pulumi.azurenative.containerservice.AgentPoolArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            var agentPool = new AgentPool("agentPool", AgentPoolArgs.builder()        
                .agentPoolName("agentpool1")
                .count(3)
                .orchestratorVersion("")
                .osType("Linux")
                .proximityPlacementGroupID("/subscriptions/subid1/resourcegroups/rg1/providers/Microsoft.Compute/proximityPlacementGroups/ppg1")
                .resourceGroupName("rg1")
                .resourceName("clustername1")
                .vmSize("Standard_DS2_v2")
                .build());
    
        }
    }
    
    import pulumi
    import pulumi_azure_native as azure_native
    
    agent_pool = azure_native.containerservice.AgentPool("agentPool",
        agent_pool_name="agentpool1",
        count=3,
        orchestrator_version="",
        os_type="Linux",
        proximity_placement_group_id="/subscriptions/subid1/resourcegroups/rg1/providers/Microsoft.Compute/proximityPlacementGroups/ppg1",
        resource_group_name="rg1",
        resource_name_="clustername1",
        vm_size="Standard_DS2_v2")
    
    import * as pulumi from "@pulumi/pulumi";
    import * as azure_native from "@pulumi/azure-native";
    
    const agentPool = new azure_native.containerservice.AgentPool("agentPool", {
        agentPoolName: "agentpool1",
        count: 3,
        orchestratorVersion: "",
        osType: "Linux",
        proximityPlacementGroupID: "/subscriptions/subid1/resourcegroups/rg1/providers/Microsoft.Compute/proximityPlacementGroups/ppg1",
        resourceGroupName: "rg1",
        resourceName: "clustername1",
        vmSize: "Standard_DS2_v2",
    });
    
    resources:
      agentPool:
        type: azure-native:containerservice:AgentPool
        properties:
          agentPoolName: agentpool1
          count: 3
          orchestratorVersion: ""
          osType: Linux
          proximityPlacementGroupID: /subscriptions/subid1/resourcegroups/rg1/providers/Microsoft.Compute/proximityPlacementGroups/ppg1
          resourceGroupName: rg1
          resourceName: clustername1
          vmSize: Standard_DS2_v2
    

    Create Agent Pool with UltraSSD enabled

    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using AzureNative = Pulumi.AzureNative;
    
    return await Deployment.RunAsync(() => 
    {
        var agentPool = new AzureNative.ContainerService.AgentPool("agentPool", new()
        {
            AgentPoolName = "agentpool1",
            Count = 3,
            EnableUltraSSD = true,
            OrchestratorVersion = "",
            OsType = "Linux",
            ResourceGroupName = "rg1",
            ResourceName = "clustername1",
            VmSize = "Standard_DS2_v2",
        });
    
    });
    
    package main
    
    import (
    	"github.com/pulumi/pulumi-azure-native-sdk/containerservice/v2"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		_, err := containerservice.NewAgentPool(ctx, "agentPool", &containerservice.AgentPoolArgs{
    			AgentPoolName:       pulumi.String("agentpool1"),
    			Count:               pulumi.Int(3),
    			EnableUltraSSD:      pulumi.Bool(true),
    			OrchestratorVersion: pulumi.String(""),
    			OsType:              pulumi.String("Linux"),
    			ResourceGroupName:   pulumi.String("rg1"),
    			ResourceName:        pulumi.String("clustername1"),
    			VmSize:              pulumi.String("Standard_DS2_v2"),
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.azurenative.containerservice.AgentPool;
    import com.pulumi.azurenative.containerservice.AgentPoolArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            var agentPool = new AgentPool("agentPool", AgentPoolArgs.builder()        
                .agentPoolName("agentpool1")
                .count(3)
                .enableUltraSSD(true)
                .orchestratorVersion("")
                .osType("Linux")
                .resourceGroupName("rg1")
                .resourceName("clustername1")
                .vmSize("Standard_DS2_v2")
                .build());
    
        }
    }
    
    import pulumi
    import pulumi_azure_native as azure_native
    
    agent_pool = azure_native.containerservice.AgentPool("agentPool",
        agent_pool_name="agentpool1",
        count=3,
        enable_ultra_ssd=True,
        orchestrator_version="",
        os_type="Linux",
        resource_group_name="rg1",
        resource_name_="clustername1",
        vm_size="Standard_DS2_v2")
    
    import * as pulumi from "@pulumi/pulumi";
    import * as azure_native from "@pulumi/azure-native";
    
    const agentPool = new azure_native.containerservice.AgentPool("agentPool", {
        agentPoolName: "agentpool1",
        count: 3,
        enableUltraSSD: true,
        orchestratorVersion: "",
        osType: "Linux",
        resourceGroupName: "rg1",
        resourceName: "clustername1",
        vmSize: "Standard_DS2_v2",
    });
    
    resources:
      agentPool:
        type: azure-native:containerservice:AgentPool
        properties:
          agentPoolName: agentpool1
          count: 3
          enableUltraSSD: true
          orchestratorVersion: ""
          osType: Linux
          resourceGroupName: rg1
          resourceName: clustername1
          vmSize: Standard_DS2_v2
    

    Create Agent Pool with Windows OSSKU

    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using AzureNative = Pulumi.AzureNative;
    
    return await Deployment.RunAsync(() => 
    {
        var agentPool = new AzureNative.ContainerService.AgentPool("agentPool", new()
        {
            AgentPoolName = "wnp2",
            Count = 3,
            OrchestratorVersion = "1.23.3",
            OsSKU = "Windows2022",
            OsType = "Windows",
            ResourceGroupName = "rg1",
            ResourceName = "clustername1",
            VmSize = "Standard_D4s_v3",
        });
    
    });
    
    package main
    
    import (
    	"github.com/pulumi/pulumi-azure-native-sdk/containerservice/v2"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		_, err := containerservice.NewAgentPool(ctx, "agentPool", &containerservice.AgentPoolArgs{
    			AgentPoolName:       pulumi.String("wnp2"),
    			Count:               pulumi.Int(3),
    			OrchestratorVersion: pulumi.String("1.23.3"),
    			OsSKU:               pulumi.String("Windows2022"),
    			OsType:              pulumi.String("Windows"),
    			ResourceGroupName:   pulumi.String("rg1"),
    			ResourceName:        pulumi.String("clustername1"),
    			VmSize:              pulumi.String("Standard_D4s_v3"),
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.azurenative.containerservice.AgentPool;
    import com.pulumi.azurenative.containerservice.AgentPoolArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            var agentPool = new AgentPool("agentPool", AgentPoolArgs.builder()        
                .agentPoolName("wnp2")
                .count(3)
                .orchestratorVersion("1.23.3")
                .osSKU("Windows2022")
                .osType("Windows")
                .resourceGroupName("rg1")
                .resourceName("clustername1")
                .vmSize("Standard_D4s_v3")
                .build());
    
        }
    }
    
    import pulumi
    import pulumi_azure_native as azure_native
    
    agent_pool = azure_native.containerservice.AgentPool("agentPool",
        agent_pool_name="wnp2",
        count=3,
        orchestrator_version="1.23.3",
        os_sku="Windows2022",
        os_type="Windows",
        resource_group_name="rg1",
        resource_name_="clustername1",
        vm_size="Standard_D4s_v3")
    
    import * as pulumi from "@pulumi/pulumi";
    import * as azure_native from "@pulumi/azure-native";
    
    const agentPool = new azure_native.containerservice.AgentPool("agentPool", {
        agentPoolName: "wnp2",
        count: 3,
        orchestratorVersion: "1.23.3",
        osSKU: "Windows2022",
        osType: "Windows",
        resourceGroupName: "rg1",
        resourceName: "clustername1",
        vmSize: "Standard_D4s_v3",
    });
    
    resources:
      agentPool:
        type: azure-native:containerservice:AgentPool
        properties:
          agentPoolName: wnp2
          count: 3
          orchestratorVersion: 1.23.3
          osSKU: Windows2022
          osType: Windows
          resourceGroupName: rg1
          resourceName: clustername1
          vmSize: Standard_D4s_v3
    

    Create Spot Agent Pool

    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using AzureNative = Pulumi.AzureNative;
    
    return await Deployment.RunAsync(() => 
    {
        var agentPool = new AzureNative.ContainerService.AgentPool("agentPool", new()
        {
            AgentPoolName = "agentpool1",
            Count = 3,
            NodeLabels = 
            {
                { "key1", "val1" },
            },
            NodeTaints = new[]
            {
                "Key1=Value1:NoSchedule",
            },
            OrchestratorVersion = "",
            OsType = "Linux",
            ResourceGroupName = "rg1",
            ResourceName = "clustername1",
            ScaleSetEvictionPolicy = "Delete",
            ScaleSetPriority = "Spot",
            Tags = 
            {
                { "name1", "val1" },
            },
            VmSize = "Standard_DS1_v2",
        });
    
    });
    
    package main
    
    import (
    	"github.com/pulumi/pulumi-azure-native-sdk/containerservice/v2"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		_, err := containerservice.NewAgentPool(ctx, "agentPool", &containerservice.AgentPoolArgs{
    			AgentPoolName: pulumi.String("agentpool1"),
    			Count:         pulumi.Int(3),
    			NodeLabels: pulumi.StringMap{
    				"key1": pulumi.String("val1"),
    			},
    			NodeTaints: pulumi.StringArray{
    				pulumi.String("Key1=Value1:NoSchedule"),
    			},
    			OrchestratorVersion:    pulumi.String(""),
    			OsType:                 pulumi.String("Linux"),
    			ResourceGroupName:      pulumi.String("rg1"),
    			ResourceName:           pulumi.String("clustername1"),
    			ScaleSetEvictionPolicy: pulumi.String("Delete"),
    			ScaleSetPriority:       pulumi.String("Spot"),
    			Tags: pulumi.StringMap{
    				"name1": pulumi.String("val1"),
    			},
    			VmSize: pulumi.String("Standard_DS1_v2"),
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.azurenative.containerservice.AgentPool;
    import com.pulumi.azurenative.containerservice.AgentPoolArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            var agentPool = new AgentPool("agentPool", AgentPoolArgs.builder()        
                .agentPoolName("agentpool1")
                .count(3)
                .nodeLabels(Map.of("key1", "val1"))
                .nodeTaints("Key1=Value1:NoSchedule")
                .orchestratorVersion("")
                .osType("Linux")
                .resourceGroupName("rg1")
                .resourceName("clustername1")
                .scaleSetEvictionPolicy("Delete")
                .scaleSetPriority("Spot")
                .tags(Map.of("name1", "val1"))
                .vmSize("Standard_DS1_v2")
                .build());
    
        }
    }
    
    import pulumi
    import pulumi_azure_native as azure_native
    
    agent_pool = azure_native.containerservice.AgentPool("agentPool",
        agent_pool_name="agentpool1",
        count=3,
        node_labels={
            "key1": "val1",
        },
        node_taints=["Key1=Value1:NoSchedule"],
        orchestrator_version="",
        os_type="Linux",
        resource_group_name="rg1",
        resource_name_="clustername1",
        scale_set_eviction_policy="Delete",
        scale_set_priority="Spot",
        tags={
            "name1": "val1",
        },
        vm_size="Standard_DS1_v2")
    
    import * as pulumi from "@pulumi/pulumi";
    import * as azure_native from "@pulumi/azure-native";
    
    const agentPool = new azure_native.containerservice.AgentPool("agentPool", {
        agentPoolName: "agentpool1",
        count: 3,
        nodeLabels: {
            key1: "val1",
        },
        nodeTaints: ["Key1=Value1:NoSchedule"],
        orchestratorVersion: "",
        osType: "Linux",
        resourceGroupName: "rg1",
        resourceName: "clustername1",
        scaleSetEvictionPolicy: "Delete",
        scaleSetPriority: "Spot",
        tags: {
            name1: "val1",
        },
        vmSize: "Standard_DS1_v2",
    });
    
    resources:
      agentPool:
        type: azure-native:containerservice:AgentPool
        properties:
          agentPoolName: agentpool1
          count: 3
          nodeLabels:
            key1: val1
          nodeTaints:
            - Key1=Value1:NoSchedule
          orchestratorVersion: ""
          osType: Linux
          resourceGroupName: rg1
          resourceName: clustername1
          scaleSetEvictionPolicy: Delete
          scaleSetPriority: Spot
          tags:
            name1: val1
          vmSize: Standard_DS1_v2
    

    Create/Update Agent Pool

    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using AzureNative = Pulumi.AzureNative;
    
    return await Deployment.RunAsync(() => 
    {
        var agentPool = new AzureNative.ContainerService.AgentPool("agentPool", new()
        {
            AgentPoolName = "agentpool1",
            Count = 3,
            Mode = "User",
            NodeLabels = 
            {
                { "key1", "val1" },
            },
            NodeTaints = new[]
            {
                "Key1=Value1:NoSchedule",
            },
            OrchestratorVersion = "",
            OsType = "Linux",
            ResourceGroupName = "rg1",
            ResourceName = "clustername1",
            ScaleSetEvictionPolicy = "Delete",
            ScaleSetPriority = "Spot",
            Tags = 
            {
                { "name1", "val1" },
            },
            VmSize = "Standard_DS1_v2",
        });
    
    });
    
    package main
    
    import (
    	"github.com/pulumi/pulumi-azure-native-sdk/containerservice/v2"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		_, err := containerservice.NewAgentPool(ctx, "agentPool", &containerservice.AgentPoolArgs{
    			AgentPoolName: pulumi.String("agentpool1"),
    			Count:         pulumi.Int(3),
    			Mode:          pulumi.String("User"),
    			NodeLabels: pulumi.StringMap{
    				"key1": pulumi.String("val1"),
    			},
    			NodeTaints: pulumi.StringArray{
    				pulumi.String("Key1=Value1:NoSchedule"),
    			},
    			OrchestratorVersion:    pulumi.String(""),
    			OsType:                 pulumi.String("Linux"),
    			ResourceGroupName:      pulumi.String("rg1"),
    			ResourceName:           pulumi.String("clustername1"),
    			ScaleSetEvictionPolicy: pulumi.String("Delete"),
    			ScaleSetPriority:       pulumi.String("Spot"),
    			Tags: pulumi.StringMap{
    				"name1": pulumi.String("val1"),
    			},
    			VmSize: pulumi.String("Standard_DS1_v2"),
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.azurenative.containerservice.AgentPool;
    import com.pulumi.azurenative.containerservice.AgentPoolArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            var agentPool = new AgentPool("agentPool", AgentPoolArgs.builder()        
                .agentPoolName("agentpool1")
                .count(3)
                .mode("User")
                .nodeLabels(Map.of("key1", "val1"))
                .nodeTaints("Key1=Value1:NoSchedule")
                .orchestratorVersion("")
                .osType("Linux")
                .resourceGroupName("rg1")
                .resourceName("clustername1")
                .scaleSetEvictionPolicy("Delete")
                .scaleSetPriority("Spot")
                .tags(Map.of("name1", "val1"))
                .vmSize("Standard_DS1_v2")
                .build());
    
        }
    }
    
    import pulumi
    import pulumi_azure_native as azure_native
    
    agent_pool = azure_native.containerservice.AgentPool("agentPool",
        agent_pool_name="agentpool1",
        count=3,
        mode="User",
        node_labels={
            "key1": "val1",
        },
        node_taints=["Key1=Value1:NoSchedule"],
        orchestrator_version="",
        os_type="Linux",
        resource_group_name="rg1",
        resource_name_="clustername1",
        scale_set_eviction_policy="Delete",
        scale_set_priority="Spot",
        tags={
            "name1": "val1",
        },
        vm_size="Standard_DS1_v2")
    
    import * as pulumi from "@pulumi/pulumi";
    import * as azure_native from "@pulumi/azure-native";
    
    const agentPool = new azure_native.containerservice.AgentPool("agentPool", {
        agentPoolName: "agentpool1",
        count: 3,
        mode: "User",
        nodeLabels: {
            key1: "val1",
        },
        nodeTaints: ["Key1=Value1:NoSchedule"],
        orchestratorVersion: "",
        osType: "Linux",
        resourceGroupName: "rg1",
        resourceName: "clustername1",
        scaleSetEvictionPolicy: "Delete",
        scaleSetPriority: "Spot",
        tags: {
            name1: "val1",
        },
        vmSize: "Standard_DS1_v2",
    });
    
    resources:
      agentPool:
        type: azure-native:containerservice:AgentPool
        properties:
          agentPoolName: agentpool1
          count: 3
          mode: User
          nodeLabels:
            key1: val1
          nodeTaints:
            - Key1=Value1:NoSchedule
          orchestratorVersion: ""
          osType: Linux
          resourceGroupName: rg1
          resourceName: clustername1
          scaleSetEvictionPolicy: Delete
          scaleSetPriority: Spot
          tags:
            name1: val1
          vmSize: Standard_DS1_v2
    

    Start Agent Pool

    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using AzureNative = Pulumi.AzureNative;
    
    return await Deployment.RunAsync(() => 
    {
        var agentPool = new AzureNative.ContainerService.AgentPool("agentPool", new()
        {
            AgentPoolName = "agentpool1",
            PowerState = new AzureNative.ContainerService.Inputs.PowerStateArgs
            {
                Code = "Running",
            },
            ResourceGroupName = "rg1",
            ResourceName = "clustername1",
        });
    
    });
    
    package main
    
    import (
    	"github.com/pulumi/pulumi-azure-native-sdk/containerservice/v2"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		_, err := containerservice.NewAgentPool(ctx, "agentPool", &containerservice.AgentPoolArgs{
    			AgentPoolName: pulumi.String("agentpool1"),
    			PowerState: &containerservice.PowerStateArgs{
    				Code: pulumi.String("Running"),
    			},
    			ResourceGroupName: pulumi.String("rg1"),
    			ResourceName:      pulumi.String("clustername1"),
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.azurenative.containerservice.AgentPool;
    import com.pulumi.azurenative.containerservice.AgentPoolArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            var agentPool = new AgentPool("agentPool", AgentPoolArgs.builder()        
                .agentPoolName("agentpool1")
                .powerState(Map.of("code", "Running"))
                .resourceGroupName("rg1")
                .resourceName("clustername1")
                .build());
    
        }
    }
    
    import pulumi
    import pulumi_azure_native as azure_native
    
    agent_pool = azure_native.containerservice.AgentPool("agentPool",
        agent_pool_name="agentpool1",
        power_state=azure_native.containerservice.PowerStateArgs(
            code="Running",
        ),
        resource_group_name="rg1",
        resource_name_="clustername1")
    
    import * as pulumi from "@pulumi/pulumi";
    import * as azure_native from "@pulumi/azure-native";
    
    const agentPool = new azure_native.containerservice.AgentPool("agentPool", {
        agentPoolName: "agentpool1",
        powerState: {
            code: "Running",
        },
        resourceGroupName: "rg1",
        resourceName: "clustername1",
    });
    
    resources:
      agentPool:
        type: azure-native:containerservice:AgentPool
        properties:
          agentPoolName: agentpool1
          powerState:
            code: Running
          resourceGroupName: rg1
          resourceName: clustername1
    

    Stop Agent Pool

    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using AzureNative = Pulumi.AzureNative;
    
    return await Deployment.RunAsync(() => 
    {
        var agentPool = new AzureNative.ContainerService.AgentPool("agentPool", new()
        {
            AgentPoolName = "agentpool1",
            PowerState = new AzureNative.ContainerService.Inputs.PowerStateArgs
            {
                Code = "Stopped",
            },
            ResourceGroupName = "rg1",
            ResourceName = "clustername1",
        });
    
    });
    
    package main
    
    import (
    	"github.com/pulumi/pulumi-azure-native-sdk/containerservice/v2"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		_, err := containerservice.NewAgentPool(ctx, "agentPool", &containerservice.AgentPoolArgs{
    			AgentPoolName: pulumi.String("agentpool1"),
    			PowerState: &containerservice.PowerStateArgs{
    				Code: pulumi.String("Stopped"),
    			},
    			ResourceGroupName: pulumi.String("rg1"),
    			ResourceName:      pulumi.String("clustername1"),
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.azurenative.containerservice.AgentPool;
    import com.pulumi.azurenative.containerservice.AgentPoolArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            var agentPool = new AgentPool("agentPool", AgentPoolArgs.builder()        
                .agentPoolName("agentpool1")
                .powerState(Map.of("code", "Stopped"))
                .resourceGroupName("rg1")
                .resourceName("clustername1")
                .build());
    
        }
    }
    
    import pulumi
    import pulumi_azure_native as azure_native
    
    agent_pool = azure_native.containerservice.AgentPool("agentPool",
        agent_pool_name="agentpool1",
        power_state=azure_native.containerservice.PowerStateArgs(
            code="Stopped",
        ),
        resource_group_name="rg1",
        resource_name_="clustername1")
    
    import * as pulumi from "@pulumi/pulumi";
    import * as azure_native from "@pulumi/azure-native";
    
    const agentPool = new azure_native.containerservice.AgentPool("agentPool", {
        agentPoolName: "agentpool1",
        powerState: {
            code: "Stopped",
        },
        resourceGroupName: "rg1",
        resourceName: "clustername1",
    });
    
    resources:
      agentPool:
        type: azure-native:containerservice:AgentPool
        properties:
          agentPoolName: agentpool1
          powerState:
            code: Stopped
          resourceGroupName: rg1
          resourceName: clustername1
    

    Update Agent Pool

    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using AzureNative = Pulumi.AzureNative;
    
    return await Deployment.RunAsync(() => 
    {
        var agentPool = new AzureNative.ContainerService.AgentPool("agentPool", new()
        {
            AgentPoolName = "agentpool1",
            Count = 3,
            EnableAutoScaling = true,
            MaxCount = 2,
            MinCount = 2,
            NodeTaints = new[]
            {
                "Key1=Value1:NoSchedule",
            },
            OrchestratorVersion = "",
            OsType = "Linux",
            ResourceGroupName = "rg1",
            ResourceName = "clustername1",
            ScaleSetEvictionPolicy = "Delete",
            ScaleSetPriority = "Spot",
            VmSize = "Standard_DS1_v2",
        });
    
    });
    
    package main
    
    import (
    	"github.com/pulumi/pulumi-azure-native-sdk/containerservice/v2"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		_, err := containerservice.NewAgentPool(ctx, "agentPool", &containerservice.AgentPoolArgs{
    			AgentPoolName:     pulumi.String("agentpool1"),
    			Count:             pulumi.Int(3),
    			EnableAutoScaling: pulumi.Bool(true),
    			MaxCount:          pulumi.Int(2),
    			MinCount:          pulumi.Int(2),
    			NodeTaints: pulumi.StringArray{
    				pulumi.String("Key1=Value1:NoSchedule"),
    			},
    			OrchestratorVersion:    pulumi.String(""),
    			OsType:                 pulumi.String("Linux"),
    			ResourceGroupName:      pulumi.String("rg1"),
    			ResourceName:           pulumi.String("clustername1"),
    			ScaleSetEvictionPolicy: pulumi.String("Delete"),
    			ScaleSetPriority:       pulumi.String("Spot"),
    			VmSize:                 pulumi.String("Standard_DS1_v2"),
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.azurenative.containerservice.AgentPool;
    import com.pulumi.azurenative.containerservice.AgentPoolArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            var agentPool = new AgentPool("agentPool", AgentPoolArgs.builder()        
                .agentPoolName("agentpool1")
                .count(3)
                .enableAutoScaling(true)
                .maxCount(2)
                .minCount(2)
                .nodeTaints("Key1=Value1:NoSchedule")
                .orchestratorVersion("")
                .osType("Linux")
                .resourceGroupName("rg1")
                .resourceName("clustername1")
                .scaleSetEvictionPolicy("Delete")
                .scaleSetPriority("Spot")
                .vmSize("Standard_DS1_v2")
                .build());
    
        }
    }
    
    import pulumi
    import pulumi_azure_native as azure_native
    
    agent_pool = azure_native.containerservice.AgentPool("agentPool",
        agent_pool_name="agentpool1",
        count=3,
        enable_auto_scaling=True,
        max_count=2,
        min_count=2,
        node_taints=["Key1=Value1:NoSchedule"],
        orchestrator_version="",
        os_type="Linux",
        resource_group_name="rg1",
        resource_name_="clustername1",
        scale_set_eviction_policy="Delete",
        scale_set_priority="Spot",
        vm_size="Standard_DS1_v2")
    
    import * as pulumi from "@pulumi/pulumi";
    import * as azure_native from "@pulumi/azure-native";
    
    const agentPool = new azure_native.containerservice.AgentPool("agentPool", {
        agentPoolName: "agentpool1",
        count: 3,
        enableAutoScaling: true,
        maxCount: 2,
        minCount: 2,
        nodeTaints: ["Key1=Value1:NoSchedule"],
        orchestratorVersion: "",
        osType: "Linux",
        resourceGroupName: "rg1",
        resourceName: "clustername1",
        scaleSetEvictionPolicy: "Delete",
        scaleSetPriority: "Spot",
        vmSize: "Standard_DS1_v2",
    });
    
    resources:
      agentPool:
        type: azure-native:containerservice:AgentPool
        properties:
          agentPoolName: agentpool1
          count: 3
          enableAutoScaling: true
          maxCount: 2
          minCount: 2
          nodeTaints:
            - Key1=Value1:NoSchedule
          orchestratorVersion: ""
          osType: Linux
          resourceGroupName: rg1
          resourceName: clustername1
          scaleSetEvictionPolicy: Delete
          scaleSetPriority: Spot
          vmSize: Standard_DS1_v2
    

    Create AgentPool Resource

    new AgentPool(name: string, args: AgentPoolArgs, opts?: CustomResourceOptions);
    @overload
    def AgentPool(resource_name: str,
                  opts: Optional[ResourceOptions] = None,
                  agent_pool_name: Optional[str] = None,
                  availability_zones: Optional[Sequence[str]] = None,
                  count: Optional[int] = None,
                  creation_data: Optional[CreationDataArgs] = None,
                  enable_auto_scaling: Optional[bool] = None,
                  enable_encryption_at_host: Optional[bool] = None,
                  enable_fips: Optional[bool] = None,
                  enable_node_public_ip: Optional[bool] = None,
                  enable_ultra_ssd: Optional[bool] = None,
                  gpu_instance_profile: Optional[Union[str, GPUInstanceProfile]] = None,
                  host_group_id: Optional[str] = None,
                  kubelet_config: Optional[KubeletConfigArgs] = None,
                  kubelet_disk_type: Optional[Union[str, KubeletDiskType]] = None,
                  linux_os_config: Optional[LinuxOSConfigArgs] = None,
                  max_count: Optional[int] = None,
                  max_pods: Optional[int] = None,
                  min_count: Optional[int] = None,
                  mode: Optional[Union[str, AgentPoolMode]] = None,
                  node_labels: Optional[Mapping[str, str]] = None,
                  node_public_ip_prefix_id: Optional[str] = None,
                  node_taints: Optional[Sequence[str]] = None,
                  orchestrator_version: Optional[str] = None,
                  os_disk_size_gb: Optional[int] = None,
                  os_disk_type: Optional[Union[str, OSDiskType]] = None,
                  os_sku: Optional[Union[str, OSSKU]] = None,
                  os_type: Optional[Union[str, OSType]] = None,
                  pod_subnet_id: Optional[str] = None,
                  power_state: Optional[PowerStateArgs] = None,
                  proximity_placement_group_id: Optional[str] = None,
                  resource_group_name: Optional[str] = None,
                  resource_name_: Optional[str] = None,
                  scale_down_mode: Optional[Union[str, ScaleDownMode]] = None,
                  scale_set_eviction_policy: Optional[Union[str, ScaleSetEvictionPolicy]] = None,
                  scale_set_priority: Optional[Union[str, ScaleSetPriority]] = None,
                  spot_max_price: Optional[float] = None,
                  tags: Optional[Mapping[str, str]] = None,
                  type: Optional[Union[str, AgentPoolType]] = None,
                  upgrade_settings: Optional[AgentPoolUpgradeSettingsArgs] = None,
                  vm_size: Optional[str] = None,
                  vnet_subnet_id: Optional[str] = None,
                  workload_runtime: Optional[Union[str, WorkloadRuntime]] = None)
    @overload
    def AgentPool(resource_name: str,
                  args: AgentPoolArgs,
                  opts: Optional[ResourceOptions] = None)
    func NewAgentPool(ctx *Context, name string, args AgentPoolArgs, opts ...ResourceOption) (*AgentPool, error)
    public AgentPool(string name, AgentPoolArgs args, CustomResourceOptions? opts = null)
    public AgentPool(String name, AgentPoolArgs args)
    public AgentPool(String name, AgentPoolArgs args, CustomResourceOptions options)
    
    type: azure-native:containerservice:AgentPool
    properties: # The arguments to resource properties.
    options: # Bag of options to control resource's behavior.
    
    
    name string
    The unique name of the resource.
    args AgentPoolArgs
    The arguments to resource properties.
    opts CustomResourceOptions
    Bag of options to control resource's behavior.
    resource_name str
    The unique name of the resource.
    args AgentPoolArgs
    The arguments to resource properties.
    opts ResourceOptions
    Bag of options to control resource's behavior.
    ctx Context
    Context object for the current deployment.
    name string
    The unique name of the resource.
    args AgentPoolArgs
    The arguments to resource properties.
    opts ResourceOption
    Bag of options to control resource's behavior.
    name string
    The unique name of the resource.
    args AgentPoolArgs
    The arguments to resource properties.
    opts CustomResourceOptions
    Bag of options to control resource's behavior.
    name String
    The unique name of the resource.
    args AgentPoolArgs
    The arguments to resource properties.
    options CustomResourceOptions
    Bag of options to control resource's behavior.

    AgentPool Resource Properties

    To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.

    Inputs

    The AgentPool resource accepts the following input properties:

    ResourceGroupName string

    The name of the resource group. The name is case insensitive.

    ResourceName string

    The name of the managed cluster resource.

    AgentPoolName string

    The name of the agent pool.

    AvailabilityZones List<string>

    The list of Availability zones to use for nodes. This can only be specified if the AgentPoolType property is 'VirtualMachineScaleSets'.

    Count int

    Number of agents (VMs) to host docker containers. Allowed values must be in the range of 0 to 1000 (inclusive) for user pools and in the range of 1 to 1000 (inclusive) for system pools. The default value is 1.

    CreationData Pulumi.AzureNative.ContainerService.Inputs.CreationData

    CreationData to be used to specify the source Snapshot ID if the node pool will be created/upgraded using a snapshot.

    EnableAutoScaling bool

    Whether to enable auto-scaler

    EnableEncryptionAtHost bool

    This is only supported on certain VM sizes and in certain Azure regions. For more information, see: https://docs.microsoft.com/azure/aks/enable-host-encryption

    EnableFIPS bool

    See Add a FIPS-enabled node pool for more details.

    EnableNodePublicIP bool

    Some scenarios may require nodes in a node pool to receive their own dedicated public IP addresses. A common scenario is for gaming workloads, where a console needs to make a direct connection to a cloud virtual machine to minimize hops. For more information see assigning a public IP per node. The default is false.

    EnableUltraSSD bool

    Whether to enable UltraSSD

    GpuInstanceProfile string | Pulumi.AzureNative.ContainerService.GPUInstanceProfile

    GPUInstanceProfile to be used to specify GPU MIG instance profile for supported GPU VM SKU.

    HostGroupID string

    This is of the form: /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/hostGroups/{hostGroupName}. For more information see Azure dedicated hosts.

    KubeletConfig Pulumi.AzureNative.ContainerService.Inputs.KubeletConfig

    The Kubelet configuration on the agent pool nodes.

    KubeletDiskType string | Pulumi.AzureNative.ContainerService.KubeletDiskType

    Determines the placement of emptyDir volumes, container runtime data root, and Kubelet ephemeral storage.

    LinuxOSConfig Pulumi.AzureNative.ContainerService.Inputs.LinuxOSConfig

    The OS configuration of Linux agent nodes.

    MaxCount int

    The maximum number of nodes for auto-scaling

    MaxPods int

    The maximum number of pods that can run on a node.

    MinCount int

    The minimum number of nodes for auto-scaling

    Mode string | Pulumi.AzureNative.ContainerService.AgentPoolMode

    A cluster must have at least one 'System' Agent Pool at all times. For additional information on agent pool restrictions and best practices, see: https://docs.microsoft.com/azure/aks/use-system-pools

    NodeLabels Dictionary<string, string>

    The node labels to be persisted across all nodes in agent pool.

    NodePublicIPPrefixID string

    This is of the form: /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPPrefixes/{publicIPPrefixName}

    NodeTaints List<string>

    The taints added to new nodes during node pool create and scale. For example, key=value:NoSchedule.

    OrchestratorVersion string

    Both patch version <major.minor.patch> (e.g. 1.20.13) and <major.minor> (e.g. 1.20) are supported. When <major.minor> is specified, the latest supported GA patch version is chosen automatically. Updating the cluster with the same <major.minor> once it has been created (e.g. 1.14.x -> 1.14) will not trigger an upgrade, even if a newer patch version is available. As a best practice, you should upgrade all node pools in an AKS cluster to the same Kubernetes version. The node pool version must have the same major version as the control plane. The node pool minor version must be within two minor versions of the control plane version. The node pool version cannot be greater than the control plane version. For more information see upgrading a node pool.

    OsDiskSizeGB int

    OS Disk Size in GB to be used to specify the disk size for every machine in the master/agent pool. If you specify 0, it will apply the default osDisk size according to the vmSize specified.

    OsDiskType string | Pulumi.AzureNative.ContainerService.OSDiskType

    The default is 'Ephemeral' if the VM supports it and has a cache disk larger than the requested OSDiskSizeGB. Otherwise, defaults to 'Managed'. May not be changed after creation. For more information see Ephemeral OS.

    OsSKU string | Pulumi.AzureNative.ContainerService.OSSKU

    Specifies the OS SKU used by the agent pool. The default is Ubuntu if OSType is Linux. The default is Windows2019 when Kubernetes <= 1.24 or Windows2022 when Kubernetes >= 1.25 if OSType is Windows.

    OsType string | Pulumi.AzureNative.ContainerService.OSType

    The operating system type. The default is Linux.

    PodSubnetID string

    If omitted, pod IPs are statically assigned on the node subnet (see vnetSubnetID for more details). This is of the form: /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets/{subnetName}

    PowerState Pulumi.AzureNative.ContainerService.Inputs.PowerState

    When an Agent Pool is first created it is initially Running. The Agent Pool can be stopped by setting this field to Stopped. A stopped Agent Pool stops all of its VMs and does not accrue billing charges. An Agent Pool can only be stopped if it is Running and provisioning state is Succeeded

    ProximityPlacementGroupID string

    The ID for Proximity Placement Group.

    ScaleDownMode string | Pulumi.AzureNative.ContainerService.ScaleDownMode

    This also affects the cluster autoscaler behavior. If not specified, it defaults to Delete.

    ScaleSetEvictionPolicy string | Pulumi.AzureNative.ContainerService.ScaleSetEvictionPolicy

    This cannot be specified unless the scaleSetPriority is 'Spot'. If not specified, the default is 'Delete'.

    ScaleSetPriority string | Pulumi.AzureNative.ContainerService.ScaleSetPriority

    The Virtual Machine Scale Set priority. If not specified, the default is 'Regular'.

    SpotMaxPrice double

    Possible values are any decimal value greater than zero or -1 which indicates the willingness to pay any on-demand price. For more details on spot pricing, see spot VMs pricing

    Tags Dictionary<string, string>

    The tags to be persisted on the agent pool virtual machine scale set.

    Type string | Pulumi.AzureNative.ContainerService.AgentPoolType

    The type of Agent Pool.

    UpgradeSettings Pulumi.AzureNative.ContainerService.Inputs.AgentPoolUpgradeSettings

    Settings for upgrading the agentpool

    VmSize string

    VM size availability varies by region. If a node contains insufficient compute resources (memory, cpu, etc.) pods might fail to run correctly. For more details on restricted VM sizes, see: https://docs.microsoft.com/azure/aks/quotas-skus-regions

    VnetSubnetID string

    If this is not specified, a VNET and subnet will be generated and used. If no podSubnetID is specified, this applies to nodes and pods, otherwise it applies to just nodes. This is of the form: /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets/{subnetName}

    WorkloadRuntime string | Pulumi.AzureNative.ContainerService.WorkloadRuntime

    Determines the type of workload a node can run.

    ResourceGroupName string

    The name of the resource group. The name is case insensitive.

    ResourceName string

    The name of the managed cluster resource.

    AgentPoolName string

    The name of the agent pool.

    AvailabilityZones []string

    The list of Availability zones to use for nodes. This can only be specified if the AgentPoolType property is 'VirtualMachineScaleSets'.

    Count int

    Number of agents (VMs) to host docker containers. Allowed values must be in the range of 0 to 1000 (inclusive) for user pools and in the range of 1 to 1000 (inclusive) for system pools. The default value is 1.

    CreationData CreationDataArgs

    CreationData to be used to specify the source Snapshot ID if the node pool will be created/upgraded using a snapshot.

    EnableAutoScaling bool

    Whether to enable auto-scaler

    EnableEncryptionAtHost bool

    This is only supported on certain VM sizes and in certain Azure regions. For more information, see: https://docs.microsoft.com/azure/aks/enable-host-encryption

    EnableFIPS bool

    See Add a FIPS-enabled node pool for more details.

    EnableNodePublicIP bool

    Some scenarios may require nodes in a node pool to receive their own dedicated public IP addresses. A common scenario is for gaming workloads, where a console needs to make a direct connection to a cloud virtual machine to minimize hops. For more information see assigning a public IP per node. The default is false.

    EnableUltraSSD bool

    Whether to enable UltraSSD

    GpuInstanceProfile string | GPUInstanceProfile

    GPUInstanceProfile to be used to specify GPU MIG instance profile for supported GPU VM SKU.

    HostGroupID string

    This is of the form: /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/hostGroups/{hostGroupName}. For more information see Azure dedicated hosts.

    KubeletConfig KubeletConfigArgs

    The Kubelet configuration on the agent pool nodes.

    KubeletDiskType string | KubeletDiskType

    Determines the placement of emptyDir volumes, container runtime data root, and Kubelet ephemeral storage.

    LinuxOSConfig LinuxOSConfigArgs

    The OS configuration of Linux agent nodes.

    MaxCount int

    The maximum number of nodes for auto-scaling

    MaxPods int

    The maximum number of pods that can run on a node.

    MinCount int

    The minimum number of nodes for auto-scaling

    Mode string | AgentPoolMode

    A cluster must have at least one 'System' Agent Pool at all times. For additional information on agent pool restrictions and best practices, see: https://docs.microsoft.com/azure/aks/use-system-pools

    NodeLabels map[string]string

    The node labels to be persisted across all nodes in agent pool.

    NodePublicIPPrefixID string

    This is of the form: /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPPrefixes/{publicIPPrefixName}

    NodeTaints []string

    The taints added to new nodes during node pool create and scale. For example, key=value:NoSchedule.

    OrchestratorVersion string

    Both patch version <major.minor.patch> (e.g. 1.20.13) and <major.minor> (e.g. 1.20) are supported. When <major.minor> is specified, the latest supported GA patch version is chosen automatically. Updating the cluster with the same <major.minor> once it has been created (e.g. 1.14.x -> 1.14) will not trigger an upgrade, even if a newer patch version is available. As a best practice, you should upgrade all node pools in an AKS cluster to the same Kubernetes version. The node pool version must have the same major version as the control plane. The node pool minor version must be within two minor versions of the control plane version. The node pool version cannot be greater than the control plane version. For more information see upgrading a node pool.

    OsDiskSizeGB int

    OS Disk Size in GB to be used to specify the disk size for every machine in the master/agent pool. If you specify 0, it will apply the default osDisk size according to the vmSize specified.

    OsDiskType string | OSDiskType

    The default is 'Ephemeral' if the VM supports it and has a cache disk larger than the requested OSDiskSizeGB. Otherwise, defaults to 'Managed'. May not be changed after creation. For more information see Ephemeral OS.

    OsSKU string | OSSKU

    Specifies the OS SKU used by the agent pool. The default is Ubuntu if OSType is Linux. The default is Windows2019 when Kubernetes <= 1.24 or Windows2022 when Kubernetes >= 1.25 if OSType is Windows.

    OsType string | OSType

    The operating system type. The default is Linux.

    PodSubnetID string

    If omitted, pod IPs are statically assigned on the node subnet (see vnetSubnetID for more details). This is of the form: /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets/{subnetName}

    PowerState PowerStateArgs

    When an Agent Pool is first created it is initially Running. The Agent Pool can be stopped by setting this field to Stopped. A stopped Agent Pool stops all of its VMs and does not accrue billing charges. An Agent Pool can only be stopped if it is Running and provisioning state is Succeeded

    ProximityPlacementGroupID string

    The ID for Proximity Placement Group.

    ScaleDownMode string | ScaleDownMode

    This also affects the cluster autoscaler behavior. If not specified, it defaults to Delete.

    ScaleSetEvictionPolicy string | ScaleSetEvictionPolicy

    This cannot be specified unless the scaleSetPriority is 'Spot'. If not specified, the default is 'Delete'.

    ScaleSetPriority string | ScaleSetPriority

    The Virtual Machine Scale Set priority. If not specified, the default is 'Regular'.

    SpotMaxPrice float64

    Possible values are any decimal value greater than zero or -1 which indicates the willingness to pay any on-demand price. For more details on spot pricing, see spot VMs pricing

    Tags map[string]string

    The tags to be persisted on the agent pool virtual machine scale set.

    Type string | AgentPoolType

    The type of Agent Pool.

    UpgradeSettings AgentPoolUpgradeSettingsArgs

    Settings for upgrading the agentpool

    VmSize string

    VM size availability varies by region. If a node contains insufficient compute resources (memory, cpu, etc.) pods might fail to run correctly. For more details on restricted VM sizes, see: https://docs.microsoft.com/azure/aks/quotas-skus-regions

    VnetSubnetID string

    If this is not specified, a VNET and subnet will be generated and used. If no podSubnetID is specified, this applies to nodes and pods, otherwise it applies to just nodes. This is of the form: /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets/{subnetName}

    WorkloadRuntime string | WorkloadRuntime

    Determines the type of workload a node can run.

    resourceGroupName String

    The name of the resource group. The name is case insensitive.

    resourceName String

    The name of the managed cluster resource.

    agentPoolName String

    The name of the agent pool.

    availabilityZones List<String>

    The list of Availability zones to use for nodes. This can only be specified if the AgentPoolType property is 'VirtualMachineScaleSets'.

    count Integer

    Number of agents (VMs) to host docker containers. Allowed values must be in the range of 0 to 1000 (inclusive) for user pools and in the range of 1 to 1000 (inclusive) for system pools. The default value is 1.

    creationData CreationData

    CreationData to be used to specify the source Snapshot ID if the node pool will be created/upgraded using a snapshot.

    enableAutoScaling Boolean

    Whether to enable auto-scaler

    enableEncryptionAtHost Boolean

    This is only supported on certain VM sizes and in certain Azure regions. For more information, see: https://docs.microsoft.com/azure/aks/enable-host-encryption

    enableFIPS Boolean

    See Add a FIPS-enabled node pool for more details.

    enableNodePublicIP Boolean

    Some scenarios may require nodes in a node pool to receive their own dedicated public IP addresses. A common scenario is for gaming workloads, where a console needs to make a direct connection to a cloud virtual machine to minimize hops. For more information see assigning a public IP per node. The default is false.

    enableUltraSSD Boolean

    Whether to enable UltraSSD

    gpuInstanceProfile String | GPUInstanceProfile

    GPUInstanceProfile to be used to specify GPU MIG instance profile for supported GPU VM SKU.

    hostGroupID String

    This is of the form: /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/hostGroups/{hostGroupName}. For more information see Azure dedicated hosts.

    kubeletConfig KubeletConfig

    The Kubelet configuration on the agent pool nodes.

    kubeletDiskType String | KubeletDiskType

    Determines the placement of emptyDir volumes, container runtime data root, and Kubelet ephemeral storage.

    linuxOSConfig LinuxOSConfig

    The OS configuration of Linux agent nodes.

    maxCount Integer

    The maximum number of nodes for auto-scaling

    maxPods Integer

    The maximum number of pods that can run on a node.

    minCount Integer

    The minimum number of nodes for auto-scaling

    mode String | AgentPoolMode

    A cluster must have at least one 'System' Agent Pool at all times. For additional information on agent pool restrictions and best practices, see: https://docs.microsoft.com/azure/aks/use-system-pools

    nodeLabels Map<String,String>

    The node labels to be persisted across all nodes in agent pool.

    nodePublicIPPrefixID String

    This is of the form: /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPPrefixes/{publicIPPrefixName}

    nodeTaints List<String>

    The taints added to new nodes during node pool create and scale. For example, key=value:NoSchedule.

    orchestratorVersion String

    Both patch version <major.minor.patch> (e.g. 1.20.13) and <major.minor> (e.g. 1.20) are supported. When <major.minor> is specified, the latest supported GA patch version is chosen automatically. Updating the cluster with the same <major.minor> once it has been created (e.g. 1.14.x -> 1.14) will not trigger an upgrade, even if a newer patch version is available. As a best practice, you should upgrade all node pools in an AKS cluster to the same Kubernetes version. The node pool version must have the same major version as the control plane. The node pool minor version must be within two minor versions of the control plane version. The node pool version cannot be greater than the control plane version. For more information see upgrading a node pool.

    osDiskSizeGB Integer

    OS Disk Size in GB to be used to specify the disk size for every machine in the master/agent pool. If you specify 0, it will apply the default osDisk size according to the vmSize specified.

    osDiskType String | OSDiskType

    The default is 'Ephemeral' if the VM supports it and has a cache disk larger than the requested OSDiskSizeGB. Otherwise, defaults to 'Managed'. May not be changed after creation. For more information see Ephemeral OS.

    osSKU String | OSSKU

    Specifies the OS SKU used by the agent pool. The default is Ubuntu if OSType is Linux. The default is Windows2019 when Kubernetes <= 1.24 or Windows2022 when Kubernetes >= 1.25 if OSType is Windows.

    osType String | OSType

    The operating system type. The default is Linux.

    podSubnetID String

    If omitted, pod IPs are statically assigned on the node subnet (see vnetSubnetID for more details). This is of the form: /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets/{subnetName}

    powerState PowerState

    When an Agent Pool is first created it is initially Running. The Agent Pool can be stopped by setting this field to Stopped. A stopped Agent Pool stops all of its VMs and does not accrue billing charges. An Agent Pool can only be stopped if it is Running and provisioning state is Succeeded

    proximityPlacementGroupID String

    The ID for Proximity Placement Group.

    scaleDownMode String | ScaleDownMode

    This also affects the cluster autoscaler behavior. If not specified, it defaults to Delete.

    scaleSetEvictionPolicy String | ScaleSetEvictionPolicy

    This cannot be specified unless the scaleSetPriority is 'Spot'. If not specified, the default is 'Delete'.

    scaleSetPriority String | ScaleSetPriority

    The Virtual Machine Scale Set priority. If not specified, the default is 'Regular'.

    spotMaxPrice Double

    Possible values are any decimal value greater than zero or -1 which indicates the willingness to pay any on-demand price. For more details on spot pricing, see spot VMs pricing

    tags Map<String,String>

    The tags to be persisted on the agent pool virtual machine scale set.

    type String | AgentPoolType

    The type of Agent Pool.

    upgradeSettings AgentPoolUpgradeSettings

    Settings for upgrading the agentpool

    vmSize String

    VM size availability varies by region. If a node contains insufficient compute resources (memory, cpu, etc.) pods might fail to run correctly. For more details on restricted VM sizes, see: https://docs.microsoft.com/azure/aks/quotas-skus-regions

    vnetSubnetID String

    If this is not specified, a VNET and subnet will be generated and used. If no podSubnetID is specified, this applies to nodes and pods, otherwise it applies to just nodes. This is of the form: /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets/{subnetName}

    workloadRuntime String | WorkloadRuntime

    Determines the type of workload a node can run.

    resourceGroupName string

    The name of the resource group. The name is case insensitive.

    resourceName string

    The name of the managed cluster resource.

    agentPoolName string

    The name of the agent pool.

    availabilityZones string[]

    The list of Availability zones to use for nodes. This can only be specified if the AgentPoolType property is 'VirtualMachineScaleSets'.

    count number

    Number of agents (VMs) to host docker containers. Allowed values must be in the range of 0 to 1000 (inclusive) for user pools and in the range of 1 to 1000 (inclusive) for system pools. The default value is 1.

    creationData CreationData

    CreationData to be used to specify the source Snapshot ID if the node pool will be created/upgraded using a snapshot.

    enableAutoScaling boolean

    Whether to enable auto-scaler

    enableEncryptionAtHost boolean

    This is only supported on certain VM sizes and in certain Azure regions. For more information, see: https://docs.microsoft.com/azure/aks/enable-host-encryption

    enableFIPS boolean

    See Add a FIPS-enabled node pool for more details.

    enableNodePublicIP boolean

    Some scenarios may require nodes in a node pool to receive their own dedicated public IP addresses. A common scenario is for gaming workloads, where a console needs to make a direct connection to a cloud virtual machine to minimize hops. For more information see assigning a public IP per node. The default is false.

    enableUltraSSD boolean

    Whether to enable UltraSSD

    gpuInstanceProfile string | GPUInstanceProfile

    GPUInstanceProfile to be used to specify GPU MIG instance profile for supported GPU VM SKU.

    hostGroupID string

    This is of the form: /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/hostGroups/{hostGroupName}. For more information see Azure dedicated hosts.

    kubeletConfig KubeletConfig

    The Kubelet configuration on the agent pool nodes.

    kubeletDiskType string | KubeletDiskType

    Determines the placement of emptyDir volumes, container runtime data root, and Kubelet ephemeral storage.

    linuxOSConfig LinuxOSConfig

    The OS configuration of Linux agent nodes.

    maxCount number

    The maximum number of nodes for auto-scaling

    maxPods number

    The maximum number of pods that can run on a node.

    minCount number

    The minimum number of nodes for auto-scaling

    mode string | AgentPoolMode

    A cluster must have at least one 'System' Agent Pool at all times. For additional information on agent pool restrictions and best practices, see: https://docs.microsoft.com/azure/aks/use-system-pools

    nodeLabels {[key: string]: string}

    The node labels to be persisted across all nodes in agent pool.

    nodePublicIPPrefixID string

    This is of the form: /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPPrefixes/{publicIPPrefixName}

    nodeTaints string[]

    The taints added to new nodes during node pool create and scale. For example, key=value:NoSchedule.

    orchestratorVersion string

    Both patch version <major.minor.patch> (e.g. 1.20.13) and <major.minor> (e.g. 1.20) are supported. When <major.minor> is specified, the latest supported GA patch version is chosen automatically. Updating the cluster with the same <major.minor> once it has been created (e.g. 1.14.x -> 1.14) will not trigger an upgrade, even if a newer patch version is available. As a best practice, you should upgrade all node pools in an AKS cluster to the same Kubernetes version. The node pool version must have the same major version as the control plane. The node pool minor version must be within two minor versions of the control plane version. The node pool version cannot be greater than the control plane version. For more information see upgrading a node pool.

    osDiskSizeGB number

    OS Disk Size in GB to be used to specify the disk size for every machine in the master/agent pool. If you specify 0, it will apply the default osDisk size according to the vmSize specified.

    osDiskType string | OSDiskType

    The default is 'Ephemeral' if the VM supports it and has a cache disk larger than the requested OSDiskSizeGB. Otherwise, defaults to 'Managed'. May not be changed after creation. For more information see Ephemeral OS.

    osSKU string | OSSKU

    Specifies the OS SKU used by the agent pool. The default is Ubuntu if OSType is Linux. The default is Windows2019 when Kubernetes <= 1.24 or Windows2022 when Kubernetes >= 1.25 if OSType is Windows.

    osType string | OSType

    The operating system type. The default is Linux.

    podSubnetID string

    If omitted, pod IPs are statically assigned on the node subnet (see vnetSubnetID for more details). This is of the form: /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets/{subnetName}

    powerState PowerState

    When an Agent Pool is first created it is initially Running. The Agent Pool can be stopped by setting this field to Stopped. A stopped Agent Pool stops all of its VMs and does not accrue billing charges. An Agent Pool can only be stopped if it is Running and provisioning state is Succeeded

    proximityPlacementGroupID string

    The ID for Proximity Placement Group.

    scaleDownMode string | ScaleDownMode

    This also affects the cluster autoscaler behavior. If not specified, it defaults to Delete.

    scaleSetEvictionPolicy string | ScaleSetEvictionPolicy

    This cannot be specified unless the scaleSetPriority is 'Spot'. If not specified, the default is 'Delete'.

    scaleSetPriority string | ScaleSetPriority

    The Virtual Machine Scale Set priority. If not specified, the default is 'Regular'.

    spotMaxPrice number

    Possible values are any decimal value greater than zero or -1 which indicates the willingness to pay any on-demand price. For more details on spot pricing, see spot VMs pricing

    tags {[key: string]: string}

    The tags to be persisted on the agent pool virtual machine scale set.

    type string | AgentPoolType

    The type of Agent Pool.

    upgradeSettings AgentPoolUpgradeSettings

    Settings for upgrading the agentpool

    vmSize string

    VM size availability varies by region. If a node contains insufficient compute resources (memory, cpu, etc.) pods might fail to run correctly. For more details on restricted VM sizes, see: https://docs.microsoft.com/azure/aks/quotas-skus-regions

    vnetSubnetID string

    If this is not specified, a VNET and subnet will be generated and used. If no podSubnetID is specified, this applies to nodes and pods, otherwise it applies to just nodes. This is of the form: /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets/{subnetName}

    workloadRuntime string | WorkloadRuntime

    Determines the type of workload a node can run.

    resource_group_name str

    The name of the resource group. The name is case insensitive.

    resource_name str

    The name of the managed cluster resource.

    agent_pool_name str

    The name of the agent pool.

    availability_zones Sequence[str]

    The list of Availability zones to use for nodes. This can only be specified if the AgentPoolType property is 'VirtualMachineScaleSets'.

    count int

    Number of agents (VMs) to host docker containers. Allowed values must be in the range of 0 to 1000 (inclusive) for user pools and in the range of 1 to 1000 (inclusive) for system pools. The default value is 1.

    creation_data CreationDataArgs

    CreationData to be used to specify the source Snapshot ID if the node pool will be created/upgraded using a snapshot.

    enable_auto_scaling bool

    Whether to enable auto-scaler

    enable_encryption_at_host bool

    This is only supported on certain VM sizes and in certain Azure regions. For more information, see: https://docs.microsoft.com/azure/aks/enable-host-encryption

    enable_fips bool

    See Add a FIPS-enabled node pool for more details.

    enable_node_public_ip bool

    Some scenarios may require nodes in a node pool to receive their own dedicated public IP addresses. A common scenario is for gaming workloads, where a console needs to make a direct connection to a cloud virtual machine to minimize hops. For more information see assigning a public IP per node. The default is false.

    enable_ultra_ssd bool

    Whether to enable UltraSSD

    gpu_instance_profile str | GPUInstanceProfile

    GPUInstanceProfile to be used to specify GPU MIG instance profile for supported GPU VM SKU.

    host_group_id str

    This is of the form: /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/hostGroups/{hostGroupName}. For more information see Azure dedicated hosts.

    kubelet_config KubeletConfigArgs

    The Kubelet configuration on the agent pool nodes.

    kubelet_disk_type str | KubeletDiskType

    Determines the placement of emptyDir volumes, container runtime data root, and Kubelet ephemeral storage.

    linux_os_config LinuxOSConfigArgs

    The OS configuration of Linux agent nodes.

    max_count int

    The maximum number of nodes for auto-scaling

    max_pods int

    The maximum number of pods that can run on a node.

    min_count int

    The minimum number of nodes for auto-scaling

    mode str | AgentPoolMode

    A cluster must have at least one 'System' Agent Pool at all times. For additional information on agent pool restrictions and best practices, see: https://docs.microsoft.com/azure/aks/use-system-pools

    node_labels Mapping[str, str]

    The node labels to be persisted across all nodes in agent pool.

    node_public_ip_prefix_id str

    This is of the form: /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPPrefixes/{publicIPPrefixName}

    node_taints Sequence[str]

    The taints added to new nodes during node pool create and scale. For example, key=value:NoSchedule.

    orchestrator_version str

    Both patch version <major.minor.patch> (e.g. 1.20.13) and <major.minor> (e.g. 1.20) are supported. When <major.minor> is specified, the latest supported GA patch version is chosen automatically. Updating the cluster with the same <major.minor> once it has been created (e.g. 1.14.x -> 1.14) will not trigger an upgrade, even if a newer patch version is available. As a best practice, you should upgrade all node pools in an AKS cluster to the same Kubernetes version. The node pool version must have the same major version as the control plane. The node pool minor version must be within two minor versions of the control plane version. The node pool version cannot be greater than the control plane version. For more information see upgrading a node pool.

    os_disk_size_gb int

    OS Disk Size in GB to be used to specify the disk size for every machine in the master/agent pool. If you specify 0, it will apply the default osDisk size according to the vmSize specified.

    os_disk_type str | OSDiskType

    The default is 'Ephemeral' if the VM supports it and has a cache disk larger than the requested OSDiskSizeGB. Otherwise, defaults to 'Managed'. May not be changed after creation. For more information see Ephemeral OS.

    os_sku str | OSSKU

    Specifies the OS SKU used by the agent pool. The default is Ubuntu if OSType is Linux. The default is Windows2019 when Kubernetes <= 1.24 or Windows2022 when Kubernetes >= 1.25 if OSType is Windows.

    os_type str | OSType

    The operating system type. The default is Linux.

    pod_subnet_id str

    If omitted, pod IPs are statically assigned on the node subnet (see vnetSubnetID for more details). This is of the form: /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets/{subnetName}

    power_state PowerStateArgs

    When an Agent Pool is first created it is initially Running. The Agent Pool can be stopped by setting this field to Stopped. A stopped Agent Pool stops all of its VMs and does not accrue billing charges. An Agent Pool can only be stopped if it is Running and provisioning state is Succeeded

    proximity_placement_group_id str

    The ID for Proximity Placement Group.

    scale_down_mode str | ScaleDownMode

    This also affects the cluster autoscaler behavior. If not specified, it defaults to Delete.

    scale_set_eviction_policy str | ScaleSetEvictionPolicy

    This cannot be specified unless the scaleSetPriority is 'Spot'. If not specified, the default is 'Delete'.

    scale_set_priority str | ScaleSetPriority

    The Virtual Machine Scale Set priority. If not specified, the default is 'Regular'.

    spot_max_price float

    Possible values are any decimal value greater than zero or -1 which indicates the willingness to pay any on-demand price. For more details on spot pricing, see spot VMs pricing

    tags Mapping[str, str]

    The tags to be persisted on the agent pool virtual machine scale set.

    type str | AgentPoolType

    The type of Agent Pool.

    upgrade_settings AgentPoolUpgradeSettingsArgs

    Settings for upgrading the agent pool

    vm_size str

    VM size availability varies by region. If a node contains insufficient compute resources (memory, cpu, etc) pods might fail to run correctly. For more details on restricted VM sizes, see: https://docs.microsoft.com/azure/aks/quotas-skus-regions

    vnet_subnet_id str

    If this is not specified, a VNET and subnet will be generated and used. If no podSubnetID is specified, this applies to nodes and pods, otherwise it applies to just nodes. This is of the form: /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets/{subnetName}

    workload_runtime str | WorkloadRuntime

    Determines the type of workload a node can run.

    resourceGroupName String

    The name of the resource group. The name is case insensitive.

    resourceName String

    The name of the managed cluster resource.

    agentPoolName String

    The name of the agent pool.

    availabilityZones List<String>

    The list of Availability zones to use for nodes. This can only be specified if the AgentPoolType property is 'VirtualMachineScaleSets'.

    count Number

    Number of agents (VMs) to host docker containers. Allowed values must be in the range of 0 to 1000 (inclusive) for user pools and in the range of 1 to 1000 (inclusive) for system pools. The default value is 1.

    creationData Property Map

    CreationData to be used to specify the source Snapshot ID if the node pool will be created/upgraded using a snapshot.

    enableAutoScaling Boolean

    Whether to enable auto-scaler

    enableEncryptionAtHost Boolean

    This is only supported on certain VM sizes and in certain Azure regions. For more information, see: https://docs.microsoft.com/azure/aks/enable-host-encryption

    enableFIPS Boolean

    See Add a FIPS-enabled node pool for more details.

    enableNodePublicIP Boolean

    Some scenarios may require nodes in a node pool to receive their own dedicated public IP addresses. A common scenario is for gaming workloads, where a console needs to make a direct connection to a cloud virtual machine to minimize hops. For more information see assigning a public IP per node. The default is false.

    enableUltraSSD Boolean

    Whether to enable UltraSSD

    gpuInstanceProfile String | "MIG1g" | "MIG2g" | "MIG3g" | "MIG4g" | "MIG7g"

    GPUInstanceProfile to be used to specify GPU MIG instance profile for supported GPU VM SKU.

    hostGroupID String

    This is of the form: /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/hostGroups/{hostGroupName}. For more information see Azure dedicated hosts.

    kubeletConfig Property Map

    The Kubelet configuration on the agent pool nodes.

    kubeletDiskType String | "OS" | "Temporary"

    Determines the placement of emptyDir volumes, container runtime data root, and Kubelet ephemeral storage.

    linuxOSConfig Property Map

    The OS configuration of Linux agent nodes.

    maxCount Number

    The maximum number of nodes for auto-scaling

    maxPods Number

    The maximum number of pods that can run on a node.

    minCount Number

    The minimum number of nodes for auto-scaling

    mode String | "System" | "User"

    A cluster must have at least one 'System' Agent Pool at all times. For additional information on agent pool restrictions and best practices, see: https://docs.microsoft.com/azure/aks/use-system-pools

    nodeLabels Map<String>

    The node labels to be persisted across all nodes in agent pool.

    nodePublicIPPrefixID String

    This is of the form: /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPPrefixes/{publicIPPrefixName}

    nodeTaints List<String>

    The taints added to new nodes during node pool create and scale. For example, key=value:NoSchedule.

    orchestratorVersion String

    Both patch version <major.minor.patch> (e.g. 1.20.13) and <major.minor> (e.g. 1.20) are supported. When <major.minor> is specified, the latest supported GA patch version is chosen automatically. Updating the cluster with the same <major.minor> once it has been created (e.g. 1.14.x -> 1.14) will not trigger an upgrade, even if a newer patch version is available. As a best practice, you should upgrade all node pools in an AKS cluster to the same Kubernetes version. The node pool version must have the same major version as the control plane. The node pool minor version must be within two minor versions of the control plane version. The node pool version cannot be greater than the control plane version. For more information see upgrading a node pool.

    osDiskSizeGB Number

    OS Disk Size in GB to be used to specify the disk size for every machine in the master/agent pool. If you specify 0, it will apply the default osDisk size according to the vmSize specified.

    osDiskType String | "Managed" | "Ephemeral"

    The default is 'Ephemeral' if the VM supports it and has a cache disk larger than the requested OSDiskSizeGB. Otherwise, defaults to 'Managed'. May not be changed after creation. For more information see Ephemeral OS.

    osSKU String | "Ubuntu" | "AzureLinux" | "CBLMariner" | "Windows2019" | "Windows2022"

    Specifies the OS SKU used by the agent pool. The default is Ubuntu if OSType is Linux. The default is Windows2019 when Kubernetes <= 1.24 or Windows2022 when Kubernetes >= 1.25 if OSType is Windows.

    osType String | "Linux" | "Windows"

    The operating system type. The default is Linux.

    podSubnetID String

    If omitted, pod IPs are statically assigned on the node subnet (see vnetSubnetID for more details). This is of the form: /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets/{subnetName}

    powerState Property Map

    When an Agent Pool is first created it is initially Running. The Agent Pool can be stopped by setting this field to Stopped. A stopped Agent Pool stops all of its VMs and does not accrue billing charges. An Agent Pool can only be stopped if it is Running and provisioning state is Succeeded

    proximityPlacementGroupID String

    The ID for Proximity Placement Group.

    scaleDownMode String | "Delete" | "Deallocate"

    This also affects the cluster autoscaler behavior. If not specified, it defaults to Delete.

    scaleSetEvictionPolicy String | "Delete" | "Deallocate"

    This cannot be specified unless the scaleSetPriority is 'Spot'. If not specified, the default is 'Delete'.

    scaleSetPriority String | "Spot" | "Regular"

    The Virtual Machine Scale Set priority. If not specified, the default is 'Regular'.

    spotMaxPrice Number

    Possible values are any decimal value greater than zero or -1 which indicates the willingness to pay any on-demand price. For more details on spot pricing, see spot VMs pricing

    tags Map<String>

    The tags to be persisted on the agent pool virtual machine scale set.

    type String | "VirtualMachineScaleSets" | "AvailabilitySet"

    The type of Agent Pool.

    upgradeSettings Property Map

    Settings for upgrading the agent pool

    vmSize String

    VM size availability varies by region. If a node contains insufficient compute resources (memory, cpu, etc) pods might fail to run correctly. For more details on restricted VM sizes, see: https://docs.microsoft.com/azure/aks/quotas-skus-regions

    vnetSubnetID String

    If this is not specified, a VNET and subnet will be generated and used. If no podSubnetID is specified, this applies to nodes and pods, otherwise it applies to just nodes. This is of the form: /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets/{subnetName}

    workloadRuntime String | "OCIContainer" | "WasmWasi"

    Determines the type of workload a node can run.

    Outputs

    All input properties are implicitly available as output properties. Additionally, the AgentPool resource produces the following output properties:

    CurrentOrchestratorVersion string

    If orchestratorVersion is a fully specified version <major.minor.patch>, this field will be exactly equal to it. If orchestratorVersion is <major.minor>, this field will contain the full <major.minor.patch> version being used.

    Id string

    The provider-assigned unique ID for this managed resource.

    Name string

    The name of the resource that is unique within a resource group. This name can be used to access the resource.

    NodeImageVersion string

    The version of node image

    ProvisioningState string

    The current deployment or provisioning state.

    CurrentOrchestratorVersion string

    If orchestratorVersion is a fully specified version <major.minor.patch>, this field will be exactly equal to it. If orchestratorVersion is <major.minor>, this field will contain the full <major.minor.patch> version being used.

    Id string

    The provider-assigned unique ID for this managed resource.

    Name string

    The name of the resource that is unique within a resource group. This name can be used to access the resource.

    NodeImageVersion string

    The version of node image

    ProvisioningState string

    The current deployment or provisioning state.

    currentOrchestratorVersion String

    If orchestratorVersion is a fully specified version <major.minor.patch>, this field will be exactly equal to it. If orchestratorVersion is <major.minor>, this field will contain the full <major.minor.patch> version being used.

    id String

    The provider-assigned unique ID for this managed resource.

    name String

    The name of the resource that is unique within a resource group. This name can be used to access the resource.

    nodeImageVersion String

    The version of node image

    provisioningState String

    The current deployment or provisioning state.

    currentOrchestratorVersion string

    If orchestratorVersion is a fully specified version <major.minor.patch>, this field will be exactly equal to it. If orchestratorVersion is <major.minor>, this field will contain the full <major.minor.patch> version being used.

    id string

    The provider-assigned unique ID for this managed resource.

    name string

    The name of the resource that is unique within a resource group. This name can be used to access the resource.

    nodeImageVersion string

    The version of node image

    provisioningState string

    The current deployment or provisioning state.

    current_orchestrator_version str

    If orchestratorVersion is a fully specified version <major.minor.patch>, this field will be exactly equal to it. If orchestratorVersion is <major.minor>, this field will contain the full <major.minor.patch> version being used.

    id str

    The provider-assigned unique ID for this managed resource.

    name str

    The name of the resource that is unique within a resource group. This name can be used to access the resource.

    node_image_version str

    The version of node image

    provisioning_state str

    The current deployment or provisioning state.

    currentOrchestratorVersion String

    If orchestratorVersion is a fully specified version <major.minor.patch>, this field will be exactly equal to it. If orchestratorVersion is <major.minor>, this field will contain the full <major.minor.patch> version being used.

    id String

    The provider-assigned unique ID for this managed resource.

    name String

    The name of the resource that is unique within a resource group. This name can be used to access the resource.

    nodeImageVersion String

    The version of node image

    provisioningState String

    The current deployment or provisioning state.

    Supporting Types

    AgentPoolMode, AgentPoolModeArgs

    System
    System

    System agent pools are primarily for hosting critical system pods such as CoreDNS and metrics-server. System agent pools osType must be Linux. System agent pools VM SKU must have at least 2vCPUs and 4GB of memory.

    User
    User

    User agent pools are primarily for hosting your application pods.

    AgentPoolModeSystem
    System

    System agent pools are primarily for hosting critical system pods such as CoreDNS and metrics-server. System agent pools osType must be Linux. System agent pools VM SKU must have at least 2vCPUs and 4GB of memory.

    AgentPoolModeUser
    User

    User agent pools are primarily for hosting your application pods.

    System
    System

    System agent pools are primarily for hosting critical system pods such as CoreDNS and metrics-server. System agent pools osType must be Linux. System agent pools VM SKU must have at least 2vCPUs and 4GB of memory.

    User
    User

    User agent pools are primarily for hosting your application pods.

    System
    System

    System agent pools are primarily for hosting critical system pods such as CoreDNS and metrics-server. System agent pools osType must be Linux. System agent pools VM SKU must have at least 2vCPUs and 4GB of memory.

    User
    User

    User agent pools are primarily for hosting your application pods.

    SYSTEM
    System

    System agent pools are primarily for hosting critical system pods such as CoreDNS and metrics-server. System agent pools osType must be Linux. System agent pools VM SKU must have at least 2vCPUs and 4GB of memory.

    USER
    User

    User agent pools are primarily for hosting your application pods.

    "System"
    System

    System agent pools are primarily for hosting critical system pods such as CoreDNS and metrics-server. System agent pools osType must be Linux. System agent pools VM SKU must have at least 2vCPUs and 4GB of memory.

    "User"
    User

    User agent pools are primarily for hosting your application pods.

    AgentPoolType, AgentPoolTypeArgs

    VirtualMachineScaleSets
    VirtualMachineScaleSets

    Create an Agent Pool backed by a Virtual Machine Scale Set.

    AvailabilitySet
    AvailabilitySet

    Use of this is strongly discouraged.

    AgentPoolTypeVirtualMachineScaleSets
    VirtualMachineScaleSets

    Create an Agent Pool backed by a Virtual Machine Scale Set.

    AgentPoolTypeAvailabilitySet
    AvailabilitySet

    Use of this is strongly discouraged.

    VirtualMachineScaleSets
    VirtualMachineScaleSets

    Create an Agent Pool backed by a Virtual Machine Scale Set.

    AvailabilitySet
    AvailabilitySet

    Use of this is strongly discouraged.

    VirtualMachineScaleSets
    VirtualMachineScaleSets

    Create an Agent Pool backed by a Virtual Machine Scale Set.

    AvailabilitySet
    AvailabilitySet

    Use of this is strongly discouraged.

    VIRTUAL_MACHINE_SCALE_SETS
    VirtualMachineScaleSets

    Create an Agent Pool backed by a Virtual Machine Scale Set.

    AVAILABILITY_SET
    AvailabilitySet

    Use of this is strongly discouraged.

    "VirtualMachineScaleSets"
    VirtualMachineScaleSets

    Create an Agent Pool backed by a Virtual Machine Scale Set.

    "AvailabilitySet"
    AvailabilitySet

    Use of this is strongly discouraged.

    AgentPoolUpgradeSettings, AgentPoolUpgradeSettingsArgs

    MaxSurge string

    This can either be set to an integer (e.g. '5') or a percentage (e.g. '50%'). If a percentage is specified, it is the percentage of the total agent pool size at the time of the upgrade. For percentages, fractional nodes are rounded up. If not specified, the default is 1. For more information, including best practices, see: https://docs.microsoft.com/azure/aks/upgrade-cluster#customize-node-surge-upgrade

    MaxSurge string

    This can either be set to an integer (e.g. '5') or a percentage (e.g. '50%'). If a percentage is specified, it is the percentage of the total agent pool size at the time of the upgrade. For percentages, fractional nodes are rounded up. If not specified, the default is 1. For more information, including best practices, see: https://docs.microsoft.com/azure/aks/upgrade-cluster#customize-node-surge-upgrade

    maxSurge String

    This can either be set to an integer (e.g. '5') or a percentage (e.g. '50%'). If a percentage is specified, it is the percentage of the total agent pool size at the time of the upgrade. For percentages, fractional nodes are rounded up. If not specified, the default is 1. For more information, including best practices, see: https://docs.microsoft.com/azure/aks/upgrade-cluster#customize-node-surge-upgrade

    maxSurge string

    This can either be set to an integer (e.g. '5') or a percentage (e.g. '50%'). If a percentage is specified, it is the percentage of the total agent pool size at the time of the upgrade. For percentages, fractional nodes are rounded up. If not specified, the default is 1. For more information, including best practices, see: https://docs.microsoft.com/azure/aks/upgrade-cluster#customize-node-surge-upgrade

    max_surge str

    This can either be set to an integer (e.g. '5') or a percentage (e.g. '50%'). If a percentage is specified, it is the percentage of the total agent pool size at the time of the upgrade. For percentages, fractional nodes are rounded up. If not specified, the default is 1. For more information, including best practices, see: https://docs.microsoft.com/azure/aks/upgrade-cluster#customize-node-surge-upgrade

    maxSurge String

    This can either be set to an integer (e.g. '5') or a percentage (e.g. '50%'). If a percentage is specified, it is the percentage of the total agent pool size at the time of the upgrade. For percentages, fractional nodes are rounded up. If not specified, the default is 1. For more information, including best practices, see: https://docs.microsoft.com/azure/aks/upgrade-cluster#customize-node-surge-upgrade

    AgentPoolUpgradeSettingsResponse, AgentPoolUpgradeSettingsResponseArgs

    MaxSurge string

    This can either be set to an integer (e.g. '5') or a percentage (e.g. '50%'). If a percentage is specified, it is the percentage of the total agent pool size at the time of the upgrade. For percentages, fractional nodes are rounded up. If not specified, the default is 1. For more information, including best practices, see: https://docs.microsoft.com/azure/aks/upgrade-cluster#customize-node-surge-upgrade

    MaxSurge string

    This can either be set to an integer (e.g. '5') or a percentage (e.g. '50%'). If a percentage is specified, it is the percentage of the total agent pool size at the time of the upgrade. For percentages, fractional nodes are rounded up. If not specified, the default is 1. For more information, including best practices, see: https://docs.microsoft.com/azure/aks/upgrade-cluster#customize-node-surge-upgrade

    maxSurge String

    This can either be set to an integer (e.g. '5') or a percentage (e.g. '50%'). If a percentage is specified, it is the percentage of the total agent pool size at the time of the upgrade. For percentages, fractional nodes are rounded up. If not specified, the default is 1. For more information, including best practices, see: https://docs.microsoft.com/azure/aks/upgrade-cluster#customize-node-surge-upgrade

    maxSurge string

    This can either be set to an integer (e.g. '5') or a percentage (e.g. '50%'). If a percentage is specified, it is the percentage of the total agent pool size at the time of the upgrade. For percentages, fractional nodes are rounded up. If not specified, the default is 1. For more information, including best practices, see: https://docs.microsoft.com/azure/aks/upgrade-cluster#customize-node-surge-upgrade

    max_surge str

    This can either be set to an integer (e.g. '5') or a percentage (e.g. '50%'). If a percentage is specified, it is the percentage of the total agent pool size at the time of the upgrade. For percentages, fractional nodes are rounded up. If not specified, the default is 1. For more information, including best practices, see: https://docs.microsoft.com/azure/aks/upgrade-cluster#customize-node-surge-upgrade

    maxSurge String

    This can either be set to an integer (e.g. '5') or a percentage (e.g. '50%'). If a percentage is specified, it is the percentage of the total agent pool size at the time of the upgrade. For percentages, fractional nodes are rounded up. If not specified, the default is 1. For more information, including best practices, see: https://docs.microsoft.com/azure/aks/upgrade-cluster#customize-node-surge-upgrade

    Code, CodeArgs

    Running
    Running

    The cluster is running.

    Stopped
    Stopped

    The cluster is stopped.

    CodeRunning
    Running

    The cluster is running.

    CodeStopped
    Stopped

    The cluster is stopped.

    Running
    Running

    The cluster is running.

    Stopped
    Stopped

    The cluster is stopped.

    Running
    Running

    The cluster is running.

    Stopped
    Stopped

    The cluster is stopped.

    RUNNING
    Running

    The cluster is running.

    STOPPED
    Stopped

    The cluster is stopped.

    "Running"
    Running

    The cluster is running.

    "Stopped"
    Stopped

    The cluster is stopped.

    CreationData, CreationDataArgs

    SourceResourceId string

    This is the ARM ID of the source object to be used to create the target object.

    SourceResourceId string

    This is the ARM ID of the source object to be used to create the target object.

    sourceResourceId String

    This is the ARM ID of the source object to be used to create the target object.

    sourceResourceId string

    This is the ARM ID of the source object to be used to create the target object.

    source_resource_id str

    This is the ARM ID of the source object to be used to create the target object.

    sourceResourceId String

    This is the ARM ID of the source object to be used to create the target object.

    CreationDataResponse, CreationDataResponseArgs

    SourceResourceId string

    This is the ARM ID of the source object to be used to create the target object.

    SourceResourceId string

    This is the ARM ID of the source object to be used to create the target object.

    sourceResourceId String

    This is the ARM ID of the source object to be used to create the target object.

    sourceResourceId string

    This is the ARM ID of the source object to be used to create the target object.

    source_resource_id str

    This is the ARM ID of the source object to be used to create the target object.

    sourceResourceId String

    This is the ARM ID of the source object to be used to create the target object.

    GPUInstanceProfile, GPUInstanceProfileArgs

    MIG1g
    MIG1g
    MIG2g
    MIG2g
    MIG3g
    MIG3g
    MIG4g
    MIG4g
    MIG7g
    MIG7g
    GPUInstanceProfileMIG1g
    MIG1g
    GPUInstanceProfileMIG2g
    MIG2g
    GPUInstanceProfileMIG3g
    MIG3g
    GPUInstanceProfileMIG4g
    MIG4g
    GPUInstanceProfileMIG7g
    MIG7g
    MIG1g
    MIG1g
    MIG2g
    MIG2g
    MIG3g
    MIG3g
    MIG4g
    MIG4g
    MIG7g
    MIG7g
    MIG1g
    MIG1g
    MIG2g
    MIG2g
    MIG3g
    MIG3g
    MIG4g
    MIG4g
    MIG7g
    MIG7g
    MIG1G
    MIG1g
    MIG2G
    MIG2g
    MIG3G
    MIG3g
    MIG4G
    MIG4g
    MIG7G
    MIG7g
    "MIG1g"
    MIG1g
    "MIG2g"
    MIG2g
    "MIG3g"
    MIG3g
    "MIG4g"
    MIG4g
    "MIG7g"
    MIG7g

    KubeletConfig, KubeletConfigArgs

    AllowedUnsafeSysctls List<string>

    Allowed list of unsafe sysctls or unsafe sysctl patterns (ending in *).

    ContainerLogMaxFiles int

    The maximum number of container log files that can be present for a container. The number must be ≥ 2.

    ContainerLogMaxSizeMB int

    The maximum size (e.g. 10Mi) of container log file before it is rotated.

    CpuCfsQuota bool

    The default is true.

    CpuCfsQuotaPeriod string

    The default is '100ms'. Valid values are a sequence of decimal numbers with an optional fraction and a unit suffix. For example: '300ms', '2h45m'. Supported units are 'ns', 'us', 'ms', 's', 'm', and 'h'.

    CpuManagerPolicy string

    The default is 'none'. See Kubernetes CPU management policies for more information. Allowed values are 'none' and 'static'.

    FailSwapOn bool

    If set to true it will make the Kubelet fail to start if swap is enabled on the node.

    ImageGcHighThreshold int

    To disable image garbage collection, set to 100. The default is 85%

    ImageGcLowThreshold int

    This cannot be set higher than imageGcHighThreshold. The default is 80%

    PodMaxPids int

    The maximum number of processes per pod.

    TopologyManagerPolicy string

    For more information see Kubernetes Topology Manager. The default is 'none'. Allowed values are 'none', 'best-effort', 'restricted', and 'single-numa-node'.

    AllowedUnsafeSysctls []string

    Allowed list of unsafe sysctls or unsafe sysctl patterns (ending in *).

    ContainerLogMaxFiles int

    The maximum number of container log files that can be present for a container. The number must be ≥ 2.

    ContainerLogMaxSizeMB int

    The maximum size (e.g. 10Mi) of container log file before it is rotated.

    CpuCfsQuota bool

    The default is true.

    CpuCfsQuotaPeriod string

    The default is '100ms'. Valid values are a sequence of decimal numbers with an optional fraction and a unit suffix. For example: '300ms', '2h45m'. Supported units are 'ns', 'us', 'ms', 's', 'm', and 'h'.

    CpuManagerPolicy string

    The default is 'none'. See Kubernetes CPU management policies for more information. Allowed values are 'none' and 'static'.

    FailSwapOn bool

    If set to true it will make the Kubelet fail to start if swap is enabled on the node.

    ImageGcHighThreshold int

    To disable image garbage collection, set to 100. The default is 85%

    ImageGcLowThreshold int

    This cannot be set higher than imageGcHighThreshold. The default is 80%

    PodMaxPids int

    The maximum number of processes per pod.

    TopologyManagerPolicy string

    For more information see Kubernetes Topology Manager. The default is 'none'. Allowed values are 'none', 'best-effort', 'restricted', and 'single-numa-node'.

    allowedUnsafeSysctls List<String>

    Allowed list of unsafe sysctls or unsafe sysctl patterns (ending in *).

    containerLogMaxFiles Integer

    The maximum number of container log files that can be present for a container. The number must be ≥ 2.

    containerLogMaxSizeMB Integer

    The maximum size (e.g. 10Mi) of container log file before it is rotated.

    cpuCfsQuota Boolean

    The default is true.

    cpuCfsQuotaPeriod String

    The default is '100ms'. Valid values are a sequence of decimal numbers with an optional fraction and a unit suffix. For example: '300ms', '2h45m'. Supported units are 'ns', 'us', 'ms', 's', 'm', and 'h'.

    cpuManagerPolicy String

    The default is 'none'. See Kubernetes CPU management policies for more information. Allowed values are 'none' and 'static'.

    failSwapOn Boolean

    If set to true it will make the Kubelet fail to start if swap is enabled on the node.

    imageGcHighThreshold Integer

    To disable image garbage collection, set to 100. The default is 85%

    imageGcLowThreshold Integer

    This cannot be set higher than imageGcHighThreshold. The default is 80%

    podMaxPids Integer

    The maximum number of processes per pod.

    topologyManagerPolicy String

    For more information see Kubernetes Topology Manager. The default is 'none'. Allowed values are 'none', 'best-effort', 'restricted', and 'single-numa-node'.

    allowedUnsafeSysctls string[]

    Allowed list of unsafe sysctls or unsafe sysctl patterns (ending in *).

    containerLogMaxFiles number

    The maximum number of container log files that can be present for a container. The number must be ≥ 2.

    containerLogMaxSizeMB number

    The maximum size (e.g. 10Mi) of container log file before it is rotated.

    cpuCfsQuota boolean

    The default is true.

    cpuCfsQuotaPeriod string

    The default is '100ms'. Valid values are a sequence of decimal numbers with an optional fraction and a unit suffix. For example: '300ms', '2h45m'. Supported units are 'ns', 'us', 'ms', 's', 'm', and 'h'.

    cpuManagerPolicy string

    The default is 'none'. See Kubernetes CPU management policies for more information. Allowed values are 'none' and 'static'.

    failSwapOn boolean

    If set to true it will make the Kubelet fail to start if swap is enabled on the node.

    imageGcHighThreshold number

    To disable image garbage collection, set to 100. The default is 85%

    imageGcLowThreshold number

    This cannot be set higher than imageGcHighThreshold. The default is 80%

    podMaxPids number

    The maximum number of processes per pod.

    topologyManagerPolicy string

    For more information see Kubernetes Topology Manager. The default is 'none'. Allowed values are 'none', 'best-effort', 'restricted', and 'single-numa-node'.

    allowed_unsafe_sysctls Sequence[str]

    Allowed list of unsafe sysctls or unsafe sysctl patterns (ending in *).

    container_log_max_files int

    The maximum number of container log files that can be present for a container. The number must be ≥ 2.

    container_log_max_size_mb int

    The maximum size (e.g. 10Mi) of container log file before it is rotated.

    cpu_cfs_quota bool

    The default is true.

    cpu_cfs_quota_period str

    The default is '100ms'. Valid values are a sequence of decimal numbers with an optional fraction and a unit suffix. For example: '300ms', '2h45m'. Supported units are 'ns', 'us', 'ms', 's', 'm', and 'h'.

    cpu_manager_policy str

    The default is 'none'. See Kubernetes CPU management policies for more information. Allowed values are 'none' and 'static'.

    fail_swap_on bool

    If set to true it will make the Kubelet fail to start if swap is enabled on the node.

    image_gc_high_threshold int

    To disable image garbage collection, set to 100. The default is 85%

    image_gc_low_threshold int

    This cannot be set higher than imageGcHighThreshold. The default is 80%

    pod_max_pids int

    The maximum number of processes per pod.

    topology_manager_policy str

    For more information see Kubernetes Topology Manager. The default is 'none'. Allowed values are 'none', 'best-effort', 'restricted', and 'single-numa-node'.

    allowedUnsafeSysctls List<String>

    Allowed list of unsafe sysctls or unsafe sysctl patterns (ending in *).

    containerLogMaxFiles Number

    The maximum number of container log files that can be present for a container. The number must be ≥ 2.

    containerLogMaxSizeMB Number

    The maximum size (e.g. 10Mi) of container log file before it is rotated.

    cpuCfsQuota Boolean

    The default is true.

    cpuCfsQuotaPeriod String

    The default is '100ms'. Valid values are a sequence of decimal numbers with an optional fraction and a unit suffix. For example: '300ms', '2h45m'. Supported units are 'ns', 'us', 'ms', 's', 'm', and 'h'.

    cpuManagerPolicy String

    The default is 'none'. See Kubernetes CPU management policies for more information. Allowed values are 'none' and 'static'.

    failSwapOn Boolean

    If set to true it will make the Kubelet fail to start if swap is enabled on the node.

    imageGcHighThreshold Number

    To disable image garbage collection, set to 100. The default is 85%.

    imageGcLowThreshold Number

    This cannot be set higher than imageGcHighThreshold. The default is 80%.

    podMaxPids Number

    The maximum number of processes per pod.

    topologyManagerPolicy String

    For more information see Kubernetes Topology Manager. The default is 'none'. Allowed values are 'none', 'best-effort', 'restricted', and 'single-numa-node'.

    KubeletConfigResponse, KubeletConfigResponseArgs

    AllowedUnsafeSysctls List<string>

    Allowed list of unsafe sysctls or unsafe sysctl patterns (ending in *).

    ContainerLogMaxFiles int

    The maximum number of container log files that can be present for a container. The number must be ≥ 2.

    ContainerLogMaxSizeMB int

    The maximum size (e.g. 10Mi) of container log file before it is rotated.

    CpuCfsQuota bool

    The default is true.

    CpuCfsQuotaPeriod string

    The default is '100ms'. Valid values are a sequence of decimal numbers with an optional fraction and a unit suffix. For example: '300ms', '2h45m'. Supported units are 'ns', 'us', 'ms', 's', 'm', and 'h'.

    CpuManagerPolicy string

    The default is 'none'. See Kubernetes CPU management policies for more information. Allowed values are 'none' and 'static'.

    FailSwapOn bool

    If set to true it will make the Kubelet fail to start if swap is enabled on the node.

    ImageGcHighThreshold int

    To disable image garbage collection, set to 100. The default is 85%.

    ImageGcLowThreshold int

    This cannot be set higher than imageGcHighThreshold. The default is 80%.

    PodMaxPids int

    The maximum number of processes per pod.

    TopologyManagerPolicy string

    For more information see Kubernetes Topology Manager. The default is 'none'. Allowed values are 'none', 'best-effort', 'restricted', and 'single-numa-node'.

    AllowedUnsafeSysctls []string

    Allowed list of unsafe sysctls or unsafe sysctl patterns (ending in *).

    ContainerLogMaxFiles int

    The maximum number of container log files that can be present for a container. The number must be ≥ 2.

    ContainerLogMaxSizeMB int

    The maximum size (e.g. 10Mi) of container log file before it is rotated.

    CpuCfsQuota bool

    The default is true.

    CpuCfsQuotaPeriod string

    The default is '100ms'. Valid values are a sequence of decimal numbers with an optional fraction and a unit suffix. For example: '300ms', '2h45m'. Supported units are 'ns', 'us', 'ms', 's', 'm', and 'h'.

    CpuManagerPolicy string

    The default is 'none'. See Kubernetes CPU management policies for more information. Allowed values are 'none' and 'static'.

    FailSwapOn bool

    If set to true it will make the Kubelet fail to start if swap is enabled on the node.

    ImageGcHighThreshold int

    To disable image garbage collection, set to 100. The default is 85%.

    ImageGcLowThreshold int

    This cannot be set higher than imageGcHighThreshold. The default is 80%.

    PodMaxPids int

    The maximum number of processes per pod.

    TopologyManagerPolicy string

    For more information see Kubernetes Topology Manager. The default is 'none'. Allowed values are 'none', 'best-effort', 'restricted', and 'single-numa-node'.

    allowedUnsafeSysctls List<String>

    Allowed list of unsafe sysctls or unsafe sysctl patterns (ending in *).

    containerLogMaxFiles Integer

    The maximum number of container log files that can be present for a container. The number must be ≥ 2.

    containerLogMaxSizeMB Integer

    The maximum size (e.g. 10Mi) of container log file before it is rotated.

    cpuCfsQuota Boolean

    The default is true.

    cpuCfsQuotaPeriod String

    The default is '100ms'. Valid values are a sequence of decimal numbers with an optional fraction and a unit suffix. For example: '300ms', '2h45m'. Supported units are 'ns', 'us', 'ms', 's', 'm', and 'h'.

    cpuManagerPolicy String

    The default is 'none'. See Kubernetes CPU management policies for more information. Allowed values are 'none' and 'static'.

    failSwapOn Boolean

    If set to true it will make the Kubelet fail to start if swap is enabled on the node.

    imageGcHighThreshold Integer

    To disable image garbage collection, set to 100. The default is 85%.

    imageGcLowThreshold Integer

    This cannot be set higher than imageGcHighThreshold. The default is 80%.

    podMaxPids Integer

    The maximum number of processes per pod.

    topologyManagerPolicy String

    For more information see Kubernetes Topology Manager. The default is 'none'. Allowed values are 'none', 'best-effort', 'restricted', and 'single-numa-node'.

    allowedUnsafeSysctls string[]

    Allowed list of unsafe sysctls or unsafe sysctl patterns (ending in *).

    containerLogMaxFiles number

    The maximum number of container log files that can be present for a container. The number must be ≥ 2.

    containerLogMaxSizeMB number

    The maximum size (e.g. 10Mi) of container log file before it is rotated.

    cpuCfsQuota boolean

    The default is true.

    cpuCfsQuotaPeriod string

    The default is '100ms'. Valid values are a sequence of decimal numbers with an optional fraction and a unit suffix. For example: '300ms', '2h45m'. Supported units are 'ns', 'us', 'ms', 's', 'm', and 'h'.

    cpuManagerPolicy string

    The default is 'none'. See Kubernetes CPU management policies for more information. Allowed values are 'none' and 'static'.

    failSwapOn boolean

    If set to true it will make the Kubelet fail to start if swap is enabled on the node.

    imageGcHighThreshold number

    To disable image garbage collection, set to 100. The default is 85%.

    imageGcLowThreshold number

    This cannot be set higher than imageGcHighThreshold. The default is 80%.

    podMaxPids number

    The maximum number of processes per pod.

    topologyManagerPolicy string

    For more information see Kubernetes Topology Manager. The default is 'none'. Allowed values are 'none', 'best-effort', 'restricted', and 'single-numa-node'.

    allowed_unsafe_sysctls Sequence[str]

    Allowed list of unsafe sysctls or unsafe sysctl patterns (ending in *).

    container_log_max_files int

    The maximum number of container log files that can be present for a container. The number must be ≥ 2.

    container_log_max_size_mb int

    The maximum size (e.g. 10Mi) of container log file before it is rotated.

    cpu_cfs_quota bool

    The default is true.

    cpu_cfs_quota_period str

    The default is '100ms'. Valid values are a sequence of decimal numbers with an optional fraction and a unit suffix. For example: '300ms', '2h45m'. Supported units are 'ns', 'us', 'ms', 's', 'm', and 'h'.

    cpu_manager_policy str

    The default is 'none'. See Kubernetes CPU management policies for more information. Allowed values are 'none' and 'static'.

    fail_swap_on bool

    If set to true it will make the Kubelet fail to start if swap is enabled on the node.

    image_gc_high_threshold int

    To disable image garbage collection, set to 100. The default is 85%.

    image_gc_low_threshold int

    This cannot be set higher than imageGcHighThreshold. The default is 80%.

    pod_max_pids int

    The maximum number of processes per pod.

    topology_manager_policy str

    For more information see Kubernetes Topology Manager. The default is 'none'. Allowed values are 'none', 'best-effort', 'restricted', and 'single-numa-node'.

    allowedUnsafeSysctls List<String>

    Allowed list of unsafe sysctls or unsafe sysctl patterns (ending in *).

    containerLogMaxFiles Number

    The maximum number of container log files that can be present for a container. The number must be ≥ 2.

    containerLogMaxSizeMB Number

    The maximum size (e.g. 10Mi) of container log file before it is rotated.

    cpuCfsQuota Boolean

    The default is true.

    cpuCfsQuotaPeriod String

    The default is '100ms'. Valid values are a sequence of decimal numbers with an optional fraction and a unit suffix. For example: '300ms', '2h45m'. Supported units are 'ns', 'us', 'ms', 's', 'm', and 'h'.

    cpuManagerPolicy String

    The default is 'none'. See Kubernetes CPU management policies for more information. Allowed values are 'none' and 'static'.

    failSwapOn Boolean

    If set to true it will make the Kubelet fail to start if swap is enabled on the node.

    imageGcHighThreshold Number

    To disable image garbage collection, set to 100. The default is 85%.

    imageGcLowThreshold Number

    This cannot be set higher than imageGcHighThreshold. The default is 80%.

    podMaxPids Number

    The maximum number of processes per pod.

    topologyManagerPolicy String

    For more information see Kubernetes Topology Manager. The default is 'none'. Allowed values are 'none', 'best-effort', 'restricted', and 'single-numa-node'.

    KubeletDiskType, KubeletDiskTypeArgs

    OS
    OS

    Kubelet will use the OS disk for its data.

    Temporary
    Temporary

    Kubelet will use the temporary disk for its data.

    KubeletDiskTypeOS
    OS

    Kubelet will use the OS disk for its data.

    KubeletDiskTypeTemporary
    Temporary

    Kubelet will use the temporary disk for its data.

    OS
    OS

    Kubelet will use the OS disk for its data.

    Temporary
    Temporary

    Kubelet will use the temporary disk for its data.

    OS
    OS

    Kubelet will use the OS disk for its data.

    Temporary
    Temporary

    Kubelet will use the temporary disk for its data.

    OS
    OS

    Kubelet will use the OS disk for its data.

    TEMPORARY
    Temporary

    Kubelet will use the temporary disk for its data.

    "OS"
    OS

    Kubelet will use the OS disk for its data.

    "Temporary"
    Temporary

    Kubelet will use the temporary disk for its data.

    LinuxOSConfig, LinuxOSConfigArgs

    SwapFileSizeMB int

    The size in MB of a swap file that will be created on each node.

    Sysctls Pulumi.AzureNative.ContainerService.Inputs.SysctlConfig

    Sysctl settings for Linux agent nodes.

    TransparentHugePageDefrag string

    Valid values are 'always', 'defer', 'defer+madvise', 'madvise' and 'never'. The default is 'madvise'. For more information see Transparent Hugepages.

    TransparentHugePageEnabled string

    Valid values are 'always', 'madvise', and 'never'. The default is 'always'. For more information see Transparent Hugepages.

    SwapFileSizeMB int

    The size in MB of a swap file that will be created on each node.

    Sysctls SysctlConfig

    Sysctl settings for Linux agent nodes.

    TransparentHugePageDefrag string

    Valid values are 'always', 'defer', 'defer+madvise', 'madvise' and 'never'. The default is 'madvise'. For more information see Transparent Hugepages.

    TransparentHugePageEnabled string

    Valid values are 'always', 'madvise', and 'never'. The default is 'always'. For more information see Transparent Hugepages.

    swapFileSizeMB Integer

    The size in MB of a swap file that will be created on each node.

    sysctls SysctlConfig

    Sysctl settings for Linux agent nodes.

    transparentHugePageDefrag String

    Valid values are 'always', 'defer', 'defer+madvise', 'madvise' and 'never'. The default is 'madvise'. For more information see Transparent Hugepages.

    transparentHugePageEnabled String

    Valid values are 'always', 'madvise', and 'never'. The default is 'always'. For more information see Transparent Hugepages.

    swapFileSizeMB number

    The size in MB of a swap file that will be created on each node.

    sysctls SysctlConfig

    Sysctl settings for Linux agent nodes.

    transparentHugePageDefrag string

    Valid values are 'always', 'defer', 'defer+madvise', 'madvise' and 'never'. The default is 'madvise'. For more information see Transparent Hugepages.

    transparentHugePageEnabled string

    Valid values are 'always', 'madvise', and 'never'. The default is 'always'. For more information see Transparent Hugepages.

    swap_file_size_mb int

    The size in MB of a swap file that will be created on each node.

    sysctls SysctlConfig

    Sysctl settings for Linux agent nodes.

    transparent_huge_page_defrag str

    Valid values are 'always', 'defer', 'defer+madvise', 'madvise' and 'never'. The default is 'madvise'. For more information see Transparent Hugepages.

    transparent_huge_page_enabled str

    Valid values are 'always', 'madvise', and 'never'. The default is 'always'. For more information see Transparent Hugepages.

    swapFileSizeMB Number

    The size in MB of a swap file that will be created on each node.

    sysctls Property Map

    Sysctl settings for Linux agent nodes.

    transparentHugePageDefrag String

    Valid values are 'always', 'defer', 'defer+madvise', 'madvise' and 'never'. The default is 'madvise'. For more information see Transparent Hugepages.

    transparentHugePageEnabled String

    Valid values are 'always', 'madvise', and 'never'. The default is 'always'. For more information see Transparent Hugepages.

    LinuxOSConfigResponse, LinuxOSConfigResponseArgs

    SwapFileSizeMB int

    The size in MB of a swap file that will be created on each node.

    Sysctls Pulumi.AzureNative.ContainerService.Inputs.SysctlConfigResponse

    Sysctl settings for Linux agent nodes.

    TransparentHugePageDefrag string

    Valid values are 'always', 'defer', 'defer+madvise', 'madvise' and 'never'. The default is 'madvise'. For more information see Transparent Hugepages.

    TransparentHugePageEnabled string

    Valid values are 'always', 'madvise', and 'never'. The default is 'always'. For more information see Transparent Hugepages.

    SwapFileSizeMB int

    The size in MB of a swap file that will be created on each node.

    Sysctls SysctlConfigResponse

    Sysctl settings for Linux agent nodes.

    TransparentHugePageDefrag string

    Valid values are 'always', 'defer', 'defer+madvise', 'madvise' and 'never'. The default is 'madvise'. For more information see Transparent Hugepages.

    TransparentHugePageEnabled string

    Valid values are 'always', 'madvise', and 'never'. The default is 'always'. For more information see Transparent Hugepages.

    swapFileSizeMB Integer

    The size in MB of a swap file that will be created on each node.

    sysctls SysctlConfigResponse

    Sysctl settings for Linux agent nodes.

    transparentHugePageDefrag String

    Valid values are 'always', 'defer', 'defer+madvise', 'madvise' and 'never'. The default is 'madvise'. For more information see Transparent Hugepages.

    transparentHugePageEnabled String

    Valid values are 'always', 'madvise', and 'never'. The default is 'always'. For more information see Transparent Hugepages.

    swapFileSizeMB number

    The size in MB of a swap file that will be created on each node.

    sysctls SysctlConfigResponse

    Sysctl settings for Linux agent nodes.

    transparentHugePageDefrag string

    Valid values are 'always', 'defer', 'defer+madvise', 'madvise' and 'never'. The default is 'madvise'. For more information see Transparent Hugepages.

    transparentHugePageEnabled string

    Valid values are 'always', 'madvise', and 'never'. The default is 'always'. For more information see Transparent Hugepages.

    swap_file_size_mb int

    The size in MB of a swap file that will be created on each node.

    sysctls SysctlConfigResponse

    Sysctl settings for Linux agent nodes.

    transparent_huge_page_defrag str

    Valid values are 'always', 'defer', 'defer+madvise', 'madvise' and 'never'. The default is 'madvise'. For more information see Transparent Hugepages.

    transparent_huge_page_enabled str

    Valid values are 'always', 'madvise', and 'never'. The default is 'always'. For more information see Transparent Hugepages.

    swapFileSizeMB Number

    The size in MB of a swap file that will be created on each node.

    sysctls Property Map

    Sysctl settings for Linux agent nodes.

    transparentHugePageDefrag String

    Valid values are 'always', 'defer', 'defer+madvise', 'madvise' and 'never'. The default is 'madvise'. For more information see Transparent Hugepages.

    transparentHugePageEnabled String

    Valid values are 'always', 'madvise', and 'never'. The default is 'always'. For more information see Transparent Hugepages.

    OSDiskType, OSDiskTypeArgs

    Managed
    Managed

    Azure replicates the operating system disk for a virtual machine to Azure storage to avoid data loss should the VM need to be relocated to another host. Since containers aren't designed to have local state persisted, this behavior offers limited value while providing some drawbacks, including slower node provisioning and higher read/write latency.

    Ephemeral
    Ephemeral

    Ephemeral OS disks are stored only on the host machine, just like a temporary disk. This provides lower read/write latency, along with faster node scaling and cluster upgrades.

    OSDiskTypeManaged
    Managed

    Azure replicates the operating system disk for a virtual machine to Azure storage to avoid data loss should the VM need to be relocated to another host. Since containers aren't designed to have local state persisted, this behavior offers limited value while providing some drawbacks, including slower node provisioning and higher read/write latency.

    OSDiskTypeEphemeral
    Ephemeral

    Ephemeral OS disks are stored only on the host machine, just like a temporary disk. This provides lower read/write latency, along with faster node scaling and cluster upgrades.

    Managed
    Managed

    Azure replicates the operating system disk for a virtual machine to Azure storage to avoid data loss should the VM need to be relocated to another host. Since containers aren't designed to have local state persisted, this behavior offers limited value while providing some drawbacks, including slower node provisioning and higher read/write latency.

    Ephemeral
    Ephemeral

    Ephemeral OS disks are stored only on the host machine, just like a temporary disk. This provides lower read/write latency, along with faster node scaling and cluster upgrades.

    Managed
    Managed

    Azure replicates the operating system disk for a virtual machine to Azure storage to avoid data loss should the VM need to be relocated to another host. Since containers aren't designed to have local state persisted, this behavior offers limited value while providing some drawbacks, including slower node provisioning and higher read/write latency.

    Ephemeral
    Ephemeral

    Ephemeral OS disks are stored only on the host machine, just like a temporary disk. This provides lower read/write latency, along with faster node scaling and cluster upgrades.

    MANAGED
    Managed

    Azure replicates the operating system disk for a virtual machine to Azure storage to avoid data loss should the VM need to be relocated to another host. Since containers aren't designed to have local state persisted, this behavior offers limited value while providing some drawbacks, including slower node provisioning and higher read/write latency.

    EPHEMERAL
    Ephemeral

    Ephemeral OS disks are stored only on the host machine, just like a temporary disk. This provides lower read/write latency, along with faster node scaling and cluster upgrades.

    "Managed"
    Managed

    Azure replicates the operating system disk for a virtual machine to Azure storage to avoid data loss should the VM need to be relocated to another host. Since containers aren't designed to have local state persisted, this behavior offers limited value while providing some drawbacks, including slower node provisioning and higher read/write latency.

    "Ephemeral"
    Ephemeral

    Ephemeral OS disks are stored only on the host machine, just like a temporary disk. This provides lower read/write latency, along with faster node scaling and cluster upgrades.

    OSSKU, OSSKUArgs

    Ubuntu
    Ubuntu

    Use Ubuntu as the OS for node images.

    AzureLinux
    AzureLinux

    Use AzureLinux as the OS for node images. Azure Linux is a container-optimized Linux distro built by Microsoft, visit https://aka.ms/azurelinux for more information.

    CBLMariner
    CBLMariner

    Deprecated OSSKU. Microsoft recommends that new deployments choose 'AzureLinux' instead.

    Windows2019
    Windows2019

    Use Windows2019 as the OS for node images. Unsupported for system node pools. Windows2019 only supports Windows2019 containers; it cannot run Windows2022 containers and vice versa.

    Windows2022
    Windows2022

    Use Windows2022 as the OS for node images. Unsupported for system node pools. Windows2022 only supports Windows2022 containers; it cannot run Windows2019 containers and vice versa.

    OSSKUUbuntu
    Ubuntu

    Use Ubuntu as the OS for node images.

    OSSKUAzureLinux
    AzureLinux

    Use AzureLinux as the OS for node images. Azure Linux is a container-optimized Linux distro built by Microsoft, visit https://aka.ms/azurelinux for more information.

    OSSKUCBLMariner
    CBLMariner

    Deprecated OSSKU. Microsoft recommends that new deployments choose 'AzureLinux' instead.

    OSSKUWindows2019
    Windows2019

    Use Windows2019 as the OS for node images. Unsupported for system node pools. Windows2019 only supports Windows2019 containers; it cannot run Windows2022 containers and vice versa.

    OSSKUWindows2022
    Windows2022

    Use Windows2022 as the OS for node images. Unsupported for system node pools. Windows2022 only supports Windows2022 containers; it cannot run Windows2019 containers and vice versa.

    Ubuntu
    Ubuntu

    Use Ubuntu as the OS for node images.

    AzureLinux
    AzureLinux

    Use AzureLinux as the OS for node images. Azure Linux is a container-optimized Linux distro built by Microsoft, visit https://aka.ms/azurelinux for more information.

    CBLMariner
    CBLMariner

    Deprecated OSSKU. Microsoft recommends that new deployments choose 'AzureLinux' instead.

    Windows2019
    Windows2019

    Use Windows2019 as the OS for node images. Unsupported for system node pools. Windows2019 only supports Windows2019 containers; it cannot run Windows2022 containers and vice versa.

    Windows2022
    Windows2022

    Use Windows2022 as the OS for node images. Unsupported for system node pools. Windows2022 only supports Windows2022 containers; it cannot run Windows2019 containers and vice versa.

    Ubuntu
    Ubuntu

    Use Ubuntu as the OS for node images.

    AzureLinux
    AzureLinux

    Use AzureLinux as the OS for node images. Azure Linux is a container-optimized Linux distro built by Microsoft, visit https://aka.ms/azurelinux for more information.

    CBLMariner
    CBLMariner

    Deprecated OSSKU. Microsoft recommends that new deployments choose 'AzureLinux' instead.

    Windows2019
    Windows2019

    Use Windows2019 as the OS for node images. Unsupported for system node pools. Windows2019 only supports Windows2019 containers; it cannot run Windows2022 containers and vice versa.

    Windows2022
    Windows2022

    Use Windows2022 as the OS for node images. Unsupported for system node pools. Windows2022 only supports Windows2022 containers; it cannot run Windows2019 containers and vice versa.

    UBUNTU
    Ubuntu

    Use Ubuntu as the OS for node images.

    AZURE_LINUX
    AzureLinux

    Use AzureLinux as the OS for node images. Azure Linux is a container-optimized Linux distro built by Microsoft, visit https://aka.ms/azurelinux for more information.

    CBL_MARINER
    CBLMariner

    Deprecated OSSKU. Microsoft recommends that new deployments choose 'AzureLinux' instead.

    WINDOWS2019
    Windows2019

    Use Windows2019 as the OS for node images. Unsupported for system node pools. Windows2019 only supports Windows2019 containers; it cannot run Windows2022 containers and vice versa.

    WINDOWS2022
    Windows2022

    Use Windows2022 as the OS for node images. Unsupported for system node pools. Windows2022 only supports Windows2022 containers; it cannot run Windows2019 containers and vice versa.

    "Ubuntu"
    Ubuntu

    Use Ubuntu as the OS for node images.

    "AzureLinux"
    AzureLinux

    Use AzureLinux as the OS for node images. Azure Linux is a container-optimized Linux distro built by Microsoft, visit https://aka.ms/azurelinux for more information.

    "CBLMariner"
    CBLMariner

    Deprecated OSSKU. Microsoft recommends that new deployments choose 'AzureLinux' instead.

    "Windows2019"
    Windows2019

    Use Windows2019 as the OS for node images. Unsupported for system node pools. Windows2019 only supports Windows2019 containers; it cannot run Windows2022 containers and vice versa.

    "Windows2022"
    Windows2022

    Use Windows2022 as the OS for node images. Unsupported for system node pools. Windows2022 only supports Windows2022 containers; it cannot run Windows2019 containers and vice versa.

    OSType, OSTypeArgs

    Linux
    Linux
    Windows
    Windows
    OSTypeLinux
    Linux
    OSTypeWindows
    Windows
    Linux
    Linux
    Windows
    Windows
    Linux
    Linux
    Windows
    Windows
    LINUX
    Linux
    WINDOWS
    Windows
    "Linux"
    Linux
    "Windows"
    Windows

    PowerState, PowerStateArgs

    Code string | Pulumi.AzureNative.ContainerService.Code

    Tells whether the cluster is Running or Stopped

    Code string | Code

    Tells whether the cluster is Running or Stopped

    code String | Code

    Tells whether the cluster is Running or Stopped

    code string | Code

    Tells whether the cluster is Running or Stopped

    code str | Code

    Tells whether the cluster is Running or Stopped

    code String | "Running" | "Stopped"

    Tells whether the cluster is Running or Stopped

    PowerStateResponse, PowerStateResponseArgs

    Code string

    Tells whether the cluster is Running or Stopped

    Code string

    Tells whether the cluster is Running or Stopped

    code String

    Tells whether the cluster is Running or Stopped

    code string

    Tells whether the cluster is Running or Stopped

    code str

    Tells whether the cluster is Running or Stopped

    code String

    Tells whether the cluster is Running or Stopped

    ScaleDownMode, ScaleDownModeArgs

    Delete
    Delete

    Create new instances during scale up and remove instances during scale down.

    Deallocate
    Deallocate

    Attempt to start deallocated instances (if they exist) during scale up and deallocate instances during scale down.

    ScaleDownModeDelete
    Delete

    Create new instances during scale up and remove instances during scale down.

    ScaleDownModeDeallocate
    Deallocate

    Attempt to start deallocated instances (if they exist) during scale up and deallocate instances during scale down.

    Delete
    Delete

    Create new instances during scale up and remove instances during scale down.

    Deallocate
    Deallocate

    Attempt to start deallocated instances (if they exist) during scale up and deallocate instances during scale down.

    Delete
    Delete

    Create new instances during scale up and remove instances during scale down.

    Deallocate
    Deallocate

    Attempt to start deallocated instances (if they exist) during scale up and deallocate instances during scale down.

    DELETE
    Delete

    Create new instances during scale up and remove instances during scale down.

    DEALLOCATE
    Deallocate

    Attempt to start deallocated instances (if they exist) during scale up and deallocate instances during scale down.

    "Delete"
    Delete

    Create new instances during scale up and remove instances during scale down.

    "Deallocate"
    Deallocate

    Attempt to start deallocated instances (if they exist) during scale up and deallocate instances during scale down.

    ScaleSetEvictionPolicy, ScaleSetEvictionPolicyArgs

    Delete
    Delete

    Nodes in the underlying Scale Set of the node pool are deleted when they're evicted.

    Deallocate
    Deallocate

    Nodes in the underlying Scale Set of the node pool are set to the stopped-deallocated state upon eviction. Nodes in the stopped-deallocated state count against your compute quota and can cause issues with cluster scaling or upgrading.

    ScaleSetEvictionPolicyDelete
    Delete

    Nodes in the underlying Scale Set of the node pool are deleted when they're evicted.

    ScaleSetEvictionPolicyDeallocate
    Deallocate

    Nodes in the underlying Scale Set of the node pool are set to the stopped-deallocated state upon eviction. Nodes in the stopped-deallocated state count against your compute quota and can cause issues with cluster scaling or upgrading.

    Delete
    Delete

    Nodes in the underlying Scale Set of the node pool are deleted when they're evicted.

    Deallocate
    Deallocate

    Nodes in the underlying Scale Set of the node pool are set to the stopped-deallocated state upon eviction. Nodes in the stopped-deallocated state count against your compute quota and can cause issues with cluster scaling or upgrading.

    Delete
    Delete

    Nodes in the underlying Scale Set of the node pool are deleted when they're evicted.

    Deallocate
    Deallocate

    Nodes in the underlying Scale Set of the node pool are set to the stopped-deallocated state upon eviction. Nodes in the stopped-deallocated state count against your compute quota and can cause issues with cluster scaling or upgrading.

    DELETE
    Delete

    Nodes in the underlying Scale Set of the node pool are deleted when they're evicted.

    DEALLOCATE
    Deallocate

    Nodes in the underlying Scale Set of the node pool are set to the stopped-deallocated state upon eviction. Nodes in the stopped-deallocated state count against your compute quota and can cause issues with cluster scaling or upgrading.

    "Delete"
    Delete

    Nodes in the underlying Scale Set of the node pool are deleted when they're evicted.

    "Deallocate"
    Deallocate

    Nodes in the underlying Scale Set of the node pool are set to the stopped-deallocated state upon eviction. Nodes in the stopped-deallocated state count against your compute quota and can cause issues with cluster scaling or upgrading.

    ScaleSetPriority, ScaleSetPriorityArgs

    Spot
    Spot

    Spot priority VMs will be used. There is no SLA for spot nodes. See spot on AKS for more information.

    Regular
    Regular

    Regular VMs will be used.

    ScaleSetPrioritySpot
    Spot

    Spot priority VMs will be used. There is no SLA for spot nodes. See spot on AKS for more information.

    ScaleSetPriorityRegular
    Regular

    Regular VMs will be used.

    Spot
    Spot

    Spot priority VMs will be used. There is no SLA for spot nodes. See spot on AKS for more information.

    Regular
    Regular

    Regular VMs will be used.

    Spot
    Spot

    Spot priority VMs will be used. There is no SLA for spot nodes. See spot on AKS for more information.

    Regular
    Regular

    Regular VMs will be used.

    SPOT
    Spot

    Spot priority VMs will be used. There is no SLA for spot nodes. See spot on AKS for more information.

    REGULAR
    Regular

    Regular VMs will be used.

    "Spot"
    Spot

    Spot priority VMs will be used. There is no SLA for spot nodes. See spot on AKS for more information.

    "Regular"
    Regular

    Regular VMs will be used.

    SysctlConfig, SysctlConfigArgs

    FsAioMaxNr int

    Sysctl setting fs.aio-max-nr.

    FsFileMax int

    Sysctl setting fs.file-max.

    FsInotifyMaxUserWatches int

    Sysctl setting fs.inotify.max_user_watches.

    FsNrOpen int

    Sysctl setting fs.nr_open.

    KernelThreadsMax int

    Sysctl setting kernel.threads-max.

    NetCoreNetdevMaxBacklog int

    Sysctl setting net.core.netdev_max_backlog.

    NetCoreOptmemMax int

    Sysctl setting net.core.optmem_max.

    NetCoreRmemDefault int

    Sysctl setting net.core.rmem_default.

    NetCoreRmemMax int

    Sysctl setting net.core.rmem_max.

    NetCoreSomaxconn int

    Sysctl setting net.core.somaxconn.

    NetCoreWmemDefault int

    Sysctl setting net.core.wmem_default.

    NetCoreWmemMax int

    Sysctl setting net.core.wmem_max.

    NetIpv4IpLocalPortRange string

    Sysctl setting net.ipv4.ip_local_port_range.

    NetIpv4NeighDefaultGcThresh1 int

    Sysctl setting net.ipv4.neigh.default.gc_thresh1.

    NetIpv4NeighDefaultGcThresh2 int

    Sysctl setting net.ipv4.neigh.default.gc_thresh2.

    NetIpv4NeighDefaultGcThresh3 int

    Sysctl setting net.ipv4.neigh.default.gc_thresh3.

    NetIpv4TcpFinTimeout int

    Sysctl setting net.ipv4.tcp_fin_timeout.

    NetIpv4TcpKeepaliveProbes int

    Sysctl setting net.ipv4.tcp_keepalive_probes.

    NetIpv4TcpKeepaliveTime int

    Sysctl setting net.ipv4.tcp_keepalive_time.

    NetIpv4TcpMaxSynBacklog int

    Sysctl setting net.ipv4.tcp_max_syn_backlog.

    NetIpv4TcpMaxTwBuckets int

    Sysctl setting net.ipv4.tcp_max_tw_buckets.

    NetIpv4TcpTwReuse bool

    Sysctl setting net.ipv4.tcp_tw_reuse.

    NetIpv4TcpkeepaliveIntvl int

    Sysctl setting net.ipv4.tcp_keepalive_intvl.

    NetNetfilterNfConntrackBuckets int

    Sysctl setting net.netfilter.nf_conntrack_buckets.

    NetNetfilterNfConntrackMax int

    Sysctl setting net.netfilter.nf_conntrack_max.

    VmMaxMapCount int

    Sysctl setting vm.max_map_count.

    VmSwappiness int

    Sysctl setting vm.swappiness.

    VmVfsCachePressure int

    Sysctl setting vm.vfs_cache_pressure.

    FsAioMaxNr int

    Sysctl setting fs.aio-max-nr.

    FsFileMax int

    Sysctl setting fs.file-max.

    FsInotifyMaxUserWatches int

    Sysctl setting fs.inotify.max_user_watches.

    FsNrOpen int

    Sysctl setting fs.nr_open.

    KernelThreadsMax int

    Sysctl setting kernel.threads-max.

    NetCoreNetdevMaxBacklog int

    Sysctl setting net.core.netdev_max_backlog.

    NetCoreOptmemMax int

    Sysctl setting net.core.optmem_max.

    NetCoreRmemDefault int

    Sysctl setting net.core.rmem_default.

    NetCoreRmemMax int

    Sysctl setting net.core.rmem_max.

    NetCoreSomaxconn int

    Sysctl setting net.core.somaxconn.

    NetCoreWmemDefault int

    Sysctl setting net.core.wmem_default.

    NetCoreWmemMax int

    Sysctl setting net.core.wmem_max.

    NetIpv4IpLocalPortRange string

    Sysctl setting net.ipv4.ip_local_port_range.

    NetIpv4NeighDefaultGcThresh1 int

    Sysctl setting net.ipv4.neigh.default.gc_thresh1.

    NetIpv4NeighDefaultGcThresh2 int

    Sysctl setting net.ipv4.neigh.default.gc_thresh2.

    NetIpv4NeighDefaultGcThresh3 int

    Sysctl setting net.ipv4.neigh.default.gc_thresh3.

    NetIpv4TcpFinTimeout int

    Sysctl setting net.ipv4.tcp_fin_timeout.

    NetIpv4TcpKeepaliveProbes int

    Sysctl setting net.ipv4.tcp_keepalive_probes.

    NetIpv4TcpKeepaliveTime int

    Sysctl setting net.ipv4.tcp_keepalive_time.

    NetIpv4TcpMaxSynBacklog int

    Sysctl setting net.ipv4.tcp_max_syn_backlog.

    NetIpv4TcpMaxTwBuckets int

    Sysctl setting net.ipv4.tcp_max_tw_buckets.

    NetIpv4TcpTwReuse bool

    Sysctl setting net.ipv4.tcp_tw_reuse.

    NetIpv4TcpkeepaliveIntvl int

    Sysctl setting net.ipv4.tcp_keepalive_intvl.

    NetNetfilterNfConntrackBuckets int

    Sysctl setting net.netfilter.nf_conntrack_buckets.

    NetNetfilterNfConntrackMax int

    Sysctl setting net.netfilter.nf_conntrack_max.

    VmMaxMapCount int

    Sysctl setting vm.max_map_count.

    VmSwappiness int

    Sysctl setting vm.swappiness.

    VmVfsCachePressure int

    Sysctl setting vm.vfs_cache_pressure.

    fsAioMaxNr Integer

    Sysctl setting fs.aio-max-nr.

    fsFileMax Integer

    Sysctl setting fs.file-max.

    fsInotifyMaxUserWatches Integer

    Sysctl setting fs.inotify.max_user_watches.

    fsNrOpen Integer

    Sysctl setting fs.nr_open.

    kernelThreadsMax Integer

    Sysctl setting kernel.threads-max.

    netCoreNetdevMaxBacklog Integer

    Sysctl setting net.core.netdev_max_backlog.

    netCoreOptmemMax Integer

    Sysctl setting net.core.optmem_max.

    netCoreRmemDefault Integer

    Sysctl setting net.core.rmem_default.

    netCoreRmemMax Integer

    Sysctl setting net.core.rmem_max.

    netCoreSomaxconn Integer

    Sysctl setting net.core.somaxconn.

    netCoreWmemDefault Integer

    Sysctl setting net.core.wmem_default.

    netCoreWmemMax Integer

    Sysctl setting net.core.wmem_max.

    netIpv4IpLocalPortRange String

    Sysctl setting net.ipv4.ip_local_port_range.

    netIpv4NeighDefaultGcThresh1 Integer

    Sysctl setting net.ipv4.neigh.default.gc_thresh1.

    netIpv4NeighDefaultGcThresh2 Integer

    Sysctl setting net.ipv4.neigh.default.gc_thresh2.

    netIpv4NeighDefaultGcThresh3 Integer

    Sysctl setting net.ipv4.neigh.default.gc_thresh3.

    netIpv4TcpFinTimeout Integer

    Sysctl setting net.ipv4.tcp_fin_timeout.

    netIpv4TcpKeepaliveProbes Integer

    Sysctl setting net.ipv4.tcp_keepalive_probes.

    netIpv4TcpKeepaliveTime Integer

    Sysctl setting net.ipv4.tcp_keepalive_time.

    netIpv4TcpMaxSynBacklog Integer

    Sysctl setting net.ipv4.tcp_max_syn_backlog.

    netIpv4TcpMaxTwBuckets Integer

    Sysctl setting net.ipv4.tcp_max_tw_buckets.

    netIpv4TcpTwReuse Boolean

    Sysctl setting net.ipv4.tcp_tw_reuse.

    netIpv4TcpkeepaliveIntvl Integer

    Sysctl setting net.ipv4.tcp_keepalive_intvl.

    netNetfilterNfConntrackBuckets Integer

    Sysctl setting net.netfilter.nf_conntrack_buckets.

    netNetfilterNfConntrackMax Integer

    Sysctl setting net.netfilter.nf_conntrack_max.

    vmMaxMapCount Integer

    Sysctl setting vm.max_map_count.

    vmSwappiness Integer

    Sysctl setting vm.swappiness.

    vmVfsCachePressure Integer

    Sysctl setting vm.vfs_cache_pressure.

    fsAioMaxNr number

    Sysctl setting fs.aio-max-nr.

    fsFileMax number

    Sysctl setting fs.file-max.

    fsInotifyMaxUserWatches number

    Sysctl setting fs.inotify.max_user_watches.

    fsNrOpen number

    Sysctl setting fs.nr_open.

    kernelThreadsMax number

    Sysctl setting kernel.threads-max.

    netCoreNetdevMaxBacklog number

    Sysctl setting net.core.netdev_max_backlog.

    netCoreOptmemMax number

    Sysctl setting net.core.optmem_max.

    netCoreRmemDefault number

    Sysctl setting net.core.rmem_default.

    netCoreRmemMax number

    Sysctl setting net.core.rmem_max.

    netCoreSomaxconn number

    Sysctl setting net.core.somaxconn.

    netCoreWmemDefault number

    Sysctl setting net.core.wmem_default.

    netCoreWmemMax number

    Sysctl setting net.core.wmem_max.

    netIpv4IpLocalPortRange string

    Sysctl setting net.ipv4.ip_local_port_range.

    netIpv4NeighDefaultGcThresh1 number

    Sysctl setting net.ipv4.neigh.default.gc_thresh1.

    netIpv4NeighDefaultGcThresh2 number

    Sysctl setting net.ipv4.neigh.default.gc_thresh2.

    netIpv4NeighDefaultGcThresh3 number

    Sysctl setting net.ipv4.neigh.default.gc_thresh3.

    netIpv4TcpFinTimeout number

    Sysctl setting net.ipv4.tcp_fin_timeout.

    netIpv4TcpKeepaliveProbes number

    Sysctl setting net.ipv4.tcp_keepalive_probes.

    netIpv4TcpKeepaliveTime number

    Sysctl setting net.ipv4.tcp_keepalive_time.

    netIpv4TcpMaxSynBacklog number

    Sysctl setting net.ipv4.tcp_max_syn_backlog.

    netIpv4TcpMaxTwBuckets number

    Sysctl setting net.ipv4.tcp_max_tw_buckets.

    netIpv4TcpTwReuse boolean

    Sysctl setting net.ipv4.tcp_tw_reuse.

    netIpv4TcpkeepaliveIntvl number

    Sysctl setting net.ipv4.tcp_keepalive_intvl.

    netNetfilterNfConntrackBuckets number

    Sysctl setting net.netfilter.nf_conntrack_buckets.

    netNetfilterNfConntrackMax number

    Sysctl setting net.netfilter.nf_conntrack_max.

    vmMaxMapCount number

    Sysctl setting vm.max_map_count.

    vmSwappiness number

    Sysctl setting vm.swappiness.

    vmVfsCachePressure number

    Sysctl setting vm.vfs_cache_pressure.

    fs_aio_max_nr int

    Sysctl setting fs.aio-max-nr.

    fs_file_max int

    Sysctl setting fs.file-max.

    fs_inotify_max_user_watches int

    Sysctl setting fs.inotify.max_user_watches.

    fs_nr_open int

    Sysctl setting fs.nr_open.

    kernel_threads_max int

    Sysctl setting kernel.threads-max.

    net_core_netdev_max_backlog int

    Sysctl setting net.core.netdev_max_backlog.

    net_core_optmem_max int

    Sysctl setting net.core.optmem_max.

    net_core_rmem_default int

    Sysctl setting net.core.rmem_default.

    net_core_rmem_max int

    Sysctl setting net.core.rmem_max.

    net_core_somaxconn int

    Sysctl setting net.core.somaxconn.

    net_core_wmem_default int

    Sysctl setting net.core.wmem_default.

    net_core_wmem_max int

    Sysctl setting net.core.wmem_max.

    net_ipv4_ip_local_port_range str

    Sysctl setting net.ipv4.ip_local_port_range.

    net_ipv4_neigh_default_gc_thresh1 int

    Sysctl setting net.ipv4.neigh.default.gc_thresh1.

    net_ipv4_neigh_default_gc_thresh2 int

    Sysctl setting net.ipv4.neigh.default.gc_thresh2.

    net_ipv4_neigh_default_gc_thresh3 int

    Sysctl setting net.ipv4.neigh.default.gc_thresh3.

    net_ipv4_tcp_fin_timeout int

    Sysctl setting net.ipv4.tcp_fin_timeout.

    net_ipv4_tcp_keepalive_probes int

    Sysctl setting net.ipv4.tcp_keepalive_probes.

    net_ipv4_tcp_keepalive_time int

    Sysctl setting net.ipv4.tcp_keepalive_time.

    net_ipv4_tcp_max_syn_backlog int

    Sysctl setting net.ipv4.tcp_max_syn_backlog.

    net_ipv4_tcp_max_tw_buckets int

    Sysctl setting net.ipv4.tcp_max_tw_buckets.

    net_ipv4_tcp_tw_reuse bool

    Sysctl setting net.ipv4.tcp_tw_reuse.

    net_ipv4_tcpkeepalive_intvl int

    Sysctl setting net.ipv4.tcp_keepalive_intvl.

    net_netfilter_nf_conntrack_buckets int

    Sysctl setting net.netfilter.nf_conntrack_buckets.

    net_netfilter_nf_conntrack_max int

    Sysctl setting net.netfilter.nf_conntrack_max.

    vm_max_map_count int

    Sysctl setting vm.max_map_count.

    vm_swappiness int

    Sysctl setting vm.swappiness.

    vm_vfs_cache_pressure int

    Sysctl setting vm.vfs_cache_pressure.

    fsAioMaxNr Number

    Sysctl setting fs.aio-max-nr.

    fsFileMax Number

    Sysctl setting fs.file-max.

    fsInotifyMaxUserWatches Number

    Sysctl setting fs.inotify.max_user_watches.

    fsNrOpen Number

    Sysctl setting fs.nr_open.

    kernelThreadsMax Number

    Sysctl setting kernel.threads-max.

    netCoreNetdevMaxBacklog Number

    Sysctl setting net.core.netdev_max_backlog.

    netCoreOptmemMax Number

    Sysctl setting net.core.optmem_max.

    netCoreRmemDefault Number

    Sysctl setting net.core.rmem_default.

    netCoreRmemMax Number

    Sysctl setting net.core.rmem_max.

    netCoreSomaxconn Number

    Sysctl setting net.core.somaxconn.

    netCoreWmemDefault Number

    Sysctl setting net.core.wmem_default.

    netCoreWmemMax Number

    Sysctl setting net.core.wmem_max.

    netIpv4IpLocalPortRange String

    Sysctl setting net.ipv4.ip_local_port_range.

    netIpv4NeighDefaultGcThresh1 Number

    Sysctl setting net.ipv4.neigh.default.gc_thresh1.

    netIpv4NeighDefaultGcThresh2 Number

    Sysctl setting net.ipv4.neigh.default.gc_thresh2.

    netIpv4NeighDefaultGcThresh3 Number

    Sysctl setting net.ipv4.neigh.default.gc_thresh3.

    netIpv4TcpFinTimeout Number

    Sysctl setting net.ipv4.tcp_fin_timeout.

    netIpv4TcpKeepaliveProbes Number

    Sysctl setting net.ipv4.tcp_keepalive_probes.

    netIpv4TcpKeepaliveTime Number

    Sysctl setting net.ipv4.tcp_keepalive_time.

    netIpv4TcpMaxSynBacklog Number

    Sysctl setting net.ipv4.tcp_max_syn_backlog.

    netIpv4TcpMaxTwBuckets Number

    Sysctl setting net.ipv4.tcp_max_tw_buckets.

    netIpv4TcpTwReuse Boolean

    Sysctl setting net.ipv4.tcp_tw_reuse.

    netIpv4TcpkeepaliveIntvl Number

    Sysctl setting net.ipv4.tcp_keepalive_intvl.

    netNetfilterNfConntrackBuckets Number

    Sysctl setting net.netfilter.nf_conntrack_buckets.

    netNetfilterNfConntrackMax Number

    Sysctl setting net.netfilter.nf_conntrack_max.

    vmMaxMapCount Number

    Sysctl setting vm.max_map_count.

    vmSwappiness Number

    Sysctl setting vm.swappiness.

    vmVfsCachePressure Number

    Sysctl setting vm.vfs_cache_pressure.

    SysctlConfigResponse, SysctlConfigResponseArgs

    FsAioMaxNr int

    Sysctl setting fs.aio-max-nr.

    FsFileMax int

    Sysctl setting fs.file-max.

    FsInotifyMaxUserWatches int

    Sysctl setting fs.inotify.max_user_watches.

    FsNrOpen int

    Sysctl setting fs.nr_open.

    KernelThreadsMax int

    Sysctl setting kernel.threads-max.

    NetCoreNetdevMaxBacklog int

    Sysctl setting net.core.netdev_max_backlog.

    NetCoreOptmemMax int

    Sysctl setting net.core.optmem_max.

    NetCoreRmemDefault int

    Sysctl setting net.core.rmem_default.

    NetCoreRmemMax int

    Sysctl setting net.core.rmem_max.

    NetCoreSomaxconn int

    Sysctl setting net.core.somaxconn.

    NetCoreWmemDefault int

    Sysctl setting net.core.wmem_default.

    NetCoreWmemMax int

    Sysctl setting net.core.wmem_max.

    NetIpv4IpLocalPortRange string

    Sysctl setting net.ipv4.ip_local_port_range.

    NetIpv4NeighDefaultGcThresh1 int

    Sysctl setting net.ipv4.neigh.default.gc_thresh1.

    NetIpv4NeighDefaultGcThresh2 int

    Sysctl setting net.ipv4.neigh.default.gc_thresh2.

    NetIpv4NeighDefaultGcThresh3 int

    Sysctl setting net.ipv4.neigh.default.gc_thresh3.

    NetIpv4TcpFinTimeout int

    Sysctl setting net.ipv4.tcp_fin_timeout.

    NetIpv4TcpKeepaliveProbes int

    Sysctl setting net.ipv4.tcp_keepalive_probes.

    NetIpv4TcpKeepaliveTime int

    Sysctl setting net.ipv4.tcp_keepalive_time.

    NetIpv4TcpMaxSynBacklog int

    Sysctl setting net.ipv4.tcp_max_syn_backlog.

    NetIpv4TcpMaxTwBuckets int

    Sysctl setting net.ipv4.tcp_max_tw_buckets.

    NetIpv4TcpTwReuse bool

    Sysctl setting net.ipv4.tcp_tw_reuse.

    NetIpv4TcpkeepaliveIntvl int

    Sysctl setting net.ipv4.tcp_keepalive_intvl.

    NetNetfilterNfConntrackBuckets int

    Sysctl setting net.netfilter.nf_conntrack_buckets.

    NetNetfilterNfConntrackMax int

    Sysctl setting net.netfilter.nf_conntrack_max.

    VmMaxMapCount int

    Sysctl setting vm.max_map_count.

    VmSwappiness int

    Sysctl setting vm.swappiness.

    VmVfsCachePressure int

    Sysctl setting vm.vfs_cache_pressure.

    FsAioMaxNr int

    Sysctl setting fs.aio-max-nr.

    FsFileMax int

    Sysctl setting fs.file-max.

    FsInotifyMaxUserWatches int

    Sysctl setting fs.inotify.max_user_watches.

    FsNrOpen int

    Sysctl setting fs.nr_open.

    KernelThreadsMax int

    Sysctl setting kernel.threads-max.

    NetCoreNetdevMaxBacklog int

    Sysctl setting net.core.netdev_max_backlog.

    NetCoreOptmemMax int

    Sysctl setting net.core.optmem_max.

    NetCoreRmemDefault int

    Sysctl setting net.core.rmem_default.

    NetCoreRmemMax int

    Sysctl setting net.core.rmem_max.

    NetCoreSomaxconn int

    Sysctl setting net.core.somaxconn.

    NetCoreWmemDefault int

    Sysctl setting net.core.wmem_default.

    NetCoreWmemMax int

    Sysctl setting net.core.wmem_max.

    NetIpv4IpLocalPortRange string

    Sysctl setting net.ipv4.ip_local_port_range.

    NetIpv4NeighDefaultGcThresh1 int

    Sysctl setting net.ipv4.neigh.default.gc_thresh1.

    NetIpv4NeighDefaultGcThresh2 int

    Sysctl setting net.ipv4.neigh.default.gc_thresh2.

    NetIpv4NeighDefaultGcThresh3 int

    Sysctl setting net.ipv4.neigh.default.gc_thresh3.

    NetIpv4TcpFinTimeout int

    Sysctl setting net.ipv4.tcp_fin_timeout.

    NetIpv4TcpKeepaliveProbes int

    Sysctl setting net.ipv4.tcp_keepalive_probes.

    NetIpv4TcpKeepaliveTime int

    Sysctl setting net.ipv4.tcp_keepalive_time.

    NetIpv4TcpMaxSynBacklog int

    Sysctl setting net.ipv4.tcp_max_syn_backlog.

    NetIpv4TcpMaxTwBuckets int

    Sysctl setting net.ipv4.tcp_max_tw_buckets.

    NetIpv4TcpTwReuse bool

    Sysctl setting net.ipv4.tcp_tw_reuse.

    NetIpv4TcpkeepaliveIntvl int

    Sysctl setting net.ipv4.tcp_keepalive_intvl.

    NetNetfilterNfConntrackBuckets int

    Sysctl setting net.netfilter.nf_conntrack_buckets.

    NetNetfilterNfConntrackMax int

    Sysctl setting net.netfilter.nf_conntrack_max.

    VmMaxMapCount int

    Sysctl setting vm.max_map_count.

    VmSwappiness int

    Sysctl setting vm.swappiness.

    VmVfsCachePressure int

    Sysctl setting vm.vfs_cache_pressure.

    fsAioMaxNr Integer

    Sysctl setting fs.aio-max-nr.

    fsFileMax Integer

    Sysctl setting fs.file-max.

    fsInotifyMaxUserWatches Integer

    Sysctl setting fs.inotify.max_user_watches.

    fsNrOpen Integer

    Sysctl setting fs.nr_open.

    kernelThreadsMax Integer

    Sysctl setting kernel.threads-max.

    netCoreNetdevMaxBacklog Integer

    Sysctl setting net.core.netdev_max_backlog.

    netCoreOptmemMax Integer

    Sysctl setting net.core.optmem_max.

    netCoreRmemDefault Integer

    Sysctl setting net.core.rmem_default.

    netCoreRmemMax Integer

    Sysctl setting net.core.rmem_max.

    netCoreSomaxconn Integer

    Sysctl setting net.core.somaxconn.

    netCoreWmemDefault Integer

    Sysctl setting net.core.wmem_default.

    netCoreWmemMax Integer

    Sysctl setting net.core.wmem_max.

    netIpv4IpLocalPortRange String

    Sysctl setting net.ipv4.ip_local_port_range.

    netIpv4NeighDefaultGcThresh1 Integer

    Sysctl setting net.ipv4.neigh.default.gc_thresh1.

    netIpv4NeighDefaultGcThresh2 Integer

    Sysctl setting net.ipv4.neigh.default.gc_thresh2.

    netIpv4NeighDefaultGcThresh3 Integer

    Sysctl setting net.ipv4.neigh.default.gc_thresh3.

    netIpv4TcpFinTimeout Integer

    Sysctl setting net.ipv4.tcp_fin_timeout.

    netIpv4TcpKeepaliveProbes Integer

    Sysctl setting net.ipv4.tcp_keepalive_probes.

    netIpv4TcpKeepaliveTime Integer

    Sysctl setting net.ipv4.tcp_keepalive_time.

    netIpv4TcpMaxSynBacklog Integer

    Sysctl setting net.ipv4.tcp_max_syn_backlog.

    netIpv4TcpMaxTwBuckets Integer

    Sysctl setting net.ipv4.tcp_max_tw_buckets.

    netIpv4TcpTwReuse Boolean

    Sysctl setting net.ipv4.tcp_tw_reuse.

    netIpv4TcpkeepaliveIntvl Integer

    Sysctl setting net.ipv4.tcp_keepalive_intvl.

    netNetfilterNfConntrackBuckets Integer

    Sysctl setting net.netfilter.nf_conntrack_buckets.

    netNetfilterNfConntrackMax Integer

    Sysctl setting net.netfilter.nf_conntrack_max.

    vmMaxMapCount Integer

    Sysctl setting vm.max_map_count.

    vmSwappiness Integer

    Sysctl setting vm.swappiness.

    vmVfsCachePressure Integer

    Sysctl setting vm.vfs_cache_pressure.

    fsAioMaxNr number

    Sysctl setting fs.aio-max-nr.

    fsFileMax number

    Sysctl setting fs.file-max.

    fsInotifyMaxUserWatches number

    Sysctl setting fs.inotify.max_user_watches.

    fsNrOpen number

    Sysctl setting fs.nr_open.

    kernelThreadsMax number

    Sysctl setting kernel.threads-max.

    netCoreNetdevMaxBacklog number

    Sysctl setting net.core.netdev_max_backlog.

    netCoreOptmemMax number

    Sysctl setting net.core.optmem_max.

    netCoreRmemDefault number

    Sysctl setting net.core.rmem_default.

    netCoreRmemMax number

    Sysctl setting net.core.rmem_max.

    netCoreSomaxconn number

    Sysctl setting net.core.somaxconn.

    netCoreWmemDefault number

    Sysctl setting net.core.wmem_default.

    netCoreWmemMax number

    Sysctl setting net.core.wmem_max.

    netIpv4IpLocalPortRange string

    Sysctl setting net.ipv4.ip_local_port_range.

    netIpv4NeighDefaultGcThresh1 number

    Sysctl setting net.ipv4.neigh.default.gc_thresh1.

    netIpv4NeighDefaultGcThresh2 number

    Sysctl setting net.ipv4.neigh.default.gc_thresh2.

    netIpv4NeighDefaultGcThresh3 number

    Sysctl setting net.ipv4.neigh.default.gc_thresh3.

    netIpv4TcpFinTimeout number

    Sysctl setting net.ipv4.tcp_fin_timeout.

    netIpv4TcpKeepaliveProbes number

    Sysctl setting net.ipv4.tcp_keepalive_probes.

    netIpv4TcpKeepaliveTime number

    Sysctl setting net.ipv4.tcp_keepalive_time.

    netIpv4TcpMaxSynBacklog number

    Sysctl setting net.ipv4.tcp_max_syn_backlog.

    netIpv4TcpMaxTwBuckets number

    Sysctl setting net.ipv4.tcp_max_tw_buckets.

    netIpv4TcpTwReuse boolean

    Sysctl setting net.ipv4.tcp_tw_reuse.

    netIpv4TcpkeepaliveIntvl number

    Sysctl setting net.ipv4.tcp_keepalive_intvl.

    netNetfilterNfConntrackBuckets number

    Sysctl setting net.netfilter.nf_conntrack_buckets.

    netNetfilterNfConntrackMax number

    Sysctl setting net.netfilter.nf_conntrack_max.

    vmMaxMapCount number

    Sysctl setting vm.max_map_count.

    vmSwappiness number

    Sysctl setting vm.swappiness.

    vmVfsCachePressure number

    Sysctl setting vm.vfs_cache_pressure.

    fs_aio_max_nr int

    Sysctl setting fs.aio-max-nr.

    fs_file_max int

    Sysctl setting fs.file-max.

    fs_inotify_max_user_watches int

    Sysctl setting fs.inotify.max_user_watches.

    fs_nr_open int

    Sysctl setting fs.nr_open.

    kernel_threads_max int

    Sysctl setting kernel.threads-max.

    net_core_netdev_max_backlog int

    Sysctl setting net.core.netdev_max_backlog.

    net_core_optmem_max int

    Sysctl setting net.core.optmem_max.

    net_core_rmem_default int

    Sysctl setting net.core.rmem_default.

    net_core_rmem_max int

    Sysctl setting net.core.rmem_max.

    net_core_somaxconn int

    Sysctl setting net.core.somaxconn.

    net_core_wmem_default int

    Sysctl setting net.core.wmem_default.

    net_core_wmem_max int

    Sysctl setting net.core.wmem_max.

    net_ipv4_ip_local_port_range str

    Sysctl setting net.ipv4.ip_local_port_range.

    net_ipv4_neigh_default_gc_thresh1 int

    Sysctl setting net.ipv4.neigh.default.gc_thresh1.

    net_ipv4_neigh_default_gc_thresh2 int

    Sysctl setting net.ipv4.neigh.default.gc_thresh2.

    net_ipv4_neigh_default_gc_thresh3 int

    Sysctl setting net.ipv4.neigh.default.gc_thresh3.

    net_ipv4_tcp_fin_timeout int

    Sysctl setting net.ipv4.tcp_fin_timeout.

    net_ipv4_tcp_keepalive_probes int

    Sysctl setting net.ipv4.tcp_keepalive_probes.

    net_ipv4_tcp_keepalive_time int

    Sysctl setting net.ipv4.tcp_keepalive_time.

    net_ipv4_tcp_max_syn_backlog int

    Sysctl setting net.ipv4.tcp_max_syn_backlog.

    net_ipv4_tcp_max_tw_buckets int

    Sysctl setting net.ipv4.tcp_max_tw_buckets.

    net_ipv4_tcp_tw_reuse bool

    Sysctl setting net.ipv4.tcp_tw_reuse.

    net_ipv4_tcpkeepalive_intvl int

    Sysctl setting net.ipv4.tcp_keepalive_intvl.

    net_netfilter_nf_conntrack_buckets int

    Sysctl setting net.netfilter.nf_conntrack_buckets.

    net_netfilter_nf_conntrack_max int

    Sysctl setting net.netfilter.nf_conntrack_max.

    vm_max_map_count int

    Sysctl setting vm.max_map_count.

    vm_swappiness int

    Sysctl setting vm.swappiness.

    vm_vfs_cache_pressure int

    Sysctl setting vm.vfs_cache_pressure.

    fsAioMaxNr Number

    Sysctl setting fs.aio-max-nr.

    fsFileMax Number

    Sysctl setting fs.file-max.

    fsInotifyMaxUserWatches Number

    Sysctl setting fs.inotify.max_user_watches.

    fsNrOpen Number

    Sysctl setting fs.nr_open.

    kernelThreadsMax Number

    Sysctl setting kernel.threads-max.

    netCoreNetdevMaxBacklog Number

    Sysctl setting net.core.netdev_max_backlog.

    netCoreOptmemMax Number

    Sysctl setting net.core.optmem_max.

    netCoreRmemDefault Number

    Sysctl setting net.core.rmem_default.

    netCoreRmemMax Number

    Sysctl setting net.core.rmem_max.

    netCoreSomaxconn Number

    Sysctl setting net.core.somaxconn.

    netCoreWmemDefault Number

    Sysctl setting net.core.wmem_default.

    netCoreWmemMax Number

    Sysctl setting net.core.wmem_max.

    netIpv4IpLocalPortRange String

    Sysctl setting net.ipv4.ip_local_port_range.

    netIpv4NeighDefaultGcThresh1 Number

    Sysctl setting net.ipv4.neigh.default.gc_thresh1.

    netIpv4NeighDefaultGcThresh2 Number

    Sysctl setting net.ipv4.neigh.default.gc_thresh2.

    netIpv4NeighDefaultGcThresh3 Number

    Sysctl setting net.ipv4.neigh.default.gc_thresh3.

    netIpv4TcpFinTimeout Number

    Sysctl setting net.ipv4.tcp_fin_timeout.

    netIpv4TcpKeepaliveProbes Number

    Sysctl setting net.ipv4.tcp_keepalive_probes.

    netIpv4TcpKeepaliveTime Number

    Sysctl setting net.ipv4.tcp_keepalive_time.

    netIpv4TcpMaxSynBacklog Number

    Sysctl setting net.ipv4.tcp_max_syn_backlog.

    netIpv4TcpMaxTwBuckets Number

    Sysctl setting net.ipv4.tcp_max_tw_buckets.

    netIpv4TcpTwReuse Boolean

    Sysctl setting net.ipv4.tcp_tw_reuse.

    netIpv4TcpkeepaliveIntvl Number

    Sysctl setting net.ipv4.tcp_keepalive_intvl.

    netNetfilterNfConntrackBuckets Number

    Sysctl setting net.netfilter.nf_conntrack_buckets.

    netNetfilterNfConntrackMax Number

    Sysctl setting net.netfilter.nf_conntrack_max.

    vmMaxMapCount Number

    Sysctl setting vm.max_map_count.

    vmSwappiness Number

    Sysctl setting vm.swappiness.

    vmVfsCachePressure Number

    Sysctl setting vm.vfs_cache_pressure.

    WorkloadRuntime, WorkloadRuntimeArgs

    OCIContainer
    OCIContainer

    Nodes will use Kubelet to run standard OCI container workloads.

    WasmWasi
    WasmWasi

    Nodes will use Krustlet to run WASM workloads using the WASI provider (Preview).

    WorkloadRuntimeOCIContainer
    OCIContainer

    Nodes will use Kubelet to run standard OCI container workloads.

    WorkloadRuntimeWasmWasi
    WasmWasi

    Nodes will use Krustlet to run WASM workloads using the WASI provider (Preview).

    OCIContainer
    OCIContainer

    Nodes will use Kubelet to run standard OCI container workloads.

    WasmWasi
    WasmWasi

    Nodes will use Krustlet to run WASM workloads using the WASI provider (Preview).

    OCIContainer
    OCIContainer

    Nodes will use Kubelet to run standard OCI container workloads.

    WasmWasi
    WasmWasi

    Nodes will use Krustlet to run WASM workloads using the WASI provider (Preview).

    OCI_CONTAINER
    OCIContainer

    Nodes will use Kubelet to run standard OCI container workloads.

    WASM_WASI
    WasmWasi

    Nodes will use Krustlet to run WASM workloads using the WASI provider (Preview).

    "OCIContainer"
    OCIContainer

    Nodes will use Kubelet to run standard OCI container workloads.

    "WasmWasi"
    WasmWasi

    Nodes will use Krustlet to run WASM workloads using the WASI provider (Preview).

    Import

    An existing resource can be imported using its type token, name, and identifier, e.g.

    $ pulumi import azure-native:containerservice:AgentPool agentpool1 /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/agentPools/{agentPoolName} 
    

    Package Details

    Repository
    Azure Native pulumi/pulumi-azure-native
    License
    Apache-2.0
    azure-native logo
    This is the latest version of Azure Native. Use the Azure Native v1 docs if using the v1 version of this package.
    Azure Native v2.19.0 published on Tuesday, Nov 21, 2023 by Pulumi