1. Packages
  2. Rancher2
  3. API Docs
  4. Cluster
Rancher 2 v5.2.1 published on Saturday, Oct 28, 2023 by Pulumi

rancher2.Cluster

Explore with Pulumi AI

rancher2 logo
Rancher 2 v5.2.1 published on Saturday, Oct 28, 2023 by Pulumi

    Provides a Rancher v2 Cluster resource. This can be used to create Clusters for Rancher v2 environments and retrieve their information.

    Example Usage

    Creating Rancher v2 RKE cluster enabling and customizing monitoring

    // Pulumi C# example: create a Rancher v2 RKE cluster with cluster-level
    // monitoring enabled and the monitoring chart's answers customized.
    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using Rancher2 = Pulumi.Rancher2;
    
    return await Deployment.RunAsync(() => 
    {
        // Create a new rancher2 RKE Cluster
        var foo_custom = new Rancher2.Cluster("foo-custom", new()
        {
            ClusterMonitoringInput = new Rancher2.Inputs.ClusterClusterMonitoringInputArgs
            {
                // Helm-style answers tuning the monitoring chart (node/kubelet
                // exporters, Grafana persistence, Prometheus resources/retention).
                // NOTE(review): some boolean-like values are raw booleans and
                // others are the strings "false"/"true" — presumably intentional
                // per-key; confirm against the monitoring chart's values schema.
                Answers = 
                {
                    { "exporter-kubelets.https", true },
                    { "exporter-node.enabled", true },
                    { "exporter-node.ports.metrics.port", 9796 },
                    { "exporter-node.resources.limits.cpu", "200m" },
                    { "exporter-node.resources.limits.memory", "200Mi" },
                    { "grafana.persistence.enabled", false },
                    { "grafana.persistence.size", "10Gi" },
                    { "grafana.persistence.storageClass", "default" },
                    { "operator.resources.limits.memory", "500Mi" },
                    { "prometheus.persistence.enabled", "false" },
                    { "prometheus.persistence.size", "50Gi" },
                    { "prometheus.persistence.storageClass", "default" },
                    { "prometheus.persistent.useReleaseName", "true" },
                    { "prometheus.resources.core.limits.cpu", "1000m" },
                    { "prometheus.resources.core.limits.memory", "1500Mi" },
                    { "prometheus.resources.core.requests.cpu", "750m" },
                    { "prometheus.resources.core.requests.memory", "750Mi" },
                    { "prometheus.retention", "12h" },
                },
                // Version of the monitoring chart to deploy.
                Version = "0.1.0",
            },
            Description = "Foo rancher2 custom cluster",
            // Enable cluster-level monitoring for this cluster.
            EnableClusterMonitoring = true,
            RkeConfig = new Rancher2.Inputs.ClusterRkeConfigArgs
            {
                Network = new Rancher2.Inputs.ClusterRkeConfigNetworkArgs
                {
                    Plugin = "canal",
                },
            },
        });
    
    });
    
    package main
    
    import (
    	"github.com/pulumi/pulumi-rancher2/sdk/v5/go/rancher2"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		_, err := rancher2.NewCluster(ctx, "foo-custom", &rancher2.ClusterArgs{
    			ClusterMonitoringInput: &rancher2.ClusterClusterMonitoringInputArgs{
    				Answers: pulumi.Map{
    					"exporter-kubelets.https":                   pulumi.Any(true),
    					"exporter-node.enabled":                     pulumi.Any(true),
    					"exporter-node.ports.metrics.port":          pulumi.Any(9796),
    					"exporter-node.resources.limits.cpu":        pulumi.Any("200m"),
    					"exporter-node.resources.limits.memory":     pulumi.Any("200Mi"),
    					"grafana.persistence.enabled":               pulumi.Any(false),
    					"grafana.persistence.size":                  pulumi.Any("10Gi"),
    					"grafana.persistence.storageClass":          pulumi.Any("default"),
    					"operator.resources.limits.memory":          pulumi.Any("500Mi"),
    					"prometheus.persistence.enabled":            pulumi.Any("false"),
    					"prometheus.persistence.size":               pulumi.Any("50Gi"),
    					"prometheus.persistence.storageClass":       pulumi.Any("default"),
    					"prometheus.persistent.useReleaseName":      pulumi.Any("true"),
    					"prometheus.resources.core.limits.cpu":      pulumi.Any("1000m"),
    					"prometheus.resources.core.limits.memory":   pulumi.Any("1500Mi"),
    					"prometheus.resources.core.requests.cpu":    pulumi.Any("750m"),
    					"prometheus.resources.core.requests.memory": pulumi.Any("750Mi"),
    					"prometheus.retention":                      pulumi.Any("12h"),
    				},
    				Version: pulumi.String("0.1.0"),
    			},
    			Description:             pulumi.String("Foo rancher2 custom cluster"),
    			EnableClusterMonitoring: pulumi.Bool(true),
    			RkeConfig: &rancher2.ClusterRkeConfigArgs{
    				Network: &rancher2.ClusterRkeConfigNetworkArgs{
    					Plugin: pulumi.String("canal"),
    				},
    			},
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.rancher2.Cluster;
    import com.pulumi.rancher2.ClusterArgs;
    import com.pulumi.rancher2.inputs.ClusterClusterMonitoringInputArgs;
    import com.pulumi.rancher2.inputs.ClusterRkeConfigArgs;
    import com.pulumi.rancher2.inputs.ClusterRkeConfigNetworkArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    /**
     * Pulumi Java example: creates a Rancher v2 RKE cluster with cluster-level
     * monitoring enabled and the monitoring chart's answers customized.
     */
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            // Create a new rancher2 RKE Cluster with monitoring enabled.
            var foo_custom = new Cluster("foo-custom", ClusterArgs.builder()        
                .clusterMonitoringInput(ClusterClusterMonitoringInputArgs.builder()
                    // Helm-style answers tuning the monitoring chart.
                    // NOTE(review): some boolean-like values are raw booleans and
                    // others are the strings "false"/"true" — presumably intentional
                    // per-key; confirm against the monitoring chart's values schema.
                    .answers(Map.ofEntries(
                        Map.entry("exporter-kubelets.https", true),
                        Map.entry("exporter-node.enabled", true),
                        Map.entry("exporter-node.ports.metrics.port", 9796),
                        Map.entry("exporter-node.resources.limits.cpu", "200m"),
                        Map.entry("exporter-node.resources.limits.memory", "200Mi"),
                        Map.entry("grafana.persistence.enabled", false),
                        Map.entry("grafana.persistence.size", "10Gi"),
                        Map.entry("grafana.persistence.storageClass", "default"),
                        Map.entry("operator.resources.limits.memory", "500Mi"),
                        Map.entry("prometheus.persistence.enabled", "false"),
                        Map.entry("prometheus.persistence.size", "50Gi"),
                        Map.entry("prometheus.persistence.storageClass", "default"),
                        Map.entry("prometheus.persistent.useReleaseName", "true"),
                        Map.entry("prometheus.resources.core.limits.cpu", "1000m"),
                        Map.entry("prometheus.resources.core.limits.memory", "1500Mi"),
                        Map.entry("prometheus.resources.core.requests.cpu", "750m"),
                        Map.entry("prometheus.resources.core.requests.memory", "750Mi"),
                        Map.entry("prometheus.retention", "12h")
                    ))
                    // Version of the monitoring chart to deploy.
                    .version("0.1.0")
                    .build())
                .description("Foo rancher2 custom cluster")
                .enableClusterMonitoring(true)
                .rkeConfig(ClusterRkeConfigArgs.builder()
                    .network(ClusterRkeConfigNetworkArgs.builder()
                        .plugin("canal")
                        .build())
                    .build())
                .build());
    
        }
    }
    
    import pulumi
    import pulumi_rancher2 as rancher2
    
    # Helm-style answers tuning the cluster-monitoring chart (node/kubelet
    # exporters, Grafana persistence, Prometheus resources/retention).
    monitoring_answers = {
        "exporter-kubelets.https": True,
        "exporter-node.enabled": True,
        "exporter-node.ports.metrics.port": 9796,
        "exporter-node.resources.limits.cpu": "200m",
        "exporter-node.resources.limits.memory": "200Mi",
        "grafana.persistence.enabled": False,
        "grafana.persistence.size": "10Gi",
        "grafana.persistence.storageClass": "default",
        "operator.resources.limits.memory": "500Mi",
        "prometheus.persistence.enabled": "false",
        "prometheus.persistence.size": "50Gi",
        "prometheus.persistence.storageClass": "default",
        "prometheus.persistent.useReleaseName": "true",
        "prometheus.resources.core.limits.cpu": "1000m",
        "prometheus.resources.core.limits.memory": "1500Mi",
        "prometheus.resources.core.requests.cpu": "750m",
        "prometheus.resources.core.requests.memory": "750Mi",
        "prometheus.retention": "12h",
    }
    
    # Create a new rancher2 RKE Cluster with monitoring enabled.
    foo_custom = rancher2.Cluster(
        "foo-custom",
        cluster_monitoring_input=rancher2.ClusterClusterMonitoringInputArgs(
            answers=monitoring_answers,
            # Version of the monitoring chart to deploy.
            version="0.1.0",
        ),
        description="Foo rancher2 custom cluster",
        enable_cluster_monitoring=True,
        rke_config=rancher2.ClusterRkeConfigArgs(
            network=rancher2.ClusterRkeConfigNetworkArgs(plugin="canal"),
        ),
    )
    
    import * as pulumi from "@pulumi/pulumi";
    import * as rancher2 from "@pulumi/rancher2";
    
    // Helm-style answers tuning the cluster-monitoring chart (node/kubelet
    // exporters, Grafana persistence, Prometheus resources/retention).
    const monitoringAnswers = {
        "exporter-kubelets.https": true,
        "exporter-node.enabled": true,
        "exporter-node.ports.metrics.port": 9796,
        "exporter-node.resources.limits.cpu": "200m",
        "exporter-node.resources.limits.memory": "200Mi",
        "grafana.persistence.enabled": false,
        "grafana.persistence.size": "10Gi",
        "grafana.persistence.storageClass": "default",
        "operator.resources.limits.memory": "500Mi",
        "prometheus.persistence.enabled": "false",
        "prometheus.persistence.size": "50Gi",
        "prometheus.persistence.storageClass": "default",
        "prometheus.persistent.useReleaseName": "true",
        "prometheus.resources.core.limits.cpu": "1000m",
        "prometheus.resources.core.limits.memory": "1500Mi",
        "prometheus.resources.core.requests.cpu": "750m",
        "prometheus.resources.core.requests.memory": "750Mi",
        "prometheus.retention": "12h",
    };
    
    // Create a new rancher2 RKE Cluster with monitoring enabled.
    const foo_custom = new rancher2.Cluster("foo-custom", {
        clusterMonitoringInput: {
            answers: monitoringAnswers,
            // Version of the monitoring chart to deploy.
            version: "0.1.0",
        },
        description: "Foo rancher2 custom cluster",
        enableClusterMonitoring: true,
        rkeConfig: {
            network: {
                plugin: "canal",
            },
        },
    });
    
    resources:
      # Create a new rancher2 RKE Cluster
      foo-custom:
        type: rancher2:Cluster
        properties:
          clusterMonitoringInput:
            # Helm-style answers tuning the monitoring chart.
            # NOTE(review): quoted values ('false'/'true') are YAML strings,
            # while bare true/false are booleans — presumably intentional
            # per-key; confirm against the monitoring chart's values schema.
            answers:
              exporter-kubelets.https: true
              exporter-node.enabled: true
              exporter-node.ports.metrics.port: 9796
              exporter-node.resources.limits.cpu: 200m
              exporter-node.resources.limits.memory: 200Mi
              grafana.persistence.enabled: false
              grafana.persistence.size: 10Gi
              grafana.persistence.storageClass: default
              operator.resources.limits.memory: 500Mi
              prometheus.persistence.enabled: 'false'
              prometheus.persistence.size: 50Gi
              prometheus.persistence.storageClass: default
              prometheus.persistent.useReleaseName: 'true'
              prometheus.resources.core.limits.cpu: 1000m
              prometheus.resources.core.limits.memory: 1500Mi
              prometheus.resources.core.requests.cpu: 750m
              prometheus.resources.core.requests.memory: 750Mi
              prometheus.retention: 12h
            # Version of the monitoring chart to deploy.
            version: 0.1.0
          description: Foo rancher2 custom cluster
          # Enable cluster-level monitoring for this cluster.
          enableClusterMonitoring: true
          rkeConfig:
            network:
              plugin: canal
    

    Creating Rancher v2 RKE cluster enabling/customizing monitoring and istio

    // Pulumi C# example: create an RKE cluster with monitoring, wait for the
    // monitoring stack via a ClusterSync, then deploy the istio app into the
    // cluster's System project.
    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using Rancher2 = Pulumi.Rancher2;
    
    return await Deployment.RunAsync(() => 
    {
        // Create a new rancher2 RKE Cluster
        var foo_customCluster = new Rancher2.Cluster("foo-customCluster", new()
        {
            Description = "Foo rancher2 custom cluster",
            RkeConfig = new Rancher2.Inputs.ClusterRkeConfigArgs
            {
                Network = new Rancher2.Inputs.ClusterRkeConfigNetworkArgs
                {
                    Plugin = "canal",
                },
            },
            EnableClusterMonitoring = true,
            ClusterMonitoringInput = new Rancher2.Inputs.ClusterClusterMonitoringInputArgs
            {
                // Helm-style answers tuning the monitoring chart.
                Answers = 
                {
                    { "exporter-kubelets.https", true },
                    { "exporter-node.enabled", true },
                    { "exporter-node.ports.metrics.port", 9796 },
                    { "exporter-node.resources.limits.cpu", "200m" },
                    { "exporter-node.resources.limits.memory", "200Mi" },
                    { "grafana.persistence.enabled", false },
                    { "grafana.persistence.size", "10Gi" },
                    { "grafana.persistence.storageClass", "default" },
                    { "operator.resources.limits.memory", "500Mi" },
                    { "prometheus.persistence.enabled", "false" },
                    { "prometheus.persistence.size", "50Gi" },
                    { "prometheus.persistence.storageClass", "default" },
                    { "prometheus.persistent.useReleaseName", "true" },
                    { "prometheus.resources.core.limits.cpu", "1000m" },
                    { "prometheus.resources.core.limits.memory", "1500Mi" },
                    { "prometheus.resources.core.requests.cpu", "750m" },
                    { "prometheus.resources.core.requests.memory", "750Mi" },
                    { "prometheus.retention", "12h" },
                },
                Version = "0.1.0",
            },
        });
    
        // Create a new rancher2 Cluster Sync for foo-custom cluster
        // (WaitMonitoring is wired to the cluster's monitoring flag).
        var foo_customClusterSync = new Rancher2.ClusterSync("foo-customClusterSync", new()
        {
            ClusterId = foo_customCluster.Id,
            WaitMonitoring = foo_customCluster.EnableClusterMonitoring,
        });
    
        // Create a new rancher2 Namespace; its project id comes from the
        // ClusterSync's SystemProjectId output, so it waits on the sync.
        var foo_istio = new Rancher2.Namespace("foo-istio", new()
        {
            ProjectId = foo_customClusterSync.SystemProjectId,
            Description = "istio namespace",
        });
    
        // Create a new rancher2 App deploying istio (should wait until monitoring is up and running)
        var istio = new Rancher2.App("istio", new()
        {
            CatalogName = "system-library",
            Description = "Terraform app acceptance test",
            ProjectId = foo_istio.ProjectId,
            TemplateName = "rancher-istio",
            TemplateVersion = "0.1.1",
            TargetNamespace = foo_istio.Id,
            // Helm-style answers for the rancher-istio chart.
            Answers = 
            {
                { "certmanager.enabled", false },
                { "enableCRDs", true },
                { "galley.enabled", true },
                { "gateways.enabled", false },
                { "gateways.istio-ingressgateway.resources.limits.cpu", "2000m" },
                { "gateways.istio-ingressgateway.resources.limits.memory", "1024Mi" },
                { "gateways.istio-ingressgateway.resources.requests.cpu", "100m" },
                { "gateways.istio-ingressgateway.resources.requests.memory", "128Mi" },
                { "gateways.istio-ingressgateway.type", "NodePort" },
                { "global.monitoring.type", "cluster-monitoring" },
                { "global.rancher.clusterId", foo_customClusterSync.ClusterId },
                { "istio_cni.enabled", "false" },
                { "istiocoredns.enabled", "false" },
                { "kiali.enabled", "true" },
                { "mixer.enabled", "true" },
                { "mixer.policy.enabled", "true" },
                { "mixer.policy.resources.limits.cpu", "4800m" },
                { "mixer.policy.resources.limits.memory", "4096Mi" },
                { "mixer.policy.resources.requests.cpu", "1000m" },
                { "mixer.policy.resources.requests.memory", "1024Mi" },
                { "mixer.telemetry.resources.limits.cpu", "4800m" },
                { "mixer.telemetry.resources.limits.memory", "4096Mi" },
                { "mixer.telemetry.resources.requests.cpu", "1000m" },
                { "mixer.telemetry.resources.requests.memory", "1024Mi" },
                { "mtls.enabled", false },
                { "nodeagent.enabled", false },
                { "pilot.enabled", true },
                { "pilot.resources.limits.cpu", "1000m" },
                { "pilot.resources.limits.memory", "4096Mi" },
                { "pilot.resources.requests.cpu", "500m" },
                { "pilot.resources.requests.memory", "2048Mi" },
                { "pilot.traceSampling", "1" },
                { "security.enabled", true },
                { "sidecarInjectorWebhook.enabled", true },
                { "tracing.enabled", true },
                { "tracing.jaeger.resources.limits.cpu", "500m" },
                { "tracing.jaeger.resources.limits.memory", "1024Mi" },
                { "tracing.jaeger.resources.requests.cpu", "100m" },
                { "tracing.jaeger.resources.requests.memory", "100Mi" },
            },
        });
    
    });
    
    package main
    
    import (
    	"github.com/pulumi/pulumi-rancher2/sdk/v5/go/rancher2"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		_, err := rancher2.NewCluster(ctx, "foo-customCluster", &rancher2.ClusterArgs{
    			Description: pulumi.String("Foo rancher2 custom cluster"),
    			RkeConfig: &rancher2.ClusterRkeConfigArgs{
    				Network: &rancher2.ClusterRkeConfigNetworkArgs{
    					Plugin: pulumi.String("canal"),
    				},
    			},
    			EnableClusterMonitoring: pulumi.Bool(true),
    			ClusterMonitoringInput: &rancher2.ClusterClusterMonitoringInputArgs{
    				Answers: pulumi.Map{
    					"exporter-kubelets.https":                   pulumi.Any(true),
    					"exporter-node.enabled":                     pulumi.Any(true),
    					"exporter-node.ports.metrics.port":          pulumi.Any(9796),
    					"exporter-node.resources.limits.cpu":        pulumi.Any("200m"),
    					"exporter-node.resources.limits.memory":     pulumi.Any("200Mi"),
    					"grafana.persistence.enabled":               pulumi.Any(false),
    					"grafana.persistence.size":                  pulumi.Any("10Gi"),
    					"grafana.persistence.storageClass":          pulumi.Any("default"),
    					"operator.resources.limits.memory":          pulumi.Any("500Mi"),
    					"prometheus.persistence.enabled":            pulumi.Any("false"),
    					"prometheus.persistence.size":               pulumi.Any("50Gi"),
    					"prometheus.persistence.storageClass":       pulumi.Any("default"),
    					"prometheus.persistent.useReleaseName":      pulumi.Any("true"),
    					"prometheus.resources.core.limits.cpu":      pulumi.Any("1000m"),
    					"prometheus.resources.core.limits.memory":   pulumi.Any("1500Mi"),
    					"prometheus.resources.core.requests.cpu":    pulumi.Any("750m"),
    					"prometheus.resources.core.requests.memory": pulumi.Any("750Mi"),
    					"prometheus.retention":                      pulumi.Any("12h"),
    				},
    				Version: pulumi.String("0.1.0"),
    			},
    		})
    		if err != nil {
    			return err
    		}
    		_, err = rancher2.NewClusterSync(ctx, "foo-customClusterSync", &rancher2.ClusterSyncArgs{
    			ClusterId:      foo_customCluster.ID(),
    			WaitMonitoring: foo_customCluster.EnableClusterMonitoring,
    		})
    		if err != nil {
    			return err
    		}
    		_, err = rancher2.NewNamespace(ctx, "foo-istio", &rancher2.NamespaceArgs{
    			ProjectId:   foo_customClusterSync.SystemProjectId,
    			Description: pulumi.String("istio namespace"),
    		})
    		if err != nil {
    			return err
    		}
    		_, err = rancher2.NewApp(ctx, "istio", &rancher2.AppArgs{
    			CatalogName:     pulumi.String("system-library"),
    			Description:     pulumi.String("Terraform app acceptance test"),
    			ProjectId:       foo_istio.ProjectId,
    			TemplateName:    pulumi.String("rancher-istio"),
    			TemplateVersion: pulumi.String("0.1.1"),
    			TargetNamespace: foo_istio.ID(),
    			Answers: pulumi.Map{
    				"certmanager.enabled": pulumi.Any(false),
    				"enableCRDs":          pulumi.Any(true),
    				"galley.enabled":      pulumi.Any(true),
    				"gateways.enabled":    pulumi.Any(false),
    				"gateways.istio-ingressgateway.resources.limits.cpu":      pulumi.Any("2000m"),
    				"gateways.istio-ingressgateway.resources.limits.memory":   pulumi.Any("1024Mi"),
    				"gateways.istio-ingressgateway.resources.requests.cpu":    pulumi.Any("100m"),
    				"gateways.istio-ingressgateway.resources.requests.memory": pulumi.Any("128Mi"),
    				"gateways.istio-ingressgateway.type":                      pulumi.Any("NodePort"),
    				"global.monitoring.type":                                  pulumi.Any("cluster-monitoring"),
    				"global.rancher.clusterId":                                foo_customClusterSync.ClusterId,
    				"istio_cni.enabled":                                       pulumi.Any("false"),
    				"istiocoredns.enabled":                                    pulumi.Any("false"),
    				"kiali.enabled":                                           pulumi.Any("true"),
    				"mixer.enabled":                                           pulumi.Any("true"),
    				"mixer.policy.enabled":                                    pulumi.Any("true"),
    				"mixer.policy.resources.limits.cpu":                       pulumi.Any("4800m"),
    				"mixer.policy.resources.limits.memory":                    pulumi.Any("4096Mi"),
    				"mixer.policy.resources.requests.cpu":                     pulumi.Any("1000m"),
    				"mixer.policy.resources.requests.memory":                  pulumi.Any("1024Mi"),
    				"mixer.telemetry.resources.limits.cpu":                    pulumi.Any("4800m"),
    				"mixer.telemetry.resources.limits.memory":                 pulumi.Any("4096Mi"),
    				"mixer.telemetry.resources.requests.cpu":                  pulumi.Any("1000m"),
    				"mixer.telemetry.resources.requests.memory":               pulumi.Any("1024Mi"),
    				"mtls.enabled":                                            pulumi.Any(false),
    				"nodeagent.enabled":                                       pulumi.Any(false),
    				"pilot.enabled":                                           pulumi.Any(true),
    				"pilot.resources.limits.cpu":                              pulumi.Any("1000m"),
    				"pilot.resources.limits.memory":                           pulumi.Any("4096Mi"),
    				"pilot.resources.requests.cpu":                            pulumi.Any("500m"),
    				"pilot.resources.requests.memory":                         pulumi.Any("2048Mi"),
    				"pilot.traceSampling":                                     pulumi.Any("1"),
    				"security.enabled":                                        pulumi.Any(true),
    				"sidecarInjectorWebhook.enabled":                          pulumi.Any(true),
    				"tracing.enabled":                                         pulumi.Any(true),
    				"tracing.jaeger.resources.limits.cpu":                     pulumi.Any("500m"),
    				"tracing.jaeger.resources.limits.memory":                  pulumi.Any("1024Mi"),
    				"tracing.jaeger.resources.requests.cpu":                   pulumi.Any("100m"),
    				"tracing.jaeger.resources.requests.memory":                pulumi.Any("100Mi"),
    			},
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.rancher2.Cluster;
    import com.pulumi.rancher2.ClusterArgs;
    import com.pulumi.rancher2.inputs.ClusterRkeConfigArgs;
    import com.pulumi.rancher2.inputs.ClusterRkeConfigNetworkArgs;
    import com.pulumi.rancher2.inputs.ClusterClusterMonitoringInputArgs;
    import com.pulumi.rancher2.ClusterSync;
    import com.pulumi.rancher2.ClusterSyncArgs;
    import com.pulumi.rancher2.Namespace;
    import com.pulumi.rancher2.NamespaceArgs;
    import com.pulumi.rancher2.App;
    import com.pulumi.rancher2.AppArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    /**
     * Pulumi Java example: creates an RKE cluster with monitoring, waits for
     * the monitoring stack via a ClusterSync, then deploys the istio app into
     * the cluster's System project.
     */
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            // Create a new rancher2 RKE Cluster with monitoring enabled.
            var foo_customCluster = new Cluster("foo-customCluster", ClusterArgs.builder()        
                .description("Foo rancher2 custom cluster")
                .rkeConfig(ClusterRkeConfigArgs.builder()
                    .network(ClusterRkeConfigNetworkArgs.builder()
                        .plugin("canal")
                        .build())
                    .build())
                .enableClusterMonitoring(true)
                .clusterMonitoringInput(ClusterClusterMonitoringInputArgs.builder()
                    // Helm-style answers tuning the monitoring chart.
                    .answers(Map.ofEntries(
                        Map.entry("exporter-kubelets.https", true),
                        Map.entry("exporter-node.enabled", true),
                        Map.entry("exporter-node.ports.metrics.port", 9796),
                        Map.entry("exporter-node.resources.limits.cpu", "200m"),
                        Map.entry("exporter-node.resources.limits.memory", "200Mi"),
                        Map.entry("grafana.persistence.enabled", false),
                        Map.entry("grafana.persistence.size", "10Gi"),
                        Map.entry("grafana.persistence.storageClass", "default"),
                        Map.entry("operator.resources.limits.memory", "500Mi"),
                        Map.entry("prometheus.persistence.enabled", "false"),
                        Map.entry("prometheus.persistence.size", "50Gi"),
                        Map.entry("prometheus.persistence.storageClass", "default"),
                        Map.entry("prometheus.persistent.useReleaseName", "true"),
                        Map.entry("prometheus.resources.core.limits.cpu", "1000m"),
                        Map.entry("prometheus.resources.core.limits.memory", "1500Mi"),
                        Map.entry("prometheus.resources.core.requests.cpu", "750m"),
                        Map.entry("prometheus.resources.core.requests.memory", "750Mi"),
                        Map.entry("prometheus.retention", "12h")
                    ))
                    .version("0.1.0")
                    .build())
                .build());
    
            // Cluster Sync wired to the cluster's monitoring flag, so it waits
            // until monitoring is active.
            var foo_customClusterSync = new ClusterSync("foo-customClusterSync", ClusterSyncArgs.builder()        
                .clusterId(foo_customCluster.id())
                .waitMonitoring(foo_customCluster.enableClusterMonitoring())
                .build());
    
            // Namespace in the cluster's System project (project id comes from
            // the ClusterSync output, so it waits on the sync).
            var foo_istio = new Namespace("foo-istio", NamespaceArgs.builder()        
                .projectId(foo_customClusterSync.systemProjectId())
                .description("istio namespace")
                .build());
    
            // App deploying istio into the namespace created above.
            var istio = new App("istio", AppArgs.builder()        
                .catalogName("system-library")
                .description("Terraform app acceptance test")
                .projectId(foo_istio.projectId())
                .templateName("rancher-istio")
                .templateVersion("0.1.1")
                .targetNamespace(foo_istio.id())
                // Helm-style answers for the rancher-istio chart.
                .answers(Map.ofEntries(
                    Map.entry("certmanager.enabled", false),
                    Map.entry("enableCRDs", true),
                    Map.entry("galley.enabled", true),
                    Map.entry("gateways.enabled", false),
                    Map.entry("gateways.istio-ingressgateway.resources.limits.cpu", "2000m"),
                    Map.entry("gateways.istio-ingressgateway.resources.limits.memory", "1024Mi"),
                    Map.entry("gateways.istio-ingressgateway.resources.requests.cpu", "100m"),
                    Map.entry("gateways.istio-ingressgateway.resources.requests.memory", "128Mi"),
                    Map.entry("gateways.istio-ingressgateway.type", "NodePort"),
                    Map.entry("global.monitoring.type", "cluster-monitoring"),
                    Map.entry("global.rancher.clusterId", foo_customClusterSync.clusterId()),
                    Map.entry("istio_cni.enabled", "false"),
                    Map.entry("istiocoredns.enabled", "false"),
                    Map.entry("kiali.enabled", "true"),
                    Map.entry("mixer.enabled", "true"),
                    Map.entry("mixer.policy.enabled", "true"),
                    Map.entry("mixer.policy.resources.limits.cpu", "4800m"),
                    Map.entry("mixer.policy.resources.limits.memory", "4096Mi"),
                    Map.entry("mixer.policy.resources.requests.cpu", "1000m"),
                    Map.entry("mixer.policy.resources.requests.memory", "1024Mi"),
                    Map.entry("mixer.telemetry.resources.limits.cpu", "4800m"),
                    Map.entry("mixer.telemetry.resources.limits.memory", "4096Mi"),
                    Map.entry("mixer.telemetry.resources.requests.cpu", "1000m"),
                    Map.entry("mixer.telemetry.resources.requests.memory", "1024Mi"),
                    Map.entry("mtls.enabled", false),
                    Map.entry("nodeagent.enabled", false),
                    Map.entry("pilot.enabled", true),
                    Map.entry("pilot.resources.limits.cpu", "1000m"),
                    Map.entry("pilot.resources.limits.memory", "4096Mi"),
                    Map.entry("pilot.resources.requests.cpu", "500m"),
                    Map.entry("pilot.resources.requests.memory", "2048Mi"),
                    Map.entry("pilot.traceSampling", "1"),
                    Map.entry("security.enabled", true),
                    Map.entry("sidecarInjectorWebhook.enabled", true),
                    Map.entry("tracing.enabled", true),
                    Map.entry("tracing.jaeger.resources.limits.cpu", "500m"),
                    Map.entry("tracing.jaeger.resources.limits.memory", "1024Mi"),
                    Map.entry("tracing.jaeger.resources.requests.cpu", "100m"),
                    Map.entry("tracing.jaeger.resources.requests.memory", "100Mi")
                ))
                .build());
    
        }
    }
    
    import pulumi
    import pulumi_rancher2 as rancher2

    # Create a new rancher2 RKE Cluster
    foo_custom_cluster = rancher2.Cluster("foo-customCluster",
        description="Foo rancher2 custom cluster",
        rke_config=rancher2.ClusterRkeConfigArgs(
            network=rancher2.ClusterRkeConfigNetworkArgs(
                plugin="canal",
            ),
        ),
        enable_cluster_monitoring=True,
        cluster_monitoring_input=rancher2.ClusterClusterMonitoringInputArgs(
            answers={
                "exporter-kubelets.https": True,
                "exporter-node.enabled": True,
                "exporter-node.ports.metrics.port": 9796,
                "exporter-node.resources.limits.cpu": "200m",
                "exporter-node.resources.limits.memory": "200Mi",
                "grafana.persistence.enabled": False,
                "grafana.persistence.size": "10Gi",
                "grafana.persistence.storageClass": "default",
                "operator.resources.limits.memory": "500Mi",
                # NOTE(review): some answer values are quoted strings ("false",
                # "true") while similar keys use booleans — presumably the types
                # the monitoring chart expects; confirm before normalizing.
                "prometheus.persistence.enabled": "false",
                "prometheus.persistence.size": "50Gi",
                "prometheus.persistence.storageClass": "default",
                "prometheus.persistent.useReleaseName": "true",
                "prometheus.resources.core.limits.cpu": "1000m",
                "prometheus.resources.core.limits.memory": "1500Mi",
                "prometheus.resources.core.requests.cpu": "750m",
                "prometheus.resources.core.requests.memory": "750Mi",
                "prometheus.retention": "12h",
            },
            version="0.1.0",
        ))
    # Create a new rancher2 Cluster Sync for foo-custom cluster
    # (wait_monitoring makes the sync block until monitoring is up)
    foo_custom_cluster_sync = rancher2.ClusterSync("foo-customClusterSync",
        cluster_id=foo_custom_cluster.id,
        wait_monitoring=foo_custom_cluster.enable_cluster_monitoring)
    # Create a new rancher2 Namespace
    foo_istio = rancher2.Namespace("foo-istio",
        project_id=foo_custom_cluster_sync.system_project_id,
        description="istio namespace")
    # Create a new rancher2 App deploying istio (should wait until monitoring is up and running)
    istio = rancher2.App("istio",
        catalog_name="system-library",
        description="Terraform app acceptance test",
        project_id=foo_istio.project_id,
        template_name="rancher-istio",
        template_version="0.1.1",
        target_namespace=foo_istio.id,
        answers={
            "certmanager.enabled": False,
            "enableCRDs": True,
            "galley.enabled": True,
            "gateways.enabled": False,
            "gateways.istio-ingressgateway.resources.limits.cpu": "2000m",
            "gateways.istio-ingressgateway.resources.limits.memory": "1024Mi",
            "gateways.istio-ingressgateway.resources.requests.cpu": "100m",
            "gateways.istio-ingressgateway.resources.requests.memory": "128Mi",
            "gateways.istio-ingressgateway.type": "NodePort",
            "global.monitoring.type": "cluster-monitoring",
            "global.rancher.clusterId": foo_custom_cluster_sync.cluster_id,
            "istio_cni.enabled": "false",
            "istiocoredns.enabled": "false",
            "kiali.enabled": "true",
            "mixer.enabled": "true",
            "mixer.policy.enabled": "true",
            "mixer.policy.resources.limits.cpu": "4800m",
            "mixer.policy.resources.limits.memory": "4096Mi",
            "mixer.policy.resources.requests.cpu": "1000m",
            "mixer.policy.resources.requests.memory": "1024Mi",
            "mixer.telemetry.resources.limits.cpu": "4800m",
            "mixer.telemetry.resources.limits.memory": "4096Mi",
            "mixer.telemetry.resources.requests.cpu": "1000m",
            "mixer.telemetry.resources.requests.memory": "1024Mi",
            "mtls.enabled": False,
            "nodeagent.enabled": False,
            "pilot.enabled": True,
            "pilot.resources.limits.cpu": "1000m",
            "pilot.resources.limits.memory": "4096Mi",
            "pilot.resources.requests.cpu": "500m",
            "pilot.resources.requests.memory": "2048Mi",
            "pilot.traceSampling": "1",
            "security.enabled": True,
            "sidecarInjectorWebhook.enabled": True,
            "tracing.enabled": True,
            "tracing.jaeger.resources.limits.cpu": "500m",
            "tracing.jaeger.resources.limits.memory": "1024Mi",
            "tracing.jaeger.resources.requests.cpu": "100m",
            "tracing.jaeger.resources.requests.memory": "100Mi",
        })
    
    import * as pulumi from "@pulumi/pulumi";
    import * as rancher2 from "@pulumi/rancher2";

    // Create a new rancher2 RKE Cluster
    const foo_customCluster = new rancher2.Cluster("foo-customCluster", {
        description: "Foo rancher2 custom cluster",
        rkeConfig: {
            network: {
                plugin: "canal",
            },
        },
        enableClusterMonitoring: true,
        clusterMonitoringInput: {
            answers: {
                "exporter-kubelets.https": true,
                "exporter-node.enabled": true,
                "exporter-node.ports.metrics.port": 9796,
                "exporter-node.resources.limits.cpu": "200m",
                "exporter-node.resources.limits.memory": "200Mi",
                "grafana.persistence.enabled": false,
                "grafana.persistence.size": "10Gi",
                "grafana.persistence.storageClass": "default",
                "operator.resources.limits.memory": "500Mi",
                // NOTE(review): some values below are quoted strings ("false",
                // "true") while similar keys use booleans — presumably what the
                // monitoring chart expects; confirm before normalizing types.
                "prometheus.persistence.enabled": "false",
                "prometheus.persistence.size": "50Gi",
                "prometheus.persistence.storageClass": "default",
                "prometheus.persistent.useReleaseName": "true",
                "prometheus.resources.core.limits.cpu": "1000m",
                "prometheus.resources.core.limits.memory": "1500Mi",
                "prometheus.resources.core.requests.cpu": "750m",
                "prometheus.resources.core.requests.memory": "750Mi",
                "prometheus.retention": "12h",
            },
            version: "0.1.0",
        },
    });
    // Create a new rancher2 Cluster Sync for foo-custom cluster
    // (waitMonitoring makes the sync block until monitoring is up)
    const foo_customClusterSync = new rancher2.ClusterSync("foo-customClusterSync", {
        clusterId: foo_customCluster.id,
        waitMonitoring: foo_customCluster.enableClusterMonitoring,
    });
    // Create a new rancher2 Namespace
    const foo_istio = new rancher2.Namespace("foo-istio", {
        projectId: foo_customClusterSync.systemProjectId,
        description: "istio namespace",
    });
    // Create a new rancher2 App deploying istio (should wait until monitoring is up and running)
    const istio = new rancher2.App("istio", {
        catalogName: "system-library",
        description: "Terraform app acceptance test",
        projectId: foo_istio.projectId,
        templateName: "rancher-istio",
        templateVersion: "0.1.1",
        targetNamespace: foo_istio.id,
        answers: {
            "certmanager.enabled": false,
            enableCRDs: true,
            "galley.enabled": true,
            "gateways.enabled": false,
            "gateways.istio-ingressgateway.resources.limits.cpu": "2000m",
            "gateways.istio-ingressgateway.resources.limits.memory": "1024Mi",
            "gateways.istio-ingressgateway.resources.requests.cpu": "100m",
            "gateways.istio-ingressgateway.resources.requests.memory": "128Mi",
            "gateways.istio-ingressgateway.type": "NodePort",
            "global.monitoring.type": "cluster-monitoring",
            "global.rancher.clusterId": foo_customClusterSync.clusterId,
            "istio_cni.enabled": "false",
            "istiocoredns.enabled": "false",
            "kiali.enabled": "true",
            "mixer.enabled": "true",
            "mixer.policy.enabled": "true",
            "mixer.policy.resources.limits.cpu": "4800m",
            "mixer.policy.resources.limits.memory": "4096Mi",
            "mixer.policy.resources.requests.cpu": "1000m",
            "mixer.policy.resources.requests.memory": "1024Mi",
            "mixer.telemetry.resources.limits.cpu": "4800m",
            "mixer.telemetry.resources.limits.memory": "4096Mi",
            "mixer.telemetry.resources.requests.cpu": "1000m",
            "mixer.telemetry.resources.requests.memory": "1024Mi",
            "mtls.enabled": false,
            "nodeagent.enabled": false,
            "pilot.enabled": true,
            "pilot.resources.limits.cpu": "1000m",
            "pilot.resources.limits.memory": "4096Mi",
            "pilot.resources.requests.cpu": "500m",
            "pilot.resources.requests.memory": "2048Mi",
            "pilot.traceSampling": "1",
            "security.enabled": true,
            "sidecarInjectorWebhook.enabled": true,
            "tracing.enabled": true,
            "tracing.jaeger.resources.limits.cpu": "500m",
            "tracing.jaeger.resources.limits.memory": "1024Mi",
            "tracing.jaeger.resources.requests.cpu": "100m",
            "tracing.jaeger.resources.requests.memory": "100Mi",
        },
    });
    
    resources:
      # Create a new rancher2 RKE Cluster
      foo-customCluster:
        type: rancher2:Cluster
        properties:
          description: Foo rancher2 custom cluster
          rkeConfig:
            network:
              plugin: canal
          enableClusterMonitoring: true
          clusterMonitoringInput:
            answers:
              exporter-kubelets.https: true
              exporter-node.enabled: true
              exporter-node.ports.metrics.port: 9796
              exporter-node.resources.limits.cpu: 200m
              exporter-node.resources.limits.memory: 200Mi
              grafana.persistence.enabled: false
              grafana.persistence.size: 10Gi
              grafana.persistence.storageClass: default
              operator.resources.limits.memory: 500Mi
              # NOTE(review): 'false'/'true' are quoted on purpose so YAML keeps
              # them as strings while similar keys use booleans — presumably
              # what the monitoring chart expects; confirm before changing.
              prometheus.persistence.enabled: 'false'
              prometheus.persistence.size: 50Gi
              prometheus.persistence.storageClass: default
              prometheus.persistent.useReleaseName: 'true'
              prometheus.resources.core.limits.cpu: 1000m
              prometheus.resources.core.limits.memory: 1500Mi
              prometheus.resources.core.requests.cpu: 750m
              prometheus.resources.core.requests.memory: 750Mi
              prometheus.retention: 12h
            version: 0.1.0
      # Create a new rancher2 Cluster Sync for foo-custom cluster
      foo-customClusterSync:
        type: rancher2:ClusterSync
        properties:
          # Bracketed interpolation (${["name"].prop}) is used because the
          # resource's logical name contains a hyphen.
          clusterId: ${["foo-customCluster"].id}
          waitMonitoring: ${["foo-customCluster"].enableClusterMonitoring}
      # Create a new rancher2 Namespace
      foo-istio:
        type: rancher2:Namespace
        properties:
          projectId: ${["foo-customClusterSync"].systemProjectId}
          description: istio namespace
      # Create a new rancher2 App deploying istio (should wait until monitoring is up and running)
      istio:
        type: rancher2:App
        properties:
          catalogName: system-library
          description: Terraform app acceptance test
          projectId: ${["foo-istio"].projectId}
          templateName: rancher-istio
          templateVersion: 0.1.1
          targetNamespace: ${["foo-istio"].id}
          answers:
            certmanager.enabled: false
            enableCRDs: true
            galley.enabled: true
            gateways.enabled: false
            gateways.istio-ingressgateway.resources.limits.cpu: 2000m
            gateways.istio-ingressgateway.resources.limits.memory: 1024Mi
            gateways.istio-ingressgateway.resources.requests.cpu: 100m
            gateways.istio-ingressgateway.resources.requests.memory: 128Mi
            gateways.istio-ingressgateway.type: NodePort
            global.monitoring.type: cluster-monitoring
            global.rancher.clusterId: ${["foo-customClusterSync"].clusterId}
            istio_cni.enabled: 'false'
            istiocoredns.enabled: 'false'
            kiali.enabled: 'true'
            mixer.enabled: 'true'
            mixer.policy.enabled: 'true'
            mixer.policy.resources.limits.cpu: 4800m
            mixer.policy.resources.limits.memory: 4096Mi
            mixer.policy.resources.requests.cpu: 1000m
            mixer.policy.resources.requests.memory: 1024Mi
            mixer.telemetry.resources.limits.cpu: 4800m
            mixer.telemetry.resources.limits.memory: 4096Mi
            mixer.telemetry.resources.requests.cpu: 1000m
            mixer.telemetry.resources.requests.memory: 1024Mi
            mtls.enabled: false
            nodeagent.enabled: false
            pilot.enabled: true
            pilot.resources.limits.cpu: 1000m
            pilot.resources.limits.memory: 4096Mi
            pilot.resources.requests.cpu: 500m
            pilot.resources.requests.memory: 2048Mi
            pilot.traceSampling: '1'
            security.enabled: true
            sidecarInjectorWebhook.enabled: true
            tracing.enabled: true
            tracing.jaeger.resources.limits.cpu: 500m
            tracing.jaeger.resources.limits.memory: 1024Mi
            tracing.jaeger.resources.requests.cpu: 100m
            tracing.jaeger.resources.requests.memory: 100Mi

    Creating Rancher v2 RKE cluster assigning a node pool (overlapped planes)

    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using Rancher2 = Pulumi.Rancher2;
    
    return await Deployment.RunAsync(() => 
    {
        // Create a new rancher2 RKE Cluster
        var foo_custom = new Rancher2.Cluster("foo-custom", new()
        {
            Description = "Foo rancher2 custom cluster",
            RkeConfig = new Rancher2.Inputs.ClusterRkeConfigArgs
            {
                Network = new Rancher2.Inputs.ClusterRkeConfigNetworkArgs
                {
                    Plugin = "canal",
                },
            },
        });
    
        // Create a new rancher2 Node Template
        var fooNodeTemplate = new Rancher2.NodeTemplate("fooNodeTemplate", new()
        {
            Description = "foo test",
            Amazonec2Config = new Rancher2.Inputs.NodeTemplateAmazonec2ConfigArgs
            {
                // NOTE(review): replace the <...> placeholders with real values;
                // avoid hard-coding AWS credentials — prefer Pulumi config/secrets.
                AccessKey = "<AWS_ACCESS_KEY>",
                SecretKey = "<AWS_SECRET_KEY>",
                Ami = "<AMI_ID>",
                Region = "<REGION>",
                SecurityGroups = new[]
                {
                    "<AWS_SECURITY_GROUP>",
                },
                SubnetId = "<SUBNET_ID>",
                VpcId = "<VPC_ID>",
                Zone = "<ZONE>",
            },
        });
    
        // Create a new rancher2 Node Pool: every node runs all three planes
        // (ControlPlane, Etcd and Worker are all true).
        var fooNodePool = new Rancher2.NodePool("fooNodePool", new()
        {
            ClusterId = foo_custom.Id,
            HostnamePrefix = "foo-cluster-0",
            NodeTemplateId = fooNodeTemplate.Id,
            Quantity = 3,
            ControlPlane = true,
            Etcd = true,
            Worker = true,
        });
    
    });
    
    package main
    
    import (
    	"github.com/pulumi/pulumi-rancher2/sdk/v5/go/rancher2"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		_, err := rancher2.NewCluster(ctx, "foo-custom", &rancher2.ClusterArgs{
    			Description: pulumi.String("Foo rancher2 custom cluster"),
    			RkeConfig: &rancher2.ClusterRkeConfigArgs{
    				Network: &rancher2.ClusterRkeConfigNetworkArgs{
    					Plugin: pulumi.String("canal"),
    				},
    			},
    		})
    		if err != nil {
    			return err
    		}
    		fooNodeTemplate, err := rancher2.NewNodeTemplate(ctx, "fooNodeTemplate", &rancher2.NodeTemplateArgs{
    			Description: pulumi.String("foo test"),
    			Amazonec2Config: &rancher2.NodeTemplateAmazonec2ConfigArgs{
    				AccessKey: pulumi.String("<AWS_ACCESS_KEY>"),
    				SecretKey: pulumi.String("<AWS_SECRET_KEY>"),
    				Ami:       pulumi.String("<AMI_ID>"),
    				Region:    pulumi.String("<REGION>"),
    				SecurityGroups: pulumi.StringArray{
    					pulumi.String("<AWS_SECURITY_GROUP>"),
    				},
    				SubnetId: pulumi.String("<SUBNET_ID>"),
    				VpcId:    pulumi.String("<VPC_ID>"),
    				Zone:     pulumi.String("<ZONE>"),
    			},
    		})
    		if err != nil {
    			return err
    		}
    		_, err = rancher2.NewNodePool(ctx, "fooNodePool", &rancher2.NodePoolArgs{
    			ClusterId:      foo_custom.ID(),
    			HostnamePrefix: pulumi.String("foo-cluster-0"),
    			NodeTemplateId: fooNodeTemplate.ID(),
    			Quantity:       pulumi.Int(3),
    			ControlPlane:   pulumi.Bool(true),
    			Etcd:           pulumi.Bool(true),
    			Worker:         pulumi.Bool(true),
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.rancher2.Cluster;
    import com.pulumi.rancher2.ClusterArgs;
    import com.pulumi.rancher2.inputs.ClusterRkeConfigArgs;
    import com.pulumi.rancher2.inputs.ClusterRkeConfigNetworkArgs;
    import com.pulumi.rancher2.NodeTemplate;
    import com.pulumi.rancher2.NodeTemplateArgs;
    import com.pulumi.rancher2.inputs.NodeTemplateAmazonec2ConfigArgs;
    import com.pulumi.rancher2.NodePool;
    import com.pulumi.rancher2.NodePoolArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        // Creates an RKE cluster, an EC2 node template, and a node pool whose
        // nodes run all three planes (control plane, etcd and worker).
        public static void stack(Context ctx) {
            var foo_custom = new Cluster("foo-custom", ClusterArgs.builder()        
                .description("Foo rancher2 custom cluster")
                .rkeConfig(ClusterRkeConfigArgs.builder()
                    .network(ClusterRkeConfigNetworkArgs.builder()
                        .plugin("canal")
                        .build())
                    .build())
                .build());
    
            var fooNodeTemplate = new NodeTemplate("fooNodeTemplate", NodeTemplateArgs.builder()        
                .description("foo test")
                .amazonec2Config(NodeTemplateAmazonec2ConfigArgs.builder()
                    // NOTE(review): replace the <...> placeholders with real values;
                    // avoid hard-coding AWS credentials — prefer Pulumi config/secrets.
                    .accessKey("<AWS_ACCESS_KEY>")
                    .secretKey("<AWS_SECRET_KEY>")
                    .ami("<AMI_ID>")
                    .region("<REGION>")
                    .securityGroups("<AWS_SECURITY_GROUP>")
                    .subnetId("<SUBNET_ID>")
                    .vpcId("<VPC_ID>")
                    .zone("<ZONE>")
                    .build())
                .build());
    
            var fooNodePool = new NodePool("fooNodePool", NodePoolArgs.builder()        
                .clusterId(foo_custom.id())
                .hostnamePrefix("foo-cluster-0")
                .nodeTemplateId(fooNodeTemplate.id())
                .quantity(3)
                .controlPlane(true)
                .etcd(true)
                .worker(true)
                .build());
    
        }
    }
    
    import pulumi
    import pulumi_rancher2 as rancher2
    
    # Create a new rancher2 RKE Cluster
    foo_custom = rancher2.Cluster("foo-custom",
        description="Foo rancher2 custom cluster",
        rke_config=rancher2.ClusterRkeConfigArgs(
            network=rancher2.ClusterRkeConfigNetworkArgs(
                plugin="canal",
            ),
        ))
    # Create a new rancher2 Node Template
    foo_node_template = rancher2.NodeTemplate("fooNodeTemplate",
        description="foo test",
        amazonec2_config=rancher2.NodeTemplateAmazonec2ConfigArgs(
            # NOTE(review): replace the <...> placeholders with real values;
            # avoid hard-coding AWS credentials — prefer Pulumi config/secrets.
            access_key="<AWS_ACCESS_KEY>",
            secret_key="<AWS_SECRET_KEY>",
            ami="<AMI_ID>",
            region="<REGION>",
            security_groups=["<AWS_SECURITY_GROUP>"],
            subnet_id="<SUBNET_ID>",
            vpc_id="<VPC_ID>",
            zone="<ZONE>",
        ))
    # Create a new rancher2 Node Pool: every node runs all three planes
    # (control_plane, etcd and worker are all True).
    foo_node_pool = rancher2.NodePool("fooNodePool",
        cluster_id=foo_custom.id,
        hostname_prefix="foo-cluster-0",
        node_template_id=foo_node_template.id,
        quantity=3,
        control_plane=True,
        etcd=True,
        worker=True)
    
    import * as pulumi from "@pulumi/pulumi";
    import * as rancher2 from "@pulumi/rancher2";
    
    // Create a new rancher2 RKE Cluster
    const foo_custom = new rancher2.Cluster("foo-custom", {
        description: "Foo rancher2 custom cluster",
        rkeConfig: {
            network: {
                plugin: "canal",
            },
        },
    });
    // Create a new rancher2 Node Template
    const fooNodeTemplate = new rancher2.NodeTemplate("fooNodeTemplate", {
        description: "foo test",
        amazonec2Config: {
            // NOTE(review): replace the <...> placeholders with real values;
            // avoid hard-coding AWS credentials — prefer Pulumi config/secrets.
            accessKey: "<AWS_ACCESS_KEY>",
            secretKey: "<AWS_SECRET_KEY>",
            ami: "<AMI_ID>",
            region: "<REGION>",
            securityGroups: ["<AWS_SECURITY_GROUP>"],
            subnetId: "<SUBNET_ID>",
            vpcId: "<VPC_ID>",
            zone: "<ZONE>",
        },
    });
    // Create a new rancher2 Node Pool: every node runs all three planes
    // (controlPlane, etcd and worker are all true).
    const fooNodePool = new rancher2.NodePool("fooNodePool", {
        clusterId: foo_custom.id,
        hostnamePrefix: "foo-cluster-0",
        nodeTemplateId: fooNodeTemplate.id,
        quantity: 3,
        controlPlane: true,
        etcd: true,
        worker: true,
    });
    
    resources:
      # Create a new rancher2 RKE Cluster
      foo-custom:
        type: rancher2:Cluster
        properties:
          description: Foo rancher2 custom cluster
          rkeConfig:
            network:
              plugin: canal
      # Create a new rancher2 Node Template
      fooNodeTemplate:
        type: rancher2:NodeTemplate
        properties:
          description: foo test
          amazonec2Config:
            # NOTE(review): replace the <...> placeholders with real values;
            # avoid hard-coding AWS credentials — prefer Pulumi config/secrets.
            accessKey: <AWS_ACCESS_KEY>
            secretKey: <AWS_SECRET_KEY>
            ami: <AMI_ID>
            region: <REGION>
            securityGroups:
              - <AWS_SECURITY_GROUP>
            subnetId: <SUBNET_ID>
            vpcId: <VPC_ID>
            zone: <ZONE>
      # Create a new rancher2 Node Pool
      fooNodePool:
        type: rancher2:NodePool
        properties:
          # Bracketed interpolation (${["name"].prop}) is used because the
          # cluster's logical name contains a hyphen; plain ${fooNodeTemplate.id}
          # works for hyphen-free names.
          clusterId: ${["foo-custom"].id}
          hostnamePrefix: foo-cluster-0
          nodeTemplateId: ${fooNodeTemplate.id}
          quantity: 3
          controlPlane: true
          etcd: true
          worker: true

    Creating Rancher v2 RKE cluster from template. For Rancher v2.3.x and above.

    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using Rancher2 = Pulumi.Rancher2;
    
    return await Deployment.RunAsync(() => 
    {
        // Create a new rancher2 cluster template with one revision (V1),
        // flagged as the default revision.
        var fooClusterTemplate = new Rancher2.ClusterTemplate("fooClusterTemplate", new()
        {
            Members = new[]
            {
                new Rancher2.Inputs.ClusterTemplateMemberArgs
                {
                    AccessType = "owner",
                    UserPrincipalId = "local://user-XXXXX",
                },
            },
            TemplateRevisions = new[]
            {
                new Rancher2.Inputs.ClusterTemplateTemplateRevisionArgs
                {
                    Name = "V1",
                    ClusterConfig = new Rancher2.Inputs.ClusterTemplateTemplateRevisionClusterConfigArgs
                    {
                        RkeConfig = new Rancher2.Inputs.ClusterTemplateTemplateRevisionClusterConfigRkeConfigArgs
                        {
                            Network = new Rancher2.Inputs.ClusterTemplateTemplateRevisionClusterConfigRkeConfigNetworkArgs
                            {
                                Plugin = "canal",
                            },
                            Services = new Rancher2.Inputs.ClusterTemplateTemplateRevisionClusterConfigRkeConfigServicesArgs
                            {
                                Etcd = new Rancher2.Inputs.ClusterTemplateTemplateRevisionClusterConfigRkeConfigServicesEtcdArgs
                                {
                                    Creation = "6h",
                                    Retention = "24h",
                                },
                            },
                        },
                    },
                    Default = true,
                },
            },
            Description = "Test cluster template v2",
        });
    
        // Create a new rancher2 RKE Cluster from template
        var fooCluster = new Rancher2.Cluster("fooCluster", new()
        {
            ClusterTemplateId = fooClusterTemplate.Id,
            // Resolve the ID of the first (and only) template revision.
            ClusterTemplateRevisionId = fooClusterTemplate.TemplateRevisions.Apply(templateRevisions => templateRevisions[0].Id),
        });
    
    });
    
    package main
    
    import (
    	"github.com/pulumi/pulumi-rancher2/sdk/v5/go/rancher2"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		// Create a cluster template with one revision (V1), flagged as the
    		// default revision, then create a cluster from that revision.
    		fooClusterTemplate, err := rancher2.NewClusterTemplate(ctx, "fooClusterTemplate", &rancher2.ClusterTemplateArgs{
    			Members: rancher2.ClusterTemplateMemberArray{
    				&rancher2.ClusterTemplateMemberArgs{
    					AccessType:      pulumi.String("owner"),
    					UserPrincipalId: pulumi.String("local://user-XXXXX"),
    				},
    			},
    			TemplateRevisions: rancher2.ClusterTemplateTemplateRevisionArray{
    				&rancher2.ClusterTemplateTemplateRevisionArgs{
    					Name: pulumi.String("V1"),
    					ClusterConfig: &rancher2.ClusterTemplateTemplateRevisionClusterConfigArgs{
    						RkeConfig: &rancher2.ClusterTemplateTemplateRevisionClusterConfigRkeConfigArgs{
    							Network: &rancher2.ClusterTemplateTemplateRevisionClusterConfigRkeConfigNetworkArgs{
    								Plugin: pulumi.String("canal"),
    							},
    							Services: &rancher2.ClusterTemplateTemplateRevisionClusterConfigRkeConfigServicesArgs{
    								Etcd: &rancher2.ClusterTemplateTemplateRevisionClusterConfigRkeConfigServicesEtcdArgs{
    									Creation:  pulumi.String("6h"),
    									Retention: pulumi.String("24h"),
    								},
    							},
    						},
    					},
    					Default: pulumi.Bool(true),
    				},
    			},
    			Description: pulumi.String("Test cluster template v2"),
    		})
    		if err != nil {
    			return err
    		}
    		_, err = rancher2.NewCluster(ctx, "fooCluster", &rancher2.ClusterArgs{
    			ClusterTemplateId: fooClusterTemplate.ID(),
    			// NOTE(review): if the generated SDK declares the revision's Id
    			// field as *string, it should be returned directly here instead of
    			// taking its address — confirm against the rancher2 Go SDK types.
    			ClusterTemplateRevisionId: fooClusterTemplate.TemplateRevisions.ApplyT(func(templateRevisions []rancher2.ClusterTemplateTemplateRevision) (*string, error) {
    				return &templateRevisions[0].Id, nil
    			}).(pulumi.StringPtrOutput),
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.rancher2.ClusterTemplate;
    import com.pulumi.rancher2.ClusterTemplateArgs;
    import com.pulumi.rancher2.inputs.ClusterTemplateMemberArgs;
    import com.pulumi.rancher2.inputs.ClusterTemplateTemplateRevisionArgs;
    import com.pulumi.rancher2.inputs.ClusterTemplateTemplateRevisionClusterConfigArgs;
    import com.pulumi.rancher2.inputs.ClusterTemplateTemplateRevisionClusterConfigRkeConfigArgs;
    import com.pulumi.rancher2.inputs.ClusterTemplateTemplateRevisionClusterConfigRkeConfigNetworkArgs;
    import com.pulumi.rancher2.inputs.ClusterTemplateTemplateRevisionClusterConfigRkeConfigServicesArgs;
    import com.pulumi.rancher2.inputs.ClusterTemplateTemplateRevisionClusterConfigRkeConfigServicesEtcdArgs;
    import com.pulumi.rancher2.Cluster;
    import com.pulumi.rancher2.ClusterArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        // Creates a cluster template with one revision (V1), flagged as the
        // default revision, then creates an RKE cluster from that revision.
        public static void stack(Context ctx) {
            var fooClusterTemplate = new ClusterTemplate("fooClusterTemplate", ClusterTemplateArgs.builder()
                .members(ClusterTemplateMemberArgs.builder()
                    .accessType("owner")
                    .userPrincipalId("local://user-XXXXX")
                    .build())
                .templateRevisions(ClusterTemplateTemplateRevisionArgs.builder()
                    .name("V1")
                    .clusterConfig(ClusterTemplateTemplateRevisionClusterConfigArgs.builder()
                        .rkeConfig(ClusterTemplateTemplateRevisionClusterConfigRkeConfigArgs.builder()
                            .network(ClusterTemplateTemplateRevisionClusterConfigRkeConfigNetworkArgs.builder()
                                .plugin("canal")
                                .build())
                            .services(ClusterTemplateTemplateRevisionClusterConfigRkeConfigServicesArgs.builder()
                                .etcd(ClusterTemplateTemplateRevisionClusterConfigRkeConfigServicesEtcdArgs.builder()
                                    .creation("6h")
                                    .retention("24h")
                                    .build())
                                .build())
                            .build())
                        .build())
                    // `default` is a Java keyword, hence the trailing underscore.
                    .default_(true)
                    .build())
                .description("Test cluster template v2")
                .build());
    
            var fooCluster = new Cluster("fooCluster", ClusterArgs.builder()
                .clusterTemplateId(fooClusterTemplate.id())
                // BUG FIX: a Java List cannot be indexed with [] — the original
                // `templateRevisions[0].id()` did not compile; use get(0).
                .clusterTemplateRevisionId(fooClusterTemplate.templateRevisions().applyValue(templateRevisions -> templateRevisions.get(0).id()))
                .build());
    
        }
    }
    
    import pulumi
    import pulumi_rancher2 as rancher2
    
    # Create a new rancher2 cluster template with one revision (V1),
    # flagged as the default revision.
    foo_cluster_template = rancher2.ClusterTemplate("fooClusterTemplate",
        members=[rancher2.ClusterTemplateMemberArgs(
            access_type="owner",
            user_principal_id="local://user-XXXXX",
        )],
        template_revisions=[rancher2.ClusterTemplateTemplateRevisionArgs(
            name="V1",
            cluster_config=rancher2.ClusterTemplateTemplateRevisionClusterConfigArgs(
                rke_config=rancher2.ClusterTemplateTemplateRevisionClusterConfigRkeConfigArgs(
                    network=rancher2.ClusterTemplateTemplateRevisionClusterConfigRkeConfigNetworkArgs(
                        plugin="canal",
                    ),
                    services=rancher2.ClusterTemplateTemplateRevisionClusterConfigRkeConfigServicesArgs(
                        etcd=rancher2.ClusterTemplateTemplateRevisionClusterConfigRkeConfigServicesEtcdArgs(
                            creation="6h",
                            retention="24h",
                        ),
                    ),
                ),
            ),
            default=True,
        )],
        description="Test cluster template v2")
    # Create a new rancher2 RKE Cluster from template
    foo_cluster = rancher2.Cluster("fooCluster",
        cluster_template_id=foo_cluster_template.id,
        # Use the ID of the first (and only) template revision.
        cluster_template_revision_id=foo_cluster_template.template_revisions[0].id)
    
    import * as pulumi from "@pulumi/pulumi";
    import * as rancher2 from "@pulumi/rancher2";
    
    // Create a new rancher2 cluster template with one revision (V1),
    // flagged as the default revision.
    const fooClusterTemplate = new rancher2.ClusterTemplate("fooClusterTemplate", {
        members: [{
            accessType: "owner",
            userPrincipalId: "local://user-XXXXX",
        }],
        templateRevisions: [{
            name: "V1",
            clusterConfig: {
                rkeConfig: {
                    network: {
                        plugin: "canal",
                    },
                    services: {
                        etcd: {
                            creation: "6h",
                            retention: "24h",
                        },
                    },
                },
            },
            "default": true,
        }],
        description: "Test cluster template v2",
    });
    // Create a new rancher2 RKE Cluster from template
    const fooCluster = new rancher2.Cluster("fooCluster", {
        clusterTemplateId: fooClusterTemplate.id,
        // Resolve the ID of the first (and only) template revision.
        clusterTemplateRevisionId: fooClusterTemplate.templateRevisions.apply(templateRevisions => templateRevisions[0].id),
    });
    
    resources:
      # Create a new rancher2 cluster template
      fooClusterTemplate:
        type: rancher2:ClusterTemplate
        properties:
          members:
            - accessType: owner
              userPrincipalId: local://user-XXXXX
          templateRevisions:
            - name: V1
              clusterConfig:
                rkeConfig:
                  network:
                    plugin: canal
                  services:
                    etcd:
                      creation: 6h
                      retention: 24h
              default: true
          description: Test cluster template v2
      # Create a new rancher2 RKE Cluster from template
      fooCluster:
        type: rancher2:Cluster
        properties:
          clusterTemplateId: ${fooClusterTemplate.id}
          clusterTemplateRevisionId: ${fooClusterTemplate.templateRevisions[0].id}
    

    Creating a Rancher v2 RKE cluster with an upgrade strategy. For Rancher v2.4.x and above.

    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using Rancher2 = Pulumi.Rancher2;
    
    return await Deployment.RunAsync(() => 
    {
        var foo = new Rancher2.Cluster("foo", new()
        {
            Description = "Terraform custom cluster",
            RkeConfig = new Rancher2.Inputs.ClusterRkeConfigArgs
            {
                Network = new Rancher2.Inputs.ClusterRkeConfigNetworkArgs
                {
                    Plugin = "canal",
                },
                Services = new Rancher2.Inputs.ClusterRkeConfigServicesArgs
                {
                    Etcd = new Rancher2.Inputs.ClusterRkeConfigServicesEtcdArgs
                    {
                        Creation = "6h",
                        Retention = "24h",
                    },
                    KubeApi = new Rancher2.Inputs.ClusterRkeConfigServicesKubeApiArgs
                    {
                        AuditLog = new Rancher2.Inputs.ClusterRkeConfigServicesKubeApiAuditLogArgs
                        {
                            Configuration = new Rancher2.Inputs.ClusterRkeConfigServicesKubeApiAuditLogConfigurationArgs
                            {
                                Format = "json",
                                MaxAge = 5,
                                MaxBackup = 5,
                                MaxSize = 100,
                                Path = "-",
                                Policy = @"apiVersion: audit.k8s.io/v1
    kind: Policy
    metadata:
      creationTimestamp: null
    omitStages:
    - RequestReceived
    rules:
    - level: RequestResponse
      resources:
      - resources:
        - pods
    
    ",
                            },
                            Enabled = true,
                        },
                    },
                },
                UpgradeStrategy = new Rancher2.Inputs.ClusterRkeConfigUpgradeStrategyArgs
                {
                    Drain = true,
                    MaxUnavailableWorker = "20%",
                },
            },
        });
    
    });
    
    package main
    
    import (
    	"github.com/pulumi/pulumi-rancher2/sdk/v5/go/rancher2"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		_, err := rancher2.NewCluster(ctx, "foo", &rancher2.ClusterArgs{
    			Description: pulumi.String("Terraform custom cluster"),
    			RkeConfig: &rancher2.ClusterRkeConfigArgs{
    				Network: &rancher2.ClusterRkeConfigNetworkArgs{
    					Plugin: pulumi.String("canal"),
    				},
    				Services: &rancher2.ClusterRkeConfigServicesArgs{
    					Etcd: &rancher2.ClusterRkeConfigServicesEtcdArgs{
    						Creation:  pulumi.String("6h"),
    						Retention: pulumi.String("24h"),
    					},
    					KubeApi: &rancher2.ClusterRkeConfigServicesKubeApiArgs{
    						AuditLog: &rancher2.ClusterRkeConfigServicesKubeApiAuditLogArgs{
    							Configuration: &rancher2.ClusterRkeConfigServicesKubeApiAuditLogConfigurationArgs{
    								Format:    pulumi.String("json"),
    								MaxAge:    pulumi.Int(5),
    								MaxBackup: pulumi.Int(5),
    								MaxSize:   pulumi.Int(100),
    								Path:      pulumi.String("-"),
    								Policy: pulumi.String(`apiVersion: audit.k8s.io/v1
    kind: Policy
    metadata:
      creationTimestamp: null
    omitStages:
    - RequestReceived
    rules:
    - level: RequestResponse
      resources:
      - resources:
        - pods
    
    `),
    							},
    							Enabled: pulumi.Bool(true),
    						},
    					},
    				},
    				UpgradeStrategy: &rancher2.ClusterRkeConfigUpgradeStrategyArgs{
    					Drain:                pulumi.Bool(true),
    					MaxUnavailableWorker: pulumi.String("20%"),
    				},
    			},
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.rancher2.Cluster;
    import com.pulumi.rancher2.ClusterArgs;
    import com.pulumi.rancher2.inputs.ClusterRkeConfigArgs;
    import com.pulumi.rancher2.inputs.ClusterRkeConfigNetworkArgs;
    import com.pulumi.rancher2.inputs.ClusterRkeConfigServicesArgs;
    import com.pulumi.rancher2.inputs.ClusterRkeConfigServicesEtcdArgs;
    import com.pulumi.rancher2.inputs.ClusterRkeConfigServicesKubeApiArgs;
    import com.pulumi.rancher2.inputs.ClusterRkeConfigServicesKubeApiAuditLogArgs;
    import com.pulumi.rancher2.inputs.ClusterRkeConfigServicesKubeApiAuditLogConfigurationArgs;
    import com.pulumi.rancher2.inputs.ClusterRkeConfigUpgradeStrategyArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            var foo = new Cluster("foo", ClusterArgs.builder()        
                .description("Terraform custom cluster")
                .rkeConfig(ClusterRkeConfigArgs.builder()
                    .network(ClusterRkeConfigNetworkArgs.builder()
                        .plugin("canal")
                        .build())
                    .services(ClusterRkeConfigServicesArgs.builder()
                        .etcd(ClusterRkeConfigServicesEtcdArgs.builder()
                            .creation("6h")
                            .retention("24h")
                            .build())
                        .kubeApi(ClusterRkeConfigServicesKubeApiArgs.builder()
                            .auditLog(ClusterRkeConfigServicesKubeApiAuditLogArgs.builder()
                                .configuration(ClusterRkeConfigServicesKubeApiAuditLogConfigurationArgs.builder()
                                    .format("json")
                                    .maxAge(5)
                                    .maxBackup(5)
                                    .maxSize(100)
                                    .path("-")
                                    .policy("""
    apiVersion: audit.k8s.io/v1
    kind: Policy
    metadata:
      creationTimestamp: null
    omitStages:
    - RequestReceived
    rules:
    - level: RequestResponse
      resources:
      - resources:
        - pods
    
                                    """)
                                    .build())
                                .enabled(true)
                                .build())
                            .build())
                        .build())
                    .upgradeStrategy(ClusterRkeConfigUpgradeStrategyArgs.builder()
                        .drain(true)
                        .maxUnavailableWorker("20%")
                        .build())
                    .build())
                .build());
    
        }
    }
    
    import pulumi
    import pulumi_rancher2 as rancher2
    
    foo = rancher2.Cluster("foo",
        description="Terraform custom cluster",
        rke_config=rancher2.ClusterRkeConfigArgs(
            network=rancher2.ClusterRkeConfigNetworkArgs(
                plugin="canal",
            ),
            services=rancher2.ClusterRkeConfigServicesArgs(
                etcd=rancher2.ClusterRkeConfigServicesEtcdArgs(
                    creation="6h",
                    retention="24h",
                ),
                kube_api=rancher2.ClusterRkeConfigServicesKubeApiArgs(
                    audit_log=rancher2.ClusterRkeConfigServicesKubeApiAuditLogArgs(
                        configuration=rancher2.ClusterRkeConfigServicesKubeApiAuditLogConfigurationArgs(
                            format="json",
                            max_age=5,
                            max_backup=5,
                            max_size=100,
                            path="-",
                            policy="""apiVersion: audit.k8s.io/v1
    kind: Policy
    metadata:
      creationTimestamp: null
    omitStages:
    - RequestReceived
    rules:
    - level: RequestResponse
      resources:
      - resources:
        - pods
    
    """,
                        ),
                        enabled=True,
                    ),
                ),
            ),
            upgrade_strategy=rancher2.ClusterRkeConfigUpgradeStrategyArgs(
                drain=True,
                max_unavailable_worker="20%",
            ),
        ))
    
    import * as pulumi from "@pulumi/pulumi";
    import * as rancher2 from "@pulumi/rancher2";
    
    const foo = new rancher2.Cluster("foo", {
        description: "Terraform custom cluster",
        rkeConfig: {
            network: {
                plugin: "canal",
            },
            services: {
                etcd: {
                    creation: "6h",
                    retention: "24h",
                },
                kubeApi: {
                    auditLog: {
                        configuration: {
                            format: "json",
                            maxAge: 5,
                            maxBackup: 5,
                            maxSize: 100,
                            path: "-",
                            policy: `apiVersion: audit.k8s.io/v1
    kind: Policy
    metadata:
      creationTimestamp: null
    omitStages:
    - RequestReceived
    rules:
    - level: RequestResponse
      resources:
      - resources:
        - pods
    
    `,
                        },
                        enabled: true,
                    },
                },
            },
            upgradeStrategy: {
                drain: true,
                maxUnavailableWorker: "20%",
            },
        },
    });
    
    resources:
      foo:
        type: rancher2:Cluster
        properties:
          description: Terraform custom cluster
          rkeConfig:
            network:
              plugin: canal
            services:
              etcd:
                creation: 6h
                retention: 24h
              kubeApi:
                auditLog:
                  configuration:
                    format: json
                    maxAge: 5
                    maxBackup: 5
                    maxSize: 100
                    path: '-'
                    policy: |+
                      apiVersion: audit.k8s.io/v1
                      kind: Policy
                      metadata:
                        creationTimestamp: null
                      omitStages:
                      - RequestReceived
                      rules:
                      - level: RequestResponse
                        resources:
                        - resources:
                          - pods                  
    
                  enabled: true
            upgradeStrategy:
              drain: true
              maxUnavailableWorker: 20%
    

    Creating a Rancher v2 RKE cluster with cluster agent customization. For Rancher v2.7.5 and above.

    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using Rancher2 = Pulumi.Rancher2;
    
    return await Deployment.RunAsync(() => 
    {
        var foo = new Rancher2.Cluster("foo", new()
        {
            ClusterAgentDeploymentCustomizations = new[]
            {
                new Rancher2.Inputs.ClusterClusterAgentDeploymentCustomizationArgs
                {
                    AppendTolerations = new[]
                    {
                        new Rancher2.Inputs.ClusterClusterAgentDeploymentCustomizationAppendTolerationArgs
                        {
                            Effect = "NoSchedule",
                            Key = "tolerate/control-plane",
                            Value = "true",
                        },
                    },
                    OverrideAffinity = @"{
      ""nodeAffinity"": {
        ""requiredDuringSchedulingIgnoredDuringExecution"": {
          ""nodeSelectorTerms"": [{
            ""matchExpressions"": [{
              ""key"": ""not.this/nodepool"",
              ""operator"": ""In"",
              ""values"": [
                ""true""
              ]
            }]
          }]
        }
      }
    }
    
    ",
                    OverrideResourceRequirements = new[]
                    {
                        new Rancher2.Inputs.ClusterClusterAgentDeploymentCustomizationOverrideResourceRequirementArgs
                        {
                            CpuLimit = "800",
                            CpuRequest = "500",
                            MemoryLimit = "800",
                            MemoryRequest = "500",
                        },
                    },
                },
            },
            Description = "Terraform cluster with agent customization",
            RkeConfig = new Rancher2.Inputs.ClusterRkeConfigArgs
            {
                Network = new Rancher2.Inputs.ClusterRkeConfigNetworkArgs
                {
                    Plugin = "canal",
                },
            },
        });
    
    });
    
    package main
    
    import (
    	"github.com/pulumi/pulumi-rancher2/sdk/v5/go/rancher2"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		_, err := rancher2.NewCluster(ctx, "foo", &rancher2.ClusterArgs{
    			ClusterAgentDeploymentCustomizations: rancher2.ClusterClusterAgentDeploymentCustomizationArray{
    				&rancher2.ClusterClusterAgentDeploymentCustomizationArgs{
    					AppendTolerations: rancher2.ClusterClusterAgentDeploymentCustomizationAppendTolerationArray{
    						&rancher2.ClusterClusterAgentDeploymentCustomizationAppendTolerationArgs{
    							Effect: pulumi.String("NoSchedule"),
    							Key:    pulumi.String("tolerate/control-plane"),
    							Value:  pulumi.String("true"),
    						},
    					},
    					OverrideAffinity: pulumi.String(`{
      "nodeAffinity": {
        "requiredDuringSchedulingIgnoredDuringExecution": {
          "nodeSelectorTerms": [{
            "matchExpressions": [{
              "key": "not.this/nodepool",
              "operator": "In",
              "values": [
                "true"
              ]
            }]
          }]
        }
      }
    }
    
    `),
    					OverrideResourceRequirements: rancher2.ClusterClusterAgentDeploymentCustomizationOverrideResourceRequirementArray{
    						&rancher2.ClusterClusterAgentDeploymentCustomizationOverrideResourceRequirementArgs{
    							CpuLimit:      pulumi.String("800"),
    							CpuRequest:    pulumi.String("500"),
    							MemoryLimit:   pulumi.String("800"),
    							MemoryRequest: pulumi.String("500"),
    						},
    					},
    				},
    			},
    			Description: pulumi.String("Terraform cluster with agent customization"),
    			RkeConfig: &rancher2.ClusterRkeConfigArgs{
    				Network: &rancher2.ClusterRkeConfigNetworkArgs{
    					Plugin: pulumi.String("canal"),
    				},
    			},
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.rancher2.Cluster;
    import com.pulumi.rancher2.ClusterArgs;
    import com.pulumi.rancher2.inputs.ClusterClusterAgentDeploymentCustomizationArgs;
    import com.pulumi.rancher2.inputs.ClusterRkeConfigArgs;
    import com.pulumi.rancher2.inputs.ClusterRkeConfigNetworkArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            var foo = new Cluster("foo", ClusterArgs.builder()        
                .clusterAgentDeploymentCustomizations(ClusterClusterAgentDeploymentCustomizationArgs.builder()
                    .appendTolerations(ClusterClusterAgentDeploymentCustomizationAppendTolerationArgs.builder()
                        .effect("NoSchedule")
                        .key("tolerate/control-plane")
                        .value("true")
                        .build())
                    .overrideAffinity("""
    {
      "nodeAffinity": {
        "requiredDuringSchedulingIgnoredDuringExecution": {
          "nodeSelectorTerms": [{
            "matchExpressions": [{
              "key": "not.this/nodepool",
              "operator": "In",
              "values": [
                "true"
              ]
            }]
          }]
        }
      }
    }
    
                    """)
                    .overrideResourceRequirements(ClusterClusterAgentDeploymentCustomizationOverrideResourceRequirementArgs.builder()
                        .cpuLimit("800")
                        .cpuRequest("500")
                        .memoryLimit("800")
                        .memoryRequest("500")
                        .build())
                    .build())
                .description("Terraform cluster with agent customization")
                .rkeConfig(ClusterRkeConfigArgs.builder()
                    .network(ClusterRkeConfigNetworkArgs.builder()
                        .plugin("canal")
                        .build())
                    .build())
                .build());
    
        }
    }
    
    import pulumi
    import pulumi_rancher2 as rancher2
    
    foo = rancher2.Cluster("foo",
        cluster_agent_deployment_customizations=[rancher2.ClusterClusterAgentDeploymentCustomizationArgs(
            append_tolerations=[rancher2.ClusterClusterAgentDeploymentCustomizationAppendTolerationArgs(
                effect="NoSchedule",
                key="tolerate/control-plane",
                value="true",
            )],
            override_affinity="""{
      "nodeAffinity": {
        "requiredDuringSchedulingIgnoredDuringExecution": {
          "nodeSelectorTerms": [{
            "matchExpressions": [{
              "key": "not.this/nodepool",
              "operator": "In",
              "values": [
                "true"
              ]
            }]
          }]
        }
      }
    }
    
    """,
            override_resource_requirements=[rancher2.ClusterClusterAgentDeploymentCustomizationOverrideResourceRequirementArgs(
                cpu_limit="800",
                cpu_request="500",
                memory_limit="800",
                memory_request="500",
            )],
        )],
        description="Terraform cluster with agent customization",
        rke_config=rancher2.ClusterRkeConfigArgs(
            network=rancher2.ClusterRkeConfigNetworkArgs(
                plugin="canal",
            ),
        ))
    
    import * as pulumi from "@pulumi/pulumi";
    import * as rancher2 from "@pulumi/rancher2";
    
    const foo = new rancher2.Cluster("foo", {
        clusterAgentDeploymentCustomizations: [{
            appendTolerations: [{
                effect: "NoSchedule",
                key: "tolerate/control-plane",
                value: "true",
            }],
            overrideAffinity: `{
      "nodeAffinity": {
        "requiredDuringSchedulingIgnoredDuringExecution": {
          "nodeSelectorTerms": [{
            "matchExpressions": [{
              "key": "not.this/nodepool",
              "operator": "In",
              "values": [
                "true"
              ]
            }]
          }]
        }
      }
    }
    
    `,
            overrideResourceRequirements: [{
                cpuLimit: "800",
                cpuRequest: "500",
                memoryLimit: "800",
                memoryRequest: "500",
            }],
        }],
        description: "Terraform cluster with agent customization",
        rkeConfig: {
            network: {
                plugin: "canal",
            },
        },
    });
    
    resources:
      foo:
        type: rancher2:Cluster
        properties:
          clusterAgentDeploymentCustomizations:
            - appendTolerations:
                - effect: NoSchedule
                  key: tolerate/control-plane
                  value: 'true'
              overrideAffinity: |+
                {
                  "nodeAffinity": {
                    "requiredDuringSchedulingIgnoredDuringExecution": {
                      "nodeSelectorTerms": [{
                        "matchExpressions": [{
                          "key": "not.this/nodepool",
                          "operator": "In",
                          "values": [
                            "true"
                          ]
                        }]
                      }]
                    }
                  }
                }            
    
              overrideResourceRequirements:
                - cpuLimit: '800'
                  cpuRequest: '500'
                  memoryLimit: '800'
                  memoryRequest: '500'
          description: Terraform cluster with agent customization
          rkeConfig:
            network:
              plugin: canal
    

    Importing an EKS cluster into Rancher v2, using eks_config_v2. For Rancher v2.5.x and above.

    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using Rancher2 = Pulumi.Rancher2;
    
    return await Deployment.RunAsync(() => 
    {
        var fooCloudCredential = new Rancher2.CloudCredential("fooCloudCredential", new()
        {
            Description = "foo test",
            Amazonec2CredentialConfig = new Rancher2.Inputs.CloudCredentialAmazonec2CredentialConfigArgs
            {
                AccessKey = "<AWS_ACCESS_KEY>",
                SecretKey = "<AWS_SECRET_KEY>",
            },
        });
    
        var fooCluster = new Rancher2.Cluster("fooCluster", new()
        {
            Description = "Terraform EKS cluster",
            EksConfigV2 = new Rancher2.Inputs.ClusterEksConfigV2Args
            {
                CloudCredentialId = fooCloudCredential.Id,
                Name = "<CLUSTER_NAME>",
                Region = "<EKS_REGION>",
                Imported = true,
            },
        });
    
    });
    
    package main
    
    import (
    	"github.com/pulumi/pulumi-rancher2/sdk/v5/go/rancher2"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		fooCloudCredential, err := rancher2.NewCloudCredential(ctx, "fooCloudCredential", &rancher2.CloudCredentialArgs{
    			Description: pulumi.String("foo test"),
    			Amazonec2CredentialConfig: &rancher2.CloudCredentialAmazonec2CredentialConfigArgs{
    				AccessKey: pulumi.String("<AWS_ACCESS_KEY>"),
    				SecretKey: pulumi.String("<AWS_SECRET_KEY>"),
    			},
    		})
    		if err != nil {
    			return err
    		}
    		_, err = rancher2.NewCluster(ctx, "fooCluster", &rancher2.ClusterArgs{
    			Description: pulumi.String("Terraform EKS cluster"),
    			EksConfigV2: &rancher2.ClusterEksConfigV2Args{
    				CloudCredentialId: fooCloudCredential.ID(),
    				Name:              pulumi.String("<CLUSTER_NAME>"),
    				Region:            pulumi.String("<EKS_REGION>"),
    				Imported:          pulumi.Bool(true),
    			},
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.rancher2.CloudCredential;
    import com.pulumi.rancher2.CloudCredentialArgs;
    import com.pulumi.rancher2.inputs.CloudCredentialAmazonec2CredentialConfigArgs;
    import com.pulumi.rancher2.Cluster;
    import com.pulumi.rancher2.ClusterArgs;
    import com.pulumi.rancher2.inputs.ClusterEksConfigV2Args;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            var fooCloudCredential = new CloudCredential("fooCloudCredential", CloudCredentialArgs.builder()        
                .description("foo test")
                .amazonec2CredentialConfig(CloudCredentialAmazonec2CredentialConfigArgs.builder()
                    .accessKey("<AWS_ACCESS_KEY>")
                    .secretKey("<AWS_SECRET_KEY>")
                    .build())
                .build());
    
            var fooCluster = new Cluster("fooCluster", ClusterArgs.builder()        
                .description("Terraform EKS cluster")
                .eksConfigV2(ClusterEksConfigV2Args.builder()
                    .cloudCredentialId(fooCloudCredential.id())
                    .name("<CLUSTER_NAME>")
                    .region("<EKS_REGION>")
                    .imported(true)
                    .build())
                .build());
    
        }
    }
    
    import pulumi
    import pulumi_rancher2 as rancher2
    
    foo_cloud_credential = rancher2.CloudCredential("fooCloudCredential",
        description="foo test",
        amazonec2_credential_config=rancher2.CloudCredentialAmazonec2CredentialConfigArgs(
            access_key="<AWS_ACCESS_KEY>",
            secret_key="<AWS_SECRET_KEY>",
        ))
    foo_cluster = rancher2.Cluster("fooCluster",
        description="Terraform EKS cluster",
        eks_config_v2=rancher2.ClusterEksConfigV2Args(
            cloud_credential_id=foo_cloud_credential.id,
            name="<CLUSTER_NAME>",
            region="<EKS_REGION>",
            imported=True,
        ))
    
    import * as pulumi from "@pulumi/pulumi";
    import * as rancher2 from "@pulumi/rancher2";
    
    const fooCloudCredential = new rancher2.CloudCredential("fooCloudCredential", {
        description: "foo test",
        amazonec2CredentialConfig: {
            accessKey: "<AWS_ACCESS_KEY>",
            secretKey: "<AWS_SECRET_KEY>",
        },
    });
    const fooCluster = new rancher2.Cluster("fooCluster", {
        description: "Terraform EKS cluster",
        eksConfigV2: {
            cloudCredentialId: fooCloudCredential.id,
            name: "<CLUSTER_NAME>",
            region: "<EKS_REGION>",
            imported: true,
        },
    });
    
    resources:
      fooCloudCredential:
        type: rancher2:CloudCredential
        properties:
          description: foo test
          amazonec2CredentialConfig:
            accessKey: <AWS_ACCESS_KEY>
            secretKey: <AWS_SECRET_KEY>
      fooCluster:
        type: rancher2:Cluster
        properties:
          description: Terraform EKS cluster
          eksConfigV2:
            cloudCredentialId: ${fooCloudCredential.id}
            name: <CLUSTER_NAME>
            region: <EKS_REGION>
            imported: true
    

    Creating an EKS cluster from Rancher v2, using eks_config_v2. For Rancher v2.5.x and above.

    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using Rancher2 = Pulumi.Rancher2;
    
    return await Deployment.RunAsync(() => 
    {
        var fooCloudCredential = new Rancher2.CloudCredential("fooCloudCredential", new()
        {
            Description = "foo test",
            Amazonec2CredentialConfig = new Rancher2.Inputs.CloudCredentialAmazonec2CredentialConfigArgs
            {
                AccessKey = "<AWS_ACCESS_KEY>",
                SecretKey = "<AWS_SECRET_KEY>",
            },
        });
    
        var fooCluster = new Rancher2.Cluster("fooCluster", new()
        {
            Description = "Terraform EKS cluster",
            EksConfigV2 = new Rancher2.Inputs.ClusterEksConfigV2Args
            {
                CloudCredentialId = fooCloudCredential.Id,
                Region = "<EKS_REGION>",
                KubernetesVersion = "1.24",
                LoggingTypes = new[]
                {
                    "audit",
                    "api",
                },
                NodeGroups = new[]
                {
                    new Rancher2.Inputs.ClusterEksConfigV2NodeGroupArgs
                    {
                        Name = "node_group1",
                        InstanceType = "t3.medium",
                        DesiredSize = 3,
                        MaxSize = 5,
                    },
                    new Rancher2.Inputs.ClusterEksConfigV2NodeGroupArgs
                    {
                        Name = "node_group2",
                        InstanceType = "m5.xlarge",
                        DesiredSize = 2,
                        MaxSize = 3,
                        NodeRole = "arn:aws:iam::role/test-NodeInstanceRole",
                    },
                },
                PrivateAccess = true,
                PublicAccess = false,
            },
        });
    
    });
    
    package main
    
    import (
    	"github.com/pulumi/pulumi-rancher2/sdk/v5/go/rancher2"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    // main creates an EKS cluster through Rancher v2 using eks_config_v2:
    // an AWS cloud credential first, then a cluster with two managed node
    // groups and the "audit"/"api" control-plane log types enabled.
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		// Cloud credential holding the AWS key pair; referenced by the
    		// cluster below via its ID.
    		fooCloudCredential, err := rancher2.NewCloudCredential(ctx, "fooCloudCredential", &rancher2.CloudCredentialArgs{
    			Description: pulumi.String("foo test"),
    			Amazonec2CredentialConfig: &rancher2.CloudCredentialAmazonec2CredentialConfigArgs{
    				AccessKey: pulumi.String("<AWS_ACCESS_KEY>"),
    				SecretKey: pulumi.String("<AWS_SECRET_KEY>"),
    			},
    		})
    		if err != nil {
    			return err
    		}
    		_, err = rancher2.NewCluster(ctx, "fooCluster", &rancher2.ClusterArgs{
    			Description: pulumi.String("Terraform EKS cluster"),
    			EksConfigV2: &rancher2.ClusterEksConfigV2Args{
    				CloudCredentialId: fooCloudCredential.ID(),
    				Region:            pulumi.String("<EKS_REGION>"),
    				KubernetesVersion: pulumi.String("1.24"),
    				LoggingTypes: pulumi.StringArray{
    					pulumi.String("audit"),
    					pulumi.String("api"),
    				},
    				// Two managed node groups; the second also pins a custom
    				// node instance role by ARN.
    				NodeGroups: rancher2.ClusterEksConfigV2NodeGroupArray{
    					&rancher2.ClusterEksConfigV2NodeGroupArgs{
    						Name:         pulumi.String("node_group1"),
    						InstanceType: pulumi.String("t3.medium"),
    						DesiredSize:  pulumi.Int(3),
    						MaxSize:      pulumi.Int(5),
    					},
    					&rancher2.ClusterEksConfigV2NodeGroupArgs{
    						Name:         pulumi.String("node_group2"),
    						InstanceType: pulumi.String("m5.xlarge"),
    						DesiredSize:  pulumi.Int(2),
    						MaxSize:      pulumi.Int(3),
    						NodeRole:     pulumi.String("arn:aws:iam::role/test-NodeInstanceRole"),
    					},
    				},
    				PrivateAccess: pulumi.Bool(true),
    				PublicAccess:  pulumi.Bool(false),
    			},
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.rancher2.CloudCredential;
    import com.pulumi.rancher2.CloudCredentialArgs;
    import com.pulumi.rancher2.inputs.CloudCredentialAmazonec2CredentialConfigArgs;
    import com.pulumi.rancher2.Cluster;
    import com.pulumi.rancher2.ClusterArgs;
    import com.pulumi.rancher2.inputs.ClusterEksConfigV2Args;
    // Fix: this type is used below but was missing from the imports, so the
    // example did not compile.
    import com.pulumi.rancher2.inputs.ClusterEksConfigV2NodeGroupArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        /**
         * Creates an EKS cluster through Rancher v2 using eksConfigV2: an AWS
         * cloud credential first, then a cluster with two managed node groups.
         */
        public static void stack(Context ctx) {
            // Cloud credential holding the AWS key pair; referenced by the
            // cluster below via its id.
            var fooCloudCredential = new CloudCredential("fooCloudCredential", CloudCredentialArgs.builder()
                .description("foo test")
                .amazonec2CredentialConfig(CloudCredentialAmazonec2CredentialConfigArgs.builder()
                    .accessKey("<AWS_ACCESS_KEY>")
                    .secretKey("<AWS_SECRET_KEY>")
                    .build())
                .build());
    
            var fooCluster = new Cluster("fooCluster", ClusterArgs.builder()
                .description("Terraform EKS cluster")
                .eksConfigV2(ClusterEksConfigV2Args.builder()
                    .cloudCredentialId(fooCloudCredential.id())
                    .region("<EKS_REGION>")
                    .kubernetesVersion("1.24")
                    .loggingTypes(                
                        "audit",
                        "api")
                    // Two managed node groups; the second also pins a custom
                    // node instance role by ARN.
                    .nodeGroups(                
                        ClusterEksConfigV2NodeGroupArgs.builder()
                            .name("node_group1")
                            .instanceType("t3.medium")
                            .desiredSize(3)
                            .maxSize(5)
                            .build(),
                        ClusterEksConfigV2NodeGroupArgs.builder()
                            .name("node_group2")
                            .instanceType("m5.xlarge")
                            .desiredSize(2)
                            .maxSize(3)
                            .nodeRole("arn:aws:iam::role/test-NodeInstanceRole")
                            .build())
                    .privateAccess(true)
                    .publicAccess(false)
                    .build())
                .build());
    
        }
    }
    
    import pulumi
    import pulumi_rancher2 as rancher2
    
    # EKS cluster through Rancher v2 using eks_config_v2: an AWS cloud
    # credential first, then a cluster with two managed node groups.
    foo_cloud_credential = rancher2.CloudCredential("fooCloudCredential",
        description="foo test",
        amazonec2_credential_config=rancher2.CloudCredentialAmazonec2CredentialConfigArgs(
            access_key="<AWS_ACCESS_KEY>",
            secret_key="<AWS_SECRET_KEY>",
        ))
    foo_cluster = rancher2.Cluster("fooCluster",
        description="Terraform EKS cluster",
        eks_config_v2=rancher2.ClusterEksConfigV2Args(
            cloud_credential_id=foo_cloud_credential.id,
            region="<EKS_REGION>",
            kubernetes_version="1.24",
            logging_types=[
                "audit",
                "api",
            ],
            # Two managed node groups; the second also pins a custom node
            # instance role by ARN.
            node_groups=[
                rancher2.ClusterEksConfigV2NodeGroupArgs(
                    name="node_group1",
                    instance_type="t3.medium",
                    desired_size=3,
                    max_size=5,
                ),
                rancher2.ClusterEksConfigV2NodeGroupArgs(
                    name="node_group2",
                    instance_type="m5.xlarge",
                    desired_size=2,
                    max_size=3,
                    node_role="arn:aws:iam::role/test-NodeInstanceRole",
                ),
            ],
            private_access=True,
            public_access=False,
        ))
    
    import * as pulumi from "@pulumi/pulumi";
    import * as rancher2 from "@pulumi/rancher2";
    
    // EKS cluster through Rancher v2 using eksConfigV2: an AWS cloud
    // credential first, then a cluster with two managed node groups.
    const fooCloudCredential = new rancher2.CloudCredential("fooCloudCredential", {
        description: "foo test",
        amazonec2CredentialConfig: {
            accessKey: "<AWS_ACCESS_KEY>",
            secretKey: "<AWS_SECRET_KEY>",
        },
    });
    const fooCluster = new rancher2.Cluster("fooCluster", {
        description: "Terraform EKS cluster",
        eksConfigV2: {
            cloudCredentialId: fooCloudCredential.id,
            region: "<EKS_REGION>",
            kubernetesVersion: "1.24",
            loggingTypes: [
                "audit",
                "api",
            ],
            // Two managed node groups; the second also pins a custom node
            // instance role by ARN.
            nodeGroups: [
                {
                    name: "node_group1",
                    instanceType: "t3.medium",
                    desiredSize: 3,
                    maxSize: 5,
                },
                {
                    name: "node_group2",
                    instanceType: "m5.xlarge",
                    desiredSize: 2,
                    maxSize: 3,
                    nodeRole: "arn:aws:iam::role/test-NodeInstanceRole",
                },
            ],
            privateAccess: true,
            publicAccess: false,
        },
    });
    
    # EKS cluster through Rancher v2 using eksConfigV2: an AWS cloud
    # credential first, then a cluster with two managed node groups.
    resources:
      fooCloudCredential:
        type: rancher2:CloudCredential
        properties:
          description: foo test
          amazonec2CredentialConfig:
            accessKey: <AWS_ACCESS_KEY>
            secretKey: <AWS_SECRET_KEY>
      fooCluster:
        type: rancher2:Cluster
        properties:
          description: Terraform EKS cluster
          eksConfigV2:
            cloudCredentialId: ${fooCloudCredential.id}
            region: <EKS_REGION>
            kubernetesVersion: '1.24'
            loggingTypes:
              - audit
              - api
            nodeGroups:
              - name: node_group1
                instanceType: t3.medium
                desiredSize: 3
                maxSize: 5
              - name: node_group2
                instanceType: m5.xlarge
                desiredSize: 2
                maxSize: 3
                nodeRole: arn:aws:iam::role/test-NodeInstanceRole
            privateAccess: true
            publicAccess: false
    

    Creating EKS cluster from Rancher v2, using eks_config_v2 and launch template. For Rancher v2.5.6 and above.

    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using Rancher2 = Pulumi.Rancher2;
    
    // EKS cluster through Rancher v2 using EksConfigV2 and an EC2 launch
    // template: an AWS cloud credential first, then the cluster itself.
    return await Deployment.RunAsync(() => 
    {
        var fooCloudCredential = new Rancher2.CloudCredential("fooCloudCredential", new()
        {
            Description = "foo test",
            Amazonec2CredentialConfig = new Rancher2.Inputs.CloudCredentialAmazonec2CredentialConfigArgs
            {
                AccessKey = "<AWS_ACCESS_KEY>",
                SecretKey = "<AWS_SECRET_KEY>",
            },
        });
    
        var fooCluster = new Rancher2.Cluster("fooCluster", new()
        {
            Description = "Terraform EKS cluster",
            EksConfigV2 = new Rancher2.Inputs.ClusterEksConfigV2Args
            {
                CloudCredentialId = fooCloudCredential.Id,
                Region = "<EKS_REGION>",
                KubernetesVersion = "1.24",
                LoggingTypes = new[]
                {
                    "audit",
                    "api",
                },
                NodeGroups = new[]
                {
                    new Rancher2.Inputs.ClusterEksConfigV2NodeGroupArgs
                    {
                        DesiredSize = 3,
                        MaxSize = 5,
                        Name = "node_group1",
                        // Node group instances come from an existing EC2
                        // launch template, pinned to a specific version.
                        LaunchTemplates = new[]
                        {
                            new Rancher2.Inputs.ClusterEksConfigV2NodeGroupLaunchTemplateArgs
                            {
                                Id = "<EC2_LAUNCH_TEMPLATE_ID>",
                                Version = 1,
                            },
                        },
                    },
                },
                PrivateAccess = true,
                PublicAccess = true,
            },
        });
    
    });
    
    package main
    
    import (
    	"github.com/pulumi/pulumi-rancher2/sdk/v5/go/rancher2"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    // main creates an EKS cluster through Rancher v2 using eks_config_v2 and
    // an EC2 launch template: an AWS cloud credential first, then the cluster.
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		fooCloudCredential, err := rancher2.NewCloudCredential(ctx, "fooCloudCredential", &rancher2.CloudCredentialArgs{
    			Description: pulumi.String("foo test"),
    			Amazonec2CredentialConfig: &rancher2.CloudCredentialAmazonec2CredentialConfigArgs{
    				AccessKey: pulumi.String("<AWS_ACCESS_KEY>"),
    				SecretKey: pulumi.String("<AWS_SECRET_KEY>"),
    			},
    		})
    		if err != nil {
    			return err
    		}
    		_, err = rancher2.NewCluster(ctx, "fooCluster", &rancher2.ClusterArgs{
    			Description: pulumi.String("Terraform EKS cluster"),
    			EksConfigV2: &rancher2.ClusterEksConfigV2Args{
    				CloudCredentialId: fooCloudCredential.ID(),
    				Region:            pulumi.String("<EKS_REGION>"),
    				KubernetesVersion: pulumi.String("1.24"),
    				LoggingTypes: pulumi.StringArray{
    					pulumi.String("audit"),
    					pulumi.String("api"),
    				},
    				NodeGroups: rancher2.ClusterEksConfigV2NodeGroupArray{
    					&rancher2.ClusterEksConfigV2NodeGroupArgs{
    						DesiredSize: pulumi.Int(3),
    						MaxSize:     pulumi.Int(5),
    						Name:        pulumi.String("node_group1"),
    						// Node group instances come from an existing EC2
    						// launch template, pinned to a specific version.
    						LaunchTemplates: rancher2.ClusterEksConfigV2NodeGroupLaunchTemplateArray{
    							&rancher2.ClusterEksConfigV2NodeGroupLaunchTemplateArgs{
    								Id:      pulumi.String("<EC2_LAUNCH_TEMPLATE_ID>"),
    								Version: pulumi.Int(1),
    							},
    						},
    					},
    				},
    				PrivateAccess: pulumi.Bool(true),
    				PublicAccess:  pulumi.Bool(true),
    			},
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.rancher2.CloudCredential;
    import com.pulumi.rancher2.CloudCredentialArgs;
    import com.pulumi.rancher2.inputs.CloudCredentialAmazonec2CredentialConfigArgs;
    import com.pulumi.rancher2.Cluster;
    import com.pulumi.rancher2.ClusterArgs;
    import com.pulumi.rancher2.inputs.ClusterEksConfigV2Args;
    // Fix: these two types are used below but were missing from the imports,
    // so the example did not compile.
    import com.pulumi.rancher2.inputs.ClusterEksConfigV2NodeGroupArgs;
    import com.pulumi.rancher2.inputs.ClusterEksConfigV2NodeGroupLaunchTemplateArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        /**
         * Creates an EKS cluster through Rancher v2 using eksConfigV2 and an
         * EC2 launch template: an AWS cloud credential first, then the cluster.
         */
        public static void stack(Context ctx) {
            var fooCloudCredential = new CloudCredential("fooCloudCredential", CloudCredentialArgs.builder()
                .description("foo test")
                .amazonec2CredentialConfig(CloudCredentialAmazonec2CredentialConfigArgs.builder()
                    .accessKey("<AWS_ACCESS_KEY>")
                    .secretKey("<AWS_SECRET_KEY>")
                    .build())
                .build());
    
            var fooCluster = new Cluster("fooCluster", ClusterArgs.builder()
                .description("Terraform EKS cluster")
                .eksConfigV2(ClusterEksConfigV2Args.builder()
                    .cloudCredentialId(fooCloudCredential.id())
                    .region("<EKS_REGION>")
                    .kubernetesVersion("1.24")
                    .loggingTypes(                
                        "audit",
                        "api")
                    // Node group instances come from an existing EC2 launch
                    // template, pinned to a specific version.
                    .nodeGroups(ClusterEksConfigV2NodeGroupArgs.builder()
                        .desiredSize(3)
                        .maxSize(5)
                        .name("node_group1")
                        .launchTemplates(ClusterEksConfigV2NodeGroupLaunchTemplateArgs.builder()
                            .id("<EC2_LAUNCH_TEMPLATE_ID>")
                            .version(1)
                            .build())
                        .build())
                    .privateAccess(true)
                    .publicAccess(true)
                    .build())
                .build());
    
        }
    }
    
    import pulumi
    import pulumi_rancher2 as rancher2
    
    # EKS cluster through Rancher v2 using eks_config_v2 and an EC2 launch
    # template: an AWS cloud credential first, then the cluster itself.
    foo_cloud_credential = rancher2.CloudCredential("fooCloudCredential",
        description="foo test",
        amazonec2_credential_config=rancher2.CloudCredentialAmazonec2CredentialConfigArgs(
            access_key="<AWS_ACCESS_KEY>",
            secret_key="<AWS_SECRET_KEY>",
        ))
    foo_cluster = rancher2.Cluster("fooCluster",
        description="Terraform EKS cluster",
        eks_config_v2=rancher2.ClusterEksConfigV2Args(
            cloud_credential_id=foo_cloud_credential.id,
            region="<EKS_REGION>",
            kubernetes_version="1.24",
            logging_types=[
                "audit",
                "api",
            ],
            # Node group instances come from an existing EC2 launch template,
            # pinned to a specific version.
            node_groups=[rancher2.ClusterEksConfigV2NodeGroupArgs(
                desired_size=3,
                max_size=5,
                name="node_group1",
                launch_templates=[rancher2.ClusterEksConfigV2NodeGroupLaunchTemplateArgs(
                    id="<EC2_LAUNCH_TEMPLATE_ID>",
                    version=1,
                )],
            )],
            private_access=True,
            public_access=True,
        ))
    
    import * as pulumi from "@pulumi/pulumi";
    import * as rancher2 from "@pulumi/rancher2";
    
    // EKS cluster through Rancher v2 using eksConfigV2 and an EC2 launch
    // template: an AWS cloud credential first, then the cluster itself.
    const fooCloudCredential = new rancher2.CloudCredential("fooCloudCredential", {
        description: "foo test",
        amazonec2CredentialConfig: {
            accessKey: "<AWS_ACCESS_KEY>",
            secretKey: "<AWS_SECRET_KEY>",
        },
    });
    const fooCluster = new rancher2.Cluster("fooCluster", {
        description: "Terraform EKS cluster",
        eksConfigV2: {
            cloudCredentialId: fooCloudCredential.id,
            region: "<EKS_REGION>",
            kubernetesVersion: "1.24",
            loggingTypes: [
                "audit",
                "api",
            ],
            // Node group instances come from an existing EC2 launch template,
            // pinned to a specific version.
            nodeGroups: [{
                desiredSize: 3,
                maxSize: 5,
                name: "node_group1",
                launchTemplates: [{
                    id: "<EC2_LAUNCH_TEMPLATE_ID>",
                    version: 1,
                }],
            }],
            privateAccess: true,
            publicAccess: true,
        },
    });
    
    # EKS cluster through Rancher v2 using eksConfigV2 and an EC2 launch
    # template: an AWS cloud credential first, then the cluster itself.
    resources:
      fooCloudCredential:
        type: rancher2:CloudCredential
        properties:
          description: foo test
          amazonec2CredentialConfig:
            accessKey: <AWS_ACCESS_KEY>
            secretKey: <AWS_SECRET_KEY>
      fooCluster:
        type: rancher2:Cluster
        properties:
          description: Terraform EKS cluster
          eksConfigV2:
            cloudCredentialId: ${fooCloudCredential.id}
            region: <EKS_REGION>
            kubernetesVersion: '1.24'
            loggingTypes:
              - audit
              - api
            nodeGroups:
              - desiredSize: 3
                maxSize: 5
                name: node_group1
                launchTemplates:
                  - id: <EC2_LAUNCH_TEMPLATE_ID>
                    version: 1
            privateAccess: true
            publicAccess: true
    

    Creating AKS cluster from Rancher v2, using aks_config_v2. For Rancher v2.6.0 and above.

    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using Rancher2 = Pulumi.Rancher2;
    
    // AKS cluster through Rancher v2 using AksConfigV2: an Azure cloud
    // credential first, then a cluster with a System and a User node pool.
    return await Deployment.RunAsync(() => 
    {
        var foo_aks = new Rancher2.CloudCredential("foo-aks", new()
        {
            AzureCredentialConfig = new Rancher2.Inputs.CloudCredentialAzureCredentialConfigArgs
            {
                ClientId = "<CLIENT_ID>",
                ClientSecret = "<CLIENT_SECRET>",
                SubscriptionId = "<SUBSCRIPTION_ID>",
            },
        });
    
        var foo = new Rancher2.Cluster("foo", new()
        {
            Description = "Terraform AKS cluster",
            AksConfigV2 = new Rancher2.Inputs.ClusterAksConfigV2Args
            {
                CloudCredentialId = foo_aks.Id,
                ResourceGroup = "<RESOURCE_GROUP>",
                ResourceLocation = "<RESOURCE_LOCATION>",
                DnsPrefix = "<DNS_PREFIX>",
                KubernetesVersion = "1.24.6",
                NetworkPlugin = "<NETWORK_PLUGIN>",
                NodePools = new[]
                {
                    new Rancher2.Inputs.ClusterAksConfigV2NodePoolArgs
                    {
                        AvailabilityZones = new[]
                        {
                            "1",
                            "2",
                            "3",
                        },
                        Name = "<NODEPOOL_NAME_1>",
                        Mode = "System",
                        Count = 1,
                        OrchestratorVersion = "1.21.2",
                        OsDiskSizeGb = 128,
                        VmSize = "Standard_DS2_v2",
                    },
                    // User-mode pool with extra scheduling metadata: surge
                    // limit, labels, and a taint.
                    new Rancher2.Inputs.ClusterAksConfigV2NodePoolArgs
                    {
                        AvailabilityZones = new[]
                        {
                            "1",
                            "2",
                            "3",
                        },
                        Name = "<NODEPOOL_NAME_2>",
                        Count = 1,
                        Mode = "User",
                        OrchestratorVersion = "1.21.2",
                        OsDiskSizeGb = 128,
                        VmSize = "Standard_DS2_v2",
                        MaxSurge = "25%",
                        Labels = 
                        {
                            { "test1", "data1" },
                            { "test2", "data2" },
                        },
                        Taints = new[]
                        {
                            "none:PreferNoSchedule",
                        },
                    },
                },
            },
        });
    
    });
    
    package main
    
    import (
    	"github.com/pulumi/pulumi-rancher2/sdk/v5/go/rancher2"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		_, err := rancher2.NewCloudCredential(ctx, "foo-aks", &rancher2.CloudCredentialArgs{
    			AzureCredentialConfig: &rancher2.CloudCredentialAzureCredentialConfigArgs{
    				ClientId:       pulumi.String("<CLIENT_ID>"),
    				ClientSecret:   pulumi.String("<CLIENT_SECRET>"),
    				SubscriptionId: pulumi.String("<SUBSCRIPTION_ID>"),
    			},
    		})
    		if err != nil {
    			return err
    		}
    		_, err = rancher2.NewCluster(ctx, "foo", &rancher2.ClusterArgs{
    			Description: pulumi.String("Terraform AKS cluster"),
    			AksConfigV2: &rancher2.ClusterAksConfigV2Args{
    				CloudCredentialId: foo_aks.ID(),
    				ResourceGroup:     pulumi.String("<RESOURCE_GROUP>"),
    				ResourceLocation:  pulumi.String("<RESOURCE_LOCATION>"),
    				DnsPrefix:         pulumi.String("<DNS_PREFIX>"),
    				KubernetesVersion: pulumi.String("1.24.6"),
    				NetworkPlugin:     pulumi.String("<NETWORK_PLUGIN>"),
    				NodePools: rancher2.ClusterAksConfigV2NodePoolArray{
    					&rancher2.ClusterAksConfigV2NodePoolArgs{
    						AvailabilityZones: pulumi.StringArray{
    							pulumi.String("1"),
    							pulumi.String("2"),
    							pulumi.String("3"),
    						},
    						Name:                pulumi.String("<NODEPOOL_NAME_1>"),
    						Mode:                pulumi.String("System"),
    						Count:               pulumi.Int(1),
    						OrchestratorVersion: pulumi.String("1.21.2"),
    						OsDiskSizeGb:        pulumi.Int(128),
    						VmSize:              pulumi.String("Standard_DS2_v2"),
    					},
    					&rancher2.ClusterAksConfigV2NodePoolArgs{
    						AvailabilityZones: pulumi.StringArray{
    							pulumi.String("1"),
    							pulumi.String("2"),
    							pulumi.String("3"),
    						},
    						Name:                pulumi.String("<NODEPOOL_NAME_2>"),
    						Count:               pulumi.Int(1),
    						Mode:                pulumi.String("User"),
    						OrchestratorVersion: pulumi.String("1.21.2"),
    						OsDiskSizeGb:        pulumi.Int(128),
    						VmSize:              pulumi.String("Standard_DS2_v2"),
    						MaxSurge:            pulumi.String("25%"),
    						Labels: pulumi.Map{
    							"test1": pulumi.Any("data1"),
    							"test2": pulumi.Any("data2"),
    						},
    						Taints: pulumi.StringArray{
    							pulumi.String("none:PreferNoSchedule"),
    						},
    					},
    				},
    			},
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.rancher2.CloudCredential;
    import com.pulumi.rancher2.CloudCredentialArgs;
    import com.pulumi.rancher2.inputs.CloudCredentialAzureCredentialConfigArgs;
    import com.pulumi.rancher2.Cluster;
    import com.pulumi.rancher2.ClusterArgs;
    import com.pulumi.rancher2.inputs.ClusterAksConfigV2Args;
    // Fix: this type is used below but was missing from the imports, so the
    // example did not compile.
    import com.pulumi.rancher2.inputs.ClusterAksConfigV2NodePoolArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        /**
         * Creates an AKS cluster through Rancher v2 using aksConfigV2: an
         * Azure cloud credential first, then a cluster with a System and a
         * User node pool.
         */
        public static void stack(Context ctx) {
            var foo_aks = new CloudCredential("foo-aks", CloudCredentialArgs.builder()
                .azureCredentialConfig(CloudCredentialAzureCredentialConfigArgs.builder()
                    .clientId("<CLIENT_ID>")
                    .clientSecret("<CLIENT_SECRET>")
                    .subscriptionId("<SUBSCRIPTION_ID>")
                    .build())
                .build());
    
            var foo = new Cluster("foo", ClusterArgs.builder()
                .description("Terraform AKS cluster")
                .aksConfigV2(ClusterAksConfigV2Args.builder()
                    .cloudCredentialId(foo_aks.id())
                    .resourceGroup("<RESOURCE_GROUP>")
                    .resourceLocation("<RESOURCE_LOCATION>")
                    .dnsPrefix("<DNS_PREFIX>")
                    .kubernetesVersion("1.24.6")
                    .networkPlugin("<NETWORK_PLUGIN>")
                    .nodePools(                
                        ClusterAksConfigV2NodePoolArgs.builder()
                            .availabilityZones(                        
                                "1",
                                "2",
                                "3")
                            .name("<NODEPOOL_NAME_1>")
                            .mode("System")
                            .count(1)
                            .orchestratorVersion("1.21.2")
                            .osDiskSizeGb(128)
                            .vmSize("Standard_DS2_v2")
                            .build(),
                        // User-mode pool with extra scheduling metadata:
                        // surge limit, labels, and a taint.
                        ClusterAksConfigV2NodePoolArgs.builder()
                            .availabilityZones(                        
                                "1",
                                "2",
                                "3")
                            .name("<NODEPOOL_NAME_2>")
                            .count(1)
                            .mode("User")
                            .orchestratorVersion("1.21.2")
                            .osDiskSizeGb(128)
                            .vmSize("Standard_DS2_v2")
                            .maxSurge("25%")
                            .labels(Map.ofEntries(
                                Map.entry("test1", "data1"),
                                Map.entry("test2", "data2")
                            ))
                            .taints("none:PreferNoSchedule")
                            .build())
                    .build())
                .build());
    
        }
    }
    
    import pulumi
    import pulumi_rancher2 as rancher2
    
    # AKS cluster through Rancher v2 using aks_config_v2: an Azure cloud
    # credential first, then a cluster with a System and a User node pool.
    foo_aks = rancher2.CloudCredential("foo-aks", azure_credential_config=rancher2.CloudCredentialAzureCredentialConfigArgs(
        client_id="<CLIENT_ID>",
        client_secret="<CLIENT_SECRET>",
        subscription_id="<SUBSCRIPTION_ID>",
    ))
    foo = rancher2.Cluster("foo",
        description="Terraform AKS cluster",
        aks_config_v2=rancher2.ClusterAksConfigV2Args(
            cloud_credential_id=foo_aks.id,
            resource_group="<RESOURCE_GROUP>",
            resource_location="<RESOURCE_LOCATION>",
            dns_prefix="<DNS_PREFIX>",
            kubernetes_version="1.24.6",
            network_plugin="<NETWORK_PLUGIN>",
            node_pools=[
                rancher2.ClusterAksConfigV2NodePoolArgs(
                    availability_zones=[
                        "1",
                        "2",
                        "3",
                    ],
                    name="<NODEPOOL_NAME_1>",
                    mode="System",
                    count=1,
                    orchestrator_version="1.21.2",
                    os_disk_size_gb=128,
                    vm_size="Standard_DS2_v2",
                ),
                # User-mode pool with extra scheduling metadata: surge limit,
                # labels, and a taint.
                rancher2.ClusterAksConfigV2NodePoolArgs(
                    availability_zones=[
                        "1",
                        "2",
                        "3",
                    ],
                    name="<NODEPOOL_NAME_2>",
                    count=1,
                    mode="User",
                    orchestrator_version="1.21.2",
                    os_disk_size_gb=128,
                    vm_size="Standard_DS2_v2",
                    max_surge="25%",
                    labels={
                        "test1": "data1",
                        "test2": "data2",
                    },
                    taints=["none:PreferNoSchedule"],
                ),
            ],
        ))
    
    import * as pulumi from "@pulumi/pulumi";
    import * as rancher2 from "@pulumi/rancher2";
    
    // AKS cluster through Rancher v2 using aksConfigV2: an Azure cloud
    // credential first, then a cluster with a System and a User node pool.
    const foo_aks = new rancher2.CloudCredential("foo-aks", {azureCredentialConfig: {
        clientId: "<CLIENT_ID>",
        clientSecret: "<CLIENT_SECRET>",
        subscriptionId: "<SUBSCRIPTION_ID>",
    }});
    const foo = new rancher2.Cluster("foo", {
        description: "Terraform AKS cluster",
        aksConfigV2: {
            cloudCredentialId: foo_aks.id,
            resourceGroup: "<RESOURCE_GROUP>",
            resourceLocation: "<RESOURCE_LOCATION>",
            dnsPrefix: "<DNS_PREFIX>",
            kubernetesVersion: "1.24.6",
            networkPlugin: "<NETWORK_PLUGIN>",
            nodePools: [
                {
                    availabilityZones: [
                        "1",
                        "2",
                        "3",
                    ],
                    name: "<NODEPOOL_NAME_1>",
                    mode: "System",
                    count: 1,
                    orchestratorVersion: "1.21.2",
                    osDiskSizeGb: 128,
                    vmSize: "Standard_DS2_v2",
                },
                // User-mode pool with extra scheduling metadata: surge limit,
                // labels, and a taint.
                {
                    availabilityZones: [
                        "1",
                        "2",
                        "3",
                    ],
                    name: "<NODEPOOL_NAME_2>",
                    count: 1,
                    mode: "User",
                    orchestratorVersion: "1.21.2",
                    osDiskSizeGb: 128,
                    vmSize: "Standard_DS2_v2",
                    maxSurge: "25%",
                    labels: {
                        test1: "data1",
                        test2: "data2",
                    },
                    taints: ["none:PreferNoSchedule"],
                },
            ],
        },
    });
    
    # AKS cluster through Rancher v2 using aksConfigV2: an Azure cloud
    # credential first, then a cluster with a System and a User node pool.
    resources:
      foo-aks:
        type: rancher2:CloudCredential
        properties:
          azureCredentialConfig:
            clientId: <CLIENT_ID>
            clientSecret: <CLIENT_SECRET>
            subscriptionId: <SUBSCRIPTION_ID>
      foo:
        type: rancher2:Cluster
        properties:
          description: Terraform AKS cluster
          aksConfigV2:
            # Bracketed lookup because the resource name contains a hyphen.
            cloudCredentialId: ${["foo-aks"].id}
            resourceGroup: <RESOURCE_GROUP>
            resourceLocation: <RESOURCE_LOCATION>
            dnsPrefix: <DNS_PREFIX>
            kubernetesVersion: 1.24.6
            networkPlugin: <NETWORK_PLUGIN>
            nodePools:
              - availabilityZones:
                  - '1'
                  - '2'
                  - '3'
                name: <NODEPOOL_NAME_1>
                mode: System
                count: 1
                orchestratorVersion: 1.21.2
                osDiskSizeGb: 128
                vmSize: Standard_DS2_v2
              # User-mode pool with extra scheduling metadata: surge limit,
              # labels, and a taint.
              - availabilityZones:
                  - '1'
                  - '2'
                  - '3'
                name: <NODEPOOL_NAME_2>
                count: 1
                mode: User
                orchestratorVersion: 1.21.2
                osDiskSizeGb: 128
                vmSize: Standard_DS2_v2
                maxSurge: 25%
                labels:
                  test1: data1
                  test2: data2
                taints:
                  - none:PreferNoSchedule
    

    Create Cluster Resource

    new Cluster(name: string, args?: ClusterArgs, opts?: CustomResourceOptions);
    @overload
    def Cluster(resource_name: str,
                opts: Optional[ResourceOptions] = None,
                agent_env_vars: Optional[Sequence[ClusterAgentEnvVarArgs]] = None,
                aks_config: Optional[ClusterAksConfigArgs] = None,
                aks_config_v2: Optional[ClusterAksConfigV2Args] = None,
                annotations: Optional[Mapping[str, Any]] = None,
                cluster_agent_deployment_customizations: Optional[Sequence[ClusterClusterAgentDeploymentCustomizationArgs]] = None,
                cluster_auth_endpoint: Optional[ClusterClusterAuthEndpointArgs] = None,
                cluster_monitoring_input: Optional[ClusterClusterMonitoringInputArgs] = None,
                cluster_template_answers: Optional[ClusterClusterTemplateAnswersArgs] = None,
                cluster_template_id: Optional[str] = None,
                cluster_template_questions: Optional[Sequence[ClusterClusterTemplateQuestionArgs]] = None,
                cluster_template_revision_id: Optional[str] = None,
                default_pod_security_admission_configuration_template_name: Optional[str] = None,
                default_pod_security_policy_template_id: Optional[str] = None,
                description: Optional[str] = None,
                desired_agent_image: Optional[str] = None,
                desired_auth_image: Optional[str] = None,
                docker_root_dir: Optional[str] = None,
                driver: Optional[str] = None,
                eks_config: Optional[ClusterEksConfigArgs] = None,
                eks_config_v2: Optional[ClusterEksConfigV2Args] = None,
                enable_cluster_alerting: Optional[bool] = None,
                enable_cluster_monitoring: Optional[bool] = None,
                enable_network_policy: Optional[bool] = None,
                fleet_agent_deployment_customizations: Optional[Sequence[ClusterFleetAgentDeploymentCustomizationArgs]] = None,
                fleet_workspace_name: Optional[str] = None,
                gke_config: Optional[ClusterGkeConfigArgs] = None,
                gke_config_v2: Optional[ClusterGkeConfigV2Args] = None,
                k3s_config: Optional[ClusterK3sConfigArgs] = None,
                labels: Optional[Mapping[str, Any]] = None,
                name: Optional[str] = None,
                oke_config: Optional[ClusterOkeConfigArgs] = None,
                rke2_config: Optional[ClusterRke2ConfigArgs] = None,
                rke_config: Optional[ClusterRkeConfigArgs] = None,
                windows_prefered_cluster: Optional[bool] = None)
    @overload
    def Cluster(resource_name: str,
                args: Optional[ClusterArgs] = None,
                opts: Optional[ResourceOptions] = None)
    func NewCluster(ctx *Context, name string, args *ClusterArgs, opts ...ResourceOption) (*Cluster, error)
    public Cluster(string name, ClusterArgs? args = null, CustomResourceOptions? opts = null)
    public Cluster(String name, ClusterArgs args)
    public Cluster(String name, ClusterArgs args, CustomResourceOptions options)
    
    type: rancher2:Cluster
    properties: # The arguments to resource properties.
    options: # Bag of options to control resource's behavior.
    
    
    name string
    The unique name of the resource.
    args ClusterArgs
    The arguments to resource properties.
    opts CustomResourceOptions
    Bag of options to control resource's behavior.
    resource_name str
    The unique name of the resource.
    args ClusterArgs
    The arguments to resource properties.
    opts ResourceOptions
    Bag of options to control resource's behavior.
    ctx Context
    Context object for the current deployment.
    name string
    The unique name of the resource.
    args ClusterArgs
    The arguments to resource properties.
    opts ResourceOption
    Bag of options to control resource's behavior.
    name string
    The unique name of the resource.
    args ClusterArgs
    The arguments to resource properties.
    opts CustomResourceOptions
    Bag of options to control resource's behavior.
    name String
    The unique name of the resource.
    args ClusterArgs
    The arguments to resource properties.
    options CustomResourceOptions
    Bag of options to control resource's behavior.

    Cluster Resource Properties

    To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.

    Inputs

    The Cluster resource accepts the following input properties:

    AgentEnvVars List<ClusterAgentEnvVar>

    Optional Agent Env Vars for Rancher agent. For Rancher v2.5.6 and above (list)

    AksConfig ClusterAksConfig

    The Azure AKS configuration for aks Clusters. Conflicts with aks_config_v2, eks_config, eks_config_v2, gke_config, gke_config_v2, oke_config k3s_config and rke_config (list maxitems:1)

    AksConfigV2 ClusterAksConfigV2

    The Azure AKS v2 configuration for creating/importing aks Clusters. Conflicts with aks_config, eks_config, eks_config_v2, gke_config, gke_config_v2, oke_config, k3s_config and rke_config (list maxitems:1)

    Annotations Dictionary<string, object>

    Annotations for the Cluster (map)

    ClusterAgentDeploymentCustomizations List<ClusterClusterAgentDeploymentCustomization>

    Optional customization for cluster agent. For Rancher v2.7.5 and above (list)

    ClusterAuthEndpoint ClusterClusterAuthEndpoint

    Enabling the local cluster authorized endpoint allows direct communication with the cluster, bypassing the Rancher API proxy. (list maxitems:1)

    ClusterMonitoringInput ClusterClusterMonitoringInput

    Cluster monitoring config. Any parameter defined in rancher-monitoring charts could be configured (list maxitems:1)

    ClusterTemplateAnswers ClusterClusterTemplateAnswers

    Cluster template answers. For Rancher v2.3.x and above (list maxitems:1)

    ClusterTemplateId string

    Cluster template ID. For Rancher v2.3.x and above (string)

    ClusterTemplateQuestions List<ClusterClusterTemplateQuestion>

    Cluster template questions. For Rancher v2.3.x and above (list)

    ClusterTemplateRevisionId string

    Cluster template revision ID. For Rancher v2.3.x and above (string)

    DefaultPodSecurityAdmissionConfigurationTemplateName string

    Cluster default pod security admission configuration template name (string)

    DefaultPodSecurityPolicyTemplateId string

    Default pod security policy template id (string)

    Description string

    The description for Cluster (string)

    DesiredAgentImage string

    Desired agent image. For Rancher v2.3.x and above (string)

    DesiredAuthImage string

    Desired auth image. For Rancher v2.3.x and above (string)

    DockerRootDir string

    Docker root dir for the Cluster. Default: `/var/lib/docker` (string)

    Driver string

    (Computed) The driver used for the Cluster. imported, azurekubernetesservice, amazonelasticcontainerservice, googlekubernetesengine and rancherKubernetesEngine are supported (string)

    EksConfig ClusterEksConfig

    The Amazon EKS configuration for eks Clusters. Conflicts with aks_config, aks_config_v2, eks_config_v2, gke_config, gke_config_v2, oke_config k3s_config and rke_config (list maxitems:1)

    EksConfigV2 ClusterEksConfigV2

    The Amazon EKS V2 configuration to create or import eks Clusters. Conflicts with aks_config, eks_config, gke_config, gke_config_v2, oke_config k3s_config and rke_config. For Rancher v2.5.x and above (list maxitems:1)

    EnableClusterAlerting bool

    Enable built-in cluster alerting (bool)

    EnableClusterMonitoring bool

    Enable built-in cluster monitoring (bool)

    EnableNetworkPolicy bool

    Enable project network isolation (bool)

    FleetAgentDeploymentCustomizations List<ClusterFleetAgentDeploymentCustomization>

    Optional customization for fleet agent. For Rancher v2.7.5 and above (list)

    FleetWorkspaceName string

    Fleet workspace name (string)

    GkeConfig ClusterGkeConfig

    The Google GKE configuration for gke Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config_v2, oke_config, k3s_config and rke_config (list maxitems:1)

    GkeConfigV2 ClusterGkeConfigV2

    The Google GKE V2 configuration for gke Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config, oke_config, k3s_config and rke_config. For Rancher v2.5.8 and above (list maxitems:1)

    K3sConfig ClusterK3sConfig

    The K3S configuration for k3s imported Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config, gke_config_v2, oke_config and rke_config (list maxitems:1)

    Labels Dictionary<string, object>

    Labels for the Cluster (map)

    Name string

    The name of the Cluster (string)

    OkeConfig ClusterOkeConfig

    The Oracle OKE configuration for oke Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config, gke_config_v2, k3s_config and rke_config (list maxitems:1)

    Rke2Config ClusterRke2Config

    The RKE2 configuration for rke2 Clusters. Conflicts with aks_config, aks_config_v2, eks_config, gke_config, oke_config, k3s_config and rke_config (list maxitems:1)

    RkeConfig ClusterRkeConfig

    The RKE configuration for rke Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config, gke_config_v2, oke_config and k3s_config (list maxitems:1)

    WindowsPreferedCluster bool

    Windows preferred cluster. Default: false (bool)

    AgentEnvVars []ClusterAgentEnvVarArgs

    Optional Agent Env Vars for Rancher agent. For Rancher v2.5.6 and above (list)

    AksConfig ClusterAksConfigArgs

    The Azure AKS configuration for aks Clusters. Conflicts with aks_config_v2, eks_config, eks_config_v2, gke_config, gke_config_v2, oke_config k3s_config and rke_config (list maxitems:1)

    AksConfigV2 ClusterAksConfigV2Args

    The Azure AKS v2 configuration for creating/importing aks Clusters. Conflicts with aks_config, eks_config, eks_config_v2, gke_config, gke_config_v2, oke_config, k3s_config and rke_config (list maxitems:1)

    Annotations map[string]interface{}

    Annotations for the Cluster (map)

    ClusterAgentDeploymentCustomizations []ClusterClusterAgentDeploymentCustomizationArgs

    Optional customization for cluster agent. For Rancher v2.7.5 and above (list)

    ClusterAuthEndpoint ClusterClusterAuthEndpointArgs

    Enabling the local cluster authorized endpoint allows direct communication with the cluster, bypassing the Rancher API proxy. (list maxitems:1)

    ClusterMonitoringInput ClusterClusterMonitoringInputArgs

    Cluster monitoring config. Any parameter defined in rancher-monitoring charts could be configured (list maxitems:1)

    ClusterTemplateAnswers ClusterClusterTemplateAnswersArgs

    Cluster template answers. For Rancher v2.3.x and above (list maxitems:1)

    ClusterTemplateId string

    Cluster template ID. For Rancher v2.3.x and above (string)

    ClusterTemplateQuestions []ClusterClusterTemplateQuestionArgs

    Cluster template questions. For Rancher v2.3.x and above (list)

    ClusterTemplateRevisionId string

    Cluster template revision ID. For Rancher v2.3.x and above (string)

    DefaultPodSecurityAdmissionConfigurationTemplateName string

    Cluster default pod security admission configuration template name (string)

    DefaultPodSecurityPolicyTemplateId string

    Default pod security policy template id (string)

    Description string

    The description for Cluster (string)

    DesiredAgentImage string

    Desired agent image. For Rancher v2.3.x and above (string)

    DesiredAuthImage string

    Desired auth image. For Rancher v2.3.x and above (string)

    DockerRootDir string

    Docker root dir for the Cluster. Default: `/var/lib/docker` (string)

    Driver string

    (Computed) The driver used for the Cluster. imported, azurekubernetesservice, amazonelasticcontainerservice, googlekubernetesengine and rancherKubernetesEngine are supported (string)

    EksConfig ClusterEksConfigArgs

    The Amazon EKS configuration for eks Clusters. Conflicts with aks_config, aks_config_v2, eks_config_v2, gke_config, gke_config_v2, oke_config k3s_config and rke_config (list maxitems:1)

    EksConfigV2 ClusterEksConfigV2Args

    The Amazon EKS V2 configuration to create or import eks Clusters. Conflicts with aks_config, eks_config, gke_config, gke_config_v2, oke_config k3s_config and rke_config. For Rancher v2.5.x and above (list maxitems:1)

    EnableClusterAlerting bool

    Enable built-in cluster alerting (bool)

    EnableClusterMonitoring bool

    Enable built-in cluster monitoring (bool)

    EnableNetworkPolicy bool

    Enable project network isolation (bool)

    FleetAgentDeploymentCustomizations []ClusterFleetAgentDeploymentCustomizationArgs

    Optional customization for fleet agent. For Rancher v2.7.5 and above (list)

    FleetWorkspaceName string

    Fleet workspace name (string)

    GkeConfig ClusterGkeConfigArgs

    The Google GKE configuration for gke Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config_v2, oke_config, k3s_config and rke_config (list maxitems:1)

    GkeConfigV2 ClusterGkeConfigV2Args

    The Google GKE V2 configuration for gke Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config, oke_config, k3s_config and rke_config. For Rancher v2.5.8 and above (list maxitems:1)

    K3sConfig ClusterK3sConfigArgs

    The K3S configuration for k3s imported Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config, gke_config_v2, oke_config and rke_config (list maxitems:1)

    Labels map[string]interface{}

    Labels for the Cluster (map)

    Name string

    The name of the Cluster (string)

    OkeConfig ClusterOkeConfigArgs

    The Oracle OKE configuration for oke Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config, gke_config_v2, k3s_config and rke_config (list maxitems:1)

    Rke2Config ClusterRke2ConfigArgs

    The RKE2 configuration for rke2 Clusters. Conflicts with aks_config, aks_config_v2, eks_config, gke_config, oke_config, k3s_config and rke_config (list maxitems:1)

    RkeConfig ClusterRkeConfigArgs

    The RKE configuration for rke Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config, gke_config_v2, oke_config and k3s_config (list maxitems:1)

    WindowsPreferedCluster bool

    Windows preferred cluster. Default: false (bool)

    agentEnvVars List<ClusterAgentEnvVar>

    Optional Agent Env Vars for Rancher agent. For Rancher v2.5.6 and above (list)

    aksConfig ClusterAksConfig

    The Azure AKS configuration for aks Clusters. Conflicts with aks_config_v2, eks_config, eks_config_v2, gke_config, gke_config_v2, oke_config k3s_config and rke_config (list maxitems:1)

    aksConfigV2 ClusterAksConfigV2

    The Azure AKS v2 configuration for creating/importing aks Clusters. Conflicts with aks_config, eks_config, eks_config_v2, gke_config, gke_config_v2, oke_config, k3s_config and rke_config (list maxitems:1)

    annotations Map<String,Object>

    Annotations for the Cluster (map)

    clusterAgentDeploymentCustomizations List<ClusterClusterAgentDeploymentCustomization>

    Optional customization for cluster agent. For Rancher v2.7.5 and above (list)

    clusterAuthEndpoint ClusterClusterAuthEndpoint

    Enabling the local cluster authorized endpoint allows direct communication with the cluster, bypassing the Rancher API proxy. (list maxitems:1)

    clusterMonitoringInput ClusterClusterMonitoringInput

    Cluster monitoring config. Any parameter defined in rancher-monitoring charts could be configured (list maxitems:1)

    clusterTemplateAnswers ClusterClusterTemplateAnswers

    Cluster template answers. For Rancher v2.3.x and above (list maxitems:1)

    clusterTemplateId String

    Cluster template ID. For Rancher v2.3.x and above (string)

    clusterTemplateQuestions List<ClusterClusterTemplateQuestion>

    Cluster template questions. For Rancher v2.3.x and above (list)

    clusterTemplateRevisionId String

    Cluster template revision ID. For Rancher v2.3.x and above (string)

    defaultPodSecurityAdmissionConfigurationTemplateName String

    Cluster default pod security admission configuration template name (string)

    defaultPodSecurityPolicyTemplateId String

    Default pod security policy template id (string)

    description String

    The description for Cluster (string)

    desiredAgentImage String

    Desired agent image. For Rancher v2.3.x and above (string)

    desiredAuthImage String

    Desired auth image. For Rancher v2.3.x and above (string)

    dockerRootDir String

    Docker root dir for the Cluster. Default: `/var/lib/docker` (string)

    driver String

    (Computed) The driver used for the Cluster. imported, azurekubernetesservice, amazonelasticcontainerservice, googlekubernetesengine and rancherKubernetesEngine are supported (string)

    eksConfig ClusterEksConfig

    The Amazon EKS configuration for eks Clusters. Conflicts with aks_config, aks_config_v2, eks_config_v2, gke_config, gke_config_v2, oke_config k3s_config and rke_config (list maxitems:1)

    eksConfigV2 ClusterEksConfigV2

    The Amazon EKS V2 configuration to create or import eks Clusters. Conflicts with aks_config, eks_config, gke_config, gke_config_v2, oke_config k3s_config and rke_config. For Rancher v2.5.x and above (list maxitems:1)

    enableClusterAlerting Boolean

    Enable built-in cluster alerting (bool)

    enableClusterMonitoring Boolean

    Enable built-in cluster monitoring (bool)

    enableNetworkPolicy Boolean

    Enable project network isolation (bool)

    fleetAgentDeploymentCustomizations List<ClusterFleetAgentDeploymentCustomization>

    Optional customization for fleet agent. For Rancher v2.7.5 and above (list)

    fleetWorkspaceName String

    Fleet workspace name (string)

    gkeConfig ClusterGkeConfig

    The Google GKE configuration for gke Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config_v2, oke_config, k3s_config and rke_config (list maxitems:1)

    gkeConfigV2 ClusterGkeConfigV2

    The Google GKE V2 configuration for gke Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config, oke_config, k3s_config and rke_config. For Rancher v2.5.8 and above (list maxitems:1)

    k3sConfig ClusterK3sConfig

    The K3S configuration for k3s imported Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config, gke_config_v2, oke_config and rke_config (list maxitems:1)

    labels Map<String,Object>

    Labels for the Cluster (map)

    name String

    The name of the Cluster (string)

    okeConfig ClusterOkeConfig

    The Oracle OKE configuration for oke Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config, gke_config_v2, k3s_config and rke_config (list maxitems:1)

    rke2Config ClusterRke2Config

    The RKE2 configuration for rke2 Clusters. Conflicts with aks_config, aks_config_v2, eks_config, gke_config, oke_config, k3s_config and rke_config (list maxitems:1)

    rkeConfig ClusterRkeConfig

    The RKE configuration for rke Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config, gke_config_v2, oke_config and k3s_config (list maxitems:1)

    windowsPreferedCluster Boolean

    Windows preferred cluster. Default: false (bool)

    agentEnvVars ClusterAgentEnvVar[]

    Optional Agent Env Vars for Rancher agent. For Rancher v2.5.6 and above (list)

    aksConfig ClusterAksConfig

    The Azure AKS configuration for aks Clusters. Conflicts with aks_config_v2, eks_config, eks_config_v2, gke_config, gke_config_v2, oke_config k3s_config and rke_config (list maxitems:1)

    aksConfigV2 ClusterAksConfigV2

    The Azure AKS v2 configuration for creating/importing aks Clusters. Conflicts with aks_config, eks_config, eks_config_v2, gke_config, gke_config_v2, oke_config, k3s_config and rke_config (list maxitems:1)

    annotations {[key: string]: any}

    Annotations for the Cluster (map)

    clusterAgentDeploymentCustomizations ClusterClusterAgentDeploymentCustomization[]

    Optional customization for cluster agent. For Rancher v2.7.5 and above (list)

    clusterAuthEndpoint ClusterClusterAuthEndpoint

    Enabling the local cluster authorized endpoint allows direct communication with the cluster, bypassing the Rancher API proxy. (list maxitems:1)

    clusterMonitoringInput ClusterClusterMonitoringInput

    Cluster monitoring config. Any parameter defined in rancher-monitoring charts could be configured (list maxitems:1)

    clusterTemplateAnswers ClusterClusterTemplateAnswers

    Cluster template answers. For Rancher v2.3.x and above (list maxitems:1)

    clusterTemplateId string

    Cluster template ID. For Rancher v2.3.x and above (string)

    clusterTemplateQuestions ClusterClusterTemplateQuestion[]

    Cluster template questions. For Rancher v2.3.x and above (list)

    clusterTemplateRevisionId string

    Cluster template revision ID. For Rancher v2.3.x and above (string)

    defaultPodSecurityAdmissionConfigurationTemplateName string

    Cluster default pod security admission configuration template name (string)

    defaultPodSecurityPolicyTemplateId string

    Default pod security policy template id (string)

    description string

    The description for Cluster (string)

    desiredAgentImage string

    Desired agent image. For Rancher v2.3.x and above (string)

    desiredAuthImage string

    Desired auth image. For Rancher v2.3.x and above (string)

    dockerRootDir string

    Docker root dir for the Cluster. Default: `/var/lib/docker` (string)

    driver string

    (Computed) The driver used for the Cluster. imported, azurekubernetesservice, amazonelasticcontainerservice, googlekubernetesengine and rancherKubernetesEngine are supported (string)

    eksConfig ClusterEksConfig

    The Amazon EKS configuration for eks Clusters. Conflicts with aks_config, aks_config_v2, eks_config_v2, gke_config, gke_config_v2, oke_config k3s_config and rke_config (list maxitems:1)

    eksConfigV2 ClusterEksConfigV2

    The Amazon EKS V2 configuration to create or import eks Clusters. Conflicts with aks_config, eks_config, gke_config, gke_config_v2, oke_config k3s_config and rke_config. For Rancher v2.5.x and above (list maxitems:1)

    enableClusterAlerting boolean

    Enable built-in cluster alerting (bool)

    enableClusterMonitoring boolean

    Enable built-in cluster monitoring (bool)

    enableNetworkPolicy boolean

    Enable project network isolation (bool)

    fleetAgentDeploymentCustomizations ClusterFleetAgentDeploymentCustomization[]

    Optional customization for fleet agent. For Rancher v2.7.5 and above (list)

    fleetWorkspaceName string

    Fleet workspace name (string)

    gkeConfig ClusterGkeConfig

    The Google GKE configuration for gke Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config_v2, oke_config, k3s_config and rke_config (list maxitems:1)

    gkeConfigV2 ClusterGkeConfigV2

    The Google GKE V2 configuration for gke Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config, oke_config, k3s_config and rke_config. For Rancher v2.5.8 and above (list maxitems:1)

    k3sConfig ClusterK3sConfig

    The K3S configuration for k3s imported Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config, gke_config_v2, oke_config and rke_config (list maxitems:1)

    labels {[key: string]: any}

    Labels for the Cluster (map)

    name string

    The name of the Cluster (string)

    okeConfig ClusterOkeConfig

    The Oracle OKE configuration for oke Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config, gke_config_v2, k3s_config and rke_config (list maxitems:1)

    rke2Config ClusterRke2Config

    The RKE2 configuration for rke2 Clusters. Conflicts with aks_config, aks_config_v2, eks_config, gke_config, oke_config, k3s_config and rke_config (list maxitems:1)

    rkeConfig ClusterRkeConfig

    The RKE configuration for rke Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config, gke_config_v2, oke_config and k3s_config (list maxitems:1)

    windowsPreferedCluster boolean

    Windows preferred cluster. Default: false (bool)

    agent_env_vars Sequence[ClusterAgentEnvVarArgs]

    Optional Agent Env Vars for Rancher agent. For Rancher v2.5.6 and above (list)

    aks_config ClusterAksConfigArgs

    The Azure AKS configuration for aks Clusters. Conflicts with aks_config_v2, eks_config, eks_config_v2, gke_config, gke_config_v2, oke_config k3s_config and rke_config (list maxitems:1)

    aks_config_v2 ClusterAksConfigV2Args

    The Azure AKS v2 configuration for creating/importing aks Clusters. Conflicts with aks_config, eks_config, eks_config_v2, gke_config, gke_config_v2, oke_config, k3s_config and rke_config (list maxitems:1)

    annotations Mapping[str, Any]

    Annotations for the Cluster (map)

    cluster_agent_deployment_customizations Sequence[ClusterClusterAgentDeploymentCustomizationArgs]

    Optional customization for cluster agent. For Rancher v2.7.5 and above (list)

    cluster_auth_endpoint ClusterClusterAuthEndpointArgs

    Enabling the local cluster authorized endpoint allows direct communication with the cluster, bypassing the Rancher API proxy. (list maxitems:1)

    cluster_monitoring_input ClusterClusterMonitoringInputArgs

    Cluster monitoring config. Any parameter defined in rancher-monitoring charts could be configured (list maxitems:1)

    cluster_template_answers ClusterClusterTemplateAnswersArgs

    Cluster template answers. For Rancher v2.3.x and above (list maxitems:1)

    cluster_template_id str

    Cluster template ID. For Rancher v2.3.x and above (string)

    cluster_template_questions Sequence[ClusterClusterTemplateQuestionArgs]

    Cluster template questions. For Rancher v2.3.x and above (list)

    cluster_template_revision_id str

    Cluster template revision ID. For Rancher v2.3.x and above (string)

    default_pod_security_admission_configuration_template_name str

    Cluster default pod security admission configuration template name (string)

    default_pod_security_policy_template_id str

    Default pod security policy template id (string)

    description str

    The description for Cluster (string)

    desired_agent_image str

    Desired agent image. For Rancher v2.3.x and above (string)

    desired_auth_image str

    Desired auth image. For Rancher v2.3.x and above (string)

    docker_root_dir str

    Docker root dir for the Cluster. Default: `/var/lib/docker` (string)

    driver str

    (Computed) The driver used for the Cluster. imported, azurekubernetesservice, amazonelasticcontainerservice, googlekubernetesengine and rancherKubernetesEngine are supported (string)

    eks_config ClusterEksConfigArgs

    The Amazon EKS configuration for eks Clusters. Conflicts with aks_config, aks_config_v2, eks_config_v2, gke_config, gke_config_v2, oke_config k3s_config and rke_config (list maxitems:1)

    eks_config_v2 ClusterEksConfigV2Args

    The Amazon EKS V2 configuration to create or import eks Clusters. Conflicts with aks_config, eks_config, gke_config, gke_config_v2, oke_config k3s_config and rke_config. For Rancher v2.5.x and above (list maxitems:1)

    enable_cluster_alerting bool

    Enable built-in cluster alerting (bool)

    enable_cluster_monitoring bool

    Enable built-in cluster monitoring (bool)

    enable_network_policy bool

    Enable project network isolation (bool)

    fleet_agent_deployment_customizations Sequence[ClusterFleetAgentDeploymentCustomizationArgs]

    Optional customization for fleet agent. For Rancher v2.7.5 and above (list)

    fleet_workspace_name str

    Fleet workspace name (string)

    gke_config ClusterGkeConfigArgs

    The Google GKE configuration for gke Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config_v2, oke_config, k3s_config and rke_config (list maxitems:1)

    gke_config_v2 ClusterGkeConfigV2Args

    The Google GKE V2 configuration for gke Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config, oke_config, k3s_config and rke_config. For Rancher v2.5.8 and above (list maxitems:1)

    k3s_config ClusterK3sConfigArgs

    The K3S configuration for k3s imported Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config, gke_config_v2, oke_config and rke_config (list maxitems:1)

    labels Mapping[str, Any]

    Labels for the Cluster (map)

    name str

    The name of the Cluster (string)

    oke_config ClusterOkeConfigArgs

    The Oracle OKE configuration for oke Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config, gke_config_v2, k3s_config and rke_config (list maxitems:1)

    rke2_config ClusterRke2ConfigArgs

    The RKE2 configuration for rke2 Clusters. Conflicts with aks_config, aks_config_v2, eks_config, gke_config, oke_config, k3s_config and rke_config (list maxitems:1)

    rke_config ClusterRkeConfigArgs

    The RKE configuration for rke Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config, gke_config_v2, oke_config and k3s_config (list maxitems:1)

    windows_prefered_cluster bool

    Windows preferred cluster. Default: false (bool)

    agentEnvVars List<Property Map>

    Optional Agent Env Vars for Rancher agent. For Rancher v2.5.6 and above (list)

    aksConfig Property Map

    The Azure AKS configuration for aks Clusters. Conflicts with aks_config_v2, eks_config, eks_config_v2, gke_config, gke_config_v2, oke_config k3s_config and rke_config (list maxitems:1)

    aksConfigV2 Property Map

    The Azure AKS v2 configuration for creating/importing aks Clusters. Conflicts with aks_config, eks_config, eks_config_v2, gke_config, gke_config_v2, oke_config, k3s_config and rke_config (list maxitems:1)

    annotations Map<Any>

    Annotations for the Cluster (map)

    clusterAgentDeploymentCustomizations List<Property Map>

    Optional customization for cluster agent. For Rancher v2.7.5 and above (list)

    clusterAuthEndpoint Property Map

    Enabling the local cluster authorized endpoint allows direct communication with the cluster, bypassing the Rancher API proxy. (list maxitems:1)

    clusterMonitoringInput Property Map

    Cluster monitoring config. Any parameter defined in rancher-monitoring charts could be configured (list maxitems:1)

    clusterTemplateAnswers Property Map

    Cluster template answers. For Rancher v2.3.x and above (list maxitems:1)

    clusterTemplateId String

    Cluster template ID. For Rancher v2.3.x and above (string)

    clusterTemplateQuestions List<Property Map>

    Cluster template questions. For Rancher v2.3.x and above (list)

    clusterTemplateRevisionId String

    Cluster template revision ID. For Rancher v2.3.x and above (string)

    defaultPodSecurityAdmissionConfigurationTemplateName String

    Cluster default pod security admission configuration template name (string)

    defaultPodSecurityPolicyTemplateId String

    Default pod security policy template id (string)

    description String

    The description for Cluster (string)

    desiredAgentImage String

    Desired agent image. For Rancher v2.3.x and above (string)

    desiredAuthImage String

    Desired auth image. For Rancher v2.3.x and above (string)

    dockerRootDir String

    Docker root dir for the Cluster. Default: `/var/lib/docker` (string)

    driver String

    (Computed) The driver used for the Cluster. imported, azurekubernetesservice, amazonelasticcontainerservice, googlekubernetesengine and rancherKubernetesEngine are supported (string)

    eksConfig Property Map

    The Amazon EKS configuration for eks Clusters. Conflicts with aks_config, aks_config_v2, eks_config_v2, gke_config, gke_config_v2, oke_config k3s_config and rke_config (list maxitems:1)

    eksConfigV2 Property Map

    The Amazon EKS V2 configuration to create or import eks Clusters. Conflicts with aks_config, eks_config, gke_config, gke_config_v2, oke_config k3s_config and rke_config. For Rancher v2.5.x and above (list maxitems:1)

    enableClusterAlerting Boolean

    Enable built-in cluster alerting (bool)

    enableClusterMonitoring Boolean

    Enable built-in cluster monitoring (bool)

    enableNetworkPolicy Boolean

    Enable project network isolation (bool)

    fleetAgentDeploymentCustomizations List<Property Map>

    Optional customization for fleet agent. For Rancher v2.7.5 and above (list)

    fleetWorkspaceName String

    Fleet workspace name (string)

    gkeConfig Property Map

    The Google GKE configuration for gke Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config_v2, oke_config, k3s_config and rke_config (list maxitems:1)

    gkeConfigV2 Property Map

    The Google GKE V2 configuration for gke Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config, oke_config, k3s_config and rke_config. For Rancher v2.5.8 and above (list maxitems:1)

    k3sConfig Property Map

    The K3S configuration for k3s imported Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config, gke_config_v2, oke_config and rke_config (list maxitems:1)

    labels Map<Any>

    Labels for the Cluster (map)

    name String

    The name of the Cluster (string)

    okeConfig Property Map

    The Oracle OKE configuration for oke Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config, gke_config_v2, k3s_config and rke_config (list maxitems:1)

    rke2Config Property Map

    The RKE2 configuration for rke2 Clusters. Conflicts with aks_config, aks_config_v2, eks_config, gke_config, oke_config, k3s_config and rke_config (list maxitems:1)

    rkeConfig Property Map

    The RKE configuration for rke Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config, gke_config_v2, oke_config and k3s_config (list maxitems:1)

    windowsPreferedCluster Boolean

    Windows preferred cluster. Default: false (bool)

    Outputs

    All input properties are implicitly available as output properties. Additionally, the Cluster resource produces the following output properties:

    CaCert string

    TLS CA certificate for etcd service (string)

    ClusterRegistrationToken ClusterClusterRegistrationToken

    (Computed) Cluster Registration Token generated for the cluster (list maxitems:1)

    DefaultProjectId string

    (Computed) Default project ID for the cluster (string)

    EnableClusterIstio bool

    Deploy istio on system project and istio-system namespace, using rancher2.App resource instead. See above example.

    Deprecated:

    Deploy istio using rancher2_app resource instead

    Id string

    The provider-assigned unique ID for this managed resource.

    IstioEnabled bool

    (Computed) Is istio enabled at cluster? For Rancher v2.3.x and above (bool)

    KubeConfig string

    (Computed/Sensitive) Kube Config generated for the cluster. Note: For Rancher 2.6.0 and above, when the cluster has cluster_auth_endpoint enabled, the kube_config will not be available until the cluster is connected (string)

    SystemProjectId string

    (Computed) System project ID for the cluster (string)

    CaCert string

    TLS CA certificate for etcd service (string)

    ClusterRegistrationToken ClusterClusterRegistrationToken

    (Computed) Cluster Registration Token generated for the cluster (list maxitems:1)

    DefaultProjectId string

    (Computed) Default project ID for the cluster (string)

    EnableClusterIstio bool

    Deploy istio on system project and istio-system namespace, using rancher2.App resource instead. See above example.

    Deprecated:

    Deploy istio using rancher2_app resource instead

    Id string

    The provider-assigned unique ID for this managed resource.

    IstioEnabled bool

    (Computed) Is istio enabled at cluster? For Rancher v2.3.x and above (bool)

    KubeConfig string

    (Computed/Sensitive) Kube Config generated for the cluster. Note: For Rancher 2.6.0 and above, when the cluster has cluster_auth_endpoint enabled, the kube_config will not be available until the cluster is connected (string)

    SystemProjectId string

    (Computed) System project ID for the cluster (string)

    caCert String

    TLS CA certificate for etcd service (string)

    clusterRegistrationToken ClusterClusterRegistrationToken

    (Computed) Cluster Registration Token generated for the cluster (list maxitems:1)

    defaultProjectId String

    (Computed) Default project ID for the cluster (string)

    enableClusterIstio Boolean

    Deploy istio on system project and istio-system namespace, using rancher2.App resource instead. See above example.

    Deprecated:

    Deploy istio using rancher2_app resource instead

    id String

    The provider-assigned unique ID for this managed resource.

    istioEnabled Boolean

    (Computed) Is istio enabled at cluster? For Rancher v2.3.x and above (bool)

    kubeConfig String

    (Computed/Sensitive) Kube Config generated for the cluster. Note: For Rancher 2.6.0 and above, when the cluster has cluster_auth_endpoint enabled, the kube_config will not be available until the cluster is connected (string)

    systemProjectId String

    (Computed) System project ID for the cluster (string)

    caCert string

    TLS CA certificate for etcd service (string)

    clusterRegistrationToken ClusterClusterRegistrationToken

    (Computed) Cluster Registration Token generated for the cluster (list maxitems:1)

    defaultProjectId string

    (Computed) Default project ID for the cluster (string)

    enableClusterIstio boolean

    Deploy istio on system project and istio-system namespace, using rancher2.App resource instead. See above example.

    Deprecated:

    Deploy istio using rancher2_app resource instead

    id string

    The provider-assigned unique ID for this managed resource.

    istioEnabled boolean

    (Computed) Is istio enabled at cluster? For Rancher v2.3.x and above (bool)

    kubeConfig string

    (Computed/Sensitive) Kube Config generated for the cluster. Note: For Rancher 2.6.0 and above, when the cluster has cluster_auth_endpoint enabled, the kube_config will not be available until the cluster is connected (string)

    systemProjectId string

    (Computed) System project ID for the cluster (string)

    ca_cert str

    TLS CA certificate for etcd service (string)

    cluster_registration_token ClusterClusterRegistrationToken

    (Computed) Cluster Registration Token generated for the cluster (list maxitems:1)

    default_project_id str

    (Computed) Default project ID for the cluster (string)

    enable_cluster_istio bool

    Deploy istio on system project and istio-system namespace, using rancher2.App resource instead. See above example.

    Deprecated:

    Deploy istio using rancher2_app resource instead

    id str

    The provider-assigned unique ID for this managed resource.

    istio_enabled bool

    (Computed) Is istio enabled at cluster? For Rancher v2.3.x and above (bool)

    kube_config str

    (Computed/Sensitive) Kube Config generated for the cluster. Note: For Rancher 2.6.0 and above, when the cluster has cluster_auth_endpoint enabled, the kube_config will not be available until the cluster is connected (string)

    system_project_id str

    (Computed) System project ID for the cluster (string)

    caCert String

    TLS CA certificate for etcd service (string)

    clusterRegistrationToken Property Map

    (Computed) Cluster Registration Token generated for the cluster (list maxitems:1)

    defaultProjectId String

    (Computed) Default project ID for the cluster (string)

    enableClusterIstio Boolean

    Deploy istio on system project and istio-system namespace, using rancher2.App resource instead. See above example.

    Deprecated:

    Deploy istio using rancher2_app resource instead

    id String

    The provider-assigned unique ID for this managed resource.

    istioEnabled Boolean

    (Computed) Is istio enabled at cluster? For Rancher v2.3.x and above (bool)

    kubeConfig String

    (Computed/Sensitive) Kube Config generated for the cluster. Note: For Rancher 2.6.0 and above, when the cluster has cluster_auth_endpoint enabled, the kube_config will not be available until the cluster is connected (string)

    systemProjectId String

    (Computed) System project ID for the cluster (string)

    Look up Existing Cluster Resource

    Get an existing Cluster resource’s state with the given name, ID, and optional extra properties used to qualify the lookup.

    public static get(name: string, id: Input<ID>, state?: ClusterState, opts?: CustomResourceOptions): Cluster
    @staticmethod
    def get(resource_name: str,
            id: str,
            opts: Optional[ResourceOptions] = None,
            agent_env_vars: Optional[Sequence[ClusterAgentEnvVarArgs]] = None,
            aks_config: Optional[ClusterAksConfigArgs] = None,
            aks_config_v2: Optional[ClusterAksConfigV2Args] = None,
            annotations: Optional[Mapping[str, Any]] = None,
            ca_cert: Optional[str] = None,
            cluster_agent_deployment_customizations: Optional[Sequence[ClusterClusterAgentDeploymentCustomizationArgs]] = None,
            cluster_auth_endpoint: Optional[ClusterClusterAuthEndpointArgs] = None,
            cluster_monitoring_input: Optional[ClusterClusterMonitoringInputArgs] = None,
            cluster_registration_token: Optional[ClusterClusterRegistrationTokenArgs] = None,
            cluster_template_answers: Optional[ClusterClusterTemplateAnswersArgs] = None,
            cluster_template_id: Optional[str] = None,
            cluster_template_questions: Optional[Sequence[ClusterClusterTemplateQuestionArgs]] = None,
            cluster_template_revision_id: Optional[str] = None,
            default_pod_security_admission_configuration_template_name: Optional[str] = None,
            default_pod_security_policy_template_id: Optional[str] = None,
            default_project_id: Optional[str] = None,
            description: Optional[str] = None,
            desired_agent_image: Optional[str] = None,
            desired_auth_image: Optional[str] = None,
            docker_root_dir: Optional[str] = None,
            driver: Optional[str] = None,
            eks_config: Optional[ClusterEksConfigArgs] = None,
            eks_config_v2: Optional[ClusterEksConfigV2Args] = None,
            enable_cluster_alerting: Optional[bool] = None,
            enable_cluster_istio: Optional[bool] = None,
            enable_cluster_monitoring: Optional[bool] = None,
            enable_network_policy: Optional[bool] = None,
            fleet_agent_deployment_customizations: Optional[Sequence[ClusterFleetAgentDeploymentCustomizationArgs]] = None,
            fleet_workspace_name: Optional[str] = None,
            gke_config: Optional[ClusterGkeConfigArgs] = None,
            gke_config_v2: Optional[ClusterGkeConfigV2Args] = None,
            istio_enabled: Optional[bool] = None,
            k3s_config: Optional[ClusterK3sConfigArgs] = None,
            kube_config: Optional[str] = None,
            labels: Optional[Mapping[str, Any]] = None,
            name: Optional[str] = None,
            oke_config: Optional[ClusterOkeConfigArgs] = None,
            rke2_config: Optional[ClusterRke2ConfigArgs] = None,
            rke_config: Optional[ClusterRkeConfigArgs] = None,
            system_project_id: Optional[str] = None,
            windows_prefered_cluster: Optional[bool] = None) -> Cluster
    func GetCluster(ctx *Context, name string, id IDInput, state *ClusterState, opts ...ResourceOption) (*Cluster, error)
    public static Cluster Get(string name, Input<string> id, ClusterState? state, CustomResourceOptions? opts = null)
    public static Cluster get(String name, Output<String> id, ClusterState state, CustomResourceOptions options)
    Resource lookup is not supported in YAML
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    resource_name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    The following state arguments are supported:
    AgentEnvVars List<ClusterAgentEnvVar>

    Optional Agent Env Vars for Rancher agent. For Rancher v2.5.6 and above (list)

    AksConfig ClusterAksConfig

    The Azure AKS configuration for aks Clusters. Conflicts with aks_config_v2, eks_config, eks_config_v2, gke_config, gke_config_v2, oke_config, k3s_config and rke_config (list maxitems:1)

    AksConfigV2 ClusterAksConfigV2

    The Azure AKS v2 configuration for creating/importing aks Clusters. Conflicts with aks_config, eks_config, eks_config_v2, gke_config, gke_config_v2, oke_config, k3s_config and rke_config (list maxitems:1)

    Annotations Dictionary<string, object>

    Annotations for the Cluster (map)

    CaCert string

    TLS CA certificate for etcd service (string)

    ClusterAgentDeploymentCustomizations List<ClusterClusterAgentDeploymentCustomization>

    Optional customization for cluster agent. For Rancher v2.7.5 and above (list)

    ClusterAuthEndpoint ClusterClusterAuthEndpoint

    Enabling the local cluster authorized endpoint allows direct communication with the cluster, bypassing the Rancher API proxy. (list maxitems:1)

    ClusterMonitoringInput ClusterClusterMonitoringInput

    Cluster monitoring config. Any parameter defined in rancher-monitoring charts could be configured (list maxitems:1)

    ClusterRegistrationToken ClusterClusterRegistrationToken

    (Computed) Cluster Registration Token generated for the cluster (list maxitems:1)

    ClusterTemplateAnswers ClusterClusterTemplateAnswers

    Cluster template answers. For Rancher v2.3.x and above (list maxitems:1)

    ClusterTemplateId string

    Cluster template ID. For Rancher v2.3.x and above (string)

    ClusterTemplateQuestions List<ClusterClusterTemplateQuestion>

    Cluster template questions. For Rancher v2.3.x and above (list)

    ClusterTemplateRevisionId string

    Cluster template revision ID. For Rancher v2.3.x and above (string)

    DefaultPodSecurityAdmissionConfigurationTemplateName string

    Cluster default pod security admission configuration template name (string)

    DefaultPodSecurityPolicyTemplateId string

    Default pod security policy template id (string)

    DefaultProjectId string

    (Computed) Default project ID for the cluster (string)

    Description string

    The description for Cluster (string)

    DesiredAgentImage string

    Desired agent image. For Rancher v2.3.x and above (string)

    DesiredAuthImage string

    Desired auth image. For Rancher v2.3.x and above (string)

    DockerRootDir string

    Docker Root Dir. Default `/var/lib/docker` (string)

    Driver string

    (Computed) The driver used for the Cluster. imported, azurekubernetesservice, amazonelasticcontainerservice, googlekubernetesengine and rancherKubernetesEngine are supported (string)

    EksConfig ClusterEksConfig

    The Amazon EKS configuration for eks Clusters. Conflicts with aks_config, aks_config_v2, eks_config_v2, gke_config, gke_config_v2, oke_config, k3s_config and rke_config (list maxitems:1)

    EksConfigV2 ClusterEksConfigV2

    The Amazon EKS V2 configuration to create or import eks Clusters. Conflicts with aks_config, eks_config, gke_config, gke_config_v2, oke_config, k3s_config and rke_config. For Rancher v2.5.x and above (list maxitems:1)

    EnableClusterAlerting bool

    Enable built-in cluster alerting (bool)

    EnableClusterIstio bool

    Deploy istio on system project and istio-system namespace, using rancher2.App resource instead. See above example.

    Deprecated:

    Deploy istio using rancher2_app resource instead

    EnableClusterMonitoring bool

    Enable built-in cluster monitoring (bool)

    EnableNetworkPolicy bool

    Enable project network isolation (bool)

    FleetAgentDeploymentCustomizations List<ClusterFleetAgentDeploymentCustomization>

    Optional customization for fleet agent. For Rancher v2.7.5 and above (list)

    FleetWorkspaceName string

    Fleet workspace name (string)

    GkeConfig ClusterGkeConfig

    The Google GKE configuration for gke Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config_v2, oke_config, k3s_config and rke_config (list maxitems:1)

    GkeConfigV2 ClusterGkeConfigV2

    The Google GKE V2 configuration for gke Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config, oke_config, k3s_config and rke_config. For Rancher v2.5.8 and above (list maxitems:1)

    IstioEnabled bool

    (Computed) Is istio enabled at cluster? For Rancher v2.3.x and above (bool)

    K3sConfig ClusterK3sConfig

    The K3S configuration for k3s imported Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config, gke_config_v2, oke_config and rke_config (list maxitems:1)

    KubeConfig string

    (Computed/Sensitive) Kube Config generated for the cluster. Note: For Rancher 2.6.0 and above, when the cluster has cluster_auth_endpoint enabled, the kube_config will not be available until the cluster is connected (string)

    Labels Dictionary<string, object>

    Labels for the Cluster (map)

    Name string

    The name of the Cluster (string)

    OkeConfig ClusterOkeConfig

    The Oracle OKE configuration for oke Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config, gke_config_v2, k3s_config and rke_config (list maxitems:1)

    Rke2Config ClusterRke2Config

    The RKE2 configuration for rke2 Clusters. Conflicts with aks_config, aks_config_v2, eks_config, gke_config, oke_config, k3s_config and rke_config (list maxitems:1)

    RkeConfig ClusterRkeConfig

    The RKE configuration for rke Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config, gke_config_v2, oke_config and k3s_config (list maxitems:1)

    SystemProjectId string

    (Computed) System project ID for the cluster (string)

    WindowsPreferedCluster bool

    Windows preferred cluster. Default: false (bool)

    AgentEnvVars []ClusterAgentEnvVarArgs

    Optional Agent Env Vars for Rancher agent. For Rancher v2.5.6 and above (list)

    AksConfig ClusterAksConfigArgs

    The Azure AKS configuration for aks Clusters. Conflicts with aks_config_v2, eks_config, eks_config_v2, gke_config, gke_config_v2, oke_config, k3s_config and rke_config (list maxitems:1)

    AksConfigV2 ClusterAksConfigV2Args

    The Azure AKS v2 configuration for creating/importing aks Clusters. Conflicts with aks_config, eks_config, eks_config_v2, gke_config, gke_config_v2, oke_config, k3s_config and rke_config (list maxitems:1)

    Annotations map[string]interface{}

    Annotations for the Cluster (map)

    CaCert string

    TLS CA certificate for etcd service (string)

    ClusterAgentDeploymentCustomizations []ClusterClusterAgentDeploymentCustomizationArgs

    Optional customization for cluster agent. For Rancher v2.7.5 and above (list)

    ClusterAuthEndpoint ClusterClusterAuthEndpointArgs

    Enabling the local cluster authorized endpoint allows direct communication with the cluster, bypassing the Rancher API proxy. (list maxitems:1)

    ClusterMonitoringInput ClusterClusterMonitoringInputArgs

    Cluster monitoring config. Any parameter defined in rancher-monitoring charts could be configured (list maxitems:1)

    ClusterRegistrationToken ClusterClusterRegistrationTokenArgs

    (Computed) Cluster Registration Token generated for the cluster (list maxitems:1)

    ClusterTemplateAnswers ClusterClusterTemplateAnswersArgs

    Cluster template answers. For Rancher v2.3.x and above (list maxitems:1)

    ClusterTemplateId string

    Cluster template ID. For Rancher v2.3.x and above (string)

    ClusterTemplateQuestions []ClusterClusterTemplateQuestionArgs

    Cluster template questions. For Rancher v2.3.x and above (list)

    ClusterTemplateRevisionId string

    Cluster template revision ID. For Rancher v2.3.x and above (string)

    DefaultPodSecurityAdmissionConfigurationTemplateName string

    Cluster default pod security admission configuration template name (string)

    DefaultPodSecurityPolicyTemplateId string

    Default pod security policy template id (string)

    DefaultProjectId string

    (Computed) Default project ID for the cluster (string)

    Description string

    The description for Cluster (string)

    DesiredAgentImage string

    Desired agent image. For Rancher v2.3.x and above (string)

    DesiredAuthImage string

    Desired auth image. For Rancher v2.3.x and above (string)

    DockerRootDir string

    Docker Root Dir. Default `/var/lib/docker` (string)

    Driver string

    (Computed) The driver used for the Cluster. imported, azurekubernetesservice, amazonelasticcontainerservice, googlekubernetesengine and rancherKubernetesEngine are supported (string)

    EksConfig ClusterEksConfigArgs

    The Amazon EKS configuration for eks Clusters. Conflicts with aks_config, aks_config_v2, eks_config_v2, gke_config, gke_config_v2, oke_config, k3s_config and rke_config (list maxitems:1)

    EksConfigV2 ClusterEksConfigV2Args

    The Amazon EKS V2 configuration to create or import eks Clusters. Conflicts with aks_config, eks_config, gke_config, gke_config_v2, oke_config, k3s_config and rke_config. For Rancher v2.5.x and above (list maxitems:1)

    EnableClusterAlerting bool

    Enable built-in cluster alerting (bool)

    EnableClusterIstio bool

    Deploy istio on system project and istio-system namespace, using rancher2.App resource instead. See above example.

    Deprecated:

    Deploy istio using rancher2_app resource instead

    EnableClusterMonitoring bool

    Enable built-in cluster monitoring (bool)

    EnableNetworkPolicy bool

    Enable project network isolation (bool)

    FleetAgentDeploymentCustomizations []ClusterFleetAgentDeploymentCustomizationArgs

    Optional customization for fleet agent. For Rancher v2.7.5 and above (list)

    FleetWorkspaceName string

    Fleet workspace name (string)

    GkeConfig ClusterGkeConfigArgs

    The Google GKE configuration for gke Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config_v2, oke_config, k3s_config and rke_config (list maxitems:1)

    GkeConfigV2 ClusterGkeConfigV2Args

    The Google GKE V2 configuration for gke Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config, oke_config, k3s_config and rke_config. For Rancher v2.5.8 and above (list maxitems:1)

    IstioEnabled bool

    (Computed) Is istio enabled at cluster? For Rancher v2.3.x and above (bool)

    K3sConfig ClusterK3sConfigArgs

    The K3S configuration for k3s imported Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config, gke_config_v2, oke_config and rke_config (list maxitems:1)

    KubeConfig string

    (Computed/Sensitive) Kube Config generated for the cluster. Note: For Rancher 2.6.0 and above, when the cluster has cluster_auth_endpoint enabled, the kube_config will not be available until the cluster is connected (string)

    Labels map[string]interface{}

    Labels for the Cluster (map)

    Name string

    The name of the Cluster (string)

    OkeConfig ClusterOkeConfigArgs

    The Oracle OKE configuration for oke Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config, gke_config_v2, k3s_config and rke_config (list maxitems:1)

    Rke2Config ClusterRke2ConfigArgs

    The RKE2 configuration for rke2 Clusters. Conflicts with aks_config, aks_config_v2, eks_config, gke_config, oke_config, k3s_config and rke_config (list maxitems:1)

    RkeConfig ClusterRkeConfigArgs

    The RKE configuration for rke Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config, gke_config_v2, oke_config and k3s_config (list maxitems:1)

    SystemProjectId string

    (Computed) System project ID for the cluster (string)

    WindowsPreferedCluster bool

    Windows preferred cluster. Default: false (bool)

    agentEnvVars List<ClusterAgentEnvVar>

    Optional Agent Env Vars for Rancher agent. For Rancher v2.5.6 and above (list)

    aksConfig ClusterAksConfig

    The Azure AKS configuration for aks Clusters. Conflicts with aks_config_v2, eks_config, eks_config_v2, gke_config, gke_config_v2, oke_config, k3s_config and rke_config (list maxitems:1)

    aksConfigV2 ClusterAksConfigV2

    The Azure AKS v2 configuration for creating/importing aks Clusters. Conflicts with aks_config, eks_config, eks_config_v2, gke_config, gke_config_v2, oke_config, k3s_config and rke_config (list maxitems:1)

    annotations Map<String,Object>

    Annotations for the Cluster (map)

    caCert String

    TLS CA certificate for etcd service (string)

    clusterAgentDeploymentCustomizations List<ClusterClusterAgentDeploymentCustomization>

    Optional customization for cluster agent. For Rancher v2.7.5 and above (list)

    clusterAuthEndpoint ClusterClusterAuthEndpoint

    Enabling the local cluster authorized endpoint allows direct communication with the cluster, bypassing the Rancher API proxy. (list maxitems:1)

    clusterMonitoringInput ClusterClusterMonitoringInput

    Cluster monitoring config. Any parameter defined in rancher-monitoring charts could be configured (list maxitems:1)

    clusterRegistrationToken ClusterClusterRegistrationToken

    (Computed) Cluster Registration Token generated for the cluster (list maxitems:1)

    clusterTemplateAnswers ClusterClusterTemplateAnswers

    Cluster template answers. For Rancher v2.3.x and above (list maxitems:1)

    clusterTemplateId String

    Cluster template ID. For Rancher v2.3.x and above (string)

    clusterTemplateQuestions List<ClusterClusterTemplateQuestion>

    Cluster template questions. For Rancher v2.3.x and above (list)

    clusterTemplateRevisionId String

    Cluster template revision ID. For Rancher v2.3.x and above (string)

    defaultPodSecurityAdmissionConfigurationTemplateName String

    Cluster default pod security admission configuration template name (string)

    defaultPodSecurityPolicyTemplateId String

    Default pod security policy template id (string)

    defaultProjectId String

    (Computed) Default project ID for the cluster (string)

    description String

    The description for Cluster (string)

    desiredAgentImage String

    Desired agent image. For Rancher v2.3.x and above (string)

    desiredAuthImage String

    Desired auth image. For Rancher v2.3.x and above (string)

    dockerRootDir String

    Docker Root Dir. Default `/var/lib/docker` (string)

    driver String

    (Computed) The driver used for the Cluster. imported, azurekubernetesservice, amazonelasticcontainerservice, googlekubernetesengine and rancherKubernetesEngine are supported (string)

    eksConfig ClusterEksConfig

    The Amazon EKS configuration for eks Clusters. Conflicts with aks_config, aks_config_v2, eks_config_v2, gke_config, gke_config_v2, oke_config, k3s_config and rke_config (list maxitems:1)

    eksConfigV2 ClusterEksConfigV2

    The Amazon EKS V2 configuration to create or import eks Clusters. Conflicts with aks_config, eks_config, gke_config, gke_config_v2, oke_config, k3s_config and rke_config. For Rancher v2.5.x and above (list maxitems:1)

    enableClusterAlerting Boolean

    Enable built-in cluster alerting (bool)

    enableClusterIstio Boolean

    Deploy istio on system project and istio-system namespace, using rancher2.App resource instead. See above example.

    Deprecated:

    Deploy istio using rancher2_app resource instead

    enableClusterMonitoring Boolean

    Enable built-in cluster monitoring (bool)

    enableNetworkPolicy Boolean

    Enable project network isolation (bool)

    fleetAgentDeploymentCustomizations List<ClusterFleetAgentDeploymentCustomization>

    Optional customization for fleet agent. For Rancher v2.7.5 and above (list)

    fleetWorkspaceName String

    Fleet workspace name (string)

    gkeConfig ClusterGkeConfig

    The Google GKE configuration for gke Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config_v2, oke_config, k3s_config and rke_config (list maxitems:1)

    gkeConfigV2 ClusterGkeConfigV2

    The Google GKE V2 configuration for gke Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config, oke_config, k3s_config and rke_config. For Rancher v2.5.8 and above (list maxitems:1)

    istioEnabled Boolean

    (Computed) Is istio enabled at cluster? For Rancher v2.3.x and above (bool)

    k3sConfig ClusterK3sConfig

    The K3S configuration for k3s imported Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config, gke_config_v2, oke_config and rke_config (list maxitems:1)

    kubeConfig String

    (Computed/Sensitive) Kube Config generated for the cluster. Note: For Rancher 2.6.0 and above, when the cluster has cluster_auth_endpoint enabled, the kube_config will not be available until the cluster is connected (string)

    labels Map<String,Object>

    Labels for the Cluster (map)

    name String

    The name of the Cluster (string)

    okeConfig ClusterOkeConfig

    The Oracle OKE configuration for oke Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config, gke_config_v2, k3s_config and rke_config (list maxitems:1)

    rke2Config ClusterRke2Config

    The RKE2 configuration for rke2 Clusters. Conflicts with aks_config, aks_config_v2, eks_config, gke_config, oke_config, k3s_config and rke_config (list maxitems:1)

    rkeConfig ClusterRkeConfig

    The RKE configuration for rke Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config, gke_config_v2, oke_config and k3s_config (list maxitems:1)

    systemProjectId String

    (Computed) System project ID for the cluster (string)

    windowsPreferedCluster Boolean

    Windows preferred cluster. Default: false (bool)

    agentEnvVars ClusterAgentEnvVar[]

    Optional Agent Env Vars for Rancher agent. For Rancher v2.5.6 and above (list)

    aksConfig ClusterAksConfig

    The Azure AKS configuration for aks Clusters. Conflicts with aks_config_v2, eks_config, eks_config_v2, gke_config, gke_config_v2, oke_config, k3s_config and rke_config (list maxitems:1)

    aksConfigV2 ClusterAksConfigV2

    The Azure AKS v2 configuration for creating/importing aks Clusters. Conflicts with aks_config, eks_config, eks_config_v2, gke_config, gke_config_v2, oke_config, k3s_config and rke_config (list maxitems:1)

    annotations {[key: string]: any}

    Annotations for the Cluster (map)

    caCert string

    TLS CA certificate for etcd service (string)

    clusterAgentDeploymentCustomizations ClusterClusterAgentDeploymentCustomization[]

    Optional customization for cluster agent. For Rancher v2.7.5 and above (list)

    clusterAuthEndpoint ClusterClusterAuthEndpoint

    Enabling the local cluster authorized endpoint allows direct communication with the cluster, bypassing the Rancher API proxy. (list maxitems:1)

    clusterMonitoringInput ClusterClusterMonitoringInput

    Cluster monitoring config. Any parameter defined in rancher-monitoring charts could be configured (list maxitems:1)

    clusterRegistrationToken ClusterClusterRegistrationToken

    (Computed) Cluster Registration Token generated for the cluster (list maxitems:1)

    clusterTemplateAnswers ClusterClusterTemplateAnswers

    Cluster template answers. For Rancher v2.3.x and above (list maxitems:1)

    clusterTemplateId string

    Cluster template ID. For Rancher v2.3.x and above (string)

    clusterTemplateQuestions ClusterClusterTemplateQuestion[]

    Cluster template questions. For Rancher v2.3.x and above (list)

    clusterTemplateRevisionId string

    Cluster template revision ID. For Rancher v2.3.x and above (string)

    defaultPodSecurityAdmissionConfigurationTemplateName string

    Cluster default pod security admission configuration template name (string)

    defaultPodSecurityPolicyTemplateId string

    Default pod security policy template id (string)

    defaultProjectId string

    (Computed) Default project ID for the cluster (string)

    description string

    The description for Cluster (string)

    desiredAgentImage string

    Desired agent image. For Rancher v2.3.x and above (string)

    desiredAuthImage string

    Desired auth image. For Rancher v2.3.x and above (string)

    dockerRootDir string

    Docker root directory for the Cluster. Default `/var/lib/docker` (string)

    driver string

    (Computed) The driver used for the Cluster. imported, azurekubernetesservice, amazonelasticcontainerservice, googlekubernetesengine and rancherKubernetesEngine are supported (string)

    eksConfig ClusterEksConfig

    The Amazon EKS configuration for eks Clusters. Conflicts with aks_config, aks_config_v2, eks_config_v2, gke_config, gke_config_v2, oke_config, k3s_config and rke_config (list maxitems:1)

    eksConfigV2 ClusterEksConfigV2

    The Amazon EKS V2 configuration to create or import eks Clusters. Conflicts with aks_config, eks_config, gke_config, gke_config_v2, oke_config, k3s_config and rke_config. For Rancher v2.5.x and above (list maxitems:1)

    enableClusterAlerting boolean

    Enable built-in cluster alerting (bool)

    enableClusterIstio boolean

    Deploy istio on system project and istio-system namespace, using rancher2.App resource instead. See above example.

    Deprecated:

    Deploy istio using rancher2_app resource instead

    enableClusterMonitoring boolean

    Enable built-in cluster monitoring (bool)

    enableNetworkPolicy boolean

    Enable project network isolation (bool)

    fleetAgentDeploymentCustomizations ClusterFleetAgentDeploymentCustomization[]

    Optional customization for fleet agent. For Rancher v2.7.5 and above (list)

    fleetWorkspaceName string

    Fleet workspace name (string)

    gkeConfig ClusterGkeConfig

    The Google GKE configuration for gke Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config_v2, oke_config, k3s_config and rke_config (list maxitems:1)

    gkeConfigV2 ClusterGkeConfigV2

    The Google GKE V2 configuration for gke Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config, oke_config, k3s_config and rke_config. For Rancher v2.5.8 and above (list maxitems:1)

    istioEnabled boolean

    (Computed) Is istio enabled at cluster? For Rancher v2.3.x and above (bool)

    k3sConfig ClusterK3sConfig

    The K3S configuration for k3s imported Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config, gke_config_v2, oke_config and rke_config (list maxitems:1)

    kubeConfig string

    (Computed/Sensitive) Kube Config generated for the cluster. Note: For Rancher 2.6.0 and above, when the cluster has cluster_auth_endpoint enabled, the kube_config will not be available until the cluster is connected (string)

    labels {[key: string]: any}

    Labels for the Cluster (map)

    name string

    The name of the Cluster (string)

    okeConfig ClusterOkeConfig

    The Oracle OKE configuration for oke Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config, gke_config_v2, k3s_config and rke_config (list maxitems:1)

    rke2Config ClusterRke2Config

    The RKE2 configuration for rke2 Clusters. Conflicts with aks_config, aks_config_v2, eks_config, gke_config, oke_config, k3s_config and rke_config (list maxitems:1)

    rkeConfig ClusterRkeConfig

    The RKE configuration for rke Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config, gke_config_v2, oke_config and k3s_config (list maxitems:1)

    systemProjectId string

    (Computed) System project ID for the cluster (string)

    windowsPreferedCluster boolean

    Windows preferred cluster. Default: false (bool)

    agent_env_vars Sequence[ClusterAgentEnvVarArgs]

    Optional Agent Env Vars for Rancher agent. For Rancher v2.5.6 and above (list)

    aks_config ClusterAksConfigArgs

    The Azure AKS configuration for aks Clusters. Conflicts with aks_config_v2, eks_config, eks_config_v2, gke_config, gke_config_v2, oke_config, k3s_config and rke_config (list maxitems:1)

    aks_config_v2 ClusterAksConfigV2Args

    The Azure AKS v2 configuration for creating/import aks Clusters. Conflicts with aks_config, eks_config, eks_config_v2, gke_config, gke_config_v2, oke_config, k3s_config and rke_config (list maxitems:1)

    annotations Mapping[str, Any]

    Annotations for the Cluster (map)

    ca_cert str

    TLS CA certificate for etcd service (string)

    cluster_agent_deployment_customizations Sequence[ClusterClusterAgentDeploymentCustomizationArgs]

    Optional customization for cluster agent. For Rancher v2.7.5 and above (list)

    cluster_auth_endpoint ClusterClusterAuthEndpointArgs

    Enabling the local cluster authorized endpoint allows direct communication with the cluster, bypassing the Rancher API proxy. (list maxitems:1)

    cluster_monitoring_input ClusterClusterMonitoringInputArgs

    Cluster monitoring config. Any parameter defined in rancher-monitoring charts could be configured (list maxitems:1)

    cluster_registration_token ClusterClusterRegistrationTokenArgs

    (Computed) Cluster Registration Token generated for the cluster (list maxitems:1)

    cluster_template_answers ClusterClusterTemplateAnswersArgs

    Cluster template answers. For Rancher v2.3.x and above (list maxitems:1)

    cluster_template_id str

    Cluster template ID. For Rancher v2.3.x and above (string)

    cluster_template_questions Sequence[ClusterClusterTemplateQuestionArgs]

    Cluster template questions. For Rancher v2.3.x and above (list)

    cluster_template_revision_id str

    Cluster template revision ID. For Rancher v2.3.x and above (string)

    default_pod_security_admission_configuration_template_name str

    Cluster default pod security admission configuration template name (string)

    default_pod_security_policy_template_id str

    Default pod security policy template id (string)

    default_project_id str

    (Computed) Default project ID for the cluster (string)

    description str

    The description for Cluster (string)

    desired_agent_image str

    Desired agent image. For Rancher v2.3.x and above (string)

    desired_auth_image str

    Desired auth image. For Rancher v2.3.x and above (string)

    docker_root_dir str

    Docker root directory for the Cluster. Default `/var/lib/docker` (string)

    driver str

    (Computed) The driver used for the Cluster. imported, azurekubernetesservice, amazonelasticcontainerservice, googlekubernetesengine and rancherKubernetesEngine are supported (string)

    eks_config ClusterEksConfigArgs

    The Amazon EKS configuration for eks Clusters. Conflicts with aks_config, aks_config_v2, eks_config_v2, gke_config, gke_config_v2, oke_config, k3s_config and rke_config (list maxitems:1)

    eks_config_v2 ClusterEksConfigV2Args

    The Amazon EKS V2 configuration to create or import eks Clusters. Conflicts with aks_config, eks_config, gke_config, gke_config_v2, oke_config, k3s_config and rke_config. For Rancher v2.5.x and above (list maxitems:1)

    enable_cluster_alerting bool

    Enable built-in cluster alerting (bool)

    enable_cluster_istio bool

    Deploy istio on system project and istio-system namespace, using rancher2.App resource instead. See above example.

    Deprecated:

    Deploy istio using rancher2_app resource instead

    enable_cluster_monitoring bool

    Enable built-in cluster monitoring (bool)

    enable_network_policy bool

    Enable project network isolation (bool)

    fleet_agent_deployment_customizations Sequence[ClusterFleetAgentDeploymentCustomizationArgs]

    Optional customization for fleet agent. For Rancher v2.7.5 and above (list)

    fleet_workspace_name str

    Fleet workspace name (string)

    gke_config ClusterGkeConfigArgs

    The Google GKE configuration for gke Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config_v2, oke_config, k3s_config and rke_config (list maxitems:1)

    gke_config_v2 ClusterGkeConfigV2Args

    The Google GKE V2 configuration for gke Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config, oke_config, k3s_config and rke_config. For Rancher v2.5.8 and above (list maxitems:1)

    istio_enabled bool

    (Computed) Is istio enabled at cluster? For Rancher v2.3.x and above (bool)

    k3s_config ClusterK3sConfigArgs

    The K3S configuration for k3s imported Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config, gke_config_v2, oke_config and rke_config (list maxitems:1)

    kube_config str

    (Computed/Sensitive) Kube Config generated for the cluster. Note: For Rancher 2.6.0 and above, when the cluster has cluster_auth_endpoint enabled, the kube_config will not be available until the cluster is connected (string)

    labels Mapping[str, Any]

    Labels for the Cluster (map)

    name str

    The name of the Cluster (string)

    oke_config ClusterOkeConfigArgs

    The Oracle OKE configuration for oke Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config, gke_config_v2, k3s_config and rke_config (list maxitems:1)

    rke2_config ClusterRke2ConfigArgs

    The RKE2 configuration for rke2 Clusters. Conflicts with aks_config, aks_config_v2, eks_config, gke_config, oke_config, k3s_config and rke_config (list maxitems:1)

    rke_config ClusterRkeConfigArgs

    The RKE configuration for rke Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config, gke_config_v2, oke_config and k3s_config (list maxitems:1)

    system_project_id str

    (Computed) System project ID for the cluster (string)

    windows_prefered_cluster bool

    Windows preferred cluster. Default: false (bool)

    agentEnvVars List<Property Map>

    Optional Agent Env Vars for Rancher agent. For Rancher v2.5.6 and above (list)

    aksConfig Property Map

    The Azure AKS configuration for aks Clusters. Conflicts with aks_config_v2, eks_config, eks_config_v2, gke_config, gke_config_v2, oke_config, k3s_config and rke_config (list maxitems:1)

    aksConfigV2 Property Map

    The Azure AKS v2 configuration for creating/import aks Clusters. Conflicts with aks_config, eks_config, eks_config_v2, gke_config, gke_config_v2, oke_config, k3s_config and rke_config (list maxitems:1)

    annotations Map<Any>

    Annotations for the Cluster (map)

    caCert String

    TLS CA certificate for etcd service (string)

    clusterAgentDeploymentCustomizations List<Property Map>

    Optional customization for cluster agent. For Rancher v2.7.5 and above (list)

    clusterAuthEndpoint Property Map

    Enabling the local cluster authorized endpoint allows direct communication with the cluster, bypassing the Rancher API proxy. (list maxitems:1)

    clusterMonitoringInput Property Map

    Cluster monitoring config. Any parameter defined in rancher-monitoring charts could be configured (list maxitems:1)

    clusterRegistrationToken Property Map

    (Computed) Cluster Registration Token generated for the cluster (list maxitems:1)

    clusterTemplateAnswers Property Map

    Cluster template answers. For Rancher v2.3.x and above (list maxitems:1)

    clusterTemplateId String

    Cluster template ID. For Rancher v2.3.x and above (string)

    clusterTemplateQuestions List<Property Map>

    Cluster template questions. For Rancher v2.3.x and above (list)

    clusterTemplateRevisionId String

    Cluster template revision ID. For Rancher v2.3.x and above (string)

    defaultPodSecurityAdmissionConfigurationTemplateName String

    Cluster default pod security admission configuration template name (string)

    defaultPodSecurityPolicyTemplateId String

    Default pod security policy template id (string)

    defaultProjectId String

    (Computed) Default project ID for the cluster (string)

    description String

    The description for Cluster (string)

    desiredAgentImage String

    Desired agent image. For Rancher v2.3.x and above (string)

    desiredAuthImage String

    Desired auth image. For Rancher v2.3.x and above (string)

    dockerRootDir String

    Docker root directory for the Cluster. Default `/var/lib/docker` (string)

    driver String

    (Computed) The driver used for the Cluster. imported, azurekubernetesservice, amazonelasticcontainerservice, googlekubernetesengine and rancherKubernetesEngine are supported (string)

    eksConfig Property Map

    The Amazon EKS configuration for eks Clusters. Conflicts with aks_config, aks_config_v2, eks_config_v2, gke_config, gke_config_v2, oke_config, k3s_config and rke_config (list maxitems:1)

    eksConfigV2 Property Map

    The Amazon EKS V2 configuration to create or import eks Clusters. Conflicts with aks_config, eks_config, gke_config, gke_config_v2, oke_config, k3s_config and rke_config. For Rancher v2.5.x and above (list maxitems:1)

    enableClusterAlerting Boolean

    Enable built-in cluster alerting (bool)

    enableClusterIstio Boolean

    Deploy istio on system project and istio-system namespace, using rancher2.App resource instead. See above example.

    Deprecated:

    Deploy istio using rancher2_app resource instead

    enableClusterMonitoring Boolean

    Enable built-in cluster monitoring (bool)

    enableNetworkPolicy Boolean

    Enable project network isolation (bool)

    fleetAgentDeploymentCustomizations List<Property Map>

    Optional customization for fleet agent. For Rancher v2.7.5 and above (list)

    fleetWorkspaceName String

    Fleet workspace name (string)

    gkeConfig Property Map

    The Google GKE configuration for gke Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config_v2, oke_config, k3s_config and rke_config (list maxitems:1)

    gkeConfigV2 Property Map

    The Google GKE V2 configuration for gke Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config, oke_config, k3s_config and rke_config. For Rancher v2.5.8 and above (list maxitems:1)

    istioEnabled Boolean

    (Computed) Is istio enabled at cluster? For Rancher v2.3.x and above (bool)

    k3sConfig Property Map

    The K3S configuration for k3s imported Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config, gke_config_v2, oke_config and rke_config (list maxitems:1)

    kubeConfig String

    (Computed/Sensitive) Kube Config generated for the cluster. Note: For Rancher 2.6.0 and above, when the cluster has cluster_auth_endpoint enabled, the kube_config will not be available until the cluster is connected (string)

    labels Map<Any>

    Labels for the Cluster (map)

    name String

    The name of the Cluster (string)

    okeConfig Property Map

    The Oracle OKE configuration for oke Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config, gke_config_v2, k3s_config and rke_config (list maxitems:1)

    rke2Config Property Map

    The RKE2 configuration for rke2 Clusters. Conflicts with aks_config, aks_config_v2, eks_config, gke_config, oke_config, k3s_config and rke_config (list maxitems:1)

    rkeConfig Property Map

    The RKE configuration for rke Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config, gke_config_v2, oke_config and k3s_config (list maxitems:1)

    systemProjectId String

    (Computed) System project ID for the cluster (string)

    windowsPreferedCluster Boolean

    Windows preferred cluster. Default: false (bool)

    Supporting Types

    ClusterAgentEnvVar, ClusterAgentEnvVarArgs

    Name string

    The name of the environment variable (string)

    Value string

    The value of the environment variable (string)

    Name string

    The name of the environment variable (string)

    Value string

    The value of the environment variable (string)

    name String

    The name of the environment variable (string)

    value String

    The value of the environment variable (string)

    name string

    The name of the environment variable (string)

    value string

    The value of the environment variable (string)

    name str

    The name of the environment variable (string)

    value str

    The value of the environment variable (string)

    name String

    The name of the environment variable (string)

    value String

    The value of the environment variable (string)

    ClusterAksConfig, ClusterAksConfigArgs

    AgentDnsPrefix string

    DNS prefix to be used to create the FQDN for the agent pool (string)

    ClientId string

    Azure client ID to use (string)

    ClientSecret string

    Azure client secret associated with the "client id" (string)

    KubernetesVersion string

    The Kubernetes version that will be used for your master and agent nodes (string)

    MasterDnsPrefix string

    DNS prefix to use for the Kubernetes cluster control plane (string)

    ResourceGroup string

    The AKS resource group (string)

    SshPublicKeyContents string

    Contents of the SSH public key used to authenticate with Linux hosts (string)

    Subnet string

    The AKS subnet (string)

    SubscriptionId string

    Subscription credentials which uniquely identify Microsoft Azure subscription (string)

    TenantId string

    Azure tenant ID to use (string)

    VirtualNetwork string

    The name of the virtual network to use. If it's not specified Rancher will create a new VPC (string)

    VirtualNetworkResourceGroup string

    The AKS virtual network resource group (string)

    AadServerAppSecret string

    The secret of an Azure Active Directory server application (string)

    AadTenantId string

    The ID of an Azure Active Directory tenant (string)

    AddClientAppId string

    The ID of an Azure Active Directory client application of type "Native". This application is for user login via kubectl (string)

    AddServerAppId string

    The ID of an Azure Active Directory server application of type "Web app/API". This application represents the managed cluster's apiserver (Server application) (string)

    AdminUsername string

    The administrator username to use for Linux hosts. Default azureuser (string)

    AgentOsDiskSize int

    GB size to be used to specify the disk for every machine in the agent pool. If you specify 0, it will apply the default according to the "agent vm size" specified. Default 0 (int)

    AgentPoolName string

    Name for the agent pool, up to 12 alphanumeric characters. Default agentpool0 (string)

    AgentStorageProfile string

    Storage profile specifies what kind of storage used on machine in the agent pool. Chooses from [ManagedDisks StorageAccount]. Default ManagedDisks (string)

    AgentVmSize string

    Size of machine in the agent pool. Default Standard_D1_v2 (string)

    AuthBaseUrl string

    The AKS auth base url (string)

    BaseUrl string

    The AKS base url (string)

    Count int

    The AKS node pool count. Default: 1 (int)

    DnsServiceIp string

    An IP address assigned to the Kubernetes DNS service. It must be within the Kubernetes Service address range specified in "service cidr". Default 10.0.0.10 (string)

    DockerBridgeCidr string

    A CIDR notation IP range assigned to the Docker bridge network. It must not overlap with any Subnet IP ranges or the Kubernetes Service address range specified in "service cidr". Default 172.17.0.1/16 (string)

    EnableHttpApplicationRouting bool

    Enable the Kubernetes ingress with automatic public DNS name creation. Default false (bool)

    EnableMonitoring bool

    Turn on Azure Log Analytics monitoring. Uses the Log Analytics "Default" workspace if it exists, else creates one. if using an existing workspace, specifies "log analytics workspace resource id". Default true (bool)

    LoadBalancerSku string

    The AKS load balancer sku (string)

    Location string

    Azure Kubernetes cluster location. Default eastus (string)

    LogAnalyticsWorkspace string

    The name of an existing Azure Log Analytics Workspace to use for storing monitoring data. If not specified, uses '{resource group}-{subscription id}-{location code}' (string)

    LogAnalyticsWorkspaceResourceGroup string

    The resource group of an existing Azure Log Analytics Workspace to use for storing monitoring data. If not specified, uses the 'Cluster' resource group (string)

    MaxPods int

    The AKS node pool max pods. Default: 110 (int)

    NetworkPlugin string

    The AKS network plugin. Required if imported=false (string)

    NetworkPolicy string

    The AKS network policy (string)

    PodCidr string

    A CIDR IP range from which to assign Kubernetes Pod IPs (string)

    ServiceCidr string

    A CIDR IP range from which to assign Kubernetes Service IPs (string)

    Tag Dictionary<string, object>

    Use tags argument instead as []string

    Deprecated:

    Use tags argument instead as []string

    Tags List<string>

    The AKS cluster tags (List)

    AgentDnsPrefix string

    DNS prefix to be used to create the FQDN for the agent pool (string)

    ClientId string

    Azure client ID to use (string)

    ClientSecret string

    Azure client secret associated with the "client id" (string)

    KubernetesVersion string

    The Kubernetes version that will be used for your master and agent nodes (string)

    MasterDnsPrefix string

    DNS prefix to use for the Kubernetes cluster control plane (string)

    ResourceGroup string

    The AKS resource group (string)

    SshPublicKeyContents string

    Contents of the SSH public key used to authenticate with Linux hosts (string)

    Subnet string

    The AKS subnet (string)

    SubscriptionId string

    Subscription credentials which uniquely identify Microsoft Azure subscription (string)

    TenantId string

    Azure tenant ID to use (string)

    VirtualNetwork string

    The name of the virtual network to use. If it's not specified Rancher will create a new VPC (string)

    VirtualNetworkResourceGroup string

    The AKS virtual network resource group (string)

    AadServerAppSecret string

    The secret of an Azure Active Directory server application (string)

    AadTenantId string

    The ID of an Azure Active Directory tenant (string)

    AddClientAppId string

    The ID of an Azure Active Directory client application of type "Native". This application is for user login via kubectl (string)

    AddServerAppId string

    The ID of an Azure Active Directory server application of type "Web app/API". This application represents the managed cluster's apiserver (Server application) (string)

    AdminUsername string

    The administrator username to use for Linux hosts. Default azureuser (string)

    AgentOsDiskSize int

    GB size to be used to specify the disk for every machine in the agent pool. If you specify 0, it will apply the default according to the "agent vm size" specified. Default 0 (int)

    AgentPoolName string

    Name for the agent pool, up to 12 alphanumeric characters. Default agentpool0 (string)

    AgentStorageProfile string

    Storage profile specifies what kind of storage used on machine in the agent pool. Chooses from [ManagedDisks StorageAccount]. Default ManagedDisks (string)

    AgentVmSize string

    Size of machine in the agent pool. Default Standard_D1_v2 (string)

    AuthBaseUrl string

    The AKS auth base url (string)

    BaseUrl string

    The AKS base url (string)

    Count int

    The AKS node pool count. Default: 1 (int)

    DnsServiceIp string

    An IP address assigned to the Kubernetes DNS service. It must be within the Kubernetes Service address range specified in "service cidr". Default 10.0.0.10 (string)

    DockerBridgeCidr string

    A CIDR notation IP range assigned to the Docker bridge network. It must not overlap with any Subnet IP ranges or the Kubernetes Service address range specified in "service cidr". Default 172.17.0.1/16 (string)

    EnableHttpApplicationRouting bool

    Enable the Kubernetes ingress with automatic public DNS name creation. Default false (bool)

    EnableMonitoring bool

    Turn on Azure Log Analytics monitoring. Uses the Log Analytics "Default" workspace if it exists, else creates one. if using an existing workspace, specifies "log analytics workspace resource id". Default true (bool)

    LoadBalancerSku string

    The AKS load balancer sku (string)

    Location string

    Azure Kubernetes cluster location. Default eastus (string)

    LogAnalyticsWorkspace string

    The name of an existing Azure Log Analytics Workspace to use for storing monitoring data. If not specified, uses '{resource group}-{subscription id}-{location code}' (string)

    LogAnalyticsWorkspaceResourceGroup string

    The resource group of an existing Azure Log Analytics Workspace to use for storing monitoring data. If not specified, uses the 'Cluster' resource group (string)

    MaxPods int

    The AKS node pool max pods. Default: 110 (int)

    NetworkPlugin string

    The AKS network plugin. Required if imported=false (string)

    NetworkPolicy string

    The AKS network policy (string)

    PodCidr string

    A CIDR IP range from which to assign Kubernetes Pod IPs (string)

    ServiceCidr string

    A CIDR IP range from which to assign Kubernetes Service IPs (string)

    Tag map[string]interface{}

    Use tags argument instead as []string

    Deprecated:

    Use tags argument instead as []string

    Tags []string

    The AKS cluster tags (List)

    agentDnsPrefix String

    DNS prefix to be used to create the FQDN for the agent pool (string)

    clientId String

    Azure client ID to use (string)

    clientSecret String

    Azure client secret associated with the "client id" (string)

    kubernetesVersion String

    The Kubernetes version that will be used for your master and agent nodes (string)

    masterDnsPrefix String

    DNS prefix to use for the Kubernetes cluster control plane (string)

    resourceGroup String

    The AKS resource group (string)

    sshPublicKeyContents String

    Contents of the SSH public key used to authenticate with Linux hosts (string)

    subnet String

    The AKS subnet (string)

    subscriptionId String

    Subscription credentials which uniquely identify Microsoft Azure subscription (string)

    tenantId String

    Azure tenant ID to use (string)

    virtualNetwork String

    The name of the virtual network to use. If it's not specified Rancher will create a new VPC (string)

    virtualNetworkResourceGroup String

    The AKS virtual network resource group (string)

    aadServerAppSecret String

    The secret of an Azure Active Directory server application (string)

    aadTenantId String

    The ID of an Azure Active Directory tenant (string)

    addClientAppId String

    The ID of an Azure Active Directory client application of type "Native". This application is for user login via kubectl (string)

    addServerAppId String

    The ID of an Azure Active Directory server application of type "Web app/API". This application represents the managed cluster's apiserver (Server application) (string)

    adminUsername String

    The administrator username to use for Linux hosts. Default azureuser (string)

    agentOsDiskSize Integer

    GB size to be used to specify the disk for every machine in the agent pool. If you specify 0, it will apply the default according to the "agent vm size" specified. Default 0 (int)

    agentPoolName String

    Name for the agent pool, up to 12 alphanumeric characters. Default agentpool0 (string)

    agentStorageProfile String

    Storage profile specifies what kind of storage used on machine in the agent pool. Chooses from [ManagedDisks StorageAccount]. Default ManagedDisks (string)

    agentVmSize String

    Size of machine in the agent pool. Default Standard_D1_v2 (string)

    authBaseUrl String

    The AKS auth base url (string)

    baseUrl String

    The AKS base url (string)

    count Integer

    The AKS node pool count. Default: 1 (int)

    dnsServiceIp String

    An IP address assigned to the Kubernetes DNS service. It must be within the Kubernetes Service address range specified in "service cidr". Default 10.0.0.10 (string)

    dockerBridgeCidr String

    A CIDR notation IP range assigned to the Docker bridge network. It must not overlap with any Subnet IP ranges or the Kubernetes Service address range specified in "service cidr". Default 172.17.0.1/16 (string)

    enableHttpApplicationRouting Boolean

    Enable the Kubernetes ingress with automatic public DNS name creation. Default false (bool)

    enableMonitoring Boolean

    Turn on Azure Log Analytics monitoring. Uses the Log Analytics "Default" workspace if it exists, else creates one. if using an existing workspace, specifies "log analytics workspace resource id". Default true (bool)

    loadBalancerSku String

    The AKS load balancer sku (string)

    location String

    Azure Kubernetes cluster location. Default eastus (string)

    logAnalyticsWorkspace String

    The name of an existing Azure Log Analytics Workspace to use for storing monitoring data. If not specified, uses '{resource group}-{subscription id}-{location code}' (string)

    logAnalyticsWorkspaceResourceGroup String

    The resource group of an existing Azure Log Analytics Workspace to use for storing monitoring data. If not specified, uses the 'Cluster' resource group (string)

    maxPods Integer

    The AKS node pool max pods. Default: 110 (int)

    networkPlugin String

    The AKS network plugin. Required if imported=false (string)

    networkPolicy String

    The AKS network policy (string)

    podCidr String

    A CIDR IP range from which to assign Kubernetes Pod IPs (string)

    serviceCidr String

    A CIDR IP range from which to assign Kubernetes Service IPs (string)

    tag Map<String,Object>

    Use tags argument instead as []string

    Deprecated:

    Use tags argument instead as []string

    tags List<String>

    The AKS cluster tags (List)

    agentDnsPrefix string

    DNS prefix to be used to create the FQDN for the agent pool (string)

    clientId string

    Azure client ID to use (string)

    clientSecret string

    Azure client secret associated with the "client id" (string)

    kubernetesVersion string

    The Kubernetes version that will be used for your master and agent nodes (string)

    masterDnsPrefix string

    DNS prefix to use for the Kubernetes cluster control plane (string)

    resourceGroup string

    The AKS resource group (string)

    sshPublicKeyContents string

    Contents of the SSH public key used to authenticate with Linux hosts (string)

    subnet string

    The AKS subnet (string)

    subscriptionId string

    Subscription credentials which uniquely identify Microsoft Azure subscription (string)

    tenantId string

    Azure tenant ID to use (string)

    virtualNetwork string

    The name of the virtual network to use. If it's not specified Rancher will create a new VPC (string)

    virtualNetworkResourceGroup string

    The AKS virtual network resource group (string)

    aadServerAppSecret string

    The secret of an Azure Active Directory server application (string)

    aadTenantId string

    The ID of an Azure Active Directory tenant (string)

    addClientAppId string

    The ID of an Azure Active Directory client application of type "Native". This application is for user login via kubectl (string)

    addServerAppId string

    The ID of an Azure Active Directory server application of type "Web app/API". This application represents the managed cluster's apiserver (Server application) (string)

    adminUsername string

    The administrator username to use for Linux hosts. Default azureuser (string)

    agentOsDiskSize number

    GB size to be used to specify the disk for every machine in the agent pool. If you specify 0, it will apply the default according to the "agent vm size" specified. Default 0 (int)

    agentPoolName string

    Name for the agent pool, up to 12 alphanumeric characters. Default agentpool0 (string)

    agentStorageProfile string

    Storage profile specifies what kind of storage used on machine in the agent pool. Chooses from [ManagedDisks StorageAccount]. Default ManagedDisks (string)

    agentVmSize string

    Size of machine in the agent pool. Default Standard_D1_v2 (string)

    authBaseUrl string

    The AKS auth base url (string)

    baseUrl string

    The AKS base url (string)

    count number

    The AKS node pool count. Default: 1 (int)

    dnsServiceIp string

    An IP address assigned to the Kubernetes DNS service. It must be within the Kubernetes Service address range specified in "service cidr". Default 10.0.0.10 (string)

    dockerBridgeCidr string

    A CIDR notation IP range assigned to the Docker bridge network. It must not overlap with any Subnet IP ranges or the Kubernetes Service address range specified in "service cidr". Default 172.17.0.1/16 (string)

    enableHttpApplicationRouting boolean

    Enable the Kubernetes ingress with automatic public DNS name creation. Default false (bool)

    enableMonitoring boolean

    Turn on Azure Log Analytics monitoring. Uses the Log Analytics "Default" workspace if it exists, else creates one. If using an existing workspace, specify "log analytics workspace resource id". Default true (bool)

    loadBalancerSku string

    The AKS load balancer sku (string)

    location string

    Azure Kubernetes cluster location. Default eastus (string)

    logAnalyticsWorkspace string

    The name of an existing Azure Log Analytics Workspace to use for storing monitoring data. If not specified, uses '{resource group}-{subscription id}-{location code}' (string)

    logAnalyticsWorkspaceResourceGroup string

    The resource group of an existing Azure Log Analytics Workspace to use for storing monitoring data. If not specified, uses the 'Cluster' resource group (string)

    maxPods number

    The AKS node pool max pods. Default: 110 (int)

    networkPlugin string

    The AKS network plugin. Required if imported=false (string)

    networkPolicy string

    The AKS network policy (string)

    podCidr string

    A CIDR IP range from which to assign Kubernetes Pod IPs (string)

    serviceCidr string

    A CIDR IP range from which to assign Kubernetes Service IPs (string)

    tag {[key: string]: any}

    Use tags argument instead as []string

    Deprecated:

    Use tags argument instead as []string

    tags string[]

    The AKS node config tags (List)

    agent_dns_prefix str

    DNS prefix to be used to create the FQDN for the agent pool (string)

    client_id str

    Azure client ID to use (string)

    client_secret str

    Azure client secret associated with the "client id" (string)

    kubernetes_version str

    The Kubernetes version that will be used for your master and OKE worker nodes (string)

    master_dns_prefix str

    DNS prefix used for the Kubernetes cluster control plane (string)

    resource_group str

    The AKS resource group (string)

    ssh_public_key_contents str

    Contents of the SSH public key used to authenticate with Linux hosts (string)

    subnet str

    The AKS subnet (string)

    subscription_id str

    Subscription credentials which uniquely identify Microsoft Azure subscription (string)

    tenant_id str

    Azure tenant ID to use (string)

    virtual_network str

    The name of the virtual network to use. If it's not specified Rancher will create a new VPC (string)

    virtual_network_resource_group str

    The AKS virtual network resource group (string)

    aad_server_app_secret str

    The secret of an Azure Active Directory server application (string)

    aad_tenant_id str

    The ID of an Azure Active Directory tenant (string)

    add_client_app_id str

    The ID of an Azure Active Directory client application of type "Native". This application is for user login via kubectl (string)

    add_server_app_id str

    The ID of an Azure Active Directory server application of type "Web app/API". This application represents the managed cluster's apiserver (Server application) (string)

    admin_username str

    The administrator username to use for Linux hosts. Default azureuser (string)

    agent_os_disk_size int

    GB size to be used to specify the disk for every machine in the agent pool. If you specify 0, it will apply the default according to the "agent vm size" specified. Default 0 (int)

    agent_pool_name str

    Name for the agent pool, up to 12 alphanumeric characters. Default agentpool0 (string)

    agent_storage_profile str

    Storage profile specifies what kind of storage used on machine in the agent pool. Chooses from [ManagedDisks StorageAccount]. Default ManagedDisks (string)

    agent_vm_size str

    Size of machine in the agent pool. Default Standard_D1_v2 (string)

    auth_base_url str

    The AKS auth base url (string)

    base_url str

    The AKS base url (string)

    count int

    The AKS node pool count. Default: 1 (int)

    dns_service_ip str

    An IP address assigned to the Kubernetes DNS service. It must be within the Kubernetes Service address range specified in "service cidr". Default 10.0.0.10 (string)

    docker_bridge_cidr str

    A CIDR notation IP range assigned to the Docker bridge network. It must not overlap with any Subnet IP ranges or the Kubernetes Service address range specified in "service cidr". Default 172.17.0.1/16 (string)

    enable_http_application_routing bool

    Enable the Kubernetes ingress with automatic public DNS name creation. Default false (bool)

    enable_monitoring bool

    Turn on Azure Log Analytics monitoring. Uses the Log Analytics "Default" workspace if it exists, else creates one. If using an existing workspace, specify "log analytics workspace resource id". Default true (bool)

    load_balancer_sku str

    The AKS load balancer sku (string)

    location str

    Azure Kubernetes cluster location. Default eastus (string)

    log_analytics_workspace str

    The name of an existing Azure Log Analytics Workspace to use for storing monitoring data. If not specified, uses '{resource group}-{subscription id}-{location code}' (string)

    log_analytics_workspace_resource_group str

    The resource group of an existing Azure Log Analytics Workspace to use for storing monitoring data. If not specified, uses the 'Cluster' resource group (string)

    max_pods int

    The AKS node pool max pods. Default: 110 (int)

    network_plugin str

    The AKS network plugin. Required if imported=false (string)

    network_policy str

    The AKS network policy (string)

    pod_cidr str

    A CIDR IP range from which to assign Kubernetes Pod IPs (string)

    service_cidr str

    A CIDR IP range from which to assign Kubernetes Service IPs (string)

    tag Mapping[str, Any]

    Use tags argument instead as []string

    Deprecated:

    Use tags argument instead as []string

    tags Sequence[str]

    The AKS node config tags (List)

    agentDnsPrefix String

    DNS prefix to be used to create the FQDN for the agent pool (string)

    clientId String

    Azure client ID to use (string)

    clientSecret String

    Azure client secret associated with the "client id" (string)

    kubernetesVersion String

    The Kubernetes version that will be used for your master and OKE worker nodes (string)

    masterDnsPrefix String

    DNS prefix used for the Kubernetes cluster control plane (string)

    resourceGroup String

    The AKS resource group (string)

    sshPublicKeyContents String

    Contents of the SSH public key used to authenticate with Linux hosts (string)

    subnet String

    The AKS subnet (string)

    subscriptionId String

    Subscription credentials which uniquely identify Microsoft Azure subscription (string)

    tenantId String

    Azure tenant ID to use (string)

    virtualNetwork String

    The name of the virtual network to use. If it's not specified Rancher will create a new VPC (string)

    virtualNetworkResourceGroup String

    The AKS virtual network resource group (string)

    aadServerAppSecret String

    The secret of an Azure Active Directory server application (string)

    aadTenantId String

    The ID of an Azure Active Directory tenant (string)

    addClientAppId String

    The ID of an Azure Active Directory client application of type "Native". This application is for user login via kubectl (string)

    addServerAppId String

    The ID of an Azure Active Directory server application of type "Web app/API". This application represents the managed cluster's apiserver (Server application) (string)

    adminUsername String

    The administrator username to use for Linux hosts. Default azureuser (string)

    agentOsDiskSize Number

    GB size to be used to specify the disk for every machine in the agent pool. If you specify 0, it will apply the default according to the "agent vm size" specified. Default 0 (int)

    agentPoolName String

    Name for the agent pool, up to 12 alphanumeric characters. Default agentpool0 (string)

    agentStorageProfile String

    Storage profile specifies what kind of storage used on machine in the agent pool. Chooses from [ManagedDisks StorageAccount]. Default ManagedDisks (string)

    agentVmSize String

    Size of machine in the agent pool. Default Standard_D1_v2 (string)

    authBaseUrl String

    The AKS auth base url (string)

    baseUrl String

    The AKS base url (string)

    count Number

    The AKS node pool count. Default: 1 (int)

    dnsServiceIp String

    An IP address assigned to the Kubernetes DNS service. It must be within the Kubernetes Service address range specified in "service cidr". Default 10.0.0.10 (string)

    dockerBridgeCidr String

    A CIDR notation IP range assigned to the Docker bridge network. It must not overlap with any Subnet IP ranges or the Kubernetes Service address range specified in "service cidr". Default 172.17.0.1/16 (string)

    enableHttpApplicationRouting Boolean

    Enable the Kubernetes ingress with automatic public DNS name creation. Default false (bool)

    enableMonitoring Boolean

    Turn on Azure Log Analytics monitoring. Uses the Log Analytics "Default" workspace if it exists, else creates one. If using an existing workspace, specify "log analytics workspace resource id". Default true (bool)

    loadBalancerSku String

    The AKS load balancer sku (string)

    location String

    Azure Kubernetes cluster location. Default eastus (string)

    logAnalyticsWorkspace String

    The name of an existing Azure Log Analytics Workspace to use for storing monitoring data. If not specified, uses '{resource group}-{subscription id}-{location code}' (string)

    logAnalyticsWorkspaceResourceGroup String

    The resource group of an existing Azure Log Analytics Workspace to use for storing monitoring data. If not specified, uses the 'Cluster' resource group (string)

    maxPods Number

    The AKS node pool max pods. Default: 110 (int)

    networkPlugin String

    The AKS network plugin. Required if imported=false (string)

    networkPolicy String

    The AKS network policy (string)

    podCidr String

    A CIDR IP range from which to assign Kubernetes Pod IPs (string)

    serviceCidr String

    A CIDR IP range from which to assign Kubernetes Service IPs (string)

    tag Map<Any>

    Use tags argument instead as []string

    Deprecated:

    Use tags argument instead as []string

    tags List<String>

    The AKS node config tags (List)

    ClusterAksConfigV2, ClusterAksConfigV2Args

    CloudCredentialId string

    The AKS cloud_credential id (string)

    ResourceGroup string

    The AKS resource group (string)

    ResourceLocation string

    The AKS resource location (string)

    AuthBaseUrl string

    The AKS auth base url (string)

    AuthorizedIpRanges List<string>

    The AKS authorized ip ranges (list)

    BaseUrl string

    The AKS base url (string)

    DnsPrefix string

    The AKS dns prefix. Required if imported=false (string)

    HttpApplicationRouting bool

    Enable AKS http application routing? (bool)

    Imported bool

    Is AKS cluster imported? Default: false (bool)

    KubernetesVersion string

    The Kubernetes version that will be used for your master and OKE worker nodes (string)

    LinuxAdminUsername string

    The AKS linux admin username (string)

    LinuxSshPublicKey string

    The AKS linux ssh public key (string)

    LoadBalancerSku string

    The AKS load balancer sku (string)

    LogAnalyticsWorkspaceGroup string

    The AKS log analytics workspace group (string)

    LogAnalyticsWorkspaceName string

    The AKS log analytics workspace name (string)

    Monitoring bool

    Is AKS cluster monitoring enabled? (bool)

    Name string

    The name of the Cluster (string)

    NetworkDnsServiceIp string

    The AKS network dns service ip (string)

    NetworkDockerBridgeCidr string

    The AKS network docker bridge cidr (string)

    NetworkPlugin string

    The AKS network plugin. Required if imported=false (string)

    NetworkPodCidr string

    The AKS network pod cidr (string)

    NetworkPolicy string

    The AKS network policy (string)

    NetworkServiceCidr string

    The AKS network service cidr (string)

    NodePools List<ClusterAksConfigV2NodePool>

    The AKS cluster node pools. Required to create a new cluster (List)

    PrivateCluster bool

    Is AKS cluster private? (bool)

    Subnet string

    The AKS subnet (string)

    Tags Dictionary<string, object>

    The AKS node config tags (List)

    VirtualNetwork string

    The name of the virtual network to use. If it's not specified Rancher will create a new VPC (string)

    VirtualNetworkResourceGroup string

    The AKS virtual network resource group (string)

    CloudCredentialId string

    The AKS cloud_credential id (string)

    ResourceGroup string

    The AKS resource group (string)

    ResourceLocation string

    The AKS resource location (string)

    AuthBaseUrl string

    The AKS auth base url (string)

    AuthorizedIpRanges []string

    The AKS authorized ip ranges (list)

    BaseUrl string

    The AKS base url (string)

    DnsPrefix string

    The AKS dns prefix. Required if imported=false (string)

    HttpApplicationRouting bool

    Enable AKS http application routing? (bool)

    Imported bool

    Is AKS cluster imported? Default: false (bool)

    KubernetesVersion string

    The Kubernetes version that will be used for your master and OKE worker nodes (string)

    LinuxAdminUsername string

    The AKS linux admin username (string)

    LinuxSshPublicKey string

    The AKS linux ssh public key (string)

    LoadBalancerSku string

    The AKS load balancer sku (string)

    LogAnalyticsWorkspaceGroup string

    The AKS log analytics workspace group (string)

    LogAnalyticsWorkspaceName string

    The AKS log analytics workspace name (string)

    Monitoring bool

    Is AKS cluster monitoring enabled? (bool)

    Name string

    The name of the Cluster (string)

    NetworkDnsServiceIp string

    The AKS network dns service ip (string)

    NetworkDockerBridgeCidr string

    The AKS network docker bridge cidr (string)

    NetworkPlugin string

    The AKS network plugin. Required if imported=false (string)

    NetworkPodCidr string

    The AKS network pod cidr (string)

    NetworkPolicy string

    The AKS network policy (string)

    NetworkServiceCidr string

    The AKS network service cidr (string)

    NodePools []ClusterAksConfigV2NodePool

    The AKS cluster node pools. Required to create a new cluster (List)

    PrivateCluster bool

    Is AKS cluster private? (bool)

    Subnet string

    The AKS subnet (string)

    Tags map[string]interface{}

    The AKS node config tags (List)

    VirtualNetwork string

    The name of the virtual network to use. If it's not specified Rancher will create a new VPC (string)

    VirtualNetworkResourceGroup string

    The AKS virtual network resource group (string)

    cloudCredentialId String

    The AKS cloud_credential id (string)

    resourceGroup String

    The AKS resource group (string)

    resourceLocation String

    The AKS resource location (string)

    authBaseUrl String

    The AKS auth base url (string)

    authorizedIpRanges List<String>

    The AKS authorized ip ranges (list)

    baseUrl String

    The AKS base url (string)

    dnsPrefix String

    The AKS dns prefix. Required if imported=false (string)

    httpApplicationRouting Boolean

    Enable AKS http application routing? (bool)

    imported Boolean

    Is AKS cluster imported? Default: false (bool)

    kubernetesVersion String

    The Kubernetes version that will be used for your master and OKE worker nodes (string)

    linuxAdminUsername String

    The AKS linux admin username (string)

    linuxSshPublicKey String

    The AKS linux ssh public key (string)

    loadBalancerSku String

    The AKS load balancer sku (string)

    logAnalyticsWorkspaceGroup String

    The AKS log analytics workspace group (string)

    logAnalyticsWorkspaceName String

    The AKS log analytics workspace name (string)

    monitoring Boolean

    Is AKS cluster monitoring enabled? (bool)

    name String

    The name of the Cluster (string)

    networkDnsServiceIp String

    The AKS network dns service ip (string)

    networkDockerBridgeCidr String

    The AKS network docker bridge cidr (string)

    networkPlugin String

    The AKS network plugin. Required if imported=false (string)

    networkPodCidr String

    The AKS network pod cidr (string)

    networkPolicy String

    The AKS network policy (string)

    networkServiceCidr String

    The AKS network service cidr (string)

    nodePools List<ClusterAksConfigV2NodePool>

    The AKS cluster node pools. Required to create a new cluster (List)

    privateCluster Boolean

    Is AKS cluster private? (bool)

    subnet String

    The AKS subnet (string)

    tags Map<String,Object>

    The AKS node config tags (List)

    virtualNetwork String

    The name of the virtual network to use. If it's not specified Rancher will create a new VPC (string)

    virtualNetworkResourceGroup String

    The AKS virtual network resource group (string)

    cloudCredentialId string

    The AKS cloud_credential id (string)

    resourceGroup string

    The AKS resource group (string)

    resourceLocation string

    The AKS resource location (string)

    authBaseUrl string

    The AKS auth base url (string)

    authorizedIpRanges string[]

    The AKS authorized ip ranges (list)

    baseUrl string

    The AKS base url (string)

    dnsPrefix string

    The AKS dns prefix. Required if imported=false (string)

    httpApplicationRouting boolean

    Enable AKS http application routing? (bool)

    imported boolean

    Is AKS cluster imported? Default: false (bool)

    kubernetesVersion string

    The Kubernetes version that will be used for your master and OKE worker nodes (string)

    linuxAdminUsername string

    The AKS linux admin username (string)

    linuxSshPublicKey string

    The AKS linux ssh public key (string)

    loadBalancerSku string

    The AKS load balancer sku (string)

    logAnalyticsWorkspaceGroup string

    The AKS log analytics workspace group (string)

    logAnalyticsWorkspaceName string

    The AKS log analytics workspace name (string)

    monitoring boolean

    Is AKS cluster monitoring enabled? (bool)

    name string

    The name of the Cluster (string)

    networkDnsServiceIp string

    The AKS network dns service ip (string)

    networkDockerBridgeCidr string

    The AKS network docker bridge cidr (string)

    networkPlugin string

    The AKS network plugin. Required if imported=false (string)

    networkPodCidr string

    The AKS network pod cidr (string)

    networkPolicy string

    The AKS network policy (string)

    networkServiceCidr string

    The AKS network service cidr (string)

    nodePools ClusterAksConfigV2NodePool[]

    The AKS cluster node pools. Required to create a new cluster (List)

    privateCluster boolean

    Is AKS cluster private? (bool)

    subnet string

    The AKS subnet (string)

    tags {[key: string]: any}

    The AKS node config tags (List)

    virtualNetwork string

    The name of the virtual network to use. If it's not specified Rancher will create a new VPC (string)

    virtualNetworkResourceGroup string

    The AKS virtual network resource group (string)

    cloud_credential_id str

    The AKS cloud_credential id (string)

    resource_group str

    The AKS resource group (string)

    resource_location str

    The AKS resource location (string)

    auth_base_url str

    The AKS auth base url (string)

    authorized_ip_ranges Sequence[str]

    The AKS authorized ip ranges (list)

    base_url str

    The AKS base url (string)

    dns_prefix str

    The AKS dns prefix. Required if imported=false (string)

    http_application_routing bool

    Enable AKS http application routing? (bool)

    imported bool

    Is AKS cluster imported? Default: false (bool)

    kubernetes_version str

    The Kubernetes version that will be used for your master and OKE worker nodes (string)

    linux_admin_username str

    The AKS linux admin username (string)

    linux_ssh_public_key str

    The AKS linux ssh public key (string)

    load_balancer_sku str

    The AKS load balancer sku (string)

    log_analytics_workspace_group str

    The AKS log analytics workspace group (string)

    log_analytics_workspace_name str

    The AKS log analytics workspace name (string)

    monitoring bool

    Is AKS cluster monitoring enabled? (bool)

    name str

    The name of the Cluster (string)

    network_dns_service_ip str

    The AKS network dns service ip (string)

    network_docker_bridge_cidr str

    The AKS network docker bridge cidr (string)

    network_plugin str

    The AKS network plugin. Required if imported=false (string)

    network_pod_cidr str

    The AKS network pod cidr (string)

    network_policy str

    The AKS network policy (string)

    network_service_cidr str

    The AKS network service cidr (string)

    node_pools Sequence[ClusterAksConfigV2NodePool]

    The AKS cluster node pools. Required to create a new cluster (List)

    private_cluster bool

    Is AKS cluster private? (bool)

    subnet str

    The AKS subnet (string)

    tags Mapping[str, Any]

    The AKS node config tags (List)

    virtual_network str

    The name of the virtual network to use. If it's not specified Rancher will create a new VPC (string)

    virtual_network_resource_group str

    The AKS virtual network resource group (string)

    cloudCredentialId String

    The AKS cloud_credential id (string)

    resourceGroup String

    The AKS resource group (string)

    resourceLocation String

    The AKS resource location (string)

    authBaseUrl String

    The AKS auth base url (string)

    authorizedIpRanges List<String>

    The AKS authorized ip ranges (list)

    baseUrl String

    The AKS base url (string)

    dnsPrefix String

    The AKS dns prefix. Required if imported=false (string)

    httpApplicationRouting Boolean

    Enable AKS http application routing? (bool)

    imported Boolean

    Is AKS cluster imported? Default: false (bool)

    kubernetesVersion String

    The Kubernetes version that will be used for your master and OKE worker nodes (string)

    linuxAdminUsername String

    The AKS linux admin username (string)

    linuxSshPublicKey String

    The AKS linux ssh public key (string)

    loadBalancerSku String

    The AKS load balancer sku (string)

    logAnalyticsWorkspaceGroup String

    The AKS log analytics workspace group (string)

    logAnalyticsWorkspaceName String

    The AKS log analytics workspace name (string)

    monitoring Boolean

    Is AKS cluster monitoring enabled? (bool)

    name String

    The name of the Cluster (string)

    networkDnsServiceIp String

    The AKS network dns service ip (string)

    networkDockerBridgeCidr String

    The AKS network docker bridge cidr (string)

    networkPlugin String

    The AKS network plugin. Required if imported=false (string)

    networkPodCidr String

    The AKS network pod cidr (string)

    networkPolicy String

    The AKS network policy (string)

    networkServiceCidr String

    The AKS network service cidr (string)

    nodePools List<Property Map>

    The AKS cluster node pools. Required to create a new cluster (List)

    privateCluster Boolean

    Is AKS cluster private? (bool)

    subnet String

    The AKS subnet (string)

    tags Map<Any>

    The AKS node config tags (List)

    virtualNetwork String

    The name of the virtual network to use. If it's not specified Rancher will create a new VPC (string)

    virtualNetworkResourceGroup String

    The AKS virtual network resource group (string)

    ClusterAksConfigV2NodePool, ClusterAksConfigV2NodePoolArgs

    Name string

    The name of the Cluster (string)

    AvailabilityZones List<string>

    The AKS node pool availability zones (list)

    Count int

    The AKS node pool count. Default: 1 (int)

    EnableAutoScaling bool

    Is AKS node pool auto scaling enabled? Default: false (bool)

    Labels Dictionary<string, object>

    Labels for the Cluster (map)

    MaxCount int

    The AKS node pool max count. Required if enable_auto_scaling=true (int)

    MaxPods int

    The AKS node pool max pods. Default: 110 (int)

    MaxSurge string

    The AKS node pool max surge (string), example value: 25%

    MinCount int

    The AKS node pool min count. Required if enable_auto_scaling=true (int)

    Mode string

    The AKS node group mode. Default: System (string)

    OrchestratorVersion string

    The AKS node pool orchestrator version (string)

    OsDiskSizeGb int

    The AKS node pool os disk size gb. Default: 128 (int)

    OsDiskType string

    The AKS node pool os disk type. Default: Managed (string)

    OsType string

    The AKS node pool os type. Default: Linux (string)

    Taints List<string>

    The AKS node config taints (List)

    VmSize string

    The AKS node pool VM size (string)

    Name string

    The name of the Cluster (string)

    AvailabilityZones []string

    The AKS node pool availability zones (list)

    Count int

    The AKS node pool count. Default: 1 (int)

    EnableAutoScaling bool

    Is AKS node pool auto scaling enabled? Default: false (bool)

    Labels map[string]interface{}

    Labels for the Cluster (map)

    MaxCount int

    The AKS node pool max count. Required if enable_auto_scaling=true (int)

    MaxPods int

    The AKS node pool max pods. Default: 110 (int)

    MaxSurge string

    The AKS node pool max surge (string), example value: 25%

    MinCount int

    The AKS node pool min count