1. Packages
  2. Rancher2
  3. API Docs
  4. Cluster
Rancher 2 v6.2.1 published on Monday, Aug 12, 2024 by Pulumi

rancher2.Cluster

Explore with Pulumi AI

rancher2 logo
Rancher 2 v6.2.1 published on Monday, Aug 12, 2024 by Pulumi

    Provides a Rancher v2 Cluster resource. This can be used to create Clusters for Rancher v2 environments and retrieve their information.

    Example Usage

    Note on optional/computed arguments: if any optional/computed argument of this resource is defined by the user, removing it from the tf file will NOT reset its value. To reset it, keep its definition in the tf file as an empty/false object, e.g. enable_cluster_monitoring = false, cloud_provider {}, name = ""

    Creating Rancher v2 imported cluster

    import * as pulumi from "@pulumi/pulumi";
    import * as rancher2 from "@pulumi/rancher2";
    
    // Register an imported cluster with Rancher v2; only metadata is
    // supplied, no provisioning configuration.
    const fooImported = new rancher2.Cluster("foo-imported", {
        name: "foo-imported",
        description: "Foo rancher2 imported cluster",
    });
    
    import pulumi
    import pulumi_rancher2 as rancher2
    
    # Register an imported cluster with Rancher v2; only metadata is
    # supplied, no provisioning configuration.
    imported_cluster = rancher2.Cluster(
        "foo-imported",
        name="foo-imported",
        description="Foo rancher2 imported cluster",
    )
    
    package main
    
    import (
    	"github.com/pulumi/pulumi-rancher2/sdk/v6/go/rancher2"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		// Create a new rancher2 imported Cluster
    		_, err := rancher2.NewCluster(ctx, "foo-imported", &rancher2.ClusterArgs{
    			Name:        pulumi.String("foo-imported"),
    			Description: pulumi.String("Foo rancher2 imported cluster"),
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using Rancher2 = Pulumi.Rancher2;
    
    return await Deployment.RunAsync(() =>
    {
        // Register an imported cluster with Rancher v2; only metadata is
        // supplied, no provisioning configuration.
        var importedCluster = new Rancher2.Cluster("foo-imported", new()
        {
            Name = "foo-imported",
            Description = "Foo rancher2 imported cluster",
        });
    });
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.rancher2.Cluster;
    import com.pulumi.rancher2.ClusterArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            // Register an imported cluster with Rancher v2; only metadata
            // is supplied, no provisioning configuration.
            var importedCluster = new Cluster("foo-imported",
                ClusterArgs.builder()
                    .name("foo-imported")
                    .description("Foo rancher2 imported cluster")
                    .build());
        }
    }
    
    resources:
      # Create a new rancher2 imported Cluster.
      # Only metadata (name, description) is supplied — imported clusters
      # carry no provisioning configuration such as rkeConfig.
      foo-imported:
        type: rancher2:Cluster
        properties:
          name: foo-imported
          description: Foo rancher2 imported cluster
    

    Creating Rancher v2 RKE cluster

    Creating Rancher v2 RKE cluster enabling and customizing monitoring

    Note: Cluster monitoring version 0.2.0 and above can't be enabled until the cluster is fully deployed, as a kubeVersion requirement has been introduced to the helm chart.

    import * as pulumi from "@pulumi/pulumi";
    import * as rancher2 from "@pulumi/rancher2";
    
    // Literal helm chart values for the cluster-monitoring app; the keys
    // keep the chart's dotted form.
    const monitoringAnswers = {
        "exporter-kubelets.https": true,
        "exporter-node.enabled": true,
        "exporter-node.ports.metrics.port": 9796,
        "exporter-node.resources.limits.cpu": "200m",
        "exporter-node.resources.limits.memory": "200Mi",
        "grafana.persistence.enabled": false,
        "grafana.persistence.size": "10Gi",
        "grafana.persistence.storageClass": "default",
        "operator.resources.limits.memory": "500Mi",
        "prometheus.persistence.enabled": "false",
        "prometheus.persistence.size": "50Gi",
        "prometheus.persistence.storageClass": "default",
        "prometheus.persistent.useReleaseName": "true",
        "prometheus.resources.core.limits.cpu": "1000m",
        "prometheus.resources.core.limits.memory": "1500Mi",
        "prometheus.resources.core.requests.cpu": "750m",
        "prometheus.resources.core.requests.memory": "750Mi",
        "prometheus.retention": "12h",
    };
    
    // Provision a custom RKE cluster using the canal network plugin, with
    // cluster monitoring enabled and customized.
    const fooCustom = new rancher2.Cluster("foo-custom", {
        name: "foo-custom",
        description: "Foo rancher2 custom cluster",
        rkeConfig: {
            network: {
                plugin: "canal",
            },
        },
        enableClusterMonitoring: true,
        clusterMonitoringInput: {
            answers: monitoringAnswers,
            version: "0.1.0",
        },
    });
    
    import pulumi
    import pulumi_rancher2 as rancher2
    
    # Create a new rancher2 RKE Cluster with monitoring enabled/customized.
    # NOTE: the "answers" entries are literal helm chart values, not SDK
    # arguments, so their keys must keep the chart's dotted form
    # (e.g. "exporter-kubelets.https") — they must not be snake_cased.
    foo_custom = rancher2.Cluster("foo-custom",
        name="foo-custom",
        description="Foo rancher2 custom cluster",
        rke_config={
            "network": {
                "plugin": "canal",
            },
        },
        enable_cluster_monitoring=True,
        cluster_monitoring_input={
            "answers": {
                "exporter-kubelets.https": True,
                "exporter-node.enabled": True,
                "exporter-node.ports.metrics.port": 9796,
                "exporter-node.resources.limits.cpu": "200m",
                "exporter-node.resources.limits.memory": "200Mi",
                "grafana.persistence.enabled": False,
                "grafana.persistence.size": "10Gi",
                "grafana.persistence.storageClass": "default",
                "operator.resources.limits.memory": "500Mi",
                "prometheus.persistence.enabled": "false",
                "prometheus.persistence.size": "50Gi",
                "prometheus.persistence.storageClass": "default",
                "prometheus.persistent.useReleaseName": "true",
                "prometheus.resources.core.limits.cpu": "1000m",
                "prometheus.resources.core.limits.memory": "1500Mi",
                "prometheus.resources.core.requests.cpu": "750m",
                "prometheus.resources.core.requests.memory": "750Mi",
                "prometheus.retention": "12h",
            },
            "version": "0.1.0",
        })
    
    package main
    
    import (
    	"github.com/pulumi/pulumi-rancher2/sdk/v6/go/rancher2"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		// Create a new rancher2 RKE Cluster
    		_, err := rancher2.NewCluster(ctx, "foo-custom", &rancher2.ClusterArgs{
    			Name:        pulumi.String("foo-custom"),
    			Description: pulumi.String("Foo rancher2 custom cluster"),
    			RkeConfig: &rancher2.ClusterRkeConfigArgs{
    				Network: &rancher2.ClusterRkeConfigNetworkArgs{
    					Plugin: pulumi.String("canal"),
    				},
    			},
    			EnableClusterMonitoring: pulumi.Bool(true),
    			ClusterMonitoringInput: &rancher2.ClusterClusterMonitoringInputArgs{
    				Answers: pulumi.Map{
    					"exporter-kubelets.https":                   pulumi.Any(true),
    					"exporter-node.enabled":                     pulumi.Any(true),
    					"exporter-node.ports.metrics.port":          pulumi.Any(9796),
    					"exporter-node.resources.limits.cpu":        pulumi.Any("200m"),
    					"exporter-node.resources.limits.memory":     pulumi.Any("200Mi"),
    					"grafana.persistence.enabled":               pulumi.Any(false),
    					"grafana.persistence.size":                  pulumi.Any("10Gi"),
    					"grafana.persistence.storageClass":          pulumi.Any("default"),
    					"operator.resources.limits.memory":          pulumi.Any("500Mi"),
    					"prometheus.persistence.enabled":            pulumi.Any("false"),
    					"prometheus.persistence.size":               pulumi.Any("50Gi"),
    					"prometheus.persistence.storageClass":       pulumi.Any("default"),
    					"prometheus.persistent.useReleaseName":      pulumi.Any("true"),
    					"prometheus.resources.core.limits.cpu":      pulumi.Any("1000m"),
    					"prometheus.resources.core.limits.memory":   pulumi.Any("1500Mi"),
    					"prometheus.resources.core.requests.cpu":    pulumi.Any("750m"),
    					"prometheus.resources.core.requests.memory": pulumi.Any("750Mi"),
    					"prometheus.retention":                      pulumi.Any("12h"),
    				},
    				Version: pulumi.String("0.1.0"),
    			},
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using Rancher2 = Pulumi.Rancher2;
    
    return await Deployment.RunAsync(() =>
    {
        // Provision a custom RKE cluster using the canal network plugin,
        // with cluster monitoring enabled and customized. The Answers map
        // holds literal helm chart values; keys keep the chart's dotted form.
        var fooCustom = new Rancher2.Cluster("foo-custom", new()
        {
            Name = "foo-custom",
            Description = "Foo rancher2 custom cluster",
            RkeConfig = new Rancher2.Inputs.ClusterRkeConfigArgs
            {
                Network = new Rancher2.Inputs.ClusterRkeConfigNetworkArgs
                {
                    Plugin = "canal",
                },
            },
            EnableClusterMonitoring = true,
            ClusterMonitoringInput = new Rancher2.Inputs.ClusterClusterMonitoringInputArgs
            {
                Answers =
                {
                    { "exporter-kubelets.https", true },
                    { "exporter-node.enabled", true },
                    { "exporter-node.ports.metrics.port", 9796 },
                    { "exporter-node.resources.limits.cpu", "200m" },
                    { "exporter-node.resources.limits.memory", "200Mi" },
                    { "grafana.persistence.enabled", false },
                    { "grafana.persistence.size", "10Gi" },
                    { "grafana.persistence.storageClass", "default" },
                    { "operator.resources.limits.memory", "500Mi" },
                    { "prometheus.persistence.enabled", "false" },
                    { "prometheus.persistence.size", "50Gi" },
                    { "prometheus.persistence.storageClass", "default" },
                    { "prometheus.persistent.useReleaseName", "true" },
                    { "prometheus.resources.core.limits.cpu", "1000m" },
                    { "prometheus.resources.core.limits.memory", "1500Mi" },
                    { "prometheus.resources.core.requests.cpu", "750m" },
                    { "prometheus.resources.core.requests.memory", "750Mi" },
                    { "prometheus.retention", "12h" },
                },
                Version = "0.1.0",
            },
        });
    });
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.rancher2.Cluster;
    import com.pulumi.rancher2.ClusterArgs;
    import com.pulumi.rancher2.inputs.ClusterRkeConfigArgs;
    import com.pulumi.rancher2.inputs.ClusterRkeConfigNetworkArgs;
    import com.pulumi.rancher2.inputs.ClusterClusterMonitoringInputArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            // Literal helm chart values for the cluster-monitoring app;
            // the keys keep the chart's dotted form.
            var monitoringAnswers = Map.ofEntries(
                Map.entry("exporter-kubelets.https", true),
                Map.entry("exporter-node.enabled", true),
                Map.entry("exporter-node.ports.metrics.port", 9796),
                Map.entry("exporter-node.resources.limits.cpu", "200m"),
                Map.entry("exporter-node.resources.limits.memory", "200Mi"),
                Map.entry("grafana.persistence.enabled", false),
                Map.entry("grafana.persistence.size", "10Gi"),
                Map.entry("grafana.persistence.storageClass", "default"),
                Map.entry("operator.resources.limits.memory", "500Mi"),
                Map.entry("prometheus.persistence.enabled", "false"),
                Map.entry("prometheus.persistence.size", "50Gi"),
                Map.entry("prometheus.persistence.storageClass", "default"),
                Map.entry("prometheus.persistent.useReleaseName", "true"),
                Map.entry("prometheus.resources.core.limits.cpu", "1000m"),
                Map.entry("prometheus.resources.core.limits.memory", "1500Mi"),
                Map.entry("prometheus.resources.core.requests.cpu", "750m"),
                Map.entry("prometheus.resources.core.requests.memory", "750Mi"),
                Map.entry("prometheus.retention", "12h")
            );
    
            // Provision a custom RKE cluster using the canal network
            // plugin, with cluster monitoring enabled and customized.
            var fooCustom = new Cluster("foo-custom", ClusterArgs.builder()
                .name("foo-custom")
                .description("Foo rancher2 custom cluster")
                .rkeConfig(ClusterRkeConfigArgs.builder()
                    .network(ClusterRkeConfigNetworkArgs.builder()
                        .plugin("canal")
                        .build())
                    .build())
                .enableClusterMonitoring(true)
                .clusterMonitoringInput(ClusterClusterMonitoringInputArgs.builder()
                    .answers(monitoringAnswers)
                    .version("0.1.0")
                    .build())
                .build());
        }
    }
    
    resources:
      # Create a new rancher2 RKE Cluster with monitoring enabled/customized.
      foo-custom:
        type: rancher2:Cluster
        properties:
          name: foo-custom
          description: Foo rancher2 custom cluster
          rkeConfig:
            network:
              plugin: canal
          enableClusterMonitoring: true
          clusterMonitoringInput:
            # Literal helm chart values for the monitoring app; the keys
            # keep the chart's dotted form.
            answers:
              exporter-kubelets.https: true
              exporter-node.enabled: true
              exporter-node.ports.metrics.port: 9796
              exporter-node.resources.limits.cpu: 200m
              exporter-node.resources.limits.memory: 200Mi
              grafana.persistence.enabled: false
              grafana.persistence.size: 10Gi
              grafana.persistence.storageClass: default
              operator.resources.limits.memory: 500Mi
              prometheus.persistence.enabled: 'false'
              prometheus.persistence.size: 50Gi
              prometheus.persistence.storageClass: default
              prometheus.persistent.useReleaseName: 'true'
              prometheus.resources.core.limits.cpu: 1000m
              prometheus.resources.core.limits.memory: 1500Mi
              prometheus.resources.core.requests.cpu: 750m
              prometheus.resources.core.requests.memory: 750Mi
              prometheus.retention: 12h
            version: 0.1.0
    

    Creating Rancher v2 RKE cluster enabling/customizing monitoring and istio

    import * as pulumi from "@pulumi/pulumi";
    import * as rancher2 from "@pulumi/rancher2";
    
    // Create a new rancher2 RKE Cluster.
    // The monitoring "answers" are literal helm chart values; keys keep the
    // chart's dotted form.
    const foo_custom = new rancher2.Cluster("foo-custom", {
        name: "foo-custom",
        description: "Foo rancher2 custom cluster",
        rkeConfig: {
            network: {
                plugin: "canal",
            },
        },
        enableClusterMonitoring: true,
        clusterMonitoringInput: {
            answers: {
                "exporter-kubelets.https": true,
                "exporter-node.enabled": true,
                "exporter-node.ports.metrics.port": 9796,
                "exporter-node.resources.limits.cpu": "200m",
                "exporter-node.resources.limits.memory": "200Mi",
                "grafana.persistence.enabled": false,
                "grafana.persistence.size": "10Gi",
                "grafana.persistence.storageClass": "default",
                "operator.resources.limits.memory": "500Mi",
                "prometheus.persistence.enabled": "false",
                "prometheus.persistence.size": "50Gi",
                "prometheus.persistence.storageClass": "default",
                "prometheus.persistent.useReleaseName": "true",
                "prometheus.resources.core.limits.cpu": "1000m",
                "prometheus.resources.core.limits.memory": "1500Mi",
                "prometheus.resources.core.requests.cpu": "750m",
                "prometheus.resources.core.requests.memory": "750Mi",
                "prometheus.retention": "12h",
            },
            version: "0.1.0",
        },
    });
    // Create a new rancher2 Cluster Sync for foo-custom cluster.
    // waitMonitoring makes dependents wait until monitoring is up.
    const foo_customClusterSync = new rancher2.ClusterSync("foo-custom", {
        clusterId: foo_custom.id,
        waitMonitoring: foo_custom.enableClusterMonitoring,
    });
    // Create a new rancher2 Namespace
    const foo_istio = new rancher2.Namespace("foo-istio", {
        name: "istio-system",
        projectId: foo_customClusterSync.systemProjectId,
        description: "istio namespace",
    });
    // Create a new rancher2 App deploying istio (should wait until monitoring is up and running)
    const istio = new rancher2.App("istio", {
        catalogName: "system-library",
        name: "cluster-istio",
        description: "Terraform app acceptance test",
        projectId: foo_istio.projectId,
        templateName: "rancher-istio",
        templateVersion: "0.1.1",
        targetNamespace: foo_istio.id,
        answers: {
            "certmanager.enabled": false,
            enableCRDs: true,
            "galley.enabled": true,
            "gateways.enabled": false,
            "gateways.istio-ingressgateway.resources.limits.cpu": "2000m",
            "gateways.istio-ingressgateway.resources.limits.memory": "1024Mi",
            "gateways.istio-ingressgateway.resources.requests.cpu": "100m",
            "gateways.istio-ingressgateway.resources.requests.memory": "128Mi",
            "gateways.istio-ingressgateway.type": "NodePort",
            "global.monitoring.type": "cluster-monitoring",
            "global.rancher.clusterId": foo_customClusterSync.clusterId,
            "istio_cni.enabled": "false",
            "istiocoredns.enabled": "false",
            "kiali.enabled": "true",
            "mixer.enabled": "true",
            "mixer.policy.enabled": "true",
            "mixer.policy.resources.limits.cpu": "4800m",
            "mixer.policy.resources.limits.memory": "4096Mi",
            "mixer.policy.resources.requests.cpu": "1000m",
            "mixer.policy.resources.requests.memory": "1024Mi",
            "mixer.telemetry.resources.limits.cpu": "4800m",
            "mixer.telemetry.resources.limits.memory": "4096Mi",
            "mixer.telemetry.resources.requests.cpu": "1000m",
            "mixer.telemetry.resources.requests.memory": "1024Mi",
            "mtls.enabled": false,
            "nodeagent.enabled": false,
            "pilot.enabled": true,
            "pilot.resources.limits.cpu": "1000m",
            "pilot.resources.limits.memory": "4096Mi",
            "pilot.resources.requests.cpu": "500m",
            "pilot.resources.requests.memory": "2048Mi",
            "pilot.traceSampling": "1",
            "security.enabled": true,
            "sidecarInjectorWebhook.enabled": true,
            "tracing.enabled": true,
            "tracing.jaeger.resources.limits.cpu": "500m",
            "tracing.jaeger.resources.limits.memory": "1024Mi",
            "tracing.jaeger.resources.requests.cpu": "100m",
            "tracing.jaeger.resources.requests.memory": "100Mi",
        },
    });
    
    import pulumi
    import pulumi_rancher2 as rancher2
    
    # Create a new rancher2 RKE Cluster.
    # NOTE: the monitoring "answers" entries are literal helm chart values,
    # not SDK arguments, so their keys must keep the chart's dotted form
    # (matching the istio app answers below) — they must not be snake_cased.
    foo_custom = rancher2.Cluster("foo-custom",
        name="foo-custom",
        description="Foo rancher2 custom cluster",
        rke_config={
            "network": {
                "plugin": "canal",
            },
        },
        enable_cluster_monitoring=True,
        cluster_monitoring_input={
            "answers": {
                "exporter-kubelets.https": True,
                "exporter-node.enabled": True,
                "exporter-node.ports.metrics.port": 9796,
                "exporter-node.resources.limits.cpu": "200m",
                "exporter-node.resources.limits.memory": "200Mi",
                "grafana.persistence.enabled": False,
                "grafana.persistence.size": "10Gi",
                "grafana.persistence.storageClass": "default",
                "operator.resources.limits.memory": "500Mi",
                "prometheus.persistence.enabled": "false",
                "prometheus.persistence.size": "50Gi",
                "prometheus.persistence.storageClass": "default",
                "prometheus.persistent.useReleaseName": "true",
                "prometheus.resources.core.limits.cpu": "1000m",
                "prometheus.resources.core.limits.memory": "1500Mi",
                "prometheus.resources.core.requests.cpu": "750m",
                "prometheus.resources.core.requests.memory": "750Mi",
                "prometheus.retention": "12h",
            },
            "version": "0.1.0",
        })
    # Create a new rancher2 Cluster Sync for foo-custom cluster.
    # wait_monitoring makes dependents wait until monitoring is up.
    foo_custom_cluster_sync = rancher2.ClusterSync("foo-custom",
        cluster_id=foo_custom.id,
        wait_monitoring=foo_custom.enable_cluster_monitoring)
    # Create a new rancher2 Namespace
    foo_istio = rancher2.Namespace("foo-istio",
        name="istio-system",
        project_id=foo_custom_cluster_sync.system_project_id,
        description="istio namespace")
    # Create a new rancher2 App deploying istio (should wait until monitoring is up and running)
    istio = rancher2.App("istio",
        catalog_name="system-library",
        name="cluster-istio",
        description="Terraform app acceptance test",
        project_id=foo_istio.project_id,
        template_name="rancher-istio",
        template_version="0.1.1",
        target_namespace=foo_istio.id,
        answers={
            "certmanager.enabled": False,
            "enableCRDs": True,
            "galley.enabled": True,
            "gateways.enabled": False,
            "gateways.istio-ingressgateway.resources.limits.cpu": "2000m",
            "gateways.istio-ingressgateway.resources.limits.memory": "1024Mi",
            "gateways.istio-ingressgateway.resources.requests.cpu": "100m",
            "gateways.istio-ingressgateway.resources.requests.memory": "128Mi",
            "gateways.istio-ingressgateway.type": "NodePort",
            "global.monitoring.type": "cluster-monitoring",
            "global.rancher.clusterId": foo_custom_cluster_sync.cluster_id,
            "istio_cni.enabled": "false",
            "istiocoredns.enabled": "false",
            "kiali.enabled": "true",
            "mixer.enabled": "true",
            "mixer.policy.enabled": "true",
            "mixer.policy.resources.limits.cpu": "4800m",
            "mixer.policy.resources.limits.memory": "4096Mi",
            "mixer.policy.resources.requests.cpu": "1000m",
            "mixer.policy.resources.requests.memory": "1024Mi",
            "mixer.telemetry.resources.limits.cpu": "4800m",
            "mixer.telemetry.resources.limits.memory": "4096Mi",
            "mixer.telemetry.resources.requests.cpu": "1000m",
            "mixer.telemetry.resources.requests.memory": "1024Mi",
            "mtls.enabled": False,
            "nodeagent.enabled": False,
            "pilot.enabled": True,
            "pilot.resources.limits.cpu": "1000m",
            "pilot.resources.limits.memory": "4096Mi",
            "pilot.resources.requests.cpu": "500m",
            "pilot.resources.requests.memory": "2048Mi",
            "pilot.traceSampling": "1",
            "security.enabled": True,
            "sidecarInjectorWebhook.enabled": True,
            "tracing.enabled": True,
            "tracing.jaeger.resources.limits.cpu": "500m",
            "tracing.jaeger.resources.limits.memory": "1024Mi",
            "tracing.jaeger.resources.requests.cpu": "100m",
            "tracing.jaeger.resources.requests.memory": "100Mi",
        })
    
    package main
    
    import (
    	"github.com/pulumi/pulumi-rancher2/sdk/v6/go/rancher2"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		// Create a new rancher2 RKE Cluster
    		_, err := rancher2.NewCluster(ctx, "foo-custom", &rancher2.ClusterArgs{
    			Name:        pulumi.String("foo-custom"),
    			Description: pulumi.String("Foo rancher2 custom cluster"),
    			RkeConfig: &rancher2.ClusterRkeConfigArgs{
    				Network: &rancher2.ClusterRkeConfigNetworkArgs{
    					Plugin: pulumi.String("canal"),
    				},
    			},
    			EnableClusterMonitoring: pulumi.Bool(true),
    			ClusterMonitoringInput: &rancher2.ClusterClusterMonitoringInputArgs{
    				Answers: pulumi.Map{
    					"exporter-kubelets.https":                   pulumi.Any(true),
    					"exporter-node.enabled":                     pulumi.Any(true),
    					"exporter-node.ports.metrics.port":          pulumi.Any(9796),
    					"exporter-node.resources.limits.cpu":        pulumi.Any("200m"),
    					"exporter-node.resources.limits.memory":     pulumi.Any("200Mi"),
    					"grafana.persistence.enabled":               pulumi.Any(false),
    					"grafana.persistence.size":                  pulumi.Any("10Gi"),
    					"grafana.persistence.storageClass":          pulumi.Any("default"),
    					"operator.resources.limits.memory":          pulumi.Any("500Mi"),
    					"prometheus.persistence.enabled":            pulumi.Any("false"),
    					"prometheus.persistence.size":               pulumi.Any("50Gi"),
    					"prometheus.persistence.storageClass":       pulumi.Any("default"),
    					"prometheus.persistent.useReleaseName":      pulumi.Any("true"),
    					"prometheus.resources.core.limits.cpu":      pulumi.Any("1000m"),
    					"prometheus.resources.core.limits.memory":   pulumi.Any("1500Mi"),
    					"prometheus.resources.core.requests.cpu":    pulumi.Any("750m"),
    					"prometheus.resources.core.requests.memory": pulumi.Any("750Mi"),
    					"prometheus.retention":                      pulumi.Any("12h"),
    				},
    				Version: pulumi.String("0.1.0"),
    			},
    		})
    		if err != nil {
    			return err
    		}
    		// Create a new rancher2 Cluster Sync for foo-custom cluster
    		_, err = rancher2.NewClusterSync(ctx, "foo-custom", &rancher2.ClusterSyncArgs{
    			ClusterId:      foo_custom.ID(),
    			WaitMonitoring: foo_custom.EnableClusterMonitoring,
    		})
    		if err != nil {
    			return err
    		}
    		// Create a new rancher2 Namespace
    		_, err = rancher2.NewNamespace(ctx, "foo-istio", &rancher2.NamespaceArgs{
    			Name:        pulumi.String("istio-system"),
    			ProjectId:   foo_customClusterSync.SystemProjectId,
    			Description: pulumi.String("istio namespace"),
    		})
    		if err != nil {
    			return err
    		}
    		// Create a new rancher2 App deploying istio (should wait until monitoring is up and running)
    		_, err = rancher2.NewApp(ctx, "istio", &rancher2.AppArgs{
    			CatalogName:     pulumi.String("system-library"),
    			Name:            pulumi.String("cluster-istio"),
    			Description:     pulumi.String("Terraform app acceptance test"),
    			ProjectId:       foo_istio.ProjectId,
    			TemplateName:    pulumi.String("rancher-istio"),
    			TemplateVersion: pulumi.String("0.1.1"),
    			TargetNamespace: foo_istio.ID(),
    			Answers: pulumi.Map{
    				"certmanager.enabled": pulumi.Any(false),
    				"enableCRDs":          pulumi.Any(true),
    				"galley.enabled":      pulumi.Any(true),
    				"gateways.enabled":    pulumi.Any(false),
    				"gateways.istio-ingressgateway.resources.limits.cpu":      pulumi.Any("2000m"),
    				"gateways.istio-ingressgateway.resources.limits.memory":   pulumi.Any("1024Mi"),
    				"gateways.istio-ingressgateway.resources.requests.cpu":    pulumi.Any("100m"),
    				"gateways.istio-ingressgateway.resources.requests.memory": pulumi.Any("128Mi"),
    				"gateways.istio-ingressgateway.type":                      pulumi.Any("NodePort"),
    				"global.monitoring.type":                                  pulumi.Any("cluster-monitoring"),
    				"global.rancher.clusterId":                                foo_customClusterSync.ClusterId,
    				"istio_cni.enabled":                                       pulumi.Any("false"),
    				"istiocoredns.enabled":                                    pulumi.Any("false"),
    				"kiali.enabled":                                           pulumi.Any("true"),
    				"mixer.enabled":                                           pulumi.Any("true"),
    				"mixer.policy.enabled":                                    pulumi.Any("true"),
    				"mixer.policy.resources.limits.cpu":                       pulumi.Any("4800m"),
    				"mixer.policy.resources.limits.memory":                    pulumi.Any("4096Mi"),
    				"mixer.policy.resources.requests.cpu":                     pulumi.Any("1000m"),
    				"mixer.policy.resources.requests.memory":                  pulumi.Any("1024Mi"),
    				"mixer.telemetry.resources.limits.cpu":                    pulumi.Any("4800m"),
    				"mixer.telemetry.resources.limits.memory":                 pulumi.Any("4096Mi"),
    				"mixer.telemetry.resources.requests.cpu":                  pulumi.Any("1000m"),
    				"mixer.telemetry.resources.requests.memory":               pulumi.Any("1024Mi"),
    				"mtls.enabled":                                            pulumi.Any(false),
    				"nodeagent.enabled":                                       pulumi.Any(false),
    				"pilot.enabled":                                           pulumi.Any(true),
    				"pilot.resources.limits.cpu":                              pulumi.Any("1000m"),
    				"pilot.resources.limits.memory":                           pulumi.Any("4096Mi"),
    				"pilot.resources.requests.cpu":                            pulumi.Any("500m"),
    				"pilot.resources.requests.memory":                         pulumi.Any("2048Mi"),
    				"pilot.traceSampling":                                     pulumi.Any("1"),
    				"security.enabled":                                        pulumi.Any(true),
    				"sidecarInjectorWebhook.enabled":                          pulumi.Any(true),
    				"tracing.enabled":                                         pulumi.Any(true),
    				"tracing.jaeger.resources.limits.cpu":                     pulumi.Any("500m"),
    				"tracing.jaeger.resources.limits.memory":                  pulumi.Any("1024Mi"),
    				"tracing.jaeger.resources.requests.cpu":                   pulumi.Any("100m"),
    				"tracing.jaeger.resources.requests.memory":                pulumi.Any("100Mi"),
    			},
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using Rancher2 = Pulumi.Rancher2;
    
    return await Deployment.RunAsync(() => 
    {
        // Create a new rancher2 RKE Cluster
        var foo_custom = new Rancher2.Cluster("foo-custom", new()
        {
            Name = "foo-custom",
            Description = "Foo rancher2 custom cluster",
            RkeConfig = new Rancher2.Inputs.ClusterRkeConfigArgs
            {
                Network = new Rancher2.Inputs.ClusterRkeConfigNetworkArgs
                {
                    Plugin = "canal",
                },
            },
            EnableClusterMonitoring = true,
            // Answers below are forwarded to the cluster-monitoring helm chart
            // (chart version 0.1.0); quoted values such as "false" are
            // deliberately passed as strings rather than booleans.
            ClusterMonitoringInput = new Rancher2.Inputs.ClusterClusterMonitoringInputArgs
            {
                Answers = 
                {
                    { "exporter-kubelets.https", true },
                    { "exporter-node.enabled", true },
                    { "exporter-node.ports.metrics.port", 9796 },
                    { "exporter-node.resources.limits.cpu", "200m" },
                    { "exporter-node.resources.limits.memory", "200Mi" },
                    { "grafana.persistence.enabled", false },
                    { "grafana.persistence.size", "10Gi" },
                    { "grafana.persistence.storageClass", "default" },
                    { "operator.resources.limits.memory", "500Mi" },
                    { "prometheus.persistence.enabled", "false" },
                    { "prometheus.persistence.size", "50Gi" },
                    { "prometheus.persistence.storageClass", "default" },
                    { "prometheus.persistent.useReleaseName", "true" },
                    { "prometheus.resources.core.limits.cpu", "1000m" },
                    { "prometheus.resources.core.limits.memory", "1500Mi" },
                    { "prometheus.resources.core.requests.cpu", "750m" },
                    { "prometheus.resources.core.requests.memory", "750Mi" },
                    { "prometheus.retention", "12h" },
                },
                Version = "0.1.0",
            },
        });
    
        // Create a new rancher2 Cluster Sync for foo-custom cluster
        // WaitMonitoring ties this resource's readiness to the cluster's
        // monitoring deployment coming up.
        var foo_customClusterSync = new Rancher2.ClusterSync("foo-custom", new()
        {
            ClusterId = foo_custom.Id,
            WaitMonitoring = foo_custom.EnableClusterMonitoring,
        });
    
        // Create a new rancher2 Namespace
        var foo_istio = new Rancher2.Namespace("foo-istio", new()
        {
            Name = "istio-system",
            ProjectId = foo_customClusterSync.SystemProjectId,
            Description = "istio namespace",
        });
    
        // Create a new rancher2 App deploying istio (should wait until monitoring is up and running)
        // ProjectId comes from the namespace, which itself depends on the
        // cluster sync, so the app is only deployed after monitoring is ready.
        var istio = new Rancher2.App("istio", new()
        {
            CatalogName = "system-library",
            Name = "cluster-istio",
            Description = "Terraform app acceptance test",
            ProjectId = foo_istio.ProjectId,
            TemplateName = "rancher-istio",
            TemplateVersion = "0.1.1",
            TargetNamespace = foo_istio.Id,
            Answers = 
            {
                { "certmanager.enabled", false },
                { "enableCRDs", true },
                { "galley.enabled", true },
                { "gateways.enabled", false },
                { "gateways.istio-ingressgateway.resources.limits.cpu", "2000m" },
                { "gateways.istio-ingressgateway.resources.limits.memory", "1024Mi" },
                { "gateways.istio-ingressgateway.resources.requests.cpu", "100m" },
                { "gateways.istio-ingressgateway.resources.requests.memory", "128Mi" },
                { "gateways.istio-ingressgateway.type", "NodePort" },
                { "global.monitoring.type", "cluster-monitoring" },
                { "global.rancher.clusterId", foo_customClusterSync.ClusterId },
                { "istio_cni.enabled", "false" },
                { "istiocoredns.enabled", "false" },
                { "kiali.enabled", "true" },
                { "mixer.enabled", "true" },
                { "mixer.policy.enabled", "true" },
                { "mixer.policy.resources.limits.cpu", "4800m" },
                { "mixer.policy.resources.limits.memory", "4096Mi" },
                { "mixer.policy.resources.requests.cpu", "1000m" },
                { "mixer.policy.resources.requests.memory", "1024Mi" },
                { "mixer.telemetry.resources.limits.cpu", "4800m" },
                { "mixer.telemetry.resources.limits.memory", "4096Mi" },
                { "mixer.telemetry.resources.requests.cpu", "1000m" },
                { "mixer.telemetry.resources.requests.memory", "1024Mi" },
                { "mtls.enabled", false },
                { "nodeagent.enabled", false },
                { "pilot.enabled", true },
                { "pilot.resources.limits.cpu", "1000m" },
                { "pilot.resources.limits.memory", "4096Mi" },
                { "pilot.resources.requests.cpu", "500m" },
                { "pilot.resources.requests.memory", "2048Mi" },
                { "pilot.traceSampling", "1" },
                { "security.enabled", true },
                { "sidecarInjectorWebhook.enabled", true },
                { "tracing.enabled", true },
                { "tracing.jaeger.resources.limits.cpu", "500m" },
                { "tracing.jaeger.resources.limits.memory", "1024Mi" },
                { "tracing.jaeger.resources.requests.cpu", "100m" },
                { "tracing.jaeger.resources.requests.memory", "100Mi" },
            },
        });
    
    });
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.rancher2.Cluster;
    import com.pulumi.rancher2.ClusterArgs;
    import com.pulumi.rancher2.inputs.ClusterRkeConfigArgs;
    import com.pulumi.rancher2.inputs.ClusterRkeConfigNetworkArgs;
    import com.pulumi.rancher2.inputs.ClusterClusterMonitoringInputArgs;
    import com.pulumi.rancher2.ClusterSync;
    import com.pulumi.rancher2.ClusterSyncArgs;
    import com.pulumi.rancher2.Namespace;
    import com.pulumi.rancher2.NamespaceArgs;
    // NOTE: com.pulumi.rancher2.App is intentionally NOT single-type imported.
    // Importing it would clash with the top-level class `App` declared in this
    // file, which is a compile-time error (JLS 7.5.1), and `new App(...)` would
    // otherwise resolve to this class. The resource type is referenced by its
    // fully qualified name below instead.
    import com.pulumi.rancher2.AppArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            // Create a new rancher2 RKE Cluster
            var foo_custom = new Cluster("foo-custom", ClusterArgs.builder()
                .name("foo-custom")
                .description("Foo rancher2 custom cluster")
                .rkeConfig(ClusterRkeConfigArgs.builder()
                    .network(ClusterRkeConfigNetworkArgs.builder()
                        .plugin("canal")
                        .build())
                    .build())
                .enableClusterMonitoring(true)
                // Answers are forwarded to the cluster-monitoring helm chart
                // (chart version 0.1.0).
                .clusterMonitoringInput(ClusterClusterMonitoringInputArgs.builder()
                    .answers(Map.ofEntries(
                        Map.entry("exporter-kubelets.https", true),
                        Map.entry("exporter-node.enabled", true),
                        Map.entry("exporter-node.ports.metrics.port", 9796),
                        Map.entry("exporter-node.resources.limits.cpu", "200m"),
                        Map.entry("exporter-node.resources.limits.memory", "200Mi"),
                        Map.entry("grafana.persistence.enabled", false),
                        Map.entry("grafana.persistence.size", "10Gi"),
                        Map.entry("grafana.persistence.storageClass", "default"),
                        Map.entry("operator.resources.limits.memory", "500Mi"),
                        Map.entry("prometheus.persistence.enabled", "false"),
                        Map.entry("prometheus.persistence.size", "50Gi"),
                        Map.entry("prometheus.persistence.storageClass", "default"),
                        Map.entry("prometheus.persistent.useReleaseName", "true"),
                        Map.entry("prometheus.resources.core.limits.cpu", "1000m"),
                        Map.entry("prometheus.resources.core.limits.memory", "1500Mi"),
                        Map.entry("prometheus.resources.core.requests.cpu", "750m"),
                        Map.entry("prometheus.resources.core.requests.memory", "750Mi"),
                        Map.entry("prometheus.retention", "12h")
                    ))
                    .version("0.1.0")
                    .build())
                .build());
    
            // Create a new rancher2 Cluster Sync for foo-custom cluster
            // waitMonitoring ties this resource's readiness to the cluster's
            // monitoring deployment coming up.
            var foo_customClusterSync = new ClusterSync("foo-customClusterSync", ClusterSyncArgs.builder()
                .clusterId(foo_custom.id())
                .waitMonitoring(foo_custom.enableClusterMonitoring())
                .build());
    
            // Create a new rancher2 Namespace
            var foo_istio = new Namespace("foo-istio", NamespaceArgs.builder()
                .name("istio-system")
                .projectId(foo_customClusterSync.systemProjectId())
                .description("istio namespace")
                .build());
    
            // Create a new rancher2 App deploying istio (should wait until monitoring is up and running)
            // Fully qualified to avoid the name clash with this generated class.
            var istio = new com.pulumi.rancher2.App("istio", AppArgs.builder()
                .catalogName("system-library")
                .name("cluster-istio")
                .description("Terraform app acceptance test")
                .projectId(foo_istio.projectId())
                .templateName("rancher-istio")
                .templateVersion("0.1.1")
                .targetNamespace(foo_istio.id())
                .answers(Map.ofEntries(
                    Map.entry("certmanager.enabled", false),
                    Map.entry("enableCRDs", true),
                    Map.entry("galley.enabled", true),
                    Map.entry("gateways.enabled", false),
                    Map.entry("gateways.istio-ingressgateway.resources.limits.cpu", "2000m"),
                    Map.entry("gateways.istio-ingressgateway.resources.limits.memory", "1024Mi"),
                    Map.entry("gateways.istio-ingressgateway.resources.requests.cpu", "100m"),
                    Map.entry("gateways.istio-ingressgateway.resources.requests.memory", "128Mi"),
                    Map.entry("gateways.istio-ingressgateway.type", "NodePort"),
                    Map.entry("global.monitoring.type", "cluster-monitoring"),
                    Map.entry("global.rancher.clusterId", foo_customClusterSync.clusterId()),
                    Map.entry("istio_cni.enabled", "false"),
                    Map.entry("istiocoredns.enabled", "false"),
                    Map.entry("kiali.enabled", "true"),
                    Map.entry("mixer.enabled", "true"),
                    Map.entry("mixer.policy.enabled", "true"),
                    Map.entry("mixer.policy.resources.limits.cpu", "4800m"),
                    Map.entry("mixer.policy.resources.limits.memory", "4096Mi"),
                    Map.entry("mixer.policy.resources.requests.cpu", "1000m"),
                    Map.entry("mixer.policy.resources.requests.memory", "1024Mi"),
                    Map.entry("mixer.telemetry.resources.limits.cpu", "4800m"),
                    Map.entry("mixer.telemetry.resources.limits.memory", "4096Mi"),
                    Map.entry("mixer.telemetry.resources.requests.cpu", "1000m"),
                    Map.entry("mixer.telemetry.resources.requests.memory", "1024Mi"),
                    Map.entry("mtls.enabled", false),
                    Map.entry("nodeagent.enabled", false),
                    Map.entry("pilot.enabled", true),
                    Map.entry("pilot.resources.limits.cpu", "1000m"),
                    Map.entry("pilot.resources.limits.memory", "4096Mi"),
                    Map.entry("pilot.resources.requests.cpu", "500m"),
                    Map.entry("pilot.resources.requests.memory", "2048Mi"),
                    Map.entry("pilot.traceSampling", "1"),
                    Map.entry("security.enabled", true),
                    Map.entry("sidecarInjectorWebhook.enabled", true),
                    Map.entry("tracing.enabled", true),
                    Map.entry("tracing.jaeger.resources.limits.cpu", "500m"),
                    Map.entry("tracing.jaeger.resources.limits.memory", "1024Mi"),
                    Map.entry("tracing.jaeger.resources.requests.cpu", "100m"),
                    Map.entry("tracing.jaeger.resources.requests.memory", "100Mi")
                ))
                .build());
    
        }
    }
    
    resources:
      # Create a new rancher2 RKE Cluster
      foo-custom:
        type: rancher2:Cluster
        properties:
          name: foo-custom
          description: Foo rancher2 custom cluster
          rkeConfig:
            network:
              plugin: canal
          enableClusterMonitoring: true
          # Answers are forwarded to the cluster-monitoring helm chart;
          # quoted values (e.g. 'false') are deliberately passed as strings.
          clusterMonitoringInput:
            answers:
              exporter-kubelets.https: true
              exporter-node.enabled: true
              exporter-node.ports.metrics.port: 9796
              exporter-node.resources.limits.cpu: 200m
              exporter-node.resources.limits.memory: 200Mi
              grafana.persistence.enabled: false
              grafana.persistence.size: 10Gi
              grafana.persistence.storageClass: default
              operator.resources.limits.memory: 500Mi
              prometheus.persistence.enabled: 'false'
              prometheus.persistence.size: 50Gi
              prometheus.persistence.storageClass: default
              prometheus.persistent.useReleaseName: 'true'
              prometheus.resources.core.limits.cpu: 1000m
              prometheus.resources.core.limits.memory: 1500Mi
              prometheus.resources.core.requests.cpu: 750m
              prometheus.resources.core.requests.memory: 750Mi
              prometheus.retention: 12h
            version: 0.1.0
      # Create a new rancher2 Cluster Sync for foo-custom cluster
      # (waitMonitoring ties readiness to the monitoring deployment coming up)
      foo-customClusterSync:
        type: rancher2:ClusterSync
        name: foo-custom
        properties:
          clusterId: ${["foo-custom"].id}
          waitMonitoring: ${["foo-custom"].enableClusterMonitoring}
      # Create a new rancher2 Namespace
      foo-istio:
        type: rancher2:Namespace
        properties:
          name: istio-system
          projectId: ${["foo-customClusterSync"].systemProjectId}
          description: istio namespace
      # Create a new rancher2 App deploying istio (should wait until monitoring is up and running)
      istio:
        type: rancher2:App
        properties:
          catalogName: system-library
          name: cluster-istio
          description: Terraform app acceptance test
          projectId: ${["foo-istio"].projectId}
          templateName: rancher-istio
          templateVersion: 0.1.1
          targetNamespace: ${["foo-istio"].id}
          answers:
            certmanager.enabled: false
            enableCRDs: true
            galley.enabled: true
            gateways.enabled: false
            gateways.istio-ingressgateway.resources.limits.cpu: 2000m
            gateways.istio-ingressgateway.resources.limits.memory: 1024Mi
            gateways.istio-ingressgateway.resources.requests.cpu: 100m
            gateways.istio-ingressgateway.resources.requests.memory: 128Mi
            gateways.istio-ingressgateway.type: NodePort
            global.monitoring.type: cluster-monitoring
            global.rancher.clusterId: ${["foo-customClusterSync"].clusterId}
            istio_cni.enabled: 'false'
            istiocoredns.enabled: 'false'
            kiali.enabled: 'true'
            mixer.enabled: 'true'
            mixer.policy.enabled: 'true'
            mixer.policy.resources.limits.cpu: 4800m
            mixer.policy.resources.limits.memory: 4096Mi
            mixer.policy.resources.requests.cpu: 1000m
            mixer.policy.resources.requests.memory: 1024Mi
            mixer.telemetry.resources.limits.cpu: 4800m
            mixer.telemetry.resources.limits.memory: 4096Mi
            mixer.telemetry.resources.requests.cpu: 1000m
            mixer.telemetry.resources.requests.memory: 1024Mi
            mtls.enabled: false
            nodeagent.enabled: false
            pilot.enabled: true
            pilot.resources.limits.cpu: 1000m
            pilot.resources.limits.memory: 4096Mi
            pilot.resources.requests.cpu: 500m
            pilot.resources.requests.memory: 2048Mi
            pilot.traceSampling: '1'
            security.enabled: true
            sidecarInjectorWebhook.enabled: true
            tracing.enabled: true
            tracing.jaeger.resources.limits.cpu: 500m
            tracing.jaeger.resources.limits.memory: 1024Mi
            tracing.jaeger.resources.requests.cpu: 100m
            tracing.jaeger.resources.requests.memory: 100Mi
    

    Creating Rancher v2 RKE cluster assigning a node pool (overlapped planes)

    import * as pulumi from "@pulumi/pulumi";
    import * as rancher2 from "@pulumi/rancher2";
    
    // Create a new rancher2 RKE Cluster
    const foo_custom = new rancher2.Cluster("foo-custom", {
        name: "foo-custom",
        description: "Foo rancher2 custom cluster",
        rkeConfig: {
            network: {
                plugin: "canal",
            },
        },
    });
    // Create a new rancher2 Node Template
    // (replace the <...> placeholders with real AWS values before deploying)
    const foo = new rancher2.NodeTemplate("foo", {
        name: "foo",
        description: "foo test",
        amazonec2Config: {
            accessKey: "<AWS_ACCESS_KEY>",
            secretKey: "<AWS_SECRET_KEY>",
            ami: "<AMI_ID>",
            region: "<REGION>",
            securityGroups: ["<AWS_SECURITY_GROUP>"],
            subnetId: "<SUBNET_ID>",
            vpcId: "<VPC_ID>",
            zone: "<ZONE>",
        },
    });
    // Create a new rancher2 Node Pool
    // A single pool of 3 nodes carries the control plane, etcd and worker
    // roles (overlapped planes).
    const fooNodePool = new rancher2.NodePool("foo", {
        clusterId: foo_custom.id,
        name: "foo",
        hostnamePrefix: "foo-cluster-0",
        nodeTemplateId: foo.id,
        quantity: 3,
        controlPlane: true,
        etcd: true,
        worker: true,
    });
    
    import pulumi
    import pulumi_rancher2 as rancher2
    
    # Create a new rancher2 RKE Cluster
    foo_custom = rancher2.Cluster("foo-custom",
        name="foo-custom",
        description="Foo rancher2 custom cluster",
        rke_config={
            "network": {
                "plugin": "canal",
            },
        })
    # Create a new rancher2 Node Template
    # (replace the <...> placeholders with real AWS values before deploying)
    foo = rancher2.NodeTemplate("foo",
        name="foo",
        description="foo test",
        amazonec2_config={
            "access_key": "<AWS_ACCESS_KEY>",
            "secret_key": "<AWS_SECRET_KEY>",
            "ami": "<AMI_ID>",
            "region": "<REGION>",
            "security_groups": ["<AWS_SECURITY_GROUP>"],
            "subnet_id": "<SUBNET_ID>",
            "vpc_id": "<VPC_ID>",
            "zone": "<ZONE>",
        })
    # Create a new rancher2 Node Pool
    # A single pool of 3 nodes carries the control plane, etcd and worker
    # roles (overlapped planes).
    foo_node_pool = rancher2.NodePool("foo",
        cluster_id=foo_custom.id,
        name="foo",
        hostname_prefix="foo-cluster-0",
        node_template_id=foo.id,
        quantity=3,
        control_plane=True,
        etcd=True,
        worker=True)
    
    package main
    
    import (
    	"github.com/pulumi/pulumi-rancher2/sdk/v6/go/rancher2"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		// Create a new rancher2 RKE Cluster
    		_, err := rancher2.NewCluster(ctx, "foo-custom", &rancher2.ClusterArgs{
    			Name:        pulumi.String("foo-custom"),
    			Description: pulumi.String("Foo rancher2 custom cluster"),
    			RkeConfig: &rancher2.ClusterRkeConfigArgs{
    				Network: &rancher2.ClusterRkeConfigNetworkArgs{
    					Plugin: pulumi.String("canal"),
    				},
    			},
    		})
    		if err != nil {
    			return err
    		}
    		// Create a new rancher2 Node Template
    		foo, err := rancher2.NewNodeTemplate(ctx, "foo", &rancher2.NodeTemplateArgs{
    			Name:        pulumi.String("foo"),
    			Description: pulumi.String("foo test"),
    			Amazonec2Config: &rancher2.NodeTemplateAmazonec2ConfigArgs{
    				AccessKey: pulumi.String("<AWS_ACCESS_KEY>"),
    				SecretKey: pulumi.String("<AWS_SECRET_KEY>"),
    				Ami:       pulumi.String("<AMI_ID>"),
    				Region:    pulumi.String("<REGION>"),
    				SecurityGroups: pulumi.StringArray{
    					pulumi.String("<AWS_SECURITY_GROUP>"),
    				},
    				SubnetId: pulumi.String("<SUBNET_ID>"),
    				VpcId:    pulumi.String("<VPC_ID>"),
    				Zone:     pulumi.String("<ZONE>"),
    			},
    		})
    		if err != nil {
    			return err
    		}
    		// Create a new rancher2 Node Pool
    		_, err = rancher2.NewNodePool(ctx, "foo", &rancher2.NodePoolArgs{
    			ClusterId:      foo_custom.ID(),
    			Name:           pulumi.String("foo"),
    			HostnamePrefix: pulumi.String("foo-cluster-0"),
    			NodeTemplateId: foo.ID(),
    			Quantity:       pulumi.Int(3),
    			ControlPlane:   pulumi.Bool(true),
    			Etcd:           pulumi.Bool(true),
    			Worker:         pulumi.Bool(true),
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using Rancher2 = Pulumi.Rancher2;
    
    return await Deployment.RunAsync(() => 
    {
        // Create a new rancher2 RKE Cluster
        var foo_custom = new Rancher2.Cluster("foo-custom", new()
        {
            Name = "foo-custom",
            Description = "Foo rancher2 custom cluster",
            RkeConfig = new Rancher2.Inputs.ClusterRkeConfigArgs
            {
                Network = new Rancher2.Inputs.ClusterRkeConfigNetworkArgs
                {
                    Plugin = "canal",
                },
            },
        });
    
        // Create a new rancher2 Node Template
        // (replace the <...> placeholders with real AWS values before deploying)
        var foo = new Rancher2.NodeTemplate("foo", new()
        {
            Name = "foo",
            Description = "foo test",
            Amazonec2Config = new Rancher2.Inputs.NodeTemplateAmazonec2ConfigArgs
            {
                AccessKey = "<AWS_ACCESS_KEY>",
                SecretKey = "<AWS_SECRET_KEY>",
                Ami = "<AMI_ID>",
                Region = "<REGION>",
                SecurityGroups = new[]
                {
                    "<AWS_SECURITY_GROUP>",
                },
                SubnetId = "<SUBNET_ID>",
                VpcId = "<VPC_ID>",
                Zone = "<ZONE>",
            },
        });
    
        // Create a new rancher2 Node Pool
        // A single pool of 3 nodes carries the control plane, etcd and worker
        // roles (overlapped planes).
        var fooNodePool = new Rancher2.NodePool("foo", new()
        {
            ClusterId = foo_custom.Id,
            Name = "foo",
            HostnamePrefix = "foo-cluster-0",
            NodeTemplateId = foo.Id,
            Quantity = 3,
            ControlPlane = true,
            Etcd = true,
            Worker = true,
        });
    
    });
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.rancher2.Cluster;
    import com.pulumi.rancher2.ClusterArgs;
    import com.pulumi.rancher2.inputs.ClusterRkeConfigArgs;
    import com.pulumi.rancher2.inputs.ClusterRkeConfigNetworkArgs;
    import com.pulumi.rancher2.NodeTemplate;
    import com.pulumi.rancher2.NodeTemplateArgs;
    import com.pulumi.rancher2.inputs.NodeTemplateAmazonec2ConfigArgs;
    import com.pulumi.rancher2.NodePool;
    import com.pulumi.rancher2.NodePoolArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            // Create a new rancher2 RKE Cluster
            var foo_custom = new Cluster("foo-custom", ClusterArgs.builder()
                .name("foo-custom")
                .description("Foo rancher2 custom cluster")
                .rkeConfig(ClusterRkeConfigArgs.builder()
                    .network(ClusterRkeConfigNetworkArgs.builder()
                        .plugin("canal")
                        .build())
                    .build())
                .build());
    
            // Create a new rancher2 Node Template
            // (replace the <...> placeholders with real AWS values before deploying)
            var foo = new NodeTemplate("foo", NodeTemplateArgs.builder()
                .name("foo")
                .description("foo test")
                .amazonec2Config(NodeTemplateAmazonec2ConfigArgs.builder()
                    .accessKey("<AWS_ACCESS_KEY>")
                    .secretKey("<AWS_SECRET_KEY>")
                    .ami("<AMI_ID>")
                    .region("<REGION>")
                    .securityGroups("<AWS_SECURITY_GROUP>")
                    .subnetId("<SUBNET_ID>")
                    .vpcId("<VPC_ID>")
                    .zone("<ZONE>")
                    .build())
                .build());
    
            // Create a new rancher2 Node Pool
            // A single pool of 3 nodes carries the control plane, etcd and
            // worker roles (overlapped planes).
            var fooNodePool = new NodePool("fooNodePool", NodePoolArgs.builder()
                .clusterId(foo_custom.id())
                .name("foo")
                .hostnamePrefix("foo-cluster-0")
                .nodeTemplateId(foo.id())
                .quantity(3)
                .controlPlane(true)
                .etcd(true)
                .worker(true)
                .build());
    
        }
    }
    
    resources:
      # Create a new rancher2 RKE Cluster
      foo-custom:
        type: rancher2:Cluster
        properties:
          name: foo-custom
          description: Foo rancher2 custom cluster
          rkeConfig:
            network:
              plugin: canal
      # Create a new rancher2 Node Template
      # (replace the <...> placeholders with real AWS values before deploying)
      foo:
        type: rancher2:NodeTemplate
        properties:
          name: foo
          description: foo test
          amazonec2Config:
            accessKey: <AWS_ACCESS_KEY>
            secretKey: <AWS_SECRET_KEY>
            ami: <AMI_ID>
            region: <REGION>
            securityGroups:
              - <AWS_SECURITY_GROUP>
            subnetId: <SUBNET_ID>
            vpcId: <VPC_ID>
            zone: <ZONE>
      # Create a new rancher2 Node Pool
      # A single pool of 3 nodes carries the control plane, etcd and worker
      # roles (overlapped planes).
      fooNodePool:
        type: rancher2:NodePool
        name: foo
        properties:
          clusterId: ${["foo-custom"].id}
          name: foo
          hostnamePrefix: foo-cluster-0
          nodeTemplateId: ${foo.id}
          quantity: 3
          controlPlane: true
          etcd: true
          worker: true
    

    Creating Rancher v2 RKE cluster from template. For Rancher v2.3.x and above.

    import * as pulumi from "@pulumi/pulumi";
    import * as rancher2 from "@pulumi/rancher2";
    
    // Create a new rancher2 cluster template
    const foo = new rancher2.ClusterTemplate("foo", {
        name: "foo",
        members: [{
            accessType: "owner",
            userPrincipalId: "local://user-XXXXX",
        }],
        templateRevisions: [{
            name: "V1",
            clusterConfig: {
                rkeConfig: {
                    network: {
                        plugin: "canal",
                    },
                    services: {
                        etcd: {
                            creation: "6h",
                            retention: "24h",
                        },
                    },
                },
            },
            // Marks revision V1 as the template's default revision.
            "default": true,
        }],
        description: "Test cluster template v2",
    });
    // Create a new rancher2 RKE Cluster from template
    // The revision id is a computed output, so it is projected with apply().
    const fooCluster = new rancher2.Cluster("foo", {
        name: "foo",
        clusterTemplateId: foo.id,
        clusterTemplateRevisionId: foo.templateRevisions.apply(templateRevisions => templateRevisions[0].id),
    });
    
    import pulumi
    import pulumi_rancher2 as rancher2
    
    # Create a new rancher2 cluster template
    foo = rancher2.ClusterTemplate("foo",
        name="foo",
        members=[{
            "access_type": "owner",
            "user_principal_id": "local://user-XXXXX",
        }],
        template_revisions=[{
            "name": "V1",
            "cluster_config": {
                "rke_config": {
                    "network": {
                        "plugin": "canal",
                    },
                    "services": {
                        "etcd": {
                            "creation": "6h",
                            "retention": "24h",
                        },
                    },
                },
            },
            # Marks revision V1 as the template's default revision.
            "default": True,
        }],
        description="Test cluster template v2")
    # Create a new rancher2 RKE Cluster from template,
    # referencing the first template revision's computed id.
    foo_cluster = rancher2.Cluster("foo",
        name="foo",
        cluster_template_id=foo.id,
        cluster_template_revision_id=foo.template_revisions[0].id)
    
    package main
    
    import (
    	"github.com/pulumi/pulumi-rancher2/sdk/v6/go/rancher2"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		// Create a new rancher2 cluster template
    		foo, err := rancher2.NewClusterTemplate(ctx, "foo", &rancher2.ClusterTemplateArgs{
    			Name: pulumi.String("foo"),
    			Members: rancher2.ClusterTemplateMemberArray{
    				&rancher2.ClusterTemplateMemberArgs{
    					AccessType:      pulumi.String("owner"),
    					UserPrincipalId: pulumi.String("local://user-XXXXX"),
    				},
    			},
    			TemplateRevisions: rancher2.ClusterTemplateTemplateRevisionArray{
    				&rancher2.ClusterTemplateTemplateRevisionArgs{
    					Name: pulumi.String("V1"),
    					ClusterConfig: &rancher2.ClusterTemplateTemplateRevisionClusterConfigArgs{
    						RkeConfig: &rancher2.ClusterTemplateTemplateRevisionClusterConfigRkeConfigArgs{
    							Network: &rancher2.ClusterTemplateTemplateRevisionClusterConfigRkeConfigNetworkArgs{
    								Plugin: pulumi.String("canal"),
    							},
    							Services: &rancher2.ClusterTemplateTemplateRevisionClusterConfigRkeConfigServicesArgs{
    								Etcd: &rancher2.ClusterTemplateTemplateRevisionClusterConfigRkeConfigServicesEtcdArgs{
    									Creation:  pulumi.String("6h"),
    									Retention: pulumi.String("24h"),
    								},
    							},
    						},
    					},
    					Default: pulumi.Bool(true),
    				},
    			},
    			Description: pulumi.String("Test cluster template v2"),
    		})
    		if err != nil {
    			return err
    		}
    		// Create a new rancher2 RKE Cluster from template
    		_, err = rancher2.NewCluster(ctx, "foo", &rancher2.ClusterArgs{
    			Name:              pulumi.String("foo"),
    			ClusterTemplateId: foo.ID(),
    			ClusterTemplateRevisionId: pulumi.String(foo.TemplateRevisions.ApplyT(func(templateRevisions []rancher2.ClusterTemplateTemplateRevision) (*string, error) {
    				return &templateRevisions[0].Id, nil
    			}).(pulumi.StringPtrOutput)),
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using Rancher2 = Pulumi.Rancher2;
    
    return await Deployment.RunAsync(() => 
    {
        // Create a new rancher2 cluster template
        var foo = new Rancher2.ClusterTemplate("foo", new()
        {
            Name = "foo",
            Members = new[]
            {
                new Rancher2.Inputs.ClusterTemplateMemberArgs
                {
                    AccessType = "owner",
                    UserPrincipalId = "local://user-XXXXX",
                },
            },
            TemplateRevisions = new[]
            {
                new Rancher2.Inputs.ClusterTemplateTemplateRevisionArgs
                {
                    Name = "V1",
                    ClusterConfig = new Rancher2.Inputs.ClusterTemplateTemplateRevisionClusterConfigArgs
                    {
                        RkeConfig = new Rancher2.Inputs.ClusterTemplateTemplateRevisionClusterConfigRkeConfigArgs
                        {
                            Network = new Rancher2.Inputs.ClusterTemplateTemplateRevisionClusterConfigRkeConfigNetworkArgs
                            {
                                Plugin = "canal",
                            },
                            Services = new Rancher2.Inputs.ClusterTemplateTemplateRevisionClusterConfigRkeConfigServicesArgs
                            {
                                Etcd = new Rancher2.Inputs.ClusterTemplateTemplateRevisionClusterConfigRkeConfigServicesEtcdArgs
                                {
                                    Creation = "6h",
                                    Retention = "24h",
                                },
                            },
                        },
                    },
                    // Marks revision V1 as the template's default revision.
                    Default = true,
                },
            },
            Description = "Test cluster template v2",
        });
    
        // Create a new rancher2 RKE Cluster from template
        // The revision id is a computed output, so it is projected with Apply().
        var fooCluster = new Rancher2.Cluster("foo", new()
        {
            Name = "foo",
            ClusterTemplateId = foo.Id,
            ClusterTemplateRevisionId = foo.TemplateRevisions.Apply(templateRevisions => templateRevisions[0].Id),
        });
    
    });
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.rancher2.ClusterTemplate;
    import com.pulumi.rancher2.ClusterTemplateArgs;
    import com.pulumi.rancher2.inputs.ClusterTemplateMemberArgs;
    import com.pulumi.rancher2.inputs.ClusterTemplateTemplateRevisionArgs;
    import com.pulumi.rancher2.inputs.ClusterTemplateTemplateRevisionClusterConfigArgs;
    import com.pulumi.rancher2.inputs.ClusterTemplateTemplateRevisionClusterConfigRkeConfigArgs;
    import com.pulumi.rancher2.inputs.ClusterTemplateTemplateRevisionClusterConfigRkeConfigNetworkArgs;
    import com.pulumi.rancher2.inputs.ClusterTemplateTemplateRevisionClusterConfigRkeConfigServicesArgs;
    import com.pulumi.rancher2.inputs.ClusterTemplateTemplateRevisionClusterConfigRkeConfigServicesEtcdArgs;
    import com.pulumi.rancher2.Cluster;
    import com.pulumi.rancher2.ClusterArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }

        public static void stack(Context ctx) {
            // Create a new rancher2 cluster template with a single revision (V1)
            // that is marked as the default revision.
            var foo = new ClusterTemplate("foo", ClusterTemplateArgs.builder()
                .name("foo")
                .members(ClusterTemplateMemberArgs.builder()
                    .accessType("owner")
                    .userPrincipalId("local://user-XXXXX")
                    .build())
                .templateRevisions(ClusterTemplateTemplateRevisionArgs.builder()
                    .name("V1")
                    .clusterConfig(ClusterTemplateTemplateRevisionClusterConfigArgs.builder()
                        .rkeConfig(ClusterTemplateTemplateRevisionClusterConfigRkeConfigArgs.builder()
                            .network(ClusterTemplateTemplateRevisionClusterConfigRkeConfigNetworkArgs.builder()
                                .plugin("canal")
                                .build())
                            .services(ClusterTemplateTemplateRevisionClusterConfigRkeConfigServicesArgs.builder()
                                .etcd(ClusterTemplateTemplateRevisionClusterConfigRkeConfigServicesEtcdArgs.builder()
                                    .creation("6h")
                                    .retention("24h")
                                    .build())
                                .build())
                            .build())
                        .build())
                    .default_(true)
                    .build())
                .description("Test cluster template v2")
                .build());

            // Create a new rancher2 RKE Cluster from template.
            // NOTE: inside the applyValue callback, templateRevisions is a
            // java.util.List, so elements are accessed with get(0) — the
            // original array-style templateRevisions[0] does not compile.
            var fooCluster = new Cluster("fooCluster", ClusterArgs.builder()
                .name("foo")
                .clusterTemplateId(foo.id())
                .clusterTemplateRevisionId(foo.templateRevisions().applyValue(templateRevisions -> templateRevisions.get(0).id()))
                .build());

        }
    }
    
    resources:
      # Create a new rancher2 cluster template
      foo:
        type: rancher2:ClusterTemplate
        properties:
          name: foo
          members:
            - accessType: owner
              userPrincipalId: local://user-XXXXX
          templateRevisions:
            - name: V1
              clusterConfig:
                rkeConfig:
                  network:
                    plugin: canal
                  services:
                    etcd:
                      creation: 6h
                      retention: 24h
              default: true
          description: Test cluster template v2
      # Create a new rancher2 RKE Cluster from template
      fooCluster:
        type: rancher2:Cluster
        name: foo
        properties:
          name: foo
          clusterTemplateId: ${foo.id}
          clusterTemplateRevisionId: ${foo.templateRevisions[0].id}
    

    Creating Rancher v2 RKE cluster with upgrade strategy. For Rancher v2.4.x and above.

    import * as pulumi from "@pulumi/pulumi";
    import * as rancher2 from "@pulumi/rancher2";
    
    // RKE cluster with etcd snapshot settings, kube-api audit logging, and a
    // drain-based upgrade strategy (Rancher v2.4.x+).
    const foo = new rancher2.Cluster("foo", {
        name: "foo",
        description: "Terraform custom cluster",
        rkeConfig: {
            network: {
                plugin: "canal",
            },
            services: {
                etcd: {
                    // etcd snapshot schedule and retention window
                    creation: "6h",
                    retention: "24h",
                },
                kubeApi: {
                    auditLog: {
                        enabled: true,
                        configuration: {
                            maxAge: 5,
                            maxBackup: 5,
                            maxSize: 100,
                            path: "-",
                            format: "json",
                            // The audit policy is passed verbatim as a YAML document.
                            policy: `apiVersion: audit.k8s.io/v1
    kind: Policy
    metadata:
      creationTimestamp: null
    omitStages:
    - RequestReceived
    rules:
    - level: RequestResponse
      resources:
      - resources:
        - pods
    `,
                        },
                    },
                },
            },
            upgradeStrategy: {
                // Drain nodes before upgrade; at most 20% of workers unavailable.
                drain: true,
                maxUnavailableWorker: "20%",
            },
        },
    });
    
    import pulumi
    import pulumi_rancher2 as rancher2
    
    foo = rancher2.Cluster("foo",
        name="foo",
        description="Terraform custom cluster",
        # RKE cluster with etcd snapshot settings, kube-api audit logging, and a
        # drain-based upgrade strategy (Rancher v2.4.x+).
        rke_config={
            "network": {
                "plugin": "canal",
            },
            "services": {
                "etcd": {
                    # etcd snapshot schedule and retention window
                    "creation": "6h",
                    "retention": "24h",
                },
                "kube_api": {
                    "audit_log": {
                        "enabled": True,
                        "configuration": {
                            "max_age": 5,
                            "max_backup": 5,
                            "max_size": 100,
                            "path": "-",
                            "format": "json",
                            # The audit policy is passed verbatim as a YAML document.
                            "policy": """apiVersion: audit.k8s.io/v1
    kind: Policy
    metadata:
      creationTimestamp: null
    omitStages:
    - RequestReceived
    rules:
    - level: RequestResponse
      resources:
      - resources:
        - pods
    """,
                        },
                    },
                },
            },
            "upgrade_strategy": {
                # Drain nodes before upgrade; at most 20% of workers unavailable.
                "drain": True,
                "max_unavailable_worker": "20%",
            },
        })
    
    package main
    
    import (
    	"github.com/pulumi/pulumi-rancher2/sdk/v6/go/rancher2"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		_, err := rancher2.NewCluster(ctx, "foo", &rancher2.ClusterArgs{
    			Name:        pulumi.String("foo"),
    			Description: pulumi.String("Terraform custom cluster"),
    			RkeConfig: &rancher2.ClusterRkeConfigArgs{
    				Network: &rancher2.ClusterRkeConfigNetworkArgs{
    					Plugin: pulumi.String("canal"),
    				},
    				Services: &rancher2.ClusterRkeConfigServicesArgs{
    					Etcd: &rancher2.ClusterRkeConfigServicesEtcdArgs{
    						Creation:  pulumi.String("6h"),
    						Retention: pulumi.String("24h"),
    					},
    					KubeApi: &rancher2.ClusterRkeConfigServicesKubeApiArgs{
    						AuditLog: &rancher2.ClusterRkeConfigServicesKubeApiAuditLogArgs{
    							Enabled: pulumi.Bool(true),
    							Configuration: &rancher2.ClusterRkeConfigServicesKubeApiAuditLogConfigurationArgs{
    								MaxAge:    pulumi.Int(5),
    								MaxBackup: pulumi.Int(5),
    								MaxSize:   pulumi.Int(100),
    								Path:      pulumi.String("-"),
    								Format:    pulumi.String("json"),
    								Policy: pulumi.String(`apiVersion: audit.k8s.io/v1
    kind: Policy
    metadata:
      creationTimestamp: null
    omitStages:
    - RequestReceived
    rules:
    - level: RequestResponse
      resources:
      - resources:
        - pods
    `),
    							},
    						},
    					},
    				},
    				UpgradeStrategy: &rancher2.ClusterRkeConfigUpgradeStrategyArgs{
    					Drain:                pulumi.Bool(true),
    					MaxUnavailableWorker: pulumi.String("20%"),
    				},
    			},
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using Rancher2 = Pulumi.Rancher2;
    
    return await Deployment.RunAsync(() => 
    {
        var foo = new Rancher2.Cluster("foo", new()
        {
            Name = "foo",
            Description = "Terraform custom cluster",
            RkeConfig = new Rancher2.Inputs.ClusterRkeConfigArgs
            {
                Network = new Rancher2.Inputs.ClusterRkeConfigNetworkArgs
                {
                    Plugin = "canal",
                },
                Services = new Rancher2.Inputs.ClusterRkeConfigServicesArgs
                {
                    Etcd = new Rancher2.Inputs.ClusterRkeConfigServicesEtcdArgs
                    {
                        Creation = "6h",
                        Retention = "24h",
                    },
                    KubeApi = new Rancher2.Inputs.ClusterRkeConfigServicesKubeApiArgs
                    {
                        AuditLog = new Rancher2.Inputs.ClusterRkeConfigServicesKubeApiAuditLogArgs
                        {
                            Enabled = true,
                            Configuration = new Rancher2.Inputs.ClusterRkeConfigServicesKubeApiAuditLogConfigurationArgs
                            {
                                MaxAge = 5,
                                MaxBackup = 5,
                                MaxSize = 100,
                                Path = "-",
                                Format = "json",
                                Policy = @"apiVersion: audit.k8s.io/v1
    kind: Policy
    metadata:
      creationTimestamp: null
    omitStages:
    - RequestReceived
    rules:
    - level: RequestResponse
      resources:
      - resources:
        - pods
    ",
                            },
                        },
                    },
                },
                UpgradeStrategy = new Rancher2.Inputs.ClusterRkeConfigUpgradeStrategyArgs
                {
                    Drain = true,
                    MaxUnavailableWorker = "20%",
                },
            },
        });
    
    });
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.rancher2.Cluster;
    import com.pulumi.rancher2.ClusterArgs;
    import com.pulumi.rancher2.inputs.ClusterRkeConfigArgs;
    import com.pulumi.rancher2.inputs.ClusterRkeConfigNetworkArgs;
    import com.pulumi.rancher2.inputs.ClusterRkeConfigServicesArgs;
    import com.pulumi.rancher2.inputs.ClusterRkeConfigServicesEtcdArgs;
    import com.pulumi.rancher2.inputs.ClusterRkeConfigServicesKubeApiArgs;
    import com.pulumi.rancher2.inputs.ClusterRkeConfigServicesKubeApiAuditLogArgs;
    import com.pulumi.rancher2.inputs.ClusterRkeConfigServicesKubeApiAuditLogConfigurationArgs;
    import com.pulumi.rancher2.inputs.ClusterRkeConfigUpgradeStrategyArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            var foo = new Cluster("foo", ClusterArgs.builder()
                .name("foo")
                .description("Terraform custom cluster")
                .rkeConfig(ClusterRkeConfigArgs.builder()
                    .network(ClusterRkeConfigNetworkArgs.builder()
                        .plugin("canal")
                        .build())
                    .services(ClusterRkeConfigServicesArgs.builder()
                        .etcd(ClusterRkeConfigServicesEtcdArgs.builder()
                            .creation("6h")
                            .retention("24h")
                            .build())
                        .kubeApi(ClusterRkeConfigServicesKubeApiArgs.builder()
                            .auditLog(ClusterRkeConfigServicesKubeApiAuditLogArgs.builder()
                                .enabled(true)
                                .configuration(ClusterRkeConfigServicesKubeApiAuditLogConfigurationArgs.builder()
                                    .maxAge(5)
                                    .maxBackup(5)
                                    .maxSize(100)
                                    .path("-")
                                    .format("json")
                                    .policy("""
    apiVersion: audit.k8s.io/v1
    kind: Policy
    metadata:
      creationTimestamp: null
    omitStages:
    - RequestReceived
    rules:
    - level: RequestResponse
      resources:
      - resources:
        - pods
                                    """)
                                    .build())
                                .build())
                            .build())
                        .build())
                    .upgradeStrategy(ClusterRkeConfigUpgradeStrategyArgs.builder()
                        .drain(true)
                        .maxUnavailableWorker("20%")
                        .build())
                    .build())
                .build());
    
        }
    }
    
    resources:
      # RKE cluster with etcd snapshot settings, kube-api audit logging, and a
      # drain-based upgrade strategy (Rancher v2.4.x+).
      foo:
        type: rancher2:Cluster
        properties:
          name: foo
          description: Terraform custom cluster
          rkeConfig:
            network:
              plugin: canal
            services:
              etcd:
                # etcd snapshot schedule and retention window
                creation: 6h
                retention: 24h
              kubeApi:
                auditLog:
                  enabled: true
                  configuration:
                    maxAge: 5
                    maxBackup: 5
                    maxSize: 100
                    path: '-'
                    format: json
                    # The audit policy is passed verbatim as a nested YAML document.
                    policy: |
                      apiVersion: audit.k8s.io/v1
                      kind: Policy
                      metadata:
                        creationTimestamp: null
                      omitStages:
                      - RequestReceived
                      rules:
                      - level: RequestResponse
                        resources:
                        - resources:
                          - pods                  
            upgradeStrategy:
              # Drain nodes before upgrade; at most 20% of workers unavailable.
              drain: true
              maxUnavailableWorker: 20%
    

    Creating Rancher v2 RKE cluster with cluster agent customization. For Rancher v2.7.5 and above.

    import * as pulumi from "@pulumi/pulumi";
    import * as rancher2 from "@pulumi/rancher2";
    
    // RKE cluster whose cluster-agent deployment is customized with an extra
    // toleration, a node-affinity override (raw JSON), and explicit CPU/memory
    // requests and limits (Rancher v2.7.5+).
    const foo = new rancher2.Cluster("foo", {
        name: "foo",
        description: "Terraform cluster with agent customization",
        rkeConfig: {
            network: {
                plugin: "canal",
            },
        },
        clusterAgentDeploymentCustomizations: [{
            appendTolerations: [{
                effect: "NoSchedule",
                key: "tolerate/control-plane",
                value: "true",
            }],
            // The affinity override is passed verbatim as a JSON document.
            overrideAffinity: `{
      "nodeAffinity": {
        "requiredDuringSchedulingIgnoredDuringExecution": {
          "nodeSelectorTerms": [{
            "matchExpressions": [{
              "key": "not.this/nodepool",
              "operator": "In",
              "values": [
                "true"
              ]
            }]
          }]
        }
      }
    }
    `,
            overrideResourceRequirements: [{
                cpuLimit: "800",
                cpuRequest: "500",
                memoryLimit: "800",
                memoryRequest: "500",
            }],
        }],
    });
    
    import pulumi
    import pulumi_rancher2 as rancher2
    
    foo = rancher2.Cluster("foo",
        name="foo",
        description="Terraform cluster with agent customization",
        rke_config={
            "network": {
                "plugin": "canal",
            },
        },
        cluster_agent_deployment_customizations=[{
            "append_tolerations": [{
                "effect": "NoSchedule",
                "key": "tolerate/control-plane",
                "value": "true",
            }],
            "override_affinity": """{
      "nodeAffinity": {
        "requiredDuringSchedulingIgnoredDuringExecution": {
          "nodeSelectorTerms": [{
            "matchExpressions": [{
              "key": "not.this/nodepool",
              "operator": "In",
              "values": [
                "true"
              ]
            }]
          }]
        }
      }
    }
    """,
            "override_resource_requirements": [{
                "cpu_limit": "800",
                "cpu_request": "500",
                "memory_limit": "800",
                "memory_request": "500",
            }],
        }])
    
    package main
    
    import (
    	"github.com/pulumi/pulumi-rancher2/sdk/v6/go/rancher2"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		_, err := rancher2.NewCluster(ctx, "foo", &rancher2.ClusterArgs{
    			Name:        pulumi.String("foo"),
    			Description: pulumi.String("Terraform cluster with agent customization"),
    			RkeConfig: &rancher2.ClusterRkeConfigArgs{
    				Network: &rancher2.ClusterRkeConfigNetworkArgs{
    					Plugin: pulumi.String("canal"),
    				},
    			},
    			ClusterAgentDeploymentCustomizations: rancher2.ClusterClusterAgentDeploymentCustomizationArray{
    				&rancher2.ClusterClusterAgentDeploymentCustomizationArgs{
    					AppendTolerations: rancher2.ClusterClusterAgentDeploymentCustomizationAppendTolerationArray{
    						&rancher2.ClusterClusterAgentDeploymentCustomizationAppendTolerationArgs{
    							Effect: pulumi.String("NoSchedule"),
    							Key:    pulumi.String("tolerate/control-plane"),
    							Value:  pulumi.String("true"),
    						},
    					},
    					OverrideAffinity: pulumi.String(`{
      "nodeAffinity": {
        "requiredDuringSchedulingIgnoredDuringExecution": {
          "nodeSelectorTerms": [{
            "matchExpressions": [{
              "key": "not.this/nodepool",
              "operator": "In",
              "values": [
                "true"
              ]
            }]
          }]
        }
      }
    }
    `),
    					OverrideResourceRequirements: rancher2.ClusterClusterAgentDeploymentCustomizationOverrideResourceRequirementArray{
    						&rancher2.ClusterClusterAgentDeploymentCustomizationOverrideResourceRequirementArgs{
    							CpuLimit:      pulumi.String("800"),
    							CpuRequest:    pulumi.String("500"),
    							MemoryLimit:   pulumi.String("800"),
    							MemoryRequest: pulumi.String("500"),
    						},
    					},
    				},
    			},
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using Rancher2 = Pulumi.Rancher2;
    
    return await Deployment.RunAsync(() => 
    {
        var foo = new Rancher2.Cluster("foo", new()
        {
            Name = "foo",
            Description = "Terraform cluster with agent customization",
            RkeConfig = new Rancher2.Inputs.ClusterRkeConfigArgs
            {
                Network = new Rancher2.Inputs.ClusterRkeConfigNetworkArgs
                {
                    Plugin = "canal",
                },
            },
            ClusterAgentDeploymentCustomizations = new[]
            {
                new Rancher2.Inputs.ClusterClusterAgentDeploymentCustomizationArgs
                {
                    AppendTolerations = new[]
                    {
                        new Rancher2.Inputs.ClusterClusterAgentDeploymentCustomizationAppendTolerationArgs
                        {
                            Effect = "NoSchedule",
                            Key = "tolerate/control-plane",
                            Value = "true",
                        },
                    },
                    OverrideAffinity = @"{
      ""nodeAffinity"": {
        ""requiredDuringSchedulingIgnoredDuringExecution"": {
          ""nodeSelectorTerms"": [{
            ""matchExpressions"": [{
              ""key"": ""not.this/nodepool"",
              ""operator"": ""In"",
              ""values"": [
                ""true""
              ]
            }]
          }]
        }
      }
    }
    ",
                    OverrideResourceRequirements = new[]
                    {
                        new Rancher2.Inputs.ClusterClusterAgentDeploymentCustomizationOverrideResourceRequirementArgs
                        {
                            CpuLimit = "800",
                            CpuRequest = "500",
                            MemoryLimit = "800",
                            MemoryRequest = "500",
                        },
                    },
                },
            },
        });
    
    });
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.rancher2.Cluster;
    import com.pulumi.rancher2.ClusterArgs;
    import com.pulumi.rancher2.inputs.ClusterClusterAgentDeploymentCustomizationAppendTolerationArgs;
    import com.pulumi.rancher2.inputs.ClusterClusterAgentDeploymentCustomizationArgs;
    import com.pulumi.rancher2.inputs.ClusterClusterAgentDeploymentCustomizationOverrideResourceRequirementArgs;
    import com.pulumi.rancher2.inputs.ClusterRkeConfigArgs;
    import com.pulumi.rancher2.inputs.ClusterRkeConfigNetworkArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }

        public static void stack(Context ctx) {
            // RKE cluster whose cluster-agent deployment is customized with an
            // extra toleration, a node-affinity override (raw JSON), and explicit
            // CPU/memory requests and limits (Rancher v2.7.5+).
            var foo = new Cluster("foo", ClusterArgs.builder()
                .name("foo")
                .description("Terraform cluster with agent customization")
                .rkeConfig(ClusterRkeConfigArgs.builder()
                    .network(ClusterRkeConfigNetworkArgs.builder()
                        .plugin("canal")
                        .build())
                    .build())
                .clusterAgentDeploymentCustomizations(ClusterClusterAgentDeploymentCustomizationArgs.builder()
                    .appendTolerations(ClusterClusterAgentDeploymentCustomizationAppendTolerationArgs.builder()
                        .effect("NoSchedule")
                        .key("tolerate/control-plane")
                        .value("true")
                        .build())
                    // The affinity override is passed verbatim as a JSON document.
                    .overrideAffinity("""
    {
      "nodeAffinity": {
        "requiredDuringSchedulingIgnoredDuringExecution": {
          "nodeSelectorTerms": [{
            "matchExpressions": [{
              "key": "not.this/nodepool",
              "operator": "In",
              "values": [
                "true"
              ]
            }]
          }]
        }
      }
    }
                    """)
                    .overrideResourceRequirements(ClusterClusterAgentDeploymentCustomizationOverrideResourceRequirementArgs.builder()
                        .cpuLimit("800")
                        .cpuRequest("500")
                        .memoryLimit("800")
                        .memoryRequest("500")
                        .build())
                    .build())
                .build());

        }
    }
    
    resources:
      foo:
        type: rancher2:Cluster
        properties:
          name: foo
          description: Terraform cluster with agent customization
          rkeConfig:
            network:
              plugin: canal
          clusterAgentDeploymentCustomizations:
            - appendTolerations:
                - effect: NoSchedule
                  key: tolerate/control-plane
                  value: 'true'
              overrideAffinity: |
                {
                  "nodeAffinity": {
                    "requiredDuringSchedulingIgnoredDuringExecution": {
                      "nodeSelectorTerms": [{
                        "matchExpressions": [{
                          "key": "not.this/nodepool",
                          "operator": "In",
                          "values": [
                            "true"
                          ]
                        }]
                      }]
                    }
                  }
                }            
              overrideResourceRequirements:
                - cpuLimit: '800'
                  cpuRequest: '500'
                  memoryLimit: '800'
                  memoryRequest: '500'
    

    Creating Rancher v2 RKE cluster with Pod Security Admission Configuration Template (PSACT). For Rancher v2.7.2 and above.

    import * as pulumi from "@pulumi/pulumi";
    import * as rancher2 from "@pulumi/rancher2";
    
    // Custom PSACT (only needed if you wish to use your own template rather
    // than one that already exists in Rancher).
    const foo = new rancher2.PodSecurityAdmissionConfigurationTemplate("foo", {
        name: "custom-psact",
        description: "This is my custom Pod Security Admission Configuration Template",
        defaults: {
            audit: "restricted",
            auditVersion: "latest",
            enforce: "restricted",
            enforceVersion: "latest",
            warn: "restricted",
            warnVersion: "latest",
        },
        exemptions: {
            usernames: ["testuser"],
            runtimeClasses: ["testclass"],
            namespaces: [
                "ingress-nginx",
                "kube-system",
            ],
        },
    });
    // Replace "<name>" with the name of the PSACT to apply — e.g. the
    // "custom-psact" template created above.
    const fooCluster = new rancher2.Cluster("foo", {
        name: "foo",
        description: "Terraform cluster with PSACT",
        defaultPodSecurityAdmissionConfigurationTemplateName: "<name>",
        rkeConfig: {
            network: {
                plugin: "canal",
            },
        },
    });
    
    import pulumi
    import pulumi_rancher2 as rancher2
    
    # Custom PSACT (if you wish to use your own)
    foo = rancher2.PodSecurityAdmissionConfigurationTemplate("foo",
        name="custom-psact",
        description="This is my custom Pod Security Admission Configuration Template",
        defaults={
            "audit": "restricted",
            "audit_version": "latest",
            "enforce": "restricted",
            "enforce_version": "latest",
            "warn": "restricted",
            "warn_version": "latest",
        },
        exemptions={
            "usernames": ["testuser"],
            "runtime_classes": ["testclass"],
            "namespaces": [
                "ingress-nginx",
                "kube-system",
            ],
        })
    foo_cluster = rancher2.Cluster("foo",
        name="foo",
        description="Terraform cluster with PSACT",
        default_pod_security_admission_configuration_template_name="<name>",
        rke_config={
            "network": {
                "plugin": "canal",
            },
        })
    
    package main
    
    import (
    	"github.com/pulumi/pulumi-rancher2/sdk/v6/go/rancher2"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		// Custom PSACT (if you wish to use your own)
    		_, err := rancher2.NewPodSecurityAdmissionConfigurationTemplate(ctx, "foo", &rancher2.PodSecurityAdmissionConfigurationTemplateArgs{
    			Name:        pulumi.String("custom-psact"),
    			Description: pulumi.String("This is my custom Pod Security Admission Configuration Template"),
    			Defaults: &rancher2.PodSecurityAdmissionConfigurationTemplateDefaultsArgs{
    				Audit:          pulumi.String("restricted"),
    				AuditVersion:   pulumi.String("latest"),
    				Enforce:        pulumi.String("restricted"),
    				EnforceVersion: pulumi.String("latest"),
    				Warn:           pulumi.String("restricted"),
    				WarnVersion:    pulumi.String("latest"),
    			},
    			Exemptions: &rancher2.PodSecurityAdmissionConfigurationTemplateExemptionsArgs{
    				Usernames: pulumi.StringArray{
    					pulumi.String("testuser"),
    				},
    				RuntimeClasses: pulumi.StringArray{
    					pulumi.String("testclass"),
    				},
    				Namespaces: pulumi.StringArray{
    					pulumi.String("ingress-nginx"),
    					pulumi.String("kube-system"),
    				},
    			},
    		})
    		if err != nil {
    			return err
    		}
    		_, err = rancher2.NewCluster(ctx, "foo", &rancher2.ClusterArgs{
    			Name:        pulumi.String("foo"),
    			Description: pulumi.String("Terraform cluster with PSACT"),
    			DefaultPodSecurityAdmissionConfigurationTemplateName: pulumi.String("<name>"),
    			RkeConfig: &rancher2.ClusterRkeConfigArgs{
    				Network: &rancher2.ClusterRkeConfigNetworkArgs{
    					Plugin: pulumi.String("canal"),
    				},
    			},
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using Rancher2 = Pulumi.Rancher2;
    
    return await Deployment.RunAsync(() => 
    {
        // Custom PSACT (if you wish to use your own)
        var foo = new Rancher2.PodSecurityAdmissionConfigurationTemplate("foo", new()
        {
            Name = "custom-psact",
            Description = "This is my custom Pod Security Admission Configuration Template",
            Defaults = new Rancher2.Inputs.PodSecurityAdmissionConfigurationTemplateDefaultsArgs
            {
                Audit = "restricted",
                AuditVersion = "latest",
                Enforce = "restricted",
                EnforceVersion = "latest",
                Warn = "restricted",
                WarnVersion = "latest",
            },
            Exemptions = new Rancher2.Inputs.PodSecurityAdmissionConfigurationTemplateExemptionsArgs
            {
                Usernames = new[]
                {
                    "testuser",
                },
                RuntimeClasses = new[]
                {
                    "testclass",
                },
                Namespaces = new[]
                {
                    "ingress-nginx",
                    "kube-system",
                },
            },
        });
    
        var fooCluster = new Rancher2.Cluster("foo", new()
        {
            Name = "foo",
            Description = "Terraform cluster with PSACT",
            DefaultPodSecurityAdmissionConfigurationTemplateName = "<name>",
            RkeConfig = new Rancher2.Inputs.ClusterRkeConfigArgs
            {
                Network = new Rancher2.Inputs.ClusterRkeConfigNetworkArgs
                {
                    Plugin = "canal",
                },
            },
        });
    
    });
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.rancher2.PodSecurityAdmissionConfigurationTemplate;
    import com.pulumi.rancher2.PodSecurityAdmissionConfigurationTemplateArgs;
    import com.pulumi.rancher2.inputs.PodSecurityAdmissionConfigurationTemplateDefaultsArgs;
    import com.pulumi.rancher2.inputs.PodSecurityAdmissionConfigurationTemplateExemptionsArgs;
    import com.pulumi.rancher2.Cluster;
    import com.pulumi.rancher2.ClusterArgs;
    import com.pulumi.rancher2.inputs.ClusterRkeConfigArgs;
    import com.pulumi.rancher2.inputs.ClusterRkeConfigNetworkArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            // Custom PSACT (if you wish to use your own)
            var foo = new PodSecurityAdmissionConfigurationTemplate("foo", PodSecurityAdmissionConfigurationTemplateArgs.builder()
                .name("custom-psact")
                .description("This is my custom Pod Security Admission Configuration Template")
                .defaults(PodSecurityAdmissionConfigurationTemplateDefaultsArgs.builder()
                    .audit("restricted")
                    .auditVersion("latest")
                    .enforce("restricted")
                    .enforceVersion("latest")
                    .warn("restricted")
                    .warnVersion("latest")
                    .build())
                .exemptions(PodSecurityAdmissionConfigurationTemplateExemptionsArgs.builder()
                    .usernames("testuser")
                    .runtimeClasses("testclass")
                    .namespaces(                
                        "ingress-nginx",
                        "kube-system")
                    .build())
                .build());
    
            var fooCluster = new Cluster("fooCluster", ClusterArgs.builder()
                .name("foo")
                .description("Terraform cluster with PSACT")
                .defaultPodSecurityAdmissionConfigurationTemplateName("<name>")
                .rkeConfig(ClusterRkeConfigArgs.builder()
                    .network(ClusterRkeConfigNetworkArgs.builder()
                        .plugin("canal")
                        .build())
                    .build())
                .build());
    
        }
    }
    
    resources:
      # Custom PSACT (if you wish to use your own)
      foo:
        type: rancher2:PodSecurityAdmissionConfigurationTemplate
        properties:
          name: custom-psact
          description: This is my custom Pod Security Admission Configuration Template
          defaults:
            audit: restricted
            auditVersion: latest
            enforce: restricted
            enforceVersion: latest
            warn: restricted
            warnVersion: latest
          exemptions:
            usernames:
              - testuser
            runtimeClasses:
              - testclass
            namespaces:
              - ingress-nginx
              - kube-system
      fooCluster:
        type: rancher2:Cluster
        name: foo
        properties:
          name: foo
          description: Terraform cluster with PSACT
          defaultPodSecurityAdmissionConfigurationTemplateName: <name>
          rkeConfig:
            network:
              plugin: canal
    

    Importing an existing EKS cluster into Rancher v2, using eks_config_v2. For Rancher v2.5.x and above.

    import * as pulumi from "@pulumi/pulumi";
    import * as rancher2 from "@pulumi/rancher2";
    
    const foo = new rancher2.CloudCredential("foo", {
        name: "foo",
        description: "foo test",
        amazonec2CredentialConfig: {
            accessKey: "<aws-access-key>",
            secretKey: "<aws-secret-key>",
        },
    });
    const fooCluster = new rancher2.Cluster("foo", {
        name: "foo",
        description: "Terraform EKS cluster",
        eksConfigV2: {
            cloudCredentialId: foo.id,
            name: "<cluster-name>",
            region: "<eks-region>",
            imported: true,
        },
    });
    
    import pulumi
    import pulumi_rancher2 as rancher2
    
    foo = rancher2.CloudCredential("foo",
        name="foo",
        description="foo test",
        amazonec2_credential_config={
            "access_key": "<aws-access-key>",
            "secret_key": "<aws-secret-key>",
        })
    foo_cluster = rancher2.Cluster("foo",
        name="foo",
        description="Terraform EKS cluster",
        eks_config_v2={
            "cloud_credential_id": foo.id,
            "name": "<cluster-name>",
            "region": "<eks-region>",
            "imported": True,
        })
    
    package main
    
    import (
    	"github.com/pulumi/pulumi-rancher2/sdk/v6/go/rancher2"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		foo, err := rancher2.NewCloudCredential(ctx, "foo", &rancher2.CloudCredentialArgs{
    			Name:        pulumi.String("foo"),
    			Description: pulumi.String("foo test"),
    			Amazonec2CredentialConfig: &rancher2.CloudCredentialAmazonec2CredentialConfigArgs{
    				AccessKey: pulumi.String("<aws-access-key>"),
    				SecretKey: pulumi.String("<aws-secret-key>"),
    			},
    		})
    		if err != nil {
    			return err
    		}
    		_, err = rancher2.NewCluster(ctx, "foo", &rancher2.ClusterArgs{
    			Name:        pulumi.String("foo"),
    			Description: pulumi.String("Terraform EKS cluster"),
    			EksConfigV2: &rancher2.ClusterEksConfigV2Args{
    				CloudCredentialId: foo.ID(),
    				Name:              pulumi.String("<cluster-name>"),
    				Region:            pulumi.String("<eks-region>"),
    				Imported:          pulumi.Bool(true),
    			},
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using Rancher2 = Pulumi.Rancher2;
    
    return await Deployment.RunAsync(() => 
    {
        var foo = new Rancher2.CloudCredential("foo", new()
        {
            Name = "foo",
            Description = "foo test",
            Amazonec2CredentialConfig = new Rancher2.Inputs.CloudCredentialAmazonec2CredentialConfigArgs
            {
                AccessKey = "<aws-access-key>",
                SecretKey = "<aws-secret-key>",
            },
        });
    
        var fooCluster = new Rancher2.Cluster("foo", new()
        {
            Name = "foo",
            Description = "Terraform EKS cluster",
            EksConfigV2 = new Rancher2.Inputs.ClusterEksConfigV2Args
            {
                CloudCredentialId = foo.Id,
                Name = "<cluster-name>",
                Region = "<eks-region>",
                Imported = true,
            },
        });
    
    });
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.rancher2.CloudCredential;
    import com.pulumi.rancher2.CloudCredentialArgs;
    import com.pulumi.rancher2.inputs.CloudCredentialAmazonec2CredentialConfigArgs;
    import com.pulumi.rancher2.Cluster;
    import com.pulumi.rancher2.ClusterArgs;
    import com.pulumi.rancher2.inputs.ClusterEksConfigV2Args;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            var foo = new CloudCredential("foo", CloudCredentialArgs.builder()
                .name("foo")
                .description("foo test")
                .amazonec2CredentialConfig(CloudCredentialAmazonec2CredentialConfigArgs.builder()
                    .accessKey("<aws-access-key>")
                    .secretKey("<aws-secret-key>")
                    .build())
                .build());
    
            var fooCluster = new Cluster("fooCluster", ClusterArgs.builder()
                .name("foo")
                .description("Terraform EKS cluster")
                .eksConfigV2(ClusterEksConfigV2Args.builder()
                    .cloudCredentialId(foo.id())
                    .name("<cluster-name>")
                    .region("<eks-region>")
                    .imported(true)
                    .build())
                .build());
    
        }
    }
    
    resources:
      foo:
        type: rancher2:CloudCredential
        properties:
          name: foo
          description: foo test
          amazonec2CredentialConfig:
            accessKey: <aws-access-key>
            secretKey: <aws-secret-key>
      fooCluster:
        type: rancher2:Cluster
        name: foo
        properties:
          name: foo
          description: Terraform EKS cluster
          eksConfigV2:
            cloudCredentialId: ${foo.id}
            name: <cluster-name>
            region: <eks-region>
            imported: true
    

    Creating an EKS cluster from Rancher v2, using eks_config_v2. For Rancher v2.5.x and above.

    import * as pulumi from "@pulumi/pulumi";
    import * as rancher2 from "@pulumi/rancher2";
    
    const foo = new rancher2.CloudCredential("foo", {
        name: "foo",
        description: "foo test",
        amazonec2CredentialConfig: {
            accessKey: "<aws-access-key>",
            secretKey: "<aws-secret-key>",
        },
    });
    const fooCluster = new rancher2.Cluster("foo", {
        name: "foo",
        description: "Terraform EKS cluster",
        eksConfigV2: {
            cloudCredentialId: foo.id,
            region: "<EKS_REGION>",
            kubernetesVersion: "1.24",
            loggingTypes: [
                "audit",
                "api",
            ],
            nodeGroups: [
                {
                    name: "node_group1",
                    instanceType: "t3.medium",
                    desiredSize: 3,
                    maxSize: 5,
                },
                {
                    name: "node_group2",
                    instanceType: "m5.xlarge",
                    desiredSize: 2,
                    maxSize: 3,
                    nodeRole: "arn:aws:iam::role/test-NodeInstanceRole",
                },
            ],
            privateAccess: true,
            publicAccess: false,
        },
    });
    
    import pulumi
    import pulumi_rancher2 as rancher2
    
    foo = rancher2.CloudCredential("foo",
        name="foo",
        description="foo test",
        amazonec2_credential_config={
            "access_key": "<aws-access-key>",
            "secret_key": "<aws-secret-key>",
        })
    foo_cluster = rancher2.Cluster("foo",
        name="foo",
        description="Terraform EKS cluster",
        eks_config_v2={
            "cloud_credential_id": foo.id,
            "region": "<EKS_REGION>",
            "kubernetes_version": "1.24",
            "logging_types": [
                "audit",
                "api",
            ],
            "node_groups": [
                {
                    "name": "node_group1",
                    "instance_type": "t3.medium",
                    "desired_size": 3,
                    "max_size": 5,
                },
                {
                    "name": "node_group2",
                    "instance_type": "m5.xlarge",
                    "desired_size": 2,
                    "max_size": 3,
                    "node_role": "arn:aws:iam::role/test-NodeInstanceRole",
                },
            ],
            "private_access": True,
            "public_access": False,
        })
    
    package main
    
    import (
    	"github.com/pulumi/pulumi-rancher2/sdk/v6/go/rancher2"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		foo, err := rancher2.NewCloudCredential(ctx, "foo", &rancher2.CloudCredentialArgs{
    			Name:        pulumi.String("foo"),
    			Description: pulumi.String("foo test"),
    			Amazonec2CredentialConfig: &rancher2.CloudCredentialAmazonec2CredentialConfigArgs{
    				AccessKey: pulumi.String("<aws-access-key>"),
    				SecretKey: pulumi.String("<aws-secret-key>"),
    			},
    		})
    		if err != nil {
    			return err
    		}
    		_, err = rancher2.NewCluster(ctx, "foo", &rancher2.ClusterArgs{
    			Name:        pulumi.String("foo"),
    			Description: pulumi.String("Terraform EKS cluster"),
    			EksConfigV2: &rancher2.ClusterEksConfigV2Args{
    				CloudCredentialId: foo.ID(),
    				Region:            pulumi.String("<EKS_REGION>"),
    				KubernetesVersion: pulumi.String("1.24"),
    				LoggingTypes: pulumi.StringArray{
    					pulumi.String("audit"),
    					pulumi.String("api"),
    				},
    				NodeGroups: rancher2.ClusterEksConfigV2NodeGroupArray{
    					&rancher2.ClusterEksConfigV2NodeGroupArgs{
    						Name:         pulumi.String("node_group1"),
    						InstanceType: pulumi.String("t3.medium"),
    						DesiredSize:  pulumi.Int(3),
    						MaxSize:      pulumi.Int(5),
    					},
    					&rancher2.ClusterEksConfigV2NodeGroupArgs{
    						Name:         pulumi.String("node_group2"),
    						InstanceType: pulumi.String("m5.xlarge"),
    						DesiredSize:  pulumi.Int(2),
    						MaxSize:      pulumi.Int(3),
    						NodeRole:     pulumi.String("arn:aws:iam::role/test-NodeInstanceRole"),
    					},
    				},
    				PrivateAccess: pulumi.Bool(true),
    				PublicAccess:  pulumi.Bool(false),
    			},
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using Rancher2 = Pulumi.Rancher2;
    
    return await Deployment.RunAsync(() => 
    {
        var foo = new Rancher2.CloudCredential("foo", new()
        {
            Name = "foo",
            Description = "foo test",
            Amazonec2CredentialConfig = new Rancher2.Inputs.CloudCredentialAmazonec2CredentialConfigArgs
            {
                AccessKey = "<aws-access-key>",
                SecretKey = "<aws-secret-key>",
            },
        });
    
        var fooCluster = new Rancher2.Cluster("foo", new()
        {
            Name = "foo",
            Description = "Terraform EKS cluster",
            EksConfigV2 = new Rancher2.Inputs.ClusterEksConfigV2Args
            {
                CloudCredentialId = foo.Id,
                Region = "<EKS_REGION>",
                KubernetesVersion = "1.24",
                LoggingTypes = new[]
                {
                    "audit",
                    "api",
                },
                NodeGroups = new[]
                {
                    new Rancher2.Inputs.ClusterEksConfigV2NodeGroupArgs
                    {
                        Name = "node_group1",
                        InstanceType = "t3.medium",
                        DesiredSize = 3,
                        MaxSize = 5,
                    },
                    new Rancher2.Inputs.ClusterEksConfigV2NodeGroupArgs
                    {
                        Name = "node_group2",
                        InstanceType = "m5.xlarge",
                        DesiredSize = 2,
                        MaxSize = 3,
                        NodeRole = "arn:aws:iam::role/test-NodeInstanceRole",
                    },
                },
                PrivateAccess = true,
                PublicAccess = false,
            },
        });
    
    });
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.rancher2.CloudCredential;
    import com.pulumi.rancher2.CloudCredentialArgs;
    import com.pulumi.rancher2.inputs.CloudCredentialAmazonec2CredentialConfigArgs;
    import com.pulumi.rancher2.Cluster;
    import com.pulumi.rancher2.ClusterArgs;
    import com.pulumi.rancher2.inputs.ClusterEksConfigV2Args;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            var foo = new CloudCredential("foo", CloudCredentialArgs.builder()
                .name("foo")
                .description("foo test")
                .amazonec2CredentialConfig(CloudCredentialAmazonec2CredentialConfigArgs.builder()
                    .accessKey("<aws-access-key>")
                    .secretKey("<aws-secret-key>")
                    .build())
                .build());
    
            var fooCluster = new Cluster("fooCluster", ClusterArgs.builder()
                .name("foo")
                .description("Terraform EKS cluster")
                .eksConfigV2(ClusterEksConfigV2Args.builder()
                    .cloudCredentialId(foo.id())
                    .region("<EKS_REGION>")
                    .kubernetesVersion("1.24")
                    .loggingTypes(                
                        "audit",
                        "api")
                    .nodeGroups(                
                        ClusterEksConfigV2NodeGroupArgs.builder()
                            .name("node_group1")
                            .instanceType("t3.medium")
                            .desiredSize(3)
                            .maxSize(5)
                            .build(),
                        ClusterEksConfigV2NodeGroupArgs.builder()
                            .name("node_group2")
                            .instanceType("m5.xlarge")
                            .desiredSize(2)
                            .maxSize(3)
                            .nodeRole("arn:aws:iam::role/test-NodeInstanceRole")
                            .build())
                    .privateAccess(true)
                    .publicAccess(false)
                    .build())
                .build());
    
        }
    }
    
    resources:
      foo:
        type: rancher2:CloudCredential
        properties:
          name: foo
          description: foo test
          amazonec2CredentialConfig:
            accessKey: <aws-access-key>
            secretKey: <aws-secret-key>
      fooCluster:
        type: rancher2:Cluster
        name: foo
        properties:
          name: foo
          description: Terraform EKS cluster
          eksConfigV2:
            cloudCredentialId: ${foo.id}
            region: <EKS_REGION>
            kubernetesVersion: '1.24'
            loggingTypes:
              - audit
              - api
            nodeGroups:
              - name: node_group1
                instanceType: t3.medium
                desiredSize: 3
                maxSize: 5
              - name: node_group2
                instanceType: m5.xlarge
                desiredSize: 2
                maxSize: 3
                nodeRole: arn:aws:iam::role/test-NodeInstanceRole
            privateAccess: true
            publicAccess: false
    

    Creating an EKS cluster from Rancher v2, using eks_config_v2 and a launch template. For Rancher v2.5.6 and above.

    Note: To use launch_template you must provide the launch template ID (shown as <ec2-launch-template-id> in the example below). You can supply it either as a static value, or fetch it dynamically via an AWS data source (for example, aws_launch_template or aws_ami) and pass the resulting ID to the node group.

    import * as pulumi from "@pulumi/pulumi";
    import * as rancher2 from "@pulumi/rancher2";
    
    const foo = new rancher2.CloudCredential("foo", {
        name: "foo",
        description: "foo test",
        amazonec2CredentialConfig: {
            accessKey: "<aws-access-key>",
            secretKey: "<aws-secret-key>",
        },
    });
    const fooCluster = new rancher2.Cluster("foo", {
        name: "foo",
        description: "Terraform EKS cluster",
        eksConfigV2: {
            cloudCredentialId: foo.id,
            region: "<EKS_REGION>",
            kubernetesVersion: "1.24",
            loggingTypes: [
                "audit",
                "api",
            ],
            nodeGroups: [{
                desiredSize: 3,
                maxSize: 5,
                name: "node_group1",
                launchTemplates: [{
                    id: "<ec2-launch-template-id>",
                    version: 1,
                }],
            }],
            privateAccess: true,
            publicAccess: true,
        },
    });
    
    import pulumi
    import pulumi_rancher2 as rancher2
    
    foo = rancher2.CloudCredential("foo",
        name="foo",
        description="foo test",
        amazonec2_credential_config={
            "access_key": "<aws-access-key>",
            "secret_key": "<aws-secret-key>",
        })
    foo_cluster = rancher2.Cluster("foo",
        name="foo",
        description="Terraform EKS cluster",
        eks_config_v2={
            "cloud_credential_id": foo.id,
            "region": "<EKS_REGION>",
            "kubernetes_version": "1.24",
            "logging_types": [
                "audit",
                "api",
            ],
            "node_groups": [{
                "desired_size": 3,
                "max_size": 5,
                "name": "node_group1",
                "launch_templates": [{
                    "id": "<ec2-launch-template-id>",
                    "version": 1,
                }],
            }],
            "private_access": True,
            "public_access": True,
        })
    
    package main
    
    import (
    	"github.com/pulumi/pulumi-rancher2/sdk/v6/go/rancher2"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		foo, err := rancher2.NewCloudCredential(ctx, "foo", &rancher2.CloudCredentialArgs{
    			Name:        pulumi.String("foo"),
    			Description: pulumi.String("foo test"),
    			Amazonec2CredentialConfig: &rancher2.CloudCredentialAmazonec2CredentialConfigArgs{
    				AccessKey: pulumi.String("<aws-access-key>"),
    				SecretKey: pulumi.String("<aws-secret-key>"),
    			},
    		})
    		if err != nil {
    			return err
    		}
    		_, err = rancher2.NewCluster(ctx, "foo", &rancher2.ClusterArgs{
    			Name:        pulumi.String("foo"),
    			Description: pulumi.String("Terraform EKS cluster"),
    			EksConfigV2: &rancher2.ClusterEksConfigV2Args{
    				CloudCredentialId: foo.ID(),
    				Region:            pulumi.String("<EKS_REGION>"),
    				KubernetesVersion: pulumi.String("1.24"),
    				LoggingTypes: pulumi.StringArray{
    					pulumi.String("audit"),
    					pulumi.String("api"),
    				},
    				NodeGroups: rancher2.ClusterEksConfigV2NodeGroupArray{
    					&rancher2.ClusterEksConfigV2NodeGroupArgs{
    						DesiredSize: pulumi.Int(3),
    						MaxSize:     pulumi.Int(5),
    						Name:        pulumi.String("node_group1"),
    						LaunchTemplates: rancher2.ClusterEksConfigV2NodeGroupLaunchTemplateArray{
    							&rancher2.ClusterEksConfigV2NodeGroupLaunchTemplateArgs{
    								Id:      pulumi.String("<ec2-launch-template-id>"),
    								Version: pulumi.Int(1),
    							},
    						},
    					},
    				},
    				PrivateAccess: pulumi.Bool(true),
    				PublicAccess:  pulumi.Bool(true),
    			},
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using Rancher2 = Pulumi.Rancher2;
    
    return await Deployment.RunAsync(() => 
    {
        var foo = new Rancher2.CloudCredential("foo", new()
        {
            Name = "foo",
            Description = "foo test",
            Amazonec2CredentialConfig = new Rancher2.Inputs.CloudCredentialAmazonec2CredentialConfigArgs
            {
                AccessKey = "<aws-access-key>",
                SecretKey = "<aws-secret-key>",
            },
        });
    
        var fooCluster = new Rancher2.Cluster("foo", new()
        {
            Name = "foo",
            Description = "Terraform EKS cluster",
            EksConfigV2 = new Rancher2.Inputs.ClusterEksConfigV2Args
            {
                CloudCredentialId = foo.Id,
                Region = "<EKS_REGION>",
                KubernetesVersion = "1.24",
                LoggingTypes = new[]
                {
                    "audit",
                    "api",
                },
                NodeGroups = new[]
                {
                    new Rancher2.Inputs.ClusterEksConfigV2NodeGroupArgs
                    {
                        DesiredSize = 3,
                        MaxSize = 5,
                        Name = "node_group1",
                        LaunchTemplates = new[]
                        {
                            new Rancher2.Inputs.ClusterEksConfigV2NodeGroupLaunchTemplateArgs
                            {
                                Id = "<ec2-launch-template-id>",
                                Version = 1,
                            },
                        },
                    },
                },
                PrivateAccess = true,
                PublicAccess = true,
            },
        });
    
    });
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.rancher2.CloudCredential;
    import com.pulumi.rancher2.CloudCredentialArgs;
    import com.pulumi.rancher2.inputs.CloudCredentialAmazonec2CredentialConfigArgs;
    import com.pulumi.rancher2.Cluster;
    import com.pulumi.rancher2.ClusterArgs;
    import com.pulumi.rancher2.inputs.ClusterEksConfigV2Args;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            var foo = new CloudCredential("foo", CloudCredentialArgs.builder()
                .name("foo")
                .description("foo test")
                .amazonec2CredentialConfig(CloudCredentialAmazonec2CredentialConfigArgs.builder()
                    .accessKey("<aws-access-key>")
                    .secretKey("<aws-secret-key>")
                    .build())
                .build());
    
            var fooCluster = new Cluster("fooCluster", ClusterArgs.builder()
                .name("foo")
                .description("Terraform EKS cluster")
                .eksConfigV2(ClusterEksConfigV2Args.builder()
                    .cloudCredentialId(foo.id())
                    .region("<EKS_REGION>")
                    .kubernetesVersion("1.24")
                    .loggingTypes(                
                        "audit",
                        "api")
                    .nodeGroups(ClusterEksConfigV2NodeGroupArgs.builder()
                        .desiredSize(3)
                        .maxSize(5)
                        .name("node_group1")
                        .launchTemplates(ClusterEksConfigV2NodeGroupLaunchTemplateArgs.builder()
                            .id("<ec2-launch-template-id>")
                            .version(1)
                            .build())
                        .build())
                    .privateAccess(true)
                    .publicAccess(true)
                    .build())
                .build());
    
        }
    }
    
    resources:
      foo:
        type: rancher2:CloudCredential
        properties:
          name: foo
          description: foo test
          amazonec2CredentialConfig:
            accessKey: <aws-access-key>
            secretKey: <aws-secret-key>
      fooCluster:
        type: rancher2:Cluster
        name: foo
        properties:
          name: foo
          description: Terraform EKS cluster
          eksConfigV2:
            cloudCredentialId: ${foo.id}
            region: <EKS_REGION>
            kubernetesVersion: '1.24'
            loggingTypes:
              - audit
              - api
            nodeGroups:
              - desiredSize: 3
                maxSize: 5
                name: node_group1
                launchTemplates:
                  - id: <ec2-launch-template-id>
                    version: 1
            privateAccess: true
            publicAccess: true
    

    Creating an AKS cluster from Rancher v2, using aks_config_v2. For Rancher v2.6.0 and above.

    import * as pulumi from "@pulumi/pulumi";
    import * as rancher2 from "@pulumi/rancher2";
    
    // Cloud credential that stores the Azure service-principal secrets
    // consumed by the AKS cluster below.
    const azureCredential = new rancher2.CloudCredential("foo-aks", {
        name: "foo-aks",
        azureCredentialConfig: {
            clientId: "<client-id>",
            clientSecret: "<client-secret>",
            subscriptionId: "<subscription-id>",
        },
    });

    // Both node pools spread across the same availability zones.
    const nodePoolZones = ["1", "2", "3"];

    // Rancher-managed AKS cluster that authenticates with the credential above.
    // NOTE(review): the node pools pin orchestratorVersion "1.21.2" while the
    // cluster requests kubernetesVersion "1.24.6" — confirm the skew is intended.
    const aksCluster = new rancher2.Cluster("foo", {
        name: "foo",
        description: "Terraform AKS cluster",
        aksConfigV2: {
            cloudCredentialId: azureCredential.id,
            resourceGroup: "<resource-group>",
            resourceLocation: "<resource-location>",
            dnsPrefix: "<dns-prefix>",
            kubernetesVersion: "1.24.6",
            networkPlugin: "<network-plugin>",
            virtualNetwork: "<virtual-network>",
            virtualNetworkResourceGroup: "<virtual-network-resource-group>",
            subnet: "<subnet>",
            nodeResourceGroup: "<node-resource-group>",
            nodePools: [
                {
                    availabilityZones: nodePoolZones,
                    name: "<nodepool-name-1>",
                    mode: "System",
                    count: 1,
                    orchestratorVersion: "1.21.2",
                    osDiskSizeGb: 128,
                    vmSize: "Standard_DS2_v2",
                },
                {
                    availabilityZones: nodePoolZones,
                    name: "<nodepool-name-2>",
                    mode: "User",
                    count: 1,
                    orchestratorVersion: "1.21.2",
                    osDiskSizeGb: 128,
                    vmSize: "Standard_DS2_v2",
                    maxSurge: "25%",
                    labels: {
                        test1: "data1",
                        test2: "data2",
                    },
                    taints: ["none:PreferNoSchedule"],
                },
            ],
        },
    });
    
    import pulumi
    import pulumi_rancher2 as rancher2
    
    # Cloud credential that stores the Azure service-principal secrets
    # consumed by the AKS cluster below.
    azure_credential = rancher2.CloudCredential("foo-aks",
        name="foo-aks",
        azure_credential_config={
            "client_id": "<client-id>",
            "client_secret": "<client-secret>",
            "subscription_id": "<subscription-id>",
        })

    # Both node pools spread across the same availability zones.
    node_pool_zones = ["1", "2", "3"]

    # Rancher-managed AKS cluster that authenticates with the credential above.
    # NOTE(review): the node pools pin orchestrator_version "1.21.2" while the
    # cluster requests kubernetes_version "1.24.6" -- confirm the skew is intended.
    aks_cluster = rancher2.Cluster("foo",
        name="foo",
        description="Terraform AKS cluster",
        aks_config_v2={
            "cloud_credential_id": azure_credential.id,
            "resource_group": "<resource-group>",
            "resource_location": "<resource-location>",
            "dns_prefix": "<dns-prefix>",
            "kubernetes_version": "1.24.6",
            "network_plugin": "<network-plugin>",
            "virtual_network": "<virtual-network>",
            "virtual_network_resource_group": "<virtual-network-resource-group>",
            "subnet": "<subnet>",
            "node_resource_group": "<node-resource-group>",
            "node_pools": [
                {
                    "availability_zones": node_pool_zones,
                    "name": "<nodepool-name-1>",
                    "mode": "System",
                    "count": 1,
                    "orchestrator_version": "1.21.2",
                    "os_disk_size_gb": 128,
                    "vm_size": "Standard_DS2_v2",
                },
                {
                    "availability_zones": node_pool_zones,
                    "name": "<nodepool-name-2>",
                    "mode": "User",
                    "count": 1,
                    "orchestrator_version": "1.21.2",
                    "os_disk_size_gb": 128,
                    "vm_size": "Standard_DS2_v2",
                    "max_surge": "25%",
                    "labels": {
                        "test1": "data1",
                        "test2": "data2",
                    },
                    "taints": ["none:PreferNoSchedule"],
                },
            ],
        })
    
    package main
    
    import (
    	"github.com/pulumi/pulumi-rancher2/sdk/v6/go/rancher2"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		_, err := rancher2.NewCloudCredential(ctx, "foo-aks", &rancher2.CloudCredentialArgs{
    			Name: pulumi.String("foo-aks"),
    			AzureCredentialConfig: &rancher2.CloudCredentialAzureCredentialConfigArgs{
    				ClientId:       pulumi.String("<client-id>"),
    				ClientSecret:   pulumi.String("<client-secret>"),
    				SubscriptionId: pulumi.String("<subscription-id>"),
    			},
    		})
    		if err != nil {
    			return err
    		}
    		_, err = rancher2.NewCluster(ctx, "foo", &rancher2.ClusterArgs{
    			Name:        pulumi.String("foo"),
    			Description: pulumi.String("Terraform AKS cluster"),
    			AksConfigV2: &rancher2.ClusterAksConfigV2Args{
    				CloudCredentialId:           foo_aks.ID(),
    				ResourceGroup:               pulumi.String("<resource-group>"),
    				ResourceLocation:            pulumi.String("<resource-location>"),
    				DnsPrefix:                   pulumi.String("<dns-prefix>"),
    				KubernetesVersion:           pulumi.String("1.24.6"),
    				NetworkPlugin:               pulumi.String("<network-plugin>"),
    				VirtualNetwork:              pulumi.String("<virtual-network>"),
    				VirtualNetworkResourceGroup: pulumi.String("<virtual-network-resource-group>"),
    				Subnet:                      pulumi.String("<subnet>"),
    				NodeResourceGroup:           pulumi.String("<node-resource-group>"),
    				NodePools: rancher2.ClusterAksConfigV2NodePoolArray{
    					&rancher2.ClusterAksConfigV2NodePoolArgs{
    						AvailabilityZones: pulumi.StringArray{
    							pulumi.String("1"),
    							pulumi.String("2"),
    							pulumi.String("3"),
    						},
    						Name:                pulumi.String("<nodepool-name-1>"),
    						Mode:                pulumi.String("System"),
    						Count:               pulumi.Int(1),
    						OrchestratorVersion: pulumi.String("1.21.2"),
    						OsDiskSizeGb:        pulumi.Int(128),
    						VmSize:              pulumi.String("Standard_DS2_v2"),
    					},
    					&rancher2.ClusterAksConfigV2NodePoolArgs{
    						AvailabilityZones: pulumi.StringArray{
    							pulumi.String("1"),
    							pulumi.String("2"),
    							pulumi.String("3"),
    						},
    						Name:                pulumi.String("<nodepool-name-2>"),
    						Count:               pulumi.Int(1),
    						Mode:                pulumi.String("User"),
    						OrchestratorVersion: pulumi.String("1.21.2"),
    						OsDiskSizeGb:        pulumi.Int(128),
    						VmSize:              pulumi.String("Standard_DS2_v2"),
    						MaxSurge:            pulumi.String("25%"),
    						Labels: pulumi.Map{
    							"test1": pulumi.Any("data1"),
    							"test2": pulumi.Any("data2"),
    						},
    						Taints: pulumi.StringArray{
    							pulumi.String("none:PreferNoSchedule"),
    						},
    					},
    				},
    			},
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using Rancher2 = Pulumi.Rancher2;

    return await Deployment.RunAsync(() =>
    {
        // Cloud credential that stores the Azure service-principal secrets
        // consumed by the AKS cluster below.
        var azureCredential = new Rancher2.CloudCredential("foo-aks", new()
        {
            Name = "foo-aks",
            AzureCredentialConfig = new Rancher2.Inputs.CloudCredentialAzureCredentialConfigArgs
            {
                ClientId = "<client-id>",
                ClientSecret = "<client-secret>",
                SubscriptionId = "<subscription-id>",
            },
        });

        // Both node pools spread across the same availability zones.
        var nodePoolZones = new[] { "1", "2", "3" };

        // Rancher-managed AKS cluster that authenticates with the credential above.
        // NOTE(review): the node pools pin OrchestratorVersion "1.21.2" while the
        // cluster requests KubernetesVersion "1.24.6" -- confirm the skew is intended.
        var aksCluster = new Rancher2.Cluster("foo", new()
        {
            Name = "foo",
            Description = "Terraform AKS cluster",
            AksConfigV2 = new Rancher2.Inputs.ClusterAksConfigV2Args
            {
                CloudCredentialId = azureCredential.Id,
                ResourceGroup = "<resource-group>",
                ResourceLocation = "<resource-location>",
                DnsPrefix = "<dns-prefix>",
                KubernetesVersion = "1.24.6",
                NetworkPlugin = "<network-plugin>",
                VirtualNetwork = "<virtual-network>",
                VirtualNetworkResourceGroup = "<virtual-network-resource-group>",
                Subnet = "<subnet>",
                NodeResourceGroup = "<node-resource-group>",
                NodePools = new[]
                {
                    new Rancher2.Inputs.ClusterAksConfigV2NodePoolArgs
                    {
                        AvailabilityZones = nodePoolZones,
                        Name = "<nodepool-name-1>",
                        Mode = "System",
                        Count = 1,
                        OrchestratorVersion = "1.21.2",
                        OsDiskSizeGb = 128,
                        VmSize = "Standard_DS2_v2",
                    },
                    new Rancher2.Inputs.ClusterAksConfigV2NodePoolArgs
                    {
                        AvailabilityZones = nodePoolZones,
                        Name = "<nodepool-name-2>",
                        Mode = "User",
                        Count = 1,
                        OrchestratorVersion = "1.21.2",
                        OsDiskSizeGb = 128,
                        VmSize = "Standard_DS2_v2",
                        MaxSurge = "25%",
                        Labels =
                        {
                            { "test1", "data1" },
                            { "test2", "data2" },
                        },
                        Taints = new[] { "none:PreferNoSchedule" },
                    },
                },
            },
        });

    });
    
    package generated_program;

    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.rancher2.CloudCredential;
    import com.pulumi.rancher2.CloudCredentialArgs;
    import com.pulumi.rancher2.inputs.CloudCredentialAzureCredentialConfigArgs;
    import com.pulumi.rancher2.Cluster;
    import com.pulumi.rancher2.ClusterArgs;
    import com.pulumi.rancher2.inputs.ClusterAksConfigV2Args;
    // BUG FIX: this import was missing from the generated example even though
    // ClusterAksConfigV2NodePoolArgs is used below, so the program did not compile.
    import com.pulumi.rancher2.inputs.ClusterAksConfigV2NodePoolArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;

    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }

        public static void stack(Context ctx) {
            // Cloud credential that stores the Azure service-principal secrets
            // consumed by the AKS cluster below.
            var foo_aks = new CloudCredential("foo-aks", CloudCredentialArgs.builder()
                .name("foo-aks")
                .azureCredentialConfig(CloudCredentialAzureCredentialConfigArgs.builder()
                    .clientId("<client-id>")
                    .clientSecret("<client-secret>")
                    .subscriptionId("<subscription-id>")
                    .build())
                .build());

            // Rancher-managed AKS cluster that authenticates with the credential above.
            // NOTE(review): the node pools pin orchestratorVersion "1.21.2" while the
            // cluster requests kubernetesVersion "1.24.6" -- confirm the skew is intended.
            var foo = new Cluster("foo", ClusterArgs.builder()
                .name("foo")
                .description("Terraform AKS cluster")
                .aksConfigV2(ClusterAksConfigV2Args.builder()
                    .cloudCredentialId(foo_aks.id())
                    .resourceGroup("<resource-group>")
                    .resourceLocation("<resource-location>")
                    .dnsPrefix("<dns-prefix>")
                    .kubernetesVersion("1.24.6")
                    .networkPlugin("<network-plugin>")
                    .virtualNetwork("<virtual-network>")
                    .virtualNetworkResourceGroup("<virtual-network-resource-group>")
                    .subnet("<subnet>")
                    .nodeResourceGroup("<node-resource-group>")
                    .nodePools(                
                        ClusterAksConfigV2NodePoolArgs.builder()
                            .availabilityZones(                        
                                "1",
                                "2",
                                "3")
                            .name("<nodepool-name-1>")
                            .mode("System")
                            .count(1)
                            .orchestratorVersion("1.21.2")
                            .osDiskSizeGb(128)
                            .vmSize("Standard_DS2_v2")
                            .build(),
                        ClusterAksConfigV2NodePoolArgs.builder()
                            .availabilityZones(                        
                                "1",
                                "2",
                                "3")
                            .name("<nodepool-name-2>")
                            .count(1)
                            .mode("User")
                            .orchestratorVersion("1.21.2")
                            .osDiskSizeGb(128)
                            .vmSize("Standard_DS2_v2")
                            .maxSurge("25%")
                            .labels(Map.ofEntries(
                                Map.entry("test1", "data1"),
                                Map.entry("test2", "data2")
                            ))
                            .taints("none:PreferNoSchedule")
                            .build())
                    .build())
                .build());

        }
    }
    
    resources:
      # Cloud credential holding the Azure service-principal secrets
      # consumed by the AKS cluster below.
      foo-aks:
        type: rancher2:CloudCredential
        properties:
          name: foo-aks
          azureCredentialConfig:
            clientId: <client-id>
            clientSecret: <client-secret>
            subscriptionId: <subscription-id>
      # Rancher-managed AKS cluster wired to the credential above via its ID.
      foo:
        type: rancher2:Cluster
        properties:
          name: foo
          description: Terraform AKS cluster
          aksConfigV2:
            cloudCredentialId: ${["foo-aks"].id}
            resourceGroup: <resource-group>
            resourceLocation: <resource-location>
            dnsPrefix: <dns-prefix>
            kubernetesVersion: 1.24.6
            networkPlugin: <network-plugin>
            virtualNetwork: <virtual-network>
            virtualNetworkResourceGroup: <virtual-network-resource-group>
            subnet: <subnet>
            nodeResourceGroup: <node-resource-group>
            # NOTE(review): both pools pin orchestratorVersion 1.21.2 while the
            # cluster requests kubernetesVersion 1.24.6 -- confirm the skew is intended.
            nodePools:
              - availabilityZones:
                  - '1'
                  - '2'
                  - '3'
                name: <nodepool-name-1>
                mode: System
                count: 1
                orchestratorVersion: 1.21.2
                osDiskSizeGb: 128
                vmSize: Standard_DS2_v2
              - availabilityZones:
                  - '1'
                  - '2'
                  - '3'
                name: <nodepool-name-2>
                count: 1
                mode: User
                orchestratorVersion: 1.21.2
                osDiskSizeGb: 128
                vmSize: Standard_DS2_v2
                maxSurge: 25%
                labels:
                  test1: data1
                  test2: data2
                taints:
                  - none:PreferNoSchedule
    

    Create Cluster Resource

    Resources are created with functions called constructors. To learn more about declaring and configuring resources, see Resources.

    Constructor syntax

    new Cluster(name: string, args?: ClusterArgs, opts?: CustomResourceOptions);
    @overload
    def Cluster(resource_name: str,
                args: Optional[ClusterArgs] = None,
                opts: Optional[ResourceOptions] = None)
    
    @overload
    def Cluster(resource_name: str,
                opts: Optional[ResourceOptions] = None,
                agent_env_vars: Optional[Sequence[ClusterAgentEnvVarArgs]] = None,
                aks_config: Optional[ClusterAksConfigArgs] = None,
                aks_config_v2: Optional[ClusterAksConfigV2Args] = None,
                annotations: Optional[Mapping[str, Any]] = None,
                cluster_agent_deployment_customizations: Optional[Sequence[ClusterClusterAgentDeploymentCustomizationArgs]] = None,
                cluster_auth_endpoint: Optional[ClusterClusterAuthEndpointArgs] = None,
                cluster_monitoring_input: Optional[ClusterClusterMonitoringInputArgs] = None,
                cluster_template_answers: Optional[ClusterClusterTemplateAnswersArgs] = None,
                cluster_template_id: Optional[str] = None,
                cluster_template_questions: Optional[Sequence[ClusterClusterTemplateQuestionArgs]] = None,
                cluster_template_revision_id: Optional[str] = None,
                default_pod_security_admission_configuration_template_name: Optional[str] = None,
                default_pod_security_policy_template_id: Optional[str] = None,
                description: Optional[str] = None,
                desired_agent_image: Optional[str] = None,
                desired_auth_image: Optional[str] = None,
                docker_root_dir: Optional[str] = None,
                driver: Optional[str] = None,
                eks_config: Optional[ClusterEksConfigArgs] = None,
                eks_config_v2: Optional[ClusterEksConfigV2Args] = None,
                enable_cluster_alerting: Optional[bool] = None,
                enable_cluster_monitoring: Optional[bool] = None,
                enable_network_policy: Optional[bool] = None,
                fleet_agent_deployment_customizations: Optional[Sequence[ClusterFleetAgentDeploymentCustomizationArgs]] = None,
                fleet_workspace_name: Optional[str] = None,
                gke_config: Optional[ClusterGkeConfigArgs] = None,
                gke_config_v2: Optional[ClusterGkeConfigV2Args] = None,
                k3s_config: Optional[ClusterK3sConfigArgs] = None,
                labels: Optional[Mapping[str, Any]] = None,
                name: Optional[str] = None,
                oke_config: Optional[ClusterOkeConfigArgs] = None,
                rke2_config: Optional[ClusterRke2ConfigArgs] = None,
                rke_config: Optional[ClusterRkeConfigArgs] = None,
                windows_prefered_cluster: Optional[bool] = None)
    func NewCluster(ctx *Context, name string, args *ClusterArgs, opts ...ResourceOption) (*Cluster, error)
    public Cluster(string name, ClusterArgs? args = null, CustomResourceOptions? opts = null)
    public Cluster(String name, ClusterArgs args)
    public Cluster(String name, ClusterArgs args, CustomResourceOptions options)
    
    type: rancher2:Cluster
    properties: # The arguments to resource properties.
    options: # Bag of options to control resource's behavior.
    
    

    Parameters

    name string
    The unique name of the resource.
    args ClusterArgs
    The arguments to resource properties.
    opts CustomResourceOptions
    Bag of options to control resource's behavior.
    resource_name str
    The unique name of the resource.
    args ClusterArgs
    The arguments to resource properties.
    opts ResourceOptions
    Bag of options to control resource's behavior.
    ctx Context
    Context object for the current deployment.
    name string
    The unique name of the resource.
    args ClusterArgs
    The arguments to resource properties.
    opts ResourceOption
    Bag of options to control resource's behavior.
    name string
    The unique name of the resource.
    args ClusterArgs
    The arguments to resource properties.
    opts CustomResourceOptions
    Bag of options to control resource's behavior.
    name String
    The unique name of the resource.
    args ClusterArgs
    The arguments to resource properties.
    options CustomResourceOptions
    Bag of options to control resource's behavior.

    Constructor example

    The following reference example uses placeholder values for all input properties.

    var clusterResource = new Rancher2.Cluster("clusterResource", new()
    {
        AgentEnvVars = new[]
        {
            new Rancher2.Inputs.ClusterAgentEnvVarArgs
            {
                Name = "string",
                Value = "string",
            },
        },
        AksConfig = new Rancher2.Inputs.ClusterAksConfigArgs
        {
            ClientId = "string",
            VirtualNetworkResourceGroup = "string",
            VirtualNetwork = "string",
            TenantId = "string",
            SubscriptionId = "string",
            AgentDnsPrefix = "string",
            Subnet = "string",
            SshPublicKeyContents = "string",
            ResourceGroup = "string",
            MasterDnsPrefix = "string",
            KubernetesVersion = "string",
            ClientSecret = "string",
            EnableMonitoring = false,
            MaxPods = 0,
            Count = 0,
            DnsServiceIp = "string",
            DockerBridgeCidr = "string",
            EnableHttpApplicationRouting = false,
            AadServerAppSecret = "string",
            AuthBaseUrl = "string",
            LoadBalancerSku = "string",
            Location = "string",
            LogAnalyticsWorkspace = "string",
            LogAnalyticsWorkspaceResourceGroup = "string",
            AgentVmSize = "string",
            BaseUrl = "string",
            NetworkPlugin = "string",
            NetworkPolicy = "string",
            PodCidr = "string",
            AgentStorageProfile = "string",
            ServiceCidr = "string",
            AgentPoolName = "string",
            AgentOsDiskSize = 0,
            AdminUsername = "string",
            Tags = new[]
            {
                "string",
            },
            AddServerAppId = "string",
            AddClientAppId = "string",
            AadTenantId = "string",
        },
        AksConfigV2 = new Rancher2.Inputs.ClusterAksConfigV2Args
        {
            CloudCredentialId = "string",
            ResourceLocation = "string",
            ResourceGroup = "string",
            Name = "string",
            NetworkDockerBridgeCidr = "string",
            HttpApplicationRouting = false,
            Imported = false,
            KubernetesVersion = "string",
            LinuxAdminUsername = "string",
            LinuxSshPublicKey = "string",
            LoadBalancerSku = "string",
            LogAnalyticsWorkspaceGroup = "string",
            LogAnalyticsWorkspaceName = "string",
            Monitoring = false,
            AuthBaseUrl = "string",
            NetworkDnsServiceIp = "string",
            DnsPrefix = "string",
            NetworkPlugin = "string",
            NetworkPodCidr = "string",
            NetworkPolicy = "string",
            NetworkServiceCidr = "string",
            NodePools = new[]
            {
                new Rancher2.Inputs.ClusterAksConfigV2NodePoolArgs
                {
                    Name = "string",
                    Mode = "string",
                    Count = 0,
                    Labels = 
                    {
                        { "string", "any" },
                    },
                    MaxCount = 0,
                    MaxPods = 0,
                    MaxSurge = "string",
                    EnableAutoScaling = false,
                    AvailabilityZones = new[]
                    {
                        "string",
                    },
                    MinCount = 0,
                    OrchestratorVersion = "string",
                    OsDiskSizeGb = 0,
                    OsDiskType = "string",
                    OsType = "string",
                    Taints = new[]
                    {
                        "string",
                    },
                    VmSize = "string",
                },
            },
            NodeResourceGroup = "string",
            PrivateCluster = false,
            BaseUrl = "string",
            AuthorizedIpRanges = new[]
            {
                "string",
            },
            Subnet = "string",
            Tags = 
            {
                { "string", "any" },
            },
            VirtualNetwork = "string",
            VirtualNetworkResourceGroup = "string",
        },
        Annotations = 
        {
            { "string", "any" },
        },
        ClusterAgentDeploymentCustomizations = new[]
        {
            new Rancher2.Inputs.ClusterClusterAgentDeploymentCustomizationArgs
            {
                AppendTolerations = new[]
                {
                    new Rancher2.Inputs.ClusterClusterAgentDeploymentCustomizationAppendTolerationArgs
                    {
                        Key = "string",
                        Effect = "string",
                        Operator = "string",
                        Seconds = 0,
                        Value = "string",
                    },
                },
                OverrideAffinity = "string",
                OverrideResourceRequirements = new[]
                {
                    new Rancher2.Inputs.ClusterClusterAgentDeploymentCustomizationOverrideResourceRequirementArgs
                    {
                        CpuLimit = "string",
                        CpuRequest = "string",
                        MemoryLimit = "string",
                        MemoryRequest = "string",
                    },
                },
            },
        },
        ClusterAuthEndpoint = new Rancher2.Inputs.ClusterClusterAuthEndpointArgs
        {
            CaCerts = "string",
            Enabled = false,
            Fqdn = "string",
        },
        ClusterMonitoringInput = new Rancher2.Inputs.ClusterClusterMonitoringInputArgs
        {
            Answers = 
            {
                { "string", "any" },
            },
            Version = "string",
        },
        ClusterTemplateAnswers = new Rancher2.Inputs.ClusterClusterTemplateAnswersArgs
        {
            ClusterId = "string",
            ProjectId = "string",
            Values = 
            {
                { "string", "any" },
            },
        },
        ClusterTemplateId = "string",
        ClusterTemplateQuestions = new[]
        {
            new Rancher2.Inputs.ClusterClusterTemplateQuestionArgs
            {
                Default = "string",
                Variable = "string",
                Required = false,
                Type = "string",
            },
        },
        ClusterTemplateRevisionId = "string",
        DefaultPodSecurityAdmissionConfigurationTemplateName = "string",
        DefaultPodSecurityPolicyTemplateId = "string",
        Description = "string",
        DesiredAgentImage = "string",
        DesiredAuthImage = "string",
        DockerRootDir = "string",
        Driver = "string",
        EksConfig = new Rancher2.Inputs.ClusterEksConfigArgs
        {
            AccessKey = "string",
            SecretKey = "string",
            KubernetesVersion = "string",
            EbsEncryption = false,
            NodeVolumeSize = 0,
            InstanceType = "string",
            KeyPairName = "string",
            AssociateWorkerNodePublicIp = false,
            MaximumNodes = 0,
            MinimumNodes = 0,
            DesiredNodes = 0,
            Region = "string",
            Ami = "string",
            SecurityGroups = new[]
            {
                "string",
            },
            ServiceRole = "string",
            SessionToken = "string",
            Subnets = new[]
            {
                "string",
            },
            UserData = "string",
            VirtualNetwork = "string",
        },
        EksConfigV2 = new Rancher2.Inputs.ClusterEksConfigV2Args
        {
            CloudCredentialId = "string",
            Imported = false,
            KmsKey = "string",
            KubernetesVersion = "string",
            LoggingTypes = new[]
            {
                "string",
            },
            Name = "string",
            NodeGroups = new[]
            {
                new Rancher2.Inputs.ClusterEksConfigV2NodeGroupArgs
                {
                    Name = "string",
                    MaxSize = 0,
                    Gpu = false,
                    DiskSize = 0,
                    NodeRole = "string",
                    InstanceType = "string",
                    Labels = 
                    {
                        { "string", "any" },
                    },
                    LaunchTemplates = new[]
                    {
                        new Rancher2.Inputs.ClusterEksConfigV2NodeGroupLaunchTemplateArgs
                        {
                            Id = "string",
                            Name = "string",
                            Version = 0,
                        },
                    },
                    DesiredSize = 0,
                    Version = "string",
                    Ec2SshKey = "string",
                    ImageId = "string",
                    RequestSpotInstances = false,
                    ResourceTags = 
                    {
                        { "string", "any" },
                    },
                    SpotInstanceTypes = new[]
                    {
                        "string",
                    },
                    Subnets = new[]
                    {
                        "string",
                    },
                    Tags = 
                    {
                        { "string", "any" },
                    },
                    UserData = "string",
                    MinSize = 0,
                },
            },
            PrivateAccess = false,
            PublicAccess = false,
            PublicAccessSources = new[]
            {
                "string",
            },
            Region = "string",
            SecretsEncryption = false,
            SecurityGroups = new[]
            {
                "string",
            },
            ServiceRole = "string",
            Subnets = new[]
            {
                "string",
            },
            Tags = 
            {
                { "string", "any" },
            },
        },
        EnableClusterAlerting = false,
        EnableClusterMonitoring = false,
        EnableNetworkPolicy = false,
        FleetAgentDeploymentCustomizations = new[]
        {
            new Rancher2.Inputs.ClusterFleetAgentDeploymentCustomizationArgs
            {
                AppendTolerations = new[]
                {
                    new Rancher2.Inputs.ClusterFleetAgentDeploymentCustomizationAppendTolerationArgs
                    {
                        Key = "string",
                        Effect = "string",
                        Operator = "string",
                        Seconds = 0,
                        Value = "string",
                    },
                },
                OverrideAffinity = "string",
                OverrideResourceRequirements = new[]
                {
                    new Rancher2.Inputs.ClusterFleetAgentDeploymentCustomizationOverrideResourceRequirementArgs
                    {
                        CpuLimit = "string",
                        CpuRequest = "string",
                        MemoryLimit = "string",
                        MemoryRequest = "string",
                    },
                },
            },
        },
        FleetWorkspaceName = "string",
        GkeConfig = new Rancher2.Inputs.ClusterGkeConfigArgs
        {
            IpPolicyNodeIpv4CidrBlock = "string",
            Credential = "string",
            SubNetwork = "string",
            ServiceAccount = "string",
            DiskType = "string",
            ProjectId = "string",
            OauthScopes = new[]
            {
                "string",
            },
            NodeVersion = "string",
            NodePool = "string",
            Network = "string",
            MasterVersion = "string",
            MasterIpv4CidrBlock = "string",
            MaintenanceWindow = "string",
            ClusterIpv4Cidr = "string",
            MachineType = "string",
            Locations = new[]
            {
                "string",
            },
            IpPolicySubnetworkName = "string",
            IpPolicyServicesSecondaryRangeName = "string",
            IpPolicyServicesIpv4CidrBlock = "string",
            ImageType = "string",
            IpPolicyClusterIpv4CidrBlock = "string",
            IpPolicyClusterSecondaryRangeName = "string",
            EnableNetworkPolicyConfig = false,
            MaxNodeCount = 0,
            EnableStackdriverMonitoring = false,
            EnableStackdriverLogging = false,
            EnablePrivateNodes = false,
            IssueClientCertificate = false,
            KubernetesDashboard = false,
            Labels = 
            {
                { "string", "any" },
            },
            LocalSsdCount = 0,
            EnablePrivateEndpoint = false,
            EnableNodepoolAutoscaling = false,
            EnableMasterAuthorizedNetwork = false,
            MasterAuthorizedNetworkCidrBlocks = new[]
            {
                "string",
            },
            EnableLegacyAbac = false,
            EnableKubernetesDashboard = false,
            IpPolicyCreateSubnetwork = false,
            MinNodeCount = 0,
            EnableHttpLoadBalancing = false,
            NodeCount = 0,
            EnableHorizontalPodAutoscaling = false,
            EnableAutoUpgrade = false,
            EnableAutoRepair = false,
            Preemptible = false,
            EnableAlphaFeature = false,
            Region = "string",
            ResourceLabels = 
            {
                { "string", "any" },
            },
            DiskSizeGb = 0,
            Description = "string",
            Taints = new[]
            {
                "string",
            },
            UseIpAliases = false,
            Zone = "string",
        },
        GkeConfigV2 = new Rancher2.Inputs.ClusterGkeConfigV2Args
        {
            GoogleCredentialSecret = "string",
            ProjectId = "string",
            Name = "string",
            LoggingService = "string",
            MasterAuthorizedNetworksConfig = new Rancher2.Inputs.ClusterGkeConfigV2MasterAuthorizedNetworksConfigArgs
            {
                CidrBlocks = new[]
                {
                    new Rancher2.Inputs.ClusterGkeConfigV2MasterAuthorizedNetworksConfigCidrBlockArgs
                    {
                        CidrBlock = "string",
                        DisplayName = "string",
                    },
                },
                Enabled = false,
            },
            Imported = false,
            IpAllocationPolicy = new Rancher2.Inputs.ClusterGkeConfigV2IpAllocationPolicyArgs
            {
                ClusterIpv4CidrBlock = "string",
                ClusterSecondaryRangeName = "string",
                CreateSubnetwork = false,
                NodeIpv4CidrBlock = "string",
                ServicesIpv4CidrBlock = "string",
                ServicesSecondaryRangeName = "string",
                SubnetworkName = "string",
                UseIpAliases = false,
            },
            KubernetesVersion = "string",
            Labels = 
            {
                { "string", "any" },
            },
            Locations = new[]
            {
                "string",
            },
            ClusterAddons = new Rancher2.Inputs.ClusterGkeConfigV2ClusterAddonsArgs
            {
                HorizontalPodAutoscaling = false,
                HttpLoadBalancing = false,
                NetworkPolicyConfig = false,
            },
            MaintenanceWindow = "string",
            EnableKubernetesAlpha = false,
            MonitoringService = "string",
            Description = "string",
            Network = "string",
            NetworkPolicyEnabled = false,
            NodePools = new[]
            {
                new Rancher2.Inputs.ClusterGkeConfigV2NodePoolArgs
                {
                    InitialNodeCount = 0,
                    Name = "string",
                    Version = "string",
                    Autoscaling = new Rancher2.Inputs.ClusterGkeConfigV2NodePoolAutoscalingArgs
                    {
                        Enabled = false,
                        MaxNodeCount = 0,
                        MinNodeCount = 0,
                    },
                    Config = new Rancher2.Inputs.ClusterGkeConfigV2NodePoolConfigArgs
                    {
                        DiskSizeGb = 0,
                        DiskType = "string",
                        ImageType = "string",
                        Labels = 
                        {
                            { "string", "any" },
                        },
                        LocalSsdCount = 0,
                        MachineType = "string",
                        OauthScopes = new[]
                        {
                            "string",
                        },
                        Preemptible = false,
                        Tags = new[]
                        {
                            "string",
                        },
                        Taints = new[]
                        {
                            new Rancher2.Inputs.ClusterGkeConfigV2NodePoolConfigTaintArgs
                            {
                                Effect = "string",
                                Key = "string",
                                Value = "string",
                            },
                        },
                    },
                    Management = new Rancher2.Inputs.ClusterGkeConfigV2NodePoolManagementArgs
                    {
                        AutoRepair = false,
                        AutoUpgrade = false,
                    },
                    MaxPodsConstraint = 0,
                },
            },
            PrivateClusterConfig = new Rancher2.Inputs.ClusterGkeConfigV2PrivateClusterConfigArgs
            {
                MasterIpv4CidrBlock = "string",
                EnablePrivateEndpoint = false,
                EnablePrivateNodes = false,
            },
            ClusterIpv4CidrBlock = "string",
            Region = "string",
            Subnetwork = "string",
            Zone = "string",
        },
        K3sConfig = new Rancher2.Inputs.ClusterK3sConfigArgs
        {
            UpgradeStrategy = new Rancher2.Inputs.ClusterK3sConfigUpgradeStrategyArgs
            {
                DrainServerNodes = false,
                DrainWorkerNodes = false,
                ServerConcurrency = 0,
                WorkerConcurrency = 0,
            },
            Version = "string",
        },
        Labels = 
        {
            { "string", "any" },
        },
        Name = "string",
        OkeConfig = new Rancher2.Inputs.ClusterOkeConfigArgs
        {
            KubernetesVersion = "string",
            UserOcid = "string",
            TenancyId = "string",
            Region = "string",
            PrivateKeyContents = "string",
            NodeShape = "string",
            Fingerprint = "string",
            CompartmentId = "string",
            NodeImage = "string",
            NodePublicKeyContents = "string",
            PrivateKeyPassphrase = "string",
            LoadBalancerSubnetName1 = "string",
            LoadBalancerSubnetName2 = "string",
            KmsKeyId = "string",
            NodePoolDnsDomainName = "string",
            NodePoolSubnetName = "string",
            FlexOcpus = 0,
            EnablePrivateNodes = false,
            PodCidr = "string",
            EnablePrivateControlPlane = false,
            LimitNodeCount = 0,
            QuantityOfNodeSubnets = 0,
            QuantityPerSubnet = 0,
            EnableKubernetesDashboard = false,
            ServiceCidr = "string",
            ServiceDnsDomainName = "string",
            SkipVcnDelete = false,
            Description = "string",
            CustomBootVolumeSize = 0,
            VcnCompartmentId = "string",
            VcnName = "string",
            WorkerNodeIngressCidr = "string",
        },
        Rke2Config = new Rancher2.Inputs.ClusterRke2ConfigArgs
        {
            UpgradeStrategy = new Rancher2.Inputs.ClusterRke2ConfigUpgradeStrategyArgs
            {
                DrainServerNodes = false,
                DrainWorkerNodes = false,
                ServerConcurrency = 0,
                WorkerConcurrency = 0,
            },
            Version = "string",
        },
        RkeConfig = new Rancher2.Inputs.ClusterRkeConfigArgs
        {
            AddonJobTimeout = 0,
            Addons = "string",
            AddonsIncludes = new[]
            {
                "string",
            },
            Authentication = new Rancher2.Inputs.ClusterRkeConfigAuthenticationArgs
            {
                Sans = new[]
                {
                    "string",
                },
                Strategy = "string",
            },
            Authorization = new Rancher2.Inputs.ClusterRkeConfigAuthorizationArgs
            {
                Mode = "string",
                Options = 
                {
                    { "string", "any" },
                },
            },
            BastionHost = new Rancher2.Inputs.ClusterRkeConfigBastionHostArgs
            {
                Address = "string",
                User = "string",
                Port = "string",
                SshAgentAuth = false,
                SshKey = "string",
                SshKeyPath = "string",
            },
            CloudProvider = new Rancher2.Inputs.ClusterRkeConfigCloudProviderArgs
            {
                AwsCloudProvider = new Rancher2.Inputs.ClusterRkeConfigCloudProviderAwsCloudProviderArgs
                {
                    Global = new Rancher2.Inputs.ClusterRkeConfigCloudProviderAwsCloudProviderGlobalArgs
                    {
                        DisableSecurityGroupIngress = false,
                        DisableStrictZoneCheck = false,
                        ElbSecurityGroup = "string",
                        KubernetesClusterId = "string",
                        KubernetesClusterTag = "string",
                        RoleArn = "string",
                        RouteTableId = "string",
                        SubnetId = "string",
                        Vpc = "string",
                        Zone = "string",
                    },
                    ServiceOverrides = new[]
                    {
                        new Rancher2.Inputs.ClusterRkeConfigCloudProviderAwsCloudProviderServiceOverrideArgs
                        {
                            Service = "string",
                            Region = "string",
                            SigningMethod = "string",
                            SigningName = "string",
                            SigningRegion = "string",
                            Url = "string",
                        },
                    },
                },
                AzureCloudProvider = new Rancher2.Inputs.ClusterRkeConfigCloudProviderAzureCloudProviderArgs
                {
                    SubscriptionId = "string",
                    TenantId = "string",
                    AadClientId = "string",
                    AadClientSecret = "string",
                    Location = "string",
                    PrimaryScaleSetName = "string",
                    CloudProviderBackoffDuration = 0,
                    CloudProviderBackoffExponent = 0,
                    CloudProviderBackoffJitter = 0,
                    CloudProviderBackoffRetries = 0,
                    CloudProviderRateLimit = false,
                    CloudProviderRateLimitBucket = 0,
                    CloudProviderRateLimitQps = 0,
                    LoadBalancerSku = "string",
                    AadClientCertPassword = "string",
                    MaximumLoadBalancerRuleCount = 0,
                    PrimaryAvailabilitySetName = "string",
                    CloudProviderBackoff = false,
                    ResourceGroup = "string",
                    RouteTableName = "string",
                    SecurityGroupName = "string",
                    SubnetName = "string",
                    Cloud = "string",
                    AadClientCertPath = "string",
                    UseInstanceMetadata = false,
                    UseManagedIdentityExtension = false,
                    VmType = "string",
                    VnetName = "string",
                    VnetResourceGroup = "string",
                },
                CustomCloudProvider = "string",
                Name = "string",
                OpenstackCloudProvider = new Rancher2.Inputs.ClusterRkeConfigCloudProviderOpenstackCloudProviderArgs
                {
                    Global = new Rancher2.Inputs.ClusterRkeConfigCloudProviderOpenstackCloudProviderGlobalArgs
                    {
                        AuthUrl = "string",
                        Password = "string",
                        Username = "string",
                        CaFile = "string",
                        DomainId = "string",
                        DomainName = "string",
                        Region = "string",
                        TenantId = "string",
                        TenantName = "string",
                        TrustId = "string",
                    },
                    BlockStorage = new Rancher2.Inputs.ClusterRkeConfigCloudProviderOpenstackCloudProviderBlockStorageArgs
                    {
                        BsVersion = "string",
                        IgnoreVolumeAz = false,
                        TrustDevicePath = false,
                    },
                    LoadBalancer = new Rancher2.Inputs.ClusterRkeConfigCloudProviderOpenstackCloudProviderLoadBalancerArgs
                    {
                        CreateMonitor = false,
                        FloatingNetworkId = "string",
                        LbMethod = "string",
                        LbProvider = "string",
                        LbVersion = "string",
                        ManageSecurityGroups = false,
                        MonitorDelay = "string",
                        MonitorMaxRetries = 0,
                        MonitorTimeout = "string",
                        SubnetId = "string",
                        UseOctavia = false,
                    },
                    Metadata = new Rancher2.Inputs.ClusterRkeConfigCloudProviderOpenstackCloudProviderMetadataArgs
                    {
                        RequestTimeout = 0,
                        SearchOrder = "string",
                    },
                    Route = new Rancher2.Inputs.ClusterRkeConfigCloudProviderOpenstackCloudProviderRouteArgs
                    {
                        RouterId = "string",
                    },
                },
                VsphereCloudProvider = new Rancher2.Inputs.ClusterRkeConfigCloudProviderVsphereCloudProviderArgs
                {
                    VirtualCenters = new[]
                    {
                        new Rancher2.Inputs.ClusterRkeConfigCloudProviderVsphereCloudProviderVirtualCenterArgs
                        {
                            Datacenters = "string",
                            Name = "string",
                            Password = "string",
                            User = "string",
                            Port = "string",
                            SoapRoundtripCount = 0,
                        },
                    },
                    Workspace = new Rancher2.Inputs.ClusterRkeConfigCloudProviderVsphereCloudProviderWorkspaceArgs
                    {
                        Datacenter = "string",
                        Folder = "string",
                        Server = "string",
                        DefaultDatastore = "string",
                        ResourcepoolPath = "string",
                    },
                    Disk = new Rancher2.Inputs.ClusterRkeConfigCloudProviderVsphereCloudProviderDiskArgs
                    {
                        ScsiControllerType = "string",
                    },
                    Global = new Rancher2.Inputs.ClusterRkeConfigCloudProviderVsphereCloudProviderGlobalArgs
                    {
                        Datacenters = "string",
                        GracefulShutdownTimeout = "string",
                        InsecureFlag = false,
                        Password = "string",
                        Port = "string",
                        SoapRoundtripCount = 0,
                        User = "string",
                    },
                    Network = new Rancher2.Inputs.ClusterRkeConfigCloudProviderVsphereCloudProviderNetworkArgs
                    {
                        PublicNetwork = "string",
                    },
                },
            },
            Dns = new Rancher2.Inputs.ClusterRkeConfigDnsArgs
            {
                LinearAutoscalerParams = new Rancher2.Inputs.ClusterRkeConfigDnsLinearAutoscalerParamsArgs
                {
                    CoresPerReplica = 0,
                    Max = 0,
                    Min = 0,
                    NodesPerReplica = 0,
                    PreventSinglePointFailure = false,
                },
                NodeSelector = 
                {
                    { "string", "any" },
                },
                Nodelocal = new Rancher2.Inputs.ClusterRkeConfigDnsNodelocalArgs
                {
                    IpAddress = "string",
                    NodeSelector = 
                    {
                        { "string", "any" },
                    },
                },
                Options = 
                {
                    { "string", "any" },
                },
                Provider = "string",
                ReverseCidrs = new[]
                {
                    "string",
                },
                Tolerations = new[]
                {
                    new Rancher2.Inputs.ClusterRkeConfigDnsTolerationArgs
                    {
                        Key = "string",
                        Effect = "string",
                        Operator = "string",
                        Seconds = 0,
                        Value = "string",
                    },
                },
                UpdateStrategy = new Rancher2.Inputs.ClusterRkeConfigDnsUpdateStrategyArgs
                {
                    RollingUpdate = new Rancher2.Inputs.ClusterRkeConfigDnsUpdateStrategyRollingUpdateArgs
                    {
                        MaxSurge = 0,
                        MaxUnavailable = 0,
                    },
                    Strategy = "string",
                },
                UpstreamNameservers = new[]
                {
                    "string",
                },
            },
            EnableCriDockerd = false,
            IgnoreDockerVersion = false,
            Ingress = new Rancher2.Inputs.ClusterRkeConfigIngressArgs
            {
                DefaultBackend = false,
                DnsPolicy = "string",
                ExtraArgs = 
                {
                    { "string", "any" },
                },
                HttpPort = 0,
                HttpsPort = 0,
                NetworkMode = "string",
                NodeSelector = 
                {
                    { "string", "any" },
                },
                Options = 
                {
                    { "string", "any" },
                },
                Provider = "string",
                Tolerations = new[]
                {
                    new Rancher2.Inputs.ClusterRkeConfigIngressTolerationArgs
                    {
                        Key = "string",
                        Effect = "string",
                        Operator = "string",
                        Seconds = 0,
                        Value = "string",
                    },
                },
                UpdateStrategy = new Rancher2.Inputs.ClusterRkeConfigIngressUpdateStrategyArgs
                {
                    RollingUpdate = new Rancher2.Inputs.ClusterRkeConfigIngressUpdateStrategyRollingUpdateArgs
                    {
                        MaxUnavailable = 0,
                    },
                    Strategy = "string",
                },
            },
            KubernetesVersion = "string",
            Monitoring = new Rancher2.Inputs.ClusterRkeConfigMonitoringArgs
            {
                NodeSelector = 
                {
                    { "string", "any" },
                },
                Options = 
                {
                    { "string", "any" },
                },
                Provider = "string",
                Replicas = 0,
                Tolerations = new[]
                {
                    new Rancher2.Inputs.ClusterRkeConfigMonitoringTolerationArgs
                    {
                        Key = "string",
                        Effect = "string",
                        Operator = "string",
                        Seconds = 0,
                        Value = "string",
                    },
                },
                UpdateStrategy = new Rancher2.Inputs.ClusterRkeConfigMonitoringUpdateStrategyArgs
                {
                    RollingUpdate = new Rancher2.Inputs.ClusterRkeConfigMonitoringUpdateStrategyRollingUpdateArgs
                    {
                        MaxSurge = 0,
                        MaxUnavailable = 0,
                    },
                    Strategy = "string",
                },
            },
            Network = new Rancher2.Inputs.ClusterRkeConfigNetworkArgs
            {
                AciNetworkProvider = new Rancher2.Inputs.ClusterRkeConfigNetworkAciNetworkProviderArgs
                {
                    KubeApiVlan = "string",
                    ApicHosts = new[]
                    {
                        "string",
                    },
                    ApicUserCrt = "string",
                    ApicUserKey = "string",
                    ApicUserName = "string",
                    EncapType = "string",
                    ExternDynamic = "string",
                    VrfTenant = "string",
                    VrfName = "string",
                    Token = "string",
                    SystemId = "string",
                    ServiceVlan = "string",
                    NodeSvcSubnet = "string",
                    NodeSubnet = "string",
                    Aep = "string",
                    McastRangeStart = "string",
                    McastRangeEnd = "string",
                    ExternStatic = "string",
                    L3outExternalNetworks = new[]
                    {
                        "string",
                    },
                    L3out = "string",
                    MultusDisable = "string",
                    OvsMemoryLimit = "string",
                    ImagePullSecret = "string",
                    InfraVlan = "string",
                    InstallIstio = "string",
                    IstioProfile = "string",
                    KafkaBrokers = new[]
                    {
                        "string",
                    },
                    KafkaClientCrt = "string",
                    KafkaClientKey = "string",
                    HostAgentLogLevel = "string",
                    GbpPodSubnet = "string",
                    EpRegistry = "string",
                    MaxNodesSvcGraph = "string",
                    EnableEndpointSlice = "string",
                    DurationWaitForNetwork = "string",
                    MtuHeadRoom = "string",
                    DropLogEnable = "string",
                    NoPriorityClass = "string",
                    NodePodIfEnable = "string",
                    DisableWaitForNetwork = "string",
                    DisablePeriodicSnatGlobalInfoSync = "string",
                    OpflexClientSsl = "string",
                    OpflexDeviceDeleteTimeout = "string",
                    OpflexLogLevel = "string",
                    OpflexMode = "string",
                    OpflexServerPort = "string",
                    OverlayVrfName = "string",
                    ImagePullPolicy = "string",
                    PbrTrackingNonSnat = "string",
                    PodSubnetChunkSize = "string",
                    RunGbpContainer = "string",
                    RunOpflexServerContainer = "string",
                    ServiceMonitorInterval = "string",
                    ControllerLogLevel = "string",
                    SnatContractScope = "string",
                    SnatNamespace = "string",
                    SnatPortRangeEnd = "string",
                    SnatPortRangeStart = "string",
                    SnatPortsPerNode = "string",
                    SriovEnable = "string",
                    SubnetDomainName = "string",
                    Capic = "string",
                    Tenant = "string",
                    ApicSubscriptionDelay = "string",
                    UseAciAnywhereCrd = "string",
                    UseAciCniPriorityClass = "string",
                    UseClusterRole = "string",
                    UseHostNetnsVolume = "string",
                    UseOpflexServerVolume = "string",
                    UsePrivilegedContainer = "string",
                    VmmController = "string",
                    VmmDomain = "string",
                    ApicRefreshTime = "string",
                    ApicRefreshTickerAdjust = "string",
                },
                CalicoNetworkProvider = new Rancher2.Inputs.ClusterRkeConfigNetworkCalicoNetworkProviderArgs
                {
                    CloudProvider = "string",
                },
                CanalNetworkProvider = new Rancher2.Inputs.ClusterRkeConfigNetworkCanalNetworkProviderArgs
                {
                    Iface = "string",
                },
                FlannelNetworkProvider = new Rancher2.Inputs.ClusterRkeConfigNetworkFlannelNetworkProviderArgs
                {
                    Iface = "string",
                },
                Mtu = 0,
                Options = 
                {
                    { "string", "any" },
                },
                Plugin = "string",
                Tolerations = new[]
                {
                    new Rancher2.Inputs.ClusterRkeConfigNetworkTolerationArgs
                    {
                        Key = "string",
                        Effect = "string",
                        Operator = "string",
                        Seconds = 0,
                        Value = "string",
                    },
                },
                WeaveNetworkProvider = new Rancher2.Inputs.ClusterRkeConfigNetworkWeaveNetworkProviderArgs
                {
                    Password = "string",
                },
            },
            Nodes = new[]
            {
                new Rancher2.Inputs.ClusterRkeConfigNodeArgs
                {
                    Address = "string",
                    Roles = new[]
                    {
                        "string",
                    },
                    User = "string",
                    DockerSocket = "string",
                    HostnameOverride = "string",
                    InternalAddress = "string",
                    Labels = 
                    {
                        { "string", "any" },
                    },
                    NodeId = "string",
                    Port = "string",
                    SshAgentAuth = false,
                    SshKey = "string",
                    SshKeyPath = "string",
                },
            },
            PrefixPath = "string",
            PrivateRegistries = new[]
            {
                new Rancher2.Inputs.ClusterRkeConfigPrivateRegistryArgs
                {
                    Url = "string",
                    EcrCredentialPlugin = new Rancher2.Inputs.ClusterRkeConfigPrivateRegistryEcrCredentialPluginArgs
                    {
                        AwsAccessKeyId = "string",
                        AwsSecretAccessKey = "string",
                        AwsSessionToken = "string",
                    },
                    IsDefault = false,
                    Password = "string",
                    User = "string",
                },
            },
            Services = new Rancher2.Inputs.ClusterRkeConfigServicesArgs
            {
                Etcd = new Rancher2.Inputs.ClusterRkeConfigServicesEtcdArgs
                {
                    BackupConfig = new Rancher2.Inputs.ClusterRkeConfigServicesEtcdBackupConfigArgs
                    {
                        Enabled = false,
                        IntervalHours = 0,
                        Retention = 0,
                        S3BackupConfig = new Rancher2.Inputs.ClusterRkeConfigServicesEtcdBackupConfigS3BackupConfigArgs
                        {
                            BucketName = "string",
                            Endpoint = "string",
                            AccessKey = "string",
                            CustomCa = "string",
                            Folder = "string",
                            Region = "string",
                            SecretKey = "string",
                        },
                        SafeTimestamp = false,
                        Timeout = 0,
                    },
                    CaCert = "string",
                    Cert = "string",
                    Creation = "string",
                    ExternalUrls = new[]
                    {
                        "string",
                    },
                    ExtraArgs = 
                    {
                        { "string", "any" },
                    },
                    ExtraBinds = new[]
                    {
                        "string",
                    },
                    ExtraEnvs = new[]
                    {
                        "string",
                    },
                    Gid = 0,
                    Image = "string",
                    Key = "string",
                    Path = "string",
                    Retention = "string",
                    Snapshot = false,
                    Uid = 0,
                },
                KubeApi = new Rancher2.Inputs.ClusterRkeConfigServicesKubeApiArgs
                {
                    AdmissionConfiguration = new Rancher2.Inputs.ClusterRkeConfigServicesKubeApiAdmissionConfigurationArgs
                    {
                        ApiVersion = "string",
                        Kind = "string",
                        Plugins = new[]
                        {
                            new Rancher2.Inputs.ClusterRkeConfigServicesKubeApiAdmissionConfigurationPluginArgs
                            {
                                Configuration = "string",
                                Name = "string",
                                Path = "string",
                            },
                        },
                    },
                    AlwaysPullImages = false,
                    AuditLog = new Rancher2.Inputs.ClusterRkeConfigServicesKubeApiAuditLogArgs
                    {
                        Configuration = new Rancher2.Inputs.ClusterRkeConfigServicesKubeApiAuditLogConfigurationArgs
                        {
                            Format = "string",
                            MaxAge = 0,
                            MaxBackup = 0,
                            MaxSize = 0,
                            Path = "string",
                            Policy = "string",
                        },
                        Enabled = false,
                    },
                    EventRateLimit = new Rancher2.Inputs.ClusterRkeConfigServicesKubeApiEventRateLimitArgs
                    {
                        Configuration = "string",
                        Enabled = false,
                    },
                    ExtraArgs = 
                    {
                        { "string", "any" },
                    },
                    ExtraBinds = new[]
                    {
                        "string",
                    },
                    ExtraEnvs = new[]
                    {
                        "string",
                    },
                    Image = "string",
                    PodSecurityPolicy = false,
                    SecretsEncryptionConfig = new Rancher2.Inputs.ClusterRkeConfigServicesKubeApiSecretsEncryptionConfigArgs
                    {
                        CustomConfig = "string",
                        Enabled = false,
                    },
                    ServiceClusterIpRange = "string",
                    ServiceNodePortRange = "string",
                },
                KubeController = new Rancher2.Inputs.ClusterRkeConfigServicesKubeControllerArgs
                {
                    ClusterCidr = "string",
                    ExtraArgs = 
                    {
                        { "string", "any" },
                    },
                    ExtraBinds = new[]
                    {
                        "string",
                    },
                    ExtraEnvs = new[]
                    {
                        "string",
                    },
                    Image = "string",
                    ServiceClusterIpRange = "string",
                },
                Kubelet = new Rancher2.Inputs.ClusterRkeConfigServicesKubeletArgs
                {
                    ClusterDnsServer = "string",
                    ClusterDomain = "string",
                    ExtraArgs = 
                    {
                        { "string", "any" },
                    },
                    ExtraBinds = new[]
                    {
                        "string",
                    },
                    ExtraEnvs = new[]
                    {
                        "string",
                    },
                    FailSwapOn = false,
                    GenerateServingCertificate = false,
                    Image = "string",
                    InfraContainerImage = "string",
                },
                Kubeproxy = new Rancher2.Inputs.ClusterRkeConfigServicesKubeproxyArgs
                {
                    ExtraArgs = 
                    {
                        { "string", "any" },
                    },
                    ExtraBinds = new[]
                    {
                        "string",
                    },
                    ExtraEnvs = new[]
                    {
                        "string",
                    },
                    Image = "string",
                },
                Scheduler = new Rancher2.Inputs.ClusterRkeConfigServicesSchedulerArgs
                {
                    ExtraArgs = 
                    {
                        { "string", "any" },
                    },
                    ExtraBinds = new[]
                    {
                        "string",
                    },
                    ExtraEnvs = new[]
                    {
                        "string",
                    },
                    Image = "string",
                },
            },
            SshAgentAuth = false,
            SshCertPath = "string",
            SshKeyPath = "string",
            UpgradeStrategy = new Rancher2.Inputs.ClusterRkeConfigUpgradeStrategyArgs
            {
                Drain = false,
                DrainInput = new Rancher2.Inputs.ClusterRkeConfigUpgradeStrategyDrainInputArgs
                {
                    DeleteLocalData = false,
                    Force = false,
                    GracePeriod = 0,
                    IgnoreDaemonSets = false,
                    Timeout = 0,
                },
                MaxUnavailableControlplane = "string",
                MaxUnavailableWorker = "string",
            },
            WinPrefixPath = "string",
        },
        WindowsPreferedCluster = false,
    });
    
    example, err := rancher2.NewCluster(ctx, "clusterResource", &rancher2.ClusterArgs{
    	AgentEnvVars: rancher2.ClusterAgentEnvVarArray{
    		&rancher2.ClusterAgentEnvVarArgs{
    			Name:  pulumi.String("string"),
    			Value: pulumi.String("string"),
    		},
    	},
    	AksConfig: &rancher2.ClusterAksConfigArgs{
    		ClientId:                           pulumi.String("string"),
    		VirtualNetworkResourceGroup:        pulumi.String("string"),
    		VirtualNetwork:                     pulumi.String("string"),
    		TenantId:                           pulumi.String("string"),
    		SubscriptionId:                     pulumi.String("string"),
    		AgentDnsPrefix:                     pulumi.String("string"),
    		Subnet:                             pulumi.String("string"),
    		SshPublicKeyContents:               pulumi.String("string"),
    		ResourceGroup:                      pulumi.String("string"),
    		MasterDnsPrefix:                    pulumi.String("string"),
    		KubernetesVersion:                  pulumi.String("string"),
    		ClientSecret:                       pulumi.String("string"),
    		EnableMonitoring:                   pulumi.Bool(false),
    		MaxPods:                            pulumi.Int(0),
    		Count:                              pulumi.Int(0),
    		DnsServiceIp:                       pulumi.String("string"),
    		DockerBridgeCidr:                   pulumi.String("string"),
    		EnableHttpApplicationRouting:       pulumi.Bool(false),
    		AadServerAppSecret:                 pulumi.String("string"),
    		AuthBaseUrl:                        pulumi.String("string"),
    		LoadBalancerSku:                    pulumi.String("string"),
    		Location:                           pulumi.String("string"),
    		LogAnalyticsWorkspace:              pulumi.String("string"),
    		LogAnalyticsWorkspaceResourceGroup: pulumi.String("string"),
    		AgentVmSize:                        pulumi.String("string"),
    		BaseUrl:                            pulumi.String("string"),
    		NetworkPlugin:                      pulumi.String("string"),
    		NetworkPolicy:                      pulumi.String("string"),
    		PodCidr:                            pulumi.String("string"),
    		AgentStorageProfile:                pulumi.String("string"),
    		ServiceCidr:                        pulumi.String("string"),
    		AgentPoolName:                      pulumi.String("string"),
    		AgentOsDiskSize:                    pulumi.Int(0),
    		AdminUsername:                      pulumi.String("string"),
    		Tags: pulumi.StringArray{
    			pulumi.String("string"),
    		},
    		AddServerAppId: pulumi.String("string"),
    		AddClientAppId: pulumi.String("string"),
    		AadTenantId:    pulumi.String("string"),
    	},
    	AksConfigV2: &rancher2.ClusterAksConfigV2Args{
    		CloudCredentialId:          pulumi.String("string"),
    		ResourceLocation:           pulumi.String("string"),
    		ResourceGroup:              pulumi.String("string"),
    		Name:                       pulumi.String("string"),
    		NetworkDockerBridgeCidr:    pulumi.String("string"),
    		HttpApplicationRouting:     pulumi.Bool(false),
    		Imported:                   pulumi.Bool(false),
    		KubernetesVersion:          pulumi.String("string"),
    		LinuxAdminUsername:         pulumi.String("string"),
    		LinuxSshPublicKey:          pulumi.String("string"),
    		LoadBalancerSku:            pulumi.String("string"),
    		LogAnalyticsWorkspaceGroup: pulumi.String("string"),
    		LogAnalyticsWorkspaceName:  pulumi.String("string"),
    		Monitoring:                 pulumi.Bool(false),
    		AuthBaseUrl:                pulumi.String("string"),
    		NetworkDnsServiceIp:        pulumi.String("string"),
    		DnsPrefix:                  pulumi.String("string"),
    		NetworkPlugin:              pulumi.String("string"),
    		NetworkPodCidr:             pulumi.String("string"),
    		NetworkPolicy:              pulumi.String("string"),
    		NetworkServiceCidr:         pulumi.String("string"),
    		NodePools: rancher2.ClusterAksConfigV2NodePoolArray{
    			&rancher2.ClusterAksConfigV2NodePoolArgs{
    				Name:  pulumi.String("string"),
    				Mode:  pulumi.String("string"),
    				Count: pulumi.Int(0),
    				Labels: pulumi.Map{
    					"string": pulumi.Any("any"),
    				},
    				MaxCount:          pulumi.Int(0),
    				MaxPods:           pulumi.Int(0),
    				MaxSurge:          pulumi.String("string"),
    				EnableAutoScaling: pulumi.Bool(false),
    				AvailabilityZones: pulumi.StringArray{
    					pulumi.String("string"),
    				},
    				MinCount:            pulumi.Int(0),
    				OrchestratorVersion: pulumi.String("string"),
    				OsDiskSizeGb:        pulumi.Int(0),
    				OsDiskType:          pulumi.String("string"),
    				OsType:              pulumi.String("string"),
    				Taints: pulumi.StringArray{
    					pulumi.String("string"),
    				},
    				VmSize: pulumi.String("string"),
    			},
    		},
    		NodeResourceGroup: pulumi.String("string"),
    		PrivateCluster:    pulumi.Bool(false),
    		BaseUrl:           pulumi.String("string"),
    		AuthorizedIpRanges: pulumi.StringArray{
    			pulumi.String("string"),
    		},
    		Subnet: pulumi.String("string"),
    		Tags: pulumi.Map{
    			"string": pulumi.Any("any"),
    		},
    		VirtualNetwork:              pulumi.String("string"),
    		VirtualNetworkResourceGroup: pulumi.String("string"),
    	},
    	Annotations: pulumi.Map{
    		"string": pulumi.Any("any"),
    	},
    	ClusterAgentDeploymentCustomizations: rancher2.ClusterClusterAgentDeploymentCustomizationArray{
    		&rancher2.ClusterClusterAgentDeploymentCustomizationArgs{
    			AppendTolerations: rancher2.ClusterClusterAgentDeploymentCustomizationAppendTolerationArray{
    				&rancher2.ClusterClusterAgentDeploymentCustomizationAppendTolerationArgs{
    					Key:      pulumi.String("string"),
    					Effect:   pulumi.String("string"),
    					Operator: pulumi.String("string"),
    					Seconds:  pulumi.Int(0),
    					Value:    pulumi.String("string"),
    				},
    			},
    			OverrideAffinity: pulumi.String("string"),
    			OverrideResourceRequirements: rancher2.ClusterClusterAgentDeploymentCustomizationOverrideResourceRequirementArray{
    				&rancher2.ClusterClusterAgentDeploymentCustomizationOverrideResourceRequirementArgs{
    					CpuLimit:      pulumi.String("string"),
    					CpuRequest:    pulumi.String("string"),
    					MemoryLimit:   pulumi.String("string"),
    					MemoryRequest: pulumi.String("string"),
    				},
    			},
    		},
    	},
    	ClusterAuthEndpoint: &rancher2.ClusterClusterAuthEndpointArgs{
    		CaCerts: pulumi.String("string"),
    		Enabled: pulumi.Bool(false),
    		Fqdn:    pulumi.String("string"),
    	},
    	ClusterMonitoringInput: &rancher2.ClusterClusterMonitoringInputArgs{
    		Answers: pulumi.Map{
    			"string": pulumi.Any("any"),
    		},
    		Version: pulumi.String("string"),
    	},
    	ClusterTemplateAnswers: &rancher2.ClusterClusterTemplateAnswersArgs{
    		ClusterId: pulumi.String("string"),
    		ProjectId: pulumi.String("string"),
    		Values: pulumi.Map{
    			"string": pulumi.Any("any"),
    		},
    	},
    	ClusterTemplateId: pulumi.String("string"),
    	ClusterTemplateQuestions: rancher2.ClusterClusterTemplateQuestionArray{
    		&rancher2.ClusterClusterTemplateQuestionArgs{
    			Default:  pulumi.String("string"),
    			Variable: pulumi.String("string"),
    			Required: pulumi.Bool(false),
    			Type:     pulumi.String("string"),
    		},
    	},
    	ClusterTemplateRevisionId:                            pulumi.String("string"),
    	DefaultPodSecurityAdmissionConfigurationTemplateName: pulumi.String("string"),
    	DefaultPodSecurityPolicyTemplateId:                   pulumi.String("string"),
    	Description:                                          pulumi.String("string"),
    	DesiredAgentImage:                                    pulumi.String("string"),
    	DesiredAuthImage:                                     pulumi.String("string"),
    	DockerRootDir:                                        pulumi.String("string"),
    	Driver:                                               pulumi.String("string"),
    	EksConfig: &rancher2.ClusterEksConfigArgs{
    		AccessKey:                   pulumi.String("string"),
    		SecretKey:                   pulumi.String("string"),
    		KubernetesVersion:           pulumi.String("string"),
    		EbsEncryption:               pulumi.Bool(false),
    		NodeVolumeSize:              pulumi.Int(0),
    		InstanceType:                pulumi.String("string"),
    		KeyPairName:                 pulumi.String("string"),
    		AssociateWorkerNodePublicIp: pulumi.Bool(false),
    		MaximumNodes:                pulumi.Int(0),
    		MinimumNodes:                pulumi.Int(0),
    		DesiredNodes:                pulumi.Int(0),
    		Region:                      pulumi.String("string"),
    		Ami:                         pulumi.String("string"),
    		SecurityGroups: pulumi.StringArray{
    			pulumi.String("string"),
    		},
    		ServiceRole:  pulumi.String("string"),
    		SessionToken: pulumi.String("string"),
    		Subnets: pulumi.StringArray{
    			pulumi.String("string"),
    		},
    		UserData:       pulumi.String("string"),
    		VirtualNetwork: pulumi.String("string"),
    	},
    	EksConfigV2: &rancher2.ClusterEksConfigV2Args{
    		CloudCredentialId: pulumi.String("string"),
    		Imported:          pulumi.Bool(false),
    		KmsKey:            pulumi.String("string"),
    		KubernetesVersion: pulumi.String("string"),
    		LoggingTypes: pulumi.StringArray{
    			pulumi.String("string"),
    		},
    		Name: pulumi.String("string"),
    		NodeGroups: rancher2.ClusterEksConfigV2NodeGroupArray{
    			&rancher2.ClusterEksConfigV2NodeGroupArgs{
    				Name:         pulumi.String("string"),
    				MaxSize:      pulumi.Int(0),
    				Gpu:          pulumi.Bool(false),
    				DiskSize:     pulumi.Int(0),
    				NodeRole:     pulumi.String("string"),
    				InstanceType: pulumi.String("string"),
    				Labels: pulumi.Map{
    					"string": pulumi.Any("any"),
    				},
    				LaunchTemplates: rancher2.ClusterEksConfigV2NodeGroupLaunchTemplateArray{
    					&rancher2.ClusterEksConfigV2NodeGroupLaunchTemplateArgs{
    						Id:      pulumi.String("string"),
    						Name:    pulumi.String("string"),
    						Version: pulumi.Int(0),
    					},
    				},
    				DesiredSize:          pulumi.Int(0),
    				Version:              pulumi.String("string"),
    				Ec2SshKey:            pulumi.String("string"),
    				ImageId:              pulumi.String("string"),
    				RequestSpotInstances: pulumi.Bool(false),
    				ResourceTags: pulumi.Map{
    					"string": pulumi.Any("any"),
    				},
    				SpotInstanceTypes: pulumi.StringArray{
    					pulumi.String("string"),
    				},
    				Subnets: pulumi.StringArray{
    					pulumi.String("string"),
    				},
    				Tags: pulumi.Map{
    					"string": pulumi.Any("any"),
    				},
    				UserData: pulumi.String("string"),
    				MinSize:  pulumi.Int(0),
    			},
    		},
    		PrivateAccess: pulumi.Bool(false),
    		PublicAccess:  pulumi.Bool(false),
    		PublicAccessSources: pulumi.StringArray{
    			pulumi.String("string"),
    		},
    		Region:            pulumi.String("string"),
    		SecretsEncryption: pulumi.Bool(false),
    		SecurityGroups: pulumi.StringArray{
    			pulumi.String("string"),
    		},
    		ServiceRole: pulumi.String("string"),
    		Subnets: pulumi.StringArray{
    			pulumi.String("string"),
    		},
    		Tags: pulumi.Map{
    			"string": pulumi.Any("any"),
    		},
    	},
    	EnableClusterAlerting:   pulumi.Bool(false),
    	EnableClusterMonitoring: pulumi.Bool(false),
    	EnableNetworkPolicy:     pulumi.Bool(false),
    	FleetAgentDeploymentCustomizations: rancher2.ClusterFleetAgentDeploymentCustomizationArray{
    		&rancher2.ClusterFleetAgentDeploymentCustomizationArgs{
    			AppendTolerations: rancher2.ClusterFleetAgentDeploymentCustomizationAppendTolerationArray{
    				&rancher2.ClusterFleetAgentDeploymentCustomizationAppendTolerationArgs{
    					Key:      pulumi.String("string"),
    					Effect:   pulumi.String("string"),
    					Operator: pulumi.String("string"),
    					Seconds:  pulumi.Int(0),
    					Value:    pulumi.String("string"),
    				},
    			},
    			OverrideAffinity: pulumi.String("string"),
    			OverrideResourceRequirements: rancher2.ClusterFleetAgentDeploymentCustomizationOverrideResourceRequirementArray{
    				&rancher2.ClusterFleetAgentDeploymentCustomizationOverrideResourceRequirementArgs{
    					CpuLimit:      pulumi.String("string"),
    					CpuRequest:    pulumi.String("string"),
    					MemoryLimit:   pulumi.String("string"),
    					MemoryRequest: pulumi.String("string"),
    				},
    			},
    		},
    	},
    	FleetWorkspaceName: pulumi.String("string"),
    	GkeConfig: &rancher2.ClusterGkeConfigArgs{
    		IpPolicyNodeIpv4CidrBlock: pulumi.String("string"),
    		Credential:                pulumi.String("string"),
    		SubNetwork:                pulumi.String("string"),
    		ServiceAccount:            pulumi.String("string"),
    		DiskType:                  pulumi.String("string"),
    		ProjectId:                 pulumi.String("string"),
    		OauthScopes: pulumi.StringArray{
    			pulumi.String("string"),
    		},
    		NodeVersion:         pulumi.String("string"),
    		NodePool:            pulumi.String("string"),
    		Network:             pulumi.String("string"),
    		MasterVersion:       pulumi.String("string"),
    		MasterIpv4CidrBlock: pulumi.String("string"),
    		MaintenanceWindow:   pulumi.String("string"),
    		ClusterIpv4Cidr:     pulumi.String("string"),
    		MachineType:         pulumi.String("string"),
    		Locations: pulumi.StringArray{
    			pulumi.String("string"),
    		},
    		IpPolicySubnetworkName:             pulumi.String("string"),
    		IpPolicyServicesSecondaryRangeName: pulumi.String("string"),
    		IpPolicyServicesIpv4CidrBlock:      pulumi.String("string"),
    		ImageType:                          pulumi.String("string"),
    		IpPolicyClusterIpv4CidrBlock:       pulumi.String("string"),
    		IpPolicyClusterSecondaryRangeName:  pulumi.String("string"),
    		EnableNetworkPolicyConfig:          pulumi.Bool(false),
    		MaxNodeCount:                       pulumi.Int(0),
    		EnableStackdriverMonitoring:        pulumi.Bool(false),
    		EnableStackdriverLogging:           pulumi.Bool(false),
    		EnablePrivateNodes:                 pulumi.Bool(false),
    		IssueClientCertificate:             pulumi.Bool(false),
    		KubernetesDashboard:                pulumi.Bool(false),
    		Labels: pulumi.Map{
    			"string": pulumi.Any("any"),
    		},
    		LocalSsdCount:                 pulumi.Int(0),
    		EnablePrivateEndpoint:         pulumi.Bool(false),
    		EnableNodepoolAutoscaling:     pulumi.Bool(false),
    		EnableMasterAuthorizedNetwork: pulumi.Bool(false),
    		MasterAuthorizedNetworkCidrBlocks: pulumi.StringArray{
    			pulumi.String("string"),
    		},
    		EnableLegacyAbac:               pulumi.Bool(false),
    		EnableKubernetesDashboard:      pulumi.Bool(false),
    		IpPolicyCreateSubnetwork:       pulumi.Bool(false),
    		MinNodeCount:                   pulumi.Int(0),
    		EnableHttpLoadBalancing:        pulumi.Bool(false),
    		NodeCount:                      pulumi.Int(0),
    		EnableHorizontalPodAutoscaling: pulumi.Bool(false),
    		EnableAutoUpgrade:              pulumi.Bool(false),
    		EnableAutoRepair:               pulumi.Bool(false),
    		Preemptible:                    pulumi.Bool(false),
    		EnableAlphaFeature:             pulumi.Bool(false),
    		Region:                         pulumi.String("string"),
    		ResourceLabels: pulumi.Map{
    			"string": pulumi.Any("any"),
    		},
    		DiskSizeGb:  pulumi.Int(0),
    		Description: pulumi.String("string"),
    		Taints: pulumi.StringArray{
    			pulumi.String("string"),
    		},
    		UseIpAliases: pulumi.Bool(false),
    		Zone:         pulumi.String("string"),
    	},
    	GkeConfigV2: &rancher2.ClusterGkeConfigV2Args{
    		GoogleCredentialSecret: pulumi.String("string"),
    		ProjectId:              pulumi.String("string"),
    		Name:                   pulumi.String("string"),
    		LoggingService:         pulumi.String("string"),
    		MasterAuthorizedNetworksConfig: &rancher2.ClusterGkeConfigV2MasterAuthorizedNetworksConfigArgs{
    			CidrBlocks: rancher2.ClusterGkeConfigV2MasterAuthorizedNetworksConfigCidrBlockArray{
    				&rancher2.ClusterGkeConfigV2MasterAuthorizedNetworksConfigCidrBlockArgs{
    					CidrBlock:   pulumi.String("string"),
    					DisplayName: pulumi.String("string"),
    				},
    			},
    			Enabled: pulumi.Bool(false),
    		},
    		Imported: pulumi.Bool(false),
    		IpAllocationPolicy: &rancher2.ClusterGkeConfigV2IpAllocationPolicyArgs{
    			ClusterIpv4CidrBlock:       pulumi.String("string"),
    			ClusterSecondaryRangeName:  pulumi.String("string"),
    			CreateSubnetwork:           pulumi.Bool(false),
    			NodeIpv4CidrBlock:          pulumi.String("string"),
    			ServicesIpv4CidrBlock:      pulumi.String("string"),
    			ServicesSecondaryRangeName: pulumi.String("string"),
    			SubnetworkName:             pulumi.String("string"),
    			UseIpAliases:               pulumi.Bool(false),
    		},
    		KubernetesVersion: pulumi.String("string"),
    		Labels: pulumi.Map{
    			"string": pulumi.Any("any"),
    		},
    		Locations: pulumi.StringArray{
    			pulumi.String("string"),
    		},
    		ClusterAddons: &rancher2.ClusterGkeConfigV2ClusterAddonsArgs{
    			HorizontalPodAutoscaling: pulumi.Bool(false),
    			HttpLoadBalancing:        pulumi.Bool(false),
    			NetworkPolicyConfig:      pulumi.Bool(false),
    		},
    		MaintenanceWindow:     pulumi.String("string"),
    		EnableKubernetesAlpha: pulumi.Bool(false),
    		MonitoringService:     pulumi.String("string"),
    		Description:           pulumi.String("string"),
    		Network:               pulumi.String("string"),
    		NetworkPolicyEnabled:  pulumi.Bool(false),
    		NodePools: rancher2.ClusterGkeConfigV2NodePoolArray{
    			&rancher2.ClusterGkeConfigV2NodePoolArgs{
    				InitialNodeCount: pulumi.Int(0),
    				Name:             pulumi.String("string"),
    				Version:          pulumi.String("string"),
    				Autoscaling: &rancher2.ClusterGkeConfigV2NodePoolAutoscalingArgs{
    					Enabled:      pulumi.Bool(false),
    					MaxNodeCount: pulumi.Int(0),
    					MinNodeCount: pulumi.Int(0),
    				},
    				Config: &rancher2.ClusterGkeConfigV2NodePoolConfigArgs{
    					DiskSizeGb: pulumi.Int(0),
    					DiskType:   pulumi.String("string"),
    					ImageType:  pulumi.String("string"),
    					Labels: pulumi.Map{
    						"string": pulumi.Any("any"),
    					},
    					LocalSsdCount: pulumi.Int(0),
    					MachineType:   pulumi.String("string"),
    					OauthScopes: pulumi.StringArray{
    						pulumi.String("string"),
    					},
    					Preemptible: pulumi.Bool(false),
    					Tags: pulumi.StringArray{
    						pulumi.String("string"),
    					},
    					Taints: rancher2.ClusterGkeConfigV2NodePoolConfigTaintArray{
    						&rancher2.ClusterGkeConfigV2NodePoolConfigTaintArgs{
    							Effect: pulumi.String("string"),
    							Key:    pulumi.String("string"),
    							Value:  pulumi.String("string"),
    						},
    					},
    				},
    				Management: &rancher2.ClusterGkeConfigV2NodePoolManagementArgs{
    					AutoRepair:  pulumi.Bool(false),
    					AutoUpgrade: pulumi.Bool(false),
    				},
    				MaxPodsConstraint: pulumi.Int(0),
    			},
    		},
    		PrivateClusterConfig: &rancher2.ClusterGkeConfigV2PrivateClusterConfigArgs{
    			MasterIpv4CidrBlock:   pulumi.String("string"),
    			EnablePrivateEndpoint: pulumi.Bool(false),
    			EnablePrivateNodes:    pulumi.Bool(false),
    		},
    		ClusterIpv4CidrBlock: pulumi.String("string"),
    		Region:               pulumi.String("string"),
    		Subnetwork:           pulumi.String("string"),
    		Zone:                 pulumi.String("string"),
    	},
    	K3sConfig: &rancher2.ClusterK3sConfigArgs{
    		UpgradeStrategy: &rancher2.ClusterK3sConfigUpgradeStrategyArgs{
    			DrainServerNodes:  pulumi.Bool(false),
    			DrainWorkerNodes:  pulumi.Bool(false),
    			ServerConcurrency: pulumi.Int(0),
    			WorkerConcurrency: pulumi.Int(0),
    		},
    		Version: pulumi.String("string"),
    	},
    	Labels: pulumi.Map{
    		"string": pulumi.Any("any"),
    	},
    	Name: pulumi.String("string"),
    	OkeConfig: &rancher2.ClusterOkeConfigArgs{
    		KubernetesVersion:         pulumi.String("string"),
    		UserOcid:                  pulumi.String("string"),
    		TenancyId:                 pulumi.String("string"),
    		Region:                    pulumi.String("string"),
    		PrivateKeyContents:        pulumi.String("string"),
    		NodeShape:                 pulumi.String("string"),
    		Fingerprint:               pulumi.String("string"),
    		CompartmentId:             pulumi.String("string"),
    		NodeImage:                 pulumi.String("string"),
    		NodePublicKeyContents:     pulumi.String("string"),
    		PrivateKeyPassphrase:      pulumi.String("string"),
    		LoadBalancerSubnetName1:   pulumi.String("string"),
    		LoadBalancerSubnetName2:   pulumi.String("string"),
    		KmsKeyId:                  pulumi.String("string"),
    		NodePoolDnsDomainName:     pulumi.String("string"),
    		NodePoolSubnetName:        pulumi.String("string"),
    		FlexOcpus:                 pulumi.Int(0),
    		EnablePrivateNodes:        pulumi.Bool(false),
    		PodCidr:                   pulumi.String("string"),
    		EnablePrivateControlPlane: pulumi.Bool(false),
    		LimitNodeCount:            pulumi.Int(0),
    		QuantityOfNodeSubnets:     pulumi.Int(0),
    		QuantityPerSubnet:         pulumi.Int(0),
    		EnableKubernetesDashboard: pulumi.Bool(false),
    		ServiceCidr:               pulumi.String("string"),
    		ServiceDnsDomainName:      pulumi.String("string"),
    		SkipVcnDelete:             pulumi.Bool(false),
    		Description:               pulumi.String("string"),
    		CustomBootVolumeSize:      pulumi.Int(0),
    		VcnCompartmentId:          pulumi.String("string"),
    		VcnName:                   pulumi.String("string"),
    		WorkerNodeIngressCidr:     pulumi.String("string"),
    	},
    	Rke2Config: &rancher2.ClusterRke2ConfigArgs{
    		UpgradeStrategy: &rancher2.ClusterRke2ConfigUpgradeStrategyArgs{
    			DrainServerNodes:  pulumi.Bool(false),
    			DrainWorkerNodes:  pulumi.Bool(false),
    			ServerConcurrency: pulumi.Int(0),
    			WorkerConcurrency: pulumi.Int(0),
    		},
    		Version: pulumi.String("string"),
    	},
    	RkeConfig: &rancher2.ClusterRkeConfigArgs{
    		AddonJobTimeout: pulumi.Int(0),
    		Addons:          pulumi.String("string"),
    		AddonsIncludes: pulumi.StringArray{
    			pulumi.String("string"),
    		},
    		Authentication: &rancher2.ClusterRkeConfigAuthenticationArgs{
    			Sans: pulumi.StringArray{
    				pulumi.String("string"),
    			},
    			Strategy: pulumi.String("string"),
    		},
    		Authorization: &rancher2.ClusterRkeConfigAuthorizationArgs{
    			Mode: pulumi.String("string"),
    			Options: pulumi.Map{
    				"string": pulumi.Any("any"),
    			},
    		},
    		BastionHost: &rancher2.ClusterRkeConfigBastionHostArgs{
    			Address:      pulumi.String("string"),
    			User:         pulumi.String("string"),
    			Port:         pulumi.String("string"),
    			SshAgentAuth: pulumi.Bool(false),
    			SshKey:       pulumi.String("string"),
    			SshKeyPath:   pulumi.String("string"),
    		},
    		CloudProvider: &rancher2.ClusterRkeConfigCloudProviderArgs{
    			AwsCloudProvider: &rancher2.ClusterRkeConfigCloudProviderAwsCloudProviderArgs{
    				Global: &rancher2.ClusterRkeConfigCloudProviderAwsCloudProviderGlobalArgs{
    					DisableSecurityGroupIngress: pulumi.Bool(false),
    					DisableStrictZoneCheck:      pulumi.Bool(false),
    					ElbSecurityGroup:            pulumi.String("string"),
    					KubernetesClusterId:         pulumi.String("string"),
    					KubernetesClusterTag:        pulumi.String("string"),
    					RoleArn:                     pulumi.String("string"),
    					RouteTableId:                pulumi.String("string"),
    					SubnetId:                    pulumi.String("string"),
    					Vpc:                         pulumi.String("string"),
    					Zone:                        pulumi.String("string"),
    				},
    				ServiceOverrides: rancher2.ClusterRkeConfigCloudProviderAwsCloudProviderServiceOverrideArray{
    					&rancher2.ClusterRkeConfigCloudProviderAwsCloudProviderServiceOverrideArgs{
    						Service:       pulumi.String("string"),
    						Region:        pulumi.String("string"),
    						SigningMethod: pulumi.String("string"),
    						SigningName:   pulumi.String("string"),
    						SigningRegion: pulumi.String("string"),
    						Url:           pulumi.String("string"),
    					},
    				},
    			},
    			AzureCloudProvider: &rancher2.ClusterRkeConfigCloudProviderAzureCloudProviderArgs{
    				SubscriptionId:               pulumi.String("string"),
    				TenantId:                     pulumi.String("string"),
    				AadClientId:                  pulumi.String("string"),
    				AadClientSecret:              pulumi.String("string"),
    				Location:                     pulumi.String("string"),
    				PrimaryScaleSetName:          pulumi.String("string"),
    				CloudProviderBackoffDuration: pulumi.Int(0),
    				CloudProviderBackoffExponent: pulumi.Int(0),
    				CloudProviderBackoffJitter:   pulumi.Int(0),
    				CloudProviderBackoffRetries:  pulumi.Int(0),
    				CloudProviderRateLimit:       pulumi.Bool(false),
    				CloudProviderRateLimitBucket: pulumi.Int(0),
    				CloudProviderRateLimitQps:    pulumi.Int(0),
    				LoadBalancerSku:              pulumi.String("string"),
    				AadClientCertPassword:        pulumi.String("string"),
    				MaximumLoadBalancerRuleCount: pulumi.Int(0),
    				PrimaryAvailabilitySetName:   pulumi.String("string"),
    				CloudProviderBackoff:         pulumi.Bool(false),
    				ResourceGroup:                pulumi.String("string"),
    				RouteTableName:               pulumi.String("string"),
    				SecurityGroupName:            pulumi.String("string"),
    				SubnetName:                   pulumi.String("string"),
    				Cloud:                        pulumi.String("string"),
    				AadClientCertPath:            pulumi.String("string"),
    				UseInstanceMetadata:          pulumi.Bool(false),
    				UseManagedIdentityExtension:  pulumi.Bool(false),
    				VmType:                       pulumi.String("string"),
    				VnetName:                     pulumi.String("string"),
    				VnetResourceGroup:            pulumi.String("string"),
    			},
    			CustomCloudProvider: pulumi.String("string"),
    			Name:                pulumi.String("string"),
    			OpenstackCloudProvider: &rancher2.ClusterRkeConfigCloudProviderOpenstackCloudProviderArgs{
    				Global: &rancher2.ClusterRkeConfigCloudProviderOpenstackCloudProviderGlobalArgs{
    					AuthUrl:    pulumi.String("string"),
    					Password:   pulumi.String("string"),
    					Username:   pulumi.String("string"),
    					CaFile:     pulumi.String("string"),
    					DomainId:   pulumi.String("string"),
    					DomainName: pulumi.String("string"),
    					Region:     pulumi.String("string"),
    					TenantId:   pulumi.String("string"),
    					TenantName: pulumi.String("string"),
    					TrustId:    pulumi.String("string"),
    				},
    				BlockStorage: &rancher2.ClusterRkeConfigCloudProviderOpenstackCloudProviderBlockStorageArgs{
    					BsVersion:       pulumi.String("string"),
    					IgnoreVolumeAz:  pulumi.Bool(false),
    					TrustDevicePath: pulumi.Bool(false),
    				},
    				LoadBalancer: &rancher2.ClusterRkeConfigCloudProviderOpenstackCloudProviderLoadBalancerArgs{
    					CreateMonitor:        pulumi.Bool(false),
    					FloatingNetworkId:    pulumi.String("string"),
    					LbMethod:             pulumi.String("string"),
    					LbProvider:           pulumi.String("string"),
    					LbVersion:            pulumi.String("string"),
    					ManageSecurityGroups: pulumi.Bool(false),
    					MonitorDelay:         pulumi.String("string"),
    					MonitorMaxRetries:    pulumi.Int(0),
    					MonitorTimeout:       pulumi.String("string"),
    					SubnetId:             pulumi.String("string"),
    					UseOctavia:           pulumi.Bool(false),
    				},
    				Metadata: &rancher2.ClusterRkeConfigCloudProviderOpenstackCloudProviderMetadataArgs{
    					RequestTimeout: pulumi.Int(0),
    					SearchOrder:    pulumi.String("string"),
    				},
    				Route: &rancher2.ClusterRkeConfigCloudProviderOpenstackCloudProviderRouteArgs{
    					RouterId: pulumi.String("string"),
    				},
    			},
    			VsphereCloudProvider: &rancher2.ClusterRkeConfigCloudProviderVsphereCloudProviderArgs{
    				VirtualCenters: rancher2.ClusterRkeConfigCloudProviderVsphereCloudProviderVirtualCenterArray{
    					&rancher2.ClusterRkeConfigCloudProviderVsphereCloudProviderVirtualCenterArgs{
    						Datacenters:        pulumi.String("string"),
    						Name:               pulumi.String("string"),
    						Password:           pulumi.String("string"),
    						User:               pulumi.String("string"),
    						Port:               pulumi.String("string"),
    						SoapRoundtripCount: pulumi.Int(0),
    					},
    				},
    				Workspace: &rancher2.ClusterRkeConfigCloudProviderVsphereCloudProviderWorkspaceArgs{
    					Datacenter:       pulumi.String("string"),
    					Folder:           pulumi.String("string"),
    					Server:           pulumi.String("string"),
    					DefaultDatastore: pulumi.String("string"),
    					ResourcepoolPath: pulumi.String("string"),
    				},
    				Disk: &rancher2.ClusterRkeConfigCloudProviderVsphereCloudProviderDiskArgs{
    					ScsiControllerType: pulumi.String("string"),
    				},
    				Global: &rancher2.ClusterRkeConfigCloudProviderVsphereCloudProviderGlobalArgs{
    					Datacenters:             pulumi.String("string"),
    					GracefulShutdownTimeout: pulumi.String("string"),
    					InsecureFlag:            pulumi.Bool(false),
    					Password:                pulumi.String("string"),
    					Port:                    pulumi.String("string"),
    					SoapRoundtripCount:      pulumi.Int(0),
    					User:                    pulumi.String("string"),
    				},
    				Network: &rancher2.ClusterRkeConfigCloudProviderVsphereCloudProviderNetworkArgs{
    					PublicNetwork: pulumi.String("string"),
    				},
    			},
    		},
    		Dns: &rancher2.ClusterRkeConfigDnsArgs{
    			LinearAutoscalerParams: &rancher2.ClusterRkeConfigDnsLinearAutoscalerParamsArgs{
    				CoresPerReplica:           pulumi.Float64(0),
    				Max:                       pulumi.Int(0),
    				Min:                       pulumi.Int(0),
    				NodesPerReplica:           pulumi.Float64(0),
    				PreventSinglePointFailure: pulumi.Bool(false),
    			},
    			NodeSelector: pulumi.Map{
    				"string": pulumi.Any("any"),
    			},
    			Nodelocal: &rancher2.ClusterRkeConfigDnsNodelocalArgs{
    				IpAddress: pulumi.String("string"),
    				NodeSelector: pulumi.Map{
    					"string": pulumi.Any("any"),
    				},
    			},
    			Options: pulumi.Map{
    				"string": pulumi.Any("any"),
    			},
    			Provider: pulumi.String("string"),
    			ReverseCidrs: pulumi.StringArray{
    				pulumi.String("string"),
    			},
    			Tolerations: rancher2.ClusterRkeConfigDnsTolerationArray{
    				&rancher2.ClusterRkeConfigDnsTolerationArgs{
    					Key:      pulumi.String("string"),
    					Effect:   pulumi.String("string"),
    					Operator: pulumi.String("string"),
    					Seconds:  pulumi.Int(0),
    					Value:    pulumi.String("string"),
    				},
    			},
    			UpdateStrategy: &rancher2.ClusterRkeConfigDnsUpdateStrategyArgs{
    				RollingUpdate: &rancher2.ClusterRkeConfigDnsUpdateStrategyRollingUpdateArgs{
    					MaxSurge:       pulumi.Int(0),
    					MaxUnavailable: pulumi.Int(0),
    				},
    				Strategy: pulumi.String("string"),
    			},
    			UpstreamNameservers: pulumi.StringArray{
    				pulumi.String("string"),
    			},
    		},
    		EnableCriDockerd:    pulumi.Bool(false),
    		IgnoreDockerVersion: pulumi.Bool(false),
    		Ingress: &rancher2.ClusterRkeConfigIngressArgs{
    			DefaultBackend: pulumi.Bool(false),
    			DnsPolicy:      pulumi.String("string"),
    			ExtraArgs: pulumi.Map{
    				"string": pulumi.Any("any"),
    			},
    			HttpPort:    pulumi.Int(0),
    			HttpsPort:   pulumi.Int(0),
    			NetworkMode: pulumi.String("string"),
    			NodeSelector: pulumi.Map{
    				"string": pulumi.Any("any"),
    			},
    			Options: pulumi.Map{
    				"string": pulumi.Any("any"),
    			},
    			Provider: pulumi.String("string"),
    			Tolerations: rancher2.ClusterRkeConfigIngressTolerationArray{
    				&rancher2.ClusterRkeConfigIngressTolerationArgs{
    					Key:      pulumi.String("string"),
    					Effect:   pulumi.String("string"),
    					Operator: pulumi.String("string"),
    					Seconds:  pulumi.Int(0),
    					Value:    pulumi.String("string"),
    				},
    			},
    			UpdateStrategy: &rancher2.ClusterRkeConfigIngressUpdateStrategyArgs{
    				RollingUpdate: &rancher2.ClusterRkeConfigIngressUpdateStrategyRollingUpdateArgs{
    					MaxUnavailable: pulumi.Int(0),
    				},
    				Strategy: pulumi.String("string"),
    			},
    		},
    		KubernetesVersion: pulumi.String("string"),
    		Monitoring: &rancher2.ClusterRkeConfigMonitoringArgs{
    			NodeSelector: pulumi.Map{
    				"string": pulumi.Any("any"),
    			},
    			Options: pulumi.Map{
    				"string": pulumi.Any("any"),
    			},
    			Provider: pulumi.String("string"),
    			Replicas: pulumi.Int(0),
    			Tolerations: rancher2.ClusterRkeConfigMonitoringTolerationArray{
    				&rancher2.ClusterRkeConfigMonitoringTolerationArgs{
    					Key:      pulumi.String("string"),
    					Effect:   pulumi.String("string"),
    					Operator: pulumi.String("string"),
    					Seconds:  pulumi.Int(0),
    					Value:    pulumi.String("string"),
    				},
    			},
    			UpdateStrategy: &rancher2.ClusterRkeConfigMonitoringUpdateStrategyArgs{
    				RollingUpdate: &rancher2.ClusterRkeConfigMonitoringUpdateStrategyRollingUpdateArgs{
    					MaxSurge:       pulumi.Int(0),
    					MaxUnavailable: pulumi.Int(0),
    				},
    				Strategy: pulumi.String("string"),
    			},
    		},
    		Network: &rancher2.ClusterRkeConfigNetworkArgs{
    			AciNetworkProvider: &rancher2.ClusterRkeConfigNetworkAciNetworkProviderArgs{
    				KubeApiVlan: pulumi.String("string"),
    				ApicHosts: pulumi.StringArray{
    					pulumi.String("string"),
    				},
    				ApicUserCrt:     pulumi.String("string"),
    				ApicUserKey:     pulumi.String("string"),
    				ApicUserName:    pulumi.String("string"),
    				EncapType:       pulumi.String("string"),
    				ExternDynamic:   pulumi.String("string"),
    				VrfTenant:       pulumi.String("string"),
    				VrfName:         pulumi.String("string"),
    				Token:           pulumi.String("string"),
    				SystemId:        pulumi.String("string"),