rancher2 logo
Rancher 2 v3.9.0, Jan 18, 2023

rancher2.Cluster

Provides a Rancher v2 Cluster resource. This can be used to create clusters for Rancher v2 environments and retrieve their information.

Example Usage

Creating Rancher v2 RKE cluster enabling and customizing monitoring

using System.Collections.Generic;
using Pulumi;
using Rancher2 = Pulumi.Rancher2;

return await Deployment.RunAsync(() => 
{
    // Create a new rancher2 RKE Cluster
    var foo_custom = new Rancher2.Cluster("foo-custom", new()
    {
        // Customize monitoring via chart answers; monitoring itself is enabled
        // below with EnableClusterMonitoring.
        ClusterMonitoringInput = new Rancher2.Inputs.ClusterClusterMonitoringInputArgs
        {
            Answers = 
            {
                { "exporter-kubelets.https", true },
                { "exporter-node.enabled", true },
                { "exporter-node.ports.metrics.port", 9796 },
                { "exporter-node.resources.limits.cpu", "200m" },
                { "exporter-node.resources.limits.memory", "200Mi" },
                { "grafana.persistence.enabled", false },
                { "grafana.persistence.size", "10Gi" },
                { "grafana.persistence.storageClass", "default" },
                { "operator.resources.limits.memory", "500Mi" },
                { "prometheus.persistence.enabled", "false" },
                { "prometheus.persistence.size", "50Gi" },
                { "prometheus.persistence.storageClass", "default" },
                { "prometheus.persistent.useReleaseName", "true" },
                { "prometheus.resources.core.limits.cpu", "1000m" },
                { "prometheus.resources.core.limits.memory", "1500Mi" },
                { "prometheus.resources.core.requests.cpu", "750m" },
                { "prometheus.resources.core.requests.memory", "750Mi" },
                { "prometheus.retention", "12h" },
            },
            // Version of the cluster monitoring app to deploy.
            Version = "0.1.0",
        },
        Description = "Foo rancher2 custom cluster",
        // Enable cluster-level monitoring for this cluster.
        EnableClusterMonitoring = true,
        RkeConfig = new Rancher2.Inputs.ClusterRkeConfigArgs
        {
            // Use the canal CNI network plugin.
            Network = new Rancher2.Inputs.ClusterRkeConfigNetworkArgs
            {
                Plugin = "canal",
            },
        },
    });

});
package main

import (
	"github.com/pulumi/pulumi-rancher2/sdk/v3/go/rancher2"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		// Create a new rancher2 RKE Cluster
		_, err := rancher2.NewCluster(ctx, "foo-custom", &rancher2.ClusterArgs{
			// Customize monitoring via chart answers; monitoring itself is
			// enabled below with EnableClusterMonitoring.
			ClusterMonitoringInput: &rancher2.ClusterClusterMonitoringInputArgs{
				Answers: pulumi.AnyMap{
					"exporter-kubelets.https":                   pulumi.Any(true),
					"exporter-node.enabled":                     pulumi.Any(true),
					"exporter-node.ports.metrics.port":          pulumi.Any(9796),
					"exporter-node.resources.limits.cpu":        pulumi.Any("200m"),
					"exporter-node.resources.limits.memory":     pulumi.Any("200Mi"),
					"grafana.persistence.enabled":               pulumi.Any(false),
					"grafana.persistence.size":                  pulumi.Any("10Gi"),
					"grafana.persistence.storageClass":          pulumi.Any("default"),
					"operator.resources.limits.memory":          pulumi.Any("500Mi"),
					"prometheus.persistence.enabled":            pulumi.Any("false"),
					"prometheus.persistence.size":               pulumi.Any("50Gi"),
					"prometheus.persistence.storageClass":       pulumi.Any("default"),
					"prometheus.persistent.useReleaseName":      pulumi.Any("true"),
					"prometheus.resources.core.limits.cpu":      pulumi.Any("1000m"),
					"prometheus.resources.core.limits.memory":   pulumi.Any("1500Mi"),
					"prometheus.resources.core.requests.cpu":    pulumi.Any("750m"),
					"prometheus.resources.core.requests.memory": pulumi.Any("750Mi"),
					"prometheus.retention":                      pulumi.Any("12h"),
				},
				// Version of the cluster monitoring app to deploy.
				Version: pulumi.String("0.1.0"),
			},
			Description:             pulumi.String("Foo rancher2 custom cluster"),
			EnableClusterMonitoring: pulumi.Bool(true),
			RkeConfig: &rancher2.ClusterRkeConfigArgs{
				// Use the canal CNI network plugin.
				Network: &rancher2.ClusterRkeConfigNetworkArgs{
					Plugin: pulumi.String("canal"),
				},
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}
package generated_program;

import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.rancher2.Cluster;
import com.pulumi.rancher2.ClusterArgs;
import com.pulumi.rancher2.inputs.ClusterClusterMonitoringInputArgs;
import com.pulumi.rancher2.inputs.ClusterRkeConfigArgs;
import com.pulumi.rancher2.inputs.ClusterRkeConfigNetworkArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;

public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        // Create a new rancher2 RKE Cluster
        var foo_custom = new Cluster("foo-custom", ClusterArgs.builder()        
            // Customize monitoring via chart answers; monitoring itself is
            // enabled below with enableClusterMonitoring.
            .clusterMonitoringInput(ClusterClusterMonitoringInputArgs.builder()
                .answers(Map.ofEntries(
                    Map.entry("exporter-kubelets.https", true),
                    Map.entry("exporter-node.enabled", true),
                    Map.entry("exporter-node.ports.metrics.port", 9796),
                    Map.entry("exporter-node.resources.limits.cpu", "200m"),
                    Map.entry("exporter-node.resources.limits.memory", "200Mi"),
                    Map.entry("grafana.persistence.enabled", false),
                    Map.entry("grafana.persistence.size", "10Gi"),
                    Map.entry("grafana.persistence.storageClass", "default"),
                    Map.entry("operator.resources.limits.memory", "500Mi"),
                    Map.entry("prometheus.persistence.enabled", "false"),
                    Map.entry("prometheus.persistence.size", "50Gi"),
                    Map.entry("prometheus.persistence.storageClass", "default"),
                    Map.entry("prometheus.persistent.useReleaseName", "true"),
                    Map.entry("prometheus.resources.core.limits.cpu", "1000m"),
                    Map.entry("prometheus.resources.core.limits.memory", "1500Mi"),
                    Map.entry("prometheus.resources.core.requests.cpu", "750m"),
                    Map.entry("prometheus.resources.core.requests.memory", "750Mi"),
                    Map.entry("prometheus.retention", "12h")
                ))
                // Version of the cluster monitoring app to deploy.
                .version("0.1.0")
                .build())
            .description("Foo rancher2 custom cluster")
            .enableClusterMonitoring(true)
            .rkeConfig(ClusterRkeConfigArgs.builder()
                // Use the canal CNI network plugin.
                .network(ClusterRkeConfigNetworkArgs.builder()
                    .plugin("canal")
                    .build())
                .build())
            .build());

    }
}
import pulumi
import pulumi_rancher2 as rancher2

# Create a new rancher2 RKE Cluster
foo_custom = rancher2.Cluster("foo-custom",
    # Customize monitoring via chart answers; monitoring itself is enabled
    # below with enable_cluster_monitoring.
    cluster_monitoring_input=rancher2.ClusterClusterMonitoringInputArgs(
        answers={
            "exporter-kubelets.https": True,
            "exporter-node.enabled": True,
            "exporter-node.ports.metrics.port": 9796,
            "exporter-node.resources.limits.cpu": "200m",
            "exporter-node.resources.limits.memory": "200Mi",
            "grafana.persistence.enabled": False,
            "grafana.persistence.size": "10Gi",
            "grafana.persistence.storageClass": "default",
            "operator.resources.limits.memory": "500Mi",
            "prometheus.persistence.enabled": "false",
            "prometheus.persistence.size": "50Gi",
            "prometheus.persistence.storageClass": "default",
            "prometheus.persistent.useReleaseName": "true",
            "prometheus.resources.core.limits.cpu": "1000m",
            "prometheus.resources.core.limits.memory": "1500Mi",
            "prometheus.resources.core.requests.cpu": "750m",
            "prometheus.resources.core.requests.memory": "750Mi",
            "prometheus.retention": "12h",
        },
        # Version of the cluster monitoring app to deploy.
        version="0.1.0",
    ),
    description="Foo rancher2 custom cluster",
    enable_cluster_monitoring=True,
    rke_config=rancher2.ClusterRkeConfigArgs(
        # Use the canal CNI network plugin.
        network=rancher2.ClusterRkeConfigNetworkArgs(
            plugin="canal",
        ),
    ))
import * as pulumi from "@pulumi/pulumi";
import * as rancher2 from "@pulumi/rancher2";

// Create a new rancher2 RKE Cluster
const foo_custom = new rancher2.Cluster("foo-custom", {
    // Customize monitoring via chart answers; monitoring itself is enabled
    // below with enableClusterMonitoring.
    clusterMonitoringInput: {
        answers: {
            "exporter-kubelets.https": true,
            "exporter-node.enabled": true,
            "exporter-node.ports.metrics.port": 9796,
            "exporter-node.resources.limits.cpu": "200m",
            "exporter-node.resources.limits.memory": "200Mi",
            "grafana.persistence.enabled": false,
            "grafana.persistence.size": "10Gi",
            "grafana.persistence.storageClass": "default",
            "operator.resources.limits.memory": "500Mi",
            "prometheus.persistence.enabled": "false",
            "prometheus.persistence.size": "50Gi",
            "prometheus.persistence.storageClass": "default",
            "prometheus.persistent.useReleaseName": "true",
            "prometheus.resources.core.limits.cpu": "1000m",
            "prometheus.resources.core.limits.memory": "1500Mi",
            "prometheus.resources.core.requests.cpu": "750m",
            "prometheus.resources.core.requests.memory": "750Mi",
            "prometheus.retention": "12h",
        },
        // Version of the cluster monitoring app to deploy.
        version: "0.1.0",
    },
    description: "Foo rancher2 custom cluster",
    enableClusterMonitoring: true,
    rkeConfig: {
        // Use the canal CNI network plugin.
        network: {
            plugin: "canal",
        },
    },
});
resources:
  # Create a new rancher2 RKE Cluster
  foo-custom:
    type: rancher2:Cluster
    properties:
      # Customize monitoring via chart answers; monitoring itself is enabled
      # below with enableClusterMonitoring.
      clusterMonitoringInput:
        answers:
          exporter-kubelets.https: true
          exporter-node.enabled: true
          exporter-node.ports.metrics.port: 9796
          exporter-node.resources.limits.cpu: 200m
          exporter-node.resources.limits.memory: 200Mi
          grafana.persistence.enabled: false
          grafana.persistence.size: 10Gi
          grafana.persistence.storageClass: default
          operator.resources.limits.memory: 500Mi
          prometheus.persistence.enabled: 'false'
          prometheus.persistence.size: 50Gi
          prometheus.persistence.storageClass: default
          prometheus.persistent.useReleaseName: 'true'
          prometheus.resources.core.limits.cpu: 1000m
          prometheus.resources.core.limits.memory: 1500Mi
          prometheus.resources.core.requests.cpu: 750m
          prometheus.resources.core.requests.memory: 750Mi
          prometheus.retention: 12h
        # Version of the cluster monitoring app to deploy.
        version: 0.1.0
      description: Foo rancher2 custom cluster
      enableClusterMonitoring: true
      rkeConfig:
        network:
          # Use the canal CNI network plugin.
          plugin: canal

Creating Rancher v2 RKE cluster enabling/customizing monitoring and istio

using System.Collections.Generic;
using Pulumi;
using Rancher2 = Pulumi.Rancher2;

return await Deployment.RunAsync(() => 
{
    // Create a new rancher2 RKE Cluster
    var foo_customCluster = new Rancher2.Cluster("foo-customCluster", new()
    {
        Description = "Foo rancher2 custom cluster",
        RkeConfig = new Rancher2.Inputs.ClusterRkeConfigArgs
        {
            // Use the canal CNI network plugin.
            Network = new Rancher2.Inputs.ClusterRkeConfigNetworkArgs
            {
                Plugin = "canal",
            },
        },
        EnableClusterMonitoring = true,
        // Customize monitoring via chart answers.
        ClusterMonitoringInput = new Rancher2.Inputs.ClusterClusterMonitoringInputArgs
        {
            Answers = 
            {
                { "exporter-kubelets.https", true },
                { "exporter-node.enabled", true },
                { "exporter-node.ports.metrics.port", 9796 },
                { "exporter-node.resources.limits.cpu", "200m" },
                { "exporter-node.resources.limits.memory", "200Mi" },
                { "grafana.persistence.enabled", false },
                { "grafana.persistence.size", "10Gi" },
                { "grafana.persistence.storageClass", "default" },
                { "operator.resources.limits.memory", "500Mi" },
                { "prometheus.persistence.enabled", "false" },
                { "prometheus.persistence.size", "50Gi" },
                { "prometheus.persistence.storageClass", "default" },
                { "prometheus.persistent.useReleaseName", "true" },
                { "prometheus.resources.core.limits.cpu", "1000m" },
                { "prometheus.resources.core.limits.memory", "1500Mi" },
                { "prometheus.resources.core.requests.cpu", "750m" },
                { "prometheus.resources.core.requests.memory", "750Mi" },
                { "prometheus.retention", "12h" },
            },
            // Version of the cluster monitoring app to deploy.
            Version = "0.1.0",
        },
    });

    // Create a new rancher2 Cluster Sync for foo-custom cluster
    var foo_customClusterSync = new Rancher2.ClusterSync("foo-customClusterSync", new()
    {
        ClusterId = foo_customCluster.Id,
        // Wait for cluster monitoring before the sync completes.
        WaitMonitoring = foo_customCluster.EnableClusterMonitoring,
    });

    // Create a new rancher2 Namespace
    var foo_istio = new Rancher2.Namespace("foo-istio", new()
    {
        // Namespace is placed in the cluster's System project.
        ProjectId = foo_customClusterSync.SystemProjectId,
        Description = "istio namespace",
    });

    // Create a new rancher2 App deploying istio (should wait until monitoring is up and running)
    var istio = new Rancher2.App("istio", new()
    {
        CatalogName = "system-library",
        Description = "Terraform app acceptance test",
        ProjectId = foo_istio.ProjectId,
        TemplateName = "rancher-istio",
        TemplateVersion = "0.1.1",
        TargetNamespace = foo_istio.Id,
        // Chart answers for the rancher-istio app.
        Answers = 
        {
            { "certmanager.enabled", false },
            { "enableCRDs", true },
            { "galley.enabled", true },
            { "gateways.enabled", false },
            { "gateways.istio-ingressgateway.resources.limits.cpu", "2000m" },
            { "gateways.istio-ingressgateway.resources.limits.memory", "1024Mi" },
            { "gateways.istio-ingressgateway.resources.requests.cpu", "100m" },
            { "gateways.istio-ingressgateway.resources.requests.memory", "128Mi" },
            { "gateways.istio-ingressgateway.type", "NodePort" },
            { "global.monitoring.type", "cluster-monitoring" },
            { "global.rancher.clusterId", foo_customClusterSync.ClusterId },
            { "istio_cni.enabled", "false" },
            { "istiocoredns.enabled", "false" },
            { "kiali.enabled", "true" },
            { "mixer.enabled", "true" },
            { "mixer.policy.enabled", "true" },
            { "mixer.policy.resources.limits.cpu", "4800m" },
            { "mixer.policy.resources.limits.memory", "4096Mi" },
            { "mixer.policy.resources.requests.cpu", "1000m" },
            { "mixer.policy.resources.requests.memory", "1024Mi" },
            { "mixer.telemetry.resources.limits.cpu", "4800m" },
            { "mixer.telemetry.resources.limits.memory", "4096Mi" },
            { "mixer.telemetry.resources.requests.cpu", "1000m" },
            { "mixer.telemetry.resources.requests.memory", "1024Mi" },
            { "mtls.enabled", false },
            { "nodeagent.enabled", false },
            { "pilot.enabled", true },
            { "pilot.resources.limits.cpu", "1000m" },
            { "pilot.resources.limits.memory", "4096Mi" },
            { "pilot.resources.requests.cpu", "500m" },
            { "pilot.resources.requests.memory", "2048Mi" },
            { "pilot.traceSampling", "1" },
            { "security.enabled", true },
            { "sidecarInjectorWebhook.enabled", true },
            { "tracing.enabled", true },
            { "tracing.jaeger.resources.limits.cpu", "500m" },
            { "tracing.jaeger.resources.limits.memory", "1024Mi" },
            { "tracing.jaeger.resources.requests.cpu", "100m" },
            { "tracing.jaeger.resources.requests.memory", "100Mi" },
        },
    });

});
package main

import (
	"github.com/pulumi/pulumi-rancher2/sdk/v3/go/rancher2"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		_, err := rancher2.NewCluster(ctx, "foo-customCluster", &rancher2.ClusterArgs{
			Description: pulumi.String("Foo rancher2 custom cluster"),
			RkeConfig: &rancher2.ClusterRkeConfigArgs{
				Network: &rancher2.ClusterRkeConfigNetworkArgs{
					Plugin: pulumi.String("canal"),
				},
			},
			EnableClusterMonitoring: pulumi.Bool(true),
			ClusterMonitoringInput: &rancher2.ClusterClusterMonitoringInputArgs{
				Answers: pulumi.AnyMap{
					"exporter-kubelets.https":                   pulumi.Any(true),
					"exporter-node.enabled":                     pulumi.Any(true),
					"exporter-node.ports.metrics.port":          pulumi.Any(9796),
					"exporter-node.resources.limits.cpu":        pulumi.Any("200m"),
					"exporter-node.resources.limits.memory":     pulumi.Any("200Mi"),
					"grafana.persistence.enabled":               pulumi.Any(false),
					"grafana.persistence.size":                  pulumi.Any("10Gi"),
					"grafana.persistence.storageClass":          pulumi.Any("default"),
					"operator.resources.limits.memory":          pulumi.Any("500Mi"),
					"prometheus.persistence.enabled":            pulumi.Any("false"),
					"prometheus.persistence.size":               pulumi.Any("50Gi"),
					"prometheus.persistence.storageClass":       pulumi.Any("default"),
					"prometheus.persistent.useReleaseName":      pulumi.Any("true"),
					"prometheus.resources.core.limits.cpu":      pulumi.Any("1000m"),
					"prometheus.resources.core.limits.memory":   pulumi.Any("1500Mi"),
					"prometheus.resources.core.requests.cpu":    pulumi.Any("750m"),
					"prometheus.resources.core.requests.memory": pulumi.Any("750Mi"),
					"prometheus.retention":                      pulumi.Any("12h"),
				},
				Version: pulumi.String("0.1.0"),
			},
		})
		if err != nil {
			return err
		}
		_, err = rancher2.NewClusterSync(ctx, "foo-customClusterSync", &rancher2.ClusterSyncArgs{
			ClusterId:      foo_customCluster.ID(),
			WaitMonitoring: foo_customCluster.EnableClusterMonitoring,
		})
		if err != nil {
			return err
		}
		_, err = rancher2.NewNamespace(ctx, "foo-istio", &rancher2.NamespaceArgs{
			ProjectId:   foo_customClusterSync.SystemProjectId,
			Description: pulumi.String("istio namespace"),
		})
		if err != nil {
			return err
		}
		_, err = rancher2.NewApp(ctx, "istio", &rancher2.AppArgs{
			CatalogName:     pulumi.String("system-library"),
			Description:     pulumi.String("Terraform app acceptance test"),
			ProjectId:       foo_istio.ProjectId,
			TemplateName:    pulumi.String("rancher-istio"),
			TemplateVersion: pulumi.String("0.1.1"),
			TargetNamespace: foo_istio.ID(),
			Answers: pulumi.AnyMap{
				"certmanager.enabled": pulumi.Any(false),
				"enableCRDs":          pulumi.Any(true),
				"galley.enabled":      pulumi.Any(true),
				"gateways.enabled":    pulumi.Any(false),
				"gateways.istio-ingressgateway.resources.limits.cpu":      pulumi.Any("2000m"),
				"gateways.istio-ingressgateway.resources.limits.memory":   pulumi.Any("1024Mi"),
				"gateways.istio-ingressgateway.resources.requests.cpu":    pulumi.Any("100m"),
				"gateways.istio-ingressgateway.resources.requests.memory": pulumi.Any("128Mi"),
				"gateways.istio-ingressgateway.type":                      pulumi.Any("NodePort"),
				"global.monitoring.type":                                  pulumi.Any("cluster-monitoring"),
				"global.rancher.clusterId":                                foo_customClusterSync.ClusterId,
				"istio_cni.enabled":                                       pulumi.Any("false"),
				"istiocoredns.enabled":                                    pulumi.Any("false"),
				"kiali.enabled":                                           pulumi.Any("true"),
				"mixer.enabled":                                           pulumi.Any("true"),
				"mixer.policy.enabled":                                    pulumi.Any("true"),
				"mixer.policy.resources.limits.cpu":                       pulumi.Any("4800m"),
				"mixer.policy.resources.limits.memory":                    pulumi.Any("4096Mi"),
				"mixer.policy.resources.requests.cpu":                     pulumi.Any("1000m"),
				"mixer.policy.resources.requests.memory":                  pulumi.Any("1024Mi"),
				"mixer.telemetry.resources.limits.cpu":                    pulumi.Any("4800m"),
				"mixer.telemetry.resources.limits.memory":                 pulumi.Any("4096Mi"),
				"mixer.telemetry.resources.requests.cpu":                  pulumi.Any("1000m"),
				"mixer.telemetry.resources.requests.memory":               pulumi.Any("1024Mi"),
				"mtls.enabled":                                            pulumi.Any(false),
				"nodeagent.enabled":                                       pulumi.Any(false),
				"pilot.enabled":                                           pulumi.Any(true),
				"pilot.resources.limits.cpu":                              pulumi.Any("1000m"),
				"pilot.resources.limits.memory":                           pulumi.Any("4096Mi"),
				"pilot.resources.requests.cpu":                            pulumi.Any("500m"),
				"pilot.resources.requests.memory":                         pulumi.Any("2048Mi"),
				"pilot.traceSampling":                                     pulumi.Any("1"),
				"security.enabled":                                        pulumi.Any(true),
				"sidecarInjectorWebhook.enabled":                          pulumi.Any(true),
				"tracing.enabled":                                         pulumi.Any(true),
				"tracing.jaeger.resources.limits.cpu":                     pulumi.Any("500m"),
				"tracing.jaeger.resources.limits.memory":                  pulumi.Any("1024Mi"),
				"tracing.jaeger.resources.requests.cpu":                   pulumi.Any("100m"),
				"tracing.jaeger.resources.requests.memory":                pulumi.Any("100Mi"),
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}
package generated_program;

import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.rancher2.Cluster;
import com.pulumi.rancher2.ClusterArgs;
import com.pulumi.rancher2.inputs.ClusterRkeConfigArgs;
import com.pulumi.rancher2.inputs.ClusterRkeConfigNetworkArgs;
import com.pulumi.rancher2.inputs.ClusterClusterMonitoringInputArgs;
import com.pulumi.rancher2.ClusterSync;
import com.pulumi.rancher2.ClusterSyncArgs;
import com.pulumi.rancher2.Namespace;
import com.pulumi.rancher2.NamespaceArgs;
import com.pulumi.rancher2.App;
import com.pulumi.rancher2.AppArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;

public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        // Create a new rancher2 RKE Cluster
        var foo_customCluster = new Cluster("foo-customCluster", ClusterArgs.builder()        
            .description("Foo rancher2 custom cluster")
            .rkeConfig(ClusterRkeConfigArgs.builder()
                // Use the canal CNI network plugin.
                .network(ClusterRkeConfigNetworkArgs.builder()
                    .plugin("canal")
                    .build())
                .build())
            .enableClusterMonitoring(true)
            // Customize monitoring via chart answers.
            .clusterMonitoringInput(ClusterClusterMonitoringInputArgs.builder()
                .answers(Map.ofEntries(
                    Map.entry("exporter-kubelets.https", true),
                    Map.entry("exporter-node.enabled", true),
                    Map.entry("exporter-node.ports.metrics.port", 9796),
                    Map.entry("exporter-node.resources.limits.cpu", "200m"),
                    Map.entry("exporter-node.resources.limits.memory", "200Mi"),
                    Map.entry("grafana.persistence.enabled", false),
                    Map.entry("grafana.persistence.size", "10Gi"),
                    Map.entry("grafana.persistence.storageClass", "default"),
                    Map.entry("operator.resources.limits.memory", "500Mi"),
                    Map.entry("prometheus.persistence.enabled", "false"),
                    Map.entry("prometheus.persistence.size", "50Gi"),
                    Map.entry("prometheus.persistence.storageClass", "default"),
                    Map.entry("prometheus.persistent.useReleaseName", "true"),
                    Map.entry("prometheus.resources.core.limits.cpu", "1000m"),
                    Map.entry("prometheus.resources.core.limits.memory", "1500Mi"),
                    Map.entry("prometheus.resources.core.requests.cpu", "750m"),
                    Map.entry("prometheus.resources.core.requests.memory", "750Mi"),
                    Map.entry("prometheus.retention", "12h")
                ))
                // Version of the cluster monitoring app to deploy.
                .version("0.1.0")
                .build())
            .build());

        // Create a new rancher2 Cluster Sync for foo-custom cluster
        var foo_customClusterSync = new ClusterSync("foo-customClusterSync", ClusterSyncArgs.builder()        
            .clusterId(foo_customCluster.id())
            // Wait for cluster monitoring before the sync completes.
            .waitMonitoring(foo_customCluster.enableClusterMonitoring())
            .build());

        // Create a new rancher2 Namespace in the cluster's System project
        var foo_istio = new Namespace("foo-istio", NamespaceArgs.builder()        
            .projectId(foo_customClusterSync.systemProjectId())
            .description("istio namespace")
            .build());

        // Create a new rancher2 App deploying istio (should wait until monitoring is up and running)
        var istio = new App("istio", AppArgs.builder()        
            .catalogName("system-library")
            .description("Terraform app acceptance test")
            .projectId(foo_istio.projectId())
            .templateName("rancher-istio")
            .templateVersion("0.1.1")
            .targetNamespace(foo_istio.id())
            // Chart answers for the rancher-istio app.
            .answers(Map.ofEntries(
                Map.entry("certmanager.enabled", false),
                Map.entry("enableCRDs", true),
                Map.entry("galley.enabled", true),
                Map.entry("gateways.enabled", false),
                Map.entry("gateways.istio-ingressgateway.resources.limits.cpu", "2000m"),
                Map.entry("gateways.istio-ingressgateway.resources.limits.memory", "1024Mi"),
                Map.entry("gateways.istio-ingressgateway.resources.requests.cpu", "100m"),
                Map.entry("gateways.istio-ingressgateway.resources.requests.memory", "128Mi"),
                Map.entry("gateways.istio-ingressgateway.type", "NodePort"),
                Map.entry("global.monitoring.type", "cluster-monitoring"),
                Map.entry("global.rancher.clusterId", foo_customClusterSync.clusterId()),
                Map.entry("istio_cni.enabled", "false"),
                Map.entry("istiocoredns.enabled", "false"),
                Map.entry("kiali.enabled", "true"),
                Map.entry("mixer.enabled", "true"),
                Map.entry("mixer.policy.enabled", "true"),
                Map.entry("mixer.policy.resources.limits.cpu", "4800m"),
                Map.entry("mixer.policy.resources.limits.memory", "4096Mi"),
                Map.entry("mixer.policy.resources.requests.cpu", "1000m"),
                Map.entry("mixer.policy.resources.requests.memory", "1024Mi"),
                Map.entry("mixer.telemetry.resources.limits.cpu", "4800m"),
                Map.entry("mixer.telemetry.resources.limits.memory", "4096Mi"),
                Map.entry("mixer.telemetry.resources.requests.cpu", "1000m"),
                Map.entry("mixer.telemetry.resources.requests.memory", "1024Mi"),
                Map.entry("mtls.enabled", false),
                Map.entry("nodeagent.enabled", false),
                Map.entry("pilot.enabled", true),
                Map.entry("pilot.resources.limits.cpu", "1000m"),
                Map.entry("pilot.resources.limits.memory", "4096Mi"),
                Map.entry("pilot.resources.requests.cpu", "500m"),
                Map.entry("pilot.resources.requests.memory", "2048Mi"),
                Map.entry("pilot.traceSampling", "1"),
                Map.entry("security.enabled", true),
                Map.entry("sidecarInjectorWebhook.enabled", true),
                Map.entry("tracing.enabled", true),
                Map.entry("tracing.jaeger.resources.limits.cpu", "500m"),
                Map.entry("tracing.jaeger.resources.limits.memory", "1024Mi"),
                Map.entry("tracing.jaeger.resources.requests.cpu", "100m"),
                Map.entry("tracing.jaeger.resources.requests.memory", "100Mi")
            ))
            .build());

    }
}
import pulumi
import pulumi_rancher2 as rancher2

# Create a new rancher2 RKE Cluster
foo_custom_cluster = rancher2.Cluster("foo-customCluster",
    description="Foo rancher2 custom cluster",
    rke_config=rancher2.ClusterRkeConfigArgs(
        # Use the canal CNI network plugin.
        network=rancher2.ClusterRkeConfigNetworkArgs(
            plugin="canal",
        ),
    ),
    enable_cluster_monitoring=True,
    # Customize monitoring via chart answers.
    cluster_monitoring_input=rancher2.ClusterClusterMonitoringInputArgs(
        answers={
            "exporter-kubelets.https": True,
            "exporter-node.enabled": True,
            "exporter-node.ports.metrics.port": 9796,
            "exporter-node.resources.limits.cpu": "200m",
            "exporter-node.resources.limits.memory": "200Mi",
            "grafana.persistence.enabled": False,
            "grafana.persistence.size": "10Gi",
            "grafana.persistence.storageClass": "default",
            "operator.resources.limits.memory": "500Mi",
            "prometheus.persistence.enabled": "false",
            "prometheus.persistence.size": "50Gi",
            "prometheus.persistence.storageClass": "default",
            "prometheus.persistent.useReleaseName": "true",
            "prometheus.resources.core.limits.cpu": "1000m",
            "prometheus.resources.core.limits.memory": "1500Mi",
            "prometheus.resources.core.requests.cpu": "750m",
            "prometheus.resources.core.requests.memory": "750Mi",
            "prometheus.retention": "12h",
        },
        # Version of the cluster monitoring app to deploy.
        version="0.1.0",
    ))
# Create a new rancher2 Cluster Sync for foo-custom cluster
foo_custom_cluster_sync = rancher2.ClusterSync("foo-customClusterSync",
    cluster_id=foo_custom_cluster.id,
    # Wait for cluster monitoring before the sync completes.
    wait_monitoring=foo_custom_cluster.enable_cluster_monitoring)
# Create a new rancher2 Namespace in the cluster's System project
foo_istio = rancher2.Namespace("foo-istio",
    project_id=foo_custom_cluster_sync.system_project_id,
    description="istio namespace")
# Create a new rancher2 App deploying istio (should wait until monitoring is up and running)
istio = rancher2.App("istio",
    catalog_name="system-library",
    description="Terraform app acceptance test",
    project_id=foo_istio.project_id,
    template_name="rancher-istio",
    template_version="0.1.1",
    target_namespace=foo_istio.id,
    # Chart answers for the rancher-istio app.
    answers={
        "certmanager.enabled": False,
        "enableCRDs": True,
        "galley.enabled": True,
        "gateways.enabled": False,
        "gateways.istio-ingressgateway.resources.limits.cpu": "2000m",
        "gateways.istio-ingressgateway.resources.limits.memory": "1024Mi",
        "gateways.istio-ingressgateway.resources.requests.cpu": "100m",
        "gateways.istio-ingressgateway.resources.requests.memory": "128Mi",
        "gateways.istio-ingressgateway.type": "NodePort",
        "global.monitoring.type": "cluster-monitoring",
        "global.rancher.clusterId": foo_custom_cluster_sync.cluster_id,
        "istio_cni.enabled": "false",
        "istiocoredns.enabled": "false",
        "kiali.enabled": "true",
        "mixer.enabled": "true",
        "mixer.policy.enabled": "true",
        "mixer.policy.resources.limits.cpu": "4800m",
        "mixer.policy.resources.limits.memory": "4096Mi",
        "mixer.policy.resources.requests.cpu": "1000m",
        "mixer.policy.resources.requests.memory": "1024Mi",
        "mixer.telemetry.resources.limits.cpu": "4800m",
        "mixer.telemetry.resources.limits.memory": "4096Mi",
        "mixer.telemetry.resources.requests.cpu": "1000m",
        "mixer.telemetry.resources.requests.memory": "1024Mi",
        "mtls.enabled": False,
        "nodeagent.enabled": False,
        "pilot.enabled": True,
        "pilot.resources.limits.cpu": "1000m",
        "pilot.resources.limits.memory": "4096Mi",
        "pilot.resources.requests.cpu": "500m",
        "pilot.resources.requests.memory": "2048Mi",
        "pilot.traceSampling": "1",
        "security.enabled": True,
        "sidecarInjectorWebhook.enabled": True,
        "tracing.enabled": True,
        "tracing.jaeger.resources.limits.cpu": "500m",
        "tracing.jaeger.resources.limits.memory": "1024Mi",
        "tracing.jaeger.resources.requests.cpu": "100m",
        "tracing.jaeger.resources.requests.memory": "100Mi",
    })
// Example: RKE cluster with built-in monitoring enabled, a ClusterSync that
// waits for monitoring, and an istio App deployed into the cluster's System
// project. Resource references below create the implicit dependency chain
// Cluster -> ClusterSync -> Namespace -> App.
import * as pulumi from "@pulumi/pulumi";
import * as rancher2 from "@pulumi/rancher2";

// Create a new rancher2 RKE Cluster
const foo_customCluster = new rancher2.Cluster("foo-customCluster", {
    description: "Foo rancher2 custom cluster",
    rkeConfig: {
        network: {
            plugin: "canal",
        },
    },
    enableClusterMonitoring: true,
    clusterMonitoringInput: {
        // Monitoring chart answers. Values mix booleans/numbers with quoted
        // strings ("false", "true") — both forms appear in the upstream
        // example and are passed through to the chart as-is.
        answers: {
            "exporter-kubelets.https": true,
            "exporter-node.enabled": true,
            "exporter-node.ports.metrics.port": 9796,
            "exporter-node.resources.limits.cpu": "200m",
            "exporter-node.resources.limits.memory": "200Mi",
            "grafana.persistence.enabled": false,
            "grafana.persistence.size": "10Gi",
            "grafana.persistence.storageClass": "default",
            "operator.resources.limits.memory": "500Mi",
            "prometheus.persistence.enabled": "false",
            "prometheus.persistence.size": "50Gi",
            "prometheus.persistence.storageClass": "default",
            "prometheus.persistent.useReleaseName": "true",
            "prometheus.resources.core.limits.cpu": "1000m",
            "prometheus.resources.core.limits.memory": "1500Mi",
            "prometheus.resources.core.requests.cpu": "750m",
            "prometheus.resources.core.requests.memory": "750Mi",
            "prometheus.retention": "12h",
        },
        version: "0.1.0",
    },
});
// Create a new rancher2 Cluster Sync for foo-custom cluster
// waitMonitoring makes the sync wait for cluster monitoring, so anything
// referencing the sync deploys only after monitoring is up.
const foo_customClusterSync = new rancher2.ClusterSync("foo-customClusterSync", {
    clusterId: foo_customCluster.id,
    waitMonitoring: foo_customCluster.enableClusterMonitoring,
});
// Create a new rancher2 Namespace
// Placed in the cluster's System project via systemProjectId.
const foo_istio = new rancher2.Namespace("foo-istio", {
    projectId: foo_customClusterSync.systemProjectId,
    description: "istio namespace",
});
// Create a new rancher2 App deploying istio (should wait until monitoring is up and running)
const istio = new rancher2.App("istio", {
    catalogName: "system-library",
    description: "Terraform app acceptance test",
    projectId: foo_istio.projectId,
    templateName: "rancher-istio",
    templateVersion: "0.1.1",
    targetNamespace: foo_istio.id,
    // istio chart answers; "global.monitoring.type": "cluster-monitoring"
    // hooks istio telemetry into the cluster monitoring stack enabled above.
    answers: {
        "certmanager.enabled": false,
        enableCRDs: true,
        "galley.enabled": true,
        "gateways.enabled": false,
        "gateways.istio-ingressgateway.resources.limits.cpu": "2000m",
        "gateways.istio-ingressgateway.resources.limits.memory": "1024Mi",
        "gateways.istio-ingressgateway.resources.requests.cpu": "100m",
        "gateways.istio-ingressgateway.resources.requests.memory": "128Mi",
        "gateways.istio-ingressgateway.type": "NodePort",
        "global.monitoring.type": "cluster-monitoring",
        "global.rancher.clusterId": foo_customClusterSync.clusterId,
        "istio_cni.enabled": "false",
        "istiocoredns.enabled": "false",
        "kiali.enabled": "true",
        "mixer.enabled": "true",
        "mixer.policy.enabled": "true",
        "mixer.policy.resources.limits.cpu": "4800m",
        "mixer.policy.resources.limits.memory": "4096Mi",
        "mixer.policy.resources.requests.cpu": "1000m",
        "mixer.policy.resources.requests.memory": "1024Mi",
        "mixer.telemetry.resources.limits.cpu": "4800m",
        "mixer.telemetry.resources.limits.memory": "4096Mi",
        "mixer.telemetry.resources.requests.cpu": "1000m",
        "mixer.telemetry.resources.requests.memory": "1024Mi",
        "mtls.enabled": false,
        "nodeagent.enabled": false,
        "pilot.enabled": true,
        "pilot.resources.limits.cpu": "1000m",
        "pilot.resources.limits.memory": "4096Mi",
        "pilot.resources.requests.cpu": "500m",
        "pilot.resources.requests.memory": "2048Mi",
        "pilot.traceSampling": "1",
        "security.enabled": true,
        "sidecarInjectorWebhook.enabled": true,
        "tracing.enabled": true,
        "tracing.jaeger.resources.limits.cpu": "500m",
        "tracing.jaeger.resources.limits.memory": "1024Mi",
        "tracing.jaeger.resources.requests.cpu": "100m",
        "tracing.jaeger.resources.requests.memory": "100Mi",
    },
});
# Example: RKE cluster with built-in monitoring, a ClusterSync that waits for
# monitoring, and an istio App deployed into the cluster's System project.
resources:
  # Create a new rancher2 RKE Cluster
  foo-customCluster:
    type: rancher2:Cluster
    properties:
      description: Foo rancher2 custom cluster
      rkeConfig:
        network:
          plugin: canal
      enableClusterMonitoring: true
      clusterMonitoringInput:
        # Monitoring chart answers. Quoted values ('false', 'true') are
        # deliberate YAML strings; unquoted true/false are booleans — the
        # upstream example uses both and they are passed through as-is.
        answers:
          exporter-kubelets.https: true
          exporter-node.enabled: true
          exporter-node.ports.metrics.port: 9796
          exporter-node.resources.limits.cpu: 200m
          exporter-node.resources.limits.memory: 200Mi
          grafana.persistence.enabled: false
          grafana.persistence.size: 10Gi
          grafana.persistence.storageClass: default
          operator.resources.limits.memory: 500Mi
          prometheus.persistence.enabled: 'false'
          prometheus.persistence.size: 50Gi
          prometheus.persistence.storageClass: default
          prometheus.persistent.useReleaseName: 'true'
          prometheus.resources.core.limits.cpu: 1000m
          prometheus.resources.core.limits.memory: 1500Mi
          prometheus.resources.core.requests.cpu: 750m
          prometheus.resources.core.requests.memory: 750Mi
          prometheus.retention: 12h
        version: 0.1.0
  # Create a new rancher2 Cluster Sync for foo-custom cluster
  # waitMonitoring makes the sync wait for cluster monitoring, so anything
  # referencing the sync deploys only after monitoring is up.
  foo-customClusterSync:
    type: rancher2:ClusterSync
    properties:
      clusterId: ${["foo-customCluster"].id}
      waitMonitoring: ${["foo-customCluster"].enableClusterMonitoring}
  # Create a new rancher2 Namespace
  # Placed in the cluster's System project via systemProjectId.
  foo-istio:
    type: rancher2:Namespace
    properties:
      projectId: ${["foo-customClusterSync"].systemProjectId}
      description: istio namespace
  # Create a new rancher2 App deploying istio (should wait until monitoring is up and running)
  istio:
    type: rancher2:App
    properties:
      catalogName: system-library
      description: Terraform app acceptance test
      projectId: ${["foo-istio"].projectId}
      templateName: rancher-istio
      templateVersion: 0.1.1
      targetNamespace: ${["foo-istio"].id}
      # istio chart answers; global.monitoring.type: cluster-monitoring hooks
      # istio telemetry into the cluster monitoring stack enabled above.
      answers:
        certmanager.enabled: false
        enableCRDs: true
        galley.enabled: true
        gateways.enabled: false
        gateways.istio-ingressgateway.resources.limits.cpu: 2000m
        gateways.istio-ingressgateway.resources.limits.memory: 1024Mi
        gateways.istio-ingressgateway.resources.requests.cpu: 100m
        gateways.istio-ingressgateway.resources.requests.memory: 128Mi
        gateways.istio-ingressgateway.type: NodePort
        global.monitoring.type: cluster-monitoring
        global.rancher.clusterId: ${["foo-customClusterSync"].clusterId}
        istio_cni.enabled: 'false'
        istiocoredns.enabled: 'false'
        kiali.enabled: 'true'
        mixer.enabled: 'true'
        mixer.policy.enabled: 'true'
        mixer.policy.resources.limits.cpu: 4800m
        mixer.policy.resources.limits.memory: 4096Mi
        mixer.policy.resources.requests.cpu: 1000m
        mixer.policy.resources.requests.memory: 1024Mi
        mixer.telemetry.resources.limits.cpu: 4800m
        mixer.telemetry.resources.limits.memory: 4096Mi
        mixer.telemetry.resources.requests.cpu: 1000m
        mixer.telemetry.resources.requests.memory: 1024Mi
        mtls.enabled: false
        nodeagent.enabled: false
        pilot.enabled: true
        pilot.resources.limits.cpu: 1000m
        pilot.resources.limits.memory: 4096Mi
        pilot.resources.requests.cpu: 500m
        pilot.resources.requests.memory: 2048Mi
        pilot.traceSampling: '1'
        security.enabled: true
        sidecarInjectorWebhook.enabled: true
        tracing.enabled: true
        tracing.jaeger.resources.limits.cpu: 500m
        tracing.jaeger.resources.limits.memory: 1024Mi
        tracing.jaeger.resources.requests.cpu: 100m
        tracing.jaeger.resources.requests.memory: 100Mi

Creating Rancher v2 RKE cluster assigning a node pool (overlapping planes: each node runs the control plane, etcd, and worker roles)

// Example: custom RKE cluster plus an EC2-backed node template and a node
// pool whose 3 nodes carry all planes (control plane, etcd, worker) at once.
using System.Collections.Generic;
using Pulumi;
using Rancher2 = Pulumi.Rancher2;

return await Deployment.RunAsync(() => 
{
    // Create a new rancher2 RKE Cluster using the canal CNI plugin.
    var foo_custom = new Rancher2.Cluster("foo-custom", new()
    {
        Description = "Foo rancher2 custom cluster",
        RkeConfig = new Rancher2.Inputs.ClusterRkeConfigArgs
        {
            Network = new Rancher2.Inputs.ClusterRkeConfigNetworkArgs
            {
                Plugin = "canal",
            },
        },
    });

    // Create a new rancher2 Node Template.
    // The <...> placeholders must be replaced with real AWS values.
    var fooNodeTemplate = new Rancher2.NodeTemplate("fooNodeTemplate", new()
    {
        Description = "foo test",
        Amazonec2Config = new Rancher2.Inputs.NodeTemplateAmazonec2ConfigArgs
        {
            AccessKey = "<AWS_ACCESS_KEY>",
            SecretKey = "<AWS_SECRET_KEY>",
            Ami = "<AMI_ID>",
            Region = "<REGION>",
            SecurityGroups = new[]
            {
                "<AWS_SECURITY_GROUP>",
            },
            SubnetId = "<SUBNET_ID>",
            VpcId = "<VPC_ID>",
            Zone = "<ZONE>",
        },
    });

    // Create a new rancher2 Node Pool.
    // ControlPlane + Etcd + Worker all true => overlapping planes.
    var fooNodePool = new Rancher2.NodePool("fooNodePool", new()
    {
        ClusterId = foo_custom.Id,
        HostnamePrefix = "foo-cluster-0",
        NodeTemplateId = fooNodeTemplate.Id,
        Quantity = 3,
        ControlPlane = true,
        Etcd = true,
        Worker = true,
    });

});
package main

import (
	"github.com/pulumi/pulumi-rancher2/sdk/v3/go/rancher2"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		_, err := rancher2.NewCluster(ctx, "foo-custom", &rancher2.ClusterArgs{
			Description: pulumi.String("Foo rancher2 custom cluster"),
			RkeConfig: &rancher2.ClusterRkeConfigArgs{
				Network: &rancher2.ClusterRkeConfigNetworkArgs{
					Plugin: pulumi.String("canal"),
				},
			},
		})
		if err != nil {
			return err
		}
		fooNodeTemplate, err := rancher2.NewNodeTemplate(ctx, "fooNodeTemplate", &rancher2.NodeTemplateArgs{
			Description: pulumi.String("foo test"),
			Amazonec2Config: &rancher2.NodeTemplateAmazonec2ConfigArgs{
				AccessKey: pulumi.String("<AWS_ACCESS_KEY>"),
				SecretKey: pulumi.String("<AWS_SECRET_KEY>"),
				Ami:       pulumi.String("<AMI_ID>"),
				Region:    pulumi.String("<REGION>"),
				SecurityGroups: pulumi.StringArray{
					pulumi.String("<AWS_SECURITY_GROUP>"),
				},
				SubnetId: pulumi.String("<SUBNET_ID>"),
				VpcId:    pulumi.String("<VPC_ID>"),
				Zone:     pulumi.String("<ZONE>"),
			},
		})
		if err != nil {
			return err
		}
		_, err = rancher2.NewNodePool(ctx, "fooNodePool", &rancher2.NodePoolArgs{
			ClusterId:      foo_custom.ID(),
			HostnamePrefix: pulumi.String("foo-cluster-0"),
			NodeTemplateId: fooNodeTemplate.ID(),
			Quantity:       pulumi.Int(3),
			ControlPlane:   pulumi.Bool(true),
			Etcd:           pulumi.Bool(true),
			Worker:         pulumi.Bool(true),
		})
		if err != nil {
			return err
		}
		return nil
	})
}
package generated_program;

import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.rancher2.Cluster;
import com.pulumi.rancher2.ClusterArgs;
import com.pulumi.rancher2.inputs.ClusterRkeConfigArgs;
import com.pulumi.rancher2.inputs.ClusterRkeConfigNetworkArgs;
import com.pulumi.rancher2.NodeTemplate;
import com.pulumi.rancher2.NodeTemplateArgs;
import com.pulumi.rancher2.inputs.NodeTemplateAmazonec2ConfigArgs;
import com.pulumi.rancher2.NodePool;
import com.pulumi.rancher2.NodePoolArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;

// Example: custom RKE cluster plus an EC2-backed node template and a node
// pool whose 3 nodes carry all planes (control plane, etcd, worker) at once.
public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        // Custom RKE cluster using the canal CNI plugin.
        var foo_custom = new Cluster("foo-custom", ClusterArgs.builder()
            .description("Foo rancher2 custom cluster")
            .rkeConfig(ClusterRkeConfigArgs.builder()
                .network(ClusterRkeConfigNetworkArgs.builder()
                    .plugin("canal")
                    .build())
                .build())
            .build());

        // Node template for provisioning EC2 machines; replace the <...>
        // placeholders with real AWS values.
        var fooNodeTemplate = new NodeTemplate("fooNodeTemplate", NodeTemplateArgs.builder()
            .description("foo test")
            .amazonec2Config(NodeTemplateAmazonec2ConfigArgs.builder()
                .accessKey("<AWS_ACCESS_KEY>")
                .secretKey("<AWS_SECRET_KEY>")
                .ami("<AMI_ID>")
                .region("<REGION>")
                .securityGroups("<AWS_SECURITY_GROUP>")
                .subnetId("<SUBNET_ID>")
                .vpcId("<VPC_ID>")
                .zone("<ZONE>")
                .build())
            .build());

        // Node pool of 3 machines; controlPlane + etcd + worker all true
        // means every node carries all planes (overlapping planes).
        var fooNodePool = new NodePool("fooNodePool", NodePoolArgs.builder()
            .clusterId(foo_custom.id())
            .hostnamePrefix("foo-cluster-0")
            .nodeTemplateId(fooNodeTemplate.id())
            .quantity(3)
            .controlPlane(true)
            .etcd(true)
            .worker(true)
            .build());

    }
}
import pulumi
import pulumi_rancher2 as rancher2

# Custom RKE cluster wired to the canal CNI plugin.
_rke_network = rancher2.ClusterRkeConfigNetworkArgs(plugin="canal")
foo_custom = rancher2.Cluster(
    "foo-custom",
    description="Foo rancher2 custom cluster",
    rke_config=rancher2.ClusterRkeConfigArgs(network=_rke_network),
)

# Node template for provisioning EC2 machines; replace the <...>
# placeholders with real AWS values.
_ec2_config = rancher2.NodeTemplateAmazonec2ConfigArgs(
    access_key="<AWS_ACCESS_KEY>",
    secret_key="<AWS_SECRET_KEY>",
    ami="<AMI_ID>",
    region="<REGION>",
    security_groups=["<AWS_SECURITY_GROUP>"],
    subnet_id="<SUBNET_ID>",
    vpc_id="<VPC_ID>",
    zone="<ZONE>",
)
foo_node_template = rancher2.NodeTemplate(
    "fooNodeTemplate",
    description="foo test",
    amazonec2_config=_ec2_config,
)

# Node pool of 3 machines; control_plane + etcd + worker all True means
# every node carries all planes (overlapping planes).
foo_node_pool = rancher2.NodePool(
    "fooNodePool",
    cluster_id=foo_custom.id,
    hostname_prefix="foo-cluster-0",
    node_template_id=foo_node_template.id,
    quantity=3,
    control_plane=True,
    etcd=True,
    worker=True,
)
import * as pulumi from "@pulumi/pulumi";
import * as rancher2 from "@pulumi/rancher2";

// Custom RKE cluster wired to the canal CNI plugin.
const foo_custom = new rancher2.Cluster("foo-custom", {
    description: "Foo rancher2 custom cluster",
    rkeConfig: { network: { plugin: "canal" } },
});

// Node template for provisioning EC2 machines; replace the <...>
// placeholders with real AWS values.
const ec2Config = {
    accessKey: "<AWS_ACCESS_KEY>",
    secretKey: "<AWS_SECRET_KEY>",
    ami: "<AMI_ID>",
    region: "<REGION>",
    securityGroups: ["<AWS_SECURITY_GROUP>"],
    subnetId: "<SUBNET_ID>",
    vpcId: "<VPC_ID>",
    zone: "<ZONE>",
};
const fooNodeTemplate = new rancher2.NodeTemplate("fooNodeTemplate", {
    description: "foo test",
    amazonec2Config: ec2Config,
});

// Node pool of 3 machines; controlPlane + etcd + worker all true means
// every node carries all planes (overlapping planes).
const fooNodePool = new rancher2.NodePool("fooNodePool", {
    clusterId: foo_custom.id,
    nodeTemplateId: fooNodeTemplate.id,
    hostnamePrefix: "foo-cluster-0",
    quantity: 3,
    controlPlane: true,
    etcd: true,
    worker: true,
});
# Example: custom RKE cluster plus an EC2-backed node template and a node
# pool whose 3 nodes carry all planes (control plane, etcd, worker) at once.
resources:
  # Create a new rancher2 RKE Cluster
  foo-custom:
    type: rancher2:Cluster
    properties:
      description: Foo rancher2 custom cluster
      rkeConfig:
        network:
          plugin: canal
  # Create a new rancher2 Node Template
  # Replace the <...> placeholders with real AWS values.
  fooNodeTemplate:
    type: rancher2:NodeTemplate
    properties:
      description: foo test
      amazonec2Config:
        accessKey: <AWS_ACCESS_KEY>
        secretKey: <AWS_SECRET_KEY>
        ami: <AMI_ID>
        region: <REGION>
        securityGroups:
          - <AWS_SECURITY_GROUP>
        subnetId: <SUBNET_ID>
        vpcId: <VPC_ID>
        zone: <ZONE>
  # Create a new rancher2 Node Pool
  # controlPlane + etcd + worker all true => overlapping planes.
  # The bracketed form ${["foo-custom"].id} is required because the
  # resource's logical name contains a dash.
  fooNodePool:
    type: rancher2:NodePool
    properties:
      clusterId: ${["foo-custom"].id}
      hostnamePrefix: foo-cluster-0
      nodeTemplateId: ${fooNodeTemplate.id}
      quantity: 3
      controlPlane: true
      etcd: true
      worker: true

Creating Rancher v2 RKE cluster from template. For Rancher v2.3.x or above.

// Example: cluster template with one owner member and a single default
// revision, then a cluster instantiated from that revision.
// Requires Rancher v2.3.x or above.
using System.Collections.Generic;
using Pulumi;
using Rancher2 = Pulumi.Rancher2;

return await Deployment.RunAsync(() => 
{
    // Create a new rancher2 cluster template
    var fooClusterTemplate = new Rancher2.ClusterTemplate("fooClusterTemplate", new()
    {
        Members = new[]
        {
            new Rancher2.Inputs.ClusterTemplateMemberArgs
            {
                AccessType = "owner",
                UserPrincipalId = "local://user-XXXXX",
            },
        },
        TemplateRevisions = new[]
        {
            // Revision V1: canal networking; etcd snapshots every 6h,
            // retained for 24h. Marked as the template default.
            new Rancher2.Inputs.ClusterTemplateTemplateRevisionArgs
            {
                Name = "V1",
                ClusterConfig = new Rancher2.Inputs.ClusterTemplateTemplateRevisionClusterConfigArgs
                {
                    RkeConfig = new Rancher2.Inputs.ClusterTemplateTemplateRevisionClusterConfigRkeConfigArgs
                    {
                        Network = new Rancher2.Inputs.ClusterTemplateTemplateRevisionClusterConfigRkeConfigNetworkArgs
                        {
                            Plugin = "canal",
                        },
                        Services = new Rancher2.Inputs.ClusterTemplateTemplateRevisionClusterConfigRkeConfigServicesArgs
                        {
                            Etcd = new Rancher2.Inputs.ClusterTemplateTemplateRevisionClusterConfigRkeConfigServicesEtcdArgs
                            {
                                Creation = "6h",
                                Retention = "24h",
                            },
                        },
                    },
                },
                Default = true,
            },
        },
        Description = "Test cluster template v2",
    });

    // Create a new rancher2 RKE Cluster from template, resolving the first
    // revision's Id from the template's outputs via Apply.
    var fooCluster = new Rancher2.Cluster("fooCluster", new()
    {
        ClusterTemplateId = fooClusterTemplate.Id,
        ClusterTemplateRevisionId = fooClusterTemplate.TemplateRevisions.Apply(templateRevisions => templateRevisions[0].Id),
    });

});
package main

import (
	"github.com/pulumi/pulumi-rancher2/sdk/v3/go/rancher2"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		fooClusterTemplate, err := rancher2.NewClusterTemplate(ctx, "fooClusterTemplate", &rancher2.ClusterTemplateArgs{
			Members: rancher2.ClusterTemplateMemberArray{
				&rancher2.ClusterTemplateMemberArgs{
					AccessType:      pulumi.String("owner"),
					UserPrincipalId: pulumi.String("local://user-XXXXX"),
				},
			},
			TemplateRevisions: rancher2.ClusterTemplateTemplateRevisionArray{
				&rancher2.ClusterTemplateTemplateRevisionArgs{
					Name: pulumi.String("V1"),
					ClusterConfig: &rancher2.ClusterTemplateTemplateRevisionClusterConfigArgs{
						RkeConfig: &rancher2.ClusterTemplateTemplateRevisionClusterConfigRkeConfigArgs{
							Network: &rancher2.ClusterTemplateTemplateRevisionClusterConfigRkeConfigNetworkArgs{
								Plugin: pulumi.String("canal"),
							},
							Services: &rancher2.ClusterTemplateTemplateRevisionClusterConfigRkeConfigServicesArgs{
								Etcd: &rancher2.ClusterTemplateTemplateRevisionClusterConfigRkeConfigServicesEtcdArgs{
									Creation:  pulumi.String("6h"),
									Retention: pulumi.String("24h"),
								},
							},
						},
					},
					Default: pulumi.Bool(true),
				},
			},
			Description: pulumi.String("Test cluster template v2"),
		})
		if err != nil {
			return err
		}
		_, err = rancher2.NewCluster(ctx, "fooCluster", &rancher2.ClusterArgs{
			ClusterTemplateId: fooClusterTemplate.ID(),
			ClusterTemplateRevisionId: fooClusterTemplate.TemplateRevisions.ApplyT(func(templateRevisions []rancher2.ClusterTemplateTemplateRevision) (*string, error) {
				return &templateRevisions[0].Id, nil
			}).(pulumi.StringPtrOutput),
		})
		if err != nil {
			return err
		}
		return nil
	})
}
package generated_program;

import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.rancher2.ClusterTemplate;
import com.pulumi.rancher2.ClusterTemplateArgs;
import com.pulumi.rancher2.inputs.ClusterTemplateMemberArgs;
import com.pulumi.rancher2.inputs.ClusterTemplateTemplateRevisionArgs;
import com.pulumi.rancher2.inputs.ClusterTemplateTemplateRevisionClusterConfigArgs;
import com.pulumi.rancher2.inputs.ClusterTemplateTemplateRevisionClusterConfigRkeConfigArgs;
import com.pulumi.rancher2.inputs.ClusterTemplateTemplateRevisionClusterConfigRkeConfigNetworkArgs;
import com.pulumi.rancher2.inputs.ClusterTemplateTemplateRevisionClusterConfigRkeConfigServicesArgs;
import com.pulumi.rancher2.inputs.ClusterTemplateTemplateRevisionClusterConfigRkeConfigServicesEtcdArgs;
import com.pulumi.rancher2.Cluster;
import com.pulumi.rancher2.ClusterArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;

// Example: cluster template with one owner member and a single default
// revision, then a cluster instantiated from that revision.
public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        // Cluster template owned by a single local user with revision V1:
        // canal networking; etcd snapshots every 6h, retained for 24h.
        var fooClusterTemplate = new ClusterTemplate("fooClusterTemplate", ClusterTemplateArgs.builder()
            .members(ClusterTemplateMemberArgs.builder()
                .accessType("owner")
                .userPrincipalId("local://user-XXXXX")
                .build())
            .templateRevisions(ClusterTemplateTemplateRevisionArgs.builder()
                .name("V1")
                .clusterConfig(ClusterTemplateTemplateRevisionClusterConfigArgs.builder()
                    .rkeConfig(ClusterTemplateTemplateRevisionClusterConfigRkeConfigArgs.builder()
                        .network(ClusterTemplateTemplateRevisionClusterConfigRkeConfigNetworkArgs.builder()
                            .plugin("canal")
                            .build())
                        .services(ClusterTemplateTemplateRevisionClusterConfigRkeConfigServicesArgs.builder()
                            .etcd(ClusterTemplateTemplateRevisionClusterConfigRkeConfigServicesEtcdArgs.builder()
                                .creation("6h")
                                .retention("24h")
                                .build())
                            .build())
                        .build())
                    .build())
                .default_(true)
                .build())
            .description("Test cluster template v2")
            .build());

        // Cluster instantiated from the template's first revision.
        // Fixed: the revisions output is a java.util.List, so the element
        // must be fetched with .get(0) — array indexing ([0]) is not legal
        // Java and did not compile.
        var fooCluster = new Cluster("fooCluster", ClusterArgs.builder()
            .clusterTemplateId(fooClusterTemplate.id())
            .clusterTemplateRevisionId(fooClusterTemplate.templateRevisions().applyValue(templateRevisions -> templateRevisions.get(0).id()))
            .build());

    }
}
import pulumi
import pulumi_rancher2 as rancher2

# Create a new rancher2 cluster template
# One owner member and a single revision V1 (canal networking; etcd
# snapshots every 6h, retained for 24h) marked as the template default.
foo_cluster_template = rancher2.ClusterTemplate("fooClusterTemplate",
    members=[rancher2.ClusterTemplateMemberArgs(
        access_type="owner",
        user_principal_id="local://user-XXXXX",
    )],
    template_revisions=[rancher2.ClusterTemplateTemplateRevisionArgs(
        name="V1",
        cluster_config=rancher2.ClusterTemplateTemplateRevisionClusterConfigArgs(
            rke_config=rancher2.ClusterTemplateTemplateRevisionClusterConfigRkeConfigArgs(
                network=rancher2.ClusterTemplateTemplateRevisionClusterConfigRkeConfigNetworkArgs(
                    plugin="canal",
                ),
                services=rancher2.ClusterTemplateTemplateRevisionClusterConfigRkeConfigServicesArgs(
                    etcd=rancher2.ClusterTemplateTemplateRevisionClusterConfigRkeConfigServicesEtcdArgs(
                        creation="6h",
                        retention="24h",
                    ),
                ),
            ),
        ),
        default=True,
    )],
    description="Test cluster template v2")
# Create a new rancher2 RKE Cluster from template
# template_revisions[0].id works because indexing/attribute access is
# "lifted" onto Pulumi Output values.
foo_cluster = rancher2.Cluster("fooCluster",
    cluster_template_id=foo_cluster_template.id,
    cluster_template_revision_id=foo_cluster_template.template_revisions[0].id)
// Example: cluster template with one owner member and a single default
// revision, then a cluster instantiated from that revision.
import * as pulumi from "@pulumi/pulumi";
import * as rancher2 from "@pulumi/rancher2";

// Create a new rancher2 cluster template
const fooClusterTemplate = new rancher2.ClusterTemplate("fooClusterTemplate", {
    members: [{
        accessType: "owner",
        userPrincipalId: "local://user-XXXXX",
    }],
    // Revision V1: canal networking; etcd snapshots every 6h, retained 24h.
    templateRevisions: [{
        name: "V1",
        clusterConfig: {
            rkeConfig: {
                network: {
                    plugin: "canal",
                },
                services: {
                    etcd: {
                        creation: "6h",
                        retention: "24h",
                    },
                },
            },
        },
        // Marks this revision as the template default.
        "default": true,
    }],
    description: "Test cluster template v2",
});
// Create a new rancher2 RKE Cluster from template, resolving the first
// revision's id from the template's outputs via apply.
const fooCluster = new rancher2.Cluster("fooCluster", {
    clusterTemplateId: fooClusterTemplate.id,
    clusterTemplateRevisionId: fooClusterTemplate.templateRevisions.apply(templateRevisions => templateRevisions[0].id),
});
# Example: cluster template with one owner member and a single default
# revision, then a cluster instantiated from that revision.
resources:
  # Create a new rancher2 cluster template
  fooClusterTemplate:
    type: rancher2:ClusterTemplate
    properties:
      members:
        - accessType: owner
          userPrincipalId: local://user-XXXXX
      # Revision V1: canal networking; etcd snapshots every 6h, retained 24h.
      templateRevisions:
        - name: V1
          clusterConfig:
            rkeConfig:
              network:
                plugin: canal
              services:
                etcd:
                  creation: 6h
                  retention: 24h
          # Marks this revision as the template default.
          default: true
      description: Test cluster template v2
  # Create a new rancher2 RKE Cluster from template
  fooCluster:
    type: rancher2:Cluster
    properties:
      clusterTemplateId: ${fooClusterTemplate.id}
      clusterTemplateRevisionId: ${fooClusterTemplate.templateRevisions[0].id}

Creating Rancher v2 RKE cluster with upgrade strategy. For Rancher v2.4.x or above.

// Example: custom cluster with canal networking, an etcd snapshot schedule,
// kube-api audit logging, and a drain-based upgrade strategy.
// Requires Rancher v2.4.x or above.
using System.Collections.Generic;
using Pulumi;
using Rancher2 = Pulumi.Rancher2;

return await Deployment.RunAsync(() => 
{
    var foo = new Rancher2.Cluster("foo", new()
    {
        Description = "Terraform custom cluster",
        RkeConfig = new Rancher2.Inputs.ClusterRkeConfigArgs
        {
            Network = new Rancher2.Inputs.ClusterRkeConfigNetworkArgs
            {
                Plugin = "canal",
            },
            Services = new Rancher2.Inputs.ClusterRkeConfigServicesArgs
            {
                // etcd snapshots every 6h, retained for 24h.
                Etcd = new Rancher2.Inputs.ClusterRkeConfigServicesEtcdArgs
                {
                    Creation = "6h",
                    Retention = "24h",
                },
                KubeApi = new Rancher2.Inputs.ClusterRkeConfigServicesKubeApiArgs
                {
                    AuditLog = new Rancher2.Inputs.ClusterRkeConfigServicesKubeApiAuditLogArgs
                    {
                        Configuration = new Rancher2.Inputs.ClusterRkeConfigServicesKubeApiAuditLogConfigurationArgs
                        {
                            Format = "json",
                            MaxAge = 5,
                            MaxBackup = 5,
                            MaxSize = 100,
                            Path = "-",
                            // Kubernetes audit policy, passed through verbatim
                            // as a YAML document.
                            Policy = @"apiVersion: audit.k8s.io/v1
kind: Policy
metadata:
  creationTimestamp: null
omitStages:
- RequestReceived
rules:
- level: RequestResponse
  resources:
  - resources:
    - pods

",
                        },
                        Enabled = true,
                    },
                },
            },
            // Drain nodes during upgrades; at most 20% of workers down at once.
            UpgradeStrategy = new Rancher2.Inputs.ClusterRkeConfigUpgradeStrategyArgs
            {
                Drain = true,
                MaxUnavailableWorker = "20%",
            },
        },
    });

});
package main

import (
	"fmt"

	"github.com/pulumi/pulumi-rancher2/sdk/v3/go/rancher2"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		_, err := rancher2.NewCluster(ctx, "foo", &rancher2.ClusterArgs{
			Description: pulumi.String("Terraform custom cluster"),
			RkeConfig: &rancher2.ClusterRkeConfigArgs{
				Network: &rancher2.ClusterRkeConfigNetworkArgs{
					Plugin: pulumi.String("canal"),
				},
				Services: &rancher2.ClusterRkeConfigServicesArgs{
					Etcd: &rancher2.ClusterRkeConfigServicesEtcdArgs{
						Creation:  pulumi.String("6h"),
						Retention: pulumi.String("24h"),
					},
					KubeApi: &rancher2.ClusterRkeConfigServicesKubeApiArgs{
						AuditLog: &rancher2.ClusterRkeConfigServicesKubeApiAuditLogArgs{
							Configuration: &rancher2.ClusterRkeConfigServicesKubeApiAuditLogConfigurationArgs{
								Format:    pulumi.String("json"),
								MaxAge:    pulumi.Int(5),
								MaxBackup: pulumi.Int(5),
								MaxSize:   pulumi.Int(100),
								Path:      pulumi.String("-"),
								Policy: pulumi.String(fmt.Sprintf(`apiVersion: audit.k8s.io/v1
kind: Policy
metadata:
  creationTimestamp: null
omitStages:
- RequestReceived
rules:
- level: RequestResponse
  resources:
  - resources:
    - pods

`)),
							},
							Enabled: pulumi.Bool(true),
						},
					},
				},
				UpgradeStrategy: &rancher2.ClusterRkeConfigUpgradeStrategyArgs{
					Drain:                pulumi.Bool(true),
					MaxUnavailableWorker: pulumi.String(fmt.Sprintf("20%v", "%")),
				},
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}
package generated_program;

import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.rancher2.Cluster;
import com.pulumi.rancher2.ClusterArgs;
import com.pulumi.rancher2.inputs.ClusterRkeConfigArgs;
import com.pulumi.rancher2.inputs.ClusterRkeConfigNetworkArgs;
import com.pulumi.rancher2.inputs.ClusterRkeConfigServicesArgs;
import com.pulumi.rancher2.inputs.ClusterRkeConfigServicesEtcdArgs;
import com.pulumi.rancher2.inputs.ClusterRkeConfigServicesKubeApiArgs;
import com.pulumi.rancher2.inputs.ClusterRkeConfigServicesKubeApiAuditLogArgs;
import com.pulumi.rancher2.inputs.ClusterRkeConfigServicesKubeApiAuditLogConfigurationArgs;
import com.pulumi.rancher2.inputs.ClusterRkeConfigUpgradeStrategyArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;

// Example: custom cluster with canal networking, an etcd snapshot schedule,
// kube-api audit logging, and a drain-based upgrade strategy.
public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        var foo = new Cluster("foo", ClusterArgs.builder()
            .description("Terraform custom cluster")
            .rkeConfig(ClusterRkeConfigArgs.builder()
                .network(ClusterRkeConfigNetworkArgs.builder()
                    .plugin("canal")
                    .build())
                .services(ClusterRkeConfigServicesArgs.builder()
                    // etcd snapshots every 6h, retained for 24h.
                    .etcd(ClusterRkeConfigServicesEtcdArgs.builder()
                        .creation("6h")
                        .retention("24h")
                        .build())
                    .kubeApi(ClusterRkeConfigServicesKubeApiArgs.builder()
                        .auditLog(ClusterRkeConfigServicesKubeApiAuditLogArgs.builder()
                            .configuration(ClusterRkeConfigServicesKubeApiAuditLogConfigurationArgs.builder()
                                .format("json")
                                .maxAge(5)
                                .maxBackup(5)
                                .maxSize(100)
                                .path("-")
                                // Kubernetes audit policy as a Java text
                                // block, passed through verbatim.
                                .policy("""
apiVersion: audit.k8s.io/v1
kind: Policy
metadata:
  creationTimestamp: null
omitStages:
- RequestReceived
rules:
- level: RequestResponse
  resources:
  - resources:
    - pods

                                """)
                                .build())
                            .enabled(true)
                            .build())
                        .build())
                    .build())
                // Drain nodes during upgrades; at most 20% of the workers
                // may be unavailable at a time.
                .upgradeStrategy(ClusterRkeConfigUpgradeStrategyArgs.builder()
                    .drain(true)
                    .maxUnavailableWorker("20%")
                    .build())
                .build())
            .build());

    }
}
# Python example: custom RKE cluster with etcd snapshot settings, kube-api
# audit logging, and a drain-based node upgrade strategy.
import pulumi
import pulumi_rancher2 as rancher2

foo = rancher2.Cluster("foo",
    description="Terraform custom cluster",
    rke_config=rancher2.ClusterRkeConfigArgs(
        network=rancher2.ClusterRkeConfigNetworkArgs(
            plugin="canal",
        ),
        services=rancher2.ClusterRkeConfigServicesArgs(
            # etcd snapshots every 6h, retained for 24h.
            etcd=rancher2.ClusterRkeConfigServicesEtcdArgs(
                creation="6h",
                retention="24h",
            ),
            kube_api=rancher2.ClusterRkeConfigServicesKubeApiArgs(
                audit_log=rancher2.ClusterRkeConfigServicesKubeApiAuditLogArgs(
                    configuration=rancher2.ClusterRkeConfigServicesKubeApiAuditLogConfigurationArgs(
                        format="json",
                        max_age=5,
                        max_backup=5,
                        max_size=100,
                        path="-",
                        # Kubernetes audit policy, passed verbatim as a YAML document.
                        policy="""apiVersion: audit.k8s.io/v1
kind: Policy
metadata:
  creationTimestamp: null
omitStages:
- RequestReceived
rules:
- level: RequestResponse
  resources:
  - resources:
    - pods

""",
                    ),
                    enabled=True,
                ),
            ),
        ),
        # Drain nodes during upgrades; at most 20% of workers unavailable at once.
        upgrade_strategy=rancher2.ClusterRkeConfigUpgradeStrategyArgs(
            drain=True,
            max_unavailable_worker="20%",
        ),
    ))
// TypeScript example: custom RKE cluster with etcd snapshot settings,
// kube-api audit logging, and a drain-based node upgrade strategy.
import * as pulumi from "@pulumi/pulumi";
import * as rancher2 from "@pulumi/rancher2";

const foo = new rancher2.Cluster("foo", {
    description: "Terraform custom cluster",
    rkeConfig: {
        network: {
            plugin: "canal",
        },
        services: {
            // etcd snapshots every 6h, retained for 24h.
            etcd: {
                creation: "6h",
                retention: "24h",
            },
            kubeApi: {
                auditLog: {
                    configuration: {
                        format: "json",
                        maxAge: 5,
                        maxBackup: 5,
                        maxSize: 100,
                        path: "-",
                        // Kubernetes audit policy, passed verbatim as a YAML document.
                        policy: `apiVersion: audit.k8s.io/v1
kind: Policy
metadata:
  creationTimestamp: null
omitStages:
- RequestReceived
rules:
- level: RequestResponse
  resources:
  - resources:
    - pods

`,
                    },
                    enabled: true,
                },
            },
        },
        // Drain nodes during upgrades; at most 20% of workers unavailable at once.
        upgradeStrategy: {
            drain: true,
            maxUnavailableWorker: `20%`,
        },
    },
});
# YAML example: custom RKE cluster with etcd snapshot settings, kube-api
# audit logging (policy supplied as a literal block scalar), and a
# drain-based node upgrade strategy.
resources:
  foo:
    type: rancher2:Cluster
    properties:
      description: Terraform custom cluster
      rkeConfig:
        network:
          plugin: canal
        services:
          etcd:
            creation: 6h
            retention: 24h
          kubeApi:
            auditLog:
              configuration:
                format: json
                maxAge: 5
                maxBackup: 5
                maxSize: 100
                path: '-'
                policy: |+
                  apiVersion: audit.k8s.io/v1
                  kind: Policy
                  metadata:
                    creationTimestamp: null
                  omitStages:
                  - RequestReceived
                  rules:
                  - level: RequestResponse
                    resources:
                    - resources:
                      - pods                  

              enabled: true
        upgradeStrategy:
          drain: true
          maxUnavailableWorker: 20%

Creating Rancher v2 RKE cluster with scheduled cluster scan. For Rancher v2.4.x or above.

// C# example: custom RKE cluster with a scheduled CIS cluster scan
// (requires Rancher v2.4.x or above).
using System.Collections.Generic;
using Pulumi;
using Rancher2 = Pulumi.Rancher2;

return await Deployment.RunAsync(() => 
{
    var foo = new Rancher2.Cluster("foo", new()
    {
        Description = "Terraform custom cluster",
        RkeConfig = new Rancher2.Inputs.ClusterRkeConfigArgs
        {
            Network = new Rancher2.Inputs.ClusterRkeConfigNetworkArgs
            {
                Plugin = "canal",
            },
            Services = new Rancher2.Inputs.ClusterRkeConfigServicesArgs
            {
                Etcd = new Rancher2.Inputs.ClusterRkeConfigServicesEtcdArgs
                {
                    Creation = "6h",
                    Retention = "24h",
                },
            },
        },
        // Run a CIS scan every 30 minutes past the hour, keeping 5 reports.
        ScheduledClusterScan = new Rancher2.Inputs.ClusterScheduledClusterScanArgs
        {
            Enabled = true,
            ScanConfig = new Rancher2.Inputs.ClusterScheduledClusterScanScanConfigArgs
            {
                CisScanConfig = new Rancher2.Inputs.ClusterScheduledClusterScanScanConfigCisScanConfigArgs
                {
                    DebugMaster = true,
                    DebugWorker = true,
                },
            },
            ScheduleConfig = new Rancher2.Inputs.ClusterScheduledClusterScanScheduleConfigArgs
            {
                CronSchedule = "30 * * * *",
                Retention = 5,
            },
        },
    });

});
// Go example: custom RKE cluster with a scheduled CIS cluster scan
// (requires Rancher v2.4.x or above).
package main

import (
	"github.com/pulumi/pulumi-rancher2/sdk/v3/go/rancher2"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		_, err := rancher2.NewCluster(ctx, "foo", &rancher2.ClusterArgs{
			Description: pulumi.String("Terraform custom cluster"),
			RkeConfig: &rancher2.ClusterRkeConfigArgs{
				Network: &rancher2.ClusterRkeConfigNetworkArgs{
					Plugin: pulumi.String("canal"),
				},
				Services: &rancher2.ClusterRkeConfigServicesArgs{
					Etcd: &rancher2.ClusterRkeConfigServicesEtcdArgs{
						Creation:  pulumi.String("6h"),
						Retention: pulumi.String("24h"),
					},
				},
			},
			// Run a CIS scan every 30 minutes past the hour, keeping 5 reports.
			ScheduledClusterScan: &rancher2.ClusterScheduledClusterScanArgs{
				Enabled: pulumi.Bool(true),
				ScanConfig: &rancher2.ClusterScheduledClusterScanScanConfigArgs{
					CisScanConfig: &rancher2.ClusterScheduledClusterScanScanConfigCisScanConfigArgs{
						DebugMaster: pulumi.Bool(true),
						DebugWorker: pulumi.Bool(true),
					},
				},
				ScheduleConfig: &rancher2.ClusterScheduledClusterScanScheduleConfigArgs{
					CronSchedule: pulumi.String("30 * * * *"),
					Retention:    pulumi.Int(5),
				},
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}
// Java example: custom RKE cluster with a scheduled CIS cluster scan
// (requires Rancher v2.4.x or above).
package generated_program;

import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.rancher2.Cluster;
import com.pulumi.rancher2.ClusterArgs;
import com.pulumi.rancher2.inputs.ClusterRkeConfigArgs;
import com.pulumi.rancher2.inputs.ClusterRkeConfigNetworkArgs;
import com.pulumi.rancher2.inputs.ClusterRkeConfigServicesArgs;
import com.pulumi.rancher2.inputs.ClusterRkeConfigServicesEtcdArgs;
import com.pulumi.rancher2.inputs.ClusterScheduledClusterScanArgs;
import com.pulumi.rancher2.inputs.ClusterScheduledClusterScanScanConfigArgs;
import com.pulumi.rancher2.inputs.ClusterScheduledClusterScanScanConfigCisScanConfigArgs;
import com.pulumi.rancher2.inputs.ClusterScheduledClusterScanScheduleConfigArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;

public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        var foo = new Cluster("foo", ClusterArgs.builder()        
            .description("Terraform custom cluster")
            .rkeConfig(ClusterRkeConfigArgs.builder()
                .network(ClusterRkeConfigNetworkArgs.builder()
                    .plugin("canal")
                    .build())
                .services(ClusterRkeConfigServicesArgs.builder()
                    .etcd(ClusterRkeConfigServicesEtcdArgs.builder()
                        .creation("6h")
                        .retention("24h")
                        .build())
                    .build())
                .build())
            // Run a CIS scan every 30 minutes past the hour, keeping 5 reports.
            .scheduledClusterScan(ClusterScheduledClusterScanArgs.builder()
                .enabled(true)
                .scanConfig(ClusterScheduledClusterScanScanConfigArgs.builder()
                    .cisScanConfig(ClusterScheduledClusterScanScanConfigCisScanConfigArgs.builder()
                        .debugMaster(true)
                        .debugWorker(true)
                        .build())
                    .build())
                .scheduleConfig(ClusterScheduledClusterScanScheduleConfigArgs.builder()
                    .cronSchedule("30 * * * *")
                    .retention(5)
                    .build())
                .build())
            .build());

    }
}
# Python example: custom RKE cluster with a scheduled CIS cluster scan
# (requires Rancher v2.4.x or above).
import pulumi
import pulumi_rancher2 as rancher2

foo = rancher2.Cluster("foo",
    description="Terraform custom cluster",
    rke_config=rancher2.ClusterRkeConfigArgs(
        network=rancher2.ClusterRkeConfigNetworkArgs(
            plugin="canal",
        ),
        services=rancher2.ClusterRkeConfigServicesArgs(
            etcd=rancher2.ClusterRkeConfigServicesEtcdArgs(
                creation="6h",
                retention="24h",
            ),
        ),
    ),
    # Run a CIS scan every 30 minutes past the hour, keeping 5 reports.
    scheduled_cluster_scan=rancher2.ClusterScheduledClusterScanArgs(
        enabled=True,
        scan_config=rancher2.ClusterScheduledClusterScanScanConfigArgs(
            cis_scan_config=rancher2.ClusterScheduledClusterScanScanConfigCisScanConfigArgs(
                debug_master=True,
                debug_worker=True,
            ),
        ),
        schedule_config=rancher2.ClusterScheduledClusterScanScheduleConfigArgs(
            cron_schedule="30 * * * *",
            retention=5,
        ),
    ))
// TypeScript example: custom RKE cluster with a scheduled CIS cluster scan
// (requires Rancher v2.4.x or above).
import * as pulumi from "@pulumi/pulumi";
import * as rancher2 from "@pulumi/rancher2";

const foo = new rancher2.Cluster("foo", {
    description: "Terraform custom cluster",
    rkeConfig: {
        network: {
            plugin: "canal",
        },
        services: {
            etcd: {
                creation: "6h",
                retention: "24h",
            },
        },
    },
    // Run a CIS scan every 30 minutes past the hour, keeping 5 reports.
    scheduledClusterScan: {
        enabled: true,
        scanConfig: {
            cisScanConfig: {
                debugMaster: true,
                debugWorker: true,
            },
        },
        scheduleConfig: {
            cronSchedule: "30 * * * *",
            retention: 5,
        },
    },
});
# YAML example: custom RKE cluster with a scheduled CIS cluster scan
# (requires Rancher v2.4.x or above).
resources:
  foo:
    type: rancher2:Cluster
    properties:
      description: Terraform custom cluster
      rkeConfig:
        network:
          plugin: canal
        services:
          etcd:
            creation: 6h
            retention: 24h
      # Run a CIS scan every 30 minutes past the hour, keeping 5 reports.
      scheduledClusterScan:
        enabled: true
        scanConfig:
          cisScanConfig:
            debugMaster: true
            debugWorker: true
        scheduleConfig:
          cronSchedule: 30 * * * *
          retention: 5

Importing EKS cluster to Rancher v2, using eks_config_v2. For Rancher v2.5.x or above.

// C# example: import an existing EKS cluster into Rancher via eksConfigV2
// (requires Rancher v2.5.x or above). Imported = true means Rancher does
// not manage the cluster's infrastructure, only registers it.
using System.Collections.Generic;
using Pulumi;
using Rancher2 = Pulumi.Rancher2;

return await Deployment.RunAsync(() => 
{
    // AWS credentials used by Rancher to reach the EKS cluster.
    var fooCloudCredential = new Rancher2.CloudCredential("fooCloudCredential", new()
    {
        Description = "foo test",
        Amazonec2CredentialConfig = new Rancher2.Inputs.CloudCredentialAmazonec2CredentialConfigArgs
        {
            AccessKey = "<AWS_ACCESS_KEY>",
            SecretKey = "<AWS_SECRET_KEY>",
        },
    });

    var fooCluster = new Rancher2.Cluster("fooCluster", new()
    {
        Description = "Terraform EKS cluster",
        EksConfigV2 = new Rancher2.Inputs.ClusterEksConfigV2Args
        {
            CloudCredentialId = fooCloudCredential.Id,
            Name = "<CLUSTER_NAME>",
            Region = "<EKS_REGION>",
            Imported = true,
        },
    });

});
// Go example: import an existing EKS cluster into Rancher via EksConfigV2
// (requires Rancher v2.5.x or above). Imported: true means Rancher does
// not manage the cluster's infrastructure, only registers it.
package main

import (
	"github.com/pulumi/pulumi-rancher2/sdk/v3/go/rancher2"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		// AWS credentials used by Rancher to reach the EKS cluster.
		fooCloudCredential, err := rancher2.NewCloudCredential(ctx, "fooCloudCredential", &rancher2.CloudCredentialArgs{
			Description: pulumi.String("foo test"),
			Amazonec2CredentialConfig: &rancher2.CloudCredentialAmazonec2CredentialConfigArgs{
				AccessKey: pulumi.String("<AWS_ACCESS_KEY>"),
				SecretKey: pulumi.String("<AWS_SECRET_KEY>"),
			},
		})
		if err != nil {
			return err
		}
		_, err = rancher2.NewCluster(ctx, "fooCluster", &rancher2.ClusterArgs{
			Description: pulumi.String("Terraform EKS cluster"),
			EksConfigV2: &rancher2.ClusterEksConfigV2Args{
				CloudCredentialId: fooCloudCredential.ID(),
				Name:              pulumi.String("<CLUSTER_NAME>"),
				Region:            pulumi.String("<EKS_REGION>"),
				Imported:          pulumi.Bool(true),
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}
// Java example: import an existing EKS cluster into Rancher via eksConfigV2
// (requires Rancher v2.5.x or above). imported(true) means Rancher does
// not manage the cluster's infrastructure, only registers it.
package generated_program;

import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.rancher2.CloudCredential;
import com.pulumi.rancher2.CloudCredentialArgs;
import com.pulumi.rancher2.inputs.CloudCredentialAmazonec2CredentialConfigArgs;
import com.pulumi.rancher2.Cluster;
import com.pulumi.rancher2.ClusterArgs;
import com.pulumi.rancher2.inputs.ClusterEksConfigV2Args;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;

public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        // AWS credentials used by Rancher to reach the EKS cluster.
        var fooCloudCredential = new CloudCredential("fooCloudCredential", CloudCredentialArgs.builder()        
            .description("foo test")
            .amazonec2CredentialConfig(CloudCredentialAmazonec2CredentialConfigArgs.builder()
                .accessKey("<AWS_ACCESS_KEY>")
                .secretKey("<AWS_SECRET_KEY>")
                .build())
            .build());

        var fooCluster = new Cluster("fooCluster", ClusterArgs.builder()        
            .description("Terraform EKS cluster")
            .eksConfigV2(ClusterEksConfigV2Args.builder()
                .cloudCredentialId(fooCloudCredential.id())
                .name("<CLUSTER_NAME>")
                .region("<EKS_REGION>")
                .imported(true)
                .build())
            .build());

    }
}
# Python example: import an existing EKS cluster into Rancher via eks_config_v2
# (requires Rancher v2.5.x or above). imported=True means Rancher does not
# manage the cluster's infrastructure, only registers it.
import pulumi
import pulumi_rancher2 as rancher2

# AWS credentials used by Rancher to reach the EKS cluster.
foo_cloud_credential = rancher2.CloudCredential("fooCloudCredential",
    description="foo test",
    amazonec2_credential_config=rancher2.CloudCredentialAmazonec2CredentialConfigArgs(
        access_key="<AWS_ACCESS_KEY>",
        secret_key="<AWS_SECRET_KEY>",
    ))
foo_cluster = rancher2.Cluster("fooCluster",
    description="Terraform EKS cluster",
    eks_config_v2=rancher2.ClusterEksConfigV2Args(
        cloud_credential_id=foo_cloud_credential.id,
        name="<CLUSTER_NAME>",
        region="<EKS_REGION>",
        imported=True,
    ))
// TypeScript example: import an existing EKS cluster into Rancher via
// eksConfigV2 (requires Rancher v2.5.x or above). imported: true means
// Rancher does not manage the cluster's infrastructure, only registers it.
import * as pulumi from "@pulumi/pulumi";
import * as rancher2 from "@pulumi/rancher2";

// AWS credentials used by Rancher to reach the EKS cluster.
const fooCloudCredential = new rancher2.CloudCredential("fooCloudCredential", {
    description: "foo test",
    amazonec2CredentialConfig: {
        accessKey: "<AWS_ACCESS_KEY>",
        secretKey: "<AWS_SECRET_KEY>",
    },
});
const fooCluster = new rancher2.Cluster("fooCluster", {
    description: "Terraform EKS cluster",
    eksConfigV2: {
        cloudCredentialId: fooCloudCredential.id,
        name: "<CLUSTER_NAME>",
        region: "<EKS_REGION>",
        imported: true,
    },
});
# YAML example: import an existing EKS cluster into Rancher via eksConfigV2
# (requires Rancher v2.5.x or above). imported: true means Rancher does not
# manage the cluster's infrastructure, only registers it.
resources:
  # AWS credentials used by Rancher to reach the EKS cluster.
  fooCloudCredential:
    type: rancher2:CloudCredential
    properties:
      description: foo test
      amazonec2CredentialConfig:
        accessKey: <AWS_ACCESS_KEY>
        secretKey: <AWS_SECRET_KEY>
  fooCluster:
    type: rancher2:Cluster
    properties:
      description: Terraform EKS cluster
      eksConfigV2:
        cloudCredentialId: ${fooCloudCredential.id}
        name: <CLUSTER_NAME>
        region: <EKS_REGION>
        imported: true

Creating EKS cluster from Rancher v2, using eks_config_v2. For Rancher v2.5.x or above.

// C# example: create a new EKS cluster from Rancher via eksConfigV2 with
// two managed node groups (requires Rancher v2.5.x or above).
using System.Collections.Generic;
using Pulumi;
using Rancher2 = Pulumi.Rancher2;

return await Deployment.RunAsync(() => 
{
    // AWS credentials Rancher uses to provision the EKS cluster.
    var fooCloudCredential = new Rancher2.CloudCredential("fooCloudCredential", new()
    {
        Description = "foo test",
        Amazonec2CredentialConfig = new Rancher2.Inputs.CloudCredentialAmazonec2CredentialConfigArgs
        {
            AccessKey = "<AWS_ACCESS_KEY>",
            SecretKey = "<AWS_SECRET_KEY>",
        },
    });

    var fooCluster = new Rancher2.Cluster("fooCluster", new()
    {
        Description = "Terraform EKS cluster",
        EksConfigV2 = new Rancher2.Inputs.ClusterEksConfigV2Args
        {
            CloudCredentialId = fooCloudCredential.Id,
            Region = "<EKS_REGION>",
            KubernetesVersion = "1.17",
            LoggingTypes = new[]
            {
                "audit",
                "api",
            },
            NodeGroups = new[]
            {
                new Rancher2.Inputs.ClusterEksConfigV2NodeGroupArgs
                {
                    Name = "node_group1",
                    InstanceType = "t3.medium",
                    DesiredSize = 3,
                    MaxSize = 5,
                },
                new Rancher2.Inputs.ClusterEksConfigV2NodeGroupArgs
                {
                    Name = "node_group2",
                    InstanceType = "m5.xlarge",
                    DesiredSize = 2,
                    MaxSize = 3,
                },
            },
            // API endpoint reachable privately only; public access disabled.
            PrivateAccess = true,
            PublicAccess = false,
        },
    });

});
// Go example: create a new EKS cluster from Rancher via EksConfigV2 with
// two managed node groups (requires Rancher v2.5.x or above).
package main

import (
	"github.com/pulumi/pulumi-rancher2/sdk/v3/go/rancher2"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		// AWS credentials Rancher uses to provision the EKS cluster.
		fooCloudCredential, err := rancher2.NewCloudCredential(ctx, "fooCloudCredential", &rancher2.CloudCredentialArgs{
			Description: pulumi.String("foo test"),
			Amazonec2CredentialConfig: &rancher2.CloudCredentialAmazonec2CredentialConfigArgs{
				AccessKey: pulumi.String("<AWS_ACCESS_KEY>"),
				SecretKey: pulumi.String("<AWS_SECRET_KEY>"),
			},
		})
		if err != nil {
			return err
		}
		_, err = rancher2.NewCluster(ctx, "fooCluster", &rancher2.ClusterArgs{
			Description: pulumi.String("Terraform EKS cluster"),
			EksConfigV2: &rancher2.ClusterEksConfigV2Args{
				CloudCredentialId: fooCloudCredential.ID(),
				Region:            pulumi.String("<EKS_REGION>"),
				KubernetesVersion: pulumi.String("1.17"),
				LoggingTypes: pulumi.StringArray{
					pulumi.String("audit"),
					pulumi.String("api"),
				},
				NodeGroups: rancher2.ClusterEksConfigV2NodeGroupArray{
					&rancher2.ClusterEksConfigV2NodeGroupArgs{
						Name:         pulumi.String("node_group1"),
						InstanceType: pulumi.String("t3.medium"),
						DesiredSize:  pulumi.Int(3),
						MaxSize:      pulumi.Int(5),
					},
					&rancher2.ClusterEksConfigV2NodeGroupArgs{
						Name:         pulumi.String("node_group2"),
						InstanceType: pulumi.String("m5.xlarge"),
						DesiredSize:  pulumi.Int(2),
						MaxSize:      pulumi.Int(3),
					},
				},
				// API endpoint reachable privately only; public access disabled.
				PrivateAccess: pulumi.Bool(true),
				PublicAccess:  pulumi.Bool(false),
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}
// Java example: create a new EKS cluster from Rancher via eksConfigV2 with
// two managed node groups (requires Rancher v2.5.x or above).
package generated_program;

import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.rancher2.CloudCredential;
import com.pulumi.rancher2.CloudCredentialArgs;
import com.pulumi.rancher2.inputs.CloudCredentialAmazonec2CredentialConfigArgs;
import com.pulumi.rancher2.Cluster;
import com.pulumi.rancher2.ClusterArgs;
import com.pulumi.rancher2.inputs.ClusterEksConfigV2Args;
// Fix: this import was missing although ClusterEksConfigV2NodeGroupArgs is
// used below — the snippet did not compile without it.
import com.pulumi.rancher2.inputs.ClusterEksConfigV2NodeGroupArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;

public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        // AWS credentials Rancher uses to provision the EKS cluster.
        var fooCloudCredential = new CloudCredential("fooCloudCredential", CloudCredentialArgs.builder()        
            .description("foo test")
            .amazonec2CredentialConfig(CloudCredentialAmazonec2CredentialConfigArgs.builder()
                .accessKey("<AWS_ACCESS_KEY>")
                .secretKey("<AWS_SECRET_KEY>")
                .build())
            .build());

        var fooCluster = new Cluster("fooCluster", ClusterArgs.builder()        
            .description("Terraform EKS cluster")
            .eksConfigV2(ClusterEksConfigV2Args.builder()
                .cloudCredentialId(fooCloudCredential.id())
                .region("<EKS_REGION>")
                .kubernetesVersion("1.17")
                .loggingTypes(                
                    "audit",
                    "api")
                .nodeGroups(                
                    ClusterEksConfigV2NodeGroupArgs.builder()
                        .name("node_group1")
                        .instanceType("t3.medium")
                        .desiredSize(3)
                        .maxSize(5)
                        .build(),
                    ClusterEksConfigV2NodeGroupArgs.builder()
                        .name("node_group2")
                        .instanceType("m5.xlarge")
                        .desiredSize(2)
                        .maxSize(3)
                        .build())
                // API endpoint reachable privately only; public access disabled.
                .privateAccess(true)
                .publicAccess(false)
                .build())
            .build());

    }
}
# Python example: create a new EKS cluster from Rancher via eks_config_v2
# with two managed node groups (requires Rancher v2.5.x or above).
import pulumi
import pulumi_rancher2 as rancher2

# AWS credentials Rancher uses to provision the EKS cluster.
foo_cloud_credential = rancher2.CloudCredential("fooCloudCredential",
    description="foo test",
    amazonec2_credential_config=rancher2.CloudCredentialAmazonec2CredentialConfigArgs(
        access_key="<AWS_ACCESS_KEY>",
        secret_key="<AWS_SECRET_KEY>",
    ))
foo_cluster = rancher2.Cluster("fooCluster",
    description="Terraform EKS cluster",
    eks_config_v2=rancher2.ClusterEksConfigV2Args(
        cloud_credential_id=foo_cloud_credential.id,
        region="<EKS_REGION>",
        kubernetes_version="1.17",
        logging_types=[
            "audit",
            "api",
        ],
        node_groups=[
            rancher2.ClusterEksConfigV2NodeGroupArgs(
                name="node_group1",
                instance_type="t3.medium",
                desired_size=3,
                max_size=5,
            ),
            rancher2.ClusterEksConfigV2NodeGroupArgs(
                name="node_group2",
                instance_type="m5.xlarge",
                desired_size=2,
                max_size=3,
            ),
        ],
        # API endpoint reachable privately only; public access disabled.
        private_access=True,
        public_access=False,
    ))
// TypeScript example: create a new EKS cluster from Rancher via eksConfigV2
// with two managed node groups (requires Rancher v2.5.x or above).
import * as pulumi from "@pulumi/pulumi";
import * as rancher2 from "@pulumi/rancher2";

// AWS credentials Rancher uses to provision the EKS cluster.
const fooCloudCredential = new rancher2.CloudCredential("fooCloudCredential", {
    description: "foo test",
    amazonec2CredentialConfig: {
        accessKey: "<AWS_ACCESS_KEY>",
        secretKey: "<AWS_SECRET_KEY>",
    },
});
const fooCluster = new rancher2.Cluster("fooCluster", {
    description: "Terraform EKS cluster",
    eksConfigV2: {
        cloudCredentialId: fooCloudCredential.id,
        region: "<EKS_REGION>",
        kubernetesVersion: "1.17",
        loggingTypes: [
            "audit",
            "api",
        ],
        nodeGroups: [
            {
                name: "node_group1",
                instanceType: "t3.medium",
                desiredSize: 3,
                maxSize: 5,
            },
            {
                name: "node_group2",
                instanceType: "m5.xlarge",
                desiredSize: 2,
                maxSize: 3,
            },
        ],
        // API endpoint reachable privately only; public access disabled.
        privateAccess: true,
        publicAccess: false,
    },
});
# YAML example: create a new EKS cluster from Rancher via eksConfigV2 with
# two managed node groups (requires Rancher v2.5.x or above).
resources:
  # AWS credentials Rancher uses to provision the EKS cluster.
  fooCloudCredential:
    type: rancher2:CloudCredential
    properties:
      description: foo test
      amazonec2CredentialConfig:
        accessKey: <AWS_ACCESS_KEY>
        secretKey: <AWS_SECRET_KEY>
  fooCluster:
    type: rancher2:Cluster
    properties:
      description: Terraform EKS cluster
      eksConfigV2:
        cloudCredentialId: ${fooCloudCredential.id}
        region: <EKS_REGION>
        kubernetesVersion: '1.17'
        loggingTypes:
          - audit
          - api
        nodeGroups:
          - name: node_group1
            instanceType: t3.medium
            desiredSize: 3
            maxSize: 5
          - name: node_group2
            instanceType: m5.xlarge
            desiredSize: 2
            maxSize: 3
        # API endpoint reachable privately only; public access disabled.
        privateAccess: true
        publicAccess: false

Creating EKS cluster from Rancher v2, using eks_config_v2 and launch template. For Rancher v2.5.6 or above.

// C# example: create an EKS cluster from Rancher via eksConfigV2 using an
// existing EC2 launch template for the node group (requires Rancher v2.5.6
// or above).
using System.Collections.Generic;
using Pulumi;
using Rancher2 = Pulumi.Rancher2;

return await Deployment.RunAsync(() => 
{
    // AWS credentials Rancher uses to provision the EKS cluster.
    var fooCloudCredential = new Rancher2.CloudCredential("fooCloudCredential", new()
    {
        Description = "foo test",
        Amazonec2CredentialConfig = new Rancher2.Inputs.CloudCredentialAmazonec2CredentialConfigArgs
        {
            AccessKey = "<AWS_ACCESS_KEY>",
            SecretKey = "<AWS_SECRET_KEY>",
        },
    });

    var fooCluster = new Rancher2.Cluster("fooCluster", new()
    {
        Description = "Terraform EKS cluster",
        EksConfigV2 = new Rancher2.Inputs.ClusterEksConfigV2Args
        {
            CloudCredentialId = fooCloudCredential.Id,
            Region = "<EKS_REGION>",
            KubernetesVersion = "1.17",
            LoggingTypes = new[]
            {
                "audit",
                "api",
            },
            NodeGroups = new[]
            {
                new Rancher2.Inputs.ClusterEksConfigV2NodeGroupArgs
                {
                    DesiredSize = 3,
                    MaxSize = 5,
                    Name = "node_group1",
                    // Node group instances come from a pre-existing EC2 launch template.
                    LaunchTemplates = new[]
                    {
                        new Rancher2.Inputs.ClusterEksConfigV2NodeGroupLaunchTemplateArgs
                        {
                            Id = "<EC2_LAUNCH_TEMPLATE_ID>",
                            Version = 1,
                        },
                    },
                },
            },
            PrivateAccess = true,
            PublicAccess = true,
        },
    });

});
// Go example: create an EKS cluster from Rancher via EksConfigV2 using an
// existing EC2 launch template for the node group (requires Rancher v2.5.6
// or above).
package main

import (
	"github.com/pulumi/pulumi-rancher2/sdk/v3/go/rancher2"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		// AWS credentials Rancher uses to provision the EKS cluster.
		fooCloudCredential, err := rancher2.NewCloudCredential(ctx, "fooCloudCredential", &rancher2.CloudCredentialArgs{
			Description: pulumi.String("foo test"),
			Amazonec2CredentialConfig: &rancher2.CloudCredentialAmazonec2CredentialConfigArgs{
				AccessKey: pulumi.String("<AWS_ACCESS_KEY>"),
				SecretKey: pulumi.String("<AWS_SECRET_KEY>"),
			},
		})
		if err != nil {
			return err
		}
		_, err = rancher2.NewCluster(ctx, "fooCluster", &rancher2.ClusterArgs{
			Description: pulumi.String("Terraform EKS cluster"),
			EksConfigV2: &rancher2.ClusterEksConfigV2Args{
				CloudCredentialId: fooCloudCredential.ID(),
				Region:            pulumi.String("<EKS_REGION>"),
				KubernetesVersion: pulumi.String("1.17"),
				LoggingTypes: pulumi.StringArray{
					pulumi.String("audit"),
					pulumi.String("api"),
				},
				NodeGroups: rancher2.ClusterEksConfigV2NodeGroupArray{
					&rancher2.ClusterEksConfigV2NodeGroupArgs{
						DesiredSize: pulumi.Int(3),
						MaxSize:     pulumi.Int(5),
						Name:        pulumi.String("node_group1"),
						// Node group instances come from a pre-existing EC2 launch template.
						LaunchTemplates: rancher2.ClusterEksConfigV2NodeGroupLaunchTemplateArray{
							&rancher2.ClusterEksConfigV2NodeGroupLaunchTemplateArgs{
								Id:      pulumi.String("<EC2_LAUNCH_TEMPLATE_ID>"),
								Version: pulumi.Int(1),
							},
						},
					},
				},
				PrivateAccess: pulumi.Bool(true),
				PublicAccess:  pulumi.Bool(true),
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}
// Java example: create an EKS cluster from Rancher via eksConfigV2 using an
// existing EC2 launch template for the node group (requires Rancher v2.5.6
// or above).
package generated_program;

import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.rancher2.CloudCredential;
import com.pulumi.rancher2.CloudCredentialArgs;
import com.pulumi.rancher2.inputs.CloudCredentialAmazonec2CredentialConfigArgs;
import com.pulumi.rancher2.Cluster;
import com.pulumi.rancher2.ClusterArgs;
import com.pulumi.rancher2.inputs.ClusterEksConfigV2Args;
// Fix: these two imports were missing although both classes are used
// below — the snippet did not compile without them.
import com.pulumi.rancher2.inputs.ClusterEksConfigV2NodeGroupArgs;
import com.pulumi.rancher2.inputs.ClusterEksConfigV2NodeGroupLaunchTemplateArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;

public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        // AWS credentials Rancher uses to provision the EKS cluster.
        var fooCloudCredential = new CloudCredential("fooCloudCredential", CloudCredentialArgs.builder()        
            .description("foo test")
            .amazonec2CredentialConfig(CloudCredentialAmazonec2CredentialConfigArgs.builder()
                .accessKey("<AWS_ACCESS_KEY>")
                .secretKey("<AWS_SECRET_KEY>")
                .build())
            .build());

        var fooCluster = new Cluster("fooCluster", ClusterArgs.builder()        
            .description("Terraform EKS cluster")
            .eksConfigV2(ClusterEksConfigV2Args.builder()
                .cloudCredentialId(fooCloudCredential.id())
                .region("<EKS_REGION>")
                .kubernetesVersion("1.17")
                .loggingTypes(                
                    "audit",
                    "api")
                .nodeGroups(ClusterEksConfigV2NodeGroupArgs.builder()
                    .desiredSize(3)
                    .maxSize(5)
                    .name("node_group1")
                    // Node group instances come from a pre-existing EC2 launch template.
                    .launchTemplates(ClusterEksConfigV2NodeGroupLaunchTemplateArgs.builder()
                        .id("<EC2_LAUNCH_TEMPLATE_ID>")
                        .version(1)
                        .build())
                    .build())
                .privateAccess(true)
                .publicAccess(true)
                .build())
            .build());

    }
}
# Python example: create an EKS cluster from Rancher via eks_config_v2 using
# an existing EC2 launch template for the node group (requires Rancher
# v2.5.6 or above).
import pulumi
import pulumi_rancher2 as rancher2

# AWS credentials Rancher uses to provision the EKS cluster.
foo_cloud_credential = rancher2.CloudCredential("fooCloudCredential",
    description="foo test",
    amazonec2_credential_config=rancher2.CloudCredentialAmazonec2CredentialConfigArgs(
        access_key="<AWS_ACCESS_KEY>",
        secret_key="<AWS_SECRET_KEY>",
    ))
foo_cluster = rancher2.Cluster("fooCluster",
    description="Terraform EKS cluster",
    eks_config_v2=rancher2.ClusterEksConfigV2Args(
        cloud_credential_id=foo_cloud_credential.id,
        region="<EKS_REGION>",
        kubernetes_version="1.17",
        logging_types=[
            "audit",
            "api",
        ],
        node_groups=[rancher2.ClusterEksConfigV2NodeGroupArgs(
            desired_size=3,
            max_size=5,
            name="node_group1",
            # Node group instances come from a pre-existing EC2 launch template.
            launch_templates=[rancher2.ClusterEksConfigV2NodeGroupLaunchTemplateArgs(
                id="<EC2_LAUNCH_TEMPLATE_ID>",
                version=1,
            )],
        )],
        private_access=True,
        public_access=True,
    ))
import * as pulumi from "@pulumi/pulumi";
import * as rancher2 from "@pulumi/rancher2";

// Create a new rancher2 Cloud Credential holding the AWS access keys
// used to manage the EKS cluster.
const fooCloudCredential = new rancher2.CloudCredential("fooCloudCredential", {
    description: "foo test",
    amazonec2CredentialConfig: {
        accessKey: "<AWS_ACCESS_KEY>",
        secretKey: "<AWS_SECRET_KEY>",
    },
});
// Create a new rancher2 EKS Cluster, referencing the credential above.
const fooCluster = new rancher2.Cluster("fooCluster", {
    description: "Terraform EKS cluster",
    eksConfigV2: {
        cloudCredentialId: fooCloudCredential.id,
        region: "<EKS_REGION>",
        kubernetesVersion: "1.17",
        loggingTypes: [
            "audit",
            "api",
        ],
        nodeGroups: [{
            desiredSize: 3,
            maxSize: 5,
            name: "node_group1",
            launchTemplates: [{
                id: "<EC2_LAUNCH_TEMPLATE_ID>",
                version: 1,
            }],
        }],
        privateAccess: true,
        publicAccess: true,
    },
});
resources:
  # Cloud Credential holding the AWS access keys used to manage the EKS cluster.
  fooCloudCredential:
    type: rancher2:CloudCredential
    properties:
      description: foo test
      amazonec2CredentialConfig:
        accessKey: <AWS_ACCESS_KEY>
        secretKey: <AWS_SECRET_KEY>
  # EKS cluster created through Rancher, referencing the credential above.
  fooCluster:
    type: rancher2:Cluster
    properties:
      description: Terraform EKS cluster
      eksConfigV2:
        cloudCredentialId: ${fooCloudCredential.id}
        region: <EKS_REGION>
        kubernetesVersion: '1.17'
        loggingTypes:
          - audit
          - api
        nodeGroups:
          - desiredSize: 3
            maxSize: 5
            name: node_group1
            launchTemplates:
              - id: <EC2_LAUNCH_TEMPLATE_ID>
                version: 1
        privateAccess: true
        publicAccess: true

Creating an AKS cluster from Rancher v2, using `aks_config_v2`. For Rancher v2.6.0 or above.

using System.Collections.Generic;
using Pulumi;
using Rancher2 = Pulumi.Rancher2;

return await Deployment.RunAsync(() => 
{
    // Create a new rancher2 Cloud Credential holding the Azure service
    // principal used to manage the AKS cluster.
    var foo_aks = new Rancher2.CloudCredential("foo-aks", new()
    {
        AzureCredentialConfig = new Rancher2.Inputs.CloudCredentialAzureCredentialConfigArgs
        {
            ClientId = "<CLIENT_ID>",
            ClientSecret = "<CLIENT_SECRET>",
            SubscriptionId = "<SUBSCRIPTION_ID>",
        },
    });

    // Create the AKS cluster, referencing the credential above.
    var foo = new Rancher2.Cluster("foo", new()
    {
        Description = "Terraform AKS cluster",
        AksConfigV2 = new Rancher2.Inputs.ClusterAksConfigV2Args
        {
            CloudCredentialId = foo_aks.Id,
            ResourceGroup = "<RESOURCE_GROUP>",
            ResourceLocation = "<RESOURCE_LOCATION>",
            DnsPrefix = "<DNS_PREFIX>",
            KubernetesVersion = "1.21.2",
            NetworkPlugin = "<NETWORK_PLUGIN>",
            NodePools = new[]
            {
                new Rancher2.Inputs.ClusterAksConfigV2NodePoolArgs
                {
                    AvailabilityZones = new[]
                    {
                        "1",
                        "2",
                        "3",
                    },
                    Name = "<NODEPOOL_NAME>",
                    Count = 1,
                    OrchestratorVersion = "1.21.2",
                    OsDiskSizeGb = 128,
                    VmSize = "Standard_DS2_v2",
                },
            },
        },
    });

});
package main

import (
	"github.com/pulumi/pulumi-rancher2/sdk/v3/go/rancher2"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		_, err := rancher2.NewCloudCredential(ctx, "foo-aks", &rancher2.CloudCredentialArgs{
			AzureCredentialConfig: &rancher2.CloudCredentialAzureCredentialConfigArgs{
				ClientId:       pulumi.String("<CLIENT_ID>"),
				ClientSecret:   pulumi.String("<CLIENT_SECRET>"),
				SubscriptionId: pulumi.String("<SUBSCRIPTION_ID>"),
			},
		})
		if err != nil {
			return err
		}
		_, err = rancher2.NewCluster(ctx, "foo", &rancher2.ClusterArgs{
			Description: pulumi.String("Terraform AKS cluster"),
			AksConfigV2: &rancher2.ClusterAksConfigV2Args{
				CloudCredentialId: foo_aks.ID(),
				ResourceGroup:     pulumi.String("<RESOURCE_GROUP>"),
				ResourceLocation:  pulumi.String("<RESOURCE_LOCATION>"),
				DnsPrefix:         pulumi.String("<DNS_PREFIX>"),
				KubernetesVersion: pulumi.String("1.21.2"),
				NetworkPlugin:     pulumi.String("<NETWORK_PLUGIN>"),
				NodePools: rancher2.ClusterAksConfigV2NodePoolArray{
					&rancher2.ClusterAksConfigV2NodePoolArgs{
						AvailabilityZones: pulumi.StringArray{
							pulumi.String("1"),
							pulumi.String("2"),
							pulumi.String("3"),
						},
						Name:                pulumi.String("<NODEPOOL_NAME>"),
						Count:               pulumi.Int(1),
						OrchestratorVersion: pulumi.String("1.21.2"),
						OsDiskSizeGb:        pulumi.Int(128),
						VmSize:              pulumi.String("Standard_DS2_v2"),
					},
				},
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}
package generated_program;

import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.rancher2.CloudCredential;
import com.pulumi.rancher2.CloudCredentialArgs;
import com.pulumi.rancher2.inputs.CloudCredentialAzureCredentialConfigArgs;
import com.pulumi.rancher2.Cluster;
import com.pulumi.rancher2.ClusterArgs;
import com.pulumi.rancher2.inputs.ClusterAksConfigV2Args;
// Fix: ClusterAksConfigV2NodePoolArgs is used below but was not imported.
import com.pulumi.rancher2.inputs.ClusterAksConfigV2NodePoolArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;

public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        // Create a new rancher2 Cloud Credential holding the Azure service
        // principal used to manage the AKS cluster.
        var foo_aks = new CloudCredential("foo-aks", CloudCredentialArgs.builder()
            .azureCredentialConfig(CloudCredentialAzureCredentialConfigArgs.builder()
                .clientId("<CLIENT_ID>")
                .clientSecret("<CLIENT_SECRET>")
                .subscriptionId("<SUBSCRIPTION_ID>")
                .build())
            .build());

        // Create the AKS cluster, referencing the credential above.
        var foo = new Cluster("foo", ClusterArgs.builder()
            .description("Terraform AKS cluster")
            .aksConfigV2(ClusterAksConfigV2Args.builder()
                .cloudCredentialId(foo_aks.id())
                .resourceGroup("<RESOURCE_GROUP>")
                .resourceLocation("<RESOURCE_LOCATION>")
                .dnsPrefix("<DNS_PREFIX>")
                .kubernetesVersion("1.21.2")
                .networkPlugin("<NETWORK_PLUGIN>")
                .nodePools(ClusterAksConfigV2NodePoolArgs.builder()
                    .availabilityZones(
                        "1",
                        "2",
                        "3")
                    .name("<NODEPOOL_NAME>")
                    .count(1)
                    .orchestratorVersion("1.21.2")
                    .osDiskSizeGb(128)
                    .vmSize("Standard_DS2_v2")
                    .build())
                .build())
            .build());

    }
}
import pulumi
import pulumi_rancher2 as rancher2

# Create a new rancher2 Cloud Credential holding the Azure service
# principal used to manage the AKS cluster.
foo_aks = rancher2.CloudCredential("foo-aks", azure_credential_config=rancher2.CloudCredentialAzureCredentialConfigArgs(
    client_id="<CLIENT_ID>",
    client_secret="<CLIENT_SECRET>",
    subscription_id="<SUBSCRIPTION_ID>",
))
# Create the AKS cluster, referencing the credential above.
foo = rancher2.Cluster("foo",
    description="Terraform AKS cluster",
    aks_config_v2=rancher2.ClusterAksConfigV2Args(
        cloud_credential_id=foo_aks.id,
        resource_group="<RESOURCE_GROUP>",
        resource_location="<RESOURCE_LOCATION>",
        dns_prefix="<DNS_PREFIX>",
        kubernetes_version="1.21.2",
        network_plugin="<NETWORK_PLUGIN>",
        node_pools=[rancher2.ClusterAksConfigV2NodePoolArgs(
            availability_zones=[
                "1",
                "2",
                "3",
            ],
            name="<NODEPOOL_NAME>",
            count=1,
            orchestrator_version="1.21.2",
            os_disk_size_gb=128,
            vm_size="Standard_DS2_v2",
        )],
    ))
import * as pulumi from "@pulumi/pulumi";
import * as rancher2 from "@pulumi/rancher2";

// Create a new rancher2 Cloud Credential holding the Azure service
// principal used to manage the AKS cluster.
const foo_aks = new rancher2.CloudCredential("foo-aks", {azureCredentialConfig: {
    clientId: "<CLIENT_ID>",
    clientSecret: "<CLIENT_SECRET>",
    subscriptionId: "<SUBSCRIPTION_ID>",
}});
// Create the AKS cluster, referencing the credential above.
const foo = new rancher2.Cluster("foo", {
    description: "Terraform AKS cluster",
    aksConfigV2: {
        cloudCredentialId: foo_aks.id,
        resourceGroup: "<RESOURCE_GROUP>",
        resourceLocation: "<RESOURCE_LOCATION>",
        dnsPrefix: "<DNS_PREFIX>",
        kubernetesVersion: "1.21.2",
        networkPlugin: "<NETWORK_PLUGIN>",
        nodePools: [{
            availabilityZones: [
                "1",
                "2",
                "3",
            ],
            name: "<NODEPOOL_NAME>",
            count: 1,
            orchestratorVersion: "1.21.2",
            osDiskSizeGb: 128,
            vmSize: "Standard_DS2_v2",
        }],
    },
});
resources:
  # Cloud Credential holding the Azure service principal used to manage the AKS cluster.
  foo-aks:
    type: rancher2:CloudCredential
    properties:
      azureCredentialConfig:
        clientId: <CLIENT_ID>
        clientSecret: <CLIENT_SECRET>
        subscriptionId: <SUBSCRIPTION_ID>
  # AKS cluster created through Rancher, referencing the credential above.
  foo:
    type: rancher2:Cluster
    properties:
      description: Terraform AKS cluster
      aksConfigV2:
        cloudCredentialId: ${["foo-aks"].id}
        resourceGroup: <RESOURCE_GROUP>
        resourceLocation: <RESOURCE_LOCATION>
        dnsPrefix: <DNS_PREFIX>
        kubernetesVersion: 1.21.2
        networkPlugin: <NETWORK_PLUGIN>
        nodePools:
          - availabilityZones:
              - '1'
              - '2'
              - '3'
            name: <NODEPOOL_NAME>
            count: 1
            orchestratorVersion: 1.21.2
            osDiskSizeGb: 128
            vmSize: Standard_DS2_v2

Create Cluster Resource

new Cluster(name: string, args?: ClusterArgs, opts?: CustomResourceOptions);
@overload
def Cluster(resource_name: str,
            opts: Optional[ResourceOptions] = None,
            agent_env_vars: Optional[Sequence[ClusterAgentEnvVarArgs]] = None,
            aks_config: Optional[ClusterAksConfigArgs] = None,
            aks_config_v2: Optional[ClusterAksConfigV2Args] = None,
            annotations: Optional[Mapping[str, Any]] = None,
            cluster_auth_endpoint: Optional[ClusterClusterAuthEndpointArgs] = None,
            cluster_monitoring_input: Optional[ClusterClusterMonitoringInputArgs] = None,
            cluster_template_answers: Optional[ClusterClusterTemplateAnswersArgs] = None,
            cluster_template_id: Optional[str] = None,
            cluster_template_questions: Optional[Sequence[ClusterClusterTemplateQuestionArgs]] = None,
            cluster_template_revision_id: Optional[str] = None,
            default_pod_security_policy_template_id: Optional[str] = None,
            description: Optional[str] = None,
            desired_agent_image: Optional[str] = None,
            desired_auth_image: Optional[str] = None,
            docker_root_dir: Optional[str] = None,
            driver: Optional[str] = None,
            eks_config: Optional[ClusterEksConfigArgs] = None,
            eks_config_v2: Optional[ClusterEksConfigV2Args] = None,
            enable_cluster_alerting: Optional[bool] = None,
            enable_cluster_monitoring: Optional[bool] = None,
            enable_network_policy: Optional[bool] = None,
            fleet_workspace_name: Optional[str] = None,
            gke_config: Optional[ClusterGkeConfigArgs] = None,
            gke_config_v2: Optional[ClusterGkeConfigV2Args] = None,
            k3s_config: Optional[ClusterK3sConfigArgs] = None,
            labels: Optional[Mapping[str, Any]] = None,
            name: Optional[str] = None,
            oke_config: Optional[ClusterOkeConfigArgs] = None,
            rke2_config: Optional[ClusterRke2ConfigArgs] = None,
            rke_config: Optional[ClusterRkeConfigArgs] = None,
            scheduled_cluster_scan: Optional[ClusterScheduledClusterScanArgs] = None,
            windows_prefered_cluster: Optional[bool] = None)
@overload
def Cluster(resource_name: str,
            args: Optional[ClusterArgs] = None,
            opts: Optional[ResourceOptions] = None)
func NewCluster(ctx *Context, name string, args *ClusterArgs, opts ...ResourceOption) (*Cluster, error)
public Cluster(string name, ClusterArgs? args = null, CustomResourceOptions? opts = null)
public Cluster(String name, ClusterArgs args)
public Cluster(String name, ClusterArgs args, CustomResourceOptions options)
type: rancher2:Cluster
properties: # The arguments to resource properties.
options: # Bag of options to control resource's behavior.

name string
The unique name of the resource.
args ClusterArgs
The arguments to resource properties.
opts CustomResourceOptions
Bag of options to control resource's behavior.
resource_name str
The unique name of the resource.
args ClusterArgs
The arguments to resource properties.
opts ResourceOptions
Bag of options to control resource's behavior.
ctx Context
Context object for the current deployment.
name string
The unique name of the resource.
args ClusterArgs
The arguments to resource properties.
opts ResourceOption
Bag of options to control resource's behavior.
name string
The unique name of the resource.
args ClusterArgs
The arguments to resource properties.
opts CustomResourceOptions
Bag of options to control resource's behavior.
name String
The unique name of the resource.
args ClusterArgs
The arguments to resource properties.
options CustomResourceOptions
Bag of options to control resource's behavior.

Cluster Resource Properties

To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.

Inputs

The Cluster resource accepts the following input properties:

AgentEnvVars List<ClusterAgentEnvVarArgs>

Optional Agent Env Vars for Rancher agent. Just for Rancher v2.5.6 and above (list)

AksConfig ClusterAksConfigArgs

The Azure AKS configuration for aks Clusters. Conflicts with aks_config_v2, eks_config, eks_config_v2, gke_config, gke_config_v2, oke_config k3s_config and rke_config (list maxitems:1)

AksConfigV2 ClusterAksConfigV2Args

The Azure AKS v2 configuration for creating/import aks Clusters. Conflicts with aks_config, eks_config, eks_config_v2, gke_config, gke_config_v2, oke_config k3s_config and rke_config (list maxitems:1)

Annotations Dictionary<string, object>

Annotations for the Cluster (map)

ClusterAuthEndpoint ClusterClusterAuthEndpointArgs

Enabling the local cluster authorized endpoint allows direct communication with the cluster, bypassing the Rancher API proxy. (list maxitems:1)

ClusterMonitoringInput ClusterClusterMonitoringInputArgs

Cluster monitoring config. Any parameter defined in rancher-monitoring charts could be configured (list maxitems:1)

ClusterTemplateAnswers ClusterClusterTemplateAnswersArgs

Cluster template answers. Just for Rancher v2.3.x and above (list maxitems:1)

ClusterTemplateId string

Cluster template ID. Just for Rancher v2.3.x and above (string)

ClusterTemplateQuestions List<ClusterClusterTemplateQuestionArgs>

Cluster template questions. Just for Rancher v2.3.x and above (list)

ClusterTemplateRevisionId string

Cluster template revision ID. Just for Rancher v2.3.x and above (string)

DefaultPodSecurityPolicyTemplateId string

Default pod security policy template id (string)

Description string

The description for Cluster (string)

DesiredAgentImage string

Desired agent image. Just for Rancher v2.3.x and above (string)

DesiredAuthImage string

Desired auth image. Just for Rancher v2.3.x and above (string)

DockerRootDir string

Docker Root Dir for the Cluster (string)

Driver string

(Computed) The driver used for the Cluster. imported, azurekubernetesservice, amazonelasticcontainerservice, googlekubernetesengine and rancherKubernetesEngine are supported (string)

EksConfig ClusterEksConfigArgs

The Amazon EKS configuration for eks Clusters. Conflicts with aks_config, aks_config_v2, eks_config_v2, gke_config, gke_config_v2, oke_config k3s_config and rke_config (list maxitems:1)

EksConfigV2 ClusterEksConfigV2Args

The Amazon EKS V2 configuration to create or import eks Clusters. Conflicts with aks_config, eks_config, gke_config, gke_config_v2, oke_config k3s_config and rke_config. For Rancher v2.5.x or above (list maxitems:1)

EnableClusterAlerting bool

Enable built-in cluster alerting (bool)

EnableClusterMonitoring bool

Enable built-in cluster monitoring (bool)

EnableNetworkPolicy bool

Enable project network isolation (bool)

FleetWorkspaceName string

Fleet workspace name (string)

GkeConfig ClusterGkeConfigArgs

The Google GKE configuration for gke Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config_v2, oke_config, k3s_config and rke_config (list maxitems:1)

GkeConfigV2 ClusterGkeConfigV2Args

The Google GKE V2 configuration for gke Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config, oke_config, k3s_config and rke_config. For Rancher v2.5.8 or above (list maxitems:1)

K3sConfig ClusterK3sConfigArgs

The K3S configuration for k3s imported Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config, gke_config_v2, oke_config and rke_config (list maxitems:1)

Labels Dictionary<string, object>

Labels for the Cluster (map)

Name string

The name of the Cluster (string)

OkeConfig ClusterOkeConfigArgs

The Oracle OKE configuration for oke Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config, gke_config_v2, k3s_config and rke_config (list maxitems:1)

Rke2Config ClusterRke2ConfigArgs

The RKE2 configuration for rke2 Clusters. Conflicts with aks_config, aks_config_v2, eks_config, gke_config, oke_config, k3s_config and rke_config (list maxitems:1)

RkeConfig ClusterRkeConfigArgs

The RKE configuration for rke Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config, gke_config_v2, oke_config and k3s_config (list maxitems:1)

ScheduledClusterScan ClusterScheduledClusterScanArgs

Cluster scheduled cis scan. For Rancher v2.4.0 or above (List maxitems:1)

WindowsPreferedCluster bool

Windows preferred cluster. Default: false (bool)

AgentEnvVars []ClusterAgentEnvVarArgs

Optional Agent Env Vars for Rancher agent. Just for Rancher v2.5.6 and above (list)

AksConfig ClusterAksConfigArgs

The Azure AKS configuration for aks Clusters. Conflicts with aks_config_v2, eks_config, eks_config_v2, gke_config, gke_config_v2, oke_config k3s_config and rke_config (list maxitems:1)

AksConfigV2 ClusterAksConfigV2Args

The Azure AKS v2 configuration for creating/import aks Clusters. Conflicts with aks_config, eks_config, eks_config_v2, gke_config, gke_config_v2, oke_config k3s_config and rke_config (list maxitems:1)

Annotations map[string]interface{}

Annotations for the Cluster (map)

ClusterAuthEndpoint ClusterClusterAuthEndpointArgs

Enabling the local cluster authorized endpoint allows direct communication with the cluster, bypassing the Rancher API proxy. (list maxitems:1)

ClusterMonitoringInput ClusterClusterMonitoringInputArgs

Cluster monitoring config. Any parameter defined in rancher-monitoring charts could be configured (list maxitems:1)

ClusterTemplateAnswers ClusterClusterTemplateAnswersArgs

Cluster template answers. Just for Rancher v2.3.x and above (list maxitems:1)

ClusterTemplateId string

Cluster template ID. Just for Rancher v2.3.x and above (string)

ClusterTemplateQuestions []ClusterClusterTemplateQuestionArgs

Cluster template questions. Just for Rancher v2.3.x and above (list)

ClusterTemplateRevisionId string

Cluster template revision ID. Just for Rancher v2.3.x and above (string)

DefaultPodSecurityPolicyTemplateId string

Default pod security policy template id (string)

Description string

The description for Cluster (string)

DesiredAgentImage string

Desired agent image. Just for Rancher v2.3.x and above (string)

DesiredAuthImage string

Desired auth image. Just for Rancher v2.3.x and above (string)

DockerRootDir string

Docker Root Dir for the Cluster (string)

Driver string

(Computed) The driver used for the Cluster. imported, azurekubernetesservice, amazonelasticcontainerservice, googlekubernetesengine and rancherKubernetesEngine are supported (string)

EksConfig ClusterEksConfigArgs

The Amazon EKS configuration for eks Clusters. Conflicts with aks_config, aks_config_v2, eks_config_v2, gke_config, gke_config_v2, oke_config k3s_config and rke_config (list maxitems:1)

EksConfigV2 ClusterEksConfigV2Args

The Amazon EKS V2 configuration to create or import eks Clusters. Conflicts with aks_config, eks_config, gke_config, gke_config_v2, oke_config k3s_config and rke_config. For Rancher v2.5.x or above (list maxitems:1)

EnableClusterAlerting bool

Enable built-in cluster alerting (bool)

EnableClusterMonitoring bool

Enable built-in cluster monitoring (bool)

EnableNetworkPolicy bool

Enable project network isolation (bool)

FleetWorkspaceName string

Fleet workspace name (string)

GkeConfig ClusterGkeConfigArgs

The Google GKE configuration for gke Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config_v2, oke_config, k3s_config and rke_config (list maxitems:1)

GkeConfigV2 ClusterGkeConfigV2Args

The Google GKE V2 configuration for gke Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config, oke_config, k3s_config and rke_config. For Rancher v2.5.8 or above (list maxitems:1)

K3sConfig ClusterK3sConfigArgs

The K3S configuration for k3s imported Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config, gke_config_v2, oke_config and rke_config (list maxitems:1)

Labels map[string]interface{}

Labels for the Cluster (map)

Name string

The name of the Cluster (string)

OkeConfig ClusterOkeConfigArgs

The Oracle OKE configuration for oke Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config, gke_config_v2, k3s_config and rke_config (list maxitems:1)

Rke2Config ClusterRke2ConfigArgs

The RKE2 configuration for rke2 Clusters. Conflicts with aks_config, aks_config_v2, eks_config, gke_config, oke_config, k3s_config and rke_config (list maxitems:1)

RkeConfig ClusterRkeConfigArgs

The RKE configuration for rke Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config, gke_config_v2, oke_config and k3s_config (list maxitems:1)

ScheduledClusterScan ClusterScheduledClusterScanArgs

Cluster scheduled cis scan. For Rancher v2.4.0 or above (List maxitems:1)

WindowsPreferedCluster bool

Windows preferred cluster. Default: false (bool)

agentEnvVars List<ClusterAgentEnvVarArgs>

Optional Agent Env Vars for Rancher agent. Just for Rancher v2.5.6 and above (list)

aksConfig ClusterAksConfigArgs

The Azure AKS configuration for aks Clusters. Conflicts with aks_config_v2, eks_config, eks_config_v2, gke_config, gke_config_v2, oke_config k3s_config and rke_config (list maxitems:1)

aksConfigV2 ClusterAksConfigV2Args

The Azure AKS v2 configuration for creating/import aks Clusters. Conflicts with aks_config, eks_config, eks_config_v2, gke_config, gke_config_v2, oke_config k3s_config and rke_config (list maxitems:1)

annotations Map<String,Object>

Annotations for the Cluster (map)

clusterAuthEndpoint ClusterClusterAuthEndpointArgs

Enabling the local cluster authorized endpoint allows direct communication with the cluster, bypassing the Rancher API proxy. (list maxitems:1)

clusterMonitoringInput ClusterClusterMonitoringInputArgs

Cluster monitoring config. Any parameter defined in rancher-monitoring charts could be configured (list maxitems:1)

clusterTemplateAnswers ClusterClusterTemplateAnswersArgs

Cluster template answers. Just for Rancher v2.3.x and above (list maxitems:1)

clusterTemplateId String

Cluster template ID. Just for Rancher v2.3.x and above (string)

clusterTemplateQuestions List<ClusterClusterTemplateQuestionArgs>

Cluster template questions. Just for Rancher v2.3.x and above (list)

clusterTemplateRevisionId String

Cluster template revision ID. Just for Rancher v2.3.x and above (string)

defaultPodSecurityPolicyTemplateId String

Default pod security policy template id (string)

description String

The description for Cluster (string)

desiredAgentImage String

Desired agent image. Just for Rancher v2.3.x and above (string)

desiredAuthImage String

Desired auth image. Just for Rancher v2.3.x and above (string)

dockerRootDir String

Docker Root Dir for the Cluster (string)

driver String

(Computed) The driver used for the Cluster. imported, azurekubernetesservice, amazonelasticcontainerservice, googlekubernetesengine and rancherKubernetesEngine are supported (string)

eksConfig ClusterEksConfigArgs

The Amazon EKS configuration for eks Clusters. Conflicts with aks_config, aks_config_v2, eks_config_v2, gke_config, gke_config_v2, oke_config k3s_config and rke_config (list maxitems:1)

eksConfigV2 ClusterEksConfigV2Args

The Amazon EKS V2 configuration to create or import eks Clusters. Conflicts with aks_config, eks_config, gke_config, gke_config_v2, oke_config k3s_config and rke_config. For Rancher v2.5.x or above (list maxitems:1)

enableClusterAlerting Boolean

Enable built-in cluster alerting (bool)

enableClusterMonitoring Boolean

Enable built-in cluster monitoring (bool)

enableNetworkPolicy Boolean

Enable project network isolation (bool)

fleetWorkspaceName String

Fleet workspace name (string)

gkeConfig ClusterGkeConfigArgs

The Google GKE configuration for gke Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config_v2, oke_config, k3s_config and rke_config (list maxitems:1)

gkeConfigV2 ClusterGkeConfigV2Args

The Google GKE V2 configuration for gke Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config, oke_config, k3s_config and rke_config. For Rancher v2.5.8 or above (list maxitems:1)

k3sConfig ClusterK3sConfigArgs

The K3S configuration for k3s imported Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config, gke_config_v2, oke_config and rke_config (list maxitems:1)

labels Map<String,Object>

Labels for the Cluster (map)

name String

The name of the Cluster (string)

okeConfig ClusterOkeConfigArgs

The Oracle OKE configuration for oke Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config, gke_config_v2, k3s_config and rke_config (list maxitems:1)

rke2Config ClusterRke2ConfigArgs

The RKE2 configuration for rke2 Clusters. Conflicts with aks_config, aks_config_v2, eks_config, gke_config, oke_config, k3s_config and rke_config (list maxitems:1)

rkeConfig ClusterRkeConfigArgs

The RKE configuration for rke Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config, gke_config_v2, oke_config and k3s_config (list maxitems:1)

scheduledClusterScan ClusterScheduledClusterScanArgs

Cluster scheduled cis scan. For Rancher v2.4.0 or above (List maxitems:1)

windowsPreferedCluster Boolean

Windows preferred cluster. Default: false (bool)

agentEnvVars ClusterAgentEnvVarArgs[]

Optional Agent Env Vars for Rancher agent. Just for Rancher v2.5.6 and above (list)

aksConfig ClusterAksConfigArgs

The Azure AKS configuration for aks Clusters. Conflicts with aks_config_v2, eks_config, eks_config_v2, gke_config, gke_config_v2, oke_config k3s_config and rke_config (list maxitems:1)

aksConfigV2 ClusterAksConfigV2Args

The Azure AKS v2 configuration for creating/import aks Clusters. Conflicts with aks_config, eks_config, eks_config_v2, gke_config, gke_config_v2, oke_config k3s_config and rke_config (list maxitems:1)

annotations {[key: string]: any}

Annotations for the Cluster (map)

clusterAuthEndpoint ClusterClusterAuthEndpointArgs

Enabling the local cluster authorized endpoint allows direct communication with the cluster, bypassing the Rancher API proxy. (list maxitems:1)

clusterMonitoringInput ClusterClusterMonitoringInputArgs

Cluster monitoring config. Any parameter defined in rancher-monitoring charts could be configured (list maxitems:1)

clusterTemplateAnswers ClusterClusterTemplateAnswersArgs

Cluster template answers. Just for Rancher v2.3.x and above (list maxitems:1)

clusterTemplateId string

Cluster template ID. Just for Rancher v2.3.x and above (string)

clusterTemplateQuestions ClusterClusterTemplateQuestionArgs[]

Cluster template questions. Just for Rancher v2.3.x and above (list)

clusterTemplateRevisionId string

Cluster template revision ID. Just for Rancher v2.3.x and above (string)

defaultPodSecurityPolicyTemplateId string

Default pod security policy template id (string)

description string

The description for Cluster (string)

desiredAgentImage string

Desired agent image. Just for Rancher v2.3.x and above (string)

desiredAuthImage string

Desired auth image. Just for Rancher v2.3.x and above (string)

dockerRootDir string

Docker Root Dir for the Cluster (string)

driver string

(Computed) The driver used for the Cluster. imported, azurekubernetesservice, amazonelasticcontainerservice, googlekubernetesengine and rancherKubernetesEngine are supported (string)

eksConfig ClusterEksConfigArgs

The Amazon EKS configuration for eks Clusters. Conflicts with aks_config, aks_config_v2, eks_config_v2, gke_config, gke_config_v2, oke_config k3s_config and rke_config (list maxitems:1)

eksConfigV2 ClusterEksConfigV2Args

The Amazon EKS V2 configuration to create or import eks Clusters. Conflicts with aks_config, eks_config, gke_config, gke_config_v2, oke_config k3s_config and rke_config. For Rancher v2.5.x or above (list maxitems:1)

enableClusterAlerting boolean

Enable built-in cluster alerting (bool)

enableClusterMonitoring boolean

Enable built-in cluster monitoring (bool)

enableNetworkPolicy boolean

Enable project network isolation (bool)

fleetWorkspaceName string

Fleet workspace name (string)

gkeConfig ClusterGkeConfigArgs

The Google GKE configuration for gke Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config_v2, oke_config, k3s_config and rke_config (list maxitems:1)

gkeConfigV2 ClusterGkeConfigV2Args

The Google GKE V2 configuration for gke Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config, oke_config, k3s_config and rke_config. For Rancher v2.5.8 or above (list maxitems:1)

k3sConfig ClusterK3sConfigArgs

The K3S configuration for k3s imported Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config, gke_config_v2, oke_config and rke_config (list maxitems:1)

labels {[key: string]: any}

Labels for the Cluster (map)

name string

The name of the Cluster (string)

okeConfig ClusterOkeConfigArgs

The Oracle OKE configuration for oke Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config, gke_config_v2, k3s_config and rke_config (list maxitems:1)

rke2Config ClusterRke2ConfigArgs

The RKE2 configuration for rke2 Clusters. Conflicts with aks_config, aks_config_v2, eks_config, gke_config, oke_config, k3s_config and rke_config (list maxitems:1)

rkeConfig ClusterRkeConfigArgs

The RKE configuration for rke Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config, gke_config_v2, oke_config and k3s_config (list maxitems:1)

scheduledClusterScan ClusterScheduledClusterScanArgs

Cluster scheduled cis scan. For Rancher v2.4.0 or above (List maxitems:1)

windowsPreferedCluster boolean

Windows preferred cluster. Default: false (bool)

agent_env_vars Sequence[ClusterAgentEnvVarArgs]

Optional Agent Env Vars for Rancher agent. Just for Rancher v2.5.6 and above (list)

aks_config ClusterAksConfigArgs

The Azure AKS configuration for aks Clusters. Conflicts with aks_config_v2, eks_config, eks_config_v2, gke_config, gke_config_v2, oke_config, k3s_config and rke_config (list maxitems:1)

aks_config_v2 ClusterAksConfigV2Args

The Azure AKS v2 configuration for creating/importing aks Clusters. Conflicts with aks_config, eks_config, eks_config_v2, gke_config, gke_config_v2, oke_config, k3s_config and rke_config (list maxitems:1)

annotations Mapping[str, Any]

Annotations for the Cluster (map)

cluster_auth_endpoint ClusterClusterAuthEndpointArgs

Enabling the local cluster authorized endpoint allows direct communication with the cluster, bypassing the Rancher API proxy. (list maxitems:1)

cluster_monitoring_input ClusterClusterMonitoringInputArgs

Cluster monitoring config. Any parameter defined in rancher-monitoring charts could be configured (list maxitems:1)

cluster_template_answers ClusterClusterTemplateAnswersArgs

Cluster template answers. Just for Rancher v2.3.x and above (list maxitems:1)

cluster_template_id str

Cluster template ID. Just for Rancher v2.3.x and above (string)

cluster_template_questions Sequence[ClusterClusterTemplateQuestionArgs]

Cluster template questions. Just for Rancher v2.3.x and above (list)

cluster_template_revision_id str

Cluster template revision ID. Just for Rancher v2.3.x and above (string)

default_pod_security_policy_template_id str

Default pod security policy template id (string)

description str

The description for Cluster (string)

desired_agent_image str

Desired agent image. Just for Rancher v2.3.x and above (string)

desired_auth_image str

Desired auth image. Just for Rancher v2.3.x and above (string)

docker_root_dir str

Docker Root Dir for the cluster nodes. Default `/var/lib/docker` (string)

driver str

(Computed) The driver used for the Cluster. imported, azurekubernetesservice, amazonelasticcontainerservice, googlekubernetesengine and rancherKubernetesEngine are supported (string)

eks_config ClusterEksConfigArgs

The Amazon EKS configuration for eks Clusters. Conflicts with aks_config, aks_config_v2, eks_config_v2, gke_config, gke_config_v2, oke_config, k3s_config and rke_config (list maxitems:1)

eks_config_v2 ClusterEksConfigV2Args

The Amazon EKS V2 configuration to create or import eks Clusters. Conflicts with aks_config, eks_config, gke_config, gke_config_v2, oke_config, k3s_config and rke_config. For Rancher v2.5.x or above (list maxitems:1)

enable_cluster_alerting bool

Enable built-in cluster alerting (bool)

enable_cluster_monitoring bool

Enable built-in cluster monitoring (bool)

enable_network_policy bool

Enable project network isolation (bool)

fleet_workspace_name str

Fleet workspace name (string)

gke_config ClusterGkeConfigArgs

The Google GKE configuration for gke Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config_v2, oke_config, k3s_config and rke_config (list maxitems:1)

gke_config_v2 ClusterGkeConfigV2Args

The Google GKE V2 configuration for gke Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config, oke_config, k3s_config and rke_config. For Rancher v2.5.8 or above (list maxitems:1)

k3s_config ClusterK3sConfigArgs

The K3S configuration for k3s imported Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config, gke_config_v2, oke_config and rke_config (list maxitems:1)

labels Mapping[str, Any]

Labels for the Cluster (map)

name str

The name of the Cluster (string)

oke_config ClusterOkeConfigArgs

The Oracle OKE configuration for oke Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config, gke_config_v2, k3s_config and rke_config (list maxitems:1)

rke2_config ClusterRke2ConfigArgs

The RKE2 configuration for rke2 Clusters. Conflicts with aks_config, aks_config_v2, eks_config, gke_config, oke_config, k3s_config and rke_config (list maxitems:1)

rke_config ClusterRkeConfigArgs

The RKE configuration for rke Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config, gke_config_v2, oke_config and k3s_config (list maxitems:1)

scheduled_cluster_scan ClusterScheduledClusterScanArgs

Cluster scheduled cis scan. For Rancher v2.4.0 or above (List maxitems:1)

windows_prefered_cluster bool

Windows preferred cluster. Default: false (bool)

agentEnvVars List<Property Map>

Optional Agent Env Vars for Rancher agent. Just for Rancher v2.5.6 and above (list)

aksConfig Property Map

The Azure AKS configuration for aks Clusters. Conflicts with aks_config_v2, eks_config, eks_config_v2, gke_config, gke_config_v2, oke_config, k3s_config and rke_config (list maxitems:1)

aksConfigV2 Property Map

The Azure AKS v2 configuration for creating/importing aks Clusters. Conflicts with aks_config, eks_config, eks_config_v2, gke_config, gke_config_v2, oke_config, k3s_config and rke_config (list maxitems:1)

annotations Map<Any>

Annotations for the Cluster (map)

clusterAuthEndpoint Property Map

Enabling the local cluster authorized endpoint allows direct communication with the cluster, bypassing the Rancher API proxy. (list maxitems:1)

clusterMonitoringInput Property Map

Cluster monitoring config. Any parameter defined in rancher-monitoring charts could be configured (list maxitems:1)

clusterTemplateAnswers Property Map

Cluster template answers. Just for Rancher v2.3.x and above (list maxitems:1)

clusterTemplateId String

Cluster template ID. Just for Rancher v2.3.x and above (string)

clusterTemplateQuestions List<Property Map>

Cluster template questions. Just for Rancher v2.3.x and above (list)

clusterTemplateRevisionId String

Cluster template revision ID. Just for Rancher v2.3.x and above (string)

defaultPodSecurityPolicyTemplateId String

Default pod security policy template id (string)

description String

The description for Cluster (string)

desiredAgentImage String

Desired agent image. Just for Rancher v2.3.x and above (string)

desiredAuthImage String

Desired auth image. Just for Rancher v2.3.x and above (string)

dockerRootDir String

Docker Root Dir for the cluster nodes. Default `/var/lib/docker` (string)

driver String

(Computed) The driver used for the Cluster. imported, azurekubernetesservice, amazonelasticcontainerservice, googlekubernetesengine and rancherKubernetesEngine are supported (string)

eksConfig Property Map

The Amazon EKS configuration for eks Clusters. Conflicts with aks_config, aks_config_v2, eks_config_v2, gke_config, gke_config_v2, oke_config, k3s_config and rke_config (list maxitems:1)

eksConfigV2 Property Map

The Amazon EKS V2 configuration to create or import eks Clusters. Conflicts with aks_config, eks_config, gke_config, gke_config_v2, oke_config, k3s_config and rke_config. For Rancher v2.5.x or above (list maxitems:1)

enableClusterAlerting Boolean

Enable built-in cluster alerting (bool)

enableClusterMonitoring Boolean

Enable built-in cluster monitoring (bool)

enableNetworkPolicy Boolean

Enable project network isolation (bool)

fleetWorkspaceName String

Fleet workspace name (string)

gkeConfig Property Map

The Google GKE configuration for gke Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config_v2, oke_config, k3s_config and rke_config (list maxitems:1)

gkeConfigV2 Property Map

The Google GKE V2 configuration for gke Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config, oke_config, k3s_config and rke_config. For Rancher v2.5.8 or above (list maxitems:1)

k3sConfig Property Map

The K3S configuration for k3s imported Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config, gke_config_v2, oke_config and rke_config (list maxitems:1)

labels Map<Any>

Labels for the Cluster (map)

name String

The name of the Cluster (string)

okeConfig Property Map

The Oracle OKE configuration for oke Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config, gke_config_v2, k3s_config and rke_config (list maxitems:1)

rke2Config Property Map

The RKE2 configuration for rke2 Clusters. Conflicts with aks_config, aks_config_v2, eks_config, gke_config, oke_config, k3s_config and rke_config (list maxitems:1)

rkeConfig Property Map

The RKE configuration for rke Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config, gke_config_v2, oke_config and k3s_config (list maxitems:1)

scheduledClusterScan Property Map

Cluster scheduled cis scan. For Rancher v2.4.0 or above (List maxitems:1)

windowsPreferedCluster Boolean

Windows preferred cluster. Default: false (bool)

Outputs

All input properties are implicitly available as output properties. Additionally, the Cluster resource produces the following output properties:

CaCert string

TLS CA certificate for etcd service (string)

ClusterRegistrationToken ClusterClusterRegistrationToken

(Computed) Cluster Registration Token generated for the cluster (list maxitems:1)

DefaultProjectId string

(Computed) Default project ID for the cluster (string)

EnableClusterIstio bool

Deploy istio on system project and istio-system namespace, using rancher2.App resource instead. See above example.

Deprecated:

Deploy istio using rancher2_app resource instead

Id string

The provider-assigned unique ID for this managed resource.

IstioEnabled bool

(Computed) Is istio enabled at cluster? Just for Rancher v2.3.x and above (bool)

KubeConfig string

(Computed/Sensitive) Kube Config generated for the cluster. Note: For Rancher 2.6.0 and above, when the cluster has cluster_auth_endpoint enabled, the kube_config will not be available until the cluster is connected (string)

SystemProjectId string

(Computed) System project ID for the cluster (string)

CaCert string

TLS CA certificate for etcd service (string)

ClusterRegistrationToken ClusterClusterRegistrationToken

(Computed) Cluster Registration Token generated for the cluster (list maxitems:1)

DefaultProjectId string

(Computed) Default project ID for the cluster (string)

EnableClusterIstio bool

Deploy istio on system project and istio-system namespace, using rancher2.App resource instead. See above example.

Deprecated:

Deploy istio using rancher2_app resource instead

Id string

The provider-assigned unique ID for this managed resource.

IstioEnabled bool

(Computed) Is istio enabled at cluster? Just for Rancher v2.3.x and above (bool)

KubeConfig string

(Computed/Sensitive) Kube Config generated for the cluster. Note: For Rancher 2.6.0 and above, when the cluster has cluster_auth_endpoint enabled, the kube_config will not be available until the cluster is connected (string)

SystemProjectId string

(Computed) System project ID for the cluster (string)

caCert String

TLS CA certificate for etcd service (string)

clusterRegistrationToken ClusterClusterRegistrationToken

(Computed) Cluster Registration Token generated for the cluster (list maxitems:1)

defaultProjectId String

(Computed) Default project ID for the cluster (string)

enableClusterIstio Boolean

Deploy istio on system project and istio-system namespace, using rancher2.App resource instead. See above example.

Deprecated:

Deploy istio using rancher2_app resource instead

id String

The provider-assigned unique ID for this managed resource.

istioEnabled Boolean

(Computed) Is istio enabled at cluster? Just for Rancher v2.3.x and above (bool)

kubeConfig String

(Computed/Sensitive) Kube Config generated for the cluster. Note: For Rancher 2.6.0 and above, when the cluster has cluster_auth_endpoint enabled, the kube_config will not be available until the cluster is connected (string)

systemProjectId String

(Computed) System project ID for the cluster (string)

caCert string

TLS CA certificate for etcd service (string)

clusterRegistrationToken ClusterClusterRegistrationToken

(Computed) Cluster Registration Token generated for the cluster (list maxitems:1)

defaultProjectId string

(Computed) Default project ID for the cluster (string)

enableClusterIstio boolean

Deploy istio on system project and istio-system namespace, using rancher2.App resource instead. See above example.

Deprecated:

Deploy istio using rancher2_app resource instead

id string

The provider-assigned unique ID for this managed resource.

istioEnabled boolean

(Computed) Is istio enabled at cluster? Just for Rancher v2.3.x and above (bool)

kubeConfig string

(Computed/Sensitive) Kube Config generated for the cluster. Note: For Rancher 2.6.0 and above, when the cluster has cluster_auth_endpoint enabled, the kube_config will not be available until the cluster is connected (string)

systemProjectId string

(Computed) System project ID for the cluster (string)

ca_cert str

TLS CA certificate for etcd service (string)

cluster_registration_token ClusterClusterRegistrationToken

(Computed) Cluster Registration Token generated for the cluster (list maxitems:1)

default_project_id str

(Computed) Default project ID for the cluster (string)

enable_cluster_istio bool

Deploy istio on system project and istio-system namespace, using rancher2.App resource instead. See above example.

Deprecated:

Deploy istio using rancher2_app resource instead

id str

The provider-assigned unique ID for this managed resource.

istio_enabled bool

(Computed) Is istio enabled at cluster? Just for Rancher v2.3.x and above (bool)

kube_config str

(Computed/Sensitive) Kube Config generated for the cluster. Note: For Rancher 2.6.0 and above, when the cluster has cluster_auth_endpoint enabled, the kube_config will not be available until the cluster is connected (string)

system_project_id str

(Computed) System project ID for the cluster (string)

caCert String

TLS CA certificate for etcd service (string)

clusterRegistrationToken Property Map

(Computed) Cluster Registration Token generated for the cluster (list maxitems:1)

defaultProjectId String

(Computed) Default project ID for the cluster (string)

enableClusterIstio Boolean

Deploy istio on system project and istio-system namespace, using rancher2.App resource instead. See above example.

Deprecated:

Deploy istio using rancher2_app resource instead

id String

The provider-assigned unique ID for this managed resource.

istioEnabled Boolean

(Computed) Is istio enabled at cluster? Just for Rancher v2.3.x and above (bool)

kubeConfig String

(Computed/Sensitive) Kube Config generated for the cluster. Note: For Rancher 2.6.0 and above, when the cluster has cluster_auth_endpoint enabled, the kube_config will not be available until the cluster is connected (string)

systemProjectId String

(Computed) System project ID for the cluster (string)

Look up Existing Cluster Resource

Get an existing Cluster resource’s state with the given name, ID, and optional extra properties used to qualify the lookup.

public static get(name: string, id: Input<ID>, state?: ClusterState, opts?: CustomResourceOptions): Cluster
@staticmethod
def get(resource_name: str,
        id: str,
        opts: Optional[ResourceOptions] = None,
        agent_env_vars: Optional[Sequence[ClusterAgentEnvVarArgs]] = None,
        aks_config: Optional[ClusterAksConfigArgs] = None,
        aks_config_v2: Optional[ClusterAksConfigV2Args] = None,
        annotations: Optional[Mapping[str, Any]] = None,
        ca_cert: Optional[str] = None,
        cluster_auth_endpoint: Optional[ClusterClusterAuthEndpointArgs] = None,
        cluster_monitoring_input: Optional[ClusterClusterMonitoringInputArgs] = None,
        cluster_registration_token: Optional[ClusterClusterRegistrationTokenArgs] = None,
        cluster_template_answers: Optional[ClusterClusterTemplateAnswersArgs] = None,
        cluster_template_id: Optional[str] = None,
        cluster_template_questions: Optional[Sequence[ClusterClusterTemplateQuestionArgs]] = None,
        cluster_template_revision_id: Optional[str] = None,
        default_pod_security_policy_template_id: Optional[str] = None,
        default_project_id: Optional[str] = None,
        description: Optional[str] = None,
        desired_agent_image: Optional[str] = None,
        desired_auth_image: Optional[str] = None,
        docker_root_dir: Optional[str] = None,
        driver: Optional[str] = None,
        eks_config: Optional[ClusterEksConfigArgs] = None,
        eks_config_v2: Optional[ClusterEksConfigV2Args] = None,
        enable_cluster_alerting: Optional[bool] = None,
        enable_cluster_istio: Optional[bool] = None,
        enable_cluster_monitoring: Optional[bool] = None,
        enable_network_policy: Optional[bool] = None,
        fleet_workspace_name: Optional[str] = None,
        gke_config: Optional[ClusterGkeConfigArgs] = None,
        gke_config_v2: Optional[ClusterGkeConfigV2Args] = None,
        istio_enabled: Optional[bool] = None,
        k3s_config: Optional[ClusterK3sConfigArgs] = None,
        kube_config: Optional[str] = None,
        labels: Optional[Mapping[str, Any]] = None,
        name: Optional[str] = None,
        oke_config: Optional[ClusterOkeConfigArgs] = None,
        rke2_config: Optional[ClusterRke2ConfigArgs] = None,
        rke_config: Optional[ClusterRkeConfigArgs] = None,
        scheduled_cluster_scan: Optional[ClusterScheduledClusterScanArgs] = None,
        system_project_id: Optional[str] = None,
        windows_prefered_cluster: Optional[bool] = None) -> Cluster
func GetCluster(ctx *Context, name string, id IDInput, state *ClusterState, opts ...ResourceOption) (*Cluster, error)
public static Cluster Get(string name, Input<string> id, ClusterState? state, CustomResourceOptions? opts = null)
public static Cluster get(String name, Output<String> id, ClusterState state, CustomResourceOptions options)
Resource lookup is not supported in YAML
name
The unique name of the resulting resource.
id
The unique provider ID of the resource to lookup.
state
Any extra arguments used during the lookup.
opts
A bag of options that control this resource's behavior.
resource_name
The unique name of the resulting resource.
id
The unique provider ID of the resource to lookup.
name
The unique name of the resulting resource.
id
The unique provider ID of the resource to lookup.
state
Any extra arguments used during the lookup.
opts
A bag of options that control this resource's behavior.
name
The unique name of the resulting resource.
id
The unique provider ID of the resource to lookup.
state
Any extra arguments used during the lookup.
opts
A bag of options that control this resource's behavior.
name
The unique name of the resulting resource.
id
The unique provider ID of the resource to lookup.
state
Any extra arguments used during the lookup.
opts
A bag of options that control this resource's behavior.
The following state arguments are supported:
AgentEnvVars List<ClusterAgentEnvVarArgs>

Optional Agent Env Vars for Rancher agent. Just for Rancher v2.5.6 and above (list)

AksConfig ClusterAksConfigArgs

The Azure AKS configuration for aks Clusters. Conflicts with aks_config_v2, eks_config, eks_config_v2, gke_config, gke_config_v2, oke_config, k3s_config and rke_config (list maxitems:1)

AksConfigV2 ClusterAksConfigV2Args

The Azure AKS v2 configuration for creating/importing aks Clusters. Conflicts with aks_config, eks_config, eks_config_v2, gke_config, gke_config_v2, oke_config, k3s_config and rke_config (list maxitems:1)

Annotations Dictionary<string, object>

Annotations for the Cluster (map)

CaCert string

TLS CA certificate for etcd service (string)

ClusterAuthEndpoint ClusterClusterAuthEndpointArgs

Enabling the local cluster authorized endpoint allows direct communication with the cluster, bypassing the Rancher API proxy. (list maxitems:1)

ClusterMonitoringInput ClusterClusterMonitoringInputArgs

Cluster monitoring config. Any parameter defined in rancher-monitoring charts could be configured (list maxitems:1)

ClusterRegistrationToken ClusterClusterRegistrationTokenArgs

(Computed) Cluster Registration Token generated for the cluster (list maxitems:1)

ClusterTemplateAnswers ClusterClusterTemplateAnswersArgs

Cluster template answers. Just for Rancher v2.3.x and above (list maxitems:1)

ClusterTemplateId string

Cluster template ID. Just for Rancher v2.3.x and above (string)

ClusterTemplateQuestions List<ClusterClusterTemplateQuestionArgs>

Cluster template questions. Just for Rancher v2.3.x and above (list)

ClusterTemplateRevisionId string

Cluster template revision ID. Just for Rancher v2.3.x and above (string)

DefaultPodSecurityPolicyTemplateId string

Default pod security policy template id (string)

DefaultProjectId string

(Computed) Default project ID for the cluster (string)

Description string

The description for Cluster (string)

DesiredAgentImage string

Desired agent image. Just for Rancher v2.3.x and above (string)

DesiredAuthImage string

Desired auth image. Just for Rancher v2.3.x and above (string)

DockerRootDir string

Docker Root Dir for the cluster nodes. Default `/var/lib/docker` (string)

Driver string

(Computed) The driver used for the Cluster. imported, azurekubernetesservice, amazonelasticcontainerservice, googlekubernetesengine and rancherKubernetesEngine are supported (string)

EksConfig ClusterEksConfigArgs

The Amazon EKS configuration for eks Clusters. Conflicts with aks_config, aks_config_v2, eks_config_v2, gke_config, gke_config_v2, oke_config, k3s_config and rke_config (list maxitems:1)

EksConfigV2 ClusterEksConfigV2Args

The Amazon EKS V2 configuration to create or import eks Clusters. Conflicts with aks_config, eks_config, gke_config, gke_config_v2, oke_config, k3s_config and rke_config. For Rancher v2.5.x or above (list maxitems:1)

EnableClusterAlerting bool

Enable built-in cluster alerting (bool)

EnableClusterIstio bool

Deploy istio on system project and istio-system namespace, using rancher2.App resource instead. See above example.

Deprecated:

Deploy istio using rancher2_app resource instead

EnableClusterMonitoring bool

Enable built-in cluster monitoring (bool)

EnableNetworkPolicy bool

Enable project network isolation (bool)

FleetWorkspaceName string

Fleet workspace name (string)

GkeConfig ClusterGkeConfigArgs

The Google GKE configuration for gke Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config_v2, oke_config, k3s_config and rke_config (list maxitems:1)

GkeConfigV2 ClusterGkeConfigV2Args

The Google GKE V2 configuration for gke Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config, oke_config, k3s_config and rke_config. For Rancher v2.5.8 or above (list maxitems:1)

IstioEnabled bool

(Computed) Is istio enabled at cluster? Just for Rancher v2.3.x and above (bool)

K3sConfig ClusterK3sConfigArgs

The K3S configuration for k3s imported Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config, gke_config_v2, oke_config and rke_config (list maxitems:1)

KubeConfig string

(Computed/Sensitive) Kube Config generated for the cluster. Note: For Rancher 2.6.0 and above, when the cluster has cluster_auth_endpoint enabled, the kube_config will not be available until the cluster is connected (string)

Labels Dictionary<string, object>

Labels for the Cluster (map)

Name string

The name of the Cluster (string)

OkeConfig ClusterOkeConfigArgs

The Oracle OKE configuration for oke Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config, gke_config_v2, k3s_config and rke_config (list maxitems:1)

Rke2Config ClusterRke2ConfigArgs

The RKE2 configuration for rke2 Clusters. Conflicts with aks_config, aks_config_v2, eks_config, gke_config, oke_config, k3s_config and rke_config (list maxitems:1)

RkeConfig ClusterRkeConfigArgs

The RKE configuration for rke Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config, gke_config_v2, oke_config and k3s_config (list maxitems:1)

ScheduledClusterScan ClusterScheduledClusterScanArgs

Cluster scheduled cis scan. For Rancher v2.4.0 or above (List maxitems:1)

SystemProjectId string

(Computed) System project ID for the cluster (string)

WindowsPreferedCluster bool

Windows preferred cluster. Default: false (bool)

AgentEnvVars []ClusterAgentEnvVarArgs

Optional Agent Env Vars for Rancher agent. Just for Rancher v2.5.6 and above (list)

AksConfig ClusterAksConfigArgs

The Azure AKS configuration for aks Clusters. Conflicts with aks_config_v2, eks_config, eks_config_v2, gke_config, gke_config_v2, oke_config, k3s_config and rke_config (list maxitems:1)

AksConfigV2 ClusterAksConfigV2Args

The Azure AKS v2 configuration for creating/importing aks Clusters. Conflicts with aks_config, eks_config, eks_config_v2, gke_config, gke_config_v2, oke_config, k3s_config and rke_config (list maxitems:1)

Annotations map[string]interface{}

Annotations for the Cluster (map)

CaCert string

TLS CA certificate for etcd service (string)

ClusterAuthEndpoint ClusterClusterAuthEndpointArgs

Enabling the local cluster authorized endpoint allows direct communication with the cluster, bypassing the Rancher API proxy. (list maxitems:1)

ClusterMonitoringInput ClusterClusterMonitoringInputArgs

Cluster monitoring config. Any parameter defined in rancher-monitoring charts could be configured (list maxitems:1)

ClusterRegistrationToken ClusterClusterRegistrationTokenArgs

(Computed) Cluster Registration Token generated for the cluster (list maxitems:1)

ClusterTemplateAnswers ClusterClusterTemplateAnswersArgs

Cluster template answers. Just for Rancher v2.3.x and above (list maxitems:1)

ClusterTemplateId string

Cluster template ID. Just for Rancher v2.3.x and above (string)

ClusterTemplateQuestions []ClusterClusterTemplateQuestionArgs

Cluster template questions. Just for Rancher v2.3.x and above (list)

ClusterTemplateRevisionId string

Cluster template revision ID. Just for Rancher v2.3.x and above (string)

DefaultPodSecurityPolicyTemplateId string

Default pod security policy template id (string)

DefaultProjectId string

(Computed) Default project ID for the cluster (string)

Description string

The description for Cluster (string)

DesiredAgentImage string

Desired agent image. Just for Rancher v2.3.x and above (string)

DesiredAuthImage string

Desired auth image. Just for Rancher v2.3.x and above (string)

DockerRootDir string

Docker Root Dir for the cluster nodes. Default `/var/lib/docker` (string)

Driver string

(Computed) The driver used for the Cluster. imported, azurekubernetesservice, amazonelasticcontainerservice, googlekubernetesengine and rancherKubernetesEngine are supported (string)

EksConfig ClusterEksConfigArgs

The Amazon EKS configuration for eks Clusters. Conflicts with aks_config, aks_config_v2, eks_config_v2, gke_config, gke_config_v2, oke_config, k3s_config and rke_config (list maxitems:1)

EksConfigV2 ClusterEksConfigV2Args

The Amazon EKS V2 configuration to create or import eks Clusters. Conflicts with aks_config, eks_config, gke_config, gke_config_v2, oke_config, k3s_config and rke_config. For Rancher v2.5.x or above (list maxitems:1)

EnableClusterAlerting bool

Enable built-in cluster alerting (bool)

EnableClusterIstio bool

Deploy istio on system project and istio-system namespace, using rancher2.App resource instead. See above example.

Deprecated:

Deploy istio using rancher2_app resource instead

EnableClusterMonitoring bool

Enable built-in cluster monitoring (bool)

EnableNetworkPolicy bool

Enable project network isolation (bool)

FleetWorkspaceName string

Fleet workspace name (string)

GkeConfig ClusterGkeConfigArgs

The Google GKE configuration for gke Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config_v2, oke_config, k3s_config and rke_config (list maxitems:1)

GkeConfigV2 ClusterGkeConfigV2Args

The Google GKE V2 configuration for gke Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config, oke_config, k3s_config and rke_config. For Rancher v2.5.8 or above (list maxitems:1)

IstioEnabled bool

(Computed) Is istio enabled at cluster? Just for Rancher v2.3.x and above (bool)

K3sConfig ClusterK3sConfigArgs

The K3S configuration for k3s imported Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config, gke_config_v2, oke_config and rke_config (list maxitems:1)

KubeConfig string

(Computed/Sensitive) Kube Config generated for the cluster. Note: For Rancher 2.6.0 and above, when the cluster has cluster_auth_endpoint enabled, the kube_config will not be available until the cluster is connected (string)

Labels map[string]interface{}

Labels for the Cluster (map)

Name string

The name of the Cluster (string)

OkeConfig ClusterOkeConfigArgs

The Oracle OKE configuration for oke Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config, gke_config_v2, k3s_config and rke_config (list maxitems:1)

Rke2Config ClusterRke2ConfigArgs

The RKE2 configuration for rke2 Clusters. Conflicts with aks_config, aks_config_v2, eks_config, gke_config, oke_config, k3s_config and rke_config (list maxitems:1)

RkeConfig ClusterRkeConfigArgs

The RKE configuration for rke Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config, gke_config_v2, oke_config and k3s_config (list maxitems:1)

ScheduledClusterScan ClusterScheduledClusterScanArgs

Cluster scheduled cis scan. For Rancher v2.4.0 or above (List maxitems:1)

SystemProjectId string

(Computed) System project ID for the cluster (string)

WindowsPreferedCluster bool

Windows preferred cluster. Default: false (bool)

agentEnvVars List<ClusterAgentEnvVarArgs>

Optional Agent Env Vars for Rancher agent. Just for Rancher v2.5.6 and above (list)

aksConfig ClusterAksConfigArgs

The Azure AKS configuration for aks Clusters. Conflicts with aks_config_v2, eks_config, eks_config_v2, gke_config, gke_config_v2, oke_config, k3s_config and rke_config (list maxitems:1)

aksConfigV2 ClusterAksConfigV2Args

The Azure AKS v2 configuration for creating/importing aks Clusters. Conflicts with aks_config, eks_config, eks_config_v2, gke_config, gke_config_v2, oke_config, k3s_config and rke_config (list maxitems:1)

annotations Map<String,Object>

Annotations for the Cluster (map)

caCert String

TLS CA certificate for etcd service (string)

clusterAuthEndpoint ClusterClusterAuthEndpointArgs

Enabling the local cluster authorized endpoint allows direct communication with the cluster, bypassing the Rancher API proxy. (list maxitems:1)

clusterMonitoringInput ClusterClusterMonitoringInputArgs

Cluster monitoring config. Any parameter defined in rancher-monitoring charts could be configured (list maxitems:1)

clusterRegistrationToken ClusterClusterRegistrationTokenArgs

(Computed) Cluster Registration Token generated for the cluster (list maxitems:1)

clusterTemplateAnswers ClusterClusterTemplateAnswersArgs

Cluster template answers. Just for Rancher v2.3.x and above (list maxitems:1)

clusterTemplateId String

Cluster template ID. Just for Rancher v2.3.x and above (string)

clusterTemplateQuestions List<ClusterClusterTemplateQuestionArgs>

Cluster template questions. Just for Rancher v2.3.x and above (list)

clusterTemplateRevisionId String

Cluster template revision ID. Just for Rancher v2.3.x and above (string)

defaultPodSecurityPolicyTemplateId String

Default pod security policy template id (string)

defaultProjectId String

(Computed) Default project ID for the cluster (string)

description String

The description for Cluster (string)

desiredAgentImage String

Desired agent image. Just for Rancher v2.3.x and above (string)

desiredAuthImage String

Desired auth image. Just for Rancher v2.3.x and above (string)

dockerRootDir String

Docker Root Dir for the cluster nodes. Default /var/lib/docker (string)

driver String

(Computed) The driver used for the Cluster. imported, azurekubernetesservice, amazonelasticcontainerservice, googlekubernetesengine and rancherKubernetesEngine are supported (string)

eksConfig ClusterEksConfigArgs

The Amazon EKS configuration for eks Clusters. Conflicts with aks_config, aks_config_v2, eks_config_v2, gke_config, gke_config_v2, oke_config, k3s_config and rke_config (list maxitems:1)

eksConfigV2 ClusterEksConfigV2Args

The Amazon EKS V2 configuration to create or import eks Clusters. Conflicts with aks_config, eks_config, gke_config, gke_config_v2, oke_config, k3s_config and rke_config. For Rancher v2.5.x or above (list maxitems:1)

enableClusterAlerting Boolean

Enable built-in cluster alerting (bool)

enableClusterIstio Boolean

Deploy istio on system project and istio-system namespace, using rancher2.App resource instead. See above example.

Deprecated:

Deploy istio using rancher2_app resource instead

enableClusterMonitoring Boolean

Enable built-in cluster monitoring (bool)

enableNetworkPolicy Boolean

Enable project network isolation (bool)

fleetWorkspaceName String

Fleet workspace name (string)

gkeConfig ClusterGkeConfigArgs

The Google GKE configuration for gke Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config_v2, oke_config, k3s_config and rke_config (list maxitems:1)

gkeConfigV2 ClusterGkeConfigV2Args

The Google GKE V2 configuration for gke Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config, oke_config, k3s_config and rke_config. For Rancher v2.5.8 or above (list maxitems:1)

istioEnabled Boolean

(Computed) Is istio enabled at cluster? Just for Rancher v2.3.x and above (bool)

k3sConfig ClusterK3sConfigArgs

The K3S configuration for k3s imported Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config, gke_config_v2, oke_config and rke_config (list maxitems:1)

kubeConfig String

(Computed/Sensitive) Kube Config generated for the cluster. Note: For Rancher 2.6.0 and above, when the cluster has cluster_auth_endpoint enabled, the kube_config will not be available until the cluster is connected (string)

labels Map<String,Object>

Labels for the Cluster (map)

name String

The name of the Cluster (string)

okeConfig ClusterOkeConfigArgs

The Oracle OKE configuration for oke Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config, gke_config_v2, k3s_config and rke_config (list maxitems:1)

rke2Config ClusterRke2ConfigArgs

The RKE2 configuration for rke2 Clusters. Conflicts with aks_config, aks_config_v2, eks_config, gke_config, oke_config, k3s_config and rke_config (list maxitems:1)

rkeConfig ClusterRkeConfigArgs

The RKE configuration for rke Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config, gke_config_v2, oke_config and k3s_config (list maxitems:1)

scheduledClusterScan ClusterScheduledClusterScanArgs

Cluster scheduled cis scan. For Rancher v2.4.0 or above (List maxitems:1)

systemProjectId String

(Computed) System project ID for the cluster (string)

windowsPreferedCluster Boolean

Windows preferred cluster. Default: false (bool)

agentEnvVars ClusterAgentEnvVarArgs[]

Optional Agent Env Vars for Rancher agent. Just for Rancher v2.5.6 and above (list)

aksConfig ClusterAksConfigArgs

The Azure AKS configuration for aks Clusters. Conflicts with aks_config_v2, eks_config, eks_config_v2, gke_config, gke_config_v2, oke_config, k3s_config and rke_config (list maxitems:1)

aksConfigV2 ClusterAksConfigV2Args

The Azure AKS v2 configuration for creating/importing aks Clusters. Conflicts with aks_config, eks_config, eks_config_v2, gke_config, gke_config_v2, oke_config, k3s_config and rke_config (list maxitems:1)

annotations {[key: string]: any}

Annotations for the Cluster (map)

caCert string

TLS CA certificate for etcd service (string)

clusterAuthEndpoint ClusterClusterAuthEndpointArgs

Enabling the local cluster authorized endpoint allows direct communication with the cluster, bypassing the Rancher API proxy. (list maxitems:1)

clusterMonitoringInput ClusterClusterMonitoringInputArgs

Cluster monitoring config. Any parameter defined in rancher-monitoring charts could be configured (list maxitems:1)

clusterRegistrationToken ClusterClusterRegistrationTokenArgs

(Computed) Cluster Registration Token generated for the cluster (list maxitems:1)

clusterTemplateAnswers ClusterClusterTemplateAnswersArgs

Cluster template answers. Just for Rancher v2.3.x and above (list maxitems:1)

clusterTemplateId string

Cluster template ID. Just for Rancher v2.3.x and above (string)

clusterTemplateQuestions ClusterClusterTemplateQuestionArgs[]

Cluster template questions. Just for Rancher v2.3.x and above (list)

clusterTemplateRevisionId string

Cluster template revision ID. Just for Rancher v2.3.x and above (string)

defaultPodSecurityPolicyTemplateId string

Default pod security policy template id (string)

defaultProjectId string

(Computed) Default project ID for the cluster (string)

description string

The description for Cluster (string)

desiredAgentImage string

Desired agent image. Just for Rancher v2.3.x and above (string)

desiredAuthImage string

Desired auth image. Just for Rancher v2.3.x and above (string)

dockerRootDir string

Docker Root Dir for the cluster nodes. Default /var/lib/docker (string)

driver string

(Computed) The driver used for the Cluster. imported, azurekubernetesservice, amazonelasticcontainerservice, googlekubernetesengine and rancherKubernetesEngine are supported (string)

eksConfig ClusterEksConfigArgs

The Amazon EKS configuration for eks Clusters. Conflicts with aks_config, aks_config_v2, eks_config_v2, gke_config, gke_config_v2, oke_config, k3s_config and rke_config (list maxitems:1)

eksConfigV2 ClusterEksConfigV2Args

The Amazon EKS V2 configuration to create or import eks Clusters. Conflicts with aks_config, eks_config, gke_config, gke_config_v2, oke_config, k3s_config and rke_config. For Rancher v2.5.x or above (list maxitems:1)

enableClusterAlerting boolean

Enable built-in cluster alerting (bool)

enableClusterIstio boolean

Deploy istio on system project and istio-system namespace, using rancher2.App resource instead. See above example.

Deprecated:

Deploy istio using rancher2_app resource instead

enableClusterMonitoring boolean

Enable built-in cluster monitoring (bool)

enableNetworkPolicy boolean

Enable project network isolation (bool)

fleetWorkspaceName string

Fleet workspace name (string)

gkeConfig ClusterGkeConfigArgs

The Google GKE configuration for gke Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config_v2, oke_config, k3s_config and rke_config (list maxitems:1)

gkeConfigV2 ClusterGkeConfigV2Args

The Google GKE V2 configuration for gke Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config, oke_config, k3s_config and rke_config. For Rancher v2.5.8 or above (list maxitems:1)

istioEnabled boolean

(Computed) Is istio enabled at cluster? Just for Rancher v2.3.x and above (bool)

k3sConfig ClusterK3sConfigArgs

The K3S configuration for k3s imported Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config, gke_config_v2, oke_config and rke_config (list maxitems:1)

kubeConfig string

(Computed/Sensitive) Kube Config generated for the cluster. Note: For Rancher 2.6.0 and above, when the cluster has cluster_auth_endpoint enabled, the kube_config will not be available until the cluster is connected (string)

labels {[key: string]: any}

Labels for the Cluster (map)

name string

The name of the Cluster (string)

okeConfig ClusterOkeConfigArgs

The Oracle OKE configuration for oke Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config, gke_config_v2, k3s_config and rke_config (list maxitems:1)

rke2Config ClusterRke2ConfigArgs

The RKE2 configuration for rke2 Clusters. Conflicts with aks_config, aks_config_v2, eks_config, gke_config, oke_config, k3s_config and rke_config (list maxitems:1)

rkeConfig ClusterRkeConfigArgs

The RKE configuration for rke Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config, gke_config_v2, oke_config and k3s_config (list maxitems:1)

scheduledClusterScan ClusterScheduledClusterScanArgs

Cluster scheduled cis scan. For Rancher v2.4.0 or above (List maxitems:1)

systemProjectId string

(Computed) System project ID for the cluster (string)

windowsPreferedCluster boolean

Windows preferred cluster. Default: false (bool)

agent_env_vars Sequence[ClusterAgentEnvVarArgs]

Optional Agent Env Vars for Rancher agent. Just for Rancher v2.5.6 and above (list)

aks_config ClusterAksConfigArgs

The Azure AKS configuration for aks Clusters. Conflicts with aks_config_v2, eks_config, eks_config_v2, gke_config, gke_config_v2, oke_config, k3s_config and rke_config (list maxitems:1)

aks_config_v2 ClusterAksConfigV2Args

The Azure AKS v2 configuration for creating/importing aks Clusters. Conflicts with aks_config, eks_config, eks_config_v2, gke_config, gke_config_v2, oke_config, k3s_config and rke_config (list maxitems:1)

annotations Mapping[str, Any]

Annotations for the Cluster (map)

ca_cert str

TLS CA certificate for etcd service (string)

cluster_auth_endpoint ClusterClusterAuthEndpointArgs

Enabling the local cluster authorized endpoint allows direct communication with the cluster, bypassing the Rancher API proxy. (list maxitems:1)

cluster_monitoring_input ClusterClusterMonitoringInputArgs

Cluster monitoring config. Any parameter defined in rancher-monitoring charts could be configured (list maxitems:1)

cluster_registration_token ClusterClusterRegistrationTokenArgs

(Computed) Cluster Registration Token generated for the cluster (list maxitems:1)

cluster_template_answers ClusterClusterTemplateAnswersArgs

Cluster template answers. Just for Rancher v2.3.x and above (list maxitems:1)

cluster_template_id str

Cluster template ID. Just for Rancher v2.3.x and above (string)

cluster_template_questions Sequence[ClusterClusterTemplateQuestionArgs]

Cluster template questions. Just for Rancher v2.3.x and above (list)

cluster_template_revision_id str

Cluster template revision ID. Just for Rancher v2.3.x and above (string)

default_pod_security_policy_template_id str

Default pod security policy template id (string)

default_project_id str

(Computed) Default project ID for the cluster (string)

description str

The description for Cluster (string)

desired_agent_image str

Desired agent image. Just for Rancher v2.3.x and above (string)

desired_auth_image str

Desired auth image. Just for Rancher v2.3.x and above (string)

docker_root_dir str

Docker Root Dir for the cluster nodes. Default /var/lib/docker (string)

driver str

(Computed) The driver used for the Cluster. imported, azurekubernetesservice, amazonelasticcontainerservice, googlekubernetesengine and rancherKubernetesEngine are supported (string)

eks_config ClusterEksConfigArgs

The Amazon EKS configuration for eks Clusters. Conflicts with aks_config, aks_config_v2, eks_config_v2, gke_config, gke_config_v2, oke_config, k3s_config and rke_config (list maxitems:1)

eks_config_v2 ClusterEksConfigV2Args

The Amazon EKS V2 configuration to create or import eks Clusters. Conflicts with aks_config, eks_config, gke_config, gke_config_v2, oke_config, k3s_config and rke_config. For Rancher v2.5.x or above (list maxitems:1)

enable_cluster_alerting bool

Enable built-in cluster alerting (bool)

enable_cluster_istio bool

Deploy istio on system project and istio-system namespace, using rancher2.App resource instead. See above example.

Deprecated:

Deploy istio using rancher2_app resource instead

enable_cluster_monitoring bool

Enable built-in cluster monitoring (bool)

enable_network_policy bool

Enable project network isolation (bool)

fleet_workspace_name str

Fleet workspace name (string)

gke_config ClusterGkeConfigArgs

The Google GKE configuration for gke Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config_v2, oke_config, k3s_config and rke_config (list maxitems:1)

gke_config_v2 ClusterGkeConfigV2Args

The Google GKE V2 configuration for gke Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config, oke_config, k3s_config and rke_config. For Rancher v2.5.8 or above (list maxitems:1)

istio_enabled bool

(Computed) Is istio enabled at cluster? Just for Rancher v2.3.x and above (bool)

k3s_config ClusterK3sConfigArgs

The K3S configuration for k3s imported Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config, gke_config_v2, oke_config and rke_config (list maxitems:1)

kube_config str

(Computed/Sensitive) Kube Config generated for the cluster. Note: For Rancher 2.6.0 and above, when the cluster has cluster_auth_endpoint enabled, the kube_config will not be available until the cluster is connected (string)

labels Mapping[str, Any]

Labels for the Cluster (map)

name str

The name of the Cluster (string)

oke_config ClusterOkeConfigArgs

The Oracle OKE configuration for oke Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config, gke_config_v2, k3s_config and rke_config (list maxitems:1)

rke2_config ClusterRke2ConfigArgs

The RKE2 configuration for rke2 Clusters. Conflicts with aks_config, aks_config_v2, eks_config, gke_config, oke_config, k3s_config and rke_config (list maxitems:1)

rke_config ClusterRkeConfigArgs

The RKE configuration for rke Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config, gke_config_v2, oke_config and k3s_config (list maxitems:1)

scheduled_cluster_scan ClusterScheduledClusterScanArgs

Cluster scheduled cis scan. For Rancher v2.4.0 or above (List maxitems:1)

system_project_id str

(Computed) System project ID for the cluster (string)

windows_prefered_cluster bool

Windows preferred cluster. Default: false (bool)

agentEnvVars List<Property Map>

Optional Agent Env Vars for Rancher agent. Just for Rancher v2.5.6 and above (list)

aksConfig Property Map

The Azure AKS configuration for aks Clusters. Conflicts with aks_config_v2, eks_config, eks_config_v2, gke_config, gke_config_v2, oke_config, k3s_config and rke_config (list maxitems:1)

aksConfigV2 Property Map

The Azure AKS v2 configuration for creating/importing aks Clusters. Conflicts with aks_config, eks_config, eks_config_v2, gke_config, gke_config_v2, oke_config, k3s_config and rke_config (list maxitems:1)

annotations Map<Any>

Annotations for the Cluster (map)

caCert String

TLS CA certificate for etcd service (string)

clusterAuthEndpoint Property Map

Enabling the local cluster authorized endpoint allows direct communication with the cluster, bypassing the Rancher API proxy. (list maxitems:1)

clusterMonitoringInput Property Map

Cluster monitoring config. Any parameter defined in rancher-monitoring charts could be configured (list maxitems:1)

clusterRegistrationToken Property Map

(Computed) Cluster Registration Token generated for the cluster (list maxitems:1)

clusterTemplateAnswers Property Map

Cluster template answers. Just for Rancher v2.3.x and above (list maxitems:1)

clusterTemplateId String

Cluster template ID. Just for Rancher v2.3.x and above (string)

clusterTemplateQuestions List<Property Map>

Cluster template questions. Just for Rancher v2.3.x and above (list)

clusterTemplateRevisionId String

Cluster template revision ID. Just for Rancher v2.3.x and above (string)

defaultPodSecurityPolicyTemplateId String

Default pod security policy template id (string)

defaultProjectId String

(Computed) Default project ID for the cluster (string)

description String

The description for Cluster (string)

desiredAgentImage String

Desired agent image. Just for Rancher v2.3.x and above (string)

desiredAuthImage String

Desired auth image. Just for Rancher v2.3.x and above (string)

dockerRootDir String

Docker Root Dir for the cluster nodes. Default /var/lib/docker (string)

driver String

(Computed) The driver used for the Cluster. imported, azurekubernetesservice, amazonelasticcontainerservice, googlekubernetesengine and rancherKubernetesEngine are supported (string)

eksConfig Property Map

The Amazon EKS configuration for eks Clusters. Conflicts with aks_config, aks_config_v2, eks_config_v2, gke_config, gke_config_v2, oke_config, k3s_config and rke_config (list maxitems:1)

eksConfigV2 Property Map

The Amazon EKS V2 configuration to create or import eks Clusters. Conflicts with aks_config, eks_config, gke_config, gke_config_v2, oke_config, k3s_config and rke_config. For Rancher v2.5.x or above (list maxitems:1)

enableClusterAlerting Boolean

Enable built-in cluster alerting (bool)

enableClusterIstio Boolean

Deploy istio on system project and istio-system namespace, using rancher2.App resource instead. See above example.

Deprecated:

Deploy istio using rancher2_app resource instead

enableClusterMonitoring Boolean

Enable built-in cluster monitoring (bool)

enableNetworkPolicy Boolean

Enable project network isolation (bool)

fleetWorkspaceName String

Fleet workspace name (string)

gkeConfig Property Map

The Google GKE configuration for gke Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config_v2, oke_config, k3s_config and rke_config (list maxitems:1)

gkeConfigV2 Property Map

The Google GKE V2 configuration for gke Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config, oke_config, k3s_config and rke_config. For Rancher v2.5.8 or above (list maxitems:1)

istioEnabled Boolean

(Computed) Is istio enabled at cluster? Just for Rancher v2.3.x and above (bool)

k3sConfig Property Map

The K3S configuration for k3s imported Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config, gke_config_v2, oke_config and rke_config (list maxitems:1)

kubeConfig String

(Computed/Sensitive) Kube Config generated for the cluster. Note: For Rancher 2.6.0 and above, when the cluster has cluster_auth_endpoint enabled, the kube_config will not be available until the cluster is connected (string)

labels Map<Any>

Labels for the Cluster (map)

name String

The name of the Cluster (string)

okeConfig Property Map

The Oracle OKE configuration for oke Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config, gke_config_v2, k3s_config and rke_config (list maxitems:1)

rke2Config Property Map

The RKE2 configuration for rke2 Clusters. Conflicts with aks_config, aks_config_v2, eks_config, gke_config, oke_config, k3s_config and rke_config (list maxitems:1)

rkeConfig Property Map

The RKE configuration for rke Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config, gke_config_v2, oke_config and k3s_config (list maxitems:1)

scheduledClusterScan Property Map

Cluster scheduled cis scan. For Rancher v2.4.0 or above (List maxitems:1)

systemProjectId String

(Computed) System project ID for the cluster (string)

windowsPreferedCluster Boolean

Windows preferred cluster. Default: false (bool)

Supporting Types

ClusterAgentEnvVar

Name string

The name of the Cluster (string)

Value string

Rancher agent env var value (string)

Name string

The name of the Cluster (string)

Value string

Rancher agent env var value (string)

name String

The name of the Cluster (string)

value String

Rancher agent env var value (string)

name string

The name of the Cluster (string)

value string

Rancher agent env var value (string)

name str

The name of the Cluster (string)

value str

Rancher agent env var value (string)

name String

The name of the Cluster (string)

value String

Rancher agent env var value (string)

ClusterAksConfig

AgentDnsPrefix string

DNS prefix to be used to create the FQDN for the agent pool (string)

ClientId string

Azure client ID to use (string)

ClientSecret string

Azure client secret associated with the "client id" (string)

KubernetesVersion string

K8s version to deploy. Default: Rancher default (string) (Note - if rke_config is set at cluster_template, kubernetes_version must be set to the active cluster version so Rancher can clone the RKE template)

MasterDnsPrefix string

DNS prefix to be used for the Kubernetes cluster control plane (string)

ResourceGroup string

(string)

SshPublicKeyContents string

Contents of the SSH public key used to authenticate with Linux hosts (string)

Subnet string

The name of an existing Azure Virtual Subnet. Composite of agent virtual network subnet ID (string)

SubscriptionId string

(string)

TenantId string

(string)

VirtualNetwork string

The name of an existing Azure Virtual Network. Composite of agent virtual network subnet ID (string)

VirtualNetworkResourceGroup string

The resource group of an existing Azure Virtual Network. Composite of agent virtual network subnet ID (string)

AadServerAppSecret string

The secret of an Azure Active Directory server application (string)

AadTenantId string

The ID of an Azure Active Directory tenant (string)

AddClientAppId string

The ID of an Azure Active Directory client application of type "Native". This application is for user login via kubectl (string)

AddServerAppId string

The ID of an Azure Active Directory server application of type "Web app/API". This application represents the managed cluster's apiserver (Server application) (string)

AdminUsername string

The administrator username to use for Linux hosts. Default azureuser (string)

AgentOsDiskSize int

GB size to be used to specify the disk for every machine in the agent pool. If you specify 0, it will apply the default according to the "agent vm size" specified. Default 0 (int)

AgentPoolName string

Name for the agent pool, up to 12 alphanumeric characters. Default agentpool0 (string)

AgentStorageProfile string

Storage profile specifies what kind of storage is used on machines in the agent pool. Chooses from [ManagedDisks StorageAccount]. Default ManagedDisks (string)

AgentVmSize string

Size of machine in the agent pool. Default Standard_D1_v2 (string)

AuthBaseUrl string

Different authentication API url to use. Default https://login.microsoftonline.com/ (string)

BaseUrl string

Different resource management API url to use. Default https://management.azure.com/ (string)

Count int

Number of machines (VMs) in the agent pool. Allowed values must be in the range of 1 to 100 (inclusive). Default 1 (int)

DnsServiceIp string

An IP address assigned to the Kubernetes DNS service. It must be within the Kubernetes Service address range specified in "service cidr". Default 10.0.0.10 (string)

DockerBridgeCidr string

A CIDR notation IP range assigned to the Docker bridge network. It must not overlap with any Subnet IP ranges or the Kubernetes Service address range specified in "service cidr". Default 172.17.0.1/16 (string)

EnableHttpApplicationRouting bool

Enable the Kubernetes ingress with automatic public DNS name creation. Default false (bool)

EnableMonitoring bool

Turn on Azure Log Analytics monitoring. Uses the Log Analytics "Default" workspace if it exists, else creates one. if using an existing workspace, specifies "log analytics workspace resource id". Default true (bool)

LoadBalancerSku string

Allowed values: basic (default) standard (string)

Location string

(string)

LogAnalyticsWorkspace string

The name of an existing Azure Log Analytics Workspace to use for storing monitoring data. If not specified, uses '{resource group}-{subscription id}-{location code}' (string)

LogAnalyticsWorkspaceResourceGroup string

The resource group of an existing Azure Log Analytics Workspace to use for storing monitoring data. If not specified, uses the 'Cluster' resource group (string)

MaxPods int

Maximum number of pods that can run on a node. Default 110 (int)

NetworkPlugin string

Network plugin used for building Kubernetes network. Chooses from azure or kubenet. Default azure (string)

NetworkPolicy string

Network policy used for building Kubernetes network. Chooses from calico (string)

PodCidr string

A CIDR notation IP range from which to assign Kubernetes Pod IPs when "network plugin" is specified in "kubenet". Default 172.244.0.0/16 (string)

ServiceCidr string

A CIDR notation IP range from which to assign Kubernetes Service cluster IPs. It must not overlap with any Subnet IP ranges. Default 10.0.0.0/16 (string)

Tag Dictionary<string, object>

Use tags argument instead as []string

Deprecated:

Use tags argument instead as []string

Tags List<string>

Tags for Kubernetes cluster. For example, ["foo=bar","bar=foo"] (list)

AgentDnsPrefix string

DNS prefix to be used to create the FQDN for the agent pool (string)

ClientId string

Azure client ID to use (string)

ClientSecret string

Azure client secret associated with the "client id" (string)

KubernetesVersion string

K8s version to deploy. Default: Rancher default (string) (Note - if rke_config is set at cluster_template, kubernetes_version must be set to the active cluster version so Rancher can clone the RKE template)

MasterDnsPrefix string

DNS prefix to be used for the Kubernetes cluster control plane (string)

ResourceGroup string

(string)

SshPublicKeyContents string

Contents of the SSH public key used to authenticate with Linux hosts (string)

Subnet string

The name of an existing Azure Virtual Subnet. Composite of agent virtual network subnet ID (string)

SubscriptionId string

(string)

TenantId string

(string)

VirtualNetwork string

The name of an existing Azure Virtual Network. Composite of agent virtual network subnet ID (string)

VirtualNetworkResourceGroup string

The resource group of an existing Azure Virtual Network. Composite of agent virtual network subnet ID (string)

AadServerAppSecret string

The secret of an Azure Active Directory server application (string)

AadTenantId string

The ID of an Azure Active Directory tenant (string)

AddClientAppId string

The ID of an Azure Active Directory client application of type "Native". This application is for user login via kubectl (string)

AddServerAppId string

The ID of an Azure Active Directory server application of type "Web app/API". This application represents the managed cluster's apiserver (Server application) (string)

AdminUsername string

The administrator username to use for Linux hosts. Default azureuser (string)

AgentOsDiskSize int

GB size to be used to specify the disk for every machine in the agent pool. If you specify 0, it will apply the default according to the "agent vm size" specified. Default 0 (int)

AgentPoolName string

Name for the agent pool, up to 12 alphanumeric characters. Default agentpool0 (string)

AgentStorageProfile string

Storage profile specifies what kind of storage is used on machines in the agent pool. Chooses from [ManagedDisks StorageAccount]. Default ManagedDisks (string)

AgentVmSize string

Size of machine in the agent pool. Default Standard_D1_v2 (string)

AuthBaseUrl string

Different authentication API url to use. Default https://login.microsoftonline.com/ (string)

BaseUrl string

Different resource management API url to use. Default https://management.azure.com/ (string)

Count int

Number of machines (VMs) in the agent pool. Allowed values must be in the range of 1 to 100 (inclusive). Default 1 (int)

DnsServiceIp string

An IP address assigned to the Kubernetes DNS service. It must be within the Kubernetes Service address range specified in "service cidr". Default 10.0.0.10 (string)

DockerBridgeCidr string

A CIDR notation IP range assigned to the Docker bridge network. It must not overlap with any Subnet IP ranges or the Kubernetes Service address range specified in "service cidr". Default 172.17.0.1/16 (string)

EnableHttpApplicationRouting bool

Enable the Kubernetes ingress with automatic public DNS name creation. Default false (bool)

EnableMonitoring bool

Turn on Azure Log Analytics monitoring. Uses the Log Analytics "Default" workspace if it exists, else creates one. if using an existing workspace, specifies "log analytics workspace resource id". Default true (bool)

LoadBalancerSku string

Allowed values: basic (default) standard (string)

Location string

(string)

LogAnalyticsWorkspace string

The name of an existing Azure Log Analytics Workspace to use for storing monitoring data. If not specified, uses '{resource group}-{subscription id}-{location code}' (string)

LogAnalyticsWorkspaceResourceGroup string

The resource group of an existing Azure Log Analytics Workspace to use for storing monitoring data. If not specified, uses the 'Cluster' resource group (string)

MaxPods int

Maximum number of pods that can run on a node. Default 110 (int)

NetworkPlugin string

Network plugin used for building Kubernetes network. Chooses from azure or kubenet. Default azure (string)

NetworkPolicy string

Network policy used for building Kubernetes network. Chooses from calico (string)

PodCidr string

A CIDR notation IP range from which to assign Kubernetes Pod IPs when "network plugin" is specified in "kubenet". Default 172.244.0.0/16 (string)

ServiceCidr string

A CIDR notation IP range from which to assign Kubernetes Service cluster IPs. It must not overlap with any Subnet IP ranges. Default 10.0.0.0/16 (string)

Tag map[string]interface{}

Use tags argument instead as []string

Deprecated:

Use tags argument instead as []string

Tags []string

Tags for Kubernetes cluster. For example, ["foo=bar","bar=foo"] (list)

agentDnsPrefix String

DNS prefix to be used to create the FQDN for the agent pool (string)

clientId String

Azure client ID to use (string)

clientSecret String

Azure client secret associated with the "client id" (string)

kubernetesVersion String

K8s version to deploy. Default: Rancher default (string) (Note - if rke_config is set at cluster_template, kubernetes_version must be set to the active cluster version so Rancher can clone the RKE template)

masterDnsPrefix String

DNS prefix to be used for the Kubernetes cluster control plane (string)

resourceGroup String

(string)

sshPublicKeyContents String

Contents of the SSH public key used to authenticate with Linux hosts (string)

subnet String

The name of an existing Azure Virtual Subnet. Composite of agent virtual network subnet ID (string)

subscriptionId String

(string)

tenantId String

(string)

virtualNetwork String

The name of an existing Azure Virtual Network. Composite of agent virtual network subnet ID (string)

virtualNetworkResourceGroup String

The resource group of an existing Azure Virtual Network. Composite of agent virtual network subnet ID (string)

aadServerAppSecret String

The secret of an Azure Active Directory server application (string)

aadTenantId String

The ID of an Azure Active Directory tenant (string)

addClientAppId String

The ID of an Azure Active Directory client application of type "Native". This application is for user login via kubectl (string)

addServerAppId String

The ID of an Azure Active Directory server application of type "Web app/API". This application represents the managed cluster's apiserver (Server application) (string)

adminUsername String

The administrator username to use for Linux hosts. Default azureuser (string)

agentOsDiskSize Integer

GB size to be used to specify the disk for every machine in the agent pool. If you specify 0, it will apply the default according to the "agent vm size" specified. Default 0 (int)

agentPoolName String

Name for the agent pool, up to 12 alphanumeric characters. Default agentpool0 (string)

agentStorageProfile String

Storage profile specifies what kind of storage is used for machines in the agent pool. Chooses from [ManagedDisks StorageAccount]. Default ManagedDisks (string)

agentVmSize String

Size of machine in the agent pool. Default Standard_D1_v2 (string)

authBaseUrl String

Different authentication API url to use. Default https://login.microsoftonline.com/ (string)

baseUrl String

Different resource management API url to use. Default https://management.azure.com/ (string)

count Integer

Number of machines (VMs) in the agent pool. Allowed values must be in the range of 1 to 100 (inclusive). Default 1 (int)

dnsServiceIp String

An IP address assigned to the Kubernetes DNS service. It must be within the Kubernetes Service address range specified in "service cidr". Default 10.0.0.10 (string)

dockerBridgeCidr String

A CIDR notation IP range assigned to the Docker bridge network. It must not overlap with any Subnet IP ranges or the Kubernetes Service address range specified in "service cidr". Default 172.17.0.1/16 (string)

enableHttpApplicationRouting Boolean

Enable the Kubernetes ingress with automatic public DNS name creation. Default false (bool)

enableMonitoring Boolean

Turn on Azure Log Analytics monitoring. Uses the Log Analytics "Default" workspace if it exists, else creates one. if using an existing workspace, specifies "log analytics workspace resource id". Default true (bool)

loadBalancerSku String

Allowed values: basic (default) standard (string)

location String

(string)

logAnalyticsWorkspace String

The name of an existing Azure Log Analytics Workspace to use for storing monitoring data. If not specified, uses '{resource group}-{subscription id}-{location code}' (string)

logAnalyticsWorkspaceResourceGroup String

The resource group of an existing Azure Log Analytics Workspace to use for storing monitoring data. If not specified, uses the 'Cluster' resource group (string)

maxPods Integer

Maximum number of pods that can run on a node. Default 110 (int)

networkPlugin String

Network plugin used for building Kubernetes network. Chooses from azure or kubenet. Default azure (string)

networkPolicy String

Network policy used for building Kubernetes network. Chooses from calico (string)

podCidr String

A CIDR notation IP range from which to assign Kubernetes Pod IPs when "network plugin" is specified in "kubenet". Default 172.244.0.0/16 (string)

serviceCidr String

A CIDR notation IP range from which to assign Kubernetes Service cluster IPs. It must not overlap with any Subnet IP ranges. Default 10.0.0.0/16 (string)

tag Map<String,Object>

Use tags argument instead as []string

Deprecated:

Use tags argument instead as []string

tags List<String>

Tags for Kubernetes cluster. For example, ["foo=bar","bar=foo"] (list)

agentDnsPrefix string

DNS prefix to be used to create the FQDN for the agent pool (string)

clientId string

Azure client ID to use (string)

clientSecret string

Azure client secret associated with the "client id" (string)

kubernetesVersion string

K8s version to deploy. Default: Rancher default (string) (Note - if rke_config is set at cluster_template, kubernetes_version must be set to the active cluster version so Rancher can clone the RKE template)

masterDnsPrefix string

DNS prefix to use for the Kubernetes cluster control plane (string)

resourceGroup string

(string)

sshPublicKeyContents string

Contents of the SSH public key used to authenticate with Linux hosts (string)

subnet string

The name of an existing Azure Virtual Subnet. Composite of agent virtual network subnet ID (string)

subscriptionId string

(string)

tenantId string

(string)

virtualNetwork string

The name of an existing Azure Virtual Network. Composite of agent virtual network subnet ID (string)

virtualNetworkResourceGroup string

The resource group of an existing Azure Virtual Network. Composite of agent virtual network subnet ID (string)

aadServerAppSecret string

The secret of an Azure Active Directory server application (string)

aadTenantId string

The ID of an Azure Active Directory tenant (string)

addClientAppId string

The ID of an Azure Active Directory client application of type "Native". This application is for user login via kubectl (string)

addServerAppId string

The ID of an Azure Active Directory server application of type "Web app/API". This application represents the managed cluster's apiserver (Server application) (string)

adminUsername string

The administrator username to use for Linux hosts. Default azureuser (string)

agentOsDiskSize number

GB size to be used to specify the disk for every machine in the agent pool. If you specify 0, it will apply the default according to the "agent vm size" specified. Default 0 (int)

agentPoolName string

Name for the agent pool, up to 12 alphanumeric characters. Default agentpool0 (string)

agentStorageProfile string

Storage profile specifies what kind of storage is used for machines in the agent pool. Chooses from [ManagedDisks StorageAccount]. Default ManagedDisks (string)

agentVmSize string

Size of machine in the agent pool. Default Standard_D1_v2 (string)

authBaseUrl string

Different authentication API url to use. Default https://login.microsoftonline.com/ (string)

baseUrl string

Different resource management API url to use. Default https://management.azure.com/ (string)

count number

Number of machines (VMs) in the agent pool. Allowed values must be in the range of 1 to 100 (inclusive). Default 1 (int)

dnsServiceIp string

An IP address assigned to the Kubernetes DNS service. It must be within the Kubernetes Service address range specified in "service cidr". Default 10.0.0.10 (string)

dockerBridgeCidr string

A CIDR notation IP range assigned to the Docker bridge network. It must not overlap with any Subnet IP ranges or the Kubernetes Service address range specified in "service cidr". Default 172.17.0.1/16 (string)

enableHttpApplicationRouting boolean

Enable the Kubernetes ingress with automatic public DNS name creation. Default false (bool)

enableMonitoring boolean

Turn on Azure Log Analytics monitoring. Uses the Log Analytics "Default" workspace if it exists, else creates one. if using an existing workspace, specifies "log analytics workspace resource id". Default true (bool)

loadBalancerSku string

Allowed values: basic (default) standard (string)

location string

(string)

logAnalyticsWorkspace string

The name of an existing Azure Log Analytics Workspace to use for storing monitoring data. If not specified, uses '{resource group}-{subscription id}-{location code}' (string)

logAnalyticsWorkspaceResourceGroup string

The resource group of an existing Azure Log Analytics Workspace to use for storing monitoring data. If not specified, uses the 'Cluster' resource group (string)

maxPods number

Maximum number of pods that can run on a node. Default 110 (int)

networkPlugin string

Network plugin used for building Kubernetes network. Chooses from azure or kubenet. Default azure (string)

networkPolicy string

Network policy used for building Kubernetes network. Chooses from calico (string)

podCidr string

A CIDR notation IP range from which to assign Kubernetes Pod IPs when "network plugin" is specified in "kubenet". Default 172.244.0.0/16 (string)

serviceCidr string

A CIDR notation IP range from which to assign Kubernetes Service cluster IPs. It must not overlap with any Subnet IP ranges. Default 10.0.0.0/16 (string)

tag {[key: string]: any}

Use tags argument instead as []string

Deprecated:

Use tags argument instead as []string

tags string[]

Tags for Kubernetes cluster. For example, ["foo=bar","bar=foo"] (list)

agent_dns_prefix str

DNS prefix to be used to create the FQDN for the agent pool (string)

client_id str

Azure client ID to use (string)

client_secret str

Azure client secret associated with the "client id" (string)

kubernetes_version str

K8s version to deploy. Default: Rancher default (string) (Note - if rke_config is set at cluster_template, kubernetes_version must be set to the active cluster version so Rancher can clone the RKE template)

master_dns_prefix str

DNS prefix to use for the Kubernetes cluster control plane (string)

resource_group str

(string)

ssh_public_key_contents str

Contents of the SSH public key used to authenticate with Linux hosts (string)

subnet str

The name of an existing Azure Virtual Subnet. Composite of agent virtual network subnet ID (string)

subscription_id str

(string)

tenant_id str

(string)

virtual_network str

The name of an existing Azure Virtual Network. Composite of agent virtual network subnet ID (string)

virtual_network_resource_group str

The resource group of an existing Azure Virtual Network. Composite of agent virtual network subnet ID (string)

aad_server_app_secret str

The secret of an Azure Active Directory server application (string)

aad_tenant_id str

The ID of an Azure Active Directory tenant (string)

add_client_app_id str

The ID of an Azure Active Directory client application of type "Native". This application is for user login via kubectl (string)

add_server_app_id str

The ID of an Azure Active Directory server application of type "Web app/API". This application represents the managed cluster's apiserver (Server application) (string)

admin_username str

The administrator username to use for Linux hosts. Default azureuser (string)

agent_os_disk_size int

GB size to be used to specify the disk for every machine in the agent pool. If you specify 0, it will apply the default according to the "agent vm size" specified. Default 0 (int)

agent_pool_name str

Name for the agent pool, up to 12 alphanumeric characters. Default agentpool0 (string)

agent_storage_profile str

Storage profile specifies what kind of storage is used for machines in the agent pool. Chooses from [ManagedDisks StorageAccount]. Default ManagedDisks (string)

agent_vm_size str

Size of machine in the agent pool. Default Standard_D1_v2 (string)

auth_base_url str

Different authentication API url to use. Default https://login.microsoftonline.com/ (string)

base_url str

Different resource management API url to use. Default https://management.azure.com/ (string)

count int

Number of machines (VMs) in the agent pool. Allowed values must be in the range of 1 to 100 (inclusive). Default 1 (int)

dns_service_ip str

An IP address assigned to the Kubernetes DNS service. It must be within the Kubernetes Service address range specified in "service cidr". Default 10.0.0.10 (string)

docker_bridge_cidr str

A CIDR notation IP range assigned to the Docker bridge network. It must not overlap with any Subnet IP ranges or the Kubernetes Service address range specified in "service cidr". Default 172.17.0.1/16 (string)

enable_http_application_routing bool

Enable the Kubernetes ingress with automatic public DNS name creation. Default false (bool)

enable_monitoring bool

Turn on Azure Log Analytics monitoring. Uses the Log Analytics "Default" workspace if it exists, else creates one. if using an existing workspace, specifies "log analytics workspace resource id". Default true (bool)

load_balancer_sku str

Allowed values: basic (default) standard (string)

location str

(string)

log_analytics_workspace str

The name of an existing Azure Log Analytics Workspace to use for storing monitoring data. If not specified, uses '{resource group}-{subscription id}-{location code}' (string)

log_analytics_workspace_resource_group str

The resource group of an existing Azure Log Analytics Workspace to use for storing monitoring data. If not specified, uses the 'Cluster' resource group (string)

max_pods int

Maximum number of pods that can run on a node. Default 110 (int)

network_plugin str

Network plugin used for building Kubernetes network. Chooses from azure or kubenet. Default azure (string)

network_policy str

Network policy used for building Kubernetes network. Chooses from calico (string)

pod_cidr str

A CIDR notation IP range from which to assign Kubernetes Pod IPs when "network plugin" is specified in "kubenet". Default 172.244.0.0/16 (string)

service_cidr str

A CIDR notation IP range from which to assign Kubernetes Service cluster IPs. It must not overlap with any Subnet IP ranges. Default 10.0.0.0/16 (string)

tag Mapping[str, Any]

Use tags argument instead as []string

Deprecated:

Use tags argument instead as []string

tags Sequence[str]

Tags for Kubernetes cluster. For example, ["foo=bar","bar=foo"] (list)

agentDnsPrefix String

DNS prefix to be used to create the FQDN for the agent pool (string)

clientId String

Azure client ID to use (string)

clientSecret String

Azure client secret associated with the "client id" (string)

kubernetesVersion String

K8s version to deploy. Default: Rancher default (string) (Note - if rke_config is set at cluster_template, kubernetes_version must be set to the active cluster version so Rancher can clone the RKE template)

masterDnsPrefix String

DNS prefix to use for the Kubernetes cluster control plane (string)

resourceGroup String

(string)

sshPublicKeyContents String

Contents of the SSH public key used to authenticate with Linux hosts (string)

subnet String

The name of an existing Azure Virtual Subnet. Composite of agent virtual network subnet ID (string)

subscriptionId String

(string)

tenantId String

(string)

virtualNetwork String

The name of an existing Azure Virtual Network. Composite of agent virtual network subnet ID (string)

virtualNetworkResourceGroup String

The resource group of an existing Azure Virtual Network. Composite of agent virtual network subnet ID (string)

aadServerAppSecret String

The secret of an Azure Active Directory server application (string)

aadTenantId String

The ID of an Azure Active Directory tenant (string)

addClientAppId String

The ID of an Azure Active Directory client application of type "Native". This application is for user login via kubectl (string)

addServerAppId String

The ID of an Azure Active Directory server application of type "Web app/API". This application represents the managed cluster's apiserver (Server application) (string)

adminUsername String

The administrator username to use for Linux hosts. Default azureuser (string)

agentOsDiskSize Number

GB size to be used to specify the disk for every machine in the agent pool. If you specify 0, it will apply the default according to the "agent vm size" specified. Default 0 (int)

agentPoolName String

Name for the agent pool, up to 12 alphanumeric characters. Default agentpool0 (string)

agentStorageProfile String

Storage profile specifies what kind of storage is used for machines in the agent pool. Chooses from [ManagedDisks StorageAccount]. Default ManagedDisks (string)

agentVmSize String

Size of machine in the agent pool. Default Standard_D1_v2 (string)

authBaseUrl String

Different authentication API url to use. Default https://login.microsoftonline.com/ (string)

baseUrl String

Different resource management API url to use. Default https://management.azure.com/ (string)

count Number

Number of machines (VMs) in the agent pool. Allowed values must be in the range of 1 to 100 (inclusive). Default 1 (int)

dnsServiceIp String

An IP address assigned to the Kubernetes DNS service. It must be within the Kubernetes Service address range specified in "service cidr". Default 10.0.0.10 (string)

dockerBridgeCidr String

A CIDR notation IP range assigned to the Docker bridge network. It must not overlap with any Subnet IP ranges or the Kubernetes Service address range specified in "service cidr". Default 172.17.0.1/16 (string)

enableHttpApplicationRouting Boolean

Enable the Kubernetes ingress with automatic public DNS name creation. Default false (bool)

enableMonitoring Boolean

Turn on Azure Log Analytics monitoring. Uses the Log Analytics "Default" workspace if it exists, else creates one. if using an existing workspace, specifies "log analytics workspace resource id". Default true (bool)

loadBalancerSku String

Allowed values: basic (default) standard (string)

location String

(string)

logAnalyticsWorkspace String

The name of an existing Azure Log Analytics Workspace to use for storing monitoring data. If not specified, uses '{resource group}-{subscription id}-{location code}' (string)

logAnalyticsWorkspaceResourceGroup String

The resource group of an existing Azure Log Analytics Workspace to use for storing monitoring data. If not specified, uses the 'Cluster' resource group (string)

maxPods Number

Maximum number of pods that can run on a node. Default 110 (int)

networkPlugin String

Network plugin used for building Kubernetes network. Chooses from azure or kubenet. Default azure (string)

networkPolicy String

Network policy used for building Kubernetes network. Chooses from calico (string)

podCidr String

A CIDR notation IP range from which to assign Kubernetes Pod IPs when "network plugin" is specified in "kubenet". Default 172.244.0.0/16 (string)

serviceCidr String

A CIDR notation IP range from which to assign Kubernetes Service cluster IPs. It must not overlap with any Subnet IP ranges. Default 10.0.0.0/16 (string)

tag Map<Any>

Use tags argument instead as []string

Deprecated:

Use tags argument instead as []string

tags List<String>

Tags for Kubernetes cluster. For example, ["foo=bar","bar=foo"] (list)

ClusterAksConfigV2

CloudCredentialId string

The AKS Cloud Credential ID to use (string)

ResourceGroup string

(string)

ResourceLocation string

The AKS resource location (string)

AuthBaseUrl string

Different authentication API url to use. Default https://login.microsoftonline.com/ (string)

AuthorizedIpRanges List<string>

The AKS authorized ip ranges (list)

BaseUrl string

Different resource management API url to use. Default https://management.azure.com/ (string)

DnsPrefix string

The AKS dns prefix. Required if imported=false (string)

HttpApplicationRouting bool

Enable AKS http application routing? (bool)

Imported bool

Is AKS cluster imported? Default: false (bool)

KubernetesVersion string

K8s version to deploy. Default: Rancher default (string) (Note - if rke_config is set at cluster_template, kubernetes_version must be set to the active cluster version so Rancher can clone the RKE template)

LinuxAdminUsername string

The AKS linux admin username (string)

LinuxSshPublicKey string

The AKS linux ssh public key (string)

LoadBalancerSku string

Allowed values: basic (default) standard (string)

LogAnalyticsWorkspaceGroup string

The AKS log analytics workspace group (string)

LogAnalyticsWorkspaceName string

The AKS log analytics workspace name (string)

Monitoring bool

Kubernetes cluster monitoring (list maxitems:1)

Name string

The name of the Cluster (string)

NetworkDnsServiceIp string

The AKS network dns service ip (string)

NetworkDockerBridgeCidr string

The AKS network docker bridge cidr (string)

NetworkPlugin string

Network plugin used for building Kubernetes network. Chooses from azure or kubenet. Default azure (string)

NetworkPodCidr string

The AKS network pod cidr (string)

NetworkPolicy string

Network policy used for building Kubernetes network. Chooses from calico (string)

NetworkServiceCidr string

The AKS network service cidr (string)

NodePools List<ClusterAksConfigV2NodePool>

The AKS node pools. Required if imported=false (list)

PrivateCluster bool

Is AKS cluster private? (bool)

Subnet string

The name of an existing Azure Virtual Subnet. Composite of agent virtual network subnet ID (string)

Tags Dictionary<string, object>

Tags for Kubernetes cluster. For example, ["foo=bar","bar=foo"] (list)

VirtualNetwork string

The name of an existing Azure Virtual Network. Composite of agent virtual network subnet ID (string)

VirtualNetworkResourceGroup string

The resource group of an existing Azure Virtual Network. Composite of agent virtual network subnet ID (string)

CloudCredentialId string

The AKS Cloud Credential ID to use (string)

ResourceGroup string

(string)

ResourceLocation string

The AKS resource location (string)

AuthBaseUrl string

Different authentication API url to use. Default https://login.microsoftonline.com/ (string)

AuthorizedIpRanges []string

The AKS authorized ip ranges (list)

BaseUrl string

Different resource management API url to use. Default https://management.azure.com/ (string)

DnsPrefix string

The AKS dns prefix. Required if imported=false (string)

HttpApplicationRouting bool

Enable AKS http application routing? (bool)

Imported bool

Is AKS cluster imported? Default: false (bool)

KubernetesVersion string

K8s version to deploy. Default: Rancher default (string) (Note - if rke_config is set at cluster_template, kubernetes_version must be set to the active cluster version so Rancher can clone the RKE template)

LinuxAdminUsername string

The AKS linux admin username (string)

LinuxSshPublicKey string

The AKS linux ssh public key (string)

LoadBalancerSku string

Allowed values: basic (default) standard (string)

LogAnalyticsWorkspaceGroup string

The AKS log analytics workspace group (string)

LogAnalyticsWorkspaceName string

The AKS log analytics workspace name (string)

Monitoring bool

Kubernetes cluster monitoring (list maxitems:1)

Name string

The name of the Cluster (string)

NetworkDnsServiceIp string

The AKS network dns service ip (string)

NetworkDockerBridgeCidr string

The AKS network docker bridge cidr (string)

NetworkPlugin string

Network plugin used for building Kubernetes network. Chooses from azure or kubenet. Default azure (string)

NetworkPodCidr string

The AKS network pod cidr (string)

NetworkPolicy string

Network policy used for building Kubernetes network. Chooses from calico (string)

NetworkServiceCidr string

The AKS network service cidr (string)

NodePools []ClusterAksConfigV2NodePool

The AKS node pools. Required if imported=false (list)

PrivateCluster bool

Is AKS cluster private? (bool)

Subnet string

The name of an existing Azure Virtual Subnet. Composite of agent virtual network subnet ID (string)

Tags map[string]interface{}

Tags for Kubernetes cluster. For example, ["foo=bar","bar=foo"] (list)

VirtualNetwork string

The name of an existing Azure Virtual Network. Composite of agent virtual network subnet ID (string)

VirtualNetworkResourceGroup string

The resource group of an existing Azure Virtual Network. Composite of agent virtual network subnet ID (string)

cloudCredentialId String

The AKS Cloud Credential ID to use (string)

resourceGroup String

(string)

resourceLocation String

The AKS resource location (string)

authBaseUrl String

Different authentication API url to use. Default https://login.microsoftonline.com/ (string)

authorizedIpRanges List<String>

The AKS authorized ip ranges (list)

baseUrl String

Different resource management API url to use. Default https://management.azure.com/ (string)

dnsPrefix String

The AKS dns prefix. Required if imported=false (string)

httpApplicationRouting Boolean

Enable AKS http application routing? (bool)

imported Boolean

Is AKS cluster imported? Default: false (bool)

kubernetesVersion String

K8s version to deploy. Default: Rancher default (string) (Note - if rke_config is set at cluster_template, kubernetes_version must be set to the active cluster version so Rancher can clone the RKE template)

linuxAdminUsername String

The AKS linux admin username (string)

linuxSshPublicKey String

The AKS linux ssh public key (string)

loadBalancerSku String

Allowed values: basic (default) standard (string)

logAnalyticsWorkspaceGroup String

The AKS log analytics workspace group (string)

logAnalyticsWorkspaceName String

The AKS log analytics workspace name (string)

monitoring Boolean

Kubernetes cluster monitoring (list maxitems:1)

name String

The name of the Cluster (string)

networkDnsServiceIp String

The AKS network dns service ip (string)

networkDockerBridgeCidr String

The AKS network docker bridge cidr (string)

networkPlugin String

Network plugin used for building Kubernetes network. Chooses from azure or kubenet. Default azure (string)

networkPodCidr String

The AKS network pod cidr (string)

networkPolicy String

Network policy used for building Kubernetes network. Chooses from calico (string)

networkServiceCidr String

The AKS network service cidr (string)

nodePools List<ClusterAksConfigV2NodePool>

The AKS node pools. Required if imported=false (list)

privateCluster Boolean

Is AKS cluster private? (bool)

subnet String

The name of an existing Azure Virtual Subnet. Composite of agent virtual network subnet ID (string)

tags Map<String,Object>

Tags for Kubernetes cluster. For example, ["foo=bar","bar=foo"] (list)

virtualNetwork String

The name of an existing Azure Virtual Network. Composite of agent virtual network subnet ID (string)

virtualNetworkResourceGroup String

The resource group of an existing Azure Virtual Network. Composite of agent virtual network subnet ID (string)

cloudCredentialId string

The AKS Cloud Credential ID to use (string)

resourceGroup string

(string)

resourceLocation string

The AKS resource location (string)

authBaseUrl string

Different authentication API url to use. Default https://login.microsoftonline.com/ (string)

authorizedIpRanges string[]

The AKS authorized ip ranges (list)

baseUrl string

Different resource management API url to use. Default https://management.azure.com/ (string)

dnsPrefix string

The AKS dns prefix. Required if imported=false (string)

httpApplicationRouting boolean

Enable AKS http application routing? (bool)

imported boolean

Is AKS cluster imported? Default: false (bool)

kubernetesVersion string

K8s version to deploy. Default: Rancher default (string) (Note - if rke_config is set at cluster_template, kubernetes_version must be set to the active cluster version so Rancher can clone the RKE template)

linuxAdminUsername string

The AKS linux admin username (string)

linuxSshPublicKey string

The AKS linux ssh public key (string)

loadBalancerSku string

Allowed values: basic (default) standard (string)

logAnalyticsWorkspaceGroup string

The AKS log analytics workspace group (string)

logAnalyticsWorkspaceName string

The AKS log analytics workspace name (string)

monitoring boolean

Kubernetes cluster monitoring (list maxitems:1)

name string

The name of the Cluster (string)

networkDnsServiceIp string

The AKS network dns service ip (string)

networkDockerBridgeCidr string

The AKS network docker bridge cidr (string)

networkPlugin string

Network plugin used for building Kubernetes network. Chooses from azure or kubenet. Default azure (string)

networkPodCidr string

The AKS network pod cidr (string)

networkPolicy string

Network policy used for building Kubernetes network. Chooses from calico (string)

networkServiceCidr string

The AKS network service cidr (string)

nodePools ClusterAksConfigV2NodePool[]

The AKS node pools. Required if imported=false (list)

privateCluster boolean

Is AKS cluster private? (bool)

subnet string

The name of an existing Azure Virtual Subnet. Composite of agent virtual network subnet ID (string)

tags {[key: string]: any}

Tags for Kubernetes cluster. For example, ["foo=bar","bar=foo"] (list)

virtualNetwork string

The name of an existing Azure Virtual Network. Composite of agent virtual network subnet ID (string)

virtualNetworkResourceGroup string

The resource group of an existing Azure Virtual Network. Composite of agent virtual network subnet ID (string)

cloud_credential_id str

The AKS Cloud Credential ID to use (string)

resource_group str

(string)

resource_location str

The AKS resource location (string)

auth_base_url str

Different authentication API url to use. Default https://login.microsoftonline.com/ (string)

authorized_ip_ranges Sequence[str]

The AKS authorized ip ranges (list)

base_url str

Different resource management API url to use. Default https://management.azure.com/ (string)

dns_prefix str

The AKS dns prefix. Required if imported=false (string)

http_application_routing bool

Enable AKS http application routing? (bool)

imported bool

Is AKS cluster imported? Default: false (bool)

kubernetes_version str

K8s version to deploy. Default: Rancher default (string) (Note - if rke_config is set at cluster_template, kubernetes_version must be set to the active cluster version so Rancher can clone the RKE template)

linux_admin_username str

The AKS linux admin username (string)

linux_ssh_public_key str

The AKS linux ssh public key (string)

load_balancer_sku str

Allowed values: basic (default) standard (string)

log_analytics_workspace_group str

The AKS log analytics workspace group (string)

log_analytics_workspace_name str

The AKS log analytics workspace name (string)

monitoring bool

Kubernetes cluster monitoring (list maxitems:1)

name str

The name of the Cluster (string)

network_dns_service_ip str

The AKS network dns service ip (string)

network_docker_bridge_cidr str

The AKS network docker bridge cidr (string)

network_plugin str

Network plugin used for building Kubernetes network. Chooses from azure or kubenet. Default azure (string)

network_pod_cidr str

The AKS network pod cidr (string)

network_policy str

Network policy used for building Kubernetes network. Chooses from calico (string)

network_service_cidr str

The AKS network service cidr (string)

node_pools Sequence[ClusterAksConfigV2NodePool]

The AKS node pools. Required if imported=false (list)

private_cluster bool

Is AKS cluster private? (bool)

subnet str

The name of an existing Azure Virtual Subnet. Composite of agent virtual network subnet ID (string)

tags Mapping[str, Any]

Tags for Kubernetes cluster. For example, ["foo=bar","bar=foo"] (list)

virtual_network str

The name of an existing Azure Virtual Network. Composite of agent virtual network subnet ID (string)

virtual_network_resource_group str

The resource group of an existing Azure Virtual Network. Composite of agent virtual network subnet ID (string)

cloudCredentialId String

The AKS Cloud Credential ID to use (string)

resourceGroup String

(string)

resourceLocation String

The AKS resource location (string)

authBaseUrl String

Different authentication API url to use. Default https://login.microsoftonline.com/ (string)

authorizedIpRanges List<String>

The AKS authorized ip ranges (list)

baseUrl String

Different resource management API url to use. Default https://management.azure.com/ (string)

dnsPrefix String

The AKS dns prefix. Required if imported=false (string)

httpApplicationRouting Boolean

Enable AKS http application routing? (bool)

imported Boolean

Is AKS cluster imported? Default: false (bool)

kubernetesVersion String

K8s version to deploy. Default: Rancher default (string) (Note - if rke_config is set at cluster_template, kubernetes_version must be set to the active cluster version so Rancher can clone the RKE template)

linuxAdminUsername String

The AKS linux admin username (string)

linuxSshPublicKey String

The AKS linux ssh public key (string)

loadBalancerSku String

Allowed values: basic (default) standard (string)

logAnalyticsWorkspaceGroup String

The AKS log analytics workspace group (string)

logAnalyticsWorkspaceName String

The AKS log analytics workspace name (string)

monitoring Boolean

Kubernetes cluster monitoring (list maxitems:1)

name String

The name of the Cluster (string)

networkDnsServiceIp String

The AKS network dns service ip (string)

networkDockerBridgeCidr String

The AKS network docker bridge cidr (string)

networkPlugin String

Network plugin used for building Kubernetes network. Chooses from azure or kubenet. Default azure (string)

networkPodCidr String

The AKS network pod cidr (string)

networkPolicy String

Network policy used for building Kubernetes network. Chooses from calico (string)

networkServiceCidr String

The AKS network service cidr (string)

nodePools List<Property Map>

The AKS node pools. Required if imported=false (list)

privateCluster Boolean

Is AKS cluster private? (bool)

subnet String

The name of an existing Azure Virtual Subnet. Composite of agent virtual network subnet ID (string)

tags Map<Any>

Tags for Kubernetes cluster. For example, ["foo=bar","bar=foo"] (map)

virtualNetwork String

The name of an existing Azure Virtual Network. Composite of agent virtual network subnet ID (string)

virtualNetworkResourceGroup String

The resource group of an existing Azure Virtual Network. Composite of agent virtual network subnet ID (string)

ClusterAksConfigV2NodePool

Name string

The name of the Cluster (string)

AvailabilityZones List<string>

The AKS node pool availability zones (list)

Count int

Number of machines (VMs) in the agent pool. Allowed values must be in the range of 1 to 100 (inclusive). Default 1 (int)

EnableAutoScaling bool

Is AKS node pool auto scaling enabled? Default: false (bool)

MaxCount int

The AKS node pool max count. Required if enable_auto_scaling=true (int)

MaxPods int

Maximum number of pods that can run on a node. Default 110 (int)

MinCount int

The AKS node pool min count. Required if enable_auto_scaling=true (int)

Mode string

The AKS node pool mode. Allowed values: System (default) or User (string)

OrchestratorVersion string

The AKS node pool orchestrator version (string)

OsDiskSizeGb int

The AKS node pool os disk size gb. Default: 128 (int)

OsDiskType string

The AKS node pool os disk type. Default: Managed (string)

OsType string

The AKS node pool os type. Default: Linux (string)

VmSize string

The AKS node pool vm size (string)

Name string

The name of the Cluster (string)

AvailabilityZones []string

The AKS node pool availability zones (list)

Count int

Number of machines (VMs) in the agent pool. Allowed values must be in the range of 1 to 100 (inclusive). Default 1 (int)

EnableAutoScaling bool

Is AKS node pool auto scaling enabled? Default: false (bool)

MaxCount int

The AKS node pool max count. Required if enable_auto_scaling=true (int)

MaxPods int

Maximum number of pods that can run on a node. Default 110 (int)

MinCount int

The AKS node pool min count. Required if enable_auto_scaling=true (int)

Mode string

The AKS node pool mode. Allowed values: System (default) or User (string)

OrchestratorVersion string

The AKS node pool orchestrator version (string)

OsDiskSizeGb int

The AKS node pool os disk size gb. Default: 128 (int)

OsDiskType string

The AKS node pool os disk type. Default: Managed (string)

OsType string

The AKS node pool os type. Default: Linux (string)

VmSize string

The AKS node pool vm size (string)

name String

The name of the Cluster (string)

availabilityZones List<String>

The AKS node pool availability zones (list)

count Integer

Number of machines (VMs) in the agent pool. Allowed values must be in the range of 1 to 100 (inclusive). Default 1 (int)

enableAutoScaling Boolean

Is AKS node pool auto scaling enabled? Default: false (bool)

maxCount Integer

The AKS node pool max count. Required if enable_auto_scaling=true (int)

maxPods Integer

Maximum number of pods that can run on a node. Default 110 (int)

minCount Integer

The AKS node pool min count. Required if enable_auto_scaling=true (int)

mode String

The AKS node pool mode. Allowed values: System (default) or User (string)

orchestratorVersion String

The AKS node pool orchestrator version (string)

osDiskSizeGb Integer

The AKS node pool os disk size gb. Default: 128 (int)

osDiskType String

The AKS node pool os disk type. Default: Managed (string)

osType String

The AKS node pool os type. Default: Linux (string)

vmSize String

The AKS node pool vm size (string)

name string

The name of the Cluster (string)

availabilityZones string[]

The AKS node pool availability zones (list)

count number

Number of machines (VMs) in the agent pool. Allowed values must be in the range of 1 to 100 (inclusive). Default 1 (int)

enableAutoScaling boolean

Is AKS node pool auto scaling enabled? Default: false (bool)

maxCount number

The AKS node pool max count. Required if enable_auto_scaling=true (int)

maxPods number

Maximum number of pods that can run on a node. Default 110 (int)

minCount number

The AKS node pool min count. Required if enable_auto_scaling=true (int)

mode string

The AKS node pool mode. Allowed values: System (default) or User (string)

orchestratorVersion string

The AKS node pool orchestrator version (string)

osDiskSizeGb number

The AKS node pool os disk size gb. Default: 128 (int)

osDiskType string

The AKS node pool os disk type. Default: Managed (string)

osType string

The AKS node pool os type. Default: Linux (string)

vmSize string

The AKS node pool vm size (string)

name str

The name of the Cluster (string)

availability_zones Sequence[str]

The AKS node pool availability zones (list)

count int

Number of machines (VMs) in the agent pool. Allowed values must be in the range of 1 to 100 (inclusive). Default 1 (int)

enable_auto_scaling bool

Is AKS node pool auto scaling enabled? Default: false (bool)

max_count int

The AKS node pool max count. Required if enable_auto_scaling=true (int)

max_pods int

Maximum number of pods that can run on a node. Default 110 (int)

min_count int

The AKS node pool min count. Required if enable_auto_scaling=true (int)

mode str

The AKS node pool mode. Allowed values: System (default) or User (string)

orchestrator_version str

The AKS node pool orchestrator version (string)

os_disk_size_gb int

The AKS node pool os disk size gb. Default: 128 (int)

os_disk_type str

The AKS node pool os disk type. Default: Managed (string)

os_type str

The AKS node pool os type. Default: Linux (string)

vm_size str

The AKS node pool vm size (string)

name String

The name of the Cluster (string)

availabilityZones List<String>

The AKS node pool availability zones (list)

count Number

Number of machines (VMs) in the agent pool. Allowed values must be in the range of 1 to 100 (inclusive). Default 1 (int)

enableAutoScaling Boolean

Is AKS node pool auto scaling enabled? Default: false (bool)

maxCount Number

The AKS node pool max count. Required if enable_auto_scaling=true (int)

maxPods Number

Maximum number of pods that can run on a node. Default 110 (int)

minCount Number

The AKS node pool min count. Required if enable_auto_scaling=true (int)

mode String

The AKS node pool mode. Allowed values: System (default) or User (string)

orchestratorVersion String

The AKS node pool orchestrator version (string)

osDiskSizeGb Number

The AKS node pool os disk size gb. Default: 128 (int)

osDiskType String

The AKS node pool os disk type. Default: Managed (string)

osType String

The AKS node pool os type. Default: Linux (string)

vmSize String

The AKS node pool vm size (string)

ClusterClusterAuthEndpoint

CaCerts string

CA certs for the authorized cluster endpoint (string)

Enabled bool

Enable the authorized cluster endpoint (bool)

Fqdn string

FQDN for the authorized cluster endpoint (string)

CaCerts string

CA certs for the authorized cluster endpoint (string)

Enabled bool

Enable the authorized cluster endpoint (bool)

Fqdn string

FQDN for the authorized cluster endpoint (string)

caCerts String

CA certs for the authorized cluster endpoint (string)

enabled Boolean

Enable the authorized cluster endpoint (bool)

fqdn String

FQDN for the authorized cluster endpoint (string)

caCerts string

CA certs for the authorized cluster endpoint (string)

enabled boolean

Enable the authorized cluster endpoint (bool)

fqdn string

FQDN for the authorized cluster endpoint (string)

ca_certs str

CA certs for the authorized cluster endpoint (string)

enabled bool

Enable the authorized cluster endpoint (bool)

fqdn str

FQDN for the authorized cluster endpoint (string)

caCerts String

CA certs for the authorized cluster endpoint (string)

enabled Boolean

Enable the authorized cluster endpoint (bool)

fqdn String

FQDN for the authorized cluster endpoint (string)

ClusterClusterMonitoringInput

Answers Dictionary<string, object>

Key/value answers for monitor input (map)

Version string

The cluster monitoring chart version (string)

Answers map[string]interface{}

Key/value answers for monitor input (map)

Version string

The cluster monitoring chart version (string)

answers Map<String,Object>

Key/value answers for monitor input (map)

version String

The cluster monitoring chart version (string)

answers {[key: string]: any}

Key/value answers for monitor input (map)

version string

The cluster monitoring chart version (string)

answers Mapping[str, Any]

Key/value answers for monitor input (map)

version str

The cluster monitoring chart version (string)

answers Map<Any>

Key/value answers for monitor input (map)

version String

The cluster monitoring chart version (string)

ClusterClusterRegistrationToken

Annotations Dictionary<string, object>

Annotations for the Cluster (map)

ClusterId string

Cluster ID to apply answer (string)

Command string

Command to execute in a imported k8s cluster (string)

Id string

The cluster registration token ID (string)

InsecureCommand string

Insecure command to execute in a imported k8s cluster (string)

InsecureNodeCommand string

Insecure node command to execute in a imported k8s cluster (string)

InsecureWindowsNodeCommand string

Insecure windows command to execute in a imported k8s cluster (string)

Labels Dictionary<string, object>

Labels for the Cluster (map)

ManifestUrl string

K8s manifest url to execute with kubectl to import an existing k8s cluster (string)

Name string

The name of the Cluster (string)

NodeCommand string

Node command to execute in linux nodes for custom k8s cluster (string)

Token string

Token for cluster registration (string)