rancher2.Cluster
Provides a Rancher v2 Cluster resource. This can be used to create clusters for Rancher v2 environments and retrieve their information.
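Besides creating clusters, information about an existing cluster can be looked up with the provider's cluster data source. A minimal sketch in TypeScript, assuming a cluster named "foo-custom" already exists in Rancher and that the rancher2.getCluster lookup and its kubeConfig result field are available in your SDK version:
import * as pulumi from "@pulumi/pulumi";
import * as rancher2 from "@pulumi/rancher2";

// Look up an existing cluster by name ("foo-custom" is a hypothetical name).
const existing = rancher2.getCluster({ name: "foo-custom" });

// Export its ID and kubeconfig; the kubeconfig embeds credentials, so mark it secret.
export const existingClusterId = existing.then(c => c.id);
export const existingKubeConfig = pulumi.secret(existing.then(c => c.kubeConfig));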
Example Usage
Creating a Rancher v2 RKE cluster, enabling and customizing monitoring
using System.Collections.Generic;
using Pulumi;
using Rancher2 = Pulumi.Rancher2;
return await Deployment.RunAsync(() =>
{
// Create a new rancher2 RKE Cluster
var foo_custom = new Rancher2.Cluster("foo-custom", new()
{
ClusterMonitoringInput = new Rancher2.Inputs.ClusterClusterMonitoringInputArgs
{
Answers =
{
{ "exporter-kubelets.https", true },
{ "exporter-node.enabled", true },
{ "exporter-node.ports.metrics.port", 9796 },
{ "exporter-node.resources.limits.cpu", "200m" },
{ "exporter-node.resources.limits.memory", "200Mi" },
{ "grafana.persistence.enabled", false },
{ "grafana.persistence.size", "10Gi" },
{ "grafana.persistence.storageClass", "default" },
{ "operator.resources.limits.memory", "500Mi" },
{ "prometheus.persistence.enabled", "false" },
{ "prometheus.persistence.size", "50Gi" },
{ "prometheus.persistence.storageClass", "default" },
{ "prometheus.persistent.useReleaseName", "true" },
{ "prometheus.resources.core.limits.cpu", "1000m" },
{ "prometheus.resources.core.limits.memory", "1500Mi" },
{ "prometheus.resources.core.requests.cpu", "750m" },
{ "prometheus.resources.core.requests.memory", "750Mi" },
{ "prometheus.retention", "12h" },
},
Version = "0.1.0",
},
Description = "Foo rancher2 custom cluster",
EnableClusterMonitoring = true,
RkeConfig = new Rancher2.Inputs.ClusterRkeConfigArgs
{
Network = new Rancher2.Inputs.ClusterRkeConfigNetworkArgs
{
Plugin = "canal",
},
},
});
});
package main
import (
"github.com/pulumi/pulumi-rancher2/sdk/v3/go/rancher2"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
_, err := rancher2.NewCluster(ctx, "foo-custom", &rancher2.ClusterArgs{
ClusterMonitoringInput: &rancher2.ClusterClusterMonitoringInputArgs{
Answers: pulumi.Map{
"exporter-kubelets.https": pulumi.Any(true),
"exporter-node.enabled": pulumi.Any(true),
"exporter-node.ports.metrics.port": pulumi.Any(9796),
"exporter-node.resources.limits.cpu": pulumi.Any("200m"),
"exporter-node.resources.limits.memory": pulumi.Any("200Mi"),
"grafana.persistence.enabled": pulumi.Any(false),
"grafana.persistence.size": pulumi.Any("10Gi"),
"grafana.persistence.storageClass": pulumi.Any("default"),
"operator.resources.limits.memory": pulumi.Any("500Mi"),
"prometheus.persistence.enabled": pulumi.Any("false"),
"prometheus.persistence.size": pulumi.Any("50Gi"),
"prometheus.persistence.storageClass": pulumi.Any("default"),
"prometheus.persistent.useReleaseName": pulumi.Any("true"),
"prometheus.resources.core.limits.cpu": pulumi.Any("1000m"),
"prometheus.resources.core.limits.memory": pulumi.Any("1500Mi"),
"prometheus.resources.core.requests.cpu": pulumi.Any("750m"),
"prometheus.resources.core.requests.memory": pulumi.Any("750Mi"),
"prometheus.retention": pulumi.Any("12h"),
},
Version: pulumi.String("0.1.0"),
},
Description: pulumi.String("Foo rancher2 custom cluster"),
EnableClusterMonitoring: pulumi.Bool(true),
RkeConfig: &rancher2.ClusterRkeConfigArgs{
Network: &rancher2.ClusterRkeConfigNetworkArgs{
Plugin: pulumi.String("canal"),
},
},
})
if err != nil {
return err
}
return nil
})
}
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.rancher2.Cluster;
import com.pulumi.rancher2.ClusterArgs;
import com.pulumi.rancher2.inputs.ClusterClusterMonitoringInputArgs;
import com.pulumi.rancher2.inputs.ClusterRkeConfigArgs;
import com.pulumi.rancher2.inputs.ClusterRkeConfigNetworkArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
var foo_custom = new Cluster("foo-custom", ClusterArgs.builder()
.clusterMonitoringInput(ClusterClusterMonitoringInputArgs.builder()
.answers(Map.ofEntries(
Map.entry("exporter-kubelets.https", true),
Map.entry("exporter-node.enabled", true),
Map.entry("exporter-node.ports.metrics.port", 9796),
Map.entry("exporter-node.resources.limits.cpu", "200m"),
Map.entry("exporter-node.resources.limits.memory", "200Mi"),
Map.entry("grafana.persistence.enabled", false),
Map.entry("grafana.persistence.size", "10Gi"),
Map.entry("grafana.persistence.storageClass", "default"),
Map.entry("operator.resources.limits.memory", "500Mi"),
Map.entry("prometheus.persistence.enabled", "false"),
Map.entry("prometheus.persistence.size", "50Gi"),
Map.entry("prometheus.persistence.storageClass", "default"),
Map.entry("prometheus.persistent.useReleaseName", "true"),
Map.entry("prometheus.resources.core.limits.cpu", "1000m"),
Map.entry("prometheus.resources.core.limits.memory", "1500Mi"),
Map.entry("prometheus.resources.core.requests.cpu", "750m"),
Map.entry("prometheus.resources.core.requests.memory", "750Mi"),
Map.entry("prometheus.retention", "12h")
))
.version("0.1.0")
.build())
.description("Foo rancher2 custom cluster")
.enableClusterMonitoring(true)
.rkeConfig(ClusterRkeConfigArgs.builder()
.network(ClusterRkeConfigNetworkArgs.builder()
.plugin("canal")
.build())
.build())
.build());
}
}
import pulumi
import pulumi_rancher2 as rancher2
# Create a new rancher2 RKE Cluster
foo_custom = rancher2.Cluster("foo-custom",
cluster_monitoring_input=rancher2.ClusterClusterMonitoringInputArgs(
answers={
"exporter-kubelets.https": True,
"exporter-node.enabled": True,
"exporter-node.ports.metrics.port": 9796,
"exporter-node.resources.limits.cpu": "200m",
"exporter-node.resources.limits.memory": "200Mi",
"grafana.persistence.enabled": False,
"grafana.persistence.size": "10Gi",
"grafana.persistence.storageClass": "default",
"operator.resources.limits.memory": "500Mi",
"prometheus.persistence.enabled": "false",
"prometheus.persistence.size": "50Gi",
"prometheus.persistence.storageClass": "default",
"prometheus.persistent.useReleaseName": "true",
"prometheus.resources.core.limits.cpu": "1000m",
"prometheus.resources.core.limits.memory": "1500Mi",
"prometheus.resources.core.requests.cpu": "750m",
"prometheus.resources.core.requests.memory": "750Mi",
"prometheus.retention": "12h",
},
version="0.1.0",
),
description="Foo rancher2 custom cluster",
enable_cluster_monitoring=True,
rke_config=rancher2.ClusterRkeConfigArgs(
network=rancher2.ClusterRkeConfigNetworkArgs(
plugin="canal",
),
))
import * as pulumi from "@pulumi/pulumi";
import * as rancher2 from "@pulumi/rancher2";
// Create a new rancher2 RKE Cluster
const foo_custom = new rancher2.Cluster("foo-custom", {
clusterMonitoringInput: {
answers: {
"exporter-kubelets.https": true,
"exporter-node.enabled": true,
"exporter-node.ports.metrics.port": 9796,
"exporter-node.resources.limits.cpu": "200m",
"exporter-node.resources.limits.memory": "200Mi",
"grafana.persistence.enabled": false,
"grafana.persistence.size": "10Gi",
"grafana.persistence.storageClass": "default",
"operator.resources.limits.memory": "500Mi",
"prometheus.persistence.enabled": "false",
"prometheus.persistence.size": "50Gi",
"prometheus.persistence.storageClass": "default",
"prometheus.persistent.useReleaseName": "true",
"prometheus.resources.core.limits.cpu": "1000m",
"prometheus.resources.core.limits.memory": "1500Mi",
"prometheus.resources.core.requests.cpu": "750m",
"prometheus.resources.core.requests.memory": "750Mi",
"prometheus.retention": "12h",
},
version: "0.1.0",
},
description: "Foo rancher2 custom cluster",
enableClusterMonitoring: true,
rkeConfig: {
network: {
plugin: "canal",
},
},
});
resources:
# Create a new rancher2 RKE Cluster
foo-custom:
type: rancher2:Cluster
properties:
clusterMonitoringInput:
answers:
exporter-kubelets.https: true
exporter-node.enabled: true
exporter-node.ports.metrics.port: 9796
exporter-node.resources.limits.cpu: 200m
exporter-node.resources.limits.memory: 200Mi
grafana.persistence.enabled: false
grafana.persistence.size: 10Gi
grafana.persistence.storageClass: default
operator.resources.limits.memory: 500Mi
prometheus.persistence.enabled: 'false'
prometheus.persistence.size: 50Gi
prometheus.persistence.storageClass: default
prometheus.persistent.useReleaseName: 'true'
prometheus.resources.core.limits.cpu: 1000m
prometheus.resources.core.limits.memory: 1500Mi
prometheus.resources.core.requests.cpu: 750m
prometheus.resources.core.requests.memory: 750Mi
prometheus.retention: 12h
version: 0.1.0
description: Foo rancher2 custom cluster
enableClusterMonitoring: true
rkeConfig:
network:
plugin: canal
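A custom cluster like the one above only becomes active once nodes are registered against it. The cluster exposes the registration command and kubeconfig as outputs; a minimal TypeScript sketch, where the clusterRegistrationToken.nodeCommand and kubeConfig property names are assumptions taken from the provider schema (both values are sensitive):
import * as pulumi from "@pulumi/pulumi";
import * as rancher2 from "@pulumi/rancher2";

const fooCustom = new rancher2.Cluster("foo-custom", {
    description: "Foo rancher2 custom cluster",
    rkeConfig: {
        network: {
            plugin: "canal",
        },
    },
});

// Docker command to run on each node so it registers with this cluster (assumed output name).
export const nodeCommand = fooCustom.clusterRegistrationToken.nodeCommand;
// Kubeconfig for the cluster, marked secret because it embeds credentials.
export const kubeConfig = pulumi.secret(fooCustom.kubeConfig);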
Creating a Rancher v2 RKE cluster, enabling/customizing monitoring and Istio
using System.Collections.Generic;
using Pulumi;
using Rancher2 = Pulumi.Rancher2;
return await Deployment.RunAsync(() =>
{
// Create a new rancher2 RKE Cluster
var foo_customCluster = new Rancher2.Cluster("foo-customCluster", new()
{
Description = "Foo rancher2 custom cluster",
RkeConfig = new Rancher2.Inputs.ClusterRkeConfigArgs
{
Network = new Rancher2.Inputs.ClusterRkeConfigNetworkArgs
{
Plugin = "canal",
},
},
EnableClusterMonitoring = true,
ClusterMonitoringInput = new Rancher2.Inputs.ClusterClusterMonitoringInputArgs
{
Answers =
{
{ "exporter-kubelets.https", true },
{ "exporter-node.enabled", true },
{ "exporter-node.ports.metrics.port", 9796 },
{ "exporter-node.resources.limits.cpu", "200m" },
{ "exporter-node.resources.limits.memory", "200Mi" },
{ "grafana.persistence.enabled", false },
{ "grafana.persistence.size", "10Gi" },
{ "grafana.persistence.storageClass", "default" },
{ "operator.resources.limits.memory", "500Mi" },
{ "prometheus.persistence.enabled", "false" },
{ "prometheus.persistence.size", "50Gi" },
{ "prometheus.persistence.storageClass", "default" },
{ "prometheus.persistent.useReleaseName", "true" },
{ "prometheus.resources.core.limits.cpu", "1000m" },
{ "prometheus.resources.core.limits.memory", "1500Mi" },
{ "prometheus.resources.core.requests.cpu", "750m" },
{ "prometheus.resources.core.requests.memory", "750Mi" },
{ "prometheus.retention", "12h" },
},
Version = "0.1.0",
},
});
// Create a new rancher2 Cluster Sync for foo-custom cluster
var foo_customClusterSync = new Rancher2.ClusterSync("foo-customClusterSync", new()
{
ClusterId = foo_customCluster.Id,
WaitMonitoring = foo_customCluster.EnableClusterMonitoring,
});
// Create a new rancher2 Namespace
var foo_istio = new Rancher2.Namespace("foo-istio", new()
{
ProjectId = foo_customClusterSync.SystemProjectId,
Description = "istio namespace",
});
// Create a new rancher2 App deploying istio (should wait until monitoring is up and running)
var istio = new Rancher2.App("istio", new()
{
CatalogName = "system-library",
Description = "Terraform app acceptance test",
ProjectId = foo_istio.ProjectId,
TemplateName = "rancher-istio",
TemplateVersion = "0.1.1",
TargetNamespace = foo_istio.Id,
Answers =
{
{ "certmanager.enabled", false },
{ "enableCRDs", true },
{ "galley.enabled", true },
{ "gateways.enabled", false },
{ "gateways.istio-ingressgateway.resources.limits.cpu", "2000m" },
{ "gateways.istio-ingressgateway.resources.limits.memory", "1024Mi" },
{ "gateways.istio-ingressgateway.resources.requests.cpu", "100m" },
{ "gateways.istio-ingressgateway.resources.requests.memory", "128Mi" },
{ "gateways.istio-ingressgateway.type", "NodePort" },
{ "global.monitoring.type", "cluster-monitoring" },
{ "global.rancher.clusterId", foo_customClusterSync.ClusterId },
{ "istio_cni.enabled", "false" },
{ "istiocoredns.enabled", "false" },
{ "kiali.enabled", "true" },
{ "mixer.enabled", "true" },
{ "mixer.policy.enabled", "true" },
{ "mixer.policy.resources.limits.cpu", "4800m" },
{ "mixer.policy.resources.limits.memory", "4096Mi" },
{ "mixer.policy.resources.requests.cpu", "1000m" },
{ "mixer.policy.resources.requests.memory", "1024Mi" },
{ "mixer.telemetry.resources.limits.cpu", "4800m" },
{ "mixer.telemetry.resources.limits.memory", "4096Mi" },
{ "mixer.telemetry.resources.requests.cpu", "1000m" },
{ "mixer.telemetry.resources.requests.memory", "1024Mi" },
{ "mtls.enabled", false },
{ "nodeagent.enabled", false },
{ "pilot.enabled", true },
{ "pilot.resources.limits.cpu", "1000m" },
{ "pilot.resources.limits.memory", "4096Mi" },
{ "pilot.resources.requests.cpu", "500m" },
{ "pilot.resources.requests.memory", "2048Mi" },
{ "pilot.traceSampling", "1" },
{ "security.enabled", true },
{ "sidecarInjectorWebhook.enabled", true },
{ "tracing.enabled", true },
{ "tracing.jaeger.resources.limits.cpu", "500m" },
{ "tracing.jaeger.resources.limits.memory", "1024Mi" },
{ "tracing.jaeger.resources.requests.cpu", "100m" },
{ "tracing.jaeger.resources.requests.memory", "100Mi" },
},
});
});
package main
import (
"github.com/pulumi/pulumi-rancher2/sdk/v3/go/rancher2"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
foo_customCluster, err := rancher2.NewCluster(ctx, "foo-customCluster", &rancher2.ClusterArgs{
Description: pulumi.String("Foo rancher2 custom cluster"),
RkeConfig: &rancher2.ClusterRkeConfigArgs{
Network: &rancher2.ClusterRkeConfigNetworkArgs{
Plugin: pulumi.String("canal"),
},
},
EnableClusterMonitoring: pulumi.Bool(true),
ClusterMonitoringInput: &rancher2.ClusterClusterMonitoringInputArgs{
Answers: pulumi.Map{
"exporter-kubelets.https": pulumi.Any(true),
"exporter-node.enabled": pulumi.Any(true),
"exporter-node.ports.metrics.port": pulumi.Any(9796),
"exporter-node.resources.limits.cpu": pulumi.Any("200m"),
"exporter-node.resources.limits.memory": pulumi.Any("200Mi"),
"grafana.persistence.enabled": pulumi.Any(false),
"grafana.persistence.size": pulumi.Any("10Gi"),
"grafana.persistence.storageClass": pulumi.Any("default"),
"operator.resources.limits.memory": pulumi.Any("500Mi"),
"prometheus.persistence.enabled": pulumi.Any("false"),
"prometheus.persistence.size": pulumi.Any("50Gi"),
"prometheus.persistence.storageClass": pulumi.Any("default"),
"prometheus.persistent.useReleaseName": pulumi.Any("true"),
"prometheus.resources.core.limits.cpu": pulumi.Any("1000m"),
"prometheus.resources.core.limits.memory": pulumi.Any("1500Mi"),
"prometheus.resources.core.requests.cpu": pulumi.Any("750m"),
"prometheus.resources.core.requests.memory": pulumi.Any("750Mi"),
"prometheus.retention": pulumi.Any("12h"),
},
Version: pulumi.String("0.1.0"),
},
})
if err != nil {
return err
}
foo_customClusterSync, err := rancher2.NewClusterSync(ctx, "foo-customClusterSync", &rancher2.ClusterSyncArgs{
ClusterId: foo_customCluster.ID(),
WaitMonitoring: foo_customCluster.EnableClusterMonitoring,
})
if err != nil {
return err
}
foo_istio, err := rancher2.NewNamespace(ctx, "foo-istio", &rancher2.NamespaceArgs{
ProjectId: foo_customClusterSync.SystemProjectId,
Description: pulumi.String("istio namespace"),
})
if err != nil {
return err
}
_, err = rancher2.NewApp(ctx, "istio", &rancher2.AppArgs{
CatalogName: pulumi.String("system-library"),
Description: pulumi.String("Terraform app acceptance test"),
ProjectId: foo_istio.ProjectId,
TemplateName: pulumi.String("rancher-istio"),
TemplateVersion: pulumi.String("0.1.1"),
TargetNamespace: foo_istio.ID(),
Answers: pulumi.Map{
"certmanager.enabled": pulumi.Any(false),
"enableCRDs": pulumi.Any(true),
"galley.enabled": pulumi.Any(true),
"gateways.enabled": pulumi.Any(false),
"gateways.istio-ingressgateway.resources.limits.cpu": pulumi.Any("2000m"),
"gateways.istio-ingressgateway.resources.limits.memory": pulumi.Any("1024Mi"),
"gateways.istio-ingressgateway.resources.requests.cpu": pulumi.Any("100m"),
"gateways.istio-ingressgateway.resources.requests.memory": pulumi.Any("128Mi"),
"gateways.istio-ingressgateway.type": pulumi.Any("NodePort"),
"global.monitoring.type": pulumi.Any("cluster-monitoring"),
"global.rancher.clusterId": foo_customClusterSync.ClusterId,
"istio_cni.enabled": pulumi.Any("false"),
"istiocoredns.enabled": pulumi.Any("false"),
"kiali.enabled": pulumi.Any("true"),
"mixer.enabled": pulumi.Any("true"),
"mixer.policy.enabled": pulumi.Any("true"),
"mixer.policy.resources.limits.cpu": pulumi.Any("4800m"),
"mixer.policy.resources.limits.memory": pulumi.Any("4096Mi"),
"mixer.policy.resources.requests.cpu": pulumi.Any("1000m"),
"mixer.policy.resources.requests.memory": pulumi.Any("1024Mi"),
"mixer.telemetry.resources.limits.cpu": pulumi.Any("4800m"),
"mixer.telemetry.resources.limits.memory": pulumi.Any("4096Mi"),
"mixer.telemetry.resources.requests.cpu": pulumi.Any("1000m"),
"mixer.telemetry.resources.requests.memory": pulumi.Any("1024Mi"),
"mtls.enabled": pulumi.Any(false),
"nodeagent.enabled": pulumi.Any(false),
"pilot.enabled": pulumi.Any(true),
"pilot.resources.limits.cpu": pulumi.Any("1000m"),
"pilot.resources.limits.memory": pulumi.Any("4096Mi"),
"pilot.resources.requests.cpu": pulumi.Any("500m"),
"pilot.resources.requests.memory": pulumi.Any("2048Mi"),
"pilot.traceSampling": pulumi.Any("1"),
"security.enabled": pulumi.Any(true),
"sidecarInjectorWebhook.enabled": pulumi.Any(true),
"tracing.enabled": pulumi.Any(true),
"tracing.jaeger.resources.limits.cpu": pulumi.Any("500m"),
"tracing.jaeger.resources.limits.memory": pulumi.Any("1024Mi"),
"tracing.jaeger.resources.requests.cpu": pulumi.Any("100m"),
"tracing.jaeger.resources.requests.memory": pulumi.Any("100Mi"),
},
})
if err != nil {
return err
}
return nil
})
}
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.rancher2.Cluster;
import com.pulumi.rancher2.ClusterArgs;
import com.pulumi.rancher2.inputs.ClusterRkeConfigArgs;
import com.pulumi.rancher2.inputs.ClusterRkeConfigNetworkArgs;
import com.pulumi.rancher2.inputs.ClusterClusterMonitoringInputArgs;
import com.pulumi.rancher2.ClusterSync;
import com.pulumi.rancher2.ClusterSyncArgs;
import com.pulumi.rancher2.Namespace;
import com.pulumi.rancher2.NamespaceArgs;
import com.pulumi.rancher2.AppArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
var foo_customCluster = new Cluster("foo-customCluster", ClusterArgs.builder()
.description("Foo rancher2 custom cluster")
.rkeConfig(ClusterRkeConfigArgs.builder()
.network(ClusterRkeConfigNetworkArgs.builder()
.plugin("canal")
.build())
.build())
.enableClusterMonitoring(true)
.clusterMonitoringInput(ClusterClusterMonitoringInputArgs.builder()
.answers(Map.ofEntries(
Map.entry("exporter-kubelets.https", true),
Map.entry("exporter-node.enabled", true),
Map.entry("exporter-node.ports.metrics.port", 9796),
Map.entry("exporter-node.resources.limits.cpu", "200m"),
Map.entry("exporter-node.resources.limits.memory", "200Mi"),
Map.entry("grafana.persistence.enabled", false),
Map.entry("grafana.persistence.size", "10Gi"),
Map.entry("grafana.persistence.storageClass", "default"),
Map.entry("operator.resources.limits.memory", "500Mi"),
Map.entry("prometheus.persistence.enabled", "false"),
Map.entry("prometheus.persistence.size", "50Gi"),
Map.entry("prometheus.persistence.storageClass", "default"),
Map.entry("prometheus.persistent.useReleaseName", "true"),
Map.entry("prometheus.resources.core.limits.cpu", "1000m"),
Map.entry("prometheus.resources.core.limits.memory", "1500Mi"),
Map.entry("prometheus.resources.core.requests.cpu", "750m"),
Map.entry("prometheus.resources.core.requests.memory", "750Mi"),
Map.entry("prometheus.retention", "12h")
))
.version("0.1.0")
.build())
.build());
var foo_customClusterSync = new ClusterSync("foo-customClusterSync", ClusterSyncArgs.builder()
.clusterId(foo_customCluster.id())
.waitMonitoring(foo_customCluster.enableClusterMonitoring())
.build());
var foo_istio = new Namespace("foo-istio", NamespaceArgs.builder()
.projectId(foo_customClusterSync.systemProjectId())
.description("istio namespace")
.build());
// Fully qualified to avoid clashing with this file's own App class.
var istio = new com.pulumi.rancher2.App("istio", AppArgs.builder()
.catalogName("system-library")
.description("Terraform app acceptance test")
.projectId(foo_istio.projectId())
.templateName("rancher-istio")
.templateVersion("0.1.1")
.targetNamespace(foo_istio.id())
.answers(Map.ofEntries(
Map.entry("certmanager.enabled", false),
Map.entry("enableCRDs", true),
Map.entry("galley.enabled", true),
Map.entry("gateways.enabled", false),
Map.entry("gateways.istio-ingressgateway.resources.limits.cpu", "2000m"),
Map.entry("gateways.istio-ingressgateway.resources.limits.memory", "1024Mi"),
Map.entry("gateways.istio-ingressgateway.resources.requests.cpu", "100m"),
Map.entry("gateways.istio-ingressgateway.resources.requests.memory", "128Mi"),
Map.entry("gateways.istio-ingressgateway.type", "NodePort"),
Map.entry("global.monitoring.type", "cluster-monitoring"),
Map.entry("global.rancher.clusterId", foo_customClusterSync.clusterId()),
Map.entry("istio_cni.enabled", "false"),
Map.entry("istiocoredns.enabled", "false"),
Map.entry("kiali.enabled", "true"),
Map.entry("mixer.enabled", "true"),
Map.entry("mixer.policy.enabled", "true"),
Map.entry("mixer.policy.resources.limits.cpu", "4800m"),
Map.entry("mixer.policy.resources.limits.memory", "4096Mi"),
Map.entry("mixer.policy.resources.requests.cpu", "1000m"),
Map.entry("mixer.policy.resources.requests.memory", "1024Mi"),
Map.entry("mixer.telemetry.resources.limits.cpu", "4800m"),
Map.entry("mixer.telemetry.resources.limits.memory", "4096Mi"),
Map.entry("mixer.telemetry.resources.requests.cpu", "1000m"),
Map.entry("mixer.telemetry.resources.requests.memory", "1024Mi"),
Map.entry("mtls.enabled", false),
Map.entry("nodeagent.enabled", false),
Map.entry("pilot.enabled", true),
Map.entry("pilot.resources.limits.cpu", "1000m"),
Map.entry("pilot.resources.limits.memory", "4096Mi"),
Map.entry("pilot.resources.requests.cpu", "500m"),
Map.entry("pilot.resources.requests.memory", "2048Mi"),
Map.entry("pilot.traceSampling", "1"),
Map.entry("security.enabled", true),
Map.entry("sidecarInjectorWebhook.enabled", true),
Map.entry("tracing.enabled", true),
Map.entry("tracing.jaeger.resources.limits.cpu", "500m"),
Map.entry("tracing.jaeger.resources.limits.memory", "1024Mi"),
Map.entry("tracing.jaeger.resources.requests.cpu", "100m"),
Map.entry("tracing.jaeger.resources.requests.memory", "100Mi")
))
.build());
}
}
import pulumi
import pulumi_rancher2 as rancher2
# Create a new rancher2 RKE Cluster
foo_custom_cluster = rancher2.Cluster("foo-customCluster",
description="Foo rancher2 custom cluster",
rke_config=rancher2.ClusterRkeConfigArgs(
network=rancher2.ClusterRkeConfigNetworkArgs(
plugin="canal",
),
),
enable_cluster_monitoring=True,
cluster_monitoring_input=rancher2.ClusterClusterMonitoringInputArgs(
answers={
"exporter-kubelets.https": True,
"exporter-node.enabled": True,
"exporter-node.ports.metrics.port": 9796,
"exporter-node.resources.limits.cpu": "200m",
"exporter-node.resources.limits.memory": "200Mi",
"grafana.persistence.enabled": False,
"grafana.persistence.size": "10Gi",
"grafana.persistence.storageClass": "default",
"operator.resources.limits.memory": "500Mi",
"prometheus.persistence.enabled": "false",
"prometheus.persistence.size": "50Gi",
"prometheus.persistence.storageClass": "default",
"prometheus.persistent.useReleaseName": "true",
"prometheus.resources.core.limits.cpu": "1000m",
"prometheus.resources.core.limits.memory": "1500Mi",
"prometheus.resources.core.requests.cpu": "750m",
"prometheus.resources.core.requests.memory": "750Mi",
"prometheus.retention": "12h",
},
version="0.1.0",
))
# Create a new rancher2 Cluster Sync for foo-custom cluster
foo_custom_cluster_sync = rancher2.ClusterSync("foo-customClusterSync",
cluster_id=foo_custom_cluster.id,
wait_monitoring=foo_custom_cluster.enable_cluster_monitoring)
# Create a new rancher2 Namespace
foo_istio = rancher2.Namespace("foo-istio",
project_id=foo_custom_cluster_sync.system_project_id,
description="istio namespace")
# Create a new rancher2 App deploying istio (should wait until monitoring is up and running)
istio = rancher2.App("istio",
catalog_name="system-library",
description="Terraform app acceptance test",
project_id=foo_istio.project_id,
template_name="rancher-istio",
template_version="0.1.1",
target_namespace=foo_istio.id,
answers={
"certmanager.enabled": False,
"enableCRDs": True,
"galley.enabled": True,
"gateways.enabled": False,
"gateways.istio-ingressgateway.resources.limits.cpu": "2000m",
"gateways.istio-ingressgateway.resources.limits.memory": "1024Mi",
"gateways.istio-ingressgateway.resources.requests.cpu": "100m",
"gateways.istio-ingressgateway.resources.requests.memory": "128Mi",
"gateways.istio-ingressgateway.type": "NodePort",
"global.monitoring.type": "cluster-monitoring",
"global.rancher.clusterId": foo_custom_cluster_sync.cluster_id,
"istio_cni.enabled": "false",
"istiocoredns.enabled": "false",
"kiali.enabled": "true",
"mixer.enabled": "true",
"mixer.policy.enabled": "true",
"mixer.policy.resources.limits.cpu": "4800m",
"mixer.policy.resources.limits.memory": "4096Mi",
"mixer.policy.resources.requests.cpu": "1000m",
"mixer.policy.resources.requests.memory": "1024Mi",
"mixer.telemetry.resources.limits.cpu": "4800m",
"mixer.telemetry.resources.limits.memory": "4096Mi",
"mixer.telemetry.resources.requests.cpu": "1000m",
"mixer.telemetry.resources.requests.memory": "1024Mi",
"mtls.enabled": False,
"nodeagent.enabled": False,
"pilot.enabled": True,
"pilot.resources.limits.cpu": "1000m",
"pilot.resources.limits.memory": "4096Mi",
"pilot.resources.requests.cpu": "500m",
"pilot.resources.requests.memory": "2048Mi",
"pilot.traceSampling": "1",
"security.enabled": True,
"sidecarInjectorWebhook.enabled": True,
"tracing.enabled": True,
"tracing.jaeger.resources.limits.cpu": "500m",
"tracing.jaeger.resources.limits.memory": "1024Mi",
"tracing.jaeger.resources.requests.cpu": "100m",
"tracing.jaeger.resources.requests.memory": "100Mi",
})
import * as pulumi from "@pulumi/pulumi";
import * as rancher2 from "@pulumi/rancher2";
// Create a new rancher2 RKE Cluster
const foo_customCluster = new rancher2.Cluster("foo-customCluster", {
description: "Foo rancher2 custom cluster",
rkeConfig: {
network: {
plugin: "canal",
},
},
enableClusterMonitoring: true,
clusterMonitoringInput: {
answers: {
"exporter-kubelets.https": true,
"exporter-node.enabled": true,
"exporter-node.ports.metrics.port": 9796,
"exporter-node.resources.limits.cpu": "200m",
"exporter-node.resources.limits.memory": "200Mi",
"grafana.persistence.enabled": false,
"grafana.persistence.size": "10Gi",
"grafana.persistence.storageClass": "default",
"operator.resources.limits.memory": "500Mi",
"prometheus.persistence.enabled": "false",
"prometheus.persistence.size": "50Gi",
"prometheus.persistence.storageClass": "default",
"prometheus.persistent.useReleaseName": "true",
"prometheus.resources.core.limits.cpu": "1000m",
"prometheus.resources.core.limits.memory": "1500Mi",
"prometheus.resources.core.requests.cpu": "750m",
"prometheus.resources.core.requests.memory": "750Mi",
"prometheus.retention": "12h",
},
version: "0.1.0",
},
});
// Create a new rancher2 Cluster Sync for foo-custom cluster
const foo_customClusterSync = new rancher2.ClusterSync("foo-customClusterSync", {
clusterId: foo_customCluster.id,
waitMonitoring: foo_customCluster.enableClusterMonitoring,
});
// Create a new rancher2 Namespace
const foo_istio = new rancher2.Namespace("foo-istio", {
projectId: foo_customClusterSync.systemProjectId,
description: "istio namespace",
});
// Create a new rancher2 App deploying istio (should wait until monitoring is up and running)
const istio = new rancher2.App("istio", {
catalogName: "system-library",
description: "Terraform app acceptance test",
projectId: foo_istio.projectId,
templateName: "rancher-istio",
templateVersion: "0.1.1",
targetNamespace: foo_istio.id,
answers: {
"certmanager.enabled": false,
enableCRDs: true,
"galley.enabled": true,
"gateways.enabled": false,
"gateways.istio-ingressgateway.resources.limits.cpu": "2000m",
"gateways.istio-ingressgateway.resources.limits.memory": "1024Mi",
"gateways.istio-ingressgateway.resources.requests.cpu": "100m",
"gateways.istio-ingressgateway.resources.requests.memory": "128Mi",
"gateways.istio-ingressgateway.type": "NodePort",
"global.monitoring.type": "cluster-monitoring",
"global.rancher.clusterId": foo_customClusterSync.clusterId,
"istio_cni.enabled": "false",
"istiocoredns.enabled": "false",
"kiali.enabled": "true",
"mixer.enabled": "true",
"mixer.policy.enabled": "true",
"mixer.policy.resources.limits.cpu": "4800m",
"mixer.policy.resources.limits.memory": "4096Mi",
"mixer.policy.resources.requests.cpu": "1000m",
"mixer.policy.resources.requests.memory": "1024Mi",
"mixer.telemetry.resources.limits.cpu": "4800m",
"mixer.telemetry.resources.limits.memory": "4096Mi",
"mixer.telemetry.resources.requests.cpu": "1000m",
"mixer.telemetry.resources.requests.memory": "1024Mi",
"mtls.enabled": false,
"nodeagent.enabled": false,
"pilot.enabled": true,
"pilot.resources.limits.cpu": "1000m",
"pilot.resources.limits.memory": "4096Mi",
"pilot.resources.requests.cpu": "500m",
"pilot.resources.requests.memory": "2048Mi",
"pilot.traceSampling": "1",
"security.enabled": true,
"sidecarInjectorWebhook.enabled": true,
"tracing.enabled": true,
"tracing.jaeger.resources.limits.cpu": "500m",
"tracing.jaeger.resources.limits.memory": "1024Mi",
"tracing.jaeger.resources.requests.cpu": "100m",
"tracing.jaeger.resources.requests.memory": "100Mi",
},
});
resources:
# Create a new rancher2 RKE Cluster
foo-customCluster:
type: rancher2:Cluster
properties:
description: Foo rancher2 custom cluster
rkeConfig:
network:
plugin: canal
enableClusterMonitoring: true
clusterMonitoringInput:
answers:
exporter-kubelets.https: true
exporter-node.enabled: true
exporter-node.ports.metrics.port: 9796
exporter-node.resources.limits.cpu: 200m
exporter-node.resources.limits.memory: 200Mi
grafana.persistence.enabled: false
grafana.persistence.size: 10Gi
grafana.persistence.storageClass: default
operator.resources.limits.memory: 500Mi
prometheus.persistence.enabled: 'false'
prometheus.persistence.size: 50Gi
prometheus.persistence.storageClass: default
prometheus.persistent.useReleaseName: 'true'
prometheus.resources.core.limits.cpu: 1000m
prometheus.resources.core.limits.memory: 1500Mi
prometheus.resources.core.requests.cpu: 750m
prometheus.resources.core.requests.memory: 750Mi
prometheus.retention: 12h
version: 0.1.0
# Create a new rancher2 Cluster Sync for foo-custom cluster
foo-customClusterSync:
type: rancher2:ClusterSync
properties:
clusterId: ${["foo-customCluster"].id}
waitMonitoring: ${["foo-customCluster"].enableClusterMonitoring}
# Create a new rancher2 Namespace
foo-istio:
type: rancher2:Namespace
properties:
projectId: ${["foo-customClusterSync"].systemProjectId}
description: istio namespace
# Create a new rancher2 App deploying istio (should wait until monitoring is up and running)
istio:
type: rancher2:App
properties:
catalogName: system-library
description: Terraform app acceptance test
projectId: ${["foo-istio"].projectId}
templateName: rancher-istio
templateVersion: 0.1.1
targetNamespace: ${["foo-istio"].id}
answers:
certmanager.enabled: false
enableCRDs: true
galley.enabled: true
gateways.enabled: false
gateways.istio-ingressgateway.resources.limits.cpu: 2000m
gateways.istio-ingressgateway.resources.limits.memory: 1024Mi
gateways.istio-ingressgateway.resources.requests.cpu: 100m
gateways.istio-ingressgateway.resources.requests.memory: 128Mi
gateways.istio-ingressgateway.type: NodePort
global.monitoring.type: cluster-monitoring
global.rancher.clusterId: ${["foo-customClusterSync"].clusterId}
istio_cni.enabled: 'false'
istiocoredns.enabled: 'false'
kiali.enabled: 'true'
mixer.enabled: 'true'
mixer.policy.enabled: 'true'
mixer.policy.resources.limits.cpu: 4800m
mixer.policy.resources.limits.memory: 4096Mi
mixer.policy.resources.requests.cpu: 1000m
mixer.policy.resources.requests.memory: 1024Mi
mixer.telemetry.resources.limits.cpu: 4800m
mixer.telemetry.resources.limits.memory: 4096Mi
mixer.telemetry.resources.requests.cpu: 1000m
mixer.telemetry.resources.requests.memory: 1024Mi
mtls.enabled: false
nodeagent.enabled: false
pilot.enabled: true
pilot.resources.limits.cpu: 1000m
pilot.resources.limits.memory: 4096Mi
pilot.resources.requests.cpu: 500m
pilot.resources.requests.memory: 2048Mi
pilot.traceSampling: '1'
security.enabled: true
sidecarInjectorWebhook.enabled: true
tracing.enabled: true
tracing.jaeger.resources.limits.cpu: 500m
tracing.jaeger.resources.limits.memory: 1024Mi
tracing.jaeger.resources.requests.cpu: 100m
tracing.jaeger.resources.requests.memory: 100Mi
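Because the rancher2.App above consumes outputs of the ClusterSync resource, Pulumi only creates it after monitoring is reported ready. The sync resource also exposes the cluster's Default project, which is usually a better home than the System project for user-facing workloads; a short TypeScript sketch, where defaultProjectId is an assumption taken from the provider schema and <CLUSTER_ID> is a placeholder:
import * as rancher2 from "@pulumi/rancher2";

// Sync against an existing cluster (placeholder ID) and wait for monitoring.
const sync = new rancher2.ClusterSync("foo-sync", {
    clusterId: "<CLUSTER_ID>",
    waitMonitoring: true,
});

// Namespace placed in the cluster's Default project instead of the System project.
const fooApps = new rancher2.Namespace("foo-apps", {
    projectId: sync.defaultProjectId,
    description: "namespace for user-facing apps",
});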
Creating a Rancher v2 RKE cluster and assigning a node pool (overlapping planes)
using System.Collections.Generic;
using Pulumi;
using Rancher2 = Pulumi.Rancher2;
return await Deployment.RunAsync(() =>
{
// Create a new rancher2 RKE Cluster
var foo_custom = new Rancher2.Cluster("foo-custom", new()
{
Description = "Foo rancher2 custom cluster",
RkeConfig = new Rancher2.Inputs.ClusterRkeConfigArgs
{
Network = new Rancher2.Inputs.ClusterRkeConfigNetworkArgs
{
Plugin = "canal",
},
},
});
// Create a new rancher2 Node Template
var fooNodeTemplate = new Rancher2.NodeTemplate("fooNodeTemplate", new()
{
Description = "foo test",
Amazonec2Config = new Rancher2.Inputs.NodeTemplateAmazonec2ConfigArgs
{
AccessKey = "<AWS_ACCESS_KEY>",
SecretKey = "<AWS_SECRET_KEY>",
Ami = "<AMI_ID>",
Region = "<REGION>",
SecurityGroups = new[]
{
"<AWS_SECURITY_GROUP>",
},
SubnetId = "<SUBNET_ID>",
VpcId = "<VPC_ID>",
Zone = "<ZONE>",
},
});
// Create a new rancher2 Node Pool
var fooNodePool = new Rancher2.NodePool("fooNodePool", new()
{
ClusterId = foo_custom.Id,
HostnamePrefix = "foo-cluster-0",
NodeTemplateId = fooNodeTemplate.Id,
Quantity = 3,
ControlPlane = true,
Etcd = true,
Worker = true,
});
});
package main
import (
"github.com/pulumi/pulumi-rancher2/sdk/v3/go/rancher2"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
foo_custom, err := rancher2.NewCluster(ctx, "foo-custom", &rancher2.ClusterArgs{
Description: pulumi.String("Foo rancher2 custom cluster"),
RkeConfig: &rancher2.ClusterRkeConfigArgs{
Network: &rancher2.ClusterRkeConfigNetworkArgs{
Plugin: pulumi.String("canal"),
},
},
})
if err != nil {
return err
}
fooNodeTemplate, err := rancher2.NewNodeTemplate(ctx, "fooNodeTemplate", &rancher2.NodeTemplateArgs{
Description: pulumi.String("foo test"),
Amazonec2Config: &rancher2.NodeTemplateAmazonec2ConfigArgs{
AccessKey: pulumi.String("<AWS_ACCESS_KEY>"),
SecretKey: pulumi.String("<AWS_SECRET_KEY>"),
Ami: pulumi.String("<AMI_ID>"),
Region: pulumi.String("<REGION>"),
SecurityGroups: pulumi.StringArray{
pulumi.String("<AWS_SECURITY_GROUP>"),
},
SubnetId: pulumi.String("<SUBNET_ID>"),
VpcId: pulumi.String("<VPC_ID>"),
Zone: pulumi.String("<ZONE>"),
},
})
if err != nil {
return err
}
_, err = rancher2.NewNodePool(ctx, "fooNodePool", &rancher2.NodePoolArgs{
ClusterId: foo_custom.ID(),
HostnamePrefix: pulumi.String("foo-cluster-0"),
NodeTemplateId: fooNodeTemplate.ID(),
Quantity: pulumi.Int(3),
ControlPlane: pulumi.Bool(true),
Etcd: pulumi.Bool(true),
Worker: pulumi.Bool(true),
})
if err != nil {
return err
}
return nil
})
}
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.rancher2.Cluster;
import com.pulumi.rancher2.ClusterArgs;
import com.pulumi.rancher2.inputs.ClusterRkeConfigArgs;
import com.pulumi.rancher2.inputs.ClusterRkeConfigNetworkArgs;
import com.pulumi.rancher2.NodeTemplate;
import com.pulumi.rancher2.NodeTemplateArgs;
import com.pulumi.rancher2.inputs.NodeTemplateAmazonec2ConfigArgs;
import com.pulumi.rancher2.NodePool;
import com.pulumi.rancher2.NodePoolArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
var foo_custom = new Cluster("foo-custom", ClusterArgs.builder()
.description("Foo rancher2 custom cluster")
.rkeConfig(ClusterRkeConfigArgs.builder()
.network(ClusterRkeConfigNetworkArgs.builder()
.plugin("canal")
.build())
.build())
.build());
var fooNodeTemplate = new NodeTemplate("fooNodeTemplate", NodeTemplateArgs.builder()
.description("foo test")
.amazonec2Config(NodeTemplateAmazonec2ConfigArgs.builder()
.accessKey("<AWS_ACCESS_KEY>")
.secretKey("<AWS_SECRET_KEY>")
.ami("<AMI_ID>")
.region("<REGION>")
.securityGroups("<AWS_SECURITY_GROUP>")
.subnetId("<SUBNET_ID>")
.vpcId("<VPC_ID>")
.zone("<ZONE>")
.build())
.build());
var fooNodePool = new NodePool("fooNodePool", NodePoolArgs.builder()
.clusterId(foo_custom.id())
.hostnamePrefix("foo-cluster-0")
.nodeTemplateId(fooNodeTemplate.id())
.quantity(3)
.controlPlane(true)
.etcd(true)
.worker(true)
.build());
}
}
import pulumi
import pulumi_rancher2 as rancher2
# Create a new rancher2 RKE Cluster
foo_custom = rancher2.Cluster("foo-custom",
description="Foo rancher2 custom cluster",
rke_config=rancher2.ClusterRkeConfigArgs(
network=rancher2.ClusterRkeConfigNetworkArgs(
plugin="canal",
),
))
# Create a new rancher2 Node Template
foo_node_template = rancher2.NodeTemplate("fooNodeTemplate",
description="foo test",
amazonec2_config=rancher2.NodeTemplateAmazonec2ConfigArgs(
access_key="<AWS_ACCESS_KEY>",
secret_key="<AWS_SECRET_KEY>",
ami="<AMI_ID>",
region="<REGION>",
security_groups=["<AWS_SECURITY_GROUP>"],
subnet_id="<SUBNET_ID>",
vpc_id="<VPC_ID>",
zone="<ZONE>",
))
# Create a new rancher2 Node Pool
foo_node_pool = rancher2.NodePool("fooNodePool",
cluster_id=foo_custom.id,
hostname_prefix="foo-cluster-0",
node_template_id=foo_node_template.id,
quantity=3,
control_plane=True,
etcd=True,
worker=True)
import * as pulumi from "@pulumi/pulumi";
import * as rancher2 from "@pulumi/rancher2";
// Create a new rancher2 RKE Cluster
const foo_custom = new rancher2.Cluster("foo-custom", {
description: "Foo rancher2 custom cluster",
rkeConfig: {
network: {
plugin: "canal",
},
},
});
// Create a new rancher2 Node Template
const fooNodeTemplate = new rancher2.NodeTemplate("fooNodeTemplate", {
description: "foo test",
amazonec2Config: {
accessKey: "<AWS_ACCESS_KEY>",
secretKey: "<AWS_SECRET_KEY>",
ami: "<AMI_ID>",
region: "<REGION>",
securityGroups: ["<AWS_SECURITY_GROUP>"],
subnetId: "<SUBNET_ID>",
vpcId: "<VPC_ID>",
zone: "<ZONE>",
},
});
// Create a new rancher2 Node Pool
const fooNodePool = new rancher2.NodePool("fooNodePool", {
clusterId: foo_custom.id,
hostnamePrefix: "foo-cluster-0",
nodeTemplateId: fooNodeTemplate.id,
quantity: 3,
controlPlane: true,
etcd: true,
worker: true,
});
resources:
# Create a new rancher2 RKE Cluster
foo-custom:
type: rancher2:Cluster
properties:
description: Foo rancher2 custom cluster
rkeConfig:
network:
plugin: canal
# Create a new rancher2 Node Template
fooNodeTemplate:
type: rancher2:NodeTemplate
properties:
description: foo test
amazonec2Config:
accessKey: <AWS_ACCESS_KEY>
secretKey: <AWS_SECRET_KEY>
ami: <AMI_ID>
region: <REGION>
securityGroups:
- <AWS_SECURITY_GROUP>
subnetId: <SUBNET_ID>
vpcId: <VPC_ID>
zone: <ZONE>
# Create a new rancher2 Node Pool
fooNodePool:
type: rancher2:NodePool
properties:
clusterId: ${["foo-custom"].id}
hostnamePrefix: foo-cluster-0
nodeTemplateId: ${fooNodeTemplate.id}
quantity: 3
controlPlane: true
etcd: true
worker: true
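The pool above overlaps the etcd, control plane, and worker roles on the same nodes, which keeps test clusters small. For a more production-like layout the roles can be split into separate pools against the same cluster and node template; a TypeScript sketch, with <CLUSTER_ID> and <NODE_TEMPLATE_ID> as placeholders for the IDs created above:
import * as rancher2 from "@pulumi/rancher2";

const clusterId = "<CLUSTER_ID>";
const nodeTemplateId = "<NODE_TEMPLATE_ID>";

// Dedicated etcd / control plane nodes.
const fooMasterPool = new rancher2.NodePool("fooMasterPool", {
    clusterId: clusterId,
    hostnamePrefix: "foo-master-0",
    nodeTemplateId: nodeTemplateId,
    quantity: 3,
    controlPlane: true,
    etcd: true,
});

// Dedicated worker nodes.
const fooWorkerPool = new rancher2.NodePool("fooWorkerPool", {
    clusterId: clusterId,
    hostnamePrefix: "foo-worker-0",
    nodeTemplateId: nodeTemplateId,
    quantity: 3,
    worker: true,
});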
Creating a Rancher v2 RKE cluster from a cluster template. For Rancher v2.3.x or above.
using System.Collections.Generic;
using Pulumi;
using Rancher2 = Pulumi.Rancher2;
return await Deployment.RunAsync(() =>
{
// Create a new rancher2 cluster template
var fooClusterTemplate = new Rancher2.ClusterTemplate("fooClusterTemplate", new()
{
Members = new[]
{
new Rancher2.Inputs.ClusterTemplateMemberArgs
{
AccessType = "owner",
UserPrincipalId = "local://user-XXXXX",
},
},
TemplateRevisions = new[]
{
new Rancher2.Inputs.ClusterTemplateTemplateRevisionArgs
{
Name = "V1",
ClusterConfig = new Rancher2.Inputs.ClusterTemplateTemplateRevisionClusterConfigArgs
{
RkeConfig = new Rancher2.Inputs.ClusterTemplateTemplateRevisionClusterConfigRkeConfigArgs
{
Network = new Rancher2.Inputs.ClusterTemplateTemplateRevisionClusterConfigRkeConfigNetworkArgs
{
Plugin = "canal",
},
Services = new Rancher2.Inputs.ClusterTemplateTemplateRevisionClusterConfigRkeConfigServicesArgs
{
Etcd = new Rancher2.Inputs.ClusterTemplateTemplateRevisionClusterConfigRkeConfigServicesEtcdArgs
{
Creation = "6h",
Retention = "24h",
},
},
},
},
Default = true,
},
},
Description = "Test cluster template v2",
});
// Create a new rancher2 RKE Cluster from template
var fooCluster = new Rancher2.Cluster("fooCluster", new()
{
ClusterTemplateId = fooClusterTemplate.Id,
ClusterTemplateRevisionId = fooClusterTemplate.TemplateRevisions.Apply(templateRevisions => templateRevisions[0].Id),
});
});
package main
import (
"github.com/pulumi/pulumi-rancher2/sdk/v3/go/rancher2"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
fooClusterTemplate, err := rancher2.NewClusterTemplate(ctx, "fooClusterTemplate", &rancher2.ClusterTemplateArgs{
Members: rancher2.ClusterTemplateMemberArray{
&rancher2.ClusterTemplateMemberArgs{
AccessType: pulumi.String("owner"),
UserPrincipalId: pulumi.String("local://user-XXXXX"),
},
},
TemplateRevisions: rancher2.ClusterTemplateTemplateRevisionArray{
&rancher2.ClusterTemplateTemplateRevisionArgs{
Name: pulumi.String("V1"),
ClusterConfig: &rancher2.ClusterTemplateTemplateRevisionClusterConfigArgs{
RkeConfig: &rancher2.ClusterTemplateTemplateRevisionClusterConfigRkeConfigArgs{
Network: &rancher2.ClusterTemplateTemplateRevisionClusterConfigRkeConfigNetworkArgs{
Plugin: pulumi.String("canal"),
},
Services: &rancher2.ClusterTemplateTemplateRevisionClusterConfigRkeConfigServicesArgs{
Etcd: &rancher2.ClusterTemplateTemplateRevisionClusterConfigRkeConfigServicesEtcdArgs{
Creation: pulumi.String("6h"),
Retention: pulumi.String("24h"),
},
},
},
},
Default: pulumi.Bool(true),
},
},
Description: pulumi.String("Test cluster template v2"),
})
if err != nil {
return err
}
_, err = rancher2.NewCluster(ctx, "fooCluster", &rancher2.ClusterArgs{
ClusterTemplateId: fooClusterTemplate.ID(),
ClusterTemplateRevisionId: fooClusterTemplate.TemplateRevisions.ApplyT(func(templateRevisions []rancher2.ClusterTemplateTemplateRevision) (*string, error) {
return &templateRevisions[0].Id, nil
}).(pulumi.StringPtrOutput),
})
if err != nil {
return err
}
return nil
})
}
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.rancher2.ClusterTemplate;
import com.pulumi.rancher2.ClusterTemplateArgs;
import com.pulumi.rancher2.inputs.ClusterTemplateMemberArgs;
import com.pulumi.rancher2.inputs.ClusterTemplateTemplateRevisionArgs;
import com.pulumi.rancher2.inputs.ClusterTemplateTemplateRevisionClusterConfigArgs;
import com.pulumi.rancher2.inputs.ClusterTemplateTemplateRevisionClusterConfigRkeConfigArgs;
import com.pulumi.rancher2.inputs.ClusterTemplateTemplateRevisionClusterConfigRkeConfigNetworkArgs;
import com.pulumi.rancher2.inputs.ClusterTemplateTemplateRevisionClusterConfigRkeConfigServicesArgs;
import com.pulumi.rancher2.inputs.ClusterTemplateTemplateRevisionClusterConfigRkeConfigServicesEtcdArgs;
import com.pulumi.rancher2.Cluster;
import com.pulumi.rancher2.ClusterArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
var fooClusterTemplate = new ClusterTemplate("fooClusterTemplate", ClusterTemplateArgs.builder()
.members(ClusterTemplateMemberArgs.builder()
.accessType("owner")
.userPrincipalId("local://user-XXXXX")
.build())
.templateRevisions(ClusterTemplateTemplateRevisionArgs.builder()
.name("V1")
.clusterConfig(ClusterTemplateTemplateRevisionClusterConfigArgs.builder()
.rkeConfig(ClusterTemplateTemplateRevisionClusterConfigRkeConfigArgs.builder()
.network(ClusterTemplateTemplateRevisionClusterConfigRkeConfigNetworkArgs.builder()
.plugin("canal")
.build())
.services(ClusterTemplateTemplateRevisionClusterConfigRkeConfigServicesArgs.builder()
.etcd(ClusterTemplateTemplateRevisionClusterConfigRkeConfigServicesEtcdArgs.builder()
.creation("6h")
.retention("24h")
.build())
.build())
.build())
.build())
.default_(true)
.build())
.description("Test cluster template v2")
.build());
var fooCluster = new Cluster("fooCluster", ClusterArgs.builder()
.clusterTemplateId(fooClusterTemplate.id())
.clusterTemplateRevisionId(fooClusterTemplate.templateRevisions().applyValue(templateRevisions -> templateRevisions.get(0).id()))
.build());
}
}
import pulumi
import pulumi_rancher2 as rancher2
# Create a new rancher2 cluster template
foo_cluster_template = rancher2.ClusterTemplate("fooClusterTemplate",
members=[rancher2.ClusterTemplateMemberArgs(
access_type="owner",
user_principal_id="local://user-XXXXX",
)],
template_revisions=[rancher2.ClusterTemplateTemplateRevisionArgs(
name="V1",
cluster_config=rancher2.ClusterTemplateTemplateRevisionClusterConfigArgs(
rke_config=rancher2.ClusterTemplateTemplateRevisionClusterConfigRkeConfigArgs(
network=rancher2.ClusterTemplateTemplateRevisionClusterConfigRkeConfigNetworkArgs(
plugin="canal",
),
services=rancher2.ClusterTemplateTemplateRevisionClusterConfigRkeConfigServicesArgs(
etcd=rancher2.ClusterTemplateTemplateRevisionClusterConfigRkeConfigServicesEtcdArgs(
creation="6h",
retention="24h",
),
),
),
),
default=True,
)],
description="Test cluster template v2")
# Create a new rancher2 RKE Cluster from template
foo_cluster = rancher2.Cluster("fooCluster",
cluster_template_id=foo_cluster_template.id,
cluster_template_revision_id=foo_cluster_template.template_revisions[0].id)
import * as pulumi from "@pulumi/pulumi";
import * as rancher2 from "@pulumi/rancher2";
// Create a new rancher2 cluster template
const fooClusterTemplate = new rancher2.ClusterTemplate("fooClusterTemplate", {
members: [{
accessType: "owner",
userPrincipalId: "local://user-XXXXX",
}],
templateRevisions: [{
name: "V1",
clusterConfig: {
rkeConfig: {
network: {
plugin: "canal",
},
services: {
etcd: {
creation: "6h",
retention: "24h",
},
},
},
},
"default": true,
}],
description: "Test cluster template v2",
});
// Create a new rancher2 RKE Cluster from template
const fooCluster = new rancher2.Cluster("fooCluster", {
clusterTemplateId: fooClusterTemplate.id,
clusterTemplateRevisionId: fooClusterTemplate.templateRevisions.apply(templateRevisions => templateRevisions[0].id),
});
resources:
# Create a new rancher2 cluster template
fooClusterTemplate:
type: rancher2:ClusterTemplate
properties:
members:
- accessType: owner
userPrincipalId: local://user-XXXXX
templateRevisions:
- name: V1
clusterConfig:
rkeConfig:
network:
plugin: canal
services:
etcd:
creation: 6h
retention: 24h
default: true
description: Test cluster template v2
# Create a new rancher2 RKE Cluster from template
fooCluster:
type: rancher2:Cluster
properties:
clusterTemplateId: ${fooClusterTemplate.id}
clusterTemplateRevisionId: ${fooClusterTemplate.templateRevisions[0].id}
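When a template gains more revisions, a cluster can be pointed at a revision by name instead of by list position. A TypeScript sketch, assuming a second revision named V2 is added to the template; only properties already shown above (name, id) are used:
import * as rancher2 from "@pulumi/rancher2";

// Template with two revisions; "V2" adds the etcd snapshot settings.
const fooClusterTemplate = new rancher2.ClusterTemplate("fooClusterTemplate", {
    description: "Test cluster template v2",
    templateRevisions: [
        {
            name: "V1",
            "default": true,
            clusterConfig: {
                rkeConfig: {
                    network: { plugin: "canal" },
                },
            },
        },
        {
            name: "V2",
            clusterConfig: {
                rkeConfig: {
                    network: { plugin: "canal" },
                    services: { etcd: { creation: "6h", retention: "24h" } },
                },
            },
        },
    ],
});

// Pick the revision ID by name rather than by index.
const v2RevisionId = fooClusterTemplate.templateRevisions.apply(revs => {
    const rev = revs.find(r => r.name === "V2");
    if (!rev || !rev.id) {
        throw new Error("template revision V2 not found");
    }
    return rev.id;
});

const fooClusterV2 = new rancher2.Cluster("fooClusterV2", {
    clusterTemplateId: fooClusterTemplate.id,
    clusterTemplateRevisionId: v2RevisionId,
});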
Creating a Rancher v2 RKE cluster with an upgrade strategy. For Rancher v2.4.x or above.
using System.Collections.Generic;
using Pulumi;
using Rancher2 = Pulumi.Rancher2;
return await Deployment.RunAsync(() =>
{
var foo = new Rancher2.Cluster("foo", new()
{
Description = "Terraform custom cluster",
RkeConfig = new Rancher2.Inputs.ClusterRkeConfigArgs
{
Network = new Rancher2.Inputs.ClusterRkeConfigNetworkArgs
{
Plugin = "canal",
},
Services = new Rancher2.Inputs.ClusterRkeConfigServicesArgs
{
Etcd = new Rancher2.Inputs.ClusterRkeConfigServicesEtcdArgs
{
Creation = "6h",
Retention = "24h",
},
KubeApi = new Rancher2.Inputs.ClusterRkeConfigServicesKubeApiArgs
{
AuditLog = new Rancher2.Inputs.ClusterRkeConfigServicesKubeApiAuditLogArgs
{
Configuration = new Rancher2.Inputs.ClusterRkeConfigServicesKubeApiAuditLogConfigurationArgs
{
Format = "json",
MaxAge = 5,
MaxBackup = 5,
MaxSize = 100,
Path = "-",
Policy = @"apiVersion: audit.k8s.io/v1
kind: Policy
metadata:
creationTimestamp: null
omitStages:
- RequestReceived
rules:
- level: RequestResponse
resources:
- resources:
- pods
",
},
Enabled = true,
},
},
},
UpgradeStrategy = new Rancher2.Inputs.ClusterRkeConfigUpgradeStrategyArgs
{
Drain = true,
MaxUnavailableWorker = "20%",
},
},
});
});
package main
import (
"fmt"
"github.com/pulumi/pulumi-rancher2/sdk/v3/go/rancher2"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
_, err := rancher2.NewCluster(ctx, "foo", &rancher2.ClusterArgs{
Description: pulumi.String("Terraform custom cluster"),
RkeConfig: &rancher2.ClusterRkeConfigArgs{
Network: &rancher2.ClusterRkeConfigNetworkArgs{
Plugin: pulumi.String("canal"),
},
Services: &rancher2.ClusterRkeConfigServicesArgs{
Etcd: &rancher2.ClusterRkeConfigServicesEtcdArgs{
Creation: pulumi.String("6h"),
Retention: pulumi.String("24h"),
},
KubeApi: &rancher2.ClusterRkeConfigServicesKubeApiArgs{
AuditLog: &rancher2.ClusterRkeConfigServicesKubeApiAuditLogArgs{
Configuration: &rancher2.ClusterRkeConfigServicesKubeApiAuditLogConfigurationArgs{
Format: pulumi.String("json"),
MaxAge: pulumi.Int(5),
MaxBackup: pulumi.Int(5),
MaxSize: pulumi.Int(100),
Path: pulumi.String("-"),
Policy: pulumi.String(`apiVersion: audit.k8s.io/v1
kind: Policy
metadata:
creationTimestamp: null
omitStages:
- RequestReceived
rules:
- level: RequestResponse
resources:
- resources:
- pods
`),
},
Enabled: pulumi.Bool(true),
},
},
},
UpgradeStrategy: &rancher2.ClusterRkeConfigUpgradeStrategyArgs{
Drain: pulumi.Bool(true),
MaxUnavailableWorker: pulumi.String("20%"),
},
},
})
if err != nil {
return err
}
return nil
})
}
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.rancher2.Cluster;
import com.pulumi.rancher2.ClusterArgs;
import com.pulumi.rancher2.inputs.ClusterRkeConfigArgs;
import com.pulumi.rancher2.inputs.ClusterRkeConfigNetworkArgs;
import com.pulumi.rancher2.inputs.ClusterRkeConfigServicesArgs;
import com.pulumi.rancher2.inputs.ClusterRkeConfigServicesEtcdArgs;
import com.pulumi.rancher2.inputs.ClusterRkeConfigServicesKubeApiArgs;
import com.pulumi.rancher2.inputs.ClusterRkeConfigServicesKubeApiAuditLogArgs;
import com.pulumi.rancher2.inputs.ClusterRkeConfigServicesKubeApiAuditLogConfigurationArgs;
import com.pulumi.rancher2.inputs.ClusterRkeConfigUpgradeStrategyArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
var foo = new Cluster("foo", ClusterArgs.builder()
.description("Terraform custom cluster")
.rkeConfig(ClusterRkeConfigArgs.builder()
.network(ClusterRkeConfigNetworkArgs.builder()
.plugin("canal")
.build())
.services(ClusterRkeConfigServicesArgs.builder()
.etcd(ClusterRkeConfigServicesEtcdArgs.builder()
.creation("6h")
.retention("24h")
.build())
.kubeApi(ClusterRkeConfigServicesKubeApiArgs.builder()
.auditLog(ClusterRkeConfigServicesKubeApiAuditLogArgs.builder()
.configuration(ClusterRkeConfigServicesKubeApiAuditLogConfigurationArgs.builder()
.format("json")
.maxAge(5)
.maxBackup(5)
.maxSize(100)
.path("-")
.policy("""
apiVersion: audit.k8s.io/v1
kind: Policy
metadata:
creationTimestamp: null
omitStages:
- RequestReceived
rules:
- level: RequestResponse
resources:
- resources:
- pods
""")
.build())
.enabled(true)
.build())
.build())
.build())
.upgradeStrategy(ClusterRkeConfigUpgradeStrategyArgs.builder()
.drain(true)
.maxUnavailableWorker("20%")
.build())
.build())
.build());
}
}
import pulumi
import pulumi_rancher2 as rancher2
foo = rancher2.Cluster("foo",
description="Terraform custom cluster",
rke_config=rancher2.ClusterRkeConfigArgs(
network=rancher2.ClusterRkeConfigNetworkArgs(
plugin="canal",
),
services=rancher2.ClusterRkeConfigServicesArgs(
etcd=rancher2.ClusterRkeConfigServicesEtcdArgs(
creation="6h",
retention="24h",
),
kube_api=rancher2.ClusterRkeConfigServicesKubeApiArgs(
audit_log=rancher2.ClusterRkeConfigServicesKubeApiAuditLogArgs(
configuration=rancher2.ClusterRkeConfigServicesKubeApiAuditLogConfigurationArgs(
format="json",
max_age=5,
max_backup=5,
max_size=100,
path="-",
policy="""apiVersion: audit.k8s.io/v1
kind: Policy
metadata:
creationTimestamp: null
omitStages:
- RequestReceived
rules:
- level: RequestResponse
resources:
- resources:
- pods
""",
),
enabled=True,
),
),
),
upgrade_strategy=rancher2.ClusterRkeConfigUpgradeStrategyArgs(
drain=True,
max_unavailable_worker="20%",
),
))
import * as pulumi from "@pulumi/pulumi";
import * as rancher2 from "@pulumi/rancher2";
const foo = new rancher2.Cluster("foo", {
description: "Terraform custom cluster",
rkeConfig: {
network: {
plugin: "canal",
},
services: {
etcd: {
creation: "6h",
retention: "24h",
},
kubeApi: {
auditLog: {
configuration: {
format: "json",
maxAge: 5,
maxBackup: 5,
maxSize: 100,
path: "-",
policy: `apiVersion: audit.k8s.io/v1
kind: Policy
metadata:
creationTimestamp: null
omitStages:
- RequestReceived
rules:
- level: RequestResponse
resources:
- resources:
- pods
`,
},
enabled: true,
},
},
},
upgradeStrategy: {
drain: true,
maxUnavailableWorker: "20%",
},
},
});
resources:
foo:
type: rancher2:Cluster
properties:
description: Terraform custom cluster
rkeConfig:
network:
plugin: canal
services:
etcd:
creation: 6h
retention: 24h
kubeApi:
auditLog:
configuration:
format: json
maxAge: 5
maxBackup: 5
maxSize: 100
path: '-'
policy: |+
apiVersion: audit.k8s.io/v1
kind: Policy
metadata:
creationTimestamp: null
omitStages:
- RequestReceived
rules:
- level: RequestResponse
resources:
- resources:
- pods
enabled: true
upgradeStrategy:
drain: true
maxUnavailableWorker: 20%
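The upgrade strategy can also control how nodes are drained before being upgraded. A TypeScript sketch, where the drainInput block and its field names (deleteLocalData, ignoreDaemonSets, gracePeriod, timeout) are assumptions taken from the provider schema; verify them against your SDK version:
import * as rancher2 from "@pulumi/rancher2";

const fooDrained = new rancher2.Cluster("foo-drained", {
    description: "Custom cluster with drain options on upgrade",
    rkeConfig: {
        network: {
            plugin: "canal",
        },
        upgradeStrategy: {
            drain: true,
            maxUnavailableWorker: "20%",
            // Assumed drain options, following the rancher2 provider schema.
            drainInput: {
                deleteLocalData: true,
                ignoreDaemonSets: true,
                gracePeriod: 60,
                timeout: 300,
            },
        },
    },
});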
Creating a Rancher v2 RKE cluster with a scheduled cluster scan. For Rancher v2.4.x or above.
using System.Collections.Generic;
using Pulumi;
using Rancher2 = Pulumi.Rancher2;
return await Deployment.RunAsync(() =>
{
var foo = new Rancher2.Cluster("foo", new()
{
Description = "Terraform custom cluster",
RkeConfig = new Rancher2.Inputs.ClusterRkeConfigArgs
{
Network = new Rancher2.Inputs.ClusterRkeConfigNetworkArgs
{
Plugin = "canal",
},
Services = new Rancher2.Inputs.ClusterRkeConfigServicesArgs
{
Etcd = new Rancher2.Inputs.ClusterRkeConfigServicesEtcdArgs
{
Creation = "6h",
Retention = "24h",
},
},
},
ScheduledClusterScan = new Rancher2.Inputs.ClusterScheduledClusterScanArgs
{
Enabled = true,
ScanConfig = new Rancher2.Inputs.ClusterScheduledClusterScanScanConfigArgs
{
CisScanConfig = new Rancher2.Inputs.ClusterScheduledClusterScanScanConfigCisScanConfigArgs
{
DebugMaster = true,
DebugWorker = true,
},
},
ScheduleConfig = new Rancher2.Inputs.ClusterScheduledClusterScanScheduleConfigArgs
{
CronSchedule = "30 * * * *",
Retention = 5,
},
},
});
});
package main
import (
"github.com/pulumi/pulumi-rancher2/sdk/v3/go/rancher2"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
_, err := rancher2.NewCluster(ctx, "foo", &rancher2.ClusterArgs{
Description: pulumi.String("Terraform custom cluster"),
RkeConfig: &rancher2.ClusterRkeConfigArgs{
Network: &rancher2.ClusterRkeConfigNetworkArgs{
Plugin: pulumi.String("canal"),
},
Services: &rancher2.ClusterRkeConfigServicesArgs{
Etcd: &rancher2.ClusterRkeConfigServicesEtcdArgs{
Creation: pulumi.String("6h"),
Retention: pulumi.String("24h"),
},
},
},
ScheduledClusterScan: &rancher2.ClusterScheduledClusterScanArgs{
Enabled: pulumi.Bool(true),
ScanConfig: &rancher2.ClusterScheduledClusterScanScanConfigArgs{
CisScanConfig: &rancher2.ClusterScheduledClusterScanScanConfigCisScanConfigArgs{
DebugMaster: pulumi.Bool(true),
DebugWorker: pulumi.Bool(true),
},
},
ScheduleConfig: &rancher2.ClusterScheduledClusterScanScheduleConfigArgs{
CronSchedule: pulumi.String("30 * * * *"),
Retention: pulumi.Int(5),
},
},
})
if err != nil {
return err
}
return nil
})
}
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.rancher2.Cluster;
import com.pulumi.rancher2.ClusterArgs;
import com.pulumi.rancher2.inputs.ClusterRkeConfigArgs;
import com.pulumi.rancher2.inputs.ClusterRkeConfigNetworkArgs;
import com.pulumi.rancher2.inputs.ClusterRkeConfigServicesArgs;
import com.pulumi.rancher2.inputs.ClusterRkeConfigServicesEtcdArgs;
import com.pulumi.rancher2.inputs.ClusterScheduledClusterScanArgs;
import com.pulumi.rancher2.inputs.ClusterScheduledClusterScanScanConfigArgs;
import com.pulumi.rancher2.inputs.ClusterScheduledClusterScanScanConfigCisScanConfigArgs;
import com.pulumi.rancher2.inputs.ClusterScheduledClusterScanScheduleConfigArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
var foo = new Cluster("foo", ClusterArgs.builder()
.description("Terraform custom cluster")
.rkeConfig(ClusterRkeConfigArgs.builder()
.network(ClusterRkeConfigNetworkArgs.builder()
.plugin("canal")
.build())
.services(ClusterRkeConfigServicesArgs.builder()
.etcd(ClusterRkeConfigServicesEtcdArgs.builder()
.creation("6h")
.retention("24h")
.build())
.build())
.build())
.scheduledClusterScan(ClusterScheduledClusterScanArgs.builder()
.enabled(true)
.scanConfig(ClusterScheduledClusterScanScanConfigArgs.builder()
.cisScanConfig(ClusterScheduledClusterScanScanConfigCisScanConfigArgs.builder()
.debugMaster(true)
.debugWorker(true)
.build())
.build())
.scheduleConfig(ClusterScheduledClusterScanScheduleConfigArgs.builder()
.cronSchedule("30 * * * *")
.retention(5)
.build())
.build())
.build());
}
}
import pulumi
import pulumi_rancher2 as rancher2
foo = rancher2.Cluster("foo",
description="Terraform custom cluster",
rke_config=rancher2.ClusterRkeConfigArgs(
network=rancher2.ClusterRkeConfigNetworkArgs(
plugin="canal",
),
services=rancher2.ClusterRkeConfigServicesArgs(
etcd=rancher2.ClusterRkeConfigServicesEtcdArgs(
creation="6h",
retention="24h",
),
),
),
scheduled_cluster_scan=rancher2.ClusterScheduledClusterScanArgs(
enabled=True,
scan_config=rancher2.ClusterScheduledClusterScanScanConfigArgs(
cis_scan_config=rancher2.ClusterScheduledClusterScanScanConfigCisScanConfigArgs(
debug_master=True,
debug_worker=True,
),
),
schedule_config=rancher2.ClusterScheduledClusterScanScheduleConfigArgs(
cron_schedule="30 * * * *",
retention=5,
),
))
import * as pulumi from "@pulumi/pulumi";
import * as rancher2 from "@pulumi/rancher2";
const foo = new rancher2.Cluster("foo", {
description: "Terraform custom cluster",
rkeConfig: {
network: {
plugin: "canal",
},
services: {
etcd: {
creation: "6h",
retention: "24h",
},
},
},
scheduledClusterScan: {
enabled: true,
scanConfig: {
cisScanConfig: {
debugMaster: true,
debugWorker: true,
},
},
scheduleConfig: {
cronSchedule: "30 * * * *",
retention: 5,
},
},
});
resources:
foo:
type: rancher2:Cluster
properties:
description: Terraform custom cluster
rkeConfig:
network:
plugin: canal
services:
etcd:
creation: 6h
retention: 24h
scheduledClusterScan:
enabled: true
scanConfig:
cisScanConfig:
debugMaster: true
debugWorker: true
scheduleConfig:
cronSchedule: 30 * * * *
retention: 5
Importing EKS cluster to Rancher v2, using eks_config_v2. For Rancher v2.5.x or above.
using System.Collections.Generic;
using Pulumi;
using Rancher2 = Pulumi.Rancher2;
return await Deployment.RunAsync(() =>
{
var fooCloudCredential = new Rancher2.CloudCredential("fooCloudCredential", new()
{
Description = "foo test",
Amazonec2CredentialConfig = new Rancher2.Inputs.CloudCredentialAmazonec2CredentialConfigArgs
{
AccessKey = "<AWS_ACCESS_KEY>",
SecretKey = "<AWS_SECRET_KEY>",
},
});
var fooCluster = new Rancher2.Cluster("fooCluster", new()
{
Description = "Terraform EKS cluster",
EksConfigV2 = new Rancher2.Inputs.ClusterEksConfigV2Args
{
CloudCredentialId = fooCloudCredential.Id,
Name = "<CLUSTER_NAME>",
Region = "<EKS_REGION>",
Imported = true,
},
});
});
package main
import (
"github.com/pulumi/pulumi-rancher2/sdk/v3/go/rancher2"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
fooCloudCredential, err := rancher2.NewCloudCredential(ctx, "fooCloudCredential", &rancher2.CloudCredentialArgs{
Description: pulumi.String("foo test"),
Amazonec2CredentialConfig: &rancher2.CloudCredentialAmazonec2CredentialConfigArgs{
AccessKey: pulumi.String("<AWS_ACCESS_KEY>"),
SecretKey: pulumi.String("<AWS_SECRET_KEY>"),
},
})
if err != nil {
return err
}
_, err = rancher2.NewCluster(ctx, "fooCluster", &rancher2.ClusterArgs{
Description: pulumi.String("Terraform EKS cluster"),
EksConfigV2: &rancher2.ClusterEksConfigV2Args{
CloudCredentialId: fooCloudCredential.ID(),
Name: pulumi.String("<CLUSTER_NAME>"),
Region: pulumi.String("<EKS_REGION>"),
Imported: pulumi.Bool(true),
},
})
if err != nil {
return err
}
return nil
})
}
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.rancher2.CloudCredential;
import com.pulumi.rancher2.CloudCredentialArgs;
import com.pulumi.rancher2.inputs.CloudCredentialAmazonec2CredentialConfigArgs;
import com.pulumi.rancher2.Cluster;
import com.pulumi.rancher2.ClusterArgs;
import com.pulumi.rancher2.inputs.ClusterEksConfigV2Args;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
var fooCloudCredential = new CloudCredential("fooCloudCredential", CloudCredentialArgs.builder()
.description("foo test")
.amazonec2CredentialConfig(CloudCredentialAmazonec2CredentialConfigArgs.builder()
.accessKey("<AWS_ACCESS_KEY>")
.secretKey("<AWS_SECRET_KEY>")
.build())
.build());
var fooCluster = new Cluster("fooCluster", ClusterArgs.builder()
.description("Terraform EKS cluster")
.eksConfigV2(ClusterEksConfigV2Args.builder()
.cloudCredentialId(fooCloudCredential.id())
.name("<CLUSTER_NAME>")
.region("<EKS_REGION>")
.imported(true)
.build())
.build());
}
}
import pulumi
import pulumi_rancher2 as rancher2
foo_cloud_credential = rancher2.CloudCredential("fooCloudCredential",
description="foo test",
amazonec2_credential_config=rancher2.CloudCredentialAmazonec2CredentialConfigArgs(
access_key="<AWS_ACCESS_KEY>",
secret_key="<AWS_SECRET_KEY>",
))
foo_cluster = rancher2.Cluster("fooCluster",
description="Terraform EKS cluster",
eks_config_v2=rancher2.ClusterEksConfigV2Args(
cloud_credential_id=foo_cloud_credential.id,
name="<CLUSTER_NAME>",
region="<EKS_REGION>",
imported=True,
))
import * as pulumi from "@pulumi/pulumi";
import * as rancher2 from "@pulumi/rancher2";
const fooCloudCredential = new rancher2.CloudCredential("fooCloudCredential", {
description: "foo test",
amazonec2CredentialConfig: {
accessKey: "<AWS_ACCESS_KEY>",
secretKey: "<AWS_SECRET_KEY>",
},
});
const fooCluster = new rancher2.Cluster("fooCluster", {
description: "Terraform EKS cluster",
eksConfigV2: {
cloudCredentialId: fooCloudCredential.id,
name: "<CLUSTER_NAME>",
region: "<EKS_REGION>",
imported: true,
},
});
resources:
fooCloudCredential:
type: rancher2:CloudCredential
properties:
description: foo test
amazonec2CredentialConfig:
accessKey: <AWS_ACCESS_KEY>
secretKey: <AWS_SECRET_KEY>
fooCluster:
type: rancher2:Cluster
properties:
description: Terraform EKS cluster
eksConfigV2:
cloudCredentialId: ${fooCloudCredential.id}
name: <CLUSTER_NAME>
region: <EKS_REGION>
imported: true
Creating EKS cluster from Rancher v2, using eks_config_v2. For Rancher v2.5.x or above.
using System.Collections.Generic;
using Pulumi;
using Rancher2 = Pulumi.Rancher2;
return await Deployment.RunAsync(() =>
{
var fooCloudCredential = new Rancher2.CloudCredential("fooCloudCredential", new()
{
Description = "foo test",
Amazonec2CredentialConfig = new Rancher2.Inputs.CloudCredentialAmazonec2CredentialConfigArgs
{
AccessKey = "<AWS_ACCESS_KEY>",
SecretKey = "<AWS_SECRET_KEY>",
},
});
var fooCluster = new Rancher2.Cluster("fooCluster", new()
{
Description = "Terraform EKS cluster",
EksConfigV2 = new Rancher2.Inputs.ClusterEksConfigV2Args
{
CloudCredentialId = fooCloudCredential.Id,
Region = "<EKS_REGION>",
KubernetesVersion = "1.17",
LoggingTypes = new[]
{
"audit",
"api",
},
NodeGroups = new[]
{
new Rancher2.Inputs.ClusterEksConfigV2NodeGroupArgs
{
Name = "node_group1",
InstanceType = "t3.medium",
DesiredSize = 3,
MaxSize = 5,
},
new Rancher2.Inputs.ClusterEksConfigV2NodeGroupArgs
{
Name = "node_group2",
InstanceType = "m5.xlarge",
DesiredSize = 2,
MaxSize = 3,
},
},
PrivateAccess = true,
PublicAccess = false,
},
});
});
package main
import (
"github.com/pulumi/pulumi-rancher2/sdk/v3/go/rancher2"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
fooCloudCredential, err := rancher2.NewCloudCredential(ctx, "fooCloudCredential", &rancher2.CloudCredentialArgs{
Description: pulumi.String("foo test"),
Amazonec2CredentialConfig: &rancher2.CloudCredentialAmazonec2CredentialConfigArgs{
AccessKey: pulumi.String("<AWS_ACCESS_KEY>"),
SecretKey: pulumi.String("<AWS_SECRET_KEY>"),
},
})
if err != nil {
return err
}
_, err = rancher2.NewCluster(ctx, "fooCluster", &rancher2.ClusterArgs{
Description: pulumi.String("Terraform EKS cluster"),
EksConfigV2: &rancher2.ClusterEksConfigV2Args{
CloudCredentialId: fooCloudCredential.ID(),
Region: pulumi.String("<EKS_REGION>"),
KubernetesVersion: pulumi.String("1.17"),
LoggingTypes: pulumi.StringArray{
pulumi.String("audit"),
pulumi.String("api"),
},
NodeGroups: rancher2.ClusterEksConfigV2NodeGroupArray{
&rancher2.ClusterEksConfigV2NodeGroupArgs{
Name: pulumi.String("node_group1"),
InstanceType: pulumi.String("t3.medium"),
DesiredSize: pulumi.Int(3),
MaxSize: pulumi.Int(5),
},
&rancher2.ClusterEksConfigV2NodeGroupArgs{
Name: pulumi.String("node_group2"),
InstanceType: pulumi.String("m5.xlarge"),
DesiredSize: pulumi.Int(2),
MaxSize: pulumi.Int(3),
},
},
PrivateAccess: pulumi.Bool(true),
PublicAccess: pulumi.Bool(false),
},
})
if err != nil {
return err
}
return nil
})
}
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.rancher2.CloudCredential;
import com.pulumi.rancher2.CloudCredentialArgs;
import com.pulumi.rancher2.inputs.CloudCredentialAmazonec2CredentialConfigArgs;
import com.pulumi.rancher2.Cluster;
import com.pulumi.rancher2.ClusterArgs;
import com.pulumi.rancher2.inputs.ClusterEksConfigV2Args;
import com.pulumi.rancher2.inputs.ClusterEksConfigV2NodeGroupArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
var fooCloudCredential = new CloudCredential("fooCloudCredential", CloudCredentialArgs.builder()
.description("foo test")
.amazonec2CredentialConfig(CloudCredentialAmazonec2CredentialConfigArgs.builder()
.accessKey("<AWS_ACCESS_KEY>")
.secretKey("<AWS_SECRET_KEY>")
.build())
.build());
var fooCluster = new Cluster("fooCluster", ClusterArgs.builder()
.description("Terraform EKS cluster")
.eksConfigV2(ClusterEksConfigV2Args.builder()
.cloudCredentialId(fooCloudCredential.id())
.region("<EKS_REGION>")
.kubernetesVersion("1.17")
.loggingTypes(
"audit",
"api")
.nodeGroups(
ClusterEksConfigV2NodeGroupArgs.builder()
.name("node_group1")
.instanceType("t3.medium")
.desiredSize(3)
.maxSize(5)
.build(),
ClusterEksConfigV2NodeGroupArgs.builder()
.name("node_group2")
.instanceType("m5.xlarge")
.desiredSize(2)
.maxSize(3)
.build())
.privateAccess(true)
.publicAccess(false)
.build())
.build());
}
}
import pulumi
import pulumi_rancher2 as rancher2
foo_cloud_credential = rancher2.CloudCredential("fooCloudCredential",
description="foo test",
amazonec2_credential_config=rancher2.CloudCredentialAmazonec2CredentialConfigArgs(
access_key="<AWS_ACCESS_KEY>",
secret_key="<AWS_SECRET_KEY>",
))
foo_cluster = rancher2.Cluster("fooCluster",
description="Terraform EKS cluster",
eks_config_v2=rancher2.ClusterEksConfigV2Args(
cloud_credential_id=foo_cloud_credential.id,
region="<EKS_REGION>",
kubernetes_version="1.17",
logging_types=[
"audit",
"api",
],
node_groups=[
rancher2.ClusterEksConfigV2NodeGroupArgs(
name="node_group1",
instance_type="t3.medium",
desired_size=3,
max_size=5,
),
rancher2.ClusterEksConfigV2NodeGroupArgs(
name="node_group2",
instance_type="m5.xlarge",
desired_size=2,
max_size=3,
),
],
private_access=True,
public_access=False,
))
import * as pulumi from "@pulumi/pulumi";
import * as rancher2 from "@pulumi/rancher2";
const fooCloudCredential = new rancher2.CloudCredential("fooCloudCredential", {
description: "foo test",
amazonec2CredentialConfig: {
accessKey: "<AWS_ACCESS_KEY>",
secretKey: "<AWS_SECRET_KEY>",
},
});
const fooCluster = new rancher2.Cluster("fooCluster", {
description: "Terraform EKS cluster",
eksConfigV2: {
cloudCredentialId: fooCloudCredential.id,
region: "<EKS_REGION>",
kubernetesVersion: "1.17",
loggingTypes: [
"audit",
"api",
],
nodeGroups: [
{
name: "node_group1",
instanceType: "t3.medium",
desiredSize: 3,
maxSize: 5,
},
{
name: "node_group2",
instanceType: "m5.xlarge",
desiredSize: 2,
maxSize: 3,
},
],
privateAccess: true,
publicAccess: false,
},
});
resources:
fooCloudCredential:
type: rancher2:CloudCredential
properties:
description: foo test
amazonec2CredentialConfig:
accessKey: <AWS_ACCESS_KEY>
secretKey: <AWS_SECRET_KEY>
fooCluster:
type: rancher2:Cluster
properties:
description: Terraform EKS cluster
eksConfigV2:
cloudCredentialId: ${fooCloudCredential.id}
region: <EKS_REGION>
kubernetesVersion: '1.17'
loggingTypes:
- audit
- api
nodeGroups:
- name: node_group1
instanceType: t3.medium
desiredSize: 3
maxSize: 5
- name: node_group2
instanceType: m5.xlarge
desiredSize: 2
maxSize: 3
privateAccess: true
publicAccess: false
Creating EKS cluster from Rancher v2, using eks_config_v2 and launch template. For Rancher v2.5.6 or above.
using System.Collections.Generic;
using Pulumi;
using Rancher2 = Pulumi.Rancher2;
return await Deployment.RunAsync(() =>
{
var fooCloudCredential = new Rancher2.CloudCredential("fooCloudCredential", new()
{
Description = "foo test",
Amazonec2CredentialConfig = new Rancher2.Inputs.CloudCredentialAmazonec2CredentialConfigArgs
{
AccessKey = "<AWS_ACCESS_KEY>",
SecretKey = "<AWS_SECRET_KEY>",
},
});
var fooCluster = new Rancher2.Cluster("fooCluster", new()
{
Description = "Terraform EKS cluster",
EksConfigV2 = new Rancher2.Inputs.ClusterEksConfigV2Args
{
CloudCredentialId = fooCloudCredential.Id,
Region = "<EKS_REGION>",
KubernetesVersion = "1.17",
LoggingTypes = new[]
{
"audit",
"api",
},
NodeGroups = new[]
{
new Rancher2.Inputs.ClusterEksConfigV2NodeGroupArgs
{
DesiredSize = 3,
MaxSize = 5,
Name = "node_group1",
LaunchTemplates = new[]
{
new Rancher2.Inputs.ClusterEksConfigV2NodeGroupLaunchTemplateArgs
{
Id = "<EC2_LAUNCH_TEMPLATE_ID>",
Version = 1,
},
},
},
},
PrivateAccess = true,
PublicAccess = true,
},
});
});
package main
import (
"github.com/pulumi/pulumi-rancher2/sdk/v3/go/rancher2"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
fooCloudCredential, err := rancher2.NewCloudCredential(ctx, "fooCloudCredential", &rancher2.CloudCredentialArgs{
Description: pulumi.String("foo test"),
Amazonec2CredentialConfig: &rancher2.CloudCredentialAmazonec2CredentialConfigArgs{
AccessKey: pulumi.String("<AWS_ACCESS_KEY>"),
SecretKey: pulumi.String("<AWS_SECRET_KEY>"),
},
})
if err != nil {
return err
}
_, err = rancher2.NewCluster(ctx, "fooCluster", &rancher2.ClusterArgs{
Description: pulumi.String("Terraform EKS cluster"),
EksConfigV2: &rancher2.ClusterEksConfigV2Args{
CloudCredentialId: fooCloudCredential.ID(),
Region: pulumi.String("<EKS_REGION>"),
KubernetesVersion: pulumi.String("1.17"),
LoggingTypes: pulumi.StringArray{
pulumi.String("audit"),
pulumi.String("api"),
},
NodeGroups: rancher2.ClusterEksConfigV2NodeGroupArray{
&rancher2.ClusterEksConfigV2NodeGroupArgs{
DesiredSize: pulumi.Int(3),
MaxSize: pulumi.Int(5),
Name: pulumi.String("node_group1"),
LaunchTemplates: rancher2.ClusterEksConfigV2NodeGroupLaunchTemplateArray{
&rancher2.ClusterEksConfigV2NodeGroupLaunchTemplateArgs{
Id: pulumi.String("<EC2_LAUNCH_TEMPLATE_ID>"),
Version: pulumi.Int(1),
},
},
},
},
PrivateAccess: pulumi.Bool(true),
PublicAccess: pulumi.Bool(true),
},
})
if err != nil {
return err
}
return nil
})
}
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.rancher2.CloudCredential;
import com.pulumi.rancher2.CloudCredentialArgs;
import com.pulumi.rancher2.inputs.CloudCredentialAmazonec2CredentialConfigArgs;
import com.pulumi.rancher2.Cluster;
import com.pulumi.rancher2.ClusterArgs;
import com.pulumi.rancher2.inputs.ClusterEksConfigV2Args;
import com.pulumi.rancher2.inputs.ClusterEksConfigV2NodeGroupArgs;
import com.pulumi.rancher2.inputs.ClusterEksConfigV2NodeGroupLaunchTemplateArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
var fooCloudCredential = new CloudCredential("fooCloudCredential", CloudCredentialArgs.builder()
.description("foo test")
.amazonec2CredentialConfig(CloudCredentialAmazonec2CredentialConfigArgs.builder()
.accessKey("<AWS_ACCESS_KEY>")
.secretKey("<AWS_SECRET_KEY>")
.build())
.build());
var fooCluster = new Cluster("fooCluster", ClusterArgs.builder()
.description("Terraform EKS cluster")
.eksConfigV2(ClusterEksConfigV2Args.builder()
.cloudCredentialId(fooCloudCredential.id())
.region("<EKS_REGION>")
.kubernetesVersion("1.17")
.loggingTypes(
"audit",
"api")
.nodeGroups(ClusterEksConfigV2NodeGroupArgs.builder()
.desiredSize(3)
.maxSize(5)
.name("node_group1")
.launchTemplates(ClusterEksConfigV2NodeGroupLaunchTemplateArgs.builder()
.id("<EC2_LAUNCH_TEMPLATE_ID>")
.version(1)
.build())
.build())
.privateAccess(true)
.publicAccess(true)
.build())
.build());
}
}
import pulumi
import pulumi_rancher2 as rancher2
foo_cloud_credential = rancher2.CloudCredential("fooCloudCredential",
description="foo test",
amazonec2_credential_config=rancher2.CloudCredentialAmazonec2CredentialConfigArgs(
access_key="<AWS_ACCESS_KEY>",
secret_key="<AWS_SECRET_KEY>",
))
foo_cluster = rancher2.Cluster("fooCluster",
description="Terraform EKS cluster",
eks_config_v2=rancher2.ClusterEksConfigV2Args(
cloud_credential_id=foo_cloud_credential.id,
region="<EKS_REGION>",
kubernetes_version="1.17",
logging_types=[
"audit",
"api",
],
node_groups=[rancher2.ClusterEksConfigV2NodeGroupArgs(
desired_size=3,
max_size=5,
name="node_group1",
launch_templates=[rancher2.ClusterEksConfigV2NodeGroupLaunchTemplateArgs(
id="<EC2_LAUNCH_TEMPLATE_ID>",
version=1,
)],
)],
private_access=True,
public_access=True,
))
import * as pulumi from "@pulumi/pulumi";
import * as rancher2 from "@pulumi/rancher2";
const fooCloudCredential = new rancher2.CloudCredential("fooCloudCredential", {
description: "foo test",
amazonec2CredentialConfig: {
accessKey: "<AWS_ACCESS_KEY>",
secretKey: "<AWS_SECRET_KEY>",
},
});
const fooCluster = new rancher2.Cluster("fooCluster", {
description: "Terraform EKS cluster",
eksConfigV2: {
cloudCredentialId: fooCloudCredential.id,
region: "<EKS_REGION>",
kubernetesVersion: "1.17",
loggingTypes: [
"audit",
"api",
],
nodeGroups: [{
desiredSize: 3,
maxSize: 5,
name: "node_group1",
launchTemplates: [{
id: "<EC2_LAUNCH_TEMPLATE_ID>",
version: 1,
}],
}],
privateAccess: true,
publicAccess: true,
},
});
resources:
fooCloudCredential:
type: rancher2:CloudCredential
properties:
description: foo test
amazonec2CredentialConfig:
accessKey: <AWS_ACCESS_KEY>
secretKey: <AWS_SECRET_KEY>
fooCluster:
type: rancher2:Cluster
properties:
description: Terraform EKS cluster
eksConfigV2:
cloudCredentialId: ${fooCloudCredential.id}
region: <EKS_REGION>
kubernetesVersion: '1.17'
loggingTypes:
- audit
- api
nodeGroups:
- desiredSize: 3
maxSize: 5
name: node_group1
launchTemplates:
- id: <EC2_LAUNCH_TEMPLATE_ID>
version: 1
privateAccess: true
publicAccess: true
Creating AKS cluster from Rancher v2, using aks_config_v2. For Rancher v2.6.0 or above.
using System.Collections.Generic;
using Pulumi;
using Rancher2 = Pulumi.Rancher2;
return await Deployment.RunAsync(() =>
{
var foo_aks = new Rancher2.CloudCredential("foo-aks", new()
{
AzureCredentialConfig = new Rancher2.Inputs.CloudCredentialAzureCredentialConfigArgs
{
ClientId = "<CLIENT_ID>",
ClientSecret = "<CLIENT_SECRET>",
SubscriptionId = "<SUBSCRIPTION_ID>",
},
});
var foo = new Rancher2.Cluster("foo", new()
{
Description = "Terraform AKS cluster",
AksConfigV2 = new Rancher2.Inputs.ClusterAksConfigV2Args
{
CloudCredentialId = foo_aks.Id,
ResourceGroup = "<RESOURCE_GROUP>",
ResourceLocation = "<RESOURCE_LOCATION>",
DnsPrefix = "<DNS_PREFIX>",
KubernetesVersion = "1.21.2",
NetworkPlugin = "<NETWORK_PLUGIN>",
NodePools = new[]
{
new Rancher2.Inputs.ClusterAksConfigV2NodePoolArgs
{
AvailabilityZones = new[]
{
"1",
"2",
"3",
},
Name = "<NODEPOOL_NAME>",
Count = 1,
OrchestratorVersion = "1.21.2",
OsDiskSizeGb = 128,
VmSize = "Standard_DS2_v2",
},
},
},
});
});
package main
import (
"github.com/pulumi/pulumi-rancher2/sdk/v3/go/rancher2"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
foo_aks, err := rancher2.NewCloudCredential(ctx, "foo-aks", &rancher2.CloudCredentialArgs{
AzureCredentialConfig: &rancher2.CloudCredentialAzureCredentialConfigArgs{
ClientId: pulumi.String("<CLIENT_ID>"),
ClientSecret: pulumi.String("<CLIENT_SECRET>"),
SubscriptionId: pulumi.String("<SUBSCRIPTION_ID>"),
},
})
if err != nil {
return err
}
_, err = rancher2.NewCluster(ctx, "foo", &rancher2.ClusterArgs{
Description: pulumi.String("Terraform AKS cluster"),
AksConfigV2: &rancher2.ClusterAksConfigV2Args{
CloudCredentialId: foo_aks.ID(),
ResourceGroup: pulumi.String("<RESOURCE_GROUP>"),
ResourceLocation: pulumi.String("<RESOURCE_LOCATION>"),
DnsPrefix: pulumi.String("<DNS_PREFIX>"),
KubernetesVersion: pulumi.String("1.21.2"),
NetworkPlugin: pulumi.String("<NETWORK_PLUGIN>"),
NodePools: rancher2.ClusterAksConfigV2NodePoolArray{
&rancher2.ClusterAksConfigV2NodePoolArgs{
AvailabilityZones: pulumi.StringArray{
pulumi.String("1"),
pulumi.String("2"),
pulumi.String("3"),
},
Name: pulumi.String("<NODEPOOL_NAME>"),
Count: pulumi.Int(1),
OrchestratorVersion: pulumi.String("1.21.2"),
OsDiskSizeGb: pulumi.Int(128),
VmSize: pulumi.String("Standard_DS2_v2"),
},
},
},
})
if err != nil {
return err
}
return nil
})
}
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.rancher2.CloudCredential;
import com.pulumi.rancher2.CloudCredentialArgs;
import com.pulumi.rancher2.inputs.CloudCredentialAzureCredentialConfigArgs;
import com.pulumi.rancher2.Cluster;
import com.pulumi.rancher2.ClusterArgs;
import com.pulumi.rancher2.inputs.ClusterAksConfigV2Args;
import com.pulumi.rancher2.inputs.ClusterAksConfigV2NodePoolArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
var foo_aks = new CloudCredential("foo-aks", CloudCredentialArgs.builder()
.azureCredentialConfig(CloudCredentialAzureCredentialConfigArgs.builder()
.clientId("<CLIENT_ID>")
.clientSecret("<CLIENT_SECRET>")
.subscriptionId("<SUBSCRIPTION_ID>")
.build())
.build());
var foo = new Cluster("foo", ClusterArgs.builder()
.description("Terraform AKS cluster")
.aksConfigV2(ClusterAksConfigV2Args.builder()
.cloudCredentialId(foo_aks.id())
.resourceGroup("<RESOURCE_GROUP>")
.resourceLocation("<RESOURCE_LOCATION>")
.dnsPrefix("<DNS_PREFIX>")
.kubernetesVersion("1.21.2")
.networkPlugin("<NETWORK_PLUGIN>")
.nodePools(ClusterAksConfigV2NodePoolArgs.builder()
.availabilityZones(
"1",
"2",
"3")
.name("<NODEPOOL_NAME>")
.count(1)
.orchestratorVersion("1.21.2")
.osDiskSizeGb(128)
.vmSize("Standard_DS2_v2")
.build())
.build())
.build());
}
}
import pulumi
import pulumi_rancher2 as rancher2
foo_aks = rancher2.CloudCredential("foo-aks", azure_credential_config=rancher2.CloudCredentialAzureCredentialConfigArgs(
client_id="<CLIENT_ID>",
client_secret="<CLIENT_SECRET>",
subscription_id="<SUBSCRIPTION_ID>",
))
foo = rancher2.Cluster("foo",
description="Terraform AKS cluster",
aks_config_v2=rancher2.ClusterAksConfigV2Args(
cloud_credential_id=foo_aks.id,
resource_group="<RESOURCE_GROUP>",
resource_location="<RESOURCE_LOCATION>",
dns_prefix="<DNS_PREFIX>",
kubernetes_version="1.21.2",
network_plugin="<NETWORK_PLUGIN>",
node_pools=[rancher2.ClusterAksConfigV2NodePoolArgs(
availability_zones=[
"1",
"2",
"3",
],
name="<NODEPOOL_NAME>",
count=1,
orchestrator_version="1.21.2",
os_disk_size_gb=128,
vm_size="Standard_DS2_v2",
)],
))
import * as pulumi from "@pulumi/pulumi";
import * as rancher2 from "@pulumi/rancher2";
const foo_aks = new rancher2.CloudCredential("foo-aks", {azureCredentialConfig: {
clientId: "<CLIENT_ID>",
clientSecret: "<CLIENT_SECRET>",
subscriptionId: "<SUBSCRIPTION_ID>",
}});
const foo = new rancher2.Cluster("foo", {
description: "Terraform AKS cluster",
aksConfigV2: {
cloudCredentialId: foo_aks.id,
resourceGroup: "<RESOURCE_GROUP>",
resourceLocation: "<RESOURCE_LOCATION>",
dnsPrefix: "<DNS_PREFIX>",
kubernetesVersion: "1.21.2",
networkPlugin: "<NETWORK_PLUGIN>",
nodePools: [{
availabilityZones: [
"1",
"2",
"3",
],
name: "<NODEPOOL_NAME>",
count: 1,
orchestratorVersion: "1.21.2",
osDiskSizeGb: 128,
vmSize: "Standard_DS2_v2",
}],
},
});
resources:
foo-aks:
type: rancher2:CloudCredential
properties:
azureCredentialConfig:
clientId: <CLIENT_ID>
clientSecret: <CLIENT_SECRET>
subscriptionId: <SUBSCRIPTION_ID>
foo:
type: rancher2:Cluster
properties:
description: Terraform AKS cluster
aksConfigV2:
cloudCredentialId: ${["foo-aks"].id}
resourceGroup: <RESOURCE_GROUP>
resourceLocation: <RESOURCE_LOCATION>
dnsPrefix: <DNS_PREFIX>
kubernetesVersion: 1.21.2
networkPlugin: <NETWORK_PLUGIN>
nodePools:
- availabilityZones:
- '1'
- '2'
- '3'
name: <NODEPOOL_NAME>
count: 1
orchestratorVersion: 1.21.2
osDiskSizeGb: 128
vmSize: Standard_DS2_v2
Create Cluster Resource
new Cluster(name: string, args?: ClusterArgs, opts?: CustomResourceOptions);
@overload
def Cluster(resource_name: str,
opts: Optional[ResourceOptions] = None,
agent_env_vars: Optional[Sequence[ClusterAgentEnvVarArgs]] = None,
aks_config: Optional[ClusterAksConfigArgs] = None,
aks_config_v2: Optional[ClusterAksConfigV2Args] = None,
annotations: Optional[Mapping[str, Any]] = None,
cluster_auth_endpoint: Optional[ClusterClusterAuthEndpointArgs] = None,
cluster_monitoring_input: Optional[ClusterClusterMonitoringInputArgs] = None,
cluster_template_answers: Optional[ClusterClusterTemplateAnswersArgs] = None,
cluster_template_id: Optional[str] = None,
cluster_template_questions: Optional[Sequence[ClusterClusterTemplateQuestionArgs]] = None,
cluster_template_revision_id: Optional[str] = None,
default_pod_security_policy_template_id: Optional[str] = None,
description: Optional[str] = None,
desired_agent_image: Optional[str] = None,
desired_auth_image: Optional[str] = None,
docker_root_dir: Optional[str] = None,
driver: Optional[str] = None,
eks_config: Optional[ClusterEksConfigArgs] = None,
eks_config_v2: Optional[ClusterEksConfigV2Args] = None,
enable_cluster_alerting: Optional[bool] = None,
enable_cluster_monitoring: Optional[bool] = None,
enable_network_policy: Optional[bool] = None,
fleet_workspace_name: Optional[str] = None,
gke_config: Optional[ClusterGkeConfigArgs] = None,
gke_config_v2: Optional[ClusterGkeConfigV2Args] = None,
k3s_config: Optional[ClusterK3sConfigArgs] = None,
labels: Optional[Mapping[str, Any]] = None,
name: Optional[str] = None,
oke_config: Optional[ClusterOkeConfigArgs] = None,
rke2_config: Optional[ClusterRke2ConfigArgs] = None,
rke_config: Optional[ClusterRkeConfigArgs] = None,
scheduled_cluster_scan: Optional[ClusterScheduledClusterScanArgs] = None,
windows_prefered_cluster: Optional[bool] = None)
@overload
def Cluster(resource_name: str,
args: Optional[ClusterArgs] = None,
opts: Optional[ResourceOptions] = None)
func NewCluster(ctx *Context, name string, args *ClusterArgs, opts ...ResourceOption) (*Cluster, error)
public Cluster(string name, ClusterArgs? args = null, CustomResourceOptions? opts = null)
public Cluster(String name, ClusterArgs args)
public Cluster(String name, ClusterArgs args, CustomResourceOptions options)
type: rancher2:Cluster
properties: # The arguments to resource properties.
options: # Bag of options to control resource's behavior.
- name string
- The unique name of the resource.
- args ClusterArgs
- The arguments to resource properties.
- opts CustomResourceOptions
- Bag of options to control resource's behavior.
- resource_name str
- The unique name of the resource.
- args ClusterArgs
- The arguments to resource properties.
- opts ResourceOptions
- Bag of options to control resource's behavior.
- ctx Context
- Context object for the current deployment.
- name string
- The unique name of the resource.
- args ClusterArgs
- The arguments to resource properties.
- opts ResourceOption
- Bag of options to control resource's behavior.
- name string
- The unique name of the resource.
- args ClusterArgs
- The arguments to resource properties.
- opts CustomResourceOptions
- Bag of options to control resource's behavior.
- name String
- The unique name of the resource.
- args ClusterArgs
- The arguments to resource properties.
- options CustomResourceOptions
- Bag of options to control resource's behavior.
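To illustrate the constructor forms above, here is a minimal TypeScript sketch (the credential resource is hypothetical) that passes ClusterArgs together with common resource options such as dependsOn and protect:
import * as rancher2 from "@pulumi/rancher2";
// Hypothetical credential the cluster should be created after.
const creds = new rancher2.CloudCredential("creds", {
    amazonec2CredentialConfig: {
        accessKey: "<AWS_ACCESS_KEY>",
        secretKey: "<AWS_SECRET_KEY>",
    },
});
// new Cluster(name, args?, opts?): name is the unique resource name,
// args are the ClusterArgs inputs, opts are CustomResourceOptions.
const cluster = new rancher2.Cluster("example", {
    description: "Cluster created with explicit resource options",
}, {
    dependsOn: [creds], // wait for the credential before creating the cluster
    protect: false,     // set to true to prevent accidental deletion
});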
Cluster Resource Properties
To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.
Inputs
The Cluster resource accepts the following input properties:
Property names follow each SDK's conventions: .NET and Go use PascalCase (AgentEnvVars), TypeScript, Java and YAML use camelCase (agentEnvVars), and Python uses snake_case (agent_env_vars). The list below uses the .NET names.
- AgentEnvVars List<ClusterAgentEnvVarArgs>
Optional Agent Env Vars for Rancher agent. Just for Rancher v2.5.6 and above (list)
- AksConfig ClusterAksConfigArgs
The Azure AKS configuration for aks Clusters. Conflicts with aks_config_v2, eks_config, eks_config_v2, gke_config, gke_config_v2, oke_config, k3s_config and rke_config (list maxitems:1)
- AksConfigV2 ClusterAksConfigV2Args
The Azure AKS v2 configuration for creating/import aks Clusters. Conflicts with aks_config, eks_config, eks_config_v2, gke_config, gke_config_v2, oke_config, k3s_config and rke_config (list maxitems:1)
- Annotations Dictionary<string, object>
Annotations for the Cluster (map)
- ClusterAuthEndpoint ClusterClusterAuthEndpointArgs
Enabling the local cluster authorized endpoint allows direct communication with the cluster, bypassing the Rancher API proxy. (list maxitems:1)
- ClusterMonitoringInput ClusterClusterMonitoringInputArgs
Cluster monitoring config. Any parameter defined in rancher-monitoring charts could be configured (list maxitems:1)
- ClusterTemplateAnswers ClusterClusterTemplateAnswersArgs
Cluster template answers. Just for Rancher v2.3.x and above (list maxitems:1)
- ClusterTemplateId string
Cluster template ID. Just for Rancher v2.3.x and above (string)
- ClusterTemplateQuestions List<ClusterClusterTemplateQuestionArgs>
Cluster template questions. Just for Rancher v2.3.x and above (list)
- ClusterTemplateRevisionId string
Cluster template revision ID. Just for Rancher v2.3.x and above (string)
- DefaultPodSecurityPolicyTemplateId string
Default pod security policy template ID (string)
- Description string
The description for Cluster (string)
- DesiredAgentImage string
Desired agent image. Just for Rancher v2.3.x and above (string)
- DesiredAuthImage string
Desired auth image. Just for Rancher v2.3.x and above (string)
- DockerRootDir string
Docker Root Dir for the cluster. Just for Rancher v2.3.x and above (string)
- Driver string
(Computed) The driver used for the Cluster. imported, azurekubernetesservice, amazonelasticcontainerservice, googlekubernetesengine and rancherKubernetesEngine are supported (string)
- EksConfig ClusterEksConfigArgs
The Amazon EKS configuration for eks Clusters. Conflicts with aks_config, aks_config_v2, eks_config_v2, gke_config, gke_config_v2, oke_config, k3s_config and rke_config (list maxitems:1)
- EksConfigV2 ClusterEksConfigV2Args
The Amazon EKS V2 configuration to create or import eks Clusters. Conflicts with aks_config, eks_config, gke_config, gke_config_v2, oke_config, k3s_config and rke_config. For Rancher v2.5.x or above (list maxitems:1)
- EnableClusterAlerting bool
Enable built-in cluster alerting (bool)
- EnableClusterMonitoring bool
Enable built-in cluster monitoring (bool)
- EnableNetworkPolicy bool
Enable project network isolation (bool)
- FleetWorkspaceName string
Fleet workspace name (string)
- GkeConfig ClusterGkeConfigArgs
The Google GKE configuration for gke Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config_v2, oke_config, k3s_config and rke_config (list maxitems:1)
- GkeConfigV2 ClusterGkeConfigV2Args
The Google GKE V2 configuration for gke Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config, oke_config, k3s_config and rke_config. For Rancher v2.5.8 or above (list maxitems:1)
- K3sConfig ClusterK3sConfigArgs
The K3S configuration for k3s imported Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config, gke_config_v2, oke_config and rke_config (list maxitems:1)
- Labels Dictionary<string, object>
Labels for the Cluster (map)
- Name string
The name of the Cluster (string)
- OkeConfig ClusterOkeConfigArgs
The Oracle OKE configuration for oke Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config, gke_config_v2, k3s_config and rke_config (list maxitems:1)
- Rke2Config ClusterRke2ConfigArgs
The RKE2 configuration for rke2 Clusters. Conflicts with aks_config, aks_config_v2, eks_config, gke_config, oke_config, k3s_config and rke_config (list maxitems:1)
- RkeConfig ClusterRkeConfigArgs
The RKE configuration for rke Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config, gke_config_v2, oke_config and k3s_config (list maxitems:1)
- ScheduledClusterScan ClusterScheduledClusterScanArgs
Cluster scheduled cis scan. For Rancher v2.4.0 or above (list maxitems:1)
- WindowsPreferedCluster bool
Windows preferred cluster. Default: false (bool)
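As a minimal sketch of how a few of these inputs combine (TypeScript; the FQDN value is a placeholder and the settings are only illustrative), an RKE cluster with a local authorized endpoint, project network isolation, and labels could look like:
import * as rancher2 from "@pulumi/rancher2";
// Sketch only: an RKE cluster with the authorized endpoint and network policy enabled.
const cluster = new rancher2.Cluster("example", {
    description: "RKE cluster with authorized endpoint",
    rkeConfig: {
        network: {
            plugin: "canal",
        },
    },
    clusterAuthEndpoint: {
        enabled: true,
        fqdn: "<CLUSTER_FQDN>", // placeholder for the endpoint's FQDN
    },
    enableNetworkPolicy: true,
    labels: {
        environment: "dev",
    },
});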
Outputs
All input properties are implicitly available as output properties. Additionally, the Cluster resource produces the following output properties:
- CaCert string: TLS CA certificate for etcd service (string)
- ClusterRegistrationToken ClusterClusterRegistrationToken: (Computed) Cluster Registration Token generated for the cluster (list maxitems:1)
- DefaultProjectId string: (Computed) Default project ID for the cluster (string)
- EnableClusterIstio bool: Deploy Istio on the system project and istio-system namespace. Deploy Istio using the rancher2.App resource instead. See above example.
- Id string: The provider-assigned unique ID for this managed resource.
- IstioEnabled bool: (Computed) Is Istio enabled at cluster? Just for Rancher v2.3.x and above (bool)
- KubeConfig string: (Computed/Sensitive) Kube Config generated for the cluster. Note: For Rancher 2.6.0 and above, when the cluster has cluster_auth_endpoint enabled, the kube_config will not be available until the cluster is connected (string)
- SystemProjectId string: (Computed) System project ID for the cluster (string)
- CaCert string: TLS CA certificate for etcd service (string)
- ClusterRegistrationToken ClusterClusterRegistrationToken: (Computed) Cluster Registration Token generated for the cluster (list maxitems:1)
- DefaultProjectId string: (Computed) Default project ID for the cluster (string)
- EnableClusterIstio bool: Deploy Istio on the system project and istio-system namespace. Deploy Istio using the rancher2.App resource instead. See above example.
- Id string: The provider-assigned unique ID for this managed resource.
- IstioEnabled bool: (Computed) Is Istio enabled at cluster? Just for Rancher v2.3.x and above (bool)
- KubeConfig string: (Computed/Sensitive) Kube Config generated for the cluster. Note: For Rancher 2.6.0 and above, when the cluster has cluster_auth_endpoint enabled, the kube_config will not be available until the cluster is connected (string)
- SystemProjectId string: (Computed) System project ID for the cluster (string)
- caCert String: TLS CA certificate for etcd service (string)
- clusterRegistrationToken ClusterClusterRegistrationToken: (Computed) Cluster Registration Token generated for the cluster (list maxitems:1)
- defaultProjectId String: (Computed) Default project ID for the cluster (string)
- enableClusterIstio Boolean: Deploy Istio on the system project and istio-system namespace. Deploy Istio using the rancher2.App resource instead. See above example.
- id String: The provider-assigned unique ID for this managed resource.
- istioEnabled Boolean: (Computed) Is Istio enabled at cluster? Just for Rancher v2.3.x and above (bool)
- kubeConfig String: (Computed/Sensitive) Kube Config generated for the cluster. Note: For Rancher 2.6.0 and above, when the cluster has cluster_auth_endpoint enabled, the kube_config will not be available until the cluster is connected (string)
- systemProjectId String: (Computed) System project ID for the cluster (string)
- caCert string: TLS CA certificate for etcd service (string)
- clusterRegistrationToken ClusterClusterRegistrationToken: (Computed) Cluster Registration Token generated for the cluster (list maxitems:1)
- defaultProjectId string: (Computed) Default project ID for the cluster (string)
- enableClusterIstio boolean: Deploy Istio on the system project and istio-system namespace. Deploy Istio using the rancher2.App resource instead. See above example.
- id string: The provider-assigned unique ID for this managed resource.
- istioEnabled boolean: (Computed) Is Istio enabled at cluster? Just for Rancher v2.3.x and above (bool)
- kubeConfig string: (Computed/Sensitive) Kube Config generated for the cluster. Note: For Rancher 2.6.0 and above, when the cluster has cluster_auth_endpoint enabled, the kube_config will not be available until the cluster is connected (string)
- systemProjectId string: (Computed) System project ID for the cluster (string)
- ca_cert str: TLS CA certificate for etcd service (string)
- cluster_registration_token ClusterClusterRegistrationToken: (Computed) Cluster Registration Token generated for the cluster (list maxitems:1)
- default_project_id str: (Computed) Default project ID for the cluster (string)
- enable_cluster_istio bool: Deploy Istio on the system project and istio-system namespace. Deploy Istio using the rancher2.App resource instead. See above example.
- id str: The provider-assigned unique ID for this managed resource.
- istio_enabled bool: (Computed) Is Istio enabled at cluster? Just for Rancher v2.3.x and above (bool)
- kube_config str: (Computed/Sensitive) Kube Config generated for the cluster. Note: For Rancher 2.6.0 and above, when the cluster has cluster_auth_endpoint enabled, the kube_config will not be available until the cluster is connected (string)
- system_project_id str: (Computed) System project ID for the cluster (string)
- caCert String: TLS CA certificate for etcd service (string)
- clusterRegistrationToken Property Map: (Computed) Cluster Registration Token generated for the cluster (list maxitems:1)
- defaultProjectId String: (Computed) Default project ID for the cluster (string)
- enableClusterIstio Boolean: Deploy Istio on the system project and istio-system namespace. Deploy Istio using the rancher2.App resource instead. See above example.
- id String: The provider-assigned unique ID for this managed resource.
- istioEnabled Boolean: (Computed) Is Istio enabled at cluster? Just for Rancher v2.3.x and above (bool)
- kubeConfig String: (Computed/Sensitive) Kube Config generated for the cluster. Note: For Rancher 2.6.0 and above, when the cluster has cluster_auth_endpoint enabled, the kube_config will not be available until the cluster is connected (string)
- systemProjectId String: (Computed) System project ID for the cluster (string)
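Because the properties above are outputs, they are read from the resource instance after deployment. A minimal sketch in C# (the cluster arguments are placeholders) that surfaces the computed project IDs and wraps the sensitive kube config in a secret stack output could look like this:
using System.Collections.Generic;
using Pulumi;
using Rancher2 = Pulumi.Rancher2;

return await Deployment.RunAsync(() =>
{
    var example = new Rancher2.Cluster("example", new()
    {
        Description = "Cluster whose computed outputs are exported below",
    });

    // Every property below comes from the output list above; kube_config is sensitive,
    // so it is wrapped as a secret before being exported.
    return new Dictionary<string, object?>
    {
        ["defaultProjectId"] = example.DefaultProjectId,
        ["systemProjectId"] = example.SystemProjectId,
        ["kubeConfig"] = Output.CreateSecret(example.KubeConfig),
    };
});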
Look up Existing Cluster Resource
Get an existing Cluster resource’s state with the given name, ID, and optional extra properties used to qualify the lookup.
public static get(name: string, id: Input<ID>, state?: ClusterState, opts?: CustomResourceOptions): Cluster
@staticmethod
def get(resource_name: str,
id: str,
opts: Optional[ResourceOptions] = None,
agent_env_vars: Optional[Sequence[ClusterAgentEnvVarArgs]] = None,
aks_config: Optional[ClusterAksConfigArgs] = None,
aks_config_v2: Optional[ClusterAksConfigV2Args] = None,
annotations: Optional[Mapping[str, Any]] = None,
ca_cert: Optional[str] = None,
cluster_auth_endpoint: Optional[ClusterClusterAuthEndpointArgs] = None,
cluster_monitoring_input: Optional[ClusterClusterMonitoringInputArgs] = None,
cluster_registration_token: Optional[ClusterClusterRegistrationTokenArgs] = None,
cluster_template_answers: Optional[ClusterClusterTemplateAnswersArgs] = None,
cluster_template_id: Optional[str] = None,
cluster_template_questions: Optional[Sequence[ClusterClusterTemplateQuestionArgs]] = None,
cluster_template_revision_id: Optional[str] = None,
default_pod_security_policy_template_id: Optional[str] = None,
default_project_id: Optional[str] = None,
description: Optional[str] = None,
desired_agent_image: Optional[str] = None,
desired_auth_image: Optional[str] = None,
docker_root_dir: Optional[str] = None,
driver: Optional[str] = None,
eks_config: Optional[ClusterEksConfigArgs] = None,
eks_config_v2: Optional[ClusterEksConfigV2Args] = None,
enable_cluster_alerting: Optional[bool] = None,
enable_cluster_istio: Optional[bool] = None,
enable_cluster_monitoring: Optional[bool] = None,
enable_network_policy: Optional[bool] = None,
fleet_workspace_name: Optional[str] = None,
gke_config: Optional[ClusterGkeConfigArgs] = None,
gke_config_v2: Optional[ClusterGkeConfigV2Args] = None,
istio_enabled: Optional[bool] = None,
k3s_config: Optional[ClusterK3sConfigArgs] = None,
kube_config: Optional[str] = None,
labels: Optional[Mapping[str, Any]] = None,
name: Optional[str] = None,
oke_config: Optional[ClusterOkeConfigArgs] = None,
rke2_config: Optional[ClusterRke2ConfigArgs] = None,
rke_config: Optional[ClusterRkeConfigArgs] = None,
scheduled_cluster_scan: Optional[ClusterScheduledClusterScanArgs] = None,
system_project_id: Optional[str] = None,
windows_prefered_cluster: Optional[bool] = None) -> Cluster
func GetCluster(ctx *Context, name string, id IDInput, state *ClusterState, opts ...ResourceOption) (*Cluster, error)
public static Cluster Get(string name, Input<string> id, ClusterState? state, CustomResourceOptions? opts = null)
public static Cluster get(String name, Output<String> id, ClusterState state, CustomResourceOptions options)
Resource lookup is not supported in YAML
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- resource_name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
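Putting the signatures and parameters above together, a hedged C# sketch of looking up an already-provisioned cluster by its Rancher ID (the "c-xxxxx" ID is a placeholder) might be:
using System.Collections.Generic;
using Pulumi;
using Rancher2 = Pulumi.Rancher2;

return await Deployment.RunAsync(() =>
{
    // Adopt an existing cluster into the program; no extra state overrides are passed.
    var adopted = Rancher2.Cluster.Get("foo-imported", "c-xxxxx", null);

    return new Dictionary<string, object?>
    {
        ["adoptedName"] = adopted.Name,
    };
});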
- AgentEnvVars List<ClusterAgentEnvVarArgs>: Optional Agent Env Vars for Rancher agent. Just for Rancher v2.5.6 and above (list)
- AksConfig ClusterAksConfigArgs: The Azure AKS configuration for aks Clusters. Conflicts with aks_config_v2, eks_config, eks_config_v2, gke_config, gke_config_v2, oke_config, k3s_config and rke_config (list maxitems:1)
- AksConfigV2 ClusterAksConfigV2Args: The Azure AKS v2 configuration for creating/import aks Clusters. Conflicts with aks_config, eks_config, eks_config_v2, gke_config, gke_config_v2, oke_config, k3s_config and rke_config (list maxitems:1)
- Annotations Dictionary<string, object>: Annotations for the Cluster (map)
- CaCert string: TLS CA certificate for etcd service (string)
- ClusterAuthEndpoint ClusterClusterAuthEndpointArgs: Enabling the local cluster authorized endpoint allows direct communication with the cluster, bypassing the Rancher API proxy (list maxitems:1)
- ClusterMonitoringInput ClusterClusterMonitoringInputArgs: Cluster monitoring config. Any parameter defined in rancher-monitoring charts could be configured (list maxitems:1)
- ClusterRegistrationToken ClusterClusterRegistrationTokenArgs: (Computed) Cluster Registration Token generated for the cluster (list maxitems:1)
- ClusterTemplateAnswers ClusterClusterTemplateAnswersArgs: Cluster template answers. Just for Rancher v2.3.x and above (list maxitems:1)
- ClusterTemplateId string: Cluster template ID. Just for Rancher v2.3.x and above (string)
- ClusterTemplateQuestions List<ClusterClusterTemplateQuestionArgs>: Cluster template questions. Just for Rancher v2.3.x and above (list)
- ClusterTemplateRevisionId string: Cluster template revision ID. Just for Rancher v2.3.x and above (string)
- DefaultPodSecurityPolicyTemplateId string
- DefaultProjectId string: (Computed) Default project ID for the cluster (string)
- Description string: The description for Cluster (string)
- DesiredAgentImage string: Desired agent image. Just for Rancher v2.3.x and above (string)
- DesiredAuthImage string: Desired auth image. Just for Rancher v2.3.x and above (string)
- DockerRootDir string: Docker Root Dir (string)
- Driver string: (Computed) The driver used for the Cluster. imported, azurekubernetesservice, amazonelasticcontainerservice, googlekubernetesengine and rancherKubernetesEngine are supported (string)
- EksConfig ClusterEksConfigArgs: The Amazon EKS configuration for eks Clusters. Conflicts with aks_config, aks_config_v2, eks_config_v2, gke_config, gke_config_v2, oke_config, k3s_config and rke_config (list maxitems:1)
- EksConfigV2 ClusterEksConfigV2Args: The Amazon EKS V2 configuration to create or import eks Clusters. Conflicts with aks_config, eks_config, gke_config, gke_config_v2, oke_config, k3s_config and rke_config. For Rancher v2.5.x or above (list maxitems:1)
- EnableClusterAlerting bool: Enable built-in cluster alerting (bool)
- EnableClusterIstio bool: Deploy Istio on the system project and istio-system namespace. Deploy Istio using the rancher2.App resource instead. See above example.
- EnableClusterMonitoring bool: Enable built-in cluster monitoring (bool)
- EnableNetworkPolicy bool: Enable project network isolation (bool)
- FleetWorkspaceName string: Fleet workspace name (string)
- GkeConfig ClusterGkeConfigArgs: The Google GKE configuration for gke Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config_v2, oke_config, k3s_config and rke_config (list maxitems:1)
- GkeConfigV2 ClusterGkeConfigV2Args: The Google GKE V2 configuration for gke Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config, oke_config, k3s_config and rke_config. For Rancher v2.5.8 or above (list maxitems:1)
- IstioEnabled bool: (Computed) Is Istio enabled at cluster? Just for Rancher v2.3.x and above (bool)
- K3sConfig ClusterK3sConfigArgs: The K3S configuration for k3s imported Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config, gke_config_v2, oke_config and rke_config (list maxitems:1)
- KubeConfig string: (Computed/Sensitive) Kube Config generated for the cluster. Note: For Rancher 2.6.0 and above, when the cluster has cluster_auth_endpoint enabled, the kube_config will not be available until the cluster is connected (string)
- Labels Dictionary<string, object>: Labels for the Cluster (map)
- Name string: The name of the Cluster (string)
- OkeConfig ClusterOkeConfigArgs: The Oracle OKE configuration for oke Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config, gke_config_v2, k3s_config and rke_config (list maxitems:1)
- Rke2Config ClusterRke2ConfigArgs: The RKE2 configuration for rke2 Clusters. Conflicts with aks_config, aks_config_v2, eks_config, gke_config, oke_config, k3s_config and rke_config (list maxitems:1)
- RkeConfig ClusterRkeConfigArgs: The RKE configuration for rke Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config, gke_config_v2, oke_config and k3s_config (list maxitems:1)
- ScheduledClusterScan ClusterScheduledClusterScanArgs: Cluster scheduled CIS scan. For Rancher v2.4.0 or above (list maxitems:1)
- SystemProjectId string: (Computed) System project ID for the cluster (string)
- WindowsPreferedCluster bool: Windows preferred cluster. Default: false (bool)
- AgentEnvVars []ClusterAgentEnvVarArgs: Optional Agent Env Vars for Rancher agent. Just for Rancher v2.5.6 and above (list)
- AksConfig ClusterAksConfigArgs: The Azure AKS configuration for aks Clusters. Conflicts with aks_config_v2, eks_config, eks_config_v2, gke_config, gke_config_v2, oke_config, k3s_config and rke_config (list maxitems:1)
- AksConfigV2 ClusterAksConfigV2Args: The Azure AKS v2 configuration for creating/import aks Clusters. Conflicts with aks_config, eks_config, eks_config_v2, gke_config, gke_config_v2, oke_config, k3s_config and rke_config (list maxitems:1)
- Annotations map[string]interface{}: Annotations for the Cluster (map)
- CaCert string: TLS CA certificate for etcd service (string)
- ClusterAuthEndpoint ClusterClusterAuthEndpointArgs: Enabling the local cluster authorized endpoint allows direct communication with the cluster, bypassing the Rancher API proxy (list maxitems:1)
- ClusterMonitoringInput ClusterClusterMonitoringInputArgs: Cluster monitoring config. Any parameter defined in rancher-monitoring charts could be configured (list maxitems:1)
- ClusterRegistrationToken ClusterClusterRegistrationTokenArgs: (Computed) Cluster Registration Token generated for the cluster (list maxitems:1)
- ClusterTemplateAnswers ClusterClusterTemplateAnswersArgs: Cluster template answers. Just for Rancher v2.3.x and above (list maxitems:1)
- ClusterTemplateId string: Cluster template ID. Just for Rancher v2.3.x and above (string)
- ClusterTemplateQuestions []ClusterClusterTemplateQuestionArgs: Cluster template questions. Just for Rancher v2.3.x and above (list)
- ClusterTemplateRevisionId string: Cluster template revision ID. Just for Rancher v2.3.x and above (string)
- DefaultPodSecurityPolicyTemplateId string
- DefaultProjectId string: (Computed) Default project ID for the cluster (string)
- Description string: The description for Cluster (string)
- DesiredAgentImage string: Desired agent image. Just for Rancher v2.3.x and above (string)
- DesiredAuthImage string: Desired auth image. Just for Rancher v2.3.x and above (string)
- DockerRootDir string: Docker Root Dir (string)
- Driver string: (Computed) The driver used for the Cluster. imported, azurekubernetesservice, amazonelasticcontainerservice, googlekubernetesengine and rancherKubernetesEngine are supported (string)
- EksConfig ClusterEksConfigArgs: The Amazon EKS configuration for eks Clusters. Conflicts with aks_config, aks_config_v2, eks_config_v2, gke_config, gke_config_v2, oke_config, k3s_config and rke_config (list maxitems:1)
- EksConfigV2 ClusterEksConfigV2Args: The Amazon EKS V2 configuration to create or import eks Clusters. Conflicts with aks_config, eks_config, gke_config, gke_config_v2, oke_config, k3s_config and rke_config. For Rancher v2.5.x or above (list maxitems:1)
- EnableClusterAlerting bool: Enable built-in cluster alerting (bool)
- EnableClusterIstio bool: Deploy Istio on the system project and istio-system namespace. Deploy Istio using the rancher2.App resource instead. See above example.
- EnableClusterMonitoring bool: Enable built-in cluster monitoring (bool)
- EnableNetworkPolicy bool: Enable project network isolation (bool)
- FleetWorkspaceName string: Fleet workspace name (string)
- GkeConfig ClusterGkeConfigArgs: The Google GKE configuration for gke Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config_v2, oke_config, k3s_config and rke_config (list maxitems:1)
- GkeConfigV2 ClusterGkeConfigV2Args: The Google GKE V2 configuration for gke Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config, oke_config, k3s_config and rke_config. For Rancher v2.5.8 or above (list maxitems:1)
- IstioEnabled bool: (Computed) Is Istio enabled at cluster? Just for Rancher v2.3.x and above (bool)
- K3sConfig ClusterK3sConfigArgs: The K3S configuration for k3s imported Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config, gke_config_v2, oke_config and rke_config (list maxitems:1)
- KubeConfig string: (Computed/Sensitive) Kube Config generated for the cluster. Note: For Rancher 2.6.0 and above, when the cluster has cluster_auth_endpoint enabled, the kube_config will not be available until the cluster is connected (string)
- Labels map[string]interface{}: Labels for the Cluster (map)
- Name string: The name of the Cluster (string)
- OkeConfig ClusterOkeConfigArgs: The Oracle OKE configuration for oke Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config, gke_config_v2, k3s_config and rke_config (list maxitems:1)
- Rke2Config ClusterRke2ConfigArgs: The RKE2 configuration for rke2 Clusters. Conflicts with aks_config, aks_config_v2, eks_config, gke_config, oke_config, k3s_config and rke_config (list maxitems:1)
- RkeConfig ClusterRkeConfigArgs: The RKE configuration for rke Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config, gke_config_v2, oke_config and k3s_config (list maxitems:1)
- ScheduledClusterScan ClusterScheduledClusterScanArgs: Cluster scheduled CIS scan. For Rancher v2.4.0 or above (list maxitems:1)
- SystemProjectId string: (Computed) System project ID for the cluster (string)
- WindowsPreferedCluster bool: Windows preferred cluster. Default: false (bool)
- agentEnvVars List<ClusterAgentEnvVarArgs>: Optional Agent Env Vars for Rancher agent. Just for Rancher v2.5.6 and above (list)
- aksConfig ClusterAksConfigArgs: The Azure AKS configuration for aks Clusters. Conflicts with aks_config_v2, eks_config, eks_config_v2, gke_config, gke_config_v2, oke_config, k3s_config and rke_config (list maxitems:1)
- aksConfigV2 ClusterAksConfigV2Args: The Azure AKS v2 configuration for creating/import aks Clusters. Conflicts with aks_config, eks_config, eks_config_v2, gke_config, gke_config_v2, oke_config, k3s_config and rke_config (list maxitems:1)
- annotations Map<String,Object>: Annotations for the Cluster (map)
- caCert String: TLS CA certificate for etcd service (string)
- clusterAuthEndpoint ClusterClusterAuthEndpointArgs: Enabling the local cluster authorized endpoint allows direct communication with the cluster, bypassing the Rancher API proxy (list maxitems:1)
- clusterMonitoringInput ClusterClusterMonitoringInputArgs: Cluster monitoring config. Any parameter defined in rancher-monitoring charts could be configured (list maxitems:1)
- clusterRegistrationToken ClusterClusterRegistrationTokenArgs: (Computed) Cluster Registration Token generated for the cluster (list maxitems:1)
- clusterTemplateAnswers ClusterClusterTemplateAnswersArgs: Cluster template answers. Just for Rancher v2.3.x and above (list maxitems:1)
- clusterTemplateId String: Cluster template ID. Just for Rancher v2.3.x and above (string)
- clusterTemplateQuestions List<ClusterClusterTemplateQuestionArgs>: Cluster template questions. Just for Rancher v2.3.x and above (list)
- clusterTemplateRevisionId String: Cluster template revision ID. Just for Rancher v2.3.x and above (string)
- defaultPodSecurityPolicyTemplateId String
- defaultProjectId String: (Computed) Default project ID for the cluster (string)
- description String: The description for Cluster (string)
- desiredAgentImage String: Desired agent image. Just for Rancher v2.3.x and above (string)
- desiredAuthImage String: Desired auth image. Just for Rancher v2.3.x and above (string)
- dockerRootDir String: Docker Root Dir (string)
- driver String: (Computed) The driver used for the Cluster. imported, azurekubernetesservice, amazonelasticcontainerservice, googlekubernetesengine and rancherKubernetesEngine are supported (string)
- eksConfig ClusterEksConfigArgs: The Amazon EKS configuration for eks Clusters. Conflicts with aks_config, aks_config_v2, eks_config_v2, gke_config, gke_config_v2, oke_config, k3s_config and rke_config (list maxitems:1)
- eksConfigV2 ClusterEksConfigV2Args: The Amazon EKS V2 configuration to create or import eks Clusters. Conflicts with aks_config, eks_config, gke_config, gke_config_v2, oke_config, k3s_config and rke_config. For Rancher v2.5.x or above (list maxitems:1)
- enableClusterAlerting Boolean: Enable built-in cluster alerting (bool)
- enableClusterIstio Boolean: Deploy Istio on the system project and istio-system namespace. Deploy Istio using the rancher2.App resource instead. See above example.
- enableClusterMonitoring Boolean: Enable built-in cluster monitoring (bool)
- enableNetworkPolicy Boolean: Enable project network isolation (bool)
- fleetWorkspaceName String: Fleet workspace name (string)
- gkeConfig ClusterGkeConfigArgs: The Google GKE configuration for gke Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config_v2, oke_config, k3s_config and rke_config (list maxitems:1)
- gkeConfigV2 ClusterGkeConfigV2Args: The Google GKE V2 configuration for gke Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config, oke_config, k3s_config and rke_config. For Rancher v2.5.8 or above (list maxitems:1)
- istioEnabled Boolean: (Computed) Is Istio enabled at cluster? Just for Rancher v2.3.x and above (bool)
- k3sConfig ClusterK3sConfigArgs: The K3S configuration for k3s imported Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config, gke_config_v2, oke_config and rke_config (list maxitems:1)
- kubeConfig String: (Computed/Sensitive) Kube Config generated for the cluster. Note: For Rancher 2.6.0 and above, when the cluster has cluster_auth_endpoint enabled, the kube_config will not be available until the cluster is connected (string)
- labels Map<String,Object>: Labels for the Cluster (map)
- name String: The name of the Cluster (string)
- okeConfig ClusterOkeConfigArgs: The Oracle OKE configuration for oke Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config, gke_config_v2, k3s_config and rke_config (list maxitems:1)
- rke2Config ClusterRke2ConfigArgs: The RKE2 configuration for rke2 Clusters. Conflicts with aks_config, aks_config_v2, eks_config, gke_config, oke_config, k3s_config and rke_config (list maxitems:1)
- rkeConfig ClusterRkeConfigArgs: The RKE configuration for rke Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config, gke_config_v2, oke_config and k3s_config (list maxitems:1)
- scheduledClusterScan ClusterScheduledClusterScanArgs: Cluster scheduled CIS scan. For Rancher v2.4.0 or above (list maxitems:1)
- systemProjectId String: (Computed) System project ID for the cluster (string)
- windowsPreferedCluster Boolean: Windows preferred cluster. Default: false (bool)
- agentEnvVars ClusterAgentEnvVarArgs[]: Optional Agent Env Vars for Rancher agent. Just for Rancher v2.5.6 and above (list)
- aksConfig ClusterAksConfigArgs: The Azure AKS configuration for aks Clusters. Conflicts with aks_config_v2, eks_config, eks_config_v2, gke_config, gke_config_v2, oke_config, k3s_config and rke_config (list maxitems:1)
- aksConfigV2 ClusterAksConfigV2Args: The Azure AKS v2 configuration for creating/import aks Clusters. Conflicts with aks_config, eks_config, eks_config_v2, gke_config, gke_config_v2, oke_config, k3s_config and rke_config (list maxitems:1)
- annotations {[key: string]: any}: Annotations for the Cluster (map)
- caCert string: TLS CA certificate for etcd service (string)
- clusterAuthEndpoint ClusterClusterAuthEndpointArgs: Enabling the local cluster authorized endpoint allows direct communication with the cluster, bypassing the Rancher API proxy (list maxitems:1)
- clusterMonitoringInput ClusterClusterMonitoringInputArgs: Cluster monitoring config. Any parameter defined in rancher-monitoring charts could be configured (list maxitems:1)
- clusterRegistrationToken ClusterClusterRegistrationTokenArgs: (Computed) Cluster Registration Token generated for the cluster (list maxitems:1)
- clusterTemplateAnswers ClusterClusterTemplateAnswersArgs: Cluster template answers. Just for Rancher v2.3.x and above (list maxitems:1)
- clusterTemplateId string: Cluster template ID. Just for Rancher v2.3.x and above (string)
- clusterTemplateQuestions ClusterClusterTemplateQuestionArgs[]: Cluster template questions. Just for Rancher v2.3.x and above (list)
- clusterTemplateRevisionId string: Cluster template revision ID. Just for Rancher v2.3.x and above (string)
- defaultPodSecurityPolicyTemplateId string
- defaultProjectId string: (Computed) Default project ID for the cluster (string)
- description string: The description for Cluster (string)
- desiredAgentImage string: Desired agent image. Just for Rancher v2.3.x and above (string)
- desiredAuthImage string: Desired auth image. Just for Rancher v2.3.x and above (string)
- dockerRootDir string: Docker Root Dir (string)
- driver string: (Computed) The driver used for the Cluster. imported, azurekubernetesservice, amazonelasticcontainerservice, googlekubernetesengine and rancherKubernetesEngine are supported (string)
- eksConfig ClusterEksConfigArgs: The Amazon EKS configuration for eks Clusters. Conflicts with aks_config, aks_config_v2, eks_config_v2, gke_config, gke_config_v2, oke_config, k3s_config and rke_config (list maxitems:1)
- eksConfigV2 ClusterEksConfigV2Args: The Amazon EKS V2 configuration to create or import eks Clusters. Conflicts with aks_config, eks_config, gke_config, gke_config_v2, oke_config, k3s_config and rke_config. For Rancher v2.5.x or above (list maxitems:1)
- enableClusterAlerting boolean: Enable built-in cluster alerting (bool)
- enableClusterIstio boolean: Deploy Istio on the system project and istio-system namespace. Deploy Istio using the rancher2.App resource instead. See above example.
- enableClusterMonitoring boolean: Enable built-in cluster monitoring (bool)
- enableNetworkPolicy boolean: Enable project network isolation (bool)
- fleetWorkspaceName string: Fleet workspace name (string)
- gkeConfig ClusterGkeConfigArgs: The Google GKE configuration for gke Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config_v2, oke_config, k3s_config and rke_config (list maxitems:1)
- gkeConfigV2 ClusterGkeConfigV2Args: The Google GKE V2 configuration for gke Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config, oke_config, k3s_config and rke_config. For Rancher v2.5.8 or above (list maxitems:1)
- istioEnabled boolean: (Computed) Is Istio enabled at cluster? Just for Rancher v2.3.x and above (bool)
- k3sConfig ClusterK3sConfigArgs: The K3S configuration for k3s imported Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config, gke_config_v2, oke_config and rke_config (list maxitems:1)
- kubeConfig string: (Computed/Sensitive) Kube Config generated for the cluster. Note: For Rancher 2.6.0 and above, when the cluster has cluster_auth_endpoint enabled, the kube_config will not be available until the cluster is connected (string)
- labels {[key: string]: any}: Labels for the Cluster (map)
- name string: The name of the Cluster (string)
- okeConfig ClusterOkeConfigArgs: The Oracle OKE configuration for oke Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config, gke_config_v2, k3s_config and rke_config (list maxitems:1)
- rke2Config ClusterRke2ConfigArgs: The RKE2 configuration for rke2 Clusters. Conflicts with aks_config, aks_config_v2, eks_config, gke_config, oke_config, k3s_config and rke_config (list maxitems:1)
- rkeConfig ClusterRkeConfigArgs: The RKE configuration for rke Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config, gke_config_v2, oke_config and k3s_config (list maxitems:1)
- scheduledClusterScan ClusterScheduledClusterScanArgs: Cluster scheduled CIS scan. For Rancher v2.4.0 or above (list maxitems:1)
- systemProjectId string: (Computed) System project ID for the cluster (string)
- windowsPreferedCluster boolean: Windows preferred cluster. Default: false (bool)
- agent_env_vars Sequence[ClusterAgentEnvVarArgs]: Optional Agent Env Vars for Rancher agent. Just for Rancher v2.5.6 and above (list)
- aks_config ClusterAksConfigArgs: The Azure AKS configuration for aks Clusters. Conflicts with aks_config_v2, eks_config, eks_config_v2, gke_config, gke_config_v2, oke_config, k3s_config and rke_config (list maxitems:1)
- aks_config_v2 ClusterAksConfigV2Args: The Azure AKS v2 configuration for creating/import aks Clusters. Conflicts with aks_config, eks_config, eks_config_v2, gke_config, gke_config_v2, oke_config, k3s_config and rke_config (list maxitems:1)
- annotations Mapping[str, Any]: Annotations for the Cluster (map)
- ca_cert str: TLS CA certificate for etcd service (string)
- cluster_auth_endpoint ClusterClusterAuthEndpointArgs: Enabling the local cluster authorized endpoint allows direct communication with the cluster, bypassing the Rancher API proxy (list maxitems:1)
- cluster_monitoring_input ClusterClusterMonitoringInputArgs: Cluster monitoring config. Any parameter defined in rancher-monitoring charts could be configured (list maxitems:1)
- cluster_registration_token ClusterClusterRegistrationTokenArgs: (Computed) Cluster Registration Token generated for the cluster (list maxitems:1)
- cluster_template_answers ClusterClusterTemplateAnswersArgs: Cluster template answers. Just for Rancher v2.3.x and above (list maxitems:1)
- cluster_template_id str: Cluster template ID. Just for Rancher v2.3.x and above (string)
- cluster_template_questions Sequence[ClusterClusterTemplateQuestionArgs]: Cluster template questions. Just for Rancher v2.3.x and above (list)
- cluster_template_revision_id str: Cluster template revision ID. Just for Rancher v2.3.x and above (string)
- default_pod_security_policy_template_id str
- default_project_id str: (Computed) Default project ID for the cluster (string)
- description str: The description for Cluster (string)
- desired_agent_image str: Desired agent image. Just for Rancher v2.3.x and above (string)
- desired_auth_image str: Desired auth image. Just for Rancher v2.3.x and above (string)
- docker_root_dir str: Docker Root Dir (string)
- driver str: (Computed) The driver used for the Cluster. imported, azurekubernetesservice, amazonelasticcontainerservice, googlekubernetesengine and rancherKubernetesEngine are supported (string)
- eks_config ClusterEksConfigArgs: The Amazon EKS configuration for eks Clusters. Conflicts with aks_config, aks_config_v2, eks_config_v2, gke_config, gke_config_v2, oke_config, k3s_config and rke_config (list maxitems:1)
- eks_config_v2 ClusterEksConfigV2Args: The Amazon EKS V2 configuration to create or import eks Clusters. Conflicts with aks_config, eks_config, gke_config, gke_config_v2, oke_config, k3s_config and rke_config. For Rancher v2.5.x or above (list maxitems:1)
- enable_cluster_alerting bool: Enable built-in cluster alerting (bool)
- enable_cluster_istio bool: Deploy Istio on the system project and istio-system namespace. Deploy Istio using the rancher2.App resource instead. See above example.
- enable_cluster_monitoring bool: Enable built-in cluster monitoring (bool)
- enable_network_policy bool: Enable project network isolation (bool)
- fleet_workspace_name str: Fleet workspace name (string)
- gke_config ClusterGkeConfigArgs: The Google GKE configuration for gke Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config_v2, oke_config, k3s_config and rke_config (list maxitems:1)
- gke_config_v2 ClusterGkeConfigV2Args: The Google GKE V2 configuration for gke Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config, oke_config, k3s_config and rke_config. For Rancher v2.5.8 or above (list maxitems:1)
- istio_enabled bool: (Computed) Is Istio enabled at cluster? Just for Rancher v2.3.x and above (bool)
- k3s_config ClusterK3sConfigArgs: The K3S configuration for k3s imported Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config, gke_config_v2, oke_config and rke_config (list maxitems:1)
- kube_config str: (Computed/Sensitive) Kube Config generated for the cluster. Note: For Rancher 2.6.0 and above, when the cluster has cluster_auth_endpoint enabled, the kube_config will not be available until the cluster is connected (string)
- labels Mapping[str, Any]: Labels for the Cluster (map)
- name str: The name of the Cluster (string)
- oke_config ClusterOkeConfigArgs: The Oracle OKE configuration for oke Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config, gke_config_v2, k3s_config and rke_config (list maxitems:1)
- rke2_config ClusterRke2ConfigArgs: The RKE2 configuration for rke2 Clusters. Conflicts with aks_config, aks_config_v2, eks_config, gke_config, oke_config, k3s_config and rke_config (list maxitems:1)
- rke_config ClusterRkeConfigArgs: The RKE configuration for rke Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config, gke_config_v2, oke_config and k3s_config (list maxitems:1)
- scheduled_cluster_scan ClusterScheduledClusterScanArgs: Cluster scheduled CIS scan. For Rancher v2.4.0 or above (list maxitems:1)
- system_project_id str: (Computed) System project ID for the cluster (string)
- windows_prefered_cluster bool: Windows preferred cluster. Default: false (bool)
- agentEnvVars List<Property Map>: Optional Agent Env Vars for Rancher agent. Just for Rancher v2.5.6 and above (list)
- aksConfig Property Map: The Azure AKS configuration for aks Clusters. Conflicts with aks_config_v2, eks_config, eks_config_v2, gke_config, gke_config_v2, oke_config, k3s_config and rke_config (list maxitems:1)
- aksConfigV2 Property Map: The Azure AKS v2 configuration for creating/import aks Clusters. Conflicts with aks_config, eks_config, eks_config_v2, gke_config, gke_config_v2, oke_config, k3s_config and rke_config (list maxitems:1)
- annotations Map<Any>: Annotations for the Cluster (map)
- caCert String: TLS CA certificate for etcd service (string)
- clusterAuthEndpoint Property Map: Enabling the local cluster authorized endpoint allows direct communication with the cluster, bypassing the Rancher API proxy (list maxitems:1)
- clusterMonitoringInput Property Map: Cluster monitoring config. Any parameter defined in rancher-monitoring charts could be configured (list maxitems:1)
- clusterRegistrationToken Property Map: (Computed) Cluster Registration Token generated for the cluster (list maxitems:1)
- clusterTemplateAnswers Property Map: Cluster template answers. Just for Rancher v2.3.x and above (list maxitems:1)
- clusterTemplateId String: Cluster template ID. Just for Rancher v2.3.x and above (string)
- clusterTemplateQuestions List<Property Map>: Cluster template questions. Just for Rancher v2.3.x and above (list)
- clusterTemplateRevisionId String: Cluster template revision ID. Just for Rancher v2.3.x and above (string)
- defaultPodSecurityPolicyTemplateId String
- defaultProjectId String: (Computed) Default project ID for the cluster (string)
- description String: The description for Cluster (string)
- desiredAgentImage String: Desired agent image. Just for Rancher v2.3.x and above (string)
- desiredAuthImage String: Desired auth image. Just for Rancher v2.3.x and above (string)
- dockerRootDir String: Docker Root Dir (string)
- driver String: (Computed) The driver used for the Cluster. imported, azurekubernetesservice, amazonelasticcontainerservice, googlekubernetesengine and rancherKubernetesEngine are supported (string)
- eksConfig Property Map: The Amazon EKS configuration for eks Clusters. Conflicts with aks_config, aks_config_v2, eks_config_v2, gke_config, gke_config_v2, oke_config, k3s_config and rke_config (list maxitems:1)
- eksConfigV2 Property Map: The Amazon EKS V2 configuration to create or import eks Clusters. Conflicts with aks_config, eks_config, gke_config, gke_config_v2, oke_config, k3s_config and rke_config. For Rancher v2.5.x or above (list maxitems:1)
- enableClusterAlerting Boolean: Enable built-in cluster alerting (bool)
- enableClusterIstio Boolean: Deploy Istio on the system project and istio-system namespace. Deploy Istio using the rancher2.App resource instead. See above example.
- enableClusterMonitoring Boolean: Enable built-in cluster monitoring (bool)
- enableNetworkPolicy Boolean: Enable project network isolation (bool)
- fleetWorkspaceName String: Fleet workspace name (string)
- gkeConfig Property Map: The Google GKE configuration for gke Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config_v2, oke_config, k3s_config and rke_config (list maxitems:1)
- gkeConfigV2 Property Map: The Google GKE V2 configuration for gke Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config, oke_config, k3s_config and rke_config. For Rancher v2.5.8 or above (list maxitems:1)
- istioEnabled Boolean: (Computed) Is Istio enabled at cluster? Just for Rancher v2.3.x and above (bool)
- k3sConfig Property Map: The K3S configuration for k3s imported Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config, gke_config_v2, oke_config and rke_config (list maxitems:1)
- kubeConfig String: (Computed/Sensitive) Kube Config generated for the cluster. Note: For Rancher 2.6.0 and above, when the cluster has cluster_auth_endpoint enabled, the kube_config will not be available until the cluster is connected (string)
- labels Map<Any>: Labels for the Cluster (map)
- name String: The name of the Cluster (string)
- okeConfig Property Map: The Oracle OKE configuration for oke Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config, gke_config_v2, k3s_config and rke_config (list maxitems:1)
- rke2Config Property Map: The RKE2 configuration for rke2 Clusters. Conflicts with aks_config, aks_config_v2, eks_config, gke_config, oke_config, k3s_config and rke_config (list maxitems:1)
- rkeConfig Property Map: The RKE configuration for rke Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config, gke_config_v2, oke_config and k3s_config (list maxitems:1)
- scheduledClusterScan Property Map: Cluster scheduled CIS scan. For Rancher v2.4.0 or above (list maxitems:1)
- systemProjectId String: (Computed) System project ID for the cluster (string)
- windowsPreferedCluster Boolean: Windows preferred cluster. Default: false (bool)
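One of the properties above, clusterAuthEndpoint, controls the local authorized endpoint described in its entry. As a hedged sketch in C# (the Enabled field name on ClusterClusterAuthEndpointArgs is assumed rather than taken from this page), enabling it on an RKE cluster might look like:
using Pulumi;
using Rancher2 = Pulumi.Rancher2;

return await Deployment.RunAsync(() =>
{
    var withLocalAuth = new Rancher2.Cluster("with-local-auth", new()
    {
        Description = "RKE cluster exposing the local authorized endpoint",
        ClusterAuthEndpoint = new Rancher2.Inputs.ClusterClusterAuthEndpointArgs
        {
            // "Enabled" is an assumed field name on this args type; verify it against the
            // cluster auth endpoint supporting type before relying on it.
            Enabled = true,
        },
        RkeConfig = new Rancher2.Inputs.ClusterRkeConfigArgs
        {
            Network = new Rancher2.Inputs.ClusterRkeConfigNetworkArgs
            {
                Plugin = "canal",
            },
        },
    });
});
Note that, per the kube_config entry above, the generated kube config is not available until such a cluster reports as connected on Rancher 2.6.0 and above.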
Supporting Types
ClusterAgentEnvVar
ClusterAksConfig
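Before the field-by-field reference below, here is a hedged C# sketch of wiring this block into a cluster. Every value is a placeholder, and the Kubernetes version shown is only an example; the properties used are the ones listed first below.
using Pulumi;
using Rancher2 = Pulumi.Rancher2;

return await Deployment.RunAsync(() =>
{
    var fooAks = new Rancher2.Cluster("foo-aks", new()
    {
        Description = "AKS cluster managed through Rancher",
        AksConfig = new Rancher2.Inputs.ClusterAksConfigArgs
        {
            // Placeholders for the Azure service principal and network wiring.
            AgentDnsPrefix = "foo-agent",
            MasterDnsPrefix = "foo-master",
            ClientId = "<azure-client-id>",
            ClientSecret = "<azure-client-secret>",
            SubscriptionId = "<azure-subscription-id>",
            TenantId = "<azure-tenant-id>",
            ResourceGroup = "foo-resource-group",
            // Example version only; use a version supported by your Rancher and AKS setup.
            KubernetesVersion = "1.24.6",
            SshPublicKeyContents = "<ssh-public-key>",
            Subnet = "foo-subnet",
            VirtualNetwork = "foo-vnet",
            VirtualNetworkResourceGroup = "foo-network-rg",
        },
    });
});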
- AgentDnsPrefix string: DNS prefix to be used to create the FQDN for the agent pool (string)
- ClientId string: Azure client ID to use (string)
- ClientSecret string: Azure client secret associated with the "client id" (string)
- KubernetesVersion string: K8s version to deploy. Default: Rancher default (string) (Note: if rke_config is set at cluster_template, kubernetes_version must be set to the active cluster version so Rancher can clone the RKE template)
- MasterDnsPrefix string: DNS prefix to use for the Kubernetes cluster control plane (string)
- ResourceGroup string: (string)
- SshPublicKeyContents string: Contents of the SSH public key used to authenticate with Linux hosts (string)
- Subnet string: The name of an existing Azure Virtual Subnet. Composite of agent virtual network subnet ID (string)
- SubscriptionId string: (string)
- TenantId string: (string)
- VirtualNetwork string: The name of an existing Azure Virtual Network. Composite of agent virtual network subnet ID (string)
- VirtualNetworkResourceGroup string: The resource group of an existing Azure Virtual Network. Composite of agent virtual network subnet ID (string)
- AadServerAppSecret string: The secret of an Azure Active Directory server application (string)
- AadTenantId string: The ID of an Azure Active Directory tenant (string)
- AddClientAppId string: The ID of an Azure Active Directory client application of type "Native". This application is for user login via kubectl (string)
- AddServerAppId string: The ID of an Azure Active Directory server application of type "Web app/API". This application represents the managed cluster's apiserver (Server application) (string)
- AdminUsername string: The administrator username to use for Linux hosts. Default azureuser (string)
- AgentOsDiskSize int: GB size to be used to specify the disk for every machine in the agent pool. If you specify 0, it will apply the default according to the "agent vm size" specified. Default 0 (int)
- AgentPoolName string: Name for the agent pool, up to 12 alphanumeric characters. Default agentpool0 (string)
- AgentStorageProfile string: Storage profile specifies what kind of storage is used on machines in the agent pool. Choose from [ManagedDisks StorageAccount]. Default ManagedDisks (string)
- AgentVmSize string: Size of machine in the agent pool. Default Standard_D1_v2 (string)
- AuthBaseUrl string: Different authentication API url to use. Default https://login.microsoftonline.com/ (string)
- BaseUrl string: Different resource management API url to use. Default https://management.azure.com/ (string)
- Count int: Number of machines (VMs) in the agent pool. Allowed values must be in the range of 1 to 100 (inclusive). Default 1 (int)
- DnsServiceIp string: An IP address assigned to the Kubernetes DNS service. It must be within the Kubernetes Service address range specified in "service cidr". Default 10.0.0.10 (string)
- DockerBridgeCidr string: A CIDR notation IP range assigned to the Docker bridge network. It must not overlap with any Subnet IP ranges or the Kubernetes Service address range specified in "service cidr". Default 172.17.0.1/16 (string)
- EnableHttpApplicationRouting bool: Enable the Kubernetes ingress with automatic public DNS name creation. Default false (bool)
- EnableMonitoring bool: Turn on Azure Log Analytics monitoring. Uses the Log Analytics "Default" workspace if it exists, else creates one. If using an existing workspace, specify "log analytics workspace resource id". Default true (bool)
- LoadBalancerSku string: Allowed values: basic (default), standard (string)
- Location string: (string)
- LogAnalyticsWorkspace string: The name of an existing Azure Log Analytics Workspace to use for storing monitoring data. If not specified, uses '{resource group}-{subscription id}-{location code}' (string)
- LogAnalyticsWorkspaceResourceGroup string: The resource group of an existing Azure Log Analytics Workspace to use for storing monitoring data. If not specified, uses the 'Cluster' resource group (string)
- MaxPods int: Maximum number of pods that can run on a node. Default 110 (int)
- NetworkPlugin string: Network plugin used for building Kubernetes network. Choose from azure or kubenet. Default azure (string)
- NetworkPolicy string: Network policy used for building Kubernetes network. Choose from calico (string)
- PodCidr string: A CIDR notation IP range from which to assign Kubernetes Pod IPs when "network plugin" is specified in "kubenet". Default 172.244.0.0/16 (string)
- ServiceCidr string: A CIDR notation IP range from which to assign Kubernetes Service cluster IPs. It must not overlap with any Subnet IP ranges. Default 10.0.0.0/16 (string)
- Tag Dictionary<string, object>: Use tags argument instead as []string
- Tags List<string>: Tags for Kubernetes cluster. For example, ["foo=bar","bar=foo"] (list)
- AgentDnsPrefix string: DNS prefix to be used to create the FQDN for the agent pool (string)
- ClientId string: Azure client ID to use (string)
- ClientSecret string: Azure client secret associated with the "client id" (string)
- KubernetesVersion string: K8s version to deploy. Default: Rancher default (string) (Note: if rke_config is set at cluster_template, kubernetes_version must be set to the active cluster version so Rancher can clone the RKE template)
- MasterDnsPrefix string: DNS prefix to use for the Kubernetes cluster control plane (string)
- ResourceGroup string: (string)
- SshPublicKeyContents string: Contents of the SSH public key used to authenticate with Linux hosts (string)
- Subnet string: The name of an existing Azure Virtual Subnet. Composite of agent virtual network subnet ID (string)
- SubscriptionId string: (string)
- TenantId string: (string)
- VirtualNetwork string: The name of an existing Azure Virtual Network. Composite of agent virtual network subnet ID (string)
- VirtualNetworkResourceGroup string: The resource group of an existing Azure Virtual Network. Composite of agent virtual network subnet ID (string)
- AadServerAppSecret string: The secret of an Azure Active Directory server application (string)
- AadTenantId string: The ID of an Azure Active Directory tenant (string)
- AddClientAppId string: The ID of an Azure Active Directory client application of type "Native". This application is for user login via kubectl (string)
- AddServerAppId string: The ID of an Azure Active Directory server application of type "Web app/API". This application represents the managed cluster's apiserver (Server application) (string)
- AdminUsername string: The administrator username to use for Linux hosts. Default azureuser (string)
- AgentOsDiskSize int: GB size to be used to specify the disk for every machine in the agent pool. If you specify 0, it will apply the default according to the "agent vm size" specified. Default 0 (int)
- AgentPoolName string: Name for the agent pool, up to 12 alphanumeric characters. Default agentpool0 (string)
- AgentStorageProfile string: Storage profile specifies what kind of storage is used on machines in the agent pool. Choose from [ManagedDisks StorageAccount]. Default ManagedDisks (string)
- AgentVmSize string: Size of machine in the agent pool. Default Standard_D1_v2 (string)
- AuthBaseUrl string: Different authentication API url to use. Default https://login.microsoftonline.com/ (string)
- BaseUrl string: Different resource management API url to use. Default https://management.azure.com/ (string)
- Count int: Number of machines (VMs) in the agent pool. Allowed values must be in the range of 1 to 100 (inclusive). Default 1 (int)
- DnsServiceIp string: An IP address assigned to the Kubernetes DNS service. It must be within the Kubernetes Service address range specified in "service cidr". Default 10.0.0.10 (string)
- DockerBridgeCidr string: A CIDR notation IP range assigned to the Docker bridge network. It must not overlap with any Subnet IP ranges or the Kubernetes Service address range specified in "service cidr". Default 172.17.0.1/16 (string)
- EnableHttpApplicationRouting bool: Enable the Kubernetes ingress with automatic public DNS name creation. Default false (bool)
- EnableMonitoring bool: Turn on Azure Log Analytics monitoring. Uses the Log Analytics "Default" workspace if it exists, else creates one. If using an existing workspace, specify "log analytics workspace resource id". Default true (bool)
- LoadBalancerSku string: Allowed values: basic (default), standard (string)
- Location string: (string)
- LogAnalyticsWorkspace string: The name of an existing Azure Log Analytics Workspace to use for storing monitoring data. If not specified, uses '{resource group}-{subscription id}-{location code}' (string)
- LogAnalyticsWorkspaceResourceGroup string: The resource group of an existing Azure Log Analytics Workspace to use for storing monitoring data. If not specified, uses the 'Cluster' resource group (string)
- MaxPods int: Maximum number of pods that can run on a node. Default 110 (int)
- NetworkPlugin string: Network plugin used for building Kubernetes network. Choose from azure or kubenet. Default azure (string)
- NetworkPolicy string: Network policy used for building Kubernetes network. Choose from calico (string)
- PodCidr string: A CIDR notation IP range from which to assign Kubernetes Pod IPs when "network plugin" is specified in "kubenet". Default 172.244.0.0/16 (string)
- ServiceCidr string: A CIDR notation IP range from which to assign Kubernetes Service cluster IPs. It must not overlap with any Subnet IP ranges. Default 10.0.0.0/16 (string)
- Tag map[string]interface{}: Use tags argument instead as []string
- Tags []string: Tags for Kubernetes cluster. For example, ["foo=bar","bar=foo"] (list)
- agentDnsPrefix String: DNS prefix to be used to create the FQDN for the agent pool (string)
- clientId String: Azure client ID to use (string)
- clientSecret String: Azure client secret associated with the "client id" (string)
- kubernetesVersion String: K8s version to deploy. Default: Rancher default (string) (Note: if rke_config is set at cluster_template, kubernetes_version must be set to the active cluster version so Rancher can clone the RKE template)
- masterDnsPrefix String: DNS prefix to use for the Kubernetes cluster control plane (string)
- resourceGroup String: (string)
- sshPublicKeyContents String: Contents of the SSH public key used to authenticate with Linux hosts (string)
- subnet String: The name of an existing Azure Virtual Subnet. Composite of agent virtual network subnet ID (string)
- subscriptionId String: (string)
- tenantId String: (string)
- virtualNetwork String: The name of an existing Azure Virtual Network. Composite of agent virtual network subnet ID (string)
- virtualNetworkResourceGroup String: The resource group of an existing Azure Virtual Network. Composite of agent virtual network subnet ID (string)
- aadServerAppSecret String: The secret of an Azure Active Directory server application (string)
- aadTenantId String: The ID of an Azure Active Directory tenant (string)
- addClientAppId String: The ID of an Azure Active Directory client application of type "Native". This application is for user login via kubectl (string)
- addServerAppId String: The ID of an Azure Active Directory server application of type "Web app/API". This application represents the managed cluster's apiserver (Server application) (string)
- adminUsername String: The administrator username to use for Linux hosts. Default azureuser (string)
- agentOsDiskSize Integer: GB size to be used to specify the disk for every machine in the agent pool. If you specify 0, it will apply the default according to the "agent vm size" specified. Default 0 (int)
- agentPoolName String: Name for the agent pool, up to 12 alphanumeric characters. Default agentpool0 (string)
- agentStorageProfile String: Storage profile specifies what kind of storage is used on machines in the agent pool. Choose from [ManagedDisks StorageAccount]. Default ManagedDisks (string)
- agentVmSize String: Size of machine in the agent pool. Default Standard_D1_v2 (string)
- authBaseUrl String: Different authentication API url to use. Default https://login.microsoftonline.com/ (string)
- baseUrl String: Different resource management API url to use. Default https://management.azure.com/ (string)
- count Integer: Number of machines (VMs) in the agent pool. Allowed values must be in the range of 1 to 100 (inclusive). Default 1 (int)
- dnsServiceIp String: An IP address assigned to the Kubernetes DNS service. It must be within the Kubernetes Service address range specified in "service cidr". Default 10.0.0.10 (string)
- dockerBridgeCidr String: A CIDR notation IP range assigned to the Docker bridge network. It must not overlap with any Subnet IP ranges or the Kubernetes Service address range specified in "service cidr". Default 172.17.0.1/16 (string)
- enableHttpApplicationRouting Boolean: Enable the Kubernetes ingress with automatic public DNS name creation. Default false (bool)
- enableMonitoring Boolean: Turn on Azure Log Analytics monitoring. Uses the Log Analytics "Default" workspace if it exists, else creates one. If using an existing workspace, specify "log analytics workspace resource id". Default true (bool)
- loadBalancerSku String: Allowed values: basic (default), standard (string)
- location String: (string)
- logAnalyticsWorkspace String: The name of an existing Azure Log Analytics Workspace to use for storing monitoring data. If not specified, uses '{resource group}-{subscription id}-{location code}' (string)
- logAnalyticsWorkspaceResourceGroup String: The resource group of an existing Azure Log Analytics Workspace to use for storing monitoring data. If not specified, uses the 'Cluster' resource group (string)
- maxPods Integer: Maximum number of pods that can run on a node. Default 110 (int)
- networkPlugin String: Network plugin used for building Kubernetes network. Choose from azure or kubenet. Default azure (string)
- networkPolicy String: Network policy used for building Kubernetes network. Choose from calico (string)
- podCidr String: A CIDR notation IP range from which to assign Kubernetes Pod IPs when "network plugin" is specified in "kubenet". Default 172.244.0.0/16 (string)
- serviceCidr String: A CIDR notation IP range from which to assign Kubernetes Service cluster IPs. It must not overlap with any Subnet IP ranges. Default 10.0.0.0/16 (string)
- tag Map<String,Object>: Use tags argument instead as []string
- tags List<String>: Tags for Kubernetes cluster. For example, ["foo=bar","bar=foo"] (list)
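Taken together, the aks_config arguments above correspond to the ClusterAksConfigArgs input of the Cluster resource. The following is a minimal, illustrative C# sketch; the credential values, resource names, and Kubernetes version are placeholders, not values taken from this reference.
using Pulumi;
using Rancher2 = Pulumi.Rancher2;

return await Deployment.RunAsync(() =>
{
    // Hypothetical AKS cluster managed through the legacy aks_config block.
    var fooAks = new Rancher2.Cluster("foo-aks", new()
    {
        Description = "Example AKS cluster",
        AksConfig = new Rancher2.Inputs.ClusterAksConfigArgs
        {
            // Placeholder Azure credentials -- replace with real values.
            ClientId = "<azure-client-id>",
            ClientSecret = "<azure-client-secret>",
            SubscriptionId = "<azure-subscription-id>",
            TenantId = "<azure-tenant-id>",
            // Placeholder resource coordinates.
            ResourceGroup = "foo-rg",
            Location = "eastus",
            AgentDnsPrefix = "foo-agent",
            MasterDnsPrefix = "foo-master",
            SshPublicKeyContents = "<ssh-public-key>",
            VirtualNetwork = "foo-vnet",
            VirtualNetworkResourceGroup = "foo-network-rg",
            Subnet = "foo-subnet",
            KubernetesVersion = "1.24.6",
            // Optional tuning knobs documented above.
            Count = 3,
            MaxPods = 110,
            NetworkPlugin = "azure",
            LoadBalancerSku = "standard",
        },
    });
});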
ClusterAksConfigV2
- CloudCredentialId string - The AKS Cloud Credential ID to use (string)
- ResourceGroup string - (string)
- ResourceLocation string - The AKS resource location (string)
- AuthBaseUrl string - Different authentication API url to use. Default https://login.microsoftonline.com/ (string)
- AuthorizedIpRanges List<string> - The AKS authorized ip ranges (list)
- BaseUrl string - Different resource management API url to use. Default https://management.azure.com/ (string)
- DnsPrefix string - The AKS dns prefix. Required if imported=false (string)
- HttpApplicationRouting bool - Enable AKS http application routing? (bool)
- Imported bool - Is AKS cluster imported? Default: false (bool)
- KubernetesVersion string - K8s version to deploy. Default: Rancher default (string) (Note: if rke_config is set at cluster_template, kubernetes_version must be set to the active cluster version so Rancher can clone the RKE template)
- LinuxAdminUsername string - The AKS Linux admin username (string)
- LinuxSshPublicKey string - The AKS Linux ssh public key (string)
- LoadBalancerSku string - Allowed values: basic (default), standard (string)
- LogAnalyticsWorkspaceGroup string - The AKS log analytics workspace group (string)
- LogAnalyticsWorkspaceName string - The AKS log analytics workspace name (string)
- Monitoring bool - Kubernetes cluster monitoring (list maxitems:1)
- Name string - The name of the Cluster (string)
- NetworkDnsServiceIp string - The AKS network dns service ip (string)
- NetworkDockerBridgeCidr string - The AKS network docker bridge cidr (string)
- NetworkPlugin string - Network plugin used for building the Kubernetes network. Choose from azure or kubenet. Default azure (string)
- NetworkPodCidr string - The AKS network pod cidr (string)
- NetworkPolicy string - Network policy used for building the Kubernetes network. Choose from calico (string)
- NetworkServiceCidr string - The AKS network service cidr (string)
- NodePools List<ClusterAksConfigV2NodePool> - The AKS node pools. Required if imported=false (list)
- PrivateCluster bool - Is AKS cluster private? (bool)
- Subnet string - The name of an existing Azure Virtual Subnet. Composite of agent virtual network subnet ID (string)
- Tags Dictionary<string, object> - Tags for the Kubernetes cluster. For example, ["foo=bar","bar=foo"] (list)
- VirtualNetwork string - The name of an existing Azure Virtual Network. Composite of agent virtual network subnet ID (string)
- VirtualNetworkResourceGroup string - The resource group of an existing Azure Virtual Network. Composite of agent virtual network subnet ID (string)
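When imported is true, most of the provisioning arguments above are unnecessary; the cluster only needs the cloud credential and its existing Azure coordinates. A hedged C# sketch follows, in which the credential ID, resource group, location, and cluster name are all placeholders.
using Pulumi;
using Rancher2 = Pulumi.Rancher2;

return await Deployment.RunAsync(() =>
{
    // Hypothetical import of an existing AKS cluster via aks_config_v2.
    var fooAksImport = new Rancher2.Cluster("foo-aks-import", new()
    {
        Description = "Imported AKS cluster",
        AksConfigV2 = new Rancher2.Inputs.ClusterAksConfigV2Args
        {
            CloudCredentialId = "<rancher2-cloud-credential-id>", // placeholder
            ResourceGroup = "existing-rg",                        // placeholder
            ResourceLocation = "eastus",                          // placeholder
            Name = "existing-aks-cluster",                        // placeholder
            Imported = true, // no node pools or dns prefix needed when importing
        },
    });
});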
ClusterAksConfigV2NodePool
- Name string - The AKS node pool name (string)
- AvailabilityZones List<string> - The AKS node pool availability zones (list)
- Count int - Number of machines (VMs) in the agent pool. Allowed values must be in the range of 1 to 100 (inclusive). Default 1 (int)
- EnableAutoScaling bool - Is AKS node pool auto scaling enabled? Default: false (bool)
- MaxCount int - The AKS node pool max count. Required if enable_auto_scaling=true (int)
- MaxPods int - Maximum number of pods that can run on a node. Default 110 (int)
- MinCount int - The AKS node pool min count. Required if enable_auto_scaling=true (int)
- Mode string - RKE mode for authorization. rbac and none modes are available. Default rbac (string)
- OrchestratorVersion string - The AKS node pool orchestrator version (string)
- OsDiskSizeGb int - The AKS node pool os disk size gb. Default: 128 (int)
- OsDiskType string - The AKS node pool os disk type. Default: Managed (string)
- OsType string - The AKS node pool os type. Default: Linux (string)
- VmSize string - The AKS node pool vm size (string)
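A node pool is supplied to aks_config_v2 as an entry in node_pools, using the fields listed above. The C# sketch below provisions (imported=false) a cluster with one autoscaling pool; all identifiers and the Kubernetes version are illustrative placeholders.
using Pulumi;
using Rancher2 = Pulumi.Rancher2;

return await Deployment.RunAsync(() =>
{
    // Hypothetical AKS cluster provisioned by Rancher through aks_config_v2.
    var fooAksV2 = new Rancher2.Cluster("foo-aks-v2", new()
    {
        Description = "AKS cluster provisioned through Rancher",
        AksConfigV2 = new Rancher2.Inputs.ClusterAksConfigV2Args
        {
            CloudCredentialId = "<rancher2-cloud-credential-id>", // placeholder
            ResourceGroup = "foo-rg",                             // placeholder
            ResourceLocation = "eastus",                          // placeholder
            DnsPrefix = "foo-aks",          // required when imported=false
            KubernetesVersion = "1.24.6",   // placeholder version
            NetworkPlugin = "azure",
            NodePools = new[]
            {
                new Rancher2.Inputs.ClusterAksConfigV2NodePoolArgs
                {
                    Name = "systempool",
                    Count = 1,
                    VmSize = "Standard_DS2_v2",
                    OsDiskSizeGb = 128,
                    MaxPods = 110,
                    EnableAutoScaling = true,
                    MinCount = 1, // required because EnableAutoScaling = true
                    MaxCount = 3, // required because EnableAutoScaling = true
                },
            },
        },
    });
});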
ClusterClusterAuthEndpoint
ClusterClusterMonitoringInput
ClusterClusterRegistrationToken
- Annotations Dictionary<string, object> - Annotations for the Cluster (map)
- ClusterId string - Cluster ID to apply answer (string)
- Command string - Command to execute in an imported k8s cluster (string)
- Id string - The EKS node group launch template ID (string)
- InsecureCommand string - Insecure command to execute in an imported k8s cluster (string)
- InsecureNodeCommand string - Insecure node command to execute in an imported k8s cluster (string)
- InsecureWindowsNodeCommand string - Insecure windows command to execute in an imported k8s cluster (string)
- Labels Dictionary<string, object> - Labels for the Cluster (map)
- ManifestUrl string - K8s manifest url to execute with kubectl to import an existing k8s cluster (string)
- Name string - The name of the Cluster (string)
- NodeCommand string - Node command to execute in Linux nodes for custom k8s cluster (string)
- Token string - ACI token (string)