rancher2.Cluster
Provides a Rancher v2 Cluster resource. This can be used to create clusters for Rancher v2 environments and retrieve their information.
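For example, an existing cluster's information can be looked up by name with the getCluster data source. The following is a minimal TypeScript sketch; the cluster name foo-custom is an assumption for illustration.
import * as pulumi from "@pulumi/pulumi";
import * as rancher2 from "@pulumi/rancher2";
// Look up an existing cluster by name (the name "foo-custom" is assumed).
const existing = rancher2.getClusterOutput({
    name: "foo-custom",
});
// Export some of the retrieved information; the kubeconfig is marked secret.
export const clusterId = existing.id;
export const kubeConfig = pulumi.secret(existing.kubeConfig);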
Example Usage
Creating a Rancher v2 RKE cluster, enabling and customizing monitoring
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Rancher2 = Pulumi.Rancher2;
return await Deployment.RunAsync(() =>
{
// Create a new rancher2 RKE Cluster
var foo_custom = new Rancher2.Cluster("foo-custom", new()
{
ClusterMonitoringInput = new Rancher2.Inputs.ClusterClusterMonitoringInputArgs
{
Answers =
{
{ "exporter-kubelets.https", true },
{ "exporter-node.enabled", true },
{ "exporter-node.ports.metrics.port", 9796 },
{ "exporter-node.resources.limits.cpu", "200m" },
{ "exporter-node.resources.limits.memory", "200Mi" },
{ "grafana.persistence.enabled", false },
{ "grafana.persistence.size", "10Gi" },
{ "grafana.persistence.storageClass", "default" },
{ "operator.resources.limits.memory", "500Mi" },
{ "prometheus.persistence.enabled", "false" },
{ "prometheus.persistence.size", "50Gi" },
{ "prometheus.persistence.storageClass", "default" },
{ "prometheus.persistent.useReleaseName", "true" },
{ "prometheus.resources.core.limits.cpu", "1000m" },
{ "prometheus.resources.core.limits.memory", "1500Mi" },
{ "prometheus.resources.core.requests.cpu", "750m" },
{ "prometheus.resources.core.requests.memory", "750Mi" },
{ "prometheus.retention", "12h" },
},
Version = "0.1.0",
},
Description = "Foo rancher2 custom cluster",
EnableClusterMonitoring = true,
RkeConfig = new Rancher2.Inputs.ClusterRkeConfigArgs
{
Network = new Rancher2.Inputs.ClusterRkeConfigNetworkArgs
{
Plugin = "canal",
},
},
});
});
package main
import (
"github.com/pulumi/pulumi-rancher2/sdk/v5/go/rancher2"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
_, err := rancher2.NewCluster(ctx, "foo-custom", &rancher2.ClusterArgs{
ClusterMonitoringInput: &rancher2.ClusterClusterMonitoringInputArgs{
Answers: pulumi.Map{
"exporter-kubelets.https": pulumi.Any(true),
"exporter-node.enabled": pulumi.Any(true),
"exporter-node.ports.metrics.port": pulumi.Any(9796),
"exporter-node.resources.limits.cpu": pulumi.Any("200m"),
"exporter-node.resources.limits.memory": pulumi.Any("200Mi"),
"grafana.persistence.enabled": pulumi.Any(false),
"grafana.persistence.size": pulumi.Any("10Gi"),
"grafana.persistence.storageClass": pulumi.Any("default"),
"operator.resources.limits.memory": pulumi.Any("500Mi"),
"prometheus.persistence.enabled": pulumi.Any("false"),
"prometheus.persistence.size": pulumi.Any("50Gi"),
"prometheus.persistence.storageClass": pulumi.Any("default"),
"prometheus.persistent.useReleaseName": pulumi.Any("true"),
"prometheus.resources.core.limits.cpu": pulumi.Any("1000m"),
"prometheus.resources.core.limits.memory": pulumi.Any("1500Mi"),
"prometheus.resources.core.requests.cpu": pulumi.Any("750m"),
"prometheus.resources.core.requests.memory": pulumi.Any("750Mi"),
"prometheus.retention": pulumi.Any("12h"),
},
Version: pulumi.String("0.1.0"),
},
Description: pulumi.String("Foo rancher2 custom cluster"),
EnableClusterMonitoring: pulumi.Bool(true),
RkeConfig: &rancher2.ClusterRkeConfigArgs{
Network: &rancher2.ClusterRkeConfigNetworkArgs{
Plugin: pulumi.String("canal"),
},
},
})
if err != nil {
return err
}
return nil
})
}
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.rancher2.Cluster;
import com.pulumi.rancher2.ClusterArgs;
import com.pulumi.rancher2.inputs.ClusterClusterMonitoringInputArgs;
import com.pulumi.rancher2.inputs.ClusterRkeConfigArgs;
import com.pulumi.rancher2.inputs.ClusterRkeConfigNetworkArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
var foo_custom = new Cluster("foo-custom", ClusterArgs.builder()
.clusterMonitoringInput(ClusterClusterMonitoringInputArgs.builder()
.answers(Map.ofEntries(
Map.entry("exporter-kubelets.https", true),
Map.entry("exporter-node.enabled", true),
Map.entry("exporter-node.ports.metrics.port", 9796),
Map.entry("exporter-node.resources.limits.cpu", "200m"),
Map.entry("exporter-node.resources.limits.memory", "200Mi"),
Map.entry("grafana.persistence.enabled", false),
Map.entry("grafana.persistence.size", "10Gi"),
Map.entry("grafana.persistence.storageClass", "default"),
Map.entry("operator.resources.limits.memory", "500Mi"),
Map.entry("prometheus.persistence.enabled", "false"),
Map.entry("prometheus.persistence.size", "50Gi"),
Map.entry("prometheus.persistence.storageClass", "default"),
Map.entry("prometheus.persistent.useReleaseName", "true"),
Map.entry("prometheus.resources.core.limits.cpu", "1000m"),
Map.entry("prometheus.resources.core.limits.memory", "1500Mi"),
Map.entry("prometheus.resources.core.requests.cpu", "750m"),
Map.entry("prometheus.resources.core.requests.memory", "750Mi"),
Map.entry("prometheus.retention", "12h")
))
.version("0.1.0")
.build())
.description("Foo rancher2 custom cluster")
.enableClusterMonitoring(true)
.rkeConfig(ClusterRkeConfigArgs.builder()
.network(ClusterRkeConfigNetworkArgs.builder()
.plugin("canal")
.build())
.build())
.build());
}
}
import pulumi
import pulumi_rancher2 as rancher2
# Create a new rancher2 RKE Cluster
foo_custom = rancher2.Cluster("foo-custom",
cluster_monitoring_input=rancher2.ClusterClusterMonitoringInputArgs(
answers={
"exporter-kubelets.https": True,
"exporter-node.enabled": True,
"exporter-node.ports.metrics.port": 9796,
"exporter-node.resources.limits.cpu": "200m",
"exporter-node.resources.limits.memory": "200Mi",
"grafana.persistence.enabled": False,
"grafana.persistence.size": "10Gi",
"grafana.persistence.storageClass": "default",
"operator.resources.limits.memory": "500Mi",
"prometheus.persistence.enabled": "false",
"prometheus.persistence.size": "50Gi",
"prometheus.persistence.storageClass": "default",
"prometheus.persistent.useReleaseName": "true",
"prometheus.resources.core.limits.cpu": "1000m",
"prometheus.resources.core.limits.memory": "1500Mi",
"prometheus.resources.core.requests.cpu": "750m",
"prometheus.resources.core.requests.memory": "750Mi",
"prometheus.retention": "12h",
},
version="0.1.0",
),
description="Foo rancher2 custom cluster",
enable_cluster_monitoring=True,
rke_config=rancher2.ClusterRkeConfigArgs(
network=rancher2.ClusterRkeConfigNetworkArgs(
plugin="canal",
),
))
import * as pulumi from "@pulumi/pulumi";
import * as rancher2 from "@pulumi/rancher2";
// Create a new rancher2 RKE Cluster
const foo_custom = new rancher2.Cluster("foo-custom", {
clusterMonitoringInput: {
answers: {
"exporter-kubelets.https": true,
"exporter-node.enabled": true,
"exporter-node.ports.metrics.port": 9796,
"exporter-node.resources.limits.cpu": "200m",
"exporter-node.resources.limits.memory": "200Mi",
"grafana.persistence.enabled": false,
"grafana.persistence.size": "10Gi",
"grafana.persistence.storageClass": "default",
"operator.resources.limits.memory": "500Mi",
"prometheus.persistence.enabled": "false",
"prometheus.persistence.size": "50Gi",
"prometheus.persistence.storageClass": "default",
"prometheus.persistent.useReleaseName": "true",
"prometheus.resources.core.limits.cpu": "1000m",
"prometheus.resources.core.limits.memory": "1500Mi",
"prometheus.resources.core.requests.cpu": "750m",
"prometheus.resources.core.requests.memory": "750Mi",
"prometheus.retention": "12h",
},
version: "0.1.0",
},
description: "Foo rancher2 custom cluster",
enableClusterMonitoring: true,
rkeConfig: {
network: {
plugin: "canal",
},
},
});
resources:
# Create a new rancher2 RKE Cluster
foo-custom:
type: rancher2:Cluster
properties:
clusterMonitoringInput:
answers:
exporter-kubelets.https: true
exporter-node.enabled: true
exporter-node.ports.metrics.port: 9796
exporter-node.resources.limits.cpu: 200m
exporter-node.resources.limits.memory: 200Mi
grafana.persistence.enabled: false
grafana.persistence.size: 10Gi
grafana.persistence.storageClass: default
operator.resources.limits.memory: 500Mi
prometheus.persistence.enabled: 'false'
prometheus.persistence.size: 50Gi
prometheus.persistence.storageClass: default
prometheus.persistent.useReleaseName: 'true'
prometheus.resources.core.limits.cpu: 1000m
prometheus.resources.core.limits.memory: 1500Mi
prometheus.resources.core.requests.cpu: 750m
prometheus.resources.core.requests.memory: 750Mi
prometheus.retention: 12h
version: 0.1.0
description: Foo rancher2 custom cluster
enableClusterMonitoring: true
rkeConfig:
network:
plugin: canal
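Once a custom cluster like the one above is active (i.e. after its nodes have registered), the kubeconfig generated by Rancher can be consumed by other Pulumi providers. A minimal TypeScript sketch, assuming the @pulumi/kubernetes package is installed:
import * as pulumi from "@pulumi/pulumi";
import * as rancher2 from "@pulumi/rancher2";
import * as k8s from "@pulumi/kubernetes";
// Minimal custom RKE cluster, as in the example above.
const fooCustom = new rancher2.Cluster("foo-custom", {
    description: "Foo rancher2 custom cluster",
    rkeConfig: {
        network: {
            plugin: "canal",
        },
    },
});
// Feed the generated kubeconfig to the Kubernetes provider so that
// workloads can be deployed into the new cluster.
const k8sProvider = new k8s.Provider("foo-custom-k8s", {
    kubeconfig: pulumi.secret(fooCustom.kubeConfig),
});
// Example workload deployed through that provider.
const demoNamespace = new k8s.core.v1.Namespace("demo", {}, { provider: k8sProvider });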
Creating a Rancher v2 RKE cluster, enabling and customizing monitoring and Istio
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Rancher2 = Pulumi.Rancher2;
return await Deployment.RunAsync(() =>
{
// Create a new rancher2 RKE Cluster
var foo_customCluster = new Rancher2.Cluster("foo-customCluster", new()
{
Description = "Foo rancher2 custom cluster",
RkeConfig = new Rancher2.Inputs.ClusterRkeConfigArgs
{
Network = new Rancher2.Inputs.ClusterRkeConfigNetworkArgs
{
Plugin = "canal",
},
},
EnableClusterMonitoring = true,
ClusterMonitoringInput = new Rancher2.Inputs.ClusterClusterMonitoringInputArgs
{
Answers =
{
{ "exporter-kubelets.https", true },
{ "exporter-node.enabled", true },
{ "exporter-node.ports.metrics.port", 9796 },
{ "exporter-node.resources.limits.cpu", "200m" },
{ "exporter-node.resources.limits.memory", "200Mi" },
{ "grafana.persistence.enabled", false },
{ "grafana.persistence.size", "10Gi" },
{ "grafana.persistence.storageClass", "default" },
{ "operator.resources.limits.memory", "500Mi" },
{ "prometheus.persistence.enabled", "false" },
{ "prometheus.persistence.size", "50Gi" },
{ "prometheus.persistence.storageClass", "default" },
{ "prometheus.persistent.useReleaseName", "true" },
{ "prometheus.resources.core.limits.cpu", "1000m" },
{ "prometheus.resources.core.limits.memory", "1500Mi" },
{ "prometheus.resources.core.requests.cpu", "750m" },
{ "prometheus.resources.core.requests.memory", "750Mi" },
{ "prometheus.retention", "12h" },
},
Version = "0.1.0",
},
});
// Create a new rancher2 Cluster Sync for foo-custom cluster
var foo_customClusterSync = new Rancher2.ClusterSync("foo-customClusterSync", new()
{
ClusterId = foo_customCluster.Id,
WaitMonitoring = foo_customCluster.EnableClusterMonitoring,
});
// Create a new rancher2 Namespace
var foo_istio = new Rancher2.Namespace("foo-istio", new()
{
ProjectId = foo_customClusterSync.SystemProjectId,
Description = "istio namespace",
});
// Create a new rancher2 App deploying istio (should wait until monitoring is up and running)
var istio = new Rancher2.App("istio", new()
{
CatalogName = "system-library",
Description = "Terraform app acceptance test",
ProjectId = foo_istio.ProjectId,
TemplateName = "rancher-istio",
TemplateVersion = "0.1.1",
TargetNamespace = foo_istio.Id,
Answers =
{
{ "certmanager.enabled", false },
{ "enableCRDs", true },
{ "galley.enabled", true },
{ "gateways.enabled", false },
{ "gateways.istio-ingressgateway.resources.limits.cpu", "2000m" },
{ "gateways.istio-ingressgateway.resources.limits.memory", "1024Mi" },
{ "gateways.istio-ingressgateway.resources.requests.cpu", "100m" },
{ "gateways.istio-ingressgateway.resources.requests.memory", "128Mi" },
{ "gateways.istio-ingressgateway.type", "NodePort" },
{ "global.monitoring.type", "cluster-monitoring" },
{ "global.rancher.clusterId", foo_customClusterSync.ClusterId },
{ "istio_cni.enabled", "false" },
{ "istiocoredns.enabled", "false" },
{ "kiali.enabled", "true" },
{ "mixer.enabled", "true" },
{ "mixer.policy.enabled", "true" },
{ "mixer.policy.resources.limits.cpu", "4800m" },
{ "mixer.policy.resources.limits.memory", "4096Mi" },
{ "mixer.policy.resources.requests.cpu", "1000m" },
{ "mixer.policy.resources.requests.memory", "1024Mi" },
{ "mixer.telemetry.resources.limits.cpu", "4800m" },
{ "mixer.telemetry.resources.limits.memory", "4096Mi" },
{ "mixer.telemetry.resources.requests.cpu", "1000m" },
{ "mixer.telemetry.resources.requests.memory", "1024Mi" },
{ "mtls.enabled", false },
{ "nodeagent.enabled", false },
{ "pilot.enabled", true },
{ "pilot.resources.limits.cpu", "1000m" },
{ "pilot.resources.limits.memory", "4096Mi" },
{ "pilot.resources.requests.cpu", "500m" },
{ "pilot.resources.requests.memory", "2048Mi" },
{ "pilot.traceSampling", "1" },
{ "security.enabled", true },
{ "sidecarInjectorWebhook.enabled", true },
{ "tracing.enabled", true },
{ "tracing.jaeger.resources.limits.cpu", "500m" },
{ "tracing.jaeger.resources.limits.memory", "1024Mi" },
{ "tracing.jaeger.resources.requests.cpu", "100m" },
{ "tracing.jaeger.resources.requests.memory", "100Mi" },
},
});
});
package main
import (
"github.com/pulumi/pulumi-rancher2/sdk/v5/go/rancher2"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
foo_customCluster, err := rancher2.NewCluster(ctx, "foo-customCluster", &rancher2.ClusterArgs{
Description: pulumi.String("Foo rancher2 custom cluster"),
RkeConfig: &rancher2.ClusterRkeConfigArgs{
Network: &rancher2.ClusterRkeConfigNetworkArgs{
Plugin: pulumi.String("canal"),
},
},
EnableClusterMonitoring: pulumi.Bool(true),
ClusterMonitoringInput: &rancher2.ClusterClusterMonitoringInputArgs{
Answers: pulumi.Map{
"exporter-kubelets.https": pulumi.Any(true),
"exporter-node.enabled": pulumi.Any(true),
"exporter-node.ports.metrics.port": pulumi.Any(9796),
"exporter-node.resources.limits.cpu": pulumi.Any("200m"),
"exporter-node.resources.limits.memory": pulumi.Any("200Mi"),
"grafana.persistence.enabled": pulumi.Any(false),
"grafana.persistence.size": pulumi.Any("10Gi"),
"grafana.persistence.storageClass": pulumi.Any("default"),
"operator.resources.limits.memory": pulumi.Any("500Mi"),
"prometheus.persistence.enabled": pulumi.Any("false"),
"prometheus.persistence.size": pulumi.Any("50Gi"),
"prometheus.persistence.storageClass": pulumi.Any("default"),
"prometheus.persistent.useReleaseName": pulumi.Any("true"),
"prometheus.resources.core.limits.cpu": pulumi.Any("1000m"),
"prometheus.resources.core.limits.memory": pulumi.Any("1500Mi"),
"prometheus.resources.core.requests.cpu": pulumi.Any("750m"),
"prometheus.resources.core.requests.memory": pulumi.Any("750Mi"),
"prometheus.retention": pulumi.Any("12h"),
},
Version: pulumi.String("0.1.0"),
},
})
if err != nil {
return err
}
foo_customClusterSync, err := rancher2.NewClusterSync(ctx, "foo-customClusterSync", &rancher2.ClusterSyncArgs{
ClusterId: foo_customCluster.ID(),
WaitMonitoring: foo_customCluster.EnableClusterMonitoring,
})
if err != nil {
return err
}
foo_istio, err := rancher2.NewNamespace(ctx, "foo-istio", &rancher2.NamespaceArgs{
ProjectId: foo_customClusterSync.SystemProjectId,
Description: pulumi.String("istio namespace"),
})
if err != nil {
return err
}
_, err = rancher2.NewApp(ctx, "istio", &rancher2.AppArgs{
CatalogName: pulumi.String("system-library"),
Description: pulumi.String("Terraform app acceptance test"),
ProjectId: foo_istio.ProjectId,
TemplateName: pulumi.String("rancher-istio"),
TemplateVersion: pulumi.String("0.1.1"),
TargetNamespace: foo_istio.ID(),
Answers: pulumi.Map{
"certmanager.enabled": pulumi.Any(false),
"enableCRDs": pulumi.Any(true),
"galley.enabled": pulumi.Any(true),
"gateways.enabled": pulumi.Any(false),
"gateways.istio-ingressgateway.resources.limits.cpu": pulumi.Any("2000m"),
"gateways.istio-ingressgateway.resources.limits.memory": pulumi.Any("1024Mi"),
"gateways.istio-ingressgateway.resources.requests.cpu": pulumi.Any("100m"),
"gateways.istio-ingressgateway.resources.requests.memory": pulumi.Any("128Mi"),
"gateways.istio-ingressgateway.type": pulumi.Any("NodePort"),
"global.monitoring.type": pulumi.Any("cluster-monitoring"),
"global.rancher.clusterId": foo_customClusterSync.ClusterId,
"istio_cni.enabled": pulumi.Any("false"),
"istiocoredns.enabled": pulumi.Any("false"),
"kiali.enabled": pulumi.Any("true"),
"mixer.enabled": pulumi.Any("true"),
"mixer.policy.enabled": pulumi.Any("true"),
"mixer.policy.resources.limits.cpu": pulumi.Any("4800m"),
"mixer.policy.resources.limits.memory": pulumi.Any("4096Mi"),
"mixer.policy.resources.requests.cpu": pulumi.Any("1000m"),
"mixer.policy.resources.requests.memory": pulumi.Any("1024Mi"),
"mixer.telemetry.resources.limits.cpu": pulumi.Any("4800m"),
"mixer.telemetry.resources.limits.memory": pulumi.Any("4096Mi"),
"mixer.telemetry.resources.requests.cpu": pulumi.Any("1000m"),
"mixer.telemetry.resources.requests.memory": pulumi.Any("1024Mi"),
"mtls.enabled": pulumi.Any(false),
"nodeagent.enabled": pulumi.Any(false),
"pilot.enabled": pulumi.Any(true),
"pilot.resources.limits.cpu": pulumi.Any("1000m"),
"pilot.resources.limits.memory": pulumi.Any("4096Mi"),
"pilot.resources.requests.cpu": pulumi.Any("500m"),
"pilot.resources.requests.memory": pulumi.Any("2048Mi"),
"pilot.traceSampling": pulumi.Any("1"),
"security.enabled": pulumi.Any(true),
"sidecarInjectorWebhook.enabled": pulumi.Any(true),
"tracing.enabled": pulumi.Any(true),
"tracing.jaeger.resources.limits.cpu": pulumi.Any("500m"),
"tracing.jaeger.resources.limits.memory": pulumi.Any("1024Mi"),
"tracing.jaeger.resources.requests.cpu": pulumi.Any("100m"),
"tracing.jaeger.resources.requests.memory": pulumi.Any("100Mi"),
},
})
if err != nil {
return err
}
return nil
})
}
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.rancher2.Cluster;
import com.pulumi.rancher2.ClusterArgs;
import com.pulumi.rancher2.inputs.ClusterRkeConfigArgs;
import com.pulumi.rancher2.inputs.ClusterRkeConfigNetworkArgs;
import com.pulumi.rancher2.inputs.ClusterClusterMonitoringInputArgs;
import com.pulumi.rancher2.ClusterSync;
import com.pulumi.rancher2.ClusterSyncArgs;
import com.pulumi.rancher2.Namespace;
import com.pulumi.rancher2.NamespaceArgs;
import com.pulumi.rancher2.App;
import com.pulumi.rancher2.AppArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class Program {
public static void main(String[] args) {
Pulumi.run(Program::stack);
}
public static void stack(Context ctx) {
var foo_customCluster = new Cluster("foo-customCluster", ClusterArgs.builder()
.description("Foo rancher2 custom cluster")
.rkeConfig(ClusterRkeConfigArgs.builder()
.network(ClusterRkeConfigNetworkArgs.builder()
.plugin("canal")
.build())
.build())
.enableClusterMonitoring(true)
.clusterMonitoringInput(ClusterClusterMonitoringInputArgs.builder()
.answers(Map.ofEntries(
Map.entry("exporter-kubelets.https", true),
Map.entry("exporter-node.enabled", true),
Map.entry("exporter-node.ports.metrics.port", 9796),
Map.entry("exporter-node.resources.limits.cpu", "200m"),
Map.entry("exporter-node.resources.limits.memory", "200Mi"),
Map.entry("grafana.persistence.enabled", false),
Map.entry("grafana.persistence.size", "10Gi"),
Map.entry("grafana.persistence.storageClass", "default"),
Map.entry("operator.resources.limits.memory", "500Mi"),
Map.entry("prometheus.persistence.enabled", "false"),
Map.entry("prometheus.persistence.size", "50Gi"),
Map.entry("prometheus.persistence.storageClass", "default"),
Map.entry("prometheus.persistent.useReleaseName", "true"),
Map.entry("prometheus.resources.core.limits.cpu", "1000m"),
Map.entry("prometheus.resources.core.limits.memory", "1500Mi"),
Map.entry("prometheus.resources.core.requests.cpu", "750m"),
Map.entry("prometheus.resources.core.requests.memory", "750Mi"),
Map.entry("prometheus.retention", "12h")
))
.version("0.1.0")
.build())
.build());
var foo_customClusterSync = new ClusterSync("foo-customClusterSync", ClusterSyncArgs.builder()
.clusterId(foo_customCluster.id())
.waitMonitoring(foo_customCluster.enableClusterMonitoring())
.build());
var foo_istio = new Namespace("foo-istio", NamespaceArgs.builder()
.projectId(foo_customClusterSync.systemProjectId())
.description("istio namespace")
.build());
var istio = new App("istio", AppArgs.builder()
.catalogName("system-library")
.description("Terraform app acceptance test")
.projectId(foo_istio.projectId())
.templateName("rancher-istio")
.templateVersion("0.1.1")
.targetNamespace(foo_istio.id())
.answers(Map.ofEntries(
Map.entry("certmanager.enabled", false),
Map.entry("enableCRDs", true),
Map.entry("galley.enabled", true),
Map.entry("gateways.enabled", false),
Map.entry("gateways.istio-ingressgateway.resources.limits.cpu", "2000m"),
Map.entry("gateways.istio-ingressgateway.resources.limits.memory", "1024Mi"),
Map.entry("gateways.istio-ingressgateway.resources.requests.cpu", "100m"),
Map.entry("gateways.istio-ingressgateway.resources.requests.memory", "128Mi"),
Map.entry("gateways.istio-ingressgateway.type", "NodePort"),
Map.entry("global.monitoring.type", "cluster-monitoring"),
Map.entry("global.rancher.clusterId", foo_customClusterSync.clusterId()),
Map.entry("istio_cni.enabled", "false"),
Map.entry("istiocoredns.enabled", "false"),
Map.entry("kiali.enabled", "true"),
Map.entry("mixer.enabled", "true"),
Map.entry("mixer.policy.enabled", "true"),
Map.entry("mixer.policy.resources.limits.cpu", "4800m"),
Map.entry("mixer.policy.resources.limits.memory", "4096Mi"),
Map.entry("mixer.policy.resources.requests.cpu", "1000m"),
Map.entry("mixer.policy.resources.requests.memory", "1024Mi"),
Map.entry("mixer.telemetry.resources.limits.cpu", "4800m"),
Map.entry("mixer.telemetry.resources.limits.memory", "4096Mi"),
Map.entry("mixer.telemetry.resources.requests.cpu", "1000m"),
Map.entry("mixer.telemetry.resources.requests.memory", "1024Mi"),
Map.entry("mtls.enabled", false),
Map.entry("nodeagent.enabled", false),
Map.entry("pilot.enabled", true),
Map.entry("pilot.resources.limits.cpu", "1000m"),
Map.entry("pilot.resources.limits.memory", "4096Mi"),
Map.entry("pilot.resources.requests.cpu", "500m"),
Map.entry("pilot.resources.requests.memory", "2048Mi"),
Map.entry("pilot.traceSampling", "1"),
Map.entry("security.enabled", true),
Map.entry("sidecarInjectorWebhook.enabled", true),
Map.entry("tracing.enabled", true),
Map.entry("tracing.jaeger.resources.limits.cpu", "500m"),
Map.entry("tracing.jaeger.resources.limits.memory", "1024Mi"),
Map.entry("tracing.jaeger.resources.requests.cpu", "100m"),
Map.entry("tracing.jaeger.resources.requests.memory", "100Mi")
))
.build());
}
}
import pulumi
import pulumi_rancher2 as rancher2
# Create a new rancher2 RKE Cluster
foo_custom_cluster = rancher2.Cluster("foo-customCluster",
description="Foo rancher2 custom cluster",
rke_config=rancher2.ClusterRkeConfigArgs(
network=rancher2.ClusterRkeConfigNetworkArgs(
plugin="canal",
),
),
enable_cluster_monitoring=True,
cluster_monitoring_input=rancher2.ClusterClusterMonitoringInputArgs(
answers={
"exporter-kubelets.https": True,
"exporter-node.enabled": True,
"exporter-node.ports.metrics.port": 9796,
"exporter-node.resources.limits.cpu": "200m",
"exporter-node.resources.limits.memory": "200Mi",
"grafana.persistence.enabled": False,
"grafana.persistence.size": "10Gi",
"grafana.persistence.storageClass": "default",
"operator.resources.limits.memory": "500Mi",
"prometheus.persistence.enabled": "false",
"prometheus.persistence.size": "50Gi",
"prometheus.persistence.storageClass": "default",
"prometheus.persistent.useReleaseName": "true",
"prometheus.resources.core.limits.cpu": "1000m",
"prometheus.resources.core.limits.memory": "1500Mi",
"prometheus.resources.core.requests.cpu": "750m",
"prometheus.resources.core.requests.memory": "750Mi",
"prometheus.retention": "12h",
},
version="0.1.0",
))
# Create a new rancher2 Cluster Sync for foo-custom cluster
foo_custom_cluster_sync = rancher2.ClusterSync("foo-customClusterSync",
cluster_id=foo_custom_cluster.id,
wait_monitoring=foo_custom_cluster.enable_cluster_monitoring)
# Create a new rancher2 Namespace
foo_istio = rancher2.Namespace("foo-istio",
project_id=foo_custom_cluster_sync.system_project_id,
description="istio namespace")
# Create a new rancher2 App deploying istio (should wait until monitoring is up and running)
istio = rancher2.App("istio",
catalog_name="system-library",
description="Terraform app acceptance test",
project_id=foo_istio.project_id,
template_name="rancher-istio",
template_version="0.1.1",
target_namespace=foo_istio.id,
answers={
"certmanager.enabled": False,
"enableCRDs": True,
"galley.enabled": True,
"gateways.enabled": False,
"gateways.istio-ingressgateway.resources.limits.cpu": "2000m",
"gateways.istio-ingressgateway.resources.limits.memory": "1024Mi",
"gateways.istio-ingressgateway.resources.requests.cpu": "100m",
"gateways.istio-ingressgateway.resources.requests.memory": "128Mi",
"gateways.istio-ingressgateway.type": "NodePort",
"global.monitoring.type": "cluster-monitoring",
"global.rancher.clusterId": foo_custom_cluster_sync.cluster_id,
"istio_cni.enabled": "false",
"istiocoredns.enabled": "false",
"kiali.enabled": "true",
"mixer.enabled": "true",
"mixer.policy.enabled": "true",
"mixer.policy.resources.limits.cpu": "4800m",
"mixer.policy.resources.limits.memory": "4096Mi",
"mixer.policy.resources.requests.cpu": "1000m",
"mixer.policy.resources.requests.memory": "1024Mi",
"mixer.telemetry.resources.limits.cpu": "4800m",
"mixer.telemetry.resources.limits.memory": "4096Mi",
"mixer.telemetry.resources.requests.cpu": "1000m",
"mixer.telemetry.resources.requests.memory": "1024Mi",
"mtls.enabled": False,
"nodeagent.enabled": False,
"pilot.enabled": True,
"pilot.resources.limits.cpu": "1000m",
"pilot.resources.limits.memory": "4096Mi",
"pilot.resources.requests.cpu": "500m",
"pilot.resources.requests.memory": "2048Mi",
"pilot.traceSampling": "1",
"security.enabled": True,
"sidecarInjectorWebhook.enabled": True,
"tracing.enabled": True,
"tracing.jaeger.resources.limits.cpu": "500m",
"tracing.jaeger.resources.limits.memory": "1024Mi",
"tracing.jaeger.resources.requests.cpu": "100m",
"tracing.jaeger.resources.requests.memory": "100Mi",
})
import * as pulumi from "@pulumi/pulumi";
import * as rancher2 from "@pulumi/rancher2";
// Create a new rancher2 RKE Cluster
const foo_customCluster = new rancher2.Cluster("foo-customCluster", {
description: "Foo rancher2 custom cluster",
rkeConfig: {
network: {
plugin: "canal",
},
},
enableClusterMonitoring: true,
clusterMonitoringInput: {
answers: {
"exporter-kubelets.https": true,
"exporter-node.enabled": true,
"exporter-node.ports.metrics.port": 9796,
"exporter-node.resources.limits.cpu": "200m",
"exporter-node.resources.limits.memory": "200Mi",
"grafana.persistence.enabled": false,
"grafana.persistence.size": "10Gi",
"grafana.persistence.storageClass": "default",
"operator.resources.limits.memory": "500Mi",
"prometheus.persistence.enabled": "false",
"prometheus.persistence.size": "50Gi",
"prometheus.persistence.storageClass": "default",
"prometheus.persistent.useReleaseName": "true",
"prometheus.resources.core.limits.cpu": "1000m",
"prometheus.resources.core.limits.memory": "1500Mi",
"prometheus.resources.core.requests.cpu": "750m",
"prometheus.resources.core.requests.memory": "750Mi",
"prometheus.retention": "12h",
},
version: "0.1.0",
},
});
// Create a new rancher2 Cluster Sync for foo-custom cluster
const foo_customClusterSync = new rancher2.ClusterSync("foo-customClusterSync", {
clusterId: foo_customCluster.id,
waitMonitoring: foo_customCluster.enableClusterMonitoring,
});
// Create a new rancher2 Namespace
const foo_istio = new rancher2.Namespace("foo-istio", {
projectId: foo_customClusterSync.systemProjectId,
description: "istio namespace",
});
// Create a new rancher2 App deploying istio (should wait until monitoring is up and running)
const istio = new rancher2.App("istio", {
catalogName: "system-library",
description: "Terraform app acceptance test",
projectId: foo_istio.projectId,
templateName: "rancher-istio",
templateVersion: "0.1.1",
targetNamespace: foo_istio.id,
answers: {
"certmanager.enabled": false,
enableCRDs: true,
"galley.enabled": true,
"gateways.enabled": false,
"gateways.istio-ingressgateway.resources.limits.cpu": "2000m",
"gateways.istio-ingressgateway.resources.limits.memory": "1024Mi",
"gateways.istio-ingressgateway.resources.requests.cpu": "100m",
"gateways.istio-ingressgateway.resources.requests.memory": "128Mi",
"gateways.istio-ingressgateway.type": "NodePort",
"global.monitoring.type": "cluster-monitoring",
"global.rancher.clusterId": foo_customClusterSync.clusterId,
"istio_cni.enabled": "false",
"istiocoredns.enabled": "false",
"kiali.enabled": "true",
"mixer.enabled": "true",
"mixer.policy.enabled": "true",
"mixer.policy.resources.limits.cpu": "4800m",
"mixer.policy.resources.limits.memory": "4096Mi",
"mixer.policy.resources.requests.cpu": "1000m",
"mixer.policy.resources.requests.memory": "1024Mi",
"mixer.telemetry.resources.limits.cpu": "4800m",
"mixer.telemetry.resources.limits.memory": "4096Mi",
"mixer.telemetry.resources.requests.cpu": "1000m",
"mixer.telemetry.resources.requests.memory": "1024Mi",
"mtls.enabled": false,
"nodeagent.enabled": false,
"pilot.enabled": true,
"pilot.resources.limits.cpu": "1000m",
"pilot.resources.limits.memory": "4096Mi",
"pilot.resources.requests.cpu": "500m",
"pilot.resources.requests.memory": "2048Mi",
"pilot.traceSampling": "1",
"security.enabled": true,
"sidecarInjectorWebhook.enabled": true,
"tracing.enabled": true,
"tracing.jaeger.resources.limits.cpu": "500m",
"tracing.jaeger.resources.limits.memory": "1024Mi",
"tracing.jaeger.resources.requests.cpu": "100m",
"tracing.jaeger.resources.requests.memory": "100Mi",
},
});
resources:
# Create a new rancher2 RKE Cluster
foo-customCluster:
type: rancher2:Cluster
properties:
description: Foo rancher2 custom cluster
rkeConfig:
network:
plugin: canal
enableClusterMonitoring: true
clusterMonitoringInput:
answers:
exporter-kubelets.https: true
exporter-node.enabled: true
exporter-node.ports.metrics.port: 9796
exporter-node.resources.limits.cpu: 200m
exporter-node.resources.limits.memory: 200Mi
grafana.persistence.enabled: false
grafana.persistence.size: 10Gi
grafana.persistence.storageClass: default
operator.resources.limits.memory: 500Mi
prometheus.persistence.enabled: 'false'
prometheus.persistence.size: 50Gi
prometheus.persistence.storageClass: default
prometheus.persistent.useReleaseName: 'true'
prometheus.resources.core.limits.cpu: 1000m
prometheus.resources.core.limits.memory: 1500Mi
prometheus.resources.core.requests.cpu: 750m
prometheus.resources.core.requests.memory: 750Mi
prometheus.retention: 12h
version: 0.1.0
# Create a new rancher2 Cluster Sync for foo-custom cluster
foo-customClusterSync:
type: rancher2:ClusterSync
properties:
clusterId: ${["foo-customCluster"].id}
waitMonitoring: ${["foo-customCluster"].enableClusterMonitoring}
# Create a new rancher2 Namespace
foo-istio:
type: rancher2:Namespace
properties:
projectId: ${["foo-customClusterSync"].systemProjectId}
description: istio namespace
# Create a new rancher2 App deploying istio (should wait until monitoring is up and running)
istio:
type: rancher2:App
properties:
catalogName: system-library
description: Terraform app acceptance test
projectId: ${["foo-istio"].projectId}
templateName: rancher-istio
templateVersion: 0.1.1
targetNamespace: ${["foo-istio"].id}
answers:
certmanager.enabled: false
enableCRDs: true
galley.enabled: true
gateways.enabled: false
gateways.istio-ingressgateway.resources.limits.cpu: 2000m
gateways.istio-ingressgateway.resources.limits.memory: 1024Mi
gateways.istio-ingressgateway.resources.requests.cpu: 100m
gateways.istio-ingressgateway.resources.requests.memory: 128Mi
gateways.istio-ingressgateway.type: NodePort
global.monitoring.type: cluster-monitoring
global.rancher.clusterId: ${["foo-customClusterSync"].clusterId}
istio_cni.enabled: 'false'
istiocoredns.enabled: 'false'
kiali.enabled: 'true'
mixer.enabled: 'true'
mixer.policy.enabled: 'true'
mixer.policy.resources.limits.cpu: 4800m
mixer.policy.resources.limits.memory: 4096Mi
mixer.policy.resources.requests.cpu: 1000m
mixer.policy.resources.requests.memory: 1024Mi
mixer.telemetry.resources.limits.cpu: 4800m
mixer.telemetry.resources.limits.memory: 4096Mi
mixer.telemetry.resources.requests.cpu: 1000m
mixer.telemetry.resources.requests.memory: 1024Mi
mtls.enabled: false
nodeagent.enabled: false
pilot.enabled: true
pilot.resources.limits.cpu: 1000m
pilot.resources.limits.memory: 4096Mi
pilot.resources.requests.cpu: 500m
pilot.resources.requests.memory: 2048Mi
pilot.traceSampling: '1'
security.enabled: true
sidecarInjectorWebhook.enabled: true
tracing.enabled: true
tracing.jaeger.resources.limits.cpu: 500m
tracing.jaeger.resources.limits.memory: 1024Mi
tracing.jaeger.resources.requests.cpu: 100m
tracing.jaeger.resources.requests.memory: 100Mi
Creating a Rancher v2 RKE cluster and assigning a node pool (overlapping planes)
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Rancher2 = Pulumi.Rancher2;
return await Deployment.RunAsync(() =>
{
// Create a new rancher2 RKE Cluster
var foo_custom = new Rancher2.Cluster("foo-custom", new()
{
Description = "Foo rancher2 custom cluster",
RkeConfig = new Rancher2.Inputs.ClusterRkeConfigArgs
{
Network = new Rancher2.Inputs.ClusterRkeConfigNetworkArgs
{
Plugin = "canal",
},
},
});
// Create a new rancher2 Node Template
var fooNodeTemplate = new Rancher2.NodeTemplate("fooNodeTemplate", new()
{
Description = "foo test",
Amazonec2Config = new Rancher2.Inputs.NodeTemplateAmazonec2ConfigArgs
{
AccessKey = "<AWS_ACCESS_KEY>",
SecretKey = "<AWS_SECRET_KEY>",
Ami = "<AMI_ID>",
Region = "<REGION>",
SecurityGroups = new[]
{
"<AWS_SECURITY_GROUP>",
},
SubnetId = "<SUBNET_ID>",
VpcId = "<VPC_ID>",
Zone = "<ZONE>",
},
});
// Create a new rancher2 Node Pool
var fooNodePool = new Rancher2.NodePool("fooNodePool", new()
{
ClusterId = foo_custom.Id,
HostnamePrefix = "foo-cluster-0",
NodeTemplateId = fooNodeTemplate.Id,
Quantity = 3,
ControlPlane = true,
Etcd = true,
Worker = true,
});
});
package main
import (
"github.com/pulumi/pulumi-rancher2/sdk/v5/go/rancher2"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
foo_custom, err := rancher2.NewCluster(ctx, "foo-custom", &rancher2.ClusterArgs{
Description: pulumi.String("Foo rancher2 custom cluster"),
RkeConfig: &rancher2.ClusterRkeConfigArgs{
Network: &rancher2.ClusterRkeConfigNetworkArgs{
Plugin: pulumi.String("canal"),
},
},
})
if err != nil {
return err
}
fooNodeTemplate, err := rancher2.NewNodeTemplate(ctx, "fooNodeTemplate", &rancher2.NodeTemplateArgs{
Description: pulumi.String("foo test"),
Amazonec2Config: &rancher2.NodeTemplateAmazonec2ConfigArgs{
AccessKey: pulumi.String("<AWS_ACCESS_KEY>"),
SecretKey: pulumi.String("<AWS_SECRET_KEY>"),
Ami: pulumi.String("<AMI_ID>"),
Region: pulumi.String("<REGION>"),
SecurityGroups: pulumi.StringArray{
pulumi.String("<AWS_SECURITY_GROUP>"),
},
SubnetId: pulumi.String("<SUBNET_ID>"),
VpcId: pulumi.String("<VPC_ID>"),
Zone: pulumi.String("<ZONE>"),
},
})
if err != nil {
return err
}
_, err = rancher2.NewNodePool(ctx, "fooNodePool", &rancher2.NodePoolArgs{
ClusterId: foo_custom.ID(),
HostnamePrefix: pulumi.String("foo-cluster-0"),
NodeTemplateId: fooNodeTemplate.ID(),
Quantity: pulumi.Int(3),
ControlPlane: pulumi.Bool(true),
Etcd: pulumi.Bool(true),
Worker: pulumi.Bool(true),
})
if err != nil {
return err
}
return nil
})
}
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.rancher2.Cluster;
import com.pulumi.rancher2.ClusterArgs;
import com.pulumi.rancher2.inputs.ClusterRkeConfigArgs;
import com.pulumi.rancher2.inputs.ClusterRkeConfigNetworkArgs;
import com.pulumi.rancher2.NodeTemplate;
import com.pulumi.rancher2.NodeTemplateArgs;
import com.pulumi.rancher2.inputs.NodeTemplateAmazonec2ConfigArgs;
import com.pulumi.rancher2.NodePool;
import com.pulumi.rancher2.NodePoolArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
var foo_custom = new Cluster("foo-custom", ClusterArgs.builder()
.description("Foo rancher2 custom cluster")
.rkeConfig(ClusterRkeConfigArgs.builder()
.network(ClusterRkeConfigNetworkArgs.builder()
.plugin("canal")
.build())
.build())
.build());
var fooNodeTemplate = new NodeTemplate("fooNodeTemplate", NodeTemplateArgs.builder()
.description("foo test")
.amazonec2Config(NodeTemplateAmazonec2ConfigArgs.builder()
.accessKey("<AWS_ACCESS_KEY>")
.secretKey("<AWS_SECRET_KEY>")
.ami("<AMI_ID>")
.region("<REGION>")
.securityGroups("<AWS_SECURITY_GROUP>")
.subnetId("<SUBNET_ID>")
.vpcId("<VPC_ID>")
.zone("<ZONE>")
.build())
.build());
var fooNodePool = new NodePool("fooNodePool", NodePoolArgs.builder()
.clusterId(foo_custom.id())
.hostnamePrefix("foo-cluster-0")
.nodeTemplateId(fooNodeTemplate.id())
.quantity(3)
.controlPlane(true)
.etcd(true)
.worker(true)
.build());
}
}
import pulumi
import pulumi_rancher2 as rancher2
# Create a new rancher2 RKE Cluster
foo_custom = rancher2.Cluster("foo-custom",
description="Foo rancher2 custom cluster",
rke_config=rancher2.ClusterRkeConfigArgs(
network=rancher2.ClusterRkeConfigNetworkArgs(
plugin="canal",
),
))
# Create a new rancher2 Node Template
foo_node_template = rancher2.NodeTemplate("fooNodeTemplate",
description="foo test",
amazonec2_config=rancher2.NodeTemplateAmazonec2ConfigArgs(
access_key="<AWS_ACCESS_KEY>",
secret_key="<AWS_SECRET_KEY>",
ami="<AMI_ID>",
region="<REGION>",
security_groups=["<AWS_SECURITY_GROUP>"],
subnet_id="<SUBNET_ID>",
vpc_id="<VPC_ID>",
zone="<ZONE>",
))
# Create a new rancher2 Node Pool
foo_node_pool = rancher2.NodePool("fooNodePool",
cluster_id=foo_custom.id,
hostname_prefix="foo-cluster-0",
node_template_id=foo_node_template.id,
quantity=3,
control_plane=True,
etcd=True,
worker=True)
import * as pulumi from "@pulumi/pulumi";
import * as rancher2 from "@pulumi/rancher2";
// Create a new rancher2 RKE Cluster
const foo_custom = new rancher2.Cluster("foo-custom", {
description: "Foo rancher2 custom cluster",
rkeConfig: {
network: {
plugin: "canal",
},
},
});
// Create a new rancher2 Node Template
const fooNodeTemplate = new rancher2.NodeTemplate("fooNodeTemplate", {
description: "foo test",
amazonec2Config: {
accessKey: "<AWS_ACCESS_KEY>",
secretKey: "<AWS_SECRET_KEY>",
ami: "<AMI_ID>",
region: "<REGION>",
securityGroups: ["<AWS_SECURITY_GROUP>"],
subnetId: "<SUBNET_ID>",
vpcId: "<VPC_ID>",
zone: "<ZONE>",
},
});
// Create a new rancher2 Node Pool
const fooNodePool = new rancher2.NodePool("fooNodePool", {
clusterId: foo_custom.id,
hostnamePrefix: "foo-cluster-0",
nodeTemplateId: fooNodeTemplate.id,
quantity: 3,
controlPlane: true,
etcd: true,
worker: true,
});
resources:
# Create a new rancher2 RKE Cluster
foo-custom:
type: rancher2:Cluster
properties:
description: Foo rancher2 custom cluster
rkeConfig:
network:
plugin: canal
# Create a new rancher2 Node Template
fooNodeTemplate:
type: rancher2:NodeTemplate
properties:
description: foo test
amazonec2Config:
accessKey: <AWS_ACCESS_KEY>
secretKey: <AWS_SECRET_KEY>
ami: <AMI_ID>
region: <REGION>
securityGroups:
- <AWS_SECURITY_GROUP>
subnetId: <SUBNET_ID>
vpcId: <VPC_ID>
zone: <ZONE>
# Create a new rancher2 Node Pool
fooNodePool:
type: rancher2:NodePool
properties:
clusterId: ${["foo-custom"].id}
hostnamePrefix: foo-cluster-0
nodeTemplateId: ${fooNodeTemplate.id}
quantity: 3
controlPlane: true
etcd: true
worker: true
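Node registration happens asynchronously, so resources that need a ready cluster often have to wait until it reports an active state. The sketch below uses rancher2.ClusterSync for that; it assumes the foo-custom cluster and fooNodePool from the example above, and the stateConfirm value is illustrative.
import * as rancher2 from "@pulumi/rancher2";
// Cluster as in the example above (node template and node pool omitted for brevity).
const fooCustom = new rancher2.Cluster("foo-custom", {
    description: "Foo rancher2 custom cluster",
    rkeConfig: {
        network: {
            plugin: "canal",
        },
    },
});
// ClusterSync waits until the cluster (and, optionally, the listed node pools)
// report an active state before dependent resources are created.
const fooSync = new rancher2.ClusterSync("foo-custom-sync", {
    clusterId: fooCustom.id,
    // nodePoolIds: [fooNodePool.id],  // also wait for the node pool from the example above
    stateConfirm: 3, // require the active state to be observed 3 consecutive times
});
// Downstream resources can then depend on the sync, e.g. via { dependsOn: [fooSync] }.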
Creating a Rancher v2 RKE cluster from a cluster template. For Rancher v2.3.x and above.
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Rancher2 = Pulumi.Rancher2;
return await Deployment.RunAsync(() =>
{
// Create a new rancher2 cluster template
var fooClusterTemplate = new Rancher2.ClusterTemplate("fooClusterTemplate", new()
{
Members = new[]
{
new Rancher2.Inputs.ClusterTemplateMemberArgs
{
AccessType = "owner",
UserPrincipalId = "local://user-XXXXX",
},
},
TemplateRevisions = new[]
{
new Rancher2.Inputs.ClusterTemplateTemplateRevisionArgs
{
Name = "V1",
ClusterConfig = new Rancher2.Inputs.ClusterTemplateTemplateRevisionClusterConfigArgs
{
RkeConfig = new Rancher2.Inputs.ClusterTemplateTemplateRevisionClusterConfigRkeConfigArgs
{
Network = new Rancher2.Inputs.ClusterTemplateTemplateRevisionClusterConfigRkeConfigNetworkArgs
{
Plugin = "canal",
},
Services = new Rancher2.Inputs.ClusterTemplateTemplateRevisionClusterConfigRkeConfigServicesArgs
{
Etcd = new Rancher2.Inputs.ClusterTemplateTemplateRevisionClusterConfigRkeConfigServicesEtcdArgs
{
Creation = "6h",
Retention = "24h",
},
},
},
},
Default = true,
},
},
Description = "Test cluster template v2",
});
// Create a new rancher2 RKE Cluster from template
var fooCluster = new Rancher2.Cluster("fooCluster", new()
{
ClusterTemplateId = fooClusterTemplate.Id,
ClusterTemplateRevisionId = fooClusterTemplate.TemplateRevisions.Apply(templateRevisions => templateRevisions[0].Id),
});
});
package main
import (
"github.com/pulumi/pulumi-rancher2/sdk/v5/go/rancher2"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
fooClusterTemplate, err := rancher2.NewClusterTemplate(ctx, "fooClusterTemplate", &rancher2.ClusterTemplateArgs{
Members: rancher2.ClusterTemplateMemberArray{
&rancher2.ClusterTemplateMemberArgs{
AccessType: pulumi.String("owner"),
UserPrincipalId: pulumi.String("local://user-XXXXX"),
},
},
TemplateRevisions: rancher2.ClusterTemplateTemplateRevisionArray{
&rancher2.ClusterTemplateTemplateRevisionArgs{
Name: pulumi.String("V1"),
ClusterConfig: &rancher2.ClusterTemplateTemplateRevisionClusterConfigArgs{
RkeConfig: &rancher2.ClusterTemplateTemplateRevisionClusterConfigRkeConfigArgs{
Network: &rancher2.ClusterTemplateTemplateRevisionClusterConfigRkeConfigNetworkArgs{
Plugin: pulumi.String("canal"),
},
Services: &rancher2.ClusterTemplateTemplateRevisionClusterConfigRkeConfigServicesArgs{
Etcd: &rancher2.ClusterTemplateTemplateRevisionClusterConfigRkeConfigServicesEtcdArgs{
Creation: pulumi.String("6h"),
Retention: pulumi.String("24h"),
},
},
},
},
Default: pulumi.Bool(true),
},
},
Description: pulumi.String("Test cluster template v2"),
})
if err != nil {
return err
}
_, err = rancher2.NewCluster(ctx, "fooCluster", &rancher2.ClusterArgs{
ClusterTemplateId: fooClusterTemplate.ID(),
ClusterTemplateRevisionId: fooClusterTemplate.TemplateRevisions.ApplyT(func(templateRevisions []rancher2.ClusterTemplateTemplateRevision) (*string, error) {
return &templateRevisions[0].Id, nil
}).(pulumi.StringPtrOutput),
})
if err != nil {
return err
}
return nil
})
}
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.rancher2.ClusterTemplate;
import com.pulumi.rancher2.ClusterTemplateArgs;
import com.pulumi.rancher2.inputs.ClusterTemplateMemberArgs;
import com.pulumi.rancher2.inputs.ClusterTemplateTemplateRevisionArgs;
import com.pulumi.rancher2.inputs.ClusterTemplateTemplateRevisionClusterConfigArgs;
import com.pulumi.rancher2.inputs.ClusterTemplateTemplateRevisionClusterConfigRkeConfigArgs;
import com.pulumi.rancher2.inputs.ClusterTemplateTemplateRevisionClusterConfigRkeConfigNetworkArgs;
import com.pulumi.rancher2.inputs.ClusterTemplateTemplateRevisionClusterConfigRkeConfigServicesArgs;
import com.pulumi.rancher2.inputs.ClusterTemplateTemplateRevisionClusterConfigRkeConfigServicesEtcdArgs;
import com.pulumi.rancher2.Cluster;
import com.pulumi.rancher2.ClusterArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
var fooClusterTemplate = new ClusterTemplate("fooClusterTemplate", ClusterTemplateArgs.builder()
.members(ClusterTemplateMemberArgs.builder()
.accessType("owner")
.userPrincipalId("local://user-XXXXX")
.build())
.templateRevisions(ClusterTemplateTemplateRevisionArgs.builder()
.name("V1")
.clusterConfig(ClusterTemplateTemplateRevisionClusterConfigArgs.builder()
.rkeConfig(ClusterTemplateTemplateRevisionClusterConfigRkeConfigArgs.builder()
.network(ClusterTemplateTemplateRevisionClusterConfigRkeConfigNetworkArgs.builder()
.plugin("canal")
.build())
.services(ClusterTemplateTemplateRevisionClusterConfigRkeConfigServicesArgs.builder()
.etcd(ClusterTemplateTemplateRevisionClusterConfigRkeConfigServicesEtcdArgs.builder()
.creation("6h")
.retention("24h")
.build())
.build())
.build())
.build())
.default_(true)
.build())
.description("Test cluster template v2")
.build());
var fooCluster = new Cluster("fooCluster", ClusterArgs.builder()
.clusterTemplateId(fooClusterTemplate.id())
.clusterTemplateRevisionId(fooClusterTemplate.templateRevisions().applyValue(templateRevisions -> templateRevisions.get(0).id()))
.build());
}
}
import pulumi
import pulumi_rancher2 as rancher2
# Create a new rancher2 cluster template
foo_cluster_template = rancher2.ClusterTemplate("fooClusterTemplate",
members=[rancher2.ClusterTemplateMemberArgs(
access_type="owner",
user_principal_id="local://user-XXXXX",
)],
template_revisions=[rancher2.ClusterTemplateTemplateRevisionArgs(
name="V1",
cluster_config=rancher2.ClusterTemplateTemplateRevisionClusterConfigArgs(
rke_config=rancher2.ClusterTemplateTemplateRevisionClusterConfigRkeConfigArgs(
network=rancher2.ClusterTemplateTemplateRevisionClusterConfigRkeConfigNetworkArgs(
plugin="canal",
),
services=rancher2.ClusterTemplateTemplateRevisionClusterConfigRkeConfigServicesArgs(
etcd=rancher2.ClusterTemplateTemplateRevisionClusterConfigRkeConfigServicesEtcdArgs(
creation="6h",
retention="24h",
),
),
),
),
default=True,
)],
description="Test cluster template v2")
# Create a new rancher2 RKE Cluster from template
foo_cluster = rancher2.Cluster("fooCluster",
cluster_template_id=foo_cluster_template.id,
cluster_template_revision_id=foo_cluster_template.template_revisions[0].id)
import * as pulumi from "@pulumi/pulumi";
import * as rancher2 from "@pulumi/rancher2";
// Create a new rancher2 cluster template
const fooClusterTemplate = new rancher2.ClusterTemplate("fooClusterTemplate", {
members: [{
accessType: "owner",
userPrincipalId: "local://user-XXXXX",
}],
templateRevisions: [{
name: "V1",
clusterConfig: {
rkeConfig: {
network: {
plugin: "canal",
},
services: {
etcd: {
creation: "6h",
retention: "24h",
},
},
},
},
"default": true,
}],
description: "Test cluster template v2",
});
// Create a new rancher2 RKE Cluster from template
const fooCluster = new rancher2.Cluster("fooCluster", {
clusterTemplateId: fooClusterTemplate.id,
clusterTemplateRevisionId: fooClusterTemplate.templateRevisions.apply(templateRevisions => templateRevisions[0].id),
});
resources:
# Create a new rancher2 cluster template
fooClusterTemplate:
type: rancher2:ClusterTemplate
properties:
members:
- accessType: owner
userPrincipalId: local://user-XXXXX
templateRevisions:
- name: V1
clusterConfig:
rkeConfig:
network:
plugin: canal
services:
etcd:
creation: 6h
retention: 24h
default: true
description: Test cluster template v2
# Create a new rancher2 RKE Cluster from template
fooCluster:
type: rancher2:Cluster
properties:
clusterTemplateId: ${fooClusterTemplate.id}
clusterTemplateRevisionId: ${fooClusterTemplate.templateRevisions[0].id}
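When a cluster is created from a template, values for any questions defined on the template revision can be supplied through clusterTemplateAnswers. The following TypeScript sketch is illustrative only: it assumes the revision exposes a question for the rancherKubernetesEngineConfig.kubernetesVersion variable, and the version value is a placeholder.
import * as rancher2 from "@pulumi/rancher2";
// Cluster template as in the example above (abbreviated); a question for the
// "rancherKubernetesEngineConfig.kubernetesVersion" variable is assumed to be
// defined on the revision.
const fooClusterTemplate = new rancher2.ClusterTemplate("fooClusterTemplate", {
    templateRevisions: [{
        name: "V1",
        clusterConfig: {
            rkeConfig: {
                network: {
                    plugin: "canal",
                },
            },
        },
        "default": true,
    }],
});
const fooCluster = new rancher2.Cluster("fooCluster", {
    clusterTemplateId: fooClusterTemplate.id,
    clusterTemplateRevisionId: fooClusterTemplate.templateRevisions.apply(revisions => revisions[0].id),
    // Answer the question exposed by the template revision (placeholder value).
    clusterTemplateAnswers: {
        values: {
            "rancherKubernetesEngineConfig.kubernetesVersion": "<K8S_VERSION>",
        },
    },
});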
Creating a Rancher v2 RKE cluster with an upgrade strategy. For Rancher v2.4.x and above.
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Rancher2 = Pulumi.Rancher2;
return await Deployment.RunAsync(() =>
{
var foo = new Rancher2.Cluster("foo", new()
{
Description = "Terraform custom cluster",
RkeConfig = new Rancher2.Inputs.ClusterRkeConfigArgs
{
Network = new Rancher2.Inputs.ClusterRkeConfigNetworkArgs
{
Plugin = "canal",
},
Services = new Rancher2.Inputs.ClusterRkeConfigServicesArgs
{
Etcd = new Rancher2.Inputs.ClusterRkeConfigServicesEtcdArgs
{
Creation = "6h",
Retention = "24h",
},
KubeApi = new Rancher2.Inputs.ClusterRkeConfigServicesKubeApiArgs
{
AuditLog = new Rancher2.Inputs.ClusterRkeConfigServicesKubeApiAuditLogArgs
{
Configuration = new Rancher2.Inputs.ClusterRkeConfigServicesKubeApiAuditLogConfigurationArgs
{
Format = "json",
MaxAge = 5,
MaxBackup = 5,
MaxSize = 100,
Path = "-",
Policy = @"apiVersion: audit.k8s.io/v1
kind: Policy
metadata:
creationTimestamp: null
omitStages:
- RequestReceived
rules:
- level: RequestResponse
resources:
- resources:
- pods
",
},
Enabled = true,
},
},
},
UpgradeStrategy = new Rancher2.Inputs.ClusterRkeConfigUpgradeStrategyArgs
{
Drain = true,
MaxUnavailableWorker = "20%",
},
},
});
});
package main
import (
"github.com/pulumi/pulumi-rancher2/sdk/v5/go/rancher2"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
_, err := rancher2.NewCluster(ctx, "foo", &rancher2.ClusterArgs{
Description: pulumi.String("Terraform custom cluster"),
RkeConfig: &rancher2.ClusterRkeConfigArgs{
Network: &rancher2.ClusterRkeConfigNetworkArgs{
Plugin: pulumi.String("canal"),
},
Services: &rancher2.ClusterRkeConfigServicesArgs{
Etcd: &rancher2.ClusterRkeConfigServicesEtcdArgs{
Creation: pulumi.String("6h"),
Retention: pulumi.String("24h"),
},
KubeApi: &rancher2.ClusterRkeConfigServicesKubeApiArgs{
AuditLog: &rancher2.ClusterRkeConfigServicesKubeApiAuditLogArgs{
Configuration: &rancher2.ClusterRkeConfigServicesKubeApiAuditLogConfigurationArgs{
Format: pulumi.String("json"),
MaxAge: pulumi.Int(5),
MaxBackup: pulumi.Int(5),
MaxSize: pulumi.Int(100),
Path: pulumi.String("-"),
Policy: pulumi.String(`apiVersion: audit.k8s.io/v1
kind: Policy
metadata:
creationTimestamp: null
omitStages:
- RequestReceived
rules:
- level: RequestResponse
resources:
- resources:
- pods
`),
},
Enabled: pulumi.Bool(true),
},
},
},
UpgradeStrategy: &rancher2.ClusterRkeConfigUpgradeStrategyArgs{
Drain: pulumi.Bool(true),
MaxUnavailableWorker: pulumi.String("20%"),
},
},
})
if err != nil {
return err
}
return nil
})
}
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.rancher2.Cluster;
import com.pulumi.rancher2.ClusterArgs;
import com.pulumi.rancher2.inputs.ClusterRkeConfigArgs;
import com.pulumi.rancher2.inputs.ClusterRkeConfigNetworkArgs;
import com.pulumi.rancher2.inputs.ClusterRkeConfigServicesArgs;
import com.pulumi.rancher2.inputs.ClusterRkeConfigServicesEtcdArgs;
import com.pulumi.rancher2.inputs.ClusterRkeConfigServicesKubeApiArgs;
import com.pulumi.rancher2.inputs.ClusterRkeConfigServicesKubeApiAuditLogArgs;
import com.pulumi.rancher2.inputs.ClusterRkeConfigServicesKubeApiAuditLogConfigurationArgs;
import com.pulumi.rancher2.inputs.ClusterRkeConfigUpgradeStrategyArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
var foo = new Cluster("foo", ClusterArgs.builder()
.description("Terraform custom cluster")
.rkeConfig(ClusterRkeConfigArgs.builder()
.network(ClusterRkeConfigNetworkArgs.builder()
.plugin("canal")
.build())
.services(ClusterRkeConfigServicesArgs.builder()
.etcd(ClusterRkeConfigServicesEtcdArgs.builder()
.creation("6h")
.retention("24h")
.build())
.kubeApi(ClusterRkeConfigServicesKubeApiArgs.builder()
.auditLog(ClusterRkeConfigServicesKubeApiAuditLogArgs.builder()
.configuration(ClusterRkeConfigServicesKubeApiAuditLogConfigurationArgs.builder()
.format("json")
.maxAge(5)
.maxBackup(5)
.maxSize(100)
.path("-")
.policy("""
apiVersion: audit.k8s.io/v1
kind: Policy
metadata:
creationTimestamp: null
omitStages:
- RequestReceived
rules:
- level: RequestResponse
resources:
- resources:
- pods
""")
.build())
.enabled(true)
.build())
.build())
.build())
.upgradeStrategy(ClusterRkeConfigUpgradeStrategyArgs.builder()
.drain(true)
.maxUnavailableWorker("20%")
.build())
.build())
.build());
}
}
import pulumi
import pulumi_rancher2 as rancher2
foo = rancher2.Cluster("foo",
description="Terraform custom cluster",
rke_config=rancher2.ClusterRkeConfigArgs(
network=rancher2.ClusterRkeConfigNetworkArgs(
plugin="canal",
),
services=rancher2.ClusterRkeConfigServicesArgs(
etcd=rancher2.ClusterRkeConfigServicesEtcdArgs(
creation="6h",
retention="24h",
),
kube_api=rancher2.ClusterRkeConfigServicesKubeApiArgs(
audit_log=rancher2.ClusterRkeConfigServicesKubeApiAuditLogArgs(
configuration=rancher2.ClusterRkeConfigServicesKubeApiAuditLogConfigurationArgs(
format="json",
max_age=5,
max_backup=5,
max_size=100,
path="-",
policy="""apiVersion: audit.k8s.io/v1
kind: Policy
metadata:
creationTimestamp: null
omitStages:
- RequestReceived
rules:
- level: RequestResponse
resources:
- resources:
- pods
""",
),
enabled=True,
),
),
),
upgrade_strategy=rancher2.ClusterRkeConfigUpgradeStrategyArgs(
drain=True,
max_unavailable_worker="20%",
),
))
import * as pulumi from "@pulumi/pulumi";
import * as rancher2 from "@pulumi/rancher2";
const foo = new rancher2.Cluster("foo", {
description: "Terraform custom cluster",
rkeConfig: {
network: {
plugin: "canal",
},
services: {
etcd: {
creation: "6h",
retention: "24h",
},
kubeApi: {
auditLog: {
configuration: {
format: "json",
maxAge: 5,
maxBackup: 5,
maxSize: 100,
path: "-",
policy: `apiVersion: audit.k8s.io/v1
kind: Policy
metadata:
creationTimestamp: null
omitStages:
- RequestReceived
rules:
- level: RequestResponse
resources:
- resources:
- pods
`,
},
enabled: true,
},
},
},
upgradeStrategy: {
drain: true,
maxUnavailableWorker: "20%",
},
},
});
resources:
foo:
type: rancher2:Cluster
properties:
description: Terraform custom cluster
rkeConfig:
network:
plugin: canal
services:
etcd:
creation: 6h
retention: 24h
kubeApi:
auditLog:
configuration:
format: json
maxAge: 5
maxBackup: 5
maxSize: 100
path: '-'
policy: |+
apiVersion: audit.k8s.io/v1
kind: Policy
metadata:
creationTimestamp: null
omitStages:
- RequestReceived
rules:
- level: RequestResponse
resources:
- resources:
- pods
enabled: true
upgradeStrategy:
drain: true
maxUnavailableWorker: 20%
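The upgrade strategy can also control how nodes are drained during an upgrade through a drainInput block. A TypeScript sketch extending the example above; the field names follow the RKE config upgrade strategy schema and the values are illustrative.
import * as rancher2 from "@pulumi/rancher2";
const foo = new rancher2.Cluster("foo", {
    description: "Terraform custom cluster",
    rkeConfig: {
        network: {
            plugin: "canal",
        },
        upgradeStrategy: {
            drain: true,
            maxUnavailableWorker: "20%",
            // How nodes are drained while they are upgraded (illustrative values).
            drainInput: {
                deleteLocalData: true,  // remove emptyDir data when draining
                force: false,           // do not evict pods without a controller
                gracePeriod: 60,        // pod termination grace period, in seconds
                ignoreDaemonSets: true, // skip DaemonSet-managed pods
                timeout: 120,           // give up draining a node after two minutes
            },
        },
    },
});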
Creating a Rancher v2 RKE cluster with cluster agent customization. For Rancher v2.7.5 and above.
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Rancher2 = Pulumi.Rancher2;
return await Deployment.RunAsync(() =>
{
var foo = new Rancher2.Cluster("foo", new()
{
ClusterAgentDeploymentCustomizations = new[]
{
new Rancher2.Inputs.ClusterClusterAgentDeploymentCustomizationArgs
{
AppendTolerations = new[]
{
new Rancher2.Inputs.ClusterClusterAgentDeploymentCustomizationAppendTolerationArgs
{
Effect = "NoSchedule",
Key = "tolerate/control-plane",
Value = "true",
},
},
OverrideAffinity = @"{
""nodeAffinity"": {
""requiredDuringSchedulingIgnoredDuringExecution"": {
""nodeSelectorTerms"": [{
""matchExpressions"": [{
""key"": ""not.this/nodepool"",
""operator"": ""In"",
""values"": [
""true""
]
}]
}]
}
}
}
",
OverrideResourceRequirements = new[]
{
new Rancher2.Inputs.ClusterClusterAgentDeploymentCustomizationOverrideResourceRequirementArgs
{
CpuLimit = "800",
CpuRequest = "500",
MemoryLimit = "800",
MemoryRequest = "500",
},
},
},
},
Description = "Terraform cluster with agent customization",
RkeConfig = new Rancher2.Inputs.ClusterRkeConfigArgs
{
Network = new Rancher2.Inputs.ClusterRkeConfigNetworkArgs
{
Plugin = "canal",
},
},
});
});
package main
import (
"github.com/pulumi/pulumi-rancher2/sdk/v5/go/rancher2"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
_, err := rancher2.NewCluster(ctx, "foo", &rancher2.ClusterArgs{
ClusterAgentDeploymentCustomizations: rancher2.ClusterClusterAgentDeploymentCustomizationArray{
&rancher2.ClusterClusterAgentDeploymentCustomizationArgs{
AppendTolerations: rancher2.ClusterClusterAgentDeploymentCustomizationAppendTolerationArray{
&rancher2.ClusterClusterAgentDeploymentCustomizationAppendTolerationArgs{
Effect: pulumi.String("NoSchedule"),
Key: pulumi.String("tolerate/control-plane"),
Value: pulumi.String("true"),
},
},
OverrideAffinity: pulumi.String(`{
"nodeAffinity": {
"requiredDuringSchedulingIgnoredDuringExecution": {
"nodeSelectorTerms": [{
"matchExpressions": [{
"key": "not.this/nodepool",
"operator": "In",
"values": [
"true"
]
}]
}]
}
}
}
`),
OverrideResourceRequirements: rancher2.ClusterClusterAgentDeploymentCustomizationOverrideResourceRequirementArray{
&rancher2.ClusterClusterAgentDeploymentCustomizationOverrideResourceRequirementArgs{
CpuLimit: pulumi.String("800"),
CpuRequest: pulumi.String("500"),
MemoryLimit: pulumi.String("800"),
MemoryRequest: pulumi.String("500"),
},
},
},
},
Description: pulumi.String("Terraform cluster with agent customization"),
RkeConfig: &rancher2.ClusterRkeConfigArgs{
Network: &rancher2.ClusterRkeConfigNetworkArgs{
Plugin: pulumi.String("canal"),
},
},
})
if err != nil {
return err
}
return nil
})
}
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.rancher2.Cluster;
import com.pulumi.rancher2.ClusterArgs;
import com.pulumi.rancher2.inputs.ClusterClusterAgentDeploymentCustomizationArgs;
import com.pulumi.rancher2.inputs.ClusterClusterAgentDeploymentCustomizationAppendTolerationArgs;
import com.pulumi.rancher2.inputs.ClusterClusterAgentDeploymentCustomizationOverrideResourceRequirementArgs;
import com.pulumi.rancher2.inputs.ClusterRkeConfigArgs;
import com.pulumi.rancher2.inputs.ClusterRkeConfigNetworkArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
var foo = new Cluster("foo", ClusterArgs.builder()
.clusterAgentDeploymentCustomizations(ClusterClusterAgentDeploymentCustomizationArgs.builder()
.appendTolerations(ClusterClusterAgentDeploymentCustomizationAppendTolerationArgs.builder()
.effect("NoSchedule")
.key("tolerate/control-plane")
.value("true")
.build())
.overrideAffinity("""
{
"nodeAffinity": {
"requiredDuringSchedulingIgnoredDuringExecution": {
"nodeSelectorTerms": [{
"matchExpressions": [{
"key": "not.this/nodepool",
"operator": "In",
"values": [
"true"
]
}]
}]
}
}
}
""")
.overrideResourceRequirements(ClusterClusterAgentDeploymentCustomizationOverrideResourceRequirementArgs.builder()
.cpuLimit("800")
.cpuRequest("500")
.memoryLimit("800")
.memoryRequest("500")
.build())
.build())
.description("Terraform cluster with agent customization")
.rkeConfig(ClusterRkeConfigArgs.builder()
.network(ClusterRkeConfigNetworkArgs.builder()
.plugin("canal")
.build())
.build())
.build());
}
}
import pulumi
import pulumi_rancher2 as rancher2
foo = rancher2.Cluster("foo",
cluster_agent_deployment_customizations=[rancher2.ClusterClusterAgentDeploymentCustomizationArgs(
append_tolerations=[rancher2.ClusterClusterAgentDeploymentCustomizationAppendTolerationArgs(
effect="NoSchedule",
key="tolerate/control-plane",
value="true",
)],
override_affinity="""{
"nodeAffinity": {
"requiredDuringSchedulingIgnoredDuringExecution": {
"nodeSelectorTerms": [{
"matchExpressions": [{
"key": "not.this/nodepool",
"operator": "In",
"values": [
"true"
]
}]
}]
}
}
}
""",
override_resource_requirements=[rancher2.ClusterClusterAgentDeploymentCustomizationOverrideResourceRequirementArgs(
cpu_limit="800",
cpu_request="500",
memory_limit="800",
memory_request="500",
)],
)],
description="Terraform cluster with agent customization",
rke_config=rancher2.ClusterRkeConfigArgs(
network=rancher2.ClusterRkeConfigNetworkArgs(
plugin="canal",
),
))
import * as pulumi from "@pulumi/pulumi";
import * as rancher2 from "@pulumi/rancher2";
const foo = new rancher2.Cluster("foo", {
clusterAgentDeploymentCustomizations: [{
appendTolerations: [{
effect: "NoSchedule",
key: "tolerate/control-plane",
value: "true",
}],
overrideAffinity: `{
"nodeAffinity": {
"requiredDuringSchedulingIgnoredDuringExecution": {
"nodeSelectorTerms": [{
"matchExpressions": [{
"key": "not.this/nodepool",
"operator": "In",
"values": [
"true"
]
}]
}]
}
}
}
`,
overrideResourceRequirements: [{
cpuLimit: "800",
cpuRequest: "500",
memoryLimit: "800",
memoryRequest: "500",
}],
}],
description: "Terraform cluster with agent customization",
rkeConfig: {
network: {
plugin: "canal",
},
},
});
resources:
foo:
type: rancher2:Cluster
properties:
clusterAgentDeploymentCustomizations:
- appendTolerations:
- effect: NoSchedule
key: tolerate/control-plane
value: 'true'
overrideAffinity: |+
{
"nodeAffinity": {
"requiredDuringSchedulingIgnoredDuringExecution": {
"nodeSelectorTerms": [{
"matchExpressions": [{
"key": "not.this/nodepool",
"operator": "In",
"values": [
"true"
]
}]
}]
}
}
}
overrideResourceRequirements:
- cpuLimit: '800'
cpuRequest: '500'
memoryLimit: '800'
memoryRequest: '500'
description: Terraform cluster with agent customization
rkeConfig:
network:
plugin: canal
Importing EKS cluster to Rancher v2, using eks_config_v2. For Rancher v2.5.x and above.
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Rancher2 = Pulumi.Rancher2;
return await Deployment.RunAsync(() =>
{
var fooCloudCredential = new Rancher2.CloudCredential("fooCloudCredential", new()
{
Description = "foo test",
Amazonec2CredentialConfig = new Rancher2.Inputs.CloudCredentialAmazonec2CredentialConfigArgs
{
AccessKey = "<AWS_ACCESS_KEY>",
SecretKey = "<AWS_SECRET_KEY>",
},
});
var fooCluster = new Rancher2.Cluster("fooCluster", new()
{
Description = "Terraform EKS cluster",
EksConfigV2 = new Rancher2.Inputs.ClusterEksConfigV2Args
{
CloudCredentialId = fooCloudCredential.Id,
Name = "<CLUSTER_NAME>",
Region = "<EKS_REGION>",
Imported = true,
},
});
});
package main
import (
"github.com/pulumi/pulumi-rancher2/sdk/v5/go/rancher2"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
fooCloudCredential, err := rancher2.NewCloudCredential(ctx, "fooCloudCredential", &rancher2.CloudCredentialArgs{
Description: pulumi.String("foo test"),
Amazonec2CredentialConfig: &rancher2.CloudCredentialAmazonec2CredentialConfigArgs{
AccessKey: pulumi.String("<AWS_ACCESS_KEY>"),
SecretKey: pulumi.String("<AWS_SECRET_KEY>"),
},
})
if err != nil {
return err
}
_, err = rancher2.NewCluster(ctx, "fooCluster", &rancher2.ClusterArgs{
Description: pulumi.String("Terraform EKS cluster"),
EksConfigV2: &rancher2.ClusterEksConfigV2Args{
CloudCredentialId: fooCloudCredential.ID(),
Name: pulumi.String("<CLUSTER_NAME>"),
Region: pulumi.String("<EKS_REGION>"),
Imported: pulumi.Bool(true),
},
})
if err != nil {
return err
}
return nil
})
}
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.rancher2.CloudCredential;
import com.pulumi.rancher2.CloudCredentialArgs;
import com.pulumi.rancher2.inputs.CloudCredentialAmazonec2CredentialConfigArgs;
import com.pulumi.rancher2.Cluster;
import com.pulumi.rancher2.ClusterArgs;
import com.pulumi.rancher2.inputs.ClusterEksConfigV2Args;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
var fooCloudCredential = new CloudCredential("fooCloudCredential", CloudCredentialArgs.builder()
.description("foo test")
.amazonec2CredentialConfig(CloudCredentialAmazonec2CredentialConfigArgs.builder()
.accessKey("<AWS_ACCESS_KEY>")
.secretKey("<AWS_SECRET_KEY>")
.build())
.build());
var fooCluster = new Cluster("fooCluster", ClusterArgs.builder()
.description("Terraform EKS cluster")
.eksConfigV2(ClusterEksConfigV2Args.builder()
.cloudCredentialId(fooCloudCredential.id())
.name("<CLUSTER_NAME>")
.region("<EKS_REGION>")
.imported(true)
.build())
.build());
}
}
import pulumi
import pulumi_rancher2 as rancher2
foo_cloud_credential = rancher2.CloudCredential("fooCloudCredential",
description="foo test",
amazonec2_credential_config=rancher2.CloudCredentialAmazonec2CredentialConfigArgs(
access_key="<AWS_ACCESS_KEY>",
secret_key="<AWS_SECRET_KEY>",
))
foo_cluster = rancher2.Cluster("fooCluster",
description="Terraform EKS cluster",
eks_config_v2=rancher2.ClusterEksConfigV2Args(
cloud_credential_id=foo_cloud_credential.id,
name="<CLUSTER_NAME>",
region="<EKS_REGION>",
imported=True,
))
import * as pulumi from "@pulumi/pulumi";
import * as rancher2 from "@pulumi/rancher2";
const fooCloudCredential = new rancher2.CloudCredential("fooCloudCredential", {
description: "foo test",
amazonec2CredentialConfig: {
accessKey: "<AWS_ACCESS_KEY>",
secretKey: "<AWS_SECRET_KEY>",
},
});
const fooCluster = new rancher2.Cluster("fooCluster", {
description: "Terraform EKS cluster",
eksConfigV2: {
cloudCredentialId: fooCloudCredential.id,
name: "<CLUSTER_NAME>",
region: "<EKS_REGION>",
imported: true,
},
});
resources:
fooCloudCredential:
type: rancher2:CloudCredential
properties:
description: foo test
amazonec2CredentialConfig:
accessKey: <AWS_ACCESS_KEY>
secretKey: <AWS_SECRET_KEY>
fooCluster:
type: rancher2:Cluster
properties:
description: Terraform EKS cluster
eksConfigV2:
cloudCredentialId: ${fooCloudCredential.id}
name: <CLUSTER_NAME>
region: <EKS_REGION>
imported: true
Creating EKS cluster from Rancher v2, using eks_config_v2. For Rancher v2.5.x and above.
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Rancher2 = Pulumi.Rancher2;
return await Deployment.RunAsync(() =>
{
var fooCloudCredential = new Rancher2.CloudCredential("fooCloudCredential", new()
{
Description = "foo test",
Amazonec2CredentialConfig = new Rancher2.Inputs.CloudCredentialAmazonec2CredentialConfigArgs
{
AccessKey = "<AWS_ACCESS_KEY>",
SecretKey = "<AWS_SECRET_KEY>",
},
});
var fooCluster = new Rancher2.Cluster("fooCluster", new()
{
Description = "Terraform EKS cluster",
EksConfigV2 = new Rancher2.Inputs.ClusterEksConfigV2Args
{
CloudCredentialId = fooCloudCredential.Id,
Region = "<EKS_REGION>",
KubernetesVersion = "1.24",
LoggingTypes = new[]
{
"audit",
"api",
},
NodeGroups = new[]
{
new Rancher2.Inputs.ClusterEksConfigV2NodeGroupArgs
{
Name = "node_group1",
InstanceType = "t3.medium",
DesiredSize = 3,
MaxSize = 5,
},
new Rancher2.Inputs.ClusterEksConfigV2NodeGroupArgs
{
Name = "node_group2",
InstanceType = "m5.xlarge",
DesiredSize = 2,
MaxSize = 3,
NodeRole = "arn:aws:iam::role/test-NodeInstanceRole",
},
},
PrivateAccess = true,
PublicAccess = false,
},
});
});
package main
import (
"github.com/pulumi/pulumi-rancher2/sdk/v5/go/rancher2"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
fooCloudCredential, err := rancher2.NewCloudCredential(ctx, "fooCloudCredential", &rancher2.CloudCredentialArgs{
Description: pulumi.String("foo test"),
Amazonec2CredentialConfig: &rancher2.CloudCredentialAmazonec2CredentialConfigArgs{
AccessKey: pulumi.String("<AWS_ACCESS_KEY>"),
SecretKey: pulumi.String("<AWS_SECRET_KEY>"),
},
})
if err != nil {
return err
}
_, err = rancher2.NewCluster(ctx, "fooCluster", &rancher2.ClusterArgs{
Description: pulumi.String("Terraform EKS cluster"),
EksConfigV2: &rancher2.ClusterEksConfigV2Args{
CloudCredentialId: fooCloudCredential.ID(),
Region: pulumi.String("<EKS_REGION>"),
KubernetesVersion: pulumi.String("1.24"),
LoggingTypes: pulumi.StringArray{
pulumi.String("audit"),
pulumi.String("api"),
},
NodeGroups: rancher2.ClusterEksConfigV2NodeGroupArray{
&rancher2.ClusterEksConfigV2NodeGroupArgs{
Name: pulumi.String("node_group1"),
InstanceType: pulumi.String("t3.medium"),
DesiredSize: pulumi.Int(3),
MaxSize: pulumi.Int(5),
},
&rancher2.ClusterEksConfigV2NodeGroupArgs{
Name: pulumi.String("node_group2"),
InstanceType: pulumi.String("m5.xlarge"),
DesiredSize: pulumi.Int(2),
MaxSize: pulumi.Int(3),
NodeRole: pulumi.String("arn:aws:iam::role/test-NodeInstanceRole"),
},
},
PrivateAccess: pulumi.Bool(true),
PublicAccess: pulumi.Bool(false),
},
})
if err != nil {
return err
}
return nil
})
}
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.rancher2.CloudCredential;
import com.pulumi.rancher2.CloudCredentialArgs;
import com.pulumi.rancher2.inputs.CloudCredentialAmazonec2CredentialConfigArgs;
import com.pulumi.rancher2.Cluster;
import com.pulumi.rancher2.ClusterArgs;
import com.pulumi.rancher2.inputs.ClusterEksConfigV2Args;
import com.pulumi.rancher2.inputs.ClusterEksConfigV2NodeGroupArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
var fooCloudCredential = new CloudCredential("fooCloudCredential", CloudCredentialArgs.builder()
.description("foo test")
.amazonec2CredentialConfig(CloudCredentialAmazonec2CredentialConfigArgs.builder()
.accessKey("<AWS_ACCESS_KEY>")
.secretKey("<AWS_SECRET_KEY>")
.build())
.build());
var fooCluster = new Cluster("fooCluster", ClusterArgs.builder()
.description("Terraform EKS cluster")
.eksConfigV2(ClusterEksConfigV2Args.builder()
.cloudCredentialId(fooCloudCredential.id())
.region("<EKS_REGION>")
.kubernetesVersion("1.24")
.loggingTypes(
"audit",
"api")
.nodeGroups(
ClusterEksConfigV2NodeGroupArgs.builder()
.name("node_group1")
.instanceType("t3.medium")
.desiredSize(3)
.maxSize(5)
.build(),
ClusterEksConfigV2NodeGroupArgs.builder()
.name("node_group2")
.instanceType("m5.xlarge")
.desiredSize(2)
.maxSize(3)
.nodeRole("arn:aws:iam::role/test-NodeInstanceRole")
.build())
.privateAccess(true)
.publicAccess(false)
.build())
.build());
}
}
import pulumi
import pulumi_rancher2 as rancher2
foo_cloud_credential = rancher2.CloudCredential("fooCloudCredential",
description="foo test",
amazonec2_credential_config=rancher2.CloudCredentialAmazonec2CredentialConfigArgs(
access_key="<AWS_ACCESS_KEY>",
secret_key="<AWS_SECRET_KEY>",
))
foo_cluster = rancher2.Cluster("fooCluster",
description="Terraform EKS cluster",
eks_config_v2=rancher2.ClusterEksConfigV2Args(
cloud_credential_id=foo_cloud_credential.id,
region="<EKS_REGION>",
kubernetes_version="1.24",
logging_types=[
"audit",
"api",
],
node_groups=[
rancher2.ClusterEksConfigV2NodeGroupArgs(
name="node_group1",
instance_type="t3.medium",
desired_size=3,
max_size=5,
),
rancher2.ClusterEksConfigV2NodeGroupArgs(
name="node_group2",
instance_type="m5.xlarge",
desired_size=2,
max_size=3,
node_role="arn:aws:iam::role/test-NodeInstanceRole",
),
],
private_access=True,
public_access=False,
))
import * as pulumi from "@pulumi/pulumi";
import * as rancher2 from "@pulumi/rancher2";
const fooCloudCredential = new rancher2.CloudCredential("fooCloudCredential", {
description: "foo test",
amazonec2CredentialConfig: {
accessKey: "<AWS_ACCESS_KEY>",
secretKey: "<AWS_SECRET_KEY>",
},
});
const fooCluster = new rancher2.Cluster("fooCluster", {
description: "Terraform EKS cluster",
eksConfigV2: {
cloudCredentialId: fooCloudCredential.id,
region: "<EKS_REGION>",
kubernetesVersion: "1.24",
loggingTypes: [
"audit",
"api",
],
nodeGroups: [
{
name: "node_group1",
instanceType: "t3.medium",
desiredSize: 3,
maxSize: 5,
},
{
name: "node_group2",
instanceType: "m5.xlarge",
desiredSize: 2,
maxSize: 3,
nodeRole: "arn:aws:iam::role/test-NodeInstanceRole",
},
],
privateAccess: true,
publicAccess: false,
},
});
resources:
fooCloudCredential:
type: rancher2:CloudCredential
properties:
description: foo test
amazonec2CredentialConfig:
accessKey: <AWS_ACCESS_KEY>
secretKey: <AWS_SECRET_KEY>
fooCluster:
type: rancher2:Cluster
properties:
description: Terraform EKS cluster
eksConfigV2:
cloudCredentialId: ${fooCloudCredential.id}
region: <EKS_REGION>
kubernetesVersion: '1.24'
loggingTypes:
- audit
- api
nodeGroups:
- name: node_group1
instanceType: t3.medium
desiredSize: 3
maxSize: 5
- name: node_group2
instanceType: m5.xlarge
desiredSize: 2
maxSize: 3
nodeRole: arn:aws:iam::role/test-NodeInstanceRole
privateAccess: true
publicAccess: false
Creating EKS cluster from Rancher v2, using eks_config_v2 and launch template. For Rancher v2.5.6 and above.
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Rancher2 = Pulumi.Rancher2;
return await Deployment.RunAsync(() =>
{
var fooCloudCredential = new Rancher2.CloudCredential("fooCloudCredential", new()
{
Description = "foo test",
Amazonec2CredentialConfig = new Rancher2.Inputs.CloudCredentialAmazonec2CredentialConfigArgs
{
AccessKey = "<AWS_ACCESS_KEY>",
SecretKey = "<AWS_SECRET_KEY>",
},
});
var fooCluster = new Rancher2.Cluster("fooCluster", new()
{
Description = "Terraform EKS cluster",
EksConfigV2 = new Rancher2.Inputs.ClusterEksConfigV2Args
{
CloudCredentialId = fooCloudCredential.Id,
Region = "<EKS_REGION>",
KubernetesVersion = "1.24",
LoggingTypes = new[]
{
"audit",
"api",
},
NodeGroups = new[]
{
new Rancher2.Inputs.ClusterEksConfigV2NodeGroupArgs
{
DesiredSize = 3,
MaxSize = 5,
Name = "node_group1",
LaunchTemplates = new[]
{
new Rancher2.Inputs.ClusterEksConfigV2NodeGroupLaunchTemplateArgs
{
Id = "<EC2_LAUNCH_TEMPLATE_ID>",
Version = 1,
},
},
},
},
PrivateAccess = true,
PublicAccess = true,
},
});
});
package main
import (
"github.com/pulumi/pulumi-rancher2/sdk/v5/go/rancher2"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
fooCloudCredential, err := rancher2.NewCloudCredential(ctx, "fooCloudCredential", &rancher2.CloudCredentialArgs{
Description: pulumi.String("foo test"),
Amazonec2CredentialConfig: &rancher2.CloudCredentialAmazonec2CredentialConfigArgs{
AccessKey: pulumi.String("<AWS_ACCESS_KEY>"),
SecretKey: pulumi.String("<AWS_SECRET_KEY>"),
},
})
if err != nil {
return err
}
_, err = rancher2.NewCluster(ctx, "fooCluster", &rancher2.ClusterArgs{
Description: pulumi.String("Terraform EKS cluster"),
EksConfigV2: &rancher2.ClusterEksConfigV2Args{
CloudCredentialId: fooCloudCredential.ID(),
Region: pulumi.String("<EKS_REGION>"),
KubernetesVersion: pulumi.String("1.24"),
LoggingTypes: pulumi.StringArray{
pulumi.String("audit"),
pulumi.String("api"),
},
NodeGroups: rancher2.ClusterEksConfigV2NodeGroupArray{
&rancher2.ClusterEksConfigV2NodeGroupArgs{
DesiredSize: pulumi.Int(3),
MaxSize: pulumi.Int(5),
Name: pulumi.String("node_group1"),
LaunchTemplates: rancher2.ClusterEksConfigV2NodeGroupLaunchTemplateArray{
&rancher2.ClusterEksConfigV2NodeGroupLaunchTemplateArgs{
Id: pulumi.String("<EC2_LAUNCH_TEMPLATE_ID>"),
Version: pulumi.Int(1),
},
},
},
},
PrivateAccess: pulumi.Bool(true),
PublicAccess: pulumi.Bool(true),
},
})
if err != nil {
return err
}
return nil
})
}
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.rancher2.CloudCredential;
import com.pulumi.rancher2.CloudCredentialArgs;
import com.pulumi.rancher2.inputs.CloudCredentialAmazonec2CredentialConfigArgs;
import com.pulumi.rancher2.Cluster;
import com.pulumi.rancher2.ClusterArgs;
import com.pulumi.rancher2.inputs.ClusterEksConfigV2Args;
import com.pulumi.rancher2.inputs.ClusterEksConfigV2NodeGroupArgs;
import com.pulumi.rancher2.inputs.ClusterEksConfigV2NodeGroupLaunchTemplateArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
var fooCloudCredential = new CloudCredential("fooCloudCredential", CloudCredentialArgs.builder()
.description("foo test")
.amazonec2CredentialConfig(CloudCredentialAmazonec2CredentialConfigArgs.builder()
.accessKey("<AWS_ACCESS_KEY>")
.secretKey("<AWS_SECRET_KEY>")
.build())
.build());
var fooCluster = new Cluster("fooCluster", ClusterArgs.builder()
.description("Terraform EKS cluster")
.eksConfigV2(ClusterEksConfigV2Args.builder()
.cloudCredentialId(fooCloudCredential.id())
.region("<EKS_REGION>")
.kubernetesVersion("1.24")
.loggingTypes(
"audit",
"api")
.nodeGroups(ClusterEksConfigV2NodeGroupArgs.builder()
.desiredSize(3)
.maxSize(5)
.name("node_group1")
.launchTemplates(ClusterEksConfigV2NodeGroupLaunchTemplateArgs.builder()
.id("<EC2_LAUNCH_TEMPLATE_ID>")
.version(1)
.build())
.build())
.privateAccess(true)
.publicAccess(true)
.build())
.build());
}
}
import pulumi
import pulumi_rancher2 as rancher2
foo_cloud_credential = rancher2.CloudCredential("fooCloudCredential",
description="foo test",
amazonec2_credential_config=rancher2.CloudCredentialAmazonec2CredentialConfigArgs(
access_key="<AWS_ACCESS_KEY>",
secret_key="<AWS_SECRET_KEY>",
))
foo_cluster = rancher2.Cluster("fooCluster",
description="Terraform EKS cluster",
eks_config_v2=rancher2.ClusterEksConfigV2Args(
cloud_credential_id=foo_cloud_credential.id,
region="<EKS_REGION>",
kubernetes_version="1.24",
logging_types=[
"audit",
"api",
],
node_groups=[rancher2.ClusterEksConfigV2NodeGroupArgs(
desired_size=3,
max_size=5,
name="node_group1",
launch_templates=[rancher2.ClusterEksConfigV2NodeGroupLaunchTemplateArgs(
id="<EC2_LAUNCH_TEMPLATE_ID>",
version=1,
)],
)],
private_access=True,
public_access=True,
))
import * as pulumi from "@pulumi/pulumi";
import * as rancher2 from "@pulumi/rancher2";
const fooCloudCredential = new rancher2.CloudCredential("fooCloudCredential", {
description: "foo test",
amazonec2CredentialConfig: {
accessKey: "<AWS_ACCESS_KEY>",
secretKey: "<AWS_SECRET_KEY>",
},
});
const fooCluster = new rancher2.Cluster("fooCluster", {
description: "Terraform EKS cluster",
eksConfigV2: {
cloudCredentialId: fooCloudCredential.id,
region: "<EKS_REGION>",
kubernetesVersion: "1.24",
loggingTypes: [
"audit",
"api",
],
nodeGroups: [{
desiredSize: 3,
maxSize: 5,
name: "node_group1",
launchTemplates: [{
id: "<EC2_LAUNCH_TEMPLATE_ID>",
version: 1,
}],
}],
privateAccess: true,
publicAccess: true,
},
});
resources:
fooCloudCredential:
type: rancher2:CloudCredential
properties:
description: foo test
amazonec2CredentialConfig:
accessKey: <AWS_ACCESS_KEY>
secretKey: <AWS_SECRET_KEY>
fooCluster:
type: rancher2:Cluster
properties:
description: Terraform EKS cluster
eksConfigV2:
cloudCredentialId: ${fooCloudCredential.id}
region: <EKS_REGION>
kubernetesVersion: '1.24'
loggingTypes:
- audit
- api
nodeGroups:
- desiredSize: 3
maxSize: 5
name: node_group1
launchTemplates:
- id: <EC2_LAUNCH_TEMPLATE_ID>
version: 1
privateAccess: true
publicAccess: true
Creating AKS cluster from Rancher v2, using aks_config_v2. For Rancher v2.6.0 and above.
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Rancher2 = Pulumi.Rancher2;
return await Deployment.RunAsync(() =>
{
var foo_aks = new Rancher2.CloudCredential("foo-aks", new()
{
AzureCredentialConfig = new Rancher2.Inputs.CloudCredentialAzureCredentialConfigArgs
{
ClientId = "<CLIENT_ID>",
ClientSecret = "<CLIENT_SECRET>",
SubscriptionId = "<SUBSCRIPTION_ID>",
},
});
var foo = new Rancher2.Cluster("foo", new()
{
Description = "Terraform AKS cluster",
AksConfigV2 = new Rancher2.Inputs.ClusterAksConfigV2Args
{
CloudCredentialId = foo_aks.Id,
ResourceGroup = "<RESOURCE_GROUP>",
ResourceLocation = "<RESOURCE_LOCATION>",
DnsPrefix = "<DNS_PREFIX>",
KubernetesVersion = "1.24.6",
NetworkPlugin = "<NETWORK_PLUGIN>",
NodePools = new[]
{
new Rancher2.Inputs.ClusterAksConfigV2NodePoolArgs
{
AvailabilityZones = new[]
{
"1",
"2",
"3",
},
Name = "<NODEPOOL_NAME_1>",
Mode = "System",
Count = 1,
OrchestratorVersion = "1.21.2",
OsDiskSizeGb = 128,
VmSize = "Standard_DS2_v2",
},
new Rancher2.Inputs.ClusterAksConfigV2NodePoolArgs
{
AvailabilityZones = new[]
{
"1",
"2",
"3",
},
Name = "<NODEPOOL_NAME_2>",
Count = 1,
Mode = "User",
OrchestratorVersion = "1.21.2",
OsDiskSizeGb = 128,
VmSize = "Standard_DS2_v2",
MaxSurge = "25%",
Labels =
{
{ "test1", "data1" },
{ "test2", "data2" },
},
Taints = new[]
{
"none:PreferNoSchedule",
},
},
},
},
});
});
package main
import (
"github.com/pulumi/pulumi-rancher2/sdk/v5/go/rancher2"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
foo_aks, err := rancher2.NewCloudCredential(ctx, "foo-aks", &rancher2.CloudCredentialArgs{
AzureCredentialConfig: &rancher2.CloudCredentialAzureCredentialConfigArgs{
ClientId: pulumi.String("<CLIENT_ID>"),
ClientSecret: pulumi.String("<CLIENT_SECRET>"),
SubscriptionId: pulumi.String("<SUBSCRIPTION_ID>"),
},
})
if err != nil {
return err
}
_, err = rancher2.NewCluster(ctx, "foo", &rancher2.ClusterArgs{
Description: pulumi.String("Terraform AKS cluster"),
AksConfigV2: &rancher2.ClusterAksConfigV2Args{
CloudCredentialId: foo_aks.ID(),
ResourceGroup: pulumi.String("<RESOURCE_GROUP>"),
ResourceLocation: pulumi.String("<RESOURCE_LOCATION>"),
DnsPrefix: pulumi.String("<DNS_PREFIX>"),
KubernetesVersion: pulumi.String("1.24.6"),
NetworkPlugin: pulumi.String("<NETWORK_PLUGIN>"),
NodePools: rancher2.ClusterAksConfigV2NodePoolArray{
&rancher2.ClusterAksConfigV2NodePoolArgs{
AvailabilityZones: pulumi.StringArray{
pulumi.String("1"),
pulumi.String("2"),
pulumi.String("3"),
},
Name: pulumi.String("<NODEPOOL_NAME_1>"),
Mode: pulumi.String("System"),
Count: pulumi.Int(1),
OrchestratorVersion: pulumi.String("1.21.2"),
OsDiskSizeGb: pulumi.Int(128),
VmSize: pulumi.String("Standard_DS2_v2"),
},
&rancher2.ClusterAksConfigV2NodePoolArgs{
AvailabilityZones: pulumi.StringArray{
pulumi.String("1"),
pulumi.String("2"),
pulumi.String("3"),
},
Name: pulumi.String("<NODEPOOL_NAME_2>"),
Count: pulumi.Int(1),
Mode: pulumi.String("User"),
OrchestratorVersion: pulumi.String("1.21.2"),
OsDiskSizeGb: pulumi.Int(128),
VmSize: pulumi.String("Standard_DS2_v2"),
MaxSurge: pulumi.String("25%"),
Labels: pulumi.Map{
"test1": pulumi.Any("data1"),
"test2": pulumi.Any("data2"),
},
Taints: pulumi.StringArray{
pulumi.String("none:PreferNoSchedule"),
},
},
},
},
})
if err != nil {
return err
}
return nil
})
}
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.rancher2.CloudCredential;
import com.pulumi.rancher2.CloudCredentialArgs;
import com.pulumi.rancher2.inputs.CloudCredentialAzureCredentialConfigArgs;
import com.pulumi.rancher2.Cluster;
import com.pulumi.rancher2.ClusterArgs;
import com.pulumi.rancher2.inputs.ClusterAksConfigV2Args;
import com.pulumi.rancher2.inputs.ClusterAksConfigV2NodePoolArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
var foo_aks = new CloudCredential("foo-aks", CloudCredentialArgs.builder()
.azureCredentialConfig(CloudCredentialAzureCredentialConfigArgs.builder()
.clientId("<CLIENT_ID>")
.clientSecret("<CLIENT_SECRET>")
.subscriptionId("<SUBSCRIPTION_ID>")
.build())
.build());
var foo = new Cluster("foo", ClusterArgs.builder()
.description("Terraform AKS cluster")
.aksConfigV2(ClusterAksConfigV2Args.builder()
.cloudCredentialId(foo_aks.id())
.resourceGroup("<RESOURCE_GROUP>")
.resourceLocation("<RESOURCE_LOCATION>")
.dnsPrefix("<DNS_PREFIX>")
.kubernetesVersion("1.24.6")
.networkPlugin("<NETWORK_PLUGIN>")
.nodePools(
ClusterAksConfigV2NodePoolArgs.builder()
.availabilityZones(
"1",
"2",
"3")
.name("<NODEPOOL_NAME_1>")
.mode("System")
.count(1)
.orchestratorVersion("1.21.2")
.osDiskSizeGb(128)
.vmSize("Standard_DS2_v2")
.build(),
ClusterAksConfigV2NodePoolArgs.builder()
.availabilityZones(
"1",
"2",
"3")
.name("<NODEPOOL_NAME_2>")
.count(1)
.mode("User")
.orchestratorVersion("1.21.2")
.osDiskSizeGb(128)
.vmSize("Standard_DS2_v2")
.maxSurge("25%")
.labels(Map.ofEntries(
Map.entry("test1", "data1"),
Map.entry("test2", "data2")
))
.taints("none:PreferNoSchedule")
.build())
.build())
.build());
}
}
import pulumi
import pulumi_rancher2 as rancher2
foo_aks = rancher2.CloudCredential("foo-aks", azure_credential_config=rancher2.CloudCredentialAzureCredentialConfigArgs(
client_id="<CLIENT_ID>",
client_secret="<CLIENT_SECRET>",
subscription_id="<SUBSCRIPTION_ID>",
))
foo = rancher2.Cluster("foo",
description="Terraform AKS cluster",
aks_config_v2=rancher2.ClusterAksConfigV2Args(
cloud_credential_id=foo_aks.id,
resource_group="<RESOURCE_GROUP>",
resource_location="<RESOURCE_LOCATION>",
dns_prefix="<DNS_PREFIX>",
kubernetes_version="1.24.6",
network_plugin="<NETWORK_PLUGIN>",
node_pools=[
rancher2.ClusterAksConfigV2NodePoolArgs(
availability_zones=[
"1",
"2",
"3",
],
name="<NODEPOOL_NAME_1>",
mode="System",
count=1,
orchestrator_version="1.21.2",
os_disk_size_gb=128,
vm_size="Standard_DS2_v2",
),
rancher2.ClusterAksConfigV2NodePoolArgs(
availability_zones=[
"1",
"2",
"3",
],
name="<NODEPOOL_NAME_2>",
count=1,
mode="User",
orchestrator_version="1.21.2",
os_disk_size_gb=128,
vm_size="Standard_DS2_v2",
max_surge="25%",
labels={
"test1": "data1",
"test2": "data2",
},
taints=["none:PreferNoSchedule"],
),
],
))
import * as pulumi from "@pulumi/pulumi";
import * as rancher2 from "@pulumi/rancher2";
const foo_aks = new rancher2.CloudCredential("foo-aks", {azureCredentialConfig: {
clientId: "<CLIENT_ID>",
clientSecret: "<CLIENT_SECRET>",
subscriptionId: "<SUBSCRIPTION_ID>",
}});
const foo = new rancher2.Cluster("foo", {
description: "Terraform AKS cluster",
aksConfigV2: {
cloudCredentialId: foo_aks.id,
resourceGroup: "<RESOURCE_GROUP>",
resourceLocation: "<RESOURCE_LOCATION>",
dnsPrefix: "<DNS_PREFIX>",
kubernetesVersion: "1.24.6",
networkPlugin: "<NETWORK_PLUGIN>",
nodePools: [
{
availabilityZones: [
"1",
"2",
"3",
],
name: "<NODEPOOL_NAME_1>",
mode: "System",
count: 1,
orchestratorVersion: "1.21.2",
osDiskSizeGb: 128,
vmSize: "Standard_DS2_v2",
},
{
availabilityZones: [
"1",
"2",
"3",
],
name: "<NODEPOOL_NAME_2>",
count: 1,
mode: "User",
orchestratorVersion: "1.21.2",
osDiskSizeGb: 128,
vmSize: "Standard_DS2_v2",
maxSurge: "25%",
labels: {
test1: "data1",
test2: "data2",
},
taints: ["none:PreferNoSchedule"],
},
],
},
});
resources:
foo-aks:
type: rancher2:CloudCredential
properties:
azureCredentialConfig:
clientId: <CLIENT_ID>
clientSecret: <CLIENT_SECRET>
subscriptionId: <SUBSCRIPTION_ID>
foo:
type: rancher2:Cluster
properties:
description: Terraform AKS cluster
aksConfigV2:
cloudCredentialId: ${["foo-aks"].id}
resourceGroup: <RESOURCE_GROUP>
resourceLocation: <RESOURCE_LOCATION>
dnsPrefix: <DNS_PREFIX>
kubernetesVersion: 1.24.6
networkPlugin: <NETWORK_PLUGIN>
nodePools:
- availabilityZones:
- '1'
- '2'
- '3'
name: <NODEPOOL_NAME_1>
mode: System
count: 1
orchestratorVersion: 1.21.2
osDiskSizeGb: 128
vmSize: Standard_DS2_v2
- availabilityZones:
- '1'
- '2'
- '3'
name: <NODEPOOL_NAME_2>
count: 1
mode: User
orchestratorVersion: 1.21.2
osDiskSizeGb: 128
vmSize: Standard_DS2_v2
maxSurge: 25%
labels:
test1: data1
test2: data2
taints:
- none:PreferNoSchedule
Create Cluster Resource
new Cluster(name: string, args?: ClusterArgs, opts?: CustomResourceOptions);
@overload
def Cluster(resource_name: str,
opts: Optional[ResourceOptions] = None,
agent_env_vars: Optional[Sequence[ClusterAgentEnvVarArgs]] = None,
aks_config: Optional[ClusterAksConfigArgs] = None,
aks_config_v2: Optional[ClusterAksConfigV2Args] = None,
annotations: Optional[Mapping[str, Any]] = None,
cluster_agent_deployment_customizations: Optional[Sequence[ClusterClusterAgentDeploymentCustomizationArgs]] = None,
cluster_auth_endpoint: Optional[ClusterClusterAuthEndpointArgs] = None,
cluster_monitoring_input: Optional[ClusterClusterMonitoringInputArgs] = None,
cluster_template_answers: Optional[ClusterClusterTemplateAnswersArgs] = None,
cluster_template_id: Optional[str] = None,
cluster_template_questions: Optional[Sequence[ClusterClusterTemplateQuestionArgs]] = None,
cluster_template_revision_id: Optional[str] = None,
default_pod_security_admission_configuration_template_name: Optional[str] = None,
default_pod_security_policy_template_id: Optional[str] = None,
description: Optional[str] = None,
desired_agent_image: Optional[str] = None,
desired_auth_image: Optional[str] = None,
docker_root_dir: Optional[str] = None,
driver: Optional[str] = None,
eks_config: Optional[ClusterEksConfigArgs] = None,
eks_config_v2: Optional[ClusterEksConfigV2Args] = None,
enable_cluster_alerting: Optional[bool] = None,
enable_cluster_monitoring: Optional[bool] = None,
enable_network_policy: Optional[bool] = None,
fleet_agent_deployment_customizations: Optional[Sequence[ClusterFleetAgentDeploymentCustomizationArgs]] = None,
fleet_workspace_name: Optional[str] = None,
gke_config: Optional[ClusterGkeConfigArgs] = None,
gke_config_v2: Optional[ClusterGkeConfigV2Args] = None,
k3s_config: Optional[ClusterK3sConfigArgs] = None,
labels: Optional[Mapping[str, Any]] = None,
name: Optional[str] = None,
oke_config: Optional[ClusterOkeConfigArgs] = None,
rke2_config: Optional[ClusterRke2ConfigArgs] = None,
rke_config: Optional[ClusterRkeConfigArgs] = None,
windows_prefered_cluster: Optional[bool] = None)
@overload
def Cluster(resource_name: str,
args: Optional[ClusterArgs] = None,
opts: Optional[ResourceOptions] = None)
func NewCluster(ctx *Context, name string, args *ClusterArgs, opts ...ResourceOption) (*Cluster, error)
public Cluster(string name, ClusterArgs? args = null, CustomResourceOptions? opts = null)
public Cluster(String name, ClusterArgs args)
public Cluster(String name, ClusterArgs args, CustomResourceOptions options)
type: rancher2:Cluster
properties: # The arguments to resource properties.
options: # Bag of options to control resource's behavior.
TypeScript / JavaScript:
- name (string): The unique name of the resource.
- args (ClusterArgs): The arguments to resource properties.
- opts (CustomResourceOptions): Bag of options to control resource's behavior.
Python:
- resource_name (str): The unique name of the resource.
- args (ClusterArgs): The arguments to resource properties.
- opts (ResourceOptions): Bag of options to control resource's behavior.
Go:
- ctx (Context): Context object for the current deployment.
- name (string): The unique name of the resource.
- args (ClusterArgs): The arguments to resource properties.
- opts (ResourceOption): Bag of options to control resource's behavior.
C#:
- name (string): The unique name of the resource.
- args (ClusterArgs): The arguments to resource properties.
- opts (CustomResourceOptions): Bag of options to control resource's behavior.
Java:
- name (String): The unique name of the resource.
- args (ClusterArgs): The arguments to resource properties.
- options (CustomResourceOptions): Bag of options to control resource's behavior.
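For orientation, a minimal constructor call might look like the following TypeScript sketch. It only combines pieces already shown in the examples above (a description and an RKE config with the canal network plugin); the resource name minimal-example and the exported output name are placeholders, and every other input documented below is optional.
import * as rancher2 from "@pulumi/rancher2";
// Minimal sketch: only a name, a description and an rkeConfig are provided;
// all other Cluster inputs listed below keep their defaults.
const minimalExample = new rancher2.Cluster("minimal-example", {
    description: "Minimal rancher2 cluster",
    rkeConfig: {
        network: {
            plugin: "canal",
        },
    },
});
// Export the cluster ID so other resources or stacks can reference it.
export const clusterId = minimalExample.id;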
Cluster Resource Properties
To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.
Inputs
The Cluster resource accepts the following input properties:
C#:
- AgentEnvVars (List<ClusterAgentEnvVar>): Optional Agent Env Vars for Rancher agent. For Rancher v2.5.6 and above (list)
- AksConfig (ClusterAksConfig): The Azure AKS configuration for aks Clusters. Conflicts with aks_config_v2, eks_config, eks_config_v2, gke_config, gke_config_v2, oke_config, k3s_config and rke_config (list maxitems:1)
- AksConfigV2 (ClusterAksConfigV2): The Azure AKS v2 configuration for creating/import aks Clusters. Conflicts with aks_config, eks_config, eks_config_v2, gke_config, gke_config_v2, oke_config, k3s_config and rke_config (list maxitems:1)
- Annotations (Dictionary<string, object>): Annotations for the Cluster (map)
- ClusterAgentDeploymentCustomizations (List<ClusterClusterAgentDeploymentCustomization>): Optional customization for cluster agent. For Rancher v2.7.5 and above (list)
- ClusterAuthEndpoint (ClusterClusterAuthEndpoint): Enabling the local cluster authorized endpoint allows direct communication with the cluster, bypassing the Rancher API proxy. (list maxitems:1)
- ClusterMonitoringInput (ClusterClusterMonitoringInput): Cluster monitoring config. Any parameter defined in rancher-monitoring charts could be configured (list maxitems:1)
- ClusterTemplateAnswers (ClusterClusterTemplateAnswers): Cluster template answers. For Rancher v2.3.x and above (list maxitems:1)
- ClusterTemplateId (string): Cluster template ID. For Rancher v2.3.x and above (string)
- ClusterTemplateQuestions (List<ClusterClusterTemplateQuestion>): Cluster template questions. For Rancher v2.3.x and above (list)
- ClusterTemplateRevisionId (string): Cluster template revision ID. For Rancher v2.3.x and above (string)
- DefaultPodSecurityAdmissionConfigurationTemplateName (string): Cluster default pod security admission configuration template name (string)
- DefaultPodSecurityPolicyTemplateId (string)
- Description (string): The description for Cluster (string)
- DesiredAgentImage (string): Desired agent image. For Rancher v2.3.x and above (string)
- DesiredAuthImage (string): Desired auth image. For Rancher v2.3.x and above (string)
- DockerRootDir (string): Desired auth image. For Rancher v2.3.x and above (string)
- Driver (string): (Computed) The driver used for the Cluster. imported, azurekubernetesservice, amazonelasticcontainerservice, googlekubernetesengine and rancherKubernetesEngine are supported (string)
- EksConfig (ClusterEksConfig): The Amazon EKS configuration for eks Clusters. Conflicts with aks_config, aks_config_v2, eks_config_v2, gke_config, gke_config_v2, oke_config, k3s_config and rke_config (list maxitems:1)
- EksConfigV2 (ClusterEksConfigV2): The Amazon EKS V2 configuration to create or import eks Clusters. Conflicts with aks_config, eks_config, gke_config, gke_config_v2, oke_config, k3s_config and rke_config. For Rancher v2.5.x and above (list maxitems:1)
- EnableClusterAlerting (bool): Enable built-in cluster alerting (bool)
- EnableClusterMonitoring (bool): Enable built-in cluster monitoring (bool)
- EnableNetworkPolicy (bool): Enable project network isolation (bool)
- FleetAgentDeploymentCustomizations (List<ClusterFleetAgentDeploymentCustomization>): Optional customization for fleet agent. For Rancher v2.7.5 and above (list)
- FleetWorkspaceName (string): Fleet workspace name (string)
- GkeConfig (ClusterGkeConfig): The Google GKE configuration for gke Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config_v2, oke_config, k3s_config and rke_config (list maxitems:1)
- GkeConfigV2 (ClusterGkeConfigV2): The Google GKE V2 configuration for gke Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config, oke_config, k3s_config and rke_config. For Rancher v2.5.8 and above (list maxitems:1)
- K3sConfig (ClusterK3sConfig): The K3S configuration for k3s imported Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config, gke_config_v2, oke_config and rke_config (list maxitems:1)
- Labels (Dictionary<string, object>): Labels for the Cluster (map)
- Name (string): The name of the Cluster (string)
- OkeConfig (ClusterOkeConfig): The Oracle OKE configuration for oke Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config, gke_config_v2, k3s_config and rke_config (list maxitems:1)
- Rke2Config (ClusterRke2Config): The RKE2 configuration for rke2 Clusters. Conflicts with aks_config, aks_config_v2, eks_config, gke_config, oke_config, k3s_config and rke_config (list maxitems:1)
- RkeConfig (ClusterRkeConfig): The RKE configuration for rke Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config, gke_config_v2, oke_config and k3s_config (list maxitems:1)
- WindowsPreferedCluster (bool): Windows preferred cluster. Default: false (bool)
Go:
- AgentEnvVars ([]ClusterAgentEnvVarArgs): Optional Agent Env Vars for Rancher agent. For Rancher v2.5.6 and above (list)
- AksConfig (ClusterAksConfigArgs): The Azure AKS configuration for aks Clusters. Conflicts with aks_config_v2, eks_config, eks_config_v2, gke_config, gke_config_v2, oke_config, k3s_config and rke_config (list maxitems:1)
- AksConfigV2 (ClusterAksConfigV2Args): The Azure AKS v2 configuration for creating/import aks Clusters. Conflicts with aks_config, eks_config, eks_config_v2, gke_config, gke_config_v2, oke_config, k3s_config and rke_config (list maxitems:1)
- Annotations (map[string]interface{}): Annotations for the Cluster (map)
- ClusterAgentDeploymentCustomizations ([]ClusterClusterAgentDeploymentCustomizationArgs): Optional customization for cluster agent. For Rancher v2.7.5 and above (list)
- ClusterAuthEndpoint (ClusterClusterAuthEndpointArgs): Enabling the local cluster authorized endpoint allows direct communication with the cluster, bypassing the Rancher API proxy. (list maxitems:1)
- ClusterMonitoringInput (ClusterClusterMonitoringInputArgs): Cluster monitoring config. Any parameter defined in rancher-monitoring charts could be configured (list maxitems:1)
- ClusterTemplateAnswers (ClusterClusterTemplateAnswersArgs): Cluster template answers. For Rancher v2.3.x and above (list maxitems:1)
- ClusterTemplateId (string): Cluster template ID. For Rancher v2.3.x and above (string)
- ClusterTemplateQuestions ([]ClusterClusterTemplateQuestionArgs): Cluster template questions. For Rancher v2.3.x and above (list)
- ClusterTemplateRevisionId (string): Cluster template revision ID. For Rancher v2.3.x and above (string)
- DefaultPodSecurityAdmissionConfigurationTemplateName (string): Cluster default pod security admission configuration template name (string)
- DefaultPodSecurityPolicyTemplateId (string)
- Description (string): The description for Cluster (string)
- DesiredAgentImage (string): Desired agent image. For Rancher v2.3.x and above (string)
- DesiredAuthImage (string): Desired auth image. For Rancher v2.3.x and above (string)
- DockerRootDir (string): Desired auth image. For Rancher v2.3.x and above (string)
- Driver (string): (Computed) The driver used for the Cluster. imported, azurekubernetesservice, amazonelasticcontainerservice, googlekubernetesengine and rancherKubernetesEngine are supported (string)
- EksConfig (ClusterEksConfigArgs): The Amazon EKS configuration for eks Clusters. Conflicts with aks_config, aks_config_v2, eks_config_v2, gke_config, gke_config_v2, oke_config, k3s_config and rke_config (list maxitems:1)
- EksConfigV2 (ClusterEksConfigV2Args): The Amazon EKS V2 configuration to create or import eks Clusters. Conflicts with aks_config, eks_config, gke_config, gke_config_v2, oke_config, k3s_config and rke_config. For Rancher v2.5.x and above (list maxitems:1)
- EnableClusterAlerting (bool): Enable built-in cluster alerting (bool)
- EnableClusterMonitoring (bool): Enable built-in cluster monitoring (bool)
- EnableNetworkPolicy (bool): Enable project network isolation (bool)
- FleetAgentDeploymentCustomizations ([]ClusterFleetAgentDeploymentCustomizationArgs): Optional customization for fleet agent. For Rancher v2.7.5 and above (list)
- FleetWorkspaceName (string): Fleet workspace name (string)
- GkeConfig (ClusterGkeConfigArgs): The Google GKE configuration for gke Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config_v2, oke_config, k3s_config and rke_config (list maxitems:1)
- GkeConfigV2 (ClusterGkeConfigV2Args): The Google GKE V2 configuration for gke Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config, oke_config, k3s_config and rke_config. For Rancher v2.5.8 and above (list maxitems:1)
- K3sConfig (ClusterK3sConfigArgs): The K3S configuration for k3s imported Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config, gke_config_v2, oke_config and rke_config (list maxitems:1)
- Labels (map[string]interface{}): Labels for the Cluster (map)
- Name (string): The name of the Cluster (string)
- OkeConfig (ClusterOkeConfigArgs): The Oracle OKE configuration for oke Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config, gke_config_v2, k3s_config and rke_config (list maxitems:1)
- Rke2Config (ClusterRke2ConfigArgs): The RKE2 configuration for rke2 Clusters. Conflicts with aks_config, aks_config_v2, eks_config, gke_config, oke_config, k3s_config and rke_config (list maxitems:1)
- RkeConfig (ClusterRkeConfigArgs): The RKE configuration for rke Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config, gke_config_v2, oke_config and k3s_config (list maxitems:1)
- WindowsPreferedCluster (bool): Windows preferred cluster. Default: false (bool)
Java:
- agentEnvVars (List<ClusterAgentEnvVar>): Optional Agent Env Vars for Rancher agent. For Rancher v2.5.6 and above (list)
- aksConfig (ClusterAksConfig): The Azure AKS configuration for aks Clusters. Conflicts with aks_config_v2, eks_config, eks_config_v2, gke_config, gke_config_v2, oke_config, k3s_config and rke_config (list maxitems:1)
- aksConfigV2 (ClusterAksConfigV2): The Azure AKS v2 configuration for creating/import aks Clusters. Conflicts with aks_config, eks_config, eks_config_v2, gke_config, gke_config_v2, oke_config, k3s_config and rke_config (list maxitems:1)
- annotations (Map<String,Object>): Annotations for the Cluster (map)
- clusterAgentDeploymentCustomizations (List<ClusterClusterAgentDeploymentCustomization>): Optional customization for cluster agent. For Rancher v2.7.5 and above (list)
- clusterAuthEndpoint (ClusterClusterAuthEndpoint): Enabling the local cluster authorized endpoint allows direct communication with the cluster, bypassing the Rancher API proxy. (list maxitems:1)
- clusterMonitoringInput (ClusterClusterMonitoringInput): Cluster monitoring config. Any parameter defined in rancher-monitoring charts could be configured (list maxitems:1)
- clusterTemplateAnswers (ClusterClusterTemplateAnswers): Cluster template answers. For Rancher v2.3.x and above (list maxitems:1)
- clusterTemplateId (String): Cluster template ID. For Rancher v2.3.x and above (string)
- clusterTemplateQuestions (List<ClusterClusterTemplateQuestion>): Cluster template questions. For Rancher v2.3.x and above (list)
- clusterTemplateRevisionId (String): Cluster template revision ID. For Rancher v2.3.x and above (string)
- defaultPodSecurityAdmissionConfigurationTemplateName (String): Cluster default pod security admission configuration template name (string)
- defaultPodSecurityPolicyTemplateId (String)
- description (String): The description for Cluster (string)
- desiredAgentImage (String): Desired agent image. For Rancher v2.3.x and above (string)
- desiredAuthImage (String): Desired auth image. For Rancher v2.3.x and above (string)
- dockerRootDir (String): Desired auth image. For Rancher v2.3.x and above (string)
- driver (String): (Computed) The driver used for the Cluster. imported, azurekubernetesservice, amazonelasticcontainerservice, googlekubernetesengine and rancherKubernetesEngine are supported (string)
- eksConfig (ClusterEksConfig): The Amazon EKS configuration for eks Clusters. Conflicts with aks_config, aks_config_v2, eks_config_v2, gke_config, gke_config_v2, oke_config, k3s_config and rke_config (list maxitems:1)
- eksConfigV2 (ClusterEksConfigV2): The Amazon EKS V2 configuration to create or import eks Clusters. Conflicts with aks_config, eks_config, gke_config, gke_config_v2, oke_config, k3s_config and rke_config. For Rancher v2.5.x and above (list maxitems:1)
- enableClusterAlerting (Boolean): Enable built-in cluster alerting (bool)
- enableClusterMonitoring (Boolean): Enable built-in cluster monitoring (bool)
- enableNetworkPolicy (Boolean): Enable project network isolation (bool)
- fleetAgentDeploymentCustomizations (List<ClusterFleetAgentDeploymentCustomization>): Optional customization for fleet agent. For Rancher v2.7.5 and above (list)
- fleetWorkspaceName (String): Fleet workspace name (string)
- gkeConfig (ClusterGkeConfig): The Google GKE configuration for gke Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config_v2, oke_config, k3s_config and rke_config (list maxitems:1)
- gkeConfigV2 (ClusterGkeConfigV2): The Google GKE V2 configuration for gke Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config, oke_config, k3s_config and rke_config. For Rancher v2.5.8 and above (list maxitems:1)
- k3sConfig (ClusterK3sConfig): The K3S configuration for k3s imported Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config, gke_config_v2, oke_config and rke_config (list maxitems:1)
- labels (Map<String,Object>): Labels for the Cluster (map)
- name (String): The name of the Cluster (string)
- okeConfig (ClusterOkeConfig): The Oracle OKE configuration for oke Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config, gke_config_v2, k3s_config and rke_config (list maxitems:1)
- rke2Config (ClusterRke2Config): The RKE2 configuration for rke2 Clusters. Conflicts with aks_config, aks_config_v2, eks_config, gke_config, oke_config, k3s_config and rke_config (list maxitems:1)
- rkeConfig (ClusterRkeConfig): The RKE configuration for rke Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config, gke_config_v2, oke_config and k3s_config (list maxitems:1)
- windowsPreferedCluster (Boolean): Windows preferred cluster. Default: false (bool)
TypeScript / JavaScript:
- agentEnvVars (ClusterAgentEnvVar[]): Optional Agent Env Vars for Rancher agent. For Rancher v2.5.6 and above (list)
- aksConfig (ClusterAksConfig): The Azure AKS configuration for aks Clusters. Conflicts with aks_config_v2, eks_config, eks_config_v2, gke_config, gke_config_v2, oke_config, k3s_config and rke_config (list maxitems:1)
- aksConfigV2 (ClusterAksConfigV2): The Azure AKS v2 configuration for creating/import aks Clusters. Conflicts with aks_config, eks_config, eks_config_v2, gke_config, gke_config_v2, oke_config, k3s_config and rke_config (list maxitems:1)
- annotations ({[key: string]: any}): Annotations for the Cluster (map)
- clusterAgentDeploymentCustomizations (ClusterClusterAgentDeploymentCustomization[]): Optional customization for cluster agent. For Rancher v2.7.5 and above (list)
- clusterAuthEndpoint (ClusterClusterAuthEndpoint): Enabling the local cluster authorized endpoint allows direct communication with the cluster, bypassing the Rancher API proxy. (list maxitems:1)
- clusterMonitoringInput (ClusterClusterMonitoringInput): Cluster monitoring config. Any parameter defined in rancher-monitoring charts could be configured (list maxitems:1)
- clusterTemplateAnswers (ClusterClusterTemplateAnswers): Cluster template answers. For Rancher v2.3.x and above (list maxitems:1)
- clusterTemplateId (string): Cluster template ID. For Rancher v2.3.x and above (string)
- clusterTemplateQuestions (ClusterClusterTemplateQuestion[]): Cluster template questions. For Rancher v2.3.x and above (list)
- clusterTemplateRevisionId (string): Cluster template revision ID. For Rancher v2.3.x and above (string)
- defaultPodSecurityAdmissionConfigurationTemplateName (string): Cluster default pod security admission configuration template name (string)
- defaultPodSecurityPolicyTemplateId (string)
- description (string): The description for Cluster (string)
- desiredAgentImage (string): Desired agent image. For Rancher v2.3.x and above (string)
- desiredAuthImage (string): Desired auth image. For Rancher v2.3.x and above (string)
- dockerRootDir (string): Desired auth image. For Rancher v2.3.x and above (string)
- driver (string): (Computed) The driver used for the Cluster. imported, azurekubernetesservice, amazonelasticcontainerservice, googlekubernetesengine and rancherKubernetesEngine are supported (string)
- eksConfig (ClusterEksConfig): The Amazon EKS configuration for eks Clusters. Conflicts with aks_config, aks_config_v2, eks_config_v2, gke_config, gke_config_v2, oke_config, k3s_config and rke_config (list maxitems:1)
- eksConfigV2 (ClusterEksConfigV2): The Amazon EKS V2 configuration to create or import eks Clusters. Conflicts with aks_config, eks_config, gke_config, gke_config_v2, oke_config, k3s_config and rke_config. For Rancher v2.5.x and above (list maxitems:1)
- enableClusterAlerting (boolean): Enable built-in cluster alerting (bool)
- enableClusterMonitoring (boolean): Enable built-in cluster monitoring (bool)
- enableNetworkPolicy (boolean): Enable project network isolation (bool)
- fleetAgentDeploymentCustomizations (ClusterFleetAgentDeploymentCustomization[]): Optional customization for fleet agent. For Rancher v2.7.5 and above (list)
- fleetWorkspaceName (string): Fleet workspace name (string)
- gkeConfig (ClusterGkeConfig): The Google GKE configuration for gke Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config_v2, oke_config, k3s_config and rke_config (list maxitems:1)
- gkeConfigV2 (ClusterGkeConfigV2): The Google GKE V2 configuration for gke Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config, oke_config, k3s_config and rke_config. For Rancher v2.5.8 and above (list maxitems:1)
- k3sConfig (ClusterK3sConfig): The K3S configuration for k3s imported Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config, gke_config_v2, oke_config and rke_config (list maxitems:1)
- labels ({[key: string]: any}): Labels for the Cluster (map)
- name (string): The name of the Cluster (string)
- okeConfig (ClusterOkeConfig): The Oracle OKE configuration for oke Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config, gke_config_v2, k3s_config and rke_config (list maxitems:1)
- rke2Config (ClusterRke2Config): The RKE2 configuration for rke2 Clusters. Conflicts with aks_config, aks_config_v2, eks_config, gke_config, oke_config, k3s_config and rke_config (list maxitems:1)
- rkeConfig (ClusterRkeConfig): The RKE configuration for rke Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config, gke_config_v2, oke_config and k3s_config (list maxitems:1)
- windowsPreferedCluster (boolean): Windows preferred cluster. Default: false (bool)
Python:
- agent_env_vars (Sequence[ClusterAgentEnvVarArgs]): Optional Agent Env Vars for Rancher agent. For Rancher v2.5.6 and above (list)
- aks_config (ClusterAksConfigArgs): The Azure AKS configuration for aks Clusters. Conflicts with aks_config_v2, eks_config, eks_config_v2, gke_config, gke_config_v2, oke_config, k3s_config and rke_config (list maxitems:1)
- aks_config_v2 (ClusterAksConfigV2Args): The Azure AKS v2 configuration for creating/import aks Clusters. Conflicts with aks_config, eks_config, eks_config_v2, gke_config, gke_config_v2, oke_config, k3s_config and rke_config (list maxitems:1)
- annotations (Mapping[str, Any]): Annotations for the Cluster (map)
- cluster_agent_deployment_customizations (Sequence[ClusterClusterAgentDeploymentCustomizationArgs]): Optional customization for cluster agent. For Rancher v2.7.5 and above (list)
- cluster_auth_endpoint (ClusterClusterAuthEndpointArgs): Enabling the local cluster authorized endpoint allows direct communication with the cluster, bypassing the Rancher API proxy. (list maxitems:1)
- cluster_monitoring_input (ClusterClusterMonitoringInputArgs): Cluster monitoring config. Any parameter defined in rancher-monitoring charts could be configured (list maxitems:1)
- cluster_template_answers (ClusterClusterTemplateAnswersArgs): Cluster template answers. For Rancher v2.3.x and above (list maxitems:1)
- cluster_template_id (str): Cluster template ID. For Rancher v2.3.x and above (string)
- cluster_template_questions (Sequence[ClusterClusterTemplateQuestionArgs]): Cluster template questions. For Rancher v2.3.x and above (list)
- cluster_template_revision_id (str): Cluster template revision ID. For Rancher v2.3.x and above (string)
- default_pod_security_admission_configuration_template_name (str): Cluster default pod security admission configuration template name (string)
- default_pod_security_policy_template_id (str)
- description (str): The description for Cluster (string)
- desired_agent_image (str): Desired agent image. For Rancher v2.3.x and above (string)
- desired_auth_image (str): Desired auth image. For Rancher v2.3.x and above (string)
- docker_root_dir (str): Desired auth image. For Rancher v2.3.x and above (string)
- driver (str): (Computed) The driver used for the Cluster. imported, azurekubernetesservice, amazonelasticcontainerservice, googlekubernetesengine and rancherKubernetesEngine are supported (string)
- eks_config (ClusterEksConfigArgs): The Amazon EKS configuration for eks Clusters. Conflicts with aks_config, aks_config_v2, eks_config_v2, gke_config, gke_config_v2, oke_config, k3s_config and rke_config (list maxitems:1)
- eks_config_v2 (ClusterEksConfigV2Args): The Amazon EKS V2 configuration to create or import eks Clusters. Conflicts with aks_config, eks_config, gke_config, gke_config_v2, oke_config, k3s_config and rke_config. For Rancher v2.5.x and above (list maxitems:1)
- enable_cluster_alerting (bool): Enable built-in cluster alerting (bool)
- enable_cluster_monitoring (bool): Enable built-in cluster monitoring (bool)
- enable_network_policy (bool): Enable project network isolation (bool)
- fleet_agent_deployment_customizations (Sequence[ClusterFleetAgentDeploymentCustomizationArgs]): Optional customization for fleet agent. For Rancher v2.7.5 and above (list)
- fleet_workspace_name (str): Fleet workspace name (string)
- gke_config (ClusterGkeConfigArgs): The Google GKE configuration for gke Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config_v2, oke_config, k3s_config and rke_config (list maxitems:1)
- gke_config_v2 (ClusterGkeConfigV2Args): The Google GKE V2 configuration for gke Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config, oke_config, k3s_config and rke_config. For Rancher v2.5.8 and above (list maxitems:1)
- k3s_config (ClusterK3sConfigArgs): The K3S configuration for k3s imported Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config, gke_config_v2, oke_config and rke_config (list maxitems:1)
- labels (Mapping[str, Any]): Labels for the Cluster (map)
- name (str): The name of the Cluster (string)
- oke_config (ClusterOkeConfigArgs): The Oracle OKE configuration for oke Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config, gke_config_v2, k3s_config and rke_config (list maxitems:1)
- rke2_config (ClusterRke2ConfigArgs): The RKE2 configuration for rke2 Clusters. Conflicts with aks_config, aks_config_v2, eks_config, gke_config, oke_config, k3s_config and rke_config (list maxitems:1)
- rke_config (ClusterRkeConfigArgs): The RKE configuration for rke Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config, gke_config_v2, oke_config and k3s_config (list maxitems:1)
- windows_prefered_cluster (bool): Windows preferred cluster. Default: false (bool)
- agent
Env List<Property Map>Vars Optional Agent Env Vars for Rancher agent. For Rancher v2.5.6 and above (list)
- aks
Config Property Map The Azure AKS configuration for
aks
Clusters. Conflicts withaks_config_v2
,eks_config
,eks_config_v2
,gke_config
,gke_config_v2
,oke_config
k3s_config
andrke_config
(list maxitems:1)- aks
Config Property MapV2 The Azure AKS v2 configuration for creating/import
aks
Clusters. Conflicts withaks_config
,eks_config
,eks_config_v2
,gke_config
,gke_config_v2
,oke_config
k3s_config
andrke_config
(list maxitems:1)- annotations Map<Any>
Annotations for the Cluster (map)
- cluster
Agent List<Property Map>Deployment Customizations Optional customization for cluster agent. For Rancher v2.7.5 and above (list)
- cluster
Auth Property MapEndpoint Enabling the local cluster authorized endpoint allows direct communication with the cluster, bypassing the Rancher API proxy. (list maxitems:1)
- cluster
Monitoring Property MapInput Cluster monitoring config. Any parameter defined in rancher-monitoring charts could be configured (list maxitems:1)
- cluster
Template Property MapAnswers Cluster template answers. For Rancher v2.3.x and above (list maxitems:1)
- cluster
Template StringId Cluster template ID. For Rancher v2.3.x and above (string)
- cluster
Template List<Property Map>Questions Cluster template questions. For Rancher v2.3.x and above (list)
- cluster
Template StringRevision Id Cluster template revision ID. For Rancher v2.3.x and above (string)
- default
Pod StringSecurity Admission Configuration Template Name Cluster default pod security admission configuration template name (string)
- default
Pod StringSecurity Policy Template Id - description String
The description for Cluster (string)
- desired
Agent StringImage Desired agent image. For Rancher v2.3.x and above (string)
- desired
Auth StringImage Desired auth image. For Rancher v2.3.x and above (string)
- docker
Root StringDir Desired auth image. For Rancher v2.3.x and above (string)
- driver String
(Computed) The driver used for the Cluster.
imported
,azurekubernetesservice
,amazonelasticcontainerservice
,googlekubernetesengine
andrancherKubernetesEngine
are supported (string)- eks
Config Property Map The Amazon EKS configuration for
eks
Clusters. Conflicts withaks_config
,aks_config_v2
,eks_config_v2
,gke_config
,gke_config_v2
,oke_config
k3s_config
andrke_config
(list maxitems:1)- eks
Config Property MapV2 The Amazon EKS V2 configuration to create or import
eks
Clusters. Conflicts withaks_config
,eks_config
,gke_config
,gke_config_v2
,oke_config
k3s_config
andrke_config
. For Rancher v2.5.x and above (list maxitems:1)- enable
Cluster BooleanAlerting Enable built-in cluster alerting (bool)
- enable
Cluster BooleanMonitoring Enable built-in cluster monitoring (bool)
- enable
Network BooleanPolicy Enable project network isolation (bool)
- fleet
Agent List<Property Map>Deployment Customizations Optional customization for fleet agent. For Rancher v2.7.5 and above (list)
- fleet
Workspace StringName Fleet workspace name (string)
- gke
Config Property Map The Google GKE configuration for
gke
Clusters. Conflicts withaks_config
,aks_config_v2
,eks_config
,eks_config_v2
,gke_config_v2
,oke_config
,k3s_config
andrke_config
(list maxitems:1)- gke
Config Property MapV2 The Google GKE V2 configuration for
gke
Clusters. Conflicts withaks_config
,aks_config_v2
,eks_config
,eks_config_v2
,gke_config
,oke_config
,k3s_config
andrke_config
. For Rancher v2.5.8 and above (list maxitems:1)- k3s
Config Property Map The K3S configuration for
k3s
imported Clusters. Conflicts withaks_config
,aks_config_v2
,eks_config
,eks_config_v2
,gke_config
,gke_config_v2
,oke_config
andrke_config
(list maxitems:1)- labels Map<Any>
Labels for the Cluster (map)
- name String
The name of the Cluster (string)
- oke
Config Property Map The Oracle OKE configuration for
oke
Clusters. Conflicts withaks_config
,aks_config_v2
,eks_config
,eks_config_v2
,gke_config
,gke_config_v2
,k3s_config
andrke_config
(list maxitems:1)- rke2Config Property Map
The RKE2 configuration for
rke2
Clusters. Conflicts withaks_config
,aks_config_v2
,eks_config
,gke_config
,oke_config
,k3s_config
andrke_config
(list maxitems:1)- rke
Config Property Map The RKE configuration for
rke
Clusters. Conflicts withaks_config
,aks_config_v2
,eks_config
,eks_config_v2
,gke_config
,gke_config_v2
,oke_config
andk3s_config
(list maxitems:1)- windows
Prefered BooleanCluster Windows preferred cluster. Default:
false
(bool)
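For example, rke_config can be combined with cluster_auth_endpoint so that kubectl reaches the Kubernetes API directly instead of going through the Rancher API proxy. The following Python sketch is illustrative only: the FQDN and CA certificate are placeholders, and the block names follow the arguments described above.
import pulumi_rancher2 as rancher2

# RKE cluster with the local authorized endpoint enabled (values are placeholders).
foo_rke = rancher2.Cluster("foo-rke",
    description="RKE cluster with a local authorized endpoint",
    rke_config=rancher2.ClusterRkeConfigArgs(
        network=rancher2.ClusterRkeConfigNetworkArgs(
            plugin="canal",
        ),
    ),
    cluster_auth_endpoint=rancher2.ClusterClusterAuthEndpointArgs(
        enabled=True,
        # Placeholder FQDN and CA bundle for the authorized endpoint.
        fqdn="rke.example.com:6443",
        ca_certs="-----BEGIN CERTIFICATE-----\n<placeholder>\n-----END CERTIFICATE-----",
    ))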
Outputs
All input properties are implicitly available as output properties. Additionally, the Cluster resource produces the following output properties:
- CaCert (string) - TLS CA certificate for etcd service (string)
- ClusterRegistrationToken (ClusterClusterRegistrationToken) - (Computed) Cluster Registration Token generated for the cluster (list maxitems:1)
- DefaultProjectId (string) - (Computed) Default project ID for the cluster (string)
- EnableClusterIstio (bool) - Deploy Istio on the system project and istio-system namespace. Deprecated: deploy Istio using the rancher2.App resource instead. See above example (bool)
- Id (string) - The provider-assigned unique ID for this managed resource.
- IstioEnabled (bool) - (Computed) Is Istio enabled at cluster? For Rancher v2.3.x and above (bool)
- KubeConfig (string) - (Computed/Sensitive) Kube Config generated for the cluster. Note: For Rancher 2.6.0 and above, when the cluster has cluster_auth_endpoint enabled, the kube_config will not be available until the cluster is connected (string)
- SystemProjectId (string) - (Computed) System project ID for the cluster (string)
- CaCert (string) - TLS CA certificate for etcd service (string)
- ClusterRegistrationToken (ClusterClusterRegistrationToken) - (Computed) Cluster Registration Token generated for the cluster (list maxitems:1)
- DefaultProjectId (string) - (Computed) Default project ID for the cluster (string)
- EnableClusterIstio (bool) - Deploy Istio on the system project and istio-system namespace. Deprecated: deploy Istio using the rancher2.App resource instead. See above example (bool)
- Id (string) - The provider-assigned unique ID for this managed resource.
- IstioEnabled (bool) - (Computed) Is Istio enabled at cluster? For Rancher v2.3.x and above (bool)
- KubeConfig (string) - (Computed/Sensitive) Kube Config generated for the cluster. Note: For Rancher 2.6.0 and above, when the cluster has cluster_auth_endpoint enabled, the kube_config will not be available until the cluster is connected (string)
- SystemProjectId (string) - (Computed) System project ID for the cluster (string)
- caCert (String) - TLS CA certificate for etcd service (string)
- clusterRegistrationToken (ClusterClusterRegistrationToken) - (Computed) Cluster Registration Token generated for the cluster (list maxitems:1)
- defaultProjectId (String) - (Computed) Default project ID for the cluster (string)
- enableClusterIstio (Boolean) - Deploy Istio on the system project and istio-system namespace. Deprecated: deploy Istio using the rancher2.App resource instead. See above example (bool)
- id (String) - The provider-assigned unique ID for this managed resource.
- istioEnabled (Boolean) - (Computed) Is Istio enabled at cluster? For Rancher v2.3.x and above (bool)
- kubeConfig (String) - (Computed/Sensitive) Kube Config generated for the cluster. Note: For Rancher 2.6.0 and above, when the cluster has cluster_auth_endpoint enabled, the kube_config will not be available until the cluster is connected (string)
- systemProjectId (String) - (Computed) System project ID for the cluster (string)
- caCert (string) - TLS CA certificate for etcd service (string)
- clusterRegistrationToken (ClusterClusterRegistrationToken) - (Computed) Cluster Registration Token generated for the cluster (list maxitems:1)
- defaultProjectId (string) - (Computed) Default project ID for the cluster (string)
- enableClusterIstio (boolean) - Deploy Istio on the system project and istio-system namespace. Deprecated: deploy Istio using the rancher2.App resource instead. See above example (bool)
- id (string) - The provider-assigned unique ID for this managed resource.
- istioEnabled (boolean) - (Computed) Is Istio enabled at cluster? For Rancher v2.3.x and above (bool)
- kubeConfig (string) - (Computed/Sensitive) Kube Config generated for the cluster. Note: For Rancher 2.6.0 and above, when the cluster has cluster_auth_endpoint enabled, the kube_config will not be available until the cluster is connected (string)
- systemProjectId (string) - (Computed) System project ID for the cluster (string)
- ca_cert (str) - TLS CA certificate for etcd service (string)
- cluster_registration_token (ClusterClusterRegistrationToken) - (Computed) Cluster Registration Token generated for the cluster (list maxitems:1)
- default_project_id (str) - (Computed) Default project ID for the cluster (string)
- enable_cluster_istio (bool) - Deploy Istio on the system project and istio-system namespace. Deprecated: deploy Istio using the rancher2.App resource instead. See above example (bool)
- id (str) - The provider-assigned unique ID for this managed resource.
- istio_enabled (bool) - (Computed) Is Istio enabled at cluster? For Rancher v2.3.x and above (bool)
- kube_config (str) - (Computed/Sensitive) Kube Config generated for the cluster. Note: For Rancher 2.6.0 and above, when the cluster has cluster_auth_endpoint enabled, the kube_config will not be available until the cluster is connected (string)
- system_project_id (str) - (Computed) System project ID for the cluster (string)
- caCert (String) - TLS CA certificate for etcd service (string)
- clusterRegistrationToken (Property Map) - (Computed) Cluster Registration Token generated for the cluster (list maxitems:1)
- defaultProjectId (String) - (Computed) Default project ID for the cluster (string)
- enableClusterIstio (Boolean) - Deploy Istio on the system project and istio-system namespace. Deprecated: deploy Istio using the rancher2.App resource instead. See above example (bool)
- id (String) - The provider-assigned unique ID for this managed resource.
- istioEnabled (Boolean) - (Computed) Is Istio enabled at cluster? For Rancher v2.3.x and above (bool)
- kubeConfig (String) - (Computed/Sensitive) Kube Config generated for the cluster. Note: For Rancher 2.6.0 and above, when the cluster has cluster_auth_endpoint enabled, the kube_config will not be available until the cluster is connected (string)
- systemProjectId (String) - (Computed) System project ID for the cluster (string)
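Because kube_config is sensitive and the project IDs are computed, a common pattern is to export them as stack outputs once the cluster exists. The Python sketch below is illustrative only: it assumes a cluster resource named foo defined elsewhere in the program, and the registration token field node_command is shown for illustration.
import pulumi
import pulumi_rancher2 as rancher2

foo = rancher2.Cluster("foo", description="Foo rancher2 cluster")

# kube_config is sensitive, so wrap it in a secret before exporting it.
pulumi.export("kubeConfig", pulumi.Output.secret(foo.kube_config))
pulumi.export("defaultProjectId", foo.default_project_id)
pulumi.export("systemProjectId", foo.system_project_id)
# The registration token carries the command used to register custom nodes.
pulumi.export("nodeCommand", foo.cluster_registration_token.apply(lambda token: token.node_command))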
Look up Existing Cluster Resource
Get an existing Cluster resource’s state with the given name, ID, and optional extra properties used to qualify the lookup.
public static get(name: string, id: Input<ID>, state?: ClusterState, opts?: CustomResourceOptions): Cluster
@staticmethod
def get(resource_name: str,
id: str,
opts: Optional[ResourceOptions] = None,
agent_env_vars: Optional[Sequence[ClusterAgentEnvVarArgs]] = None,
aks_config: Optional[ClusterAksConfigArgs] = None,
aks_config_v2: Optional[ClusterAksConfigV2Args] = None,
annotations: Optional[Mapping[str, Any]] = None,
ca_cert: Optional[str] = None,
cluster_agent_deployment_customizations: Optional[Sequence[ClusterClusterAgentDeploymentCustomizationArgs]] = None,
cluster_auth_endpoint: Optional[ClusterClusterAuthEndpointArgs] = None,
cluster_monitoring_input: Optional[ClusterClusterMonitoringInputArgs] = None,
cluster_registration_token: Optional[ClusterClusterRegistrationTokenArgs] = None,
cluster_template_answers: Optional[ClusterClusterTemplateAnswersArgs] = None,
cluster_template_id: Optional[str] = None,
cluster_template_questions: Optional[Sequence[ClusterClusterTemplateQuestionArgs]] = None,
cluster_template_revision_id: Optional[str] = None,
default_pod_security_admission_configuration_template_name: Optional[str] = None,
default_pod_security_policy_template_id: Optional[str] = None,
default_project_id: Optional[str] = None,
description: Optional[str] = None,
desired_agent_image: Optional[str] = None,
desired_auth_image: Optional[str] = None,
docker_root_dir: Optional[str] = None,
driver: Optional[str] = None,
eks_config: Optional[ClusterEksConfigArgs] = None,
eks_config_v2: Optional[ClusterEksConfigV2Args] = None,
enable_cluster_alerting: Optional[bool] = None,
enable_cluster_istio: Optional[bool] = None,
enable_cluster_monitoring: Optional[bool] = None,
enable_network_policy: Optional[bool] = None,
fleet_agent_deployment_customizations: Optional[Sequence[ClusterFleetAgentDeploymentCustomizationArgs]] = None,
fleet_workspace_name: Optional[str] = None,
gke_config: Optional[ClusterGkeConfigArgs] = None,
gke_config_v2: Optional[ClusterGkeConfigV2Args] = None,
istio_enabled: Optional[bool] = None,
k3s_config: Optional[ClusterK3sConfigArgs] = None,
kube_config: Optional[str] = None,
labels: Optional[Mapping[str, Any]] = None,
name: Optional[str] = None,
oke_config: Optional[ClusterOkeConfigArgs] = None,
rke2_config: Optional[ClusterRke2ConfigArgs] = None,
rke_config: Optional[ClusterRkeConfigArgs] = None,
system_project_id: Optional[str] = None,
windows_prefered_cluster: Optional[bool] = None) -> Cluster
func GetCluster(ctx *Context, name string, id IDInput, state *ClusterState, opts ...ResourceOption) (*Cluster, error)
public static Cluster Get(string name, Input<string> id, ClusterState? state, CustomResourceOptions? opts = null)
public static Cluster get(String name, Output<String> id, ClusterState state, CustomResourceOptions options)
Resource lookup is not supported in YAML
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- resource_name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
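As a minimal Python sketch of the lookup above, an existing cluster can be read back by its Rancher cluster ID; the ID shown here is a placeholder.
import pulumi
import pulumi_rancher2 as rancher2

# Look up an existing cluster by its Rancher ID (placeholder shown).
existing = rancher2.Cluster.get("existing-cluster", "c-xxxxx")

pulumi.export("existingClusterName", existing.name)
pulumi.export("existingClusterDriver", existing.driver)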
- AgentEnvVars (List<ClusterAgentEnvVar>) - Optional Agent Env Vars for Rancher agent. For Rancher v2.5.6 and above (list)
- AksConfig (ClusterAksConfig) - The Azure AKS configuration for aks Clusters. Conflicts with aks_config_v2, eks_config, eks_config_v2, gke_config, gke_config_v2, oke_config, k3s_config and rke_config (list maxitems:1)
- AksConfigV2 (ClusterAksConfigV2) - The Azure AKS v2 configuration for creating/importing aks Clusters. Conflicts with aks_config, eks_config, eks_config_v2, gke_config, gke_config_v2, oke_config, k3s_config and rke_config (list maxitems:1)
- Annotations (Dictionary<string, object>) - Annotations for the Cluster (map)
- CaCert (string) - TLS CA certificate for etcd service (string)
- ClusterAgentDeploymentCustomizations (List<ClusterClusterAgentDeploymentCustomization>) - Optional customization for cluster agent. For Rancher v2.7.5 and above (list)
- ClusterAuthEndpoint (ClusterClusterAuthEndpoint) - Enabling the local cluster authorized endpoint allows direct communication with the cluster, bypassing the Rancher API proxy (list maxitems:1)
- ClusterMonitoringInput (ClusterClusterMonitoringInput) - Cluster monitoring config. Any parameter defined in rancher-monitoring charts could be configured (list maxitems:1)
- ClusterRegistrationToken (ClusterClusterRegistrationToken) - (Computed) Cluster Registration Token generated for the cluster (list maxitems:1)
- ClusterTemplateAnswers (ClusterClusterTemplateAnswers) - Cluster template answers. For Rancher v2.3.x and above (list maxitems:1)
- ClusterTemplateId (string) - Cluster template ID. For Rancher v2.3.x and above (string)
- ClusterTemplateQuestions (List<ClusterClusterTemplateQuestion>) - Cluster template questions. For Rancher v2.3.x and above (list)
- ClusterTemplateRevisionId (string) - Cluster template revision ID. For Rancher v2.3.x and above (string)
- DefaultPodSecurityAdmissionConfigurationTemplateName (string) - Cluster default pod security admission configuration template name (string)
- DefaultPodSecurityPolicyTemplateId (string) - Default pod security policy template ID (string)
- DefaultProjectId (string) - (Computed) Default project ID for the cluster (string)
- Description (string) - The description for Cluster (string)
- DesiredAgentImage (string) - Desired agent image. For Rancher v2.3.x and above (string)
- DesiredAuthImage (string) - Desired auth image. For Rancher v2.3.x and above (string)
- DockerRootDir (string) - Docker root directory for cluster nodes (string)
- Driver (string) - (Computed) The driver used for the Cluster. imported, azurekubernetesservice, amazonelasticcontainerservice, googlekubernetesengine and rancherKubernetesEngine are supported (string)
- EksConfig (ClusterEksConfig) - The Amazon EKS configuration for eks Clusters. Conflicts with aks_config, aks_config_v2, eks_config_v2, gke_config, gke_config_v2, oke_config, k3s_config and rke_config (list maxitems:1)
- EksConfigV2 (ClusterEksConfigV2) - The Amazon EKS V2 configuration to create or import eks Clusters. Conflicts with aks_config, eks_config, gke_config, gke_config_v2, oke_config, k3s_config and rke_config. For Rancher v2.5.x and above (list maxitems:1)
- EnableClusterAlerting (bool) - Enable built-in cluster alerting (bool)
- EnableClusterIstio (bool) - Deploy Istio on the system project and istio-system namespace. Deprecated: deploy Istio using the rancher2.App resource instead. See above example (bool)
- EnableClusterMonitoring (bool) - Enable built-in cluster monitoring (bool)
- EnableNetworkPolicy (bool) - Enable project network isolation (bool)
- FleetAgentDeploymentCustomizations (List<ClusterFleetAgentDeploymentCustomization>) - Optional customization for fleet agent. For Rancher v2.7.5 and above (list)
- FleetWorkspaceName (string) - Fleet workspace name (string)
- GkeConfig (ClusterGkeConfig) - The Google GKE configuration for gke Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config_v2, oke_config, k3s_config and rke_config (list maxitems:1)
- GkeConfigV2 (ClusterGkeConfigV2) - The Google GKE V2 configuration for gke Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config, oke_config, k3s_config and rke_config. For Rancher v2.5.8 and above (list maxitems:1)
- IstioEnabled (bool) - (Computed) Is Istio enabled at cluster? For Rancher v2.3.x and above (bool)
- K3sConfig (ClusterK3sConfig) - The K3S configuration for k3s imported Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config, gke_config_v2, oke_config and rke_config (list maxitems:1)
- KubeConfig (string) - (Computed/Sensitive) Kube Config generated for the cluster. Note: For Rancher 2.6.0 and above, when the cluster has cluster_auth_endpoint enabled, the kube_config will not be available until the cluster is connected (string)
- Labels (Dictionary<string, object>) - Labels for the Cluster (map)
- Name (string) - The name of the Cluster (string)
- OkeConfig (ClusterOkeConfig) - The Oracle OKE configuration for oke Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config, gke_config_v2, k3s_config and rke_config (list maxitems:1)
- Rke2Config (ClusterRke2Config) - The RKE2 configuration for rke2 Clusters. Conflicts with aks_config, aks_config_v2, eks_config, gke_config, oke_config, k3s_config and rke_config (list maxitems:1)
- RkeConfig (ClusterRkeConfig) - The RKE configuration for rke Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config, gke_config_v2, oke_config and k3s_config (list maxitems:1)
- SystemProjectId (string) - (Computed) System project ID for the cluster (string)
- WindowsPreferedCluster (bool) - Windows preferred cluster. Default: false (bool)
- AgentEnvVars ([]ClusterAgentEnvVarArgs) - Optional Agent Env Vars for Rancher agent. For Rancher v2.5.6 and above (list)
- AksConfig (ClusterAksConfigArgs) - The Azure AKS configuration for aks Clusters. Conflicts with aks_config_v2, eks_config, eks_config_v2, gke_config, gke_config_v2, oke_config, k3s_config and rke_config (list maxitems:1)
- AksConfigV2 (ClusterAksConfigV2Args) - The Azure AKS v2 configuration for creating/importing aks Clusters. Conflicts with aks_config, eks_config, eks_config_v2, gke_config, gke_config_v2, oke_config, k3s_config and rke_config (list maxitems:1)
- Annotations (map[string]interface{}) - Annotations for the Cluster (map)
- CaCert (string) - TLS CA certificate for etcd service (string)
- ClusterAgentDeploymentCustomizations ([]ClusterClusterAgentDeploymentCustomizationArgs) - Optional customization for cluster agent. For Rancher v2.7.5 and above (list)
- ClusterAuthEndpoint (ClusterClusterAuthEndpointArgs) - Enabling the local cluster authorized endpoint allows direct communication with the cluster, bypassing the Rancher API proxy (list maxitems:1)
- ClusterMonitoringInput (ClusterClusterMonitoringInputArgs) - Cluster monitoring config. Any parameter defined in rancher-monitoring charts could be configured (list maxitems:1)
- ClusterRegistrationToken (ClusterClusterRegistrationTokenArgs) - (Computed) Cluster Registration Token generated for the cluster (list maxitems:1)
- ClusterTemplateAnswers (ClusterClusterTemplateAnswersArgs) - Cluster template answers. For Rancher v2.3.x and above (list maxitems:1)
- ClusterTemplateId (string) - Cluster template ID. For Rancher v2.3.x and above (string)
- ClusterTemplateQuestions ([]ClusterClusterTemplateQuestionArgs) - Cluster template questions. For Rancher v2.3.x and above (list)
- ClusterTemplateRevisionId (string) - Cluster template revision ID. For Rancher v2.3.x and above (string)
- DefaultPodSecurityAdmissionConfigurationTemplateName (string) - Cluster default pod security admission configuration template name (string)
- DefaultPodSecurityPolicyTemplateId (string) - Default pod security policy template ID (string)
- DefaultProjectId (string) - (Computed) Default project ID for the cluster (string)
- Description (string) - The description for Cluster (string)
- DesiredAgentImage (string) - Desired agent image. For Rancher v2.3.x and above (string)
- DesiredAuthImage (string) - Desired auth image. For Rancher v2.3.x and above (string)
- DockerRootDir (string) - Docker root directory for cluster nodes (string)
- Driver (string) - (Computed) The driver used for the Cluster. imported, azurekubernetesservice, amazonelasticcontainerservice, googlekubernetesengine and rancherKubernetesEngine are supported (string)
- EksConfig (ClusterEksConfigArgs) - The Amazon EKS configuration for eks Clusters. Conflicts with aks_config, aks_config_v2, eks_config_v2, gke_config, gke_config_v2, oke_config, k3s_config and rke_config (list maxitems:1)
- EksConfigV2 (ClusterEksConfigV2Args) - The Amazon EKS V2 configuration to create or import eks Clusters. Conflicts with aks_config, eks_config, gke_config, gke_config_v2, oke_config, k3s_config and rke_config. For Rancher v2.5.x and above (list maxitems:1)
- EnableClusterAlerting (bool) - Enable built-in cluster alerting (bool)
- EnableClusterIstio (bool) - Deploy Istio on the system project and istio-system namespace. Deprecated: deploy Istio using the rancher2.App resource instead. See above example (bool)
- EnableClusterMonitoring (bool) - Enable built-in cluster monitoring (bool)
- EnableNetworkPolicy (bool) - Enable project network isolation (bool)
- FleetAgentDeploymentCustomizations ([]ClusterFleetAgentDeploymentCustomizationArgs) - Optional customization for fleet agent. For Rancher v2.7.5 and above (list)
- FleetWorkspaceName (string) - Fleet workspace name (string)
- GkeConfig (ClusterGkeConfigArgs) - The Google GKE configuration for gke Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config_v2, oke_config, k3s_config and rke_config (list maxitems:1)
- GkeConfigV2 (ClusterGkeConfigV2Args) - The Google GKE V2 configuration for gke Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config, oke_config, k3s_config and rke_config. For Rancher v2.5.8 and above (list maxitems:1)
- IstioEnabled (bool) - (Computed) Is Istio enabled at cluster? For Rancher v2.3.x and above (bool)
- K3sConfig (ClusterK3sConfigArgs) - The K3S configuration for k3s imported Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config, gke_config_v2, oke_config and rke_config (list maxitems:1)
- KubeConfig (string) - (Computed/Sensitive) Kube Config generated for the cluster. Note: For Rancher 2.6.0 and above, when the cluster has cluster_auth_endpoint enabled, the kube_config will not be available until the cluster is connected (string)
- Labels (map[string]interface{}) - Labels for the Cluster (map)
- Name (string) - The name of the Cluster (string)
- OkeConfig (ClusterOkeConfigArgs) - The Oracle OKE configuration for oke Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config, gke_config_v2, k3s_config and rke_config (list maxitems:1)
- Rke2Config (ClusterRke2ConfigArgs) - The RKE2 configuration for rke2 Clusters. Conflicts with aks_config, aks_config_v2, eks_config, gke_config, oke_config, k3s_config and rke_config (list maxitems:1)
- RkeConfig (ClusterRkeConfigArgs) - The RKE configuration for rke Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config, gke_config_v2, oke_config and k3s_config (list maxitems:1)
- SystemProjectId (string) - (Computed) System project ID for the cluster (string)
- WindowsPreferedCluster (bool) - Windows preferred cluster. Default: false (bool)
- agentEnvVars (List<ClusterAgentEnvVar>) - Optional Agent Env Vars for Rancher agent. For Rancher v2.5.6 and above (list)
- aksConfig (ClusterAksConfig) - The Azure AKS configuration for aks Clusters. Conflicts with aks_config_v2, eks_config, eks_config_v2, gke_config, gke_config_v2, oke_config, k3s_config and rke_config (list maxitems:1)
- aksConfigV2 (ClusterAksConfigV2) - The Azure AKS v2 configuration for creating/importing aks Clusters. Conflicts with aks_config, eks_config, eks_config_v2, gke_config, gke_config_v2, oke_config, k3s_config and rke_config (list maxitems:1)
- annotations (Map<String,Object>) - Annotations for the Cluster (map)
- caCert (String) - TLS CA certificate for etcd service (string)
- clusterAgentDeploymentCustomizations (List<ClusterClusterAgentDeploymentCustomization>) - Optional customization for cluster agent. For Rancher v2.7.5 and above (list)
- clusterAuthEndpoint (ClusterClusterAuthEndpoint) - Enabling the local cluster authorized endpoint allows direct communication with the cluster, bypassing the Rancher API proxy (list maxitems:1)
- clusterMonitoringInput (ClusterClusterMonitoringInput) - Cluster monitoring config. Any parameter defined in rancher-monitoring charts could be configured (list maxitems:1)
- clusterRegistrationToken (ClusterClusterRegistrationToken) - (Computed) Cluster Registration Token generated for the cluster (list maxitems:1)
- clusterTemplateAnswers (ClusterClusterTemplateAnswers) - Cluster template answers. For Rancher v2.3.x and above (list maxitems:1)
- clusterTemplateId (String) - Cluster template ID. For Rancher v2.3.x and above (string)
- clusterTemplateQuestions (List<ClusterClusterTemplateQuestion>) - Cluster template questions. For Rancher v2.3.x and above (list)
- clusterTemplateRevisionId (String) - Cluster template revision ID. For Rancher v2.3.x and above (string)
- defaultPodSecurityAdmissionConfigurationTemplateName (String) - Cluster default pod security admission configuration template name (string)
- defaultPodSecurityPolicyTemplateId (String) - Default pod security policy template ID (string)
- defaultProjectId (String) - (Computed) Default project ID for the cluster (string)
- description (String) - The description for Cluster (string)
- desiredAgentImage (String) - Desired agent image. For Rancher v2.3.x and above (string)
- desiredAuthImage (String) - Desired auth image. For Rancher v2.3.x and above (string)
- dockerRootDir (String) - Docker root directory for cluster nodes (string)
- driver (String) - (Computed) The driver used for the Cluster. imported, azurekubernetesservice, amazonelasticcontainerservice, googlekubernetesengine and rancherKubernetesEngine are supported (string)
- eksConfig (ClusterEksConfig) - The Amazon EKS configuration for eks Clusters. Conflicts with aks_config, aks_config_v2, eks_config_v2, gke_config, gke_config_v2, oke_config, k3s_config and rke_config (list maxitems:1)
- eksConfigV2 (ClusterEksConfigV2) - The Amazon EKS V2 configuration to create or import eks Clusters. Conflicts with aks_config, eks_config, gke_config, gke_config_v2, oke_config, k3s_config and rke_config. For Rancher v2.5.x and above (list maxitems:1)
- enableClusterAlerting (Boolean) - Enable built-in cluster alerting (bool)
- enableClusterIstio (Boolean) - Deploy Istio on the system project and istio-system namespace. Deprecated: deploy Istio using the rancher2.App resource instead. See above example (bool)
- enableClusterMonitoring (Boolean) - Enable built-in cluster monitoring (bool)
- enableNetworkPolicy (Boolean) - Enable project network isolation (bool)
- fleetAgentDeploymentCustomizations (List<ClusterFleetAgentDeploymentCustomization>) - Optional customization for fleet agent. For Rancher v2.7.5 and above (list)
- fleetWorkspaceName (String) - Fleet workspace name (string)
- gkeConfig (ClusterGkeConfig) - The Google GKE configuration for gke Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config_v2, oke_config, k3s_config and rke_config (list maxitems:1)
- gkeConfigV2 (ClusterGkeConfigV2) - The Google GKE V2 configuration for gke Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config, oke_config, k3s_config and rke_config. For Rancher v2.5.8 and above (list maxitems:1)
- istioEnabled (Boolean) - (Computed) Is Istio enabled at cluster? For Rancher v2.3.x and above (bool)
- k3sConfig (ClusterK3sConfig) - The K3S configuration for k3s imported Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config, gke_config_v2, oke_config and rke_config (list maxitems:1)
- kubeConfig (String) - (Computed/Sensitive) Kube Config generated for the cluster. Note: For Rancher 2.6.0 and above, when the cluster has cluster_auth_endpoint enabled, the kube_config will not be available until the cluster is connected (string)
- labels (Map<String,Object>) - Labels for the Cluster (map)
- name (String) - The name of the Cluster (string)
- okeConfig (ClusterOkeConfig) - The Oracle OKE configuration for oke Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config, gke_config_v2, k3s_config and rke_config (list maxitems:1)
- rke2Config (ClusterRke2Config) - The RKE2 configuration for rke2 Clusters. Conflicts with aks_config, aks_config_v2, eks_config, gke_config, oke_config, k3s_config and rke_config (list maxitems:1)
- rkeConfig (ClusterRkeConfig) - The RKE configuration for rke Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config, gke_config_v2, oke_config and k3s_config (list maxitems:1)
- systemProjectId (String) - (Computed) System project ID for the cluster (string)
- windowsPreferedCluster (Boolean) - Windows preferred cluster. Default: false (bool)
- agentEnvVars (ClusterAgentEnvVar[]) - Optional Agent Env Vars for Rancher agent. For Rancher v2.5.6 and above (list)
- aksConfig (ClusterAksConfig) - The Azure AKS configuration for aks Clusters. Conflicts with aks_config_v2, eks_config, eks_config_v2, gke_config, gke_config_v2, oke_config, k3s_config and rke_config (list maxitems:1)
- aksConfigV2 (ClusterAksConfigV2) - The Azure AKS v2 configuration for creating/importing aks Clusters. Conflicts with aks_config, eks_config, eks_config_v2, gke_config, gke_config_v2, oke_config, k3s_config and rke_config (list maxitems:1)
- annotations ({[key: string]: any}) - Annotations for the Cluster (map)
- caCert (string) - TLS CA certificate for etcd service (string)
- clusterAgentDeploymentCustomizations (ClusterClusterAgentDeploymentCustomization[]) - Optional customization for cluster agent. For Rancher v2.7.5 and above (list)
- clusterAuthEndpoint (ClusterClusterAuthEndpoint) - Enabling the local cluster authorized endpoint allows direct communication with the cluster, bypassing the Rancher API proxy (list maxitems:1)
- clusterMonitoringInput (ClusterClusterMonitoringInput) - Cluster monitoring config. Any parameter defined in rancher-monitoring charts could be configured (list maxitems:1)
- clusterRegistrationToken (ClusterClusterRegistrationToken) - (Computed) Cluster Registration Token generated for the cluster (list maxitems:1)
- clusterTemplateAnswers (ClusterClusterTemplateAnswers) - Cluster template answers. For Rancher v2.3.x and above (list maxitems:1)
- clusterTemplateId (string) - Cluster template ID. For Rancher v2.3.x and above (string)
- clusterTemplateQuestions (ClusterClusterTemplateQuestion[]) - Cluster template questions. For Rancher v2.3.x and above (list)
- clusterTemplateRevisionId (string) - Cluster template revision ID. For Rancher v2.3.x and above (string)
- defaultPodSecurityAdmissionConfigurationTemplateName (string) - Cluster default pod security admission configuration template name (string)
- defaultPodSecurityPolicyTemplateId (string) - Default pod security policy template ID (string)
- defaultProjectId (string) - (Computed) Default project ID for the cluster (string)
- description (string) - The description for Cluster (string)
- desiredAgentImage (string) - Desired agent image. For Rancher v2.3.x and above (string)
- desiredAuthImage (string) - Desired auth image. For Rancher v2.3.x and above (string)
- dockerRootDir (string) - Docker root directory for cluster nodes (string)
- driver (string) - (Computed) The driver used for the Cluster. imported, azurekubernetesservice, amazonelasticcontainerservice, googlekubernetesengine and rancherKubernetesEngine are supported (string)
- eksConfig (ClusterEksConfig) - The Amazon EKS configuration for eks Clusters. Conflicts with aks_config, aks_config_v2, eks_config_v2, gke_config, gke_config_v2, oke_config, k3s_config and rke_config (list maxitems:1)
- eksConfigV2 (ClusterEksConfigV2) - The Amazon EKS V2 configuration to create or import eks Clusters. Conflicts with aks_config, eks_config, gke_config, gke_config_v2, oke_config, k3s_config and rke_config. For Rancher v2.5.x and above (list maxitems:1)
- enableClusterAlerting (boolean) - Enable built-in cluster alerting (bool)
- enableClusterIstio (boolean) - Deploy Istio on the system project and istio-system namespace. Deprecated: deploy Istio using the rancher2.App resource instead. See above example (bool)
- enableClusterMonitoring (boolean) - Enable built-in cluster monitoring (bool)
- enableNetworkPolicy (boolean) - Enable project network isolation (bool)
- fleetAgentDeploymentCustomizations (ClusterFleetAgentDeploymentCustomization[]) - Optional customization for fleet agent. For Rancher v2.7.5 and above (list)
- fleetWorkspaceName (string) - Fleet workspace name (string)
- gkeConfig (ClusterGkeConfig) - The Google GKE configuration for gke Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config_v2, oke_config, k3s_config and rke_config (list maxitems:1)
- gkeConfigV2 (ClusterGkeConfigV2) - The Google GKE V2 configuration for gke Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config, oke_config, k3s_config and rke_config. For Rancher v2.5.8 and above (list maxitems:1)
- istioEnabled (boolean) - (Computed) Is Istio enabled at cluster? For Rancher v2.3.x and above (bool)
- k3sConfig (ClusterK3sConfig) - The K3S configuration for k3s imported Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config, gke_config_v2, oke_config and rke_config (list maxitems:1)
- kubeConfig (string) - (Computed/Sensitive) Kube Config generated for the cluster. Note: For Rancher 2.6.0 and above, when the cluster has cluster_auth_endpoint enabled, the kube_config will not be available until the cluster is connected (string)
- labels ({[key: string]: any}) - Labels for the Cluster (map)
- name (string) - The name of the Cluster (string)
- okeConfig (ClusterOkeConfig) - The Oracle OKE configuration for oke Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config, gke_config_v2, k3s_config and rke_config (list maxitems:1)
- rke2Config (ClusterRke2Config) - The RKE2 configuration for rke2 Clusters. Conflicts with aks_config, aks_config_v2, eks_config, gke_config, oke_config, k3s_config and rke_config (list maxitems:1)
- rkeConfig (ClusterRkeConfig) - The RKE configuration for rke Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config, gke_config_v2, oke_config and k3s_config (list maxitems:1)
- systemProjectId (string) - (Computed) System project ID for the cluster (string)
- windowsPreferedCluster (boolean) - Windows preferred cluster. Default: false (bool)
- agent_env_vars (Sequence[ClusterAgentEnvVarArgs]) - Optional Agent Env Vars for Rancher agent. For Rancher v2.5.6 and above (list)
- aks_config (ClusterAksConfigArgs) - The Azure AKS configuration for aks Clusters. Conflicts with aks_config_v2, eks_config, eks_config_v2, gke_config, gke_config_v2, oke_config, k3s_config and rke_config (list maxitems:1)
- aks_config_v2 (ClusterAksConfigV2Args) - The Azure AKS v2 configuration for creating/importing aks Clusters. Conflicts with aks_config, eks_config, eks_config_v2, gke_config, gke_config_v2, oke_config, k3s_config and rke_config (list maxitems:1)
- annotations (Mapping[str, Any]) - Annotations for the Cluster (map)
- ca_cert (str) - TLS CA certificate for etcd service (string)
- cluster_agent_deployment_customizations (Sequence[ClusterClusterAgentDeploymentCustomizationArgs]) - Optional customization for cluster agent. For Rancher v2.7.5 and above (list)
- cluster_auth_endpoint (ClusterClusterAuthEndpointArgs) - Enabling the local cluster authorized endpoint allows direct communication with the cluster, bypassing the Rancher API proxy (list maxitems:1)
- cluster_monitoring_input (ClusterClusterMonitoringInputArgs) - Cluster monitoring config. Any parameter defined in rancher-monitoring charts could be configured (list maxitems:1)
- cluster_registration_token (ClusterClusterRegistrationTokenArgs) - (Computed) Cluster Registration Token generated for the cluster (list maxitems:1)
- cluster_template_answers (ClusterClusterTemplateAnswersArgs) - Cluster template answers. For Rancher v2.3.x and above (list maxitems:1)
- cluster_template_id (str) - Cluster template ID. For Rancher v2.3.x and above (string)
- cluster_template_questions (Sequence[ClusterClusterTemplateQuestionArgs]) - Cluster template questions. For Rancher v2.3.x and above (list)
- cluster_template_revision_id (str) - Cluster template revision ID. For Rancher v2.3.x and above (string)
- default_pod_security_admission_configuration_template_name (str) - Cluster default pod security admission configuration template name (string)
- default_pod_security_policy_template_id (str) - Default pod security policy template ID (string)
- default_project_id (str) - (Computed) Default project ID for the cluster (string)
- description (str) - The description for Cluster (string)
- desired_agent_image (str) - Desired agent image. For Rancher v2.3.x and above (string)
- desired_auth_image (str) - Desired auth image. For Rancher v2.3.x and above (string)
- docker_root_dir (str) - Docker root directory for cluster nodes (string)
- driver (str) - (Computed) The driver used for the Cluster. imported, azurekubernetesservice, amazonelasticcontainerservice, googlekubernetesengine and rancherKubernetesEngine are supported (string)
- eks_config (ClusterEksConfigArgs) - The Amazon EKS configuration for eks Clusters. Conflicts with aks_config, aks_config_v2, eks_config_v2, gke_config, gke_config_v2, oke_config, k3s_config and rke_config (list maxitems:1)
- eks_config_v2 (ClusterEksConfigV2Args) - The Amazon EKS V2 configuration to create or import eks Clusters. Conflicts with aks_config, eks_config, gke_config, gke_config_v2, oke_config, k3s_config and rke_config. For Rancher v2.5.x and above (list maxitems:1)
- enable_cluster_alerting (bool) - Enable built-in cluster alerting (bool)
- enable_cluster_istio (bool) - Deploy Istio on the system project and istio-system namespace. Deprecated: deploy Istio using the rancher2.App resource instead. See above example (bool)
- enable_cluster_monitoring (bool) - Enable built-in cluster monitoring (bool)
- enable_network_policy (bool) - Enable project network isolation (bool)
- fleet_agent_deployment_customizations (Sequence[ClusterFleetAgentDeploymentCustomizationArgs]) - Optional customization for fleet agent. For Rancher v2.7.5 and above (list)
- fleet_workspace_name (str) - Fleet workspace name (string)
- gke_config (ClusterGkeConfigArgs) - The Google GKE configuration for gke Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config_v2, oke_config, k3s_config and rke_config (list maxitems:1)
- gke_config_v2 (ClusterGkeConfigV2Args) - The Google GKE V2 configuration for gke Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config, oke_config, k3s_config and rke_config. For Rancher v2.5.8 and above (list maxitems:1)
- istio_enabled (bool) - (Computed) Is Istio enabled at cluster? For Rancher v2.3.x and above (bool)
- k3s_config (ClusterK3sConfigArgs) - The K3S configuration for k3s imported Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config, gke_config_v2, oke_config and rke_config (list maxitems:1)
- kube_config (str) - (Computed/Sensitive) Kube Config generated for the cluster. Note: For Rancher 2.6.0 and above, when the cluster has cluster_auth_endpoint enabled, the kube_config will not be available until the cluster is connected (string)
- labels (Mapping[str, Any]) - Labels for the Cluster (map)
- name (str) - The name of the Cluster (string)
- oke_config (ClusterOkeConfigArgs) - The Oracle OKE configuration for oke Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config, gke_config_v2, k3s_config and rke_config (list maxitems:1)
- rke2_config (ClusterRke2ConfigArgs) - The RKE2 configuration for rke2 Clusters. Conflicts with aks_config, aks_config_v2, eks_config, gke_config, oke_config, k3s_config and rke_config (list maxitems:1)
- rke_config (ClusterRkeConfigArgs) - The RKE configuration for rke Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config, gke_config_v2, oke_config and k3s_config (list maxitems:1)
- system_project_id (str) - (Computed) System project ID for the cluster (string)
- windows_prefered_cluster (bool) - Windows preferred cluster. Default: false (bool)
- agentEnvVars (List<Property Map>) - Optional Agent Env Vars for Rancher agent. For Rancher v2.5.6 and above (list)
- aksConfig (Property Map) - The Azure AKS configuration for aks Clusters. Conflicts with aks_config_v2, eks_config, eks_config_v2, gke_config, gke_config_v2, oke_config, k3s_config and rke_config (list maxitems:1)
- aksConfigV2 (Property Map) - The Azure AKS v2 configuration for creating/importing aks Clusters. Conflicts with aks_config, eks_config, eks_config_v2, gke_config, gke_config_v2, oke_config, k3s_config and rke_config (list maxitems:1)
- annotations (Map<Any>) - Annotations for the Cluster (map)
- caCert (String) - TLS CA certificate for etcd service (string)
- clusterAgentDeploymentCustomizations (List<Property Map>) - Optional customization for cluster agent. For Rancher v2.7.5 and above (list)
- clusterAuthEndpoint (Property Map) - Enabling the local cluster authorized endpoint allows direct communication with the cluster, bypassing the Rancher API proxy (list maxitems:1)
- clusterMonitoringInput (Property Map) - Cluster monitoring config. Any parameter defined in rancher-monitoring charts could be configured (list maxitems:1)
- clusterRegistrationToken (Property Map) - (Computed) Cluster Registration Token generated for the cluster (list maxitems:1)
- clusterTemplateAnswers (Property Map) - Cluster template answers. For Rancher v2.3.x and above (list maxitems:1)
- clusterTemplateId (String) - Cluster template ID. For Rancher v2.3.x and above (string)
- clusterTemplateQuestions (List<Property Map>) - Cluster template questions. For Rancher v2.3.x and above (list)
- clusterTemplateRevisionId (String) - Cluster template revision ID. For Rancher v2.3.x and above (string)
- defaultPodSecurityAdmissionConfigurationTemplateName (String) - Cluster default pod security admission configuration template name (string)
- defaultPodSecurityPolicyTemplateId (String) - Default pod security policy template ID (string)
- defaultProjectId (String) - (Computed) Default project ID for the cluster (string)
- description (String) - The description for Cluster (string)
- desiredAgentImage (String) - Desired agent image. For Rancher v2.3.x and above (string)
- desiredAuthImage (String) - Desired auth image. For Rancher v2.3.x and above (string)
- dockerRootDir (String) - Docker root directory for cluster nodes (string)
- driver (String) - (Computed) The driver used for the Cluster. imported, azurekubernetesservice, amazonelasticcontainerservice, googlekubernetesengine and rancherKubernetesEngine are supported (string)
- eksConfig (Property Map) - The Amazon EKS configuration for eks Clusters. Conflicts with aks_config, aks_config_v2, eks_config_v2, gke_config, gke_config_v2, oke_config, k3s_config and rke_config (list maxitems:1)
- eksConfigV2 (Property Map) - The Amazon EKS V2 configuration to create or import eks Clusters. Conflicts with aks_config, eks_config, gke_config, gke_config_v2, oke_config, k3s_config and rke_config. For Rancher v2.5.x and above (list maxitems:1)
- enableClusterAlerting (Boolean) - Enable built-in cluster alerting (bool)
- enableClusterIstio (Boolean) - Deploy Istio on the system project and istio-system namespace. Deprecated: deploy Istio using the rancher2.App resource instead. See above example (bool)
- enableClusterMonitoring (Boolean) - Enable built-in cluster monitoring (bool)
- enableNetworkPolicy (Boolean) - Enable project network isolation (bool)
- fleetAgentDeploymentCustomizations (List<Property Map>) - Optional customization for fleet agent. For Rancher v2.7.5 and above (list)
- fleetWorkspaceName (String) - Fleet workspace name (string)
- gkeConfig (Property Map) - The Google GKE configuration for gke Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config_v2, oke_config, k3s_config and rke_config (list maxitems:1)
- gkeConfigV2 (Property Map) - The Google GKE V2 configuration for gke Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config, oke_config, k3s_config and rke_config. For Rancher v2.5.8 and above (list maxitems:1)
- istioEnabled (Boolean) - (Computed) Is Istio enabled at cluster? For Rancher v2.3.x and above (bool)
- k3sConfig (Property Map) - The K3S configuration for k3s imported Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config, gke_config_v2, oke_config and rke_config (list maxitems:1)
- kubeConfig (String) - (Computed/Sensitive) Kube Config generated for the cluster. Note: For Rancher 2.6.0 and above, when the cluster has cluster_auth_endpoint enabled, the kube_config will not be available until the cluster is connected (string)
- labels (Map<Any>) - Labels for the Cluster (map)
- name (String) - The name of the Cluster (string)
- okeConfig (Property Map) - The Oracle OKE configuration for oke Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config, gke_config_v2, k3s_config and rke_config (list maxitems:1)
- rke2Config (Property Map) - The RKE2 configuration for rke2 Clusters. Conflicts with aks_config, aks_config_v2, eks_config, gke_config, oke_config, k3s_config and rke_config (list maxitems:1)
- rkeConfig (Property Map) - The RKE configuration for rke Clusters. Conflicts with aks_config, aks_config_v2, eks_config, eks_config_v2, gke_config, gke_config_v2, oke_config and k3s_config (list maxitems:1)
- systemProjectId (String) - (Computed) System project ID for the cluster (string)
- windowsPreferedCluster (Boolean) - Windows preferred cluster. Default: false (bool)
Supporting Types
ClusterAgentEnvVar, ClusterAgentEnvVarArgs
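A minimal Python sketch of passing extra environment variables to the Rancher agent (Rancher v2.5.6 and above), assuming each entry takes a name and a value; the proxy settings shown are placeholders.
import pulumi_rancher2 as rancher2

# Pass extra environment variables to the Rancher agent.
foo = rancher2.Cluster("foo",
    description="Cluster with custom agent env vars",
    agent_env_vars=[
        rancher2.ClusterAgentEnvVarArgs(
            name="HTTP_PROXY",
            value="http://proxy.example.com:8080",  # placeholder proxy
        ),
        rancher2.ClusterAgentEnvVarArgs(
            name="NO_PROXY",
            value="localhost,127.0.0.1,.svc,.cluster.local",
        ),
    ])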
ClusterAksConfig, ClusterAksConfigArgs
- AgentDnsPrefix (string) - DNS prefix to be used to create the FQDN for the agent pool (string)
- ClientId (string) - Azure client ID to use (string)
- ClientSecret (string) - Azure client secret associated with the "client id" (string)
- KubernetesVersion (string) - The Kubernetes version that will be used for your master and worker nodes (string)
- MasterDnsPrefix (string) - DNS prefix to use for the Kubernetes cluster control plane (string)
- ResourceGroup (string) - The AKS resource group (string)
- SshPublicKeyContents (string) - Contents of the SSH public key used to authenticate with Linux hosts (string)
- Subnet (string) - The AKS subnet (string)
- SubscriptionId (string) - Subscription credentials which uniquely identify the Microsoft Azure subscription (string)
- TenantId (string) - Azure tenant ID to use (string)
- VirtualNetwork (string) - The name of the virtual network to use. If it's not specified, Rancher will create a new VPC (string)
- VirtualNetworkResourceGroup (string) - The AKS virtual network resource group (string)
- AadServerAppSecret (string) - The secret of an Azure Active Directory server application (string)
- AadTenantId (string) - The ID of an Azure Active Directory tenant (string)
- AddClientAppId (string) - The ID of an Azure Active Directory client application of type "Native". This application is for user login via kubectl (string)
- AddServerAppId (string) - The ID of an Azure Active Directory server application of type "Web app/API". This application represents the managed cluster's apiserver (Server application) (string)
- AdminUsername (string) - The administrator username to use for Linux hosts. Default azureuser (string)
- AgentOsDiskSize (int) - GB size to be used to specify the disk for every machine in the agent pool. If you specify 0, it will apply the default according to the "agent vm size" specified. Default 0 (int)
- AgentPoolName (string) - Name for the agent pool, up to 12 alphanumeric characters. Default agentpool0 (string)
- AgentStorageProfile (string) - Storage profile specifies what kind of storage is used on machines in the agent pool. Choose from [ManagedDisks, StorageAccount]. Default ManagedDisks (string)
- AgentVmSize (string) - Size of machine in the agent pool. Default Standard_D1_v2 (string)
- AuthBaseUrl (string) - The AKS auth base url (string)
- BaseUrl (string) - The AKS base url (string)
- Count (int) - The AKS node pool count. Default: 1 (int)
- DnsServiceIp (string) - An IP address assigned to the Kubernetes DNS service. It must be within the Kubernetes Service address range specified in "service cidr". Default 10.0.0.10 (string)
- DockerBridgeCidr (string) - A CIDR notation IP range assigned to the Docker bridge network. It must not overlap with any Subnet IP ranges or the Kubernetes Service address range specified in "service cidr". Default 172.17.0.1/16 (string)
- EnableHttpApplicationRouting (bool) - Enable the Kubernetes ingress with automatic public DNS name creation. Default false (bool)
- EnableMonitoring (bool) - Turn on Azure Log Analytics monitoring. Uses the Log Analytics "Default" workspace if it exists, else creates one. If using an existing workspace, specify "log analytics workspace resource id". Default true (bool)
- LoadBalancerSku (string) - The AKS load balancer sku (string)
- Location (string) - Azure Kubernetes cluster location. Default eastus (string)
- LogAnalyticsWorkspace (string) - The name of an existing Azure Log Analytics Workspace to use for storing monitoring data. If not specified, uses '{resource group}-{subscription id}-{location code}' (string)
- LogAnalyticsWorkspaceResourceGroup (string) - The resource group of an existing Azure Log Analytics Workspace to use for storing monitoring data. If not specified, uses the 'Cluster' resource group (string)
- MaxPods (int) - The AKS node pool max pods. Default: 110 (int)
- NetworkPlugin (string) - The AKS network plugin. Required if imported=false (string)
- NetworkPolicy (string) - The AKS network policy (string)
- PodCidr (string) - A CIDR IP range from which to assign Kubernetes Pod IPs (string)
- ServiceCidr (string) - A CIDR IP range from which to assign Kubernetes Service IPs (string)
- Tag (Dictionary<string, object>) - Deprecated: use the tags argument instead, as []string
- Tags (List<string>) - The node config tags (List)
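The arguments above make up the aks_config block. The following Python sketch is illustrative only: every name, version and credential shown is a placeholder, and the secrets are read from Pulumi config rather than being hard-coded.
import pulumi
import pulumi_rancher2 as rancher2

config = pulumi.Config()

# AKS cluster managed through Rancher; every value below is a placeholder.
foo_aks = rancher2.Cluster("foo-aks",
    description="AKS cluster",
    aks_config=rancher2.ClusterAksConfigArgs(
        agent_dns_prefix="fooagent",
        client_id=config.require("azureClientId"),
        client_secret=config.require_secret("azureClientSecret"),
        kubernetes_version="1.27.9",  # placeholder version
        master_dns_prefix="foomaster",
        resource_group="foo-resources",
        ssh_public_key_contents=config.require("sshPublicKey"),
        subnet="foo-subnet",
        subscription_id=config.require("azureSubscriptionId"),
        tenant_id=config.require("azureTenantId"),
        virtual_network="foo-vnet",
        virtual_network_resource_group="foo-network-resources",
    ))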
- AgentDnsPrefix (string) - DNS prefix to be used to create the FQDN for the agent pool (string)
- ClientId (string) - Azure client ID to use (string)
- ClientSecret (string) - Azure client secret associated with the "client id" (string)
- KubernetesVersion (string) - The Kubernetes version that will be used for your master and worker nodes (string)
- MasterDnsPrefix (string) - DNS prefix to use for the Kubernetes cluster control plane (string)
- ResourceGroup (string) - The AKS resource group (string)
- SshPublicKeyContents (string) - Contents of the SSH public key used to authenticate with Linux hosts (string)
- Subnet (string) - The AKS subnet (string)
- SubscriptionId (string) - Subscription credentials which uniquely identify the Microsoft Azure subscription (string)
- TenantId (string) - Azure tenant ID to use (string)
- VirtualNetwork (string) - The name of the virtual network to use. If it's not specified, Rancher will create a new VPC (string)
- VirtualNetworkResourceGroup (string) - The AKS virtual network resource group (string)
- AadServerAppSecret (string) - The secret of an Azure Active Directory server application (string)
- AadTenantId (string) - The ID of an Azure Active Directory tenant (string)
- AddClientAppId (string) - The ID of an Azure Active Directory client application of type "Native". This application is for user login via kubectl (string)
- AddServerAppId (string) - The ID of an Azure Active Directory server application of type "Web app/API". This application represents the managed cluster's apiserver (Server application) (string)
- AdminUsername (string) - The administrator username to use for Linux hosts. Default azureuser (string)
- AgentOsDiskSize (int) - GB size to be used to specify the disk for every machine in the agent pool. If you specify 0, it will apply the default according to the "agent vm size" specified. Default 0 (int)
- AgentPoolName (string) - Name for the agent pool, up to 12 alphanumeric characters. Default agentpool0 (string)
- AgentStorageProfile (string) - Storage profile specifies what kind of storage is used on machines in the agent pool. Choose from [ManagedDisks, StorageAccount]. Default ManagedDisks (string)
- AgentVmSize (string) - Size of machine in the agent pool. Default Standard_D1_v2 (string)
- AuthBaseUrl (string) - The AKS auth base url (string)
- BaseUrl (string) - The AKS base url (string)
- Count (int) - The AKS node pool count. Default: 1 (int)
- DnsServiceIp (string) - An IP address assigned to the Kubernetes DNS service. It must be within the Kubernetes Service address range specified in "service cidr". Default 10.0.0.10 (string)
- DockerBridgeCidr (string) - A CIDR notation IP range assigned to the Docker bridge network. It must not overlap with any Subnet IP ranges or the Kubernetes Service address range specified in "service cidr". Default 172.17.0.1/16 (string)
- EnableHttpApplicationRouting (bool) - Enable the Kubernetes ingress with automatic public DNS name creation. Default false (bool)
- EnableMonitoring (bool) - Turn on Azure Log Analytics monitoring. Uses the Log Analytics "Default" workspace if it exists, else creates one. If using an existing workspace, specify "log analytics workspace resource id". Default true (bool)
- LoadBalancerSku (string) - The AKS load balancer sku (string)
- Location (string) - Azure Kubernetes cluster location. Default eastus (string)
- LogAnalyticsWorkspace (string) - The name of an existing Azure Log Analytics Workspace to use for storing monitoring data. If not specified, uses '{resource group}-{subscription id}-{location code}' (string)
- LogAnalyticsWorkspaceResourceGroup (string) - The resource group of an existing Azure Log Analytics Workspace to use for storing monitoring data. If not specified, uses the 'Cluster' resource group (string)
- MaxPods (int) - The AKS node pool max pods. Default: 110 (int)
- NetworkPlugin (string) - The AKS network plugin. Required if imported=false (string)
- NetworkPolicy (string) - The AKS network policy (string)
- PodCidr (string) - A CIDR IP range from which to assign Kubernetes Pod IPs (string)
- ServiceCidr (string) - A CIDR IP range from which to assign Kubernetes Service IPs (string)
- Tag (map[string]interface{}) - Deprecated: use the tags argument instead, as []string
- Tags ([]string) - The node config tags (List)
- agentDnsPrefix String: DNS prefix to be used to create the FQDN for the agent pool (string)
- clientId String: Azure client ID to use (string)
- clientSecret String: Azure client secret associated with the "client id" (string)
- kubernetesVersion String: The Kubernetes version that will be used for your master and worker nodes (string)
- masterDnsPrefix String: DNS prefix to use for the Kubernetes cluster control plane (string)
- resourceGroup String: The AKS resource group (string)
- sshPublicKeyContents String: Contents of the SSH public key used to authenticate with Linux hosts (string)
- subnet String: The AKS subnet (string)
- subscriptionId String: Subscription credentials which uniquely identify the Microsoft Azure subscription (string)
- tenantId String: Azure tenant ID to use (string)
- virtualNetwork String: The name of the virtual network to use. If it's not specified Rancher will create a new VPC (string)
- virtualNetworkResourceGroup String: The AKS virtual network resource group (string)
- aadServerAppSecret String: The secret of an Azure Active Directory server application (string)
- aadTenantId String: The ID of an Azure Active Directory tenant (string)
- addClientAppId String: The ID of an Azure Active Directory client application of type "Native". This application is for user login via kubectl (string)
- addServerAppId String: The ID of an Azure Active Directory server application of type "Web app/API". This application represents the managed cluster's apiserver (Server application) (string)
- adminUsername String: The administrator username to use for Linux hosts. Default: azureuser (string)
- agentOsDiskSize Integer: Disk size in GB for every machine in the agent pool. If you specify 0, the default for the given "agent vm size" is applied. Default: 0 (int)
- agentPoolName String: Name for the agent pool, up to 12 alphanumeric characters. Default: agentpool0 (string)
- agentStorageProfile String: Storage profile specifies what kind of storage is used on machines in the agent pool. Choose from [ManagedDisks, StorageAccount]. Default: ManagedDisks (string)
- agentVmSize String: Size of machine in the agent pool. Default: Standard_D1_v2 (string)
- authBaseUrl String: The AKS auth base url (string)
- baseUrl String: The AKS base url (string)
- count Integer: The AKS node pool count. Default: 1 (int)
- dnsServiceIp String: An IP address assigned to the Kubernetes DNS service. It must be within the Kubernetes Service address range specified in "service cidr". Default: 10.0.0.10 (string)
- dockerBridgeCidr String: A CIDR notation IP range assigned to the Docker bridge network. It must not overlap with any Subnet IP ranges or the Kubernetes Service address range specified in "service cidr". Default: 172.17.0.1/16 (string)
- enableHttpApplicationRouting Boolean: Enable the Kubernetes ingress with automatic public DNS name creation. Default: false (bool)
- enableMonitoring Boolean: Turn on Azure Log Analytics monitoring. Uses the Log Analytics "Default" workspace if it exists, else creates one. If using an existing workspace, specify "log analytics workspace resource id". Default: true (bool)
- loadBalancerSku String: The AKS load balancer sku (string)
- location String: Azure Kubernetes cluster location. Default: eastus (string)
- logAnalyticsWorkspace String: The name of an existing Azure Log Analytics Workspace to use for storing monitoring data. If not specified, uses '{resource group}-{subscription id}-{location code}' (string)
- logAnalyticsWorkspaceResourceGroup String: The resource group of an existing Azure Log Analytics Workspace to use for storing monitoring data. If not specified, uses the 'Cluster' resource group (string)
- maxPods Integer: The AKS node pool max pods. Default: 110 (int)
- networkPlugin String: The AKS network plugin. Required if imported=false (string)
- networkPolicy String: The AKS network policy (string)
- podCidr String: A CIDR IP range from which to assign Kubernetes Pod IPs (string)
- serviceCidr String: A CIDR IP range from which to assign Kubernetes Service IPs (string)
- tag Map<String,Object>: Deprecated. Use the tags argument instead as []string
- tags List<String>: The AKS cluster tags (list)
- agentDnsPrefix string: DNS prefix to be used to create the FQDN for the agent pool (string)
- clientId string: Azure client ID to use (string)
- clientSecret string: Azure client secret associated with the "client id" (string)
- kubernetesVersion string: The Kubernetes version that will be used for your master and worker nodes (string)
- masterDnsPrefix string: DNS prefix to use for the Kubernetes cluster control plane (string)
- resourceGroup string: The AKS resource group (string)
- sshPublicKeyContents string: Contents of the SSH public key used to authenticate with Linux hosts (string)
- subnet string: The AKS subnet (string)
- subscriptionId string: Subscription credentials which uniquely identify the Microsoft Azure subscription (string)
- tenantId string: Azure tenant ID to use (string)
- virtualNetwork string: The name of the virtual network to use. If it's not specified Rancher will create a new VPC (string)
- virtualNetworkResourceGroup string: The AKS virtual network resource group (string)
- aadServerAppSecret string: The secret of an Azure Active Directory server application (string)
- aadTenantId string: The ID of an Azure Active Directory tenant (string)
- addClientAppId string: The ID of an Azure Active Directory client application of type "Native". This application is for user login via kubectl (string)
- addServerAppId string: The ID of an Azure Active Directory server application of type "Web app/API". This application represents the managed cluster's apiserver (Server application) (string)
- adminUsername string: The administrator username to use for Linux hosts. Default: azureuser (string)
- agentOsDiskSize number: Disk size in GB for every machine in the agent pool. If you specify 0, the default for the given "agent vm size" is applied. Default: 0 (int)
- agentPoolName string: Name for the agent pool, up to 12 alphanumeric characters. Default: agentpool0 (string)
- agentStorageProfile string: Storage profile specifies what kind of storage is used on machines in the agent pool. Choose from [ManagedDisks, StorageAccount]. Default: ManagedDisks (string)
- agentVmSize string: Size of machine in the agent pool. Default: Standard_D1_v2 (string)
- authBaseUrl string: The AKS auth base url (string)
- baseUrl string: The AKS base url (string)
- count number: The AKS node pool count. Default: 1 (int)
- dnsServiceIp string: An IP address assigned to the Kubernetes DNS service. It must be within the Kubernetes Service address range specified in "service cidr". Default: 10.0.0.10 (string)
- dockerBridgeCidr string: A CIDR notation IP range assigned to the Docker bridge network. It must not overlap with any Subnet IP ranges or the Kubernetes Service address range specified in "service cidr". Default: 172.17.0.1/16 (string)
- enableHttpApplicationRouting boolean: Enable the Kubernetes ingress with automatic public DNS name creation. Default: false (bool)
- enableMonitoring boolean: Turn on Azure Log Analytics monitoring. Uses the Log Analytics "Default" workspace if it exists, else creates one. If using an existing workspace, specify "log analytics workspace resource id". Default: true (bool)
- loadBalancerSku string: The AKS load balancer sku (string)
- location string: Azure Kubernetes cluster location. Default: eastus (string)
- logAnalyticsWorkspace string: The name of an existing Azure Log Analytics Workspace to use for storing monitoring data. If not specified, uses '{resource group}-{subscription id}-{location code}' (string)
- logAnalyticsWorkspaceResourceGroup string: The resource group of an existing Azure Log Analytics Workspace to use for storing monitoring data. If not specified, uses the 'Cluster' resource group (string)
- maxPods number: The AKS node pool max pods. Default: 110 (int)
- networkPlugin string: The AKS network plugin. Required if imported=false (string)
- networkPolicy string: The AKS network policy (string)
- podCidr string: A CIDR IP range from which to assign Kubernetes Pod IPs (string)
- serviceCidr string: A CIDR IP range from which to assign Kubernetes Service IPs (string)
- tag {[key: string]: any}: Deprecated. Use the tags argument instead as []string
- tags string[]: The AKS cluster tags (list)
- agent_dns_prefix str: DNS prefix to be used to create the FQDN for the agent pool (string)
- client_id str: Azure client ID to use (string)
- client_secret str: Azure client secret associated with the "client id" (string)
- kubernetes_version str: The Kubernetes version that will be used for your master and worker nodes (string)
- master_dns_prefix str: DNS prefix to use for the Kubernetes cluster control plane (string)
- resource_group str: The AKS resource group (string)
- ssh_public_key_contents str: Contents of the SSH public key used to authenticate with Linux hosts (string)
- subnet str: The AKS subnet (string)
- subscription_id str: Subscription credentials which uniquely identify the Microsoft Azure subscription (string)
- tenant_id str: Azure tenant ID to use (string)
- virtual_network str: The name of the virtual network to use. If it's not specified Rancher will create a new VPC (string)
- virtual_network_resource_group str: The AKS virtual network resource group (string)
- aad_server_app_secret str: The secret of an Azure Active Directory server application (string)
- aad_tenant_id str: The ID of an Azure Active Directory tenant (string)
- add_client_app_id str: The ID of an Azure Active Directory client application of type "Native". This application is for user login via kubectl (string)
- add_server_app_id str: The ID of an Azure Active Directory server application of type "Web app/API". This application represents the managed cluster's apiserver (Server application) (string)
- admin_username str: The administrator username to use for Linux hosts. Default: azureuser (string)
- agent_os_disk_size int: Disk size in GB for every machine in the agent pool. If you specify 0, the default for the given "agent vm size" is applied. Default: 0 (int)
- agent_pool_name str: Name for the agent pool, up to 12 alphanumeric characters. Default: agentpool0 (string)
- agent_storage_profile str: Storage profile specifies what kind of storage is used on machines in the agent pool. Choose from [ManagedDisks, StorageAccount]. Default: ManagedDisks (string)
- agent_vm_size str: Size of machine in the agent pool. Default: Standard_D1_v2 (string)
- auth_base_url str: The AKS auth base url (string)
- base_url str: The AKS base url (string)
- count int: The AKS node pool count. Default: 1 (int)
- dns_service_ip str: An IP address assigned to the Kubernetes DNS service. It must be within the Kubernetes Service address range specified in "service cidr". Default: 10.0.0.10 (string)
- docker_bridge_cidr str: A CIDR notation IP range assigned to the Docker bridge network. It must not overlap with any Subnet IP ranges or the Kubernetes Service address range specified in "service cidr". Default: 172.17.0.1/16 (string)
- enable_http_application_routing bool: Enable the Kubernetes ingress with automatic public DNS name creation. Default: false (bool)
- enable_monitoring bool: Turn on Azure Log Analytics monitoring. Uses the Log Analytics "Default" workspace if it exists, else creates one. If using an existing workspace, specify "log analytics workspace resource id". Default: true (bool)
- load_balancer_sku str: The AKS load balancer sku (string)
- location str: Azure Kubernetes cluster location. Default: eastus (string)
- log_analytics_workspace str: The name of an existing Azure Log Analytics Workspace to use for storing monitoring data. If not specified, uses '{resource group}-{subscription id}-{location code}' (string)
- log_analytics_workspace_resource_group str: The resource group of an existing Azure Log Analytics Workspace to use for storing monitoring data. If not specified, uses the 'Cluster' resource group (string)
- max_pods int: The AKS node pool max pods. Default: 110 (int)
- network_plugin str: The AKS network plugin. Required if imported=false (string)
- network_policy str: The AKS network policy (string)
- pod_cidr str: A CIDR IP range from which to assign Kubernetes Pod IPs (string)
- service_cidr str: A CIDR IP range from which to assign Kubernetes Service IPs (string)
- tag Mapping[str, Any]: Deprecated. Use the tags argument instead as []string
- tags Sequence[str]: The AKS cluster tags (list)
- agentDnsPrefix String: DNS prefix to be used to create the FQDN for the agent pool (string)
- clientId String: Azure client ID to use (string)
- clientSecret String: Azure client secret associated with the "client id" (string)
- kubernetesVersion String: The Kubernetes version that will be used for your master and worker nodes (string)
- masterDnsPrefix String: DNS prefix to use for the Kubernetes cluster control plane (string)
- resourceGroup String: The AKS resource group (string)
- sshPublicKeyContents String: Contents of the SSH public key used to authenticate with Linux hosts (string)
- subnet String: The AKS subnet (string)
- subscriptionId String: Subscription credentials which uniquely identify the Microsoft Azure subscription (string)
- tenantId String: Azure tenant ID to use (string)
- virtualNetwork String: The name of the virtual network to use. If it's not specified Rancher will create a new VPC (string)
- virtualNetworkResourceGroup String: The AKS virtual network resource group (string)
- aadServerAppSecret String: The secret of an Azure Active Directory server application (string)
- aadTenantId String: The ID of an Azure Active Directory tenant (string)
- addClientAppId String: The ID of an Azure Active Directory client application of type "Native". This application is for user login via kubectl (string)
- addServerAppId String: The ID of an Azure Active Directory server application of type "Web app/API". This application represents the managed cluster's apiserver (Server application) (string)
- adminUsername String: The administrator username to use for Linux hosts. Default: azureuser (string)
- agentOsDiskSize Number: Disk size in GB for every machine in the agent pool. If you specify 0, the default for the given "agent vm size" is applied. Default: 0 (int)
- agentPoolName String: Name for the agent pool, up to 12 alphanumeric characters. Default: agentpool0 (string)
- agentStorageProfile String: Storage profile specifies what kind of storage is used on machines in the agent pool. Choose from [ManagedDisks, StorageAccount]. Default: ManagedDisks (string)
- agentVmSize String: Size of machine in the agent pool. Default: Standard_D1_v2 (string)
- authBaseUrl String: The AKS auth base url (string)
- baseUrl String: The AKS base url (string)
- count Number: The AKS node pool count. Default: 1 (int)
- dnsServiceIp String: An IP address assigned to the Kubernetes DNS service. It must be within the Kubernetes Service address range specified in "service cidr". Default: 10.0.0.10 (string)
- dockerBridgeCidr String: A CIDR notation IP range assigned to the Docker bridge network. It must not overlap with any Subnet IP ranges or the Kubernetes Service address range specified in "service cidr". Default: 172.17.0.1/16 (string)
- enableHttpApplicationRouting Boolean: Enable the Kubernetes ingress with automatic public DNS name creation. Default: false (bool)
- enableMonitoring Boolean: Turn on Azure Log Analytics monitoring. Uses the Log Analytics "Default" workspace if it exists, else creates one. If using an existing workspace, specify "log analytics workspace resource id". Default: true (bool)
- loadBalancerSku String: The AKS load balancer sku (string)
- location String: Azure Kubernetes cluster location. Default: eastus (string)
- logAnalyticsWorkspace String: The name of an existing Azure Log Analytics Workspace to use for storing monitoring data. If not specified, uses '{resource group}-{subscription id}-{location code}' (string)
- logAnalyticsWorkspaceResourceGroup String: The resource group of an existing Azure Log Analytics Workspace to use for storing monitoring data. If not specified, uses the 'Cluster' resource group (string)
- maxPods Number: The AKS node pool max pods. Default: 110 (int)
- networkPlugin String: The AKS network plugin. Required if imported=false (string)
- networkPolicy String: The AKS network policy (string)
- podCidr String: A CIDR IP range from which to assign Kubernetes Pod IPs (string)
- serviceCidr String: A CIDR IP range from which to assign Kubernetes Service IPs (string)
- tag Map<Any>: Deprecated. Use the tags argument instead as []string
- tags List<String>: The AKS cluster tags (list)
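For orientation, the sketch below shows how a few of the aks config arguments listed above can be wired into a rancher2.Cluster in C#. It is a minimal, illustrative sketch only: every credential, key, name and version value is a placeholder, and the exact set of arguments required by your provider version should be checked against the listing above.
using Pulumi;
using Rancher2 = Pulumi.Rancher2;

return await Deployment.RunAsync(() =>
{
    // Sketch: an AKS-hosted cluster using the legacy aks config arguments.
    // All IDs, secrets and names below are placeholders.
    var fooAks = new Rancher2.Cluster("foo-aks", new()
    {
        Description = "AKS cluster managed by Rancher",
        AksConfig = new Rancher2.Inputs.ClusterAksConfigArgs
        {
            // Azure credentials and scope (placeholders)
            ClientId = "<azure-client-id>",
            ClientSecret = "<azure-client-secret>",
            SubscriptionId = "<azure-subscription-id>",
            TenantId = "<azure-tenant-id>",
            // Cluster placement and version
            ResourceGroup = "my-resource-group",
            Location = "eastus",
            KubernetesVersion = "<kubernetes-version>",
            // Agent pool sizing (defaults are noted in the listing above)
            AgentPoolName = "agentpool0",
            AgentVmSize = "Standard_D2_v2",
            Count = 1,
            // SSH access to Linux hosts
            AdminUsername = "azureuser",
            SshPublicKeyContents = "<ssh-public-key>",
            // Networking
            NetworkPlugin = "azure",
            MasterDnsPrefix = "foo-master",
            AgentDnsPrefix = "foo-agent",
            Subnet = "my-aks-subnet",
            VirtualNetwork = "my-aks-vnet",
            VirtualNetworkResourceGroup = "my-network-resource-group",
        },
    });
});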
ClusterAksConfigV2, ClusterAksConfigV2Args
- CloudCredentialId string: The AKS cloud_credential id (string)
- ResourceGroup string: The AKS resource group (string)
- ResourceLocation string: The AKS resource location (string)
- AuthBaseUrl string: The AKS auth base url (string)
- AuthorizedIpRanges List<string>: The AKS authorized ip ranges (list)
- BaseUrl string: The AKS base url (string)
- DnsPrefix string: The AKS dns prefix. Required if imported=false (string)
- HttpApplicationRouting bool: Enable AKS http application routing? (bool)
- Imported bool: Is the AKS cluster imported? Default: false (bool)
- KubernetesVersion string: The Kubernetes version that will be used for your master and worker nodes (string)
- LinuxAdminUsername string: The AKS linux admin username (string)
- LinuxSshPublicKey string: The AKS linux ssh public key (string)
- LoadBalancerSku string: The AKS load balancer sku (string)
- LogAnalyticsWorkspaceGroup string: The AKS log analytics workspace group (string)
- LogAnalyticsWorkspaceName string: The AKS log analytics workspace name (string)
- Monitoring bool: Is AKS cluster monitoring enabled? (bool)
- Name string: The name of the Cluster (string)
- NetworkDnsServiceIp string: The AKS network dns service ip (string)
- NetworkDockerBridgeCidr string: The AKS network docker bridge cidr (string)
- NetworkPlugin string: The AKS network plugin. Required if imported=false (string)
- NetworkPodCidr string: The AKS network pod cidr (string)
- NetworkPolicy string: The AKS network policy (string)
- NetworkServiceCidr string: The AKS network service cidr (string)
- NodePools List<ClusterAksConfigV2NodePool>: The AKS cluster node pools. Required to create a new cluster (list)
- PrivateCluster bool: Is the AKS cluster private? (bool)
- Subnet string: The AKS subnet (string)
- Tags Dictionary<string, object>: The AKS cluster tags (map)
- VirtualNetwork string: The name of the virtual network to use. If it's not specified Rancher will create a new VPC (string)
- VirtualNetworkResourceGroup string: The AKS virtual network resource group (string)
- CloudCredentialId string: The AKS cloud_credential id (string)
- ResourceGroup string: The AKS resource group (string)
- ResourceLocation string: The AKS resource location (string)
- AuthBaseUrl string: The AKS auth base url (string)
- AuthorizedIpRanges []string: The AKS authorized ip ranges (list)
- BaseUrl string: The AKS base url (string)
- DnsPrefix string: The AKS dns prefix. Required if imported=false (string)
- HttpApplicationRouting bool: Enable AKS http application routing? (bool)
- Imported bool: Is the AKS cluster imported? Default: false (bool)
- KubernetesVersion string: The Kubernetes version that will be used for your master and worker nodes (string)
- LinuxAdminUsername string: The AKS linux admin username (string)
- LinuxSshPublicKey string: The AKS linux ssh public key (string)
- LoadBalancerSku string: The AKS load balancer sku (string)
- LogAnalyticsWorkspaceGroup string: The AKS log analytics workspace group (string)
- LogAnalyticsWorkspaceName string: The AKS log analytics workspace name (string)
- Monitoring bool: Is AKS cluster monitoring enabled? (bool)
- Name string: The name of the Cluster (string)
- NetworkDnsServiceIp string: The AKS network dns service ip (string)
- NetworkDockerBridgeCidr string: The AKS network docker bridge cidr (string)
- NetworkPlugin string: The AKS network plugin. Required if imported=false (string)
- NetworkPodCidr string: The AKS network pod cidr (string)
- NetworkPolicy string: The AKS network policy (string)
- NetworkServiceCidr string: The AKS network service cidr (string)
- NodePools []ClusterAksConfigV2NodePool: The AKS cluster node pools. Required to create a new cluster (list)
- PrivateCluster bool: Is the AKS cluster private? (bool)
- Subnet string: The AKS subnet (string)
- Tags map[string]interface{}: The AKS cluster tags (map)
- VirtualNetwork string: The name of the virtual network to use. If it's not specified Rancher will create a new VPC (string)
- VirtualNetworkResourceGroup string: The AKS virtual network resource group (string)
- cloudCredentialId String: The AKS cloud_credential id (string)
- resourceGroup String: The AKS resource group (string)
- resourceLocation String: The AKS resource location (string)
- authBaseUrl String: The AKS auth base url (string)
- authorizedIpRanges List<String>: The AKS authorized ip ranges (list)
- baseUrl String: The AKS base url (string)
- dnsPrefix String: The AKS dns prefix. Required if imported=false (string)
- httpApplicationRouting Boolean: Enable AKS http application routing? (bool)
- imported Boolean: Is the AKS cluster imported? Default: false (bool)
- kubernetesVersion String: The Kubernetes version that will be used for your master and worker nodes (string)
- linuxAdminUsername String: The AKS linux admin username (string)
- linuxSshPublicKey String: The AKS linux ssh public key (string)
- loadBalancerSku String: The AKS load balancer sku (string)
- logAnalyticsWorkspaceGroup String: The AKS log analytics workspace group (string)
- logAnalyticsWorkspaceName String: The AKS log analytics workspace name (string)
- monitoring Boolean: Is AKS cluster monitoring enabled? (bool)
- name String: The name of the Cluster (string)
- networkDnsServiceIp String: The AKS network dns service ip (string)
- networkDockerBridgeCidr String: The AKS network docker bridge cidr (string)
- networkPlugin String: The AKS network plugin. Required if imported=false (string)
- networkPodCidr String: The AKS network pod cidr (string)
- networkPolicy String: The AKS network policy (string)
- networkServiceCidr String: The AKS network service cidr (string)
- nodePools List<ClusterAksConfigV2NodePool>: The AKS cluster node pools. Required to create a new cluster (list)
- privateCluster Boolean: Is the AKS cluster private? (bool)
- subnet String: The AKS subnet (string)
- tags Map<String,Object>: The AKS cluster tags (map)
- virtualNetwork String: The name of the virtual network to use. If it's not specified Rancher will create a new VPC (string)
- virtualNetworkResourceGroup String: The AKS virtual network resource group (string)
- cloudCredentialId string: The AKS cloud_credential id (string)
- resourceGroup string: The AKS resource group (string)
- resourceLocation string: The AKS resource location (string)
- authBaseUrl string: The AKS auth base url (string)
- authorizedIpRanges string[]: The AKS authorized ip ranges (list)
- baseUrl string: The AKS base url (string)
- dnsPrefix string: The AKS dns prefix. Required if imported=false (string)
- httpApplicationRouting boolean: Enable AKS http application routing? (bool)
- imported boolean: Is the AKS cluster imported? Default: false (bool)
- kubernetesVersion string: The Kubernetes version that will be used for your master and worker nodes (string)
- linuxAdminUsername string: The AKS linux admin username (string)
- linuxSshPublicKey string: The AKS linux ssh public key (string)
- loadBalancerSku string: The AKS load balancer sku (string)
- logAnalyticsWorkspaceGroup string: The AKS log analytics workspace group (string)
- logAnalyticsWorkspaceName string: The AKS log analytics workspace name (string)
- monitoring boolean: Is AKS cluster monitoring enabled? (bool)
- name string: The name of the Cluster (string)
- networkDnsServiceIp string: The AKS network dns service ip (string)
- networkDockerBridgeCidr string: The AKS network docker bridge cidr (string)
- networkPlugin string: The AKS network plugin. Required if imported=false (string)
- networkPodCidr string: The AKS network pod cidr (string)
- networkPolicy string: The AKS network policy (string)
- networkServiceCidr string: The AKS network service cidr (string)
- nodePools ClusterAksConfigV2NodePool[]: The AKS cluster node pools. Required to create a new cluster (list)
- privateCluster boolean: Is the AKS cluster private? (bool)
- subnet string: The AKS subnet (string)
- tags {[key: string]: any}: The AKS cluster tags (map)
- virtualNetwork string: The name of the virtual network to use. If it's not specified Rancher will create a new VPC (string)
- virtualNetworkResourceGroup string: The AKS virtual network resource group (string)
- cloud_credential_id str: The AKS cloud_credential id (string)
- resource_group str: The AKS resource group (string)
- resource_location str: The AKS resource location (string)
- auth_base_url str: The AKS auth base url (string)
- authorized_ip_ranges Sequence[str]: The AKS authorized ip ranges (list)
- base_url str: The AKS base url (string)
- dns_prefix str: The AKS dns prefix. Required if imported=false (string)
- http_application_routing bool: Enable AKS http application routing? (bool)
- imported bool: Is the AKS cluster imported? Default: false (bool)
- kubernetes_version str: The Kubernetes version that will be used for your master and worker nodes (string)
- linux_admin_username str: The AKS linux admin username (string)
- linux_ssh_public_key str: The AKS linux ssh public key (string)
- load_balancer_sku str: The AKS load balancer sku (string)
- log_analytics_workspace_group str: The AKS log analytics workspace group (string)
- log_analytics_workspace_name str: The AKS log analytics workspace name (string)
- monitoring bool: Is AKS cluster monitoring enabled? (bool)
- name str: The name of the Cluster (string)
- network_dns_service_ip str: The AKS network dns service ip (string)
- network_docker_bridge_cidr str: The AKS network docker bridge cidr (string)
- network_plugin str: The AKS network plugin. Required if imported=false (string)
- network_pod_cidr str: The AKS network pod cidr (string)
- network_policy str: The AKS network policy (string)
- network_service_cidr str: The AKS network service cidr (string)
- node_pools Sequence[ClusterAksConfigV2NodePool]: The AKS cluster node pools. Required to create a new cluster (list)
- private_cluster bool: Is the AKS cluster private? (bool)
- subnet str: The AKS subnet (string)
- tags Mapping[str, Any]: The AKS cluster tags (map)
- virtual_network str: The name of the virtual network to use. If it's not specified Rancher will create a new VPC (string)
- virtual_network_resource_group str: The AKS virtual network resource group (string)
- cloudCredentialId String: The AKS cloud_credential id (string)
- resourceGroup String: The AKS resource group (string)
- resourceLocation String: The AKS resource location (string)
- authBaseUrl String: The AKS auth base url (string)
- authorizedIpRanges List<String>: The AKS authorized ip ranges (list)
- baseUrl String: The AKS base url (string)
- dnsPrefix String: The AKS dns prefix. Required if imported=false (string)
- httpApplicationRouting Boolean: Enable AKS http application routing? (bool)
- imported Boolean: Is the AKS cluster imported? Default: false (bool)
- kubernetesVersion String: The Kubernetes version that will be used for your master and worker nodes (string)
- linuxAdminUsername String: The AKS linux admin username (string)
- linuxSshPublicKey String: The AKS linux ssh public key (string)
- loadBalancerSku String: The AKS load balancer sku (string)
- logAnalyticsWorkspaceGroup String: The AKS log analytics workspace group (string)
- logAnalyticsWorkspaceName String: The AKS log analytics workspace name (string)
- monitoring Boolean: Is AKS cluster monitoring enabled? (bool)
- name String: The name of the Cluster (string)
- networkDnsServiceIp String: The AKS network dns service ip (string)
- networkDockerBridgeCidr String: The AKS network docker bridge cidr (string)
- networkPlugin String: The AKS network plugin. Required if imported=false (string)
- networkPodCidr String: The AKS network pod cidr (string)
- networkPolicy String: The AKS network policy (string)
- networkServiceCidr String: The AKS network service cidr (string)
- nodePools List<Property Map>: The AKS cluster node pools. Required to create a new cluster (list)
- privateCluster Boolean: Is the AKS cluster private? (bool)
- subnet String: The AKS subnet (string)
- tags Map<Any>: The AKS cluster tags (map)
- virtualNetwork String: The name of the virtual network to use. If it's not specified Rancher will create a new VPC (string)
- virtualNetworkResourceGroup String: The AKS virtual network resource group (string)
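As with the legacy config, the following is a minimal C# sketch of a cluster built from the aks config v2 arguments above, referencing a Rancher cloud credential and defining a single node pool. The credential ID, resource names, Kubernetes version and VM size are placeholders; node pools are only needed when creating (rather than importing) a cluster, per the listing above.
using Pulumi;
using Rancher2 = Pulumi.Rancher2;

return await Deployment.RunAsync(() =>
{
    // Sketch: an AKS cluster provisioned through the aks config v2 arguments.
    // The cloud credential ID, resource names and version are placeholders.
    var fooAksV2 = new Rancher2.Cluster("foo-aks-v2", new()
    {
        Description = "AKS v2 cluster managed by Rancher",
        AksConfigV2 = new Rancher2.Inputs.ClusterAksConfigV2Args
        {
            CloudCredentialId = "<rancher-cloud-credential-id>",
            ResourceGroup = "my-resource-group",
            ResourceLocation = "eastus",
            DnsPrefix = "foo-aks-v2",
            KubernetesVersion = "<kubernetes-version>",
            NetworkPlugin = "kubenet",
            // At least one node pool is needed when creating a new (non-imported) cluster.
            NodePools = new[]
            {
                new Rancher2.Inputs.ClusterAksConfigV2NodePoolArgs
                {
                    Name = "agentpool0",
                    Count = 1,
                    VmSize = "Standard_D2_v2",
                    Mode = "System",
                    OsDiskSizeGb = 128,
                },
            },
        },
    });
});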
ClusterAksConfigV2NodePool, ClusterAksConfigV2NodePoolArgs
- Name string: The name of the Cluster (string)
- AvailabilityZones List<string>: The AKS node pool availability zones (list)
- Count int: The AKS node pool count. Default: 1 (int)
- EnableAutoScaling bool: Is AKS node pool auto scaling enabled? Default: false (bool)
- Labels Dictionary<string, object>: Labels for the Cluster (map)
- MaxCount int: The AKS node pool max count. Required if enable_auto_scaling=true (int)
- MaxPods int: The AKS node pool max pods. Default: 110 (int)
- MaxSurge string: The AKS node pool max surge (string), example value: 25%
- MinCount int: The AKS node pool min count. Required if enable_auto_scaling=true (int)
- Mode string: The AKS node group mode. Default: System (string)
- OrchestratorVersion string: The AKS node pool orchestrator version (string)
- OsDiskSizeGb int: The AKS node pool os disk size gb. Default: 128 (int)
- OsDiskType string: The AKS node pool os disk type. Default: Managed (string)
- OsType string: The AKS node pool os type. Default: Linux (string)
- Taints List<string>: The AKS node pool taints (list)
- VmSize string: The AKS node pool vm size (string)
- Name string: The name of the Cluster (string)
- AvailabilityZones []string: The AKS node pool availability zones (list)
- Count int: The AKS node pool count. Default: 1 (int)
- EnableAutoScaling bool: Is AKS node pool auto scaling enabled? Default: false (bool)
- Labels map[string]interface{}: Labels for the Cluster (map)
- MaxCount int: The AKS node pool max count. Required if enable_auto_scaling=true (int)
- MaxPods int: The AKS node pool max pods. Default: 110 (int)
- MaxSurge string: The AKS node pool max surge (string), example value: 25%
- MinCount int: The AKS node pool min count