azure-native.hdinsight.ClusterPoolCluster
Explore with Pulumi AI
The cluster.
Uses Azure REST API version 2024-05-01-preview.
Other available API versions: 2023-06-01-preview, 2023-11-01-preview. These can be accessed by generating a local SDK package using the CLI command `pulumi package add azure-native hdinsight [ApiVersion]`. See the version guide for details.
Example Usage
HDInsightClusterPut
// HDInsightClusterPut: provisions a Trino cluster inside an HDInsight on AKS
// cluster pool. Demonstrates schedule-based autoscale (a load-based config is
// included for reference), a cluster-scoped managed identity, SSH access, and
// a Head/Worker compute layout.
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using AzureNative = Pulumi.AzureNative;

return await Deployment.RunAsync(() =>
{
    var clusterPoolCluster = new AzureNative.HDInsight.ClusterPoolCluster("clusterPoolCluster", new()
    {
        ClusterName = "cluster1",
        ClusterPoolName = "clusterpool1",
        ClusterProfile = new AzureNative.HDInsight.Inputs.ClusterProfileArgs
        {
            AuthorizationProfile = new AzureNative.HDInsight.Inputs.AuthorizationProfileArgs
            {
                UserIds = new[]
                {
                    "testuser1",
                    "testuser2",
                },
            },
            AutoscaleProfile = new AzureNative.HDInsight.Inputs.AutoscaleProfileArgs
            {
                AutoscaleType = AzureNative.HDInsight.AutoscaleType.ScheduleBased,
                Enabled = true,
                // Seconds to let in-flight work drain before a node is removed.
                GracefulDecommissionTimeout = 3600,
                LoadBasedConfig = new AzureNative.HDInsight.Inputs.LoadBasedConfigArgs
                {
                    CooldownPeriod = 300,
                    MaxNodes = 20,
                    MinNodes = 10,
                    PollInterval = 60,
                    ScalingRules = new[]
                    {
                        new AzureNative.HDInsight.Inputs.ScalingRuleArgs
                        {
                            ActionType = AzureNative.HDInsight.ScaleActionType.Scaleup,
                            ComparisonRule = new AzureNative.HDInsight.Inputs.ComparisonRuleArgs
                            {
                                Operator = AzureNative.HDInsight.ComparisonOperator.GreaterThan,
                                Threshold = 90,
                            },
                            EvaluationCount = 3,
                            ScalingMetric = "cpu",
                        },
                        new AzureNative.HDInsight.Inputs.ScalingRuleArgs
                        {
                            ActionType = AzureNative.HDInsight.ScaleActionType.Scaledown,
                            ComparisonRule = new AzureNative.HDInsight.Inputs.ComparisonRuleArgs
                            {
                                Operator = AzureNative.HDInsight.ComparisonOperator.LessThan,
                                Threshold = 20,
                            },
                            EvaluationCount = 3,
                            ScalingMetric = "cpu",
                        },
                    },
                },
                ScheduleBasedConfig = new AzureNative.HDInsight.Inputs.ScheduleBasedConfigArgs
                {
                    DefaultCount = 10,
                    Schedules = new[]
                    {
                        new AzureNative.HDInsight.Inputs.ScheduleArgs
                        {
                            Count = 20,
                            Days = new[]
                            {
                                AzureNative.HDInsight.ScheduleDay.Monday,
                            },
                            EndTime = "12:00",
                            StartTime = "00:00",
                        },
                        new AzureNative.HDInsight.Inputs.ScheduleArgs
                        {
                            Count = 25,
                            Days = new[]
                            {
                                AzureNative.HDInsight.ScheduleDay.Sunday,
                            },
                            EndTime = "12:00",
                            StartTime = "00:00",
                        },
                    },
                    TimeZone = "Cen. Australia Standard Time",
                },
            },
            ClusterVersion = "1.0.6",
            ManagedIdentityProfile = new AzureNative.HDInsight.Inputs.ManagedIdentityProfileArgs
            {
                IdentityList = new[]
                {
                    new AzureNative.HDInsight.Inputs.ManagedIdentitySpecArgs
                    {
                        ClientId = "de91f1d8-767f-460a-ac11-3cf103f74b34",
                        ObjectId = "40491351-c240-4042-91e0-f644a1d2b441",
                        ResourceId = "/subscriptions/subid/resourceGroups/hiloResourcegroup/providers/Microsoft.ManagedIdentity/userAssignedIdentities/test-msi",
                        Type = AzureNative.HDInsight.ManagedIdentityType.Cluster,
                    },
                },
            },
            OssVersion = "0.410.0",
            SshProfile = new AzureNative.HDInsight.Inputs.ClusterPoolSshProfileArgs
            {
                Count = 2,
                VmSize = "Standard_E8as_v5",
            },
            // FIX: was `TrinoProfile = null`, which drops the property from the
            // request payload. The Go/TypeScript/Python/YAML variants of this same
            // example all send an empty object here, so send empty args instead.
            TrinoProfile = new AzureNative.HDInsight.Inputs.TrinoProfileArgs(),
        },
        ClusterType = "Trino",
        ComputeProfile = new AzureNative.HDInsight.Inputs.ClusterPoolComputeProfileArgs
        {
            AvailabilityZones = new[]
            {
                "1",
                "2",
                "3",
            },
            Nodes = new[]
            {
                new AzureNative.HDInsight.Inputs.NodeProfileArgs
                {
                    Count = 2,
                    Type = "Head",
                    VmSize = "Standard_E8as_v5",
                },
                new AzureNative.HDInsight.Inputs.NodeProfileArgs
                {
                    Count = 3,
                    Type = "Worker",
                    VmSize = "Standard_E8as_v5",
                },
            },
        },
        Location = "West US 2",
        ResourceGroupName = "hiloResourcegroup",
    });
});
// HDInsightClusterPut: provisions a Trino cluster inside an HDInsight on AKS
// cluster pool, with schedule-based autoscale, a cluster-scoped managed
// identity, SSH access, and a Head/Worker compute layout.
package main

import (
	hdinsight "github.com/pulumi/pulumi-azure-native-sdk/hdinsight/v3"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		_, err := hdinsight.NewClusterPoolCluster(ctx, "clusterPoolCluster", &hdinsight.ClusterPoolClusterArgs{
			ClusterName:     pulumi.String("cluster1"),
			ClusterPoolName: pulumi.String("clusterpool1"),
			ClusterProfile: &hdinsight.ClusterProfileArgs{
				AuthorizationProfile: &hdinsight.AuthorizationProfileArgs{
					UserIds: pulumi.StringArray{
						pulumi.String("testuser1"),
						pulumi.String("testuser2"),
					},
				},
				AutoscaleProfile: &hdinsight.AutoscaleProfileArgs{
					AutoscaleType: pulumi.String(hdinsight.AutoscaleTypeScheduleBased),
					Enabled:       pulumi.Bool(true),
					// Seconds to let in-flight work drain before a node is removed.
					GracefulDecommissionTimeout: pulumi.Int(3600),
					LoadBasedConfig: &hdinsight.LoadBasedConfigArgs{
						CooldownPeriod: pulumi.Int(300),
						MaxNodes:       pulumi.Int(20),
						MinNodes:       pulumi.Int(10),
						PollInterval:   pulumi.Int(60),
						ScalingRules: hdinsight.ScalingRuleArray{
							&hdinsight.ScalingRuleArgs{
								ActionType: pulumi.String(hdinsight.ScaleActionTypeScaleup),
								ComparisonRule: &hdinsight.ComparisonRuleArgs{
									Operator:  pulumi.String(hdinsight.ComparisonOperatorGreaterThan),
									Threshold: pulumi.Float64(90),
								},
								EvaluationCount: pulumi.Int(3),
								ScalingMetric:   pulumi.String("cpu"),
							},
							&hdinsight.ScalingRuleArgs{
								ActionType: pulumi.String(hdinsight.ScaleActionTypeScaledown),
								ComparisonRule: &hdinsight.ComparisonRuleArgs{
									Operator:  pulumi.String(hdinsight.ComparisonOperatorLessThan),
									Threshold: pulumi.Float64(20),
								},
								EvaluationCount: pulumi.Int(3),
								ScalingMetric:   pulumi.String("cpu"),
							},
						},
					},
					ScheduleBasedConfig: &hdinsight.ScheduleBasedConfigArgs{
						DefaultCount: pulumi.Int(10),
						Schedules: hdinsight.ScheduleArray{
							&hdinsight.ScheduleArgs{
								Count: pulumi.Int(20),
								Days: pulumi.StringArray{
									pulumi.String(hdinsight.ScheduleDayMonday),
								},
								EndTime:   pulumi.String("12:00"),
								StartTime: pulumi.String("00:00"),
							},
							&hdinsight.ScheduleArgs{
								Count: pulumi.Int(25),
								Days: pulumi.StringArray{
									pulumi.String(hdinsight.ScheduleDaySunday),
								},
								EndTime:   pulumi.String("12:00"),
								StartTime: pulumi.String("00:00"),
							},
						},
						TimeZone: pulumi.String("Cen. Australia Standard Time"),
					},
				},
				ClusterVersion: pulumi.String("1.0.6"),
				ManagedIdentityProfile: &hdinsight.ManagedIdentityProfileArgs{
					IdentityList: hdinsight.ManagedIdentitySpecArray{
						&hdinsight.ManagedIdentitySpecArgs{
							ClientId:   pulumi.String("de91f1d8-767f-460a-ac11-3cf103f74b34"),
							ObjectId:   pulumi.String("40491351-c240-4042-91e0-f644a1d2b441"),
							ResourceId: pulumi.String("/subscriptions/subid/resourceGroups/hiloResourcegroup/providers/Microsoft.ManagedIdentity/userAssignedIdentities/test-msi"),
							Type:       pulumi.String(hdinsight.ManagedIdentityTypeCluster),
						},
					},
				},
				OssVersion: pulumi.String("0.410.0"),
				SshProfile: &hdinsight.ClusterPoolSshProfileArgs{
					Count:  pulumi.Int(2),
					VmSize: pulumi.String("Standard_E8as_v5"),
				},
				// An empty TrinoProfile is sent explicitly for this Trino cluster.
				TrinoProfile: &hdinsight.TrinoProfileArgs{},
			},
			ClusterType: pulumi.String("Trino"),
			ComputeProfile: &hdinsight.ClusterPoolComputeProfileArgs{
				AvailabilityZones: pulumi.StringArray{
					pulumi.String("1"),
					pulumi.String("2"),
					pulumi.String("3"),
				},
				Nodes: hdinsight.NodeProfileArray{
					&hdinsight.NodeProfileArgs{
						Count:  pulumi.Int(2),
						Type:   pulumi.String("Head"),
						VmSize: pulumi.String("Standard_E8as_v5"),
					},
					&hdinsight.NodeProfileArgs{
						Count:  pulumi.Int(3),
						Type:   pulumi.String("Worker"),
						VmSize: pulumi.String("Standard_E8as_v5"),
					},
				},
			},
			Location:          pulumi.String("West US 2"),
			ResourceGroupName: pulumi.String("hiloResourcegroup"),
		})
		if err != nil {
			return err
		}
		return nil
	})
}
// HDInsightClusterPut: provisions a Trino cluster inside an HDInsight on AKS
// cluster pool, with schedule-based autoscale, a cluster-scoped managed
// identity, SSH access, and a Head/Worker compute layout.
package generated_program;

import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.azurenative.hdinsight.ClusterPoolCluster;
import com.pulumi.azurenative.hdinsight.ClusterPoolClusterArgs;
import com.pulumi.azurenative.hdinsight.inputs.ClusterProfileArgs;
import com.pulumi.azurenative.hdinsight.inputs.AuthorizationProfileArgs;
import com.pulumi.azurenative.hdinsight.inputs.AutoscaleProfileArgs;
import com.pulumi.azurenative.hdinsight.inputs.LoadBasedConfigArgs;
// FIX: ScalingRuleArgs, ComparisonRuleArgs, ScheduleArgs, ManagedIdentitySpecArgs
// and NodeProfileArgs were referenced below but never imported, so the example
// did not compile.
import com.pulumi.azurenative.hdinsight.inputs.ScalingRuleArgs;
import com.pulumi.azurenative.hdinsight.inputs.ComparisonRuleArgs;
import com.pulumi.azurenative.hdinsight.inputs.ScheduleBasedConfigArgs;
import com.pulumi.azurenative.hdinsight.inputs.ScheduleArgs;
import com.pulumi.azurenative.hdinsight.inputs.ManagedIdentityProfileArgs;
import com.pulumi.azurenative.hdinsight.inputs.ManagedIdentitySpecArgs;
import com.pulumi.azurenative.hdinsight.inputs.ClusterPoolSshProfileArgs;
import com.pulumi.azurenative.hdinsight.inputs.TrinoProfileArgs;
import com.pulumi.azurenative.hdinsight.inputs.ClusterPoolComputeProfileArgs;
import com.pulumi.azurenative.hdinsight.inputs.NodeProfileArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;

public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        var clusterPoolCluster = new ClusterPoolCluster("clusterPoolCluster", ClusterPoolClusterArgs.builder()
            .clusterName("cluster1")
            .clusterPoolName("clusterpool1")
            .clusterProfile(ClusterProfileArgs.builder()
                .authorizationProfile(AuthorizationProfileArgs.builder()
                    .userIds(
                        "testuser1",
                        "testuser2")
                    .build())
                .autoscaleProfile(AutoscaleProfileArgs.builder()
                    .autoscaleType("ScheduleBased")
                    .enabled(true)
                    // Seconds to let in-flight work drain before a node is removed.
                    .gracefulDecommissionTimeout(3600)
                    .loadBasedConfig(LoadBasedConfigArgs.builder()
                        .cooldownPeriod(300)
                        .maxNodes(20)
                        .minNodes(10)
                        .pollInterval(60)
                        .scalingRules(
                            ScalingRuleArgs.builder()
                                .actionType("scaleup")
                                .comparisonRule(ComparisonRuleArgs.builder()
                                    .operator("greaterThan")
                                    .threshold(90.0)
                                    .build())
                                .evaluationCount(3)
                                .scalingMetric("cpu")
                                .build(),
                            ScalingRuleArgs.builder()
                                .actionType("scaledown")
                                .comparisonRule(ComparisonRuleArgs.builder()
                                    .operator("lessThan")
                                    .threshold(20.0)
                                    .build())
                                .evaluationCount(3)
                                .scalingMetric("cpu")
                                .build())
                        .build())
                    .scheduleBasedConfig(ScheduleBasedConfigArgs.builder()
                        .defaultCount(10)
                        .schedules(
                            ScheduleArgs.builder()
                                .count(20)
                                .days("Monday")
                                .endTime("12:00")
                                .startTime("00:00")
                                .build(),
                            ScheduleArgs.builder()
                                .count(25)
                                .days("Sunday")
                                .endTime("12:00")
                                .startTime("00:00")
                                .build())
                        .timeZone("Cen. Australia Standard Time")
                        .build())
                    .build())
                .clusterVersion("1.0.6")
                .managedIdentityProfile(ManagedIdentityProfileArgs.builder()
                    .identityList(ManagedIdentitySpecArgs.builder()
                        .clientId("de91f1d8-767f-460a-ac11-3cf103f74b34")
                        .objectId("40491351-c240-4042-91e0-f644a1d2b441")
                        .resourceId("/subscriptions/subid/resourceGroups/hiloResourcegroup/providers/Microsoft.ManagedIdentity/userAssignedIdentities/test-msi")
                        .type("cluster")
                        .build())
                    .build())
                .ossVersion("0.410.0")
                .sshProfile(ClusterPoolSshProfileArgs.builder()
                    .count(2)
                    .vmSize("Standard_E8as_v5")
                    .build())
                // An empty trinoProfile is sent explicitly for this Trino cluster.
                .trinoProfile(TrinoProfileArgs.builder()
                    .build())
                .build())
            .clusterType("Trino")
            .computeProfile(ClusterPoolComputeProfileArgs.builder()
                .availabilityZones(
                    "1",
                    "2",
                    "3")
                .nodes(
                    NodeProfileArgs.builder()
                        .count(2)
                        .type("Head")
                        .vmSize("Standard_E8as_v5")
                        .build(),
                    NodeProfileArgs.builder()
                        .count(3)
                        .type("Worker")
                        .vmSize("Standard_E8as_v5")
                        .build())
                .build())
            .location("West US 2")
            .resourceGroupName("hiloResourcegroup")
            .build());
    }
}
// HDInsightClusterPut: provisions a Trino cluster inside an HDInsight on AKS
// cluster pool, with schedule-based autoscale, a cluster-scoped managed
// identity, SSH access, and a Head/Worker compute layout.
import * as pulumi from "@pulumi/pulumi";
import * as azure_native from "@pulumi/azure-native";

const clusterPoolCluster = new azure_native.hdinsight.ClusterPoolCluster("clusterPoolCluster", {
    clusterName: "cluster1",
    clusterPoolName: "clusterpool1",
    clusterProfile: {
        authorizationProfile: {
            userIds: [
                "testuser1",
                "testuser2",
            ],
        },
        autoscaleProfile: {
            autoscaleType: azure_native.hdinsight.AutoscaleType.ScheduleBased,
            enabled: true,
            // Seconds to let in-flight work drain before a node is removed.
            gracefulDecommissionTimeout: 3600,
            loadBasedConfig: {
                cooldownPeriod: 300,
                maxNodes: 20,
                minNodes: 10,
                pollInterval: 60,
                scalingRules: [
                    {
                        actionType: azure_native.hdinsight.ScaleActionType.Scaleup,
                        comparisonRule: {
                            operator: azure_native.hdinsight.ComparisonOperator.GreaterThan,
                            threshold: 90,
                        },
                        evaluationCount: 3,
                        scalingMetric: "cpu",
                    },
                    {
                        actionType: azure_native.hdinsight.ScaleActionType.Scaledown,
                        comparisonRule: {
                            operator: azure_native.hdinsight.ComparisonOperator.LessThan,
                            threshold: 20,
                        },
                        evaluationCount: 3,
                        scalingMetric: "cpu",
                    },
                ],
            },
            scheduleBasedConfig: {
                defaultCount: 10,
                schedules: [
                    {
                        count: 20,
                        days: [azure_native.hdinsight.ScheduleDay.Monday],
                        endTime: "12:00",
                        startTime: "00:00",
                    },
                    {
                        count: 25,
                        days: [azure_native.hdinsight.ScheduleDay.Sunday],
                        endTime: "12:00",
                        startTime: "00:00",
                    },
                ],
                timeZone: "Cen. Australia Standard Time",
            },
        },
        clusterVersion: "1.0.6",
        managedIdentityProfile: {
            identityList: [{
                clientId: "de91f1d8-767f-460a-ac11-3cf103f74b34",
                objectId: "40491351-c240-4042-91e0-f644a1d2b441",
                resourceId: "/subscriptions/subid/resourceGroups/hiloResourcegroup/providers/Microsoft.ManagedIdentity/userAssignedIdentities/test-msi",
                type: azure_native.hdinsight.ManagedIdentityType.Cluster,
            }],
        },
        ossVersion: "0.410.0",
        sshProfile: {
            count: 2,
            vmSize: "Standard_E8as_v5",
        },
        // An empty trinoProfile is sent explicitly for this Trino cluster.
        trinoProfile: {},
    },
    clusterType: "Trino",
    computeProfile: {
        availabilityZones: [
            "1",
            "2",
            "3",
        ],
        nodes: [
            {
                count: 2,
                type: "Head",
                vmSize: "Standard_E8as_v5",
            },
            {
                count: 3,
                type: "Worker",
                vmSize: "Standard_E8as_v5",
            },
        ],
    },
    location: "West US 2",
    resourceGroupName: "hiloResourcegroup",
});
# HDInsightClusterPut: provisions a Trino cluster inside an HDInsight on AKS
# cluster pool, with schedule-based autoscale, a cluster-scoped managed
# identity, SSH access, and a Head/Worker compute layout.
import pulumi
import pulumi_azure_native as azure_native

cluster_pool_cluster = azure_native.hdinsight.ClusterPoolCluster("clusterPoolCluster",
    cluster_name="cluster1",
    cluster_pool_name="clusterpool1",
    cluster_profile={
        "authorization_profile": {
            "user_ids": [
                "testuser1",
                "testuser2",
            ],
        },
        "autoscale_profile": {
            "autoscale_type": azure_native.hdinsight.AutoscaleType.SCHEDULE_BASED,
            "enabled": True,
            # Seconds to let in-flight work drain before a node is removed.
            "graceful_decommission_timeout": 3600,
            "load_based_config": {
                "cooldown_period": 300,
                "max_nodes": 20,
                "min_nodes": 10,
                "poll_interval": 60,
                "scaling_rules": [
                    {
                        "action_type": azure_native.hdinsight.ScaleActionType.SCALEUP,
                        "comparison_rule": {
                            "operator": azure_native.hdinsight.ComparisonOperator.GREATER_THAN,
                            "threshold": 90,
                        },
                        "evaluation_count": 3,
                        "scaling_metric": "cpu",
                    },
                    {
                        "action_type": azure_native.hdinsight.ScaleActionType.SCALEDOWN,
                        "comparison_rule": {
                            "operator": azure_native.hdinsight.ComparisonOperator.LESS_THAN,
                            "threshold": 20,
                        },
                        "evaluation_count": 3,
                        "scaling_metric": "cpu",
                    },
                ],
            },
            "schedule_based_config": {
                "default_count": 10,
                "schedules": [
                    {
                        "count": 20,
                        "days": [azure_native.hdinsight.ScheduleDay.MONDAY],
                        "end_time": "12:00",
                        "start_time": "00:00",
                    },
                    {
                        "count": 25,
                        "days": [azure_native.hdinsight.ScheduleDay.SUNDAY],
                        "end_time": "12:00",
                        "start_time": "00:00",
                    },
                ],
                "time_zone": "Cen. Australia Standard Time",
            },
        },
        "cluster_version": "1.0.6",
        "managed_identity_profile": {
            "identity_list": [{
                "client_id": "de91f1d8-767f-460a-ac11-3cf103f74b34",
                "object_id": "40491351-c240-4042-91e0-f644a1d2b441",
                "resource_id": "/subscriptions/subid/resourceGroups/hiloResourcegroup/providers/Microsoft.ManagedIdentity/userAssignedIdentities/test-msi",
                "type": azure_native.hdinsight.ManagedIdentityType.CLUSTER,
            }],
        },
        "oss_version": "0.410.0",
        "ssh_profile": {
            "count": 2,
            "vm_size": "Standard_E8as_v5",
        },
        # An empty trino_profile is sent explicitly for this Trino cluster.
        "trino_profile": {},
    },
    cluster_type="Trino",
    compute_profile={
        "availability_zones": [
            "1",
            "2",
            "3",
        ],
        "nodes": [
            {
                "count": 2,
                "type": "Head",
                "vm_size": "Standard_E8as_v5",
            },
            {
                "count": 3,
                "type": "Worker",
                "vm_size": "Standard_E8as_v5",
            },
        ],
    },
    location="West US 2",
    resource_group_name="hiloResourcegroup")
# HDInsightClusterPut: provisions a Trino cluster inside an HDInsight on AKS
# cluster pool, with schedule-based autoscale, a cluster-scoped managed
# identity, SSH access, and a Head/Worker compute layout.
resources:
  clusterPoolCluster:
    type: azure-native:hdinsight:ClusterPoolCluster
    properties:
      clusterName: cluster1
      clusterPoolName: clusterpool1
      clusterProfile:
        authorizationProfile:
          userIds:
            - testuser1
            - testuser2
        autoscaleProfile:
          autoscaleType: ScheduleBased
          enabled: true
          # Seconds to let in-flight work drain before a node is removed.
          gracefulDecommissionTimeout: 3600
          loadBasedConfig:
            cooldownPeriod: 300
            maxNodes: 20
            minNodes: 10
            pollInterval: 60
            scalingRules:
              - actionType: scaleup
                comparisonRule:
                  operator: greaterThan
                  threshold: 90
                evaluationCount: 3
                scalingMetric: cpu
              - actionType: scaledown
                comparisonRule:
                  operator: lessThan
                  threshold: 20
                evaluationCount: 3
                scalingMetric: cpu
          scheduleBasedConfig:
            defaultCount: 10
            schedules:
              # NOTE(review): unquoted 12:00 / 00:00 scalars are strings under
              # YAML 1.2 but sexagesimal integers under YAML 1.1 — confirm the
              # Pulumi YAML parser, or quote them, if copying elsewhere.
              - count: 20
                days:
                  - Monday
                endTime: 12:00
                startTime: 00:00
              - count: 25
                days:
                  - Sunday
                endTime: 12:00
                startTime: 00:00
            timeZone: Cen. Australia Standard Time
        clusterVersion: 1.0.6
        managedIdentityProfile:
          identityList:
            - clientId: de91f1d8-767f-460a-ac11-3cf103f74b34
              objectId: 40491351-c240-4042-91e0-f644a1d2b441
              resourceId: /subscriptions/subid/resourceGroups/hiloResourcegroup/providers/Microsoft.ManagedIdentity/userAssignedIdentities/test-msi
              type: cluster
        ossVersion: 0.410.0
        sshProfile:
          count: 2
          vmSize: Standard_E8as_v5
        # An empty trinoProfile is sent explicitly for this Trino cluster.
        trinoProfile: {}
      clusterType: Trino
      computeProfile:
        availabilityZones:
          - '1'
          - '2'
          - '3'
        nodes:
          - count: 2
            type: Head
            vmSize: Standard_E8as_v5
          - count: 3
            type: Worker
            vmSize: Standard_E8as_v5
      location: West US 2
      resourceGroupName: hiloResourcegroup
HDInsightRangerClusterPut
// HDInsightRangerClusterPut: provisions an Apache Ranger cluster inside an
// HDInsight on AKS cluster pool, configuring the Ranger admin UI (backed by an
// Azure SQL database), audit log storage, and usersync.
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using AzureNative = Pulumi.AzureNative;

return await Deployment.RunAsync(() =>
{
    var clusterPoolCluster = new AzureNative.HDInsight.ClusterPoolCluster("clusterPoolCluster", new()
    {
        ClusterName = "cluster1",
        ClusterPoolName = "clusterpool1",
        ClusterProfile = new AzureNative.HDInsight.Inputs.ClusterProfileArgs
        {
            AuthorizationProfile = new AzureNative.HDInsight.Inputs.AuthorizationProfileArgs
            {
                UserIds = new[]
                {
                    "testuser1",
                    "testuser2",
                },
            },
            ClusterVersion = "0.0.1",
            ManagedIdentityProfile = new AzureNative.HDInsight.Inputs.ManagedIdentityProfileArgs
            {
                IdentityList = new[]
                {
                    new AzureNative.HDInsight.Inputs.ManagedIdentitySpecArgs
                    {
                        ClientId = "de91f1d8-767f-460a-ac11-3cf103f74b34",
                        ObjectId = "40491351-c240-4042-91e0-f644a1d2b441",
                        ResourceId = "/subscriptions/subid/resourceGroups/hiloResourcegroup/providers/Microsoft.ManagedIdentity/userAssignedIdentities/test-msi",
                        Type = AzureNative.HDInsight.ManagedIdentityType.Cluster,
                    },
                },
            },
            OssVersion = "2.2.3",
            RangerProfile = new AzureNative.HDInsight.Inputs.RangerProfileArgs
            {
                RangerAdmin = new AzureNative.HDInsight.Inputs.RangerAdminSpecArgs
                {
                    Admins = new[]
                    {
                        "testuser1@contoso.com",
                        "testuser2@contoso.com",
                    },
                    Database = new AzureNative.HDInsight.Inputs.RangerAdminSpecDatabaseArgs
                    {
                        Host = "testsqlserver.database.windows.net",
                        Name = "testdb",
                        // Key Vault secret URI; the DB password is not inlined here.
                        PasswordSecretRef = "https://testkv.vault.azure.net/secrets/mysecret/5df6584d9c25418c8d900240aa6c3452",
                        Username = "admin",
                    },
                },
                RangerAudit = new AzureNative.HDInsight.Inputs.RangerAuditSpecArgs
                {
                    StorageAccount = "https://teststorage.blob.core.windows.net/testblob",
                },
                RangerUsersync = new AzureNative.HDInsight.Inputs.RangerUsersyncSpecArgs
                {
                    Enabled = true,
                    Groups = new[]
                    {
                        "0a53828f-36c9-44c3-be3d-99a7fce977ad",
                        "13be6971-79db-4f33-9d41-b25589ca25ac",
                    },
                    Mode = AzureNative.HDInsight.RangerUsersyncMode.Automatic,
                    Users = new[]
                    {
                        "testuser1@contoso.com",
                        "testuser2@contoso.com",
                    },
                },
            },
        },
        ClusterType = "ranger",
        ComputeProfile = new AzureNative.HDInsight.Inputs.ClusterPoolComputeProfileArgs
        {
            AvailabilityZones = new[]
            {
                "1",
                "2",
                "3",
            },
            Nodes = new[]
            {
                new AzureNative.HDInsight.Inputs.NodeProfileArgs
                {
                    Count = 2,
                    Type = "head",
                    VmSize = "Standard_D3_v2",
                },
            },
        },
        Location = "West US 2",
        ResourceGroupName = "hiloResourcegroup",
    });
});
// HDInsightRangerClusterPut: provisions an Apache Ranger cluster inside an
// HDInsight on AKS cluster pool, configuring the Ranger admin UI (backed by an
// Azure SQL database), audit log storage, and usersync.
package main

import (
	hdinsight "github.com/pulumi/pulumi-azure-native-sdk/hdinsight/v3"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		_, err := hdinsight.NewClusterPoolCluster(ctx, "clusterPoolCluster", &hdinsight.ClusterPoolClusterArgs{
			ClusterName:     pulumi.String("cluster1"),
			ClusterPoolName: pulumi.String("clusterpool1"),
			ClusterProfile: &hdinsight.ClusterProfileArgs{
				AuthorizationProfile: &hdinsight.AuthorizationProfileArgs{
					UserIds: pulumi.StringArray{
						pulumi.String("testuser1"),
						pulumi.String("testuser2"),
					},
				},
				ClusterVersion: pulumi.String("0.0.1"),
				ManagedIdentityProfile: &hdinsight.ManagedIdentityProfileArgs{
					IdentityList: hdinsight.ManagedIdentitySpecArray{
						&hdinsight.ManagedIdentitySpecArgs{
							ClientId:   pulumi.String("de91f1d8-767f-460a-ac11-3cf103f74b34"),
							ObjectId:   pulumi.String("40491351-c240-4042-91e0-f644a1d2b441"),
							ResourceId: pulumi.String("/subscriptions/subid/resourceGroups/hiloResourcegroup/providers/Microsoft.ManagedIdentity/userAssignedIdentities/test-msi"),
							Type:       pulumi.String(hdinsight.ManagedIdentityTypeCluster),
						},
					},
				},
				OssVersion: pulumi.String("2.2.3"),
				RangerProfile: &hdinsight.RangerProfileArgs{
					RangerAdmin: &hdinsight.RangerAdminSpecArgs{
						Admins: pulumi.StringArray{
							pulumi.String("testuser1@contoso.com"),
							pulumi.String("testuser2@contoso.com"),
						},
						Database: &hdinsight.RangerAdminSpecDatabaseArgs{
							Host: pulumi.String("testsqlserver.database.windows.net"),
							Name: pulumi.String("testdb"),
							// Key Vault secret URI; the DB password is not inlined here.
							PasswordSecretRef: pulumi.String("https://testkv.vault.azure.net/secrets/mysecret/5df6584d9c25418c8d900240aa6c3452"),
							Username:          pulumi.String("admin"),
						},
					},
					RangerAudit: &hdinsight.RangerAuditSpecArgs{
						StorageAccount: pulumi.String("https://teststorage.blob.core.windows.net/testblob"),
					},
					RangerUsersync: &hdinsight.RangerUsersyncSpecArgs{
						Enabled: pulumi.Bool(true),
						Groups: pulumi.StringArray{
							pulumi.String("0a53828f-36c9-44c3-be3d-99a7fce977ad"),
							pulumi.String("13be6971-79db-4f33-9d41-b25589ca25ac"),
						},
						Mode: pulumi.String(hdinsight.RangerUsersyncModeAutomatic),
						Users: pulumi.StringArray{
							pulumi.String("testuser1@contoso.com"),
							pulumi.String("testuser2@contoso.com"),
						},
					},
				},
			},
			ClusterType: pulumi.String("ranger"),
			ComputeProfile: &hdinsight.ClusterPoolComputeProfileArgs{
				AvailabilityZones: pulumi.StringArray{
					pulumi.String("1"),
					pulumi.String("2"),
					pulumi.String("3"),
				},
				Nodes: hdinsight.NodeProfileArray{
					&hdinsight.NodeProfileArgs{
						Count:  pulumi.Int(2),
						Type:   pulumi.String("head"),
						VmSize: pulumi.String("Standard_D3_v2"),
					},
				},
			},
			Location:          pulumi.String("West US 2"),
			ResourceGroupName: pulumi.String("hiloResourcegroup"),
		})
		if err != nil {
			return err
		}
		return nil
	})
}
// HDInsightRangerClusterPut: provisions an Apache Ranger cluster inside an
// HDInsight on AKS cluster pool, configuring the Ranger admin UI (backed by an
// Azure SQL database), audit log storage, and usersync.
package generated_program;

import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.azurenative.hdinsight.ClusterPoolCluster;
import com.pulumi.azurenative.hdinsight.ClusterPoolClusterArgs;
import com.pulumi.azurenative.hdinsight.inputs.ClusterProfileArgs;
import com.pulumi.azurenative.hdinsight.inputs.AuthorizationProfileArgs;
// FIX: ManagedIdentitySpecArgs and NodeProfileArgs were referenced below but
// never imported, so the example did not compile.
import com.pulumi.azurenative.hdinsight.inputs.ManagedIdentityProfileArgs;
import com.pulumi.azurenative.hdinsight.inputs.ManagedIdentitySpecArgs;
import com.pulumi.azurenative.hdinsight.inputs.RangerProfileArgs;
import com.pulumi.azurenative.hdinsight.inputs.RangerAdminSpecArgs;
import com.pulumi.azurenative.hdinsight.inputs.RangerAdminSpecDatabaseArgs;
import com.pulumi.azurenative.hdinsight.inputs.RangerAuditSpecArgs;
import com.pulumi.azurenative.hdinsight.inputs.RangerUsersyncSpecArgs;
import com.pulumi.azurenative.hdinsight.inputs.ClusterPoolComputeProfileArgs;
import com.pulumi.azurenative.hdinsight.inputs.NodeProfileArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;

public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        var clusterPoolCluster = new ClusterPoolCluster("clusterPoolCluster", ClusterPoolClusterArgs.builder()
            .clusterName("cluster1")
            .clusterPoolName("clusterpool1")
            .clusterProfile(ClusterProfileArgs.builder()
                .authorizationProfile(AuthorizationProfileArgs.builder()
                    .userIds(
                        "testuser1",
                        "testuser2")
                    .build())
                .clusterVersion("0.0.1")
                .managedIdentityProfile(ManagedIdentityProfileArgs.builder()
                    .identityList(ManagedIdentitySpecArgs.builder()
                        .clientId("de91f1d8-767f-460a-ac11-3cf103f74b34")
                        .objectId("40491351-c240-4042-91e0-f644a1d2b441")
                        .resourceId("/subscriptions/subid/resourceGroups/hiloResourcegroup/providers/Microsoft.ManagedIdentity/userAssignedIdentities/test-msi")
                        .type("cluster")
                        .build())
                    .build())
                .ossVersion("2.2.3")
                .rangerProfile(RangerProfileArgs.builder()
                    .rangerAdmin(RangerAdminSpecArgs.builder()
                        .admins(
                            "testuser1@contoso.com",
                            "testuser2@contoso.com")
                        .database(RangerAdminSpecDatabaseArgs.builder()
                            .host("testsqlserver.database.windows.net")
                            .name("testdb")
                            // Key Vault secret URI; the DB password is not inlined here.
                            .passwordSecretRef("https://testkv.vault.azure.net/secrets/mysecret/5df6584d9c25418c8d900240aa6c3452")
                            .username("admin")
                            .build())
                        .build())
                    .rangerAudit(RangerAuditSpecArgs.builder()
                        .storageAccount("https://teststorage.blob.core.windows.net/testblob")
                        .build())
                    .rangerUsersync(RangerUsersyncSpecArgs.builder()
                        .enabled(true)
                        .groups(
                            "0a53828f-36c9-44c3-be3d-99a7fce977ad",
                            "13be6971-79db-4f33-9d41-b25589ca25ac")
                        .mode("automatic")
                        .users(
                            "testuser1@contoso.com",
                            "testuser2@contoso.com")
                        .build())
                    .build())
                .build())
            .clusterType("ranger")
            .computeProfile(ClusterPoolComputeProfileArgs.builder()
                .availabilityZones(
                    "1",
                    "2",
                    "3")
                .nodes(NodeProfileArgs.builder()
                    .count(2)
                    .type("head")
                    .vmSize("Standard_D3_v2")
                    .build())
                .build())
            .location("West US 2")
            .resourceGroupName("hiloResourcegroup")
            .build());
    }
}
// HDInsightRangerClusterPut: provisions an Apache Ranger cluster inside an
// HDInsight on AKS cluster pool, configuring the Ranger admin UI (backed by an
// Azure SQL database), audit log storage, and usersync.
import * as pulumi from "@pulumi/pulumi";
import * as azure_native from "@pulumi/azure-native";

const clusterPoolCluster = new azure_native.hdinsight.ClusterPoolCluster("clusterPoolCluster", {
    clusterName: "cluster1",
    clusterPoolName: "clusterpool1",
    clusterProfile: {
        authorizationProfile: {
            userIds: [
                "testuser1",
                "testuser2",
            ],
        },
        clusterVersion: "0.0.1",
        managedIdentityProfile: {
            identityList: [{
                clientId: "de91f1d8-767f-460a-ac11-3cf103f74b34",
                objectId: "40491351-c240-4042-91e0-f644a1d2b441",
                resourceId: "/subscriptions/subid/resourceGroups/hiloResourcegroup/providers/Microsoft.ManagedIdentity/userAssignedIdentities/test-msi",
                type: azure_native.hdinsight.ManagedIdentityType.Cluster,
            }],
        },
        ossVersion: "2.2.3",
        rangerProfile: {
            rangerAdmin: {
                admins: [
                    "testuser1@contoso.com",
                    "testuser2@contoso.com",
                ],
                database: {
                    host: "testsqlserver.database.windows.net",
                    name: "testdb",
                    // Key Vault secret URI; the DB password is not inlined here.
                    passwordSecretRef: "https://testkv.vault.azure.net/secrets/mysecret/5df6584d9c25418c8d900240aa6c3452",
                    username: "admin",
                },
            },
            rangerAudit: {
                storageAccount: "https://teststorage.blob.core.windows.net/testblob",
            },
            rangerUsersync: {
                enabled: true,
                groups: [
                    "0a53828f-36c9-44c3-be3d-99a7fce977ad",
                    "13be6971-79db-4f33-9d41-b25589ca25ac",
                ],
                mode: azure_native.hdinsight.RangerUsersyncMode.Automatic,
                users: [
                    "testuser1@contoso.com",
                    "testuser2@contoso.com",
                ],
            },
        },
    },
    clusterType: "ranger",
    computeProfile: {
        availabilityZones: [
            "1",
            "2",
            "3",
        ],
        nodes: [{
            count: 2,
            type: "head",
            vmSize: "Standard_D3_v2",
        }],
    },
    location: "West US 2",
    resourceGroupName: "hiloResourcegroup",
});
# HDInsightRangerClusterPut: provisions an Apache Ranger cluster inside an
# HDInsight on AKS cluster pool, configuring the Ranger admin UI (backed by an
# Azure SQL database), audit log storage, and usersync.
import pulumi
import pulumi_azure_native as azure_native

cluster_pool_cluster = azure_native.hdinsight.ClusterPoolCluster("clusterPoolCluster",
    cluster_name="cluster1",
    cluster_pool_name="clusterpool1",
    cluster_profile={
        "authorization_profile": {
            "user_ids": [
                "testuser1",
                "testuser2",
            ],
        },
        "cluster_version": "0.0.1",
        "managed_identity_profile": {
            "identity_list": [{
                "client_id": "de91f1d8-767f-460a-ac11-3cf103f74b34",
                "object_id": "40491351-c240-4042-91e0-f644a1d2b441",
                "resource_id": "/subscriptions/subid/resourceGroups/hiloResourcegroup/providers/Microsoft.ManagedIdentity/userAssignedIdentities/test-msi",
                "type": azure_native.hdinsight.ManagedIdentityType.CLUSTER,
            }],
        },
        "oss_version": "2.2.3",
        "ranger_profile": {
            "ranger_admin": {
                "admins": [
                    "testuser1@contoso.com",
                    "testuser2@contoso.com",
                ],
                "database": {
                    "host": "testsqlserver.database.windows.net",
                    "name": "testdb",
                    # Key Vault secret URI; the DB password is not inlined here.
                    "password_secret_ref": "https://testkv.vault.azure.net/secrets/mysecret/5df6584d9c25418c8d900240aa6c3452",
                    "username": "admin",
                },
            },
            "ranger_audit": {
                "storage_account": "https://teststorage.blob.core.windows.net/testblob",
            },
            "ranger_usersync": {
                "enabled": True,
                "groups": [
                    "0a53828f-36c9-44c3-be3d-99a7fce977ad",
                    "13be6971-79db-4f33-9d41-b25589ca25ac",
                ],
                "mode": azure_native.hdinsight.RangerUsersyncMode.AUTOMATIC,
                "users": [
                    "testuser1@contoso.com",
                    "testuser2@contoso.com",
                ],
            },
        },
    },
    cluster_type="ranger",
    compute_profile={
        "availability_zones": [
            "1",
            "2",
            "3",
        ],
        "nodes": [{
            "count": 2,
            "type": "head",
            "vm_size": "Standard_D3_v2",
        }],
    },
    location="West US 2",
    resource_group_name="hiloResourcegroup")
# HDInsightRangerClusterPut: provisions an Apache Ranger cluster inside an
# HDInsight on AKS cluster pool, configuring the Ranger admin UI (backed by an
# Azure SQL database), audit log storage, and usersync.
resources:
  clusterPoolCluster:
    type: azure-native:hdinsight:ClusterPoolCluster
    properties:
      clusterName: cluster1
      clusterPoolName: clusterpool1
      clusterProfile:
        authorizationProfile:
          userIds:
            - testuser1
            - testuser2
        clusterVersion: 0.0.1
        managedIdentityProfile:
          identityList:
            - clientId: de91f1d8-767f-460a-ac11-3cf103f74b34
              objectId: 40491351-c240-4042-91e0-f644a1d2b441
              resourceId: /subscriptions/subid/resourceGroups/hiloResourcegroup/providers/Microsoft.ManagedIdentity/userAssignedIdentities/test-msi
              type: cluster
        ossVersion: 2.2.3
        rangerProfile:
          rangerAdmin:
            admins:
              - testuser1@contoso.com
              - testuser2@contoso.com
            database:
              host: testsqlserver.database.windows.net
              name: testdb
              # Key Vault secret URI; the DB password is not inlined here.
              passwordSecretRef: https://testkv.vault.azure.net/secrets/mysecret/5df6584d9c25418c8d900240aa6c3452
              username: admin
          rangerAudit:
            storageAccount: https://teststorage.blob.core.windows.net/testblob
          rangerUsersync:
            enabled: true
            groups:
              - 0a53828f-36c9-44c3-be3d-99a7fce977ad
              - 13be6971-79db-4f33-9d41-b25589ca25ac
            mode: automatic
            users:
              - testuser1@contoso.com
              - testuser2@contoso.com
      clusterType: ranger
      computeProfile:
        availabilityZones:
          - '1'
          - '2'
          - '3'
        nodes:
          - count: 2
            type: head
            vmSize: Standard_D3_v2
      location: West US 2
      resourceGroupName: hiloResourcegroup
HDInsightSparkClusterPut
// HDInsightSparkClusterPut: provisions a Spark cluster inside an HDInsight on
// AKS cluster pool, overriding Spark/YARN service configuration files
// (spark-defaults.conf, core-site.xml, yarn-site.xml) via serviceConfigsProfiles.
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using AzureNative = Pulumi.AzureNative;

return await Deployment.RunAsync(() =>
{
    var clusterPoolCluster = new AzureNative.HDInsight.ClusterPoolCluster("clusterPoolCluster", new()
    {
        ClusterName = "cluster1",
        ClusterPoolName = "clusterpool1",
        ClusterProfile = new AzureNative.HDInsight.Inputs.ClusterProfileArgs
        {
            AuthorizationProfile = new AzureNative.HDInsight.Inputs.AuthorizationProfileArgs
            {
                UserIds = new[]
                {
                    "testuser1",
                    "testuser2",
                },
            },
            ClusterVersion = "0.0.1",
            ManagedIdentityProfile = new AzureNative.HDInsight.Inputs.ManagedIdentityProfileArgs
            {
                IdentityList = new[]
                {
                    new AzureNative.HDInsight.Inputs.ManagedIdentitySpecArgs
                    {
                        ClientId = "de91f1d8-767f-460a-ac11-3cf103f74b34",
                        ObjectId = "40491351-c240-4042-91e0-f644a1d2b441",
                        ResourceId = "/subscriptions/subid/resourceGroups/hiloResourcegroup/providers/Microsoft.ManagedIdentity/userAssignedIdentities/test-msi",
                        Type = AzureNative.HDInsight.ManagedIdentityType.Cluster,
                    },
                },
            },
            OssVersion = "2.2.3",
            // Per-service config-file overrides applied to the cluster.
            ServiceConfigsProfiles = new[]
            {
                new AzureNative.HDInsight.Inputs.ClusterServiceConfigsProfileArgs
                {
                    Configs = new[]
                    {
                        new AzureNative.HDInsight.Inputs.ClusterServiceConfigArgs
                        {
                            Component = "spark-config",
                            Files = new[]
                            {
                                new AzureNative.HDInsight.Inputs.ClusterConfigFileArgs
                                {
                                    FileName = "spark-defaults.conf",
                                    Values =
                                    {
                                        { "spark.eventLog.enabled", "true" },
                                    },
                                },
                            },
                        },
                    },
                    ServiceName = "spark-service",
                },
                new AzureNative.HDInsight.Inputs.ClusterServiceConfigsProfileArgs
                {
                    Configs = new[]
                    {
                        new AzureNative.HDInsight.Inputs.ClusterServiceConfigArgs
                        {
                            Component = "yarn-config",
                            Files = new[]
                            {
                                new AzureNative.HDInsight.Inputs.ClusterConfigFileArgs
                                {
                                    FileName = "core-site.xml",
                                    Values =
                                    {
                                        { "fs.defaultFS", "wasb://testcontainer@teststorage.dfs.core.windows.net/" },
                                        { "storage.container", "testcontainer" },
                                        { "storage.key", "test key" },
                                        { "storage.name", "teststorage" },
                                        { "storage.protocol", "wasb" },
                                    },
                                },
                                new AzureNative.HDInsight.Inputs.ClusterConfigFileArgs
                                {
                                    FileName = "yarn-site.xml",
                                    Values =
                                    {
                                        { "yarn.webapp.ui2.enable", "false" },
                                    },
                                },
                            },
                        },
                    },
                    ServiceName = "yarn-service",
                },
            },
            // NOTE(review): null omits sparkProfile from the request. The Trino
            // example on this page sends an empty object for its type-specific
            // profile; this likely should be `new ...SparkProfileArgs()` — confirm
            // against the other language variants of this example.
            SparkProfile = null,
            SshProfile = new AzureNative.HDInsight.Inputs.ClusterPoolSshProfileArgs
            {
                Count = 2,
                VmSize = "Standard_D3_v2",
            },
        },
        ClusterType = "spark",
        ComputeProfile = new AzureNative.HDInsight.Inputs.ClusterPoolComputeProfileArgs
        {
            AvailabilityZones = new[]
            {
                "1",
                "2",
                "3",
            },
            Nodes = new[]
            {
                new AzureNative.HDInsight.Inputs.NodeProfileArgs
                {
                    Count = 4,
                    Type = "worker",
                    VmSize = "Standard_D3_v2",
                },
            },
        },
        Location = "West US 2",
        ResourceGroupName = "hiloResourcegroup",
    });
});
// Example "HDInsightClusterPut": provisions an HDInsight on AKS cluster
// ("cluster1") inside an existing cluster pool ("clusterpool1") using the
// Pulumi Azure Native Go SDK.
package main
import (
hdinsight "github.com/pulumi/pulumi-azure-native-sdk/hdinsight/v3"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
_, err := hdinsight.NewClusterPoolCluster(ctx, "clusterPoolCluster", &hdinsight.ClusterPoolClusterArgs{
ClusterName: pulumi.String("cluster1"),
ClusterPoolName: pulumi.String("clusterpool1"),
ClusterProfile: &hdinsight.ClusterProfileArgs{
// AAD user IDs granted access to the cluster.
AuthorizationProfile: &hdinsight.AuthorizationProfileArgs{
UserIds: pulumi.StringArray{
pulumi.String("testuser1"),
pulumi.String("testuser2"),
},
},
ClusterVersion: pulumi.String("0.0.1"),
// User-assigned managed identity attached to the cluster.
ManagedIdentityProfile: &hdinsight.ManagedIdentityProfileArgs{
IdentityList: hdinsight.ManagedIdentitySpecArray{
&hdinsight.ManagedIdentitySpecArgs{
ClientId: pulumi.String("de91f1d8-767f-460a-ac11-3cf103f74b34"),
ObjectId: pulumi.String("40491351-c240-4042-91e0-f644a1d2b441"),
ResourceId: pulumi.String("/subscriptions/subid/resourceGroups/hiloResourcegroup/providers/Microsoft.ManagedIdentity/userAssignedIdentities/test-msi"),
Type: pulumi.String(hdinsight.ManagedIdentityTypeCluster),
},
},
},
OssVersion: pulumi.String("2.2.3"),
// Per-service configuration-file overrides (Spark and YARN).
ServiceConfigsProfiles: hdinsight.ClusterServiceConfigsProfileArray{
&hdinsight.ClusterServiceConfigsProfileArgs{
Configs: hdinsight.ClusterServiceConfigArray{
&hdinsight.ClusterServiceConfigArgs{
Component: pulumi.String("spark-config"),
Files: hdinsight.ClusterConfigFileArray{
&hdinsight.ClusterConfigFileArgs{
FileName: pulumi.String("spark-defaults.conf"),
Values: pulumi.StringMap{
"spark.eventLog.enabled": pulumi.String("true"),
},
},
},
},
},
ServiceName: pulumi.String("spark-service"),
},
&hdinsight.ClusterServiceConfigsProfileArgs{
Configs: hdinsight.ClusterServiceConfigArray{
&hdinsight.ClusterServiceConfigArgs{
Component: pulumi.String("yarn-config"),
Files: hdinsight.ClusterConfigFileArray{
&hdinsight.ClusterConfigFileArgs{
FileName: pulumi.String("core-site.xml"),
Values: pulumi.StringMap{
"fs.defaultFS": pulumi.String("wasb://testcontainer@teststorage.dfs.core.windows.net/"),
"storage.container": pulumi.String("testcontainer"),
"storage.key": pulumi.String("test key"),
"storage.name": pulumi.String("teststorage"),
"storage.protocol": pulumi.String("wasb"),
},
},
&hdinsight.ClusterConfigFileArgs{
FileName: pulumi.String("yarn-site.xml"),
Values: pulumi.StringMap{
"yarn.webapp.ui2.enable": pulumi.String("false"),
},
},
},
},
},
ServiceName: pulumi.String("yarn-service"),
},
},
// Empty profile: accept service defaults for the "spark" cluster type.
SparkProfile: &hdinsight.SparkProfileArgs{},
SshProfile: &hdinsight.ClusterPoolSshProfileArgs{
Count: pulumi.Int(2),
VmSize: pulumi.String("Standard_D3_v2"),
},
},
ClusterType: pulumi.String("spark"),
// Worker node pool spread across three availability zones.
ComputeProfile: &hdinsight.ClusterPoolComputeProfileArgs{
AvailabilityZones: pulumi.StringArray{
pulumi.String("1"),
pulumi.String("2"),
pulumi.String("3"),
},
Nodes: hdinsight.NodeProfileArray{
&hdinsight.NodeProfileArgs{
Count: pulumi.Int(4),
Type: pulumi.String("worker"),
VmSize: pulumi.String("Standard_D3_v2"),
},
},
},
Location: pulumi.String("West US 2"),
ResourceGroupName: pulumi.String("hiloResourcegroup"),
})
if err != nil {
return err
}
return nil
})
}
package generated_program;

import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.azurenative.hdinsight.ClusterPoolCluster;
import com.pulumi.azurenative.hdinsight.ClusterPoolClusterArgs;
import com.pulumi.azurenative.hdinsight.inputs.ClusterProfileArgs;
import com.pulumi.azurenative.hdinsight.inputs.AuthorizationProfileArgs;
import com.pulumi.azurenative.hdinsight.inputs.ManagedIdentityProfileArgs;
import com.pulumi.azurenative.hdinsight.inputs.ManagedIdentitySpecArgs;
import com.pulumi.azurenative.hdinsight.inputs.ClusterServiceConfigsProfileArgs;
import com.pulumi.azurenative.hdinsight.inputs.ClusterServiceConfigArgs;
import com.pulumi.azurenative.hdinsight.inputs.ClusterConfigFileArgs;
import com.pulumi.azurenative.hdinsight.inputs.SparkProfileArgs;
import com.pulumi.azurenative.hdinsight.inputs.ClusterPoolSshProfileArgs;
import com.pulumi.azurenative.hdinsight.inputs.ClusterPoolComputeProfileArgs;
import com.pulumi.azurenative.hdinsight.inputs.NodeProfileArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;

/**
 * Example "HDInsightClusterPut": provisions an HDInsight on AKS cluster
 * ("cluster1") inside an existing cluster pool ("clusterpool1").
 *
 * Fix: the example used ManagedIdentitySpecArgs, ClusterServiceConfigsProfileArgs,
 * ClusterServiceConfigArgs, ClusterConfigFileArgs and NodeProfileArgs without
 * importing them; the missing imports were added so the program compiles.
 */
public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        var clusterPoolCluster = new ClusterPoolCluster("clusterPoolCluster", ClusterPoolClusterArgs.builder()
            .clusterName("cluster1")
            .clusterPoolName("clusterpool1")
            .clusterProfile(ClusterProfileArgs.builder()
                // AAD user IDs granted access to the cluster.
                .authorizationProfile(AuthorizationProfileArgs.builder()
                    .userIds(
                        "testuser1",
                        "testuser2")
                    .build())
                .clusterVersion("0.0.1")
                // User-assigned managed identity attached to the cluster.
                .managedIdentityProfile(ManagedIdentityProfileArgs.builder()
                    .identityList(ManagedIdentitySpecArgs.builder()
                        .clientId("de91f1d8-767f-460a-ac11-3cf103f74b34")
                        .objectId("40491351-c240-4042-91e0-f644a1d2b441")
                        .resourceId("/subscriptions/subid/resourceGroups/hiloResourcegroup/providers/Microsoft.ManagedIdentity/userAssignedIdentities/test-msi")
                        .type("cluster")
                        .build())
                    .build())
                .ossVersion("2.2.3")
                // Per-service configuration-file overrides (Spark and YARN).
                .serviceConfigsProfiles(
                    ClusterServiceConfigsProfileArgs.builder()
                        .configs(ClusterServiceConfigArgs.builder()
                            .component("spark-config")
                            .files(ClusterConfigFileArgs.builder()
                                .fileName("spark-defaults.conf")
                                .values(Map.of("spark.eventLog.enabled", "true"))
                                .build())
                            .build())
                        .serviceName("spark-service")
                        .build(),
                    ClusterServiceConfigsProfileArgs.builder()
                        .configs(ClusterServiceConfigArgs.builder()
                            .component("yarn-config")
                            .files(
                                ClusterConfigFileArgs.builder()
                                    .fileName("core-site.xml")
                                    .values(Map.ofEntries(
                                        Map.entry("fs.defaultFS", "wasb://testcontainer@teststorage.dfs.core.windows.net/"),
                                        Map.entry("storage.container", "testcontainer"),
                                        Map.entry("storage.key", "test key"),
                                        Map.entry("storage.name", "teststorage"),
                                        Map.entry("storage.protocol", "wasb")
                                    ))
                                    .build(),
                                ClusterConfigFileArgs.builder()
                                    .fileName("yarn-site.xml")
                                    .values(Map.of("yarn.webapp.ui2.enable", "false"))
                                    .build())
                            .build())
                        .serviceName("yarn-service")
                        .build())
                // Empty profile: accept service defaults for the "spark" cluster type.
                .sparkProfile(SparkProfileArgs.builder()
                    .build())
                .sshProfile(ClusterPoolSshProfileArgs.builder()
                    .count(2)
                    .vmSize("Standard_D3_v2")
                    .build())
                .build())
            .clusterType("spark")
            // Worker node pool spread across three availability zones.
            .computeProfile(ClusterPoolComputeProfileArgs.builder()
                .availabilityZones(
                    "1",
                    "2",
                    "3")
                .nodes(NodeProfileArgs.builder()
                    .count(4)
                    .type("worker")
                    .vmSize("Standard_D3_v2")
                    .build())
                .build())
            .location("West US 2")
            .resourceGroupName("hiloResourcegroup")
            .build());
    }
}
import * as pulumi from "@pulumi/pulumi";
import * as azure_native from "@pulumi/azure-native";
// Example "HDInsightClusterPut": provisions an HDInsight on AKS cluster
// ("cluster1") inside an existing cluster pool ("clusterpool1").
const clusterPoolCluster = new azure_native.hdinsight.ClusterPoolCluster("clusterPoolCluster", {
clusterName: "cluster1",
clusterPoolName: "clusterpool1",
clusterProfile: {
// AAD user IDs granted access to the cluster.
authorizationProfile: {
userIds: [
"testuser1",
"testuser2",
],
},
clusterVersion: "0.0.1",
// User-assigned managed identity attached to the cluster.
managedIdentityProfile: {
identityList: [{
clientId: "de91f1d8-767f-460a-ac11-3cf103f74b34",
objectId: "40491351-c240-4042-91e0-f644a1d2b441",
resourceId: "/subscriptions/subid/resourceGroups/hiloResourcegroup/providers/Microsoft.ManagedIdentity/userAssignedIdentities/test-msi",
type: azure_native.hdinsight.ManagedIdentityType.Cluster,
}],
},
ossVersion: "2.2.3",
// Per-service configuration-file overrides (Spark and YARN).
serviceConfigsProfiles: [
{
configs: [{
component: "spark-config",
files: [{
fileName: "spark-defaults.conf",
values: {
"spark.eventLog.enabled": "true",
},
}],
}],
serviceName: "spark-service",
},
{
configs: [{
component: "yarn-config",
files: [
{
fileName: "core-site.xml",
values: {
"fs.defaultFS": "wasb://testcontainer@teststorage.dfs.core.windows.net/",
"storage.container": "testcontainer",
"storage.key": "test key",
"storage.name": "teststorage",
"storage.protocol": "wasb",
},
},
{
fileName: "yarn-site.xml",
values: {
"yarn.webapp.ui2.enable": "false",
},
},
],
}],
serviceName: "yarn-service",
},
],
// Empty profile: accept service defaults for the "spark" cluster type.
sparkProfile: {},
sshProfile: {
count: 2,
vmSize: "Standard_D3_v2",
},
},
clusterType: "spark",
// Worker node pool spread across three availability zones.
computeProfile: {
availabilityZones: [
"1",
"2",
"3",
],
nodes: [{
count: 4,
type: "worker",
vmSize: "Standard_D3_v2",
}],
},
location: "West US 2",
resourceGroupName: "hiloResourcegroup",
});
import pulumi
import pulumi_azure_native as azure_native
# Example "HDInsightClusterPut": provisions an HDInsight on AKS cluster
# ("cluster1") inside an existing cluster pool ("clusterpool1").
cluster_pool_cluster = azure_native.hdinsight.ClusterPoolCluster("clusterPoolCluster",
cluster_name="cluster1",
cluster_pool_name="clusterpool1",
cluster_profile={
# AAD user IDs granted access to the cluster.
"authorization_profile": {
"user_ids": [
"testuser1",
"testuser2",
],
},
"cluster_version": "0.0.1",
# User-assigned managed identity attached to the cluster.
"managed_identity_profile": {
"identity_list": [{
"client_id": "de91f1d8-767f-460a-ac11-3cf103f74b34",
"object_id": "40491351-c240-4042-91e0-f644a1d2b441",
"resource_id": "/subscriptions/subid/resourceGroups/hiloResourcegroup/providers/Microsoft.ManagedIdentity/userAssignedIdentities/test-msi",
"type": azure_native.hdinsight.ManagedIdentityType.CLUSTER,
}],
},
"oss_version": "2.2.3",
# Per-service configuration-file overrides (Spark and YARN).
"service_configs_profiles": [
{
"configs": [{
"component": "spark-config",
"files": [{
"file_name": "spark-defaults.conf",
"values": {
"spark.eventLog.enabled": "true",
},
}],
}],
"service_name": "spark-service",
},
{
"configs": [{
"component": "yarn-config",
"files": [
{
"file_name": "core-site.xml",
"values": {
"fs.defaultFS": "wasb://testcontainer@teststorage.dfs.core.windows.net/",
"storage.container": "testcontainer",
"storage.key": "test key",
"storage.name": "teststorage",
"storage.protocol": "wasb",
},
},
{
"file_name": "yarn-site.xml",
"values": {
"yarn.webapp.ui2.enable": "false",
},
},
],
}],
"service_name": "yarn-service",
},
],
# Empty profile: accept service defaults for the "spark" cluster type.
"spark_profile": {},
"ssh_profile": {
"count": 2,
"vm_size": "Standard_D3_v2",
},
},
cluster_type="spark",
# Worker node pool spread across three availability zones.
compute_profile={
"availability_zones": [
"1",
"2",
"3",
],
"nodes": [{
"count": 4,
"type": "worker",
"vm_size": "Standard_D3_v2",
}],
},
location="West US 2",
resource_group_name="hiloResourcegroup")
# Example "HDInsightClusterPut": provisions an HDInsight on AKS Spark cluster
# ("cluster1") inside an existing cluster pool ("clusterpool1").
resources:
clusterPoolCluster:
type: azure-native:hdinsight:ClusterPoolCluster
properties:
clusterName: cluster1
clusterPoolName: clusterpool1
clusterProfile:
# AAD user IDs granted access to the cluster.
authorizationProfile:
userIds:
- testuser1
- testuser2
clusterVersion: 0.0.1
# User-assigned managed identity attached to the cluster.
managedIdentityProfile:
identityList:
- clientId: de91f1d8-767f-460a-ac11-3cf103f74b34
objectId: 40491351-c240-4042-91e0-f644a1d2b441
resourceId: /subscriptions/subid/resourceGroups/hiloResourcegroup/providers/Microsoft.ManagedIdentity/userAssignedIdentities/test-msi
type: cluster
ossVersion: 2.2.3
# Per-service configuration-file overrides (Spark and YARN).
serviceConfigsProfiles:
- configs:
- component: spark-config
files:
- fileName: spark-defaults.conf
values:
spark.eventLog.enabled: 'true'
serviceName: spark-service
- configs:
- component: yarn-config
files:
- fileName: core-site.xml
values:
fs.defaultFS: wasb://testcontainer@teststorage.dfs.core.windows.net/
storage.container: testcontainer
storage.key: test key
storage.name: teststorage
storage.protocol: wasb
- fileName: yarn-site.xml
values:
yarn.webapp.ui2.enable: 'false'
serviceName: yarn-service
# Empty profile: accept service defaults for the "spark" cluster type.
sparkProfile: {}
sshProfile:
count: 2
vmSize: Standard_D3_v2
clusterType: spark
# Worker node pool spread across three availability zones.
computeProfile:
availabilityZones:
- '1'
- '2'
- '3'
nodes:
- count: 4
type: worker
vmSize: Standard_D3_v2
location: West US 2
resourceGroupName: hiloResourcegroup
HDInsightSparkClusterPutWithInternalIngress
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using AzureNative = Pulumi.AzureNative;

// Example "HDInsightSparkClusterPutWithInternalIngress": provisions an
// HDInsight on AKS Spark cluster whose ingress is internal-only
// (EnableInternalIngress = true).
return await Deployment.RunAsync(() =>
{
    var clusterPoolCluster = new AzureNative.HDInsight.ClusterPoolCluster("clusterPoolCluster", new()
    {
        ClusterName = "cluster1",
        ClusterPoolName = "clusterpool1",
        ClusterProfile = new AzureNative.HDInsight.Inputs.ClusterProfileArgs
        {
            // AAD user IDs granted access to the cluster.
            AuthorizationProfile = new AzureNative.HDInsight.Inputs.AuthorizationProfileArgs
            {
                UserIds = new[]
                {
                    "testuser1",
                    "testuser2",
                },
            },
            // Internal ingress: the cluster is reachable only on private endpoints.
            ClusterAccessProfile = new AzureNative.HDInsight.Inputs.ClusterAccessProfileArgs
            {
                EnableInternalIngress = true,
            },
            ClusterVersion = "0.0.1",
            // User-assigned managed identity attached to the cluster.
            ManagedIdentityProfile = new AzureNative.HDInsight.Inputs.ManagedIdentityProfileArgs
            {
                IdentityList = new[]
                {
                    new AzureNative.HDInsight.Inputs.ManagedIdentitySpecArgs
                    {
                        ClientId = "de91f1d8-767f-460a-ac11-3cf103f74b34",
                        ObjectId = "40491351-c240-4042-91e0-f644a1d2b441",
                        ResourceId = "/subscriptions/subid/resourceGroups/hiloResourcegroup/providers/Microsoft.ManagedIdentity/userAssignedIdentities/test-msi",
                        Type = AzureNative.HDInsight.ManagedIdentityType.Cluster,
                    },
                },
            },
            OssVersion = "2.2.3",
            // Per-service configuration-file overrides (Spark and YARN).
            ServiceConfigsProfiles = new[]
            {
                new AzureNative.HDInsight.Inputs.ClusterServiceConfigsProfileArgs
                {
                    Configs = new[]
                    {
                        new AzureNative.HDInsight.Inputs.ClusterServiceConfigArgs
                        {
                            Component = "spark-config",
                            Files = new[]
                            {
                                new AzureNative.HDInsight.Inputs.ClusterConfigFileArgs
                                {
                                    FileName = "spark-defaults.conf",
                                    Values =
                                    {
                                        { "spark.eventLog.enabled", "true" },
                                    },
                                },
                            },
                        },
                    },
                    ServiceName = "spark-service",
                },
                new AzureNative.HDInsight.Inputs.ClusterServiceConfigsProfileArgs
                {
                    Configs = new[]
                    {
                        new AzureNative.HDInsight.Inputs.ClusterServiceConfigArgs
                        {
                            Component = "yarn-config",
                            Files = new[]
                            {
                                new AzureNative.HDInsight.Inputs.ClusterConfigFileArgs
                                {
                                    FileName = "core-site.xml",
                                    Values =
                                    {
                                        { "fs.defaultFS", "wasb://testcontainer@teststorage.dfs.core.windows.net/" },
                                        { "storage.container", "testcontainer" },
                                        { "storage.key", "test key" },
                                        { "storage.name", "teststorage" },
                                        { "storage.protocol", "wasb" },
                                    },
                                },
                                new AzureNative.HDInsight.Inputs.ClusterConfigFileArgs
                                {
                                    FileName = "yarn-site.xml",
                                    Values =
                                    {
                                        { "yarn.webapp.ui2.enable", "false" },
                                    },
                                },
                            },
                        },
                    },
                    ServiceName = "yarn-service",
                },
            },
            // Fix: send an empty SparkProfile, as the Go/Java/TS/Python/YAML
            // variants of this example do; `null` would leave the property
            // out of the request instead of sending an empty object.
            SparkProfile = new AzureNative.HDInsight.Inputs.SparkProfileArgs(),
            SshProfile = new AzureNative.HDInsight.Inputs.ClusterPoolSshProfileArgs
            {
                Count = 2,
                VmSize = "Standard_D3_v2",
            },
        },
        ClusterType = "spark",
        // Worker node pool spread across three availability zones.
        ComputeProfile = new AzureNative.HDInsight.Inputs.ClusterPoolComputeProfileArgs
        {
            AvailabilityZones = new[]
            {
                "1",
                "2",
                "3",
            },
            Nodes = new[]
            {
                new AzureNative.HDInsight.Inputs.NodeProfileArgs
                {
                    Count = 4,
                    Type = "worker",
                    VmSize = "Standard_D3_v2",
                },
            },
        },
        Location = "West US 2",
        ResourceGroupName = "hiloResourcegroup",
    });
});
// Example "HDInsightSparkClusterPutWithInternalIngress": provisions an
// HDInsight on AKS Spark cluster whose ingress is internal-only, using the
// Pulumi Azure Native Go SDK.
package main
import (
hdinsight "github.com/pulumi/pulumi-azure-native-sdk/hdinsight/v3"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
_, err := hdinsight.NewClusterPoolCluster(ctx, "clusterPoolCluster", &hdinsight.ClusterPoolClusterArgs{
ClusterName: pulumi.String("cluster1"),
ClusterPoolName: pulumi.String("clusterpool1"),
ClusterProfile: &hdinsight.ClusterProfileArgs{
// AAD user IDs granted access to the cluster.
AuthorizationProfile: &hdinsight.AuthorizationProfileArgs{
UserIds: pulumi.StringArray{
pulumi.String("testuser1"),
pulumi.String("testuser2"),
},
},
// Internal ingress: the cluster is reachable only on private endpoints.
ClusterAccessProfile: &hdinsight.ClusterAccessProfileArgs{
EnableInternalIngress: pulumi.Bool(true),
},
ClusterVersion: pulumi.String("0.0.1"),
// User-assigned managed identity attached to the cluster.
ManagedIdentityProfile: &hdinsight.ManagedIdentityProfileArgs{
IdentityList: hdinsight.ManagedIdentitySpecArray{
&hdinsight.ManagedIdentitySpecArgs{
ClientId: pulumi.String("de91f1d8-767f-460a-ac11-3cf103f74b34"),
ObjectId: pulumi.String("40491351-c240-4042-91e0-f644a1d2b441"),
ResourceId: pulumi.String("/subscriptions/subid/resourceGroups/hiloResourcegroup/providers/Microsoft.ManagedIdentity/userAssignedIdentities/test-msi"),
Type: pulumi.String(hdinsight.ManagedIdentityTypeCluster),
},
},
},
OssVersion: pulumi.String("2.2.3"),
// Per-service configuration-file overrides (Spark and YARN).
ServiceConfigsProfiles: hdinsight.ClusterServiceConfigsProfileArray{
&hdinsight.ClusterServiceConfigsProfileArgs{
Configs: hdinsight.ClusterServiceConfigArray{
&hdinsight.ClusterServiceConfigArgs{
Component: pulumi.String("spark-config"),
Files: hdinsight.ClusterConfigFileArray{
&hdinsight.ClusterConfigFileArgs{
FileName: pulumi.String("spark-defaults.conf"),
Values: pulumi.StringMap{
"spark.eventLog.enabled": pulumi.String("true"),
},
},
},
},
},
ServiceName: pulumi.String("spark-service"),
},
&hdinsight.ClusterServiceConfigsProfileArgs{
Configs: hdinsight.ClusterServiceConfigArray{
&hdinsight.ClusterServiceConfigArgs{
Component: pulumi.String("yarn-config"),
Files: hdinsight.ClusterConfigFileArray{
&hdinsight.ClusterConfigFileArgs{
FileName: pulumi.String("core-site.xml"),
Values: pulumi.StringMap{
"fs.defaultFS": pulumi.String("wasb://testcontainer@teststorage.dfs.core.windows.net/"),
"storage.container": pulumi.String("testcontainer"),
"storage.key": pulumi.String("test key"),
"storage.name": pulumi.String("teststorage"),
"storage.protocol": pulumi.String("wasb"),
},
},
&hdinsight.ClusterConfigFileArgs{
FileName: pulumi.String("yarn-site.xml"),
Values: pulumi.StringMap{
"yarn.webapp.ui2.enable": pulumi.String("false"),
},
},
},
},
},
ServiceName: pulumi.String("yarn-service"),
},
},
// Empty profile: accept service defaults for the "spark" cluster type.
SparkProfile: &hdinsight.SparkProfileArgs{},
SshProfile: &hdinsight.ClusterPoolSshProfileArgs{
Count: pulumi.Int(2),
VmSize: pulumi.String("Standard_D3_v2"),
},
},
ClusterType: pulumi.String("spark"),
// Worker node pool spread across three availability zones.
ComputeProfile: &hdinsight.ClusterPoolComputeProfileArgs{
AvailabilityZones: pulumi.StringArray{
pulumi.String("1"),
pulumi.String("2"),
pulumi.String("3"),
},
Nodes: hdinsight.NodeProfileArray{
&hdinsight.NodeProfileArgs{
Count: pulumi.Int(4),
Type: pulumi.String("worker"),
VmSize: pulumi.String("Standard_D3_v2"),
},
},
},
Location: pulumi.String("West US 2"),
ResourceGroupName: pulumi.String("hiloResourcegroup"),
})
if err != nil {
return err
}
return nil
})
}
package generated_program;

import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.azurenative.hdinsight.ClusterPoolCluster;
import com.pulumi.azurenative.hdinsight.ClusterPoolClusterArgs;
import com.pulumi.azurenative.hdinsight.inputs.ClusterProfileArgs;
import com.pulumi.azurenative.hdinsight.inputs.AuthorizationProfileArgs;
import com.pulumi.azurenative.hdinsight.inputs.ClusterAccessProfileArgs;
import com.pulumi.azurenative.hdinsight.inputs.ManagedIdentityProfileArgs;
import com.pulumi.azurenative.hdinsight.inputs.ManagedIdentitySpecArgs;
import com.pulumi.azurenative.hdinsight.inputs.ClusterServiceConfigsProfileArgs;
import com.pulumi.azurenative.hdinsight.inputs.ClusterServiceConfigArgs;
import com.pulumi.azurenative.hdinsight.inputs.ClusterConfigFileArgs;
import com.pulumi.azurenative.hdinsight.inputs.SparkProfileArgs;
import com.pulumi.azurenative.hdinsight.inputs.ClusterPoolSshProfileArgs;
import com.pulumi.azurenative.hdinsight.inputs.ClusterPoolComputeProfileArgs;
import com.pulumi.azurenative.hdinsight.inputs.NodeProfileArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;

/**
 * Example "HDInsightSparkClusterPutWithInternalIngress": provisions an
 * HDInsight on AKS Spark cluster whose ingress is internal-only.
 *
 * Fix: the example used ManagedIdentitySpecArgs, ClusterServiceConfigsProfileArgs,
 * ClusterServiceConfigArgs, ClusterConfigFileArgs and NodeProfileArgs without
 * importing them; the missing imports were added so the program compiles.
 */
public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        var clusterPoolCluster = new ClusterPoolCluster("clusterPoolCluster", ClusterPoolClusterArgs.builder()
            .clusterName("cluster1")
            .clusterPoolName("clusterpool1")
            .clusterProfile(ClusterProfileArgs.builder()
                // AAD user IDs granted access to the cluster.
                .authorizationProfile(AuthorizationProfileArgs.builder()
                    .userIds(
                        "testuser1",
                        "testuser2")
                    .build())
                // Internal ingress: the cluster is reachable only on private endpoints.
                .clusterAccessProfile(ClusterAccessProfileArgs.builder()
                    .enableInternalIngress(true)
                    .build())
                .clusterVersion("0.0.1")
                // User-assigned managed identity attached to the cluster.
                .managedIdentityProfile(ManagedIdentityProfileArgs.builder()
                    .identityList(ManagedIdentitySpecArgs.builder()
                        .clientId("de91f1d8-767f-460a-ac11-3cf103f74b34")
                        .objectId("40491351-c240-4042-91e0-f644a1d2b441")
                        .resourceId("/subscriptions/subid/resourceGroups/hiloResourcegroup/providers/Microsoft.ManagedIdentity/userAssignedIdentities/test-msi")
                        .type("cluster")
                        .build())
                    .build())
                .ossVersion("2.2.3")
                // Per-service configuration-file overrides (Spark and YARN).
                .serviceConfigsProfiles(
                    ClusterServiceConfigsProfileArgs.builder()
                        .configs(ClusterServiceConfigArgs.builder()
                            .component("spark-config")
                            .files(ClusterConfigFileArgs.builder()
                                .fileName("spark-defaults.conf")
                                .values(Map.of("spark.eventLog.enabled", "true"))
                                .build())
                            .build())
                        .serviceName("spark-service")
                        .build(),
                    ClusterServiceConfigsProfileArgs.builder()
                        .configs(ClusterServiceConfigArgs.builder()
                            .component("yarn-config")
                            .files(
                                ClusterConfigFileArgs.builder()
                                    .fileName("core-site.xml")
                                    .values(Map.ofEntries(
                                        Map.entry("fs.defaultFS", "wasb://testcontainer@teststorage.dfs.core.windows.net/"),
                                        Map.entry("storage.container", "testcontainer"),
                                        Map.entry("storage.key", "test key"),
                                        Map.entry("storage.name", "teststorage"),
                                        Map.entry("storage.protocol", "wasb")
                                    ))
                                    .build(),
                                ClusterConfigFileArgs.builder()
                                    .fileName("yarn-site.xml")
                                    .values(Map.of("yarn.webapp.ui2.enable", "false"))
                                    .build())
                            .build())
                        .serviceName("yarn-service")
                        .build())
                // Empty profile: accept service defaults for the "spark" cluster type.
                .sparkProfile(SparkProfileArgs.builder()
                    .build())
                .sshProfile(ClusterPoolSshProfileArgs.builder()
                    .count(2)
                    .vmSize("Standard_D3_v2")
                    .build())
                .build())
            .clusterType("spark")
            // Worker node pool spread across three availability zones.
            .computeProfile(ClusterPoolComputeProfileArgs.builder()
                .availabilityZones(
                    "1",
                    "2",
                    "3")
                .nodes(NodeProfileArgs.builder()
                    .count(4)
                    .type("worker")
                    .vmSize("Standard_D3_v2")
                    .build())
                .build())
            .location("West US 2")
            .resourceGroupName("hiloResourcegroup")
            .build());
    }
}
import * as pulumi from "@pulumi/pulumi";
import * as azure_native from "@pulumi/azure-native";
// Example "HDInsightSparkClusterPutWithInternalIngress": provisions an
// HDInsight on AKS Spark cluster whose ingress is internal-only.
const clusterPoolCluster = new azure_native.hdinsight.ClusterPoolCluster("clusterPoolCluster", {
clusterName: "cluster1",
clusterPoolName: "clusterpool1",
clusterProfile: {
// AAD user IDs granted access to the cluster.
authorizationProfile: {
userIds: [
"testuser1",
"testuser2",
],
},
// Internal ingress: the cluster is reachable only on private endpoints.
clusterAccessProfile: {
enableInternalIngress: true,
},
clusterVersion: "0.0.1",
// User-assigned managed identity attached to the cluster.
managedIdentityProfile: {
identityList: [{
clientId: "de91f1d8-767f-460a-ac11-3cf103f74b34",
objectId: "40491351-c240-4042-91e0-f644a1d2b441",
resourceId: "/subscriptions/subid/resourceGroups/hiloResourcegroup/providers/Microsoft.ManagedIdentity/userAssignedIdentities/test-msi",
type: azure_native.hdinsight.ManagedIdentityType.Cluster,
}],
},
ossVersion: "2.2.3",
// Per-service configuration-file overrides (Spark and YARN).
serviceConfigsProfiles: [
{
configs: [{
component: "spark-config",
files: [{
fileName: "spark-defaults.conf",
values: {
"spark.eventLog.enabled": "true",
},
}],
}],
serviceName: "spark-service",
},
{
configs: [{
component: "yarn-config",
files: [
{
fileName: "core-site.xml",
values: {
"fs.defaultFS": "wasb://testcontainer@teststorage.dfs.core.windows.net/",
"storage.container": "testcontainer",
"storage.key": "test key",
"storage.name": "teststorage",
"storage.protocol": "wasb",
},
},
{
fileName: "yarn-site.xml",
values: {
"yarn.webapp.ui2.enable": "false",
},
},
],
}],
serviceName: "yarn-service",
},
],
// Empty profile: accept service defaults for the "spark" cluster type.
sparkProfile: {},
sshProfile: {
count: 2,
vmSize: "Standard_D3_v2",
},
},
clusterType: "spark",
// Worker node pool spread across three availability zones.
computeProfile: {
availabilityZones: [
"1",
"2",
"3",
],
nodes: [{
count: 4,
type: "worker",
vmSize: "Standard_D3_v2",
}],
},
location: "West US 2",
resourceGroupName: "hiloResourcegroup",
});
import pulumi
import pulumi_azure_native as azure_native
# Example "HDInsightSparkClusterPutWithInternalIngress": provisions an
# HDInsight on AKS Spark cluster whose ingress is internal-only.
cluster_pool_cluster = azure_native.hdinsight.ClusterPoolCluster("clusterPoolCluster",
cluster_name="cluster1",
cluster_pool_name="clusterpool1",
cluster_profile={
# AAD user IDs granted access to the cluster.
"authorization_profile": {
"user_ids": [
"testuser1",
"testuser2",
],
},
# Internal ingress: the cluster is reachable only on private endpoints.
"cluster_access_profile": {
"enable_internal_ingress": True,
},
"cluster_version": "0.0.1",
# User-assigned managed identity attached to the cluster.
"managed_identity_profile": {
"identity_list": [{
"client_id": "de91f1d8-767f-460a-ac11-3cf103f74b34",
"object_id": "40491351-c240-4042-91e0-f644a1d2b441",
"resource_id": "/subscriptions/subid/resourceGroups/hiloResourcegroup/providers/Microsoft.ManagedIdentity/userAssignedIdentities/test-msi",
"type": azure_native.hdinsight.ManagedIdentityType.CLUSTER,
}],
},
"oss_version": "2.2.3",
# Per-service configuration-file overrides (Spark and YARN).
"service_configs_profiles": [
{
"configs": [{
"component": "spark-config",
"files": [{
"file_name": "spark-defaults.conf",
"values": {
"spark.eventLog.enabled": "true",
},
}],
}],
"service_name": "spark-service",
},
{
"configs": [{
"component": "yarn-config",
"files": [
{
"file_name": "core-site.xml",
"values": {
"fs.defaultFS": "wasb://testcontainer@teststorage.dfs.core.windows.net/",
"storage.container": "testcontainer",
"storage.key": "test key",
"storage.name": "teststorage",
"storage.protocol": "wasb",
},
},
{
"file_name": "yarn-site.xml",
"values": {
"yarn.webapp.ui2.enable": "false",
},
},
],
}],
"service_name": "yarn-service",
},
],
# Empty profile: accept service defaults for the "spark" cluster type.
"spark_profile": {},
"ssh_profile": {
"count": 2,
"vm_size": "Standard_D3_v2",
},
},
cluster_type="spark",
# Worker node pool spread across three availability zones.
compute_profile={
"availability_zones": [
"1",
"2",
"3",
],
"nodes": [{
"count": 4,
"type": "worker",
"vm_size": "Standard_D3_v2",
}],
},
location="West US 2",
resource_group_name="hiloResourcegroup")
# Example "HDInsightSparkClusterPutWithInternalIngress": provisions an
# HDInsight on AKS Spark cluster whose ingress is internal-only.
resources:
clusterPoolCluster:
type: azure-native:hdinsight:ClusterPoolCluster
properties:
clusterName: cluster1
clusterPoolName: clusterpool1
clusterProfile:
# AAD user IDs granted access to the cluster.
authorizationProfile:
userIds:
- testuser1
- testuser2
# Internal ingress: the cluster is reachable only on private endpoints.
clusterAccessProfile:
enableInternalIngress: true
clusterVersion: 0.0.1
# User-assigned managed identity attached to the cluster.
managedIdentityProfile:
identityList:
- clientId: de91f1d8-767f-460a-ac11-3cf103f74b34
objectId: 40491351-c240-4042-91e0-f644a1d2b441
resourceId: /subscriptions/subid/resourceGroups/hiloResourcegroup/providers/Microsoft.ManagedIdentity/userAssignedIdentities/test-msi
type: cluster
ossVersion: 2.2.3
# Per-service configuration-file overrides (Spark and YARN).
serviceConfigsProfiles:
- configs:
- component: spark-config
files:
- fileName: spark-defaults.conf
values:
spark.eventLog.enabled: 'true'
serviceName: spark-service
- configs:
- component: yarn-config
files:
- fileName: core-site.xml
values:
fs.defaultFS: wasb://testcontainer@teststorage.dfs.core.windows.net/
storage.container: testcontainer
storage.key: test key
storage.name: teststorage
storage.protocol: wasb
- fileName: yarn-site.xml
values:
yarn.webapp.ui2.enable: 'false'
serviceName: yarn-service
# Empty profile: accept service defaults for the "spark" cluster type.
sparkProfile: {}
sshProfile:
count: 2
vmSize: Standard_D3_v2
clusterType: spark
# Worker node pool spread across three availability zones.
computeProfile:
availabilityZones:
- '1'
- '2'
- '3'
nodes:
- count: 4
type: worker
vmSize: Standard_D3_v2
location: West US 2
resourceGroupName: hiloResourcegroup
Create ClusterPoolCluster Resource
Resources are created with functions called constructors. To learn more about declaring and configuring resources, see Resources.
Constructor syntax
new ClusterPoolCluster(name: string, args: ClusterPoolClusterArgs, opts?: CustomResourceOptions);
@overload
def ClusterPoolCluster(resource_name: str,
args: ClusterPoolClusterArgs,
opts: Optional[ResourceOptions] = None)
@overload
def ClusterPoolCluster(resource_name: str,
opts: Optional[ResourceOptions] = None,
cluster_pool_name: Optional[str] = None,
cluster_profile: Optional[ClusterProfileArgs] = None,
cluster_type: Optional[str] = None,
compute_profile: Optional[ClusterPoolComputeProfileArgs] = None,
resource_group_name: Optional[str] = None,
cluster_name: Optional[str] = None,
location: Optional[str] = None,
tags: Optional[Mapping[str, str]] = None)
func NewClusterPoolCluster(ctx *Context, name string, args ClusterPoolClusterArgs, opts ...ResourceOption) (*ClusterPoolCluster, error)
public ClusterPoolCluster(string name, ClusterPoolClusterArgs args, CustomResourceOptions? opts = null)
public ClusterPoolCluster(String name, ClusterPoolClusterArgs args)
public ClusterPoolCluster(String name, ClusterPoolClusterArgs args, CustomResourceOptions options)
type: azure-native:hdinsight:ClusterPoolCluster
properties: # The arguments to resource properties.
options: # Bag of options to control resource's behavior.
Parameters
- name string
- The unique name of the resource.
- args ClusterPoolClusterArgs
- The arguments to resource properties.
- opts CustomResourceOptions
- Bag of options to control resource's behavior.
- resource_name str
- The unique name of the resource.
- args ClusterPoolClusterArgs
- The arguments to resource properties.
- opts ResourceOptions
- Bag of options to control resource's behavior.
- ctx Context
- Context object for the current deployment.
- name string
- The unique name of the resource.
- args ClusterPoolClusterArgs
- The arguments to resource properties.
- opts ResourceOption
- Bag of options to control resource's behavior.
- name string
- The unique name of the resource.
- args ClusterPoolClusterArgs
- The arguments to resource properties.
- opts CustomResourceOptions
- Bag of options to control resource's behavior.
- name String
- The unique name of the resource.
- args ClusterPoolClusterArgs
- The arguments to resource properties.
- options CustomResourceOptions
- Bag of options to control resource's behavior.
Constructor example
The following reference example uses placeholder values for all input properties.
var clusterPoolClusterResource = new AzureNative.HDInsight.ClusterPoolCluster("clusterPoolClusterResource", new()
{
ClusterPoolName = "string",
ClusterProfile = new AzureNative.HDInsight.Inputs.ClusterProfileArgs
{
ClusterVersion = "string",
OssVersion = "string",
AuthorizationProfile = new AzureNative.HDInsight.Inputs.AuthorizationProfileArgs
{
GroupIds = new[]
{
"string",
},
UserIds = new[]
{
"string",
},
},
IdentityProfile = new AzureNative.HDInsight.Inputs.IdentityProfileArgs
{
MsiClientId = "string",
MsiObjectId = "string",
MsiResourceId = "string",
},
RangerPluginProfile = new AzureNative.HDInsight.Inputs.ClusterRangerPluginProfileArgs
{
Enabled = false,
},
ClusterAccessProfile = new AzureNative.HDInsight.Inputs.ClusterAccessProfileArgs
{
EnableInternalIngress = false,
},
KafkaProfile = new AzureNative.HDInsight.Inputs.KafkaProfileArgs
{
DiskStorage = new AzureNative.HDInsight.Inputs.DiskStorageProfileArgs
{
DataDiskSize = 0,
DataDiskType = "string",
},
EnableKRaft = false,
EnablePublicEndpoints = false,
RemoteStorageUri = "string",
},
LlapProfile = "any",
LogAnalyticsProfile = new AzureNative.HDInsight.Inputs.ClusterLogAnalyticsProfileArgs
{
Enabled = false,
ApplicationLogs = new AzureNative.HDInsight.Inputs.ClusterLogAnalyticsApplicationLogsArgs
{
StdErrorEnabled = false,
StdOutEnabled = false,
},
MetricsEnabled = false,
},
ManagedIdentityProfile = new AzureNative.HDInsight.Inputs.ManagedIdentityProfileArgs
{
IdentityList = new[]
{
new AzureNative.HDInsight.Inputs.ManagedIdentitySpecArgs
{
ClientId = "string",
ObjectId = "string",
ResourceId = "string",
Type = "string",
},
},
},
AutoscaleProfile = new AzureNative.HDInsight.Inputs.AutoscaleProfileArgs
{
Enabled = false,
AutoscaleType = "string",
GracefulDecommissionTimeout = 0,
LoadBasedConfig = new AzureNative.HDInsight.Inputs.LoadBasedConfigArgs
{
MaxNodes = 0,
MinNodes = 0,
ScalingRules = new[]
{
new AzureNative.HDInsight.Inputs.ScalingRuleArgs
{
ActionType = "string",
ComparisonRule = new AzureNative.HDInsight.Inputs.ComparisonRuleArgs
{
Operator = "string",
Threshold = 0,
},
EvaluationCount = 0,
ScalingMetric = "string",
},
},
CooldownPeriod = 0,
PollInterval = 0,
},
ScheduleBasedConfig = new AzureNative.HDInsight.Inputs.ScheduleBasedConfigArgs
{
DefaultCount = 0,
Schedules = new[]
{
new AzureNative.HDInsight.Inputs.ScheduleArgs
{
Count = 0,
Days = new[]
{
"string",
},
EndTime = "string",
StartTime = "string",
},
},
TimeZone = "string",
},
},
PrometheusProfile = new AzureNative.HDInsight.Inputs.ClusterPrometheusProfileArgs
{
Enabled = false,
},
FlinkProfile = new AzureNative.HDInsight.Inputs.FlinkProfileArgs
{
JobManager = new AzureNative.HDInsight.Inputs.ComputeResourceDefinitionArgs
{
Cpu = 0,
Memory = 0,
},
Storage = new AzureNative.HDInsight.Inputs.FlinkStorageProfileArgs
{
StorageUri = "string",
Storagekey = "string",
},
TaskManager = new AzureNative.HDInsight.Inputs.ComputeResourceDefinitionArgs
{
Cpu = 0,
Memory = 0,
},
CatalogOptions = new AzureNative.HDInsight.Inputs.FlinkCatalogOptionsArgs
{
Hive = new AzureNative.HDInsight.Inputs.FlinkHiveCatalogOptionArgs
{
MetastoreDbConnectionURL = "string",
MetastoreDbConnectionAuthenticationMode = "string",
MetastoreDbConnectionPasswordSecret = "string",
MetastoreDbConnectionUserName = "string",
},
},
DeploymentMode = "string",
HistoryServer = new AzureNative.HDInsight.Inputs.ComputeResourceDefinitionArgs
{
Cpu = 0,
Memory = 0,
},
JobSpec = new AzureNative.HDInsight.Inputs.FlinkJobProfileArgs
{
JarName = "string",
JobJarDirectory = "string",
UpgradeMode = "string",
Args = "string",
EntryClass = "string",
SavePointName = "string",
},
NumReplicas = 0,
},
RangerProfile = new AzureNative.HDInsight.Inputs.RangerProfileArgs
{
RangerAdmin = new AzureNative.HDInsight.Inputs.RangerAdminSpecArgs
{
Admins = new[]
{
"string",
},
Database = new AzureNative.HDInsight.Inputs.RangerAdminSpecDatabaseArgs
{
Host = "string",
Name = "string",
PasswordSecretRef = "string",
Username = "string",
},
},
RangerUsersync = new AzureNative.HDInsight.Inputs.RangerUsersyncSpecArgs
{
Enabled = false,
Groups = new[]
{
"string",
},
Mode = "string",
UserMappingLocation = "string",
Users = new[]
{
"string",
},
},
RangerAudit = new AzureNative.HDInsight.Inputs.RangerAuditSpecArgs
{
StorageAccount = "string",
},
},
ScriptActionProfiles = new[]
{
new AzureNative.HDInsight.Inputs.ScriptActionProfileArgs
{
Name = "string",
Services = new[]
{
"string",
},
Type = "string",
Url = "string",
Parameters = "string",
ShouldPersist = false,
TimeoutInMinutes = 0,
},
},
SecretsProfile = new AzureNative.HDInsight.Inputs.SecretsProfileArgs
{
KeyVaultResourceId = "string",
Secrets = new[]
{
new AzureNative.HDInsight.Inputs.SecretReferenceArgs
{
KeyVaultObjectName = "string",
ReferenceName = "string",
Type = "string",
Version = "string",
},
},
},
ServiceConfigsProfiles = new[]
{
new AzureNative.HDInsight.Inputs.ClusterServiceConfigsProfileArgs
{
Configs = new[]
{
new AzureNative.HDInsight.Inputs.ClusterServiceConfigArgs
{
Component = "string",
Files = new[]
{
new AzureNative.HDInsight.Inputs.ClusterConfigFileArgs
{
FileName = "string",
Content = "string",
Encoding = "string",
Path = "string",
Values =
{
{ "string", "string" },
},
},
},
},
},
ServiceName = "string",
},
},
SparkProfile = new AzureNative.HDInsight.Inputs.SparkProfileArgs
{
DefaultStorageUrl = "string",
MetastoreSpec = new AzureNative.HDInsight.Inputs.SparkMetastoreSpecArgs
{
DbName = "string",
DbServerHost = "string",
DbConnectionAuthenticationMode = "string",
DbPasswordSecretName = "string",
DbUserName = "string",
KeyVaultId = "string",
ThriftUrl = "string",
},
UserPluginsSpec = new AzureNative.HDInsight.Inputs.SparkUserPluginsArgs
{
Plugins = new[]
{
new AzureNative.HDInsight.Inputs.SparkUserPluginArgs
{
Path = "string",
},
},
},
},
SshProfile = new AzureNative.HDInsight.Inputs.ClusterPoolSshProfileArgs
{
Count = 0,
VmSize = "string",
},
StubProfile = "any",
TrinoProfile = new AzureNative.HDInsight.Inputs.TrinoProfileArgs
{
CatalogOptions = new AzureNative.HDInsight.Inputs.CatalogOptionsArgs
{
Hive = new[]
{
new AzureNative.HDInsight.Inputs.HiveCatalogOptionArgs
{
CatalogName = "string",
MetastoreDbConnectionURL = "string",
MetastoreWarehouseDir = "string",
MetastoreDbConnectionAuthenticationMode = "string",
MetastoreDbConnectionPasswordSecret = "string",
MetastoreDbConnectionUserName = "string",
},
},
},
Coordinator = new AzureNative.HDInsight.Inputs.TrinoCoordinatorArgs
{
Enable = false,
HighAvailabilityEnabled = false,
Port = 0,
Suspend = false,
},
UserPluginsSpec = new AzureNative.HDInsight.Inputs.TrinoUserPluginsArgs
{
Plugins = new[]
{
new AzureNative.HDInsight.Inputs.TrinoUserPluginArgs
{
Enabled = false,
Name = "string",
Path = "string",
},
},
},
UserTelemetrySpec = new AzureNative.HDInsight.Inputs.TrinoUserTelemetryArgs
{
Storage = new AzureNative.HDInsight.Inputs.TrinoTelemetryConfigArgs
{
HivecatalogName = "string",
HivecatalogSchema = "string",
PartitionRetentionInDays = 0,
Path = "string",
},
},
Worker = new AzureNative.HDInsight.Inputs.TrinoWorkerArgs
{
Enable = false,
Port = 0,
Suspend = false,
},
},
},
ClusterType = "string",
ComputeProfile = new AzureNative.HDInsight.Inputs.ClusterPoolComputeProfileArgs
{
Nodes = new[]
{
new AzureNative.HDInsight.Inputs.NodeProfileArgs
{
Count = 0,
Type = "string",
VmSize = "string",
},
},
AvailabilityZones = new[]
{
"string",
},
},
ResourceGroupName = "string",
ClusterName = "string",
Location = "string",
Tags =
{
{ "string", "string" },
},
});
example, err := hdinsight.NewClusterPoolCluster(ctx, "clusterPoolClusterResource", &hdinsight.ClusterPoolClusterArgs{
ClusterPoolName: pulumi.String("string"),
ClusterProfile: &hdinsight.ClusterProfileArgs{
ClusterVersion: pulumi.String("string"),
OssVersion: pulumi.String("string"),
AuthorizationProfile: &hdinsight.AuthorizationProfileArgs{
GroupIds: pulumi.StringArray{
pulumi.String("string"),
},
UserIds: pulumi.StringArray{
pulumi.String("string"),
},
},
IdentityProfile: &hdinsight.IdentityProfileArgs{
MsiClientId: pulumi.String("string"),
MsiObjectId: pulumi.String("string"),
MsiResourceId: pulumi.String("string"),
},
RangerPluginProfile: &hdinsight.ClusterRangerPluginProfileArgs{
Enabled: pulumi.Bool(false),
},
ClusterAccessProfile: &hdinsight.ClusterAccessProfileArgs{
EnableInternalIngress: pulumi.Bool(false),
},
KafkaProfile: &hdinsight.KafkaProfileArgs{
DiskStorage: &hdinsight.DiskStorageProfileArgs{
DataDiskSize: pulumi.Int(0),
DataDiskType: pulumi.String("string"),
},
EnableKRaft: pulumi.Bool(false),
EnablePublicEndpoints: pulumi.Bool(false),
RemoteStorageUri: pulumi.String("string"),
},
LlapProfile: pulumi.Any("any"),
LogAnalyticsProfile: &hdinsight.ClusterLogAnalyticsProfileArgs{
Enabled: pulumi.Bool(false),
ApplicationLogs: &hdinsight.ClusterLogAnalyticsApplicationLogsArgs{
StdErrorEnabled: pulumi.Bool(false),
StdOutEnabled: pulumi.Bool(false),
},
MetricsEnabled: pulumi.Bool(false),
},
ManagedIdentityProfile: &hdinsight.ManagedIdentityProfileArgs{
IdentityList: hdinsight.ManagedIdentitySpecArray{
&hdinsight.ManagedIdentitySpecArgs{
ClientId: pulumi.String("string"),
ObjectId: pulumi.String("string"),
ResourceId: pulumi.String("string"),
Type: pulumi.String("string"),
},
},
},
AutoscaleProfile: &hdinsight.AutoscaleProfileArgs{
Enabled: pulumi.Bool(false),
AutoscaleType: pulumi.String("string"),
GracefulDecommissionTimeout: pulumi.Int(0),
LoadBasedConfig: &hdinsight.LoadBasedConfigArgs{
MaxNodes: pulumi.Int(0),
MinNodes: pulumi.Int(0),
ScalingRules: hdinsight.ScalingRuleArray{
&hdinsight.ScalingRuleArgs{
ActionType: pulumi.String("string"),
ComparisonRule: &hdinsight.ComparisonRuleArgs{
Operator: pulumi.String("string"),
Threshold: pulumi.Float64(0),
},
EvaluationCount: pulumi.Int(0),
ScalingMetric: pulumi.String("string"),
},
},
CooldownPeriod: pulumi.Int(0),
PollInterval: pulumi.Int(0),
},
ScheduleBasedConfig: &hdinsight.ScheduleBasedConfigArgs{
DefaultCount: pulumi.Int(0),
Schedules: hdinsight.ScheduleArray{
&hdinsight.ScheduleArgs{
Count: pulumi.Int(0),
Days: pulumi.StringArray{
pulumi.String("string"),
},
EndTime: pulumi.String("string"),
StartTime: pulumi.String("string"),
},
},
TimeZone: pulumi.String("string"),
},
},
PrometheusProfile: &hdinsight.ClusterPrometheusProfileArgs{
Enabled: pulumi.Bool(false),
},
FlinkProfile: &hdinsight.FlinkProfileArgs{
JobManager: &hdinsight.ComputeResourceDefinitionArgs{
Cpu: pulumi.Float64(0),
Memory: pulumi.Float64(0),
},
Storage: &hdinsight.FlinkStorageProfileArgs{
StorageUri: pulumi.String("string"),
Storagekey: pulumi.String("string"),
},
TaskManager: &hdinsight.ComputeResourceDefinitionArgs{
Cpu: pulumi.Float64(0),
Memory: pulumi.Float64(0),
},
CatalogOptions: &hdinsight.FlinkCatalogOptionsArgs{
Hive: &hdinsight.FlinkHiveCatalogOptionArgs{
MetastoreDbConnectionURL: pulumi.String("string"),
MetastoreDbConnectionAuthenticationMode: pulumi.String("string"),
MetastoreDbConnectionPasswordSecret: pulumi.String("string"),
MetastoreDbConnectionUserName: pulumi.String("string"),
},
},
DeploymentMode: pulumi.String("string"),
HistoryServer: &hdinsight.ComputeResourceDefinitionArgs{
Cpu: pulumi.Float64(0),
Memory: pulumi.Float64(0),
},
JobSpec: &hdinsight.FlinkJobProfileArgs{
JarName: pulumi.String("string"),
JobJarDirectory: pulumi.String("string"),
UpgradeMode: pulumi.String("string"),
Args: pulumi.String("string"),
EntryClass: pulumi.String("string"),
SavePointName: pulumi.String("string"),
},
NumReplicas: pulumi.Int(0),
},
RangerProfile: &hdinsight.RangerProfileArgs{
RangerAdmin: &hdinsight.RangerAdminSpecArgs{
Admins: pulumi.StringArray{
pulumi.String("string"),
},
Database: &hdinsight.RangerAdminSpecDatabaseArgs{
Host: pulumi.String("string"),
Name: pulumi.String("string"),
PasswordSecretRef: pulumi.String("string"),
Username: pulumi.String("string"),
},
},
RangerUsersync: &hdinsight.RangerUsersyncSpecArgs{
Enabled: pulumi.Bool(false),
Groups: pulumi.StringArray{
pulumi.String("string"),
},
Mode: pulumi.String("string"),
UserMappingLocation: pulumi.String("string"),
Users: pulumi.StringArray{
pulumi.String("string"),
},
},
RangerAudit: &hdinsight.RangerAuditSpecArgs{
StorageAccount: pulumi.String("string"),
},
},
ScriptActionProfiles: hdinsight.ScriptActionProfileArray{
&hdinsight.ScriptActionProfileArgs{
Name: pulumi.String("string"),
Services: pulumi.StringArray{
pulumi.String("string"),
},
Type: pulumi.String("string"),
Url: pulumi.String("string"),
Parameters: pulumi.String("string"),
ShouldPersist: pulumi.Bool(false),
TimeoutInMinutes: pulumi.Int(0),
},
},
SecretsProfile: &hdinsight.SecretsProfileArgs{
KeyVaultResourceId: pulumi.String("string"),
Secrets: hdinsight.SecretReferenceArray{
&hdinsight.SecretReferenceArgs{
KeyVaultObjectName: pulumi.String("string"),
ReferenceName: pulumi.String("string"),
Type: pulumi.String("string"),
Version: pulumi.String("string"),
},
},
},
ServiceConfigsProfiles: hdinsight.ClusterServiceConfigsProfileArray{
&hdinsight.ClusterServiceConfigsProfileArgs{
Configs: hdinsight.ClusterServiceConfigArray{
&hdinsight.ClusterServiceConfigArgs{
Component: pulumi.String("string"),
Files: hdinsight.ClusterConfigFileArray{
&hdinsight.ClusterConfigFileArgs{
FileName: pulumi.String("string"),
Content: pulumi.String("string"),
Encoding: pulumi.String("string"),
Path: pulumi.String("string"),
Values: pulumi.StringMap{
"string": pulumi.String("string"),
},
},
},
},
},
ServiceName: pulumi.String("string"),
},
},
SparkProfile: &hdinsight.SparkProfileArgs{
DefaultStorageUrl: pulumi.String("string"),
MetastoreSpec: &hdinsight.SparkMetastoreSpecArgs{
DbName: pulumi.String("string"),
DbServerHost: pulumi.String("string"),
DbConnectionAuthenticationMode: pulumi.String("string"),
DbPasswordSecretName: pulumi.String("string"),
DbUserName: pulumi.String("string"),
KeyVaultId: pulumi.String("string"),
ThriftUrl: pulumi.String("string"),
},
UserPluginsSpec: &hdinsight.SparkUserPluginsArgs{
Plugins: hdinsight.SparkUserPluginArray{
&hdinsight.SparkUserPluginArgs{
Path: pulumi.String("string"),
},
},
},
},
SshProfile: &hdinsight.ClusterPoolSshProfileArgs{
Count: pulumi.Int(0),
VmSize: pulumi.String("string"),
},
StubProfile: pulumi.Any("any"),
TrinoProfile: &hdinsight.TrinoProfileArgs{
CatalogOptions: &hdinsight.CatalogOptionsArgs{
Hive: hdinsight.HiveCatalogOptionArray{
&hdinsight.HiveCatalogOptionArgs{
CatalogName: pulumi.String("string"),
MetastoreDbConnectionURL: pulumi.String("string"),
MetastoreWarehouseDir: pulumi.String("string"),
MetastoreDbConnectionAuthenticationMode: pulumi.String("string"),
MetastoreDbConnectionPasswordSecret: pulumi.String("string"),
MetastoreDbConnectionUserName: pulumi.String("string"),
},
},
},
Coordinator: &hdinsight.TrinoCoordinatorArgs{
Enable: pulumi.Bool(false),
HighAvailabilityEnabled: pulumi.Bool(false),
Port: pulumi.Int(0),
Suspend: pulumi.Bool(false),
},
UserPluginsSpec: &hdinsight.TrinoUserPluginsArgs{
Plugins: hdinsight.TrinoUserPluginArray{
&hdinsight.TrinoUserPluginArgs{
Enabled: pulumi.Bool(false),
Name: pulumi.String("string"),
Path: pulumi.String("string"),
},
},
},
UserTelemetrySpec: &hdinsight.TrinoUserTelemetryArgs{
Storage: &hdinsight.TrinoTelemetryConfigArgs{
HivecatalogName: pulumi.String("string"),
HivecatalogSchema: pulumi.String("string"),
PartitionRetentionInDays: pulumi.Int(0),
Path: pulumi.String("string"),
},
},
Worker: &hdinsight.TrinoWorkerArgs{
Enable: pulumi.Bool(false),
Port: pulumi.Int(0),
Suspend: pulumi.Bool(false),
},
},
},
ClusterType: pulumi.String("string"),
ComputeProfile: &hdinsight.ClusterPoolComputeProfileArgs{
Nodes: hdinsight.NodeProfileArray{
&hdinsight.NodeProfileArgs{
Count: pulumi.Int(0),
Type: pulumi.String("string"),
VmSize: pulumi.String("string"),
},
},
AvailabilityZones: pulumi.StringArray{
pulumi.String("string"),
},
},
ResourceGroupName: pulumi.String("string"),
ClusterName: pulumi.String("string"),
Location: pulumi.String("string"),
Tags: pulumi.StringMap{
"string": pulumi.String("string"),
},
})
var clusterPoolClusterResource = new ClusterPoolCluster("clusterPoolClusterResource", ClusterPoolClusterArgs.builder()
.clusterPoolName("string")
.clusterProfile(ClusterProfileArgs.builder()
.clusterVersion("string")
.ossVersion("string")
.authorizationProfile(AuthorizationProfileArgs.builder()
.groupIds("string")
.userIds("string")
.build())
.identityProfile(IdentityProfileArgs.builder()
.msiClientId("string")
.msiObjectId("string")
.msiResourceId("string")
.build())
.rangerPluginProfile(ClusterRangerPluginProfileArgs.builder()
.enabled(false)
.build())
.clusterAccessProfile(ClusterAccessProfileArgs.builder()
.enableInternalIngress(false)
.build())
.kafkaProfile(KafkaProfileArgs.builder()
.diskStorage(DiskStorageProfileArgs.builder()
.dataDiskSize(0)
.dataDiskType("string")
.build())
.enableKRaft(false)
.enablePublicEndpoints(false)
.remoteStorageUri("string")
.build())
.llapProfile("any")
.logAnalyticsProfile(ClusterLogAnalyticsProfileArgs.builder()
.enabled(false)
.applicationLogs(ClusterLogAnalyticsApplicationLogsArgs.builder()
.stdErrorEnabled(false)
.stdOutEnabled(false)
.build())
.metricsEnabled(false)
.build())
.managedIdentityProfile(ManagedIdentityProfileArgs.builder()
.identityList(ManagedIdentitySpecArgs.builder()
.clientId("string")
.objectId("string")
.resourceId("string")
.type("string")
.build())
.build())
.autoscaleProfile(AutoscaleProfileArgs.builder()
.enabled(false)
.autoscaleType("string")
.gracefulDecommissionTimeout(0)
.loadBasedConfig(LoadBasedConfigArgs.builder()
.maxNodes(0)
.minNodes(0)
.scalingRules(ScalingRuleArgs.builder()
.actionType("string")
.comparisonRule(ComparisonRuleArgs.builder()
.operator("string")
.threshold(0)
.build())
.evaluationCount(0)
.scalingMetric("string")
.build())
.cooldownPeriod(0)
.pollInterval(0)
.build())
.scheduleBasedConfig(ScheduleBasedConfigArgs.builder()
.defaultCount(0)
.schedules(ScheduleArgs.builder()
.count(0)
.days("string")
.endTime("string")
.startTime("string")
.build())
.timeZone("string")
.build())
.build())
.prometheusProfile(ClusterPrometheusProfileArgs.builder()
.enabled(false)
.build())
.flinkProfile(FlinkProfileArgs.builder()
.jobManager(ComputeResourceDefinitionArgs.builder()
.cpu(0)
.memory(0)
.build())
.storage(FlinkStorageProfileArgs.builder()
.storageUri("string")
.storagekey("string")
.build())
.taskManager(ComputeResourceDefinitionArgs.builder()
.cpu(0)
.memory(0)
.build())
.catalogOptions(FlinkCatalogOptionsArgs.builder()
.hive(FlinkHiveCatalogOptionArgs.builder()
.metastoreDbConnectionURL("string")
.metastoreDbConnectionAuthenticationMode("string")
.metastoreDbConnectionPasswordSecret("string")
.metastoreDbConnectionUserName("string")
.build())
.build())
.deploymentMode("string")
.historyServer(ComputeResourceDefinitionArgs.builder()
.cpu(0)
.memory(0)
.build())
.jobSpec(FlinkJobProfileArgs.builder()
.jarName("string")
.jobJarDirectory("string")
.upgradeMode("string")
.args("string")
.entryClass("string")
.savePointName("string")
.build())
.numReplicas(0)
.build())
.rangerProfile(RangerProfileArgs.builder()
.rangerAdmin(RangerAdminSpecArgs.builder()
.admins("string")
.database(RangerAdminSpecDatabaseArgs.builder()
.host("string")
.name("string")
.passwordSecretRef("string")
.username("string")
.build())
.build())
.rangerUsersync(RangerUsersyncSpecArgs.builder()
.enabled(false)
.groups("string")
.mode("string")
.userMappingLocation("string")
.users("string")
.build())
.rangerAudit(RangerAuditSpecArgs.builder()
.storageAccount("string")
.build())
.build())
.scriptActionProfiles(ScriptActionProfileArgs.builder()
.name("string")
.services("string")
.type("string")
.url("string")
.parameters("string")
.shouldPersist(false)
.timeoutInMinutes(0)
.build())
.secretsProfile(SecretsProfileArgs.builder()
.keyVaultResourceId("string")
.secrets(SecretReferenceArgs.builder()
.keyVaultObjectName("string")
.referenceName("string")
.type("string")
.version("string")
.build())
.build())
.serviceConfigsProfiles(ClusterServiceConfigsProfileArgs.builder()
.configs(ClusterServiceConfigArgs.builder()
.component("string")
.files(ClusterConfigFileArgs.builder()
.fileName("string")
.content("string")
.encoding("string")
.path("string")
.values(Map.of("string", "string"))
.build())
.build())
.serviceName("string")
.build())
.sparkProfile(SparkProfileArgs.builder()
.defaultStorageUrl("string")
.metastoreSpec(SparkMetastoreSpecArgs.builder()
.dbName("string")
.dbServerHost("string")
.dbConnectionAuthenticationMode("string")
.dbPasswordSecretName("string")
.dbUserName("string")
.keyVaultId("string")
.thriftUrl("string")
.build())
.userPluginsSpec(SparkUserPluginsArgs.builder()
.plugins(SparkUserPluginArgs.builder()
.path("string")
.build())
.build())
.build())
.sshProfile(ClusterPoolSshProfileArgs.builder()
.count(0)
.vmSize("string")
.build())
.stubProfile("any")
.trinoProfile(TrinoProfileArgs.builder()
.catalogOptions(CatalogOptionsArgs.builder()
.hive(HiveCatalogOptionArgs.builder()
.catalogName("string")
.metastoreDbConnectionURL("string")
.metastoreWarehouseDir("string")
.metastoreDbConnectionAuthenticationMode("string")
.metastoreDbConnectionPasswordSecret("string")
.metastoreDbConnectionUserName("string")
.build())
.build())
.coordinator(TrinoCoordinatorArgs.builder()
.enable(false)
.highAvailabilityEnabled(false)
.port(0)
.suspend(false)
.build())
.userPluginsSpec(TrinoUserPluginsArgs.builder()
.plugins(TrinoUserPluginArgs.builder()
.enabled(false)
.name("string")
.path("string")
.build())
.build())
.userTelemetrySpec(TrinoUserTelemetryArgs.builder()
.storage(TrinoTelemetryConfigArgs.builder()
.hivecatalogName("string")
.hivecatalogSchema("string")
.partitionRetentionInDays(0)
.path("string")
.build())
.build())
.worker(TrinoWorkerArgs.builder()
.enable(false)
.port(0)
.suspend(false)
.build())
.build())
.build())
.clusterType("string")
.computeProfile(ClusterPoolComputeProfileArgs.builder()
.nodes(NodeProfileArgs.builder()
.count(0)
.type("string")
.vmSize("string")
.build())
.availabilityZones("string")
.build())
.resourceGroupName("string")
.clusterName("string")
.location("string")
.tags(Map.of("string", "string"))
.build());
cluster_pool_cluster_resource = azure_native.hdinsight.ClusterPoolCluster("clusterPoolClusterResource",
cluster_pool_name="string",
cluster_profile={
"cluster_version": "string",
"oss_version": "string",
"authorization_profile": {
"group_ids": ["string"],
"user_ids": ["string"],
},
"identity_profile": {
"msi_client_id": "string",
"msi_object_id": "string",
"msi_resource_id": "string",
},
"ranger_plugin_profile": {
"enabled": False,
},
"cluster_access_profile": {
"enable_internal_ingress": False,
},
"kafka_profile": {
"disk_storage": {
"data_disk_size": 0,
"data_disk_type": "string",
},
"enable_k_raft": False,
"enable_public_endpoints": False,
"remote_storage_uri": "string",
},
"llap_profile": "any",
"log_analytics_profile": {
"enabled": False,
"application_logs": {
"std_error_enabled": False,
"std_out_enabled": False,
},
"metrics_enabled": False,
},
"managed_identity_profile": {
"identity_list": [{
"client_id": "string",
"object_id": "string",
"resource_id": "string",
"type": "string",
}],
},
"autoscale_profile": {
"enabled": False,
"autoscale_type": "string",
"graceful_decommission_timeout": 0,
"load_based_config": {
"max_nodes": 0,
"min_nodes": 0,
"scaling_rules": [{
"action_type": "string",
"comparison_rule": {
"operator": "string",
"threshold": 0,
},
"evaluation_count": 0,
"scaling_metric": "string",
}],
"cooldown_period": 0,
"poll_interval": 0,
},
"schedule_based_config": {
"default_count": 0,
"schedules": [{
"count": 0,
"days": ["string"],
"end_time": "string",
"start_time": "string",
}],
"time_zone": "string",
},
},
"prometheus_profile": {
"enabled": False,
},
"flink_profile": {
"job_manager": {
"cpu": 0,
"memory": 0,
},
"storage": {
"storage_uri": "string",
"storagekey": "string",
},
"task_manager": {
"cpu": 0,
"memory": 0,
},
"catalog_options": {
"hive": {
"metastore_db_connection_url": "string",
"metastore_db_connection_authentication_mode": "string",
"metastore_db_connection_password_secret": "string",
"metastore_db_connection_user_name": "string",
},
},
"deployment_mode": "string",
"history_server": {
"cpu": 0,
"memory": 0,
},
"job_spec": {
"jar_name": "string",
"job_jar_directory": "string",
"upgrade_mode": "string",
"args": "string",
"entry_class": "string",
"save_point_name": "string",
},
"num_replicas": 0,
},
"ranger_profile": {
"ranger_admin": {
"admins": ["string"],
"database": {
"host": "string",
"name": "string",
"password_secret_ref": "string",
"username": "string",
},
},
"ranger_usersync": {
"enabled": False,
"groups": ["string"],
"mode": "string",
"user_mapping_location": "string",
"users": ["string"],
},
"ranger_audit": {
"storage_account": "string",
},
},
"script_action_profiles": [{
"name": "string",
"services": ["string"],
"type": "string",
"url": "string",
"parameters": "string",
"should_persist": False,
"timeout_in_minutes": 0,
}],
"secrets_profile": {
"key_vault_resource_id": "string",
"secrets": [{
"key_vault_object_name": "string",
"reference_name": "string",
"type": "string",
"version": "string",
}],
},
"service_configs_profiles": [{
"configs": [{
"component": "string",
"files": [{
"file_name": "string",
"content": "string",
"encoding": "string",
"path": "string",
"values": {
"string": "string",
},
}],
}],
"service_name": "string",
}],
"spark_profile": {
"default_storage_url": "string",
"metastore_spec": {
"db_name": "string",
"db_server_host": "string",
"db_connection_authentication_mode": "string",
"db_password_secret_name": "string",
"db_user_name": "string",
"key_vault_id": "string",
"thrift_url": "string",
},
"user_plugins_spec": {
"plugins": [{
"path": "string",
}],
},
},
"ssh_profile": {
"count": 0,
"vm_size": "string",
},
"stub_profile": "any",
"trino_profile": {
"catalog_options": {
"hive": [{
"catalog_name": "string",
"metastore_db_connection_url": "string",
"metastore_warehouse_dir": "string",
"metastore_db_connection_authentication_mode": "string",
"metastore_db_connection_password_secret": "string",
"metastore_db_connection_user_name": "string",
}],
},
"coordinator": {
"enable": False,
"high_availability_enabled": False,
"port": 0,
"suspend": False,
},
"user_plugins_spec": {
"plugins": [{
"enabled": False,
"name": "string",
"path": "string",
}],
},
"user_telemetry_spec": {
"storage": {
"hivecatalog_name": "string",
"hivecatalog_schema": "string",
"partition_retention_in_days": 0,
"path": "string",
},
},
"worker": {
"enable": False,
"port": 0,
"suspend": False,
},
},
},
cluster_type="string",
compute_profile={
"nodes": [{
"count": 0,
"type": "string",
"vm_size": "string",
}],
"availability_zones": ["string"],
},
resource_group_name="string",
cluster_name="string",
location="string",
tags={
"string": "string",
})
const clusterPoolClusterResource = new azure_native.hdinsight.ClusterPoolCluster("clusterPoolClusterResource", {
clusterPoolName: "string",
clusterProfile: {
clusterVersion: "string",
ossVersion: "string",
authorizationProfile: {
groupIds: ["string"],
userIds: ["string"],
},
identityProfile: {
msiClientId: "string",
msiObjectId: "string",
msiResourceId: "string",
},
rangerPluginProfile: {
enabled: false,
},
clusterAccessProfile: {
enableInternalIngress: false,
},
kafkaProfile: {
diskStorage: {
dataDiskSize: 0,
dataDiskType: "string",
},
enableKRaft: false,
enablePublicEndpoints: false,
remoteStorageUri: "string",
},
llapProfile: "any",
logAnalyticsProfile: {
enabled: false,
applicationLogs: {
stdErrorEnabled: false,
stdOutEnabled: false,
},
metricsEnabled: false,
},
managedIdentityProfile: {
identityList: [{
clientId: "string",
objectId: "string",
resourceId: "string",
type: "string",
}],
},
autoscaleProfile: {
enabled: false,
autoscaleType: "string",
gracefulDecommissionTimeout: 0,
loadBasedConfig: {
maxNodes: 0,
minNodes: 0,
scalingRules: [{
actionType: "string",
comparisonRule: {
operator: "string",
threshold: 0,
},
evaluationCount: 0,
scalingMetric: "string",
}],
cooldownPeriod: 0,
pollInterval: 0,
},
scheduleBasedConfig: {
defaultCount: 0,
schedules: [{
count: 0,
days: ["string"],
endTime: "string",
startTime: "string",
}],
timeZone: "string",
},
},
prometheusProfile: {
enabled: false,
},
flinkProfile: {
jobManager: {
cpu: 0,
memory: 0,
},
storage: {
storageUri: "string",
storagekey: "string",
},
taskManager: {
cpu: 0,
memory: 0,
},
catalogOptions: {
hive: {
metastoreDbConnectionURL: "string",
metastoreDbConnectionAuthenticationMode: "string",
metastoreDbConnectionPasswordSecret: "string",
metastoreDbConnectionUserName: "string",
},
},
deploymentMode: "string",
historyServer: {
cpu: 0,
memory: 0,
},
jobSpec: {
jarName: "string",
jobJarDirectory: "string",
upgradeMode: "string",
args: "string",
entryClass: "string",
savePointName: "string",
},
numReplicas: 0,
},
rangerProfile: {
rangerAdmin: {
admins: ["string"],
database: {
host: "string",
name: "string",
passwordSecretRef: "string",
username: "string",
},
},
rangerUsersync: {
enabled: false,
groups: ["string"],
mode: "string",
userMappingLocation: "string",
users: ["string"],
},
rangerAudit: {
storageAccount: "string",
},
},
scriptActionProfiles: [{
name: "string",
services: ["string"],
type: "string",
url: "string",
parameters: "string",
shouldPersist: false,
timeoutInMinutes: 0,
}],
secretsProfile: {
keyVaultResourceId: "string",
secrets: [{
keyVaultObjectName: "string",
referenceName: "string",
type: "string",
version: "string",
}],
},
serviceConfigsProfiles: [{
configs: [{
component: "string",
files: [{
fileName: "string",
content: "string",
encoding: "string",
path: "string",
values: {
string: "string",
},
}],
}],
serviceName: "string",
}],
sparkProfile: {
defaultStorageUrl: "string",
metastoreSpec: {
dbName: "string",
dbServerHost: "string",
dbConnectionAuthenticationMode: "string",
dbPasswordSecretName: "string",
dbUserName: "string",
keyVaultId: "string",
thriftUrl: "string",
},
userPluginsSpec: {
plugins: [{
path: "string",
}],
},
},
sshProfile: {
count: 0,
vmSize: "string",
},
stubProfile: "any",
trinoProfile: {
catalogOptions: {
hive: [{
catalogName: "string",
metastoreDbConnectionURL: "string",
metastoreWarehouseDir: "string",
metastoreDbConnectionAuthenticationMode: "string",
metastoreDbConnectionPasswordSecret: "string",
metastoreDbConnectionUserName: "string",
}],
},
coordinator: {
enable: false,
highAvailabilityEnabled: false,
port: 0,
suspend: false,
},
userPluginsSpec: {
plugins: [{
enabled: false,
name: "string",
path: "string",
}],
},
userTelemetrySpec: {
storage: {
hivecatalogName: "string",
hivecatalogSchema: "string",
partitionRetentionInDays: 0,
path: "string",
},
},
worker: {
enable: false,
port: 0,
suspend: false,
},
},
},
clusterType: "string",
computeProfile: {
nodes: [{
count: 0,
type: "string",
vmSize: "string",
}],
availabilityZones: ["string"],
},
resourceGroupName: "string",
clusterName: "string",
location: "string",
tags: {
string: "string",
},
});
type: azure-native:hdinsight:ClusterPoolCluster
properties:
clusterName: string
clusterPoolName: string
clusterProfile:
authorizationProfile:
groupIds:
- string
userIds:
- string
autoscaleProfile:
autoscaleType: string
enabled: false
gracefulDecommissionTimeout: 0
loadBasedConfig:
cooldownPeriod: 0
maxNodes: 0
minNodes: 0
pollInterval: 0
scalingRules:
- actionType: string
comparisonRule:
operator: string
threshold: 0
evaluationCount: 0
scalingMetric: string
scheduleBasedConfig:
defaultCount: 0
schedules:
- count: 0
days:
- string
endTime: string
startTime: string
timeZone: string
clusterAccessProfile:
enableInternalIngress: false
clusterVersion: string
flinkProfile:
catalogOptions:
hive:
metastoreDbConnectionAuthenticationMode: string
metastoreDbConnectionPasswordSecret: string
metastoreDbConnectionURL: string
metastoreDbConnectionUserName: string
deploymentMode: string
historyServer:
cpu: 0
memory: 0
jobManager:
cpu: 0
memory: 0
jobSpec:
args: string
entryClass: string
jarName: string
jobJarDirectory: string
savePointName: string
upgradeMode: string
numReplicas: 0
storage:
storageUri: string
storagekey: string
taskManager:
cpu: 0
memory: 0
identityProfile:
msiClientId: string
msiObjectId: string
msiResourceId: string
kafkaProfile:
diskStorage:
dataDiskSize: 0
dataDiskType: string
enableKRaft: false
enablePublicEndpoints: false
remoteStorageUri: string
llapProfile: any
logAnalyticsProfile:
applicationLogs:
stdErrorEnabled: false
stdOutEnabled: false
enabled: false
metricsEnabled: false
managedIdentityProfile:
identityList:
- clientId: string
objectId: string
resourceId: string
type: string
ossVersion: string
prometheusProfile:
enabled: false
rangerPluginProfile:
enabled: false
rangerProfile:
rangerAdmin:
admins:
- string
database:
host: string
name: string
passwordSecretRef: string
username: string
rangerAudit:
storageAccount: string
rangerUsersync:
enabled: false
groups:
- string
mode: string
userMappingLocation: string
users:
- string
scriptActionProfiles:
- name: string
parameters: string
services:
- string
shouldPersist: false
timeoutInMinutes: 0
type: string
url: string
secretsProfile:
keyVaultResourceId: string
secrets:
- keyVaultObjectName: string
referenceName: string
type: string
version: string
serviceConfigsProfiles:
- configs:
- component: string
files:
- content: string
encoding: string
fileName: string
path: string
values:
string: string
serviceName: string
sparkProfile:
defaultStorageUrl: string
metastoreSpec:
dbConnectionAuthenticationMode: string
dbName: string
dbPasswordSecretName: string
dbServerHost: string
dbUserName: string
keyVaultId: string
thriftUrl: string
userPluginsSpec:
plugins:
- path: string
sshProfile:
count: 0
vmSize: string
stubProfile: any
trinoProfile:
catalogOptions:
hive:
- catalogName: string
metastoreDbConnectionAuthenticationMode: string
metastoreDbConnectionPasswordSecret: string
metastoreDbConnectionURL: string
metastoreDbConnectionUserName: string
metastoreWarehouseDir: string
coordinator:
enable: false
highAvailabilityEnabled: false
port: 0
suspend: false
userPluginsSpec:
plugins:
- enabled: false
name: string
path: string
userTelemetrySpec:
storage:
hivecatalogName: string
hivecatalogSchema: string
partitionRetentionInDays: 0
path: string
worker:
enable: false
port: 0
suspend: false
clusterType: string
computeProfile:
availabilityZones:
- string
nodes:
- count: 0
type: string
vmSize: string
location: string
resourceGroupName: string
tags:
string: string
ClusterPoolCluster Resource Properties
To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.
Inputs
In Python, inputs that are objects can be passed either as argument classes or as dictionary literals.
The ClusterPoolCluster resource accepts the following input properties:
- ClusterPoolName string - The name of the cluster pool.
- ClusterProfile Pulumi.AzureNative.HDInsight.Inputs.ClusterProfile - Cluster profile.
- ClusterType string - The type of cluster.
- ComputeProfile Pulumi.AzureNative.HDInsight.Inputs.ClusterPoolComputeProfile - The compute profile.
- ResourceGroupName string - The name of the resource group. The name is case insensitive.
- ClusterName string - The name of the HDInsight cluster.
- Location string - The geo-location where the resource lives.
- Tags Dictionary&lt;string, string&gt; - Resource tags.
- ClusterPoolName string - The name of the cluster pool.
- ClusterProfile ClusterProfileArgs - Cluster profile.
- ClusterType string - The type of cluster.
- ComputeProfile ClusterPoolComputeProfileArgs - The compute profile.
- ResourceGroupName string - The name of the resource group. The name is case insensitive.
- ClusterName string - The name of the HDInsight cluster.
- Location string - The geo-location where the resource lives.
- Tags map[string]string - Resource tags.
- clusterPoolName String - The name of the cluster pool.
- clusterProfile ClusterProfile - Cluster profile.
- clusterType String - The type of cluster.
- computeProfile ClusterPoolComputeProfile - The compute profile.
- resourceGroupName String - The name of the resource group. The name is case insensitive.
- clusterName String - The name of the HDInsight cluster.
- location String - The geo-location where the resource lives.
- tags Map&lt;String,String&gt; - Resource tags.
- clusterPoolName string - The name of the cluster pool.
- clusterProfile ClusterProfile - Cluster profile.
- clusterType string - The type of cluster.
- computeProfile ClusterPoolComputeProfile - The compute profile.
- resourceGroupName string - The name of the resource group. The name is case insensitive.
- clusterName string - The name of the HDInsight cluster.
- location string - The geo-location where the resource lives.
- tags {[key: string]: string} - Resource tags.
- cluster_pool_name str - The name of the cluster pool.
- cluster_profile ClusterProfileArgs - Cluster profile.
- cluster_type str - The type of cluster.
- compute_profile ClusterPoolComputeProfileArgs - The compute profile.
- resource_group_name str - The name of the resource group. The name is case insensitive.
- cluster_name str - The name of the HDInsight cluster.
- location str - The geo-location where the resource lives.
- tags Mapping[str, str] - Resource tags.
- cluster
Pool StringName - The name of the cluster pool.
- cluster
Profile Property Map - Cluster profile.
- cluster
Type String - The type of cluster.
- compute
Profile Property Map - The compute profile.
- resource
Group StringName - The name of the resource group. The name is case insensitive.
- cluster
Name String - The name of the HDInsight cluster.
- location String
- The geo-location where the resource lives
- Map<String>
- Resource tags.
Outputs
All input properties are implicitly available as output properties. Additionally, the ClusterPoolCluster resource produces the following output properties:
- Azure
Api stringVersion - The Azure API version of the resource.
- Deployment
Id string - A unique id generated by the RP to identify the resource.
- Id string
- The provider-assigned unique ID for this managed resource.
- Name string
- The name of the resource
- Provisioning
State string - Provisioning state of the resource.
- Status string
- Business status of the resource.
- System
Data Pulumi.Azure Native. HDInsight. Outputs. System Data Response - Azure Resource Manager metadata containing createdBy and modifiedBy information.
- Type string
- The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
- Azure
Api stringVersion - The Azure API version of the resource.
- Deployment
Id string - A unique id generated by the RP to identify the resource.
- Id string
- The provider-assigned unique ID for this managed resource.
- Name string
- The name of the resource
- Provisioning
State string - Provisioning state of the resource.
- Status string
- Business status of the resource.
- System
Data SystemData Response - Azure Resource Manager metadata containing createdBy and modifiedBy information.
- Type string
- The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
- azure
Api StringVersion - The Azure API version of the resource.
- deployment
Id String - A unique id generated by the RP to identify the resource.
- id String
- The provider-assigned unique ID for this managed resource.
- name String
- The name of the resource
- provisioning
State String - Provisioning state of the resource.
- status String
- Business status of the resource.
- system
Data SystemData Response - Azure Resource Manager metadata containing createdBy and modifiedBy information.
- type String
- The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
- azure
Api stringVersion - The Azure API version of the resource.
- deployment
Id string - A unique id generated by the RP to identify the resource.
- id string
- The provider-assigned unique ID for this managed resource.
- name string
- The name of the resource
- provisioning
State string - Provisioning state of the resource.
- status string
- Business status of the resource.
- system
Data SystemData Response - Azure Resource Manager metadata containing createdBy and modifiedBy information.
- type string
- The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
- azure_
api_ strversion - The Azure API version of the resource.
- deployment_
id str - A unique id generated by the RP to identify the resource.
- id str
- The provider-assigned unique ID for this managed resource.
- name str
- The name of the resource
- provisioning_
state str - Provisioning state of the resource.
- status str
- Business status of the resource.
- system_
data SystemData Response - Azure Resource Manager metadata containing createdBy and modifiedBy information.
- type str
- The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
- azure
Api StringVersion - The Azure API version of the resource.
- deployment
Id String - A unique id generated by the RP to identify the resource.
- id String
- The provider-assigned unique ID for this managed resource.
- name String
- The name of the resource
- provisioning
State String - Provisioning state of the resource.
- status String
- Business status of the resource.
- system
Data Property Map - Azure Resource Manager metadata containing createdBy and modifiedBy information.
- type String
- The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
Supporting Types
AuthorizationProfile, AuthorizationProfileArgs
AuthorizationProfileResponse, AuthorizationProfileResponseArgs
AutoscaleProfile, AutoscaleProfileArgs
- Enabled bool
- This indicates whether auto scale is enabled on HDInsight on AKS cluster.
- Autoscale
Type string | Pulumi.Azure Native. HDInsight. Autoscale Type - User to specify which type of Autoscale to be implemented - Scheduled Based or Load Based.
- Graceful
Decommission intTimeout - This property is for graceful decommission timeout; It has a default setting of 3600 seconds before forced shutdown takes place. This is the maximal time to wait for running containers and applications to complete before transition a DECOMMISSIONING node into DECOMMISSIONED. The default value is 3600 seconds. Negative value (like -1) is handled as infinite timeout.
- LoadBasedConfig Pulumi.AzureNative.HDInsight.Inputs.LoadBasedConfig - Profiles of load based Autoscale.
- ScheduleBasedConfig Pulumi.AzureNative.HDInsight.Inputs.ScheduleBasedConfig - Profiles of schedule based Autoscale.
- Enabled bool
- This indicates whether auto scale is enabled on HDInsight on AKS cluster.
- Autoscale
Type string | AutoscaleType - User to specify which type of Autoscale to be implemented - Scheduled Based or Load Based.
- Graceful
Decommission intTimeout - This property is for graceful decommission timeout; It has a default setting of 3600 seconds before forced shutdown takes place. This is the maximal time to wait for running containers and applications to complete before transition a DECOMMISSIONING node into DECOMMISSIONED. The default value is 3600 seconds. Negative value (like -1) is handled as infinite timeout.
- LoadBasedConfig LoadBasedConfig - Profiles of load based Autoscale.
- ScheduleBasedConfig ScheduleBasedConfig - Profiles of schedule based Autoscale.
- enabled Boolean
- This indicates whether auto scale is enabled on HDInsight on AKS cluster.
- autoscale
Type String | AutoscaleType - User to specify which type of Autoscale to be implemented - Scheduled Based or Load Based.
- graceful
Decommission IntegerTimeout - This property is for graceful decommission timeout; It has a default setting of 3600 seconds before forced shutdown takes place. This is the maximal time to wait for running containers and applications to complete before transition a DECOMMISSIONING node into DECOMMISSIONED. The default value is 3600 seconds. Negative value (like -1) is handled as infinite timeout.
- loadBasedConfig LoadBasedConfig - Profiles of load based Autoscale.
- scheduleBasedConfig ScheduleBasedConfig - Profiles of schedule based Autoscale.
- enabled boolean
- This indicates whether auto scale is enabled on HDInsight on AKS cluster.
- autoscale
Type string | AutoscaleType - User to specify which type of Autoscale to be implemented - Scheduled Based or Load Based.
- graceful
Decommission numberTimeout - This property is for graceful decommission timeout; It has a default setting of 3600 seconds before forced shutdown takes place. This is the maximal time to wait for running containers and applications to complete before transition a DECOMMISSIONING node into DECOMMISSIONED. The default value is 3600 seconds. Negative value (like -1) is handled as infinite timeout.
- loadBasedConfig LoadBasedConfig - Profiles of load based Autoscale.
- scheduleBasedConfig ScheduleBasedConfig - Profiles of schedule based Autoscale.
- enabled bool
- This indicates whether auto scale is enabled on HDInsight on AKS cluster.
- autoscale_
type str | AutoscaleType - User to specify which type of Autoscale to be implemented - Scheduled Based or Load Based.
- graceful_
decommission_ inttimeout - This property is for graceful decommission timeout; It has a default setting of 3600 seconds before forced shutdown takes place. This is the maximal time to wait for running containers and applications to complete before transition a DECOMMISSIONING node into DECOMMISSIONED. The default value is 3600 seconds. Negative value (like -1) is handled as infinite timeout.
- load_based_config LoadBasedConfig - Profiles of load based Autoscale.
- schedule_based_config ScheduleBasedConfig - Profiles of schedule based Autoscale.
- enabled Boolean
- This indicates whether auto scale is enabled on HDInsight on AKS cluster.
- autoscaleType String | "ScheduleBased" | "LoadBased" - User to specify which type of Autoscale to be implemented - Scheduled Based or Load Based.
- graceful
Decommission NumberTimeout - This property is for graceful decommission timeout; It has a default setting of 3600 seconds before forced shutdown takes place. This is the maximal time to wait for running containers and applications to complete before transition a DECOMMISSIONING node into DECOMMISSIONED. The default value is 3600 seconds. Negative value (like -1) is handled as infinite timeout.
- loadBasedConfig Property Map - Profiles of load based Autoscale.
- scheduleBasedConfig Property Map - Profiles of schedule based Autoscale.
AutoscaleProfileResponse, AutoscaleProfileResponseArgs
- Enabled bool
- This indicates whether auto scale is enabled on HDInsight on AKS cluster.
- Autoscale
Type string - User to specify which type of Autoscale to be implemented - Scheduled Based or Load Based.
- Graceful
Decommission intTimeout - This property is for graceful decommission timeout; It has a default setting of 3600 seconds before forced shutdown takes place. This is the maximal time to wait for running containers and applications to complete before transition a DECOMMISSIONING node into DECOMMISSIONED. The default value is 3600 seconds. Negative value (like -1) is handled as infinite timeout.
- LoadBasedConfig Pulumi.AzureNative.HDInsight.Inputs.LoadBasedConfigResponse - Profiles of load based Autoscale.
- ScheduleBasedConfig Pulumi.AzureNative.HDInsight.Inputs.ScheduleBasedConfigResponse - Profiles of schedule based Autoscale.
- Enabled bool
- This indicates whether auto scale is enabled on HDInsight on AKS cluster.
- Autoscale
Type string - User to specify which type of Autoscale to be implemented - Scheduled Based or Load Based.
- Graceful
Decommission intTimeout - This property is for graceful decommission timeout; It has a default setting of 3600 seconds before forced shutdown takes place. This is the maximal time to wait for running containers and applications to complete before transition a DECOMMISSIONING node into DECOMMISSIONED. The default value is 3600 seconds. Negative value (like -1) is handled as infinite timeout.
- Load
Based LoadConfig Based Config Response - Profiles of load based Autoscale.
- Schedule
Based ScheduleConfig Based Config Response - Profiles of schedule based Autoscale.
- enabled Boolean
- This indicates whether auto scale is enabled on HDInsight on AKS cluster.
- autoscale
Type String - User to specify which type of Autoscale to be implemented - Scheduled Based or Load Based.
- graceful
Decommission IntegerTimeout - This property is for graceful decommission timeout; It has a default setting of 3600 seconds before forced shutdown takes place. This is the maximal time to wait for running containers and applications to complete before transition a DECOMMISSIONING node into DECOMMISSIONED. The default value is 3600 seconds. Negative value (like -1) is handled as infinite timeout.
- load
Based LoadConfig Based Config Response - Profiles of load based Autoscale.
- schedule
Based ScheduleConfig Based Config Response - Profiles of schedule based Autoscale.
- enabled boolean
- This indicates whether auto scale is enabled on HDInsight on AKS cluster.
- autoscale
Type string - User to specify which type of Autoscale to be implemented - Scheduled Based or Load Based.
- graceful
Decommission numberTimeout - This property is for graceful decommission timeout; It has a default setting of 3600 seconds before forced shutdown takes place. This is the maximal time to wait for running containers and applications to complete before transition a DECOMMISSIONING node into DECOMMISSIONED. The default value is 3600 seconds. Negative value (like -1) is handled as infinite timeout.
- load
Based LoadConfig Based Config Response - Profiles of load based Autoscale.
- schedule
Based ScheduleConfig Based Config Response - Profiles of schedule based Autoscale.
- enabled bool
- This indicates whether auto scale is enabled on HDInsight on AKS cluster.
- autoscale_
type str - User to specify which type of Autoscale to be implemented - Scheduled Based or Load Based.
- graceful_
decommission_ inttimeout - This property is for graceful decommission timeout; It has a default setting of 3600 seconds before forced shutdown takes place. This is the maximal time to wait for running containers and applications to complete before transition a DECOMMISSIONING node into DECOMMISSIONED. The default value is 3600 seconds. Negative value (like -1) is handled as infinite timeout.
- load_based_config LoadBasedConfigResponse - Profiles of load based Autoscale.
- schedule_based_config ScheduleBasedConfigResponse - Profiles of schedule based Autoscale.
- enabled Boolean
- This indicates whether auto scale is enabled on HDInsight on AKS cluster.
- autoscale
Type String - User to specify which type of Autoscale to be implemented - Scheduled Based or Load Based.
- graceful
Decommission NumberTimeout - This property is for graceful decommission timeout; It has a default setting of 3600 seconds before forced shutdown takes place. This is the maximal time to wait for running containers and applications to complete before transition a DECOMMISSIONING node into DECOMMISSIONED. The default value is 3600 seconds. Negative value (like -1) is handled as infinite timeout.
- loadBasedConfig Property Map - Profiles of load based Autoscale.
- scheduleBasedConfig Property Map - Profiles of schedule based Autoscale.
AutoscaleType, AutoscaleTypeArgs
- Schedule
Based - ScheduleBased
- Load
Based - LoadBased
- Autoscale
Type Schedule Based - ScheduleBased
- Autoscale
Type Load Based - LoadBased
- Schedule
Based - ScheduleBased
- Load
Based - LoadBased
- Schedule
Based - ScheduleBased
- Load
Based - LoadBased
- SCHEDULE_BASED
- ScheduleBased
- LOAD_BASED
- LoadBased
- "ScheduleBased" - ScheduleBased
- "LoadBased" - LoadBased
CatalogOptions, CatalogOptionsArgs
- Hive
List<Pulumi.
Azure Native. HDInsight. Inputs. Hive Catalog Option> - hive catalog options.
- Hive
[]Hive
Catalog Option - hive catalog options.
- hive
List<Hive
Catalog Option> - hive catalog options.
- hive
Hive
Catalog Option[] - hive catalog options.
- hive
Sequence[Hive
Catalog Option] - hive catalog options.
- hive List<Property Map>
- hive catalog options.
CatalogOptionsResponse, CatalogOptionsResponseArgs
- Hive
List<Pulumi.
Azure Native. HDInsight. Inputs. Hive Catalog Option Response> - hive catalog options.
- Hive
[]Hive
Catalog Option Response - hive catalog options.
- hive
List<Hive
Catalog Option Response> - hive catalog options.
- hive
Hive
Catalog Option Response[] - hive catalog options.
- hive
Sequence[Hive
Catalog Option Response] - hive catalog options.
- hive List<Property Map>
- hive catalog options.
ClusterAccessProfile, ClusterAccessProfileArgs
- Enable
Internal boolIngress - Whether to create cluster using private IP instead of public IP. This property must be set at create time.
- Enable
Internal boolIngress - Whether to create cluster using private IP instead of public IP. This property must be set at create time.
- enable
Internal BooleanIngress - Whether to create cluster using private IP instead of public IP. This property must be set at create time.
- enable
Internal booleanIngress - Whether to create cluster using private IP instead of public IP. This property must be set at create time.
- enable_
internal_ boolingress - Whether to create cluster using private IP instead of public IP. This property must be set at create time.
- enable
Internal BooleanIngress - Whether to create cluster using private IP instead of public IP. This property must be set at create time.
ClusterAccessProfileResponse, ClusterAccessProfileResponseArgs
- Enable
Internal boolIngress - Whether to create cluster using private IP instead of public IP. This property must be set at create time.
- Private
Link stringService Id - Private link service resource ID. Only when enableInternalIngress is true, this property will be returned.
- Enable
Internal boolIngress - Whether to create cluster using private IP instead of public IP. This property must be set at create time.
- Private
Link stringService Id - Private link service resource ID. Only when enableInternalIngress is true, this property will be returned.
- enable
Internal BooleanIngress - Whether to create cluster using private IP instead of public IP. This property must be set at create time.
- private
Link StringService Id - Private link service resource ID. Only when enableInternalIngress is true, this property will be returned.
- enable
Internal booleanIngress - Whether to create cluster using private IP instead of public IP. This property must be set at create time.
- private
Link stringService Id - Private link service resource ID. Only when enableInternalIngress is true, this property will be returned.
- enable_
internal_ boolingress - Whether to create cluster using private IP instead of public IP. This property must be set at create time.
- private_
link_ strservice_ id - Private link service resource ID. Only when enableInternalIngress is true, this property will be returned.
- enable
Internal BooleanIngress - Whether to create cluster using private IP instead of public IP. This property must be set at create time.
- private
Link StringService Id - Private link service resource ID. Only when enableInternalIngress is true, this property will be returned.
ClusterConfigFile, ClusterConfigFileArgs
- File
Name string - Configuration file name.
- Content string
- Free form content of the entire configuration file.
- Encoding
string | Pulumi.
Azure Native. HDInsight. Content Encoding - This property indicates if the content is encoded and is case-insensitive. Please set the value to base64 if the content is base64 encoded. Set it to none or skip it if the content is plain text.
- Path string
- Path of the config file if content is specified.
- Values Dictionary<string, string>
- List of key value pairs where key represents a valid service configuration name and value represents the value of the config.
- File
Name string - Configuration file name.
- Content string
- Free form content of the entire configuration file.
- Encoding
string | Content
Encoding - This property indicates if the content is encoded and is case-insensitive. Please set the value to base64 if the content is base64 encoded. Set it to none or skip it if the content is plain text.
- Path string
- Path of the config file if content is specified.
- Values map[string]string
- List of key value pairs where key represents a valid service configuration name and value represents the value of the config.
- file
Name String - Configuration file name.
- content String
- Free form content of the entire configuration file.
- encoding
String | Content
Encoding - This property indicates if the content is encoded and is case-insensitive. Please set the value to base64 if the content is base64 encoded. Set it to none or skip it if the content is plain text.
- path String
- Path of the config file if content is specified.
- values Map<String,String>
- List of key value pairs where key represents a valid service configuration name and value represents the value of the config.
- file
Name string - Configuration file name.
- content string
- Free form content of the entire configuration file.
- encoding
string | Content
Encoding - This property indicates if the content is encoded and is case-insensitive. Please set the value to base64 if the content is base64 encoded. Set it to none or skip it if the content is plain text.
- path string
- Path of the config file if content is specified.
- values {[key: string]: string}
- List of key value pairs where key represents a valid service configuration name and value represents the value of the config.
- file_
name str - Configuration file name.
- content str
- Free form content of the entire configuration file.
- encoding
str | Content
Encoding - This property indicates if the content is encoded and is case-insensitive. Please set the value to base64 if the content is base64 encoded. Set it to none or skip it if the content is plain text.
- path str
- Path of the config file if content is specified.
- values Mapping[str, str]
- List of key value pairs where key represents a valid service configuration name and value represents the value of the config.
- file
Name String - Configuration file name.
- content String
- Free form content of the entire configuration file.
- encoding String | "Base64" | "None"
- This property indicates if the content is encoded and is case-insensitive. Please set the value to base64 if the content is base64 encoded. Set it to none or skip it if the content is plain text.
- path String
- Path of the config file if content is specified.
- values Map<String>
- List of key value pairs where key represents a valid service configuration name and value represents the value of the config.
ClusterConfigFileResponse, ClusterConfigFileResponseArgs
- File
Name string - Configuration file name.
- Content string
- Free form content of the entire configuration file.
- Encoding string
- This property indicates if the content is encoded and is case-insensitive. Please set the value to base64 if the content is base64 encoded. Set it to none or skip it if the content is plain text.
- Path string
- Path of the config file if content is specified.
- Values Dictionary<string, string>
- List of key value pairs where key represents a valid service configuration name and value represents the value of the config.
- File
Name string - Configuration file name.
- Content string
- Free form content of the entire configuration file.
- Encoding string
- This property indicates if the content is encoded and is case-insensitive. Please set the value to base64 if the content is base64 encoded. Set it to none or skip it if the content is plain text.
- Path string
- Path of the config file if content is specified.
- Values map[string]string
- List of key value pairs where key represents a valid service configuration name and value represents the value of the config.
- file
Name String - Configuration file name.
- content String
- Free form content of the entire configuration file.
- encoding String
- This property indicates if the content is encoded and is case-insensitive. Please set the value to base64 if the content is base64 encoded. Set it to none or skip it if the content is plain text.
- path String
- Path of the config file if content is specified.
- values Map<String,String>
- List of key value pairs where key represents a valid service configuration name and value represents the value of the config.
- file
Name string - Configuration file name.
- content string
- Free form content of the entire configuration file.
- encoding string
- This property indicates if the content is encoded and is case-insensitive. Please set the value to base64 if the content is base64 encoded. Set it to none or skip it if the content is plain text.
- path string
- Path of the config file if content is specified.
- values {[key: string]: string}
- List of key value pairs where key represents a valid service configuration name and value represents the value of the config.
- file_
name str - Configuration file name.
- content str
- Free form content of the entire configuration file.
- encoding str
- This property indicates if the content is encoded and is case-insensitive. Please set the value to base64 if the content is base64 encoded. Set it to none or skip it if the content is plain text.
- path str
- Path of the config file if content is specified.
- values Mapping[str, str]
- List of key value pairs where key represents a valid service configuration name and value represents the value of the config.
- file
Name String - Configuration file name.
- content String
- Free form content of the entire configuration file.
- encoding String
- This property indicates if the content is encoded and is case-insensitive. Please set the value to base64 if the content is base64 encoded. Set it to none or skip it if the content is plain text.
- path String
- Path of the config file if content is specified.
- values Map<String>
- List of key value pairs where key represents a valid service configuration name and value represents the value of the config.
ClusterLogAnalyticsApplicationLogs, ClusterLogAnalyticsApplicationLogsArgs
- Std
Error boolEnabled - True if stderror is enabled, otherwise false.
- Std
Out boolEnabled - True if stdout is enabled, otherwise false.
- Std
Error boolEnabled - True if stderror is enabled, otherwise false.
- Std
Out boolEnabled - True if stdout is enabled, otherwise false.
- std
Error BooleanEnabled - True if stderror is enabled, otherwise false.
- std
Out BooleanEnabled - True if stdout is enabled, otherwise false.
- std
Error booleanEnabled - True if stderror is enabled, otherwise false.
- std
Out booleanEnabled - True if stdout is enabled, otherwise false.
- std_
error_ boolenabled - True if stderror is enabled, otherwise false.
- std_
out_ boolenabled - True if stdout is enabled, otherwise false.
- std
Error BooleanEnabled - True if stderror is enabled, otherwise false.
- std
Out BooleanEnabled - True if stdout is enabled, otherwise false.
ClusterLogAnalyticsApplicationLogsResponse, ClusterLogAnalyticsApplicationLogsResponseArgs
- Std
Error boolEnabled - True if stderror is enabled, otherwise false.
- Std
Out boolEnabled - True if stdout is enabled, otherwise false.
- Std
Error boolEnabled - True if stderror is enabled, otherwise false.
- Std
Out boolEnabled - True if stdout is enabled, otherwise false.
- std
Error BooleanEnabled - True if stderror is enabled, otherwise false.
- std
Out BooleanEnabled - True if stdout is enabled, otherwise false.
- std
Error booleanEnabled - True if stderror is enabled, otherwise false.
- std
Out booleanEnabled - True if stdout is enabled, otherwise false.
- std_
error_ boolenabled - True if stderror is enabled, otherwise false.
- std_
out_ boolenabled - True if stdout is enabled, otherwise false.
- std
Error BooleanEnabled - True if stderror is enabled, otherwise false.
- std
Out BooleanEnabled - True if stdout is enabled, otherwise false.
ClusterLogAnalyticsProfile, ClusterLogAnalyticsProfileArgs
- Enabled bool
- True if log analytics is enabled for the cluster, otherwise false.
- Application
Logs Pulumi.Azure Native. HDInsight. Inputs. Cluster Log Analytics Application Logs - Collection of logs to be enabled or disabled for log analytics.
- Metrics
Enabled bool - True if metrics are enabled, otherwise false.
- Enabled bool
- True if log analytics is enabled for the cluster, otherwise false.
- Application
Logs ClusterLog Analytics Application Logs - Collection of logs to be enabled or disabled for log analytics.
- Metrics
Enabled bool - True if metrics are enabled, otherwise false.
- enabled Boolean
- True if log analytics is enabled for the cluster, otherwise false.
- application
Logs ClusterLog Analytics Application Logs - Collection of logs to be enabled or disabled for log analytics.
- metrics
Enabled Boolean - True if metrics are enabled, otherwise false.
- enabled boolean
- True if log analytics is enabled for the cluster, otherwise false.
- application
Logs ClusterLog Analytics Application Logs - Collection of logs to be enabled or disabled for log analytics.
- metrics
Enabled boolean - True if metrics are enabled, otherwise false.
- enabled bool
- True if log analytics is enabled for the cluster, otherwise false.
- application_
logs ClusterLog Analytics Application Logs - Collection of logs to be enabled or disabled for log analytics.
- metrics_
enabled bool - True if metrics are enabled, otherwise false.
- enabled Boolean
- True if log analytics is enabled for the cluster, otherwise false.
- application
Logs Property Map - Collection of logs to be enabled or disabled for log analytics.
- metrics
Enabled Boolean - True if metrics are enabled, otherwise false.
ClusterLogAnalyticsProfileResponse, ClusterLogAnalyticsProfileResponseArgs
- Enabled bool
- True if log analytics is enabled for the cluster, otherwise false.
- Application
Logs Pulumi.Azure Native. HDInsight. Inputs. Cluster Log Analytics Application Logs Response - Collection of logs to be enabled or disabled for log analytics.
- Metrics
Enabled bool - True if metrics are enabled, otherwise false.
- Enabled bool
- True if log analytics is enabled for the cluster, otherwise false.
- Application
Logs ClusterLog Analytics Application Logs Response - Collection of logs to be enabled or disabled for log analytics.
- Metrics
Enabled bool - True if metrics are enabled, otherwise false.
- enabled Boolean
- True if log analytics is enabled for the cluster, otherwise false.
- application
Logs ClusterLog Analytics Application Logs Response - Collection of logs to be enabled or disabled for log analytics.
- metrics
Enabled Boolean - True if metrics are enabled, otherwise false.
- enabled boolean
- True if log analytics is enabled for the cluster, otherwise false.
- application
Logs ClusterLog Analytics Application Logs Response - Collection of logs to be enabled or disabled for log analytics.
- metrics
Enabled boolean - True if metrics are enabled, otherwise false.
- enabled bool
- True if log analytics is enabled for the cluster, otherwise false.
- application_
logs ClusterLog Analytics Application Logs Response - Collection of logs to be enabled or disabled for log analytics.
- metrics_
enabled bool - True if metrics are enabled, otherwise false.
- enabled Boolean
- True if log analytics is enabled for the cluster, otherwise false.
- application
Logs Property Map - Collection of logs to be enabled or disabled for log analytics.
- metrics
Enabled Boolean - True if metrics are enabled, otherwise false.
ClusterPoolComputeProfile, ClusterPoolComputeProfileArgs
- Nodes
List<Pulumi.
Azure Native. HDInsight. Inputs. Node Profile> - The nodes definitions.
- Availability
Zones List<string> - The list of Availability zones to use for AKS VMSS nodes.
- Nodes
[]Node
Profile - The nodes definitions.
- Availability
Zones []string - The list of Availability zones to use for AKS VMSS nodes.
- nodes
List<Node
Profile> - The nodes definitions.
- availability
Zones List<String> - The list of Availability zones to use for AKS VMSS nodes.
- nodes
Node
Profile[] - The nodes definitions.
- availability
Zones string[] - The list of Availability zones to use for AKS VMSS nodes.
- nodes
Sequence[Node
Profile] - The nodes definitions.
- availability_
zones Sequence[str] - The list of Availability zones to use for AKS VMSS nodes.
- nodes List<Property Map>
- The nodes definitions.
- availability
Zones List<String> - The list of Availability zones to use for AKS VMSS nodes.
ClusterPoolComputeProfileResponse, ClusterPoolComputeProfileResponseArgs
- Nodes
List<Pulumi.
Azure Native. HDInsight. Inputs. Node Profile Response> - The nodes definitions.
- Availability
Zones List<string> - The list of Availability zones to use for AKS VMSS nodes.
- Nodes
[]Node
Profile Response - The nodes definitions.
- Availability
Zones []string - The list of Availability zones to use for AKS VMSS nodes.
- nodes
List<Node
Profile Response> - The nodes definitions.
- availability
Zones List<String> - The list of Availability zones to use for AKS VMSS nodes.
- nodes
Node
Profile Response[] - The nodes definitions.
- availability
Zones string[] - The list of Availability zones to use for AKS VMSS nodes.
- nodes
Sequence[Node
Profile Response] - The nodes definitions.
- availability_
zones Sequence[str] - The list of Availability zones to use for AKS VMSS nodes.
- nodes List<Property Map>
- The nodes definitions.
- availability
Zones List<String> - The list of Availability zones to use for AKS VMSS nodes.
ClusterPoolSshProfile, ClusterPoolSshProfileArgs
ClusterPoolSshProfileResponse, ClusterPoolSshProfileResponseArgs
- count int
- Number of ssh pods per cluster.
- pod_
prefix str - Prefix of the pod names. Pod number will be appended to the prefix. The ingress URLs for the pods will be available at //-
- vm_
size str - The virtual machine SKU.
ClusterProfile, ClusterProfileArgs
- Pulumi.
Azure Native. HDInsight. Inputs. Authorization Profile - Authorization profile with details of AAD user Ids and group Ids authorized for data plane access.
- Cluster
Version string - Version with 3/4 part.
- Oss
Version string - Version with three part.
- Autoscale
Profile Pulumi.Azure Native. HDInsight. Inputs. Autoscale Profile - This is the Autoscale profile for the cluster. This will allow customer to create cluster enabled with Autoscale.
- Cluster
Access Profile Pulumi.Azure Native. HDInsight. Inputs. Cluster Access Profile - Cluster access profile.
- Flink
Profile Pulumi.Azure Native. HDInsight. Inputs. Flink Profile - The Flink cluster profile.
- Identity
Profile Pulumi.Azure Native. HDInsight. Inputs. Identity Profile - This is deprecated. Please use managed identity profile instead.
- Kafka
Profile Pulumi.Azure Native. HDInsight. Inputs. Kafka Profile - The Kafka cluster profile.
- Llap
Profile object - LLAP cluster profile.
- Log
Analytics Profile Pulumi.Azure Native. HDInsight. Inputs. Cluster Log Analytics Profile - Cluster log analytics profile to enable or disable OMS agent for cluster.
- Managed
Identity Profile Pulumi.Azure Native. HDInsight. Inputs. Managed Identity Profile - This property is required by Trino, Spark and Flink cluster but is optional for Kafka cluster.
- Prometheus
Profile Pulumi.Azure Native. HDInsight. Inputs. Cluster Prometheus Profile - Cluster Prometheus profile.
- Ranger
Plugin Profile Pulumi.Azure Native. HDInsight. Inputs. Cluster Ranger Plugin Profile - Cluster Ranger plugin profile.
- Ranger
Profile Pulumi.Azure Native. HDInsight. Inputs. Ranger Profile - The ranger cluster profile.
- Script
Action Profiles List<Pulumi.Azure Native. HDInsight. Inputs. Script Action Profile> - The script action profile list.
- Secrets
Profile Pulumi.Azure Native. HDInsight. Inputs. Secrets Profile - The cluster secret profile.
- Service
Configs Profiles List<Pulumi.Azure Native. HDInsight. Inputs. Cluster Service Configs Profile> - The service configs profiles.
- Spark
Profile Pulumi.Azure Native. HDInsight. Inputs. Spark Profile - The spark cluster profile.
- Ssh
Profile Pulumi.Azure Native. HDInsight. Inputs. Cluster Pool Ssh Profile - Ssh profile for the cluster.
- Stub
Profile object - Stub cluster profile.
- Trino
Profile Pulumi.Azure Native. HDInsight. Inputs. Trino Profile - Trino Cluster profile.
- Authorization
Profile - Authorization profile with details of AAD user Ids and group Ids authorized for data plane access.
- Cluster
Version string - Version with 3/4 part.
- Oss
Version string - Version with three part.
- Autoscale
Profile AutoscaleProfile - This is the Autoscale profile for the cluster. This will allow customer to create cluster enabled with Autoscale.
- Cluster
Access Profile Cluster Access Profile - Cluster access profile.
- Flink
Profile FlinkProfile - The Flink cluster profile.
- Identity
Profile IdentityProfile - This is deprecated. Please use managed identity profile instead.
- Kafka
Profile KafkaProfile - The Kafka cluster profile.
- Llap
Profile interface{} - LLAP cluster profile.
- Log
Analytics Profile Cluster Log Analytics Profile - Cluster log analytics profile to enable or disable OMS agent for cluster.
- Managed
Identity Profile Managed Identity Profile - This property is required by Trino, Spark and Flink cluster but is optional for Kafka cluster.
- Prometheus
Profile ClusterPrometheus Profile - Cluster Prometheus profile.
- Ranger
Plugin Profile Cluster Ranger Plugin Profile - Cluster Ranger plugin profile.
- Ranger
Profile RangerProfile - The ranger cluster profile.
- Script
Action Profiles []Script Action Profile - The script action profile list.
- Secrets
Profile SecretsProfile - The cluster secret profile.
- Service
Configs Profiles []Cluster Service Configs Profile - The service configs profiles.
- Spark
Profile SparkProfile - The spark cluster profile.
- Ssh
Profile ClusterPool Ssh Profile - Ssh profile for the cluster.
- Stub
Profile interface{} - Stub cluster profile.
- Trino
Profile TrinoProfile - Trino Cluster profile.
- Authorization
Profile - Authorization profile with details of AAD user Ids and group Ids authorized for data plane access.
- cluster
Version String - Version with 3/4 part.
- oss
Version String - Version with three part.
- autoscale
Profile AutoscaleProfile - This is the Autoscale profile for the cluster. This will allow customer to create cluster enabled with Autoscale.
- cluster
Access Profile Cluster Access Profile - Cluster access profile.
- flink
Profile FlinkProfile - The Flink cluster profile.
- identity
Profile IdentityProfile - This is deprecated. Please use managed identity profile instead.
- kafka
Profile KafkaProfile - The Kafka cluster profile.
- llap
Profile Object - LLAP cluster profile.
- log
Analytics Profile Cluster Log Analytics Profile - Cluster log analytics profile to enable or disable OMS agent for cluster.
- managed
Identity Profile Managed Identity Profile - This property is required by Trino, Spark and Flink cluster but is optional for Kafka cluster.
- prometheus
Profile ClusterPrometheus Profile - Cluster Prometheus profile.
- ranger
Plugin Profile Cluster Ranger Plugin Profile - Cluster Ranger plugin profile.
- ranger
Profile RangerProfile - The ranger cluster profile.
- script
Action Profiles List<Script Action Profile> - The script action profile list.
- secrets
Profile SecretsProfile - The cluster secret profile.
- service
Configs Profiles List<Cluster Service Configs Profile> - The service configs profiles.
- spark
Profile SparkProfile - The spark cluster profile.
- ssh
Profile ClusterPool Ssh Profile - Ssh profile for the cluster.
- stub
Profile Object - Stub cluster profile.
- trino
Profile TrinoProfile - Trino Cluster profile.
- Authorization
Profile - Authorization profile with details of AAD user Ids and group Ids authorized for data plane access.
- cluster
Version string - Version with 3/4 part.
- oss
Version string - Version with three part.
- autoscale
Profile AutoscaleProfile - This is the Autoscale profile for the cluster. This will allow customer to create cluster enabled with Autoscale.
- cluster
Access Profile Cluster Access Profile - Cluster access profile.
- flink
Profile FlinkProfile - The Flink cluster profile.
- identity
Profile IdentityProfile - This is deprecated. Please use managed identity profile instead.
- kafka
Profile KafkaProfile - The Kafka cluster profile.
- llap
Profile any - LLAP cluster profile.
- log
Analytics Profile Cluster Log Analytics Profile - Cluster log analytics profile to enable or disable OMS agent for cluster.
- managed
Identity Profile Managed Identity Profile - This property is required by Trino, Spark and Flink cluster but is optional for Kafka cluster.
- prometheus
Profile ClusterPrometheus Profile - Cluster Prometheus profile.
- ranger
Plugin Profile Cluster Ranger Plugin Profile - Cluster Ranger plugin profile.
- ranger
Profile RangerProfile - The ranger cluster profile.
- script
Action Profiles Script Action Profile[] - The script action profile list.
- secrets
Profile SecretsProfile - The cluster secret profile.
- service
Configs Profiles Cluster Service Configs Profile[] - The service configs profiles.
- spark
Profile SparkProfile - The spark cluster profile.
- ssh
Profile ClusterPool Ssh Profile - Ssh profile for the cluster.
- stub
Profile any - Stub cluster profile.
- trino
Profile TrinoProfile - Trino Cluster profile.
- Authorization
Profile - Authorization profile with details of AAD user Ids and group Ids authorized for data plane access.
- cluster_
version str - Version with 3/4 part.
- oss_
version str - Version with three part.
- autoscale_
profile AutoscaleProfile - This is the Autoscale profile for the cluster. This will allow customer to create cluster enabled with Autoscale.
- cluster_
access_ profile Cluster Access Profile - Cluster access profile.
- flink_
profile FlinkProfile - The Flink cluster profile.
- identity_
profile IdentityProfile - This is deprecated. Please use managed identity profile instead.
- kafka_
profile KafkaProfile - The Kafka cluster profile.
- llap_
profile Any - LLAP cluster profile.
- log_
analytics_ profile Cluster Log Analytics Profile - Cluster log analytics profile to enable or disable OMS agent for cluster.
- managed_
identity_ profile Managed Identity Profile - This property is required by Trino, Spark and Flink cluster but is optional for Kafka cluster.
- prometheus_
profile ClusterPrometheus Profile - Cluster Prometheus profile.
- ranger_
plugin_ profile Cluster Ranger Plugin Profile - Cluster Ranger plugin profile.
- ranger_
profile RangerProfile - The ranger cluster profile.
- script_
action_ profiles Sequence[Script Action Profile] - The script action profile list.
- secrets_
profile SecretsProfile - The cluster secret profile.
- service_
configs_ profiles Sequence[Cluster Service Configs Profile] - The service configs profiles.
- spark_
profile SparkProfile - The spark cluster profile.
- ssh_
profile ClusterPool Ssh Profile - Ssh profile for the cluster.
- stub_
profile Any - Stub cluster profile.
- trino_
profile TrinoProfile - Trino Cluster profile.
- Property Map
- Authorization profile with details of AAD user Ids and group Ids authorized for data plane access.
- cluster
Version String - Version with 3/4 part.
- oss
Version String - Version with three part.
- autoscale
Profile Property Map - This is the Autoscale profile for the cluster. This will allow customer to create cluster enabled with Autoscale.
- cluster
Access Profile Property Map - Cluster access profile.
- flink
Profile Property Map - The Flink cluster profile.
- identity
Profile Property Map - This is deprecated. Please use managed identity profile instead.
- kafka
Profile Property Map - The Kafka cluster profile.
- llap
Profile Any - LLAP cluster profile.
- log
Analytics Profile Property Map - Cluster log analytics profile to enable or disable OMS agent for cluster.
- managed
Identity Profile Property Map - This property is required by Trino, Spark and Flink cluster but is optional for Kafka cluster.
- prometheus
Profile Property Map - Cluster Prometheus profile.
- ranger
Plugin Profile Property Map - Cluster Ranger plugin profile.
- ranger
Profile Property Map - The ranger cluster profile.
- script
Action Profiles List<Property Map> - The script action profile list.
- secrets
Profile Property Map - The cluster secret profile.
- service
Configs Profiles List<Property Map> - The service configs profiles.
- spark
Profile Property Map - The spark cluster profile.
- ssh
Profile Property Map - Ssh profile for the cluster.
- stub
Profile Any - Stub cluster profile.
- trino
Profile Property Map - Trino Cluster profile.
ClusterProfileResponse, ClusterProfileResponseArgs
- Pulumi.
Azure Native. HDInsight. Inputs. Authorization Profile Response - Authorization profile with details of AAD user Ids and group Ids authorized for data plane access.
- Cluster
Version string - Version with 3/4 part.
- Components
List<Pulumi.
Azure Native. HDInsight. Inputs. Cluster Profile Response Components> - Component list of this cluster type and version.
- Connectivity
Profile Pulumi.Azure Native. HDInsight. Inputs. Connectivity Profile Response - Cluster connectivity profile.
- Oss
Version string - Version with three part.
- Autoscale
Profile Pulumi.Azure Native. HDInsight. Inputs. Autoscale Profile Response - This is the Autoscale profile for the cluster. This will allow customer to create cluster enabled with Autoscale.
- Cluster
Access Profile Pulumi.Azure Native. HDInsight. Inputs. Cluster Access Profile Response - Cluster access profile.
- Flink
Profile Pulumi.Azure Native. HDInsight. Inputs. Flink Profile Response - The Flink cluster profile.
- Identity
Profile Pulumi.Azure Native. HDInsight. Inputs. Identity Profile Response - This is deprecated. Please use managed identity profile instead.
- Kafka
Profile Pulumi.Azure Native. HDInsight. Inputs. Kafka Profile Response - The Kafka cluster profile.
- Llap
Profile object - LLAP cluster profile.
- Log
Analytics Profile Pulumi.Azure Native. HDInsight. Inputs. Cluster Log Analytics Profile Response - Cluster log analytics profile to enable or disable OMS agent for cluster.
- Managed
Identity Profile Pulumi.Azure Native. HDInsight. Inputs. Managed Identity Profile Response - This property is required by Trino, Spark and Flink cluster but is optional for Kafka cluster.
- Prometheus
Profile Pulumi.Azure Native. HDInsight. Inputs. Cluster Prometheus Profile Response - Cluster Prometheus profile.
- Ranger
Plugin Profile Pulumi.Azure Native. HDInsight. Inputs. Cluster Ranger Plugin Profile Response - Cluster Ranger plugin profile.
- Ranger
Profile Pulumi.Azure Native. HDInsight. Inputs. Ranger Profile Response - The ranger cluster profile.
- Script
Action Profiles List<Pulumi.Azure Native. HDInsight. Inputs. Script Action Profile Response> - The script action profile list.
- Secrets
Profile Pulumi.Azure Native. HDInsight. Inputs. Secrets Profile Response - The cluster secret profile.
- Service
Configs Profiles List<Pulumi.Azure Native. HDInsight. Inputs. Cluster Service Configs Profile Response> - The service configs profiles.
- Spark
Profile Pulumi.Azure Native. HDInsight. Inputs. Spark Profile Response - The spark cluster profile.
- Ssh
Profile Pulumi.Azure Native. HDInsight. Inputs. Cluster Pool Ssh Profile Response - Ssh profile for the cluster.
- Stub
Profile object - Stub cluster profile.
- Trino
Profile Pulumi.Azure Native. HDInsight. Inputs. Trino Profile Response - Trino Cluster profile.
- Authorization
Profile Response - Authorization profile with details of AAD user Ids and group Ids authorized for data plane access.
- Cluster
Version string - Version with 3/4 part.
- Components
[]Cluster
Profile Response Components - Component list of this cluster type and version.
- Connectivity
Profile ConnectivityProfile Response - Cluster connectivity profile.
- Oss
Version string - Version with three part.
- Autoscale
Profile AutoscaleProfile Response - This is the Autoscale profile for the cluster. This will allow customer to create cluster enabled with Autoscale.
- Cluster
Access Profile Cluster Access Profile Response - Cluster access profile.
- Flink
Profile FlinkProfile Response - The Flink cluster profile.
- Identity
Profile IdentityProfile Response - This is deprecated. Please use managed identity profile instead.
- Kafka
Profile KafkaProfile Response - The Kafka cluster profile.
- Llap
Profile interface{} - LLAP cluster profile.
- Log
Analytics Profile Cluster Log Analytics Profile Response - Cluster log analytics profile to enable or disable OMS agent for cluster.
- Managed
Identity Profile Managed Identity Profile Response - This property is required by Trino, Spark and Flink cluster but is optional for Kafka cluster.
- Prometheus
Profile ClusterPrometheus Profile Response - Cluster Prometheus profile.
- Ranger
Plugin Profile Cluster Ranger Plugin Profile Response - Cluster Ranger plugin profile.
- Ranger
Profile RangerProfile Response - The ranger cluster profile.
- Script
Action Profiles []Script Action Profile Response - The script action profile list.
- Secrets
Profile SecretsProfile Response - The cluster secret profile.
- Service
Configs Profiles []Cluster Service Configs Profile Response - The service configs profiles.
- Spark
Profile SparkProfile Response - The spark cluster profile.
- Ssh
Profile ClusterPool Ssh Profile Response - Ssh profile for the cluster.
- Stub
Profile interface{} - Stub cluster profile.
- Trino
Profile TrinoProfile Response - Trino Cluster profile.
- Authorization
Profile Response - Authorization profile with details of AAD user Ids and group Ids authorized for data plane access.
- cluster
Version String - Version with 3/4 part.
- components
List<Cluster
Profile Response Components> - Component list of this cluster type and version.
- connectivity
Profile ConnectivityProfile Response - Cluster connectivity profile.
- oss
Version String - Version with three part.
- autoscale
Profile AutoscaleProfile Response - This is the Autoscale profile for the cluster. This will allow customer to create cluster enabled with Autoscale.
- cluster
Access Profile Cluster Access Profile Response - Cluster access profile.
- flink
Profile FlinkProfile Response - The Flink cluster profile.
- identity
Profile IdentityProfile Response - This is deprecated. Please use managed identity profile instead.
- kafka
Profile KafkaProfile Response - The Kafka cluster profile.
- llap
Profile Object - LLAP cluster profile.
- log
Analytics Profile Cluster Log Analytics Profile Response - Cluster log analytics profile to enable or disable OMS agent for cluster.
- managed
Identity Profile Managed Identity Profile Response - This property is required by Trino, Spark and Flink cluster but is optional for Kafka cluster.
- prometheus
Profile ClusterPrometheus Profile Response - Cluster Prometheus profile.
- ranger
Plugin Profile Cluster Ranger Plugin Profile Response - Cluster Ranger plugin profile.
- ranger
Profile RangerProfile Response - The ranger cluster profile.
- script
Action Profiles List<Script Action Profile Response> - The script action profile list.
- secrets
Profile SecretsProfile Response - The cluster secret profile.
- service
Configs Profiles List<Cluster Service Configs Profile Response> - The service configs profiles.
- spark
Profile SparkProfile Response - The spark cluster profile.
- ssh
Profile ClusterPool Ssh Profile Response - Ssh profile for the cluster.
- stub
Profile Object - Stub cluster profile.
- trino
Profile TrinoProfile Response - Trino Cluster profile.
- Authorization
Profile Response - Authorization profile with details of AAD user Ids and group Ids authorized for data plane access.
- cluster
Version string - Version with 3/4 part.
- components
Cluster
Profile Response Components[] - Component list of this cluster type and version.
- connectivity
Profile ConnectivityProfile Response - Cluster connectivity profile.
- oss
Version string - Version with three part.
- autoscale
Profile AutoscaleProfile Response - This is the Autoscale profile for the cluster. This will allow customer to create cluster enabled with Autoscale.
- cluster
Access Profile Cluster Access Profile Response - Cluster access profile.
- flink
Profile FlinkProfile Response - The Flink cluster profile.
- identity
Profile IdentityProfile Response - This is deprecated. Please use managed identity profile instead.
- kafka
Profile KafkaProfile Response - The Kafka cluster profile.
- llap
Profile any - LLAP cluster profile.
- log
Analytics Profile Cluster Log Analytics Profile Response - Cluster log analytics profile to enable or disable OMS agent for cluster.
- managed
Identity Profile Managed Identity Profile Response - This property is required by Trino, Spark and Flink cluster but is optional for Kafka cluster.
- prometheus
Profile ClusterPrometheus Profile Response - Cluster Prometheus profile.
- ranger
Plugin Profile Cluster Ranger Plugin Profile Response - Cluster Ranger plugin profile.
- ranger
Profile RangerProfile Response - The ranger cluster profile.
- script
Action Profiles Script Action Profile Response[] - The script action profile list.
- secrets
Profile SecretsProfile Response - The cluster secret profile.
- service
Configs Profiles Cluster Service Configs Profile Response[] - The service configs profiles.
- spark
Profile SparkProfile Response - The spark cluster profile.
- ssh
Profile ClusterPool Ssh Profile Response - Ssh profile for the cluster.
- stub
Profile any - Stub cluster profile.
- trino
Profile TrinoProfile Response - Trino Cluster profile.
- Authorization
Profile Response - Authorization profile with details of AAD user Ids and group Ids authorized for data plane access.
- cluster_
version str - Version with 3/4 part.
- components
Sequence[Cluster
Profile Response Components] - Component list of this cluster type and version.
- connectivity_
profile ConnectivityProfile Response - Cluster connectivity profile.
- oss_
version str - Version with three part.
- autoscale_
profile AutoscaleProfile Response - This is the Autoscale profile for the cluster. This will allow customer to create cluster enabled with Autoscale.
- cluster_
access_ profile Cluster Access Profile Response - Cluster access profile.
- flink_
profile FlinkProfile Response - The Flink cluster profile.
- identity_
profile IdentityProfile Response - This is deprecated. Please use managed identity profile instead.
- kafka_
profile KafkaProfile Response - The Kafka cluster profile.
- llap_
profile Any - LLAP cluster profile.
- log_
analytics_ profile Cluster Log Analytics Profile Response - Cluster log analytics profile to enable or disable OMS agent for cluster.
- managed_
identity_ profile Managed Identity Profile Response - This property is required by Trino, Spark and Flink cluster but is optional for Kafka cluster.
- prometheus_
profile ClusterPrometheus Profile Response - Cluster Prometheus profile.
- ranger_
plugin_ profile Cluster Ranger Plugin Profile Response - Cluster Ranger plugin profile.
- ranger_
profile RangerProfile Response - The ranger cluster profile.
- script_
action_ profiles Sequence[Script Action Profile Response] - The script action profile list.
- secrets_
profile SecretsProfile Response - The cluster secret profile.
- service_
configs_ profiles Sequence[Cluster Service Configs Profile Response] - The service configs profiles.
- spark_
profile SparkProfile Response - The spark cluster profile.
- ssh_
profile ClusterPool Ssh Profile Response - Ssh profile for the cluster.
- stub_
profile Any - Stub cluster profile.
- trino_
profile TrinoProfile Response - Trino Cluster profile.
- Property Map
- Authorization profile with details of AAD user Ids and group Ids authorized for data plane access.
- cluster
Version String - Version with 3/4 part.
- components List<Property Map>
- Component list of this cluster type and version.
- connectivity
Profile Property Map - Cluster connectivity profile.
- oss
Version String - Version with three part.
- autoscale
Profile Property Map - This is the Autoscale profile for the cluster. This will allow customer to create cluster enabled with Autoscale.
- cluster
Access Profile Property Map - Cluster access profile.
- flink
Profile Property Map - The Flink cluster profile.
- identity
Profile Property Map - This is deprecated. Please use managed identity profile instead.
- kafka
Profile Property Map - The Kafka cluster profile.
- llap
Profile Any - LLAP cluster profile.
- log
Analytics Profile Property Map - Cluster log analytics profile to enable or disable OMS agent for cluster.
- managed
Identity Profile Property Map - This property is required by Trino, Spark and Flink cluster but is optional for Kafka cluster.
- prometheus
Profile Property Map - Cluster Prometheus profile.
- ranger
Plugin Profile Property Map - Cluster Ranger plugin profile.
- ranger
Profile Property Map - The ranger cluster profile.
- script
Action Profiles List<Property Map> - The script action profile list.
- secrets
Profile Property Map - The cluster secret profile.
- service
Configs Profiles List<Property Map> - The service configs profiles.
- spark
Profile Property Map - The spark cluster profile.
- ssh
Profile Property Map - Ssh profile for the cluster.
- stub
Profile Any - Stub cluster profile.
- trino
Profile Property Map - Trino Cluster profile.
ClusterProfileResponseComponents, ClusterProfileResponseComponentsArgs
ClusterPrometheusProfile, ClusterPrometheusProfileArgs
- Enabled bool
- Enable Prometheus for cluster or not.
- Enabled bool
- Enable Prometheus for cluster or not.
- enabled Boolean
- Enable Prometheus for cluster or not.
- enabled boolean
- Enable Prometheus for cluster or not.
- enabled bool
- Enable Prometheus for cluster or not.
- enabled Boolean
- Enable Prometheus for cluster or not.
ClusterPrometheusProfileResponse, ClusterPrometheusProfileResponseArgs
- Enabled bool
- Enable Prometheus for cluster or not.
- Enabled bool
- Enable Prometheus for cluster or not.
- enabled Boolean
- Enable Prometheus for cluster or not.
- enabled boolean
- Enable Prometheus for cluster or not.
- enabled bool
- Enable Prometheus for cluster or not.
- enabled Boolean
- Enable Prometheus for cluster or not.
ClusterRangerPluginProfile, ClusterRangerPluginProfileArgs
- Enabled bool
- Enable Ranger for cluster or not.
- Enabled bool
- Enable Ranger for cluster or not.
- enabled Boolean
- Enable Ranger for cluster or not.
- enabled boolean
- Enable Ranger for cluster or not.
- enabled bool
- Enable Ranger for cluster or not.
- enabled Boolean
- Enable Ranger for cluster or not.
ClusterRangerPluginProfileResponse, ClusterRangerPluginProfileResponseArgs
- Enabled bool
- Enable Ranger for cluster or not.
- Enabled bool
- Enable Ranger for cluster or not.
- enabled Boolean
- Enable Ranger for cluster or not.
- enabled boolean
- Enable Ranger for cluster or not.
- enabled bool
- Enable Ranger for cluster or not.
- enabled Boolean
- Enable Ranger for cluster or not.
ClusterServiceConfig, ClusterServiceConfigArgs
- Component string
- Name of the component the config files should apply to.
- Files
List<Pulumi.
Azure Native. HDInsight. Inputs. Cluster Config File> - List of Config Files.
- Component string
- Name of the component the config files should apply to.
- Files
[]Cluster
Config File - List of Config Files.
- component String
- Name of the component the config files should apply to.
- files
List<Cluster
Config File> - List of Config Files.
- component string
- Name of the component the config files should apply to.
- files
Cluster
Config File[] - List of Config Files.
- component str
- Name of the component the config files should apply to.
- files
Sequence[Cluster
Config File] - List of Config Files.
- component String
- Name of the component the config files should apply to.
- files List<Property Map>
- List of Config Files.
ClusterServiceConfigResponse, ClusterServiceConfigResponseArgs
- Component string
- Name of the component the config files should apply to.
- Files
List<Pulumi.
Azure Native. HDInsight. Inputs. Cluster Config File Response> - List of Config Files.
- Component string
- Name of the component the config files should apply to.
- Files
[]Cluster
Config File Response - List of Config Files.
- component String
- Name of the component the config files should apply to.
- files
List<Cluster
Config File Response> - List of Config Files.
- component string
- Name of the component the config files should apply to.
- files
Cluster
Config File Response[] - List of Config Files.
- component str
- Name of the component the config files should apply to.
- files
Sequence[Cluster
Config File Response] - List of Config Files.
- component String
- Name of the component the config files should apply to.
- files List<Property Map>
- List of Config Files.
ClusterServiceConfigsProfile, ClusterServiceConfigsProfileArgs
- Configs
List<Pulumi.
Azure Native. HDInsight. Inputs. Cluster Service Config> - List of service configs.
- Service
Name string - Name of the service the configurations should apply to.
- Configs
[]Cluster
Service Config - List of service configs.
- Service
Name string - Name of the service the configurations should apply to.
- configs
List<Cluster
Service Config> - List of service configs.
- service
Name String - Name of the service the configurations should apply to.
- configs
Cluster
Service Config[] - List of service configs.
- service
Name string - Name of the service the configurations should apply to.
- configs
Sequence[Cluster
Service Config] - List of service configs.
- service_
name str - Name of the service the configurations should apply to.
- configs List<Property Map>
- List of service configs.
- service
Name String - Name of the service the configurations should apply to.
ClusterServiceConfigsProfileResponse, ClusterServiceConfigsProfileResponseArgs
- Configs
List<Pulumi.
Azure Native. HDInsight. Inputs. Cluster Service Config Response> - List of service configs.
- Service
Name string - Name of the service the configurations should apply to.
- Configs
[]Cluster
Service Config Response - List of service configs.
- Service
Name string - Name of the service the configurations should apply to.
- configs
List<Cluster
Service Config Response> - List of service configs.
- service
Name String - Name of the service the configurations should apply to.
- configs
Cluster
Service Config Response[] - List of service configs.
- service
Name string - Name of the service the configurations should apply to.
- configs
Sequence[Cluster
Service Config Response] - List of service configs.
- service_
name str - Name of the service the configurations should apply to.
- configs List<Property Map>
- List of service configs.
- service
Name String - Name of the service the configurations should apply to.
ComparisonOperator, ComparisonOperatorArgs
- Greater
Than - greaterThan
- Greater
Than Or Equal - greaterThanOrEqual
- Less
Than - lessThan
- Less
Than Or Equal - lessThanOrEqual
- Comparison
Operator Greater Than - greaterThan
- Comparison
Operator Greater Than Or Equal - greaterThanOrEqual
- Comparison
Operator Less Than - lessThan
- Comparison
Operator Less Than Or Equal - lessThanOrEqual
- Greater
Than - greaterThan
- Greater
Than Or Equal - greaterThanOrEqual
- Less
Than - lessThan
- Less
Than Or Equal - lessThanOrEqual
- Greater
Than - greaterThan
- Greater
Than Or Equal - greaterThanOrEqual
- Less
Than - lessThan
- Less
Than Or Equal - lessThanOrEqual
- GREATER_THAN
- greaterThan
- GREATER_THAN_OR_EQUAL
- greaterThanOrEqual
- LESS_THAN
- lessThan
- LESS_THAN_OR_EQUAL
- lessThanOrEqual
- "greater
Than" - greaterThan
- "greater
Than Or Equal" - greaterThanOrEqual
- "less
Than" - lessThan
- "less
Than Or Equal" - lessThanOrEqual
ComparisonRule, ComparisonRuleArgs
- Operator
string | Pulumi.
Azure Native. HDInsight. Comparison Operator - The comparison operator.
- Threshold double
- Threshold setting.
- Operator
string | Comparison
Operator - The comparison operator.
- Threshold float64
- Threshold setting.
- operator
String | Comparison
Operator - The comparison operator.
- threshold Double
- Threshold setting.
- operator
string | Comparison
Operator - The comparison operator.
- threshold number
- Threshold setting.
- operator
str | Comparison
Operator - The comparison operator.
- threshold float
- Threshold setting.
- operator
String | "greater
Than" | "greater Than Or Equal" | "less Than" | "less Than Or Equal" - The comparison operator.
- threshold Number
- Threshold setting.
ComparisonRuleResponse, ComparisonRuleResponseArgs
ComputeResourceDefinition, ComputeResourceDefinitionArgs
ComputeResourceDefinitionResponse, ComputeResourceDefinitionResponseArgs
ConnectivityProfileResponse, ConnectivityProfileResponseArgs
- Web Pulumi.AzureNative.HDInsight.Inputs.ConnectivityProfileResponseWeb - Web connectivity endpoint details.
- Ssh List&lt;Pulumi.AzureNative.HDInsight.Inputs.SshConnectivityEndpointResponse&gt; - List of SSH connectivity endpoints.
- Web ConnectivityProfileResponseWeb - Web connectivity endpoint details.
- Ssh []SshConnectivityEndpointResponse - List of SSH connectivity endpoints.
- web ConnectivityProfileResponseWeb - Web connectivity endpoint details.
- ssh List&lt;SshConnectivityEndpointResponse&gt; - List of SSH connectivity endpoints.
- web ConnectivityProfileResponseWeb - Web connectivity endpoint details.
- ssh SshConnectivityEndpointResponse[] - List of SSH connectivity endpoints.
- web ConnectivityProfileResponseWeb - Web connectivity endpoint details.
- ssh Sequence[SshConnectivityEndpointResponse] - List of SSH connectivity endpoints.
- web Property Map - Web connectivity endpoint details.
- ssh List&lt;Property Map&gt; - List of SSH connectivity endpoints.
ConnectivityProfileResponseWeb, ConnectivityProfileResponseWebArgs
- Fqdn string - Web connectivity endpoint.
- PrivateFqdn string - Private web connectivity endpoint. This property will only be returned when enableInternalIngress is true.
- Fqdn string - Web connectivity endpoint.
- PrivateFqdn string - Private web connectivity endpoint. This property will only be returned when enableInternalIngress is true.
- fqdn String - Web connectivity endpoint.
- privateFqdn String - Private web connectivity endpoint. This property will only be returned when enableInternalIngress is true.
- fqdn string - Web connectivity endpoint.
- privateFqdn string - Private web connectivity endpoint. This property will only be returned when enableInternalIngress is true.
- fqdn str - Web connectivity endpoint.
- private_fqdn str - Private web connectivity endpoint. This property will only be returned when enableInternalIngress is true.
- fqdn String - Web connectivity endpoint.
- privateFqdn String - Private web connectivity endpoint. This property will only be returned when enableInternalIngress is true.
ContentEncoding, ContentEncodingArgs
- Base64
- Base64
- None
- None
- ContentEncodingBase64 - Base64
- ContentEncodingNone - None
- Base64
- Base64
- None
- None
- Base64
- Base64
- None
- None
- BASE64
- Base64
- NONE
- None
- "Base64"
- Base64
- "None"
- None
DataDiskType, DataDiskTypeArgs
- Standard_HDD_LRS
- Standard_HDD_LRS
- Standard_SSD_LRS
- Standard_SSD_LRS
- Standard_SSD_ZRS
- Standard_SSD_ZRS
- Premium_SSD_LRS
- Premium_SSD_LRS
- Premium_SSD_ZRS
- Premium_SSD_ZRS
- Premium_SSD_v2_LRS - Premium_SSD_v2_LRS
- DataDiskType_Standard_HDD_LRS - Standard_HDD_LRS
- DataDiskType_Standard_SSD_LRS - Standard_SSD_LRS
- DataDiskType_Standard_SSD_ZRS - Standard_SSD_ZRS
- DataDiskType_Premium_SSD_LRS - Premium_SSD_LRS
- DataDiskType_Premium_SSD_ZRS - Premium_SSD_ZRS
- DataDiskType_Premium_SSD_v2_LRS - Premium_SSD_v2_LRS
- Standard_HDD_LRS
- Standard_HDD_LRS
- Standard_SSD_LRS
- Standard_SSD_LRS
- Standard_SSD_ZRS
- Standard_SSD_ZRS
- Premium_SSD_LRS
- Premium_SSD_LRS
- Premium_SSD_ZRS
- Premium_SSD_ZRS
- Premium_SSD_v2_LRS - Premium_SSD_v2_LRS
- Standard_HDD_LRS
- Standard_HDD_LRS
- Standard_SSD_LRS
- Standard_SSD_LRS
- Standard_SSD_ZRS
- Standard_SSD_ZRS
- Premium_SSD_LRS
- Premium_SSD_LRS
- Premium_SSD_ZRS
- Premium_SSD_ZRS
- Premium_SSD_v2_LRS - Premium_SSD_v2_LRS
- STANDARD_HD_D_LRS
- Standard_HDD_LRS
- STANDARD_SS_D_LRS
- Standard_SSD_LRS
- STANDARD_SS_D_ZRS
- Standard_SSD_ZRS
- PREMIUM_SS_D_LRS
- Premium_SSD_LRS
- PREMIUM_SS_D_ZRS
- Premium_SSD_ZRS
- PREMIUM_SS_D_V2_LRS
- Premium_SSD_v2_LRS
- "Standard_HDD_LRS"
- Standard_HDD_LRS
- "Standard_SSD_LRS"
- Standard_SSD_LRS
- "Standard_SSD_ZRS"
- Standard_SSD_ZRS
- "Premium_SSD_LRS"
- Premium_SSD_LRS
- "Premium_SSD_ZRS"
- Premium_SSD_ZRS
- "Premium_SSD_v2_LRS" - Premium_SSD_v2_LRS
DbConnectionAuthenticationMode, DbConnectionAuthenticationModeArgs
- SqlAuth - SqlAuth. The password-based authentication to connect to your Hive metastore database.
- IdentityAuth - IdentityAuth. The managed-identity-based authentication to connect to your Hive metastore database.
- DbConnectionAuthenticationModeSqlAuth - SqlAuth. The password-based authentication to connect to your Hive metastore database.
- DbConnectionAuthenticationModeIdentityAuth - IdentityAuth. The managed-identity-based authentication to connect to your Hive metastore database.
- SqlAuth - SqlAuth. The password-based authentication to connect to your Hive metastore database.
- IdentityAuth - IdentityAuth. The managed-identity-based authentication to connect to your Hive metastore database.
- SqlAuth - SqlAuth. The password-based authentication to connect to your Hive metastore database.
- IdentityAuth - IdentityAuth. The managed-identity-based authentication to connect to your Hive metastore database.
- SQL_AUTH - SqlAuth. The password-based authentication to connect to your Hive metastore database.
- IDENTITY_AUTH - IdentityAuth. The managed-identity-based authentication to connect to your Hive metastore database.
- "SqlAuth" - SqlAuth. The password-based authentication to connect to your Hive metastore database.
- "IdentityAuth" - IdentityAuth. The managed-identity-based authentication to connect to your Hive metastore database.
DeploymentMode, DeploymentModeArgs
- Application
- Application
- Session
- Session
- DeploymentModeApplication - Application
- DeploymentModeSession - Session
- Application
- Application
- Session
- Session
- Application
- Application
- Session
- Session
- APPLICATION
- Application
- SESSION
- Session
- "Application"
- Application
- "Session"
- Session
DiskStorageProfile, DiskStorageProfileArgs
- DataDiskSize int - Managed Disk size in GB. The maximum supported disk size for Standard and Premium HDD/SSD is 32TB, except for Premium SSD v2, which supports up to 64TB.
- DataDiskType string | Pulumi.AzureNative.HDInsight.DataDiskType - Managed Disk Type.
- DataDiskSize int - Managed Disk size in GB. The maximum supported disk size for Standard and Premium HDD/SSD is 32TB, except for Premium SSD v2, which supports up to 64TB.
- DataDiskType string | DataDiskType - Managed Disk Type.
- dataDiskSize Integer - Managed Disk size in GB. The maximum supported disk size for Standard and Premium HDD/SSD is 32TB, except for Premium SSD v2, which supports up to 64TB.
- dataDiskType String | DataDiskType - Managed Disk Type.
- dataDiskSize number - Managed Disk size in GB. The maximum supported disk size for Standard and Premium HDD/SSD is 32TB, except for Premium SSD v2, which supports up to 64TB.
- dataDiskType string | DataDiskType - Managed Disk Type.
- data_disk_size int - Managed Disk size in GB. The maximum supported disk size for Standard and Premium HDD/SSD is 32TB, except for Premium SSD v2, which supports up to 64TB.
- data_disk_type str | DataDiskType - Managed Disk Type.
- dataDiskSize Number - Managed Disk size in GB. The maximum supported disk size for Standard and Premium HDD/SSD is 32TB, except for Premium SSD v2, which supports up to 64TB.
- dataDiskType String | "Standard_HDD_LRS" | "Standard_SSD_LRS" | "Standard_SSD_ZRS" | "Premium_SSD_LRS" | "Premium_SSD_ZRS" | "Premium_SSD_v2_LRS" - Managed Disk Type.
DiskStorageProfileResponse, DiskStorageProfileResponseArgs
- DataDiskSize int - Managed Disk size in GB. The maximum supported disk size for Standard and Premium HDD/SSD is 32TB, except for Premium SSD v2, which supports up to 64TB.
- DataDiskType string - Managed Disk Type.
- DataDiskSize int - Managed Disk size in GB. The maximum supported disk size for Standard and Premium HDD/SSD is 32TB, except for Premium SSD v2, which supports up to 64TB.
- DataDiskType string - Managed Disk Type.
- dataDiskSize Integer - Managed Disk size in GB. The maximum supported disk size for Standard and Premium HDD/SSD is 32TB, except for Premium SSD v2, which supports up to 64TB.
- dataDiskType String - Managed Disk Type.
- dataDiskSize number - Managed Disk size in GB. The maximum supported disk size for Standard and Premium HDD/SSD is 32TB, except for Premium SSD v2, which supports up to 64TB.
- dataDiskType string - Managed Disk Type.
- data_disk_size int - Managed Disk size in GB. The maximum supported disk size for Standard and Premium HDD/SSD is 32TB, except for Premium SSD v2, which supports up to 64TB.
- data_disk_type str - Managed Disk Type.
- dataDiskSize Number - Managed Disk size in GB. The maximum supported disk size for Standard and Premium HDD/SSD is 32TB, except for Premium SSD v2, which supports up to 64TB.
- dataDiskType String - Managed Disk Type.
FlinkCatalogOptions, FlinkCatalogOptionsArgs
- Hive Pulumi.AzureNative.HDInsight.Inputs.FlinkHiveCatalogOption - Hive Catalog Option for Flink cluster.
- Hive FlinkHiveCatalogOption - Hive Catalog Option for Flink cluster.
- hive FlinkHiveCatalogOption - Hive Catalog Option for Flink cluster.
- hive FlinkHiveCatalogOption - Hive Catalog Option for Flink cluster.
- hive FlinkHiveCatalogOption - Hive Catalog Option for Flink cluster.
- hive Property Map - Hive Catalog Option for Flink cluster.
FlinkCatalogOptionsResponse, FlinkCatalogOptionsResponseArgs
- Hive Pulumi.AzureNative.HDInsight.Inputs.FlinkHiveCatalogOptionResponse - Hive Catalog Option for Flink cluster.
- Hive FlinkHiveCatalogOptionResponse - Hive Catalog Option for Flink cluster.
- hive FlinkHiveCatalogOptionResponse - Hive Catalog Option for Flink cluster.
- hive FlinkHiveCatalogOptionResponse - Hive Catalog Option for Flink cluster.
- hive FlinkHiveCatalogOptionResponse - Hive Catalog Option for Flink cluster.
- hive Property Map - Hive Catalog Option for Flink cluster.
FlinkHiveCatalogOption, FlinkHiveCatalogOptionArgs
- MetastoreDbConnectionURL string - Connection string for hive metastore database.
- MetastoreDbConnectionAuthenticationMode string | Pulumi.AzureNative.HDInsight.MetastoreDbConnectionAuthenticationMode - The authentication mode to connect to your Hive metastore database. More details: https://learn.microsoft.com/en-us/azure/azure-sql/database/logins-create-manage?view=azuresql#authentication-and-authorization
- MetastoreDbConnectionPasswordSecret string - Secret reference name from secretsProfile.secrets containing password for database connection.
- MetastoreDbConnectionUserName string - User name for database connection.
- MetastoreDbConnectionURL string - Connection string for hive metastore database.
- MetastoreDbConnectionAuthenticationMode string | MetastoreDbConnectionAuthenticationMode - The authentication mode to connect to your Hive metastore database. More details: https://learn.microsoft.com/en-us/azure/azure-sql/database/logins-create-manage?view=azuresql#authentication-and-authorization
- MetastoreDbConnectionPasswordSecret string - Secret reference name from secretsProfile.secrets containing password for database connection.
- MetastoreDbConnectionUserName string - User name for database connection.
- metastoreDbConnectionURL String - Connection string for hive metastore database.
- metastoreDbConnectionAuthenticationMode String | MetastoreDbConnectionAuthenticationMode - The authentication mode to connect to your Hive metastore database. More details: https://learn.microsoft.com/en-us/azure/azure-sql/database/logins-create-manage?view=azuresql#authentication-and-authorization
- metastoreDbConnectionPasswordSecret String - Secret reference name from secretsProfile.secrets containing password for database connection.
- metastoreDbConnectionUserName String - User name for database connection.
- metastoreDbConnectionURL string - Connection string for hive metastore database.
- metastoreDbConnectionAuthenticationMode string | MetastoreDbConnectionAuthenticationMode - The authentication mode to connect to your Hive metastore database. More details: https://learn.microsoft.com/en-us/azure/azure-sql/database/logins-create-manage?view=azuresql#authentication-and-authorization
- metastoreDbConnectionPasswordSecret string - Secret reference name from secretsProfile.secrets containing password for database connection.
- metastoreDbConnectionUserName string - User name for database connection.
- metastore_db_connection_url str - Connection string for hive metastore database.
- metastore_db_connection_authentication_mode str | MetastoreDbConnectionAuthenticationMode - The authentication mode to connect to your Hive metastore database. More details: https://learn.microsoft.com/en-us/azure/azure-sql/database/logins-create-manage?view=azuresql#authentication-and-authorization
- metastore_db_connection_password_secret str - Secret reference name from secretsProfile.secrets containing password for database connection.
- metastore_db_connection_user_name str - User name for database connection.
- metastoreDbConnectionURL String - Connection string for hive metastore database.
- metastoreDbConnectionAuthenticationMode String | "SqlAuth" | "IdentityAuth" - The authentication mode to connect to your Hive metastore database. More details: https://learn.microsoft.com/en-us/azure/azure-sql/database/logins-create-manage?view=azuresql#authentication-and-authorization
- metastoreDbConnectionPasswordSecret String - Secret reference name from secretsProfile.secrets containing password for database connection.
- metastoreDbConnectionUserName String - User name for database connection.
FlinkHiveCatalogOptionResponse, FlinkHiveCatalogOptionResponseArgs
- MetastoreDbConnectionURL string - Connection string for hive metastore database.
- MetastoreDbConnectionAuthenticationMode string - The authentication mode to connect to your Hive metastore database. More details: https://learn.microsoft.com/en-us/azure/azure-sql/database/logins-create-manage?view=azuresql#authentication-and-authorization
- MetastoreDbConnectionPasswordSecret string - Secret reference name from secretsProfile.secrets containing password for database connection.
- MetastoreDbConnectionUserName string - User name for database connection.
- MetastoreDbConnectionURL string - Connection string for hive metastore database.
- MetastoreDbConnectionAuthenticationMode string - The authentication mode to connect to your Hive metastore database. More details: https://learn.microsoft.com/en-us/azure/azure-sql/database/logins-create-manage?view=azuresql#authentication-and-authorization
- MetastoreDbConnectionPasswordSecret string - Secret reference name from secretsProfile.secrets containing password for database connection.
- MetastoreDbConnectionUserName string - User name for database connection.
- metastoreDbConnectionURL String - Connection string for hive metastore database.
- metastoreDbConnectionAuthenticationMode String - The authentication mode to connect to your Hive metastore database. More details: https://learn.microsoft.com/en-us/azure/azure-sql/database/logins-create-manage?view=azuresql#authentication-and-authorization
- metastoreDbConnectionPasswordSecret String - Secret reference name from secretsProfile.secrets containing password for database connection.
- metastoreDbConnectionUserName String - User name for database connection.
- metastoreDbConnectionURL string - Connection string for hive metastore database.
- metastoreDbConnectionAuthenticationMode string - The authentication mode to connect to your Hive metastore database. More details: https://learn.microsoft.com/en-us/azure/azure-sql/database/logins-create-manage?view=azuresql#authentication-and-authorization
- metastoreDbConnectionPasswordSecret string - Secret reference name from secretsProfile.secrets containing password for database connection.
- metastoreDbConnectionUserName string - User name for database connection.
- metastore_db_connection_url str - Connection string for hive metastore database.
- metastore_db_connection_authentication_mode str - The authentication mode to connect to your Hive metastore database. More details: https://learn.microsoft.com/en-us/azure/azure-sql/database/logins-create-manage?view=azuresql#authentication-and-authorization
- metastore_db_connection_password_secret str - Secret reference name from secretsProfile.secrets containing password for database connection.
- metastore_db_connection_user_name str - User name for database connection.
- metastoreDbConnectionURL String - Connection string for hive metastore database.
- metastoreDbConnectionAuthenticationMode String - The authentication mode to connect to your Hive metastore database. More details: https://learn.microsoft.com/en-us/azure/azure-sql/database/logins-create-manage?view=azuresql#authentication-and-authorization
- metastoreDbConnectionPasswordSecret String - Secret reference name from secretsProfile.secrets containing password for database connection.
- metastoreDbConnectionUserName String - User name for database connection.
FlinkJobProfile, FlinkJobProfileArgs
- JarName string - A string property that represents the name of the job JAR.
- JobJarDirectory string - A string property that specifies the directory where the job JAR is located.
- UpgradeMode string | Pulumi.AzureNative.HDInsight.UpgradeMode - A string property that indicates the upgrade mode to be performed on the Flink job. It can have one of the following enum values => STATELESS_UPDATE, UPDATE, LAST_STATE_UPDATE.
- Args string - A string property representing additional JVM arguments for the Flink job. It should be space separated value.
- EntryClass string - A string property that specifies the entry class for the Flink job. If not specified, the entry point is auto-detected from the flink job jar package.
- SavePointName string - A string property that represents the name of the savepoint for the Flink job
- JarName string - A string property that represents the name of the job JAR.
- JobJarDirectory string - A string property that specifies the directory where the job JAR is located.
- UpgradeMode string | UpgradeMode - A string property that indicates the upgrade mode to be performed on the Flink job. It can have one of the following enum values => STATELESS_UPDATE, UPDATE, LAST_STATE_UPDATE.
- Args string - A string property representing additional JVM arguments for the Flink job. It should be space separated value.
- EntryClass string - A string property that specifies the entry class for the Flink job. If not specified, the entry point is auto-detected from the flink job jar package.
- SavePointName string - A string property that represents the name of the savepoint for the Flink job
- jarName String - A string property that represents the name of the job JAR.
- jobJarDirectory String - A string property that specifies the directory where the job JAR is located.
- upgradeMode String | UpgradeMode - A string property that indicates the upgrade mode to be performed on the Flink job. It can have one of the following enum values => STATELESS_UPDATE, UPDATE, LAST_STATE_UPDATE.
- args String - A string property representing additional JVM arguments for the Flink job. It should be space separated value.
- entryClass String - A string property that specifies the entry class for the Flink job. If not specified, the entry point is auto-detected from the flink job jar package.
- savePointName String - A string property that represents the name of the savepoint for the Flink job
- jarName string - A string property that represents the name of the job JAR.
- jobJarDirectory string - A string property that specifies the directory where the job JAR is located.
- upgradeMode string | UpgradeMode - A string property that indicates the upgrade mode to be performed on the Flink job. It can have one of the following enum values => STATELESS_UPDATE, UPDATE, LAST_STATE_UPDATE.
- args string - A string property representing additional JVM arguments for the Flink job. It should be space separated value.
- entryClass string - A string property that specifies the entry class for the Flink job. If not specified, the entry point is auto-detected from the flink job jar package.
- savePointName string - A string property that represents the name of the savepoint for the Flink job
- jar_name str - A string property that represents the name of the job JAR.
- job_jar_directory str - A string property that specifies the directory where the job JAR is located.
- upgrade_mode str | UpgradeMode - A string property that indicates the upgrade mode to be performed on the Flink job. It can have one of the following enum values => STATELESS_UPDATE, UPDATE, LAST_STATE_UPDATE.
- args str - A string property representing additional JVM arguments for the Flink job. It should be space separated value.
- entry_class str - A string property that specifies the entry class for the Flink job. If not specified, the entry point is auto-detected from the flink job jar package.
- save_point_name str - A string property that represents the name of the savepoint for the Flink job
- jarName String - A string property that represents the name of the job JAR.
- jobJarDirectory String - A string property that specifies the directory where the job JAR is located.
- upgradeMode String | "STATELESS_UPDATE" | "UPDATE" | "LAST_STATE_UPDATE" - A string property that indicates the upgrade mode to be performed on the Flink job. It can have one of the following enum values => STATELESS_UPDATE, UPDATE, LAST_STATE_UPDATE.
- args String - A string property representing additional JVM arguments for the Flink job. It should be space separated value.
- entryClass String - A string property that specifies the entry class for the Flink job. If not specified, the entry point is auto-detected from the flink job jar package.
- savePointName String - A string property that represents the name of the savepoint for the Flink job
FlinkJobProfileResponse, FlinkJobProfileResponseArgs
- JarName string - A string property that represents the name of the job JAR.
- JobJarDirectory string - A string property that specifies the directory where the job JAR is located.
- UpgradeMode string - A string property that indicates the upgrade mode to be performed on the Flink job. It can have one of the following enum values => STATELESS_UPDATE, UPDATE, LAST_STATE_UPDATE.
- Args string - A string property representing additional JVM arguments for the Flink job. It should be space separated value.
- EntryClass string - A string property that specifies the entry class for the Flink job. If not specified, the entry point is auto-detected from the flink job jar package.
- SavePointName string - A string property that represents the name of the savepoint for the Flink job
- JarName string - A string property that represents the name of the job JAR.
- JobJarDirectory string - A string property that specifies the directory where the job JAR is located.
- UpgradeMode string - A string property that indicates the upgrade mode to be performed on the Flink job. It can have one of the following enum values => STATELESS_UPDATE, UPDATE, LAST_STATE_UPDATE.
- Args string - A string property representing additional JVM arguments for the Flink job. It should be space separated value.
- EntryClass string - A string property that specifies the entry class for the Flink job. If not specified, the entry point is auto-detected from the flink job jar package.
- SavePointName string - A string property that represents the name of the savepoint for the Flink job
- jarName String - A string property that represents the name of the job JAR.
- jobJarDirectory String - A string property that specifies the directory where the job JAR is located.
- upgradeMode String - A string property that indicates the upgrade mode to be performed on the Flink job. It can have one of the following enum values => STATELESS_UPDATE, UPDATE, LAST_STATE_UPDATE.
- args String - A string property representing additional JVM arguments for the Flink job. It should be space separated value.
- entryClass String - A string property that specifies the entry class for the Flink job. If not specified, the entry point is auto-detected from the flink job jar package.
- savePointName String - A string property that represents the name of the savepoint for the Flink job
- jarName string - A string property that represents the name of the job JAR.
- jobJarDirectory string - A string property that specifies the directory where the job JAR is located.
- upgradeMode string - A string property that indicates the upgrade mode to be performed on the Flink job. It can have one of the following enum values => STATELESS_UPDATE, UPDATE, LAST_STATE_UPDATE.
- args string - A string property representing additional JVM arguments for the Flink job. It should be space separated value.
- entryClass string - A string property that specifies the entry class for the Flink job. If not specified, the entry point is auto-detected from the flink job jar package.
- savePointName string - A string property that represents the name of the savepoint for the Flink job
- jar_name str - A string property that represents the name of the job JAR.
- job_jar_directory str - A string property that specifies the directory where the job JAR is located.
- upgrade_mode str - A string property that indicates the upgrade mode to be performed on the Flink job. It can have one of the following enum values => STATELESS_UPDATE, UPDATE, LAST_STATE_UPDATE.
- args str - A string property representing additional JVM arguments for the Flink job. It should be space separated value.
- entry_class str - A string property that specifies the entry class for the Flink job. If not specified, the entry point is auto-detected from the flink job jar package.
- save_point_name str - A string property that represents the name of the savepoint for the Flink job
- jarName String - A string property that represents the name of the job JAR.
- jobJarDirectory String - A string property that specifies the directory where the job JAR is located.
- upgradeMode String - A string property that indicates the upgrade mode to be performed on the Flink job. It can have one of the following enum values => STATELESS_UPDATE, UPDATE, LAST_STATE_UPDATE.
- args String - A string property representing additional JVM arguments for the Flink job. It should be space separated value.
- entryClass String - A string property that specifies the entry class for the Flink job. If not specified, the entry point is auto-detected from the flink job jar package.
- savePointName String - A string property that represents the name of the savepoint for the Flink job
FlinkProfile, FlinkProfileArgs
- JobManager Pulumi.AzureNative.HDInsight.Inputs.ComputeResourceDefinition - Job Manager container/ process CPU and memory requirements
- Storage Pulumi.AzureNative.HDInsight.Inputs.FlinkStorageProfile - The storage profile
- TaskManager Pulumi.AzureNative.HDInsight.Inputs.ComputeResourceDefinition - Task Manager container/ process CPU and memory requirements
- CatalogOptions Pulumi.AzureNative.HDInsight.Inputs.FlinkCatalogOptions - Flink cluster catalog options.
- DeploymentMode string | Pulumi.AzureNative.HDInsight.DeploymentMode - A string property that indicates the deployment mode of Flink cluster. It can have one of the following enum values => Application, Session. Default value is Session
- HistoryServer Pulumi.AzureNative.HDInsight.Inputs.ComputeResourceDefinition - History Server container/ process CPU and memory requirements
- JobSpec Pulumi.AzureNative.HDInsight.Inputs.FlinkJobProfile - Job specifications for flink clusters in application deployment mode. The specification is immutable even if job properties are changed by calling the RunJob API, please use the ListJob API to get the latest job information.
- NumReplicas int - The number of task managers.
- JobManager ComputeResourceDefinition - Job Manager container/ process CPU and memory requirements
- Storage FlinkStorageProfile - The storage profile
- TaskManager ComputeResourceDefinition - Task Manager container/ process CPU and memory requirements
- CatalogOptions FlinkCatalogOptions - Flink cluster catalog options.
- DeploymentMode string | DeploymentMode - A string property that indicates the deployment mode of Flink cluster. It can have one of the following enum values => Application, Session. Default value is Session
- HistoryServer ComputeResourceDefinition - History Server container/ process CPU and memory requirements
- JobSpec FlinkJobProfile - Job specifications for flink clusters in application deployment mode. The specification is immutable even if job properties are changed by calling the RunJob API, please use the ListJob API to get the latest job information.
- NumReplicas int - The number of task managers.
- jobManager ComputeResourceDefinition - Job Manager container/ process CPU and memory requirements
- storage FlinkStorageProfile - The storage profile
- taskManager ComputeResourceDefinition - Task Manager container/ process CPU and memory requirements
- catalogOptions FlinkCatalogOptions - Flink cluster catalog options.
- deploymentMode String | DeploymentMode - A string property that indicates the deployment mode of Flink cluster. It can have one of the following enum values => Application, Session. Default value is Session
- historyServer ComputeResourceDefinition - History Server container/ process CPU and memory requirements
- jobSpec FlinkJobProfile - Job specifications for flink clusters in application deployment mode. The specification is immutable even if job properties are changed by calling the RunJob API, please use the ListJob API to get the latest job information.
- numReplicas Integer - The number of task managers.
- jobManager ComputeResourceDefinition - Job Manager container/ process CPU and memory requirements
- storage FlinkStorageProfile - The storage profile
- taskManager ComputeResourceDefinition - Task Manager container/ process CPU and memory requirements
- catalogOptions FlinkCatalogOptions - Flink cluster catalog options.
- deploymentMode string | DeploymentMode - A string property that indicates the deployment mode of Flink cluster. It can have one of the following enum values => Application, Session. Default value is Session
- historyServer ComputeResourceDefinition - History Server container/ process CPU and memory requirements
- jobSpec FlinkJobProfile - Job specifications for flink clusters in application deployment mode. The specification is immutable even if job properties are changed by calling the RunJob API, please use the ListJob API to get the latest job information.
- numReplicas number - The number of task managers.
- job_manager ComputeResourceDefinition - Job Manager container/ process CPU and memory requirements
- storage FlinkStorageProfile - The storage profile
- task_manager ComputeResourceDefinition - Task Manager container/ process CPU and memory requirements
- catalog_options FlinkCatalogOptions - Flink cluster catalog options.
- deployment_mode str | DeploymentMode - A string property that indicates the deployment mode of Flink cluster. It can have one of the following enum values => Application, Session. Default value is Session
- history_server ComputeResourceDefinition - History Server container/ process CPU and memory requirements
- job_spec FlinkJobProfile - Job specifications for flink clusters in application deployment mode. The specification is immutable even if job properties are changed by calling the RunJob API, please use the ListJob API to get the latest job information.
- num_replicas int - The number of task managers.
- jobManager Property Map - Job Manager container/ process CPU and memory requirements
- storage Property Map - The storage profile
- taskManager Property Map - Task Manager container/ process CPU and memory requirements
- catalogOptions Property Map - Flink cluster catalog options.
- deploymentMode String | "Application" | "Session" - A string property that indicates the deployment mode of Flink cluster. It can have one of the following enum values => Application, Session. Default value is Session
- historyServer Property Map - History Server container/ process CPU and memory requirements
- jobSpec Property Map - Job specifications for flink clusters in application deployment mode. The specification is immutable even if job properties are changed by calling the RunJob API, please use the ListJob API to get the latest job information.
- numReplicas Number - The number of task managers.
FlinkProfileResponse, FlinkProfileResponseArgs
- JobManager Pulumi.AzureNative.HDInsight.Inputs.ComputeResourceDefinitionResponse - Job Manager container/ process CPU and memory requirements
- Storage Pulumi.AzureNative.HDInsight.Inputs.FlinkStorageProfileResponse - The storage profile
- TaskManager Pulumi.AzureNative.HDInsight.Inputs.ComputeResourceDefinitionResponse - Task Manager container/ process CPU and memory requirements
- CatalogOptions Pulumi.AzureNative.HDInsight.Inputs.FlinkCatalogOptionsResponse - Flink cluster catalog options.
- DeploymentMode string - A string property that indicates the deployment mode of Flink cluster. It can have one of the following enum values => Application, Session. Default value is Session
- HistoryServer Pulumi.AzureNative.HDInsight.Inputs.ComputeResourceDefinitionResponse - History Server container/ process CPU and memory requirements
- JobSpec Pulumi.AzureNative.HDInsight.Inputs.FlinkJobProfileResponse - Job specifications for flink clusters in application deployment mode. The specification is immutable even if job properties are changed by calling the RunJob API, please use the ListJob API to get the latest job information.
- NumReplicas int - The number of task managers.
- JobManager ComputeResourceDefinitionResponse - Job Manager container/ process CPU and memory requirements
- Storage FlinkStorageProfileResponse - The storage profile
- TaskManager ComputeResourceDefinitionResponse - Task Manager container/ process CPU and memory requirements
- CatalogOptions FlinkCatalogOptionsResponse - Flink cluster catalog options.
- DeploymentMode string - A string property that indicates the deployment mode of Flink cluster. It can have one of the following enum values => Application, Session. Default value is Session
- HistoryServer ComputeResourceDefinitionResponse - History Server container/ process CPU and memory requirements
- JobSpec FlinkJobProfileResponse - Job specifications for flink clusters in application deployment mode. The specification is immutable even if job properties are changed by calling the RunJob API, please use the ListJob API to get the latest job information.
- NumReplicas int - The number of task managers.
- jobManager ComputeResourceDefinitionResponse - Job Manager container/ process CPU and memory requirements
- storage FlinkStorageProfileResponse - The storage profile
- taskManager ComputeResourceDefinitionResponse - Task Manager container/ process CPU and memory requirements
- catalogOptions FlinkCatalogOptionsResponse - Flink cluster catalog options.
- deploymentMode String - A string property that indicates the deployment mode of Flink cluster. It can have one of the following enum values => Application, Session. Default value is Session
- historyServer ComputeResourceDefinitionResponse - History Server container/ process CPU and memory requirements
- jobSpec FlinkJobProfileResponse - Job specifications for flink clusters in application deployment mode. The specification is immutable even if job properties are changed by calling the RunJob API, please use the ListJob API to get the latest job information.
- numReplicas Integer - The number of task managers.
- jobManager ComputeResourceDefinitionResponse - Job Manager container/ process CPU and memory requirements
- storage FlinkStorageProfileResponse - The storage profile
- taskManager ComputeResourceDefinitionResponse - Task Manager container/ process CPU and memory requirements
- catalogOptions FlinkCatalogOptionsResponse - Flink cluster catalog options.
- deploymentMode string - A string property that indicates the deployment mode of Flink cluster. It can have one of the following enum values => Application, Session. Default value is Session
- historyServer ComputeResourceDefinitionResponse - History Server container/ process CPU and memory requirements
- jobSpec FlinkJobProfileResponse - Job specifications for flink clusters in application deployment mode. The specification is immutable even if job properties are changed by calling the RunJob API, please use the ListJob API to get the latest job information.
- numReplicas number - The number of task managers.
- job_manager ComputeResourceDefinitionResponse - Job Manager container/ process CPU and memory requirements
- storage FlinkStorageProfileResponse - The storage profile
- task_manager ComputeResourceDefinitionResponse - Task Manager container/ process CPU and memory requirements
- catalog_options FlinkCatalogOptionsResponse - Flink cluster catalog options.
- deployment_mode str - A string property that indicates the deployment mode of Flink cluster. It can have one of the following enum values => Application, Session. Default value is Session
- history_server ComputeResourceDefinitionResponse - History Server container/ process CPU and memory requirements
- job_spec FlinkJobProfileResponse - Job specifications for flink clusters in application deployment mode. The specification is immutable even if job properties are changed by calling the RunJob API, please use the ListJob API to get the latest job information.
- num_replicas int - The number of task managers.
- jobManager Property Map - Job Manager container/ process CPU and memory requirements
- storage Property Map - The storage profile
- taskManager Property Map - Task Manager container/ process CPU and memory requirements
- catalogOptions Property Map - Flink cluster catalog options.
- deploymentMode String - A string property that indicates the deployment mode of Flink cluster. It can have one of the following enum values => Application, Session. Default value is Session
- historyServer Property Map - History Server container/ process CPU and memory requirements
- jobSpec Property Map - Job specifications for flink clusters in application deployment mode. The specification is immutable even if job properties are changed by calling the RunJob API, please use the ListJob API to get the latest job information.
- numReplicas Number - The number of task managers.
FlinkStorageProfile, FlinkStorageProfileArgs
- StorageUri string - Storage account uri which is used for savepoint and checkpoint state.
- Storagekey string - Storage key is only required for wasb(s) storage.
- StorageUri string - Storage account uri which is used for savepoint and checkpoint state.
- Storagekey string - Storage key is only required for wasb(s) storage.
- storageUri String - Storage account uri which is used for savepoint and checkpoint state.
- storagekey String - Storage key is only required for wasb(s) storage.
- storageUri string - Storage account uri which is used for savepoint and checkpoint state.
- storagekey string - Storage key is only required for wasb(s) storage.
- storage_uri str - Storage account uri which is used for savepoint and checkpoint state.
- storagekey str - Storage key is only required for wasb(s) storage.
- storageUri String - Storage account uri which is used for savepoint and checkpoint state.
- storagekey String - Storage key is only required for wasb(s) storage.
FlinkStorageProfileResponse, FlinkStorageProfileResponseArgs
- StorageUri string - Storage account uri which is used for savepoint and checkpoint state.
- Storagekey string - Storage key is only required for wasb(s) storage.
- StorageUri string - Storage account uri which is used for savepoint and checkpoint state.
- Storagekey string - Storage key is only required for wasb(s) storage.
- storageUri String - Storage account uri which is used for savepoint and checkpoint state.
- storagekey String - Storage key is only required for wasb(s) storage.
- storageUri string - Storage account uri which is used for savepoint and checkpoint state.
- storagekey string - Storage key is only required for wasb(s) storage.
- storage_uri str - Storage account uri which is used for savepoint and checkpoint state.
- storagekey str - Storage key is only required for wasb(s) storage.
- storageUri String - Storage account uri which is used for savepoint and checkpoint state.
- storagekey String - Storage key is only required for wasb(s) storage.
HiveCatalogOption, HiveCatalogOptionArgs
- CatalogName string - Name of trino catalog which should use specified hive metastore.
- MetastoreDbConnectionURL string - Connection string for hive metastore database.
- MetastoreWarehouseDir string - Metastore root directory URI, format: abfs[s]://<container>@<account_name>.dfs.core.windows.net/<path>. More details: https://docs.microsoft.com/en-us/azure/storage/blobs/data-lake-storage-introduction-abfs-uri
- MetastoreDbConnectionAuthenticationMode string | Pulumi.AzureNative.HDInsight.MetastoreDbConnectionAuthenticationMode - The authentication mode to connect to your Hive metastore database. More details: https://learn.microsoft.com/en-us/azure/azure-sql/database/logins-create-manage?view=azuresql#authentication-and-authorization
- MetastoreDbConnectionPasswordSecret string - Secret reference name from secretsProfile.secrets containing password for database connection.
- MetastoreDbConnectionUserName string - User name for database connection.
- CatalogName string - Name of trino catalog which should use specified hive metastore.
- MetastoreDbConnectionURL string - Connection string for hive metastore database.
- MetastoreWarehouseDir string - Metastore root directory URI, format: abfs[s]://<container>@<account_name>.dfs.core.windows.net/<path>. More details: https://docs.microsoft.com/en-us/azure/storage/blobs/data-lake-storage-introduction-abfs-uri
- MetastoreDbConnectionAuthenticationMode string | MetastoreDbConnectionAuthenticationMode - The authentication mode to connect to your Hive metastore database. More details: https://learn.microsoft.com/en-us/azure/azure-sql/database/logins-create-manage?view=azuresql#authentication-and-authorization
- MetastoreDbConnectionPasswordSecret string - Secret reference name from secretsProfile.secrets containing password for database connection.
- MetastoreDbConnectionUserName string - User name for database connection.
- catalogName String - Name of trino catalog which should use specified hive metastore.
- metastoreDbConnectionURL String - Connection string for hive metastore database.
- metastoreWarehouseDir String - Metastore root directory URI, format: abfs[s]://<container>@<account_name>.dfs.core.windows.net/<path>. More details: https://docs.microsoft.com/en-us/azure/storage/blobs/data-lake-storage-introduction-abfs-uri
- metastoreDbConnectionAuthenticationMode String | MetastoreDbConnectionAuthenticationMode - The authentication mode to connect to your Hive metastore database. More details: https://learn.microsoft.com/en-us/azure/azure-sql/database/logins-create-manage?view=azuresql#authentication-and-authorization
- metastoreDbConnectionPasswordSecret String - Secret reference name from secretsProfile.secrets containing password for database connection.
- metastoreDbConnectionUserName String - User name for database connection.
- catalogName string - Name of trino catalog which should use specified hive metastore.
- metastoreDbConnectionURL string - Connection string for hive metastore database.
- metastoreWarehouseDir string - Metastore root directory URI, format: abfs[s]://<container>@<account_name>.dfs.core.windows.net/<path>. More details: https://docs.microsoft.com/en-us/azure/storage/blobs/data-lake-storage-introduction-abfs-uri
- metastoreDbConnectionAuthenticationMode string | MetastoreDbConnectionAuthenticationMode - The authentication mode to connect to your Hive metastore database. More details: https://learn.microsoft.com/en-us/azure/azure-sql/database/logins-create-manage?view=azuresql#authentication-and-authorization
- metastoreDbConnectionPasswordSecret string - Secret reference name from secretsProfile.secrets containing password for database connection.
- metastoreDbConnectionUserName string - User name for database connection.
- catalog_name str - Name of trino catalog which should use specified hive metastore.
- metastore_db_connection_url str - Connection string for hive metastore database.
- metastore_warehouse_dir str - Metastore root directory URI, format: abfs[s]://<container>@<account_name>.dfs.core.windows.net/<path>. More details: https://docs.microsoft.com/en-us/azure/storage/blobs/data-lake-storage-introduction-abfs-uri
- metastore_db_connection_authentication_mode str | MetastoreDbConnectionAuthenticationMode - The authentication mode to connect to your Hive metastore database. More details: https://learn.microsoft.com/en-us/azure/azure-sql/database/logins-create-manage?view=azuresql#authentication-and-authorization
- metastore_db_connection_password_secret str - Secret reference name from secretsProfile.secrets containing password for database connection.
- metastore_db_connection_user_name str - User name for database connection.
- catalogName String - Name of trino catalog which should use specified hive metastore.
- metastoreDbConnectionURL String - Connection string for hive metastore database.
- metastoreWarehouseDir String - Metastore root directory URI, format: abfs[s]://<container>@<account_name>.dfs.core.windows.net/<path>. More details: https://docs.microsoft.com/en-us/azure/storage/blobs/data-lake-storage-introduction-abfs-uri
- metastoreDbConnectionAuthenticationMode String | "SqlAuth" | "IdentityAuth" - The authentication mode to connect to your Hive metastore database. More details: https://learn.microsoft.com/en-us/azure/azure-sql/database/logins-create-manage?view=azuresql#authentication-and-authorization
- metastoreDbConnectionPasswordSecret String - Secret reference name from secretsProfile.secrets containing password for database connection.
- metastoreDbConnectionUserName String - User name for database connection.
HiveCatalogOptionResponse, HiveCatalogOptionResponseArgs
- CatalogName string - Name of trino catalog which should use specified hive metastore.
- MetastoreDbConnectionURL string - Connection string for hive metastore database.
- MetastoreWarehouseDir string - Metastore root directory URI, format: abfs[s]://<container>@<account_name>.dfs.core.windows.net/<path>. More details: https://docs.microsoft.com/en-us/azure/storage/blobs/data-lake-storage-introduction-abfs-uri
- MetastoreDbConnectionAuthenticationMode string - The authentication mode to connect to your Hive metastore database. More details: https://learn.microsoft.com/en-us/azure/azure-sql/database/logins-create-manage?view=azuresql#authentication-and-authorization
- MetastoreDbConnectionPasswordSecret string - Secret reference name from secretsProfile.secrets containing password for database connection.
- MetastoreDbConnectionUserName string - User name for database connection.
- CatalogName string - Name of trino catalog which should use specified hive metastore.
- MetastoreDbConnectionURL string - Connection string for hive metastore database.
- MetastoreWarehouseDir string - Metastore root directory URI, format: abfs[s]://<container>@<account_name>.dfs.core.windows.net/<path>. More details: https://docs.microsoft.com/en-us/azure/storage/blobs/data-lake-storage-introduction-abfs-uri
- MetastoreDbConnectionAuthenticationMode string - The authentication mode to connect to your Hive metastore database. More details: https://learn.microsoft.com/en-us/azure/azure-sql/database/logins-create-manage?view=azuresql#authentication-and-authorization
- MetastoreDbConnectionPasswordSecret string - Secret reference name from secretsProfile.secrets containing password for database connection.
- MetastoreDbConnectionUserName string - User name for database connection.
- catalogName String - Name of trino catalog which should use specified hive metastore.
- metastoreDbConnectionURL String - Connection string for hive metastore database.
- metastoreWarehouseDir String - Metastore root directory URI, format: abfs[s]://<container>@<account_name>.dfs.core.windows.net/<path>. More details: https://docs.microsoft.com/en-us/azure/storage/blobs/data-lake-storage-introduction-abfs-uri
- metastoreDbConnectionAuthenticationMode String - The authentication mode to connect to your Hive metastore database. More details: https://learn.microsoft.com/en-us/azure/azure-sql/database/logins-create-manage?view=azuresql#authentication-and-authorization
- metastoreDbConnectionPasswordSecret String - Secret reference name from secretsProfile.secrets containing password for database connection.
- metastoreDbConnectionUserName String - User name for database connection.
- catalogName string - Name of trino catalog which should use specified hive metastore.
- metastoreDbConnectionURL string - Connection string for hive metastore database.
- metastoreWarehouseDir string - Metastore root directory URI, format: abfs[s]://<container>@<account_name>.dfs.core.windows.net/<path>. More details: https://docs.microsoft.com/en-us/azure/storage/blobs/data-lake-storage-introduction-abfs-uri
- metastoreDbConnectionAuthenticationMode string - The authentication mode to connect to your Hive metastore database. More details: https://learn.microsoft.com/en-us/azure/azure-sql/database/logins-create-manage?view=azuresql#authentication-and-authorization
- metastoreDbConnectionPasswordSecret string - Secret reference name from secretsProfile.secrets containing password for database connection.
- metastoreDbConnectionUserName string - User name for database connection.
- catalog_name str - Name of trino catalog which should use specified hive metastore.
- metastore_db_connection_url str - Connection string for hive metastore database.
- metastore_warehouse_dir str - Metastore root directory URI, format: abfs[s]://<container>@<account_name>.dfs.core.windows.net/<path>. More details: https://docs.microsoft.com/en-us/azure/storage/blobs/data-lake-storage-introduction-abfs-uri
- metastore_db_connection_authentication_mode str - The authentication mode to connect to your Hive metastore database. More details: https://learn.microsoft.com/en-us/azure/azure-sql/database/logins-create-manage?view=azuresql#authentication-and-authorization
- metastore_db_connection_password_secret str - Secret reference name from secretsProfile.secrets containing password for database connection.
- metastore_db_connection_user_name str - User name for database connection.
- catalogName String - Name of trino catalog which should use specified hive metastore.
- metastoreDbConnectionURL String - Connection string for hive metastore database.
- metastoreWarehouseDir String - Metastore root directory URI, format: abfs[s]://<container>@<account_name>.dfs.core.windows.net/<path>. More details: https://docs.microsoft.com/en-us/azure/storage/blobs/data-lake-storage-introduction-abfs-uri
- metastoreDbConnectionAuthenticationMode String - The authentication mode to connect to your Hive metastore database. More details: https://learn.microsoft.com/en-us/azure/azure-sql/database/logins-create-manage?view=azuresql#authentication-and-authorization
- metastoreDbConnectionPasswordSecret String - Secret reference name from secretsProfile.secrets containing password for database connection.
- metastoreDbConnectionUserName String - User name for database connection.
IdentityProfile, IdentityProfileArgs
- MsiClientId string - ClientId of the MSI.
- MsiObjectId string - ObjectId of the MSI.
- MsiResourceId string - ResourceId of the MSI.
- MsiClientId string - ClientId of the MSI.
- MsiObjectId string - ObjectId of the MSI.
- MsiResourceId string - ResourceId of the MSI.
- msiClientId String - ClientId of the MSI.
- msiObjectId String - ObjectId of the MSI.
- msiResourceId String - ResourceId of the MSI.
- msiClientId string - ClientId of the MSI.
- msiObjectId string - ObjectId of the MSI.
- msiResourceId string - ResourceId of the MSI.
- msi_client_id str - ClientId of the MSI.
- msi_object_id str - ObjectId of the MSI.
- msi_resource_id str - ResourceId of the MSI.
- msiClientId String - ClientId of the MSI.
- msiObjectId String - ObjectId of the MSI.
- msiResourceId String - ResourceId of the MSI.
IdentityProfileResponse, IdentityProfileResponseArgs
- MsiClientId string - ClientId of the MSI.
- MsiObjectId string - ObjectId of the MSI.
- MsiResourceId string - ResourceId of the MSI.
- MsiClientId string - ClientId of the MSI.
- MsiObjectId string - ObjectId of the MSI.
- MsiResourceId string - ResourceId of the MSI.
- msiClientId String - ClientId of the MSI.
- msiObjectId String - ObjectId of the MSI.
- msiResourceId String - ResourceId of the MSI.
- msiClientId string - ClientId of the MSI.
- msiObjectId string - ObjectId of the MSI.
- msiResourceId string - ResourceId of the MSI.
- msi_client_id str - ClientId of the MSI.
- msi_object_id str - ObjectId of the MSI.
- msi_resource_id str - ResourceId of the MSI.
- msiClientId String - ClientId of the MSI.
- msiObjectId String - ObjectId of the MSI.
- msiResourceId String - ResourceId of the MSI.
KafkaConnectivityEndpointsResponse, KafkaConnectivityEndpointsResponseArgs
- BootstrapServerEndpoint string - bootstrap server connectivity endpoint.
- BrokerEndpoints List<string> - Kafka broker endpoint list.
- BootstrapServerEndpoint string - bootstrap server connectivity endpoint.
- BrokerEndpoints []string - Kafka broker endpoint list.
- bootstrapServerEndpoint String - bootstrap server connectivity endpoint.
- brokerEndpoints List<String> - Kafka broker endpoint list.
- bootstrapServerEndpoint string - bootstrap server connectivity endpoint.
- brokerEndpoints string[] - Kafka broker endpoint list.
- bootstrap_server_endpoint str - bootstrap server connectivity endpoint.
- broker_endpoints Sequence[str] - Kafka broker endpoint list.
- bootstrapServerEndpoint String - bootstrap server connectivity endpoint.
- brokerEndpoints List<String> - Kafka broker endpoint list.
KafkaProfile, KafkaProfileArgs
- DiskStorage Pulumi.AzureNative.HDInsight.Inputs.DiskStorageProfile - Kafka disk storage profile.
- EnableKRaft bool - Expose Kafka cluster in KRaft mode.
- EnablePublicEndpoints bool - Expose worker nodes as public endpoints.
- RemoteStorageUri string - Fully qualified path of Azure Storage container used for Tiered Storage.
- DiskStorage DiskStorageProfile - Kafka disk storage profile.
- EnableKRaft bool - Expose Kafka cluster in KRaft mode.
- EnablePublicEndpoints bool - Expose worker nodes as public endpoints.
- RemoteStorageUri string - Fully qualified path of Azure Storage container used for Tiered Storage.
- diskStorage DiskStorageProfile - Kafka disk storage profile.
- enableKRaft Boolean - Expose Kafka cluster in KRaft mode.
- enablePublicEndpoints Boolean - Expose worker nodes as public endpoints.
- remoteStorageUri String - Fully qualified path of Azure Storage container used for Tiered Storage.
- diskStorage DiskStorageProfile - Kafka disk storage profile.
- enableKRaft boolean - Expose Kafka cluster in KRaft mode.
- enablePublicEndpoints boolean - Expose worker nodes as public endpoints.
- remoteStorageUri string - Fully qualified path of Azure Storage container used for Tiered Storage.
- disk_storage DiskStorageProfile - Kafka disk storage profile.
- enable_k_raft bool - Expose Kafka cluster in KRaft mode.
- enable_public_endpoints bool - Expose worker nodes as public endpoints.
- remote_storage_uri str - Fully qualified path of Azure Storage container used for Tiered Storage.
- diskStorage Property Map - Kafka disk storage profile.
- enableKRaft Boolean - Expose Kafka cluster in KRaft mode.
- enablePublicEndpoints Boolean - Expose worker nodes as public endpoints.
- remoteStorageUri String - Fully qualified path of Azure Storage container used for Tiered Storage.
KafkaProfileResponse, KafkaProfileResponseArgs
- ConnectivityEndpoints Pulumi.AzureNative.HDInsight.Inputs.KafkaConnectivityEndpointsResponse - Kafka bootstrap server and brokers related connectivity endpoints.
- DiskStorage Pulumi.AzureNative.HDInsight.Inputs.DiskStorageProfileResponse - Kafka disk storage profile.
- EnableKRaft bool - Expose Kafka cluster in KRaft mode.
- EnablePublicEndpoints bool - Expose worker nodes as public endpoints.
- RemoteStorageUri string - Fully qualified path of Azure Storage container used for Tiered Storage.
- ConnectivityEndpoints KafkaConnectivityEndpointsResponse - Kafka bootstrap server and brokers related connectivity endpoints.
- DiskStorage DiskStorageProfileResponse - Kafka disk storage profile.
- EnableKRaft bool - Expose Kafka cluster in KRaft mode.
- EnablePublicEndpoints bool - Expose worker nodes as public endpoints.
- RemoteStorageUri string - Fully qualified path of Azure Storage container used for Tiered Storage.
- connectivityEndpoints KafkaConnectivityEndpointsResponse - Kafka bootstrap server and brokers related connectivity endpoints.
- diskStorage DiskStorageProfileResponse - Kafka disk storage profile.
- enableKRaft Boolean - Expose Kafka cluster in KRaft mode.
- enablePublicEndpoints Boolean - Expose worker nodes as public endpoints.
- remoteStorageUri String - Fully qualified path of Azure Storage container used for Tiered Storage.
- connectivityEndpoints KafkaConnectivityEndpointsResponse - Kafka bootstrap server and brokers related connectivity endpoints.
- diskStorage DiskStorageProfileResponse - Kafka disk storage profile.
- enableKRaft boolean - Expose Kafka cluster in KRaft mode.
- enablePublicEndpoints boolean - Expose worker nodes as public endpoints.
- remoteStorageUri string - Fully qualified path of Azure Storage container used for Tiered Storage.
- connectivity_endpoints KafkaConnectivityEndpointsResponse - Kafka bootstrap server and brokers related connectivity endpoints.
- disk_storage DiskStorageProfileResponse - Kafka disk storage profile.
- enable_k_raft bool - Expose Kafka cluster in KRaft mode.
- enable_public_endpoints bool - Expose worker nodes as public endpoints.
- remote_storage_uri str - Fully qualified path of Azure Storage container used for Tiered Storage.
- connectivityEndpoints Property Map - Kafka bootstrap server and brokers related connectivity endpoints.
- diskStorage Property Map - Kafka disk storage profile.
- enableKRaft Boolean - Expose Kafka cluster in KRaft mode.
- enablePublicEndpoints Boolean - Expose worker nodes as public endpoints.
- remoteStorageUri String - Fully qualified path of Azure Storage container used for Tiered Storage.
KeyVaultObjectType, KeyVaultObjectTypeArgs
- Key
- Key
- Secret
- Secret
- Certificate
- Certificate
- KeyVaultObjectTypeKey - Key
- KeyVaultObjectTypeSecret - Secret
- KeyVaultObjectTypeCertificate - Certificate
- Key
- Key
- Secret
- Secret
- Certificate
- Certificate
- Key
- Key
- Secret
- Secret
- Certificate
- Certificate
- KEY
- Key
- SECRET
- Secret
- CERTIFICATE
- Certificate
- "Key"
- Key
- "Secret"
- Secret
- "Certificate"
- Certificate
LoadBasedConfig, LoadBasedConfigArgs
- MaxNodes int - User needs to set the maximum number of nodes for load based scaling, the load based scaling will use this to scale up and scale down between minimum and maximum number of nodes.
- MinNodes int - User needs to set the minimum number of nodes for load based scaling, the load based scaling will use this to scale up and scale down between minimum and maximum number of nodes.
- ScalingRules List<Pulumi.AzureNative.HDInsight.Inputs.ScalingRule> - The scaling rules.
- CooldownPeriod int - This is a cool down period, this is a time period in seconds, which determines the amount of time that must elapse between a scaling activity started by a rule and the start of the next scaling activity, regardless of the rule that triggers it. The default value is 300 seconds.
- PollInterval int - User can specify the poll interval, this is the time period (in seconds) after which scaling metrics are polled for triggering a scaling operation.
- MaxNodes int - User needs to set the maximum number of nodes for load based scaling, the load based scaling will use this to scale up and scale down between minimum and maximum number of nodes.
- MinNodes int - User needs to set the minimum number of nodes for load based scaling, the load based scaling will use this to scale up and scale down between minimum and maximum number of nodes.
- ScalingRules []ScalingRule - The scaling rules.
- CooldownPeriod int - This is a cool down period, this is a time period in seconds, which determines the amount of time that must elapse between a scaling activity started by a rule and the start of the next scaling activity, regardless of the rule that triggers it. The default value is 300 seconds.
- PollInterval int - User can specify the poll interval, this is the time period (in seconds) after which scaling metrics are polled for triggering a scaling operation.
- maxNodes Integer - User needs to set the maximum number of nodes for load based scaling, the load based scaling will use this to scale up and scale down between minimum and maximum number of nodes.
- minNodes Integer - User needs to set the minimum number of nodes for load based scaling, the load based scaling will use this to scale up and scale down between minimum and maximum number of nodes.
- scalingRules List<ScalingRule> - The scaling rules.
- cooldownPeriod Integer - This is a cool down period, this is a time period in seconds, which determines the amount of time that must elapse between a scaling activity started by a rule and the start of the next scaling activity, regardless of the rule that triggers it. The default value is 300 seconds.
- pollInterval Integer - User can specify the poll interval, this is the time period (in seconds) after which scaling metrics are polled for triggering a scaling operation.
- maxNodes number - User needs to set the maximum number of nodes for load based scaling, the load based scaling will use this to scale up and scale down between minimum and maximum number of nodes.
- minNodes number - User needs to set the minimum number of nodes for load based scaling, the load based scaling will use this to scale up and scale down between minimum and maximum number of nodes.
- scalingRules ScalingRule[] - The scaling rules.
- cooldownPeriod number - This is a cool down period, this is a time period in seconds, which determines the amount of time that must elapse between a scaling activity started by a rule and the start of the next scaling activity, regardless of the rule that triggers it. The default value is 300 seconds.
- pollInterval number - User can specify the poll interval, this is the time period (in seconds) after which scaling metrics are polled for triggering a scaling operation.
- max_nodes int - User needs to set the maximum number of nodes for load based scaling, the load based scaling will use this to scale up and scale down between minimum and maximum number of nodes.
- min_nodes int - User needs to set the minimum number of nodes for load based scaling, the load based scaling will use this to scale up and scale down between minimum and maximum number of nodes.
- scaling_rules Sequence[ScalingRule] - The scaling rules.
- cooldown_period int - This is a cool down period, this is a time period in seconds, which determines the amount of time that must elapse between a scaling activity started by a rule and the start of the next scaling activity, regardless of the rule that triggers it. The default value is 300 seconds.
- poll_interval int - User can specify the poll interval, this is the time period (in seconds) after which scaling metrics are polled for triggering a scaling operation.
- maxNodes Number - User needs to set the maximum number of nodes for load based scaling, the load based scaling will use this to scale up and scale down between minimum and maximum number of nodes.
- minNodes Number - User needs to set the minimum number of nodes for load based scaling, the load based scaling will use this to scale up and scale down between minimum and maximum number of nodes.
- scalingRules List<Property Map> - The scaling rules.
- cooldownPeriod Number - This is a cool down period, this is a time period in seconds, which determines the amount of time that must elapse between a scaling activity started by a rule and the start of the next scaling activity, regardless of the rule that triggers it. The default value is 300 seconds.
- pollInterval Number - User can specify the poll interval, this is the time period (in seconds) after which scaling metrics are polled for triggering a scaling operation.
LoadBasedConfigResponse, LoadBasedConfigResponseArgs
- MaxNodes int - User needs to set the maximum number of nodes for load based scaling, the load based scaling will use this to scale up and scale down between minimum and maximum number of nodes.
- MinNodes int - User needs to set the minimum number of nodes for load based scaling, the load based scaling will use this to scale up and scale down between minimum and maximum number of nodes.
- ScalingRules List<Pulumi.AzureNative.HDInsight.Inputs.ScalingRuleResponse> - The scaling rules.
- CooldownPeriod int - This is a cool down period, this is a time period in seconds, which determines the amount of time that must elapse between a scaling activity started by a rule and the start of the next scaling activity, regardless of the rule that triggers it. The default value is 300 seconds.
- PollInterval int - User can specify the poll interval, this is the time period (in seconds) after which scaling metrics are polled for triggering a scaling operation.
- MaxNodes int - User needs to set the maximum number of nodes for load based scaling, the load based scaling will use this to scale up and scale down between minimum and maximum number of nodes.
- MinNodes int - User needs to set the minimum number of nodes for load based scaling, the load based scaling will use this to scale up and scale down between minimum and maximum number of nodes.
- ScalingRules []ScalingRuleResponse - The scaling rules.
- CooldownPeriod int - This is a cool down period, this is a time period in seconds, which determines the amount of time that must elapse between a scaling activity started by a rule and the start of the next scaling activity, regardless of the rule that triggers it. The default value is 300 seconds.
- PollInterval int - User can specify the poll interval, this is the time period (in seconds) after which scaling metrics are polled for triggering a scaling operation.
- maxNodes Integer - User needs to set the maximum number of nodes for load based scaling, the load based scaling will use this to scale up and scale down between minimum and maximum number of nodes.
- minNodes Integer - User needs to set the minimum number of nodes for load based scaling, the load based scaling will use this to scale up and scale down between minimum and maximum number of nodes.
- scalingRules List<ScalingRuleResponse> - The scaling rules.
- cooldownPeriod Integer - This is a cool down period, this is a time period in seconds, which determines the amount of time that must elapse between a scaling activity started by a rule and the start of the next scaling activity, regardless of the rule that triggers it. The default value is 300 seconds.
- pollInterval Integer - User can specify the poll interval, this is the time period (in seconds) after which scaling metrics are polled for triggering a scaling operation.
- maxNodes number - User needs to set the maximum number of nodes for load based scaling, the load based scaling will use this to scale up and scale down between minimum and maximum number of nodes.
- minNodes number - User needs to set the minimum number of nodes for load based scaling, the load based scaling will use this to scale up and scale down between minimum and maximum number of nodes.
- scalingRules ScalingRuleResponse[] - The scaling rules.
- cooldownPeriod number - This is a cool down period, this is a time period in seconds, which determines the amount of time that must elapse between a scaling activity started by a rule and the start of the next scaling activity, regardless of the rule that triggers it. The default value is 300 seconds.
- pollInterval number - User can specify the poll interval, this is the time period (in seconds) after which scaling metrics are polled for triggering a scaling operation.
- max_nodes int - User needs to set the maximum number of nodes for load based scaling, the load based scaling will use this to scale up and scale down between minimum and maximum number of nodes.
- min_nodes int - User needs to set the minimum number of nodes for load based scaling, the load based scaling will use this to scale up and scale down between minimum and maximum number of nodes.
- scaling_rules Sequence[ScalingRuleResponse] - The scaling rules.
- cooldown_period int - This is a cool down period, this is a time period in seconds, which determines the amount of time that must elapse between a scaling activity started by a rule and the start of the next scaling activity, regardless of the rule that triggers it. The default value is 300 seconds.
- poll_interval int - User can specify the poll interval, this is the time period (in seconds) after which scaling metrics are polled for triggering a scaling operation.
- maxNodes Number - User needs to set the maximum number of nodes for load based scaling, the load based scaling will use this to scale up and scale down between minimum and maximum number of nodes.
- minNodes Number - User needs to set the minimum number of nodes for load based scaling, the load based scaling will use this to scale up and scale down between minimum and maximum number of nodes.
- scalingRules List<Property Map> - The scaling rules.
- cooldownPeriod Number - This is a cool down period, this is a time period in seconds, which determines the amount of time that must elapse between a scaling activity started by a rule and the start of the next scaling activity, regardless of the rule that triggers it. The default value is 300 seconds.
- pollInterval Number - User can specify the poll interval, this is the time period (in seconds) after which scaling metrics are polled for triggering a scaling operation.
ManagedIdentityProfile, ManagedIdentityProfileArgs
- IdentityList List<Pulumi.AzureNative.HDInsight.Inputs.ManagedIdentitySpec> - The list of managed identity.
- IdentityList []ManagedIdentitySpec - The list of managed identity.
- identityList List<ManagedIdentitySpec> - The list of managed identity.
- identityList ManagedIdentitySpec[] - The list of managed identity.
- identity_list Sequence[ManagedIdentitySpec] - The list of managed identity.
- identityList List<Property Map> - The list of managed identity.
ManagedIdentityProfileResponse, ManagedIdentityProfileResponseArgs
- IdentityList List<Pulumi.AzureNative.HDInsight.Inputs.ManagedIdentitySpecResponse> - The list of managed identity.
- IdentityList []ManagedIdentitySpecResponse - The list of managed identity.
- identityList List<ManagedIdentitySpecResponse> - The list of managed identity.
- identityList ManagedIdentitySpecResponse[] - The list of managed identity.
- identity_list Sequence[ManagedIdentitySpecResponse] - The list of managed identity.
- identityList List<Property Map> - The list of managed identity.
ManagedIdentitySpec, ManagedIdentitySpecArgs
- ClientId string - ClientId of the managed identity.
- ObjectId string - ObjectId of the managed identity.
- ResourceId string - ResourceId of the managed identity.
- Type string | Pulumi.AzureNative.HDInsight.ManagedIdentityType - The type of managed identity.
- ClientId string - ClientId of the managed identity.
- ObjectId string - ObjectId of the managed identity.
- ResourceId string - ResourceId of the managed identity.
- Type string | ManagedIdentityType - The type of managed identity.
- clientId String - ClientId of the managed identity.
- objectId String - ObjectId of the managed identity.
- resourceId String - ResourceId of the managed identity.
- type String | ManagedIdentityType - The type of managed identity.
- clientId string - ClientId of the managed identity.
- objectId string - ObjectId of the managed identity.
- resourceId string - ResourceId of the managed identity.
- type string | ManagedIdentityType - The type of managed identity.
- client_id str - ClientId of the managed identity.
- object_id str - ObjectId of the managed identity.
- resource_id str - ResourceId of the managed identity.
- type str | ManagedIdentityType - The type of managed identity.
- clientId String - ClientId of the managed identity.
- objectId String - ObjectId of the managed identity.
- resourceId String - ResourceId of the managed identity.
- type String | "cluster" | "user" | "internal" - The type of managed identity.
ManagedIdentitySpecResponse, ManagedIdentitySpecResponseArgs
- Client
Id string - ClientId of the managed identity.
- Object
Id string - ObjectId of the managed identity.
- Resource
Id string - ResourceId of the managed identity.
- Type string
- The type of managed identity.
- Client
Id string - ClientId of the managed identity.
- Object
Id string - ObjectId of the managed identity.
- Resource
Id string - ResourceId of the managed identity.
- Type string
- The type of managed identity.
- client
Id String - ClientId of the managed identity.
- object
Id String - ObjectId of the managed identity.
- resource
Id String - ResourceId of the managed identity.
- type String
- The type of managed identity.
- client
Id string - ClientId of the managed identity.
- object
Id string - ObjectId of the managed identity.
- resource
Id string - ResourceId of the managed identity.
- type string
- The type of managed identity.
- client_
id str - ClientId of the managed identity.
- object_
id str - ObjectId of the managed identity.
- resource_
id str - ResourceId of the managed identity.
- type str
- The type of managed identity.
- client
Id String - ClientId of the managed identity.
- object
Id String - ObjectId of the managed identity.
- resource
Id String - ResourceId of the managed identity.
- type String
- The type of managed identity.
ManagedIdentityType, ManagedIdentityTypeArgs
- Cluster
- cluster
- User
- user
- @Internal
- internal
- ManagedIdentityTypeCluster - cluster
- ManagedIdentityTypeUser - user
- ManagedIdentityTypeInternal - internal
- Cluster
- cluster
- User
- user
- Internal
- internal
- Cluster
- cluster
- User
- user
- Internal
- internal
- CLUSTER
- cluster
- USER
- user
- INTERNAL
- internal
- "cluster"
- cluster
- "user"
- user
- "internal"
- internal
MetastoreDbConnectionAuthenticationMode, MetastoreDbConnectionAuthenticationModeArgs
- SqlAuth - The password-based authentication to connect to your Hive metastore database.
- IdentityAuth - The managed-identity-based authentication to connect to your Hive metastore database.
- MetastoreDbConnectionAuthenticationModeSqlAuth - The password-based authentication to connect to your Hive metastore database.
- MetastoreDbConnectionAuthenticationModeIdentityAuth - The managed-identity-based authentication to connect to your Hive metastore database.
- SqlAuth - The password-based authentication to connect to your Hive metastore database.
- IdentityAuth - The managed-identity-based authentication to connect to your Hive metastore database.
- SqlAuth - The password-based authentication to connect to your Hive metastore database.
- IdentityAuth - The managed-identity-based authentication to connect to your Hive metastore database.
- SQL_AUTH - The password-based authentication to connect to your Hive metastore database.
- IDENTITY_AUTH - The managed-identity-based authentication to connect to your Hive metastore database.
- "SqlAuth" - The password-based authentication to connect to your Hive metastore database.
- "IdentityAuth" - The managed-identity-based authentication to connect to your Hive metastore database.
NodeProfile, NodeProfileArgs
NodeProfileResponse, NodeProfileResponseArgs
RangerAdminSpec, RangerAdminSpecArgs
- Admins List<string>
- List of usernames that should be marked as ranger admins. These usernames should match the user principal name (UPN) of the respective AAD users.
- Database
Pulumi.
Azure Native. HDInsight. Inputs. Ranger Admin Spec Database
- Admins []string
- List of usernames that should be marked as ranger admins. These usernames should match the user principal name (UPN) of the respective AAD users.
- Database
Ranger
Admin Spec Database
- admins List<String>
- List of usernames that should be marked as ranger admins. These usernames should match the user principal name (UPN) of the respective AAD users.
- database
Ranger
Admin Spec Database
- admins string[]
- List of usernames that should be marked as ranger admins. These usernames should match the user principal name (UPN) of the respective AAD users.
- database
Ranger
Admin Spec Database
- admins Sequence[str]
- List of usernames that should be marked as ranger admins. These usernames should match the user principal name (UPN) of the respective AAD users.
- database
Ranger
Admin Spec Database
- admins List<String>
- List of usernames that should be marked as ranger admins. These usernames should match the user principal name (UPN) of the respective AAD users.
- database Property Map
RangerAdminSpecDatabase, RangerAdminSpecDatabaseArgs
- Host string - The database URL
- Name string - The database name
- PasswordSecretRef string - Reference for the database password
- Username string - The name of the database user
- Host string - The database URL
- Name string - The database name
- PasswordSecretRef string - Reference for the database password
- Username string - The name of the database user
- host String - The database URL
- name String - The database name
- passwordSecretRef String - Reference for the database password
- username String - The name of the database user
- host string - The database URL
- name string - The database name
- passwordSecretRef string - Reference for the database password
- username string - The name of the database user
- host str - The database URL
- name str - The database name
- password_secret_ref str - Reference for the database password
- username str - The name of the database user
- host String - The database URL
- name String - The database name
- passwordSecretRef String - Reference for the database password
- username String - The name of the database user
RangerAdminSpecResponse, RangerAdminSpecResponseArgs
- Admins List<string>
- List of usernames that should be marked as ranger admins. These usernames should match the user principal name (UPN) of the respective AAD users.
- Database
Pulumi.
Azure Native. HDInsight. Inputs. Ranger Admin Spec Response Database
- Admins []string
- List of usernames that should be marked as ranger admins. These usernames should match the user principal name (UPN) of the respective AAD users.
- Database
Ranger
Admin Spec Response Database
- admins List<String>
- List of usernames that should be marked as ranger admins. These usernames should match the user principal name (UPN) of the respective AAD users.
- database
Ranger
Admin Spec Response Database
- admins string[]
- List of usernames that should be marked as ranger admins. These usernames should match the user principal name (UPN) of the respective AAD users.
- database
Ranger
Admin Spec Response Database
- admins Sequence[str]
- List of usernames that should be marked as ranger admins. These usernames should match the user principal name (UPN) of the respective AAD users.
- database
Ranger
Admin Spec Response Database
- admins List<String>
- List of usernames that should be marked as ranger admins. These usernames should match the user principal name (UPN) of the respective AAD users.
- database Property Map
RangerAdminSpecResponseDatabase, RangerAdminSpecResponseDatabaseArgs
- Host string - The database URL
- Name string - The database name
- PasswordSecretRef string - Reference for the database password
- Username string - The name of the database user
- Host string - The database URL
- Name string - The database name
- PasswordSecretRef string - Reference for the database password
- Username string - The name of the database user
- host String - The database URL
- name String - The database name
- passwordSecretRef String - Reference for the database password
- username String - The name of the database user
- host string - The database URL
- name string - The database name
- passwordSecretRef string - Reference for the database password
- username string - The name of the database user
- host str - The database URL
- name str - The database name
- password_secret_ref str - Reference for the database password
- username str - The name of the database user
- host String - The database URL
- name String - The database name
- passwordSecretRef String - Reference for the database password
- username String - The name of the database user
RangerAuditSpec, RangerAuditSpecArgs
- Storage
Account string - Azure storage location of the blobs. MSI should have read/write access to this Storage account.
- Storage
Account string - Azure storage location of the blobs. MSI should have read/write access to this Storage account.
- storage
Account String - Azure storage location of the blobs. MSI should have read/write access to this Storage account.
- storage
Account string - Azure storage location of the blobs. MSI should have read/write access to this Storage account.
- storage_
account str - Azure storage location of the blobs. MSI should have read/write access to this Storage account.
- storage
Account String - Azure storage location of the blobs. MSI should have read/write access to this Storage account.
RangerAuditSpecResponse, RangerAuditSpecResponseArgs
- Storage
Account string - Azure storage location of the blobs. MSI should have read/write access to this Storage account.
- Storage
Account string - Azure storage location of the blobs. MSI should have read/write access to this Storage account.
- storage
Account String - Azure storage location of the blobs. MSI should have read/write access to this Storage account.
- storage
Account string - Azure storage location of the blobs. MSI should have read/write access to this Storage account.
- storage_
account str - Azure storage location of the blobs. MSI should have read/write access to this Storage account.
- storage
Account String - Azure storage location of the blobs. MSI should have read/write access to this Storage account.
RangerProfile, RangerProfileArgs
- Ranger
Admin Pulumi.Azure Native. HDInsight. Inputs. Ranger Admin Spec - Specification for the Ranger Admin service.
- Ranger
Usersync Pulumi.Azure Native. HDInsight. Inputs. Ranger Usersync Spec - Specification for the Ranger Usersync service
- Ranger
Audit Pulumi.Azure Native. HDInsight. Inputs. Ranger Audit Spec - Properties required to describe audit log storage.
- Ranger
Admin RangerAdmin Spec - Specification for the Ranger Admin service.
- Ranger
Usersync RangerUsersync Spec - Specification for the Ranger Usersync service
- Ranger
Audit RangerAudit Spec - Properties required to describe audit log storage.
- ranger
Admin RangerAdmin Spec - Specification for the Ranger Admin service.
- ranger
Usersync RangerUsersync Spec - Specification for the Ranger Usersync service
- ranger
Audit RangerAudit Spec - Properties required to describe audit log storage.
- ranger
Admin RangerAdmin Spec - Specification for the Ranger Admin service.
- ranger
Usersync RangerUsersync Spec - Specification for the Ranger Usersync service
- ranger
Audit RangerAudit Spec - Properties required to describe audit log storage.
- ranger_
admin RangerAdmin Spec - Specification for the Ranger Admin service.
- ranger_
usersync RangerUsersync Spec - Specification for the Ranger Usersync service
- ranger_
audit RangerAudit Spec - Properties required to describe audit log storage.
- ranger
Admin Property Map - Specification for the Ranger Admin service.
- ranger
Usersync Property Map - Specification for the Ranger Usersync service
- ranger
Audit Property Map - Properties required to describe audit log storage.
RangerProfileResponse, RangerProfileResponseArgs
- Ranger
Admin Pulumi.Azure Native. HDInsight. Inputs. Ranger Admin Spec Response - Specification for the Ranger Admin service.
- Ranger
Usersync Pulumi.Azure Native. HDInsight. Inputs. Ranger Usersync Spec Response - Specification for the Ranger Usersync service
- Ranger
Audit Pulumi.Azure Native. HDInsight. Inputs. Ranger Audit Spec Response - Properties required to describe audit log storage.
- Ranger
Admin RangerAdmin Spec Response - Specification for the Ranger Admin service.
- Ranger
Usersync RangerUsersync Spec Response - Specification for the Ranger Usersync service
- Ranger
Audit RangerAudit Spec Response - Properties required to describe audit log storage.
- ranger
Admin RangerAdmin Spec Response - Specification for the Ranger Admin service.
- ranger
Usersync RangerUsersync Spec Response - Specification for the Ranger Usersync service
- ranger
Audit RangerAudit Spec Response - Properties required to describe audit log storage.
- ranger
Admin RangerAdmin Spec Response - Specification for the Ranger Admin service.
- ranger
Usersync RangerUsersync Spec Response - Specification for the Ranger Usersync service
- ranger
Audit RangerAudit Spec Response - Properties required to describe audit log storage.
- ranger_
admin RangerAdmin Spec Response - Specification for the Ranger Admin service.
- ranger_
usersync RangerUsersync Spec Response - Specification for the Ranger Usersync service
- ranger_
audit RangerAudit Spec Response - Properties required to describe audit log storage.
- ranger
Admin Property Map - Specification for the Ranger Admin service.
- ranger
Usersync Property Map - Specification for the Ranger Usersync service
- ranger
Audit Property Map - Properties required to describe audit log storage.
RangerUsersyncMode, RangerUsersyncModeArgs
- @Static
- static
- Automatic
- automatic
- RangerUsersyncModeStatic - static
- RangerUsersyncModeAutomatic - automatic
- Static_
- static
- Automatic
- automatic
- Static
- static
- Automatic
- automatic
- STATIC
- static
- AUTOMATIC
- automatic
- "static"
- static
- "automatic"
- automatic
RangerUsersyncSpec, RangerUsersyncSpecArgs
- Enabled bool
- Denotes whether usersync service should be enabled
- Groups List<string>
- List of groups that should be synced. These group names should match the object id of the respective AAD groups.
- Mode
string | Pulumi.
Azure Native. HDInsight. Ranger Usersync Mode - User & groups can be synced automatically or via a static list that's refreshed.
- User
Mapping stringLocation - Azure storage location of a mapping file that lists user & group associations.
- Users List<string>
- List of user names that should be synced. These usernames should match the User principal name of the respective AAD users.
- Enabled bool
- Denotes whether usersync service should be enabled
- Groups []string
- List of groups that should be synced. These group names should match the object id of the respective AAD groups.
- Mode
string | Ranger
Usersync Mode - User & groups can be synced automatically or via a static list that's refreshed.
- User
Mapping stringLocation - Azure storage location of a mapping file that lists user & group associations.
- Users []string
- List of user names that should be synced. These usernames should match the User principal name of the respective AAD users.
- enabled Boolean
- Denotes whether usersync service should be enabled
- groups List<String>
- List of groups that should be synced. These group names should match the object id of the respective AAD groups.
- mode
String | Ranger
Usersync Mode - User & groups can be synced automatically or via a static list that's refreshed.
- user
Mapping StringLocation - Azure storage location of a mapping file that lists user & group associations.
- users List<String>
- List of user names that should be synced. These usernames should match the User principal name of the respective AAD users.
- enabled boolean
- Denotes whether usersync service should be enabled
- groups string[]
- List of groups that should be synced. These group names should match the object id of the respective AAD groups.
- mode
string | Ranger
Usersync Mode - User & groups can be synced automatically or via a static list that's refreshed.
- user
Mapping stringLocation - Azure storage location of a mapping file that lists user & group associations.
- users string[]
- List of user names that should be synced. These usernames should match the User principal name of the respective AAD users.
- enabled bool
- Denotes whether usersync service should be enabled
- groups Sequence[str]
- List of groups that should be synced. These group names should match the object id of the respective AAD groups.
- mode str | RangerUsersyncMode - User & groups can be synced automatically or via a static list that's refreshed.
- user_mapping_location str - Azure storage location of a mapping file that lists user & group associations.
- users Sequence[str]
- List of user names that should be synced. These usernames should match the User principal name of the respective AAD users.
- enabled Boolean
- Denotes whether usersync service should be enabled
- groups List<String>
- List of groups that should be synced. These group names should match the object id of the respective AAD groups.
- mode String | "static" | "automatic"
- User & groups can be synced automatically or via a static list that's refreshed.
- userMappingLocation String - Azure storage location of a mapping file that lists user & group associations.
- users List<String>
- List of user names that should be synced. These usernames should match the User principal name of the respective AAD users.
RangerUsersyncSpecResponse, RangerUsersyncSpecResponseArgs
- Enabled bool
- Denotes whether usersync service should be enabled
- Groups List<string>
- List of groups that should be synced. These group names should match the object id of the respective AAD groups.
- Mode string
- User & groups can be synced automatically or via a static list that's refreshed.
- UserMappingLocation string - Azure storage location of a mapping file that lists user & group associations.
- Users List<string>
- List of user names that should be synced. These usernames should match the User principal name of the respective AAD users.
- Enabled bool
- Denotes whether usersync service should be enabled
- Groups []string
- List of groups that should be synced. These group names should match the object id of the respective AAD groups.
- Mode string
- User & groups can be synced automatically or via a static list that's refreshed.
- UserMappingLocation string - Azure storage location of a mapping file that lists user & group associations.
- Users []string
- List of user names that should be synced. These usernames should match the User principal name of the respective AAD users.
- enabled Boolean
- Denotes whether usersync service should be enabled
- groups List<String>
- List of groups that should be synced. These group names should match the object id of the respective AAD groups.
- mode String
- User & groups can be synced automatically or via a static list that's refreshed.
- userMappingLocation String - Azure storage location of a mapping file that lists user & group associations.
- users List<String>
- List of user names that should be synced. These usernames should match the User principal name of the respective AAD users.
- enabled boolean
- Denotes whether usersync service should be enabled
- groups string[]
- List of groups that should be synced. These group names should match the object id of the respective AAD groups.
- mode string
- User & groups can be synced automatically or via a static list that's refreshed.
- userMappingLocation string - Azure storage location of a mapping file that lists user & group associations.
- users string[]
- List of user names that should be synced. These usernames should match the User principal name of the respective AAD users.
- enabled bool
- Denotes whether usersync service should be enabled
- groups Sequence[str]
- List of groups that should be synced. These group names should match the object id of the respective AAD groups.
- mode str
- User & groups can be synced automatically or via a static list that's refreshed.
- user_mapping_location str - Azure storage location of a mapping file that lists user & group associations.
- users Sequence[str]
- List of user names that should be synced. These usernames should match the User principal name of the respective AAD users.
- enabled Boolean
- Denotes whether usersync service should be enabled
- groups List<String>
- List of groups that should be synced. These group names should match the object id of the respective AAD groups.
- mode String
- User & groups can be synced automatically or via a static list that's refreshed.
- userMappingLocation String - Azure storage location of a mapping file that lists user & group associations.
- users List<String>
- List of user names that should be synced. These usernames should match the User principal name of the respective AAD users.
ScaleActionType, ScaleActionTypeArgs
- Scaleup
- scaleup
- Scaledown
- scaledown
- ScaleActionTypeScaleup - scaleup
- ScaleActionTypeScaledown - scaledown
- Scaleup
- scaleup
- Scaledown
- scaledown
- Scaleup
- scaleup
- Scaledown
- scaledown
- SCALEUP
- scaleup
- SCALEDOWN
- scaledown
- "scaleup"
- scaleup
- "scaledown"
- scaledown
ScalingRule, ScalingRuleArgs
- ActionType string | Pulumi.AzureNative.HDInsight.ScaleActionType - The action type.
- ComparisonRule Pulumi.AzureNative.HDInsight.Inputs.ComparisonRule - The comparison rule.
- EvaluationCount int - This is an evaluation count for a scaling condition, the number of times a trigger condition should be successful, before scaling activity is triggered.
- ScalingMetric string - Metrics name for individual workloads. For example: cpu
- ActionType string | ScaleActionType - The action type.
- ComparisonRule ComparisonRule - The comparison rule.
- EvaluationCount int - This is an evaluation count for a scaling condition, the number of times a trigger condition should be successful, before scaling activity is triggered.
- ScalingMetric string - Metrics name for individual workloads. For example: cpu
- actionType String | ScaleActionType - The action type.
- comparisonRule ComparisonRule - The comparison rule.
- evaluationCount Integer - This is an evaluation count for a scaling condition, the number of times a trigger condition should be successful, before scaling activity is triggered.
- scalingMetric String - Metrics name for individual workloads. For example: cpu
- actionType string | ScaleActionType - The action type.
- comparisonRule ComparisonRule - The comparison rule.
- evaluationCount number - This is an evaluation count for a scaling condition, the number of times a trigger condition should be successful, before scaling activity is triggered.
- scalingMetric string - Metrics name for individual workloads. For example: cpu
- action_type str | ScaleActionType - The action type.
- comparison_rule ComparisonRule - The comparison rule.
- evaluation_count int - This is an evaluation count for a scaling condition, the number of times a trigger condition should be successful, before scaling activity is triggered.
- scaling_metric str - Metrics name for individual workloads. For example: cpu
- actionType String | "scaleup" | "scaledown" - The action type.
- comparisonRule Property Map - The comparison rule.
- evaluationCount Number - This is an evaluation count for a scaling condition, the number of times a trigger condition should be successful, before scaling activity is triggered.
- scalingMetric String - Metrics name for individual workloads. For example: cpu
ScalingRuleResponse, ScalingRuleResponseArgs
- ActionType string - The action type.
- ComparisonRule Pulumi.AzureNative.HDInsight.Inputs.ComparisonRuleResponse - The comparison rule.
- EvaluationCount int - This is an evaluation count for a scaling condition, the number of times a trigger condition should be successful, before scaling activity is triggered.
- ScalingMetric string - Metrics name for individual workloads. For example: cpu
- Action
Type string - The action type.
- Comparison
Rule ComparisonRule Response - The comparison rule.
- Evaluation
Count int - This is an evaluation count for a scaling condition, the number of times a trigger condition should be successful, before scaling activity is triggered.
- Scaling
Metric string - Metrics name for individual workloads. For example: cpu
- action
Type String - The action type.
- comparison
Rule ComparisonRule Response - The comparison rule.
- evaluation
Count Integer - This is an evaluation count for a scaling condition, the number of times a trigger condition should be successful, before scaling activity is triggered.
- scaling
Metric String - Metrics name for individual workloads. For example: cpu
- action
Type string - The action type.
- comparison
Rule ComparisonRule Response - The comparison rule.
- evaluation
Count number - This is an evaluation count for a scaling condition, the number of times a trigger condition should be successful, before scaling activity is triggered.
- scaling
Metric string - Metrics name for individual workloads. For example: cpu
- action_
type str - The action type.
- comparison_
rule ComparisonRule Response - The comparison rule.
- evaluation_
count int - This is an evaluation count for a scaling condition, the number of times a trigger condition should be successful, before scaling activity is triggered.
- scaling_
metric str - Metrics name for individual workloads. For example: cpu
- action
Type String - The action type.
- comparison
Rule Property Map - The comparison rule.
- evaluation
Count Number - This is an evaluation count for a scaling condition, the number of times a trigger condition should be successful, before scaling activity is triggered.
- scaling
Metric String - Metrics name for individual workloads. For example: cpu
Schedule, ScheduleArgs
- Count int
- User has to set the node count anticipated at end of the scaling operation of the set current schedule configuration, format is integer.
- Days List<Union<string, Pulumi.AzureNative.HDInsight.ScheduleDay>> - User has to set the days where schedule has to be set for autoscale operation.
- EndTime string - User has to set the end time of current schedule configuration, format like 10:30 (HH:MM).
- StartTime string - User has to set the start time of current schedule configuration, format like 10:30 (HH:MM).
- Count int
- User has to set the node count anticipated at end of the scaling operation of the set current schedule configuration, format is integer.
- Days []string
- User has to set the days where schedule has to be set for autoscale operation.
- End
Time string - User has to set the end time of current schedule configuration, format like 10:30 (HH:MM).
- Start
Time string - User has to set the start time of current schedule configuration, format like 10:30 (HH:MM).
- count Integer
- User has to set the node count anticipated at end of the scaling operation of the set current schedule configuration, format is integer.
- days
List<Either<String,Schedule
Day>> - User has to set the days where schedule has to be set for autoscale operation.
- end
Time String - User has to set the end time of current schedule configuration, format like 10:30 (HH:MM).
- start
Time String - User has to set the start time of current schedule configuration, format like 10:30 (HH:MM).
- count number
- User has to set the node count anticipated at end of the scaling operation of the set current schedule configuration, format is integer.
- days
(string | Schedule
Day)[] - User has to set the days where schedule has to be set for autoscale operation.
- end
Time string - User has to set the end time of current schedule configuration, format like 10:30 (HH:MM).
- start
Time string - User has to set the start time of current schedule configuration, format like 10:30 (HH:MM).
- count int
- User has to set the node count anticipated at end of the scaling operation of the set current schedule configuration, format is integer.
- days
Sequence[Union[str, Schedule
Day]] - User has to set the days where schedule has to be set for autoscale operation.
- end_
time str - User has to set the end time of current schedule configuration, format like 10:30 (HH:MM).
- start_
time str - User has to set the start time of current schedule configuration, format like 10:30 (HH:MM).
- count Number
- User has to set the node count anticipated at end of the scaling operation of the set current schedule configuration, format is integer.
- days List<String | "Sunday" | "Monday" | "Tuesday" | "Wednesday" | "Thursday" | "Friday" | "Saturday">
- User has to set the days where schedule has to be set for autoscale operation.
- end
Time String - User has to set the end time of current schedule configuration, format like 10:30 (HH:MM).
- start
Time String - User has to set the start time of current schedule configuration, format like 10:30 (HH:MM).
ScheduleBasedConfig, ScheduleBasedConfigArgs
- DefaultCount int - Setting default node count of current schedule configuration. Default node count specifies the number of nodes which are default when an specified scaling operation is executed (scale up/scale down)
- Schedules List<Pulumi.AzureNative.HDInsight.Inputs.Schedule> - This specifies the schedules where scheduled based Autoscale to be enabled, the user has a choice to set multiple rules within the schedule across days and times (start/end).
- TimeZone string - User has to specify the timezone on which the schedule has to be set for schedule based autoscale configuration.
- Default
Count int - Setting default node count of current schedule configuration. Default node count specifies the number of nodes which are default when an specified scaling operation is executed (scale up/scale down)
- Schedules []Schedule
- This specifies the schedules where scheduled based Autoscale to be enabled, the user has a choice to set multiple rules within the schedule across days and times (start/end).
- Time
Zone string - User has to specify the timezone on which the schedule has to be set for schedule based autoscale configuration.
- default
Count Integer - Setting default node count of current schedule configuration. Default node count specifies the number of nodes which are default when an specified scaling operation is executed (scale up/scale down)
- schedules List<Schedule>
- This specifies the schedules where scheduled based Autoscale to be enabled, the user has a choice to set multiple rules within the schedule across days and times (start/end).
- time
Zone String - User has to specify the timezone on which the schedule has to be set for schedule based autoscale configuration.
- default
Count number - Setting default node count of current schedule configuration. Default node count specifies the number of nodes which are default when an specified scaling operation is executed (scale up/scale down)
- schedules Schedule[]
- This specifies the schedules where scheduled based Autoscale to be enabled, the user has a choice to set multiple rules within the schedule across days and times (start/end).
- time
Zone string - User has to specify the timezone on which the schedule has to be set for schedule based autoscale configuration.
- default_
count int - Setting default node count of current schedule configuration. Default node count specifies the number of nodes which are default when an specified scaling operation is executed (scale up/scale down)
- schedules Sequence[Schedule]
- This specifies the schedules where scheduled based Autoscale to be enabled, the user has a choice to set multiple rules within the schedule across days and times (start/end).
- time_
zone str - User has to specify the timezone on which the schedule has to be set for schedule based autoscale configuration.
- default
Count Number - Setting default node count of current schedule configuration. Default node count specifies the number of nodes which are default when an specified scaling operation is executed (scale up/scale down)
- schedules List<Property Map>
- This specifies the schedules where scheduled based Autoscale to be enabled, the user has a choice to set multiple rules within the schedule across days and times (start/end).
- time
Zone String - User has to specify the timezone on which the schedule has to be set for schedule based autoscale configuration.
ScheduleBasedConfigResponse, ScheduleBasedConfigResponseArgs
- Default
Count int - Setting default node count of current schedule configuration. Default node count specifies the number of nodes which are default when an specified scaling operation is executed (scale up/scale down)
- Schedules
List<Pulumi.
Azure Native. HDInsight. Inputs. Schedule Response> - This specifies the schedules where scheduled based Autoscale to be enabled, the user has a choice to set multiple rules within the schedule across days and times (start/end).
- Time
Zone string - User has to specify the timezone on which the schedule has to be set for schedule based autoscale configuration.
- Default
Count int - Setting default node count of current schedule configuration. Default node count specifies the number of nodes which are default when an specified scaling operation is executed (scale up/scale down)
- Schedules
[]Schedule
Response - This specifies the schedules where scheduled based Autoscale to be enabled, the user has a choice to set multiple rules within the schedule across days and times (start/end).
- Time
Zone string - User has to specify the timezone on which the schedule has to be set for schedule based autoscale configuration.
- default
Count Integer - Setting default node count of current schedule configuration. Default node count specifies the number of nodes which are default when an specified scaling operation is executed (scale up/scale down)
- schedules
List<Schedule
Response> - This specifies the schedules where scheduled based Autoscale to be enabled, the user has a choice to set multiple rules within the schedule across days and times (start/end).
- time
Zone String - User has to specify the timezone on which the schedule has to be set for schedule based autoscale configuration.
- default
Count number - Setting default node count of current schedule configuration. Default node count specifies the number of nodes which are default when an specified scaling operation is executed (scale up/scale down)
- schedules
Schedule
Response[] - This specifies the schedules where scheduled based Autoscale to be enabled, the user has a choice to set multiple rules within the schedule across days and times (start/end).
- time
Zone string - User has to specify the timezone on which the schedule has to be set for schedule based autoscale configuration.
- default_
count int - Setting default node count of current schedule configuration. Default node count specifies the number of nodes which are default when an specified scaling operation is executed (scale up/scale down)
- schedules
Sequence[Schedule
Response] - This specifies the schedules where scheduled based Autoscale to be enabled, the user has a choice to set multiple rules within the schedule across days and times (start/end).
- time_
zone str - User has to specify the timezone on which the schedule has to be set for schedule based autoscale configuration.
- default
Count Number - Setting default node count of current schedule configuration. Default node count specifies the number of nodes which are default when an specified scaling operation is executed (scale up/scale down)
- schedules List<Property Map>
- This specifies the schedules where scheduled based Autoscale to be enabled, the user has a choice to set multiple rules within the schedule across days and times (start/end).
- time
Zone String - User has to specify the timezone on which the schedule has to be set for schedule based autoscale configuration.
ScheduleDay, ScheduleDayArgs
- Sunday
- Sunday
- Monday
- Monday
- Tuesday
- Tuesday
- Wednesday
- Wednesday
- Thursday
- Thursday
- Friday
- Friday
- Saturday
- Saturday
- ScheduleDaySunday - Sunday
- ScheduleDayMonday - Monday
- ScheduleDayTuesday - Tuesday
- ScheduleDayWednesday - Wednesday
- ScheduleDayThursday - Thursday
- ScheduleDayFriday - Friday
- ScheduleDaySaturday - Saturday
- Sunday
- Sunday
- Monday
- Monday
- Tuesday
- Tuesday
- Wednesday
- Wednesday
- Thursday
- Thursday
- Friday
- Friday
- Saturday
- Saturday
- Sunday
- Sunday
- Monday
- Monday
- Tuesday
- Tuesday
- Wednesday
- Wednesday
- Thursday
- Thursday
- Friday
- Friday
- Saturday
- Saturday
- SUNDAY
- Sunday
- MONDAY
- Monday
- TUESDAY
- Tuesday
- WEDNESDAY
- Wednesday
- THURSDAY
- Thursday
- FRIDAY
- Friday
- SATURDAY
- Saturday
- "Sunday"
- Sunday
- "Monday"
- Monday
- "Tuesday"
- Tuesday
- "Wednesday"
- Wednesday
- "Thursday"
- Thursday
- "Friday"
- Friday
- "Saturday"
- Saturday
ScheduleResponse, ScheduleResponseArgs
- Count int
- User has to set the node count anticipated at end of the scaling operation of the set current schedule configuration, format is integer.
- Days List<string>
- User has to set the days where schedule has to be set for autoscale operation.
- End
Time string - User has to set the end time of current schedule configuration, format like 10:30 (HH:MM).
- Start
Time string - User has to set the start time of current schedule configuration, format like 10:30 (HH:MM).
- Count int
- User has to set the node count anticipated at end of the scaling operation of the set current schedule configuration, format is integer.
- Days []string
- User has to set the days where schedule has to be set for autoscale operation.
- End
Time string - User has to set the end time of current schedule configuration, format like 10:30 (HH:MM).
- Start
Time string - User has to set the start time of current schedule configuration, format like 10:30 (HH:MM).
- count Integer
- User has to set the node count anticipated at end of the scaling operation of the set current schedule configuration, format is integer.
- days List<String>
- User has to set the days where schedule has to be set for autoscale operation.
- end
Time String - User has to set the end time of current schedule configuration, format like 10:30 (HH:MM).
- start
Time String - User has to set the start time of current schedule configuration, format like 10:30 (HH:MM).
- count number
- User has to set the node count anticipated at end of the scaling operation of the set current schedule configuration, format is integer.
- days string[]
- User has to set the days where schedule has to be set for autoscale operation.
- end
Time string - User has to set the end time of current schedule configuration, format like 10:30 (HH:MM).
- start
Time string - User has to set the start time of current schedule configuration, format like 10:30 (HH:MM).
- count int
- User has to set the node count anticipated at end of the scaling operation of the set current schedule configuration, format is integer.
- days Sequence[str]
- User has to set the days where schedule has to be set for autoscale operation.
- end_
time str - User has to set the end time of current schedule configuration, format like 10:30 (HH:MM).
- start_
time str - User has to set the start time of current schedule configuration, format like 10:30 (HH:MM).
- count Number
- User has to set the node count anticipated at end of the scaling operation of the set current schedule configuration, format is integer.
- days List<String>
- User has to set the days where schedule has to be set for autoscale operation.
- end
Time String - User has to set the end time of current schedule configuration, format like 10:30 (HH:MM).
- start
Time String - User has to set the start time of current schedule configuration, format like 10:30 (HH:MM).
ScriptActionProfile, ScriptActionProfileArgs
- Name string
- Script name.
- Services List<string>
- List of services to apply the script action.
- Type string
- Type of the script action. Supported type is bash scripts.
- Url string
- Url of the script file.
- Parameters string
- Additional parameters for the script action. It should be space-separated list of arguments required for script execution.
- ShouldPersist bool - Specify if the script should persist on the cluster.
- TimeoutInMinutes int - Timeout duration for the script action in minutes.
- Name string
- Script name.
- Services []string
- List of services to apply the script action.
- Type string
- Type of the script action. Supported type is bash scripts.
- Url string
- Url of the script file.
- Parameters string
- Additional parameters for the script action. It should be space-separated list of arguments required for script execution.
- ShouldPersist bool - Specify if the script should persist on the cluster.
- TimeoutInMinutes int - Timeout duration for the script action in minutes.
- name String
- Script name.
- services List<String>
- List of services to apply the script action.
- type String
- Type of the script action. Supported type is bash scripts.
- url String
- Url of the script file.
- parameters String
- Additional parameters for the script action. It should be space-separated list of arguments required for script execution.
- shouldPersist Boolean - Specify if the script should persist on the cluster.
- timeoutInMinutes Integer - Timeout duration for the script action in minutes.
- name string
- Script name.
- services string[]
- List of services to apply the script action.
- type string
- Type of the script action. Supported type is bash scripts.
- url string
- Url of the script file.
- parameters string
- Additional parameters for the script action. It should be space-separated list of arguments required for script execution.
- shouldPersist boolean - Specify if the script should persist on the cluster.
- timeoutInMinutes number - Timeout duration for the script action in minutes.
- name str
- Script name.
- services Sequence[str]
- List of services to apply the script action.
- type str
- Type of the script action. Supported type is bash scripts.
- url str
- Url of the script file.
- parameters str
- Additional parameters for the script action. It should be space-separated list of arguments required for script execution.
- should_persist bool - Specify if the script should persist on the cluster.
- timeout_in_minutes int - Timeout duration for the script action in minutes.
- name String
- Script name.
- services List<String>
- List of services to apply the script action.
- type String
- Type of the script action. Supported type is bash scripts.
- url String
- Url of the script file.
- parameters String
- Additional parameters for the script action. It should be space-separated list of arguments required for script execution.
- shouldPersist Boolean - Specify if the script should persist on the cluster.
- timeoutInMinutes Number - Timeout duration for the script action in minutes.
ScriptActionProfileResponse, ScriptActionProfileResponseArgs
- Name string
- Script name.
- Services List<string>
- List of services to apply the script action.
- Type string
- Type of the script action. Supported type is bash scripts.
- Url string
- Url of the script file.
- Parameters string
- Additional parameters for the script action. It should be space-separated list of arguments required for script execution.
- ShouldPersist bool - Specify if the script should persist on the cluster.
- TimeoutInMinutes int - Timeout duration for the script action in minutes.
- Name string
- Script name.
- Services []string
- List of services to apply the script action.
- Type string
- Type of the script action. Supported type is bash scripts.
- Url string
- Url of the script file.
- Parameters string
- Additional parameters for the script action. It should be space-separated list of arguments required for script execution.
- ShouldPersist bool - Specify if the script should persist on the cluster.
- TimeoutInMinutes int - Timeout duration for the script action in minutes.
- name String
- Script name.
- services List<String>
- List of services to apply the script action.
- type String
- Type of the script action. Supported type is bash scripts.
- url String
- Url of the script file.
- parameters String
- Additional parameters for the script action. It should be space-separated list of arguments required for script execution.
- shouldPersist Boolean - Specify if the script should persist on the cluster.
- timeoutInMinutes Integer - Timeout duration for the script action in minutes.
- name string
- Script name.
- services string[]
- List of services to apply the script action.
- type string
- Type of the script action. Supported type is bash scripts.
- url string
- Url of the script file.
- parameters string
- Additional parameters for the script action. It should be space-separated list of arguments required for script execution.
- shouldPersist boolean - Specify if the script should persist on the cluster.
- timeoutInMinutes number - Timeout duration for the script action in minutes.
- name str
- Script name.
- services Sequence[str]
- List of services to apply the script action.
- type str
- Type of the script action. Supported type is bash scripts.
- url str
- Url of the script file.
- parameters str
- Additional parameters for the script action. It should be space-separated list of arguments required for script execution.
- should_persist bool - Specify if the script should persist on the cluster.
- timeout_in_minutes int - Timeout duration for the script action in minutes.
- name String
- Script name.
- services List<String>
- List of services to apply the script action.
- type String
- Type of the script action. Supported type is bash scripts.
- url String
- Url of the script file.
- parameters String
- Additional parameters for the script action. It should be space-separated list of arguments required for script execution.
- shouldPersist Boolean - Specify if the script should persist on the cluster.
- timeoutInMinutes Number - Timeout duration for the script action in minutes.
SecretReference, SecretReferenceArgs
- KeyVaultObjectName string - Object identifier name of the secret in key vault.
- ReferenceName string - Reference name of the secret to be used in service configs.
- Type string | Pulumi.AzureNative.HDInsight.KeyVaultObjectType - Type of key vault object: secret, key or certificate.
- Version string
- Version of the secret in key vault.
- KeyVaultObjectName string - Object identifier name of the secret in key vault.
- ReferenceName string - Reference name of the secret to be used in service configs.
- Type string | KeyVaultObjectType - Type of key vault object: secret, key or certificate.
- Version string
- Version of the secret in key vault.
- keyVaultObjectName String - Object identifier name of the secret in key vault.
- referenceName String - Reference name of the secret to be used in service configs.
- type String | KeyVaultObjectType - Type of key vault object: secret, key or certificate.
- version String
- Version of the secret in key vault.
- keyVaultObjectName string - Object identifier name of the secret in key vault.
- referenceName string - Reference name of the secret to be used in service configs.
- type string | KeyVaultObjectType - Type of key vault object: secret, key or certificate.
- version string
- Version of the secret in key vault.
- key_vault_object_name str - Object identifier name of the secret in key vault.
- reference_name str - Reference name of the secret to be used in service configs.
- type str | KeyVaultObjectType - Type of key vault object: secret, key or certificate.
- version str
- Version of the secret in key vault.
- keyVaultObjectName String - Object identifier name of the secret in key vault.
- referenceName String - Reference name of the secret to be used in service configs.
- type String | "Key" | "Secret" | "Certificate"
- Type of key vault object: secret, key or certificate.
- version String
- Version of the secret in key vault.
SecretReferenceResponse, SecretReferenceResponseArgs
- KeyVaultObjectName string - Object identifier name of the secret in key vault.
- ReferenceName string - Reference name of the secret to be used in service configs.
- Type string
- Type of key vault object: secret, key or certificate.
- Version string
- Version of the secret in key vault.
- Key
Vault stringObject Name - Object identifier name of the secret in key vault.
- Reference
Name string - Reference name of the secret to be used in service configs.
- Type string
- Type of key vault object: secret, key or certificate.
- Version string
- Version of the secret in key vault.
- key
Vault StringObject Name - Object identifier name of the secret in key vault.
- reference
Name String - Reference name of the secret to be used in service configs.
- type String
- Type of key vault object: secret, key or certificate.
- version String
- Version of the secret in key vault.
- key
Vault stringObject Name - Object identifier name of the secret in key vault.
- reference
Name string - Reference name of the secret to be used in service configs.
- type string
- Type of key vault object: secret, key or certificate.
- version string
- Version of the secret in key vault.
- key_
vault_ strobject_ name - Object identifier name of the secret in key vault.
- reference_
name str - Reference name of the secret to be used in service configs.
- type str
- Type of key vault object: secret, key or certificate.
- version str
- Version of the secret in key vault.
- key
Vault StringObject Name - Object identifier name of the secret in key vault.
- reference
Name String - Reference name of the secret to be used in service configs.
- type String
- Type of key vault object: secret, key or certificate.
- version String
- Version of the secret in key vault.
SecretsProfile, SecretsProfileArgs
- Key
Vault stringResource Id - Name of the user Key Vault where all the cluster specific user secrets are stored.
- Secrets
List<Pulumi.
Azure Native. HDInsight. Inputs. Secret Reference> - Properties of Key Vault secret.
- Key
Vault stringResource Id - Name of the user Key Vault where all the cluster specific user secrets are stored.
- Secrets
[]Secret
Reference - Properties of Key Vault secret.
- key
Vault StringResource Id - Name of the user Key Vault where all the cluster specific user secrets are stored.
- secrets
List<Secret
Reference> - Properties of Key Vault secret.
- key
Vault stringResource Id - Name of the user Key Vault where all the cluster specific user secrets are stored.
- secrets
Secret
Reference[] - Properties of Key Vault secret.
- key_
vault_ strresource_ id - Name of the user Key Vault where all the cluster specific user secrets are stored.
- secrets
Sequence[Secret
Reference] - Properties of Key Vault secret.
- key
Vault StringResource Id - Name of the user Key Vault where all the cluster specific user secrets are stored.
- secrets List<Property Map>
- Properties of Key Vault secret.
SecretsProfileResponse, SecretsProfileResponseArgs
- Key
Vault stringResource Id - Name of the user Key Vault where all the cluster specific user secrets are stored.
- Secrets
List<Pulumi.
Azure Native. HDInsight. Inputs. Secret Reference Response> - Properties of Key Vault secret.
- Key
Vault stringResource Id - Name of the user Key Vault where all the cluster specific user secrets are stored.
- Secrets
[]Secret
Reference Response - Properties of Key Vault secret.
- key
Vault StringResource Id - Name of the user Key Vault where all the cluster specific user secrets are stored.
- secrets
List<Secret
Reference Response> - Properties of Key Vault secret.
- key
Vault stringResource Id - Name of the user Key Vault where all the cluster specific user secrets are stored.
- secrets
Secret
Reference Response[] - Properties of Key Vault secret.
- key_
vault_ strresource_ id - Name of the user Key Vault where all the cluster specific user secrets are stored.
- secrets
Sequence[Secret
Reference Response] - Properties of Key Vault secret.
- key
Vault StringResource Id - Name of the user Key Vault where all the cluster specific user secrets are stored.
- secrets List<Property Map>
- Properties of Key Vault secret.
SparkMetastoreSpec, SparkMetastoreSpecArgs
- Db
Name string - The database name.
- Db
Server stringHost - The database server host.
- DbConnectionAuthenticationMode string | Pulumi.AzureNative.HDInsight.DbConnectionAuthenticationMode - The authentication mode to connect to your Hive metastore database. More details: https://learn.microsoft.com/en-us/azure/azure-sql/database/logins-create-manage?view=azuresql#authentication-and-authorization
- Db
Password stringSecret Name - The secret name which contains the database user password.
- Db
User stringName - The database user name.
- Key
Vault stringId - The key vault resource id.
- Thrift
Url string - The thrift url.
- Db
Name string - The database name.
- Db
Server stringHost - The database server host.
- Db
Connection string | DbAuthentication Mode Connection Authentication Mode - The authentication mode to connect to your Hive metastore database. More details: https://learn.microsoft.com/en-us/azure/azure-sql/database/logins-create-manage?view=azuresql#authentication-and-authorization
- Db
Password stringSecret Name - The secret name which contains the database user password.
- Db
User stringName - The database user name.
- Key
Vault stringId - The key vault resource id.
- Thrift
Url string - The thrift url.
- db
Name String - The database name.
- db
Server StringHost - The database server host.
- db
Connection String | DbAuthentication Mode Connection Authentication Mode - The authentication mode to connect to your Hive metastore database. More details: https://learn.microsoft.com/en-us/azure/azure-sql/database/logins-create-manage?view=azuresql#authentication-and-authorization
- db
Password StringSecret Name - The secret name which contains the database user password.
- db
User StringName - The database user name.
- key
Vault StringId - The key vault resource id.
- thrift
Url String - The thrift url.
- db
Name string - The database name.
- db
Server stringHost - The database server host.
- db
Connection string | DbAuthentication Mode Connection Authentication Mode - The authentication mode to connect to your Hive metastore database. More details: https://learn.microsoft.com/en-us/azure/azure-sql/database/logins-create-manage?view=azuresql#authentication-and-authorization
- db
Password stringSecret Name - The secret name which contains the database user password.
- db
User stringName - The database user name.
- key
Vault stringId - The key vault resource id.
- thrift
Url string - The thrift url.
- db_
name str - The database name.
- db_
server_ strhost - The database server host.
- db_
connection_ str | Dbauthentication_ mode Connection Authentication Mode - The authentication mode to connect to your Hive metastore database. More details: https://learn.microsoft.com/en-us/azure/azure-sql/database/logins-create-manage?view=azuresql#authentication-and-authorization
- db_
password_ strsecret_ name - The secret name which contains the database user password.
- db_
user_ strname - The database user name.
- key_
vault_ strid - The key vault resource id.
- thrift_
url str - The thrift url.
- db
Name String - The database name.
- db
Server StringHost - The database server host.
- dbConnectionAuthenticationMode String | "SqlAuth" | "IdentityAuth" - The authentication mode to connect to your Hive metastore database. More details: https://learn.microsoft.com/en-us/azure/azure-sql/database/logins-create-manage?view=azuresql#authentication-and-authorization
- db
Password StringSecret Name - The secret name which contains the database user password.
- db
User StringName - The database user name.
- key
Vault StringId - The key vault resource id.
- thrift
Url String - The thrift url.
SparkMetastoreSpecResponse, SparkMetastoreSpecResponseArgs
- Db
Name string - The database name.
- Db
Server stringHost - The database server host.
- Db
Connection stringAuthentication Mode - The authentication mode to connect to your Hive metastore database. More details: https://learn.microsoft.com/en-us/azure/azure-sql/database/logins-create-manage?view=azuresql#authentication-and-authorization
- Db
Password stringSecret Name - The secret name which contains the database user password.
- Db
User stringName - The database user name.
- Key
Vault stringId - The key vault resource id.
- Thrift
Url string - The thrift url.
- Db
Name string - The database name.
- Db
Server stringHost - The database server host.
- Db
Connection stringAuthentication Mode - The authentication mode to connect to your Hive metastore database. More details: https://learn.microsoft.com/en-us/azure/azure-sql/database/logins-create-manage?view=azuresql#authentication-and-authorization
- Db
Password stringSecret Name - The secret name which contains the database user password.
- Db
User stringName - The database user name.
- Key
Vault stringId - The key vault resource id.
- Thrift
Url string - The thrift url.
- db
Name String - The database name.
- db
Server StringHost - The database server host.
- db
Connection StringAuthentication Mode - The authentication mode to connect to your Hive metastore database. More details: https://learn.microsoft.com/en-us/azure/azure-sql/database/logins-create-manage?view=azuresql#authentication-and-authorization
- db
Password StringSecret Name - The secret name which contains the database user password.
- db
User StringName - The database user name.
- key
Vault StringId - The key vault resource id.
- thrift
Url String - The thrift url.
- db
Name string - The database name.
- db
Server stringHost - The database server host.
- db
Connection stringAuthentication Mode - The authentication mode to connect to your Hive metastore database. More details: https://learn.microsoft.com/en-us/azure/azure-sql/database/logins-create-manage?view=azuresql#authentication-and-authorization
- db
Password stringSecret Name - The secret name which contains the database user password.
- db
User stringName - The database user name.
- key
Vault stringId - The key vault resource id.
- thrift
Url string - The thrift url.
- db_
name str - The database name.
- db_
server_ strhost - The database server host.
- db_
connection_ strauthentication_ mode - The authentication mode to connect to your Hive metastore database. More details: https://learn.microsoft.com/en-us/azure/azure-sql/database/logins-create-manage?view=azuresql#authentication-and-authorization
- db_
password_ strsecret_ name - The secret name which contains the database user password.
- db_
user_ strname - The database user name.
- key_
vault_ strid - The key vault resource id.
- thrift_
url str - The thrift url.
- db
Name String - The database name.
- db
Server StringHost - The database server host.
- db
Connection StringAuthentication Mode - The authentication mode to connect to your Hive metastore database. More details: https://learn.microsoft.com/en-us/azure/azure-sql/database/logins-create-manage?view=azuresql#authentication-and-authorization
- db
Password StringSecret Name - The secret name which contains the database user password.
- db
User StringName - The database user name.
- key
Vault StringId - The key vault resource id.
- thrift
Url String - The thrift url.
SparkProfile, SparkProfileArgs
- Default
Storage stringUrl - The default storage URL.
- Metastore
Spec Pulumi.Azure Native. HDInsight. Inputs. Spark Metastore Spec - The metastore specification for Spark cluster.
- User
Plugins Pulumi.Spec Azure Native. HDInsight. Inputs. Spark User Plugins - Spark user plugins spec
- Default
Storage stringUrl - The default storage URL.
- Metastore
Spec SparkMetastore Spec - The metastore specification for Spark cluster.
- User
Plugins SparkSpec User Plugins - Spark user plugins spec
- default
Storage StringUrl - The default storage URL.
- metastore
Spec SparkMetastore Spec - The metastore specification for Spark cluster.
- user
Plugins SparkSpec User Plugins - Spark user plugins spec
- default
Storage stringUrl - The default storage URL.
- metastore
Spec SparkMetastore Spec - The metastore specification for Spark cluster.
- user
Plugins SparkSpec User Plugins - Spark user plugins spec
- default_
storage_ strurl - The default storage URL.
- metastore_
spec SparkMetastore Spec - The metastore specification for Spark cluster.
- user_
plugins_ Sparkspec User Plugins - Spark user plugins spec
- default
Storage StringUrl - The default storage URL.
- metastore
Spec Property Map - The metastore specification for Spark cluster.
- user
Plugins Property MapSpec - Spark user plugins spec
SparkProfileResponse, SparkProfileResponseArgs
- Default
Storage stringUrl - The default storage URL.
- Metastore
Spec Pulumi.Azure Native. HDInsight. Inputs. Spark Metastore Spec Response - The metastore specification for Spark cluster.
- User
Plugins Pulumi.Spec Azure Native. HDInsight. Inputs. Spark User Plugins Response - Spark user plugins spec
- Default
Storage stringUrl - The default storage URL.
- Metastore
Spec SparkMetastore Spec Response - The metastore specification for Spark cluster.
- User
Plugins SparkSpec User Plugins Response - Spark user plugins spec
- default
Storage StringUrl - The default storage URL.
- metastore
Spec SparkMetastore Spec Response - The metastore specification for Spark cluster.
- user
Plugins SparkSpec User Plugins Response - Spark user plugins spec
- default
Storage stringUrl - The default storage URL.
- metastore
Spec SparkMetastore Spec Response - The metastore specification for Spark cluster.
- user
Plugins SparkSpec User Plugins Response - Spark user plugins spec
- default_
storage_ strurl - The default storage URL.
- metastore_
spec SparkMetastore Spec Response - The metastore specification for Spark cluster.
- user_
plugins_ Sparkspec User Plugins Response - Spark user plugins spec
- default
Storage StringUrl - The default storage URL.
- metastore
Spec Property Map - The metastore specification for Spark cluster.
- user
Plugins Property MapSpec - Spark user plugins spec
SparkUserPlugin, SparkUserPluginArgs
- Path string
- Fully qualified path to the folder containing the plugins.
- Path string
- Fully qualified path to the folder containing the plugins.
- path String
- Fully qualified path to the folder containing the plugins.
- path string
- Fully qualified path to the folder containing the plugins.
- path str
- Fully qualified path to the folder containing the plugins.
- path String
- Fully qualified path to the folder containing the plugins.
SparkUserPluginResponse, SparkUserPluginResponseArgs
- Path string
- Fully qualified path to the folder containing the plugins.
- Path string
- Fully qualified path to the folder containing the plugins.
- path String
- Fully qualified path to the folder containing the plugins.
- path string
- Fully qualified path to the folder containing the plugins.
- path str
- Fully qualified path to the folder containing the plugins.
- path String
- Fully qualified path to the folder containing the plugins.
SparkUserPlugins, SparkUserPluginsArgs
- Plugins
List<Pulumi.
Azure Native. HDInsight. Inputs. Spark User Plugin> - Spark user plugins.
- Plugins
[]Spark
User Plugin - Spark user plugins.
- plugins
List<Spark
User Plugin> - Spark user plugins.
- plugins
Spark
User Plugin[] - Spark user plugins.
- plugins
Sequence[Spark
User Plugin] - Spark user plugins.
- plugins List<Property Map>
- Spark user plugins.
SparkUserPluginsResponse, SparkUserPluginsResponseArgs
- Plugins
List<Pulumi.
Azure Native. HDInsight. Inputs. Spark User Plugin Response> - Spark user plugins.
- Plugins
[]Spark
User Plugin Response - Spark user plugins.
- plugins
List<Spark
User Plugin Response> - Spark user plugins.
- plugins
Spark
User Plugin Response[] - Spark user plugins.
- plugins
Sequence[Spark
User Plugin Response] - Spark user plugins.
- plugins List<Property Map>
- Spark user plugins.
SshConnectivityEndpointResponse, SshConnectivityEndpointResponseArgs
- Endpoint string
- SSH connectivity endpoint.
- Private
Ssh stringEndpoint - Private SSH connectivity endpoint. This property will only be returned when enableInternalIngress is true.
- Endpoint string
- SSH connectivity endpoint.
- Private
Ssh stringEndpoint - Private SSH connectivity endpoint. This property will only be returned when enableInternalIngress is true.
- endpoint String
- SSH connectivity endpoint.
- private
Ssh StringEndpoint - Private SSH connectivity endpoint. This property will only be returned when enableInternalIngress is true.
- endpoint string
- SSH connectivity endpoint.
- private
Ssh stringEndpoint - Private SSH connectivity endpoint. This property will only be returned when enableInternalIngress is true.
- endpoint str
- SSH connectivity endpoint.
- private_
ssh_ strendpoint - Private SSH connectivity endpoint. This property will only be returned when enableInternalIngress is true.
- endpoint String
- SSH connectivity endpoint.
- private
Ssh StringEndpoint - Private SSH connectivity endpoint. This property will only be returned when enableInternalIngress is true.
SystemDataResponse, SystemDataResponseArgs
- Created
At string - The timestamp of resource creation (UTC).
- Created
By string - The identity that created the resource.
- Created
By stringType - The type of identity that created the resource.
- Last
Modified stringAt - The timestamp of resource last modification (UTC)
- Last
Modified stringBy - The identity that last modified the resource.
- Last
Modified stringBy Type - The type of identity that last modified the resource.
- Created
At string - The timestamp of resource creation (UTC).
- Created
By string - The identity that created the resource.
- Created
By stringType - The type of identity that created the resource.
- Last
Modified stringAt - The timestamp of resource last modification (UTC)
- Last
Modified stringBy - The identity that last modified the resource.
- Last
Modified stringBy Type - The type of identity that last modified the resource.
- created
At String - The timestamp of resource creation (UTC).
- created
By String - The identity that created the resource.
- created
By StringType - The type of identity that created the resource.
- last
Modified StringAt - The timestamp of resource last modification (UTC)
- last
Modified StringBy - The identity that last modified the resource.
- last
Modified StringBy Type - The type of identity that last modified the resource.
- created
At string - The timestamp of resource creation (UTC).
- created
By string - The identity that created the resource.
- created
By stringType - The type of identity that created the resource.
- last
Modified stringAt - The timestamp of resource last modification (UTC)
- last
Modified stringBy - The identity that last modified the resource.
- last
Modified stringBy Type - The type of identity that last modified the resource.
- created_
at str - The timestamp of resource creation (UTC).
- created_
by str - The identity that created the resource.
- created_
by_ strtype - The type of identity that created the resource.
- last_
modified_ strat - The timestamp of resource last modification (UTC)
- last_
modified_ strby - The identity that last modified the resource.
- last_
modified_ strby_ type - The type of identity that last modified the resource.
- created
At String - The timestamp of resource creation (UTC).
- created
By String - The identity that created the resource.
- created
By StringType - The type of identity that created the resource.
- last
Modified StringAt - The timestamp of resource last modification (UTC)
- last
Modified StringBy - The identity that last modified the resource.
- last
Modified StringBy Type - The type of identity that last modified the resource.
TrinoCoordinator, TrinoCoordinatorArgs
- Enable bool
- The flag that if enable debug or not.
- High
Availability boolEnabled - The flag that if enable coordinator HA, uses multiple coordinator replicas with auto failover, one per each head node. Default: true.
- Port int
- The debug port.
- Suspend bool
- The flag that if suspend debug or not.
- Enable bool
- The flag that if enable debug or not.
- High
Availability boolEnabled - The flag that if enable coordinator HA, uses multiple coordinator replicas with auto failover, one per each head node. Default: true.
- Port int
- The debug port.
- Suspend bool
- The flag that if suspend debug or not.
- enable Boolean
- The flag that if enable debug or not.
- high
Availability BooleanEnabled - The flag that if enable coordinator HA, uses multiple coordinator replicas with auto failover, one per each head node. Default: true.
- port Integer
- The debug port.
- suspend Boolean
- The flag that if suspend debug or not.
- enable boolean
- The flag that if enable debug or not.
- high
Availability booleanEnabled - The flag that if enable coordinator HA, uses multiple coordinator replicas with auto failover, one per each head node. Default: true.
- port number
- The debug port.
- suspend boolean
- The flag that if suspend debug or not.
- enable bool
- The flag that if enable debug or not.
- high_
availability_ boolenabled - The flag that if enable coordinator HA, uses multiple coordinator replicas with auto failover, one per each head node. Default: true.
- port int
- The debug port.
- suspend bool
- The flag that if suspend debug or not.
- enable Boolean
- The flag that if enable debug or not.
- high
Availability BooleanEnabled - The flag that if enable coordinator HA, uses multiple coordinator replicas with auto failover, one per each head node. Default: true.
- port Number
- The debug port.
- suspend Boolean
- The flag that if suspend debug or not.
TrinoCoordinatorResponse, TrinoCoordinatorResponseArgs
- Enable bool
- The flag that if enable debug or not.
- High
Availability boolEnabled - The flag that if enable coordinator HA, uses multiple coordinator replicas with auto failover, one per each head node. Default: true.
- Port int
- The debug port.
- Suspend bool
- The flag that if suspend debug or not.
- Enable bool
- The flag that if enable debug or not.
- High
Availability boolEnabled - The flag that if enable coordinator HA, uses multiple coordinator replicas with auto failover, one per each head node. Default: true.
- Port int
- The debug port.
- Suspend bool
- The flag that if suspend debug or not.
- enable Boolean
- The flag that if enable debug or not.
- high
Availability BooleanEnabled - The flag that if enable coordinator HA, uses multiple coordinator replicas with auto failover, one per each head node. Default: true.
- port Integer
- The debug port.
- suspend Boolean
- The flag that if suspend debug or not.
- enable boolean
- The flag that if enable debug or not.
- high
Availability booleanEnabled - The flag that if enable coordinator HA, uses multiple coordinator replicas with auto failover, one per each head node. Default: true.
- port number
- The debug port.
- suspend boolean
- The flag that if suspend debug or not.
- enable bool
- The flag that if enable debug or not.
- high_
availability_ boolenabled - The flag that if enable coordinator HA, uses multiple coordinator replicas with auto failover, one per each head node. Default: true.
- port int
- The debug port.
- suspend bool
- The flag that if suspend debug or not.
- enable Boolean
- The flag that if enable debug or not.
- high
Availability BooleanEnabled - The flag that if enable coordinator HA, uses multiple coordinator replicas with auto failover, one per each head node. Default: true.
- port Number
- The debug port.
- suspend Boolean
- The flag that if suspend debug or not.
TrinoProfile, TrinoProfileArgs
- Catalog
Options Pulumi.Azure Native. HDInsight. Inputs. Catalog Options - Trino cluster catalog options.
- Coordinator
Pulumi.
Azure Native. HDInsight. Inputs. Trino Coordinator - Trino Coordinator.
- User
Plugins Pulumi.Spec Azure Native. HDInsight. Inputs. Trino User Plugins - Trino user plugins spec
- User
Telemetry Pulumi.Spec Azure Native. HDInsight. Inputs. Trino User Telemetry - User telemetry
- Worker
Pulumi.
Azure Native. HDInsight. Inputs. Trino Worker - Trino worker.
- Catalog
Options CatalogOptions - Trino cluster catalog options.
- Coordinator
Trino
Coordinator - Trino Coordinator.
- User
Plugins TrinoSpec User Plugins - Trino user plugins spec
- User
Telemetry TrinoSpec User Telemetry - User telemetry
- Worker
Trino
Worker - Trino worker.
- catalog
Options CatalogOptions - Trino cluster catalog options.
- coordinator
Trino
Coordinator - Trino Coordinator.
- user
Plugins TrinoSpec User Plugins - Trino user plugins spec
- user
Telemetry TrinoSpec User Telemetry - User telemetry
- worker
Trino
Worker - Trino worker.
- catalog
Options CatalogOptions - Trino cluster catalog options.
- coordinator
Trino
Coordinator - Trino Coordinator.
- user
Plugins TrinoSpec User Plugins - Trino user plugins spec
- user
Telemetry TrinoSpec User Telemetry - User telemetry
- worker
Trino
Worker - Trino worker.
- catalog_
options CatalogOptions - Trino cluster catalog options.
- coordinator
Trino
Coordinator - Trino Coordinator.
- user_
plugins_ Trinospec User Plugins - Trino user plugins spec
- user_
telemetry_ Trinospec User Telemetry - User telemetry
- worker
Trino
Worker - Trino worker.
- catalog
Options Property Map - Trino cluster catalog options.
- coordinator Property Map
- Trino Coordinator.
- user
Plugins Property MapSpec - Trino user plugins spec
- user
Telemetry Property MapSpec - User telemetry
- worker Property Map
- Trino worker.
TrinoProfileResponse, TrinoProfileResponseArgs
- Catalog
Options Pulumi.Azure Native. HDInsight. Inputs. Catalog Options Response - Trino cluster catalog options.
- Coordinator
Pulumi.
Azure Native. HDInsight. Inputs. Trino Coordinator Response - Trino Coordinator.
- User
Plugins Pulumi.Spec Azure Native. HDInsight. Inputs. Trino User Plugins Response - Trino user plugins spec
- User
Telemetry Pulumi.Spec Azure Native. HDInsight. Inputs. Trino User Telemetry Response - User telemetry
- Worker
Pulumi.
Azure Native. HDInsight. Inputs. Trino Worker Response - Trino worker.
- Catalog
Options CatalogOptions Response - Trino cluster catalog options.
- Coordinator
Trino
Coordinator Response - Trino Coordinator.
- User
Plugins TrinoSpec User Plugins Response - Trino user plugins spec
- User
Telemetry TrinoSpec User Telemetry Response - User telemetry
- Worker
Trino
Worker Response - Trino worker.
- catalog
Options CatalogOptions Response - Trino cluster catalog options.
- coordinator
Trino
Coordinator Response - Trino Coordinator.
- user
Plugins TrinoSpec User Plugins Response - Trino user plugins spec
- user
Telemetry TrinoSpec User Telemetry Response - User telemetry
- worker
Trino
Worker Response - Trino worker.
- catalog
Options CatalogOptions Response - Trino cluster catalog options.
- coordinator
Trino
Coordinator Response - Trino Coordinator.
- user
Plugins TrinoSpec User Plugins Response - Trino user plugins spec
- user
Telemetry TrinoSpec User Telemetry Response - User telemetry
- worker
Trino
Worker Response - Trino worker.
- catalog_
options CatalogOptions Response - Trino cluster catalog options.
- coordinator
Trino
Coordinator Response - Trino Coordinator.
- user_
plugins_ Trinospec User Plugins Response - Trino user plugins spec
- user_
telemetry_ Trinospec User Telemetry Response - User telemetry
- worker
Trino
Worker Response - Trino worker.
- catalog
Options Property Map - Trino cluster catalog options.
- coordinator Property Map
- Trino Coordinator.
- user
Plugins Property MapSpec - Trino user plugins spec
- user
Telemetry Property MapSpec - User telemetry
- worker Property Map
- Trino worker.
TrinoTelemetryConfig, TrinoTelemetryConfigArgs
- HivecatalogName string - Hive Catalog name used to mount external tables on the logs written by trino; if not specified, these tables are not created.
- HivecatalogSchema string - Schema of the above catalog to use, to mount query logs as external tables; if not specified, tables will be mounted under schema trinologs.
- PartitionRetentionInDays int - Retention period for query log table partitions; this doesn't have any effect on actual data.
- Path string - Azure storage location of the blobs.
- HivecatalogName string - Hive Catalog name used to mount external tables on the logs written by trino; if not specified, these tables are not created.
- HivecatalogSchema string - Schema of the above catalog to use, to mount query logs as external tables; if not specified, tables will be mounted under schema trinologs.
- PartitionRetentionInDays int - Retention period for query log table partitions; this doesn't have any effect on actual data.
- Path string - Azure storage location of the blobs.
- hivecatalogName String - Hive Catalog name used to mount external tables on the logs written by trino; if not specified, these tables are not created.
- hivecatalogSchema String - Schema of the above catalog to use, to mount query logs as external tables; if not specified, tables will be mounted under schema trinologs.
- partitionRetentionInDays Integer - Retention period for query log table partitions; this doesn't have any effect on actual data.
- path String - Azure storage location of the blobs.
- hivecatalogName string - Hive Catalog name used to mount external tables on the logs written by trino; if not specified, these tables are not created.
- hivecatalogSchema string - Schema of the above catalog to use, to mount query logs as external tables; if not specified, tables will be mounted under schema trinologs.
- partitionRetentionInDays number - Retention period for query log table partitions; this doesn't have any effect on actual data.
- path string - Azure storage location of the blobs.
- hivecatalog_name str - Hive Catalog name used to mount external tables on the logs written by trino; if not specified, these tables are not created.
- hivecatalog_schema str - Schema of the above catalog to use, to mount query logs as external tables; if not specified, tables will be mounted under schema trinologs.
- partition_retention_in_days int - Retention period for query log table partitions; this doesn't have any effect on actual data.
- path str - Azure storage location of the blobs.
- hivecatalogName String - Hive Catalog name used to mount external tables on the logs written by trino; if not specified, these tables are not created.
- hivecatalogSchema String - Schema of the above catalog to use, to mount query logs as external tables; if not specified, tables will be mounted under schema trinologs.
- partitionRetentionInDays Number - Retention period for query log table partitions; this doesn't have any effect on actual data.
- path String - Azure storage location of the blobs.
TrinoTelemetryConfigResponse, TrinoTelemetryConfigResponseArgs
- HivecatalogName string - Hive Catalog name used to mount external tables on the logs written by trino; if not specified, these tables are not created.
- HivecatalogSchema string - Schema of the above catalog to use, to mount query logs as external tables; if not specified, tables will be mounted under schema trinologs.
- PartitionRetentionInDays int - Retention period for query log table partitions; this doesn't have any effect on actual data.
- Path string - Azure storage location of the blobs.
- HivecatalogName string - Hive Catalog name used to mount external tables on the logs written by trino; if not specified, these tables are not created.
- HivecatalogSchema string - Schema of the above catalog to use, to mount query logs as external tables; if not specified, tables will be mounted under schema trinologs.
- PartitionRetentionInDays int - Retention period for query log table partitions; this doesn't have any effect on actual data.
- Path string - Azure storage location of the blobs.
- hivecatalogName String - Hive Catalog name used to mount external tables on the logs written by trino; if not specified, these tables are not created.
- hivecatalogSchema String - Schema of the above catalog to use, to mount query logs as external tables; if not specified, tables will be mounted under schema trinologs.
- partitionRetentionInDays Integer - Retention period for query log table partitions; this doesn't have any effect on actual data.
- path String - Azure storage location of the blobs.
- hivecatalogName string - Hive Catalog name used to mount external tables on the logs written by trino; if not specified, the tables are not created.
- hivecatalogSchema string - Schema of the above catalog to use, to mount query logs as external tables; if not specified, tables will be mounted under schema trinologs.
- partitionRetentionInDays number - Retention period for query log table partitions; this doesn't have any effect on actual data.
- path string
- Azure storage location of the blobs.
- hivecatalog_name str - Hive Catalog name used to mount external tables on the logs written by trino; if not specified, the tables are not created.
- hivecatalog_schema str - Schema of the above catalog to use, to mount query logs as external tables; if not specified, tables will be mounted under schema trinologs.
- partition_retention_in_days int - Retention period for query log table partitions; this doesn't have any effect on actual data.
- path str
- Azure storage location of the blobs.
- hivecatalogName String - Hive Catalog name used to mount external tables on the logs written by trino; if not specified, the tables are not created.
- hivecatalogSchema String - Schema of the above catalog to use, to mount query logs as external tables; if not specified, tables will be mounted under schema trinologs.
- partitionRetentionInDays Number - Retention period for query log table partitions; this doesn't have any effect on actual data.
- path String
- Azure storage location of the blobs.
TrinoUserPlugin, TrinoUserPluginArgs
TrinoUserPluginResponse, TrinoUserPluginResponseArgs
TrinoUserPlugins, TrinoUserPluginsArgs
- Plugins
List<Pulumi.
Azure Native. HDInsight. Inputs. Trino User Plugin> - Trino user plugins.
- Plugins
[]Trino
User Plugin - Trino user plugins.
- plugins
List<Trino
User Plugin> - Trino user plugins.
- plugins
Trino
User Plugin[] - Trino user plugins.
- plugins
Sequence[Trino
User Plugin] - Trino user plugins.
- plugins List<Property Map>
- Trino user plugins.
TrinoUserPluginsResponse, TrinoUserPluginsResponseArgs
- Plugins
List<Pulumi.
Azure Native. HDInsight. Inputs. Trino User Plugin Response> - Trino user plugins.
- Plugins
[]Trino
User Plugin Response - Trino user plugins.
- plugins
List<Trino
User Plugin Response> - Trino user plugins.
- plugins
Trino
User Plugin Response[] - Trino user plugins.
- plugins
Sequence[Trino
User Plugin Response] - Trino user plugins.
- plugins List<Property Map>
- Trino user plugins.
TrinoUserTelemetry, TrinoUserTelemetryArgs
- Storage
Pulumi.
Azure Native. HDInsight. Inputs. Trino Telemetry Config - Trino user telemetry definition.
- Storage
Trino
Telemetry Config - Trino user telemetry definition.
- storage
Trino
Telemetry Config - Trino user telemetry definition.
- storage
Trino
Telemetry Config - Trino user telemetry definition.
- storage
Trino
Telemetry Config - Trino user telemetry definition.
- storage Property Map
- Trino user telemetry definition.
TrinoUserTelemetryResponse, TrinoUserTelemetryResponseArgs
- Storage
Pulumi.
Azure Native. HDInsight. Inputs. Trino Telemetry Config Response - Trino user telemetry definition.
- Storage
Trino
Telemetry Config Response - Trino user telemetry definition.
- storage
Trino
Telemetry Config Response - Trino user telemetry definition.
- storage
Trino
Telemetry Config Response - Trino user telemetry definition.
- storage
Trino
Telemetry Config Response - Trino user telemetry definition.
- storage Property Map
- Trino user telemetry definition.
TrinoWorker, TrinoWorkerArgs
TrinoWorkerResponse, TrinoWorkerResponseArgs
UpgradeMode, UpgradeModeArgs
- STATELESS_UPDATE
- STATELESS_UPDATE
- UPDATE
- UPDATE
- LAST_STATE_UPDATE
- LAST_STATE_UPDATE
- Upgrade
Mode_STATELESS_UPDATE - STATELESS_UPDATE
- Upgrade
Mode UPDATE - UPDATE
- Upgrade
Mode_LAST_STATE_UPDATE - LAST_STATE_UPDATE
- STATELESS_UPDATE
- STATELESS_UPDATE
- UPDATE
- UPDATE
- LAST_STATE_UPDATE
- LAST_STATE_UPDATE
- STATELESS_UPDATE
- STATELESS_UPDATE
- UPDATE
- UPDATE
- LAST_STATE_UPDATE
- LAST_STATE_UPDATE
- STATELES_S_UPDATE
- STATELESS_UPDATE
- UPDATE
- UPDATE
- LAS_T_STAT_E_UPDATE
- LAST_STATE_UPDATE
- "STATELESS_UPDATE"
- STATELESS_UPDATE
- "UPDATE"
- UPDATE
- "LAST_STATE_UPDATE"
- LAST_STATE_UPDATE
Import
An existing resource can be imported using its type token, name, and identifier, e.g.
$ pulumi import azure-native:hdinsight:ClusterPoolCluster cluster1 /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.HDInsight/clusterpools/{clusterPoolName}/clusters/{clusterName}
To learn more about importing existing cloud resources, see Importing resources.
Package Details
- Repository
- Azure Native pulumi/pulumi-azure-native
- License
- Apache-2.0