vsphere.ComputeCluster

vSphere v4.9.0 published on Tuesday, Nov 28, 2023 by Pulumi
    Create ComputeCluster Resource

    new ComputeCluster(name: string, args: ComputeClusterArgs, opts?: CustomResourceOptions);
    @overload
    def ComputeCluster(resource_name: str,
                       opts: Optional[ResourceOptions] = None,
                       custom_attributes: Optional[Mapping[str, str]] = None,
                       datacenter_id: Optional[str] = None,
                       dpm_automation_level: Optional[str] = None,
                       dpm_enabled: Optional[bool] = None,
                       dpm_threshold: Optional[int] = None,
                       drs_advanced_options: Optional[Mapping[str, str]] = None,
                       drs_automation_level: Optional[str] = None,
                       drs_enable_predictive_drs: Optional[bool] = None,
                       drs_enable_vm_overrides: Optional[bool] = None,
                       drs_enabled: Optional[bool] = None,
                       drs_migration_threshold: Optional[int] = None,
                       drs_scale_descendants_shares: Optional[str] = None,
                       folder: Optional[str] = None,
                       force_evacuate_on_destroy: Optional[bool] = None,
                       ha_admission_control_failover_host_system_ids: Optional[Sequence[str]] = None,
                       ha_admission_control_host_failure_tolerance: Optional[int] = None,
                       ha_admission_control_performance_tolerance: Optional[int] = None,
                       ha_admission_control_policy: Optional[str] = None,
                       ha_admission_control_resource_percentage_auto_compute: Optional[bool] = None,
                       ha_admission_control_resource_percentage_cpu: Optional[int] = None,
                       ha_admission_control_resource_percentage_memory: Optional[int] = None,
                       ha_admission_control_slot_policy_explicit_cpu: Optional[int] = None,
                       ha_admission_control_slot_policy_explicit_memory: Optional[int] = None,
                       ha_admission_control_slot_policy_use_explicit_size: Optional[bool] = None,
                       ha_advanced_options: Optional[Mapping[str, str]] = None,
                       ha_datastore_apd_recovery_action: Optional[str] = None,
                       ha_datastore_apd_response: Optional[str] = None,
                       ha_datastore_apd_response_delay: Optional[int] = None,
                       ha_datastore_pdl_response: Optional[str] = None,
                       ha_enabled: Optional[bool] = None,
                       ha_heartbeat_datastore_ids: Optional[Sequence[str]] = None,
                       ha_heartbeat_datastore_policy: Optional[str] = None,
                       ha_host_isolation_response: Optional[str] = None,
                       ha_host_monitoring: Optional[str] = None,
                       ha_vm_component_protection: Optional[str] = None,
                       ha_vm_dependency_restart_condition: Optional[str] = None,
                       ha_vm_failure_interval: Optional[int] = None,
                       ha_vm_maximum_failure_window: Optional[int] = None,
                       ha_vm_maximum_resets: Optional[int] = None,
                       ha_vm_minimum_uptime: Optional[int] = None,
                       ha_vm_monitoring: Optional[str] = None,
                       ha_vm_restart_additional_delay: Optional[int] = None,
                       ha_vm_restart_priority: Optional[str] = None,
                       ha_vm_restart_timeout: Optional[int] = None,
                       host_cluster_exit_timeout: Optional[int] = None,
                       host_managed: Optional[bool] = None,
                       host_system_ids: Optional[Sequence[str]] = None,
                       name: Optional[str] = None,
                       proactive_ha_automation_level: Optional[str] = None,
                       proactive_ha_enabled: Optional[bool] = None,
                       proactive_ha_moderate_remediation: Optional[str] = None,
                       proactive_ha_provider_ids: Optional[Sequence[str]] = None,
                       proactive_ha_severe_remediation: Optional[str] = None,
                       tags: Optional[Sequence[str]] = None,
                       vsan_compression_enabled: Optional[bool] = None,
                       vsan_dedup_enabled: Optional[bool] = None,
                       vsan_disk_groups: Optional[Sequence[ComputeClusterVsanDiskGroupArgs]] = None,
                       vsan_dit_encryption_enabled: Optional[bool] = None,
                       vsan_dit_rekey_interval: Optional[int] = None,
                       vsan_enabled: Optional[bool] = None,
                       vsan_esa_enabled: Optional[bool] = None,
                       vsan_fault_domains: Optional[Sequence[ComputeClusterVsanFaultDomainArgs]] = None,
                       vsan_network_diagnostic_mode_enabled: Optional[bool] = None,
                       vsan_performance_enabled: Optional[bool] = None,
                       vsan_remote_datastore_ids: Optional[Sequence[str]] = None,
                       vsan_stretched_cluster: Optional[ComputeClusterVsanStretchedClusterArgs] = None,
                       vsan_unmap_enabled: Optional[bool] = None,
                       vsan_verbose_mode_enabled: Optional[bool] = None)
    @overload
    def ComputeCluster(resource_name: str,
                       args: ComputeClusterArgs,
                       opts: Optional[ResourceOptions] = None)
    func NewComputeCluster(ctx *Context, name string, args ComputeClusterArgs, opts ...ResourceOption) (*ComputeCluster, error)
    public ComputeCluster(string name, ComputeClusterArgs args, CustomResourceOptions? opts = null)
    public ComputeCluster(String name, ComputeClusterArgs args)
    public ComputeCluster(String name, ComputeClusterArgs args, CustomResourceOptions options)
    
    type: vsphere:ComputeCluster
    properties: # The arguments to resource properties.
    options: # Bag of options to control resource's behavior.
    
    
    name string
    The unique name of the resource.
    args ComputeClusterArgs
    The arguments to resource properties.
    opts CustomResourceOptions
    Bag of options to control resource's behavior.
    resource_name str
    The unique name of the resource.
    args ComputeClusterArgs
    The arguments to resource properties.
    opts ResourceOptions
    Bag of options to control resource's behavior.
    ctx Context
    Context object for the current deployment.
    name string
    The unique name of the resource.
    args ComputeClusterArgs
    The arguments to resource properties.
    opts ResourceOption
    Bag of options to control resource's behavior.
    name string
    The unique name of the resource.
    args ComputeClusterArgs
    The arguments to resource properties.
    opts CustomResourceOptions
    Bag of options to control resource's behavior.
    name String
    The unique name of the resource.
    args ComputeClusterArgs
    The arguments to resource properties.
    options CustomResourceOptions
    Bag of options to control resource's behavior.
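    As a quick usage sketch (illustrative, not part of the generated reference), the following minimal TypeScript program creates a cluster with DRS and vSphere HA enabled. The datacenter name dc-01 is a placeholder.

    import * as vsphere from "@pulumi/vsphere";

    // Look up the target datacenter by name (placeholder name).
    const datacenter = vsphere.getDatacenterOutput({ name: "dc-01" });

    // Create the cluster, enabling DRS and vSphere HA.
    const cluster = new vsphere.ComputeCluster("compute-cluster-test", {
        datacenterId: datacenter.id,
        drsEnabled: true,
        drsAutomationLevel: "fullyAutomated",
        haEnabled: true,
    });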

    ComputeCluster Resource Properties

    To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.

    Inputs

    The ComputeCluster resource accepts the following input properties:

    DatacenterId string

    The managed object ID of the datacenter to create the cluster in. Forces a new resource if changed.

    CustomAttributes Dictionary<string, string>

    A map of custom attribute IDs to attribute value strings to set for the cluster.

    NOTE: Custom attributes are unsupported on direct ESXi connections and require vCenter Server.

    DpmAutomationLevel string

    The automation level for host power operations in this cluster. Can be one of manual or automated. Default: manual.

    DpmEnabled bool

    Enable DPM support for DRS in this cluster. Requires drs_enabled to be true in order to be effective. Default: false.

    DpmThreshold int

    A value between 1 and 5 indicating the threshold of load within the cluster that influences host power operations. This affects both power on and power off operations - a lower setting will tolerate more of a surplus/deficit than a higher setting. Default: 3.

    DrsAdvancedOptions Dictionary<string, string>

    A key/value map that specifies advanced options for DRS and DPM.

    DrsAutomationLevel string

    The default automation level for all virtual machines in this cluster. Can be one of manual, partiallyAutomated, or fullyAutomated. Default: manual.

    DrsEnablePredictiveDrs bool

    When true, enables DRS to use data from vRealize Operations Manager to make proactive DRS recommendations. *

    DrsEnableVmOverrides bool

    Allow individual DRS overrides to be set for virtual machines in the cluster. Default: true.

    DrsEnabled bool

    Enable DRS for this cluster. Default: false.

    DrsMigrationThreshold int

    A value between 1 and 5 indicating the threshold of imbalance tolerated between hosts. A lower setting will tolerate more imbalance while a higher setting will tolerate less. Default: 3.

    DrsScaleDescendantsShares string

    Enable scalable shares for all resource pools in the cluster. Can be one of disabled or scaleCpuAndMemoryShares. Default: disabled.

    Folder string

    The relative path to a folder to put this cluster in. This is a path relative to the datacenter you are deploying the cluster to. For example, for the dc1 datacenter and a provided folder of foo/bar, the provider will place a cluster named compute-cluster-test in a host folder located at /dc1/host/foo/bar, with the final inventory path being /dc1/host/foo/bar/compute-cluster-test.

    ForceEvacuateOnDestroy bool

    When destroying the resource, setting this to true will auto-remove any hosts that are currently a member of the cluster, as if they were removed by taking their entry out of host_system_ids (see below). This is an advanced option and should only be used for testing. Default: false.

    NOTE: Do not set force_evacuate_on_destroy in production operation as there are many pitfalls to its use when working with complex cluster configurations. Depending on the virtual machines currently on the cluster, and your DRS and HA settings, the full host evacuation may fail. Instead, incrementally remove hosts from your configuration by adjusting the contents of the host_system_ids attribute.

    HaAdmissionControlFailoverHostSystemIds List<string>

    Defines the managed object IDs of hosts to use as dedicated failover hosts. These hosts are kept as available as possible - admission control will block access to the host, and DRS will ignore the host when making recommendations.

    HaAdmissionControlHostFailureTolerance int

    The maximum number of failed hosts that admission control tolerates when making decisions on whether to permit virtual machine operations. The maximum is one less than the number of hosts in the cluster. Default: 1. *

    HaAdmissionControlPerformanceTolerance int

    The percentage of resource reduction that a cluster of virtual machines can tolerate in case of a failover. A value of 0 produces warnings only, whereas a value of 100 disables the setting. Default: 100 (disabled).

    HaAdmissionControlPolicy string

    The type of admission control policy to use with vSphere HA. Can be one of resourcePercentage, slotPolicy, failoverHosts, or disabled. Default: resourcePercentage.

    HaAdmissionControlResourcePercentageAutoCompute bool

    Automatically determine available resource percentages by subtracting the average number of host resources represented by the ha_admission_control_host_failure_tolerance setting from the total amount of resources in the cluster. Disable to supply user-defined values. Default: true. *

    HaAdmissionControlResourcePercentageCpu int

    Controls the user-defined percentage of CPU resources in the cluster to reserve for failover. Default: 100.

    HaAdmissionControlResourcePercentageMemory int

    Controls the user-defined percentage of memory resources in the cluster to reserve for failover. Default: 100.

    HaAdmissionControlSlotPolicyExplicitCpu int

    Controls the user-defined CPU slot size, in MHz. Default: 32.

    HaAdmissionControlSlotPolicyExplicitMemory int

    Controls the user-defined memory slot size, in MB. Default: 100.

    HaAdmissionControlSlotPolicyUseExplicitSize bool

    Controls whether or not you wish to supply explicit values for the CPU and memory slot sizes. The default is false, which tells vSphere to gather an automatic average based on all powered-on virtual machines currently in the cluster.

    HaAdvancedOptions Dictionary<string, string>

    A key/value map that specifies advanced options for vSphere HA.

    HaDatastoreApdRecoveryAction string

    Controls the action to take on virtual machines if an APD status on an affected datastore clears in the middle of an APD event. Can be one of none or reset. Default: none. *

    HaDatastoreApdResponse string

    Controls the action to take on virtual machines when the cluster has detected loss to all paths to a relevant datastore. Can be one of disabled, warning, restartConservative, or restartAggressive. Default: disabled. *

    HaDatastoreApdResponseDelay int

    The time, in seconds, to wait after an APD timeout event to run the response action defined in ha_datastore_apd_response. Default: 180 seconds (3 minutes). *

    HaDatastorePdlResponse string

    Controls the action to take on virtual machines when the cluster has detected a permanent device loss to a relevant datastore. Can be one of disabled, warning, or restartAggressive. Default: disabled. *

    HaEnabled bool

    Enable vSphere HA for this cluster. Default: false.

    HaHeartbeatDatastoreIds List<string>

    The list of managed object IDs for preferred datastores to use for HA heartbeating. This setting is only useful when ha_heartbeat_datastore_policy is set to either userSelectedDs or allFeasibleDsWithUserPreference.

    HaHeartbeatDatastorePolicy string

    The selection policy for HA heartbeat datastores. Can be one of allFeasibleDs, userSelectedDs, or allFeasibleDsWithUserPreference. Default: allFeasibleDsWithUserPreference.

    HaHostIsolationResponse string

    The action to take on virtual machines when a host has detected that it has been isolated from the rest of the cluster. Can be one of none, powerOff, or shutdown. Default: none.

    HaHostMonitoring string

    Global setting that controls whether vSphere HA remediates virtual machines on host failure. Can be one of enabled or disabled. Default: enabled.

    HaVmComponentProtection string

    Controls vSphere VM component protection for virtual machines in this cluster. Can be one of enabled or disabled. Default: enabled. *

    HaVmDependencyRestartCondition string

    The condition used to determine whether or not virtual machines in a certain restart priority class are online, allowing HA to move on to restarting virtual machines on the next priority. Can be one of none, poweredOn, guestHbStatusGreen, or appHbStatusGreen. The default is none, which means that a virtual machine is considered ready immediately after a host is found to start it on. *

    HaVmFailureInterval int

    The time interval, in seconds, within which a heartbeat must be received from a virtual machine; if no heartbeat is received within this interval, the virtual machine is marked as failed. Default: 30 seconds.

    HaVmMaximumFailureWindow int

    The time, in seconds, for the reset window in which ha_vm_maximum_resets can operate. When this window expires, no more resets are attempted regardless of the setting configured in ha_vm_maximum_resets. -1 means no window, meaning an unlimited reset time is allotted. Default: -1 (no window).

    HaVmMaximumResets int

    The maximum number of resets that HA will perform on a virtual machine when responding to a failure event. Default: 3.

    HaVmMinimumUptime int

    The time, in seconds, that HA waits after powering on a virtual machine before monitoring for heartbeats. Default: 120 seconds (2 minutes).

    HaVmMonitoring string

    The type of virtual machine monitoring to use when HA is enabled in the cluster. Can be one of vmMonitoringDisabled, vmMonitoringOnly, or vmAndAppMonitoring. Default: vmMonitoringDisabled.

    HaVmRestartAdditionalDelay int

    Additional delay, in seconds, after ready condition is met. A VM is considered ready at this point. Default: 0 seconds (no delay). *

    HaVmRestartPriority string

    The default restart priority for affected virtual machines when vSphere detects a host failure. Can be one of lowest, low, medium, high, or highest. Default: medium.

    HaVmRestartTimeout int

    The maximum time, in seconds, that vSphere HA will wait for virtual machines in one priority to be ready before proceeding with the next priority. Default: 600 seconds (10 minutes). *

    HostClusterExitTimeout int

    The timeout, in seconds, for each host maintenance mode operation when removing hosts from a cluster. Default: 3600 seconds (1 hour).

    HostManaged bool

    Can be set to true if compute cluster membership will be managed through the host resource rather than the compute_cluster resource. Conflicts with: host_system_ids.

    HostSystemIds List<string>

    The managed object IDs of the hosts to put in the cluster. Conflicts with: host_managed.

    Name string

    The name of the cluster.

    ProactiveHaAutomationLevel string

    Determines how the host quarantine, maintenance mode, or virtual machine migration recommendations made by proactive HA are to be handled. Can be one of Automated or Manual. Default: Manual. *

    ProactiveHaEnabled bool

    Enables Proactive HA. Default: false. *

    ProactiveHaModerateRemediation string

    The configured remediation for moderately degraded hosts. Can be one of MaintenanceMode or QuarantineMode. Note that this cannot be set to MaintenanceMode when proactive_ha_severe_remediation is set to QuarantineMode. Default: QuarantineMode. *

    ProactiveHaProviderIds List<string>

    The list of IDs for health update providers configured for this cluster. *

    ProactiveHaSevereRemediation string

    The configured remediation for severely degraded hosts. Can be one of MaintenanceMode or QuarantineMode. Note that this cannot be set to QuarantineMode when proactive_ha_moderate_remediation is set to MaintenanceMode. Default: QuarantineMode. *

    Tags List<string>

    The IDs of any tags to attach to this resource.

    VsanCompressionEnabled bool

    Enables vSAN compression on the cluster.

    VsanDedupEnabled bool

    Enables vSAN deduplication on the cluster. Cannot be enabled independently: when vSAN deduplication is enabled, vSAN compression must also be enabled.

    VsanDiskGroups List<Pulumi.VSphere.Inputs.ComputeClusterVsanDiskGroup>

    Represents the configuration of a host disk group in the cluster.

    VsanDitEncryptionEnabled bool

    Enables vSAN data-in-transit encryption on the cluster. Conflicts with vsan_remote_datastore_ids, i.e., vSAN data-in-transit feature cannot be enabled with the vSAN HCI Mesh feature at the same time.

    VsanDitRekeyInterval int

    Indicates the rekey interval in minutes for data-in-transit encryption. The valid rekey interval is 30 to 10800 (feature defaults to 1440). Conflicts with vsan_remote_datastore_ids.

    VsanEnabled bool

    Enables vSAN on the cluster.

    VsanEsaEnabled bool

    Enables vSAN ESA on the cluster.

    VsanFaultDomains List<Pulumi.VSphere.Inputs.ComputeClusterVsanFaultDomain>

    Configurations of vSAN fault domains.

    VsanNetworkDiagnosticModeEnabled bool

    Enables network diagnostic mode for vSAN performance service on the cluster.

    VsanPerformanceEnabled bool

    Enables vSAN performance service on the cluster. Default: true.

    VsanRemoteDatastoreIds List<string>

    The remote vSAN datastore IDs to be mounted to this cluster. Conflicts with vsan_dit_encryption_enabled and vsan_dit_rekey_interval, i.e., vSAN HCI Mesh feature cannot be enabled with data-in-transit encryption feature at the same time.

    VsanStretchedCluster Pulumi.VSphere.Inputs.ComputeClusterVsanStretchedCluster

    Configurations of vSAN stretched cluster.

    VsanUnmapEnabled bool

    Enables vSAN unmap on the cluster. You must explicitly enable vSAN unmap when you enable vSAN ESA on the cluster.

    VsanVerboseModeEnabled bool

    Enables verbose mode for vSAN performance service on the cluster.
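    To illustrate the admission control inputs above, this TypeScript sketch switches a cluster to the slot policy with explicit slot sizes. It reuses the datacenter lookup from the earlier example; the slot values are placeholders, not recommendations.

    const haCluster = new vsphere.ComputeCluster("ha-cluster", {
        datacenterId: datacenter.id,
        haEnabled: true,
        // Use slotPolicy instead of the default resourcePercentage policy.
        haAdmissionControlPolicy: "slotPolicy",
        // Supply explicit slot sizes rather than the computed average.
        haAdmissionControlSlotPolicyUseExplicitSize: true,
        haAdmissionControlSlotPolicyExplicitCpu: 500, // MHz
        haAdmissionControlSlotPolicyExplicitMemory: 1024, // MB
    });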

    DatacenterId string

    The managed object ID of the datacenter to create the cluster in. Forces a new resource if changed.

    CustomAttributes map[string]string

    A map of custom attribute IDs to attribute value strings to set for the cluster.

    NOTE: Custom attributes are unsupported on direct ESXi connections and require vCenter Server.

    DpmAutomationLevel string

    The automation level for host power operations in this cluster. Can be one of manual or automated. Default: manual.

    DpmEnabled bool

    Enable DPM support for DRS in this cluster. Requires drs_enabled to be true in order to be effective. Default: false.

    DpmThreshold int

    A value between 1 and 5 indicating the threshold of load within the cluster that influences host power operations. This affects both power on and power off operations - a lower setting will tolerate more of a surplus/deficit than a higher setting. Default: 3.

    DrsAdvancedOptions map[string]string

    A key/value map that specifies advanced options for DRS and DPM.

    DrsAutomationLevel string

    The default automation level for all virtual machines in this cluster. Can be one of manual, partiallyAutomated, or fullyAutomated. Default: manual.

    DrsEnablePredictiveDrs bool

    When true, enables DRS to use data from vRealize Operations Manager to make proactive DRS recommendations. *

    DrsEnableVmOverrides bool

    Allow individual DRS overrides to be set for virtual machines in the cluster. Default: true.

    DrsEnabled bool

    Enable DRS for this cluster. Default: false.

    DrsMigrationThreshold int

    A value between 1 and 5 indicating the threshold of imbalance tolerated between hosts. A lower setting will tolerate more imbalance while a higher setting will tolerate less. Default: 3.

    DrsScaleDescendantsShares string

    Enable scalable shares for all resource pools in the cluster. Can be one of disabled or scaleCpuAndMemoryShares. Default: disabled.

    Folder string

    The relative path to a folder to put this cluster in. This is a path relative to the datacenter you are deploying the cluster to. For example, for the dc1 datacenter and a provided folder of foo/bar, the provider will place a cluster named compute-cluster-test in a host folder located at /dc1/host/foo/bar, with the final inventory path being /dc1/host/foo/bar/compute-cluster-test.

    ForceEvacuateOnDestroy bool

    When destroying the resource, setting this to true will auto-remove any hosts that are currently a member of the cluster, as if they were removed by taking their entry out of host_system_ids (see below). This is an advanced option and should only be used for testing. Default: false.

    NOTE: Do not set force_evacuate_on_destroy in production operation as there are many pitfalls to its use when working with complex cluster configurations. Depending on the virtual machines currently on the cluster, and your DRS and HA settings, the full host evacuation may fail. Instead, incrementally remove hosts from your configuration by adjusting the contents of the host_system_ids attribute.

    HaAdmissionControlFailoverHostSystemIds []string

    Defines the managed object IDs of hosts to use as dedicated failover hosts. These hosts are kept as available as possible - admission control will block access to the host, and DRS will ignore the host when making recommendations.

    HaAdmissionControlHostFailureTolerance int

    The maximum number of failed hosts that admission control tolerates when making decisions on whether to permit virtual machine operations. The maximum is one less than the number of hosts in the cluster. Default: 1. *

    HaAdmissionControlPerformanceTolerance int

    The percentage of resource reduction that a cluster of virtual machines can tolerate in case of a failover. A value of 0 produces warnings only, whereas a value of 100 disables the setting. Default: 100 (disabled).

    HaAdmissionControlPolicy string

    The type of admission control policy to use with vSphere HA. Can be one of resourcePercentage, slotPolicy, failoverHosts, or disabled. Default: resourcePercentage.

    HaAdmissionControlResourcePercentageAutoCompute bool

    Automatically determine available resource percentages by subtracting the average number of host resources represented by the ha_admission_control_host_failure_tolerance setting from the total amount of resources in the cluster. Disable to supply user-defined values. Default: true. *

    HaAdmissionControlResourcePercentageCpu int

    Controls the user-defined percentage of CPU resources in the cluster to reserve for failover. Default: 100.

    HaAdmissionControlResourcePercentageMemory int

    Controls the user-defined percentage of memory resources in the cluster to reserve for failover. Default: 100.

    HaAdmissionControlSlotPolicyExplicitCpu int

    Controls the user-defined CPU slot size, in MHz. Default: 32.

    HaAdmissionControlSlotPolicyExplicitMemory int

    Controls the user-defined memory slot size, in MB. Default: 100.

    HaAdmissionControlSlotPolicyUseExplicitSize bool

    Controls whether or not you wish to supply explicit values for the CPU and memory slot sizes. The default is false, which tells vSphere to gather an automatic average based on all powered-on virtual machines currently in the cluster.

    HaAdvancedOptions map[string]string

    A key/value map that specifies advanced options for vSphere HA.

    HaDatastoreApdRecoveryAction string

    Controls the action to take on virtual machines if an APD status on an affected datastore clears in the middle of an APD event. Can be one of none or reset. Default: none. *

    HaDatastoreApdResponse string

    Controls the action to take on virtual machines when the cluster has detected loss to all paths to a relevant datastore. Can be one of disabled, warning, restartConservative, or restartAggressive. Default: disabled. *

    HaDatastoreApdResponseDelay int

    The time, in seconds, to wait after an APD timeout event to run the response action defined in ha_datastore_apd_response. Default: 180 seconds (3 minutes). *

    HaDatastorePdlResponse string

    Controls the action to take on virtual machines when the cluster has detected a permanent device loss to a relevant datastore. Can be one of disabled, warning, or restartAggressive. Default: disabled. *

    HaEnabled bool

    Enable vSphere HA for this cluster. Default: false.

    HaHeartbeatDatastoreIds []string

    The list of managed object IDs for preferred datastores to use for HA heartbeating. This setting is only useful when ha_heartbeat_datastore_policy is set to either userSelectedDs or allFeasibleDsWithUserPreference.

    HaHeartbeatDatastorePolicy string

    The selection policy for HA heartbeat datastores. Can be one of allFeasibleDs, userSelectedDs, or allFeasibleDsWithUserPreference. Default: allFeasibleDsWithUserPreference.

    HaHostIsolationResponse string

    The action to take on virtual machines when a host has detected that it has been isolated from the rest of the cluster. Can be one of none, powerOff, or shutdown. Default: none.

    HaHostMonitoring string

    Global setting that controls whether vSphere HA remediates virtual machines on host failure. Can be one of enabled or disabled. Default: enabled.

    HaVmComponentProtection string

    Controls vSphere VM component protection for virtual machines in this cluster. Can be one of enabled or disabled. Default: enabled. *

    HaVmDependencyRestartCondition string

    The condition used to determine whether or not virtual machines in a certain restart priority class are online, allowing HA to move on to restarting virtual machines on the next priority. Can be one of none, poweredOn, guestHbStatusGreen, or appHbStatusGreen. The default is none, which means that a virtual machine is considered ready immediately after a host is found to start it on. *

    HaVmFailureInterval int

    The time interval, in seconds, within which a heartbeat must be received from a virtual machine; if no heartbeat is received within this interval, the virtual machine is marked as failed. Default: 30 seconds.

    HaVmMaximumFailureWindow int

    The time, in seconds, for the reset window in which ha_vm_maximum_resets can operate. When this window expires, no more resets are attempted regardless of the setting configured in ha_vm_maximum_resets. -1 means no window, meaning an unlimited reset time is allotted. Default: -1 (no window).

    HaVmMaximumResets int

    The maximum number of resets that HA will perform on a virtual machine when responding to a failure event. Default: 3.

    HaVmMinimumUptime int

    The time, in seconds, that HA waits after powering on a virtual machine before monitoring for heartbeats. Default: 120 seconds (2 minutes).

    HaVmMonitoring string

    The type of virtual machine monitoring to use when HA is enabled in the cluster. Can be one of vmMonitoringDisabled, vmMonitoringOnly, or vmAndAppMonitoring. Default: vmMonitoringDisabled.

    HaVmRestartAdditionalDelay int

    Additional delay, in seconds, after ready condition is met. A VM is considered ready at this point. Default: 0 seconds (no delay). *

    HaVmRestartPriority string

    The default restart priority for affected virtual machines when vSphere detects a host failure. Can be one of lowest, low, medium, high, or highest. Default: medium.

    HaVmRestartTimeout int

    The maximum time, in seconds, that vSphere HA will wait for virtual machines in one priority to be ready before proceeding with the next priority. Default: 600 seconds (10 minutes). *

    HostClusterExitTimeout int

    The timeout, in seconds, for each host maintenance mode operation when removing hosts from a cluster. Default: 3600 seconds (1 hour).

    HostManaged bool

    Can be set to true if compute cluster membership will be managed through the host resource rather than the compute_cluster resource. Conflicts with: host_system_ids.

    HostSystemIds []string

    The managed object IDs of the hosts to put in the cluster. Conflicts with: host_managed.

    Name string

    The name of the cluster.

    ProactiveHaAutomationLevel string

    Determines how the host quarantine, maintenance mode, or virtual machine migration recommendations made by proactive HA are to be handled. Can be one of Automated or Manual. Default: Manual. *

    ProactiveHaEnabled bool

    Enables Proactive HA. Default: false. *

    ProactiveHaModerateRemediation string

    The configured remediation for moderately degraded hosts. Can be one of MaintenanceMode or QuarantineMode. Note that this cannot be set to MaintenanceMode when proactive_ha_severe_remediation is set to QuarantineMode. Default: QuarantineMode. *

    ProactiveHaProviderIds []string

    The list of IDs for health update providers configured for this cluster. *

    ProactiveHaSevereRemediation string

    The configured remediation for severely degraded hosts. Can be one of MaintenanceMode or QuarantineMode. Note that this cannot be set to QuarantineMode when proactive_ha_moderate_remediation is set to MaintenanceMode. Default: QuarantineMode. *

    Tags []string

    The IDs of any tags to attach to this resource.

    VsanCompressionEnabled bool

    Enables vSAN compression on the cluster.

    VsanDedupEnabled bool

    Enables vSAN deduplication on the cluster. Cannot be enabled independently: when vSAN deduplication is enabled, vSAN compression must also be enabled.

    VsanDiskGroups []ComputeClusterVsanDiskGroupArgs

    Represents the configuration of a host disk group in the cluster.

    VsanDitEncryptionEnabled bool

    Enables vSAN data-in-transit encryption on the cluster. Conflicts with vsan_remote_datastore_ids, i.e., vSAN data-in-transit feature cannot be enabled with the vSAN HCI Mesh feature at the same time.

    VsanDitRekeyInterval int

    Indicates the rekey interval in minutes for data-in-transit encryption. The valid rekey interval is 30 to 10800 (feature defaults to 1440). Conflicts with vsan_remote_datastore_ids.

    VsanEnabled bool

    Enables vSAN on the cluster.

    VsanEsaEnabled bool

    Enables vSAN ESA on the cluster.

    VsanFaultDomains []ComputeClusterVsanFaultDomainArgs

    Configurations of vSAN fault domains.

    VsanNetworkDiagnosticModeEnabled bool

    Enables network diagnostic mode for vSAN performance service on the cluster.

    VsanPerformanceEnabled bool

    Enables vSAN performance service on the cluster. Default: true.

    VsanRemoteDatastoreIds []string

    The remote vSAN datastore IDs to be mounted to this cluster. Conflicts with vsan_dit_encryption_enabled and vsan_dit_rekey_interval, i.e., vSAN HCI Mesh feature cannot be enabled with data-in-transit encryption feature at the same time.

    VsanStretchedCluster ComputeClusterVsanStretchedClusterArgs

    Configurations of vSAN stretched cluster.

    VsanUnmapEnabled bool

    Enables vSAN unmap on the cluster. You must explicitly enable vSAN unmap when you enable vSAN ESA on the cluster.

    VsanVerboseModeEnabled bool

    Enables verbose mode for vSAN performance service on the cluster.
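    A sketch of managing cluster membership through host_system_ids (TypeScript, assuming the datacenter lookup from the first example; host names are placeholders). It resolves host managed object IDs with the provider's getHost data source.

    // Resolve the managed object IDs of the member hosts.
    const hostNames = ["esxi-01.example.com", "esxi-02.example.com"];
    const hosts = hostNames.map(name =>
        vsphere.getHostOutput({ name: name, datacenterId: datacenter.id }));

    const memberCluster = new vsphere.ComputeCluster("member-cluster", {
        datacenterId: datacenter.id,
        // host_system_ids conflicts with host_managed; set only one of them.
        hostSystemIds: hosts.map(h => h.id),
    });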

    datacenterId String

    The managed object ID of the datacenter to create the cluster in. Forces a new resource if changed.

    customAttributes Map<String,String>

    A map of custom attribute IDs to attribute value strings to set for the cluster.

    NOTE: Custom attributes are unsupported on direct ESXi connections and require vCenter Server.

    dpmAutomationLevel String

    The automation level for host power operations in this cluster. Can be one of manual or automated. Default: manual.

    dpmEnabled Boolean

    Enable DPM support for DRS in this cluster. Requires drs_enabled to be true in order to be effective. Default: false.

    dpmThreshold Integer

    A value between 1 and 5 indicating the threshold of load within the cluster that influences host power operations. This affects both power on and power off operations - a lower setting will tolerate more of a surplus/deficit than a higher setting. Default: 3.

    drsAdvancedOptions Map<String,String>

    A key/value map that specifies advanced options for DRS and DPM.

    drsAutomationLevel String

    The default automation level for all virtual machines in this cluster. Can be one of manual, partiallyAutomated, or fullyAutomated. Default: manual.

    drsEnablePredictiveDrs Boolean

    When true, enables DRS to use data from vRealize Operations Manager to make proactive DRS recommendations. *

    drsEnableVmOverrides Boolean

    Allow individual DRS overrides to be set for virtual machines in the cluster. Default: true.

    drsEnabled Boolean

    Enable DRS for this cluster. Default: false.

    drsMigrationThreshold Integer

    A value between 1 and 5 indicating the threshold of imbalance tolerated between hosts. A lower setting will tolerate more imbalance while a higher setting will tolerate less. Default: 3.

    drsScaleDescendantsShares String

    Enable scalable shares for all resource pools in the cluster. Can be one of disabled or scaleCpuAndMemoryShares. Default: disabled.

    folder String

    The relative path to a folder to put this cluster in. This is a path relative to the datacenter you are deploying the cluster to. For example, for the dc1 datacenter and a provided folder of foo/bar, the provider will place a cluster named compute-cluster-test in a host folder located at /dc1/host/foo/bar, with the final inventory path being /dc1/host/foo/bar/compute-cluster-test.

    forceEvacuateOnDestroy Boolean

    When destroying the resource, setting this to true will auto-remove any hosts that are currently a member of the cluster, as if they were removed by taking their entry out of host_system_ids (see below). This is an advanced option and should only be used for testing. Default: false.

    NOTE: Do not set force_evacuate_on_destroy in production operation as there are many pitfalls to its use when working with complex cluster configurations. Depending on the virtual machines currently on the cluster, and your DRS and HA settings, the full host evacuation may fail. Instead, incrementally remove hosts from your configuration by adjusting the contents of the host_system_ids attribute.

    haAdmissionControlFailoverHostSystemIds List<String>

    Defines the managed object IDs of hosts to use as dedicated failover hosts. These hosts are kept as available as possible - admission control will block access to the host, and DRS will ignore the host when making recommendations.

    haAdmissionControlHostFailureTolerance Integer

    The maximum number of failed hosts that admission control tolerates when making decisions on whether to permit virtual machine operations. The maximum is one less than the number of hosts in the cluster. Default: 1. *

    haAdmissionControlPerformanceTolerance Integer

    The percentage of resource reduction that a cluster of virtual machines can tolerate in case of a failover. A value of 0 produces warnings only, whereas a value of 100 disables the setting. Default: 100 (disabled).

    haAdmissionControlPolicy String

    The type of admission control policy to use with vSphere HA. Can be one of resourcePercentage, slotPolicy, failoverHosts, or disabled. Default: resourcePercentage.

    haAdmissionControlResourcePercentageAutoCompute Boolean

    Automatically determine available resource percentages by subtracting the average number of host resources represented by the ha_admission_control_host_failure_tolerance setting from the total amount of resources in the cluster. Disable to supply user-defined values. Default: true. *

    haAdmissionControlResourcePercentageCpu Integer

    Controls the user-defined percentage of CPU resources in the cluster to reserve for failover. Default: 100.

    haAdmissionControlResourcePercentageMemory Integer

    Controls the user-defined percentage of memory resources in the cluster to reserve for failover. Default: 100.

    haAdmissionControlSlotPolicyExplicitCpu Integer

    Controls the user-defined CPU slot size, in MHz. Default: 32.

    haAdmissionControlSlotPolicyExplicitMemory Integer

    Controls the user-defined memory slot size, in MB. Default: 100.

    haAdmissionControlSlotPolicyUseExplicitSize Boolean

    Controls whether or not you wish to supply explicit values for the CPU and memory slot sizes. The default is false, which tells vSphere to gather an automatic average based on all powered-on virtual machines currently in the cluster.

    haAdvancedOptions Map<String,String>

    A key/value map that specifies advanced options for vSphere HA.

    haDatastoreApdRecoveryAction String

    Controls the action to take on virtual machines if an APD status on an affected datastore clears in the middle of an APD event. Can be one of none or reset. Default: none. *

    haDatastoreApdResponse String

    Controls the action to take on virtual machines when the cluster has detected loss to all paths to a relevant datastore. Can be one of disabled, warning, restartConservative, or restartAggressive. Default: disabled. *

    haDatastoreApdResponseDelay Integer

    The time, in seconds, to wait after an APD timeout event to run the response action defined in ha_datastore_apd_response. Default: 180 seconds (3 minutes). *

    haDatastorePdlResponse String

    Controls the action to take on virtual machines when the cluster has detected a permanent device loss to a relevant datastore. Can be one of disabled, warning, or restartAggressive. Default: disabled. *

    haEnabled Boolean

    Enable vSphere HA for this cluster. Default: false.

    haHeartbeatDatastoreIds List<String>

    The list of managed object IDs for preferred datastores to use for HA heartbeating. This setting is only useful when ha_heartbeat_datastore_policy is set to either userSelectedDs or allFeasibleDsWithUserPreference.

    haHeartbeatDatastorePolicy String

    The selection policy for HA heartbeat datastores. Can be one of allFeasibleDs, userSelectedDs, or allFeasibleDsWithUserPreference. Default: allFeasibleDsWithUserPreference.

    haHostIsolationResponse String

    The action to take on virtual machines when a host has detected that it has been isolated from the rest of the cluster. Can be one of none, powerOff, or shutdown. Default: none.

    haHostMonitoring String

    Global setting that controls whether vSphere HA remediates virtual machines on host failure. Can be one of enabled or disabled. Default: enabled.

    haVmComponentProtection String

    Controls vSphere VM component protection for virtual machines in this cluster. Can be one of enabled or disabled. Default: enabled. *

    haVmDependencyRestartCondition String

    The condition used to determine whether or not virtual machines in a certain restart priority class are online, allowing HA to move on to restarting virtual machines on the next priority. Can be one of none, poweredOn, guestHbStatusGreen, or appHbStatusGreen. The default is none, which means that a virtual machine is considered ready immediately after a host is found to start it on. *

    haVmFailureInterval Integer

    The time interval, in seconds, within which a heartbeat must be received from a virtual machine; if no heartbeat is received within this interval, the virtual machine is marked as failed. Default: 30 seconds.

    haVmMaximumFailureWindow Integer

    The time, in seconds, for the reset window in which ha_vm_maximum_resets can operate. When this window expires, no more resets are attempted regardless of the setting configured in ha_vm_maximum_resets. -1 means no window, meaning an unlimited reset time is allotted. Default: -1 (no window).

    haVmMaximumResets Integer

    The maximum number of resets that HA will perform on a virtual machine when responding to a failure event. Default: 3.

    haVmMinimumUptime Integer

    The time, in seconds, that HA waits after powering on a virtual machine before monitoring for heartbeats. Default: 120 seconds (2 minutes).

    haVmMonitoring String

    The type of virtual machine monitoring to use when HA is enabled in the cluster. Can be one of vmMonitoringDisabled, vmMonitoringOnly, or vmAndAppMonitoring. Default: vmMonitoringDisabled.

    haVmRestartAdditionalDelay Integer

    Additional delay, in seconds, after ready condition is met. A VM is considered ready at this point. Default: 0 seconds (no delay). *

    haVmRestartPriority String

    The default restart priority for affected virtual machines when vSphere detects a host failure. Can be one of lowest, low, medium, high, or highest. Default: medium.

    haVmRestartTimeout Integer

    The maximum time, in seconds, that vSphere HA will wait for virtual machines in one priority to be ready before proceeding with the next priority. Default: 600 seconds (10 minutes). *

    hostClusterExitTimeout Integer

    The timeout, in seconds, for each host maintenance mode operation when removing hosts from a cluster. Default: 3600 seconds (1 hour).

    hostManaged Boolean

    Can be set to true if compute cluster membership will be managed through the host resource rather than the compute_cluster resource. Conflicts with: host_system_ids.

    hostSystemIds List<String>

    The managed object IDs of the hosts to put in the cluster. Conflicts with: host_managed.

    name String

    The name of the cluster.

    proactiveHaAutomationLevel String

    Determines how the host quarantine, maintenance mode, or virtual machine migration recommendations made by proactive HA are to be handled. Can be one of Automated or Manual. Default: Manual. *

    proactiveHaEnabled Boolean

    Enables Proactive HA. Default: false. *

    proactiveHaModerateRemediation String

    The configured remediation for moderately degraded hosts. Can be one of MaintenanceMode or QuarantineMode. Note that this cannot be set to MaintenanceMode when proactive_ha_severe_remediation is set to QuarantineMode. Default: QuarantineMode. *

    proactiveHaProviderIds List<String>

    The list of IDs for health update providers configured for this cluster. *

    proactiveHaSevereRemediation String

    The configured remediation for severely degraded hosts. Can be one of MaintenanceMode or QuarantineMode. Note that this cannot be set to QuarantineMode when proactive_ha_moderate_remediation is set to MaintenanceMode. Default: QuarantineMode. *

    tags List<String>

    The IDs of any tags to attach to this resource.

    vsanCompressionEnabled Boolean

    Enables vSAN compression on the cluster.

    vsanDedupEnabled Boolean

    Enables vSAN deduplication on the cluster. Cannot be enabled independently: when vSAN deduplication is enabled, vSAN compression must also be enabled.

    vsanDiskGroups List<ComputeClusterVsanDiskGroup>

    Represents the configuration of a host disk group in the cluster.

    vsanDitEncryptionEnabled Boolean

    Enables vSAN data-in-transit encryption on the cluster. Conflicts with vsan_remote_datastore_ids, i.e., vSAN data-in-transit feature cannot be enabled with the vSAN HCI Mesh feature at the same time.

    vsanDitRekeyInterval Integer

    Indicates the rekey interval in minutes for data-in-transit encryption. The valid rekey interval is 30 to 10800 (feature defaults to 1440). Conflicts with vsan_remote_datastore_ids.

    vsanEnabled Boolean

    Enables vSAN on the cluster.

    vsanEsaEnabled Boolean

    Enables vSAN ESA on the cluster.

    vsanFaultDomains List<ComputeClusterVsanFaultDomain>

    Configurations of vSAN fault domains.

    vsanNetworkDiagnosticModeEnabled Boolean

    Enables network diagnostic mode for vSAN performance service on the cluster.

    vsanPerformanceEnabled Boolean

    Enables vSAN performance service on the cluster. Default: true.

    vsanRemoteDatastoreIds List<String>

    The remote vSAN datastore IDs to be mounted to this cluster. Conflicts with vsan_dit_encryption_enabled and vsan_dit_rekey_interval, i.e., vSAN HCI Mesh feature cannot be enabled with data-in-transit encryption feature at the same time.

    vsanStretchedCluster ComputeClusterVsanStretchedCluster

    Configurations of vSAN stretched cluster.

    vsanUnmapEnabled Boolean

    Enables vSAN unmap on the cluster. You must explicitly enable vSAN unmap when you enable vSAN ESA on the cluster.

    vsanVerboseModeEnabled Boolean

    Enables verbose mode for vSAN performance service on the cluster.
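    The HA VM monitoring inputs combine as in this TypeScript sketch (values are illustrative, assuming the datacenter lookup from the first example):

    const monitoredCluster = new vsphere.ComputeCluster("monitored-cluster", {
        datacenterId: datacenter.id,
        haEnabled: true,
        // Monitor both VM heartbeats and application heartbeats.
        haVmMonitoring: "vmAndAppMonitoring",
        // Allow 60 seconds between heartbeats before marking a VM as failed.
        haVmFailureInterval: 60,
        // Permit up to 5 resets per VM within the reset window.
        haVmMaximumResets: 5,
    });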

    datacenterId string

    The managed object ID of the datacenter to create the cluster in. Forces a new resource if changed.

    customAttributes {[key: string]: string}

    A map of custom attribute IDs to attribute value strings to set for the cluster.

    NOTE: Custom attributes are unsupported on direct ESXi connections and require vCenter Server.

    dpmAutomationLevel string

    The automation level for host power operations in this cluster. Can be one of manual or automated. Default: manual.

    dpmEnabled boolean

    Enable DPM support for DRS in this cluster. Requires drs_enabled to be true in order to be effective. Default: false.

    dpmThreshold number

    A value between 1 and 5 indicating the threshold of load within the cluster that influences host power operations. This affects both power on and power off operations - a lower setting will tolerate more of a surplus/deficit than a higher setting. Default: 3.

    drsAdvancedOptions {[key: string]: string}

    A key/value map that specifies advanced options for DRS and DPM.

    drsAutomationLevel string

    The default automation level for all virtual machines in this cluster. Can be one of manual, partiallyAutomated, or fullyAutomated. Default: manual.

    drsEnablePredictiveDrs boolean

    When true, enables DRS to use data from vRealize Operations Manager to make proactive DRS recommendations. *

    drsEnableVmOverrides boolean

    Allow individual DRS overrides to be set for virtual machines in the cluster. Default: true.

    drsEnabled boolean

    Enable DRS for this cluster. Default: false.

    drsMigrationThreshold number

    A value between 1 and 5 indicating the threshold of imbalance tolerated between hosts. A lower setting will tolerate more imbalance while a higher setting will tolerate less. Default: 3.

    drsScaleDescendantsShares string

    Enable scalable shares for all resource pools in the cluster. Can be one of disabled or scaleCpuAndMemoryShares. Default: disabled.

    folder string

    The relative path to a folder to put this cluster in. This is a path relative to the datacenter you are deploying the cluster to. For example, for the dc1 datacenter and a provided folder of foo/bar, the provider will place a cluster named compute-cluster-test in a host folder located at /dc1/host/foo/bar, with the final inventory path being /dc1/host/foo/bar/compute-cluster-test.

    forceEvacuateOnDestroy boolean

    When destroying the resource, setting this to true will auto-remove any hosts that are currently a member of the cluster, as if they were removed by taking their entry out of host_system_ids (see below). This is an advanced option and should only be used for testing. Default: false.

    NOTE: Do not set force_evacuate_on_destroy in production operation as there are many pitfalls to its use when working with complex cluster configurations. Depending on the virtual machines currently on the cluster, and your DRS and HA settings, the full host evacuation may fail. Instead, incrementally remove hosts from your configuration by adjusting the contents of the host_system_ids attribute.

    haAdmissionControlFailoverHostSystemIds string[]

    Defines the managed object IDs of hosts to use as dedicated failover hosts. These hosts are kept as available as possible - admission control will block access to the host, and DRS will ignore the host when making recommendations.

    haAdmissionControlHostFailureTolerance number

    The maximum number of failed hosts that admission control tolerates when making decisions on whether to permit virtual machine operations. The maximum is one less than the number of hosts in the cluster. Default: 1. *

    haAdmissionControlPerformanceTolerance number

    The percentage of resource reduction that a cluster of virtual machines can tolerate in case of a failover. A value of 0 produces warnings only, whereas a value of 100 disables the setting. Default: 100 (disabled).

    haAdmissionControlPolicy string

    The type of admission control policy to use with vSphere HA. Can be one of resourcePercentage, slotPolicy, failoverHosts, or disabled. Default: resourcePercentage.

    haAdmissionControlResourcePercentageAutoCompute boolean

    Automatically determine available resource percentages by subtracting the average number of host resources represented by the ha_admission_control_host_failure_tolerance setting from the total amount of resources in the cluster. Disable to supply user-defined values. Default: true. *

    haAdmissionControlResourcePercentageCpu number

    Controls the user-defined percentage of CPU resources in the cluster to reserve for failover. Default: 100.

    haAdmissionControlResourcePercentageMemory number

    Controls the user-defined percentage of memory resources in the cluster to reserve for failover. Default: 100.

    haAdmissionControlSlotPolicyExplicitCpu number

    Controls the user-defined CPU slot size, in MHz. Default: 32.

    haAdmissionControlSlotPolicyExplicitMemory number

    Controls the user-defined memory slot size, in MB. Default: 100.

    haAdmissionControlSlotPolicyUseExplicitSize boolean

    Controls whether or not you wish to supply explicit values for the CPU and memory slot sizes. The default is false, which tells vSphere to gather an automatic average based on all powered-on virtual machines currently in the cluster.

    haAdvancedOptions {[key: string]: string}

    A key/value map that specifies advanced options for vSphere HA.

    haDatastoreApdRecoveryAction string

    Controls the action to take on virtual machines if an APD status on an affected datastore clears in the middle of an APD event. Can be one of none or reset. Default: none. *

    haDatastoreApdResponse string

    Controls the action to take on virtual machines when the cluster has detected loss to all paths to a relevant datastore. Can be one of disabled, warning, restartConservative, or restartAggressive. Default: disabled. *

    haDatastoreApdResponseDelay number

    The time, in seconds, to wait after an APD timeout event to run the response action defined in ha_datastore_apd_response. Default: 180 seconds (3 minutes). *

    haDatastorePdlResponse string

    Controls the action to take on virtual machines when the cluster has detected a permanent device loss to a relevant datastore. Can be one of disabled, warning, or restartAggressive. Default: disabled. *

    haEnabled boolean

    Enable vSphere HA for this cluster. Default: false.

    haHeartbeatDatastoreIds string[]

    The list of managed object IDs for preferred datastores to use for HA heartbeating. This setting is only useful when ha_heartbeat_datastore_policy is set to either userSelectedDs or allFeasibleDsWithUserPreference.

    haHeartbeatDatastorePolicy string

    The selection policy for HA heartbeat datastores. Can be one of allFeasibleDs, userSelectedDs, or allFeasibleDsWithUserPreference. Default: allFeasibleDsWithUserPreference.

    haHostIsolationResponse string

    The action to take on virtual machines when a host has detected that it has been isolated from the rest of the cluster. Can be one of none, powerOff, or shutdown. Default: none.

    haHostMonitoring string

    Global setting that controls whether vSphere HA remediates virtual machines on host failure. Can be one of enabled or disabled. Default: enabled.

    haVmComponentProtection string

    Controls vSphere VM component protection for virtual machines in this cluster. Can be one of enabled or disabled. Default: enabled. *

    haVmDependencyRestartCondition string

    The condition used to determine whether or not virtual machines in a certain restart priority class are online, allowing HA to move on to restarting virtual machines on the next priority. Can be one of none, poweredOn, guestHbStatusGreen, or appHbStatusGreen. The default is none, which means that a virtual machine is considered ready immediately after a host is found to start it on. *

    haVmFailureInterval number

    The time interval, in seconds, within which a heartbeat must be received from a virtual machine; if no heartbeat is received within this interval, the virtual machine is marked as failed. Default: 30 seconds.

    haVmMaximumFailureWindow number

    The time, in seconds, for the reset window in which ha_vm_maximum_resets can operate. When this window expires, no more resets are attempted regardless of the setting configured in ha_vm_maximum_resets. -1 means no window, meaning an unlimited reset time is allotted. Default: -1 (no window).

    haVmMaximumResets number

    The maximum number of resets that HA will perform on a virtual machine when responding to a failure event. Default: 3.

    haVmMinimumUptime number

    The time, in seconds, that HA waits after powering on a virtual machine before monitoring for heartbeats. Default: 120 seconds (2 minutes).

    haVmMonitoring string

    The type of virtual machine monitoring to use when HA is enabled in the cluster. Can be one of vmMonitoringDisabled, vmMonitoringOnly, or vmAndAppMonitoring. Default: vmMonitoringDisabled.

    haVmRestartAdditionalDelay number

    Additional delay, in seconds, after ready condition is met. A VM is considered ready at this point. Default: 0 seconds (no delay). *

    haVmRestartPriority string

    The default restart priority for affected virtual machines when vSphere detects a host failure. Can be one of lowest, low, medium, high, or highest. Default: medium.

    haVmRestartTimeout number

    The maximum time, in seconds, that vSphere HA will wait for virtual machines in one priority to be ready before proceeding with the next priority. Default: 600 seconds (10 minutes). *

    hostClusterExitTimeout number

    The timeout, in seconds, for each host maintenance mode operation when removing hosts from a cluster. Default: 3600 seconds (1 hour).

    hostManaged boolean

    Can be set to true if compute cluster membership will be managed through the host resource rather than the compute_cluster resource. Conflicts with: host_system_ids.

    hostSystemIds string[]

    The managed object IDs of the hosts to put in the cluster. Conflicts with: host_managed.

    name string

    The name of the cluster.

    proactiveHaAutomationLevel string

    Determines how the host quarantine, maintenance mode, or virtual machine migration recommendations made by proactive HA are to be handled. Can be one of Automated or Manual. Default: Manual. *

    proactiveHaEnabled boolean

    Enables Proactive HA. Default: false. *

    proactiveHaModerateRemediation string

    The configured remediation for moderately degraded hosts. Can be one of MaintenanceMode or QuarantineMode. Note that this cannot be set to MaintenanceMode when proactive_ha_severe_remediation is set to QuarantineMode. Default: QuarantineMode. *

    proactiveHaProviderIds string[]

    The list of IDs for health update providers configured for this cluster. *

    proactiveHaSevereRemediation string

    The configured remediation for severely degraded hosts. Can be one of MaintenanceMode or QuarantineMode. Note that this cannot be set to QuarantineMode when proactive_ha_moderate_remediation is set to MaintenanceMode. Default: QuarantineMode. *

    tags string[]

    The IDs of any tags to attach to this resource.

    vsanCompressionEnabled boolean

    Enables vSAN compression on the cluster.

    vsanDedupEnabled boolean

    Enables vSAN deduplication on the cluster. Cannot be independently set to true. When vSAN deduplication is enabled, vSAN compression must also be enabled.

    vsanDiskGroups ComputeClusterVsanDiskGroup[]

    Represents the configuration of a host disk group in the cluster.

    vsanDitEncryptionEnabled boolean

    Enables vSAN data-in-transit encryption on the cluster. Conflicts with vsan_remote_datastore_ids, i.e., the vSAN data-in-transit encryption feature and the vSAN HCI Mesh feature cannot be enabled at the same time.

    vsanDitRekeyInterval number

    Indicates the rekey interval in minutes for data-in-transit encryption. The valid rekey interval is 30 to 10800 (feature defaults to 1440). Conflicts with vsan_remote_datastore_ids.

    vsanEnabled boolean

    Enables vSAN on the cluster.

    vsanEsaEnabled boolean

    Enables vSAN ESA on the cluster.

    vsanFaultDomains ComputeClusterVsanFaultDomain[]

    Configurations of vSAN fault domains.

    vsanNetworkDiagnosticModeEnabled boolean

    Enables network diagnostic mode for vSAN performance service on the cluster.

    vsanPerformanceEnabled boolean

    Enables vSAN performance service on the cluster. Default: true.

    vsanRemoteDatastoreIds string[]

    The remote vSAN datastore IDs to be mounted to this cluster. Conflicts with vsan_dit_encryption_enabled and vsan_dit_rekey_interval, i.e., the vSAN HCI Mesh feature and the data-in-transit encryption feature cannot be enabled at the same time.

    vsanStretchedCluster ComputeClusterVsanStretchedCluster

    Configurations of vSAN stretched cluster.

    vsanUnmapEnabled boolean

    Enables vSAN unmap on the cluster. You must explicitly enable vSAN unmap when you enable vSAN ESA on the cluster.

    vsanVerboseModeEnabled boolean

    Enables verbose mode for vSAN performance service on the cluster.
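
    As a minimal TypeScript sketch of how the arguments above fit together, the following creates a cluster with DRS fully automated and vSphere HA enabled. The datacenter and host names ("dc-01", "esxi-01.example.com") are assumptions; substitute names from your own inventory.

    import * as vsphere from "@pulumi/vsphere";

    // Look up the target datacenter and a member host (names are placeholders).
    const datacenter = vsphere.getDatacenterOutput({ name: "dc-01" });
    const host = vsphere.getHostOutput({
        name: "esxi-01.example.com",
        datacenterId: datacenter.id,
    });

    // A cluster with fully automated DRS and vSphere HA turned on.
    const cluster = new vsphere.ComputeCluster("compute-cluster-test", {
        datacenterId: datacenter.id,
        hostSystemIds: [host.id],
        drsEnabled: true,
        drsAutomationLevel: "fullyAutomated",
        haEnabled: true,
    });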

    datacenter_id str

    The managed object ID of the datacenter to create the cluster in. Forces a new resource if changed.

    custom_attributes Mapping[str, str]

    A map of custom attribute IDs to attribute value strings to set for the cluster.

    NOTE: Custom attributes are unsupported on direct ESXi connections and require vCenter Server.

    dpm_automation_level str

    The automation level for host power operations in this cluster. Can be one of manual or automated. Default: manual.

    dpm_enabled bool

    Enable DPM support for DRS in this cluster. Requires drs_enabled to be true in order to be effective. Default: false.

    dpm_threshold int

    A value between 1 and 5 indicating the threshold of load within the cluster that influences host power operations. This affects both power on and power off operations - a lower setting will tolerate more of a surplus/deficit than a higher setting. Default: 3.

    drs_advanced_options Mapping[str, str]

    A key/value map that specifies advanced options for DRS and DPM.

    drs_automation_level str

    The default automation level for all virtual machines in this cluster. Can be one of manual, partiallyAutomated, or fullyAutomated. Default: manual.

    drs_enable_predictive_drs bool

    When true, enables DRS to use data from vRealize Operations Manager to make proactive DRS recommendations. *

    drs_enable_vm_overrides bool

    Allow individual DRS overrides to be set for virtual machines in the cluster. Default: true.

    drs_enabled bool

    Enable DRS for this cluster. Default: false.

    drs_migration_threshold int

    A value between 1 and 5 indicating the threshold of imbalance tolerated between hosts. A lower setting will tolerate more imbalance while a higher setting will tolerate less. Default: 3.

    drs_scale_descendants_shares str

    Enable scalable shares for all resource pools in the cluster. Can be one of disabled or scaleCpuAndMemoryShares. Default: disabled.

    folder str

    The relative path to a folder to put this cluster in. This is a path relative to the datacenter you are deploying the cluster to. Example: for the dc1 datacenter and a provided folder of foo/bar, the provider will place a cluster named compute-cluster-test in a host folder located at /dc1/host/foo/bar, with the final inventory path being /dc1/host/foo/bar/compute-cluster-test.

    force_evacuate_on_destroy bool

    When destroying the resource, setting this to true will auto-remove any hosts that are currently members of the cluster, as if they were removed by taking their entries out of host_system_ids (see below). This is an advanced option and should only be used for testing. Default: false.

    NOTE: Do not set force_evacuate_on_destroy in production operation as there are many pitfalls to its use when working with complex cluster configurations. Depending on the virtual machines currently on the cluster, and your DRS and HA settings, the full host evacuation may fail. Instead, incrementally remove hosts from your configuration by adjusting the contents of the host_system_ids attribute.

    ha_admission_control_failover_host_system_ids Sequence[str]

    Defines the managed object IDs of hosts to use as dedicated failover hosts. These hosts are kept as available as possible - admission control will block access to the host, and DRS will ignore the host when making recommendations.

    ha_admission_control_host_failure_tolerance int

    The maximum number of failed hosts that admission control tolerates when making decisions on whether to permit virtual machine operations. The maximum is one less than the number of hosts in the cluster. Default: 1. *

    ha_admission_control_performance_tolerance int

    The percentage of resource reduction that a cluster of virtual machines can tolerate in case of a failover. A value of 0 produces warnings only, whereas a value of 100 disables the setting. Default: 100 (disabled).

    ha_admission_control_policy str

    The type of admission control policy to use with vSphere HA. Can be one of resourcePercentage, slotPolicy, failoverHosts, or disabled. Default: resourcePercentage.

    ha_admission_control_resource_percentage_auto_compute bool

    Automatically determine available resource percentages by subtracting the average number of host resources represented by the ha_admission_control_host_failure_tolerance setting from the total amount of resources in the cluster. Disable to supply user-defined values. Default: true. *

    ha_admission_control_resource_percentage_cpu int

    Controls the user-defined percentage of CPU resources in the cluster to reserve for failover. Default: 100.

    ha_admission_control_resource_percentage_memory int

    Controls the user-defined percentage of memory resources in the cluster to reserve for failover. Default: 100.

    ha_admission_control_slot_policy_explicit_cpu int

    Controls the user-defined CPU slot size, in MHz. Default: 32.

    ha_admission_control_slot_policy_explicit_memory int

    Controls the user-defined memory slot size, in MB. Default: 100.

    ha_admission_control_slot_policy_use_explicit_size bool

    Controls whether to supply explicit values for the CPU and memory slot sizes. The default is false, which tells vSphere to compute an automatic average based on all powered-on virtual machines currently in the cluster.

    ha_advanced_options Mapping[str, str]

    A key/value map that specifies advanced options for vSphere HA.

    ha_datastore_apd_recovery_action str

    Controls the action to take on virtual machines if an APD status on an affected datastore clears in the middle of an APD event. Can be one of none or reset. Default: none. *

    ha_datastore_apd_response str

    Controls the action to take on virtual machines when the cluster has detected the loss of all paths to a relevant datastore. Can be one of disabled, warning, restartConservative, or restartAggressive. Default: disabled. *

    ha_datastore_apd_response_delay int

    The time, in seconds, to wait after an APD timeout event to run the response action defined in ha_datastore_apd_response. Default: 180 seconds (3 minutes). *

    ha_datastore_pdl_response str

    Controls the action to take on virtual machines when the cluster has detected a permanent device loss to a relevant datastore. Can be one of disabled, warning, or restartAggressive. Default: disabled. *

    ha_enabled bool

    Enable vSphere HA for this cluster. Default: false.

    ha_heartbeat_datastore_ids Sequence[str]

    The list of managed object IDs for preferred datastores to use for HA heartbeating. This setting is only useful when ha_heartbeat_datastore_policy is set to either userSelectedDs or allFeasibleDsWithUserPreference.

    ha_heartbeat_datastore_policy str

    The selection policy for HA heartbeat datastores. Can be one of allFeasibleDs, userSelectedDs, or allFeasibleDsWithUserPreference. Default: allFeasibleDsWithUserPreference.

    ha_host_isolation_response str

    The action to take on virtual machines when a host has detected that it has been isolated from the rest of the cluster. Can be one of none, powerOff, or shutdown. Default: none.

    ha_host_monitoring str

    Global setting that controls whether vSphere HA remediates virtual machines on host failure. Can be one of enabled or disabled. Default: enabled.

    ha_vm_component_protection str

    Controls vSphere VM component protection for virtual machines in this cluster. Can be one of enabled or disabled. Default: enabled. *

    ha_vm_dependency_restart_condition str

    The condition used to determine whether or not virtual machines in a certain restart priority class are online, allowing HA to move on to restarting virtual machines on the next priority. Can be one of none, poweredOn, guestHbStatusGreen, or appHbStatusGreen. The default is none, which means that a virtual machine is considered ready immediately after a host is found to start it on. *

    ha_vm_failure_interval int

    The heartbeat interval, in seconds. If a heartbeat from a virtual machine is not received within this interval, the virtual machine is marked as failed. Default: 30 seconds.

    ha_vm_maximum_failure_window int

    The time, in seconds, for the reset window in which ha_vm_maximum_resets can operate. When this window expires, no more resets are attempted regardless of the setting configured in ha_vm_maximum_resets. A value of -1 specifies no window, allowing an unlimited reset time. Default: -1 (no window).

    ha_vm_maximum_resets int

    The maximum number of resets that HA will perform on a virtual machine when responding to a failure event. Default: 3.

    ha_vm_minimum_uptime int

    The time, in seconds, that HA waits after powering on a virtual machine before monitoring for heartbeats. Default: 120 seconds (2 minutes).

    ha_vm_monitoring str

    The type of virtual machine monitoring to use when HA is enabled in the cluster. Can be one of vmMonitoringDisabled, vmMonitoringOnly, or vmAndAppMonitoring. Default: vmMonitoringDisabled.

    ha_vm_restart_additional_delay int

    Additional delay, in seconds, after the ready condition is met; a virtual machine is considered ready only after this delay has elapsed. Default: 0 seconds (no delay). *

    ha_vm_restart_priority str

    The default restart priority for affected virtual machines when vSphere detects a host failure. Can be one of lowest, low, medium, high, or highest. Default: medium.

    ha_vm_restart_timeout int

    The maximum time, in seconds, that vSphere HA will wait for virtual machines in one priority to be ready before proceeding with the next priority. Default: 600 seconds (10 minutes). *

    host_cluster_exit_timeout int

    The timeout, in seconds, for each host maintenance mode operation when removing hosts from a cluster. Default: 3600 seconds (1 hour).

    host_managed bool

    Can be set to true if compute cluster membership will be managed through the host resource rather than the compute_cluster resource. Conflicts with: host_system_ids.

    host_system_ids Sequence[str]

    The managed object IDs of the hosts to put in the cluster. Conflicts with: host_managed.

    name str

    The name of the cluster.

    proactive_ha_automation_level str

    Determines how the host quarantine, maintenance mode, or virtual machine migration recommendations made by proactive HA are to be handled. Can be one of Automated or Manual. Default: Manual. *

    proactive_ha_enabled bool

    Enables Proactive HA. Default: false. *

    proactive_ha_moderate_remediation str

    The configured remediation for moderately degraded hosts. Can be one of MaintenanceMode or QuarantineMode. Note that this cannot be set to MaintenanceMode when proactive_ha_severe_remediation is set to QuarantineMode. Default: QuarantineMode. *

    proactive_ha_provider_ids Sequence[str]

    The list of IDs for health update providers configured for this cluster. *

    proactive_ha_severe_remediation str

    The configured remediation for severely degraded hosts. Can be one of MaintenanceMode or QuarantineMode. Note that this cannot be set to QuarantineMode when proactive_ha_moderate_remediation is set to MaintenanceMode. Default: QuarantineMode. *

    tags Sequence[str]

    The IDs of any tags to attach to this resource.

    vsan_compression_enabled bool

    Enables vSAN compression on the cluster.

    vsan_dedup_enabled bool

    Enables vSAN deduplication on the cluster. Cannot be independently set to true. When vSAN deduplication is enabled, vSAN compression must also be enabled.

    vsan_disk_groups Sequence[ComputeClusterVsanDiskGroupArgs]

    Represents the configuration of a host disk group in the cluster.

    vsan_dit_encryption_enabled bool

    Enables vSAN data-in-transit encryption on the cluster. Conflicts with vsan_remote_datastore_ids, i.e., the vSAN data-in-transit encryption feature and the vSAN HCI Mesh feature cannot be enabled at the same time.

    vsan_dit_rekey_interval int

    Indicates the rekey interval in minutes for data-in-transit encryption. The valid rekey interval is 30 to 10800 (feature defaults to 1440). Conflicts with vsan_remote_datastore_ids.

    vsan_enabled bool

    Enables vSAN on the cluster.

    vsan_esa_enabled bool

    Enables vSAN ESA on the cluster.

    vsan_fault_domains Sequence[ComputeClusterVsanFaultDomainArgs]

    Configurations of vSAN fault domains.

    vsan_network_diagnostic_mode_enabled bool

    Enables network diagnostic mode for vSAN performance service on the cluster.

    vsan_performance_enabled bool

    Enables vSAN performance service on the cluster. Default: true.

    vsan_remote_datastore_ids Sequence[str]

    The remote vSAN datastore IDs to be mounted to this cluster. Conflicts with vsan_dit_encryption_enabled and vsan_dit_rekey_interval, i.e., the vSAN HCI Mesh feature and the data-in-transit encryption feature cannot be enabled at the same time.

    vsan_stretched_cluster ComputeClusterVsanStretchedClusterArgs

    Configurations of vSAN stretched cluster.

    vsan_unmap_enabled bool

    Enables vSAN unmap on the cluster. You must explicitly enable vSAN unmap when you enable vSAN ESA on the cluster.

    vsan_verbose_mode_enabled bool

    Enables verbose mode for vSAN performance service on the cluster.
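
    The vSAN flags above interact: deduplication cannot be enabled independently and requires compression to also be enabled. A minimal TypeScript sketch of a vSAN-enabled cluster follows (TypeScript is used for all sketches on this page; "datacenter-123" is a placeholder managed object ID):

    import * as vsphere from "@pulumi/vsphere";

    // A cluster with vSAN, its performance service, and space efficiency enabled.
    const vsanCluster = new vsphere.ComputeCluster("vsan-cluster", {
        datacenterId: "datacenter-123",   // placeholder managed object ID
        vsanEnabled: true,
        vsanPerformanceEnabled: true,
        vsanCompressionEnabled: true,     // compression must be on before dedup
        vsanDedupEnabled: true,           // cannot be enabled without compression
    });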

    datacenterId String

    The managed object ID of the datacenter to create the cluster in. Forces a new resource if changed.

    customAttributes Map<String>

    A map of custom attribute IDs to attribute value strings to set for the cluster.

    NOTE: Custom attributes are unsupported on direct ESXi connections and require vCenter Server.

    dpmAutomationLevel String

    The automation level for host power operations in this cluster. Can be one of manual or automated. Default: manual.

    dpmEnabled Boolean

    Enable DPM support for DRS in this cluster. Requires drs_enabled to be true in order to be effective. Default: false.

    dpmThreshold Number

    A value between 1 and 5 indicating the threshold of load within the cluster that influences host power operations. This affects both power on and power off operations - a lower setting will tolerate more of a surplus/deficit than a higher setting. Default: 3.

    drsAdvancedOptions Map<String>

    A key/value map that specifies advanced options for DRS and DPM.

    drsAutomationLevel String

    The default automation level for all virtual machines in this cluster. Can be one of manual, partiallyAutomated, or fullyAutomated. Default: manual.

    drsEnablePredictiveDrs Boolean

    When true, enables DRS to use data from vRealize Operations Manager to make proactive DRS recommendations. *

    drsEnableVmOverrides Boolean

    Allow individual DRS overrides to be set for virtual machines in the cluster. Default: true.

    drsEnabled Boolean

    Enable DRS for this cluster. Default: false.

    drsMigrationThreshold Number

    A value between 1 and 5 indicating the threshold of imbalance tolerated between hosts. A lower setting will tolerate more imbalance while a higher setting will tolerate less. Default: 3.

    drsScaleDescendantsShares String

    Enable scalable shares for all resource pools in the cluster. Can be one of disabled or scaleCpuAndMemoryShares. Default: disabled.

    folder String

    The relative path to a folder to put this cluster in. This is a path relative to the datacenter you are deploying the cluster to. Example: for the dc1 datacenter and a provided folder of foo/bar, the provider will place a cluster named compute-cluster-test in a host folder located at /dc1/host/foo/bar, with the final inventory path being /dc1/host/foo/bar/compute-cluster-test.

    forceEvacuateOnDestroy Boolean

    When destroying the resource, setting this to true will auto-remove any hosts that are currently members of the cluster, as if they were removed by taking their entries out of host_system_ids (see below). This is an advanced option and should only be used for testing. Default: false.

    NOTE: Do not set force_evacuate_on_destroy in production operation as there are many pitfalls to its use when working with complex cluster configurations. Depending on the virtual machines currently on the cluster, and your DRS and HA settings, the full host evacuation may fail. Instead, incrementally remove hosts from your configuration by adjusting the contents of the host_system_ids attribute.

    haAdmissionControlFailoverHostSystemIds List<String>

    Defines the managed object IDs of hosts to use as dedicated failover hosts. These hosts are kept as available as possible - admission control will block access to the host, and DRS will ignore the host when making recommendations.

    haAdmissionControlHostFailureTolerance Number

    The maximum number of failed hosts that admission control tolerates when making decisions on whether to permit virtual machine operations. The maximum is one less than the number of hosts in the cluster. Default: 1. *

    haAdmissionControlPerformanceTolerance Number

    The percentage of resource reduction that a cluster of virtual machines can tolerate in case of a failover. A value of 0 produces warnings only, whereas a value of 100 disables the setting. Default: 100 (disabled).

    haAdmissionControlPolicy String

    The type of admission control policy to use with vSphere HA. Can be one of resourcePercentage, slotPolicy, failoverHosts, or disabled. Default: resourcePercentage.

    haAdmissionControlResourcePercentageAutoCompute Boolean

    Automatically determine available resource percentages by subtracting the average number of host resources represented by the ha_admission_control_host_failure_tolerance setting from the total amount of resources in the cluster. Disable to supply user-defined values. Default: true. *

    haAdmissionControlResourcePercentageCpu Number

    Controls the user-defined percentage of CPU resources in the cluster to reserve for failover. Default: 100.

    haAdmissionControlResourcePercentageMemory Number

    Controls the user-defined percentage of memory resources in the cluster to reserve for failover. Default: 100.

    haAdmissionControlSlotPolicyExplicitCpu Number

    Controls the user-defined CPU slot size, in MHz. Default: 32.

    haAdmissionControlSlotPolicyExplicitMemory Number

    Controls the user-defined memory slot size, in MB. Default: 100.

    haAdmissionControlSlotPolicyUseExplicitSize Boolean

    Controls whether to supply explicit values for the CPU and memory slot sizes. The default is false, which tells vSphere to compute an automatic average based on all powered-on virtual machines currently in the cluster.

    haAdvancedOptions Map<String>

    A key/value map that specifies advanced options for vSphere HA.

    haDatastoreApdRecoveryAction String

    Controls the action to take on virtual machines if an APD status on an affected datastore clears in the middle of an APD event. Can be one of none or reset. Default: none. *

    haDatastoreApdResponse String

    Controls the action to take on virtual machines when the cluster has detected the loss of all paths to a relevant datastore. Can be one of disabled, warning, restartConservative, or restartAggressive. Default: disabled. *

    haDatastoreApdResponseDelay Number

    The time, in seconds, to wait after an APD timeout event to run the response action defined in ha_datastore_apd_response. Default: 180 seconds (3 minutes). *

    haDatastorePdlResponse String

    Controls the action to take on virtual machines when the cluster has detected a permanent device loss to a relevant datastore. Can be one of disabled, warning, or restartAggressive. Default: disabled. *

    haEnabled Boolean

    Enable vSphere HA for this cluster. Default: false.

    haHeartbeatDatastoreIds List<String>

    The list of managed object IDs for preferred datastores to use for HA heartbeating. This setting is only useful when ha_heartbeat_datastore_policy is set to either userSelectedDs or allFeasibleDsWithUserPreference.

    haHeartbeatDatastorePolicy String

    The selection policy for HA heartbeat datastores. Can be one of allFeasibleDs, userSelectedDs, or allFeasibleDsWithUserPreference. Default: allFeasibleDsWithUserPreference.

    haHostIsolationResponse String

    The action to take on virtual machines when a host has detected that it has been isolated from the rest of the cluster. Can be one of none, powerOff, or shutdown. Default: none.

    haHostMonitoring String

    Global setting that controls whether vSphere HA remediates virtual machines on host failure. Can be one of enabled or disabled. Default: enabled.

    haVmComponentProtection String

    Controls vSphere VM component protection for virtual machines in this cluster. Can be one of enabled or disabled. Default: enabled. *

    haVmDependencyRestartCondition String

    The condition used to determine whether or not virtual machines in a certain restart priority class are online, allowing HA to move on to restarting virtual machines on the next priority. Can be one of none, poweredOn, guestHbStatusGreen, or appHbStatusGreen. The default is none, which means that a virtual machine is considered ready immediately after a host is found to start it on. *

    haVmFailureInterval Number

    The heartbeat interval, in seconds. If a heartbeat from a virtual machine is not received within this interval, the virtual machine is marked as failed. Default: 30 seconds.

    haVmMaximumFailureWindow Number

    The time, in seconds, for the reset window in which ha_vm_maximum_resets can operate. When this window expires, no more resets are attempted regardless of the setting configured in ha_vm_maximum_resets. A value of -1 specifies no window, allowing an unlimited reset time. Default: -1 (no window).

    haVmMaximumResets Number

    The maximum number of resets that HA will perform on a virtual machine when responding to a failure event. Default: 3.

    haVmMinimumUptime Number

    The time, in seconds, that HA waits after powering on a virtual machine before monitoring for heartbeats. Default: 120 seconds (2 minutes).

    haVmMonitoring String

    The type of virtual machine monitoring to use when HA is enabled in the cluster. Can be one of vmMonitoringDisabled, vmMonitoringOnly, or vmAndAppMonitoring. Default: vmMonitoringDisabled.

    haVmRestartAdditionalDelay Number

    Additional delay, in seconds, after the ready condition is met; a virtual machine is considered ready only after this delay has elapsed. Default: 0 seconds (no delay). *

    haVmRestartPriority String

    The default restart priority for affected virtual machines when vSphere detects a host failure. Can be one of lowest, low, medium, high, or highest. Default: medium.

    haVmRestartTimeout Number

    The maximum time, in seconds, that vSphere HA will wait for virtual machines in one priority to be ready before proceeding with the next priority. Default: 600 seconds (10 minutes). *

    hostClusterExitTimeout Number

    The timeout, in seconds, for each host maintenance mode operation when removing hosts from a cluster. Default: 3600 seconds (1 hour).

    hostManaged Boolean

    Can be set to true if compute cluster membership will be managed through the host resource rather than the compute_cluster resource. Conflicts with: host_system_ids.

    hostSystemIds List<String>

    The managed object IDs of the hosts to put in the cluster. Conflicts with: host_managed.

    name String

    The name of the cluster.

    proactiveHaAutomationLevel String

    Determines how the host quarantine, maintenance mode, or virtual machine migration recommendations made by proactive HA are to be handled. Can be one of Automated or Manual. Default: Manual. *

    proactiveHaEnabled Boolean

    Enables Proactive HA. Default: false. *

    proactiveHaModerateRemediation String

    The configured remediation for moderately degraded hosts. Can be one of MaintenanceMode or QuarantineMode. Note that this cannot be set to MaintenanceMode when proactive_ha_severe_remediation is set to QuarantineMode. Default: QuarantineMode. *

    proactiveHaProviderIds List<String>

    The list of IDs for health update providers configured for this cluster. *

    proactiveHaSevereRemediation String

    The configured remediation for severely degraded hosts. Can be one of MaintenanceMode or QuarantineMode. Note that this cannot be set to QuarantineMode when proactive_ha_moderate_remediation is set to MaintenanceMode. Default: QuarantineMode. *

    tags List<String>

    The IDs of any tags to attach to this resource.

    vsanCompressionEnabled Boolean

    Enables vSAN compression on the cluster.

    vsanDedupEnabled Boolean

    Enables vSAN deduplication on the cluster. Cannot be independently set to true. When vSAN deduplication is enabled, vSAN compression must also be enabled.

    vsanDiskGroups List<Property Map>

    Represents the configuration of a host disk group in the cluster.

    vsanDitEncryptionEnabled Boolean

    Enables vSAN data-in-transit encryption on the cluster. Conflicts with vsan_remote_datastore_ids, i.e., the vSAN data-in-transit encryption feature and the vSAN HCI Mesh feature cannot be enabled at the same time.

    vsanDitRekeyInterval Number

    Indicates the rekey interval in minutes for data-in-transit encryption. The valid rekey interval is 30 to 10800 (feature defaults to 1440). Conflicts with vsan_remote_datastore_ids.

    vsanEnabled Boolean

    Enables vSAN on the cluster.

    vsanEsaEnabled Boolean

    Enables vSAN ESA on the cluster.

    vsanFaultDomains List<Property Map>

    Configurations of vSAN fault domains.

    vsanNetworkDiagnosticModeEnabled Boolean

    Enables network diagnostic mode for vSAN performance service on the cluster.

    vsanPerformanceEnabled Boolean

    Enables vSAN performance service on the cluster. Default: true.

    vsanRemoteDatastoreIds List<String>

    The remote vSAN datastore IDs to be mounted to this cluster. Conflicts with vsan_dit_encryption_enabled and vsan_dit_rekey_interval, i.e., the vSAN HCI Mesh feature and the data-in-transit encryption feature cannot be enabled at the same time.

    vsanStretchedCluster Property Map

    Configurations of vSAN stretched cluster.

    vsanUnmapEnabled Boolean

    Enables vSAN unmap on the cluster. You must explicitly enable vSAN unmap when you enable vSAN ESA on the cluster.

    vsanVerboseModeEnabled Boolean

    Enables verbose mode for vSAN performance service on the cluster.
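
    Where fixed failover capacity is wanted rather than the auto-computed percentages, the resource-percentage admission control arguments can be set explicitly. A hedged TypeScript sketch, assuming a placeholder datacenter ID and a 25% reservation chosen purely for illustration:

    import * as vsphere from "@pulumi/vsphere";

    // Reserve fixed failover capacity instead of letting vSphere compute it.
    const haCluster = new vsphere.ComputeCluster("ha-cluster", {
        datacenterId: "datacenter-123",  // placeholder managed object ID
        haEnabled: true,
        haAdmissionControlPolicy: "resourcePercentage",
        haAdmissionControlResourcePercentageAutoCompute: false,
        haAdmissionControlResourcePercentageCpu: 25,    // reserve 25% of CPU for failover
        haAdmissionControlResourcePercentageMemory: 25, // reserve 25% of memory for failover
    });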

    Outputs

    All input properties are implicitly available as output properties. Additionally, the ComputeCluster resource produces the following output properties:

    Id string

    The provider-assigned unique ID for this managed resource.

    ResourcePoolId string

    The managed object ID of the primary resource pool for this cluster. This can be passed directly to the resource_pool_id attribute of the vsphere.VirtualMachine resource.

    Id string

    The provider-assigned unique ID for this managed resource.

    ResourcePoolId string

    The managed object ID of the primary resource pool for this cluster. This can be passed directly to the resource_pool_id attribute of the vsphere.VirtualMachine resource.

    id String

    The provider-assigned unique ID for this managed resource.

    resourcePoolId String

    The managed object ID of the primary resource pool for this cluster. This can be passed directly to the resource_pool_id attribute of the vsphere.VirtualMachine resource.

    id string

    The provider-assigned unique ID for this managed resource.

    resourcePoolId string

    The managed object ID of the primary resource pool for this cluster. This can be passed directly to the resource_pool_id attribute of the vsphere.VirtualMachine resource.

    id str

    The provider-assigned unique ID for this managed resource.

    resource_pool_id str

    The managed object ID of the primary resource pool for this cluster. This can be passed directly to the resource_pool_id attribute of the vsphere.VirtualMachine resource.

    id String

    The provider-assigned unique ID for this managed resource.

    resourcePoolId String

    The managed object ID of the primary resource pool for this cluster. This can be passed directly to the resource_pool_id attribute of the vsphere.VirtualMachine resource.
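
    Since resourcePoolId can be passed directly to the vsphere.VirtualMachine resource, a typical pattern is to place virtual machines in the cluster's primary resource pool. A minimal TypeScript sketch; the datastore and network IDs and the VM sizing values are placeholders:

    import * as vsphere from "@pulumi/vsphere";

    // A cluster defined as in the earlier sketches (datacenter ID is a placeholder).
    const cluster = new vsphere.ComputeCluster("cluster", {
        datacenterId: "datacenter-123",
    });

    // Deploy a VM into the cluster's primary resource pool via the output property.
    const vm = new vsphere.VirtualMachine("vm-01", {
        resourcePoolId: cluster.resourcePoolId,
        datastoreId: "datastore-123",                       // placeholder
        numCpus: 2,
        memory: 4096,
        guestId: "otherGuest64",
        networkInterfaces: [{ networkId: "network-123" }],  // placeholder
        disks: [{ label: "disk0", size: 20 }],
    });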

    Look up Existing ComputeCluster Resource

    Get an existing ComputeCluster resource’s state with the given name, ID, and optional extra properties used to qualify the lookup.

    public static get(name: string, id: Input<ID>, state?: ComputeClusterState, opts?: CustomResourceOptions): ComputeCluster
    @staticmethod
    def get(resource_name: str,
            id: str,
            opts: Optional[ResourceOptions] = None,
            custom_attributes: Optional[Mapping[str, str]] = None,
            datacenter_id: Optional[str] = None,
            dpm_automation_level: Optional[str] = None,
            dpm_enabled: Optional[bool] = None,
            dpm_threshold: Optional[int] = None,
            drs_advanced_options: Optional[Mapping[str, str]] = None,
            drs_automation_level: Optional[str] = None,
            drs_enable_predictive_drs: Optional[bool] = None,
            drs_enable_vm_overrides: Optional[bool] = None,
            drs_enabled: Optional[bool] = None,
            drs_migration_threshold: Optional[int] = None,
            drs_scale_descendants_shares: Optional[str] = None,
            folder: Optional[str] = None,
            force_evacuate_on_destroy: Optional[bool] = None,
            ha_admission_control_failover_host_system_ids: Optional[Sequence[str]] = None,
            ha_admission_control_host_failure_tolerance: Optional[int] = None,
            ha_admission_control_performance_tolerance: Optional[int] = None,
            ha_admission_control_policy: Optional[str] = None,
            ha_admission_control_resource_percentage_auto_compute: Optional[bool] = None,
            ha_admission_control_resource_percentage_cpu: Optional[int] = None,
            ha_admission_control_resource_percentage_memory: Optional[int] = None,
            ha_admission_control_slot_policy_explicit_cpu: Optional[int] = None,
            ha_admission_control_slot_policy_explicit_memory: Optional[int] = None,
            ha_admission_control_slot_policy_use_explicit_size: Optional[bool] = None,
            ha_advanced_options: Optional[Mapping[str, str]] = None,
            ha_datastore_apd_recovery_action: Optional[str] = None,
            ha_datastore_apd_response: Optional[str] = None,
            ha_datastore_apd_response_delay: Optional[int] = None,
            ha_datastore_pdl_response: Optional[str] = None,
            ha_enabled: Optional[bool] = None,
            ha_heartbeat_datastore_ids: Optional[Sequence[str]] = None,
            ha_heartbeat_datastore_policy: Optional[str] = None,
            ha_host_isolation_response: Optional[str] = None,
            ha_host_monitoring: Optional[str] = None,
            ha_vm_component_protection: Optional[str] = None,
            ha_vm_dependency_restart_condition: Optional[str] = None,
            ha_vm_failure_interval: Optional[int] = None,
            ha_vm_maximum_failure_window: Optional[int] = None,
            ha_vm_maximum_resets: Optional[int] = None,
            ha_vm_minimum_uptime: Optional[int] = None,
            ha_vm_monitoring: Optional[str] = None,
            ha_vm_restart_additional_delay: Optional[int] = None,
            ha_vm_restart_priority: Optional[str] = None,
            ha_vm_restart_timeout: Optional[int] = None,
            host_cluster_exit_timeout: Optional[int] = None,
            host_managed: Optional[bool] = None,
            host_system_ids: Optional[Sequence[str]] = None,
            name: Optional[str] = None,
            proactive_ha_automation_level: Optional[str] = None,
            proactive_ha_enabled: Optional[bool] = None,
            proactive_ha_moderate_remediation: Optional[str] = None,
            proactive_ha_provider_ids: Optional[Sequence[str]] = None,
            proactive_ha_severe_remediation: Optional[str] = None,
            resource_pool_id: Optional[str] = None,
            tags: Optional[Sequence[str]] = None,
            vsan_compression_enabled: Optional[bool] = None,
            vsan_dedup_enabled: Optional[bool] = None,
            vsan_disk_groups: Optional[Sequence[ComputeClusterVsanDiskGroupArgs]] = None,
            vsan_dit_encryption_enabled: Optional[bool] = None,
            vsan_dit_rekey_interval: Optional[int] = None,
            vsan_enabled: Optional[bool] = None,
            vsan_esa_enabled: Optional[bool] = None,
            vsan_fault_domains: Optional[Sequence[ComputeClusterVsanFaultDomainArgs]] = None,
            vsan_network_diagnostic_mode_enabled: Optional[bool] = None,
            vsan_performance_enabled: Optional[bool] = None,
            vsan_remote_datastore_ids: Optional[Sequence[str]] = None,
            vsan_stretched_cluster: Optional[ComputeClusterVsanStretchedClusterArgs] = None,
            vsan_unmap_enabled: Optional[bool] = None,
            vsan_verbose_mode_enabled: Optional[bool] = None) -> ComputeCluster
    func GetComputeCluster(ctx *Context, name string, id IDInput, state *ComputeClusterState, opts ...ResourceOption) (*ComputeCluster, error)
    public static ComputeCluster Get(string name, Input<string> id, ComputeClusterState? state, CustomResourceOptions? opts = null)
    public static ComputeCluster get(String name, Output<String> id, ComputeClusterState state, CustomResourceOptions options)
    Resource lookup is not supported in YAML
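
    As a short TypeScript sketch of the lookup above, an existing cluster can be adopted by its provider ID; "domain-c123" below is a placeholder managed object ID:

    import * as vsphere from "@pulumi/vsphere";

    // Look up an existing cluster by its ID (placeholder value).
    const existing = vsphere.ComputeCluster.get("existing-cluster", "domain-c123");

    // Output properties are then available as usual.
    export const poolId = existing.resourcePoolId;
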
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    resource_name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    The following state arguments are supported:
    CustomAttributes Dictionary<string, string>

    A map of custom attribute IDs to attribute value strings to set for the cluster.

    NOTE: Custom attributes are unsupported on direct ESXi connections and require vCenter Server.

    DatacenterId string

    The managed object ID of the datacenter to create the cluster in. Forces a new resource if changed.

    DpmAutomationLevel string

    The automation level for host power operations in this cluster. Can be one of manual or automated. Default: manual.

    DpmEnabled bool

    Enable DPM support for DRS in this cluster. Requires drs_enabled to be true in order to be effective. Default: false.

    DpmThreshold int

    A value between 1 and 5 indicating the threshold of load within the cluster that influences host power operations. This affects both power on and power off operations - a lower setting will tolerate more of a surplus/deficit than a higher setting. Default: 3.

    DrsAdvancedOptions Dictionary<string, string>

    A key/value map that specifies advanced options for DRS and DPM.

    DrsAutomationLevel string

    The default automation level for all virtual machines in this cluster. Can be one of manual, partiallyAutomated, or fullyAutomated. Default: manual.

    DrsEnablePredictiveDrs bool

    When true, enables DRS to use data from vRealize Operations Manager to make proactive DRS recommendations. *

    DrsEnableVmOverrides bool

    Allow individual DRS overrides to be set for virtual machines in the cluster. Default: true.

    DrsEnabled bool

    Enable DRS for this cluster. Default: false.

    DrsMigrationThreshold int

    A value between 1 and 5 indicating the threshold of imbalance tolerated between hosts. A lower setting will tolerate more imbalance while a higher setting will tolerate less. Default: 3.

    DrsScaleDescendantsShares string

    Enable scalable shares for all resource pools in the cluster. Can be one of disabled or scaleCpuAndMemoryShares. Default: disabled.

    Folder string

    The relative path to a folder to put this cluster in. This is a path relative to the datacenter you are deploying the cluster to. Example: for the dc1 datacenter and a provided folder of foo/bar, the provider will place a cluster named compute-cluster-test in a host folder located at /dc1/host/foo/bar, with the final inventory path being /dc1/host/foo/bar/compute-cluster-test.

    ForceEvacuateOnDestroy bool

    When destroying the resource, setting this to true will auto-remove any hosts that are currently members of the cluster, as if they were removed by taking their entries out of host_system_ids (see below). This is an advanced option and should only be used for testing. Default: false.

    NOTE: Do not set force_evacuate_on_destroy in production operation as there are many pitfalls to its use when working with complex cluster configurations. Depending on the virtual machines currently on the cluster, and your DRS and HA settings, the full host evacuation may fail. Instead, incrementally remove hosts from your configuration by adjusting the contents of the host_system_ids attribute.

    HaAdmissionControlFailoverHostSystemIds List<string>

    Defines the managed object IDs of hosts to use as dedicated failover hosts. These hosts are kept as available as possible - admission control will block access to the host, and DRS will ignore the host when making recommendations.

    HaAdmissionControlHostFailureTolerance int

    The maximum number of failed hosts that admission control tolerates when making decisions on whether to permit virtual machine operations. The maximum is one less than the number of hosts in the cluster. Default: 1. *

    HaAdmissionControlPerformanceTolerance int

    The percentage of resource reduction that a cluster of virtual machines can tolerate in case of a failover. A value of 0 produces warnings only, whereas a value of 100 disables the setting. Default: 100 (disabled).

    HaAdmissionControlPolicy string

    The type of admission control policy to use with vSphere HA. Can be one of resourcePercentage, slotPolicy, failoverHosts, or disabled. Default: resourcePercentage.

    HaAdmissionControlResourcePercentageAutoCompute bool

    Automatically determine available resource percentages by subtracting the average number of host resources represented by the ha_admission_control_host_failure_tolerance setting from the total amount of resources in the cluster. Disable to supply user-defined values. Default: true. *

    HaAdmissionControlResourcePercentageCpu int

    Controls the user-defined percentage of CPU resources in the cluster to reserve for failover. Default: 100.

    HaAdmissionControlResourcePercentageMemory int

    Controls the user-defined percentage of memory resources in the cluster to reserve for failover. Default: 100.

    HaAdmissionControlSlotPolicyExplicitCpu int

    Controls the user-defined CPU slot size, in MHz. Default: 32.

    HaAdmissionControlSlotPolicyExplicitMemory int

    Controls the user-defined memory slot size, in MB. Default: 100.

    HaAdmissionControlSlotPolicyUseExplicitSize bool

    Controls whether to supply explicit values for the CPU and memory slot sizes. The default is false, which tells vSphere to compute an automatic average based on all powered-on virtual machines currently in the cluster.

    HaAdvancedOptions Dictionary<string, string>

    A key/value map that specifies advanced options for vSphere HA.

    HaDatastoreApdRecoveryAction string

    Controls the action to take on virtual machines if an APD status on an affected datastore clears in the middle of an APD event. Can be one of none or reset. Default: none. *

    HaDatastoreApdResponse string

    Controls the action to take on virtual machines when the cluster has detected the loss of all paths to a relevant datastore. Can be one of disabled, warning, restartConservative, or restartAggressive. Default: disabled. *

    HaDatastoreApdResponseDelay int

    The time, in seconds, to wait after an APD timeout event to run the response action defined in ha_datastore_apd_response. Default: 180 seconds (3 minutes). *

    HaDatastorePdlResponse string

    Controls the action to take on virtual machines when the cluster has detected a permanent device loss to a relevant datastore. Can be one of disabled, warning, or restartAggressive. Default: disabled. *

    HaEnabled bool

    Enable vSphere HA for this cluster. Default: false.

    HaHeartbeatDatastoreIds List<string>

    The list of managed object IDs for preferred datastores to use for HA heartbeating. This setting is only useful when ha_heartbeat_datastore_policy is set to either userSelectedDs or allFeasibleDsWithUserPreference.

    HaHeartbeatDatastorePolicy string

    The selection policy for HA heartbeat datastores. Can be one of allFeasibleDs, userSelectedDs, or allFeasibleDsWithUserPreference. Default: allFeasibleDsWithUserPreference.

    HaHostIsolationResponse string

    The action to take on virtual machines when a host has detected that it has been isolated from the rest of the cluster. Can be one of none, powerOff, or shutdown. Default: none.

    HaHostMonitoring string

    Global setting that controls whether vSphere HA remediates virtual machines on host failure. Can be one of enabled or disabled. Default: enabled.

    HaVmComponentProtection string

    Controls vSphere VM component protection for virtual machines in this cluster. Can be one of enabled or disabled. Default: enabled. *

    HaVmDependencyRestartCondition string

    The condition used to determine whether or not virtual machines in a certain restart priority class are online, allowing HA to move on to restarting virtual machines on the next priority. Can be one of none, poweredOn, guestHbStatusGreen, or appHbStatusGreen. The default is none, which means that a virtual machine is considered ready immediately after a host is found to start it on. *

    HaVmFailureInterval int

    The heartbeat interval, in seconds. If a heartbeat from a virtual machine is not received within this interval, the virtual machine is marked as failed. Default: 30 seconds.

    HaVmMaximumFailureWindow int

    The time, in seconds, for the reset window in which ha_vm_maximum_resets can operate. When this window expires, no more resets are attempted regardless of the setting configured in ha_vm_maximum_resets. A value of -1 specifies no window, allowing an unlimited reset time. Default: -1 (no window).

    HaVmMaximumResets int

    The maximum number of resets that HA will perform on a virtual machine when responding to a failure event. Default: 3.

    HaVmMinimumUptime int

    The time, in seconds, that HA waits after powering on a virtual machine before monitoring for heartbeats. Default: 120 seconds (2 minutes).

    HaVmMonitoring string

    The type of virtual machine monitoring to use when HA is enabled in the cluster. Can be one of vmMonitoringDisabled, vmMonitoringOnly, or vmAndAppMonitoring. Default: vmMonitoringDisabled.

    HaVmRestartAdditionalDelay int

    Additional delay, in seconds, after the ready condition is met; a virtual machine is considered ready only after this delay has elapsed. Default: 0 seconds (no delay). *

    HaVmRestartPriority string

    The default restart priority for affected virtual machines when vSphere detects a host failure. Can be one of lowest, low, medium, high, or highest. Default: medium.

    HaVmRestartTimeout int

    The maximum time, in seconds, that vSphere HA will wait for virtual machines in one priority to be ready before proceeding with the next priority. Default: 600 seconds (10 minutes). *

    HostClusterExitTimeout int

    The timeout, in seconds, for each host maintenance mode operation when removing hosts from a cluster. Default: 3600 seconds (1 hour).

    HostManaged bool

    Can be set to true if compute cluster membership will be managed through the host resource rather than the compute_cluster resource. Conflicts with: host_system_ids.

    HostSystemIds List<string>

    The managed object IDs of the hosts to put in the cluster. Conflicts with: host_managed.

    Name string

    The name of the cluster.

    ProactiveHaAutomationLevel string

    Determines how the host quarantine, maintenance mode, or virtual machine migration recommendations made by proactive HA are to be handled. Can be one of Automated or Manual. Default: Manual. *

    ProactiveHaEnabled bool

    Enables Proactive HA. Default: false. *

    ProactiveHaModerateRemediation string

    The configured remediation for moderately degraded hosts. Can be one of MaintenanceMode or QuarantineMode. Note that this cannot be set to MaintenanceMode when proactive_ha_severe_remediation is set to QuarantineMode. Default: QuarantineMode. *

    ProactiveHaProviderIds List<string>

    The list of IDs for health update providers configured for this cluster. *

    ProactiveHaSevereRemediation string

    The configured remediation for severely degraded hosts. Can be one of MaintenanceMode or QuarantineMode. Note that this cannot be set to QuarantineMode when proactive_ha_moderate_remediation is set to MaintenanceMode. Default: QuarantineMode. *

    ResourcePoolId string

    The managed object ID of the primary resource pool for this cluster. This can be passed directly to the resource_pool_id attribute of the vsphere.VirtualMachine resource.

    Tags List<string>

    The IDs of any tags to attach to this resource.

    VsanCompressionEnabled bool

    Enables vSAN compression on the cluster.

    VsanDedupEnabled bool

    Enables vSAN deduplication on the cluster. Cannot be independently set to true. When vSAN deduplication is enabled, vSAN compression must also be enabled.

    VsanDiskGroups List<Pulumi.VSphere.Inputs.ComputeClusterVsanDiskGroup>

    Represents the configuration of a host disk group in the cluster.

    VsanDitEncryptionEnabled bool

    Enables vSAN data-in-transit encryption on the cluster. Conflicts with vsan_remote_datastore_ids, i.e., the vSAN data-in-transit encryption feature and the vSAN HCI Mesh feature cannot be enabled at the same time.

    VsanDitRekeyInterval int

    Indicates the rekey interval in minutes for data-in-transit encryption. The valid rekey interval is 30 to 10800 (feature defaults to 1440). Conflicts with vsan_remote_datastore_ids.

    VsanEnabled bool

    Enables vSAN on the cluster.

    VsanEsaEnabled bool

    Enables vSAN ESA on the cluster.

    VsanFaultDomains List<Pulumi.VSphere.Inputs.ComputeClusterVsanFaultDomain>

    Configurations of vSAN fault domains.

    VsanNetworkDiagnosticModeEnabled bool

    Enables network diagnostic mode for vSAN performance service on the cluster.

    VsanPerformanceEnabled bool

    Enables vSAN performance service on the cluster. Default: true.

    VsanRemoteDatastoreIds List<string>

    The remote vSAN datastore IDs to be mounted to this cluster. Conflicts with vsan_dit_encryption_enabled and vsan_dit_rekey_interval, i.e., the vSAN HCI Mesh feature and the data-in-transit encryption feature cannot be enabled at the same time.

    VsanStretchedCluster Pulumi.VSphere.Inputs.ComputeClusterVsanStretchedCluster

    Configurations of vSAN stretched cluster.

    VsanUnmapEnabled bool

    Enables vSAN unmap on the cluster. You must explicitly enable vSAN unmap when you enable vSAN ESA on the cluster.

    VsanVerboseModeEnabled bool

    Enables verbose mode for vSAN performance service on the cluster.

    CustomAttributes map[string]string

    A map of custom attribute IDs to attribute value strings to set for the cluster.

    NOTE: Custom attributes are unsupported on direct ESXi connections and require vCenter Server.

    DatacenterId string

    The managed object ID of the datacenter to create the cluster in. Forces a new resource if changed.

    DpmAutomationLevel string

    The automation level for host power operations in this cluster. Can be one of manual or automated. Default: manual.

    DpmEnabled bool

    Enable DPM support for DRS in this cluster. Requires drs_enabled to be true in order to be effective. Default: false.

    DpmThreshold int

    A value between 1 and 5 indicating the threshold of load within the cluster that influences host power operations. This affects both power on and power off operations - a lower setting will tolerate more of a surplus/deficit than a higher setting. Default: 3.

    DrsAdvancedOptions map[string]string

    A key/value map that specifies advanced options for DRS and DPM.

    DrsAutomationLevel string

    The default automation level for all virtual machines in this cluster. Can be one of manual, partiallyAutomated, or fullyAutomated. Default: manual.

    DrsEnablePredictiveDrs bool

    When true, enables DRS to use data from vRealize Operations Manager to make proactive DRS recommendations. *

    DrsEnableVmOverrides bool

    Allow individual DRS overrides to be set for virtual machines in the cluster. Default: true.

    DrsEnabled bool

    Enable DRS for this cluster. Default: false.

    DrsMigrationThreshold int

    A value between 1 and 5 indicating the threshold of imbalance tolerated between hosts. A lower setting will tolerate more imbalance while a higher setting will tolerate less. Default: 3.

    DrsScaleDescendantsShares string

    Enable scalable shares for all resource pools in the cluster. Can be one of disabled or scaleCpuAndMemoryShares. Default: disabled.

    Folder string

    The relative path to a folder to put this cluster in. This is a path relative to the datacenter you are deploying the cluster to. Example: for the dc1 datacenter and a provided folder of foo/bar, the provider will place a cluster named compute-cluster-test in a host folder located at /dc1/host/foo/bar, with the final inventory path being /dc1/host/foo/bar/compute-cluster-test.

    ForceEvacuateOnDestroy bool

    When destroying the resource, setting this to true will auto-remove any hosts that are currently members of the cluster, as if they were removed by taking their entries out of host_system_ids (see below). This is an advanced option and should only be used for testing. Default: false.

    NOTE: Do not set force_evacuate_on_destroy in production operation as there are many pitfalls to its use when working with complex cluster configurations. Depending on the virtual machines currently on the cluster, and your DRS and HA settings, the full host evacuation may fail. Instead, incrementally remove hosts from your configuration by adjusting the contents of the host_system_ids attribute.

    HaAdmissionControlFailoverHostSystemIds []string

    Defines the managed object IDs of hosts to use as dedicated failover hosts. These hosts are kept as available as possible - admission control will block access to the host, and DRS will ignore the host when making recommendations.

    HaAdmissionControlHostFailureTolerance int

    The maximum number of failed hosts that admission control tolerates when making decisions on whether to permit virtual machine operations. The maximum is one less than the number of hosts in the cluster. Default: 1. *

    HaAdmissionControlPerformanceTolerance int

    The percentage of resource reduction that a cluster of virtual machines can tolerate in case of a failover. A value of 0 produces warnings only, whereas a value of 100 disables the setting. Default: 100 (disabled).

    HaAdmissionControlPolicy string

    The type of admission control policy to use with vSphere HA. Can be one of resourcePercentage, slotPolicy, failoverHosts, or disabled. Default: resourcePercentage.

    HaAdmissionControlResourcePercentageAutoCompute bool

    Automatically determine available resource percentages by subtracting the average number of host resources represented by the ha_admission_control_host_failure_tolerance setting from the total amount of resources in the cluster. Disable to supply user-defined values. Default: true. *

    HaAdmissionControlResourcePercentageCpu int

    Controls the user-defined percentage of CPU resources in the cluster to reserve for failover. Default: 100.

    HaAdmissionControlResourcePercentageMemory int

    Controls the user-defined percentage of memory resources in the cluster to reserve for failover. Default: 100.

    HaAdmissionControlSlotPolicyExplicitCpu int

    Controls the user-defined CPU slot size, in MHz. Default: 32.

    HaAdmissionControlSlotPolicyExplicitMemory int

    Controls the user-defined memory slot size, in MB. Default: 100.

    HaAdmissionControlSlotPolicyUseExplicitSize bool

    Controls whether to supply explicit values for the CPU and memory slot sizes. The default is false, which tells vSphere to gather an automatic average based on all powered-on virtual machines currently in the cluster.
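
    Taken together, the slot policy settings look like the following hedged TypeScript sketch (again assuming a placeholder datacenter named dc-01):

    import * as vsphere from "@pulumi/vsphere";

    const datacenter = vsphere.getDatacenter({ name: "dc-01" }); // placeholder name

    // HA with slot-policy admission control and explicit slot sizes.
    const slotPolicyCluster = new vsphere.ComputeCluster("slot-policy-cluster", {
        datacenterId: datacenter.then(dc => dc.id),
        haEnabled: true,
        haAdmissionControlPolicy: "slotPolicy",
        haAdmissionControlSlotPolicyUseExplicitSize: true,
        haAdmissionControlSlotPolicyExplicitCpu: 512,    // slot size in MHz
        haAdmissionControlSlotPolicyExplicitMemory: 512, // slot size in MB
    });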

    HaAdvancedOptions map[string]string

    A key/value map that specifies advanced options for vSphere HA.

    HaDatastoreApdRecoveryAction string

    Controls the action to take on virtual machines if an APD status on an affected datastore clears in the middle of an APD event. Can be one of none or reset. Default: none. *

    HaDatastoreApdResponse string

    Controls the action to take on virtual machines when the cluster has detected loss to all paths to a relevant datastore. Can be one of disabled, warning, restartConservative, or restartAggressive. Default: disabled. *

    HaDatastoreApdResponseDelay int

    The time, in seconds, to wait after an APD timeout event to run the response action defined in ha_datastore_apd_response. Default: 180 seconds (3 minutes). *

    HaDatastorePdlResponse string

    Controls the action to take on virtual machines when the cluster has detected a permanent device loss to a relevant datastore. Can be one of disabled, warning, or restartAggressive. Default: disabled. *

    HaEnabled bool

    Enable vSphere HA for this cluster. Default: false.

    HaHeartbeatDatastoreIds []string

    The list of managed object IDs for preferred datastores to use for HA heartbeating. This setting is only useful when ha_heartbeat_datastore_policy is set to either userSelectedDs or allFeasibleDsWithUserPreference.

    HaHeartbeatDatastorePolicy string

    The selection policy for HA heartbeat datastores. Can be one of allFeasibleDs, userSelectedDs, or allFeasibleDsWithUserPreference. Default: allFeasibleDsWithUserPreference.
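
    For example, selecting specific heartbeat datastores requires the userSelectedDs (or allFeasibleDsWithUserPreference) policy. A sketch under assumed names (dc-01, datastore-01):

    import * as vsphere from "@pulumi/vsphere";

    const datacenter = vsphere.getDatacenter({ name: "dc-01" }); // assumed name
    const heartbeatDs = datacenter.then(dc =>
        vsphere.getDatastore({ name: "datastore-01", datacenterId: dc.id })); // assumed name

    const haCluster = new vsphere.ComputeCluster("ha-cluster", {
        datacenterId: datacenter.then(dc => dc.id),
        haEnabled: true,
        haHeartbeatDatastorePolicy: "userSelectedDs",
        haHeartbeatDatastoreIds: [heartbeatDs.then(ds => ds.id)],
    });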

    HaHostIsolationResponse string

    The action to take on virtual machines when a host has detected that it has been isolated from the rest of the cluster. Can be one of none, powerOff, or shutdown. Default: none.

    HaHostMonitoring string

    Global setting that controls whether vSphere HA remediates virtual machines on host failure. Can be one of enabled or disabled. Default: enabled.

    HaVmComponentProtection string

    Controls vSphere VM component protection for virtual machines in this cluster. Can be one of enabled or disabled. Default: enabled. *

    HaVmDependencyRestartCondition string

    The condition used to determine whether or not virtual machines in a certain restart priority class are online, allowing HA to move on to restarting virtual machines on the next priority. Can be one of none, poweredOn, guestHbStatusGreen, or appHbStatusGreen. The default is none, which means that a virtual machine is considered ready immediately after a host is found to start it on. *

    HaVmFailureInterval int

    The time interval, in seconds, within which a heartbeat must be received from a virtual machine; if no heartbeat is received within this interval, the virtual machine is marked as failed. Default: 30 seconds.

    HaVmMaximumFailureWindow int

    The time, in seconds, for the reset window in which ha_vm_maximum_resets can operate. When this window expires, no more resets are attempted regardless of the setting configured in ha_vm_maximum_resets. A value of -1 indicates no window, allowing an unlimited reset time. Default: -1 (no window).

    HaVmMaximumResets int

    The maximum number of resets that HA will perform on a virtual machine when responding to a failure event. Default: 3.

    HaVmMinimumUptime int

    The time, in seconds, that HA waits after powering on a virtual machine before monitoring for heartbeats. Default: 120 seconds (2 minutes).

    HaVmMonitoring string

    The type of virtual machine monitoring to use when HA is enabled in the cluster. Can be one of vmMonitoringDisabled, vmMonitoringOnly, or vmAndAppMonitoring. Default: vmMonitoringDisabled.
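
    The VM monitoring settings above work as a group; a hedged TypeScript sketch (placeholder datacenter dc-01) using the documented defaults:

    import * as vsphere from "@pulumi/vsphere";

    const datacenter = vsphere.getDatacenter({ name: "dc-01" }); // placeholder name

    // Mark a VM as failed after 30s without heartbeats, reset it at most
    // 3 times, and wait 120s after power-on before monitoring begins.
    const monitoredCluster = new vsphere.ComputeCluster("monitored-cluster", {
        datacenterId: datacenter.then(dc => dc.id),
        haEnabled: true,
        haVmMonitoring: "vmMonitoringOnly",
        haVmFailureInterval: 30,
        haVmMaximumResets: 3,
        haVmMinimumUptime: 120,
    });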

    HaVmRestartAdditionalDelay int

    Additional delay, in seconds, after the ready condition is met; a virtual machine is considered ready only once this delay has elapsed. Default: 0 seconds (no delay). *

    HaVmRestartPriority string

    The default restart priority for affected virtual machines when vSphere detects a host failure. Can be one of lowest, low, medium, high, or highest. Default: medium.

    HaVmRestartTimeout int

    The maximum time, in seconds, that vSphere HA will wait for virtual machines in one priority to be ready before proceeding with the next priority. Default: 600 seconds (10 minutes). *

    HostClusterExitTimeout int

    The timeout, in seconds, for each host maintenance mode operation when removing hosts from a cluster. Default: 3600 seconds (1 hour).

    HostManaged bool

    Can be set to true if compute cluster membership will be managed through the host resource rather than the compute_cluster resource. Conflicts with: host_system_ids.

    HostSystemIds []string

    The managed object IDs of the hosts to put in the cluster. Conflicts with: host_managed.
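
    A sketch of cluster membership managed through host_system_ids (the host and datacenter names are assumptions for illustration):

    import * as vsphere from "@pulumi/vsphere";

    const datacenter = vsphere.getDatacenter({ name: "dc-01" }); // assumed name
    const host = datacenter.then(dc =>
        vsphere.getHost({ name: "esxi-01.example.com", datacenterId: dc.id })); // assumed name

    // Membership is managed here via hostSystemIds, so hostManaged is left unset.
    const memberCluster = new vsphere.ComputeCluster("member-cluster", {
        datacenterId: datacenter.then(dc => dc.id),
        hostSystemIds: [host.then(h => h.id)],
    });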

    Name string

    The name of the cluster.

    ProactiveHaAutomationLevel string

    Determines how the host quarantine, maintenance mode, or virtual machine migration recommendations made by proactive HA are to be handled. Can be one of Automated or Manual. Default: Manual. *

    ProactiveHaEnabled bool

    Enables Proactive HA. Default: false. *

    ProactiveHaModerateRemediation string

    The configured remediation for moderately degraded hosts. Can be one of MaintenanceMode or QuarantineMode. Note that this cannot be set to MaintenanceMode when proactive_ha_severe_remediation is set to QuarantineMode. Default: QuarantineMode. *

    ProactiveHaProviderIds []string

    The list of IDs for health update providers configured for this cluster. *

    ProactiveHaSevereRemediation string

    The configured remediation for severely degraded hosts. Can be one of MaintenanceMode or QuarantineMode. Note that this cannot be set to QuarantineMode when proactive_ha_moderate_remediation is set to MaintenanceMode. Default: QuarantineMode. *

    ResourcePoolId string

    The managed object ID of the primary resource pool for this cluster. This can be passed directly to the resource_pool_id attribute of the vsphere.VirtualMachine resource.
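
    As a hedged illustration of that hand-off (the datastore and network names are placeholders), the cluster's resource pool ID can feed a vsphere.VirtualMachine directly:

    import * as vsphere from "@pulumi/vsphere";

    const datacenter = vsphere.getDatacenter({ name: "dc-01" }); // assumed name
    const datastore = datacenter.then(dc =>
        vsphere.getDatastore({ name: "datastore-01", datacenterId: dc.id })); // assumed name
    const network = datacenter.then(dc =>
        vsphere.getNetwork({ name: "VM Network", datacenterId: dc.id })); // assumed name

    const cluster = new vsphere.ComputeCluster("compute-cluster", {
        datacenterId: datacenter.then(dc => dc.id),
    });

    // resourcePoolId is the cluster output documented above.
    const vm = new vsphere.VirtualMachine("vm-01", {
        resourcePoolId: cluster.resourcePoolId,
        datastoreId: datastore.then(ds => ds.id),
        numCpus: 2,
        memory: 2048,
        guestId: "otherLinux64Guest",
        networkInterfaces: [{ networkId: network.then(n => n.id) }],
        disks: [{ label: "disk0", size: 20 }],
    });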

    Tags []string

    The IDs of any tags to attach to this resource.

    VsanCompressionEnabled bool

    Enables vSAN compression on the cluster.

    VsanDedupEnabled bool

    Enables vSAN deduplication on the cluster. Cannot be independently set to true. When vSAN deduplication is enabled, vSAN compression must also be enabled.
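
    Because deduplication cannot be enabled on its own, a valid configuration enables compression alongside it; a minimal sketch (placeholder datacenter name):

    import * as vsphere from "@pulumi/vsphere";

    const datacenter = vsphere.getDatacenter({ name: "dc-01" }); // placeholder name

    // Deduplication requires compression to be enabled as well.
    const vsanCluster = new vsphere.ComputeCluster("vsan-cluster", {
        datacenterId: datacenter.then(dc => dc.id),
        vsanEnabled: true,
        vsanCompressionEnabled: true,
        vsanDedupEnabled: true,
    });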

    VsanDiskGroups []ComputeClusterVsanDiskGroupArgs

    Represents the configuration of a host disk group in the cluster.

    VsanDitEncryptionEnabled bool

    Enables vSAN data-in-transit encryption on the cluster. Conflicts with vsan_remote_datastore_ids, i.e., the vSAN data-in-transit encryption feature cannot be enabled at the same time as the vSAN HCI Mesh feature.

    VsanDitRekeyInterval int

    Indicates the rekey interval in minutes for data-in-transit encryption. The valid rekey interval is 30 to 10800 (feature defaults to 1440). Conflicts with vsan_remote_datastore_ids.

    VsanEnabled bool

    Enables vSAN on the cluster.

    VsanEsaEnabled bool

    Enables vSAN ESA on the cluster.

    VsanFaultDomains []ComputeClusterVsanFaultDomainArgs

    Configurations of vSAN fault domains.

    VsanNetworkDiagnosticModeEnabled bool

    Enables network diagnostic mode for vSAN performance service on the cluster.

    VsanPerformanceEnabled bool

    Enables vSAN performance service on the cluster. Default: true.

    VsanRemoteDatastoreIds []string

    The remote vSAN datastore IDs to be mounted to this cluster. Conflicts with vsan_dit_encryption_enabled and vsan_dit_rekey_interval, i.e., the vSAN HCI Mesh feature cannot be enabled at the same time as the data-in-transit encryption feature.

    VsanStretchedCluster ComputeClusterVsanStretchedClusterArgs

    Configurations of vSAN stretched cluster.

    VsanUnmapEnabled bool

    Enables vSAN unmap on the cluster. You must explicitly enable vSAN unmap when you enable vSAN ESA on the cluster.

    VsanVerboseModeEnabled bool

    Enables verbose mode for vSAN performance service on the cluster.

    customAttributes Map<String,String>

    A map of custom attribute IDs to attribute value strings to set for the cluster.

    NOTE: Custom attributes are unsupported on direct ESXi connections and require vCenter Server.

    datacenterId String

    The managed object ID of the datacenter to create the cluster in. Forces a new resource if changed.

    dpmAutomationLevel String

    The automation level for host power operations in this cluster. Can be one of manual or automated. Default: manual.

    dpmEnabled Boolean

    Enable DPM support for DRS in this cluster. Requires drs_enabled to be true in order to be effective. Default: false.

    dpmThreshold Integer

    A value between 1 and 5 indicating the threshold of load within the cluster that influences host power operations. This affects both power on and power off operations - a lower setting will tolerate more of a surplus/deficit than a higher setting. Default: 3.
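
    Since DPM only takes effect when DRS is enabled, a working configuration sets both; a hedged TypeScript sketch (placeholder datacenter name), consistent with the sketches earlier on this page:

    import * as vsphere from "@pulumi/vsphere";

    const datacenter = vsphere.getDatacenter({ name: "dc-01" }); // placeholder name

    // DPM has no effect unless DRS is also enabled.
    const dpmCluster = new vsphere.ComputeCluster("dpm-cluster", {
        datacenterId: datacenter.then(dc => dc.id),
        drsEnabled: true,
        dpmEnabled: true,
        dpmAutomationLevel: "automated",
        dpmThreshold: 3,
    });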

    drsAdvancedOptions Map<String,String>

    A key/value map that specifies advanced options for DRS and DPM.

    drsAutomationLevel String

    The default automation level for all virtual machines in this cluster. Can be one of manual, partiallyAutomated, or fullyAutomated. Default: manual.

    drsEnablePredictiveDrs Boolean

    When true, enables DRS to use data from vRealize Operations Manager to make proactive DRS recommendations. *

    drsEnableVmOverrides Boolean

    Allow individual DRS overrides to be set for virtual machines in the cluster. Default: true.

    drsEnabled Boolean

    Enable DRS for this cluster. Default: false.

    drsMigrationThreshold Integer

    A value between 1 and 5 indicating the threshold of imbalance tolerated between hosts. A lower setting will tolerate more imbalance while a higher setting will tolerate less. Default: 3.

    drsScaleDescendantsShares String

    Enable scalable shares for all resource pools in the cluster. Can be one of disabled or scaleCpuAndMemoryShares. Default: disabled.

    folder String

    The relative path to a folder to put this cluster in. This is a path relative to the datacenter you are deploying the cluster to. Example: for the dc1 datacenter, and a provided folder of foo/bar, the provider will place a cluster named compute-cluster-test in a host folder located at /dc1/host/foo/bar, with the final inventory path being /dc1/host/foo/bar/compute-cluster-test.

    forceEvacuateOnDestroy Boolean

    When destroying the resource, setting this to true will auto-remove any hosts that are currently members of the cluster, as if they were removed by taking their entries out of host_system_ids (see below). This is an advanced option and should only be used for testing. Default: false.

    NOTE: Do not set force_evacuate_on_destroy in production operation as there are many pitfalls to its use when working with complex cluster configurations. Depending on the virtual machines currently on the cluster, and your DRS and HA settings, the full host evacuation may fail. Instead, incrementally remove hosts from your configuration by adjusting the contents of the host_system_ids attribute.

    haAdmissionControlFailoverHostSystemIds List<String>

    Defines the managed object IDs of hosts to use as dedicated failover hosts. These hosts are kept as available as possible - admission control will block access to the host, and DRS will ignore the host when making recommendations.

    haAdmissionControlHostFailureTolerance Integer

    The maximum number of failed hosts that admission control tolerates when making decisions on whether to permit virtual machine operations. The maximum is one less than the number of hosts in the cluster. Default: 1. *

    haAdmissionControlPerformanceTolerance Integer

    The percentage of resource reduction that a cluster of virtual machines can tolerate in case of a failover. A value of 0 produces warnings only, whereas a value of 100 disables the setting. Default: 100 (disabled).

    haAdmissionControlPolicy String

    The type of admission control policy to use with vSphere HA. Can be one of resourcePercentage, slotPolicy, failoverHosts, or disabled. Default: resourcePercentage.

    haAdmissionControlResourcePercentageAutoCompute Boolean

    Automatically determine available resource percentages by subtracting the average number of host resources represented by the ha_admission_control_host_failure_tolerance setting from the total amount of resources in the cluster. Disable to supply user-defined values. Default: true. *

    haAdmissionControlResourcePercentageCpu Integer

    Controls the user-defined percentage of CPU resources in the cluster to reserve for failover. Default: 100.

    haAdmissionControlResourcePercentageMemory Integer

    Controls the user-defined percentage of memory resources in the cluster to reserve for failover. Default: 100.

    haAdmissionControlSlotPolicyExplicitCpu Integer

    Controls the user-defined CPU slot size, in MHz. Default: 32.

    haAdmissionControlSlotPolicyExplicitMemory Integer

    Controls the user-defined memory slot size, in MB. Default: 100.

    haAdmissionControlSlotPolicyUseExplicitSize Boolean

    Controls whether to supply explicit values for the CPU and memory slot sizes. The default is false, which tells vSphere to gather an automatic average based on all powered-on virtual machines currently in the cluster.

    haAdvancedOptions Map<String,String>

    A key/value map that specifies advanced options for vSphere HA.

    haDatastoreApdRecoveryAction String

    Controls the action to take on virtual machines if an APD status on an affected datastore clears in the middle of an APD event. Can be one of none or reset. Default: none. *

    haDatastoreApdResponse String

    Controls the action to take on virtual machines when the cluster has detected loss to all paths to a relevant datastore. Can be one of disabled, warning, restartConservative, or restartAggressive. Default: disabled. *

    haDatastoreApdResponseDelay Integer

    The time, in seconds, to wait after an APD timeout event to run the response action defined in ha_datastore_apd_response. Default: 180 seconds (3 minutes). *

    haDatastorePdlResponse String

    Controls the action to take on virtual machines when the cluster has detected a permanent device loss to a relevant datastore. Can be one of disabled, warning, or restartAggressive. Default: disabled. *

    haEnabled Boolean

    Enable vSphere HA for this cluster. Default: false.

    haHeartbeatDatastoreIds List<String>

    The list of managed object IDs for preferred datastores to use for HA heartbeating. This setting is only useful when ha_heartbeat_datastore_policy is set to either userSelectedDs or allFeasibleDsWithUserPreference.

    haHeartbeatDatastorePolicy String

    The selection policy for HA heartbeat datastores. Can be one of allFeasibleDs, userSelectedDs, or allFeasibleDsWithUserPreference. Default: allFeasibleDsWithUserPreference.

    haHostIsolationResponse String

    The action to take on virtual machines when a host has detected that it has been isolated from the rest of the cluster. Can be one of none, powerOff, or shutdown. Default: none.

    haHostMonitoring String

    Global setting that controls whether vSphere HA remediates virtual machines on host failure. Can be one of enabled or disabled. Default: enabled.

    haVmComponentProtection String

    Controls vSphere VM component protection for virtual machines in this cluster. Can be one of enabled or disabled. Default: enabled. *

    haVmDependencyRestartCondition String

    The condition used to determine whether or not virtual machines in a certain restart priority class are online, allowing HA to move on to restarting virtual machines on the next priority. Can be one of none, poweredOn, guestHbStatusGreen, or appHbStatusGreen. The default is none, which means that a virtual machine is considered ready immediately after a host is found to start it on. *

    haVmFailureInterval Integer

    The time interval, in seconds, within which a heartbeat must be received from a virtual machine; if no heartbeat is received within this interval, the virtual machine is marked as failed. Default: 30 seconds.

    haVmMaximumFailureWindow Integer

    The time, in seconds, for the reset window in which ha_vm_maximum_resets can operate. When this window expires, no more resets are attempted regardless of the setting configured in ha_vm_maximum_resets. A value of -1 indicates no window, allowing an unlimited reset time. Default: -1 (no window).

    haVmMaximumResets Integer

    The maximum number of resets that HA will perform on a virtual machine when responding to a failure event. Default: 3.

    haVmMinimumUptime Integer

    The time, in seconds, that HA waits after powering on a virtual machine before monitoring for heartbeats. Default: 120 seconds (2 minutes).

    haVmMonitoring String

    The type of virtual machine monitoring to use when HA is enabled in the cluster. Can be one of vmMonitoringDisabled, vmMonitoringOnly, or vmAndAppMonitoring. Default: vmMonitoringDisabled.

    haVmRestartAdditionalDelay Integer

    Additional delay, in seconds, after the ready condition is met; a virtual machine is considered ready only once this delay has elapsed. Default: 0 seconds (no delay). *

    haVmRestartPriority String

    The default restart priority for affected virtual machines when vSphere detects a host failure. Can be one of lowest, low, medium, high, or highest. Default: medium.

    haVmRestartTimeout Integer

    The maximum time, in seconds, that vSphere HA will wait for virtual machines in one priority to be ready before proceeding with the next priority. Default: 600 seconds (10 minutes). *

    hostClusterExitTimeout Integer

    The timeout, in seconds, for each host maintenance mode operation when removing hosts from a cluster. Default: 3600 seconds (1 hour).

    hostManaged Boolean

    Can be set to true if compute cluster membership will be managed through the host resource rather than the compute_cluster resource. Conflicts with: host_system_ids.

    hostSystemIds List<String>

    The managed object IDs of the hosts to put in the cluster. Conflicts with: host_managed.

    name String

    The name of the cluster.

    proactiveHaAutomationLevel String

    Determines how the host quarantine, maintenance mode, or virtual machine migration recommendations made by proactive HA are to be handled. Can be one of Automated or Manual. Default: Manual. *

    proactiveHaEnabled Boolean

    Enables Proactive HA. Default: false. *

    proactiveHaModerateRemediation String

    The configured remediation for moderately degraded hosts. Can be one of MaintenanceMode or QuarantineMode. Note that this cannot be set to MaintenanceMode when proactive_ha_severe_remediation is set to QuarantineMode. Default: QuarantineMode. *

    proactiveHaProviderIds List<String>

    The list of IDs for health update providers configured for this cluster. *

    proactiveHaSevereRemediation String

    The configured remediation for severely degraded hosts. Can be one of MaintenanceMode or QuarantineMode. Note that this cannot be set to QuarantineMode when proactive_ha_moderate_remediation is set to MaintenanceMode. Default: QuarantineMode. *

    resourcePoolId String

    The managed object ID of the primary resource pool for this cluster. This can be passed directly to the resource_pool_id attribute of the vsphere.VirtualMachine resource.

    tags List<String>

    The IDs of any tags to attach to this resource.

    vsanCompressionEnabled Boolean

    Enables vSAN compression on the cluster.

    vsanDedupEnabled Boolean

    Enables vSAN deduplication on the cluster. Cannot be independently set to true. When vSAN deduplication is enabled, vSAN compression must also be enabled.

    vsanDiskGroups List<ComputeClusterVsanDiskGroup>

    Represents the configuration of a host disk group in the cluster.

    vsanDitEncryptionEnabled Boolean

    Enables vSAN data-in-transit encryption on the cluster. Conflicts with vsan_remote_datastore_ids, i.e., the vSAN data-in-transit encryption feature cannot be enabled at the same time as the vSAN HCI Mesh feature.

    vsanDitRekeyInterval Integer

    Indicates the rekey interval in minutes for data-in-transit encryption. The valid rekey interval is 30 to 10800 (feature defaults to 1440). Conflicts with vsan_remote_datastore_ids.

    vsanEnabled Boolean

    Enables vSAN on the cluster.

    vsanEsaEnabled Boolean

    Enables vSAN ESA on the cluster.

    vsanFaultDomains List<ComputeClusterVsanFaultDomain>

    Configurations of vSAN fault domains.

    vsanNetworkDiagnosticModeEnabled Boolean

    Enables network diagnostic mode for vSAN performance service on the cluster.

    vsanPerformanceEnabled Boolean

    Enables vSAN performance service on the cluster. Default: true.

    vsanRemoteDatastoreIds List<String>

    The remote vSAN datastore IDs to be mounted to this cluster. Conflicts with vsan_dit_encryption_enabled and vsan_dit_rekey_interval, i.e., the vSAN HCI Mesh feature cannot be enabled at the same time as the data-in-transit encryption feature.

    vsanStretchedCluster ComputeClusterVsanStretchedCluster

    Configurations of vSAN stretched cluster.

    vsanUnmapEnabled Boolean

    Enables vSAN unmap on the cluster. You must explicitly enable vSAN unmap when you enable vSAN ESA on the cluster.

    vsanVerboseModeEnabled Boolean

    Enables verbose mode for vSAN performance service on the cluster.

    customAttributes {[key: string]: string}

    A map of custom attribute IDs to attribute value strings to set for the cluster.

    NOTE: Custom attributes are unsupported on direct ESXi connections and require vCenter Server.

    datacenterId string

    The managed object ID of the datacenter to create the cluster in. Forces a new resource if changed.

    dpmAutomationLevel string

    The automation level for host power operations in this cluster. Can be one of manual or automated. Default: manual.

    dpmEnabled boolean

    Enable DPM support for DRS in this cluster. Requires drs_enabled to be true in order to be effective. Default: false.

    dpmThreshold number

    A value between 1 and 5 indicating the threshold of load within the cluster that influences host power operations. This affects both power on and power off operations - a lower setting will tolerate more of a surplus/deficit than a higher setting. Default: 3.

    drsAdvancedOptions {[key: string]: string}

    A key/value map that specifies advanced options for DRS and DPM.

    drsAutomationLevel string

    The default automation level for all virtual machines in this cluster. Can be one of manual, partiallyAutomated, or fullyAutomated. Default: manual.

    drsEnablePredictiveDrs boolean

    When true, enables DRS to use data from vRealize Operations Manager to make proactive DRS recommendations. *

    drsEnableVmOverrides boolean

    Allow individual DRS overrides to be set for virtual machines in the cluster. Default: true.

    drsEnabled boolean

    Enable DRS for this cluster. Default: false.

    drsMigrationThreshold number

    A value between 1 and 5 indicating the threshold of imbalance tolerated between hosts. A lower setting will tolerate more imbalance while a higher setting will tolerate less. Default: 3.

    drsScaleDescendantsShares string

    Enable scalable shares for all resource pools in the cluster. Can be one of disabled or scaleCpuAndMemoryShares. Default: disabled.

    folder string

    The relative path to a folder to put this cluster in. This is a path relative to the datacenter you are deploying the cluster to. Example: for the dc1 datacenter, and a provided folder of foo/bar, the provider will place a cluster named compute-cluster-test in a host folder located at /dc1/host/foo/bar, with the final inventory path being /dc1/host/foo/bar/compute-cluster-test.

    forceEvacuateOnDestroy boolean

    When destroying the resource, setting this to true will auto-remove any hosts that are currently members of the cluster, as if they were removed by taking their entries out of host_system_ids (see below). This is an advanced option and should only be used for testing. Default: false.

    NOTE: Do not set force_evacuate_on_destroy in production operation as there are many pitfalls to its use when working with complex cluster configurations. Depending on the virtual machines currently on the cluster, and your DRS and HA settings, the full host evacuation may fail. Instead, incrementally remove hosts from your configuration by adjusting the contents of the host_system_ids attribute.

    haAdmissionControlFailoverHostSystemIds string[]

    Defines the managed object IDs of hosts to use as dedicated failover hosts. These hosts are kept as available as possible - admission control will block access to the host, and DRS will ignore the host when making recommendations.

    haAdmissionControlHostFailureTolerance number

    The maximum number of failed hosts that admission control tolerates when making decisions on whether to permit virtual machine operations. The maximum is one less than the number of hosts in the cluster. Default: 1. *

    haAdmissionControlPerformanceTolerance number

    The percentage of resource reduction that a cluster of virtual machines can tolerate in case of a failover. A value of 0 produces warnings only, whereas a value of 100 disables the setting. Default: 100 (disabled).

    haAdmissionControlPolicy string

    The type of admission control policy to use with vSphere HA. Can be one of resourcePercentage, slotPolicy, failoverHosts, or disabled. Default: resourcePercentage.

    haAdmissionControlResourcePercentageAutoCompute boolean

    Automatically determine available resource percentages by subtracting the average number of host resources represented by the ha_admission_control_host_failure_tolerance setting from the total amount of resources in the cluster. Disable to supply user-defined values. Default: true. *

    haAdmissionControlResourcePercentageCpu number

    Controls the user-defined percentage of CPU resources in the cluster to reserve for failover. Default: 100.

    haAdmissionControlResourcePercentageMemory number

    Controls the user-defined percentage of memory resources in the cluster to reserve for failover. Default: 100.

    haAdmissionControlSlotPolicyExplicitCpu number

    Controls the user-defined CPU slot size, in MHz. Default: 32.

    haAdmissionControlSlotPolicyExplicitMemory number

    Controls the user-defined memory slot size, in MB. Default: 100.

    haAdmissionControlSlotPolicyUseExplicitSize boolean

    Controls whether to supply explicit values for the CPU and memory slot sizes. The default is false, which tells vSphere to gather an automatic average based on all powered-on virtual machines currently in the cluster.

    haAdvancedOptions {[key: string]: string}

    A key/value map that specifies advanced options for vSphere HA.

    haDatastoreApdRecoveryAction string

    Controls the action to take on virtual machines if an APD status on an affected datastore clears in the middle of an APD event. Can be one of none or reset. Default: none. *

    haDatastoreApdResponse string

    Controls the action to take on virtual machines when the cluster has detected loss to all paths to a relevant datastore. Can be one of disabled, warning, restartConservative, or restartAggressive. Default: disabled. *

    haDatastoreApdResponseDelay number

    The time, in seconds, to wait after an APD timeout event to run the response action defined in ha_datastore_apd_response. Default: 180 seconds (3 minutes). *

    haDatastorePdlResponse string

    Controls the action to take on virtual machines when the cluster has detected a permanent device loss to a relevant datastore. Can be one of disabled, warning, or restartAggressive. Default: disabled. *

    haEnabled boolean

    Enable vSphere HA for this cluster. Default: false.

    haHeartbeatDatastoreIds string[]

    The list of managed object IDs for preferred datastores to use for HA heartbeating. This setting is only useful when ha_heartbeat_datastore_policy is set to either userSelectedDs or allFeasibleDsWithUserPreference.

    haHeartbeatDatastorePolicy string

    The selection policy for HA heartbeat datastores. Can be one of allFeasibleDs, userSelectedDs, or allFeasibleDsWithUserPreference. Default: allFeasibleDsWithUserPreference.

    haHostIsolationResponse string

    The action to take on virtual machines when a host has detected that it has been isolated from the rest of the cluster. Can be one of none, powerOff, or shutdown. Default: none.

    haHostMonitoring string

    Global setting that controls whether vSphere HA remediates virtual machines on host failure. Can be one of enabled or disabled. Default: enabled.

    haVmComponentProtection string

    Controls vSphere VM component protection for virtual machines in this cluster. Can be one of enabled or disabled. Default: enabled. *

    haVmDependencyRestartCondition string

    The condition used to determine whether or not virtual machines in a certain restart priority class are online, allowing HA to move on to restarting virtual machines on the next priority. Can be one of none, poweredOn, guestHbStatusGreen, or appHbStatusGreen. The default is none, which means that a virtual machine is considered ready immediately after a host is found to start it on. *

    haVmFailureInterval number

    The time interval, in seconds, within which a heartbeat must be received from a virtual machine; if no heartbeat is received within this interval, the virtual machine is marked as failed. Default: 30 seconds.

    haVmMaximumFailureWindow number

    The time, in seconds, for the reset window in which ha_vm_maximum_resets can operate. When this window expires, no more resets are attempted regardless of the setting configured in ha_vm_maximum_resets. A value of -1 indicates no window, allowing an unlimited reset time. Default: -1 (no window).

    haVmMaximumResets number

    The maximum number of resets that HA will perform on a virtual machine when responding to a failure event. Default: 3.

    haVmMinimumUptime number

    The time, in seconds, that HA waits after powering on a virtual machine before monitoring for heartbeats. Default: 120 seconds (2 minutes).

    haVmMonitoring string

    The type of virtual machine monitoring to use when HA is enabled in the cluster. Can be one of vmMonitoringDisabled, vmMonitoringOnly, or vmAndAppMonitoring. Default: vmMonitoringDisabled.

    haVmRestartAdditionalDelay number

    Additional delay, in seconds, after the ready condition is met; a virtual machine is considered ready only once this delay has elapsed. Default: 0 seconds (no delay). *

    haVmRestartPriority string

    The default restart priority for affected virtual machines when vSphere detects a host failure. Can be one of lowest, low, medium, high, or highest. Default: medium.

    haVmRestartTimeout number

    The maximum time, in seconds, that vSphere HA will wait for virtual machines in one priority to be ready before proceeding with the next priority. Default: 600 seconds (10 minutes). *

    hostClusterExitTimeout number

    The timeout, in seconds, for each host maintenance mode operation when removing hosts from a cluster. Default: 3600 seconds (1 hour).

    hostManaged boolean

    Can be set to true if compute cluster membership will be managed through the host resource rather than the compute_cluster resource. Conflicts with: host_system_ids.

    hostSystemIds string[]

    The managed object IDs of the hosts to put in the cluster. Conflicts with: host_managed.

    name string

    The name of the cluster.

    proactiveHaAutomationLevel string

    Determines how the host quarantine, maintenance mode, or virtual machine migration recommendations made by proactive HA are to be handled. Can be one of Automated or Manual. Default: Manual. *

    proactiveHaEnabled boolean

    Enables Proactive HA. Default: false. *

    proactiveHaModerateRemediation string

    The configured remediation for moderately degraded hosts. Can be one of MaintenanceMode or QuarantineMode. Note that this cannot be set to MaintenanceMode when proactive_ha_severe_remediation is set to QuarantineMode. Default: QuarantineMode. *

    proactiveHaProviderIds string[]

    The list of IDs for health update providers configured for this cluster. *

    proactiveHaSevereRemediation string

    The configured remediation for severely degraded hosts. Can be one of MaintenanceMode or QuarantineMode. Note that this cannot be set to QuarantineMode when proactive_ha_moderate_remediation is set to MaintenanceMode. Default: QuarantineMode. *

    resourcePoolId string

    The managed object ID of the primary resource pool for this cluster. This can be passed directly to the resource_pool_id attribute of the vsphere.VirtualMachine resource.

    tags string[]

    The IDs of any tags to attach to this resource.

    vsanCompressionEnabled boolean

    Enables vSAN compression on the cluster.

    vsanDedupEnabled boolean

    Enables vSAN deduplication on the cluster. Cannot be independently set to true. When vSAN deduplication is enabled, vSAN compression must also be enabled.

    vsanDiskGroups ComputeClusterVsanDiskGroup[]

    Represents the configuration of a host disk group in the cluster.

    vsanDitEncryptionEnabled boolean

    Enables vSAN data-in-transit encryption on the cluster. Conflicts with vsan_remote_datastore_ids, i.e., the vSAN data-in-transit encryption feature cannot be enabled at the same time as the vSAN HCI Mesh feature.

    vsanDitRekeyInterval number

    Indicates the rekey interval in minutes for data-in-transit encryption. The valid rekey interval is 30 to 10800 (feature defaults to 1440). Conflicts with vsan_remote_datastore_ids.

    vsanEnabled boolean

    Enables vSAN on the cluster.

    vsanEsaEnabled boolean

    Enables vSAN ESA on the cluster.

    vsanFaultDomains ComputeClusterVsanFaultDomain[]

    Configurations of vSAN fault domains.

    vsanNetworkDiagnosticModeEnabled boolean

    Enables network diagnostic mode for vSAN performance service on the cluster.

    vsanPerformanceEnabled boolean

    Enables vSAN performance service on the cluster. Default: true.

    vsanRemoteDatastoreIds string[]

    The remote vSAN datastore IDs to be mounted to this cluster. Conflicts with vsan_dit_encryption_enabled and vsan_dit_rekey_interval, i.e., the vSAN HCI Mesh feature cannot be enabled at the same time as the data-in-transit encryption feature.

    vsanStretchedCluster ComputeClusterVsanStretchedCluster

    Configurations of vSAN stretched cluster.

    vsanUnmapEnabled boolean

    Enables vSAN unmap on the cluster. You must explicitly enable vSAN unmap when you enable vSAN ESA on the cluster.

    vsanVerboseModeEnabled boolean

    Enables verbose mode for vSAN performance service on the cluster.

    custom_attributes Mapping[str, str]

    A map of custom attribute IDs to attribute value strings to set for the cluster.

    NOTE: Custom attributes are unsupported on direct ESXi connections and require vCenter Server.

    datacenter_id str

    The managed object ID of the datacenter to create the cluster in. Forces a new resource if changed.

    dpm_automation_level str

    The automation level for host power operations in this cluster. Can be one of manual or automated. Default: manual.

    dpm_enabled bool

    Enable DPM support for DRS in this cluster. Requires drs_enabled to be true in order to be effective. Default: false.

    dpm_threshold int

    A value between 1 and 5 indicating the threshold of load within the cluster that influences host power operations. This affects both power on and power off operations - a lower setting will tolerate more of a surplus/deficit than a higher setting. Default: 3.

    drs_advanced_options Mapping[str, str]

    A key/value map that specifies advanced options for DRS and DPM.

    drs_automation_level str

    The default automation level for all virtual machines in this cluster. Can be one of manual, partiallyAutomated, or fullyAutomated. Default: manual.

    drs_enable_predictive_drs bool

    When true, enables DRS to use data from vRealize Operations Manager to make proactive DRS recommendations. *

    drs_enable_vm_overrides bool

    Allow individual DRS overrides to be set for virtual machines in the cluster. Default: true.

    drs_enabled bool

    Enable DRS for this cluster. Default: false.

    drs_migration_threshold int

    A value between 1 and 5 indicating the threshold of imbalance tolerated between hosts. A lower setting will tolerate more imbalance while a higher setting will tolerate less. Default: 3.

    drs_scale_descendants_shares str

    Enable scalable shares for all resource pools in the cluster. Can be one of disabled or scaleCpuAndMemoryShares. Default: disabled.

    folder str

    The relative path to a folder to put this cluster in. This is a path relative to the datacenter you are deploying the cluster to. Example: for the dc1 datacenter, and a provided folder of foo/bar, the provider will place a cluster named compute-cluster-test in a host folder located at /dc1/host/foo/bar, with the final inventory path being /dc1/host/foo/bar/compute-cluster-test.

    force_evacuate_on_destroy bool

    When destroying the resource, setting this to true will auto-remove any hosts that are currently members of the cluster, as if they were removed by taking their entries out of host_system_ids (see below). This is an advanced option and should only be used for testing. Default: false.

    NOTE: Do not set force_evacuate_on_destroy in production operation as there are many pitfalls to its use when working with complex cluster configurations. Depending on the virtual machines currently on the cluster, and your DRS and HA settings, the full host evacuation may fail. Instead, incrementally remove hosts from your configuration by adjusting the contents of the host_system_ids attribute.

    ha_admission_control_failover_host_system_ids Sequence[str]

    Defines the managed object IDs of hosts to use as dedicated failover hosts. These hosts are kept as available as possible - admission control will block access to the host, and DRS will ignore the host when making recommendations.

    ha_admission_control_host_failure_tolerance int

    The maximum number of failed hosts that admission control tolerates when making decisions on whether to permit virtual machine operations. The maximum is one less than the number of hosts in the cluster. Default: 1. *

    ha_admission_control_performance_tolerance int

    The percentage of resource reduction that a cluster of virtual machines can tolerate in case of a failover. A value of 0 produces warnings only, whereas a value of 100 disables the setting. Default: 100 (disabled).

    ha_admission_control_policy str

    The type of admission control policy to use with vSphere HA. Can be one of resourcePercentage, slotPolicy, failoverHosts, or disabled. Default: resourcePercentage.

    ha_admission_control_resource_percentage_auto_compute bool

    Automatically determine available resource percentages by subtracting the average number of host resources represented by the ha_admission_control_host_failure_tolerance setting from the total amount of resources in the cluster. Disable to supply user-defined values. Default: true. *

    ha_admission_control_resource_percentage_cpu int

    Controls the user-defined percentage of CPU resources in the cluster to reserve for failover. Default: 100.

    ha_admission_control_resource_percentage_memory int

    Controls the user-defined percentage of memory resources in the cluster to reserve for failover. Default: 100.

    ha_admission_control_slot_policy_explicit_cpu int

    Controls the user-defined CPU slot size, in MHz. Default: 32.

    ha_admission_control_slot_policy_explicit_memory int

    Controls the user-defined memory slot size, in MB. Default: 100.

    ha_admission_control_slot_policy_use_explicit_size bool

    Controls whether to supply explicit values for the CPU and memory slot sizes. The default is false, which tells vSphere to gather an automatic average based on all powered-on virtual machines currently in the cluster.

    ha_advanced_options Mapping[str, str]

    A key/value map that specifies advanced options for vSphere HA.

    ha_datastore_apd_recovery_action str

    Controls the action to take on virtual machines if an APD status on an affected datastore clears in the middle of an APD event. Can be one of none or reset. Default: none. *

    ha_datastore_apd_response str

    Controls the action to take on virtual machines when the cluster has detected loss to all paths to a relevant datastore. Can be one of disabled, warning, restartConservative, or restartAggressive. Default: disabled. *

    ha_datastore_apd_response_delay int

    The time, in seconds, to wait after an APD timeout event to run the response action defined in ha_datastore_apd_response. Default: 180 seconds (3 minutes). *

    ha_datastore_pdl_response str

    Controls the action to take on virtual machines when the cluster has detected a permanent device loss to a relevant datastore. Can be one of disabled, warning, or restartAggressive. Default: disabled. *

    ha_enabled bool

    Enable vSphere HA for this cluster. Default: false.

    ha_heartbeat_datastore_ids Sequence[str]

    The list of managed object IDs for preferred datastores to use for HA heartbeating. This setting is only useful when ha_heartbeat_datastore_policy is set to either userSelectedDs or allFeasibleDsWithUserPreference.

    ha_heartbeat_datastore_policy str

    The selection policy for HA heartbeat datastores. Can be one of allFeasibleDs, userSelectedDs, or allFeasibleDsWithUserPreference. Default: allFeasibleDsWithUserPreference.

    ha_host_isolation_response str

    The action to take on virtual machines when a host has detected that it has been isolated from the rest of the cluster. Can be one of none, powerOff, or shutdown. Default: none.

    ha_host_monitoring str

    Global setting that controls whether vSphere HA remediates virtual machines on host failure. Can be one of enabled or disabled. Default: enabled.

    ha_vm_component_protection str

    Controls vSphere VM component protection for virtual machines in this cluster. Can be one of enabled or disabled. Default: enabled. *

    ha_vm_dependency_restart_condition str

    The condition used to determine whether or not virtual machines in a certain restart priority class are online, allowing HA to move on to restarting virtual machines on the next priority. Can be one of none, poweredOn, guestHbStatusGreen, or appHbStatusGreen. The default is none, which means that a virtual machine is considered ready immediately after a host is found to start it on. *

    ha_vm_failure_interval int

    The time interval, in seconds, within which a heartbeat must be received from a virtual machine; if no heartbeat is received within this interval, the virtual machine is marked as failed. Default: 30 seconds.

    ha_vm_maximum_failure_window int

    The time, in seconds, for the reset window in which ha_vm_maximum_resets can operate. When this window expires, no more resets are attempted regardless of the setting configured in ha_vm_maximum_resets. A value of -1 indicates no window, allowing an unlimited reset time. Default: -1 (no window).

    ha_vm_maximum_resets int

    The maximum number of resets that HA will perform on a virtual machine when responding to a failure event. Default: 3.

    ha_vm_minimum_uptime int

    The time, in seconds, that HA waits after powering on a virtual machine before monitoring for heartbeats. Default: 120 seconds (2 minutes).

    ha_vm_monitoring str

    The type of virtual machine monitoring to use when HA is enabled in the cluster. Can be one of vmMonitoringDisabled, vmMonitoringOnly, or vmAndAppMonitoring. Default: vmMonitoringDisabled.

    ha_vm_restart_additional_delay int

    Additional delay, in seconds, after the ready condition is met; a virtual machine is considered ready only once this delay has elapsed. Default: 0 seconds (no delay). *

    ha_vm_restart_priority str

    The default restart priority for affected virtual machines when vSphere detects a host failure. Can be one of lowest, low, medium, high, or highest. Default: medium.

    ha_vm_restart_timeout int

    The maximum time, in seconds, that vSphere HA will wait for virtual machines in one priority to be ready before proceeding with the next priority. Default: 600 seconds (10 minutes). *

    host_cluster_exit_timeout int

    The timeout, in seconds, for each host maintenance mode operation when removing hosts from a cluster. Default: 3600 seconds (1 hour).

    host_managed bool

    Can be set to true if compute cluster membership will be managed through the host resource rather than the compute_cluster resource. Conflicts with: host_system_ids.

    host_system_ids Sequence[str]

    The managed object IDs of the hosts to put in the cluster. Conflicts with: host_managed.

    name str

    The name of the cluster.

    proactive_ha_automation_level str

    Determines how the host quarantine, maintenance mode, or virtual machine migration recommendations made by proactive HA are to be handled. Can be one of Automated or Manual. Default: Manual. *

    proactive_ha_enabled bool

    Enables Proactive HA. Default: false. *

    proactive_ha_moderate_remediation str

    The configured remediation for moderately degraded hosts. Can be one of MaintenanceMode or QuarantineMode. Note that this cannot be set to MaintenanceMode when proactive_ha_severe_remediation is set to QuarantineMode. Default: QuarantineMode. *

    proactive_ha_provider_ids Sequence[str]

    The list of IDs for health update providers configured for this cluster. *

    proactive_ha_severe_remediation str

    The configured remediation for severely degraded hosts. Can be one of MaintenanceMode or QuarantineMode. Note that this cannot be set to QuarantineMode when proactive_ha_moderate_remediation is set to MaintenanceMode. Default: QuarantineMode. *

    resource_pool_id str

    The managed object ID of the primary resource pool for this cluster. This can be passed directly to the resource_pool_id attribute of the vsphere.VirtualMachine resource.

    tags Sequence[str]

    The IDs of any tags to attach to this resource.

    vsan_compression_enabled bool

    Enables vSAN compression on the cluster.

    vsan_dedup_enabled bool

    Enables vSAN deduplication on the cluster. Cannot be independently set to true. When vSAN deduplication is enabled, vSAN compression must also be enabled.

    vsan_disk_groups Sequence[ComputeClusterVsanDiskGroupArgs]

    Represents the configuration of a host disk group in the cluster.

    vsan_dit_encryption_enabled bool

    Enables vSAN data-in-transit encryption on the cluster. Conflicts with vsan_remote_datastore_ids, i.e., the vSAN data-in-transit encryption feature cannot be enabled at the same time as the vSAN HCI Mesh feature.

    vsan_dit_rekey_interval int

    Indicates the rekey interval in minutes for data-in-transit encryption. The valid rekey interval is 30 to 10800 (feature defaults to 1440). Conflicts with vsan_remote_datastore_ids.

    vsan_enabled bool

    Enables vSAN on the cluster.

    vsan_esa_enabled bool

    Enables vSAN ESA on the cluster.

    vsan_fault_domains Sequence[ComputeClusterVsanFaultDomainArgs]

    Configurations of vSAN fault domains.

    vsan_network_diagnostic_mode_enabled bool

    Enables network diagnostic mode for vSAN performance service on the cluster.

    vsan_performance_enabled bool

    Enables vSAN performance service on the cluster. Default: true.

    vsan_remote_datastore_ids Sequence[str]

    The remote vSAN datastore IDs to be mounted to this cluster. Conflicts with vsan_dit_encryption_enabled and vsan_dit_rekey_interval, i.e., the vSAN HCI Mesh feature cannot be enabled at the same time as the data-in-transit encryption feature.

    vsan_stretched_cluster ComputeClusterVsanStretchedClusterArgs

    Configurations of vSAN stretched cluster.

    vsan_unmap_enabled bool

    Enables vSAN unmap on the cluster. You must explicitly enable vSAN unmap when you enable vSAN ESA on the cluster.

    vsan_verbose_mode_enabled bool

    Enables verbose mode for vSAN performance service on the cluster.

    customAttributes Map<String>

    A map of custom attribute IDs to attribute value strings to set for the cluster.

    NOTE: Custom attributes are unsupported on direct ESXi connections and require vCenter Server.

    datacenterId String

    The managed object ID of the datacenter to create the cluster in. Forces a new resource if changed.

    dpmAutomationLevel String

    The automation level for host power operations in this cluster. Can be one of manual or automated. Default: manual.

    dpmEnabled Boolean

    Enable DPM support for DRS in this cluster. Requires drs_enabled to be true in order to be effective. Default: false.

    dpmThreshold Number

    A value between 1 and 5 indicating the threshold of load within the cluster that influences host power operations. This affects both power on and power off operations - a lower setting will tolerate more of a surplus/deficit than a higher setting. Default: 3.

    drsAdvancedOptions Map<String>

    A key/value map that specifies advanced options for DRS and DPM.

    drsAutomationLevel String

    The default automation level for all virtual machines in this cluster. Can be one of manual, partiallyAutomated, or fullyAutomated. Default: manual.

    drsEnablePredictiveDrs Boolean

    When true, enables DRS to use data from vRealize Operations Manager to make proactive DRS recommendations. *

    drsEnableVmOverrides Boolean

    Allow individual DRS overrides to be set for virtual machines in the cluster. Default: true.

    drsEnabled Boolean

    Enable DRS for this cluster. Default: false.

    drsMigrationThreshold Number

    A value between 1 and 5 indicating the threshold of imbalance tolerated between hosts. A lower setting will tolerate more imbalance while a higher setting will tolerate less. Default: 3.

    drsScaleDescendantsShares String

    Enable scalable shares for all resource pools in the cluster. Can be one of disabled or scaleCpuAndMemoryShares. Default: disabled.

    folder String

    The relative path to a folder to put this cluster in. This is a path relative to the datacenter you are deploying the cluster to. Example: for the dc1 datacenter, and a provided folder of foo/bar, the provider will place a cluster named compute-cluster-test in a host folder located at /dc1/host/foo/bar, with the final inventory path being /dc1/host/foo/bar/compute-cluster-test.

    forceEvacuateOnDestroy Boolean

    When destroying the resource, setting this to true will auto-remove any hosts that are currently members of the cluster, as if they were removed by taking their entries out of host_system_ids (see below). This is an advanced option and should only be used for testing. Default: false.

    NOTE: Do not set force_evacuate_on_destroy in production operation as there are many pitfalls to its use when working with complex cluster configurations. Depending on the virtual machines currently on the cluster, and your DRS and HA settings, the full host evacuation may fail. Instead, incrementally remove hosts from your configuration by adjusting the contents of the host_system_ids attribute.

    haAdmissionControlFailoverHostSystemIds List<String>

    Defines the managed object IDs of hosts to use as dedicated failover hosts. These hosts are kept as available as possible - admission control will block access to the host, and DRS will ignore the host when making recommendations.

    haAdmissionControlHostFailureTolerance Number

    The maximum number of failed hosts that admission control tolerates when making decisions on whether to permit virtual machine operations. The maximum is one less than the number of hosts in the cluster. Default: 1. *

    haAdmissionControlPerformanceTolerance Number

    The percentage of resource reduction that a cluster of virtual machines can tolerate in case of a failover. A value of 0 produces warnings only, whereas a value of 100 disables the setting. Default: 100 (disabled).

    haAdmissionControlPolicy String

    The type of admission control policy to use with vSphere HA. Can be one of resourcePercentage, slotPolicy, failoverHosts, or disabled. Default: resourcePercentage.

    haAdmissionControlResourcePercentageAutoCompute Boolean

    Automatically determine available resource percentages by subtracting the average number of host resources represented by the ha_admission_control_host_failure_tolerance setting from the total amount of resources in the cluster. Disable to supply user-defined values. Default: true. *

    haAdmissionControlResourcePercentageCpu Number

    Controls the user-defined percentage of CPU resources in the cluster to reserve for failover. Default: 100.

    haAdmissionControlResourcePercentageMemory Number

    Controls the user-defined percentage of memory resources in the cluster to reserve for failover. Default: 100.

    haAdmissionControlSlotPolicyExplicitCpu Number

    Controls the user-defined CPU slot size, in MHz. Default: 32.

    haAdmissionControlSlotPolicyExplicitMemory Number

    Controls the user-defined memory slot size, in MB. Default: 100.

    haAdmissionControlSlotPolicyUseExplicitSize Boolean

    Controls whether to supply explicit values for the CPU and memory slot sizes. The default is false, which tells vSphere to gather an automatic average based on all powered-on virtual machines currently in the cluster.

    haAdvancedOptions Map<String>

    A key/value map that specifies advanced options for vSphere HA.

    haDatastoreApdRecoveryAction String

    Controls the action to take on virtual machines if an APD status on an affected datastore clears in the middle of an APD event. Can be one of none or reset. Default: none. *

    haDatastoreApdResponse String

    Controls the action to take on virtual machines when the cluster has detected loss to all paths to a relevant datastore. Can be one of disabled, warning, restartConservative, or restartAggressive. Default: disabled. *

    haDatastoreApdResponseDelay Number

    The time, in seconds, to wait after an APD timeout event to run the response action defined in ha_datastore_apd_response. Default: 180 seconds (3 minutes). *

    haDatastorePdlResponse String

    Controls the action to take on virtual machines when the cluster has detected a permanent device loss to a relevant datastore. Can be one of disabled, warning, or restartAggressive. Default: disabled. *

    haEnabled Boolean

    Enable vSphere HA for this cluster. Default: false.

    haHeartbeatDatastoreIds List<String>

    The list of managed object IDs for preferred datastores to use for HA heartbeating. This setting is only useful when ha_heartbeat_datastore_policy is set to either userSelectedDs or allFeasibleDsWithUserPreference.

    haHeartbeatDatastorePolicy String

    The selection policy for HA heartbeat datastores. Can be one of allFeasibleDs, userSelectedDs, or allFeasibleDsWithUserPreference. Default: allFeasibleDsWithUserPreference.

    haHostIsolationResponse String

    The action to take on virtual machines when a host has detected that it has been isolated from the rest of the cluster. Can be one of none, powerOff, or shutdown. Default: none.

    haHostMonitoring String

    Global setting that controls whether vSphere HA remediates virtual machines on host failure. Can be one of enabled or disabled. Default: enabled.

    haVmComponentProtection String

    Controls vSphere VM component protection for virtual machines in this cluster. Can be one of enabled or disabled. Default: enabled. *

    haVmDependencyRestartCondition String

    The condition used to determine whether or not virtual machines in a certain restart priority class are online, allowing HA to move on to restarting virtual machines in the next priority class. Can be one of none, poweredOn, guestHbStatusGreen, or appHbStatusGreen. The default is none, which means that a virtual machine is considered ready immediately after a host is found on which to start it. *

    haVmFailureInterval Number

    The time interval, in seconds, within which a heartbeat must be received from a virtual machine. If no heartbeat is received within this interval, the virtual machine is marked as failed. Default: 30 seconds.

    haVmMaximumFailureWindow Number

    The time, in seconds, for the reset window in which ha_vm_maximum_resets can operate. When this window expires, no more resets are attempted regardless of the setting configured in ha_vm_maximum_resets. A value of -1 means no window, allotting an unlimited reset time. Default: -1 (no window).

    haVmMaximumResets Number

    The maximum number of resets that HA will perform on a virtual machine when responding to a failure event. Default: 3.

    haVmMinimumUptime Number

    The time, in seconds, that HA waits after powering on a virtual machine before monitoring for heartbeats. Default: 120 seconds (2 minutes).

    haVmMonitoring String

    The type of virtual machine monitoring to use when HA is enabled in the cluster. Can be one of vmMonitoringDisabled, vmMonitoringOnly, or vmAndAppMonitoring. Default: vmMonitoringDisabled.
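
    Taken together, the VM monitoring settings compose as in this sketch; the values are illustrative placeholders, not recommendations:

    const monitoredCluster = new vsphere.ComputeCluster("monitored-cluster", {
        datacenterId: "datacenter-123", // placeholder
        haEnabled: true,
        haVmMonitoring: "vmAndAppMonitoring",
        haVmFailureInterval: 30,        // seconds without a heartbeat before failure
        haVmMinimumUptime: 120,         // settle time after power-on before monitoring
        haVmMaximumResets: 3,           // resets allowed per reset window
        haVmMaximumFailureWindow: 3600, // one-hour reset window instead of the unlimited default
    });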

    haVmRestartAdditionalDelay Number

    Additional delay, in seconds, after ready condition is met. A VM is considered ready at this point. Default: 0 seconds (no delay). *

    haVmRestartPriority String

    The default restart priority for affected virtual machines when vSphere detects a host failure. Can be one of lowest, low, medium, high, or highest. Default: medium.

    haVmRestartTimeout Number

    The maximum time, in seconds, that vSphere HA will wait for virtual machines in one priority to be ready before proceeding with the next priority. Default: 600 seconds (10 minutes). *

    hostClusterExitTimeout Number

    The timeout, in seconds, for each host maintenance mode operation when removing hosts from a cluster. Default: 3600 seconds (1 hour).

    hostManaged Boolean

    Can be set to true if compute cluster membership will be managed through the host resource rather than the compute_cluster resource. Conflicts with: host_system_ids.

    hostSystemIds List<String>

    The managed object IDs of the hosts to put in the cluster. Conflicts with: host_managed.
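
    Host IDs are typically looked up rather than hard-coded. A minimal sketch using the vsphere.getDatacenter and vsphere.getHost data sources; the datacenter and host names are assumptions:

    import * as vsphere from "@pulumi/vsphere";

    // Placeholder datacenter and ESXi host names.
    const datacenter = vsphere.getDatacenter({ name: "dc-01" });
    const hosts = ["esxi-01.example.com", "esxi-02.example.com"].map(name =>
        datacenter.then(dc => vsphere.getHost({ name: name, datacenterId: dc.id })));

    const cluster = new vsphere.ComputeCluster("cluster", {
        datacenterId: datacenter.then(dc => dc.id),
        hostSystemIds: hosts.map(h => h.then(host => host.id)),
    });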

    name String

    The name of the cluster.

    proactiveHaAutomationLevel String

    Determines how the host quarantine, maintenance mode, or virtual machine migration recommendations made by proactive HA are to be handled. Can be one of Automated or Manual. Default: Manual. *

    proactiveHaEnabled Boolean

    Enables Proactive HA. Default: false. *

    proactiveHaModerateRemediation String

    The configured remediation for moderately degraded hosts. Can be one of MaintenanceMode or QuarantineMode. Note that this cannot be set to MaintenanceMode when proactive_ha_severe_remediation is set to QuarantineMode. Default: QuarantineMode. *

    proactiveHaProviderIds List<String>

    The list of IDs for health update providers configured for this cluster. *

    proactiveHaSevereRemediation String

    The configured remediation for severely degraded hosts. Can be one of MaintenanceMode or QuarantineMode. Note that this cannot be set to QuarantineMode when proactive_ha_moderate_remediation is set to MaintenanceMode. Default: QuarantineMode. *

    resourcePoolId String

    The managed object ID of the primary resource pool for this cluster. This can be passed directly to the resource_pool_id attribute of the vsphere.VirtualMachine resource.
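
    For example, given a ComputeCluster resource named cluster, its root resource pool can be fed straight into a virtual machine; the datastore and network IDs in this sketch are placeholders:

    const vm = new vsphere.VirtualMachine("vm-01", {
        resourcePoolId: cluster.resourcePoolId,
        datastoreId: "datastore-123", // placeholder
        numCpus: 2,
        memory: 4096,
        guestId: "otherLinux64Guest",
        networkInterfaces: [{ networkId: "network-123" }], // placeholder
        disks: [{ label: "disk0", size: 20 }],
    });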

    tags List<String>

    The IDs of any tags to attach to this resource.

    vsanCompressionEnabled Boolean

    Enables vSAN compression on the cluster.

    vsanDedupEnabled Boolean

    Enables vSAN deduplication on the cluster. Cannot be enabled on its own: when vSAN deduplication is enabled, vSAN compression must also be enabled.
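
    Because of this coupling, deduplication is always enabled alongside compression, as in this sketch (the datacenter ID is a placeholder):

    const vsanCluster = new vsphere.ComputeCluster("vsan-cluster", {
        datacenterId: "datacenter-123",
        vsanEnabled: true,
        vsanDedupEnabled: true,
        vsanCompressionEnabled: true, // required whenever deduplication is enabled
    });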

    vsanDiskGroups List<Property Map>

    Represents the configuration of a host disk group in the cluster.

    vsanDitEncryptionEnabled Boolean

    Enables vSAN data-in-transit encryption on the cluster. Conflicts with vsan_remote_datastore_ids; that is, the vSAN data-in-transit encryption feature cannot be enabled at the same time as the vSAN HCI Mesh feature.

    vsanDitRekeyInterval Number

    Indicates the rekey interval in minutes for data-in-transit encryption. The valid rekey interval is 30 to 10800 (feature defaults to 1440). Conflicts with vsan_remote_datastore_ids.

    vsanEnabled Boolean

    Enables vSAN on the cluster.

    vsanEsaEnabled Boolean

    Enables vSAN ESA on the cluster.

    vsanFaultDomains List<Property Map>

    Configurations of vSAN fault domains.

    vsanNetworkDiagnosticModeEnabled Boolean

    Enables network diagnostic mode for vSAN performance service on the cluster.

    vsanPerformanceEnabled Boolean

    Enables vSAN performance service on the cluster. Default: true.

    vsanRemoteDatastoreIds List<String>

    The remote vSAN datastore IDs to be mounted to this cluster. Conflicts with vsan_dit_encryption_enabled and vsan_dit_rekey_interval; that is, the vSAN HCI Mesh feature cannot be enabled at the same time as the data-in-transit encryption feature.

    vsanStretchedCluster Property Map

    Configurations of vSAN stretched cluster.

    vsanUnmapEnabled Boolean

    Enables vSAN unmap on the cluster. You must explicitly enable vSAN unmap when you enable vSAN ESA on the cluster.

    vsanVerboseModeEnabled Boolean

    Enables verbose mode for vSAN performance service on the cluster.

    Supporting Types

    ComputeClusterVsanDiskGroup, ComputeClusterVsanDiskGroupArgs

    Cache string

    The canonical name of the disk to use for vSAN cache.

    Storages List<string>

    An array of disk canonical names for vSAN storage.

    Cache string

    The canonical name of the disk to use for vSAN cache.

    Storages []string

    An array of disk canonical names for vSAN storage.

    cache String

    The canonical name of the disk to use for vSAN cache.

    storages List<String>

    An array of disk canonical names for vSAN storage.

    cache string

    The canonical name of the disk to use for vSAN cache.

    storages string[]

    An array of disk canonical names for vSAN storage.

    cache str

    The canonical name of the disk to use for vSAN cache.

    storages Sequence[str]

    An array of disk canonical names for vSAN storage.

    cache String

    The canonical name of the disk to use for vSAN cache.

    storages List<String>

    An array of disk canonical names for vSAN storage.

    ComputeClusterVsanFaultDomain, ComputeClusterVsanFaultDomainArgs

    FaultDomains []ComputeClusterVsanFaultDomainFaultDomain

    The configuration for a single fault domain.

    faultDomains List<ComputeClusterVsanFaultDomainFaultDomain>

    The configuration for a single fault domain.

    faultDomains ComputeClusterVsanFaultDomainFaultDomain[]

    The configuration for a single fault domain.

    fault_domains Sequence[ComputeClusterVsanFaultDomainFaultDomain]

    The configuration for a single fault domain.

    faultDomains List<Property Map>

    The configuration for a single fault domain.

    ComputeClusterVsanFaultDomainFaultDomain, ComputeClusterVsanFaultDomainFaultDomainArgs

    HostIds List<string>

    The managed object IDs of the hosts to put in the fault domain.

    Name string

    The name of the fault domain.

    HostIds []string

    The managed object IDs of the hosts to put in the fault domain.

    Name string

    The name of the fault domain.

    hostIds List<String>

    The managed object IDs of the hosts to put in the fault domain.

    name String

    The name of the fault domain.

    hostIds string[]

    The managed object IDs of the hosts to put in the fault domain.

    name string

    The name of the fault domain.

    host_ids Sequence[str]

    The managed object IDs of the hosts to put in the fault domain.

    name str

    The name of the fault domain.

    hostIds List<String>

    The managed object IDs of the hosts to put in the fault domain.

    name String

    The name of the fault domain.

    ComputeClusterVsanStretchedCluster, ComputeClusterVsanStretchedClusterArgs

    PreferredFaultDomainHostIds List<string>

    The managed object IDs of the hosts to put in the first fault domain.

    SecondaryFaultDomainHostIds List<string>

    The managed object IDs of the hosts to put in the second fault domain.

    WitnessNode string

    The managed object ID of the host selected as the witness node when the stretched cluster is enabled.

    PreferredFaultDomainName string

    The name of the first fault domain. Default: Preferred.

    SecondaryFaultDomainName string

    The name of the second fault domain. Default: Secondary.

    NOTE: You must disable vSphere HA before you enable vSAN on the cluster. You can enable or re-enable vSphere HA after vSAN is configured.

    import * as pulumi from "@pulumi/pulumi";
    import * as vsphere from "@pulumi/vsphere";
    

    const computeCluster = new vsphere.ComputeCluster("computeCluster", {
        datacenterId: data.vsphere_datacenter.datacenter.id,
        hostSystemIds: data.vsphere_host.host.map(__item => __item.id),
        drsEnabled: true,
        drsAutomationLevel: "fullyAutomated",
        haEnabled: false,
        vsanEnabled: true,
        vsanEsaEnabled: true,
        vsanDedupEnabled: true,
        vsanCompressionEnabled: true,
        vsanPerformanceEnabled: true,
        vsanVerboseModeEnabled: true,
        vsanNetworkDiagnosticModeEnabled: true,
        vsanUnmapEnabled: true,
        vsanDitEncryptionEnabled: true,
        vsanDitRekeyInterval: 1800,
        vsanDiskGroups: [{
            cache: data.vsphere_vmfs_disks.cache_disks[0],
            storages: data.vsphere_vmfs_disks.storage_disks,
        }],
        vsanFaultDomains: [{
            faultDomains: [
                {
                    name: "fd1",
                    hostIds: data.vsphere_host.faultdomain1_hosts.map(__item => __item.id),
                },
                {
                    name: "fd2",
                    hostIds: data.vsphere_host.faultdomain2_hosts.map(__item => __item.id),
                },
            ],
        }],
        vsanStretchedCluster: {
            preferredFaultDomainHostIds: data.vsphere_host.preferred_fault_domain_host.map(__item => __item.id),
            secondaryFaultDomainHostIds: data.vsphere_host.secondary_fault_domain_host.map(__item => __item.id),
            witnessNode: data.vsphere_host.witness_host.id,
        },
    });

    import pulumi
    import pulumi_vsphere as vsphere
    
    compute_cluster = vsphere.ComputeCluster("computeCluster",
        datacenter_id=data["vsphere_datacenter"]["datacenter"]["id"],
        host_system_ids=[__item["id"] for __item in data["vsphere_host"]["host"]],
        drs_enabled=True,
        drs_automation_level="fullyAutomated",
        ha_enabled=False,
        vsan_enabled=True,
        vsan_esa_enabled=True,
        vsan_dedup_enabled=True,
        vsan_compression_enabled=True,
        vsan_performance_enabled=True,
        vsan_verbose_mode_enabled=True,
        vsan_network_diagnostic_mode_enabled=True,
        vsan_unmap_enabled=True,
        vsan_dit_encryption_enabled=True,
        vsan_dit_rekey_interval=1800,
        vsan_disk_groups=[vsphere.ComputeClusterVsanDiskGroupArgs(
            cache=data["vsphere_vmfs_disks"]["cache_disks"][0],
            storages=data["vsphere_vmfs_disks"]["storage_disks"],
        )],
        vsan_fault_domains=[vsphere.ComputeClusterVsanFaultDomainArgs(
            fault_domains=[
                vsphere.ComputeClusterVsanFaultDomainFaultDomainArgs(
                    name="fd1",
                    host_ids=[__item["id"] for __item in data["vsphere_host"]["faultdomain1_hosts"]],
                ),
                vsphere.ComputeClusterVsanFaultDomainFaultDomainArgs(
                    name="fd2",
                    host_ids=[__item["id"] for __item in data["vsphere_host"]["faultdomain2_hosts"]],
                ),
            ],
        )],
        vsan_stretched_cluster=vsphere.ComputeClusterVsanStretchedClusterArgs(
            preferred_fault_domain_host_ids=[__item["id"] for __item in data["vsphere_host"]["preferred_fault_domain_host"]],
            secondary_fault_domain_host_ids=[__item["id"] for __item in data["vsphere_host"]["secondary_fault_domain_host"]],
            witness_node=data["vsphere_host"]["witness_host"]["id"],
        ))
    
    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using VSphere = Pulumi.VSphere;
    
    return await Deployment.RunAsync(() => 
    {
        var computeCluster = new VSphere.ComputeCluster("computeCluster", new()
        {
            DatacenterId = data.Vsphere_datacenter.Datacenter.Id,
            HostSystemIds = data.Vsphere_host.Host.Select(__item => __item.Id).ToArray(),
            DrsEnabled = true,
            DrsAutomationLevel = "fullyAutomated",
            HaEnabled = false,
            VsanEnabled = true,
            VsanEsaEnabled = true,
            VsanDedupEnabled = true,
            VsanCompressionEnabled = true,
            VsanPerformanceEnabled = true,
            VsanVerboseModeEnabled = true,
            VsanNetworkDiagnosticModeEnabled = true,
            VsanUnmapEnabled = true,
            VsanDitEncryptionEnabled = true,
            VsanDitRekeyInterval = 1800,
            VsanDiskGroups = new[]
            {
                new VSphere.Inputs.ComputeClusterVsanDiskGroupArgs
                {
                    Cache = data.Vsphere_vmfs_disks.Cache_disks[0],
                    Storages = data.Vsphere_vmfs_disks.Storage_disks,
                },
            },
            VsanFaultDomains = new[]
            {
                new VSphere.Inputs.ComputeClusterVsanFaultDomainArgs
                {
                    FaultDomains = new[]
                    {
                        new VSphere.Inputs.ComputeClusterVsanFaultDomainFaultDomainArgs
                        {
                            Name = "fd1",
                            HostIds = data.Vsphere_host.Faultdomain1_hosts.Select(__item => __item.Id).ToArray(),
                        },
                        new VSphere.Inputs.ComputeClusterVsanFaultDomainFaultDomainArgs
                        {
                            Name = "fd2",
                            HostIds = data.Vsphere_host.Faultdomain2_hosts.Select(__item => __item.Id).ToArray(),
                        },
                    },
                },
            },
            VsanStretchedCluster = new VSphere.Inputs.ComputeClusterVsanStretchedClusterArgs
            {
                PreferredFaultDomainHostIds = data.Vsphere_host.Preferred_fault_domain_host.Select(__item => __item.Id).ToArray(),
                SecondaryFaultDomainHostIds = data.Vsphere_host.Secondary_fault_domain_host.Select(__item => __item.Id).ToArray(),
                WitnessNode = data.Vsphere_host.Witness_host.Id,
            },
        });
    
    });
    
    package main

    import (
    	"github.com/pulumi/pulumi-vsphere/sdk/v4/go/vsphere"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )

    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		_, err := vsphere.NewComputeCluster(ctx, "computeCluster", &vsphere.ComputeClusterArgs{
    			DatacenterId: pulumi.Any(data.Vsphere_datacenter.Datacenter.Id),
    			// The upstream example converter failed on the host-ID splat
    			// expression here ("unlowered splat expression"); supply the
    			// managed object IDs of the cluster hosts.
    			HostSystemIds:                    pulumi.StringArray{},
    			DrsEnabled:                       pulumi.Bool(true),
    			DrsAutomationLevel:               pulumi.String("fullyAutomated"),
    			HaEnabled:                        pulumi.Bool(false),
    			VsanEnabled:                      pulumi.Bool(true),
    			VsanEsaEnabled:                   pulumi.Bool(true),
    			VsanDedupEnabled:                 pulumi.Bool(true),
    			VsanCompressionEnabled:           pulumi.Bool(true),
    			VsanPerformanceEnabled:           pulumi.Bool(true),
    			VsanVerboseModeEnabled:           pulumi.Bool(true),
    			VsanNetworkDiagnosticModeEnabled: pulumi.Bool(true),
    			VsanUnmapEnabled:                 pulumi.Bool(true),
    			VsanDitEncryptionEnabled:         pulumi.Bool(true),
    			VsanDitRekeyInterval:             pulumi.Int(1800),
    			VsanDiskGroups: vsphere.ComputeClusterVsanDiskGroupArray{
    				&vsphere.ComputeClusterVsanDiskGroupArgs{
    					Cache:    pulumi.Any(data.Vsphere_vmfs_disks.Cache_disks[0]),
    					Storages: pulumi.Any(data.Vsphere_vmfs_disks.Storage_disks),
    				},
    			},
    			VsanFaultDomains: vsphere.ComputeClusterVsanFaultDomainArray{
    				&vsphere.ComputeClusterVsanFaultDomainArgs{
    					FaultDomains: vsphere.ComputeClusterVsanFaultDomainFaultDomainArray{
    						&vsphere.ComputeClusterVsanFaultDomainFaultDomainArgs{
    							Name: pulumi.String("fd1"),
    							// Supply the managed object IDs of the hosts in
    							// fault domain "fd1" (converter failed here).
    							HostIds: pulumi.StringArray{},
    						},
    						&vsphere.ComputeClusterVsanFaultDomainFaultDomainArgs{
    							Name: pulumi.String("fd2"),
    							// Supply the managed object IDs of the hosts in
    							// fault domain "fd2" (converter failed here).
    							HostIds: pulumi.StringArray{},
    						},
    					},
    				},
    			},
    			VsanStretchedCluster: &vsphere.ComputeClusterVsanStretchedClusterArgs{
    				// Supply the managed object IDs of the hosts in each fault
    				// domain (the converter failed on these splat expressions too).
    				PreferredFaultDomainHostIds: pulumi.StringArray{},
    				SecondaryFaultDomainHostIds: pulumi.StringArray{},
    				WitnessNode:                 pulumi.Any(data.Vsphere_host.Witness_host.Id),
    			},
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.vsphere.ComputeCluster;
    import com.pulumi.vsphere.ComputeClusterArgs;
    import com.pulumi.vsphere.inputs.ComputeClusterVsanDiskGroupArgs;
    import com.pulumi.vsphere.inputs.ComputeClusterVsanFaultDomainArgs;
    import com.pulumi.vsphere.inputs.ComputeClusterVsanFaultDomainFaultDomainArgs;
    import com.pulumi.vsphere.inputs.ComputeClusterVsanStretchedClusterArgs;
    import static java.util.stream.Collectors.toList;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            var computeCluster = new ComputeCluster("computeCluster", ComputeClusterArgs.builder()        
                .datacenterId(data.vsphere_datacenter().datacenter().id())
                .hostSystemIds(data.vsphere_host().host().stream().map(element -> element.id()).collect(toList()))
                .drsEnabled(true)
                .drsAutomationLevel("fullyAutomated")
                .haEnabled(false)
                .vsanEnabled(true)
                .vsanEsaEnabled(true)
                .vsanDedupEnabled(true)
                .vsanCompressionEnabled(true)
                .vsanPerformanceEnabled(true)
                .vsanVerboseModeEnabled(true)
                .vsanNetworkDiagnosticModeEnabled(true)
                .vsanUnmapEnabled(true)
                .vsanDitEncryptionEnabled(true)
                .vsanDitRekeyInterval(1800)
                .vsanDiskGroups(ComputeClusterVsanDiskGroupArgs.builder()
                    .cache(data.vsphere_vmfs_disks().cache_disks()[0])
                    .storages(data.vsphere_vmfs_disks().storage_disks())
                    .build())
                .vsanFaultDomains(ComputeClusterVsanFaultDomainArgs.builder()
                    .faultDomains(                
                        ComputeClusterVsanFaultDomainFaultDomainArgs.builder()
                            .name("fd1")
                            .hostIds(data.vsphere_host().faultdomain1_hosts().stream().map(element -> element.id()).collect(toList()))
                            .build(),
                        ComputeClusterVsanFaultDomainFaultDomainArgs.builder()
                            .name("fd2")
                            .hostIds(data.vsphere_host().faultdomain2_hosts().stream().map(element -> element.id()).collect(toList()))
                            .build())
                    .build())
                .vsanStretchedCluster(ComputeClusterVsanStretchedClusterArgs.builder()
                    .preferredFaultDomainHostIds(data.vsphere_host().preferred_fault_domain_host().stream().map(element -> element.id()).collect(toList()))
                    .secondaryFaultDomainHostIds(data.vsphere_host().secondary_fault_domain_host().stream().map(element -> element.id()).collect(toList()))
                    .witnessNode(data.vsphere_host().witness_host().id())
                    .build())
                .build());
    
        }
    }
    
    PreferredFaultDomainHostIds []string

    The managed object IDs of the hosts to put in the first fault domain.

    SecondaryFaultDomainHostIds []string

    The managed object IDs of the hosts to put in the second fault domain.

    WitnessNode string

    The managed object ID of the host selected as the witness node when the stretched cluster is enabled.

    PreferredFaultDomainName string

    The name of the first fault domain. Default: Preferred.

    SecondaryFaultDomainName string

    The name of the second fault domain. Default: Secondary.

    preferredFaultDomainHostIds List<String>

    The managed object IDs of the hosts to put in the first fault domain.

    secondaryFaultDomainHostIds List<String>

    The managed object IDs of the hosts to put in the second fault domain.

    witnessNode String

    The managed object ID of the host selected as the witness node when the stretched cluster is enabled.

    preferredFaultDomainName String

    The name of the first fault domain. Default: Preferred.

    secondaryFaultDomainName String

    The name of the second fault domain. Default: Secondary.

    preferredFaultDomainHostIds string[]

    The managed object IDs of the hosts to put in the first fault domain.

    secondaryFaultDomainHostIds string[]

    The managed object IDs of the hosts to put in the second fault domain.

    witnessNode string

    The managed object ID of the host selected as the witness node when the stretched cluster is enabled.

    preferredFaultDomainName string

    The name of the first fault domain. Default: Preferred.

    secondaryFaultDomainName string

    The name of the second fault domain. Default: Secondary.

    NOTE: You must disable vSphere HA before you enable vSAN on the cluster. You can enable or re-enable vSphere HA after vSAN is configured.

    import * as pulumi from "@pulumi/pulumi";
    import * as vsphere from "@pulumi/vsphere";
    

    const computeCluster = new vsphere.ComputeCluster("computeCluster", { datacenterId: data.vsphere_datacenter.datacenter.id, hostSystemIds: [data.vsphere_host.host.map(__item => __item.id)], drsEnabled: true, drsAutomationLevel: "fullyAutomated", haEnabled: false, vsanEnabled: true, vsanEsaEnabled: true, vsanDedupEnabled: true, vsanCompressionEnabled: true, vsanPerformanceEnabled: true, vsanVerboseModeEnabled: true, vsanNetworkDiagnosticModeEnabled: true, vsanUnmapEnabled: true, vsanDitEncryptionEnabled: true, vsanDitRekeyInterval: 1800, vsanDiskGroups: [{ cache: data.vsphere_vmfs_disks.cache_disks[0], storages: data.vsphere_vmfs_disks.storage_disks, }], vsanFaultDomains: [{ faultDomains: [ { name: "fd1", hostIds: [data.vsphere_host.faultdomain1_hosts.map(__item => __item.id)], }, { name: "fd2", hostIds: [data.vsphere_host.faultdomain2_hosts.map(__item => __item.id)], }, ], }], vsanStretchedCluster: { preferredFaultDomainHostIds: [data.vsphere_host.preferred_fault_domain_host.map(__item => __item.id)], secondaryFaultDomainHostIds: [data.vsphere_host.secondary_fault_domain_host.map(__item => __item.id)], witnessNode: data.vsphere_host.witness_host.id, }, });

    import pulumi
    import pulumi_vsphere as vsphere
    
    compute_cluster = vsphere.ComputeCluster("computeCluster",
        datacenter_id=data["vsphere_datacenter"]["datacenter"]["id"],
        host_system_ids=[[__item["id"] for __item in data["vsphere_host"]["host"]]],
        drs_enabled=True,
        drs_automation_level="fullyAutomated",
        ha_enabled=False,
        vsan_enabled=True,
        vsan_esa_enabled=True,
        vsan_dedup_enabled=True,
        vsan_compression_enabled=True,
        vsan_performance_enabled=True,
        vsan_verbose_mode_enabled=True,
        vsan_network_diagnostic_mode_enabled=True,
        vsan_unmap_enabled=True,
        vsan_dit_encryption_enabled=True,
        vsan_dit_rekey_interval=1800,
        vsan_disk_groups=[vsphere.ComputeClusterVsanDiskGroupArgs(
            cache=data["vsphere_vmfs_disks"]["cache_disks"],
            storages=data["vsphere_vmfs_disks"]["storage_disks"],
        )],
        vsan_fault_domains=[vsphere.ComputeClusterVsanFaultDomainArgs(
            fault_domains=[
                vsphere.ComputeClusterVsanFaultDomainFaultDomainArgs(
                    name="fd1",
                    host_ids=[[__item["id"] for __item in data["vsphere_host"]["faultdomain1_hosts"]]],
                ),
                vsphere.ComputeClusterVsanFaultDomainFaultDomainArgs(
                    name="fd2",
                    host_ids=[[__item["id"] for __item in data["vsphere_host"]["faultdomain2_hosts"]]],
                ),
            ],
        )],
        vsan_stretched_cluster=vsphere.ComputeClusterVsanStretchedClusterArgs(
            preferred_fault_domain_host_ids=[[__item["id"] for __item in data["vsphere_host"]["preferred_fault_domain_host"]]],
            secondary_fault_domain_host_ids=[[__item["id"] for __item in data["vsphere_host"]["secondary_fault_domain_host"]]],
            witness_node=data["vsphere_host"]["witness_host"]["id"],
        ))
    
    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using VSphere = Pulumi.VSphere;
    
    return await Deployment.RunAsync(() => 
    {
        var computeCluster = new VSphere.ComputeCluster("computeCluster", new()
        {
            DatacenterId = data.Vsphere_datacenter.Datacenter.Id,
            HostSystemIds = new[]
            {
                data.Vsphere_host.Host.Select(__item => __item.Id).ToList(),
            },
            DrsEnabled = true,
            DrsAutomationLevel = "fullyAutomated",
            HaEnabled = false,
            VsanEnabled = true,
            VsanEsaEnabled = true,
            VsanDedupEnabled = true,
            VsanCompressionEnabled = true,
            VsanPerformanceEnabled = true,
            VsanVerboseModeEnabled = true,
            VsanNetworkDiagnosticModeEnabled = true,
            VsanUnmapEnabled = true,
            VsanDitEncryptionEnabled = true,
            VsanDitRekeyInterval = 1800,
            VsanDiskGroups = new[]
            {
                new VSphere.Inputs.ComputeClusterVsanDiskGroupArgs
                {
                    Cache = data.Vsphere_vmfs_disks.Cache_disks[0],
                    Storages = data.Vsphere_vmfs_disks.Storage_disks,
                },
            },
            VsanFaultDomains = new[]
            {
                new VSphere.Inputs.ComputeClusterVsanFaultDomainArgs
                {
                    FaultDomains = new[]
                    {
                        new VSphere.Inputs.ComputeClusterVsanFaultDomainFaultDomainArgs
                        {
                            Name = "fd1",
                            HostIds = new[]
                            {
                                data.Vsphere_host.Faultdomain1_hosts.Select(__item => __item.Id).ToList(),
                            },
                        },
                        new VSphere.Inputs.ComputeClusterVsanFaultDomainFaultDomainArgs
                        {
                            Name = "fd2",
                            HostIds = new[]
                            {
                                data.Vsphere_host.Faultdomain2_hosts.Select(__item => __item.Id).ToList(),
                            },
                        },
                    },
                },
            },
            VsanStretchedCluster = new VSphere.Inputs.ComputeClusterVsanStretchedClusterArgs
            {
                PreferredFaultDomainHostIds = new[]
                {
                    data.Vsphere_host.Preferred_fault_domain_host.Select(__item => __item.Id).ToList(),
                },
                SecondaryFaultDomainHostIds = new[]
                {
                    data.Vsphere_host.Secondary_fault_domain_host.Select(__item => __item.Id).ToList(),
                },
                WitnessNode = data.Vsphere_host.Witness_host.Id,
            },
        });
    
    });
    
    package main
    
    import (
    	"github.com/pulumi/pulumi-vsphere/sdk/v4/go/vsphere"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		// The example generator could not lower the splat expressions
    		// (e.g. data.vsphere_host.host[*].id), so the host ID lists are
    		// expanded into explicit loops over the referenced hosts here.
    		var hostSystemIds pulumi.StringArray
    		for _, host := range data.Vsphere_host.Host {
    			hostSystemIds = append(hostSystemIds, pulumi.String(host.Id))
    		}
    		var faultdomain1HostIds pulumi.StringArray
    		for _, host := range data.Vsphere_host.Faultdomain1_hosts {
    			faultdomain1HostIds = append(faultdomain1HostIds, pulumi.String(host.Id))
    		}
    		var faultdomain2HostIds pulumi.StringArray
    		for _, host := range data.Vsphere_host.Faultdomain2_hosts {
    			faultdomain2HostIds = append(faultdomain2HostIds, pulumi.String(host.Id))
    		}
    		var preferredFaultDomainHostIds pulumi.StringArray
    		for _, host := range data.Vsphere_host.Preferred_fault_domain_host {
    			preferredFaultDomainHostIds = append(preferredFaultDomainHostIds, pulumi.String(host.Id))
    		}
    		var secondaryFaultDomainHostIds pulumi.StringArray
    		for _, host := range data.Vsphere_host.Secondary_fault_domain_host {
    			secondaryFaultDomainHostIds = append(secondaryFaultDomainHostIds, pulumi.String(host.Id))
    		}
    		_, err := vsphere.NewComputeCluster(ctx, "computeCluster", &vsphere.ComputeClusterArgs{
    			DatacenterId:                     pulumi.Any(data.Vsphere_datacenter.Datacenter.Id),
    			HostSystemIds:                    hostSystemIds,
    			DrsEnabled:                       pulumi.Bool(true),
    			DrsAutomationLevel:               pulumi.String("fullyAutomated"),
    			HaEnabled:                        pulumi.Bool(false),
    			VsanEnabled:                      pulumi.Bool(true),
    			VsanEsaEnabled:                   pulumi.Bool(true),
    			VsanDedupEnabled:                 pulumi.Bool(true),
    			VsanCompressionEnabled:           pulumi.Bool(true),
    			VsanPerformanceEnabled:           pulumi.Bool(true),
    			VsanVerboseModeEnabled:           pulumi.Bool(true),
    			VsanNetworkDiagnosticModeEnabled: pulumi.Bool(true),
    			VsanUnmapEnabled:                 pulumi.Bool(true),
    			VsanDitEncryptionEnabled:         pulumi.Bool(true),
    			VsanDitRekeyInterval:             pulumi.Int(1800),
    			VsanDiskGroups: vsphere.ComputeClusterVsanDiskGroupArray{
    				&vsphere.ComputeClusterVsanDiskGroupArgs{
    					Cache:    pulumi.Any(data.Vsphere_vmfs_disks.Cache_disks[0]),
    					Storages: pulumi.Any(data.Vsphere_vmfs_disks.Storage_disks),
    				},
    			},
    			VsanFaultDomains: vsphere.ComputeClusterVsanFaultDomainArray{
    				&vsphere.ComputeClusterVsanFaultDomainArgs{
    					FaultDomains: vsphere.ComputeClusterVsanFaultDomainFaultDomainArray{
    						&vsphere.ComputeClusterVsanFaultDomainFaultDomainArgs{
    							Name:    pulumi.String("fd1"),
    							HostIds: faultdomain1HostIds,
    						},
    						&vsphere.ComputeClusterVsanFaultDomainFaultDomainArgs{
    							Name:    pulumi.String("fd2"),
    							HostIds: faultdomain2HostIds,
    						},
    					},
    				},
    			},
    			VsanStretchedCluster: &vsphere.ComputeClusterVsanStretchedClusterArgs{
    				PreferredFaultDomainHostIds: preferredFaultDomainHostIds,
    				SecondaryFaultDomainHostIds: secondaryFaultDomainHostIds,
    				WitnessNode:                 pulumi.Any(data.Vsphere_host.Witness_host.Id),
    			},
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.vsphere.ComputeCluster;
    import com.pulumi.vsphere.ComputeClusterArgs;
    import com.pulumi.vsphere.inputs.ComputeClusterVsanDiskGroupArgs;
    import com.pulumi.vsphere.inputs.ComputeClusterVsanFaultDomainArgs;
    import com.pulumi.vsphere.inputs.ComputeClusterVsanFaultDomainFaultDomainArgs;
    import com.pulumi.vsphere.inputs.ComputeClusterVsanStretchedClusterArgs;
    import static java.util.stream.Collectors.toList;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            var computeCluster = new ComputeCluster("computeCluster", ComputeClusterArgs.builder()        
                .datacenterId(data.vsphere_datacenter().datacenter().id())
                .hostSystemIds(data.vsphere_host().host().stream().map(element -> element.id()).collect(toList()))
                .drsEnabled(true)
                .drsAutomationLevel("fullyAutomated")
                .haEnabled(false)
                .vsanEnabled(true)
                .vsanEsaEnabled(true)
                .vsanDedupEnabled(true)
                .vsanCompressionEnabled(true)
                .vsanPerformanceEnabled(true)
                .vsanVerboseModeEnabled(true)
                .vsanNetworkDiagnosticModeEnabled(true)
                .vsanUnmapEnabled(true)
                .vsanDitEncryptionEnabled(true)
                .vsanDitRekeyInterval(1800)
                .vsanDiskGroups(ComputeClusterVsanDiskGroupArgs.builder()
                    .cache(data.vsphere_vmfs_disks().cache_disks()[0])
                    .storages(data.vsphere_vmfs_disks().storage_disks())
                    .build())
                .vsanFaultDomains(ComputeClusterVsanFaultDomainArgs.builder()
                    .faultDomains(                
                        ComputeClusterVsanFaultDomainFaultDomainArgs.builder()
                            .name("fd1")
                            .hostIds(data.vsphere_host().faultdomain1_hosts().stream().map(element -> element.id()).collect(toList()))
                            .build(),
                        ComputeClusterVsanFaultDomainFaultDomainArgs.builder()
                            .name("fd2")
                            .hostIds(data.vsphere_host().faultdomain2_hosts().stream().map(element -> element.id()).collect(toList()))
                            .build())
                    .build())
                .vsanStretchedCluster(ComputeClusterVsanStretchedClusterArgs.builder()
                    .preferredFaultDomainHostIds(data.vsphere_host().preferred_fault_domain_host().stream().map(element -> element.id()).collect(toList()))
                    .secondaryFaultDomainHostIds(data.vsphere_host().secondary_fault_domain_host().stream().map(element -> element.id()).collect(toList()))
                    .witnessNode(data.vsphere_host().witness_host().id())
                    .build())
                .build());
    
        }
    }
    
    preferred_fault_domain_host_ids Sequence[str]

    The managed object IDs of the hosts to put in the first fault domain.

    secondary_fault_domain_host_ids Sequence[str]

    The managed object IDs of the hosts to put in the second fault domain.

    witness_node str

    The managed object ID of the host selected as the witness node when the stretched cluster is enabled.

    preferred_fault_domain_name str

    The name of the first fault domain. Default is Preferred.

    secondary_fault_domain_name str

    The name of the second fault domain. Default is Secondary.
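
    Together these make up the vsan_stretched_cluster argument. A minimal sketch follows; the managed object IDs shown are illustrative placeholders, not values taken from the examples above.

    import pulumi_vsphere as vsphere

    # Placeholder managed object IDs; in practice these come from host
    # data source lookups, as in the full example above.
    stretched_cluster = vsphere.ComputeClusterVsanStretchedClusterArgs(
        preferred_fault_domain_host_ids=["host-101", "host-102"],
        secondary_fault_domain_host_ids=["host-201", "host-202"],
        witness_node="host-301",
        # Optional overrides for the default fault domain names
        # ("Preferred" and "Secondary").
        preferred_fault_domain_name="site-a",
        secondary_fault_domain_name="site-b",
    )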

    NOTE: You must disable vSphere HA before you enable vSAN on the cluster. You can enable or re-enable vSphere HA after vSAN is configured.
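
    In practice this ordering means vSAN and HA land in separate updates. A rough sketch of the two-step flow (the datacenter ID is a placeholder):

    import pulumi_vsphere as vsphere

    # Step 1: create the cluster with vSAN enabled and HA disabled.
    cluster = vsphere.ComputeCluster("computeCluster",
        datacenter_id="datacenter-123",  # placeholder managed object ID
        ha_enabled=False,
        vsan_enabled=True)

    # Step 2: once vSAN is configured, set ha_enabled=True and run
    # another update to (re-)enable vSphere HA on the cluster.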

    preferredFaultDomainHostIds List<String>

    The managed object IDs of the hosts to put in the first fault domain.

    secondaryFaultDomainHostIds List<String>

    The managed object IDs of the hosts to put in the second fault domain.

    witnessNode String

    The managed object ID of the host selected as the witness node when the stretched cluster is enabled.

    preferredFaultDomainName String

    The name of the first fault domain. Default is Preferred.

    secondaryFaultDomainName String

    The name of the second fault domain. Default is Secondary.


    Package Details

    Repository
    vSphere pulumi/pulumi-vsphere
    License
    Apache-2.0
    Notes

    This Pulumi package is based on the vsphere Terraform Provider.
