vsphere.ComputeCluster
Create ComputeCluster Resource
new ComputeCluster(name: string, args: ComputeClusterArgs, opts?: CustomResourceOptions);

@overload
def ComputeCluster(resource_name: str,
                   opts: Optional[ResourceOptions] = None,
                   custom_attributes: Optional[Mapping[str, str]] = None,
                   datacenter_id: Optional[str] = None,
                   dpm_automation_level: Optional[str] = None,
                   dpm_enabled: Optional[bool] = None,
                   dpm_threshold: Optional[int] = None,
                   drs_advanced_options: Optional[Mapping[str, str]] = None,
                   drs_automation_level: Optional[str] = None,
                   drs_enable_predictive_drs: Optional[bool] = None,
                   drs_enable_vm_overrides: Optional[bool] = None,
                   drs_enabled: Optional[bool] = None,
                   drs_migration_threshold: Optional[int] = None,
                   drs_scale_descendants_shares: Optional[str] = None,
                   folder: Optional[str] = None,
                   force_evacuate_on_destroy: Optional[bool] = None,
                   ha_admission_control_failover_host_system_ids: Optional[Sequence[str]] = None,
                   ha_admission_control_host_failure_tolerance: Optional[int] = None,
                   ha_admission_control_performance_tolerance: Optional[int] = None,
                   ha_admission_control_policy: Optional[str] = None,
                   ha_admission_control_resource_percentage_auto_compute: Optional[bool] = None,
                   ha_admission_control_resource_percentage_cpu: Optional[int] = None,
                   ha_admission_control_resource_percentage_memory: Optional[int] = None,
                   ha_admission_control_slot_policy_explicit_cpu: Optional[int] = None,
                   ha_admission_control_slot_policy_explicit_memory: Optional[int] = None,
                   ha_admission_control_slot_policy_use_explicit_size: Optional[bool] = None,
                   ha_advanced_options: Optional[Mapping[str, str]] = None,
                   ha_datastore_apd_recovery_action: Optional[str] = None,
                   ha_datastore_apd_response: Optional[str] = None,
                   ha_datastore_apd_response_delay: Optional[int] = None,
                   ha_datastore_pdl_response: Optional[str] = None,
                   ha_enabled: Optional[bool] = None,
                   ha_heartbeat_datastore_ids: Optional[Sequence[str]] = None,
                   ha_heartbeat_datastore_policy: Optional[str] = None,
                   ha_host_isolation_response: Optional[str] = None,
                   ha_host_monitoring: Optional[str] = None,
                   ha_vm_component_protection: Optional[str] = None,
                   ha_vm_dependency_restart_condition: Optional[str] = None,
                   ha_vm_failure_interval: Optional[int] = None,
                   ha_vm_maximum_failure_window: Optional[int] = None,
                   ha_vm_maximum_resets: Optional[int] = None,
                   ha_vm_minimum_uptime: Optional[int] = None,
                   ha_vm_monitoring: Optional[str] = None,
                   ha_vm_restart_additional_delay: Optional[int] = None,
                   ha_vm_restart_priority: Optional[str] = None,
                   ha_vm_restart_timeout: Optional[int] = None,
                   host_cluster_exit_timeout: Optional[int] = None,
                   host_managed: Optional[bool] = None,
                   host_system_ids: Optional[Sequence[str]] = None,
                   name: Optional[str] = None,
                   proactive_ha_automation_level: Optional[str] = None,
                   proactive_ha_enabled: Optional[bool] = None,
                   proactive_ha_moderate_remediation: Optional[str] = None,
                   proactive_ha_provider_ids: Optional[Sequence[str]] = None,
                   proactive_ha_severe_remediation: Optional[str] = None,
                   tags: Optional[Sequence[str]] = None,
                   vsan_compression_enabled: Optional[bool] = None,
                   vsan_dedup_enabled: Optional[bool] = None,
                   vsan_disk_groups: Optional[Sequence[ComputeClusterVsanDiskGroupArgs]] = None,
                   vsan_dit_encryption_enabled: Optional[bool] = None,
                   vsan_dit_rekey_interval: Optional[int] = None,
                   vsan_enabled: Optional[bool] = None,
                   vsan_esa_enabled: Optional[bool] = None,
                   vsan_fault_domains: Optional[Sequence[ComputeClusterVsanFaultDomainArgs]] = None,
                   vsan_network_diagnostic_mode_enabled: Optional[bool] = None,
                   vsan_performance_enabled: Optional[bool] = None,
                   vsan_remote_datastore_ids: Optional[Sequence[str]] = None,
                   vsan_stretched_cluster: Optional[ComputeClusterVsanStretchedClusterArgs] = None,
                   vsan_unmap_enabled: Optional[bool] = None,
                   vsan_verbose_mode_enabled: Optional[bool] = None)
@overload
def ComputeCluster(resource_name: str,
                   args: ComputeClusterArgs,
                   opts: Optional[ResourceOptions] = None)

func NewComputeCluster(ctx *Context, name string, args ComputeClusterArgs, opts ...ResourceOption) (*ComputeCluster, error)

public ComputeCluster(string name, ComputeClusterArgs args, CustomResourceOptions? opts = null)

public ComputeCluster(String name, ComputeClusterArgs args)
public ComputeCluster(String name, ComputeClusterArgs args, CustomResourceOptions options)
type: vsphere:ComputeCluster
properties: # The arguments to resource properties.
options: # Bag of options to control resource's behavior.
- name (string): The unique name of the resource.
- args (ComputeClusterArgs): The arguments to resource properties.
- opts (CustomResourceOptions): Bag of options to control resource's behavior.
- resource_name (str): The unique name of the resource.
- args (ComputeClusterArgs): The arguments to resource properties.
- opts (ResourceOptions): Bag of options to control resource's behavior.
- ctx (Context): Context object for the current deployment.
- name (string): The unique name of the resource.
- args (ComputeClusterArgs): The arguments to resource properties.
- opts (ResourceOption): Bag of options to control resource's behavior.
- name (string): The unique name of the resource.
- args (ComputeClusterArgs): The arguments to resource properties.
- opts (CustomResourceOptions): Bag of options to control resource's behavior.
- name (String): The unique name of the resource.
- args (ComputeClusterArgs): The arguments to resource properties.
- options (CustomResourceOptions): Bag of options to control resource's behavior.
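
For example, a minimal sketch of creating a cluster with the Python SDK is shown below. The datacenter name `dc-01` and the cluster name are placeholders, not values from this reference; only `datacenter_id` (and a name) are needed for a cluster with every optional feature left at its default.

```python
import pulumi_vsphere as vsphere

# Look up the target datacenter; "dc-01" is a placeholder name.
datacenter = vsphere.get_datacenter(name="dc-01")

# A bare compute cluster: DRS, HA, and vSAN all remain at their
# (disabled) defaults until the corresponding inputs are set.
cluster = vsphere.ComputeCluster(
    "compute-cluster-test",
    name="compute-cluster-test",
    datacenter_id=datacenter.id,
)
```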
ComputeCluster Resource Properties
To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.
Inputs
The ComputeCluster resource accepts the following input properties:
- DatacenterId (string): The managed object ID of the datacenter to create the cluster in. Forces a new resource if changed.
- CustomAttributes (Dictionary<string, string>): A map of custom attribute IDs to attribute value strings to set for the cluster. NOTE: Custom attributes are unsupported on direct ESXi connections and require vCenter Server.
- DpmAutomationLevel (string): The automation level for host power operations in this cluster. Can be one of `manual` or `automated`. Default: `manual`.
- DpmEnabled (bool): Enable DPM support for DRS in this cluster. Requires `drs_enabled` to be `true` in order to be effective. Default: `false`.
- DpmThreshold (int): A value between `1` and `5` indicating the threshold of load within the cluster that influences host power operations. This affects both power-on and power-off operations; a lower setting will tolerate more of a surplus/deficit than a higher setting. Default: `3`.
- DrsAdvancedOptions (Dictionary<string, string>): A key/value map that specifies advanced options for DRS and DPM.
- DrsAutomationLevel (string): The default automation level for all virtual machines in this cluster. Can be one of `manual`, `partiallyAutomated`, or `fullyAutomated`. Default: `manual`.
- DrsEnablePredictiveDrs (bool): When `true`, enables DRS to use data from vRealize Operations Manager to make proactive DRS recommendations.
- DrsEnableVmOverrides (bool): Allow individual DRS overrides to be set for virtual machines in the cluster. Default: `true`.
- DrsEnabled (bool): Enable DRS for this cluster. Default: `false`.
- DrsMigrationThreshold (int): A value between `1` and `5` indicating the threshold of imbalance tolerated between hosts. A lower setting will tolerate more imbalance, while a higher setting will tolerate less. Default: `3`.
- DrsScaleDescendantsShares (string): Enable scalable shares for all resource pools in the cluster. Can be one of `disabled` or `scaleCpuAndMemoryShares`. Default: `disabled`.
- Folder (string): The relative path to a folder to put this cluster in. This is a path relative to the datacenter you are deploying the cluster to. Example: for the `dc1` datacenter and a provided `folder` of `foo/bar`, the provider will place a cluster named `compute-cluster-test` in a host folder located at `/dc1/host/foo/bar`, with the final inventory path being `/dc1/host/foo/bar/compute-cluster-test`.
- ForceEvacuateOnDestroy (bool): When destroying the resource, setting this to `true` will auto-remove any hosts that are currently a member of the cluster, as if they were removed by taking their entry out of `host_system_ids` (see below). This is an advanced option and should only be used for testing. Default: `false`. NOTE: Do not set `force_evacuate_on_destroy` in production operation, as there are many pitfalls to its use when working with complex cluster configurations. Depending on the virtual machines currently on the cluster and your DRS and HA settings, the full host evacuation may fail. Instead, incrementally remove hosts from your configuration by adjusting the contents of the `host_system_ids` attribute.
- HaAdmissionControlFailoverHostSystemIds (List<string>): Defines the managed object IDs of hosts to use as dedicated failover hosts. These hosts are kept as available as possible; admission control will block access to the host, and DRS will ignore the host when making recommendations.
- HaAdmissionControlHostFailureTolerance (int): The maximum number of failed hosts that admission control tolerates when making decisions on whether to permit virtual machine operations. The maximum is one less than the number of hosts in the cluster. Default: `1`.
- HaAdmissionControlPerformanceTolerance (int): The percentage of resource reduction that a cluster of virtual machines can tolerate in case of a failover. A value of `0` produces warnings only, whereas a value of `100` disables the setting. Default: `100` (disabled).
- HaAdmissionControlPolicy (string): The type of admission control policy to use with vSphere HA. Can be one of `resourcePercentage`, `slotPolicy`, `failoverHosts`, or `disabled`. Default: `resourcePercentage`.
- HaAdmissionControlResourcePercentageAutoCompute (bool): Automatically determine available resource percentages by subtracting the average number of host resources represented by the `ha_admission_control_host_failure_tolerance` setting from the total amount of resources in the cluster. Disable to supply user-defined values. Default: `true`.
- HaAdmissionControlResourcePercentageCpu (int): Controls the user-defined percentage of CPU resources in the cluster to reserve for failover. Default: `100`.
- HaAdmissionControlResourcePercentageMemory (int): Controls the user-defined percentage of memory resources in the cluster to reserve for failover. Default: `100`.
- HaAdmissionControlSlotPolicyExplicitCpu (int): Controls the user-defined CPU slot size, in MHz. Default: `32`.
- HaAdmissionControlSlotPolicyExplicitMemory (int): Controls the user-defined memory slot size, in MB. Default: `100`.
- HaAdmissionControlSlotPolicyUseExplicitSize (bool): Controls whether or not you wish to supply explicit values for the CPU and memory slot sizes. The default is `false`, which tells vSphere to gather an automatic average based on all powered-on virtual machines currently in the cluster.
- HaAdvancedOptions (Dictionary<string, string>): A key/value map that specifies advanced options for vSphere HA.
- HaDatastoreApdRecoveryAction (string): Controls the action to take on virtual machines if an APD status on an affected datastore clears in the middle of an APD event. Can be one of `none` or `reset`. Default: `none`.
- HaDatastoreApdResponse (string): Controls the action to take on virtual machines when the cluster has detected loss to all paths to a relevant datastore. Can be one of `disabled`, `warning`, `restartConservative`, or `restartAggressive`. Default: `disabled`.
- HaDatastoreApdResponseDelay (int): The time, in seconds, to wait after an APD timeout event to run the response action defined in `ha_datastore_apd_response`. Default: `180` seconds (3 minutes).
- HaDatastorePdlResponse (string): Controls the action to take on virtual machines when the cluster has detected a permanent device loss to a relevant datastore. Can be one of `disabled`, `warning`, or `restartAggressive`. Default: `disabled`.
- HaEnabled (bool): Enable vSphere HA for this cluster. Default: `false`.
- HaHeartbeatDatastoreIds (List<string>): The list of managed object IDs for preferred datastores to use for HA heartbeating. This setting is only useful when `ha_heartbeat_datastore_policy` is set to either `userSelectedDs` or `allFeasibleDsWithUserPreference`.
- HaHeartbeatDatastorePolicy (string): The selection policy for HA heartbeat datastores. Can be one of `allFeasibleDs`, `userSelectedDs`, or `allFeasibleDsWithUserPreference`. Default: `allFeasibleDsWithUserPreference`.
- HaHostIsolationResponse (string): The action to take on virtual machines when a host has detected that it has been isolated from the rest of the cluster. Can be one of `none`, `powerOff`, or `shutdown`. Default: `none`.
- HaHostMonitoring (string): Global setting that controls whether vSphere HA remediates virtual machines on host failure. Can be one of `enabled` or `disabled`. Default: `enabled`.
- HaVmComponentProtection (string): Controls vSphere VM component protection for virtual machines in this cluster. Can be one of `enabled` or `disabled`. Default: `enabled`.
- HaVmDependencyRestartCondition (string): The condition used to determine whether or not virtual machines in a certain restart priority class are online, allowing HA to move on to restarting virtual machines on the next priority. Can be one of `none`, `poweredOn`, `guestHbStatusGreen`, or `appHbStatusGreen`. The default is `none`, which means that a virtual machine is considered ready immediately after a host is found to start it on.
- HaVmFailureInterval (int): The time interval, in seconds, after which a virtual machine is marked as failed if no heartbeat has been received from it. Default: `30` seconds.
- HaVmMaximumFailureWindow (int): The time, in seconds, for the reset window in which `ha_vm_maximum_resets` can operate. When this window expires, no more resets are attempted regardless of the setting configured in `ha_vm_maximum_resets`. `-1` means no window, meaning an unlimited reset time is allotted. Default: `-1` (no window).
- HaVmMaximumResets (int): The maximum number of resets that HA will perform to a virtual machine when responding to a failure event. Default: `3`.
- HaVmMinimumUptime (int): The time, in seconds, that HA waits after powering on a virtual machine before monitoring for heartbeats. Default: `120` seconds (2 minutes).
- HaVmMonitoring (string): The type of virtual machine monitoring to use when HA is enabled in the cluster. Can be one of `vmMonitoringDisabled`, `vmMonitoringOnly`, or `vmAndAppMonitoring`. Default: `vmMonitoringDisabled`.
- HaVmRestartAdditionalDelay (int): Additional delay, in seconds, after the ready condition is met. A VM is considered ready at this point. Default: `0` seconds (no delay).
- HaVmRestartPriority (string): The default restart priority for affected virtual machines when vSphere detects a host failure. Can be one of `lowest`, `low`, `medium`, `high`, or `highest`. Default: `medium`.
- HaVmRestartTimeout (int): The maximum time, in seconds, that vSphere HA will wait for virtual machines in one priority to be ready before proceeding with the next priority. Default: `600` seconds (10 minutes).
- HostClusterExitTimeout (int): The timeout, in seconds, for each host maintenance mode operation when removing hosts from a cluster. Default: `3600` seconds (1 hour).
- HostManaged (bool): Can be set to `true` if compute cluster membership will be managed through the `host` resource rather than the `compute_cluster` resource. Conflicts with: `host_system_ids`.
- HostSystemIds (List<string>): The managed object IDs of the hosts to put in the cluster. Conflicts with: `host_managed`.
- Name (string): The name of the cluster.
- ProactiveHaAutomationLevel (string): Determines how the host quarantine, maintenance mode, or virtual machine migration recommendations made by Proactive HA are to be handled. Can be one of `Automated` or `Manual`. Default: `Manual`.
- ProactiveHaEnabled (bool): Enables Proactive HA. Default: `false`.
- ProactiveHaModerateRemediation (string): The configured remediation for moderately degraded hosts. Can be one of `MaintenanceMode` or `QuarantineMode`. Note that this cannot be set to `MaintenanceMode` when `proactive_ha_severe_remediation` is set to `QuarantineMode`. Default: `QuarantineMode`.
- ProactiveHaProviderIds (List<string>): The list of IDs for health update providers configured for this cluster.
- ProactiveHaSevereRemediation (string): The configured remediation for severely degraded hosts. Can be one of `MaintenanceMode` or `QuarantineMode`. Note that this cannot be set to `QuarantineMode` when `proactive_ha_moderate_remediation` is set to `MaintenanceMode`. Default: `QuarantineMode`.
- Tags (List<string>): The IDs of any tags to attach to this resource.
- VsanCompressionEnabled (bool): Enables vSAN compression on the cluster.
- VsanDedupEnabled (bool): Enables vSAN deduplication on the cluster. Cannot be independently set to `true`. When vSAN deduplication is enabled, vSAN compression must also be enabled.
- VsanDiskGroups (List<Pulumi.VSphere.Inputs.ComputeClusterVsanDiskGroup>): Represents the configuration of a host disk group in the cluster.
- VsanDitEncryptionEnabled (bool): Enables vSAN data-in-transit encryption on the cluster. Conflicts with `vsan_remote_datastore_ids`, i.e., the vSAN data-in-transit encryption feature cannot be enabled at the same time as the vSAN HCI Mesh feature.
- VsanDitRekeyInterval (int): Indicates the rekey interval, in minutes, for data-in-transit encryption. The valid rekey interval is 30 to 10800 (the feature defaults to 1440). Conflicts with `vsan_remote_datastore_ids`.
- VsanEnabled (bool): Enables vSAN on the cluster.
- VsanEsaEnabled (bool): Enables vSAN ESA on the cluster.
- VsanFaultDomains (List<Pulumi.VSphere.Inputs.ComputeClusterVsanFaultDomain>): Configurations of vSAN fault domains.
- VsanNetworkDiagnosticModeEnabled (bool): Enables network diagnostic mode for the vSAN performance service on the cluster.
- VsanPerformanceEnabled (bool): Enables the vSAN performance service on the cluster. Default: `true`.
- VsanRemoteDatastoreIds (List<string>): The remote vSAN datastore IDs to be mounted to this cluster. Conflicts with `vsan_dit_encryption_enabled` and `vsan_dit_rekey_interval`, i.e., the vSAN HCI Mesh feature cannot be enabled at the same time as the data-in-transit encryption feature.
- VsanStretchedCluster (Pulumi.VSphere.Inputs.ComputeClusterVsanStretchedCluster): Configurations of the vSAN stretched cluster.
- VsanUnmapEnabled (bool): Enables vSAN unmap on the cluster. You must explicitly enable vSAN unmap when you enable vSAN ESA on the cluster.
- VsanVerboseModeEnabled (bool): Enables verbose mode for the vSAN performance service on the cluster.
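
Several of the inputs above only take effect in combination: `dpm_enabled` requires `drs_enabled`, and the `ha_admission_control_*` settings are only meaningful once `ha_enabled` is `true` and the matching `ha_admission_control_policy` is selected. A sketch of a DRS- and HA-enabled cluster follows (Python); the managed object IDs are placeholders for values you would normally obtain from `get_datacenter` and `get_host` lookups.

```python
import pulumi_vsphere as vsphere

# Placeholder managed object IDs; in a real program these come from
# vsphere.get_datacenter() and vsphere.get_host() data sources.
datacenter_id = "datacenter-123"
host_ids = ["host-101", "host-102", "host-103"]

cluster = vsphere.ComputeCluster(
    "compute-cluster-test",
    name="compute-cluster-test",
    datacenter_id=datacenter_id,
    host_system_ids=host_ids,  # conflicts with host_managed
    # DRS: fully automated load balancing at the default migration threshold.
    drs_enabled=True,
    drs_automation_level="fullyAutomated",
    # HA: reserve a fixed 25% of CPU and memory for failover instead of
    # letting vSphere auto-compute the percentages.
    ha_enabled=True,
    ha_admission_control_policy="resourcePercentage",
    ha_admission_control_resource_percentage_auto_compute=False,
    ha_admission_control_resource_percentage_cpu=25,
    ha_admission_control_resource_percentage_memory=25,
)
```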
- Datacenter
Id string The managed object ID of the datacenter to create the cluster in. Forces a new resource if changed.
- Custom
Attributes map[string]string A map of custom attribute ids to attribute value strings to set for the datastore cluster.
NOTE: Custom attributes are unsupported on direct ESXi connections and require vCenter Server.
- Dpm
Automation stringLevel The automation level for host power operations in this cluster. Can be one of
manual
orautomated
. Default:manual
.- Dpm
Enabled bool Enable DPM support for DRS in this cluster. Requires
drs_enabled
to betrue
in order to be effective. Default:false
.- Dpm
Threshold int A value between
1
and5
indicating the threshold of load within the cluster that influences host power operations. This affects both power on and power off operations - a lower setting will tolerate more of a surplus/deficit than a higher setting. Default:3
.- Drs
Advanced map[string]stringOptions A key/value map that specifies advanced options for DRS and DPM.
- Drs
Automation stringLevel The default automation level for all virtual machines in this cluster. Can be one of
manual
,partiallyAutomated
, orfullyAutomated
. Default:manual
.- Drs
Enable boolPredictive Drs When
true
, enables DRS to use data from vRealize Operations Manager to make proactive DRS recommendations. *- Drs
Enable boolVm Overrides Allow individual DRS overrides to be set for virtual machines in the cluster. Default:
true
.- Drs
Enabled bool Enable DRS for this cluster. Default:
false
.- Drs
Migration intThreshold A value between
1
and5
indicating the threshold of imbalance tolerated between hosts. A lower setting will tolerate more imbalance while a higher setting will tolerate less. Default:3
.- string
Enable scalable shares for all resource pools in the cluster. Can be one of
disabled
orscaleCpuAndMemoryShares
. Default:disabled
.- Folder string
The relative path to a folder to put this cluster in. This is a path relative to the datacenter you are deploying the cluster to. Example: for the
dc1
datacenter, and a providedfolder
offoo/bar
, The provider will place a cluster namedcompute-cluster-test
in a host folder located at/dc1/host/foo/bar
, with the final inventory path being/dc1/host/foo/bar/datastore-cluster-test
.- Force
Evacuate boolOn Destroy When destroying the resource, setting this to
true
will auto-remove any hosts that are currently a member of the cluster, as if they were removed by taking their entry out ofhost_system_ids
(see below. This is an advanced option and should only be used for testing. Default:false
.NOTE: Do not set
force_evacuate_on_destroy
in production operation as there are many pitfalls to its use when working with complex cluster configurations. Depending on the virtual machines currently on the cluster, and your DRS and HA settings, the full host evacuation may fail. Instead, incrementally remove hosts from your configuration by adjusting the contents of thehost_system_ids
attribute.- Ha
Admission []stringControl Failover Host System Ids Defines the managed object IDs of hosts to use as dedicated failover hosts. These hosts are kept as available as possible - admission control will block access to the host, and DRS will ignore the host when making recommendations.
- Ha
Admission intControl Host Failure Tolerance The maximum number of failed hosts that admission control tolerates when making decisions on whether to permit virtual machine operations. The maximum is one less than the number of hosts in the cluster. Default:
1
. *- Ha
Admission intControl Performance Tolerance The percentage of resource reduction that a cluster of virtual machines can tolerate in case of a failover. A value of 0 produces warnings only, whereas a value of 100 disables the setting. Default:
100
(disabled).- Ha
Admission stringControl Policy The type of admission control policy to use with vSphere HA. Can be one of
resourcePercentage
,slotPolicy
,failoverHosts
, ordisabled
. Default:resourcePercentage
.- Ha
Admission boolControl Resource Percentage Auto Compute Automatically determine available resource percentages by subtracting the average number of host resources represented by the
ha_admission_control_host_failure_tolerance
setting from the total amount of resources in the cluster. Disable to supply user-defined values. Default:true
. *- Ha
Admission intControl Resource Percentage Cpu Controls the user-defined percentage of CPU resources in the cluster to reserve for failover. Default:
100
.- Ha
Admission intControl Resource Percentage Memory Controls the user-defined percentage of memory resources in the cluster to reserve for failover. Default:
100
.- Ha
Admission intControl Slot Policy Explicit Cpu Controls the user-defined CPU slot size, in MHz. Default:
32
.- Ha
Admission intControl Slot Policy Explicit Memory Controls the user-defined memory slot size, in MB. Default:
100
.- Ha
Admission boolControl Slot Policy Use Explicit Size Controls whether or not you wish to supply explicit values to CPU and memory slot sizes. The default is
false
, which tells vSphere to gather a automatic average based on all powered-on virtual machines currently in the cluster.- Ha
Advanced map[string]stringOptions A key/value map that specifies advanced options for vSphere HA.
- Ha
Datastore stringApd Recovery Action Controls the action to take on virtual machines if an APD status on an affected datastore clears in the middle of an APD event. Can be one of
none
orreset
. Default:none
. *- Ha
Datastore stringApd Response Controls the action to take on virtual machines when the cluster has detected loss to all paths to a relevant datastore. Can be one of
disabled
,warning
,restartConservative
, orrestartAggressive
. Default:disabled
. *- Ha
Datastore intApd Response Delay The time, in seconds, to wait after an APD timeout event to run the response action defined in
ha_datastore_apd_response
. Default:180
seconds (3 minutes). *- Ha
Datastore stringPdl Response Controls the action to take on virtual machines when the cluster has detected a permanent device loss to a relevant datastore. Can be one of
disabled
,warning
, orrestartAggressive
. Default:disabled
. *- Ha
Enabled bool Enable vSphere HA for this cluster. Default:
false
.- Ha
Heartbeat []stringDatastore Ids The list of managed object IDs for preferred datastores to use for HA heartbeating. This setting is only useful when
ha_heartbeat_datastore_policy
is set to eitheruserSelectedDs
orallFeasibleDsWithUserPreference
.- Ha
Heartbeat stringDatastore Policy The selection policy for HA heartbeat datastores. Can be one of
allFeasibleDs
,userSelectedDs
, orallFeasibleDsWithUserPreference
. Default:allFeasibleDsWithUserPreference
.- Ha
Host stringIsolation Response The action to take on virtual machines when a host has detected that it has been isolated from the rest of the cluster. Can be one of
none
,powerOff
, orshutdown
. Default:none
.- Ha
Host stringMonitoring Global setting that controls whether vSphere HA remediates virtual machines on host failure. Can be one of
enabled
ordisabled
. Default:enabled
.- Ha
Vm stringComponent Protection Controls vSphere VM component protection for virtual machines in this cluster. Can be one of
enabled
ordisabled
. Default:enabled
. *- Ha
Vm stringDependency Restart Condition The condition used to determine whether or not virtual machines in a certain restart priority class are online, allowing HA to move on to restarting virtual machines on the next priority. Can be one of
none
,poweredOn
,guestHbStatusGreen
, orappHbStatusGreen
. The default isnone
, which means that a virtual machine is considered ready immediately after a host is found to start it on. *- Ha
Vm intFailure Interval The time interval, in seconds, a heartbeat from a virtual machine is not received within this configured interval, the virtual machine is marked as failed. Default:
30
seconds.- Ha
Vm intMaximum Failure Window The time, in seconds, for the reset window in which
ha_vm_maximum_resets
can operate. When this window expires, no more resets are attempted regardless of the setting configured inha_vm_maximum_resets
.-1
means no window, meaning an unlimited reset time is allotted. Default:-1
(no window).- Ha
Vm intMaximum Resets The maximum number of resets that HA will perform to a virtual machine when responding to a failure event. Default:
3
- Ha
Vm intMinimum Uptime The time, in seconds, that HA waits after powering on a virtual machine before monitoring for heartbeats. Default:
120
seconds (2 minutes).- Ha
Vm stringMonitoring The type of virtual machine monitoring to use when HA is enabled in the cluster. Can be one of
vmMonitoringDisabled
,vmMonitoringOnly
, orvmAndAppMonitoring
. Default:vmMonitoringDisabled
.- Ha
Vm intRestart Additional Delay Additional delay, in seconds, after ready condition is met. A VM is considered ready at this point. Default:
0
seconds (no delay). *- Ha
Vm stringRestart Priority The default restart priority for affected virtual machines when vSphere detects a host failure. Can be one of
lowest
,low
,medium
,high
, orhighest
. Default:medium
.- Ha
Vm intRestart Timeout The maximum time, in seconds, that vSphere HA will wait for virtual machines in one priority to be ready before proceeding with the next priority. Default:
600
seconds (10 minutes). *- Host
Cluster intExit Timeout The timeout, in seconds, for each host maintenance mode operation when removing hosts from a cluster. Default:
3600
seconds (1 hour).- Host
Managed bool Can be set to
true
if compute cluster membership will be managed through thehost
resource rather than thecompute_cluster
resource. Conflicts with:host_system_ids
.- Host
System []stringIds The managed object IDs of the hosts to put in the cluster. Conflicts with:
host_managed
.- Name string
The name of the cluster.
- Proactive
Ha stringAutomation Level Determines how the host quarantine, maintenance mode, or virtual machine migration recommendations made by proactive HA are to be handled. Can be one of
Automated
orManual
. Default:Manual
. *- Proactive
Ha boolEnabled Enables Proactive HA. Default:
false
. *- Proactive
Ha stringModerate Remediation The configured remediation for moderately degraded hosts. Can be one of
MaintenanceMode
orQuarantineMode
. Note that this cannot be set toMaintenanceMode
whenproactive_ha_severe_remediation
is set toQuarantineMode
. Default:QuarantineMode
. *- Proactive
Ha []stringProvider Ids The list of IDs for health update providers configured for this cluster. *
- Proactive
Ha stringSevere Remediation The configured remediation for severely degraded hosts. Can be one of
MaintenanceMode
orQuarantineMode
. Note that this cannot be set toQuarantineMode
whenproactive_ha_moderate_remediation
is set toMaintenanceMode
. Default:QuarantineMode
. *- []string
The IDs of any tags to attach to this resource.
- Vsan
Compression boolEnabled Enables vSAN compression on the cluster.
- Vsan
Dedup boolEnabled Enables vSAN deduplication on the cluster. Cannot be independently set to
true
. When vSAN deduplication is enabled, vSAN compression must also be enabled.- Vsan
Disk []ComputeGroups Cluster Vsan Disk Group Args Represents the configuration of a host disk group in the cluster.
- Vsan
Dit boolEncryption Enabled Enables vSAN data-in-transit encryption on the cluster. Conflicts with
vsan_remote_datastore_ids
, i.e., vSAN data-in-transit feature cannot be enabled with the vSAN HCI Mesh feature at the same time.- Vsan
Dit intRekey Interval Indicates the rekey interval in minutes for data-in-transit encryption. The valid rekey interval is 30 to 10800 (feature defaults to 1440). Conflicts with
vsan_remote_datastore_ids
.- Vsan
Enabled bool Enables vSAN on the cluster.
- Vsan
Esa boolEnabled Enables vSAN ESA on the cluster.
- Vsan
Fault []ComputeDomains Cluster Vsan Fault Domain Args Configurations of vSAN fault domains.
- Vsan
Network boolDiagnostic Mode Enabled Enables network diagnostic mode for vSAN performance service on the cluster.
- Vsan
Performance boolEnabled Enables vSAN performance service on the cluster. Default:
true
.- Vsan
Remote []stringDatastore Ids The remote vSAN datastore IDs to be mounted to this cluster. Conflicts with
vsan_dit_encryption_enabled
andvsan_dit_rekey_interval
, i.e., vSAN HCI Mesh feature cannot be enabled with data-in-transit encryption feature at the same time.- Vsan
Stretched ComputeCluster Cluster Vsan Stretched Cluster Args Configurations of vSAN stretched cluster.
- Vsan
Unmap boolEnabled Enables vSAN unmap on the cluster. You must explicitly enable vSAN unmap when you enable vSAN ESA on the cluster.
- Vsan
Verbose boolMode Enabled Enables verbose mode for vSAN performance service on the cluster.
- datacenter
Id String The managed object ID of the datacenter to create the cluster in. Forces a new resource if changed.
- custom
Attributes Map<String,String> A map of custom attribute ids to attribute value strings to set for the datastore cluster.
NOTE: Custom attributes are unsupported on direct ESXi connections and require vCenter Server.
- dpm
Automation StringLevel The automation level for host power operations in this cluster. Can be one of
manual
orautomated
. Default:manual
.- dpm
Enabled Boolean Enable DPM support for DRS in this cluster. Requires
drs_enabled
to betrue
in order to be effective. Default:false
.- dpm
Threshold Integer A value between
1
and5
indicating the threshold of load within the cluster that influences host power operations. This affects both power on and power off operations - a lower setting will tolerate more of a surplus/deficit than a higher setting. Default:3
.- drs
Advanced Map<String,String>Options A key/value map that specifies advanced options for DRS and DPM.
- drs
Automation StringLevel The default automation level for all virtual machines in this cluster. Can be one of
manual
,partiallyAutomated
, orfullyAutomated
. Default:manual
.- drs
Enable BooleanPredictive Drs When
true
, enables DRS to use data from vRealize Operations Manager to make proactive DRS recommendations. *- drs
Enable BooleanVm Overrides Allow individual DRS overrides to be set for virtual machines in the cluster. Default:
true
.- drs
Enabled Boolean Enable DRS for this cluster. Default:
false
.- drs
Migration IntegerThreshold A value between
1
and5
indicating the threshold of imbalance tolerated between hosts. A lower setting will tolerate more imbalance while a higher setting will tolerate less. Default:3
.- String
Enable scalable shares for all resource pools in the cluster. Can be one of
disabled
orscaleCpuAndMemoryShares
. Default:disabled
.- folder String
The relative path to a folder to put this cluster in. This is a path relative to the datacenter you are deploying the cluster to. Example: for the
dc1
datacenter, and a providedfolder
offoo/bar
, The provider will place a cluster namedcompute-cluster-test
in a host folder located at/dc1/host/foo/bar
, with the final inventory path being/dc1/host/foo/bar/datastore-cluster-test
.- force
Evacuate BooleanOn Destroy When destroying the resource, setting this to
true
will auto-remove any hosts that are currently a member of the cluster, as if they were removed by taking their entry out ofhost_system_ids
(see below. This is an advanced option and should only be used for testing. Default:false
.NOTE: Do not set
force_evacuate_on_destroy
in production operation as there are many pitfalls to its use when working with complex cluster configurations. Depending on the virtual machines currently on the cluster, and your DRS and HA settings, the full host evacuation may fail. Instead, incrementally remove hosts from your configuration by adjusting the contents of thehost_system_ids
attribute.- ha
Admission List<String>Control Failover Host System Ids Defines the managed object IDs of hosts to use as dedicated failover hosts. These hosts are kept as available as possible - admission control will block access to the host, and DRS will ignore the host when making recommendations.
- ha
Admission IntegerControl Host Failure Tolerance The maximum number of failed hosts that admission control tolerates when making decisions on whether to permit virtual machine operations. The maximum is one less than the number of hosts in the cluster. Default:
1
. *- ha
Admission IntegerControl Performance Tolerance The percentage of resource reduction that a cluster of virtual machines can tolerate in case of a failover. A value of 0 produces warnings only, whereas a value of 100 disables the setting. Default:
100
(disabled).- ha
Admission StringControl Policy The type of admission control policy to use with vSphere HA. Can be one of
resourcePercentage
,slotPolicy
,failoverHosts
, ordisabled
. Default:resourcePercentage
.- ha
Admission BooleanControl Resource Percentage Auto Compute Automatically determine available resource percentages by subtracting the average number of host resources represented by the
ha_admission_control_host_failure_tolerance
setting from the total amount of resources in the cluster. Disable to supply user-defined values. Default:true
. *- ha
Admission IntegerControl Resource Percentage Cpu Controls the user-defined percentage of CPU resources in the cluster to reserve for failover. Default:
100
.- ha
Admission IntegerControl Resource Percentage Memory Controls the user-defined percentage of memory resources in the cluster to reserve for failover. Default:
100
.- ha
Admission IntegerControl Slot Policy Explicit Cpu Controls the user-defined CPU slot size, in MHz. Default:
32
.- ha
Admission IntegerControl Slot Policy Explicit Memory Controls the user-defined memory slot size, in MB. Default:
100
.- ha
Admission BooleanControl Slot Policy Use Explicit Size Controls whether or not you wish to supply explicit values to CPU and memory slot sizes. The default is
false
, which tells vSphere to gather a automatic average based on all powered-on virtual machines currently in the cluster.- ha
Advanced Map<String,String>Options A key/value map that specifies advanced options for vSphere HA.
- ha
Datastore StringApd Recovery Action Controls the action to take on virtual machines if an APD status on an affected datastore clears in the middle of an APD event. Can be one of
none
orreset
. Default:none
. *- ha
Datastore StringApd Response Controls the action to take on virtual machines when the cluster has detected loss to all paths to a relevant datastore. Can be one of
disabled
,warning
,restartConservative
, orrestartAggressive
. Default:disabled
. *- ha
Datastore IntegerApd Response Delay The time, in seconds, to wait after an APD timeout event to run the response action defined in
ha_datastore_apd_response
. Default:180
seconds (3 minutes). *- ha
Datastore StringPdl Response Controls the action to take on virtual machines when the cluster has detected a permanent device loss to a relevant datastore. Can be one of
disabled
,warning
, orrestartAggressive
. Default:disabled
. *- ha
Enabled Boolean Enable vSphere HA for this cluster. Default:
false
.- ha
Heartbeat List<String>Datastore Ids The list of managed object IDs for preferred datastores to use for HA heartbeating. This setting is only useful when
ha_heartbeat_datastore_policy
is set to eitheruserSelectedDs
orallFeasibleDsWithUserPreference
.- ha
Heartbeat StringDatastore Policy The selection policy for HA heartbeat datastores. Can be one of
allFeasibleDs
,userSelectedDs
, orallFeasibleDsWithUserPreference
. Default:allFeasibleDsWithUserPreference
.- ha
Host StringIsolation Response The action to take on virtual machines when a host has detected that it has been isolated from the rest of the cluster. Can be one of
none
,powerOff
, orshutdown
. Default:none
.- ha
Host StringMonitoring Global setting that controls whether vSphere HA remediates virtual machines on host failure. Can be one of
enabled
ordisabled
. Default:enabled
.- ha
Vm StringComponent Protection Controls vSphere VM component protection for virtual machines in this cluster. Can be one of
enabled
ordisabled
. Default:enabled
. *- ha
Vm StringDependency Restart Condition The condition used to determine whether or not virtual machines in a certain restart priority class are online, allowing HA to move on to restarting virtual machines on the next priority. Can be one of
none
,poweredOn
,guestHbStatusGreen
, orappHbStatusGreen
. The default isnone
, which means that a virtual machine is considered ready immediately after a host is found to start it on. *- ha
Vm IntegerFailure Interval The time interval, in seconds, a heartbeat from a virtual machine is not received within this configured interval, the virtual machine is marked as failed. Default:
30
seconds.- ha
Vm IntegerMaximum Failure Window The time, in seconds, for the reset window in which
ha_vm_maximum_resets
can operate. When this window expires, no more resets are attempted regardless of the setting configured inha_vm_maximum_resets
.-1
means no window, meaning an unlimited reset time is allotted. Default:-1
(no window).- ha
Vm IntegerMaximum Resets The maximum number of resets that HA will perform to a virtual machine when responding to a failure event. Default:
3
- ha
Vm IntegerMinimum Uptime The time, in seconds, that HA waits after powering on a virtual machine before monitoring for heartbeats. Default:
120
seconds (2 minutes).- ha
Vm StringMonitoring The type of virtual machine monitoring to use when HA is enabled in the cluster. Can be one of
vmMonitoringDisabled
,vmMonitoringOnly
, orvmAndAppMonitoring
. Default:vmMonitoringDisabled
.- ha
Vm IntegerRestart Additional Delay Additional delay, in seconds, after ready condition is met. A VM is considered ready at this point. Default:
0
seconds (no delay). *- ha
Vm StringRestart Priority The default restart priority for affected virtual machines when vSphere detects a host failure. Can be one of
lowest
,low
,medium
,high
, orhighest
. Default:medium
.- ha
Vm IntegerRestart Timeout The maximum time, in seconds, that vSphere HA will wait for virtual machines in one priority to be ready before proceeding with the next priority. Default:
600
seconds (10 minutes). *- host
Cluster IntegerExit Timeout The timeout, in seconds, for each host maintenance mode operation when removing hosts from a cluster. Default:
3600
seconds (1 hour).- host
Managed Boolean Can be set to
true
if compute cluster membership will be managed through thehost
resource rather than thecompute_cluster
resource. Conflicts with:host_system_ids
.- host
System List<String>Ids The managed object IDs of the hosts to put in the cluster. Conflicts with:
host_managed
.- name String
The name of the cluster.
- proactive
Ha StringAutomation Level Determines how the host quarantine, maintenance mode, or virtual machine migration recommendations made by proactive HA are to be handled. Can be one of
Automated
orManual
. Default:Manual
. *- proactive
Ha BooleanEnabled Enables Proactive HA. Default:
false
. *- proactive
Ha StringModerate Remediation The configured remediation for moderately degraded hosts. Can be one of
MaintenanceMode
orQuarantineMode
. Note that this cannot be set toMaintenanceMode
whenproactive_ha_severe_remediation
is set toQuarantineMode
. Default:QuarantineMode
. *- proactive
Ha List<String>Provider Ids The list of IDs for health update providers configured for this cluster. *
- proactive
Ha StringSevere Remediation The configured remediation for severely degraded hosts. Can be one of
MaintenanceMode
orQuarantineMode
. Note that this cannot be set toQuarantineMode
whenproactive_ha_moderate_remediation
is set toMaintenanceMode
. Default:QuarantineMode
. *- List<String>
The IDs of any tags to attach to this resource.
- vsan
Compression BooleanEnabled Enables vSAN compression on the cluster.
- vsan
Dedup BooleanEnabled Enables vSAN deduplication on the cluster. Cannot be independently set to
true
. When vSAN deduplication is enabled, vSAN compression must also be enabled.- vsan
Disk List<ComputeGroups Cluster Vsan Disk Group> Represents the configuration of a host disk group in the cluster.
- vsan
Dit BooleanEncryption Enabled Enables vSAN data-in-transit encryption on the cluster. Conflicts with
vsan_remote_datastore_ids
, i.e., vSAN data-in-transit feature cannot be enabled with the vSAN HCI Mesh feature at the same time.- vsan
Dit IntegerRekey Interval Indicates the rekey interval in minutes for data-in-transit encryption. The valid rekey interval is 30 to 10800 (feature defaults to 1440). Conflicts with
vsan_remote_datastore_ids
.- vsan
Enabled Boolean Enables vSAN on the cluster.
- vsan
Esa BooleanEnabled Enables vSAN ESA on the cluster.
- vsan
Fault List<ComputeDomains Cluster Vsan Fault Domain> Configurations of vSAN fault domains.
- vsan
Network BooleanDiagnostic Mode Enabled Enables network diagnostic mode for vSAN performance service on the cluster.
- vsan
Performance BooleanEnabled Enables vSAN performance service on the cluster. Default:
true
.- vsan
Remote List<String>Datastore Ids The remote vSAN datastore IDs to be mounted to this cluster. Conflicts with
vsan_dit_encryption_enabled
andvsan_dit_rekey_interval
, i.e., vSAN HCI Mesh feature cannot be enabled with data-in-transit encryption feature at the same time.- vsan
Stretched ComputeCluster Cluster Vsan Stretched Cluster Configurations of vSAN stretched cluster.
- vsan
Unmap BooleanEnabled Enables vSAN unmap on the cluster. You must explicitly enable vSAN unmap when you enable vSAN ESA on the cluster.
- vsan
Verbose BooleanMode Enabled Enables verbose mode for vSAN performance service on the cluster.
- datacenter
Id string The managed object ID of the datacenter to create the cluster in. Forces a new resource if changed.
- custom
Attributes {[key: string]: string} A map of custom attribute ids to attribute value strings to set for the datastore cluster.
NOTE: Custom attributes are unsupported on direct ESXi connections and require vCenter Server.
- dpm
Automation stringLevel The automation level for host power operations in this cluster. Can be one of
manual
orautomated
. Default:manual
.- dpm
Enabled boolean Enable DPM support for DRS in this cluster. Requires
drs_enabled
to betrue
in order to be effective. Default:false
.- dpm
Threshold number A value between
1
and5
indicating the threshold of load within the cluster that influences host power operations. This affects both power on and power off operations - a lower setting will tolerate more of a surplus/deficit than a higher setting. Default:3
.- drs
Advanced {[key: string]: string}Options A key/value map that specifies advanced options for DRS and DPM.
- drs
Automation stringLevel The default automation level for all virtual machines in this cluster. Can be one of
manual
,partiallyAutomated
, orfullyAutomated
. Default:manual
.- drs
Enable booleanPredictive Drs When
true
, enables DRS to use data from vRealize Operations Manager to make proactive DRS recommendations. *- drs
Enable booleanVm Overrides Allow individual DRS overrides to be set for virtual machines in the cluster. Default:
true
.- drs
Enabled boolean Enable DRS for this cluster. Default:
false
.- drs
Migration numberThreshold A value between
1
and5
indicating the threshold of imbalance tolerated between hosts. A lower setting will tolerate more imbalance while a higher setting will tolerate less. Default:3
.- string
Enable scalable shares for all resource pools in the cluster. Can be one of
disabled
orscaleCpuAndMemoryShares
. Default:disabled
.- folder string
The relative path to a folder to put this cluster in. This is a path relative to the datacenter you are deploying the cluster to. Example: for the
dc1
datacenter, and a providedfolder
offoo/bar
, The provider will place a cluster namedcompute-cluster-test
in a host folder located at/dc1/host/foo/bar
, with the final inventory path being/dc1/host/foo/bar/datastore-cluster-test
.- force
Evacuate booleanOn Destroy When destroying the resource, setting this to
true
will auto-remove any hosts that are currently a member of the cluster, as if they were removed by taking their entry out ofhost_system_ids
(see below. This is an advanced option and should only be used for testing. Default:false
.NOTE: Do not set
force_evacuate_on_destroy
in production operation as there are many pitfalls to its use when working with complex cluster configurations. Depending on the virtual machines currently on the cluster, and your DRS and HA settings, the full host evacuation may fail. Instead, incrementally remove hosts from your configuration by adjusting the contents of thehost_system_ids
attribute.- ha
Admission string[]Control Failover Host System Ids Defines the managed object IDs of hosts to use as dedicated failover hosts. These hosts are kept as available as possible - admission control will block access to the host, and DRS will ignore the host when making recommendations.
- ha
Admission numberControl Host Failure Tolerance The maximum number of failed hosts that admission control tolerates when making decisions on whether to permit virtual machine operations. The maximum is one less than the number of hosts in the cluster. Default:
1
. *- ha
Admission numberControl Performance Tolerance The percentage of resource reduction that a cluster of virtual machines can tolerate in case of a failover. A value of 0 produces warnings only, whereas a value of 100 disables the setting. Default:
100
(disabled).- ha
Admission stringControl Policy The type of admission control policy to use with vSphere HA. Can be one of
resourcePercentage
,slotPolicy
,failoverHosts
, ordisabled
. Default:resourcePercentage
.- ha
Admission booleanControl Resource Percentage Auto Compute Automatically determine available resource percentages by subtracting the average number of host resources represented by the
ha_admission_control_host_failure_tolerance
setting from the total amount of resources in the cluster. Disable to supply user-defined values. Default:true
. *- ha
Admission numberControl Resource Percentage Cpu Controls the user-defined percentage of CPU resources in the cluster to reserve for failover. Default:
100
.- ha
Admission numberControl Resource Percentage Memory Controls the user-defined percentage of memory resources in the cluster to reserve for failover. Default:
100
.- ha
Admission numberControl Slot Policy Explicit Cpu Controls the user-defined CPU slot size, in MHz. Default:
32
.- ha
Admission numberControl Slot Policy Explicit Memory Controls the user-defined memory slot size, in MB. Default:
100
.- ha
Admission booleanControl Slot Policy Use Explicit Size Controls whether or not you wish to supply explicit values to CPU and memory slot sizes. The default is
false
, which tells vSphere to gather a automatic average based on all powered-on virtual machines currently in the cluster.- ha
Advanced {[key: string]: string}Options A key/value map that specifies advanced options for vSphere HA.
- ha
Datastore stringApd Recovery Action Controls the action to take on virtual machines if an APD status on an affected datastore clears in the middle of an APD event. Can be one of
none
orreset
. Default:none
. *- ha
Datastore stringApd Response Controls the action to take on virtual machines when the cluster has detected loss to all paths to a relevant datastore. Can be one of
disabled
,warning
,restartConservative
, orrestartAggressive
. Default:disabled
. *- ha
Datastore numberApd Response Delay The time, in seconds, to wait after an APD timeout event to run the response action defined in
ha_datastore_apd_response
. Default:180
seconds (3 minutes). *- ha
Datastore stringPdl Response Controls the action to take on virtual machines when the cluster has detected a permanent device loss to a relevant datastore. Can be one of
disabled
,warning
, orrestartAggressive
. Default:disabled
. *- ha
Enabled boolean Enable vSphere HA for this cluster. Default:
false
.- ha
Heartbeat string[]Datastore Ids The list of managed object IDs for preferred datastores to use for HA heartbeating. This setting is only useful when
ha_heartbeat_datastore_policy
is set to eitheruserSelectedDs
orallFeasibleDsWithUserPreference
.- ha
Heartbeat stringDatastore Policy The selection policy for HA heartbeat datastores. Can be one of
allFeasibleDs
,userSelectedDs
, orallFeasibleDsWithUserPreference
. Default:allFeasibleDsWithUserPreference
.- ha
Host stringIsolation Response The action to take on virtual machines when a host has detected that it has been isolated from the rest of the cluster. Can be one of
none
,powerOff
, orshutdown
. Default:none
.- ha
Host stringMonitoring Global setting that controls whether vSphere HA remediates virtual machines on host failure. Can be one of
enabled
ordisabled
. Default:enabled
.- ha
Vm stringComponent Protection Controls vSphere VM component protection for virtual machines in this cluster. Can be one of
enabled
ordisabled
. Default:enabled
. *- ha
Vm stringDependency Restart Condition The condition used to determine whether or not virtual machines in a certain restart priority class are online, allowing HA to move on to restarting virtual machines on the next priority. Can be one of
none
,poweredOn
,guestHbStatusGreen
, orappHbStatusGreen
. The default isnone
, which means that a virtual machine is considered ready immediately after a host is found to start it on. *- ha
Vm numberFailure Interval The time interval, in seconds, a heartbeat from a virtual machine is not received within this configured interval, the virtual machine is marked as failed. Default:
30
seconds.- ha
Vm numberMaximum Failure Window The time, in seconds, for the reset window in which
ha_vm_maximum_resets
can operate. When this window expires, no more resets are attempted regardless of the setting configured inha_vm_maximum_resets
.-1
means no window, meaning an unlimited reset time is allotted. Default:-1
(no window).- ha
Vm numberMaximum Resets The maximum number of resets that HA will perform to a virtual machine when responding to a failure event. Default:
3
- ha
Vm numberMinimum Uptime The time, in seconds, that HA waits after powering on a virtual machine before monitoring for heartbeats. Default:
120
seconds (2 minutes).- ha
Vm stringMonitoring The type of virtual machine monitoring to use when HA is enabled in the cluster. Can be one of
vmMonitoringDisabled
,vmMonitoringOnly
, orvmAndAppMonitoring
. Default:vmMonitoringDisabled
.- ha
Vm numberRestart Additional Delay Additional delay, in seconds, after ready condition is met. A VM is considered ready at this point. Default:
0
seconds (no delay). *- ha
Vm stringRestart Priority The default restart priority for affected virtual machines when vSphere detects a host failure. Can be one of
lowest
,low
,medium
,high
, orhighest
. Default:medium
.- ha
Vm numberRestart Timeout The maximum time, in seconds, that vSphere HA will wait for virtual machines in one priority to be ready before proceeding with the next priority. Default:
600
seconds (10 minutes). *- host
Cluster numberExit Timeout The timeout, in seconds, for each host maintenance mode operation when removing hosts from a cluster. Default:
3600
seconds (1 hour).- host
Managed boolean Can be set to
true
if compute cluster membership will be managed through thehost
resource rather than thecompute_cluster
resource. Conflicts with:host_system_ids
.- host
System string[]Ids The managed object IDs of the hosts to put in the cluster. Conflicts with:
host_managed
.- name string
The name of the cluster.
- proactive
Ha stringAutomation Level Determines how the host quarantine, maintenance mode, or virtual machine migration recommendations made by proactive HA are to be handled. Can be one of
Automated
orManual
. Default:Manual
. *- proactive
Ha booleanEnabled Enables Proactive HA. Default:
false
. *- proactive
Ha stringModerate Remediation The configured remediation for moderately degraded hosts. Can be one of
MaintenanceMode
orQuarantineMode
. Note that this cannot be set toMaintenanceMode
whenproactive_ha_severe_remediation
is set toQuarantineMode
. Default:QuarantineMode
. *- proactive
Ha string[]Provider Ids The list of IDs for health update providers configured for this cluster. *
- proactive
Ha stringSevere Remediation The configured remediation for severely degraded hosts. Can be one of
MaintenanceMode
orQuarantineMode
. Note that this cannot be set toQuarantineMode
whenproactive_ha_moderate_remediation
is set toMaintenanceMode
. Default:QuarantineMode
. *- string[]
The IDs of any tags to attach to this resource.
- vsan
Compression booleanEnabled Enables vSAN compression on the cluster.
- vsan
Dedup booleanEnabled Enables vSAN deduplication on the cluster. Cannot be independently set to
true
. When vSAN deduplication is enabled, vSAN compression must also be enabled.- vsan
Disk ComputeGroups Cluster Vsan Disk Group[] Represents the configuration of a host disk group in the cluster.
- vsan
Dit booleanEncryption Enabled Enables vSAN data-in-transit encryption on the cluster. Conflicts with
vsan_remote_datastore_ids
, i.e., vSAN data-in-transit feature cannot be enabled with the vSAN HCI Mesh feature at the same time.- vsan
Dit numberRekey Interval Indicates the rekey interval in minutes for data-in-transit encryption. The valid rekey interval is 30 to 10800 (feature defaults to 1440). Conflicts with
vsan_remote_datastore_ids
.- vsan
Enabled boolean Enables vSAN on the cluster.
- vsan
Esa booleanEnabled Enables vSAN ESA on the cluster.
- vsan
Fault ComputeDomains Cluster Vsan Fault Domain[] Configurations of vSAN fault domains.
- vsan
Network booleanDiagnostic Mode Enabled Enables network diagnostic mode for vSAN performance service on the cluster.
- vsan
Performance booleanEnabled Enables vSAN performance service on the cluster. Default:
true
.- vsan
Remote string[]Datastore Ids The remote vSAN datastore IDs to be mounted to this cluster. Conflicts with
vsan_dit_encryption_enabled
andvsan_dit_rekey_interval
, i.e., vSAN HCI Mesh feature cannot be enabled with data-in-transit encryption feature at the same time.- vsan
Stretched ComputeCluster Cluster Vsan Stretched Cluster Configurations of vSAN stretched cluster.
- vsan
Unmap booleanEnabled Enables vSAN unmap on the cluster. You must explicitly enable vSAN unmap when you enable vSAN ESA on the cluster.
- vsan
Verbose booleanMode Enabled Enables verbose mode for vSAN performance service on the cluster.
- datacenter_id (str) - The managed object ID of the datacenter to create the cluster in. Forces a new resource if changed.
- custom_attributes (Mapping[str, str]) - A map of custom attribute IDs to attribute value strings to set for the cluster. NOTE: Custom attributes are unsupported on direct ESXi connections and require vCenter Server.
- dpm_automation_level (str) - The automation level for host power operations in this cluster. Can be one of manual or automated. Default: manual.
- dpm_enabled (bool) - Enable DPM support for DRS in this cluster. Requires drs_enabled to be true in order to be effective. Default: false.
- dpm_threshold (int) - A value between 1 and 5 indicating the threshold of load within the cluster that influences host power operations. This affects both power-on and power-off operations; a lower setting tolerates more of a surplus or deficit than a higher setting. Default: 3.
- drs_advanced_options (Mapping[str, str]) - A key/value map that specifies advanced options for DRS and DPM.
- drs_automation_level (str) - The default automation level for all virtual machines in this cluster. Can be one of manual, partiallyAutomated, or fullyAutomated. Default: manual.
- drs_enable_predictive_drs (bool) - When true, enables DRS to use data from vRealize Operations Manager to make proactive DRS recommendations.
- drs_enable_vm_overrides (bool) - Allow individual DRS overrides to be set for virtual machines in the cluster. Default: true.
- drs_enabled (bool) - Enable DRS for this cluster. Default: false.
- drs_migration_threshold (int) - A value between 1 and 5 indicating the threshold of imbalance tolerated between hosts. A lower setting tolerates more imbalance; a higher setting tolerates less. Default: 3.
- drs_scale_descendants_shares (str) - Enable scalable shares for all resource pools in the cluster. Can be one of disabled or scaleCpuAndMemoryShares. Default: disabled.
- folder (str) - The relative path to a folder to put this cluster in, relative to the datacenter the cluster is deployed to. Example: for the dc1 datacenter and a provided folder of foo/bar, the provider will place a cluster named compute-cluster-test in a host folder located at /dc1/host/foo/bar, with the final inventory path being /dc1/host/foo/bar/compute-cluster-test.
- force_evacuate_on_destroy (bool) - When destroying the resource, setting this to true will auto-remove any hosts that are currently members of the cluster, as if they were removed by taking their entry out of host_system_ids (see below). This is an advanced option and should only be used for testing. Default: false. NOTE: Do not set force_evacuate_on_destroy in production; there are many pitfalls to its use when working with complex cluster configurations. Depending on the virtual machines currently on the cluster and your DRS and HA settings, the full host evacuation may fail. Instead, incrementally remove hosts from your configuration by adjusting the contents of the host_system_ids attribute.
- ha_admission_control_failover_host_system_ids (Sequence[str]) - Defines the managed object IDs of hosts to use as dedicated failover hosts. These hosts are kept as available as possible; admission control blocks access to the host, and DRS ignores the host when making recommendations.
- ha_admission_control_host_failure_tolerance (int) - The maximum number of failed hosts that admission control tolerates when making decisions on whether to permit virtual machine operations. The maximum is one less than the number of hosts in the cluster. Default: 1.
- ha_admission_control_performance_tolerance (int) - The percentage of resource reduction that a cluster of virtual machines can tolerate in case of a failover. A value of 0 produces warnings only, whereas a value of 100 disables the setting. Default: 100 (disabled).
- ha_admission_control_policy (str) - The type of admission control policy to use with vSphere HA. Can be one of resourcePercentage, slotPolicy, failoverHosts, or disabled. Default: resourcePercentage.
- ha_admission_control_resource_percentage_auto_compute (bool) - Automatically determine available resource percentages by subtracting the average number of host resources represented by the ha_admission_control_host_failure_tolerance setting from the total amount of resources in the cluster. Disable to supply user-defined values. Default: true.
- ha_admission_control_resource_percentage_cpu (int) - Controls the user-defined percentage of CPU resources in the cluster to reserve for failover. Default: 100.
- ha_admission_control_resource_percentage_memory (int) - Controls the user-defined percentage of memory resources in the cluster to reserve for failover. Default: 100.
- ha_admission_control_slot_policy_explicit_cpu (int) - Controls the user-defined CPU slot size, in MHz. Default: 32.
- ha_admission_control_slot_policy_explicit_memory (int) - Controls the user-defined memory slot size, in MB. Default: 100.
- ha_admission_control_slot_policy_use_explicit_size (bool) - Controls whether to supply explicit values for the CPU and memory slot sizes. The default is false, which tells vSphere to compute an automatic average based on all powered-on virtual machines currently in the cluster.
- ha_advanced_options (Mapping[str, str]) - A key/value map that specifies advanced options for vSphere HA.
- ha_datastore_apd_recovery_action (str) - Controls the action to take on virtual machines if an APD status on an affected datastore clears in the middle of an APD event. Can be one of none or reset. Default: none.
- ha_datastore_apd_response (str) - Controls the action to take on virtual machines when the cluster has detected loss of all paths to a relevant datastore. Can be one of disabled, warning, restartConservative, or restartAggressive. Default: disabled.
- ha_datastore_apd_response_delay (int) - The time, in seconds, to wait after an APD timeout event before running the response action defined in ha_datastore_apd_response. Default: 180 seconds (3 minutes).
- ha_datastore_pdl_response (str) - Controls the action to take on virtual machines when the cluster has detected a permanent device loss to a relevant datastore. Can be one of disabled, warning, or restartAggressive. Default: disabled.
- ha_enabled (bool) - Enable vSphere HA for this cluster. Default: false.
- ha_heartbeat_datastore_ids (Sequence[str]) - The list of managed object IDs for preferred datastores to use for HA heartbeating. This setting is only useful when ha_heartbeat_datastore_policy is set to either userSelectedDs or allFeasibleDsWithUserPreference.
- ha_heartbeat_datastore_policy (str) - The selection policy for HA heartbeat datastores. Can be one of allFeasibleDs, userSelectedDs, or allFeasibleDsWithUserPreference. Default: allFeasibleDsWithUserPreference.
- ha_host_isolation_response (str) - The action to take on virtual machines when a host has detected that it has been isolated from the rest of the cluster. Can be one of none, powerOff, or shutdown. Default: none.
- ha_host_monitoring (str) - Global setting that controls whether vSphere HA remediates virtual machines on host failure. Can be one of enabled or disabled. Default: enabled.
- ha_vm_component_protection (str) - Controls vSphere VM component protection for virtual machines in this cluster. Can be one of enabled or disabled. Default: enabled.
- ha_vm_dependency_restart_condition (str) - The condition used to determine whether or not the virtual machines in a certain restart priority class are online, allowing HA to move on to restarting virtual machines on the next priority. Can be one of none, poweredOn, guestHbStatusGreen, or appHbStatusGreen. The default is none, which means a virtual machine is considered ready immediately after a host is found to start it on.
- ha_vm_failure_interval (int) - The heartbeat interval, in seconds. If a heartbeat from a virtual machine is not received within this interval, the virtual machine is marked as failed. Default: 30 seconds.
- ha_vm_maximum_failure_window (int) - The length, in seconds, of the reset window in which ha_vm_maximum_resets can operate. When this window expires, no more resets are attempted regardless of the setting configured in ha_vm_maximum_resets. A value of -1 means no window, allowing an unlimited reset time. Default: -1 (no window).
- ha_vm_maximum_resets (int) - The maximum number of resets that HA will perform on a virtual machine when responding to a failure event. Default: 3.
- ha_vm_minimum_uptime (int) - The time, in seconds, that HA waits after powering on a virtual machine before monitoring for heartbeats. Default: 120 seconds (2 minutes).
- ha_vm_monitoring (str) - The type of virtual machine monitoring to use when HA is enabled in the cluster. Can be one of vmMonitoringDisabled, vmMonitoringOnly, or vmAndAppMonitoring. Default: vmMonitoringDisabled.
- ha_vm_restart_additional_delay (int) - Additional delay, in seconds, after the ready condition is met, before a virtual machine is considered ready. Default: 0 seconds (no delay).
- ha_vm_restart_priority (str) - The default restart priority for affected virtual machines when vSphere detects a host failure. Can be one of lowest, low, medium, high, or highest. Default: medium.
- ha_vm_restart_timeout (int) - The maximum time, in seconds, that vSphere HA will wait for virtual machines in one priority to be ready before proceeding with the next priority. Default: 600 seconds (10 minutes).
- host_cluster_exit_timeout (int) - The timeout, in seconds, for each host maintenance mode operation when removing hosts from a cluster. Default: 3600 seconds (1 hour).
- host_managed (bool) - Can be set to true if compute cluster membership will be managed through the host resource rather than the compute_cluster resource. Conflicts with: host_system_ids.
- host_system_ids (Sequence[str]) - The managed object IDs of the hosts to put in the cluster. Conflicts with: host_managed.
- name (str) - The name of the cluster.
- proactive_ha_automation_level (str) - Determines how the host quarantine, maintenance mode, or virtual machine migration recommendations made by Proactive HA are handled. Can be one of Automated or Manual. Default: Manual.
- proactive_ha_enabled (bool) - Enables Proactive HA. Default: false.
- proactive_ha_moderate_remediation (str) - The configured remediation for moderately degraded hosts. Can be one of MaintenanceMode or QuarantineMode. Note that this cannot be set to MaintenanceMode when proactive_ha_severe_remediation is set to QuarantineMode. Default: QuarantineMode.
- proactive_ha_provider_ids (Sequence[str]) - The list of IDs for health update providers configured for this cluster.
- proactive_ha_severe_remediation (str) - The configured remediation for severely degraded hosts. Can be one of MaintenanceMode or QuarantineMode. Note that this cannot be set to QuarantineMode when proactive_ha_moderate_remediation is set to MaintenanceMode. Default: QuarantineMode.
- tags (Sequence[str]) - The IDs of any tags to attach to this resource.
- vsan_compression_enabled (bool) - Enables vSAN compression on the cluster.
- vsan_dedup_enabled (bool) - Enables vSAN deduplication on the cluster. Cannot be independently set to true; when vSAN deduplication is enabled, vSAN compression must also be enabled.
- vsan_disk_groups (Sequence[ComputeClusterVsanDiskGroupArgs]) - Represents the configuration of a host disk group in the cluster.
- vsan_dit_encryption_enabled (bool) - Enables vSAN data-in-transit encryption on the cluster. Conflicts with vsan_remote_datastore_ids, i.e., the data-in-transit encryption feature cannot be enabled at the same time as the vSAN HCI Mesh feature.
- vsan_dit_rekey_interval (int) - The rekey interval, in minutes, for data-in-transit encryption. The valid rekey interval is 30 to 10800 (the feature defaults to 1440). Conflicts with vsan_remote_datastore_ids.
- vsan_enabled (bool) - Enables vSAN on the cluster.
- vsan_esa_enabled (bool) - Enables vSAN ESA on the cluster.
- vsan_fault_domains (Sequence[ComputeClusterVsanFaultDomainArgs]) - Configurations of vSAN fault domains.
- vsan_network_diagnostic_mode_enabled (bool) - Enables network diagnostic mode for the vSAN performance service on the cluster.
- vsan_performance_enabled (bool) - Enables the vSAN performance service on the cluster. Default: true.
- vsan_remote_datastore_ids (Sequence[str]) - The remote vSAN datastore IDs to be mounted to this cluster. Conflicts with vsan_dit_encryption_enabled and vsan_dit_rekey_interval, i.e., the vSAN HCI Mesh feature cannot be enabled at the same time as the data-in-transit encryption feature.
- vsan_stretched_cluster (ComputeClusterVsanStretchedClusterArgs) - Configurations of vSAN stretched cluster.
- vsan_unmap_enabled (bool) - Enables vSAN unmap on the cluster. You must explicitly enable vSAN unmap when you enable vSAN ESA on the cluster.
- vsan_verbose_mode_enabled (bool) - Enables verbose mode for the vSAN performance service on the cluster.
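Putting the core DRS and HA arguments together, a minimal Python sketch of a managed cluster; the datacenter and host names are placeholders for an assumed environment:

import pulumi_vsphere as vsphere

datacenter = vsphere.get_datacenter(name="dc-01")  # assumed datacenter name
host = vsphere.get_host(name="esxi-01.example.com", datacenter_id=datacenter.id)

compute_cluster = vsphere.ComputeCluster(
    "compute-cluster",
    datacenter_id=datacenter.id,
    host_system_ids=[host.id],
    drs_enabled=True,
    drs_automation_level="fullyAutomated",
    ha_enabled=True,
    ha_admission_control_policy="resourcePercentage",  # the default policy
)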
- datacenterId (String) - The managed object ID of the datacenter to create the cluster in. Forces a new resource if changed.
- customAttributes (Map<String>) - A map of custom attribute IDs to attribute value strings to set for the cluster. NOTE: Custom attributes are unsupported on direct ESXi connections and require vCenter Server.
- dpmAutomationLevel (String) - The automation level for host power operations in this cluster. Can be one of manual or automated. Default: manual.
- dpmEnabled (Boolean) - Enable DPM support for DRS in this cluster. Requires drs_enabled to be true in order to be effective. Default: false.
- dpmThreshold (Number) - A value between 1 and 5 indicating the threshold of load within the cluster that influences host power operations. This affects both power-on and power-off operations; a lower setting tolerates more of a surplus or deficit than a higher setting. Default: 3.
- drsAdvancedOptions (Map<String>) - A key/value map that specifies advanced options for DRS and DPM.
- drsAutomationLevel (String) - The default automation level for all virtual machines in this cluster. Can be one of manual, partiallyAutomated, or fullyAutomated. Default: manual.
- drsEnablePredictiveDrs (Boolean) - When true, enables DRS to use data from vRealize Operations Manager to make proactive DRS recommendations.
- drsEnableVmOverrides (Boolean) - Allow individual DRS overrides to be set for virtual machines in the cluster. Default: true.
- drsEnabled (Boolean) - Enable DRS for this cluster. Default: false.
- drsMigrationThreshold (Number) - A value between 1 and 5 indicating the threshold of imbalance tolerated between hosts. A lower setting tolerates more imbalance; a higher setting tolerates less. Default: 3.
- drsScaleDescendantsShares (String) - Enable scalable shares for all resource pools in the cluster. Can be one of disabled or scaleCpuAndMemoryShares. Default: disabled.
- folder (String) - The relative path to a folder to put this cluster in, relative to the datacenter the cluster is deployed to. Example: for the dc1 datacenter and a provided folder of foo/bar, the provider will place a cluster named compute-cluster-test in a host folder located at /dc1/host/foo/bar, with the final inventory path being /dc1/host/foo/bar/compute-cluster-test.
- forceEvacuateOnDestroy (Boolean) - When destroying the resource, setting this to true will auto-remove any hosts that are currently members of the cluster, as if they were removed by taking their entry out of host_system_ids (see below). This is an advanced option and should only be used for testing. Default: false. NOTE: Do not set force_evacuate_on_destroy in production; there are many pitfalls to its use when working with complex cluster configurations. Depending on the virtual machines currently on the cluster and your DRS and HA settings, the full host evacuation may fail. Instead, incrementally remove hosts from your configuration by adjusting the contents of the host_system_ids attribute.
- haAdmissionControlFailoverHostSystemIds (List<String>) - Defines the managed object IDs of hosts to use as dedicated failover hosts. These hosts are kept as available as possible; admission control blocks access to the host, and DRS ignores the host when making recommendations.
- haAdmissionControlHostFailureTolerance (Number) - The maximum number of failed hosts that admission control tolerates when making decisions on whether to permit virtual machine operations. The maximum is one less than the number of hosts in the cluster. Default: 1.
- haAdmissionControlPerformanceTolerance (Number) - The percentage of resource reduction that a cluster of virtual machines can tolerate in case of a failover. A value of 0 produces warnings only, whereas a value of 100 disables the setting. Default: 100 (disabled).
- haAdmissionControlPolicy (String) - The type of admission control policy to use with vSphere HA. Can be one of resourcePercentage, slotPolicy, failoverHosts, or disabled. Default: resourcePercentage.
- haAdmissionControlResourcePercentageAutoCompute (Boolean) - Automatically determine available resource percentages by subtracting the average number of host resources represented by the ha_admission_control_host_failure_tolerance setting from the total amount of resources in the cluster. Disable to supply user-defined values. Default: true.
- haAdmissionControlResourcePercentageCpu (Number) - Controls the user-defined percentage of CPU resources in the cluster to reserve for failover. Default: 100.
- haAdmissionControlResourcePercentageMemory (Number) - Controls the user-defined percentage of memory resources in the cluster to reserve for failover. Default: 100.
- haAdmissionControlSlotPolicyExplicitCpu (Number) - Controls the user-defined CPU slot size, in MHz. Default: 32.
- haAdmissionControlSlotPolicyExplicitMemory (Number) - Controls the user-defined memory slot size, in MB. Default: 100.
- haAdmissionControlSlotPolicyUseExplicitSize (Boolean) - Controls whether to supply explicit values for the CPU and memory slot sizes. The default is false, which tells vSphere to compute an automatic average based on all powered-on virtual machines currently in the cluster.
- haAdvancedOptions (Map<String>) - A key/value map that specifies advanced options for vSphere HA.
- haDatastoreApdRecoveryAction (String) - Controls the action to take on virtual machines if an APD status on an affected datastore clears in the middle of an APD event. Can be one of none or reset. Default: none.
- haDatastoreApdResponse (String) - Controls the action to take on virtual machines when the cluster has detected loss of all paths to a relevant datastore. Can be one of disabled, warning, restartConservative, or restartAggressive. Default: disabled.
- haDatastoreApdResponseDelay (Number) - The time, in seconds, to wait after an APD timeout event before running the response action defined in ha_datastore_apd_response. Default: 180 seconds (3 minutes).
- haDatastorePdlResponse (String) - Controls the action to take on virtual machines when the cluster has detected a permanent device loss to a relevant datastore. Can be one of disabled, warning, or restartAggressive. Default: disabled.
- haEnabled (Boolean) - Enable vSphere HA for this cluster. Default: false.
- haHeartbeatDatastoreIds (List<String>) - The list of managed object IDs for preferred datastores to use for HA heartbeating. This setting is only useful when ha_heartbeat_datastore_policy is set to either userSelectedDs or allFeasibleDsWithUserPreference.
- haHeartbeatDatastorePolicy (String) - The selection policy for HA heartbeat datastores. Can be one of allFeasibleDs, userSelectedDs, or allFeasibleDsWithUserPreference. Default: allFeasibleDsWithUserPreference.
- haHostIsolationResponse (String) - The action to take on virtual machines when a host has detected that it has been isolated from the rest of the cluster. Can be one of none, powerOff, or shutdown. Default: none.
- haHostMonitoring (String) - Global setting that controls whether vSphere HA remediates virtual machines on host failure. Can be one of enabled or disabled. Default: enabled.
- haVmComponentProtection (String) - Controls vSphere VM component protection for virtual machines in this cluster. Can be one of enabled or disabled. Default: enabled.
- haVmDependencyRestartCondition (String) - The condition used to determine whether or not the virtual machines in a certain restart priority class are online, allowing HA to move on to restarting virtual machines on the next priority. Can be one of none, poweredOn, guestHbStatusGreen, or appHbStatusGreen. The default is none, which means a virtual machine is considered ready immediately after a host is found to start it on.
- haVmFailureInterval (Number) - The heartbeat interval, in seconds. If a heartbeat from a virtual machine is not received within this interval, the virtual machine is marked as failed. Default: 30 seconds.
- haVmMaximumFailureWindow (Number) - The length, in seconds, of the reset window in which ha_vm_maximum_resets can operate. When this window expires, no more resets are attempted regardless of the setting configured in ha_vm_maximum_resets. A value of -1 means no window, allowing an unlimited reset time. Default: -1 (no window).
- haVmMaximumResets (Number) - The maximum number of resets that HA will perform on a virtual machine when responding to a failure event. Default: 3.
- haVmMinimumUptime (Number) - The time, in seconds, that HA waits after powering on a virtual machine before monitoring for heartbeats. Default: 120 seconds (2 minutes).
- haVmMonitoring (String) - The type of virtual machine monitoring to use when HA is enabled in the cluster. Can be one of vmMonitoringDisabled, vmMonitoringOnly, or vmAndAppMonitoring. Default: vmMonitoringDisabled.
- haVmRestartAdditionalDelay (Number) - Additional delay, in seconds, after the ready condition is met, before a virtual machine is considered ready. Default: 0 seconds (no delay).
- haVmRestartPriority (String) - The default restart priority for affected virtual machines when vSphere detects a host failure. Can be one of lowest, low, medium, high, or highest. Default: medium.
- haVmRestartTimeout (Number) - The maximum time, in seconds, that vSphere HA will wait for virtual machines in one priority to be ready before proceeding with the next priority. Default: 600 seconds (10 minutes).
- hostClusterExitTimeout (Number) - The timeout, in seconds, for each host maintenance mode operation when removing hosts from a cluster. Default: 3600 seconds (1 hour).
- hostManaged (Boolean) - Can be set to true if compute cluster membership will be managed through the host resource rather than the compute_cluster resource. Conflicts with: host_system_ids.
- hostSystemIds (List<String>) - The managed object IDs of the hosts to put in the cluster. Conflicts with: host_managed.
- name (String) - The name of the cluster.
- proactiveHaAutomationLevel (String) - Determines how the host quarantine, maintenance mode, or virtual machine migration recommendations made by Proactive HA are handled. Can be one of Automated or Manual. Default: Manual.
- proactiveHaEnabled (Boolean) - Enables Proactive HA. Default: false.
- proactiveHaModerateRemediation (String) - The configured remediation for moderately degraded hosts. Can be one of MaintenanceMode or QuarantineMode. Note that this cannot be set to MaintenanceMode when proactive_ha_severe_remediation is set to QuarantineMode. Default: QuarantineMode.
- proactiveHaProviderIds (List<String>) - The list of IDs for health update providers configured for this cluster.
- proactiveHaSevereRemediation (String) - The configured remediation for severely degraded hosts. Can be one of MaintenanceMode or QuarantineMode. Note that this cannot be set to QuarantineMode when proactive_ha_moderate_remediation is set to MaintenanceMode. Default: QuarantineMode.
- tags (List<String>) - The IDs of any tags to attach to this resource.
- vsanCompressionEnabled (Boolean) - Enables vSAN compression on the cluster.
- vsanDedupEnabled (Boolean) - Enables vSAN deduplication on the cluster. Cannot be independently set to true; when vSAN deduplication is enabled, vSAN compression must also be enabled.
- vsanDiskGroups (List<Property Map>) - Represents the configuration of a host disk group in the cluster.
- vsanDitEncryptionEnabled (Boolean) - Enables vSAN data-in-transit encryption on the cluster. Conflicts with vsan_remote_datastore_ids, i.e., the data-in-transit encryption feature cannot be enabled at the same time as the vSAN HCI Mesh feature.
- vsanDitRekeyInterval (Number) - The rekey interval, in minutes, for data-in-transit encryption. The valid rekey interval is 30 to 10800 (the feature defaults to 1440). Conflicts with vsan_remote_datastore_ids.
- vsanEnabled (Boolean) - Enables vSAN on the cluster.
- vsanEsaEnabled (Boolean) - Enables vSAN ESA on the cluster.
- vsanFaultDomains (List<Property Map>) - Configurations of vSAN fault domains.
- vsanNetworkDiagnosticModeEnabled (Boolean) - Enables network diagnostic mode for the vSAN performance service on the cluster.
- vsanPerformanceEnabled (Boolean) - Enables the vSAN performance service on the cluster. Default: true.
- vsanRemoteDatastoreIds (List<String>) - The remote vSAN datastore IDs to be mounted to this cluster. Conflicts with vsan_dit_encryption_enabled and vsan_dit_rekey_interval, i.e., the vSAN HCI Mesh feature cannot be enabled at the same time as the data-in-transit encryption feature.
- vsanStretchedCluster (Property Map) - Configurations of vSAN stretched cluster.
- vsanUnmapEnabled (Boolean) - Enables vSAN unmap on the cluster. You must explicitly enable vSAN unmap when you enable vSAN ESA on the cluster.
- vsanVerboseModeEnabled (Boolean) - Enables verbose mode for the vSAN performance service on the cluster.
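Before moving on to outputs, a short Python sketch of the slot policy admission control settings described above; the slot sizes are hypothetical values and the datacenter/host names are placeholders:

import pulumi_vsphere as vsphere

datacenter = vsphere.get_datacenter(name="dc-01")  # assumed name
host = vsphere.get_host(name="esxi-01.example.com", datacenter_id=datacenter.id)

cluster = vsphere.ComputeCluster(
    "slot-policy-cluster",
    datacenter_id=datacenter.id,
    host_system_ids=[host.id],
    ha_enabled=True,
    ha_admission_control_policy="slotPolicy",
    ha_admission_control_slot_policy_use_explicit_size=True,
    ha_admission_control_slot_policy_explicit_cpu=64,       # MHz
    ha_admission_control_slot_policy_explicit_memory=1024,  # MB
)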
Outputs
All input properties are implicitly available as output properties. Additionally, the ComputeCluster resource produces the following output properties:
- id (string) - The provider-assigned unique ID for this managed resource.
- resourcePoolId (string) - The managed object ID of the primary resource pool for this cluster. This can be passed directly to the resource_pool_id attribute of the vsphere.VirtualMachine resource.
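The resourcePoolId output can be consumed directly by other resources, as the description above notes. A minimal Python sketch; the datastore name, network name, and VM sizing values are placeholders for an assumed environment:

import pulumi_vsphere as vsphere

datacenter = vsphere.get_datacenter(name="dc-01")
datastore = vsphere.get_datastore(name="datastore-01", datacenter_id=datacenter.id)
network = vsphere.get_network(name="VM Network", datacenter_id=datacenter.id)

cluster = vsphere.ComputeCluster("cluster", datacenter_id=datacenter.id)

vm = vsphere.VirtualMachine(
    "vm",
    resource_pool_id=cluster.resource_pool_id,  # output documented above
    datastore_id=datastore.id,
    num_cpus=2,
    memory=1024,
    guest_id="otherLinux64Guest",
    network_interfaces=[{"network_id": network.id}],
    disks=[{"label": "disk0", "size": 20}],
)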
Look up Existing ComputeCluster Resource
Get an existing ComputeCluster resource’s state with the given name, ID, and optional extra properties used to qualify the lookup.
public static get(name: string, id: Input<ID>, state?: ComputeClusterState, opts?: CustomResourceOptions): ComputeCluster
@staticmethod
def get(resource_name: str,
id: str,
opts: Optional[ResourceOptions] = None,
custom_attributes: Optional[Mapping[str, str]] = None,
datacenter_id: Optional[str] = None,
dpm_automation_level: Optional[str] = None,
dpm_enabled: Optional[bool] = None,
dpm_threshold: Optional[int] = None,
drs_advanced_options: Optional[Mapping[str, str]] = None,
drs_automation_level: Optional[str] = None,
drs_enable_predictive_drs: Optional[bool] = None,
drs_enable_vm_overrides: Optional[bool] = None,
drs_enabled: Optional[bool] = None,
drs_migration_threshold: Optional[int] = None,
drs_scale_descendants_shares: Optional[str] = None,
folder: Optional[str] = None,
force_evacuate_on_destroy: Optional[bool] = None,
ha_admission_control_failover_host_system_ids: Optional[Sequence[str]] = None,
ha_admission_control_host_failure_tolerance: Optional[int] = None,
ha_admission_control_performance_tolerance: Optional[int] = None,
ha_admission_control_policy: Optional[str] = None,
ha_admission_control_resource_percentage_auto_compute: Optional[bool] = None,
ha_admission_control_resource_percentage_cpu: Optional[int] = None,
ha_admission_control_resource_percentage_memory: Optional[int] = None,
ha_admission_control_slot_policy_explicit_cpu: Optional[int] = None,
ha_admission_control_slot_policy_explicit_memory: Optional[int] = None,
ha_admission_control_slot_policy_use_explicit_size: Optional[bool] = None,
ha_advanced_options: Optional[Mapping[str, str]] = None,
ha_datastore_apd_recovery_action: Optional[str] = None,
ha_datastore_apd_response: Optional[str] = None,
ha_datastore_apd_response_delay: Optional[int] = None,
ha_datastore_pdl_response: Optional[str] = None,
ha_enabled: Optional[bool] = None,
ha_heartbeat_datastore_ids: Optional[Sequence[str]] = None,
ha_heartbeat_datastore_policy: Optional[str] = None,
ha_host_isolation_response: Optional[str] = None,
ha_host_monitoring: Optional[str] = None,
ha_vm_component_protection: Optional[str] = None,
ha_vm_dependency_restart_condition: Optional[str] = None,
ha_vm_failure_interval: Optional[int] = None,
ha_vm_maximum_failure_window: Optional[int] = None,
ha_vm_maximum_resets: Optional[int] = None,
ha_vm_minimum_uptime: Optional[int] = None,
ha_vm_monitoring: Optional[str] = None,
ha_vm_restart_additional_delay: Optional[int] = None,
ha_vm_restart_priority: Optional[str] = None,
ha_vm_restart_timeout: Optional[int] = None,
host_cluster_exit_timeout: Optional[int] = None,
host_managed: Optional[bool] = None,
host_system_ids: Optional[Sequence[str]] = None,
name: Optional[str] = None,
proactive_ha_automation_level: Optional[str] = None,
proactive_ha_enabled: Optional[bool] = None,
proactive_ha_moderate_remediation: Optional[str] = None,
proactive_ha_provider_ids: Optional[Sequence[str]] = None,
proactive_ha_severe_remediation: Optional[str] = None,
resource_pool_id: Optional[str] = None,
tags: Optional[Sequence[str]] = None,
vsan_compression_enabled: Optional[bool] = None,
vsan_dedup_enabled: Optional[bool] = None,
vsan_disk_groups: Optional[Sequence[ComputeClusterVsanDiskGroupArgs]] = None,
vsan_dit_encryption_enabled: Optional[bool] = None,
vsan_dit_rekey_interval: Optional[int] = None,
vsan_enabled: Optional[bool] = None,
vsan_esa_enabled: Optional[bool] = None,
vsan_fault_domains: Optional[Sequence[ComputeClusterVsanFaultDomainArgs]] = None,
vsan_network_diagnostic_mode_enabled: Optional[bool] = None,
vsan_performance_enabled: Optional[bool] = None,
vsan_remote_datastore_ids: Optional[Sequence[str]] = None,
vsan_stretched_cluster: Optional[ComputeClusterVsanStretchedClusterArgs] = None,
vsan_unmap_enabled: Optional[bool] = None,
vsan_verbose_mode_enabled: Optional[bool] = None) -> ComputeCluster
func GetComputeCluster(ctx *Context, name string, id IDInput, state *ComputeClusterState, opts ...ResourceOption) (*ComputeCluster, error)
public static ComputeCluster Get(string name, Input<string> id, ComputeClusterState? state, CustomResourceOptions? opts = null)
public static ComputeCluster get(String name, Output<String> id, ComputeClusterState state, CustomResourceOptions options)
Resource lookup is not supported in YAML
- name
- The unique name of the resulting resource (resource_name in the Python SDK).
- id
- The unique provider ID of the resource to look up.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
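A minimal lookup sketch in Python; the ID value below ("domain-c123") is a hypothetical cluster managed object ID:

import pulumi
import pulumi_vsphere as vsphere

# Look up an existing cluster by its provider ID and re-export one of its outputs.
existing = vsphere.ComputeCluster.get("existing-cluster", "domain-c123")
pulumi.export("cluster_resource_pool_id", existing.resource_pool_id)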
- CustomAttributes (Dictionary<string, string>) - A map of custom attribute IDs to attribute value strings to set for the cluster. NOTE: Custom attributes are unsupported on direct ESXi connections and require vCenter Server.
- DatacenterId (string) - The managed object ID of the datacenter to create the cluster in. Forces a new resource if changed.
- DpmAutomationLevel (string) - The automation level for host power operations in this cluster. Can be one of manual or automated. Default: manual.
- DpmEnabled (bool) - Enable DPM support for DRS in this cluster. Requires drs_enabled to be true in order to be effective. Default: false.
- DpmThreshold (int) - A value between 1 and 5 indicating the threshold of load within the cluster that influences host power operations. This affects both power-on and power-off operations; a lower setting tolerates more of a surplus or deficit than a higher setting. Default: 3.
- DrsAdvancedOptions (Dictionary<string, string>) - A key/value map that specifies advanced options for DRS and DPM.
- DrsAutomationLevel (string) - The default automation level for all virtual machines in this cluster. Can be one of manual, partiallyAutomated, or fullyAutomated. Default: manual.
- DrsEnablePredictiveDrs (bool) - When true, enables DRS to use data from vRealize Operations Manager to make proactive DRS recommendations.
- DrsEnableVmOverrides (bool) - Allow individual DRS overrides to be set for virtual machines in the cluster. Default: true.
- DrsEnabled (bool) - Enable DRS for this cluster. Default: false.
- DrsMigrationThreshold (int) - A value between 1 and 5 indicating the threshold of imbalance tolerated between hosts. A lower setting tolerates more imbalance; a higher setting tolerates less. Default: 3.
- DrsScaleDescendantsShares (string) - Enable scalable shares for all resource pools in the cluster. Can be one of disabled or scaleCpuAndMemoryShares. Default: disabled.
- Folder (string) - The relative path to a folder to put this cluster in, relative to the datacenter the cluster is deployed to. Example: for the dc1 datacenter and a provided folder of foo/bar, the provider will place a cluster named compute-cluster-test in a host folder located at /dc1/host/foo/bar, with the final inventory path being /dc1/host/foo/bar/compute-cluster-test.
- ForceEvacuateOnDestroy (bool) - When destroying the resource, setting this to true will auto-remove any hosts that are currently members of the cluster, as if they were removed by taking their entry out of host_system_ids (see below). This is an advanced option and should only be used for testing. Default: false. NOTE: Do not set force_evacuate_on_destroy in production; there are many pitfalls to its use when working with complex cluster configurations. Depending on the virtual machines currently on the cluster and your DRS and HA settings, the full host evacuation may fail. Instead, incrementally remove hosts from your configuration by adjusting the contents of the host_system_ids attribute.
- HaAdmissionControlFailoverHostSystemIds (List<string>) - Defines the managed object IDs of hosts to use as dedicated failover hosts. These hosts are kept as available as possible; admission control blocks access to the host, and DRS ignores the host when making recommendations.
- HaAdmissionControlHostFailureTolerance (int) - The maximum number of failed hosts that admission control tolerates when making decisions on whether to permit virtual machine operations. The maximum is one less than the number of hosts in the cluster. Default: 1.
- HaAdmissionControlPerformanceTolerance (int) - The percentage of resource reduction that a cluster of virtual machines can tolerate in case of a failover. A value of 0 produces warnings only, whereas a value of 100 disables the setting. Default: 100 (disabled).
- HaAdmissionControlPolicy (string) - The type of admission control policy to use with vSphere HA. Can be one of resourcePercentage, slotPolicy, failoverHosts, or disabled. Default: resourcePercentage.
- HaAdmissionControlResourcePercentageAutoCompute (bool) - Automatically determine available resource percentages by subtracting the average number of host resources represented by the ha_admission_control_host_failure_tolerance setting from the total amount of resources in the cluster. Disable to supply user-defined values. Default: true.
- HaAdmissionControlResourcePercentageCpu (int) - Controls the user-defined percentage of CPU resources in the cluster to reserve for failover. Default: 100.
- HaAdmissionControlResourcePercentageMemory (int) - Controls the user-defined percentage of memory resources in the cluster to reserve for failover. Default: 100.
- HaAdmissionControlSlotPolicyExplicitCpu (int) - Controls the user-defined CPU slot size, in MHz. Default: 32.
- HaAdmissionControlSlotPolicyExplicitMemory (int) - Controls the user-defined memory slot size, in MB. Default: 100.
- HaAdmissionControlSlotPolicyUseExplicitSize (bool) - Controls whether to supply explicit values for the CPU and memory slot sizes. The default is false, which tells vSphere to compute an automatic average based on all powered-on virtual machines currently in the cluster.
- HaAdvancedOptions (Dictionary<string, string>) - A key/value map that specifies advanced options for vSphere HA.
- HaDatastoreApdRecoveryAction (string) - Controls the action to take on virtual machines if an APD status on an affected datastore clears in the middle of an APD event. Can be one of none or reset. Default: none.
- HaDatastoreApdResponse (string) - Controls the action to take on virtual machines when the cluster has detected loss of all paths to a relevant datastore. Can be one of disabled, warning, restartConservative, or restartAggressive. Default: disabled.
- HaDatastoreApdResponseDelay (int) - The time, in seconds, to wait after an APD timeout event before running the response action defined in ha_datastore_apd_response. Default: 180 seconds (3 minutes).
- HaDatastorePdlResponse (string) - Controls the action to take on virtual machines when the cluster has detected a permanent device loss to a relevant datastore. Can be one of disabled, warning, or restartAggressive. Default: disabled.
- HaEnabled (bool) - Enable vSphere HA for this cluster. Default: false.
- HaHeartbeatDatastoreIds (List<string>) - The list of managed object IDs for preferred datastores to use for HA heartbeating. This setting is only useful when ha_heartbeat_datastore_policy is set to either userSelectedDs or allFeasibleDsWithUserPreference.
- HaHeartbeatDatastorePolicy (string) - The selection policy for HA heartbeat datastores. Can be one of allFeasibleDs, userSelectedDs, or allFeasibleDsWithUserPreference. Default: allFeasibleDsWithUserPreference.
- HaHostIsolationResponse (string) - The action to take on virtual machines when a host has detected that it has been isolated from the rest of the cluster. Can be one of none, powerOff, or shutdown. Default: none.
- HaHostMonitoring (string) - Global setting that controls whether vSphere HA remediates virtual machines on host failure. Can be one of enabled or disabled. Default: enabled.
- HaVmComponentProtection (string) - Controls vSphere VM component protection for virtual machines in this cluster. Can be one of enabled or disabled. Default: enabled.
- HaVmDependencyRestartCondition (string) - The condition used to determine whether or not the virtual machines in a certain restart priority class are online, allowing HA to move on to restarting virtual machines on the next priority. Can be one of none, poweredOn, guestHbStatusGreen, or appHbStatusGreen. The default is none, which means a virtual machine is considered ready immediately after a host is found to start it on.
- HaVmFailureInterval (int) - The heartbeat interval, in seconds. If a heartbeat from a virtual machine is not received within this interval, the virtual machine is marked as failed. Default: 30 seconds.
- HaVmMaximumFailureWindow (int) - The length, in seconds, of the reset window in which ha_vm_maximum_resets can operate. When this window expires, no more resets are attempted regardless of the setting configured in ha_vm_maximum_resets. A value of -1 means no window, allowing an unlimited reset time. Default: -1 (no window).
- HaVmMaximumResets (int) - The maximum number of resets that HA will perform on a virtual machine when responding to a failure event. Default: 3.
- HaVmMinimumUptime (int) - The time, in seconds, that HA waits after powering on a virtual machine before monitoring for heartbeats. Default: 120 seconds (2 minutes).
- HaVmMonitoring (string) - The type of virtual machine monitoring to use when HA is enabled in the cluster. Can be one of vmMonitoringDisabled, vmMonitoringOnly, or vmAndAppMonitoring. Default: vmMonitoringDisabled.
- HaVmRestartAdditionalDelay (int) - Additional delay, in seconds, after the ready condition is met, before a virtual machine is considered ready. Default: 0 seconds (no delay).
- HaVmRestartPriority (string) - The default restart priority for affected virtual machines when vSphere detects a host failure. Can be one of lowest, low, medium, high, or highest. Default: medium.
- HaVmRestartTimeout (int) - The maximum time, in seconds, that vSphere HA will wait for virtual machines in one priority to be ready before proceeding with the next priority. Default: 600 seconds (10 minutes).
- HostClusterExitTimeout (int) - The timeout, in seconds, for each host maintenance mode operation when removing hosts from a cluster. Default: 3600 seconds (1 hour).
- HostManaged (bool) - Can be set to true if compute cluster membership will be managed through the host resource rather than the compute_cluster resource. Conflicts with: host_system_ids.
- HostSystemIds (List<string>) - The managed object IDs of the hosts to put in the cluster. Conflicts with: host_managed.
- Name (string) - The name of the cluster.
- ProactiveHaAutomationLevel (string) - Determines how the host quarantine, maintenance mode, or virtual machine migration recommendations made by Proactive HA are handled. Can be one of Automated or Manual. Default: Manual.
- ProactiveHaEnabled (bool) - Enables Proactive HA. Default: false.
- ProactiveHaModerateRemediation (string) - The configured remediation for moderately degraded hosts. Can be one of MaintenanceMode or QuarantineMode. Note that this cannot be set to MaintenanceMode when proactive_ha_severe_remediation is set to QuarantineMode. Default: QuarantineMode.
- ProactiveHaProviderIds (List<string>) - The list of IDs for health update providers configured for this cluster.
- ProactiveHaSevereRemediation (string) - The configured remediation for severely degraded hosts. Can be one of MaintenanceMode or QuarantineMode. Note that this cannot be set to QuarantineMode when proactive_ha_moderate_remediation is set to MaintenanceMode. Default: QuarantineMode.
- ResourcePoolId (string) - The managed object ID of the primary resource pool for this cluster. This can be passed directly to the resource_pool_id attribute of the vsphere.VirtualMachine resource.
- Tags (List<string>) - The IDs of any tags to attach to this resource.
- VsanCompressionEnabled (bool) - Enables vSAN compression on the cluster.
- VsanDedupEnabled (bool) - Enables vSAN deduplication on the cluster. Cannot be independently set to true; when vSAN deduplication is enabled, vSAN compression must also be enabled.
- VsanDiskGroups (List<Pulumi.VSphere.Inputs.ComputeClusterVsanDiskGroup>) - Represents the configuration of a host disk group in the cluster.
- VsanDitEncryptionEnabled (bool) - Enables vSAN data-in-transit encryption on the cluster. Conflicts with vsan_remote_datastore_ids, i.e., the data-in-transit encryption feature cannot be enabled at the same time as the vSAN HCI Mesh feature.
- VsanDitRekeyInterval (int) - The rekey interval, in minutes, for data-in-transit encryption. The valid rekey interval is 30 to 10800 (the feature defaults to 1440). Conflicts with vsan_remote_datastore_ids.
- VsanEnabled (bool) - Enables vSAN on the cluster.
- VsanEsaEnabled (bool) - Enables vSAN ESA on the cluster.
- VsanFaultDomains (List<Pulumi.VSphere.Inputs.ComputeClusterVsanFaultDomain>) - Configurations of vSAN fault domains.
- VsanNetworkDiagnosticModeEnabled (bool) - Enables network diagnostic mode for the vSAN performance service on the cluster.
- VsanPerformanceEnabled (bool) - Enables the vSAN performance service on the cluster. Default: true.
- VsanRemoteDatastoreIds (List<string>) - The remote vSAN datastore IDs to be mounted to this cluster. Conflicts with vsan_dit_encryption_enabled and vsan_dit_rekey_interval, i.e., the vSAN HCI Mesh feature cannot be enabled at the same time as the data-in-transit encryption feature.
- VsanStretchedCluster (Pulumi.VSphere.Inputs.ComputeClusterVsanStretchedCluster) - Configurations of vSAN stretched cluster.
- VsanUnmapEnabled (bool) - Enables vSAN unmap on the cluster. You must explicitly enable vSAN unmap when you enable vSAN ESA on the cluster.
- VsanVerboseModeEnabled (bool) - Enables verbose mode for the vSAN performance service on the cluster.
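The failoverHosts admission control policy pairs with the dedicated failover host list described above; a Python sketch, with hypothetical host names for an assumed three-host environment:

import pulumi_vsphere as vsphere

datacenter = vsphere.get_datacenter(name="dc-01")
hosts = [
    vsphere.get_host(name=n, datacenter_id=datacenter.id)
    for n in ["esxi-01.example.com", "esxi-02.example.com", "esxi-03.example.com"]
]

cluster = vsphere.ComputeCluster(
    "failover-hosts-cluster",
    datacenter_id=datacenter.id,
    host_system_ids=[h.id for h in hosts],
    ha_enabled=True,
    ha_admission_control_policy="failoverHosts",
    # Reserve the last host purely for failover capacity; admission control
    # blocks access to it and DRS ignores it when making recommendations.
    ha_admission_control_failover_host_system_ids=[hosts[-1].id],
)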
- Custom
Attributes map[string]string A map of custom attribute ids to attribute value strings to set for the datastore cluster.
NOTE: Custom attributes are unsupported on direct ESXi connections and require vCenter Server.
- Datacenter
Id string The managed object ID of the datacenter to create the cluster in. Forces a new resource if changed.
- Dpm
Automation stringLevel The automation level for host power operations in this cluster. Can be one of
manual
orautomated
. Default:manual
.- Dpm
Enabled bool Enable DPM support for DRS in this cluster. Requires
drs_enabled
to betrue
in order to be effective. Default:false
.- Dpm
Threshold int A value between
1
and5
indicating the threshold of load within the cluster that influences host power operations. This affects both power on and power off operations - a lower setting will tolerate more of a surplus/deficit than a higher setting. Default:3
.- Drs
Advanced map[string]stringOptions A key/value map that specifies advanced options for DRS and DPM.
- Drs
Automation stringLevel The default automation level for all virtual machines in this cluster. Can be one of
manual
,partiallyAutomated
, orfullyAutomated
. Default:manual
.- Drs
Enable boolPredictive Drs When
true
, enables DRS to use data from vRealize Operations Manager to make proactive DRS recommendations. *- Drs
Enable boolVm Overrides Allow individual DRS overrides to be set for virtual machines in the cluster. Default:
true
.- Drs
Enabled bool Enable DRS for this cluster. Default:
false
.- Drs
Migration intThreshold A value between
1
and5
indicating the threshold of imbalance tolerated between hosts. A lower setting will tolerate more imbalance while a higher setting will tolerate less. Default:3
.- string
Enable scalable shares for all resource pools in the cluster. Can be one of
disabled
orscaleCpuAndMemoryShares
. Default:disabled
.- Folder string
The relative path to a folder to put this cluster in. This is a path relative to the datacenter you are deploying the cluster to. Example: for the
dc1
datacenter, and a providedfolder
offoo/bar
, The provider will place a cluster namedcompute-cluster-test
in a host folder located at/dc1/host/foo/bar
, with the final inventory path being/dc1/host/foo/bar/datastore-cluster-test
.- Force
Evacuate boolOn Destroy When destroying the resource, setting this to
true
will auto-remove any hosts that are currently a member of the cluster, as if they were removed by taking their entry out ofhost_system_ids
(see below. This is an advanced option and should only be used for testing. Default:false
.NOTE: Do not set
force_evacuate_on_destroy
in production operation as there are many pitfalls to its use when working with complex cluster configurations. Depending on the virtual machines currently on the cluster, and your DRS and HA settings, the full host evacuation may fail. Instead, incrementally remove hosts from your configuration by adjusting the contents of thehost_system_ids
attribute.- Ha
Admission []stringControl Failover Host System Ids Defines the managed object IDs of hosts to use as dedicated failover hosts. These hosts are kept as available as possible - admission control will block access to the host, and DRS will ignore the host when making recommendations.
- Ha
Admission intControl Host Failure Tolerance The maximum number of failed hosts that admission control tolerates when making decisions on whether to permit virtual machine operations. The maximum is one less than the number of hosts in the cluster. Default:
1
. *- Ha
Admission intControl Performance Tolerance The percentage of resource reduction that a cluster of virtual machines can tolerate in case of a failover. A value of 0 produces warnings only, whereas a value of 100 disables the setting. Default:
100
(disabled).- Ha
Admission stringControl Policy The type of admission control policy to use with vSphere HA. Can be one of
resourcePercentage
,slotPolicy
,failoverHosts
, ordisabled
. Default:resourcePercentage
.- Ha
Admission boolControl Resource Percentage Auto Compute Automatically determine available resource percentages by subtracting the average number of host resources represented by the
ha_admission_control_host_failure_tolerance
setting from the total amount of resources in the cluster. Disable to supply user-defined values. Default:true
. *- Ha
Admission intControl Resource Percentage Cpu Controls the user-defined percentage of CPU resources in the cluster to reserve for failover. Default:
100
.- Ha
Admission intControl Resource Percentage Memory Controls the user-defined percentage of memory resources in the cluster to reserve for failover. Default:
100
.- Ha
Admission intControl Slot Policy Explicit Cpu Controls the user-defined CPU slot size, in MHz. Default:
32
.- Ha
Admission intControl Slot Policy Explicit Memory Controls the user-defined memory slot size, in MB. Default:
100
.- Ha
Admission boolControl Slot Policy Use Explicit Size Controls whether or not you wish to supply explicit values to CPU and memory slot sizes. The default is
false
, which tells vSphere to gather a automatic average based on all powered-on virtual machines currently in the cluster.- Ha
Advanced map[string]stringOptions A key/value map that specifies advanced options for vSphere HA.
- Ha
Datastore stringApd Recovery Action Controls the action to take on virtual machines if an APD status on an affected datastore clears in the middle of an APD event. Can be one of
none
orreset
. Default:none
. *- Ha
Datastore stringApd Response Controls the action to take on virtual machines when the cluster has detected loss to all paths to a relevant datastore. Can be one of
disabled
,warning
,restartConservative
, orrestartAggressive
. Default:disabled
. *- Ha
Datastore intApd Response Delay The time, in seconds, to wait after an APD timeout event to run the response action defined in
ha_datastore_apd_response
. Default:180
seconds (3 minutes). *- Ha
Datastore stringPdl Response Controls the action to take on virtual machines when the cluster has detected a permanent device loss to a relevant datastore. Can be one of
disabled
,warning
, orrestartAggressive
. Default:disabled
. *- Ha
Enabled bool Enable vSphere HA for this cluster. Default:
false
.- Ha
Heartbeat []stringDatastore Ids The list of managed object IDs for preferred datastores to use for HA heartbeating. This setting is only useful when
ha_heartbeat_datastore_policy
is set to eitheruserSelectedDs
orallFeasibleDsWithUserPreference
.- Ha
Heartbeat stringDatastore Policy The selection policy for HA heartbeat datastores. Can be one of
allFeasibleDs
,userSelectedDs
, orallFeasibleDsWithUserPreference
. Default:allFeasibleDsWithUserPreference
.- Ha
Host stringIsolation Response The action to take on virtual machines when a host has detected that it has been isolated from the rest of the cluster. Can be one of
none
,powerOff
, orshutdown
. Default:none
.- Ha
Host stringMonitoring Global setting that controls whether vSphere HA remediates virtual machines on host failure. Can be one of
enabled
ordisabled
. Default:enabled
.- Ha
Vm stringComponent Protection Controls vSphere VM component protection for virtual machines in this cluster. Can be one of
enabled
ordisabled
. Default:enabled
. *- Ha
VmDependencyRestartCondition (string): The condition used to determine whether or not the virtual machines in a given restart priority class are online, allowing HA to move on to restarting the virtual machines in the next priority class. Can be one of none, poweredOn, guestHbStatusGreen, or appHbStatusGreen. The default is none, which means that a virtual machine is considered ready immediately after a host is found to start it on.
- HaVmFailureInterval (int): If a heartbeat from a virtual machine is not received within this interval, in seconds, the virtual machine is marked as failed. Default: 30 seconds.
- HaVmMaximumFailureWindow (int): The length, in seconds, of the reset window in which ha_vm_maximum_resets can operate. When this window expires, no more resets are attempted regardless of the setting configured in ha_vm_maximum_resets. A value of -1 means no window, allowing an unlimited reset time. Default: -1 (no window).
- HaVmMaximumResets (int): The maximum number of resets that HA will perform on a virtual machine when responding to a failure event. Default: 3.
- HaVmMinimumUptime (int): The time, in seconds, that HA waits after powering on a virtual machine before monitoring for heartbeats. Default: 120 seconds (2 minutes).
- HaVmMonitoring (string): The type of virtual machine monitoring to use when HA is enabled in the cluster. Can be one of vmMonitoringDisabled, vmMonitoringOnly, or vmAndAppMonitoring. Default: vmMonitoringDisabled.
- HaVmRestartAdditionalDelay (int): Additional delay, in seconds, after the ready condition is met, at which point a virtual machine is considered ready. Default: 0 seconds (no delay).
- HaVmRestartPriority (string): The default restart priority for affected virtual machines when vSphere detects a host failure. Can be one of lowest, low, medium, high, or highest. Default: medium.
- HaVmRestartTimeout (int): The maximum time, in seconds, that vSphere HA will wait for virtual machines in one priority class to be ready before proceeding with the next priority class. Default: 600 seconds (10 minutes).
- HostClusterExitTimeout (int): The timeout, in seconds, for each host maintenance mode operation when removing hosts from a cluster. Default: 3600 seconds (1 hour).
- HostManaged (bool): Can be set to true if compute cluster membership will be managed through the host resource rather than the compute_cluster resource. Conflicts with: host_system_ids.
- HostSystemIds ([]string): The managed object IDs of the hosts to put in the cluster. Conflicts with: host_managed.
- Name (string): The name of the cluster.
- ProactiveHaAutomationLevel (string): Determines how the host quarantine, maintenance mode, and virtual machine migration recommendations made by Proactive HA are handled. Can be one of Automated or Manual. Default: Manual.
- ProactiveHaEnabled (bool): Enables Proactive HA. Default: false.
- ProactiveHaModerateRemediation (string): The configured remediation for moderately degraded hosts. Can be one of MaintenanceMode or QuarantineMode. Note that this cannot be set to MaintenanceMode when proactive_ha_severe_remediation is set to QuarantineMode. Default: QuarantineMode.
- ProactiveHaProviderIds ([]string): The list of IDs for health update providers configured for this cluster.
- ProactiveHaSevereRemediation (string): The configured remediation for severely degraded hosts. Can be one of MaintenanceMode or QuarantineMode. Note that this cannot be set to QuarantineMode when proactive_ha_moderate_remediation is set to MaintenanceMode. Default: QuarantineMode.
- ResourcePoolId (string): The managed object ID of the primary resource pool for this cluster. This can be passed directly to the resource_pool_id attribute of the vsphere.VirtualMachine resource.
- Tags ([]string): The IDs of any tags to attach to this resource.
- VsanCompressionEnabled (bool): Enables vSAN compression on the cluster.
- VsanDedupEnabled (bool): Enables vSAN deduplication on the cluster. Cannot be independently set to true; when vSAN deduplication is enabled, vSAN compression must also be enabled.
- VsanDiskGroups ([]ComputeClusterVsanDiskGroupArgs): Represents the configuration of a host disk group in the cluster.
- VsanDitEncryptionEnabled (bool): Enables vSAN data-in-transit encryption on the cluster. Conflicts with vsan_remote_datastore_ids; data-in-transit encryption cannot be enabled at the same time as the vSAN HCI Mesh feature.
- VsanDitRekeyInterval (int): The rekey interval, in minutes, for data-in-transit encryption. The valid range is 30 to 10800; the feature defaults to 1440. Conflicts with vsan_remote_datastore_ids.
- VsanEnabled (bool): Enables vSAN on the cluster.
- VsanEsaEnabled (bool): Enables vSAN ESA on the cluster.
- VsanFaultDomains ([]ComputeClusterVsanFaultDomainArgs): Configurations of vSAN fault domains.
- VsanNetworkDiagnosticModeEnabled (bool): Enables network diagnostic mode for the vSAN performance service on the cluster.
- VsanPerformanceEnabled (bool): Enables the vSAN performance service on the cluster. Default: true.
- VsanRemoteDatastoreIds ([]string): The remote vSAN datastore IDs to be mounted to this cluster. Conflicts with vsan_dit_encryption_enabled and vsan_dit_rekey_interval; the vSAN HCI Mesh feature cannot be enabled at the same time as data-in-transit encryption.
- VsanStretchedCluster (ComputeClusterVsanStretchedClusterArgs): Configuration of a vSAN stretched cluster.
- VsanUnmapEnabled (bool): Enables vSAN unmap on the cluster. vSAN unmap must be explicitly enabled when vSAN ESA is enabled on the cluster.
- VsanVerboseModeEnabled (bool): Enables verbose mode for the vSAN performance service on the cluster.
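Several of these arguments are only valid in combination: vSAN deduplication cannot be enabled without compression, and data-in-transit encryption excludes the HCI Mesh remote datastores. A minimal TypeScript sketch of these pairings follows; the datacenter and host IDs are placeholder values, not real managed object IDs.

import * as vsphere from "@pulumi/vsphere";

// Sketch: vSAN feature pairings on a cluster. All IDs are placeholders.
const vsanCluster = new vsphere.ComputeCluster("vsan-cluster", {
    datacenterId: "datacenter-123",
    hostSystemIds: ["host-101", "host-102", "host-103"],
    haEnabled: false,                 // HA must be off while vSAN is first enabled
    vsanEnabled: true,
    vsanDedupEnabled: true,           // requires vsanCompressionEnabled
    vsanCompressionEnabled: true,
    vsanDitEncryptionEnabled: true,
    vsanDitRekeyInterval: 1440,       // minutes; valid range is 30 to 10800
    // vsanRemoteDatastoreIds is omitted: it conflicts with data-in-transit encryption.
});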
Supporting Types
ComputeClusterVsanDiskGroup, ComputeClusterVsanDiskGroupArgs
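The device fields of this type are not listed above, but the full example at the end of this page uses cache and storages. Under that assumption, a single disk group pairs one cache-tier device with one or more capacity-tier devices; the device and ID values in this sketch are placeholders.

import * as vsphere from "@pulumi/vsphere";

// Sketch: one vSAN disk group with a cache device and capacity devices.
const diskGroupCluster = new vsphere.ComputeCluster("disk-group-cluster", {
    datacenterId: "datacenter-123",
    hostSystemIds: ["host-101"],
    haEnabled: false,
    vsanEnabled: true,
    vsanDiskGroups: [{
        cache: "mpx.vmhba1:C0:T1:L0",                             // cache device (placeholder)
        storages: ["mpx.vmhba1:C0:T2:L0", "mpx.vmhba1:C0:T3:L0"], // capacity devices (placeholders)
    }],
});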
ComputeClusterVsanFaultDomain, ComputeClusterVsanFaultDomainArgs
- FaultDomains ([]ComputeClusterVsanFaultDomainFaultDomain): The configuration for a single fault domain.
ComputeClusterVsanFaultDomainFaultDomain, ComputeClusterVsanFaultDomainFaultDomainArgs
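The fields of this nested type are likewise not listed above; judging from the full example at the end of this page, each entry carries a fault domain name and its member host IDs. A sketch under that assumption, with placeholder IDs:

import * as vsphere from "@pulumi/vsphere";

// Sketch: two vSAN fault domains, each naming its member hosts.
const fdCluster = new vsphere.ComputeCluster("fd-cluster", {
    datacenterId: "datacenter-123",
    hostSystemIds: ["host-101", "host-102", "host-103", "host-104"],
    haEnabled: false,
    vsanEnabled: true,
    vsanFaultDomains: [{
        faultDomains: [
            { name: "fd1", hostIds: ["host-101", "host-102"] },
            { name: "fd2", hostIds: ["host-103", "host-104"] },
        ],
    }],
});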
ComputeClusterVsanStretchedCluster, ComputeClusterVsanStretchedClusterArgs
- PreferredFaultDomainHostIds ([]string): The managed object IDs of the hosts to put in the first (preferred) fault domain.
- SecondaryFaultDomainHostIds ([]string): The managed object IDs of the hosts to put in the second fault domain.
- WitnessNode (string): The managed object ID of the host selected as the witness node when the stretched cluster is enabled.
- PreferredFaultDomainName (string): The name of the first fault domain. Default: Preferred.
- SecondaryFaultDomainName (string): The name of the second fault domain. Default: Secondary.

NOTE: You must disable vSphere HA before you enable vSAN on the cluster. You can enable or re-enable vSphere HA after vSAN is configured.
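Putting these fields together, a two-site stretched cluster assigns each site's hosts to a fault domain and designates a witness host. The following TypeScript sketch uses placeholder IDs; the fault domain names are optional and fall back to Preferred and Secondary when omitted.

import * as vsphere from "@pulumi/vsphere";

// Sketch: a two-site vSAN stretched cluster with a witness host.
const stretchedCluster = new vsphere.ComputeCluster("stretched-cluster", {
    datacenterId: "datacenter-123",
    hostSystemIds: ["host-101", "host-102", "host-201", "host-202"],
    haEnabled: false, // disable HA before first enabling vSAN (see NOTE above)
    vsanEnabled: true,
    vsanStretchedCluster: {
        preferredFaultDomainHostIds: ["host-101", "host-102"],
        secondaryFaultDomainHostIds: ["host-201", "host-202"],
        witnessNode: "host-300",
        preferredFaultDomainName: "site-a", // optional; defaults to "Preferred"
        secondaryFaultDomainName: "site-b", // optional; defaults to "Secondary"
    },
});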
import * as pulumi from "@pulumi/pulumi";
import * as vsphere from "@pulumi/vsphere";

const computeCluster = new vsphere.ComputeCluster("computeCluster", {
    datacenterId: data.vsphere_datacenter.datacenter.id,
    hostSystemIds: data.vsphere_host.host.map(__item => __item.id),
    drsEnabled: true,
    drsAutomationLevel: "fullyAutomated",
    haEnabled: false,
    vsanEnabled: true,
    vsanEsaEnabled: true,
    vsanDedupEnabled: true,
    vsanCompressionEnabled: true,
    vsanPerformanceEnabled: true,
    vsanVerboseModeEnabled: true,
    vsanNetworkDiagnosticModeEnabled: true,
    vsanUnmapEnabled: true,
    vsanDitEncryptionEnabled: true,
    vsanDitRekeyInterval: 1800,
    vsanDiskGroups: [{
        cache: data.vsphere_vmfs_disks.cache_disks[0],
        storages: data.vsphere_vmfs_disks.storage_disks,
    }],
    vsanFaultDomains: [{
        faultDomains: [
            {
                name: "fd1",
                hostIds: data.vsphere_host.faultdomain1_hosts.map(__item => __item.id),
            },
            {
                name: "fd2",
                hostIds: data.vsphere_host.faultdomain2_hosts.map(__item => __item.id),
            },
        ],
    }],
    vsanStretchedCluster: {
        preferredFaultDomainHostIds: data.vsphere_host.preferred_fault_domain_host.map(__item => __item.id),
        secondaryFaultDomainHostIds: data.vsphere_host.secondary_fault_domain_host.map(__item => __item.id),
        witnessNode: data.vsphere_host.witness_host.id,
    },
});
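The data.* references above are generator placeholders rather than runnable lookups. With the TypeScript SDK, the equivalent inputs would typically come from the getDatacenter and getHost data sources; the inventory names below are assumptions for illustration.

import * as vsphere from "@pulumi/vsphere";

// Sketch: resolving the datacenter and a host ID with data sources.
// "dc-01" and "esxi-01.example.com" are assumed inventory names.
const datacenter = vsphere.getDatacenter({ name: "dc-01" });
const host = datacenter.then(dc => vsphere.getHost({
    name: "esxi-01.example.com",
    datacenterId: dc.id,
}));

const cluster = new vsphere.ComputeCluster("cluster", {
    datacenterId: datacenter.then(dc => dc.id),
    hostSystemIds: [host.then(h => h.id)],
    drsEnabled: true,
    drsAutomationLevel: "fullyAutomated",
    haEnabled: true,
});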
import pulumi
import pulumi_vsphere as vsphere

compute_cluster = vsphere.ComputeCluster("computeCluster",
    datacenter_id=data["vsphere_datacenter"]["datacenter"]["id"],
    host_system_ids=[__item["id"] for __item in data["vsphere_host"]["host"]],
    drs_enabled=True,
    drs_automation_level="fullyAutomated",
    ha_enabled=False,
    vsan_enabled=True,
    vsan_esa_enabled=True,
    vsan_dedup_enabled=True,
    vsan_compression_enabled=True,
    vsan_performance_enabled=True,
    vsan_verbose_mode_enabled=True,
    vsan_network_diagnostic_mode_enabled=True,
    vsan_unmap_enabled=True,
    vsan_dit_encryption_enabled=True,
    vsan_dit_rekey_interval=1800,
    vsan_disk_groups=[vsphere.ComputeClusterVsanDiskGroupArgs(
        cache=data["vsphere_vmfs_disks"]["cache_disks"][0],
        storages=data["vsphere_vmfs_disks"]["storage_disks"],
    )],
    vsan_fault_domains=[vsphere.ComputeClusterVsanFaultDomainArgs(
        fault_domains=[
            vsphere.ComputeClusterVsanFaultDomainFaultDomainArgs(
                name="fd1",
                host_ids=[__item["id"] for __item in data["vsphere_host"]["faultdomain1_hosts"]],
            ),
            vsphere.ComputeClusterVsanFaultDomainFaultDomainArgs(
                name="fd2",
                host_ids=[__item["id"] for __item in data["vsphere_host"]["faultdomain2_hosts"]],
            ),
        ],
    )],
    vsan_stretched_cluster=vsphere.ComputeClusterVsanStretchedClusterArgs(
        preferred_fault_domain_host_ids=[__item["id"] for __item in data["vsphere_host"]["preferred_fault_domain_host"]],
        secondary_fault_domain_host_ids=[__item["id"] for __item in data["vsphere_host"]["secondary_fault_domain_host"]],
        witness_node=data["vsphere_host"]["witness_host"]["id"],
    ))
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using VSphere = Pulumi.VSphere;

return await Deployment.RunAsync(() =>
{
    var computeCluster = new VSphere.ComputeCluster("computeCluster", new()
    {
        DatacenterId = data.Vsphere_datacenter.Datacenter.Id,
        HostSystemIds = data.Vsphere_host.Host.Select(__item => __item.Id).ToArray(),
        DrsEnabled = true,
        DrsAutomationLevel = "fullyAutomated",
        HaEnabled = false,
        VsanEnabled = true,
        VsanEsaEnabled = true,
        VsanDedupEnabled = true,
        VsanCompressionEnabled = true,
        VsanPerformanceEnabled = true,
        VsanVerboseModeEnabled = true,
        VsanNetworkDiagnosticModeEnabled = true,
        VsanUnmapEnabled = true,
        VsanDitEncryptionEnabled = true,
        VsanDitRekeyInterval = 1800,
        VsanDiskGroups = new[]
        {
            new VSphere.Inputs.ComputeClusterVsanDiskGroupArgs
            {
                Cache = data.Vsphere_vmfs_disks.Cache_disks[0],
                Storages = data.Vsphere_vmfs_disks.Storage_disks,
            },
        },
        VsanFaultDomains = new[]
        {
            new VSphere.Inputs.ComputeClusterVsanFaultDomainArgs
            {
                FaultDomains = new[]
                {
                    new VSphere.Inputs.ComputeClusterVsanFaultDomainFaultDomainArgs
                    {
                        Name = "fd1",
                        HostIds = data.Vsphere_host.Faultdomain1_hosts.Select(__item => __item.Id).ToArray(),
                    },
                    new VSphere.Inputs.ComputeClusterVsanFaultDomainFaultDomainArgs
                    {
                        Name = "fd2",
                        HostIds = data.Vsphere_host.Faultdomain2_hosts.Select(__item => __item.Id).ToArray(),
                    },
                },
            },
        },
        VsanStretchedCluster = new VSphere.Inputs.ComputeClusterVsanStretchedClusterArgs
        {
            PreferredFaultDomainHostIds = data.Vsphere_host.Preferred_fault_domain_host.Select(__item => __item.Id).ToArray(),
            SecondaryFaultDomainHostIds = data.Vsphere_host.Secondary_fault_domain_host.Select(__item => __item.Id).ToArray(),
            WitnessNode = data.Vsphere_host.Witness_host.Id,
        },
    });
});
package main

import (
	"github.com/pulumi/pulumi-vsphere/sdk/v4/go/vsphere"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		_, err := vsphere.NewComputeCluster(ctx, "computeCluster", &vsphere.ComputeClusterArgs{
			DatacenterId: pulumi.Any(data.Vsphere_datacenter.Datacenter.Id),
			// Supply the managed object IDs of the cluster hosts here,
			// one pulumi.String per host ID.
			HostSystemIds:                    pulumi.StringArray{},
			DrsEnabled:                       pulumi.Bool(true),
			DrsAutomationLevel:               pulumi.String("fullyAutomated"),
			HaEnabled:                        pulumi.Bool(false),
			VsanEnabled:                      pulumi.Bool(true),
			VsanEsaEnabled:                   pulumi.Bool(true),
			VsanDedupEnabled:                 pulumi.Bool(true),
			VsanCompressionEnabled:           pulumi.Bool(true),
			VsanPerformanceEnabled:           pulumi.Bool(true),
			VsanVerboseModeEnabled:           pulumi.Bool(true),
			VsanNetworkDiagnosticModeEnabled: pulumi.Bool(true),
			VsanUnmapEnabled:                 pulumi.Bool(true),
			VsanDitEncryptionEnabled:         pulumi.Bool(true),
			VsanDitRekeyInterval:             pulumi.Int(1800),
			VsanDiskGroups: vsphere.ComputeClusterVsanDiskGroupArray{
				&vsphere.ComputeClusterVsanDiskGroupArgs{
					Cache:    pulumi.Any(data.Vsphere_vmfs_disks.Cache_disks[0]),
					Storages: pulumi.Any(data.Vsphere_vmfs_disks.Storage_disks),
				},
			},
			VsanFaultDomains: vsphere.ComputeClusterVsanFaultDomainArray{
				&vsphere.ComputeClusterVsanFaultDomainArgs{
					FaultDomains: vsphere.ComputeClusterVsanFaultDomainFaultDomainArray{
						&vsphere.ComputeClusterVsanFaultDomainFaultDomainArgs{
							Name: pulumi.String("fd1"),
							// IDs of the hosts in the first fault domain.
							HostIds: pulumi.StringArray{},
						},
						&vsphere.ComputeClusterVsanFaultDomainFaultDomainArgs{
							Name: pulumi.String("fd2"),
							// IDs of the hosts in the second fault domain.
							HostIds: pulumi.StringArray{},
						},
					},
				},
			},
			VsanStretchedCluster: &vsphere.ComputeClusterVsanStretchedClusterArgs{
				// IDs of the hosts in the preferred and secondary fault domains.
				PreferredFaultDomainHostIds: pulumi.StringArray{},
				SecondaryFaultDomainHostIds: pulumi.StringArray{},
				WitnessNode:                 pulumi.Any(data.Vsphere_host.Witness_host.Id),
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}
package generated_program;

import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.vsphere.ComputeCluster;
import com.pulumi.vsphere.ComputeClusterArgs;
import com.pulumi.vsphere.inputs.ComputeClusterVsanDiskGroupArgs;
import com.pulumi.vsphere.inputs.ComputeClusterVsanFaultDomainArgs;
import com.pulumi.vsphere.inputs.ComputeClusterVsanFaultDomainFaultDomainArgs;
import com.pulumi.vsphere.inputs.ComputeClusterVsanStretchedClusterArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import static java.util.stream.Collectors.toList;

public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        var computeCluster = new ComputeCluster("computeCluster", ComputeClusterArgs.builder()
            .datacenterId(data.vsphere_datacenter().datacenter().id())
            .hostSystemIds(data.vsphere_host().host().stream().map(element -> element.id()).collect(toList()))
            .drsEnabled(true)
            .drsAutomationLevel("fullyAutomated")
            .haEnabled(false)
            .vsanEnabled(true)
            .vsanEsaEnabled(true)
            .vsanDedupEnabled(true)
            .vsanCompressionEnabled(true)
            .vsanPerformanceEnabled(true)
            .vsanVerboseModeEnabled(true)
            .vsanNetworkDiagnosticModeEnabled(true)
            .vsanUnmapEnabled(true)
            .vsanDitEncryptionEnabled(true)
            .vsanDitRekeyInterval(1800)
            .vsanDiskGroups(ComputeClusterVsanDiskGroupArgs.builder()
                .cache(data.vsphere_vmfs_disks().cache_disks()[0])
                .storages(data.vsphere_vmfs_disks().storage_disks())
                .build())
            .vsanFaultDomains(ComputeClusterVsanFaultDomainArgs.builder()
                .faultDomains(
                    ComputeClusterVsanFaultDomainFaultDomainArgs.builder()
                        .name("fd1")
                        .hostIds(data.vsphere_host().faultdomain1_hosts().stream().map(element -> element.id()).collect(toList()))
                        .build(),
                    ComputeClusterVsanFaultDomainFaultDomainArgs.builder()
                        .name("fd2")
                        .hostIds(data.vsphere_host().faultdomain2_hosts().stream().map(element -> element.id()).collect(toList()))
                        .build())
                .build())
            .vsanStretchedCluster(ComputeClusterVsanStretchedClusterArgs.builder()
                .preferredFaultDomainHostIds(data.vsphere_host().preferred_fault_domain_host().stream().map(element -> element.id()).collect(toList()))
                .secondaryFaultDomainHostIds(data.vsphere_host().secondary_fault_domain_host().stream().map(element -> element.id()).collect(toList()))
                .witnessNode(data.vsphere_host().witness_host().id())
                .build())
            .build());
    }
}
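Because of the HA restriction noted above, getting both vSAN and HA on the same cluster is in practice a two-step rollout: deploy with HA disabled while vSAN is first enabled, then re-enable HA in a follow-up deployment. A minimal TypeScript sketch of this ordering; the datacenterId and hostIds values below are illustrative placeholders, not part of the example above (in a real program you would look them up with the vsphere.getDatacenter and vsphere.getHost data sources):

import * as vsphere from "@pulumi/vsphere";

// Placeholder managed object IDs for illustration only.
const datacenterId = "datacenter-123";
const hostIds = ["host-101", "host-102", "host-103"];

// Step 1: the first `pulumi up` enables vSAN with HA disabled.
// Step 2: once vSAN is configured, set haEnabled to true and run `pulumi up` again.
const cluster = new vsphere.ComputeCluster("vsan-cluster", {
    datacenterId: datacenterId,
    hostSystemIds: hostIds,
    vsanEnabled: true,
    haEnabled: false, // flip to true in the follow-up deployment
});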
- PreferredFaultDomainHostIds ([]string) - The managed object IDs of the hosts to put in the first fault domain.
- SecondaryFaultDomainHostIds ([]string) - The managed object IDs of the hosts to put in the second fault domain.
- WitnessNode (string) - The managed object ID of the host selected as the witness node when the stretched cluster is enabled.
- PreferredFaultDomainName (string) - The name of the first fault domain. Default: Preferred.
- SecondaryFaultDomainName (string) - The name of the second fault domain. Default: Secondary.
- preferredFaultDomainHostIds (List<String>) - The managed object IDs of the hosts to put in the first fault domain.
- secondaryFaultDomainHostIds (List<String>) - The managed object IDs of the hosts to put in the second fault domain.
- witnessNode (String) - The managed object ID of the host selected as the witness node when the stretched cluster is enabled.
- preferredFaultDomainName (String) - The name of the first fault domain. Default: Preferred.
- secondaryFaultDomainName (String) - The name of the second fault domain. Default: Secondary.
- preferredFaultDomainHostIds (string[]) - The managed object IDs of the hosts to put in the first fault domain.
- secondaryFaultDomainHostIds (string[]) - The managed object IDs of the hosts to put in the second fault domain.
- witnessNode (string) - The managed object ID of the host selected as the witness node when the stretched cluster is enabled.
- preferredFaultDomainName (string) - The name of the first fault domain. Default: Preferred.
- secondaryFaultDomainName (string) - The name of the second fault domain. Default: Secondary.
- preferred_fault_domain_host_ids (Sequence[str]) - The managed object IDs of the hosts to put in the first fault domain.
- secondary_fault_domain_host_ids (Sequence[str]) - The managed object IDs of the hosts to put in the second fault domain.
- witness_node (str) - The managed object ID of the host selected as the witness node when the stretched cluster is enabled.
- preferred_fault_domain_name (str) - The name of the first fault domain. Default: Preferred.
- secondary_fault_domain_name (str) - The name of the second fault domain. Default: Secondary.
- preferredFaultDomainHostIds (List<String>) - The managed object IDs of the hosts to put in the first fault domain.
- secondaryFaultDomainHostIds (List<String>) - The managed object IDs of the hosts to put in the second fault domain.
- witnessNode (String) - The managed object ID of the host selected as the witness node when the stretched cluster is enabled.
- preferredFaultDomainName (String) - The name of the first fault domain. Default: Preferred.
- secondaryFaultDomainName (String) - The name of the second fault domain. Default: Secondary.
Package Details
- Repository: vsphere (pulumi/pulumi-vsphere)
- License: Apache-2.0
- Notes: This Pulumi package is based on the vsphere Terraform Provider.