1. Packages
  2. Harness Provider
  3. API Docs
  4. cluster
  5. OrchestratorConfig
Harness v0.8.4 published on Thursday, Sep 11, 2025 by Pulumi

harness.cluster.OrchestratorConfig

Explore with Pulumi AI

harness logo
Harness v0.8.4 published on Thursday, Sep 11, 2025 by Pulumi

    Resource for managing a Cluster Orchestrator configuration.

    Example Usage

    // Example usage: configure the Cluster Orchestrator identified by
    // `orchestratorId` (mirrors the examples in the other languages on this page).
    import * as pulumi from "@pulumi/pulumi";
    import * as harness from "@pulumi/harness";
    
    const example = new harness.cluster.OrchestratorConfig("example", {
        orchestratorId: "orch-cvifpfl9rbg8neldj97g",
        // Replica distribution settings (on-demand base capacity / percentage).
        distribution: {
            baseOndemandCapacity: 2,
            ondemandReplicaPercentage: 50,
            selector: "ALL",
            strategy: "CostOptimized",
        },
        // Binpacking: pod-eviction thresholds plus node-disruption budgets.
        binpacking: {
            podEviction: {
                threshold: {
                    cpu: 60,
                    memory: 80,
                },
            },
            disruption: {
                criteria: "WhenEmpty",
                delay: "10m",
                budgets: [
                    {
                        reasons: [
                            "Drifted",
                            "Underutilized",
                            "Empty",
                        ],
                        nodes: "20",
                    },
                    {
                        reasons: [
                            "Drifted",
                            "Empty",
                        ],
                        nodes: "1",
                        // Optional recurrence window for this budget entry.
                        schedule: {
                            frequency: "@monthly",
                            duration: "10m",
                        },
                    },
                ],
            },
        },
        nodePreferences: {
            ttl: "Never",
            reverseFallbackInterval: "6h",
        },
        commitmentIntegration: {
            enabled: true,
            masterAccountId: "dummyAccountId",
        },
        // Custom time window in which replacement actions are applied.
        replacementSchedule: {
            windowType: "Custom",
            appliesTo: {
                consolidation: true,
                harnessPodEviction: true,
                reverseFallback: true,
            },
            windowDetails: {
                days: [
                    "SUN",
                    "WED",
                    "SAT",
                ],
                timeZone: "Asia/Calcutta",
                allDay: false,
                startTime: "10:30",
                endTime: "11:30",
            },
        },
    });
    
    # Example usage: configure the Cluster Orchestrator identified by
    # `orchestrator_id` (mirrors the examples in the other languages on this page).
    import pulumi
    import pulumi_harness as harness
    
    example = harness.cluster.OrchestratorConfig("example",
        orchestrator_id="orch-cvifpfl9rbg8neldj97g",
        # Replica distribution settings (on-demand base capacity / percentage).
        distribution={
            "base_ondemand_capacity": 2,
            "ondemand_replica_percentage": 50,
            "selector": "ALL",
            "strategy": "CostOptimized",
        },
        # Binpacking: pod-eviction thresholds plus node-disruption budgets.
        binpacking={
            "pod_eviction": {
                "threshold": {
                    "cpu": 60,
                    "memory": 80,
                },
            },
            "disruption": {
                "criteria": "WhenEmpty",
                "delay": "10m",
                "budgets": [
                    {
                        "reasons": [
                            "Drifted",
                            "Underutilized",
                            "Empty",
                        ],
                        "nodes": "20",
                    },
                    {
                        "reasons": [
                            "Drifted",
                            "Empty",
                        ],
                        "nodes": "1",
                        # Optional recurrence window for this budget entry.
                        "schedule": {
                            "frequency": "@monthly",
                            "duration": "10m",
                        },
                    },
                ],
            },
        },
        node_preferences={
            "ttl": "Never",
            "reverse_fallback_interval": "6h",
        },
        commitment_integration={
            "enabled": True,
            "master_account_id": "dummyAccountId",
        },
        # Custom time window in which replacement actions are applied.
        replacement_schedule={
            "window_type": "Custom",
            "applies_to": {
                "consolidation": True,
                "harness_pod_eviction": True,
                "reverse_fallback": True,
            },
            "window_details": {
                "days": [
                    "SUN",
                    "WED",
                    "SAT",
                ],
                "time_zone": "Asia/Calcutta",
                "all_day": False,
                "start_time": "10:30",
                "end_time": "11:30",
            },
        })
    
    // Example usage: configure the Cluster Orchestrator identified by
    // OrchestratorId (mirrors the examples in the other languages on this page).
    package main
    
    import (
    	"github.com/pulumi/pulumi-harness/sdk/go/harness/cluster"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		_, err := cluster.NewOrchestratorConfig(ctx, "example", &cluster.OrchestratorConfigArgs{
    			OrchestratorId: pulumi.String("orch-cvifpfl9rbg8neldj97g"),
    			// Replica distribution settings (on-demand base capacity / percentage).
    			Distribution: &cluster.OrchestratorConfigDistributionArgs{
    				BaseOndemandCapacity:      pulumi.Int(2),
    				OndemandReplicaPercentage: pulumi.Float64(50),
    				Selector:                  pulumi.String("ALL"),
    				Strategy:                  pulumi.String("CostOptimized"),
    			},
    			// Binpacking: pod-eviction thresholds plus node-disruption budgets.
    			Binpacking: &cluster.OrchestratorConfigBinpackingArgs{
    				PodEviction: &cluster.OrchestratorConfigBinpackingPodEvictionArgs{
    					Threshold: &cluster.OrchestratorConfigBinpackingPodEvictionThresholdArgs{
    						Cpu:    pulumi.Float64(60),
    						Memory: pulumi.Float64(80),
    					},
    				},
    				Disruption: &cluster.OrchestratorConfigBinpackingDisruptionArgs{
    					Criteria: pulumi.String("WhenEmpty"),
    					Delay:    pulumi.String("10m"),
    					Budgets: cluster.OrchestratorConfigBinpackingDisruptionBudgetArray{
    						&cluster.OrchestratorConfigBinpackingDisruptionBudgetArgs{
    							Reasons: pulumi.StringArray{
    								pulumi.String("Drifted"),
    								pulumi.String("Underutilized"),
    								pulumi.String("Empty"),
    							},
    							Nodes: pulumi.String("20"),
    						},
    						&cluster.OrchestratorConfigBinpackingDisruptionBudgetArgs{
    							Reasons: pulumi.StringArray{
    								pulumi.String("Drifted"),
    								pulumi.String("Empty"),
    							},
    							Nodes: pulumi.String("1"),
    							// Optional recurrence window for this budget entry.
    							Schedule: &cluster.OrchestratorConfigBinpackingDisruptionBudgetScheduleArgs{
    								Frequency: pulumi.String("@monthly"),
    								Duration:  pulumi.String("10m"),
    							},
    						},
    					},
    				},
    			},
    			NodePreferences: &cluster.OrchestratorConfigNodePreferencesArgs{
    				Ttl:                     pulumi.String("Never"),
    				ReverseFallbackInterval: pulumi.String("6h"),
    			},
    			CommitmentIntegration: &cluster.OrchestratorConfigCommitmentIntegrationArgs{
    				Enabled:         pulumi.Bool(true),
    				MasterAccountId: pulumi.String("dummyAccountId"),
    			},
    			// Custom time window in which replacement actions are applied.
    			ReplacementSchedule: &cluster.OrchestratorConfigReplacementScheduleArgs{
    				WindowType: pulumi.String("Custom"),
    				AppliesTo: &cluster.OrchestratorConfigReplacementScheduleAppliesToArgs{
    					Consolidation:      pulumi.Bool(true),
    					HarnessPodEviction: pulumi.Bool(true),
    					ReverseFallback:    pulumi.Bool(true),
    				},
    				WindowDetails: &cluster.OrchestratorConfigReplacementScheduleWindowDetailsArgs{
    					Days: pulumi.StringArray{
    						pulumi.String("SUN"),
    						pulumi.String("WED"),
    						pulumi.String("SAT"),
    					},
    					TimeZone:  pulumi.String("Asia/Calcutta"),
    					AllDay:    pulumi.Bool(false),
    					StartTime: pulumi.String("10:30"),
    					EndTime:   pulumi.String("11:30"),
    				},
    			},
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    // Example usage: configure the Cluster Orchestrator identified by
    // OrchestratorId (mirrors the examples in the other languages on this page).
    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using Harness = Pulumi.Harness;
    
    return await Deployment.RunAsync(() => 
    {
        var example = new Harness.Cluster.OrchestratorConfig("example", new()
        {
            OrchestratorId = "orch-cvifpfl9rbg8neldj97g",
            // Replica distribution settings (on-demand base capacity / percentage).
            Distribution = new Harness.Cluster.Inputs.OrchestratorConfigDistributionArgs
            {
                BaseOndemandCapacity = 2,
                OndemandReplicaPercentage = 50,
                Selector = "ALL",
                Strategy = "CostOptimized",
            },
            // Binpacking: pod-eviction thresholds plus node-disruption budgets.
            Binpacking = new Harness.Cluster.Inputs.OrchestratorConfigBinpackingArgs
            {
                PodEviction = new Harness.Cluster.Inputs.OrchestratorConfigBinpackingPodEvictionArgs
                {
                    Threshold = new Harness.Cluster.Inputs.OrchestratorConfigBinpackingPodEvictionThresholdArgs
                    {
                        Cpu = 60,
                        Memory = 80,
                    },
                },
                Disruption = new Harness.Cluster.Inputs.OrchestratorConfigBinpackingDisruptionArgs
                {
                    Criteria = "WhenEmpty",
                    Delay = "10m",
                    Budgets = new[]
                    {
                        new Harness.Cluster.Inputs.OrchestratorConfigBinpackingDisruptionBudgetArgs
                        {
                            Reasons = new[]
                            {
                                "Drifted",
                                "Underutilized",
                                "Empty",
                            },
                            Nodes = "20",
                        },
                        new Harness.Cluster.Inputs.OrchestratorConfigBinpackingDisruptionBudgetArgs
                        {
                            Reasons = new[]
                            {
                                "Drifted",
                                "Empty",
                            },
                            Nodes = "1",
                            // Optional recurrence window for this budget entry.
                            Schedule = new Harness.Cluster.Inputs.OrchestratorConfigBinpackingDisruptionBudgetScheduleArgs
                            {
                                Frequency = "@monthly",
                                Duration = "10m",
                            },
                        },
                    },
                },
            },
            NodePreferences = new Harness.Cluster.Inputs.OrchestratorConfigNodePreferencesArgs
            {
                Ttl = "Never",
                ReverseFallbackInterval = "6h",
            },
            CommitmentIntegration = new Harness.Cluster.Inputs.OrchestratorConfigCommitmentIntegrationArgs
            {
                Enabled = true,
                MasterAccountId = "dummyAccountId",
            },
            // Custom time window in which replacement actions are applied.
            ReplacementSchedule = new Harness.Cluster.Inputs.OrchestratorConfigReplacementScheduleArgs
            {
                WindowType = "Custom",
                AppliesTo = new Harness.Cluster.Inputs.OrchestratorConfigReplacementScheduleAppliesToArgs
                {
                    Consolidation = true,
                    HarnessPodEviction = true,
                    ReverseFallback = true,
                },
                WindowDetails = new Harness.Cluster.Inputs.OrchestratorConfigReplacementScheduleWindowDetailsArgs
                {
                    Days = new[]
                    {
                        "SUN",
                        "WED",
                        "SAT",
                    },
                    TimeZone = "Asia/Calcutta",
                    AllDay = false,
                    StartTime = "10:30",
                    EndTime = "11:30",
                },
            },
        });
    
    });
    
    // Example usage: configure the Cluster Orchestrator identified by
    // orchestratorId (mirrors the examples in the other languages on this page).
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.harness.cluster.OrchestratorConfig;
    import com.pulumi.harness.cluster.OrchestratorConfigArgs;
    import com.pulumi.harness.cluster.inputs.OrchestratorConfigDistributionArgs;
    import com.pulumi.harness.cluster.inputs.OrchestratorConfigBinpackingArgs;
    import com.pulumi.harness.cluster.inputs.OrchestratorConfigBinpackingPodEvictionArgs;
    import com.pulumi.harness.cluster.inputs.OrchestratorConfigBinpackingPodEvictionThresholdArgs;
    import com.pulumi.harness.cluster.inputs.OrchestratorConfigBinpackingDisruptionArgs;
    // The two budget input classes below were missing from the original example,
    // which made it fail to compile: both are referenced in the builder chain.
    import com.pulumi.harness.cluster.inputs.OrchestratorConfigBinpackingDisruptionBudgetArgs;
    import com.pulumi.harness.cluster.inputs.OrchestratorConfigBinpackingDisruptionBudgetScheduleArgs;
    import com.pulumi.harness.cluster.inputs.OrchestratorConfigNodePreferencesArgs;
    import com.pulumi.harness.cluster.inputs.OrchestratorConfigCommitmentIntegrationArgs;
    import com.pulumi.harness.cluster.inputs.OrchestratorConfigReplacementScheduleArgs;
    import com.pulumi.harness.cluster.inputs.OrchestratorConfigReplacementScheduleAppliesToArgs;
    import com.pulumi.harness.cluster.inputs.OrchestratorConfigReplacementScheduleWindowDetailsArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            var example = new OrchestratorConfig("example", OrchestratorConfigArgs.builder()
                .orchestratorId("orch-cvifpfl9rbg8neldj97g")
                // Replica distribution settings (on-demand base capacity / percentage).
                .distribution(OrchestratorConfigDistributionArgs.builder()
                    .baseOndemandCapacity(2)
                    .ondemandReplicaPercentage(50.0)
                    .selector("ALL")
                    .strategy("CostOptimized")
                    .build())
                // Binpacking: pod-eviction thresholds plus node-disruption budgets.
                .binpacking(OrchestratorConfigBinpackingArgs.builder()
                    .podEviction(OrchestratorConfigBinpackingPodEvictionArgs.builder()
                        .threshold(OrchestratorConfigBinpackingPodEvictionThresholdArgs.builder()
                            .cpu(60.0)
                            .memory(80.0)
                            .build())
                        .build())
                    .disruption(OrchestratorConfigBinpackingDisruptionArgs.builder()
                        .criteria("WhenEmpty")
                        .delay("10m")
                        .budgets(                    
                            OrchestratorConfigBinpackingDisruptionBudgetArgs.builder()
                                .reasons(                            
                                    "Drifted",
                                    "Underutilized",
                                    "Empty")
                                .nodes("20")
                                .build(),
                            OrchestratorConfigBinpackingDisruptionBudgetArgs.builder()
                                .reasons(                            
                                    "Drifted",
                                    "Empty")
                                .nodes("1")
                                // Optional recurrence window for this budget entry.
                                .schedule(OrchestratorConfigBinpackingDisruptionBudgetScheduleArgs.builder()
                                    .frequency("@monthly")
                                    .duration("10m")
                                    .build())
                                .build())
                        .build())
                    .build())
                .nodePreferences(OrchestratorConfigNodePreferencesArgs.builder()
                    .ttl("Never")
                    .reverseFallbackInterval("6h")
                    .build())
                .commitmentIntegration(OrchestratorConfigCommitmentIntegrationArgs.builder()
                    .enabled(true)
                    .masterAccountId("dummyAccountId")
                    .build())
                // Custom time window in which replacement actions are applied.
                .replacementSchedule(OrchestratorConfigReplacementScheduleArgs.builder()
                    .windowType("Custom")
                    .appliesTo(OrchestratorConfigReplacementScheduleAppliesToArgs.builder()
                        .consolidation(true)
                        .harnessPodEviction(true)
                        .reverseFallback(true)
                        .build())
                    .windowDetails(OrchestratorConfigReplacementScheduleWindowDetailsArgs.builder()
                        .days(                    
                            "SUN",
                            "WED",
                            "SAT")
                        .timeZone("Asia/Calcutta")
                        .allDay(false)
                        .startTime("10:30")
                        .endTime("11:30")
                        .build())
                    .build())
                .build());
    
        }
    }
    
    # Example usage: configure the Cluster Orchestrator identified by
    # orchestratorId (mirrors the examples in the other languages on this page).
    resources:
      example:
        type: harness:cluster:OrchestratorConfig
        properties:
          orchestratorId: orch-cvifpfl9rbg8neldj97g
          # Replica distribution settings (on-demand base capacity / percentage).
          distribution:
            baseOndemandCapacity: 2
            ondemandReplicaPercentage: 50
            selector: ALL
            strategy: CostOptimized
          # Binpacking: pod-eviction thresholds plus node-disruption budgets.
          binpacking:
            podEviction:
              threshold:
                cpu: 60
                memory: 80
            disruption:
              criteria: WhenEmpty
              delay: 10m
              budgets:
                - reasons:
                    - Drifted
                    - Underutilized
                    - Empty
                  nodes: '20'
                - reasons:
                    - Drifted
                    - Empty
                  nodes: '1'
                  # Optional recurrence window for this budget entry.
                  schedule:
                    frequency: '@monthly'
                    duration: 10m
          nodePreferences:
            ttl: Never
            reverseFallbackInterval: 6h
          commitmentIntegration:
            enabled: true
            masterAccountId: dummyAccountId
          # Custom time window in which replacement actions are applied.
          replacementSchedule:
            windowType: Custom
            appliesTo:
              consolidation: true
              harnessPodEviction: true
              reverseFallback: true
            windowDetails:
              days:
                - SUN
                - WED
                - SAT
              timeZone: Asia/Calcutta
              allDay: false
              # Quoted so YAML 1.1 resolvers do not read HH:MM as a
              # sexagesimal (base-60) integer — unquoted 10:30 parses as 630.
              startTime: '10:30'
              endTime: '11:30'
    

    Create OrchestratorConfig Resource

    Resources are created with functions called constructors. To learn more about declaring and configuring resources, see Resources.

    Constructor syntax

    new OrchestratorConfig(name: string, args: OrchestratorConfigArgs, opts?: CustomResourceOptions);
    @overload
    def OrchestratorConfig(resource_name: str,
                           args: OrchestratorConfigArgs,
                           opts: Optional[ResourceOptions] = None)
    
    @overload
    def OrchestratorConfig(resource_name: str,
                           opts: Optional[ResourceOptions] = None,
                           distribution: Optional[OrchestratorConfigDistributionArgs] = None,
                           orchestrator_id: Optional[str] = None,
                           binpacking: Optional[OrchestratorConfigBinpackingArgs] = None,
                           commitment_integration: Optional[OrchestratorConfigCommitmentIntegrationArgs] = None,
                           node_preferences: Optional[OrchestratorConfigNodePreferencesArgs] = None,
                           replacement_schedule: Optional[OrchestratorConfigReplacementScheduleArgs] = None)
    func NewOrchestratorConfig(ctx *Context, name string, args OrchestratorConfigArgs, opts ...ResourceOption) (*OrchestratorConfig, error)
    public OrchestratorConfig(string name, OrchestratorConfigArgs args, CustomResourceOptions? opts = null)
    public OrchestratorConfig(String name, OrchestratorConfigArgs args)
    public OrchestratorConfig(String name, OrchestratorConfigArgs args, CustomResourceOptions options)
    
    type: harness:cluster:OrchestratorConfig
    properties: # The arguments to resource properties.
    options: # Bag of options to control resource's behavior.
    
    

    Parameters

    name string
    The unique name of the resource.
    args OrchestratorConfigArgs
    The arguments to resource properties.
    opts CustomResourceOptions
    Bag of options to control resource's behavior.
    resource_name str
    The unique name of the resource.
    args OrchestratorConfigArgs
    The arguments to resource properties.
    opts ResourceOptions
    Bag of options to control resource's behavior.
    ctx Context
    Context object for the current deployment.
    name string
    The unique name of the resource.
    args OrchestratorConfigArgs
    The arguments to resource properties.
    opts ResourceOption
    Bag of options to control resource's behavior.
    name string
    The unique name of the resource.
    args OrchestratorConfigArgs
    The arguments to resource properties.
    opts CustomResourceOptions
    Bag of options to control resource's behavior.
    name String
    The unique name of the resource.
    args OrchestratorConfigArgs
    The arguments to resource properties.
    options CustomResourceOptions
    Bag of options to control resource's behavior.

    Constructor example

    The following reference example uses placeholder values for all input properties.

    // Reference example: placeholder values ("string", 0, false) for every
    // OrchestratorConfig input property; substitute real values before use.
    var orchestratorConfigResource = new Harness.Cluster.OrchestratorConfig("orchestratorConfigResource", new()
    {
        Distribution = new Harness.Cluster.Inputs.OrchestratorConfigDistributionArgs
        {
            OndemandReplicaPercentage = 0,
            BaseOndemandCapacity = 0,
            Selector = "string",
            Strategy = "string",
        },
        OrchestratorId = "string",
        Binpacking = new Harness.Cluster.Inputs.OrchestratorConfigBinpackingArgs
        {
            Disruption = new Harness.Cluster.Inputs.OrchestratorConfigBinpackingDisruptionArgs
            {
                Budgets = new[]
                {
                    new Harness.Cluster.Inputs.OrchestratorConfigBinpackingDisruptionBudgetArgs
                    {
                        Nodes = "string",
                        Reasons = new[]
                        {
                            "string",
                        },
                        Schedule = new Harness.Cluster.Inputs.OrchestratorConfigBinpackingDisruptionBudgetScheduleArgs
                        {
                            Duration = "string",
                            Frequency = "string",
                        },
                    },
                },
                Criteria = "string",
                Delay = "string",
            },
            PodEviction = new Harness.Cluster.Inputs.OrchestratorConfigBinpackingPodEvictionArgs
            {
                Threshold = new Harness.Cluster.Inputs.OrchestratorConfigBinpackingPodEvictionThresholdArgs
                {
                    Cpu = 0,
                    Memory = 0,
                },
            },
        },
        CommitmentIntegration = new Harness.Cluster.Inputs.OrchestratorConfigCommitmentIntegrationArgs
        {
            Enabled = false,
            MasterAccountId = "string",
        },
        NodePreferences = new Harness.Cluster.Inputs.OrchestratorConfigNodePreferencesArgs
        {
            ReverseFallbackInterval = "string",
            Ttl = "string",
        },
        ReplacementSchedule = new Harness.Cluster.Inputs.OrchestratorConfigReplacementScheduleArgs
        {
            AppliesTo = new Harness.Cluster.Inputs.OrchestratorConfigReplacementScheduleAppliesToArgs
            {
                Consolidation = false,
                HarnessPodEviction = false,
                ReverseFallback = false,
            },
            WindowType = "string",
            WindowDetails = new Harness.Cluster.Inputs.OrchestratorConfigReplacementScheduleWindowDetailsArgs
            {
                Days = new[]
                {
                    "string",
                },
                TimeZone = "string",
                AllDay = false,
                EndTime = "string",
                StartTime = "string",
            },
        },
    });
    
    // Reference example: placeholder values ("string", 0, false) for every
    // OrchestratorConfig input property; substitute real values before use.
    example, err := cluster.NewOrchestratorConfig(ctx, "orchestratorConfigResource", &cluster.OrchestratorConfigArgs{
    	Distribution: &cluster.OrchestratorConfigDistributionArgs{
    		OndemandReplicaPercentage: pulumi.Float64(0),
    		BaseOndemandCapacity:      pulumi.Int(0),
    		Selector:                  pulumi.String("string"),
    		Strategy:                  pulumi.String("string"),
    	},
    	OrchestratorId: pulumi.String("string"),
    	Binpacking: &cluster.OrchestratorConfigBinpackingArgs{
    		Disruption: &cluster.OrchestratorConfigBinpackingDisruptionArgs{
    			Budgets: cluster.OrchestratorConfigBinpackingDisruptionBudgetArray{
    				&cluster.OrchestratorConfigBinpackingDisruptionBudgetArgs{
    					Nodes: pulumi.String("string"),
    					Reasons: pulumi.StringArray{
    						pulumi.String("string"),
    					},
    					Schedule: &cluster.OrchestratorConfigBinpackingDisruptionBudgetScheduleArgs{
    						Duration:  pulumi.String("string"),
    						Frequency: pulumi.String("string"),
    					},
    				},
    			},
    			Criteria: pulumi.String("string"),
    			Delay:    pulumi.String("string"),
    		},
    		PodEviction: &cluster.OrchestratorConfigBinpackingPodEvictionArgs{
    			Threshold: &cluster.OrchestratorConfigBinpackingPodEvictionThresholdArgs{
    				Cpu:    pulumi.Float64(0),
    				Memory: pulumi.Float64(0),
    			},
    		},
    	},
    	CommitmentIntegration: &cluster.OrchestratorConfigCommitmentIntegrationArgs{
    		Enabled:         pulumi.Bool(false),
    		MasterAccountId: pulumi.String("string"),
    	},
    	NodePreferences: &cluster.OrchestratorConfigNodePreferencesArgs{
    		ReverseFallbackInterval: pulumi.String("string"),
    		Ttl:                     pulumi.String("string"),
    	},
    	ReplacementSchedule: &cluster.OrchestratorConfigReplacementScheduleArgs{
    		AppliesTo: &cluster.OrchestratorConfigReplacementScheduleAppliesToArgs{
    			Consolidation:      pulumi.Bool(false),
    			HarnessPodEviction: pulumi.Bool(false),
    			ReverseFallback:    pulumi.Bool(false),
    		},
    		WindowType: pulumi.String("string"),
    		WindowDetails: &cluster.OrchestratorConfigReplacementScheduleWindowDetailsArgs{
    			Days: pulumi.StringArray{
    				pulumi.String("string"),
    			},
    			TimeZone:  pulumi.String("string"),
    			AllDay:    pulumi.Bool(false),
    			EndTime:   pulumi.String("string"),
    			StartTime: pulumi.String("string"),
    		},
    	},
    })
    
    // Reference example: placeholder values ("string", 0, false) for every
    // OrchestratorConfig input property; substitute real values before use.
    var orchestratorConfigResource = new OrchestratorConfig("orchestratorConfigResource", OrchestratorConfigArgs.builder()
        .distribution(OrchestratorConfigDistributionArgs.builder()
            .ondemandReplicaPercentage(0.0)
            .baseOndemandCapacity(0)
            .selector("string")
            .strategy("string")
            .build())
        .orchestratorId("string")
        .binpacking(OrchestratorConfigBinpackingArgs.builder()
            .disruption(OrchestratorConfigBinpackingDisruptionArgs.builder()
                .budgets(OrchestratorConfigBinpackingDisruptionBudgetArgs.builder()
                    .nodes("string")
                    .reasons("string")
                    .schedule(OrchestratorConfigBinpackingDisruptionBudgetScheduleArgs.builder()
                        .duration("string")
                        .frequency("string")
                        .build())
                    .build())
                .criteria("string")
                .delay("string")
                .build())
            .podEviction(OrchestratorConfigBinpackingPodEvictionArgs.builder()
                .threshold(OrchestratorConfigBinpackingPodEvictionThresholdArgs.builder()
                    .cpu(0.0)
                    .memory(0.0)
                    .build())
                .build())
            .build())
        .commitmentIntegration(OrchestratorConfigCommitmentIntegrationArgs.builder()
            .enabled(false)
            .masterAccountId("string")
            .build())
        .nodePreferences(OrchestratorConfigNodePreferencesArgs.builder()
            .reverseFallbackInterval("string")
            .ttl("string")
            .build())
        .replacementSchedule(OrchestratorConfigReplacementScheduleArgs.builder()
            .appliesTo(OrchestratorConfigReplacementScheduleAppliesToArgs.builder()
                .consolidation(false)
                .harnessPodEviction(false)
                .reverseFallback(false)
                .build())
            .windowType("string")
            .windowDetails(OrchestratorConfigReplacementScheduleWindowDetailsArgs.builder()
                .days("string")
                .timeZone("string")
                .allDay(false)
                .endTime("string")
                .startTime("string")
                .build())
            .build())
        .build());
    
    # Reference example: placeholder values ("string", 0, False) for every
    # OrchestratorConfig input property; substitute real values before use.
    orchestrator_config_resource = harness.cluster.OrchestratorConfig("orchestratorConfigResource",
        distribution={
            "ondemand_replica_percentage": 0,
            "base_ondemand_capacity": 0,
            "selector": "string",
            "strategy": "string",
        },
        orchestrator_id="string",
        binpacking={
            "disruption": {
                "budgets": [{
                    "nodes": "string",
                    "reasons": ["string"],
                    "schedule": {
                        "duration": "string",
                        "frequency": "string",
                    },
                }],
                "criteria": "string",
                "delay": "string",
            },
            "pod_eviction": {
                "threshold": {
                    "cpu": 0,
                    "memory": 0,
                },
            },
        },
        commitment_integration={
            "enabled": False,
            "master_account_id": "string",
        },
        node_preferences={
            "reverse_fallback_interval": "string",
            "ttl": "string",
        },
        replacement_schedule={
            "applies_to": {
                "consolidation": False,
                "harness_pod_eviction": False,
                "reverse_fallback": False,
            },
            "window_type": "string",
            "window_details": {
                "days": ["string"],
                "time_zone": "string",
                "all_day": False,
                "end_time": "string",
                "start_time": "string",
            },
        })
    
    // Reference example: placeholder values ("string", 0, false) for every
    // OrchestratorConfig input property; substitute real values before use.
    const orchestratorConfigResource = new harness.cluster.OrchestratorConfig("orchestratorConfigResource", {
        distribution: {
            ondemandReplicaPercentage: 0,
            baseOndemandCapacity: 0,
            selector: "string",
            strategy: "string",
        },
        orchestratorId: "string",
        binpacking: {
            disruption: {
                budgets: [{
                    nodes: "string",
                    reasons: ["string"],
                    schedule: {
                        duration: "string",
                        frequency: "string",
                    },
                }],
                criteria: "string",
                delay: "string",
            },
            podEviction: {
                threshold: {
                    cpu: 0,
                    memory: 0,
                },
            },
        },
        commitmentIntegration: {
            enabled: false,
            masterAccountId: "string",
        },
        nodePreferences: {
            reverseFallbackInterval: "string",
            ttl: "string",
        },
        replacementSchedule: {
            appliesTo: {
                consolidation: false,
                harnessPodEviction: false,
                reverseFallback: false,
            },
            windowType: "string",
            windowDetails: {
                days: ["string"],
                timeZone: "string",
                allDay: false,
                endTime: "string",
                startTime: "string",
            },
        },
    });
    
    type: harness:cluster:OrchestratorConfig
    properties:
        binpacking:
            disruption:
                budgets:
                    - nodes: string
                      reasons:
                        - string
                      schedule:
                        duration: string
                        frequency: string
                criteria: string
                delay: string
            podEviction:
                threshold:
                    cpu: 0
                    memory: 0
        commitmentIntegration:
            enabled: false
            masterAccountId: string
        distribution:
            baseOndemandCapacity: 0
            ondemandReplicaPercentage: 0
            selector: string
            strategy: string
        nodePreferences:
            reverseFallbackInterval: string
            ttl: string
        orchestratorId: string
        replacementSchedule:
            appliesTo:
                consolidation: false
                harnessPodEviction: false
                reverseFallback: false
            windowDetails:
                allDay: false
                days:
                    - string
                endTime: string
                startTime: string
                timeZone: string
            windowType: string
    

    OrchestratorConfig Resource Properties

    To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.

    Inputs

    In Python, inputs that are objects can be passed either as argument classes or as dictionary literals.

    The OrchestratorConfig resource accepts the following input properties:

    Distribution OrchestratorConfigDistribution
    Spot and Ondemand Distribution Preferences for workload replicas
    OrchestratorId string
    ID of the Cluster Orchestrator Object
    Binpacking OrchestratorConfigBinpacking
    Binpacking preferences for Cluster Orchestrator
    CommitmentIntegration OrchestratorConfigCommitmentIntegration
    Commitment integration configuration for Cluster Orchestrator
    NodePreferences OrchestratorConfigNodePreferences
    Node preferences for Cluster Orchestrator
    ReplacementSchedule OrchestratorConfigReplacementSchedule
    Replacement schedule for Cluster Orchestrator
    Distribution OrchestratorConfigDistributionArgs
    Spot and Ondemand Distribution Preferences for workload replicas
    OrchestratorId string
    ID of the Cluster Orchestrator Object
    Binpacking OrchestratorConfigBinpackingArgs
    Binpacking preferences for Cluster Orchestrator
    CommitmentIntegration OrchestratorConfigCommitmentIntegrationArgs
    Commitment integration configuration for Cluster Orchestrator
    NodePreferences OrchestratorConfigNodePreferencesArgs
    Node preferences for Cluster Orchestrator
    ReplacementSchedule OrchestratorConfigReplacementScheduleArgs
    Replacement schedule for Cluster Orchestrator
    distribution OrchestratorConfigDistribution
    Spot and Ondemand Distribution Preferences for workload replicas
    orchestratorId String
    ID of the Cluster Orchestrator Object
    binpacking OrchestratorConfigBinpacking
    Binpacking preferences for Cluster Orchestrator
    commitmentIntegration OrchestratorConfigCommitmentIntegration
    Commitment integration configuration for Cluster Orchestrator
    nodePreferences OrchestratorConfigNodePreferences
    Node preferences for Cluster Orchestrator
    replacementSchedule OrchestratorConfigReplacementSchedule
    Replacement schedule for Cluster Orchestrator
    distribution OrchestratorConfigDistribution
    Spot and Ondemand Distribution Preferences for workload replicas
    orchestratorId string
    ID of the Cluster Orchestrator Object
    binpacking OrchestratorConfigBinpacking
    Binpacking preferences for Cluster Orchestrator
    commitmentIntegration OrchestratorConfigCommitmentIntegration
    Commitment integration configuration for Cluster Orchestrator
    nodePreferences OrchestratorConfigNodePreferences
    Node preferences for Cluster Orchestrator
    replacementSchedule OrchestratorConfigReplacementSchedule
    Replacement schedule for Cluster Orchestrator
    distribution OrchestratorConfigDistributionArgs
    Spot and Ondemand Distribution Preferences for workload replicas
    orchestrator_id str
    ID of the Cluster Orchestrator Object
    binpacking OrchestratorConfigBinpackingArgs
    Binpacking preferences for Cluster Orchestrator
    commitment_integration OrchestratorConfigCommitmentIntegrationArgs
    Commitment integration configuration for Cluster Orchestrator
    node_preferences OrchestratorConfigNodePreferencesArgs
    Node preferences for Cluster Orchestrator
    replacement_schedule OrchestratorConfigReplacementScheduleArgs
    Replacement schedule for Cluster Orchestrator
    distribution Property Map
    Spot and Ondemand Distribution Preferences for workload replicas
    orchestratorId String
    ID of the Cluster Orchestrator Object
    binpacking Property Map
    Binpacking preferences for Cluster Orchestrator
    commitmentIntegration Property Map
    Commitment integration configuration for Cluster Orchestrator
    nodePreferences Property Map
    Node preferences for Cluster Orchestrator
    replacementSchedule Property Map
    Replacement schedule for Cluster Orchestrator

    Outputs

    All input properties are implicitly available as output properties. Additionally, the OrchestratorConfig resource produces the following output properties:

    Id string
    The provider-assigned unique ID for this managed resource.
    Id string
    The provider-assigned unique ID for this managed resource.
    id String
    The provider-assigned unique ID for this managed resource.
    id string
    The provider-assigned unique ID for this managed resource.
    id str
    The provider-assigned unique ID for this managed resource.
    id String
    The provider-assigned unique ID for this managed resource.

    Look up Existing OrchestratorConfig Resource

    Get an existing OrchestratorConfig resource’s state with the given name, ID, and optional extra properties used to qualify the lookup.

    public static get(name: string, id: Input<ID>, state?: OrchestratorConfigState, opts?: CustomResourceOptions): OrchestratorConfig
    @staticmethod
    def get(resource_name: str,
            id: str,
            opts: Optional[ResourceOptions] = None,
            binpacking: Optional[OrchestratorConfigBinpackingArgs] = None,
            commitment_integration: Optional[OrchestratorConfigCommitmentIntegrationArgs] = None,
            distribution: Optional[OrchestratorConfigDistributionArgs] = None,
            node_preferences: Optional[OrchestratorConfigNodePreferencesArgs] = None,
            orchestrator_id: Optional[str] = None,
            replacement_schedule: Optional[OrchestratorConfigReplacementScheduleArgs] = None) -> OrchestratorConfig
    func GetOrchestratorConfig(ctx *Context, name string, id IDInput, state *OrchestratorConfigState, opts ...ResourceOption) (*OrchestratorConfig, error)
    public static OrchestratorConfig Get(string name, Input<string> id, OrchestratorConfigState? state, CustomResourceOptions? opts = null)
    public static OrchestratorConfig get(String name, Output<String> id, OrchestratorConfigState state, CustomResourceOptions options)
    resources:  _:    type: harness:cluster:OrchestratorConfig    get:      id: ${id}
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    resource_name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    The following state arguments are supported:
    Binpacking OrchestratorConfigBinpacking
    Binpacking preferences for Cluster Orchestrator
    CommitmentIntegration OrchestratorConfigCommitmentIntegration
    Commitment integration configuration for Cluster Orchestrator
    Distribution OrchestratorConfigDistribution
    Spot and Ondemand Distribution Preferences for workload replicas
    NodePreferences OrchestratorConfigNodePreferences
    Node preferences for Cluster Orchestrator
    OrchestratorId string
    ID of the Cluster Orchestrator Object
    ReplacementSchedule OrchestratorConfigReplacementSchedule
    Replacement schedule for Cluster Orchestrator
    Binpacking OrchestratorConfigBinpackingArgs
    Binpacking preferences for Cluster Orchestrator
    CommitmentIntegration OrchestratorConfigCommitmentIntegrationArgs
    Commitment integration configuration for Cluster Orchestrator
    Distribution OrchestratorConfigDistributionArgs
    Spot and Ondemand Distribution Preferences for workload replicas
    NodePreferences OrchestratorConfigNodePreferencesArgs
    Node preferences for Cluster Orchestrator
    OrchestratorId string
    ID of the Cluster Orchestrator Object
    ReplacementSchedule OrchestratorConfigReplacementScheduleArgs
    Replacement schedule for Cluster Orchestrator
    binpacking OrchestratorConfigBinpacking
    Binpacking preferences for Cluster Orchestrator
    commitmentIntegration OrchestratorConfigCommitmentIntegration
    Commitment integration configuration for Cluster Orchestrator
    distribution OrchestratorConfigDistribution
    Spot and Ondemand Distribution Preferences for workload replicas
    nodePreferences OrchestratorConfigNodePreferences
    Node preferences for Cluster Orchestrator
    orchestratorId String
    ID of the Cluster Orchestrator Object
    replacementSchedule OrchestratorConfigReplacementSchedule
    Replacement schedule for Cluster Orchestrator
    binpacking OrchestratorConfigBinpacking
    Binpacking preferences for Cluster Orchestrator
    commitmentIntegration OrchestratorConfigCommitmentIntegration
    Commitment integration configuration for Cluster Orchestrator
    distribution OrchestratorConfigDistribution
    Spot and Ondemand Distribution Preferences for workload replicas
    nodePreferences OrchestratorConfigNodePreferences
    Node preferences for Cluster Orchestrator
    orchestratorId string
    ID of the Cluster Orchestrator Object
    replacementSchedule OrchestratorConfigReplacementSchedule
    Replacement schedule for Cluster Orchestrator
    binpacking OrchestratorConfigBinpackingArgs
    Binpacking preferences for Cluster Orchestrator
    commitment_integration OrchestratorConfigCommitmentIntegrationArgs
    Commitment integration configuration for Cluster Orchestrator
    distribution OrchestratorConfigDistributionArgs
    Spot and Ondemand Distribution Preferences for workload replicas
    node_preferences OrchestratorConfigNodePreferencesArgs
    Node preferences for Cluster Orchestrator
    orchestrator_id str
    ID of the Cluster Orchestrator Object
    replacement_schedule OrchestratorConfigReplacementScheduleArgs
    Replacement schedule for Cluster Orchestrator
    binpacking Property Map
    Binpacking preferences for Cluster Orchestrator
    commitmentIntegration Property Map
    Commitment integration configuration for Cluster Orchestrator
    distribution Property Map
    Spot and Ondemand Distribution Preferences for workload replicas
    nodePreferences Property Map
    Node preferences for Cluster Orchestrator
    orchestratorId String
    ID of the Cluster Orchestrator Object
    replacementSchedule Property Map
    Replacement schedule for Cluster Orchestrator

    Supporting Types

    OrchestratorConfigBinpacking, OrchestratorConfigBinpackingArgs

    Disruption OrchestratorConfigBinpackingDisruption
    Harness disruption configuration
    PodEviction OrchestratorConfigBinpackingPodEviction
    Harness Pod Evictor Configuration
    Disruption OrchestratorConfigBinpackingDisruption
    Harness disruption configuration
    PodEviction OrchestratorConfigBinpackingPodEviction
    Harness Pod Evictor Configuration
    disruption OrchestratorConfigBinpackingDisruption
    Harness disruption configuration
    podEviction OrchestratorConfigBinpackingPodEviction
    Harness Pod Evictor Configuration
    disruption OrchestratorConfigBinpackingDisruption
    Harness disruption configuration
    podEviction OrchestratorConfigBinpackingPodEviction
    Harness Pod Evictor Configuration
    disruption OrchestratorConfigBinpackingDisruption
    Harness disruption configuration
    pod_eviction OrchestratorConfigBinpackingPodEviction
    Harness Pod Evictor Configuration
    disruption Property Map
    Harness disruption configuration
    podEviction Property Map
    Harness Pod Evictor Configuration

    OrchestratorConfigBinpackingDisruption, OrchestratorConfigBinpackingDisruptionArgs

    Budgets List<OrchestratorConfigBinpackingDisruptionBudget>
    Budgets for disruption
    Criteria string
    Criteria for considering nodes for disruption
    Delay string
    Deletion delay
    Budgets []OrchestratorConfigBinpackingDisruptionBudget
    Budgets for disruption
    Criteria string
    Criteria for considering nodes for disruption
    Delay string
    Deletion delay
    budgets List<OrchestratorConfigBinpackingDisruptionBudget>
    Budgets for disruption
    criteria String
    Criteria for considering nodes for disruption
    delay String
    Deletion delay
    budgets OrchestratorConfigBinpackingDisruptionBudget[]
    Budgets for disruption
    criteria string
    Criteria for considering nodes for disruption
    delay string
    Deletion delay
    budgets Sequence[OrchestratorConfigBinpackingDisruptionBudget]
    Budgets for disruption
    criteria str
    Criteria for considering nodes for disruption
    delay str
    Deletion delay
    budgets List<Property Map>
    Budgets for disruption
    criteria String
    Criteria for considering nodes for disruption
    delay String
    Deletion delay

    OrchestratorConfigBinpackingDisruptionBudget, OrchestratorConfigBinpackingDisruptionBudgetArgs

    Nodes string
    Number or percentage of Nodes to consider for disruption
    Reasons List<string>
    Reasons for disruption
    Schedule OrchestratorConfigBinpackingDisruptionBudgetSchedule
    Schedule for disruption budget
    Nodes string
    Number or percentage of Nodes to consider for disruption
    Reasons []string
    Reasons for disruption
    Schedule OrchestratorConfigBinpackingDisruptionBudgetSchedule
    Schedule for disruption budget
    nodes String
    Number or percentage of Nodes to consider for disruption
    reasons List<String>
    Reasons for disruption
    schedule OrchestratorConfigBinpackingDisruptionBudgetSchedule
    Schedule for disruption budget
    nodes string
    Number or percentage of Nodes to consider for disruption
    reasons string[]
    Reasons for disruption
    schedule OrchestratorConfigBinpackingDisruptionBudgetSchedule
    Schedule for disruption budget
    nodes str
    Number or percentage of Nodes to consider for disruption
    reasons Sequence[str]
    Reasons for disruption
    schedule OrchestratorConfigBinpackingDisruptionBudgetSchedule
    Schedule for disruption budget
    nodes String
    Number or percentage of Nodes to consider for disruption
    reasons List<String>
    Reasons for disruption
    schedule Property Map
    Schedule for disruption budget

    OrchestratorConfigBinpackingDisruptionBudgetSchedule, OrchestratorConfigBinpackingDisruptionBudgetScheduleArgs

    Duration string
    Duration for disruption budget
    Frequency string
    Frequency for disruption budget
    Duration string
    Duration for disruption budget
    Frequency string
    Frequency for disruption budget
    duration String
    Duration for disruption budget
    frequency String
    Frequency for disruption budget
    duration string
    Duration for disruption budget
    frequency string
    Frequency for disruption budget
    duration str
    Duration for disruption budget
    frequency str
    Frequency for disruption budget
    duration String
    Duration for disruption budget
    frequency String
    Frequency for disruption budget

    OrchestratorConfigBinpackingPodEviction, OrchestratorConfigBinpackingPodEvictionArgs

    Threshold OrchestratorConfigBinpackingPodEvictionThreshold
    Minimum Threshold for considering a node as underutilized
    Threshold OrchestratorConfigBinpackingPodEvictionThreshold
    Minimum Threshold for considering a node as underutilized
    threshold OrchestratorConfigBinpackingPodEvictionThreshold
    Minimum Threshold for considering a node as underutilized
    threshold OrchestratorConfigBinpackingPodEvictionThreshold
    Minimum Threshold for considering a node as underutilized
    threshold OrchestratorConfigBinpackingPodEvictionThreshold
    Minimum Threshold for considering a node as underutilized
    threshold Property Map
    Minimum Threshold for considering a node as underutilized

    OrchestratorConfigBinpackingPodEvictionThreshold, OrchestratorConfigBinpackingPodEvictionThresholdArgs

    Cpu double
    CPU percentage for considering a node as underutilized
    Memory double
    Memory percentage for considering a node as underutilized
    Cpu float64
    CPU percentage for considering a node as underutilized
    Memory float64
    Memory percentage for considering a node as underutilized
    cpu Double
    CPU percentage for considering a node as underutilized
    memory Double
    Memory percentage for considering a node as underutilized
    cpu number
    CPU percentage for considering a node as underutilized
    memory number
    Memory percentage for considering a node as underutilized
    cpu float
    CPU percentage for considering a node as underutilized
    memory float
    Memory percentage for considering a node as underutilized
    cpu Number
    CPU percentage for considering a node as underutilized
    memory Number
    Memory percentage for considering a node as underutilized

    OrchestratorConfigCommitmentIntegration, OrchestratorConfigCommitmentIntegrationArgs

    Enabled bool
    Flag to enable Commitment Integration
    MasterAccountId string
    Master AWS account id for commitment integration
    Enabled bool
    Flag to enable Commitment Integration
    MasterAccountId string
    Master AWS account id for commitment integration
    enabled Boolean
    Flag to enable Commitment Integration
    masterAccountId String
    Master AWS account id for commitment integration
    enabled boolean
    Flag to enable Commitment Integration
    masterAccountId string
    Master AWS account id for commitment integration
    enabled bool
    Flag to enable Commitment Integration
    master_account_id str
    Master AWS account id for commitment integration
    enabled Boolean
    Flag to enable Commitment Integration
    masterAccountId String
    Master AWS account id for commitment integration

    OrchestratorConfigDistribution, OrchestratorConfigDistributionArgs

    OndemandReplicaPercentage double
    Percentage of on-demand replicas required for workloads
    BaseOndemandCapacity int
    Number of minimum ondemand replicas required for workloads
    Selector string
    Selector for choosing workloads for distribution
    Strategy string
    Strategy for choosing spot nodes for cluster
    OndemandReplicaPercentage float64
    Percentage of on-demand replicas required for workloads
    BaseOndemandCapacity int
    Number of minimum ondemand replicas required for workloads
    Selector string
    Selector for choosing workloads for distribution
    Strategy string
    Strategy for choosing spot nodes for cluster
    ondemandReplicaPercentage Double
    Percentage of on-demand replicas required for workloads
    baseOndemandCapacity Integer
    Number of minimum ondemand replicas required for workloads
    selector String
    Selector for choosing workloads for distribution
    strategy String
    Strategy for choosing spot nodes for cluster
    ondemandReplicaPercentage number
    Percentage of on-demand replicas required for workloads
    baseOndemandCapacity number
    Number of minimum ondemand replicas required for workloads
    selector string
    Selector for choosing workloads for distribution
    strategy string
    Strategy for choosing spot nodes for cluster
    ondemand_replica_percentage float
    Percentage of on-demand replicas required for workloads
    base_ondemand_capacity int
    Number of minimum ondemand replicas required for workloads
    selector str
    Selector for choosing workloads for distribution
    strategy str
    Strategy for choosing spot nodes for cluster
    ondemandReplicaPercentage Number
    Percentage of on-demand replicas required for workloads
    baseOndemandCapacity Number
    Number of minimum ondemand replicas required for workloads
    selector String
    Selector for choosing workloads for distribution
    strategy String
    Strategy for choosing spot nodes for cluster

    OrchestratorConfigNodePreferences, OrchestratorConfigNodePreferencesArgs

    ReverseFallbackInterval string
    Reverse fallback interval
    Ttl string
    TTL for nodes
    ReverseFallbackInterval string
    Reverse fallback interval
    Ttl string
    TTL for nodes
    reverseFallbackInterval String
    Reverse fallback interval
    ttl String
    TTL for nodes
    reverseFallbackInterval string
    Reverse fallback interval
    ttl string
    TTL for nodes
    reverse_fallback_interval str
    Reverse fallback interval
    ttl str
    TTL for nodes
    reverseFallbackInterval String
    Reverse fallback interval
    ttl String
    TTL for nodes

    OrchestratorConfigReplacementSchedule, OrchestratorConfigReplacementScheduleArgs

    AppliesTo OrchestratorConfigReplacementScheduleAppliesTo
    Defines the scope of the replacement schedule
    WindowType string
    Window type for replacement schedule
    WindowDetails OrchestratorConfigReplacementScheduleWindowDetails
    AppliesTo OrchestratorConfigReplacementScheduleAppliesTo
    Defines the scope of the replacement schedule
    WindowType string
    Window type for replacement schedule
    WindowDetails OrchestratorConfigReplacementScheduleWindowDetails
    appliesTo OrchestratorConfigReplacementScheduleAppliesTo
    Defines the scope of the replacement schedule
    windowType String
    Window type for replacement schedule
    windowDetails OrchestratorConfigReplacementScheduleWindowDetails
    appliesTo OrchestratorConfigReplacementScheduleAppliesTo
    Defines the scope of the replacement schedule
    windowType string
    Window type for replacement schedule
    windowDetails OrchestratorConfigReplacementScheduleWindowDetails
    appliesTo Property Map
    Defines the scope of the replacement schedule
    windowType String
    Window type for replacement schedule
    windowDetails Property Map

    OrchestratorConfigReplacementScheduleAppliesTo, OrchestratorConfigReplacementScheduleAppliesToArgs

    OrchestratorConfigReplacementScheduleWindowDetails, OrchestratorConfigReplacementScheduleWindowDetailsArgs

    Days List<string>
    List of days on which the schedule needs to be active. Valid values are SUN, MON, TUE, WED, THU, FRI and SAT.
    TimeZone string
    Time zone in which the schedule needs to be executed. Example Valid values: UTC, America/NewYork, Europe/London, Asia/Kolkata, Asia/Tokyo, Asia/HongKong, Asia/Singapore, Australia/Melbourne and Australia/Sydney.
    AllDay bool
    EndTime string
    End time of schedule in the format HH:MM. Eg : 13:15 for 01:15pm
    StartTime string
    Start time of schedule in the format HH:MM. Eg : 13:15 for 01:15pm
    Days []string
    List of days on which the schedule needs to be active. Valid values are SUN, MON, TUE, WED, THU, FRI and SAT.
    TimeZone string
    Time zone in which the schedule needs to be executed. Example Valid values: UTC, America/NewYork, Europe/London, Asia/Kolkata, Asia/Tokyo, Asia/HongKong, Asia/Singapore, Australia/Melbourne and Australia/Sydney.
    AllDay bool
    EndTime string
    End time of schedule in the format HH:MM. Eg : 13:15 for 01:15pm
    StartTime string
    Start time of schedule in the format HH:MM. Eg : 13:15 for 01:15pm
    days List<String>
    List of days on which the schedule needs to be active. Valid values are SUN, MON, TUE, WED, THU, FRI and SAT.
    timeZone String
    Time zone in which the schedule needs to be executed. Example Valid values: UTC, America/NewYork, Europe/London, Asia/Kolkata, Asia/Tokyo, Asia/HongKong, Asia/Singapore, Australia/Melbourne and Australia/Sydney.
    allDay Boolean
    endTime String
    End time of schedule in the format HH:MM. Eg : 13:15 for 01:15pm
    startTime String
    Start time of schedule in the format HH:MM. Eg : 13:15 for 01:15pm
    days string[]
    List of days on which the schedule needs to be active. Valid values are SUN, MON, TUE, WED, THU, FRI and SAT.
    timeZone string
    Time zone in which the schedule needs to be executed. Example Valid values: UTC, America/NewYork, Europe/London, Asia/Kolkata, Asia/Tokyo, Asia/HongKong, Asia/Singapore, Australia/Melbourne and Australia/Sydney.
    allDay boolean
    endTime string
    End time of schedule in the format HH:MM. Eg : 13:15 for 01:15pm
    startTime string
    Start time of schedule in the format HH:MM. Eg : 13:15 for 01:15pm
    days Sequence[str]
    List of days on which the schedule needs to be active. Valid values are SUN, MON, TUE, WED, THU, FRI and SAT.
    time_zone str
    Time zone in which the schedule needs to be executed. Example Valid values: UTC, America/NewYork, Europe/London, Asia/Kolkata, Asia/Tokyo, Asia/HongKong, Asia/Singapore, Australia/Melbourne and Australia/Sydney.
    all_day bool
    end_time str
    End time of schedule in the format HH:MM. Eg : 13:15 for 01:15pm
    start_time str
    Start time of schedule in the format HH:MM. Eg : 13:15 for 01:15pm
    days List<String>
    List of days on which the schedule needs to be active. Valid values are SUN, MON, TUE, WED, THU, FRI and SAT.
    timeZone String
    Time zone in which the schedule needs to be executed. Example Valid values: UTC, America/NewYork, Europe/London, Asia/Kolkata, Asia/Tokyo, Asia/HongKong, Asia/Singapore, Australia/Melbourne and Australia/Sydney.
    allDay Boolean
    endTime String
    End time of schedule in the format HH:MM. Eg : 13:15 for 01:15pm
    startTime String
    Start time of schedule in the format HH:MM. Eg : 13:15 for 01:15pm

    Package Details

    Repository
    harness pulumi/pulumi-harness
    License
    Apache-2.0
    Notes
    This Pulumi package is based on the harness Terraform Provider.
    harness logo
    Harness v0.8.4 published on Thursday, Sep 11, 2025 by Pulumi