1. Packages
  2. Opentelekomcloud Provider
  3. API Docs
  4. MrsClusterV1
opentelekomcloud 1.36.37 published on Thursday, Apr 24, 2025 by opentelekomcloud

opentelekomcloud.MrsClusterV1

Explore with Pulumi AI

opentelekomcloud logo
opentelekomcloud 1.36.37 published on Thursday, Apr 24, 2025 by opentelekomcloud

    An up-to-date reference of the API arguments for an MRS cluster is available at the documentation portal

    Manages resource cluster within OpenTelekomCloud MRS.

    Example Usage

    import * as pulumi from "@pulumi/pulumi";
    import * as opentelekomcloud from "@pulumi/opentelekomcloud";
    
    const _this = new opentelekomcloud.MrsClusterV1("this", {
        clusterName: "mrs-cluster",
        billingType: 12,
        masterNodeNum: 2,
        coreNodeNum: 3,
        masterNodeSize: "c3.xlarge.4.linux.mrs",
        coreNodeSize: "c3.xlarge.4.linux.mrs",
        availableZoneId: _var.az,
        vpcId: _var.vpc_id,
        subnetId: _var.network_id,
        clusterVersion: "MRS 2.1.0",
        volumeType: "SATA",
        volumeSize: 100,
        clusterType: 0,
        safeMode: 1,
        nodePublicCertName: "%s",
        clusterAdminSecret: "Qwerty!123",
        componentLists: [
            {
                componentName: "Presto",
            },
            {
                componentName: "Hadoop",
            },
            {
                componentName: "Spark",
            },
            {
                componentName: "HBase",
            },
            {
                componentName: "Hive",
            },
            {
                componentName: "Hue",
            },
            {
                componentName: "Loader",
            },
            {
                componentName: "Tez",
            },
            {
                componentName: "Flink",
            },
        ],
        bootstrapScripts: [{
            name: "Modify os config",
            uri: "s3a://bootstrap/modify_os_config.sh",
            parameters: "param1 param2",
            nodes: [
                "master",
                "core",
                "task",
            ],
            activeMaster: true,
            beforeComponentStart: true,
            failAction: "continue",
        }],
        tags: {
            foo: "bar",
            key: "value",
        },
    });
    
    import pulumi
    import pulumi_opentelekomcloud as opentelekomcloud
    
    this = opentelekomcloud.MrsClusterV1("this",
        cluster_name="mrs-cluster",
        billing_type=12,
        master_node_num=2,
        core_node_num=3,
        master_node_size="c3.xlarge.4.linux.mrs",
        core_node_size="c3.xlarge.4.linux.mrs",
        available_zone_id=var["az"],
        vpc_id=var["vpc_id"],
        subnet_id=var["network_id"],
        cluster_version="MRS 2.1.0",
        volume_type="SATA",
        volume_size=100,
        cluster_type=0,
        safe_mode=1,
        node_public_cert_name="%s",
        cluster_admin_secret="Qwerty!123",
        component_lists=[
            {
                "component_name": "Presto",
            },
            {
                "component_name": "Hadoop",
            },
            {
                "component_name": "Spark",
            },
            {
                "component_name": "HBase",
            },
            {
                "component_name": "Hive",
            },
            {
                "component_name": "Hue",
            },
            {
                "component_name": "Loader",
            },
            {
                "component_name": "Tez",
            },
            {
                "component_name": "Flink",
            },
        ],
        bootstrap_scripts=[{
            "name": "Modify os config",
            "uri": "s3a://bootstrap/modify_os_config.sh",
            "parameters": "param1 param2",
            "nodes": [
                "master",
                "core",
                "task",
            ],
            "active_master": True,
            "before_component_start": True,
            "fail_action": "continue",
        }],
        tags={
            "foo": "bar",
            "key": "value",
        })
    
    package main
    
    import (
    	"github.com/pulumi/pulumi-terraform-provider/sdks/go/opentelekomcloud/opentelekomcloud"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		_, err := opentelekomcloud.NewMrsClusterV1(ctx, "this", &opentelekomcloud.MrsClusterV1Args{
    			ClusterName:        pulumi.String("mrs-cluster"),
    			BillingType:        pulumi.Float64(12),
    			MasterNodeNum:      pulumi.Float64(2),
    			CoreNodeNum:        pulumi.Float64(3),
    			MasterNodeSize:     pulumi.String("c3.xlarge.4.linux.mrs"),
    			CoreNodeSize:       pulumi.String("c3.xlarge.4.linux.mrs"),
    			AvailableZoneId:    pulumi.Any(_var.Az),
    			VpcId:              pulumi.Any(_var.Vpc_id),
    			SubnetId:           pulumi.Any(_var.Network_id),
    			ClusterVersion:     pulumi.String("MRS 2.1.0"),
    			VolumeType:         pulumi.String("SATA"),
    			VolumeSize:         pulumi.Float64(100),
    			ClusterType:        pulumi.Float64(0),
    			SafeMode:           pulumi.Float64(1),
    			NodePublicCertName: pulumi.String("%s"),
    			ClusterAdminSecret: pulumi.String("Qwerty!123"),
    			ComponentLists: opentelekomcloud.MrsClusterV1ComponentListArray{
    				&opentelekomcloud.MrsClusterV1ComponentListArgs{
    					ComponentName: pulumi.String("Presto"),
    				},
    				&opentelekomcloud.MrsClusterV1ComponentListArgs{
    					ComponentName: pulumi.String("Hadoop"),
    				},
    				&opentelekomcloud.MrsClusterV1ComponentListArgs{
    					ComponentName: pulumi.String("Spark"),
    				},
    				&opentelekomcloud.MrsClusterV1ComponentListArgs{
    					ComponentName: pulumi.String("HBase"),
    				},
    				&opentelekomcloud.MrsClusterV1ComponentListArgs{
    					ComponentName: pulumi.String("Hive"),
    				},
    				&opentelekomcloud.MrsClusterV1ComponentListArgs{
    					ComponentName: pulumi.String("Hue"),
    				},
    				&opentelekomcloud.MrsClusterV1ComponentListArgs{
    					ComponentName: pulumi.String("Loader"),
    				},
    				&opentelekomcloud.MrsClusterV1ComponentListArgs{
    					ComponentName: pulumi.String("Tez"),
    				},
    				&opentelekomcloud.MrsClusterV1ComponentListArgs{
    					ComponentName: pulumi.String("Flink"),
    				},
    			},
    			BootstrapScripts: opentelekomcloud.MrsClusterV1BootstrapScriptArray{
    				&opentelekomcloud.MrsClusterV1BootstrapScriptArgs{
    					Name:       pulumi.String("Modify os config"),
    					Uri:        pulumi.String("s3a://bootstrap/modify_os_config.sh"),
    					Parameters: pulumi.String("param1 param2"),
    					Nodes: pulumi.StringArray{
    						pulumi.String("master"),
    						pulumi.String("core"),
    						pulumi.String("task"),
    					},
    					ActiveMaster:         pulumi.Bool(true),
    					BeforeComponentStart: pulumi.Bool(true),
    					FailAction:           pulumi.String("continue"),
    				},
    			},
    			Tags: pulumi.StringMap{
    				"foo": pulumi.String("bar"),
    				"key": pulumi.String("value"),
    			},
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using Opentelekomcloud = Pulumi.Opentelekomcloud;
    
    return await Deployment.RunAsync(() => 
    {
        var @this = new Opentelekomcloud.MrsClusterV1("this", new()
        {
            ClusterName = "mrs-cluster",
            BillingType = 12,
            MasterNodeNum = 2,
            CoreNodeNum = 3,
            MasterNodeSize = "c3.xlarge.4.linux.mrs",
            CoreNodeSize = "c3.xlarge.4.linux.mrs",
            AvailableZoneId = @var.Az,
            VpcId = @var.Vpc_id,
            SubnetId = @var.Network_id,
            ClusterVersion = "MRS 2.1.0",
            VolumeType = "SATA",
            VolumeSize = 100,
            ClusterType = 0,
            SafeMode = 1,
            NodePublicCertName = "%s",
            ClusterAdminSecret = "Qwerty!123",
            ComponentLists = new[]
            {
                new Opentelekomcloud.Inputs.MrsClusterV1ComponentListArgs
                {
                    ComponentName = "Presto",
                },
                new Opentelekomcloud.Inputs.MrsClusterV1ComponentListArgs
                {
                    ComponentName = "Hadoop",
                },
                new Opentelekomcloud.Inputs.MrsClusterV1ComponentListArgs
                {
                    ComponentName = "Spark",
                },
                new Opentelekomcloud.Inputs.MrsClusterV1ComponentListArgs
                {
                    ComponentName = "HBase",
                },
                new Opentelekomcloud.Inputs.MrsClusterV1ComponentListArgs
                {
                    ComponentName = "Hive",
                },
                new Opentelekomcloud.Inputs.MrsClusterV1ComponentListArgs
                {
                    ComponentName = "Hue",
                },
                new Opentelekomcloud.Inputs.MrsClusterV1ComponentListArgs
                {
                    ComponentName = "Loader",
                },
                new Opentelekomcloud.Inputs.MrsClusterV1ComponentListArgs
                {
                    ComponentName = "Tez",
                },
                new Opentelekomcloud.Inputs.MrsClusterV1ComponentListArgs
                {
                    ComponentName = "Flink",
                },
            },
            BootstrapScripts = new[]
            {
                new Opentelekomcloud.Inputs.MrsClusterV1BootstrapScriptArgs
                {
                    Name = "Modify os config",
                    Uri = "s3a://bootstrap/modify_os_config.sh",
                    Parameters = "param1 param2",
                    Nodes = new[]
                    {
                        "master",
                        "core",
                        "task",
                    },
                    ActiveMaster = true,
                    BeforeComponentStart = true,
                    FailAction = "continue",
                },
            },
            Tags = 
            {
                { "foo", "bar" },
                { "key", "value" },
            },
        });
    
    });
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.opentelekomcloud.MrsClusterV1;
    import com.pulumi.opentelekomcloud.MrsClusterV1Args;
    import com.pulumi.opentelekomcloud.inputs.MrsClusterV1ComponentListArgs;
    import com.pulumi.opentelekomcloud.inputs.MrsClusterV1BootstrapScriptArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            var this_ = new MrsClusterV1("this", MrsClusterV1Args.builder()
                .clusterName("mrs-cluster")
                .billingType(12)
                .masterNodeNum(2)
                .coreNodeNum(3)
                .masterNodeSize("c3.xlarge.4.linux.mrs")
                .coreNodeSize("c3.xlarge.4.linux.mrs")
                .availableZoneId(var_.az())
                .vpcId(var_.vpc_id())
                .subnetId(var_.network_id())
                .clusterVersion("MRS 2.1.0")
                .volumeType("SATA")
                .volumeSize(100)
                .clusterType(0)
                .safeMode(1)
                .nodePublicCertName("%s")
                .clusterAdminSecret("Qwerty!123")
                .componentLists(            
                    MrsClusterV1ComponentListArgs.builder()
                        .componentName("Presto")
                        .build(),
                    MrsClusterV1ComponentListArgs.builder()
                        .componentName("Hadoop")
                        .build(),
                    MrsClusterV1ComponentListArgs.builder()
                        .componentName("Spark")
                        .build(),
                    MrsClusterV1ComponentListArgs.builder()
                        .componentName("HBase")
                        .build(),
                    MrsClusterV1ComponentListArgs.builder()
                        .componentName("Hive")
                        .build(),
                    MrsClusterV1ComponentListArgs.builder()
                        .componentName("Hue")
                        .build(),
                    MrsClusterV1ComponentListArgs.builder()
                        .componentName("Loader")
                        .build(),
                    MrsClusterV1ComponentListArgs.builder()
                        .componentName("Tez")
                        .build(),
                    MrsClusterV1ComponentListArgs.builder()
                        .componentName("Flink")
                        .build())
                .bootstrapScripts(MrsClusterV1BootstrapScriptArgs.builder()
                    .name("Modify os config")
                    .uri("s3a://bootstrap/modify_os_config.sh")
                    .parameters("param1 param2")
                    .nodes(                
                        "master",
                        "core",
                        "task")
                    .activeMaster(true)
                    .beforeComponentStart(true)
                    .failAction("continue")
                    .build())
                .tags(Map.ofEntries(
                    Map.entry("foo", "bar"),
                    Map.entry("key", "value")
                ))
                .build());
    
        }
    }
    
    resources:
      this:
        type: opentelekomcloud:MrsClusterV1
        properties:
          clusterName: mrs-cluster
          billingType: 12
          masterNodeNum: 2
          coreNodeNum: 3
          masterNodeSize: c3.xlarge.4.linux.mrs
          coreNodeSize: c3.xlarge.4.linux.mrs
          availableZoneId: ${var.az}
          vpcId: ${var.vpc_id}
          subnetId: ${var.network_id}
          clusterVersion: MRS 2.1.0
          volumeType: SATA
          volumeSize: 100
          clusterType: 0
          safeMode: 1
          nodePublicCertName: '%s'
          clusterAdminSecret: Qwerty!123
          componentLists:
            - componentName: Presto
            - componentName: Hadoop
            - componentName: Spark
            - componentName: HBase
            - componentName: Hive
            - componentName: Hue
            - componentName: Loader
            - componentName: Tez
            - componentName: Flink
          bootstrapScripts:
            - name: Modify os config
              uri: s3a://bootstrap/modify_os_config.sh
              parameters: param1 param2
              nodes:
                - master
                - core
                - task
              activeMaster: true
              beforeComponentStart: true
              failAction: continue
          tags:
            foo: bar
            key: value
    

    Create MrsClusterV1 Resource

    Resources are created with functions called constructors. To learn more about declaring and configuring resources, see Resources.

    Constructor syntax

    new MrsClusterV1(name: string, args: MrsClusterV1Args, opts?: CustomResourceOptions);
    @overload
    def MrsClusterV1(resource_name: str,
                     args: MrsClusterV1Args,
                     opts: Optional[ResourceOptions] = None)
    
    @overload
    def MrsClusterV1(resource_name: str,
                     opts: Optional[ResourceOptions] = None,
                     node_public_cert_name: Optional[str] = None,
                     core_node_size: Optional[str] = None,
                     billing_type: Optional[float] = None,
                     vpc_id: Optional[str] = None,
                     subnet_id: Optional[str] = None,
                     cluster_name: Optional[str] = None,
                     safe_mode: Optional[float] = None,
                     cluster_version: Optional[str] = None,
                     component_lists: Optional[Sequence[MrsClusterV1ComponentListArgs]] = None,
                     master_node_size: Optional[str] = None,
                     master_node_num: Optional[float] = None,
                     available_zone_id: Optional[str] = None,
                     core_node_num: Optional[float] = None,
                     mrs_cluster_v1_id: Optional[str] = None,
                     master_data_volume_size: Optional[float] = None,
                     log_collection: Optional[float] = None,
                     region: Optional[str] = None,
                     master_data_volume_type: Optional[str] = None,
                     core_data_volume_size: Optional[float] = None,
                     core_data_volume_count: Optional[float] = None,
                     master_data_volume_count: Optional[float] = None,
                     add_jobs: Optional[Sequence[MrsClusterV1AddJobArgs]] = None,
                     core_data_volume_type: Optional[str] = None,
                     cluster_type: Optional[float] = None,
                     cluster_admin_secret: Optional[str] = None,
                     tags: Optional[Mapping[str, str]] = None,
                     timeouts: Optional[MrsClusterV1TimeoutsArgs] = None,
                     volume_size: Optional[float] = None,
                     volume_type: Optional[str] = None,
                     bootstrap_scripts: Optional[Sequence[MrsClusterV1BootstrapScriptArgs]] = None)
    func NewMrsClusterV1(ctx *Context, name string, args MrsClusterV1Args, opts ...ResourceOption) (*MrsClusterV1, error)
    public MrsClusterV1(string name, MrsClusterV1Args args, CustomResourceOptions? opts = null)
    public MrsClusterV1(String name, MrsClusterV1Args args)
    public MrsClusterV1(String name, MrsClusterV1Args args, CustomResourceOptions options)
    
    type: opentelekomcloud:MrsClusterV1
    properties: # The arguments to resource properties.
    options: # Bag of options to control resource's behavior.
    
    

    Parameters

    name string
    The unique name of the resource.
    args MrsClusterV1Args
    The arguments to resource properties.
    opts CustomResourceOptions
    Bag of options to control resource's behavior.
    resource_name str
    The unique name of the resource.
    args MrsClusterV1Args
    The arguments to resource properties.
    opts ResourceOptions
    Bag of options to control resource's behavior.
    ctx Context
    Context object for the current deployment.
    name string
    The unique name of the resource.
    args MrsClusterV1Args
    The arguments to resource properties.
    opts ResourceOption
    Bag of options to control resource's behavior.
    name string
    The unique name of the resource.
    args MrsClusterV1Args
    The arguments to resource properties.
    opts CustomResourceOptions
    Bag of options to control resource's behavior.
    name String
    The unique name of the resource.
    args MrsClusterV1Args
    The arguments to resource properties.
    options CustomResourceOptions
    Bag of options to control resource's behavior.

    Constructor example

    The following reference example uses placeholder values for all input properties.

    var mrsClusterV1Resource = new Opentelekomcloud.MrsClusterV1("mrsClusterV1Resource", new()
    {
        NodePublicCertName = "string",
        CoreNodeSize = "string",
        BillingType = 0,
        VpcId = "string",
        SubnetId = "string",
        ClusterName = "string",
        SafeMode = 0,
        ClusterVersion = "string",
        ComponentLists = new[]
        {
            new Opentelekomcloud.Inputs.MrsClusterV1ComponentListArgs
            {
                ComponentName = "string",
                ComponentDesc = "string",
                ComponentId = "string",
                ComponentVersion = "string",
            },
        },
        MasterNodeSize = "string",
        MasterNodeNum = 0,
        AvailableZoneId = "string",
        CoreNodeNum = 0,
        MrsClusterV1Id = "string",
        MasterDataVolumeSize = 0,
        LogCollection = 0,
        Region = "string",
        MasterDataVolumeType = "string",
        CoreDataVolumeSize = 0,
        CoreDataVolumeCount = 0,
        MasterDataVolumeCount = 0,
        AddJobs = new[]
        {
            new Opentelekomcloud.Inputs.MrsClusterV1AddJobArgs
            {
                JarPath = "string",
                JobName = "string",
                JobType = 0,
                SubmitJobOnceClusterRun = false,
                Arguments = "string",
                FileAction = "string",
                HiveScriptPath = "string",
                Hql = "string",
                Input = "string",
                JobLog = "string",
                Output = "string",
                ShutdownCluster = false,
            },
        },
        CoreDataVolumeType = "string",
        ClusterType = 0,
        ClusterAdminSecret = "string",
        Tags = 
        {
            { "string", "string" },
        },
        Timeouts = new Opentelekomcloud.Inputs.MrsClusterV1TimeoutsArgs
        {
            Create = "string",
            Delete = "string",
        },
        VolumeSize = 0,
        VolumeType = "string",
        BootstrapScripts = new[]
        {
            new Opentelekomcloud.Inputs.MrsClusterV1BootstrapScriptArgs
            {
                FailAction = "string",
                Name = "string",
                Nodes = new[]
                {
                    "string",
                },
                Uri = "string",
                ActiveMaster = false,
                BeforeComponentStart = false,
                Parameters = "string",
            },
        },
    });
    
    example, err := opentelekomcloud.NewMrsClusterV1(ctx, "mrsClusterV1Resource", &opentelekomcloud.MrsClusterV1Args{
    	NodePublicCertName: pulumi.String("string"),
    	CoreNodeSize:       pulumi.String("string"),
    	BillingType:        pulumi.Float64(0),
    	VpcId:              pulumi.String("string"),
    	SubnetId:           pulumi.String("string"),
    	ClusterName:        pulumi.String("string"),
    	SafeMode:           pulumi.Float64(0),
    	ClusterVersion:     pulumi.String("string"),
    	ComponentLists: opentelekomcloud.MrsClusterV1ComponentListArray{
    		&opentelekomcloud.MrsClusterV1ComponentListArgs{
    			ComponentName:    pulumi.String("string"),
    			ComponentDesc:    pulumi.String("string"),
    			ComponentId:      pulumi.String("string"),
    			ComponentVersion: pulumi.String("string"),
    		},
    	},
    	MasterNodeSize:        pulumi.String("string"),
    	MasterNodeNum:         pulumi.Float64(0),
    	AvailableZoneId:       pulumi.String("string"),
    	CoreNodeNum:           pulumi.Float64(0),
    	MrsClusterV1Id:        pulumi.String("string"),
    	MasterDataVolumeSize:  pulumi.Float64(0),
    	LogCollection:         pulumi.Float64(0),
    	Region:                pulumi.String("string"),
    	MasterDataVolumeType:  pulumi.String("string"),
    	CoreDataVolumeSize:    pulumi.Float64(0),
    	CoreDataVolumeCount:   pulumi.Float64(0),
    	MasterDataVolumeCount: pulumi.Float64(0),
    	AddJobs: opentelekomcloud.MrsClusterV1AddJobArray{
    		&opentelekomcloud.MrsClusterV1AddJobArgs{
    			JarPath:                 pulumi.String("string"),
    			JobName:                 pulumi.String("string"),
    			JobType:                 pulumi.Float64(0),
    			SubmitJobOnceClusterRun: pulumi.Bool(false),
    			Arguments:               pulumi.String("string"),
    			FileAction:              pulumi.String("string"),
    			HiveScriptPath:          pulumi.String("string"),
    			Hql:                     pulumi.String("string"),
    			Input:                   pulumi.String("string"),
    			JobLog:                  pulumi.String("string"),
    			Output:                  pulumi.String("string"),
    			ShutdownCluster:         pulumi.Bool(false),
    		},
    	},
    	CoreDataVolumeType: pulumi.String("string"),
    	ClusterType:        pulumi.Float64(0),
    	ClusterAdminSecret: pulumi.String("string"),
    	Tags: pulumi.StringMap{
    		"string": pulumi.String("string"),
    	},
    	Timeouts: &opentelekomcloud.MrsClusterV1TimeoutsArgs{
    		Create: pulumi.String("string"),
    		Delete: pulumi.String("string"),
    	},
    	VolumeSize: pulumi.Float64(0),
    	VolumeType: pulumi.String("string"),
    	BootstrapScripts: opentelekomcloud.MrsClusterV1BootstrapScriptArray{
    		&opentelekomcloud.MrsClusterV1BootstrapScriptArgs{
    			FailAction: pulumi.String("string"),
    			Name:       pulumi.String("string"),
    			Nodes: pulumi.StringArray{
    				pulumi.String("string"),
    			},
    			Uri:                  pulumi.String("string"),
    			ActiveMaster:         pulumi.Bool(false),
    			BeforeComponentStart: pulumi.Bool(false),
    			Parameters:           pulumi.String("string"),
    		},
    	},
    })
    
    var mrsClusterV1Resource = new MrsClusterV1("mrsClusterV1Resource", MrsClusterV1Args.builder()
        .nodePublicCertName("string")
        .coreNodeSize("string")
        .billingType(0)
        .vpcId("string")
        .subnetId("string")
        .clusterName("string")
        .safeMode(0)
        .clusterVersion("string")
        .componentLists(MrsClusterV1ComponentListArgs.builder()
            .componentName("string")
            .componentDesc("string")
            .componentId("string")
            .componentVersion("string")
            .build())
        .masterNodeSize("string")
        .masterNodeNum(0)
        .availableZoneId("string")
        .coreNodeNum(0)
        .mrsClusterV1Id("string")
        .masterDataVolumeSize(0)
        .logCollection(0)
        .region("string")
        .masterDataVolumeType("string")
        .coreDataVolumeSize(0)
        .coreDataVolumeCount(0)
        .masterDataVolumeCount(0)
        .addJobs(MrsClusterV1AddJobArgs.builder()
            .jarPath("string")
            .jobName("string")
            .jobType(0)
            .submitJobOnceClusterRun(false)
            .arguments("string")
            .fileAction("string")
            .hiveScriptPath("string")
            .hql("string")
            .input("string")
            .jobLog("string")
            .output("string")
            .shutdownCluster(false)
            .build())
        .coreDataVolumeType("string")
        .clusterType(0)
        .clusterAdminSecret("string")
        .tags(Map.of("string", "string"))
        .timeouts(MrsClusterV1TimeoutsArgs.builder()
            .create("string")
            .delete("string")
            .build())
        .volumeSize(0)
        .volumeType("string")
        .bootstrapScripts(MrsClusterV1BootstrapScriptArgs.builder()
            .failAction("string")
            .name("string")
            .nodes("string")
            .uri("string")
            .activeMaster(false)
            .beforeComponentStart(false)
            .parameters("string")
            .build())
        .build());
    
    mrs_cluster_v1_resource = opentelekomcloud.MrsClusterV1("mrsClusterV1Resource",
        node_public_cert_name="string",
        core_node_size="string",
        billing_type=0,
        vpc_id="string",
        subnet_id="string",
        cluster_name="string",
        safe_mode=0,
        cluster_version="string",
        component_lists=[{
            "component_name": "string",
            "component_desc": "string",
            "component_id": "string",
            "component_version": "string",
        }],
        master_node_size="string",
        master_node_num=0,
        available_zone_id="string",
        core_node_num=0,
        mrs_cluster_v1_id="string",
        master_data_volume_size=0,
        log_collection=0,
        region="string",
        master_data_volume_type="string",
        core_data_volume_size=0,
        core_data_volume_count=0,
        master_data_volume_count=0,
        add_jobs=[{
            "jar_path": "string",
            "job_name": "string",
            "job_type": 0,
            "submit_job_once_cluster_run": False,
            "arguments": "string",
            "file_action": "string",
            "hive_script_path": "string",
            "hql": "string",
            "input": "string",
            "job_log": "string",
            "output": "string",
            "shutdown_cluster": False,
        }],
        core_data_volume_type="string",
        cluster_type=0,
        cluster_admin_secret="string",
        tags={
            "string": "string",
        },
        timeouts={
            "create": "string",
            "delete": "string",
        },
        volume_size=0,
        volume_type="string",
        bootstrap_scripts=[{
            "fail_action": "string",
            "name": "string",
            "nodes": ["string"],
            "uri": "string",
            "active_master": False,
            "before_component_start": False,
            "parameters": "string",
        }])
    
    const mrsClusterV1Resource = new opentelekomcloud.MrsClusterV1("mrsClusterV1Resource", {
        nodePublicCertName: "string",
        coreNodeSize: "string",
        billingType: 0,
        vpcId: "string",
        subnetId: "string",
        clusterName: "string",
        safeMode: 0,
        clusterVersion: "string",
        componentLists: [{
            componentName: "string",
            componentDesc: "string",
            componentId: "string",
            componentVersion: "string",
        }],
        masterNodeSize: "string",
        masterNodeNum: 0,
        availableZoneId: "string",
        coreNodeNum: 0,
        mrsClusterV1Id: "string",
        masterDataVolumeSize: 0,
        logCollection: 0,
        region: "string",
        masterDataVolumeType: "string",
        coreDataVolumeSize: 0,
        coreDataVolumeCount: 0,
        masterDataVolumeCount: 0,
        addJobs: [{
            jarPath: "string",
            jobName: "string",
            jobType: 0,
            submitJobOnceClusterRun: false,
            arguments: "string",
            fileAction: "string",
            hiveScriptPath: "string",
            hql: "string",
            input: "string",
            jobLog: "string",
            output: "string",
            shutdownCluster: false,
        }],
        coreDataVolumeType: "string",
        clusterType: 0,
        clusterAdminSecret: "string",
        tags: {
            string: "string",
        },
        timeouts: {
            create: "string",
            "delete": "string",
        },
        volumeSize: 0,
        volumeType: "string",
        bootstrapScripts: [{
            failAction: "string",
            name: "string",
            nodes: ["string"],
            uri: "string",
            activeMaster: false,
            beforeComponentStart: false,
            parameters: "string",
        }],
    });
    
    type: opentelekomcloud:MrsClusterV1
    properties:
        addJobs:
            - arguments: string
              fileAction: string
              hiveScriptPath: string
              hql: string
              input: string
              jarPath: string
              jobLog: string
              jobName: string
              jobType: 0
              output: string
              shutdownCluster: false
              submitJobOnceClusterRun: false
        availableZoneId: string
        billingType: 0
        bootstrapScripts:
            - activeMaster: false
              beforeComponentStart: false
              failAction: string
              name: string
              nodes:
                - string
              parameters: string
              uri: string
        clusterAdminSecret: string
        clusterName: string
        clusterType: 0
        clusterVersion: string
        componentLists:
            - componentDesc: string
              componentId: string
              componentName: string
              componentVersion: string
        coreDataVolumeCount: 0
        coreDataVolumeSize: 0
        coreDataVolumeType: string
        coreNodeNum: 0
        coreNodeSize: string
        logCollection: 0
        masterDataVolumeCount: 0
        masterDataVolumeSize: 0
        masterDataVolumeType: string
        masterNodeNum: 0
        masterNodeSize: string
        mrsClusterV1Id: string
        nodePublicCertName: string
        region: string
        safeMode: 0
        subnetId: string
        tags:
            string: string
        timeouts:
            create: string
            delete: string
        volumeSize: 0
        volumeType: string
        vpcId: string
    

    MrsClusterV1 Resource Properties

    To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.

    Inputs

    In Python, inputs that are objects can be passed either as argument classes or as dictionary literals.

    The MrsClusterV1 resource accepts the following input properties:

    AvailableZoneId string
    ID of an available zone. Obtain the value from Regions and Endpoints.
    BillingType double
    The value is 12, indicating on-demand payment.
    ClusterName string
    Cluster name, which is globally unique and contains only 1 to 64 letters, digits, hyphens (-), and underscores (_).
    ClusterVersion string
    Version of the cluster. Currently, MRS 1.6.3, MRS 1.7.0, MRS 1.9.2, MRS 2.1.0, and MRS 3.0.2 are supported. The latest version of MRS is used by default.
    ComponentLists List<MrsClusterV1ComponentList>
    Service component list.
    CoreNodeNum double
    Number of Core nodes. Value range: 1 to 500. A maximum of 500 Core nodes are supported by default. If more than 500 Core nodes are required, contact technical support engineers or invoke background APIs to modify the database.
    CoreNodeSize string
    Instance specification of a Core node. The configuration method of this parameter is identical to that of master_node_size.
    MasterNodeNum double
    Number of Master nodes.
    MasterNodeSize string
    Best match based on several years of commissioning experience. MRS supports specifications of hosts, and host specifications are determined by CPUs, memory, and disk space. Master nodes support h1.2xlarge.linux.mrs, h1.4xlarge.linux.mrs, h1.8xlarge.linux.mrs, s1.4xlarge.linux.mrs, and s1.8xlarge.linux.mrs. Core nodes of a streaming cluster support all specifications c2.2xlarge.linux.mrs, c2.4xlarge.linux.mrs, s1.xlarge.linux.mrs, s1.4xlarge.linux.mrs, s1.8xlarge.linux.mrs, d1.xlarge.linux.mrs, d1.2xlarge.linux.mrs, d1.4xlarge.linux.mrs, d1.8xlarge.linux.mrs, h1.2xlarge.linux.mrs, h1.4xlarge.linux.mrs and h1.8xlarge.linux.mrs. Task nodes support c2.2xlarge.linux.mrs, c2.4xlarge.linux.mrs, s1.xlarge.linux.mrs, s1.4xlarge.linux.mrs, s1.8xlarge.linux.mrs, h1.2xlarge.linux.mrs, h1.4xlarge.linux.mrs, and h1.8xlarge.linux.mrs.
    NodePublicCertName string
    Name of a key pair. You can use the key to log in to the Master node in the cluster.
    SafeMode double
    MRS cluster running mode. 0: common mode. The value indicates that the Kerberos authentication is disabled. Users can use all functions provided by the cluster. 1: safe mode. The value indicates that the Kerberos authentication is enabled. Common users cannot use the file management or job management functions of an MRS cluster and cannot view cluster resource usage or the job records of Hadoop and Spark. To use these functions, the users must obtain the relevant permissions from the MRS Manager administrator. The request has the cluster_admin_secret parameter only when safe_mode is set to 1.
    SubnetId string
    Subnet ID. Obtain the subnet ID from the management console as follows: Register an account and log in to the management console. Click Virtual Private Cloud and select Virtual Private Cloud from the left list. On the Virtual Private Cloud page, obtain the subnet ID from the list.
    VpcId string
    ID of the VPC where the subnet is located. Obtain the VPC ID from the management console as follows: Register an account and log in to the management console. Click Virtual Private Cloud and select Virtual Private Cloud from the left list. On the Virtual Private Cloud page, obtain the VPC ID from the list.
    AddJobs List<MrsClusterV1AddJob>
    You can submit a job when you create a cluster to save time and use MRS easily. Only one job can be added.
    BootstrapScripts List<MrsClusterV1BootstrapScript>
    Bootstrap action scripts. For details, see bootstrap_scripts block below.
    ClusterAdminSecret string
    Indicates the password of the MRS Manager administrator. The password must contain 8 to 32 characters. Must contain at least two types of the following: Lowercase letters, Uppercase letters, Digits, Special characters ~!@#$%^&*()-_=+\|[{}];:'",<.>/? and spaces.
    ClusterType double
    Type of clusters. 0: analysis cluster, 1: streaming cluster. The default value is 0.
    CoreDataVolumeCount double
    Number of data disks of the Core node. Value range: 1 to 10.
    CoreDataVolumeSize double
    Data disk size of the Core node. Value range: 100 GB to 32000 GB.
    CoreDataVolumeType string
    Data disk storage type of the Core node, supporting SATA, SAS and SSD. SATA: Common I/O, SAS: High I/O, SSD: Ultra-high I/O.
    LogCollection double
    Indicates whether logs are collected when cluster installation fails. 0: not collected. 1: collected. The default value is 0. If log_collection is set to 1, OBS buckets will be created to collect the MRS logs. These buckets will be charged.
    MasterDataVolumeCount double
    Number of data disks of the Master node. The value can be set to 1 only.
    MasterDataVolumeSize double
    Data disk size of the Master node. Value range: 100 GB to 32000 GB.
    MasterDataVolumeType string
    Data disk storage type of the Master node, supporting SATA, SAS and SSD. SATA: Common I/O, SAS: High I/O, SSD: Ultra-high I/O.
    MrsClusterV1Id string
    Region string
    Tags Dictionary<string, string>
    Tags key/value pairs to associate with the cluster.
    Timeouts MrsClusterV1Timeouts
    VolumeSize double
    Data disk storage space of a Core node. Users can add disks to expand storage capacity when creating a cluster. There are the following scenarios: Separation of data storage and computing: Data is stored in the OBS system. Costs of clusters are relatively low but computing performance is poor. The clusters can be deleted at any time. It is recommended when data computing is not frequently performed. Integration of data storage and computing: Data is stored in the HDFS system. Costs of clusters are relatively high but computing performance is good. The clusters cannot be deleted in a short term. It is recommended when data computing is frequently performed. Value range: 100 GB to 32000 GB.
    VolumeType string
    Type of disks. SATA, SAS, and SSD are supported. SATA: common I/O, SAS: High I/O, SSD: Ultra-high I/O.
    AvailableZoneId string
    ID of an available zone. Obtain the value from Regions and Endpoints.
    BillingType float64
    The value is 12, indicating on-demand payment.
    ClusterName string
    Cluster name, which is globally unique and contains only 1 to 64 letters, digits, hyphens (-), and underscores (_).
    ClusterVersion string
    Version of the clusters Currently, MRS 1.6.3, MRS 1.7.0, MRS 1.9.2, MRS 2.1.0, MRS 3.0.2 are supported. The latest version of MRS is used by default.
    ComponentLists []MrsClusterV1ComponentListArgs
    Service component list.
    CoreNodeNum float64
    Number of Core nodes Value range: 1 to 500. A maximum of 500 Core nodes are supported by default. If more than 500 Core nodes are required, contact technical support engineers or invoke background APIs to modify the database.
    CoreNodeSize string
    Instance specification of a Core node Configuration method of this parameter is identical to that of master_node_size.
    MasterNodeNum float64
    Number of Master nodes.
    MasterNodeSize string
    Best match based on several years of commissioning experience. MRS supports specifications of hosts, and host specifications are determined by CPUs, memory, and disks space. Master nodes support h1.2xlarge.linux.mrs h1.4xlarge.linux.mrs, h1.8xlarge.linux.mrs, s1.4xlarge.linux.mrs, and s1.8xlarge.linux.mrs. Core nodes of a streaming cluster support all specifications c2.2xlarge.linux.mrs, c2.4xlarge.linux.mrs, s1.xlarge.linux.mrs, s1.4xlarge.linux.mrs, s1.8xlarge.linux.mrs, d1.xlarge.linux.mrs, d1.2xlarge.linux.mrs, d1.4xlarge.linux.mrs, d1.8xlarge.linux.mrs, h1.2xlarge.linux.mrs, h1.4xlarge.linux.mrs and h1.8xlarge.linux.mrs. Task nodes support c2.2xlarge.linux.mrs, c2.4xlarge.linux.mrs, s1.xlarge.linux.mrs, s1.4xlarge.linux.mrs, s1.8xlarge.linux.mrs, h1.2xlarge.linux.mrs, h1.4xlarge.linux.mrs, and h1.8xlarge.linux.mrs.
    NodePublicCertName string
    Name of a key pair You can use a key to log in to the Master node in the cluster.
    SafeMode float64
    MRS cluster running mode 0: common mode. The value indicates that the Kerberos authentication is disabled. Users can use all functions provided by the cluster. 1: safe mode. The value indicates that the Kerberos authentication is enabled. Common users cannot use the file management or job management functions of an MRS cluster and cannot view cluster resource usage or the job records of Hadoop and Spark. To use these functions, the users must obtain the relevant permissions from the MRS Manager administrator. The request has the cluster_admin_secret parameter only when safe_mode is set to 1.
    SubnetId string
    Subnet ID Obtain the subnet ID from the management console as follows: Register an account and log in to the management console. Click Virtual Private Cloud and select Virtual Private Cloud from the left list. On the Virtual Private Cloud page, obtain the subnet ID from the list.
    VpcId string
    ID of the VPC where the subnet is located. Obtain the VPC ID from the management console as follows: Register an account and log in to the management console. Click Virtual Private Cloud and select Virtual Private Cloud from the left list. On the Virtual Private Cloud page, obtain the VPC ID from the list.
    AddJobs []MrsClusterV1AddJobArgs
    You can submit a job when you create a cluster to save time and use MRS easily. Only one job can be added.
    BootstrapScripts []MrsClusterV1BootstrapScriptArgs
    Bootstrap action scripts. For details, see bootstrap_scripts block below.
    ClusterAdminSecret string
    Indicates the password of the MRS Manager administrator. The password must contain 8 to 32 characters. Must contain at least two types of the following: Lowercase letters, Uppercase letters, Digits, Special characters ~!@#$%^&*()-_=+\|[{}];:'",<.>/? and spaces.
    ClusterType float64
    Type of clusters 0: analysis cluster, 1: streaming cluster The default value is 0.
    CoreDataVolumeCount float64
    Number of data disks of the Core node. Value range: 1 to 10.
    CoreDataVolumeSize float64
    Data disk size of the Core node. Value range: 100 GB to 32000 GB.
    CoreDataVolumeType string
    Data disk storage type of the Core node, supporting SATA, SAS and SSD. SATA: Common I/O, SAS: High I/O, SSD: Ultra-high I/O.
    LogCollection float64
    Indicates whether logs are collected when cluster installation fails. 0: not collected. 1: collected. The default value is 0. If log_collection is set to 1, OBS buckets will be created to collect the MRS logs. These buckets will be charged.
    MasterDataVolumeCount float64
    Number of data disks of the Master node. The value can be set to 1 only.
    MasterDataVolumeSize float64
    Data disk size of the Master node. Value range: 100 GB to 32000 GB.
    MasterDataVolumeType string
    Data disk storage type of the Master node, supporting SATA, SAS and SSD. SATA: Common I/O, SAS: High I/O, SSD: Ultra-high I/O.
    MrsClusterV1Id string
    Region string
    Tags map[string]string
    Tags key/value pairs to associate with the cluster.
    Timeouts MrsClusterV1TimeoutsArgs
    VolumeSize float64
    Data disk storage space of a Core node Users can add disks to expand storage capacity when creating a cluster. There are the following scenarios: Separation of data storage and computing: Data is stored in the OBS system. Costs of clusters are relatively low but computing performance is poor. The clusters can be deleted at any time. It is recommended when data computing is not frequently performed. Integration of data storage and computing: Data is stored in the HDFS system. Costs of clusters are relatively high but computing performance is good. The clusters cannot be deleted in a short term. It is recommended when data computing is frequently performed. Value range: 100 GB to 32000 GB.
    VolumeType string
    Type of disks SATA, SAS and SSD are supported. SATA: common I/O, SAS: High I/O, SSD: Ultra-high I/O.
    availableZoneId String
    ID of an available zone. Obtain the value from Regions and Endpoints.
    billingType Double
    The value is 12, indicating on-demand payment.
    clusterName String
    Cluster name, which is globally unique and contains only 1 to 64 letters, digits, hyphens (-), and underscores (_).
    clusterVersion String
    Version of the clusters Currently, MRS 1.6.3, MRS 1.7.0, MRS 1.9.2, MRS 2.1.0, MRS 3.0.2 are supported. The latest version of MRS is used by default.
    componentLists List<MrsClusterV1ComponentList>
    Service component list.
    coreNodeNum Double
    Number of Core nodes Value range: 1 to 500. A maximum of 500 Core nodes are supported by default. If more than 500 Core nodes are required, contact technical support engineers or invoke background APIs to modify the database.
    coreNodeSize String
    Instance specification of a Core node Configuration method of this parameter is identical to that of master_node_size.
    masterNodeNum Double
    Number of Master nodes.
    masterNodeSize String
    Best match based on several years of commissioning experience. MRS supports specifications of hosts, and host specifications are determined by CPUs, memory, and disks space. Master nodes support h1.2xlarge.linux.mrs h1.4xlarge.linux.mrs, h1.8xlarge.linux.mrs, s1.4xlarge.linux.mrs, and s1.8xlarge.linux.mrs. Core nodes of a streaming cluster support all specifications c2.2xlarge.linux.mrs, c2.4xlarge.linux.mrs, s1.xlarge.linux.mrs, s1.4xlarge.linux.mrs, s1.8xlarge.linux.mrs, d1.xlarge.linux.mrs, d1.2xlarge.linux.mrs, d1.4xlarge.linux.mrs, d1.8xlarge.linux.mrs, h1.2xlarge.linux.mrs, h1.4xlarge.linux.mrs and h1.8xlarge.linux.mrs. Task nodes support c2.2xlarge.linux.mrs, c2.4xlarge.linux.mrs, s1.xlarge.linux.mrs, s1.4xlarge.linux.mrs, s1.8xlarge.linux.mrs, h1.2xlarge.linux.mrs, h1.4xlarge.linux.mrs, and h1.8xlarge.linux.mrs.
    nodePublicCertName String
    Name of a key pair You can use a key to log in to the Master node in the cluster.
    safeMode Double
    MRS cluster running mode 0: common mode. The value indicates that the Kerberos authentication is disabled. Users can use all functions provided by the cluster. 1: safe mode. The value indicates that the Kerberos authentication is enabled. Common users cannot use the file management or job management functions of an MRS cluster and cannot view cluster resource usage or the job records of Hadoop and Spark. To use these functions, the users must obtain the relevant permissions from the MRS Manager administrator. The request has the cluster_admin_secret parameter only when safe_mode is set to 1.
    subnetId String
    Subnet ID Obtain the subnet ID from the management console as follows: Register an account and log in to the management console. Click Virtual Private Cloud and select Virtual Private Cloud from the left list. On the Virtual Private Cloud page, obtain the subnet ID from the list.
    vpcId String
    ID of the VPC where the subnet is located. Obtain the VPC ID from the management console as follows: Register an account and log in to the management console. Click Virtual Private Cloud and select Virtual Private Cloud from the left list. On the Virtual Private Cloud page, obtain the VPC ID from the list.
    addJobs List<MrsClusterV1AddJob>
    You can submit a job when you create a cluster to save time and use MRS easily. Only one job can be added.
    bootstrapScripts List<MrsClusterV1BootstrapScript>
    Bootstrap action scripts. For details, see bootstrap_scripts block below.
    clusterAdminSecret String
    Indicates the password of the MRS Manager administrator. The password must contain 8 to 32 characters. Must contain at least two types of the following: Lowercase letters, Uppercase letters, Digits, Special characters ~!@#$%^&*()-_=+\|[{}];:'",<.>/? and spaces.
    clusterType Double
    Type of clusters 0: analysis cluster, 1: streaming cluster The default value is 0.
    coreDataVolumeCount Double
    Number of data disks of the Core node. Value range: 1 to 10.
    coreDataVolumeSize Double
    Data disk size of the Core node. Value range: 100 GB to 32000 GB.
    coreDataVolumeType String
    Data disk storage type of the Core node, supporting SATA, SAS and SSD. SATA: Common I/O, SAS: High I/O, SSD: Ultra-high I/O.
    logCollection Double
    Indicates whether logs are collected when cluster installation fails. 0: not collected. 1: collected. The default value is 0. If log_collection is set to 1, OBS buckets will be created to collect the MRS logs. These buckets will be charged.
    masterDataVolumeCount Double
    Number of data disks of the Master node. The value can be set to 1 only.
    masterDataVolumeSize Double
    Data disk size of the Master node. Value range: 100 GB to 32000 GB.
    masterDataVolumeType String
    Data disk storage type of the Master node, supporting SATA, SAS and SSD. SATA: Common I/O, SAS: High I/O, SSD: Ultra-high I/O.
    mrsClusterV1Id String
    region String
    tags Map<String,String>
    Tags key/value pairs to associate with the cluster.
    timeouts MrsClusterV1Timeouts
    volumeSize Double
    Data disk storage space of a Core node Users can add disks to expand storage capacity when creating a cluster. There are the following scenarios: Separation of data storage and computing: Data is stored in the OBS system. Costs of clusters are relatively low but computing performance is poor. The clusters can be deleted at any time. It is recommended when data computing is not frequently performed. Integration of data storage and computing: Data is stored in the HDFS system. Costs of clusters are relatively high but computing performance is good. The clusters cannot be deleted in a short term. It is recommended when data computing is frequently performed. Value range: 100 GB to 32000 GB.
    volumeType String
    Type of disks SATA, SAS and SSD are supported. SATA: common I/O, SAS: High I/O, SSD: Ultra-high I/O.
    availableZoneId string
    ID of an available zone. Obtain the value from Regions and Endpoints.
    billingType number
    The value is 12, indicating on-demand payment.
    clusterName string
    Cluster name, which is globally unique and contains only 1 to 64 letters, digits, hyphens (-), and underscores (_).
    clusterVersion string
    Version of the clusters Currently, MRS 1.6.3, MRS 1.7.0, MRS 1.9.2, MRS 2.1.0, MRS 3.0.2 are supported. The latest version of MRS is used by default.
    componentLists MrsClusterV1ComponentList[]
    Service component list.
    coreNodeNum number
    Number of Core nodes Value range: 1 to 500. A maximum of 500 Core nodes are supported by default. If more than 500 Core nodes are required, contact technical support engineers or invoke background APIs to modify the database.
    coreNodeSize string
    Instance specification of a Core node Configuration method of this parameter is identical to that of master_node_size.
    masterNodeNum number
    Number of Master nodes.
    masterNodeSize string
    Best match based on several years of commissioning experience. MRS supports specifications of hosts, and host specifications are determined by CPUs, memory, and disks space. Master nodes support h1.2xlarge.linux.mrs h1.4xlarge.linux.mrs, h1.8xlarge.linux.mrs, s1.4xlarge.linux.mrs, and s1.8xlarge.linux.mrs. Core nodes of a streaming cluster support all specifications c2.2xlarge.linux.mrs, c2.4xlarge.linux.mrs, s1.xlarge.linux.mrs, s1.4xlarge.linux.mrs, s1.8xlarge.linux.mrs, d1.xlarge.linux.mrs, d1.2xlarge.linux.mrs, d1.4xlarge.linux.mrs, d1.8xlarge.linux.mrs, h1.2xlarge.linux.mrs, h1.4xlarge.linux.mrs and h1.8xlarge.linux.mrs. Task nodes support c2.2xlarge.linux.mrs, c2.4xlarge.linux.mrs, s1.xlarge.linux.mrs, s1.4xlarge.linux.mrs, s1.8xlarge.linux.mrs, h1.2xlarge.linux.mrs, h1.4xlarge.linux.mrs, and h1.8xlarge.linux.mrs.
    nodePublicCertName string
    Name of a key pair You can use a key to log in to the Master node in the cluster.
    safeMode number
    MRS cluster running mode 0: common mode. The value indicates that the Kerberos authentication is disabled. Users can use all functions provided by the cluster. 1: safe mode. The value indicates that the Kerberos authentication is enabled. Common users cannot use the file management or job management functions of an MRS cluster and cannot view cluster resource usage or the job records of Hadoop and Spark. To use these functions, the users must obtain the relevant permissions from the MRS Manager administrator. The request has the cluster_admin_secret parameter only when safe_mode is set to 1.
    subnetId string
    Subnet ID Obtain the subnet ID from the management console as follows: Register an account and log in to the management console. Click Virtual Private Cloud and select Virtual Private Cloud from the left list. On the Virtual Private Cloud page, obtain the subnet ID from the list.
    vpcId string
    ID of the VPC where the subnet is located. Obtain the VPC ID from the management console as follows: Register an account and log in to the management console. Click Virtual Private Cloud and select Virtual Private Cloud from the left list. On the Virtual Private Cloud page, obtain the VPC ID from the list.
    addJobs MrsClusterV1AddJob[]
    You can submit a job when you create a cluster to save time and use MRS easily. Only one job can be added.
    bootstrapScripts MrsClusterV1BootstrapScript[]
    Bootstrap action scripts. For details, see bootstrap_scripts block below.
    clusterAdminSecret string
    Indicates the password of the MRS Manager administrator. The password must contain 8 to 32 characters. Must contain at least two types of the following: Lowercase letters, Uppercase letters, Digits, Special characters ~!@#$%^&*()-_=+\|[{}];:'",<.>/? and spaces.
    clusterType number
    Type of clusters 0: analysis cluster, 1: streaming cluster The default value is 0.
    coreDataVolumeCount number
    Number of data disks of the Core node. Value range: 1 to 10.
    coreDataVolumeSize number
    Data disk size of the Core node. Value range: 100 GB to 32000 GB.
    coreDataVolumeType string
    Data disk storage type of the Core node, supporting SATA, SAS and SSD. SATA: Common I/O, SAS: High I/O, SSD: Ultra-high I/O.
    logCollection number
    Indicates whether logs are collected when cluster installation fails. 0: not collected. 1: collected. The default value is 0. If log_collection is set to 1, OBS buckets will be created to collect the MRS logs. These buckets will be charged.
    masterDataVolumeCount number
    Number of data disks of the Master node. The value can be set to 1 only.
    masterDataVolumeSize number
    Data disk size of the Master node. Value range: 100 GB to 32000 GB.
    masterDataVolumeType string
    Data disk storage type of the Master node, supporting SATA, SAS and SSD. SATA: Common I/O, SAS: High I/O, SSD: Ultra-high I/O.
    mrsClusterV1Id string
    region string
    tags {[key: string]: string}
    Tags key/value pairs to associate with the cluster.
    timeouts MrsClusterV1Timeouts
    volumeSize number
    Data disk storage space of a Core node Users can add disks to expand storage capacity when creating a cluster. There are the following scenarios: Separation of data storage and computing: Data is stored in the OBS system. Costs of clusters are relatively low but computing performance is poor. The clusters can be deleted at any time. It is recommended when data computing is not frequently performed. Integration of data storage and computing: Data is stored in the HDFS system. Costs of clusters are relatively high but computing performance is good. The clusters cannot be deleted in a short term. It is recommended when data computing is frequently performed. Value range: 100 GB to 32000 GB.
    volumeType string
    Type of disks SATA, SAS and SSD are supported. SATA: common I/O, SAS: High I/O, SSD: Ultra-high I/O.
    available_zone_id str
    ID of an available zone. Obtain the value from Regions and Endpoints.
    billing_type float
    The value is 12, indicating on-demand payment.
    cluster_name str
    Cluster name, which is globally unique and contains only 1 to 64 letters, digits, hyphens (-), and underscores (_).
    cluster_version str
    Version of the clusters Currently, MRS 1.6.3, MRS 1.7.0, MRS 1.9.2, MRS 2.1.0, MRS 3.0.2 are supported. The latest version of MRS is used by default.
    component_lists Sequence[MrsClusterV1ComponentListArgs]
    Service component list.
    core_node_num float
    Number of Core nodes Value range: 1 to 500. A maximum of 500 Core nodes are supported by default. If more than 500 Core nodes are required, contact technical support engineers or invoke background APIs to modify the database.
    core_node_size str
    Instance specification of a Core node Configuration method of this parameter is identical to that of master_node_size.
    master_node_num float
    Number of Master nodes.
    master_node_size str
    Best match based on several years of commissioning experience. MRS supports specifications of hosts, and host specifications are determined by CPUs, memory, and disks space. Master nodes support h1.2xlarge.linux.mrs h1.4xlarge.linux.mrs, h1.8xlarge.linux.mrs, s1.4xlarge.linux.mrs, and s1.8xlarge.linux.mrs. Core nodes of a streaming cluster support all specifications c2.2xlarge.linux.mrs, c2.4xlarge.linux.mrs, s1.xlarge.linux.mrs, s1.4xlarge.linux.mrs, s1.8xlarge.linux.mrs, d1.xlarge.linux.mrs, d1.2xlarge.linux.mrs, d1.4xlarge.linux.mrs, d1.8xlarge.linux.mrs, h1.2xlarge.linux.mrs, h1.4xlarge.linux.mrs and h1.8xlarge.linux.mrs. Task nodes support c2.2xlarge.linux.mrs, c2.4xlarge.linux.mrs, s1.xlarge.linux.mrs, s1.4xlarge.linux.mrs, s1.8xlarge.linux.mrs, h1.2xlarge.linux.mrs, h1.4xlarge.linux.mrs, and h1.8xlarge.linux.mrs.
    node_public_cert_name str
    Name of a key pair You can use a key to log in to the Master node in the cluster.
    safe_mode float
    MRS cluster running mode 0: common mode. The value indicates that the Kerberos authentication is disabled. Users can use all functions provided by the cluster. 1: safe mode. The value indicates that the Kerberos authentication is enabled. Common users cannot use the file management or job management functions of an MRS cluster and cannot view cluster resource usage or the job records of Hadoop and Spark. To use these functions, the users must obtain the relevant permissions from the MRS Manager administrator. The request has the cluster_admin_secret parameter only when safe_mode is set to 1.
    subnet_id str
    Subnet ID Obtain the subnet ID from the management console as follows: Register an account and log in to the management console. Click Virtual Private Cloud and select Virtual Private Cloud from the left list. On the Virtual Private Cloud page, obtain the subnet ID from the list.
    vpc_id str
    ID of the VPC where the subnet is located. Obtain the VPC ID from the management console as follows: Register an account and log in to the management console. Click Virtual Private Cloud and select Virtual Private Cloud from the left list. On the Virtual Private Cloud page, obtain the VPC ID from the list.
    add_jobs Sequence[MrsClusterV1AddJobArgs]
    You can submit a job when you create a cluster to save time and use MRS easily. Only one job can be added.
    bootstrap_scripts Sequence[MrsClusterV1BootstrapScriptArgs]
    Bootstrap action scripts. For details, see bootstrap_scripts block below.
    cluster_admin_secret str
    Indicates the password of the MRS Manager administrator. The password must contain 8 to 32 characters. Must contain at least two types of the following: Lowercase letters, Uppercase letters, Digits, Special characters ~!@#$%^&*()-_=+\|[{}];:'",<.>/? and spaces.
    cluster_type float
    Type of clusters 0: analysis cluster, 1: streaming cluster The default value is 0.
    core_data_volume_count float
    Number of data disks of the Core node. Value range: 1 to 10.
    core_data_volume_size float
    Data disk size of the Core node. Value range: 100 GB to 32000 GB.
    core_data_volume_type str
    Data disk storage type of the Core node, supporting SATA, SAS and SSD. SATA: Common I/O, SAS: High I/O, SSD: Ultra-high I/O.
    log_collection float
    Indicates whether logs are collected when cluster installation fails. 0: not collected. 1: collected. The default value is 0. If log_collection is set to 1, OBS buckets will be created to collect the MRS logs. These buckets will be charged.
    master_data_volume_count float
    Number of data disks of the Master node. The value can be set to 1 only.
    master_data_volume_size float
    Data disk size of the Master node. Value range: 100 GB to 32000 GB.
    master_data_volume_type str
    Data disk storage type of the Master node, supporting SATA, SAS and SSD. SATA: Common I/O, SAS: High I/O, SSD: Ultra-high I/O.
    mrs_cluster_v1_id str
    region str
    tags Mapping[str, str]
    Tags key/value pairs to associate with the cluster.
    timeouts MrsClusterV1TimeoutsArgs
    volume_size float
    Data disk storage space of a Core node Users can add disks to expand storage capacity when creating a cluster. There are the following scenarios: Separation of data storage and computing: Data is stored in the OBS system. Costs of clusters are relatively low but computing performance is poor. The clusters can be deleted at any time. It is recommended when data computing is not frequently performed. Integration of data storage and computing: Data is stored in the HDFS system. Costs of clusters are relatively high but computing performance is good. The clusters cannot be deleted in a short term. It is recommended when data computing is frequently performed. Value range: 100 GB to 32000 GB.
    volume_type str
    Type of disks SATA, SAS and SSD are supported. SATA: common I/O, SAS: High I/O, SSD: Ultra-high I/O.
    availableZoneId String
    ID of an available zone. Obtain the value from Regions and Endpoints.
    billingType Number
    The value is 12, indicating on-demand payment.
    clusterName String
    Cluster name, which is globally unique and contains only 1 to 64 letters, digits, hyphens (-), and underscores (_).
    clusterVersion String
    Version of the clusters Currently, MRS 1.6.3, MRS 1.7.0, MRS 1.9.2, MRS 2.1.0, MRS 3.0.2 are supported. The latest version of MRS is used by default.
    componentLists List<Property Map>
    Service component list.
    coreNodeNum Number
    Number of Core nodes Value range: 1 to 500. A maximum of 500 Core nodes are supported by default. If more than 500 Core nodes are required, contact technical support engineers or invoke background APIs to modify the database.
    coreNodeSize String
    Instance specification of a Core node. Configuration method of this parameter is identical to that of master_node_size.
    masterNodeNum Number
    Number of Master nodes.
    masterNodeSize String
    Best match based on several years of commissioning experience. MRS supports specifications of hosts, and host specifications are determined by CPUs, memory, and disk space. Master nodes support h1.2xlarge.linux.mrs, h1.4xlarge.linux.mrs, h1.8xlarge.linux.mrs, s1.4xlarge.linux.mrs, and s1.8xlarge.linux.mrs. Core nodes of a streaming cluster support all specifications c2.2xlarge.linux.mrs, c2.4xlarge.linux.mrs, s1.xlarge.linux.mrs, s1.4xlarge.linux.mrs, s1.8xlarge.linux.mrs, d1.xlarge.linux.mrs, d1.2xlarge.linux.mrs, d1.4xlarge.linux.mrs, d1.8xlarge.linux.mrs, h1.2xlarge.linux.mrs, h1.4xlarge.linux.mrs and h1.8xlarge.linux.mrs. Task nodes support c2.2xlarge.linux.mrs, c2.4xlarge.linux.mrs, s1.xlarge.linux.mrs, s1.4xlarge.linux.mrs, s1.8xlarge.linux.mrs, h1.2xlarge.linux.mrs, h1.4xlarge.linux.mrs, and h1.8xlarge.linux.mrs.
    nodePublicCertName String
    Name of a key pair. You can use a key to log in to the Master node in the cluster.
    safeMode Number
    MRS cluster running mode 0: common mode. The value indicates that the Kerberos authentication is disabled. Users can use all functions provided by the cluster. 1: safe mode. The value indicates that the Kerberos authentication is enabled. Common users cannot use the file management or job management functions of an MRS cluster and cannot view cluster resource usage or the job records of Hadoop and Spark. To use these functions, the users must obtain the relevant permissions from the MRS Manager administrator. The request has the cluster_admin_secret parameter only when safe_mode is set to 1.
    subnetId String
    Subnet ID. Obtain the subnet ID from the management console as follows: Register an account and log in to the management console. Click Virtual Private Cloud and select Virtual Private Cloud from the left list. On the Virtual Private Cloud page, obtain the subnet ID from the list.
    vpcId String
    ID of the VPC where the subnet is located. Obtain the VPC ID from the management console as follows: Register an account and log in to the management console. Click Virtual Private Cloud and select Virtual Private Cloud from the left list. On the Virtual Private Cloud page, obtain the VPC ID from the list.
    addJobs List<Property Map>
    You can submit a job when you create a cluster to save time and use MRS easily. Only one job can be added.
    bootstrapScripts List<Property Map>
    Bootstrap action scripts. For details, see bootstrap_scripts block below.
    clusterAdminSecret String
    Indicates the password of the MRS Manager administrator. The password must contain 8 to 32 characters. Must contain at least two types of the following: Lowercase letters, Uppercase letters, Digits, Special characters ~!@#$%^&*()-_=+\|[{}];:'",<.>/? and spaces.
    clusterType Number
    Type of clusters 0: analysis cluster, 1: streaming cluster The default value is 0.
    coreDataVolumeCount Number
    Number of data disks of the Core node. Value range: 1 to 10.
    coreDataVolumeSize Number
    Data disk size of the Core node. Value range: 100 GB to 32000 GB.
    coreDataVolumeType String
    Data disk storage type of the Core node, supporting SATA, SAS and SSD. SATA: Common I/O, SAS: High I/O, SSD: Ultra-high I/O.
    logCollection Number
    Indicates whether logs are collected when cluster installation fails. 0: not collected. 1: collected. The default value is 0. If log_collection is set to 1, OBS buckets will be created to collect the MRS logs. These buckets will be charged.
    masterDataVolumeCount Number
    Number of data disks of the Master node. The value can be set to 1 only.
    masterDataVolumeSize Number
    Data disk size of the Master node. Value range: 100 GB to 32000 GB.
    masterDataVolumeType String
    Data disk storage type of the Master node, supporting SATA, SAS and SSD. SATA: Common I/O, SAS: High I/O, SSD: Ultra-high I/O.
    mrsClusterV1Id String
    region String
    tags Map<String>
    Tags key/value pairs to associate with the cluster.
    timeouts Property Map
    volumeSize Number
    Data disk storage space of a Core node. Users can add disks to expand storage capacity when creating a cluster. There are the following scenarios: Separation of data storage and computing: Data is stored in the OBS system. Costs of clusters are relatively low but computing performance is poor. The clusters can be deleted at any time. It is recommended when data computing is not frequently performed. Integration of data storage and computing: Data is stored in the HDFS system. Costs of clusters are relatively high but computing performance is good. The clusters cannot be deleted in a short term. It is recommended when data computing is frequently performed. Value range: 100 GB to 32000 GB.
    volumeType String
    Type of disks. SATA, SAS and SSD are supported. SATA: common I/O, SAS: High I/O, SSD: Ultra-high I/O.

    Outputs

    All input properties are implicitly available as output properties. Additionally, the MrsClusterV1 resource produces the following output properties:

    AvailableZoneName string
    Name of an availability zone.
    ChargingStartTime string
    Time when charging starts.
    ClusterId string
    Cluster ID.
    ClusterState string
    Cluster status. Valid values (covering existing and historical clusters) include: starting, running, terminated, failed, abnormal, terminating, rebooting, shutdown, frozen, scaling-out, scaling-in, scaling-error.
    CoreNodeProductId string
    Product ID of a Core node.
    CoreNodeSpecId string
    Specification ID of a Core node.
    CreateAt string
    Cluster creation time.
    DeploymentId string
    Deployment ID of a cluster.
    ErrorInfo string
    Error information.
    ExternalAlternateIp string
    Backup external IP address.
    ExternalIp string
    External IP address.
    Fee string
    Cluster creation fee, which is automatically calculated.
    HadoopVersion string
    Hadoop version.
    Id string
    The provider-assigned unique ID for this managed resource.
    InstanceId string
    Instance ID.
    InternalIp string
    MasterNodeIp string
    IP address of a Master node.
    MasterNodeProductId string
    Product ID of a Master node.
    MasterNodeSpecId string
    Specification ID of a Master node.
    OrderId string
    Order ID for creating clusters.
    PrivateIpFirst string
    Primary private IP address.
    Remark string
    Remarks of a cluster.
    SecurityGroupsId string
    Security group ID.
    SlaveSecurityGroupsId string
    Standby security group ID.
    TenantId string
    Project ID.
    UpdateAt string
    Cluster update time.
    Vnc string
    URI address for remote login of the elastic cloud server.
    AvailableZoneName string
    Name of an availability zone.
    ChargingStartTime string
    Time when charging starts.
    ClusterId string
    Cluster ID.
    ClusterState string
    Cluster status. Valid values include: existing history: starting, running, terminated, failed, abnormal, terminating, rebooting, shutdown, frozen, scaling-out, scaling-in, scaling-error.
    CoreNodeProductId string
    Product ID of a Core node.
    CoreNodeSpecId string
    Specification ID of a Core node.
    CreateAt string
    Cluster creation time.
    DeploymentId string
    Deployment ID of a cluster.
    ErrorInfo string
    Error information.
    ExternalAlternateIp string
    Backup external IP address.
    ExternalIp string
    External IP address.
    Fee string
    Cluster creation fee, which is automatically calculated.
    HadoopVersion string
    Hadoop version.
    Id string
    The provider-assigned unique ID for this managed resource.
    InstanceId string
    Instance ID.
    InternalIp string
    MasterNodeIp string
    IP address of a Master node.
    MasterNodeProductId string
    Product ID of a Master node.
    MasterNodeSpecId string
    Specification ID of a Master node.
    OrderId string
    Order ID for creating clusters.
    PrivateIpFirst string
    Primary private IP address.
    Remark string
    Remarks of a cluster.
    SecurityGroupsId string
    Security group ID.
    SlaveSecurityGroupsId string
    Standby security group ID.
    TenantId string
    Project ID.
    UpdateAt string
    Cluster update time.
    Vnc string
    URI address for remote login of the elastic cloud server.
    availableZoneName String
    Name of an availability zone.
    chargingStartTime String
    Time when charging starts.
    clusterId String
    Cluster ID.
    clusterState String
    Cluster status. Valid values include: existing history: starting, running, terminated, failed, abnormal, terminating, rebooting, shutdown, frozen, scaling-out, scaling-in, scaling-error.
    coreNodeProductId String
    Product ID of a Core node.
    coreNodeSpecId String
    Specification ID of a Core node.
    createAt String
    Cluster creation time.
    deploymentId String
    Deployment ID of a cluster.
    errorInfo String
    Error information.
    externalAlternateIp String
    Backup external IP address.
    externalIp String
    External IP address.
    fee String
    Cluster creation fee, which is automatically calculated.
    hadoopVersion String
    Hadoop version.
    id String
    The provider-assigned unique ID for this managed resource.
    instanceId String
    Instance ID.
    internalIp String
    masterNodeIp String
    IP address of a Master node.
    masterNodeProductId String
    Product ID of a Master node.
    masterNodeSpecId String
    Specification ID of a Master node.
    orderId String
    Order ID for creating clusters.
    privateIpFirst String
    Primary private IP address.
    remark String
    Remarks of a cluster.
    securityGroupsId String
    Security group ID.
    slaveSecurityGroupsId String
    Standby security group ID.
    tenantId String
    Project ID.
    updateAt String
    Cluster update time.
    vnc String
    URI address for remote login of the elastic cloud server.
    availableZoneName string
    Name of an availability zone.
    chargingStartTime string
    Time when charging starts.
    clusterId string
    Cluster ID.
    clusterState string
    Cluster status. Valid values include: existing history: starting, running, terminated, failed, abnormal, terminating, rebooting, shutdown, frozen, scaling-out, scaling-in, scaling-error.
    coreNodeProductId string
    Product ID of a Core node.
    coreNodeSpecId string
    Specification ID of a Core node.
    createAt string
    Cluster creation time.
    deploymentId string
    Deployment ID of a cluster.
    errorInfo string
    Error information.
    externalAlternateIp string
    Backup external IP address.
    externalIp string
    External IP address.
    fee string
    Cluster creation fee, which is automatically calculated.
    hadoopVersion string
    Hadoop version.
    id string
    The provider-assigned unique ID for this managed resource.
    instanceId string
    Instance ID.
    internalIp string
    masterNodeIp string
    IP address of a Master node.
    masterNodeProductId string
    Product ID of a Master node.
    masterNodeSpecId string
    Specification ID of a Master node.
    orderId string
    Order ID for creating clusters.
    privateIpFirst string
    Primary private IP address.
    remark string
    Remarks of a cluster.
    securityGroupsId string
    Security group ID.
    slaveSecurityGroupsId string
    Standby security group ID.
    tenantId string
    Project ID.
    updateAt string
    Cluster update time.
    vnc string
    URI address for remote login of the elastic cloud server.
    available_zone_name str
    Name of an availability zone.
    charging_start_time str
    Time when charging starts.
    cluster_id str
    Cluster ID.
    cluster_state str
    Cluster status. Valid values include: existing history: starting, running, terminated, failed, abnormal, terminating, rebooting, shutdown, frozen, scaling-out, scaling-in, scaling-error.
    core_node_product_id str
    Product ID of a Core node.
    core_node_spec_id str
    Specification ID of a Core node.
    create_at str
    Cluster creation time.
    deployment_id str
    Deployment ID of a cluster.
    error_info str
    Error information.
    external_alternate_ip str
    Backup external IP address.
    external_ip str
    External IP address.
    fee str
    Cluster creation fee, which is automatically calculated.
    hadoop_version str
    Hadoop version.
    id str
    The provider-assigned unique ID for this managed resource.
    instance_id str
    Instance ID.
    internal_ip str
    master_node_ip str
    IP address of a Master node.
    master_node_product_id str
    Product ID of a Master node.
    master_node_spec_id str
    Specification ID of a Master node.
    order_id str
    Order ID for creating clusters.
    private_ip_first str
    Primary private IP address.
    remark str
    Remarks of a cluster.
    security_groups_id str
    Security group ID.
    slave_security_groups_id str
    Standby security group ID.
    tenant_id str
    Project ID.
    update_at str
    Cluster update time.
    vnc str
    URI address for remote login of the elastic cloud server.
    availableZoneName String
    Name of an availability zone.
    chargingStartTime String
    Time when charging starts.
    clusterId String
    Cluster ID.
    clusterState String
    Cluster status. Valid values include: existing history: starting, running, terminated, failed, abnormal, terminating, rebooting, shutdown, frozen, scaling-out, scaling-in, scaling-error.
    coreNodeProductId String
    Product ID of a Core node.
    coreNodeSpecId String
    Specification ID of a Core node.
    createAt String
    Cluster creation time.
    deploymentId String
    Deployment ID of a cluster.
    errorInfo String
    Error information.
    externalAlternateIp String
    Backup external IP address.
    externalIp String
    External IP address.
    fee String
    Cluster creation fee, which is automatically calculated.
    hadoopVersion String
    Hadoop version.
    id String
    The provider-assigned unique ID for this managed resource.
    instanceId String
    Instance ID.
    internalIp String
    masterNodeIp String
    IP address of a Master node.
    masterNodeProductId String
    Product ID of a Master node.
    masterNodeSpecId String
    Specification ID of a Master node.
    orderId String
    Order ID for creating clusters.
    privateIpFirst String
    Primary private IP address.
    remark String
    Remarks of a cluster.
    securityGroupsId String
    Security group ID.
    slaveSecurityGroupsId String
    Standby security group ID.
    tenantId String
    Project ID.
    updateAt String
    Cluster update time.
    vnc String
    URI address for remote login of the elastic cloud server.

    Look up Existing MrsClusterV1 Resource

    Get an existing MrsClusterV1 resource’s state with the given name, ID, and optional extra properties used to qualify the lookup.

    public static get(name: string, id: Input<ID>, state?: MrsClusterV1State, opts?: CustomResourceOptions): MrsClusterV1
    @staticmethod
    def get(resource_name: str,
            id: str,
            opts: Optional[ResourceOptions] = None,
            add_jobs: Optional[Sequence[MrsClusterV1AddJobArgs]] = None,
            available_zone_id: Optional[str] = None,
            available_zone_name: Optional[str] = None,
            billing_type: Optional[float] = None,
            bootstrap_scripts: Optional[Sequence[MrsClusterV1BootstrapScriptArgs]] = None,
            charging_start_time: Optional[str] = None,
            cluster_admin_secret: Optional[str] = None,
            cluster_id: Optional[str] = None,
            cluster_name: Optional[str] = None,
            cluster_state: Optional[str] = None,
            cluster_type: Optional[float] = None,
            cluster_version: Optional[str] = None,
            component_lists: Optional[Sequence[MrsClusterV1ComponentListArgs]] = None,
            core_data_volume_count: Optional[float] = None,
            core_data_volume_size: Optional[float] = None,
            core_data_volume_type: Optional[str] = None,
            core_node_num: Optional[float] = None,
            core_node_product_id: Optional[str] = None,
            core_node_size: Optional[str] = None,
            core_node_spec_id: Optional[str] = None,
            create_at: Optional[str] = None,
            deployment_id: Optional[str] = None,
            error_info: Optional[str] = None,
            external_alternate_ip: Optional[str] = None,
            external_ip: Optional[str] = None,
            fee: Optional[str] = None,
            hadoop_version: Optional[str] = None,
            instance_id: Optional[str] = None,
            internal_ip: Optional[str] = None,
            log_collection: Optional[float] = None,
            master_data_volume_count: Optional[float] = None,
            master_data_volume_size: Optional[float] = None,
            master_data_volume_type: Optional[str] = None,
            master_node_ip: Optional[str] = None,
            master_node_num: Optional[float] = None,
            master_node_product_id: Optional[str] = None,
            master_node_size: Optional[str] = None,
            master_node_spec_id: Optional[str] = None,
            mrs_cluster_v1_id: Optional[str] = None,
            node_public_cert_name: Optional[str] = None,
            order_id: Optional[str] = None,
            private_ip_first: Optional[str] = None,
            region: Optional[str] = None,
            remark: Optional[str] = None,
            safe_mode: Optional[float] = None,
            security_groups_id: Optional[str] = None,
            slave_security_groups_id: Optional[str] = None,
            subnet_id: Optional[str] = None,
            tags: Optional[Mapping[str, str]] = None,
            tenant_id: Optional[str] = None,
            timeouts: Optional[MrsClusterV1TimeoutsArgs] = None,
            update_at: Optional[str] = None,
            vnc: Optional[str] = None,
            volume_size: Optional[float] = None,
            volume_type: Optional[str] = None,
            vpc_id: Optional[str] = None) -> MrsClusterV1
    func GetMrsClusterV1(ctx *Context, name string, id IDInput, state *MrsClusterV1State, opts ...ResourceOption) (*MrsClusterV1, error)
    public static MrsClusterV1 Get(string name, Input<string> id, MrsClusterV1State? state, CustomResourceOptions? opts = null)
    public static MrsClusterV1 get(String name, Output<String> id, MrsClusterV1State state, CustomResourceOptions options)
    resources:  _:    type: opentelekomcloud:MrsClusterV1    get:      id: ${id}
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    resource_name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    The following state arguments are supported:
    AddJobs List<MrsClusterV1AddJob>
    You can submit a job when you create a cluster to save time and use MRS easily. Only one job can be added.
    AvailableZoneId string
    ID of an available zone. Obtain the value from Regions and Endpoints.
    AvailableZoneName string
    Name of an availability zone.
    BillingType double
    The value is 12, indicating on-demand payment.
    BootstrapScripts List<MrsClusterV1BootstrapScript>
    Bootstrap action scripts. For details, see bootstrap_scripts block below.
    ChargingStartTime string
    Time when charging starts.
    ClusterAdminSecret string
    Indicates the password of the MRS Manager administrator. The password must contain 8 to 32 characters. Must contain at least two types of the following: Lowercase letters, Uppercase letters, Digits, Special characters ~!@#$%^&*()-_=+\|[{}];:'",<.>/? and spaces.
    ClusterId string
    Cluster ID.
    ClusterName string
    Cluster name, which is globally unique and contains only 1 to 64 letters, digits, hyphens (-), and underscores (_).
    ClusterState string
    Cluster status. Valid values include: existing history: starting, running, terminated, failed, abnormal, terminating, rebooting, shutdown, frozen, scaling-out, scaling-in, scaling-error.
    ClusterType double
    Type of clusters 0: analysis cluster, 1: streaming cluster The default value is 0.
    ClusterVersion string
    Version of the clusters. Currently, MRS 1.6.3, MRS 1.7.0, MRS 1.9.2, MRS 2.1.0, MRS 3.0.2 are supported. The latest version of MRS is used by default.
    ComponentLists List<MrsClusterV1ComponentList>
    Service component list.
    CoreDataVolumeCount double
    Number of data disks of the Core node. Value range: 1 to 10.
    CoreDataVolumeSize double
    Data disk size of the Core node. Value range: 100 GB to 32000 GB.
    CoreDataVolumeType string
    Data disk storage type of the Core node, supporting SATA, SAS and SSD. SATA: Common I/O, SAS: High I/O, SSD: Ultra-high I/O.
    CoreNodeNum double
    Number of Core nodes. Value range: 1 to 500. A maximum of 500 Core nodes are supported by default. If more than 500 Core nodes are required, contact technical support engineers or invoke background APIs to modify the database.
    CoreNodeProductId string
    Product ID of a Core node.
    CoreNodeSize string
    Instance specification of a Core node Configuration method of this parameter is identical to that of master_node_size.
    CoreNodeSpecId string
    Specification ID of a Core node.
    CreateAt string
    Cluster creation time.
    DeploymentId string
    Deployment ID of a cluster.
    ErrorInfo string
    Error information.
    ExternalAlternateIp string
    Backup external IP address.
    ExternalIp string
    External IP address.
    Fee string
    Cluster creation fee, which is automatically calculated.
    HadoopVersion string
    Hadoop version.
    InstanceId string
    Instance ID.
    InternalIp string
    LogCollection double
    Indicates whether logs are collected when cluster installation fails. 0: not collected. 1: collected. The default value is 0. If log_collection is set to 1, OBS buckets will be created to collect the MRS logs. These buckets will be charged.
    MasterDataVolumeCount double
    Number of data disks of the Master node. The value can be set to 1 only.
    MasterDataVolumeSize double
    Data disk size of the Master node. Value range: 100 GB to 32000 GB.
    MasterDataVolumeType string
    Data disk storage type of the Master node, supporting SATA, SAS and SSD. SATA: Common I/O, SAS: High I/O, SSD: Ultra-high I/O.
    MasterNodeIp string
    IP address of a Master node.
    MasterNodeNum double
    Number of Master nodes.
    MasterNodeProductId string
    Product ID of a Master node.
    MasterNodeSize string
    Best match based on several years of commissioning experience. MRS supports specifications of hosts, and host specifications are determined by CPUs, memory, and disk space. Master nodes support h1.2xlarge.linux.mrs, h1.4xlarge.linux.mrs, h1.8xlarge.linux.mrs, s1.4xlarge.linux.mrs, and s1.8xlarge.linux.mrs. Core nodes of a streaming cluster support all specifications c2.2xlarge.linux.mrs, c2.4xlarge.linux.mrs, s1.xlarge.linux.mrs, s1.4xlarge.linux.mrs, s1.8xlarge.linux.mrs, d1.xlarge.linux.mrs, d1.2xlarge.linux.mrs, d1.4xlarge.linux.mrs, d1.8xlarge.linux.mrs, h1.2xlarge.linux.mrs, h1.4xlarge.linux.mrs and h1.8xlarge.linux.mrs. Task nodes support c2.2xlarge.linux.mrs, c2.4xlarge.linux.mrs, s1.xlarge.linux.mrs, s1.4xlarge.linux.mrs, s1.8xlarge.linux.mrs, h1.2xlarge.linux.mrs, h1.4xlarge.linux.mrs, and h1.8xlarge.linux.mrs.
    MasterNodeSpecId string
    Specification ID of a Master node.
    MrsClusterV1Id string
    NodePublicCertName string
    Name of a key pair. You can use a key to log in to the Master node in the cluster.
    OrderId string
    Order ID for creating clusters.
    PrivateIpFirst string
    Primary private IP address.
    Region string
    Remark string
    Remarks of a cluster.
    SafeMode double
    MRS cluster running mode 0: common mode. The value indicates that the Kerberos authentication is disabled. Users can use all functions provided by the cluster. 1: safe mode. The value indicates that the Kerberos authentication is enabled. Common users cannot use the file management or job management functions of an MRS cluster and cannot view cluster resource usage or the job records of Hadoop and Spark. To use these functions, the users must obtain the relevant permissions from the MRS Manager administrator. The request has the cluster_admin_secret parameter only when safe_mode is set to 1.
    SecurityGroupsId string
    Security group ID.
    SlaveSecurityGroupsId string
    Standby security group ID.
    SubnetId string
    Subnet ID. Obtain the subnet ID from the management console as follows: Register an account and log in to the management console. Click Virtual Private Cloud and select Virtual Private Cloud from the left list. On the Virtual Private Cloud page, obtain the subnet ID from the list.
    Tags Dictionary<string, string>
    Tags key/value pairs to associate with the cluster.
    TenantId string
    Project ID.
    Timeouts MrsClusterV1Timeouts
    UpdateAt string
    Cluster update time.
    Vnc string
    URI address for remote login of the elastic cloud server.
    VolumeSize double
    Data disk storage space of a Core node. Users can add disks to expand storage capacity when creating a cluster. There are the following scenarios: Separation of data storage and computing: Data is stored in the OBS system. Costs of clusters are relatively low but computing performance is poor. The clusters can be deleted at any time. It is recommended when data computing is not frequently performed. Integration of data storage and computing: Data is stored in the HDFS system. Costs of clusters are relatively high but computing performance is good. The clusters cannot be deleted in a short term. It is recommended when data computing is frequently performed. Value range: 100 GB to 32000 GB.
    VolumeType string
    Type of disks SATA, SAS and SSD are supported. SATA: common I/O, SAS: High I/O, SSD: Ultra-high I/O.
    VpcId string
    ID of the VPC where the subnet is located. Obtain the VPC ID from the management console as follows: Register an account and log in to the management console. Click Virtual Private Cloud and select Virtual Private Cloud from the left list. On the Virtual Private Cloud page, obtain the VPC ID from the list.
    AddJobs []MrsClusterV1AddJobArgs
    You can submit a job when you create a cluster to save time and use MRS easily. Only one job can be added.
    AvailableZoneId string
    ID of an available zone. Obtain the value from Regions and Endpoints.
    AvailableZoneName string
    Name of an availability zone.
    BillingType float64
    The value is 12, indicating on-demand payment.
    BootstrapScripts []MrsClusterV1BootstrapScriptArgs
    Bootstrap action scripts. For details, see bootstrap_scripts block below.
    ChargingStartTime string
    Time when charging starts.
    ClusterAdminSecret string
    Indicates the password of the MRS Manager administrator. The password must contain 8 to 32 characters. Must contain at least two types of the following: Lowercase letters, Uppercase letters, Digits, Special characters ~!@#$%^&*()-_=+\|[{}];:'",<.>/? and spaces.
    ClusterId string
    Cluster ID.
    ClusterName string
    Cluster name, which is globally unique and contains only 1 to 64 letters, digits, hyphens (-), and underscores (_).
    ClusterState string
    Cluster status. Valid values include: existing history: starting, running, terminated, failed, abnormal, terminating, rebooting, shutdown, frozen, scaling-out, scaling-in, scaling-error.
    ClusterType float64
    Type of clusters 0: analysis cluster, 1: streaming cluster The default value is 0.
    ClusterVersion string
    Version of the clusters Currently, MRS 1.6.3, MRS 1.7.0, MRS 1.9.2, MRS 2.1.0, MRS 3.0.2 are supported. The latest version of MRS is used by default.
    ComponentLists []MrsClusterV1ComponentListArgs
    Service component list.
    CoreDataVolumeCount float64
    Number of data disks of the Core node. Value range: 1 to 10.
    CoreDataVolumeSize float64
    Data disk size of the Core node. Value range: 100 GB to 32000 GB.
    CoreDataVolumeType string
    Data disk storage type of the Core node, supporting SATA, SAS and SSD. SATA: Common I/O, SAS: High I/O, SSD: Ultra-high I/O.
    CoreNodeNum float64
    Number of Core nodes Value range: 1 to 500. A maximum of 500 Core nodes are supported by default. If more than 500 Core nodes are required, contact technical support engineers or invoke background APIs to modify the database.
    CoreNodeProductId string
    Product ID of a Core node.
    CoreNodeSize string
    Instance specification of a Core node Configuration method of this parameter is identical to that of master_node_size.
    CoreNodeSpecId string
    Specification ID of a Core node.
    CreateAt string
    Cluster creation time.
    DeploymentId string
    Deployment ID of a cluster.
    ErrorInfo string
    Error information.
    ExternalAlternateIp string
    Backup external IP address.
    ExternalIp string
    External IP address.
    Fee string
    Cluster creation fee, which is automatically calculated.
    HadoopVersion string
    Hadoop version.
    InstanceId string
    Instance ID.
    InternalIp string
    LogCollection float64
    Indicates whether logs are collected when cluster installation fails. 0: not collected. 1: collected. The default value is 0. If log_collection is set to 1, OBS buckets will be created to collect the MRS logs. These buckets will be charged.
    MasterDataVolumeCount float64
    Number of data disks of the Master node. The value can be set to 1 only.
    MasterDataVolumeSize float64
    Data disk size of the Master node. Value range: 100 GB to 32000 GB.
    MasterDataVolumeType string
    Data disk storage type of the Master node, supporting SATA, SAS and SSD. SATA: Common I/O, SAS: High I/O, SSD: Ultra-high I/O.
    MasterNodeIp string
    IP address of a Master node.
    MasterNodeNum float64
    Number of Master nodes.
    MasterNodeProductId string
    Product ID of a Master node.
    MasterNodeSize string
    Best match based on several years of commissioning experience. MRS supports specifications of hosts, and host specifications are determined by CPUs, memory, and disks space. Master nodes support h1.2xlarge.linux.mrs h1.4xlarge.linux.mrs, h1.8xlarge.linux.mrs, s1.4xlarge.linux.mrs, and s1.8xlarge.linux.mrs. Core nodes of a streaming cluster support all specifications c2.2xlarge.linux.mrs, c2.4xlarge.linux.mrs, s1.xlarge.linux.mrs, s1.4xlarge.linux.mrs, s1.8xlarge.linux.mrs, d1.xlarge.linux.mrs, d1.2xlarge.linux.mrs, d1.4xlarge.linux.mrs, d1.8xlarge.linux.mrs, h1.2xlarge.linux.mrs, h1.4xlarge.linux.mrs and h1.8xlarge.linux.mrs. Task nodes support c2.2xlarge.linux.mrs, c2.4xlarge.linux.mrs, s1.xlarge.linux.mrs, s1.4xlarge.linux.mrs, s1.8xlarge.linux.mrs, h1.2xlarge.linux.mrs, h1.4xlarge.linux.mrs, and h1.8xlarge.linux.mrs.
    MasterNodeSpecId string
    Specification ID of a Master node.
    MrsClusterV1Id string
    NodePublicCertName string
    Name of a key pair. You can use a key to log in to the Master node in the cluster.
    OrderId string
    Order ID for creating clusters.
    PrivateIpFirst string
    Primary private IP address.
    Region string
    Remark string
    Remarks of a cluster.
    SafeMode float64
    MRS cluster running mode. 0: common mode. The value indicates that the Kerberos authentication is disabled. Users can use all functions provided by the cluster. 1: safe mode. The value indicates that the Kerberos authentication is enabled. Common users cannot use the file management or job management functions of an MRS cluster and cannot view cluster resource usage or the job records of Hadoop and Spark. To use these functions, the users must obtain the relevant permissions from the MRS Manager administrator. The request has the cluster_admin_secret parameter only when safe_mode is set to 1.
    SecurityGroupsId string
    Security group ID.
    SlaveSecurityGroupsId string
    Standby security group ID.
    SubnetId string
    Subnet ID. Obtain the subnet ID from the management console as follows: Register an account and log in to the management console. Click Virtual Private Cloud and select Virtual Private Cloud from the left list. On the Virtual Private Cloud page, obtain the subnet ID from the list.
    Tags map[string]string
    Tags key/value pairs to associate with the cluster.
    TenantId string
    Project ID.
    Timeouts MrsClusterV1TimeoutsArgs
    UpdateAt string
    Cluster update time.
    Vnc string
    URI address for remote login of the elastic cloud server.
    VolumeSize float64
    Data disk storage space of a Core node. Users can add disks to expand storage capacity when creating a cluster. There are the following scenarios: Separation of data storage and computing: Data is stored in the OBS system. Costs of clusters are relatively low but computing performance is poor. The clusters can be deleted at any time. It is recommended when data computing is not frequently performed. Integration of data storage and computing: Data is stored in the HDFS system. Costs of clusters are relatively high but computing performance is good. The clusters cannot be deleted in a short term. It is recommended when data computing is frequently performed. Value range: 100 GB to 32000 GB.
    VolumeType string
    Type of disks. SATA, SAS and SSD are supported. SATA: common I/O, SAS: High I/O, SSD: Ultra-high I/O.
    VpcId string
    ID of the VPC where the subnet is located. Obtain the VPC ID from the management console as follows: Register an account and log in to the management console. Click Virtual Private Cloud and select Virtual Private Cloud from the left list. On the Virtual Private Cloud page, obtain the VPC ID from the list.
    addJobs List<MrsClusterV1AddJob>
    You can submit a job when you create a cluster to save time and use MRS easily. Only one job can be added.
    availableZoneId String
    ID of an available zone. Obtain the value from Regions and Endpoints.
    availableZoneName String
    Name of an availability zone.
    billingType Double
    The value is 12, indicating on-demand payment.
    bootstrapScripts List<MrsClusterV1BootstrapScript>
    Bootstrap action scripts. For details, see bootstrap_scripts block below.
    chargingStartTime String
    Time when charging starts.
    clusterAdminSecret String
    Indicates the password of the MRS Manager administrator. The password must contain 8 to 32 characters. Must contain at least two types of the following: Lowercase letters, Uppercase letters, Digits, Special characters ~!@#$%^&*()-_=+\|[{}];:'",<.>/? and spaces.
    clusterId String
    Cluster ID.
    clusterName String
    Cluster name, which is globally unique and contains only 1 to 64 letters, digits, hyphens (-), and underscores (_).
    clusterState String
    Cluster status. Valid values include: existing history: starting, running, terminated, failed, abnormal, terminating, rebooting, shutdown, frozen, scaling-out, scaling-in, scaling-error.
    clusterType Double
    Type of clusters. 0: analysis cluster, 1: streaming cluster. The default value is 0.
    clusterVersion String
    Version of the clusters. Currently, MRS 1.6.3, MRS 1.7.0, MRS 1.9.2, MRS 2.1.0, MRS 3.0.2 are supported. The latest version of MRS is used by default.
    componentLists List<MrsClusterV1ComponentList>
    Service component list.
    coreDataVolumeCount Double
    Number of data disks of the Core node. Value range: 1 to 10.
    coreDataVolumeSize Double
    Data disk size of the Core node. Value range: 100 GB to 32000 GB.
    coreDataVolumeType String
    Data disk storage type of the Core node, supporting SATA, SAS and SSD. SATA: Common I/O, SAS: High I/O, SSD: Ultra-high I/O.
    coreNodeNum Double
    Number of Core nodes. Value range: 1 to 500. A maximum of 500 Core nodes are supported by default. If more than 500 Core nodes are required, contact technical support engineers or invoke background APIs to modify the database.
    coreNodeProductId String
    Product ID of a Core node.
    coreNodeSize String
    Instance specification of a Core node. Configuration method of this parameter is identical to that of master_node_size.
    coreNodeSpecId String
    Specification ID of a Core node.
    createAt String
    Cluster creation time.
    deploymentId String
    Deployment ID of a cluster.
    errorInfo String
    Error information.
    externalAlternateIp String
    Backup external IP address.
    externalIp String
    External IP address.
    fee String
    Cluster creation fee, which is automatically calculated.
    hadoopVersion String
    Hadoop version.
    instanceId String
    Instance ID.
    internalIp String
    Internal IP address.
    logCollection Double
    Indicates whether logs are collected when cluster installation fails. 0: not collected. 1: collected. The default value is 0. If log_collection is set to 1, OBS buckets will be created to collect the MRS logs. These buckets will be charged.
    masterDataVolumeCount Double
    Number of data disks of the Master node. The value can be set to 1 only.
    masterDataVolumeSize Double
    Data disk size of the Master node. Value range: 100 GB to 32000 GB.
    masterDataVolumeType String
    Data disk storage type of the Master node, supporting SATA, SAS and SSD. SATA: Common I/O, SAS: High I/O, SSD: Ultra-high I/O.
    masterNodeIp String
    IP address of a Master node.
    masterNodeNum Double
    Number of Master nodes.
    masterNodeProductId String
    Product ID of a Master node.
    masterNodeSize String
    Best match based on several years of commissioning experience. MRS supports specifications of hosts, and host specifications are determined by CPUs, memory, and disks space. Master nodes support h1.2xlarge.linux.mrs h1.4xlarge.linux.mrs, h1.8xlarge.linux.mrs, s1.4xlarge.linux.mrs, and s1.8xlarge.linux.mrs. Core nodes of a streaming cluster support all specifications c2.2xlarge.linux.mrs, c2.4xlarge.linux.mrs, s1.xlarge.linux.mrs, s1.4xlarge.linux.mrs, s1.8xlarge.linux.mrs, d1.xlarge.linux.mrs, d1.2xlarge.linux.mrs, d1.4xlarge.linux.mrs, d1.8xlarge.linux.mrs, h1.2xlarge.linux.mrs, h1.4xlarge.linux.mrs and h1.8xlarge.linux.mrs. Task nodes support c2.2xlarge.linux.mrs, c2.4xlarge.linux.mrs, s1.xlarge.linux.mrs, s1.4xlarge.linux.mrs, s1.8xlarge.linux.mrs, h1.2xlarge.linux.mrs, h1.4xlarge.linux.mrs, and h1.8xlarge.linux.mrs.
    masterNodeSpecId String
    Specification ID of a Master node.
    mrsClusterV1Id String
    nodePublicCertName String
    Name of a key pair. You can use a key to log in to the Master node in the cluster.
    orderId String
    Order ID for creating clusters.
    privateIpFirst String
    Primary private IP address.
    region String
    remark String
    Remarks of a cluster.
    safeMode Double
    MRS cluster running mode. 0: common mode. The value indicates that the Kerberos authentication is disabled. Users can use all functions provided by the cluster. 1: safe mode. The value indicates that the Kerberos authentication is enabled. Common users cannot use the file management or job management functions of an MRS cluster and cannot view cluster resource usage or the job records of Hadoop and Spark. To use these functions, the users must obtain the relevant permissions from the MRS Manager administrator. The request has the cluster_admin_secret parameter only when safe_mode is set to 1.
    securityGroupsId String
    Security group ID.
    slaveSecurityGroupsId String
    Standby security group ID.
    subnetId String
    Subnet ID. Obtain the subnet ID from the management console as follows: Register an account and log in to the management console. Click Virtual Private Cloud and select Virtual Private Cloud from the left list. On the Virtual Private Cloud page, obtain the subnet ID from the list.
    tags Map<String,String>
    Tags key/value pairs to associate with the cluster.
    tenantId String
    Project ID.
    timeouts MrsClusterV1Timeouts
    updateAt String
    Cluster update time.
    vnc String
    URI address for remote login of the elastic cloud server.
    volumeSize Double
    Data disk storage space of a Core node. Users can add disks to expand storage capacity when creating a cluster. There are the following scenarios: Separation of data storage and computing: Data is stored in the OBS system. Costs of clusters are relatively low but computing performance is poor. The clusters can be deleted at any time. It is recommended when data computing is not frequently performed. Integration of data storage and computing: Data is stored in the HDFS system. Costs of clusters are relatively high but computing performance is good. The clusters cannot be deleted in a short term. It is recommended when data computing is frequently performed. Value range: 100 GB to 32000 GB.
    volumeType String
    Type of disks. SATA, SAS and SSD are supported. SATA: common I/O, SAS: High I/O, SSD: Ultra-high I/O.
    vpcId String
    ID of the VPC where the subnet is located. Obtain the VPC ID from the management console as follows: Register an account and log in to the management console. Click Virtual Private Cloud and select Virtual Private Cloud from the left list. On the Virtual Private Cloud page, obtain the VPC ID from the list.
    addJobs MrsClusterV1AddJob[]
    You can submit a job when you create a cluster to save time and use MRS easily. Only one job can be added.
    availableZoneId string
    ID of an available zone. Obtain the value from Regions and Endpoints.
    availableZoneName string
    Name of an availability zone.
    billingType number
    The value is 12, indicating on-demand payment.
    bootstrapScripts MrsClusterV1BootstrapScript[]
    Bootstrap action scripts. For details, see bootstrap_scripts block below.
    chargingStartTime string
    Time when charging starts.
    clusterAdminSecret string
    Indicates the password of the MRS Manager administrator. The password must contain 8 to 32 characters. Must contain at least two types of the following: Lowercase letters, Uppercase letters, Digits, Special characters ~!@#$%^&*()-_=+\|[{}];:'",<.>/? and spaces.
    clusterId string
    Cluster ID.
    clusterName string
    Cluster name, which is globally unique and contains only 1 to 64 letters, digits, hyphens (-), and underscores (_).
    clusterState string
    Cluster status. Valid values include: existing history: starting, running, terminated, failed, abnormal, terminating, rebooting, shutdown, frozen, scaling-out, scaling-in, scaling-error.
    clusterType number
    Type of clusters. 0: analysis cluster, 1: streaming cluster. The default value is 0.
    clusterVersion string
    Version of the clusters. Currently, MRS 1.6.3, MRS 1.7.0, MRS 1.9.2, MRS 2.1.0, MRS 3.0.2 are supported. The latest version of MRS is used by default.
    componentLists MrsClusterV1ComponentList[]
    Service component list.
    coreDataVolumeCount number
    Number of data disks of the Core node. Value range: 1 to 10.
    coreDataVolumeSize number
    Data disk size of the Core node. Value range: 100 GB to 32000 GB.
    coreDataVolumeType string
    Data disk storage type of the Core node, supporting SATA, SAS and SSD. SATA: Common I/O, SAS: High I/O, SSD: Ultra-high I/O.
    coreNodeNum number
    Number of Core nodes. Value range: 1 to 500. A maximum of 500 Core nodes are supported by default. If more than 500 Core nodes are required, contact technical support engineers or invoke background APIs to modify the database.
    coreNodeProductId string
    Product ID of a Core node.
    coreNodeSize string
    Instance specification of a Core node. Configuration method of this parameter is identical to that of master_node_size.
    coreNodeSpecId string
    Specification ID of a Core node.
    createAt string
    Cluster creation time.
    deploymentId string
    Deployment ID of a cluster.
    errorInfo string
    Error information.
    externalAlternateIp string
    Backup external IP address.
    externalIp string
    External IP address.
    fee string
    Cluster creation fee, which is automatically calculated.
    hadoopVersion string
    Hadoop version.
    instanceId string
    Instance ID.
    internalIp string
    Internal IP address.
    logCollection number
    Indicates whether logs are collected when cluster installation fails. 0: not collected. 1: collected. The default value is 0. If log_collection is set to 1, OBS buckets will be created to collect the MRS logs. These buckets will be charged.
    masterDataVolumeCount number
    Number of data disks of the Master node. The value can be set to 1 only.
    masterDataVolumeSize number
    Data disk size of the Master node. Value range: 100 GB to 32000 GB.
    masterDataVolumeType string
    Data disk storage type of the Master node, supporting SATA, SAS and SSD. SATA: Common I/O, SAS: High I/O, SSD: Ultra-high I/O.
    masterNodeIp string
    IP address of a Master node.
    masterNodeNum number
    Number of Master nodes.
    masterNodeProductId string
    Product ID of a Master node.
    masterNodeSize string
    Best match based on several years of commissioning experience. MRS supports specifications of hosts, and host specifications are determined by CPUs, memory, and disks space. Master nodes support h1.2xlarge.linux.mrs h1.4xlarge.linux.mrs, h1.8xlarge.linux.mrs, s1.4xlarge.linux.mrs, and s1.8xlarge.linux.mrs. Core nodes of a streaming cluster support all specifications c2.2xlarge.linux.mrs, c2.4xlarge.linux.mrs, s1.xlarge.linux.mrs, s1.4xlarge.linux.mrs, s1.8xlarge.linux.mrs, d1.xlarge.linux.mrs, d1.2xlarge.linux.mrs, d1.4xlarge.linux.mrs, d1.8xlarge.linux.mrs, h1.2xlarge.linux.mrs, h1.4xlarge.linux.mrs and h1.8xlarge.linux.mrs. Task nodes support c2.2xlarge.linux.mrs, c2.4xlarge.linux.mrs, s1.xlarge.linux.mrs, s1.4xlarge.linux.mrs, s1.8xlarge.linux.mrs, h1.2xlarge.linux.mrs, h1.4xlarge.linux.mrs, and h1.8xlarge.linux.mrs.
    masterNodeSpecId string
    Specification ID of a Master node.
    mrsClusterV1Id string
    nodePublicCertName string
    Name of a key pair. You can use a key to log in to the Master node in the cluster.
    orderId string
    Order ID for creating clusters.
    privateIpFirst string
    Primary private IP address.
    region string
    remark string
    Remarks of a cluster.
    safeMode number
    MRS cluster running mode. 0: common mode. The value indicates that the Kerberos authentication is disabled. Users can use all functions provided by the cluster. 1: safe mode. The value indicates that the Kerberos authentication is enabled. Common users cannot use the file management or job management functions of an MRS cluster and cannot view cluster resource usage or the job records of Hadoop and Spark. To use these functions, the users must obtain the relevant permissions from the MRS Manager administrator. The request has the cluster_admin_secret parameter only when safe_mode is set to 1.
    securityGroupsId string
    Security group ID.
    slaveSecurityGroupsId string
    Standby security group ID.
    subnetId string
    Subnet ID. Obtain the subnet ID from the management console as follows: Register an account and log in to the management console. Click Virtual Private Cloud and select Virtual Private Cloud from the left list. On the Virtual Private Cloud page, obtain the subnet ID from the list.
    tags {[key: string]: string}
    Tags key/value pairs to associate with the cluster.
    tenantId string
    Project ID.
    timeouts MrsClusterV1Timeouts
    updateAt string
    Cluster update time.
    vnc string
    URI address for remote login of the elastic cloud server.
    volumeSize number
    Data disk storage space of a Core node. Users can add disks to expand storage capacity when creating a cluster. There are the following scenarios: Separation of data storage and computing: Data is stored in the OBS system. Costs of clusters are relatively low but computing performance is poor. The clusters can be deleted at any time. It is recommended when data computing is not frequently performed. Integration of data storage and computing: Data is stored in the HDFS system. Costs of clusters are relatively high but computing performance is good. The clusters cannot be deleted in a short term. It is recommended when data computing is frequently performed. Value range: 100 GB to 32000 GB.
    volumeType string
    Type of disks. SATA, SAS and SSD are supported. SATA: common I/O, SAS: High I/O, SSD: Ultra-high I/O.
    vpcId string
    ID of the VPC where the subnet is located. Obtain the VPC ID from the management console as follows: Register an account and log in to the management console. Click Virtual Private Cloud and select Virtual Private Cloud from the left list. On the Virtual Private Cloud page, obtain the VPC ID from the list.
    add_jobs Sequence[MrsClusterV1AddJobArgs]
    You can submit a job when you create a cluster to save time and use MRS easily. Only one job can be added.
    available_zone_id str
    ID of an available zone. Obtain the value from Regions and Endpoints.
    available_zone_name str
    Name of an availability zone.
    billing_type float
    The value is 12, indicating on-demand payment.
    bootstrap_scripts Sequence[MrsClusterV1BootstrapScriptArgs]
    Bootstrap action scripts. For details, see bootstrap_scripts block below.
    charging_start_time str
    Time when charging starts.
    cluster_admin_secret str
    Indicates the password of the MRS Manager administrator. The password must contain 8 to 32 characters. Must contain at least two types of the following: Lowercase letters, Uppercase letters, Digits, Special characters ~!@#$%^&*()-_=+\|[{}];:'",<.>/? and spaces.
    cluster_id str
    Cluster ID.
    cluster_name str
    Cluster name, which is globally unique and contains only 1 to 64 letters, digits, hyphens (-), and underscores (_).
    cluster_state str
    Cluster status. Valid values include: existing history: starting, running, terminated, failed, abnormal, terminating, rebooting, shutdown, frozen, scaling-out, scaling-in, scaling-error.
    cluster_type float
    Type of clusters. 0: analysis cluster, 1: streaming cluster. The default value is 0.
    cluster_version str
    Version of the clusters. Currently, MRS 1.6.3, MRS 1.7.0, MRS 1.9.2, MRS 2.1.0, MRS 3.0.2 are supported. The latest version of MRS is used by default.
    component_lists Sequence[MrsClusterV1ComponentListArgs]
    Service component list.
    core_data_volume_count float
    Number of data disks of the Core node. Value range: 1 to 10.
    core_data_volume_size float
    Data disk size of the Core node. Value range: 100 GB to 32000 GB.
    core_data_volume_type str
    Data disk storage type of the Core node, supporting SATA, SAS and SSD. SATA: Common I/O, SAS: High I/O, SSD: Ultra-high I/O.
    core_node_num float
    Number of Core nodes. Value range: 1 to 500. A maximum of 500 Core nodes are supported by default. If more than 500 Core nodes are required, contact technical support engineers or invoke background APIs to modify the database.
    core_node_product_id str
    Product ID of a Core node.
    core_node_size str
    Instance specification of a Core node. Configuration method of this parameter is identical to that of master_node_size.
    core_node_spec_id str
    Specification ID of a Core node.
    create_at str
    Cluster creation time.
    deployment_id str
    Deployment ID of a cluster.
    error_info str
    Error information.
    external_alternate_ip str
    Backup external IP address.
    external_ip str
    External IP address.
    fee str
    Cluster creation fee, which is automatically calculated.
    hadoop_version str
    Hadoop version.
    instance_id str
    Instance ID.
    internal_ip str
    Internal IP address.
    log_collection float
    Indicates whether logs are collected when cluster installation fails. 0: not collected. 1: collected. The default value is 0. If log_collection is set to 1, OBS buckets will be created to collect the MRS logs. These buckets will be charged.
    master_data_volume_count float
    Number of data disks of the Master node. The value can be set to 1 only.
    master_data_volume_size float
    Data disk size of the Master node. Value range: 100 GB to 32000 GB.
    master_data_volume_type str
    Data disk storage type of the Master node, supporting SATA, SAS and SSD. SATA: Common I/O, SAS: High I/O, SSD: Ultra-high I/O.
    master_node_ip str
    IP address of a Master node.
    master_node_num float
    Number of Master nodes.
    master_node_product_id str
    Product ID of a Master node.
    master_node_size str
    Best match based on several years of commissioning experience. MRS supports specifications of hosts, and host specifications are determined by CPUs, memory, and disks space. Master nodes support h1.2xlarge.linux.mrs h1.4xlarge.linux.mrs, h1.8xlarge.linux.mrs, s1.4xlarge.linux.mrs, and s1.8xlarge.linux.mrs. Core nodes of a streaming cluster support all specifications c2.2xlarge.linux.mrs, c2.4xlarge.linux.mrs, s1.xlarge.linux.mrs, s1.4xlarge.linux.mrs, s1.8xlarge.linux.mrs, d1.xlarge.linux.mrs, d1.2xlarge.linux.mrs, d1.4xlarge.linux.mrs, d1.8xlarge.linux.mrs, h1.2xlarge.linux.mrs, h1.4xlarge.linux.mrs and h1.8xlarge.linux.mrs. Task nodes support c2.2xlarge.linux.mrs, c2.4xlarge.linux.mrs, s1.xlarge.linux.mrs, s1.4xlarge.linux.mrs, s1.8xlarge.linux.mrs, h1.2xlarge.linux.mrs, h1.4xlarge.linux.mrs, and h1.8xlarge.linux.mrs.
    master_node_spec_id str
    Specification ID of a Master node.
    mrs_cluster_v1_id str
    node_public_cert_name str
    Name of a key pair. You can use a key to log in to the Master node in the cluster.
    order_id str
    Order ID for creating clusters.
    private_ip_first str
    Primary private IP address.
    region str
    remark str
    Remarks of a cluster.
    safe_mode float
    MRS cluster running mode. 0: common mode. The value indicates that the Kerberos authentication is disabled. Users can use all functions provided by the cluster. 1: safe mode. The value indicates that the Kerberos authentication is enabled. Common users cannot use the file management or job management functions of an MRS cluster and cannot view cluster resource usage or the job records of Hadoop and Spark. To use these functions, the users must obtain the relevant permissions from the MRS Manager administrator. The request has the cluster_admin_secret parameter only when safe_mode is set to 1.
    security_groups_id str
    Security group ID.
    slave_security_groups_id str
    Standby security group ID.
    subnet_id str
    Subnet ID. Obtain the subnet ID from the management console as follows: Register an account and log in to the management console. Click Virtual Private Cloud and select Virtual Private Cloud from the left list. On the Virtual Private Cloud page, obtain the subnet ID from the list.
    tags Mapping[str, str]
    Tags key/value pairs to associate with the cluster.
    tenant_id str
    Project ID.
    timeouts MrsClusterV1TimeoutsArgs
    update_at str
    Cluster update time.
    vnc str
    URI address for remote login of the elastic cloud server.
    volume_size float
    Data disk storage space of a Core node. Users can add disks to expand storage capacity when creating a cluster. There are the following scenarios: Separation of data storage and computing: Data is stored in the OBS system. Costs of clusters are relatively low but computing performance is poor. The clusters can be deleted at any time. It is recommended when data computing is not frequently performed. Integration of data storage and computing: Data is stored in the HDFS system. Costs of clusters are relatively high but computing performance is good. The clusters cannot be deleted in a short term. It is recommended when data computing is frequently performed. Value range: 100 GB to 32000 GB.
    volume_type str
    Type of disks. SATA, SAS and SSD are supported. SATA: common I/O, SAS: High I/O, SSD: Ultra-high I/O.
    vpc_id str
    ID of the VPC where the subnet is located. Obtain the VPC ID from the management console as follows: Register an account and log in to the management console. Click Virtual Private Cloud and select Virtual Private Cloud from the left list. On the Virtual Private Cloud page, obtain the VPC ID from the list.
    addJobs List<Property Map>
    You can submit a job when you create a cluster to save time and use MRS easily. Only one job can be added.
    availableZoneId String
    ID of an available zone. Obtain the value from Regions and Endpoints.
    availableZoneName String
    Name of an availability zone.
    billingType Number
    The value is 12, indicating on-demand payment.
    bootstrapScripts List<Property Map>
    Bootstrap action scripts. For details, see bootstrap_scripts block below.
    chargingStartTime String
    Time when charging starts.
    clusterAdminSecret String
    Indicates the password of the MRS Manager administrator. The password must contain 8 to 32 characters. Must contain at least two types of the following: Lowercase letters, Uppercase letters, Digits, Special characters ~!@#$%^&*()-_=+\|[{}];:'",<.>/? and spaces.
    clusterId String
    Cluster ID.
    clusterName String
    Cluster name, which is globally unique and contains only 1 to 64 letters, digits, hyphens (-), and underscores (_).
    clusterState String
    Cluster status. Valid values include: existing history: starting, running, terminated, failed, abnormal, terminating, rebooting, shutdown, frozen, scaling-out, scaling-in, scaling-error.
    clusterType Number
    Type of clusters. 0: analysis cluster, 1: streaming cluster. The default value is 0.
    clusterVersion String
    Version of the clusters. Currently, MRS 1.6.3, MRS 1.7.0, MRS 1.9.2, MRS 2.1.0, MRS 3.0.2 are supported. The latest version of MRS is used by default.
    componentLists List<Property Map>
    Service component list.
    coreDataVolumeCount Number
    Number of data disks of the Core node. Value range: 1 to 10.
    coreDataVolumeSize Number
    Data disk size of the Core node. Value range: 100 GB to 32000 GB.
    coreDataVolumeType String
    Data disk storage type of the Core node, supporting SATA, SAS and SSD. SATA: Common I/O, SAS: High I/O, SSD: Ultra-high I/O.
    coreNodeNum Number
    Number of Core nodes. Value range: 1 to 500. A maximum of 500 Core nodes are supported by default. If more than 500 Core nodes are required, contact technical support engineers or invoke background APIs to modify the database.
    coreNodeProductId String
    Product ID of a Core node.
    coreNodeSize String
    Instance specification of a Core node. Configuration method of this parameter is identical to that of master_node_size.
    coreNodeSpecId String
    Specification ID of a Core node.
    createAt String
    Cluster creation time.
    deploymentId String
    Deployment ID of a cluster.
    errorInfo String
    Error information.
    externalAlternateIp String
    Backup external IP address.
    externalIp String
    External IP address.
    fee String
    Cluster creation fee, which is automatically calculated.
    hadoopVersion String
    Hadoop version.
    instanceId String
    Instance ID.
    internalIp String
    Internal IP address.
    logCollection Number
    Indicates whether logs are collected when cluster installation fails. 0: not collected. 1: collected. The default value is 0. If log_collection is set to 1, OBS buckets will be created to collect the MRS logs. These buckets will be charged.
    masterDataVolumeCount Number
    Number of data disks of the Master node. The value can be set to 1 only.
    masterDataVolumeSize Number
    Data disk size of the Master node. Value range: 100 GB to 32000 GB.
    masterDataVolumeType String
    Data disk storage type of the Master node, supporting SATA, SAS and SSD. SATA: Common I/O, SAS: High I/O, SSD: Ultra-high I/O.
    masterNodeIp String
    IP address of a Master node.
    masterNodeNum Number
    Number of Master nodes.
    masterNodeProductId String
    Product ID of a Master node.
    masterNodeSize String
    Best match based on several years of commissioning experience. MRS supports specifications of hosts, and host specifications are determined by CPUs, memory, and disks space. Master nodes support h1.2xlarge.linux.mrs h1.4xlarge.linux.mrs, h1.8xlarge.linux.mrs, s1.4xlarge.linux.mrs, and s1.8xlarge.linux.mrs. Core nodes of a streaming cluster support all specifications c2.2xlarge.linux.mrs, c2.4xlarge.linux.mrs, s1.xlarge.linux.mrs, s1.4xlarge.linux.mrs, s1.8xlarge.linux.mrs, d1.xlarge.linux.mrs, d1.2xlarge.linux.mrs, d1.4xlarge.linux.mrs, d1.8xlarge.linux.mrs, h1.2xlarge.linux.mrs, h1.4xlarge.linux.mrs and h1.8xlarge.linux.mrs. Task nodes support c2.2xlarge.linux.mrs, c2.4xlarge.linux.mrs, s1.xlarge.linux.mrs, s1.4xlarge.linux.mrs, s1.8xlarge.linux.mrs, h1.2xlarge.linux.mrs, h1.4xlarge.linux.mrs, and h1.8xlarge.linux.mrs.
    masterNodeSpecId String
    Specification ID of a Master node.
    mrsClusterV1Id String
    nodePublicCertName String
    Name of a key pair. You can use a key to log in to the Master node in the cluster.
    orderId String
    Order ID for creating clusters.
    privateIpFirst String
    Primary private IP address.
    region String
    remark String
    Remarks of a cluster.
    safeMode Number
    MRS cluster running mode. 0: common mode. The value indicates that the Kerberos authentication is disabled. Users can use all functions provided by the cluster. 1: safe mode. The value indicates that the Kerberos authentication is enabled. Common users cannot use the file management or job management functions of an MRS cluster and cannot view cluster resource usage or the job records of Hadoop and Spark. To use these functions, the users must obtain the relevant permissions from the MRS Manager administrator. The request has the cluster_admin_secret parameter only when safe_mode is set to 1.
    securityGroupsId String
    Security group ID.
    slaveSecurityGroupsId String
    Standby security group ID.
    subnetId String
    Subnet ID. Obtain the subnet ID from the management console as follows: Register an account and log in to the management console. Click Virtual Private Cloud and select Virtual Private Cloud from the left list. On the Virtual Private Cloud page, obtain the subnet ID from the list.
    tags Map<String>
    Tags key/value pairs to associate with the cluster.
    tenantId String
    Project ID.
    timeouts Property Map
    updateAt String
    Cluster update time.
    vnc String
    URI address for remote login of the elastic cloud server.
    volumeSize Number
    Data disk storage space of a Core node. Users can add disks to expand storage capacity when creating a cluster. There are the following scenarios: Separation of data storage and computing: Data is stored in the OBS system. Costs of clusters are relatively low but computing performance is poor. The clusters can be deleted at any time. It is recommended when data computing is not frequently performed. Integration of data storage and computing: Data is stored in the HDFS system. Costs of clusters are relatively high but computing performance is good. The clusters cannot be deleted in a short term. It is recommended when data computing is frequently performed. Value range: 100 GB to 32000 GB.
    volumeType String
    Type of disks. SATA, SAS and SSD are supported. SATA: common I/O, SAS: high I/O, SSD: ultra-high I/O.
    vpcId String
    ID of the VPC where the subnet is located. Obtain the VPC ID from the management console as follows: Register an account and log in to the management console. Click Virtual Private Cloud and select Virtual Private Cloud from the left list. On the Virtual Private Cloud page, obtain the VPC ID from the list.

    Supporting Types

    MrsClusterV1AddJob, MrsClusterV1AddJobArgs

    JarPath string
    Path of the .jar file or .sql file for program execution. The parameter must meet the following requirements: Contains a maximum of 1023 characters, excluding special characters such as ;|&><'$. The address cannot be empty or full of spaces. Starts with / or s3a://. Spark Script must end with .sql while MapReduce and Spark Jar must end with .jar. sql and jar are case-insensitive.
    JobName string
    It contains only 1 to 64 letters, digits, hyphens (-), and underscores (_).
    JobType double
    Type. 1: MapReduce, 2: Spark, 3: Hive Script, 4: HiveQL (not supported currently), 5: DistCp, importing and exporting data (not supported in this API currently), 6: Spark Script, 7: Spark SQL, submitting Spark SQL statements (not supported in this API currently).
    SubmitJobOnceClusterRun bool
    Possible values are: true a job is submitted when a cluster is created and false a job is submitted separately.
    Arguments string
    Key parameter for program execution. The parameter is specified by the function of the user's program. MRS is only responsible for loading the parameter. The parameter contains a maximum of 2047 characters, excluding special characters such as ;|&>'<$, and can be empty.
    FileAction string
    Data import and export. Possible values: import, export.
    HiveScriptPath string
    SQL program path. This parameter is needed by Spark Script and Hive Script jobs only and must meet the following requirements: Contains a maximum of 1023 characters, excluding special characters such as ;|&><'$. The address cannot be empty or full of spaces. Starts with / or s3a://. Ends with .sql. sql is case-insensitive.
    Hql string
    HiveQL statement.
    Input string
    Path for inputting data, which must start with / or s3a://. A correct OBS path is required. The parameter contains a maximum of 1023 characters, excluding special characters such as ;|&>'<$, and can be empty.
    JobLog string
    Path for storing job logs that record job running status. This path must start with / or s3a://. A correct OBS path is required. The parameter contains a maximum of 1023 characters, excluding special characters such as ;|&>'<$, and can be empty.
    Output string
    Path for outputting data, which must start with / or s3a://. A correct OBS path is required. If the path does not exist, the system automatically creates it. The parameter contains a maximum of 1023 characters, excluding special characters such as ;|&>'<$, and can be empty.
    ShutdownCluster bool
    Whether to delete the cluster after the jobs are complete. true: Yes, false: No.
    JarPath string
    Path of the .jar file or .sql file for program execution. The parameter must meet the following requirements: Contains a maximum of 1023 characters, excluding special characters such as ;|&><'$. The address cannot be empty or full of spaces. Starts with / or s3a://. Spark Script must end with .sql while MapReduce and Spark Jar must end with .jar. sql and jar are case-insensitive.
    JobName string
    It contains only 1 to 64 letters, digits, hyphens (-), and underscores (_).
    JobType float64
    Type. 1: MapReduce, 2: Spark, 3: Hive Script, 4: HiveQL (not supported currently), 5: DistCp, importing and exporting data (not supported in this API currently), 6: Spark Script, 7: Spark SQL, submitting Spark SQL statements (not supported in this API currently).
    SubmitJobOnceClusterRun bool
    Possible values are: true a job is submitted when a cluster is created and false a job is submitted separately.
    Arguments string
    Key parameter for program execution. The parameter is specified by the function of the user's program. MRS is only responsible for loading the parameter. The parameter contains a maximum of 2047 characters, excluding special characters such as ;|&>'<$, and can be empty.
    FileAction string
    Data import and export. Possible values: import, export.
    HiveScriptPath string
    SQL program path. This parameter is needed by Spark Script and Hive Script jobs only and must meet the following requirements: Contains a maximum of 1023 characters, excluding special characters such as ;|&><'$. The address cannot be empty or full of spaces. Starts with / or s3a://. Ends with .sql. sql is case-insensitive.
    Hql string
    HiveQL statement.
    Input string
    Path for inputting data, which must start with / or s3a://. A correct OBS path is required. The parameter contains a maximum of 1023 characters, excluding special characters such as ;|&>'<$, and can be empty.
    JobLog string
    Path for storing job logs that record job running status. This path must start with / or s3a://. A correct OBS path is required. The parameter contains a maximum of 1023 characters, excluding special characters such as ;|&>'<$, and can be empty.
    Output string
    Path for outputting data, which must start with / or s3a://. A correct OBS path is required. If the path does not exist, the system automatically creates it. The parameter contains a maximum of 1023 characters, excluding special characters such as ;|&>'<$, and can be empty.
    ShutdownCluster bool
    Whether to delete the cluster after the jobs are complete. true: Yes, false: No.
    jarPath String
    Path of the .jar file or .sql file for program execution. The parameter must meet the following requirements: Contains a maximum of 1023 characters, excluding special characters such as ;|&><'$. The address cannot be empty or full of spaces. Starts with / or s3a://. Spark Script must end with .sql while MapReduce and Spark Jar must end with .jar. sql and jar are case-insensitive.
    jobName String
    It contains only 1 to 64 letters, digits, hyphens (-), and underscores (_).
    jobType Double
    Type. 1: MapReduce, 2: Spark, 3: Hive Script, 4: HiveQL (not supported currently), 5: DistCp, importing and exporting data (not supported in this API currently), 6: Spark Script, 7: Spark SQL, submitting Spark SQL statements (not supported in this API currently).
    submitJobOnceClusterRun Boolean
    Possible values are: true a job is submitted when a cluster is created and false a job is submitted separately.
    arguments String
    Key parameter for program execution. The parameter is specified by the function of the user's program. MRS is only responsible for loading the parameter. The parameter contains a maximum of 2047 characters, excluding special characters such as ;|&>'<$, and can be empty.
    fileAction String
    Data import and export. Possible values: import, export.
    hiveScriptPath String
    SQL program path. This parameter is needed by Spark Script and Hive Script jobs only and must meet the following requirements: Contains a maximum of 1023 characters, excluding special characters such as ;|&><'$. The address cannot be empty or full of spaces. Starts with / or s3a://. Ends with .sql. sql is case-insensitive.
    hql String
    HiveQL statement.
    input String
    Path for inputting data, which must start with / or s3a://. A correct OBS path is required. The parameter contains a maximum of 1023 characters, excluding special characters such as ;|&>'<$, and can be empty.
    jobLog String
    Path for storing job logs that record job running status. This path must start with / or s3a://. A correct OBS path is required. The parameter contains a maximum of 1023 characters, excluding special characters such as ;|&>'<$, and can be empty.
    output String
    Path for outputting data, which must start with / or s3a://. A correct OBS path is required. If the path does not exist, the system automatically creates it. The parameter contains a maximum of 1023 characters, excluding special characters such as ;|&>'<$, and can be empty.
    shutdownCluster Boolean
    Whether to delete the cluster after the jobs are complete. true: Yes, false: No.
    jarPath string
    Path of the .jar file or .sql file for program execution. The parameter must meet the following requirements: Contains a maximum of 1023 characters, excluding special characters such as ;|&><'$. The address cannot be empty or full of spaces. Starts with / or s3a://. Spark Script must end with .sql while MapReduce and Spark Jar must end with .jar. sql and jar are case-insensitive.
    jobName string
    It contains only 1 to 64 letters, digits, hyphens (-), and underscores (_).
    jobType number
    Type. 1: MapReduce, 2: Spark, 3: Hive Script, 4: HiveQL (not supported currently), 5: DistCp, importing and exporting data (not supported in this API currently), 6: Spark Script, 7: Spark SQL, submitting Spark SQL statements (not supported in this API currently).
    submitJobOnceClusterRun boolean
    Possible values are: true a job is submitted when a cluster is created and false a job is submitted separately.
    arguments string
    Key parameter for program execution. The parameter is specified by the function of the user's program. MRS is only responsible for loading the parameter. The parameter contains a maximum of 2047 characters, excluding special characters such as ;|&>'<$, and can be empty.
    fileAction string
    Data import and export. Possible values: import, export.
    hiveScriptPath string
    SQL program path. This parameter is needed by Spark Script and Hive Script jobs only and must meet the following requirements: Contains a maximum of 1023 characters, excluding special characters such as ;|&><'$. The address cannot be empty or full of spaces. Starts with / or s3a://. Ends with .sql. sql is case-insensitive.
    hql string
    HiveQL statement.
    input string
    Path for inputting data, which must start with / or s3a://. A correct OBS path is required. The parameter contains a maximum of 1023 characters, excluding special characters such as ;|&>'<$, and can be empty.
    jobLog string
    Path for storing job logs that record job running status. This path must start with / or s3a://. A correct OBS path is required. The parameter contains a maximum of 1023 characters, excluding special characters such as ;|&>'<$, and can be empty.
    output string
    Path for outputting data, which must start with / or s3a://. A correct OBS path is required. If the path does not exist, the system automatically creates it. The parameter contains a maximum of 1023 characters, excluding special characters such as ;|&>'<$, and can be empty.
    shutdownCluster boolean
    Whether to delete the cluster after the jobs are complete. true: Yes, false: No.
    jar_path str
    Path of the .jar file or .sql file for program execution. The parameter must meet the following requirements: Contains a maximum of 1023 characters, excluding special characters such as ;|&><'$. The address cannot be empty or full of spaces. Starts with / or s3a://. Spark Script must end with .sql while MapReduce and Spark Jar must end with .jar. sql and jar are case-insensitive.
    job_name str
    It contains only 1 to 64 letters, digits, hyphens (-), and underscores (_).
    job_type float
    Type. 1: MapReduce, 2: Spark, 3: Hive Script, 4: HiveQL (not supported currently), 5: DistCp, importing and exporting data (not supported in this API currently), 6: Spark Script, 7: Spark SQL, submitting Spark SQL statements (not supported in this API currently).
    submit_job_once_cluster_run bool
    Possible values are: true a job is submitted when a cluster is created and false a job is submitted separately.
    arguments str
    Key parameter for program execution. The parameter is specified by the function of the user's program. MRS is only responsible for loading the parameter. The parameter contains a maximum of 2047 characters, excluding special characters such as ;|&>'<$, and can be empty.
    file_action str
    Data import and export. Possible values: import, export.
    hive_script_path str
    SQL program path. This parameter is needed by Spark Script and Hive Script jobs only and must meet the following requirements: Contains a maximum of 1023 characters, excluding special characters such as ;|&><'$. The address cannot be empty or full of spaces. Starts with / or s3a://. Ends with .sql. sql is case-insensitive.
    hql str
    HiveQL statement.
    input str
    Path for inputting data, which must start with / or s3a://. A correct OBS path is required. The parameter contains a maximum of 1023 characters, excluding special characters such as ;|&>'<$, and can be empty.
    job_log str
    Path for storing job logs that record job running status. This path must start with / or s3a://. A correct OBS path is required. The parameter contains a maximum of 1023 characters, excluding special characters such as ;|&>'<$, and can be empty.
    output str
    Path for outputting data, which must start with / or s3a://. A correct OBS path is required. If the path does not exist, the system automatically creates it. The parameter contains a maximum of 1023 characters, excluding special characters such as ;|&>'<$, and can be empty.
    shutdown_cluster bool
    Whether to delete the cluster after the jobs are complete. true: Yes, false: No.
    jarPath String
    Path of the .jar file or .sql file for program execution. The parameter must meet the following requirements: Contains a maximum of 1023 characters, excluding special characters such as ;|&><'$. The address cannot be empty or full of spaces. Starts with / or s3a://. Spark Script must end with .sql while MapReduce and Spark Jar must end with .jar. sql and jar are case-insensitive.
    jobName String
    It contains only 1 to 64 letters, digits, hyphens (-), and underscores (_).
    jobType Number
    Type. 1: MapReduce, 2: Spark, 3: Hive Script, 4: HiveQL (not supported currently), 5: DistCp, importing and exporting data (not supported in this API currently), 6: Spark Script, 7: Spark SQL, submitting Spark SQL statements (not supported in this API currently).
    submitJobOnceClusterRun Boolean
    Possible values are: true a job is submitted when a cluster is created and false a job is submitted separately.
    arguments String
    Key parameter for program execution. The parameter is specified by the function of the user's program. MRS is only responsible for loading the parameter. The parameter contains a maximum of 2047 characters, excluding special characters such as ;|&>'<$, and can be empty.
    fileAction String
    Data import and export. Possible values: import, export.
    hiveScriptPath String
    SQL program path. This parameter is needed by Spark Script and Hive Script jobs only and must meet the following requirements: Contains a maximum of 1023 characters, excluding special characters such as ;|&><'$. The address cannot be empty or full of spaces. Starts with / or s3a://. Ends with .sql. sql is case-insensitive.
    hql String
    HiveQL statement.
    input String
    Path for inputting data, which must start with / or s3a://. A correct OBS path is required. The parameter contains a maximum of 1023 characters, excluding special characters such as ;|&>'<$, and can be empty.
    jobLog String
    Path for storing job logs that record job running status. This path must start with / or s3a://. A correct OBS path is required. The parameter contains a maximum of 1023 characters, excluding special characters such as ;|&>'<$, and can be empty.
    output String
    Path for outputting data, which must start with / or s3a://. A correct OBS path is required. If the path does not exist, the system automatically creates it. The parameter contains a maximum of 1023 characters, excluding special characters such as ;|&>'<$, and can be empty.
    shutdownCluster Boolean
    Whether to delete the cluster after the jobs are complete. true: Yes, false: No.

    MrsClusterV1BootstrapScript, MrsClusterV1BootstrapScriptArgs

    FailAction string
    Whether to continue to execute subsequent scripts and create a cluster after the bootstrap action script fails to be executed. continue: Continue to execute subsequent scripts. errorout: Stop the action.
    Name string
    Name of a bootstrap action script.
    Nodes List<string>
    Type of node where the bootstrap action script is executed, including master, core, and task.
    Uri string
    Path of the shell script. Set this parameter to an OBS bucket path or a local VM path.
    ActiveMaster bool
    Whether the bootstrap action script runs only on active Master nodes.
    BeforeComponentStart bool
    Time when the bootstrap action script is executed. Currently, the script can be executed before and after the component is started.
    Parameters string
    Bootstrap action script parameters.
    FailAction string
    Whether to continue to execute subsequent scripts and create a cluster after the bootstrap action script fails to be executed. continue: Continue to execute subsequent scripts. errorout: Stop the action.
    Name string
    Name of a bootstrap action script.
    Nodes []string
    Type of node where the bootstrap action script is executed, including master, core, and task.
    Uri string
    Path of the shell script. Set this parameter to an OBS bucket path or a local VM path.
    ActiveMaster bool
    Whether the bootstrap action script runs only on active Master nodes.
    BeforeComponentStart bool
    Time when the bootstrap action script is executed. Currently, the script can be executed before and after the component is started.
    Parameters string
    Bootstrap action script parameters.
    failAction String
    Whether to continue to execute subsequent scripts and create a cluster after the bootstrap action script fails to be executed. continue: Continue to execute subsequent scripts. errorout: Stop the action.
    name String
    Name of a bootstrap action script.
    nodes List<String>
    Type of node where the bootstrap action script is executed, including master, core, and task.
    uri String
    Path of the shell script. Set this parameter to an OBS bucket path or a local VM path.
    activeMaster Boolean
    Whether the bootstrap action script runs only on active Master nodes.
    beforeComponentStart Boolean
    Time when the bootstrap action script is executed. Currently, the script can be executed before and after the component is started.
    parameters String
    Bootstrap action script parameters.
    failAction string
    Whether to continue to execute subsequent scripts and create a cluster after the bootstrap action script fails to be executed. continue: Continue to execute subsequent scripts. errorout: Stop the action.
    name string
    Name of a bootstrap action script.
    nodes string[]
    Type of node where the bootstrap action script is executed, including master, core, and task.
    uri string
    Path of the shell script. Set this parameter to an OBS bucket path or a local VM path.
    activeMaster boolean
    Whether the bootstrap action script runs only on active Master nodes.
    beforeComponentStart boolean
    Time when the bootstrap action script is executed. Currently, the script can be executed before and after the component is started.
    parameters string
    Bootstrap action script parameters.
    fail_action str
    Whether to continue to execute subsequent scripts and create a cluster after the bootstrap action script fails to be executed. continue: Continue to execute subsequent scripts. errorout: Stop the action.
    name str
    Name of a bootstrap action script.
    nodes Sequence[str]
    Type of node where the bootstrap action script is executed, including master, core, and task.
    uri str
    Path of the shell script. Set this parameter to an OBS bucket path or a local VM path.
    active_master bool
    Whether the bootstrap action script runs only on active Master nodes.
    before_component_start bool
    Time when the bootstrap action script is executed. Currently, the script can be executed before and after the component is started.
    parameters str
    Bootstrap action script parameters.
    failAction String
    Whether to continue to execute subsequent scripts and create a cluster after the bootstrap action script fails to be executed. continue: Continue to execute subsequent scripts. errorout: Stop the action.
    name String
    Name of a bootstrap action script.
    nodes List<String>
    Type of node where the bootstrap action script is executed, including master, core, and task.
    uri String
    Path of the shell script. Set this parameter to an OBS bucket path or a local VM path.
    activeMaster Boolean
    Whether the bootstrap action script runs only on active Master nodes.
    beforeComponentStart Boolean
    Time when the bootstrap action script is executed. Currently, the script can be executed before and after the component is started.
    parameters String
    Bootstrap action script parameters.

    MrsClusterV1ComponentList, MrsClusterV1ComponentListArgs

    ComponentName string
    Component name.
    ComponentDesc string
    ComponentId string
    Component ID.
    ComponentVersion string
    Component version.
    ComponentName string
    Component name.
    ComponentDesc string
    ComponentId string
    Component ID.
    ComponentVersion string
    Component version.
    componentName String
    Component name.
    componentDesc String
    componentId String
    Component ID.
    componentVersion String
    Component version.
    componentName string
    Component name.
    componentDesc string
    componentId string
    Component ID.
    componentVersion string
    Component version.
    component_name str
    Component name.
    component_desc str
    component_id str
    Component ID.
    component_version str
    Component version.
    componentName String
    Component name.
    componentDesc String
    componentId String
    Component ID.
    componentVersion String
    Component version.

    MrsClusterV1Timeouts, MrsClusterV1TimeoutsArgs

    Create string
    Delete string
    Create string
    Delete string
    create String
    delete String
    create string
    delete string
    create str
    delete str
    create String
    delete String

    Import

    Cluster can be imported using the cluster_id, e.g.

    $ pulumi import opentelekomcloud:index/mrsClusterV1:MrsClusterV1 cluster_1 4729ab1c-7c1a-4411-a02e-93dfc361b32d
    

    To learn more about importing existing cloud resources, see Importing resources.

    Package Details

    Repository
    opentelekomcloud opentelekomcloud/terraform-provider-opentelekomcloud
    License
    Notes
    This Pulumi package is based on the opentelekomcloud Terraform Provider.
    opentelekomcloud logo
    opentelekomcloud 1.36.37 published on Thursday, Apr 24, 2025 by opentelekomcloud