opentelekomcloud.MrsClusterV1
Explore with Pulumi AI
An up-to-date reference of the API arguments for an MRS cluster is available at the documentation portal.
Manages resource cluster within OpenTelekomCloud MRS.
Example Usage
// Example: provision an MRS 2.1.0 analysis cluster (clusterType: 0) running in
// safe mode (safeMode: 1, Kerberos enabled), with a bootstrap script applied to
// master, core, and task nodes.
import * as pulumi from "@pulumi/pulumi";
import * as opentelekomcloud from "@pulumi/opentelekomcloud";
const _this = new opentelekomcloud.MrsClusterV1("this", {
clusterName: "mrs-cluster",
billingType: 12, // 12 = pay-per-use (the only supported value)
masterNodeNum: 2,
coreNodeNum: 3,
masterNodeSize: "c3.xlarge.4.linux.mrs",
coreNodeSize: "c3.xlarge.4.linux.mrs",
availableZoneId: _var.az,
vpcId: _var.vpc_id,
subnetId: _var.network_id,
clusterVersion: "MRS 2.1.0",
volumeType: "SATA", // SATA = Common I/O
volumeSize: 100,
clusterType: 0, // 0 = analysis cluster, 1 = streaming cluster
safeMode: 1, // 1 = Kerberos authentication enabled; requires clusterAdminSecret
nodePublicCertName: "%s", // NOTE(review): "%s" looks like a leftover test placeholder — use a real key-pair name
clusterAdminSecret: "Qwerty!123",
componentLists: [
{
componentName: "Presto",
},
{
componentName: "Hadoop",
},
{
componentName: "Spark",
},
{
componentName: "HBase",
},
{
componentName: "Hive",
},
{
componentName: "Hue",
},
{
componentName: "Loader",
},
{
componentName: "Tez",
},
{
componentName: "Flink",
},
],
bootstrapScripts: [{
name: "Modify os config",
uri: "s3a://bootstrap/modify_os_config.sh",
parameters: "param1 param2",
nodes: [
"master",
"core",
"task",
],
activeMaster: true,
beforeComponentStart: true, // run the script before components start
failAction: "continue", // do not fail cluster creation if the script fails
}],
tags: {
foo: "bar",
key: "value",
},
});
# Example: provision an MRS 2.1.0 analysis cluster (cluster_type=0) running in
# safe mode (safe_mode=1, Kerberos enabled), with a bootstrap script applied to
# master, core, and task nodes.
import pulumi
import pulumi_opentelekomcloud as opentelekomcloud
this = opentelekomcloud.MrsClusterV1("this",
cluster_name="mrs-cluster",
billing_type=12,  # 12 = pay-per-use (the only supported value)
master_node_num=2,
core_node_num=3,
master_node_size="c3.xlarge.4.linux.mrs",
core_node_size="c3.xlarge.4.linux.mrs",
available_zone_id=var["az"],
vpc_id=var["vpc_id"],
subnet_id=var["network_id"],
cluster_version="MRS 2.1.0",
volume_type="SATA",  # SATA = Common I/O
volume_size=100,
cluster_type=0,  # 0 = analysis cluster, 1 = streaming cluster
safe_mode=1,  # 1 = Kerberos authentication enabled; requires cluster_admin_secret
node_public_cert_name="%s",  # NOTE(review): "%s" looks like a leftover test placeholder — use a real key-pair name
cluster_admin_secret="Qwerty!123",
component_lists=[
{
"component_name": "Presto",
},
{
"component_name": "Hadoop",
},
{
"component_name": "Spark",
},
{
"component_name": "HBase",
},
{
"component_name": "Hive",
},
{
"component_name": "Hue",
},
{
"component_name": "Loader",
},
{
"component_name": "Tez",
},
{
"component_name": "Flink",
},
],
bootstrap_scripts=[{
"name": "Modify os config",
"uri": "s3a://bootstrap/modify_os_config.sh",
"parameters": "param1 param2",
"nodes": [
"master",
"core",
"task",
],
"active_master": True,
"before_component_start": True,  # run the script before components start
"fail_action": "continue",  # do not fail cluster creation if the script fails
}],
tags={
"foo": "bar",
"key": "value",
})
// Example: provision an MRS 2.1.0 analysis cluster (ClusterType: 0) running in
// safe mode (SafeMode: 1, Kerberos enabled), with a bootstrap script applied to
// master, core, and task nodes.
package main
import (
"github.com/pulumi/pulumi-terraform-provider/sdks/go/opentelekomcloud/opentelekomcloud"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
_, err := opentelekomcloud.NewMrsClusterV1(ctx, "this", &opentelekomcloud.MrsClusterV1Args{
ClusterName: pulumi.String("mrs-cluster"),
BillingType: pulumi.Float64(12), // 12 = pay-per-use (the only supported value)
MasterNodeNum: pulumi.Float64(2),
CoreNodeNum: pulumi.Float64(3),
MasterNodeSize: pulumi.String("c3.xlarge.4.linux.mrs"),
CoreNodeSize: pulumi.String("c3.xlarge.4.linux.mrs"),
AvailableZoneId: pulumi.Any(_var.Az),
VpcId: pulumi.Any(_var.Vpc_id),
SubnetId: pulumi.Any(_var.Network_id),
ClusterVersion: pulumi.String("MRS 2.1.0"),
VolumeType: pulumi.String("SATA"), // SATA = Common I/O
VolumeSize: pulumi.Float64(100),
ClusterType: pulumi.Float64(0), // 0 = analysis cluster, 1 = streaming cluster
SafeMode: pulumi.Float64(1), // 1 = Kerberos authentication enabled; requires ClusterAdminSecret
NodePublicCertName: pulumi.String("%s"), // NOTE(review): "%s" looks like a leftover test placeholder — use a real key-pair name
ClusterAdminSecret: pulumi.String("Qwerty!123"),
ComponentLists: opentelekomcloud.MrsClusterV1ComponentListArray{
&opentelekomcloud.MrsClusterV1ComponentListArgs{
ComponentName: pulumi.String("Presto"),
},
&opentelekomcloud.MrsClusterV1ComponentListArgs{
ComponentName: pulumi.String("Hadoop"),
},
&opentelekomcloud.MrsClusterV1ComponentListArgs{
ComponentName: pulumi.String("Spark"),
},
&opentelekomcloud.MrsClusterV1ComponentListArgs{
ComponentName: pulumi.String("HBase"),
},
&opentelekomcloud.MrsClusterV1ComponentListArgs{
ComponentName: pulumi.String("Hive"),
},
&opentelekomcloud.MrsClusterV1ComponentListArgs{
ComponentName: pulumi.String("Hue"),
},
&opentelekomcloud.MrsClusterV1ComponentListArgs{
ComponentName: pulumi.String("Loader"),
},
&opentelekomcloud.MrsClusterV1ComponentListArgs{
ComponentName: pulumi.String("Tez"),
},
&opentelekomcloud.MrsClusterV1ComponentListArgs{
ComponentName: pulumi.String("Flink"),
},
},
BootstrapScripts: opentelekomcloud.MrsClusterV1BootstrapScriptArray{
&opentelekomcloud.MrsClusterV1BootstrapScriptArgs{
Name: pulumi.String("Modify os config"),
Uri: pulumi.String("s3a://bootstrap/modify_os_config.sh"),
Parameters: pulumi.String("param1 param2"),
Nodes: pulumi.StringArray{
pulumi.String("master"),
pulumi.String("core"),
pulumi.String("task"),
},
ActiveMaster: pulumi.Bool(true),
BeforeComponentStart: pulumi.Bool(true), // run the script before components start
FailAction: pulumi.String("continue"), // do not fail cluster creation if the script fails
},
},
Tags: pulumi.StringMap{
"foo": pulumi.String("bar"),
"key": pulumi.String("value"),
},
})
if err != nil {
return err
}
return nil
})
}
// Example: provision an MRS 2.1.0 analysis cluster (ClusterType = 0) running in
// safe mode (SafeMode = 1, Kerberos enabled), with a bootstrap script applied to
// master, core, and task nodes.
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Opentelekomcloud = Pulumi.Opentelekomcloud;
return await Deployment.RunAsync(() =>
{
var @this = new Opentelekomcloud.MrsClusterV1("this", new()
{
ClusterName = "mrs-cluster",
BillingType = 12, // 12 = pay-per-use (the only supported value)
MasterNodeNum = 2,
CoreNodeNum = 3,
MasterNodeSize = "c3.xlarge.4.linux.mrs",
CoreNodeSize = "c3.xlarge.4.linux.mrs",
AvailableZoneId = @var.Az,
VpcId = @var.Vpc_id,
SubnetId = @var.Network_id,
ClusterVersion = "MRS 2.1.0",
VolumeType = "SATA", // SATA = Common I/O
VolumeSize = 100,
ClusterType = 0, // 0 = analysis cluster, 1 = streaming cluster
SafeMode = 1, // 1 = Kerberos authentication enabled; requires ClusterAdminSecret
NodePublicCertName = "%s", // NOTE(review): "%s" looks like a leftover test placeholder — use a real key-pair name
ClusterAdminSecret = "Qwerty!123",
ComponentLists = new[]
{
new Opentelekomcloud.Inputs.MrsClusterV1ComponentListArgs
{
ComponentName = "Presto",
},
new Opentelekomcloud.Inputs.MrsClusterV1ComponentListArgs
{
ComponentName = "Hadoop",
},
new Opentelekomcloud.Inputs.MrsClusterV1ComponentListArgs
{
ComponentName = "Spark",
},
new Opentelekomcloud.Inputs.MrsClusterV1ComponentListArgs
{
ComponentName = "HBase",
},
new Opentelekomcloud.Inputs.MrsClusterV1ComponentListArgs
{
ComponentName = "Hive",
},
new Opentelekomcloud.Inputs.MrsClusterV1ComponentListArgs
{
ComponentName = "Hue",
},
new Opentelekomcloud.Inputs.MrsClusterV1ComponentListArgs
{
ComponentName = "Loader",
},
new Opentelekomcloud.Inputs.MrsClusterV1ComponentListArgs
{
ComponentName = "Tez",
},
new Opentelekomcloud.Inputs.MrsClusterV1ComponentListArgs
{
ComponentName = "Flink",
},
},
BootstrapScripts = new[]
{
new Opentelekomcloud.Inputs.MrsClusterV1BootstrapScriptArgs
{
Name = "Modify os config",
Uri = "s3a://bootstrap/modify_os_config.sh",
Parameters = "param1 param2",
Nodes = new[]
{
"master",
"core",
"task",
},
ActiveMaster = true,
BeforeComponentStart = true, // run the script before components start
FailAction = "continue", // do not fail cluster creation if the script fails
},
},
Tags =
{
{ "foo", "bar" },
{ "key", "value" },
},
});
});
// Example: provision an MRS 2.1.0 analysis cluster (clusterType 0) running in
// safe mode (safeMode 1, Kerberos enabled), with a bootstrap script applied to
// master, core, and task nodes.
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.opentelekomcloud.MrsClusterV1;
import com.pulumi.opentelekomcloud.MrsClusterV1Args;
import com.pulumi.opentelekomcloud.inputs.MrsClusterV1ComponentListArgs;
import com.pulumi.opentelekomcloud.inputs.MrsClusterV1BootstrapScriptArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
var this_ = new MrsClusterV1("this", MrsClusterV1Args.builder()
.clusterName("mrs-cluster")
.billingType(12) // 12 = pay-per-use (the only supported value)
.masterNodeNum(2)
.coreNodeNum(3)
.masterNodeSize("c3.xlarge.4.linux.mrs")
.coreNodeSize("c3.xlarge.4.linux.mrs")
.availableZoneId(var_.az())
.vpcId(var_.vpc_id())
.subnetId(var_.network_id())
.clusterVersion("MRS 2.1.0")
.volumeType("SATA") // SATA = Common I/O
.volumeSize(100)
.clusterType(0) // 0 = analysis cluster, 1 = streaming cluster
.safeMode(1) // 1 = Kerberos authentication enabled; requires clusterAdminSecret
.nodePublicCertName("%s") // NOTE(review): "%s" looks like a leftover test placeholder — use a real key-pair name
.clusterAdminSecret("Qwerty!123")
.componentLists(
MrsClusterV1ComponentListArgs.builder()
.componentName("Presto")
.build(),
MrsClusterV1ComponentListArgs.builder()
.componentName("Hadoop")
.build(),
MrsClusterV1ComponentListArgs.builder()
.componentName("Spark")
.build(),
MrsClusterV1ComponentListArgs.builder()
.componentName("HBase")
.build(),
MrsClusterV1ComponentListArgs.builder()
.componentName("Hive")
.build(),
MrsClusterV1ComponentListArgs.builder()
.componentName("Hue")
.build(),
MrsClusterV1ComponentListArgs.builder()
.componentName("Loader")
.build(),
MrsClusterV1ComponentListArgs.builder()
.componentName("Tez")
.build(),
MrsClusterV1ComponentListArgs.builder()
.componentName("Flink")
.build())
.bootstrapScripts(MrsClusterV1BootstrapScriptArgs.builder()
.name("Modify os config")
.uri("s3a://bootstrap/modify_os_config.sh")
.parameters("param1 param2")
.nodes(
"master",
"core",
"task")
.activeMaster(true)
.beforeComponentStart(true) // run the script before components start
.failAction("continue") // do not fail cluster creation if the script fails
.build())
.tags(Map.ofEntries(
Map.entry("foo", "bar"),
Map.entry("key", "value")
))
.build());
}
}
# Example: provision an MRS 2.1.0 analysis cluster (clusterType: 0) running in
# safe mode (safeMode: 1, Kerberos enabled), with a bootstrap script applied to
# master, core, and task nodes.
resources:
this:
type: opentelekomcloud:MrsClusterV1
properties:
clusterName: mrs-cluster
billingType: 12 # 12 = pay-per-use (the only supported value)
masterNodeNum: 2
coreNodeNum: 3
masterNodeSize: c3.xlarge.4.linux.mrs
coreNodeSize: c3.xlarge.4.linux.mrs
availableZoneId: ${var.az}
vpcId: ${var.vpc_id}
subnetId: ${var.network_id}
clusterVersion: MRS 2.1.0
volumeType: SATA # SATA = Common I/O
volumeSize: 100
clusterType: 0 # 0 = analysis cluster, 1 = streaming cluster
safeMode: 1 # 1 = Kerberos authentication enabled; requires clusterAdminSecret
nodePublicCertName: '%s' # NOTE(review): '%s' looks like a leftover test placeholder — use a real key-pair name
clusterAdminSecret: Qwerty!123
componentLists:
- componentName: Presto
- componentName: Hadoop
- componentName: Spark
- componentName: HBase
- componentName: Hive
- componentName: Hue
- componentName: Loader
- componentName: Tez
- componentName: Flink
bootstrapScripts:
- name: Modify os config
uri: s3a://bootstrap/modify_os_config.sh
parameters: param1 param2
nodes:
- master
- core
- task
activeMaster: true
beforeComponentStart: true # run the script before components start
failAction: continue # do not fail cluster creation if the script fails
tags:
foo: bar
key: value
Create MrsClusterV1 Resource
Resources are created with functions called constructors. To learn more about declaring and configuring resources, see Resources.
Constructor syntax
new MrsClusterV1(name: string, args: MrsClusterV1Args, opts?: CustomResourceOptions);
@overload
def MrsClusterV1(resource_name: str,
args: MrsClusterV1Args,
opts: Optional[ResourceOptions] = None)
@overload
def MrsClusterV1(resource_name: str,
opts: Optional[ResourceOptions] = None,
node_public_cert_name: Optional[str] = None,
core_node_size: Optional[str] = None,
billing_type: Optional[float] = None,
vpc_id: Optional[str] = None,
subnet_id: Optional[str] = None,
cluster_name: Optional[str] = None,
safe_mode: Optional[float] = None,
cluster_version: Optional[str] = None,
component_lists: Optional[Sequence[MrsClusterV1ComponentListArgs]] = None,
master_node_size: Optional[str] = None,
master_node_num: Optional[float] = None,
available_zone_id: Optional[str] = None,
core_node_num: Optional[float] = None,
mrs_cluster_v1_id: Optional[str] = None,
master_data_volume_size: Optional[float] = None,
log_collection: Optional[float] = None,
region: Optional[str] = None,
master_data_volume_type: Optional[str] = None,
core_data_volume_size: Optional[float] = None,
core_data_volume_count: Optional[float] = None,
master_data_volume_count: Optional[float] = None,
add_jobs: Optional[Sequence[MrsClusterV1AddJobArgs]] = None,
core_data_volume_type: Optional[str] = None,
cluster_type: Optional[float] = None,
cluster_admin_secret: Optional[str] = None,
tags: Optional[Mapping[str, str]] = None,
timeouts: Optional[MrsClusterV1TimeoutsArgs] = None,
volume_size: Optional[float] = None,
volume_type: Optional[str] = None,
bootstrap_scripts: Optional[Sequence[MrsClusterV1BootstrapScriptArgs]] = None)
func NewMrsClusterV1(ctx *Context, name string, args MrsClusterV1Args, opts ...ResourceOption) (*MrsClusterV1, error)
public MrsClusterV1(string name, MrsClusterV1Args args, CustomResourceOptions? opts = null)
public MrsClusterV1(String name, MrsClusterV1Args args)
public MrsClusterV1(String name, MrsClusterV1Args args, CustomResourceOptions options)
type: opentelekomcloud:MrsClusterV1
properties: # The arguments to resource properties.
options: # Bag of options to control resource's behavior.
Parameters
- name string
- The unique name of the resource.
- args MrsClusterV1Args
- The arguments to resource properties.
- opts CustomResourceOptions
- Bag of options to control resource's behavior.
- resource_name str
- The unique name of the resource.
- args MrsClusterV1Args
- The arguments to resource properties.
- opts ResourceOptions
- Bag of options to control resource's behavior.
- ctx Context
- Context object for the current deployment.
- name string
- The unique name of the resource.
- args MrsClusterV1Args
- The arguments to resource properties.
- opts ResourceOption
- Bag of options to control resource's behavior.
- name string
- The unique name of the resource.
- args MrsClusterV1Args
- The arguments to resource properties.
- opts CustomResourceOptions
- Bag of options to control resource's behavior.
- name String
- The unique name of the resource.
- args MrsClusterV1Args
- The arguments to resource properties.
- options CustomResourceOptions
- Bag of options to control resource's behavior.
Constructor example
The following reference example uses placeholder values for all input properties.
// Reference example: every input property shown with a placeholder value
// ("string", 0, false) — substitute real values before use.
var mrsClusterV1Resource = new Opentelekomcloud.MrsClusterV1("mrsClusterV1Resource", new()
{
NodePublicCertName = "string",
CoreNodeSize = "string",
BillingType = 0,
VpcId = "string",
SubnetId = "string",
ClusterName = "string",
SafeMode = 0,
ClusterVersion = "string",
ComponentLists = new[]
{
new Opentelekomcloud.Inputs.MrsClusterV1ComponentListArgs
{
ComponentName = "string",
ComponentDesc = "string",
ComponentId = "string",
ComponentVersion = "string",
},
},
MasterNodeSize = "string",
MasterNodeNum = 0,
AvailableZoneId = "string",
CoreNodeNum = 0,
MrsClusterV1Id = "string",
MasterDataVolumeSize = 0,
LogCollection = 0,
Region = "string",
MasterDataVolumeType = "string",
CoreDataVolumeSize = 0,
CoreDataVolumeCount = 0,
MasterDataVolumeCount = 0,
AddJobs = new[]
{
new Opentelekomcloud.Inputs.MrsClusterV1AddJobArgs
{
JarPath = "string",
JobName = "string",
JobType = 0,
SubmitJobOnceClusterRun = false,
Arguments = "string",
FileAction = "string",
HiveScriptPath = "string",
Hql = "string",
Input = "string",
JobLog = "string",
Output = "string",
ShutdownCluster = false,
},
},
CoreDataVolumeType = "string",
ClusterType = 0,
ClusterAdminSecret = "string",
Tags =
{
{ "string", "string" },
},
Timeouts = new Opentelekomcloud.Inputs.MrsClusterV1TimeoutsArgs
{
Create = "string",
Delete = "string",
},
VolumeSize = 0,
VolumeType = "string",
BootstrapScripts = new[]
{
new Opentelekomcloud.Inputs.MrsClusterV1BootstrapScriptArgs
{
FailAction = "string",
Name = "string",
Nodes = new[]
{
"string",
},
Uri = "string",
ActiveMaster = false,
BeforeComponentStart = false,
Parameters = "string",
},
},
});
// Reference example: every input property shown with a placeholder value
// ("string", 0, false) — substitute real values before use.
example, err := opentelekomcloud.NewMrsClusterV1(ctx, "mrsClusterV1Resource", &opentelekomcloud.MrsClusterV1Args{
NodePublicCertName: pulumi.String("string"),
CoreNodeSize: pulumi.String("string"),
BillingType: pulumi.Float64(0),
VpcId: pulumi.String("string"),
SubnetId: pulumi.String("string"),
ClusterName: pulumi.String("string"),
SafeMode: pulumi.Float64(0),
ClusterVersion: pulumi.String("string"),
ComponentLists: opentelekomcloud.MrsClusterV1ComponentListArray{
&opentelekomcloud.MrsClusterV1ComponentListArgs{
ComponentName: pulumi.String("string"),
ComponentDesc: pulumi.String("string"),
ComponentId: pulumi.String("string"),
ComponentVersion: pulumi.String("string"),
},
},
MasterNodeSize: pulumi.String("string"),
MasterNodeNum: pulumi.Float64(0),
AvailableZoneId: pulumi.String("string"),
CoreNodeNum: pulumi.Float64(0),
MrsClusterV1Id: pulumi.String("string"),
MasterDataVolumeSize: pulumi.Float64(0),
LogCollection: pulumi.Float64(0),
Region: pulumi.String("string"),
MasterDataVolumeType: pulumi.String("string"),
CoreDataVolumeSize: pulumi.Float64(0),
CoreDataVolumeCount: pulumi.Float64(0),
MasterDataVolumeCount: pulumi.Float64(0),
AddJobs: opentelekomcloud.MrsClusterV1AddJobArray{
&opentelekomcloud.MrsClusterV1AddJobArgs{
JarPath: pulumi.String("string"),
JobName: pulumi.String("string"),
JobType: pulumi.Float64(0),
SubmitJobOnceClusterRun: pulumi.Bool(false),
Arguments: pulumi.String("string"),
FileAction: pulumi.String("string"),
HiveScriptPath: pulumi.String("string"),
Hql: pulumi.String("string"),
Input: pulumi.String("string"),
JobLog: pulumi.String("string"),
Output: pulumi.String("string"),
ShutdownCluster: pulumi.Bool(false),
},
},
CoreDataVolumeType: pulumi.String("string"),
ClusterType: pulumi.Float64(0),
ClusterAdminSecret: pulumi.String("string"),
Tags: pulumi.StringMap{
"string": pulumi.String("string"),
},
Timeouts: &opentelekomcloud.MrsClusterV1TimeoutsArgs{
Create: pulumi.String("string"),
Delete: pulumi.String("string"),
},
VolumeSize: pulumi.Float64(0),
VolumeType: pulumi.String("string"),
BootstrapScripts: opentelekomcloud.MrsClusterV1BootstrapScriptArray{
&opentelekomcloud.MrsClusterV1BootstrapScriptArgs{
FailAction: pulumi.String("string"),
Name: pulumi.String("string"),
Nodes: pulumi.StringArray{
pulumi.String("string"),
},
Uri: pulumi.String("string"),
ActiveMaster: pulumi.Bool(false),
BeforeComponentStart: pulumi.Bool(false),
Parameters: pulumi.String("string"),
},
},
})
// Reference example: every input property shown with a placeholder value
// ("string", 0, false) — substitute real values before use.
var mrsClusterV1Resource = new MrsClusterV1("mrsClusterV1Resource", MrsClusterV1Args.builder()
.nodePublicCertName("string")
.coreNodeSize("string")
.billingType(0)
.vpcId("string")
.subnetId("string")
.clusterName("string")
.safeMode(0)
.clusterVersion("string")
.componentLists(MrsClusterV1ComponentListArgs.builder()
.componentName("string")
.componentDesc("string")
.componentId("string")
.componentVersion("string")
.build())
.masterNodeSize("string")
.masterNodeNum(0)
.availableZoneId("string")
.coreNodeNum(0)
.mrsClusterV1Id("string")
.masterDataVolumeSize(0)
.logCollection(0)
.region("string")
.masterDataVolumeType("string")
.coreDataVolumeSize(0)
.coreDataVolumeCount(0)
.masterDataVolumeCount(0)
.addJobs(MrsClusterV1AddJobArgs.builder()
.jarPath("string")
.jobName("string")
.jobType(0)
.submitJobOnceClusterRun(false)
.arguments("string")
.fileAction("string")
.hiveScriptPath("string")
.hql("string")
.input("string")
.jobLog("string")
.output("string")
.shutdownCluster(false)
.build())
.coreDataVolumeType("string")
.clusterType(0)
.clusterAdminSecret("string")
.tags(Map.of("string", "string"))
.timeouts(MrsClusterV1TimeoutsArgs.builder()
.create("string")
.delete("string")
.build())
.volumeSize(0)
.volumeType("string")
.bootstrapScripts(MrsClusterV1BootstrapScriptArgs.builder()
.failAction("string")
.name("string")
.nodes("string")
.uri("string")
.activeMaster(false)
.beforeComponentStart(false)
.parameters("string")
.build())
.build());
# Reference example: every input property shown with a placeholder value
# ("string", 0, False) — substitute real values before use.
mrs_cluster_v1_resource = opentelekomcloud.MrsClusterV1("mrsClusterV1Resource",
node_public_cert_name="string",
core_node_size="string",
billing_type=0,
vpc_id="string",
subnet_id="string",
cluster_name="string",
safe_mode=0,
cluster_version="string",
component_lists=[{
"component_name": "string",
"component_desc": "string",
"component_id": "string",
"component_version": "string",
}],
master_node_size="string",
master_node_num=0,
available_zone_id="string",
core_node_num=0,
mrs_cluster_v1_id="string",
master_data_volume_size=0,
log_collection=0,
region="string",
master_data_volume_type="string",
core_data_volume_size=0,
core_data_volume_count=0,
master_data_volume_count=0,
add_jobs=[{
"jar_path": "string",
"job_name": "string",
"job_type": 0,
"submit_job_once_cluster_run": False,
"arguments": "string",
"file_action": "string",
"hive_script_path": "string",
"hql": "string",
"input": "string",
"job_log": "string",
"output": "string",
"shutdown_cluster": False,
}],
core_data_volume_type="string",
cluster_type=0,
cluster_admin_secret="string",
tags={
"string": "string",
},
timeouts={
"create": "string",
"delete": "string",
},
volume_size=0,
volume_type="string",
bootstrap_scripts=[{
"fail_action": "string",
"name": "string",
"nodes": ["string"],
"uri": "string",
"active_master": False,
"before_component_start": False,
"parameters": "string",
}])
// Reference example: every input property shown with a placeholder value
// ("string", 0, false) — substitute real values before use.
const mrsClusterV1Resource = new opentelekomcloud.MrsClusterV1("mrsClusterV1Resource", {
nodePublicCertName: "string",
coreNodeSize: "string",
billingType: 0,
vpcId: "string",
subnetId: "string",
clusterName: "string",
safeMode: 0,
clusterVersion: "string",
componentLists: [{
componentName: "string",
componentDesc: "string",
componentId: "string",
componentVersion: "string",
}],
masterNodeSize: "string",
masterNodeNum: 0,
availableZoneId: "string",
coreNodeNum: 0,
mrsClusterV1Id: "string",
masterDataVolumeSize: 0,
logCollection: 0,
region: "string",
masterDataVolumeType: "string",
coreDataVolumeSize: 0,
coreDataVolumeCount: 0,
masterDataVolumeCount: 0,
addJobs: [{
jarPath: "string",
jobName: "string",
jobType: 0,
submitJobOnceClusterRun: false,
arguments: "string",
fileAction: "string",
hiveScriptPath: "string",
hql: "string",
input: "string",
jobLog: "string",
output: "string",
shutdownCluster: false,
}],
coreDataVolumeType: "string",
clusterType: 0,
clusterAdminSecret: "string",
tags: {
string: "string",
},
timeouts: {
create: "string",
"delete": "string",
},
volumeSize: 0,
volumeType: "string",
bootstrapScripts: [{
failAction: "string",
name: "string",
nodes: ["string"],
uri: "string",
activeMaster: false,
beforeComponentStart: false,
parameters: "string",
}],
});
# Reference example: every input property shown with a placeholder value
# (string, 0, false) — substitute real values before use.
type: opentelekomcloud:MrsClusterV1
properties:
addJobs:
- arguments: string
fileAction: string
hiveScriptPath: string
hql: string
input: string
jarPath: string
jobLog: string
jobName: string
jobType: 0
output: string
shutdownCluster: false
submitJobOnceClusterRun: false
availableZoneId: string
billingType: 0
bootstrapScripts:
- activeMaster: false
beforeComponentStart: false
failAction: string
name: string
nodes:
- string
parameters: string
uri: string
clusterAdminSecret: string
clusterName: string
clusterType: 0
clusterVersion: string
componentLists:
- componentDesc: string
componentId: string
componentName: string
componentVersion: string
coreDataVolumeCount: 0
coreDataVolumeSize: 0
coreDataVolumeType: string
coreNodeNum: 0
coreNodeSize: string
logCollection: 0
masterDataVolumeCount: 0
masterDataVolumeSize: 0
masterDataVolumeType: string
masterNodeNum: 0
masterNodeSize: string
mrsClusterV1Id: string
nodePublicCertName: string
region: string
safeMode: 0
subnetId: string
tags:
string: string
timeouts:
create: string
delete: string
volumeSize: 0
volumeType: string
vpcId: string
MrsClusterV1 Resource Properties
To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.
Inputs
In Python, inputs that are objects can be passed either as argument classes or as dictionary literals.
The MrsClusterV1 resource accepts the following input properties:
- AvailableZoneId string
- ID of an available zone. Obtain the value from Regions and Endpoints.
- BillingType double
- The value is 12, indicating on-demand payment.
- ClusterName string
- Cluster name, which is globally unique and contains only 1 to 64 characters: letters, digits, hyphens (-), and underscores (_).
- ClusterVersion string
- Version of the clusters. Currently, MRS 1.6.3, MRS 1.7.0, MRS 1.9.2, MRS 2.1.0, and MRS 3.0.2 are supported. The latest version of MRS is used by default.
- Component
Lists List<MrsCluster V1Component List> - Service component list.
- Core
Node doubleNum - Number of Core nodes Value range:
1
to500
. A maximum of500
Core nodes are supported by default. If more than500
Core nodes are required, contact technical support engineers or invoke background APIs to modify the database. - Core
Node stringSize - Instance specification of a Core node Configuration
method of this parameter is identical to that of
master_node_size
. - Master
Node doubleNum - Number of Master nodes.
- Master
Node stringSize - Best match based on several years of commissioning
experience. MRS supports specifications of hosts, and host specifications are
determined by CPUs, memory, and disks space. Master nodes support
h1.2xlarge.linux.mrs
h1.4xlarge.linux.mrs
,h1.8xlarge.linux.mrs
,s1.4xlarge.linux.mrs
, ands1.8xlarge.linux.mrs
. Core nodes of a streaming cluster support all specificationsc2.2xlarge.linux.mrs
,c2.4xlarge.linux.mrs
,s1.xlarge.linux.mrs
,s1.4xlarge.linux.mrs
,s1.8xlarge.linux.mrs
,d1.xlarge.linux.mrs
,d1.2xlarge.linux.mrs
,d1.4xlarge.linux.mrs
,d1.8xlarge.linux.mrs
,h1.2xlarge.linux.mrs
,h1.4xlarge.linux.mrs
andh1.8xlarge.linux.mrs
. Task nodes supportc2.2xlarge.linux.mrs
,c2.4xlarge.linux.mrs
,s1.xlarge.linux.mrs
,s1.4xlarge.linux.mrs
,s1.8xlarge.linux.mrs
,h1.2xlarge.linux.mrs
,h1.4xlarge.linux.mrs
, andh1.8xlarge.linux.mrs
. - Node
Public stringCert Name - Name of a key pair You can use a key to log in to the Master node in the cluster.
- Safe
Mode double - MRS cluster running mode
0
: common mode. The value indicates that the Kerberos authentication is disabled. Users can use all functions provided by the cluster.1
: safe mode. The value indicates that the Kerberos authentication is enabled. Common users cannot use the file management or job management functions of an MRS cluster and cannot view cluster resource usage or the job records of Hadoop and Spark. To use these functions, the users must obtain the relevant permissions from the MRS Manager administrator. The request has thecluster_admin_secret
parameter only whensafe_mode
is set to1
. - Subnet
Id string - Subnet ID Obtain the subnet ID from the management console as follows: Register an account and log in to the management console. Click Virtual Private Cloud and select Virtual Private Cloud from the left list. On the Virtual Private Cloud page, obtain the subnet ID from the list.
- Vpc
Id string - ID of the VPC where the subnet locates Obtain the VPC ID from the management console as follows: Register an account and log in to the management console. Click Virtual Private Cloud and select Virtual Private Cloud from the left list. On the Virtual Private Cloud page, obtain the VPC ID from the list.
- Add
Jobs List<MrsCluster V1Add Job> - You can submit a job when you create a cluster to save time and use MRS easily. Only one job can be added.
- Bootstrap
Scripts List<MrsCluster V1Bootstrap Script> - Bootstrap action scripts. For details, see bootstrap_scripts block below.
- ClusterAdminSecret string
- Indicates the password of the MRS Manager administrator. The password must contain 8 to 32 characters and include at least two of the following types: lowercase letters, uppercase letters, digits, special characters ~!@#$%^&*()-_=+\|[{}];:'",<.>/? and spaces.
- Cluster
Type double - Type of clusters
0
: analysis cluster,1
: streaming cluster The default value is0
. - Core
Data doubleVolume Count - Number of data disks of the Core node.
Value range:
1
to10
. - Core
Data doubleVolume Size - Data disk size of the Core node.
Value range:
100
GB to32000
GB. - Core
Data stringVolume Type - Data disk storage type of the Core node,
supporting
SATA
,SAS
andSSD
.SATA
: Common I/O,SAS
: High I/O,SSD
: Ultra-high I/O. - Log
Collection double - Indicates whether logs are collected when cluster
installation fails.
0
: not collected.1
: collected. The default value is0
. Iflog_collection
is set to1
, OBS buckets will be created to collect the MRS logs. These buckets will be charged. - Master
Data doubleVolume Count - Number of data disks of the Master node.
The value can be set to
1
only. - Master
Data doubleVolume Size - Data disk size of the Master node.
Value range:
100
GB to32000
GB. - Master
Data stringVolume Type - Data disk storage type of the Master node,
supporting
SATA
,SAS
andSSD
.SATA
: Common I/O,SAS
: High I/O,SSD
: Ultra-high I/O. - Mrs
Cluster stringV1Id - Region string
- Dictionary<string, string>
- Tags key/value pairs to associate with the cluster.
- Timeouts
Mrs
Cluster V1Timeouts - Volume
Size double - Data disk storage space of a Core node Users can
add disks to expand storage capacity when creating a cluster. There are the
following scenarios: Separation of data storage and computing: Data is stored
in the OBS system. Costs of clusters are relatively low but computing performance
is poor. The clusters can be deleted at any time. It is recommended when data
computing is not frequently performed. Integration of data storage and computing:
Data is stored in the HDFS system. Costs of clusters are relatively high but
computing performance is good. The clusters cannot be deleted in a short term.
It is recommended when data computing is frequently performed. Value range:
100
GB to32000
GB. - Volume
Type string - Type of disks
SATA
,SAS
andSSD
are supported.SATA
: common I/O,SAS
: High I/O,SSD
: Ultra-high I/O.
- Available
Zone stringId - ID of an available zone. Obtain the value from Regions and Endpoints.
- Billing
Type float64 - The value is
12
, indicating on-demand payment. - Cluster
Name string - Cluster name, which is globally unique and contains
only
1
to64
letters, digits, hyphens (-), and underscores (_). - Cluster
Version string - Version of the clusters Currently,
MRS 1.6.3
,MRS 1.7.0
,MRS 1.9.2
,MRS 2.1.0
,MRS 3.0.2
are supported. The latest version of MRS is used by default. - Component
Lists []MrsCluster V1Component List Args - Service component list.
- Core
Node float64Num - Number of Core nodes Value range:
1
to500
. A maximum of500
Core nodes are supported by default. If more than500
Core nodes are required, contact technical support engineers or invoke background APIs to modify the database. - Core
Node stringSize - Instance specification of a Core node Configuration
method of this parameter is identical to that of
master_node_size
. - Master
Node float64Num - Number of Master nodes.
- Master
Node stringSize - Best match based on several years of commissioning
experience. MRS supports specifications of hosts, and host specifications are
determined by CPUs, memory, and disks space. Master nodes support
h1.2xlarge.linux.mrs
h1.4xlarge.linux.mrs
,h1.8xlarge.linux.mrs
,s1.4xlarge.linux.mrs
, ands1.8xlarge.linux.mrs
. Core nodes of a streaming cluster support all specificationsc2.2xlarge.linux.mrs
,c2.4xlarge.linux.mrs
,s1.xlarge.linux.mrs
,s1.4xlarge.linux.mrs
,s1.8xlarge.linux.mrs
,d1.xlarge.linux.mrs
,d1.2xlarge.linux.mrs
,d1.4xlarge.linux.mrs
,d1.8xlarge.linux.mrs
,h1.2xlarge.linux.mrs
,h1.4xlarge.linux.mrs
andh1.8xlarge.linux.mrs
. Task nodes supportc2.2xlarge.linux.mrs
,c2.4xlarge.linux.mrs
,s1.xlarge.linux.mrs
,s1.4xlarge.linux.mrs
,s1.8xlarge.linux.mrs
,h1.2xlarge.linux.mrs
,h1.4xlarge.linux.mrs
, andh1.8xlarge.linux.mrs
. - Node
Public stringCert Name - Name of a key pair You can use a key to log in to the Master node in the cluster.
- Safe
Mode float64 - MRS cluster running mode
0
: common mode. The value indicates that the Kerberos authentication is disabled. Users can use all functions provided by the cluster.1
: safe mode. The value indicates that the Kerberos authentication is enabled. Common users cannot use the file management or job management functions of an MRS cluster and cannot view cluster resource usage or the job records of Hadoop and Spark. To use these functions, the users must obtain the relevant permissions from the MRS Manager administrator. The request has thecluster_admin_secret
parameter only whensafe_mode
is set to1
. - Subnet
Id string - Subnet ID Obtain the subnet ID from the management console as follows: Register an account and log in to the management console. Click Virtual Private Cloud and select Virtual Private Cloud from the left list. On the Virtual Private Cloud page, obtain the subnet ID from the list.
- Vpc
Id string - ID of the VPC where the subnet locates Obtain the VPC ID from the management console as follows: Register an account and log in to the management console. Click Virtual Private Cloud and select Virtual Private Cloud from the left list. On the Virtual Private Cloud page, obtain the VPC ID from the list.
- Add
Jobs []MrsCluster V1Add Job Args - You can submit a job when you create a cluster to save time and use MRS easily. Only one job can be added.
- Bootstrap
Scripts []MrsCluster V1Bootstrap Script Args - Bootstrap action scripts. For details, see bootstrap_scripts block below.
- Cluster
Admin stringSecret - Indicates the password of the MRS Manager
administrator. The password must contain
8
to 32
characters. Must contain at least two types of the following: Lowercase letters, Uppercase letters, Digits, Special characters ~!@#$%^&*()-_=+\|[{}];:'",<.>/?
and spaces. - Cluster
Type float64 - Type of clusters
0
: analysis cluster, 1
: streaming cluster. The default value is 0
. - Core
Data float64Volume Count - Number of data disks of the Core node.
Value range:
1
to 10
. - Core
Data float64Volume Size - Data disk size of the Core node.
Value range:
100
GB to 32000
GB. - Core
Data stringVolume Type - Data disk storage type of the Core node,
supporting
SATA
,SAS
andSSD
.SATA
: Common I/O,SAS
: High I/O,SSD
: Ultra-high I/O. - Log
Collection float64 - Indicates whether logs are collected when cluster
installation fails.
0
: not collected. 1
: collected. The default value is 0
. If log_collection
is set to 1
, OBS buckets will be created to collect the MRS logs. These buckets will be charged. - Master
Data float64Volume Count - Number of data disks of the Master node.
The value can be set to
1
only. - Master
Data float64Volume Size - Data disk size of the Master node.
Value range:
100
GB to 32000
GB. - Master
Data stringVolume Type - Data disk storage type of the Master node,
supporting
SATA
,SAS
andSSD
.SATA
: Common I/O,SAS
: High I/O,SSD
: Ultra-high I/O. - Mrs
Cluster stringV1Id - Region string
- map[string]string
- Tags key/value pairs to associate with the cluster.
- Timeouts
Mrs
Cluster V1Timeouts Args - Volume
Size float64 - Data disk storage space of a Core node Users can
add disks to expand storage capacity when creating a cluster. There are the
following scenarios: Separation of data storage and computing: Data is stored
in the OBS system. Costs of clusters are relatively low but computing performance
is poor. The clusters can be deleted at any time. It is recommended when data
computing is not frequently performed. Integration of data storage and computing:
Data is stored in the HDFS system. Costs of clusters are relatively high but
computing performance is good. The clusters cannot be deleted in a short term.
It is recommended when data computing is frequently performed. Value range:
100
GB to32000
GB. - Volume
Type string - Type of disks
SATA
,SAS
andSSD
are supported.SATA
: common I/O,SAS
: High I/O,SSD
: Ultra-high I/O.
- available
Zone StringId - ID of an available zone. Obtain the value from Regions and Endpoints.
- billing
Type Double - The value is
12
, indicating on-demand payment. - cluster
Name String - Cluster name, which is globally unique and contains
only
1
to 64
letters, digits, hyphens (-), and underscores (_). - cluster
Version String - Version of the clusters Currently,
MRS 1.6.3
,MRS 1.7.0
,MRS 1.9.2
,MRS 2.1.0
,MRS 3.0.2
are supported. The latest version of MRS is used by default. - component
Lists List<MrsCluster V1Component List> - Service component list.
- core
Node DoubleNum - Number of Core nodes Value range:
1
to 500
. A maximum of 500
Core nodes are supported by default. If more than 500
Core nodes are required, contact technical support engineers or invoke background APIs to modify the database. - core
Node StringSize - Instance specification of a Core node Configuration
method of this parameter is identical to that of
master_node_size
. - master
Node DoubleNum - Number of Master nodes.
- master
Node StringSize - Best match based on several years of commissioning
experience. MRS supports specifications of hosts, and host specifications are
determined by CPUs, memory, and disks space. Master nodes support
h1.2xlarge.linux.mrs
h1.4xlarge.linux.mrs
,h1.8xlarge.linux.mrs
,s1.4xlarge.linux.mrs
, ands1.8xlarge.linux.mrs
. Core nodes of a streaming cluster support all specifications c2.2xlarge.linux.mrs
,c2.4xlarge.linux.mrs
,s1.xlarge.linux.mrs
,s1.4xlarge.linux.mrs
,s1.8xlarge.linux.mrs
,d1.xlarge.linux.mrs
,d1.2xlarge.linux.mrs
,d1.4xlarge.linux.mrs
,d1.8xlarge.linux.mrs
,h1.2xlarge.linux.mrs
,h1.4xlarge.linux.mrs
and h1.8xlarge.linux.mrs
. Task nodes support c2.2xlarge.linux.mrs
,c2.4xlarge.linux.mrs
,s1.xlarge.linux.mrs
,s1.4xlarge.linux.mrs
,s1.8xlarge.linux.mrs
,h1.2xlarge.linux.mrs
,h1.4xlarge.linux.mrs
, and h1.8xlarge.linux.mrs
. - node
Public StringCert Name - Name of a key pair You can use a key to log in to the Master node in the cluster.
- safe
Mode Double - MRS cluster running mode
0
: common mode. The value indicates that the Kerberos authentication is disabled. Users can use all functions provided by the cluster. 1
: safe mode. The value indicates that the Kerberos authentication is enabled. Common users cannot use the file management or job management functions of an MRS cluster and cannot view cluster resource usage or the job records of Hadoop and Spark. To use these functions, the users must obtain the relevant permissions from the MRS Manager administrator. The request has the cluster_admin_secret
parameter only when safe_mode
is set to 1
. - subnet
Id String - Subnet ID Obtain the subnet ID from the management console as follows: Register an account and log in to the management console. Click Virtual Private Cloud and select Virtual Private Cloud from the left list. On the Virtual Private Cloud page, obtain the subnet ID from the list.
- vpc
Id String - ID of the VPC where the subnet locates Obtain the VPC ID from the management console as follows: Register an account and log in to the management console. Click Virtual Private Cloud and select Virtual Private Cloud from the left list. On the Virtual Private Cloud page, obtain the VPC ID from the list.
- add
Jobs List<MrsCluster V1Add Job> - You can submit a job when you create a cluster to save time and use MRS easily. Only one job can be added.
- bootstrap
Scripts List<MrsCluster V1Bootstrap Script> - Bootstrap action scripts. For details, see bootstrap_scripts block below.
- cluster
Admin StringSecret - Indicates the password of the MRS Manager
administrator. The password must contain
8
to 32
characters. Must contain at least two types of the following: Lowercase letters, Uppercase letters, Digits, Special characters ~!@#$%^&*()-_=+\|[{}];:'",<.>/?
and spaces. - cluster
Type Double - Type of clusters
0
: analysis cluster, 1
: streaming cluster. The default value is 0
. - core
Data DoubleVolume Count - Number of data disks of the Core node.
Value range:
1
to 10
. - core
Data DoubleVolume Size - Data disk size of the Core node.
Value range:
100
GB to 32000
GB. - core
Data StringVolume Type - Data disk storage type of the Core node,
supporting
SATA
,SAS
andSSD
.SATA
: Common I/O,SAS
: High I/O,SSD
: Ultra-high I/O. - log
Collection Double - Indicates whether logs are collected when cluster
installation fails.
0
: not collected. 1
: collected. The default value is 0
. If log_collection
is set to 1
, OBS buckets will be created to collect the MRS logs. These buckets will be charged. - master
Data DoubleVolume Count - Number of data disks of the Master node.
The value can be set to
1
only. - master
Data DoubleVolume Size - Data disk size of the Master node.
Value range:
100
GB to32000
GB. - master
Data StringVolume Type - Data disk storage type of the Master node,
supporting
SATA
,SAS
andSSD
.SATA
: Common I/O,SAS
: High I/O,SSD
: Ultra-high I/O. - mrs
Cluster StringV1Id - region String
- Map<String,String>
- Tags key/value pairs to associate with the cluster.
- timeouts
Mrs
Cluster V1Timeouts - volume
Size Double - Data disk storage space of a Core node Users can
add disks to expand storage capacity when creating a cluster. There are the
following scenarios: Separation of data storage and computing: Data is stored
in the OBS system. Costs of clusters are relatively low but computing performance
is poor. The clusters can be deleted at any time. It is recommended when data
computing is not frequently performed. Integration of data storage and computing:
Data is stored in the HDFS system. Costs of clusters are relatively high but
computing performance is good. The clusters cannot be deleted in a short term.
It is recommended when data computing is frequently performed. Value range:
100
GB to32000
GB. - volume
Type String - Type of disks
SATA
,SAS
andSSD
are supported.SATA
: common I/O,SAS
: High I/O,SSD
: Ultra-high I/O.
- available
Zone stringId - ID of an available zone. Obtain the value from Regions and Endpoints.
- billing
Type number - The value is
12
, indicating on-demand payment. - cluster
Name string - Cluster name, which is globally unique and contains
only
1
to 64
letters, digits, hyphens (-), and underscores (_). - cluster
Version string - Version of the clusters Currently,
MRS 1.6.3
,MRS 1.7.0
,MRS 1.9.2
,MRS 2.1.0
,MRS 3.0.2
are supported. The latest version of MRS is used by default. - component
Lists MrsCluster V1Component List[] - Service component list.
- core
Node numberNum - Number of Core nodes Value range:
1
to 500
. A maximum of 500
Core nodes are supported by default. If more than 500
Core nodes are required, contact technical support engineers or invoke background APIs to modify the database. - core
Node stringSize - Instance specification of a Core node Configuration
method of this parameter is identical to that of
master_node_size
. - master
Node numberNum - Number of Master nodes.
- master
Node stringSize - Best match based on several years of commissioning
experience. MRS supports specifications of hosts, and host specifications are
determined by CPUs, memory, and disks space. Master nodes support
h1.2xlarge.linux.mrs
h1.4xlarge.linux.mrs
,h1.8xlarge.linux.mrs
,s1.4xlarge.linux.mrs
, ands1.8xlarge.linux.mrs
. Core nodes of a streaming cluster support all specifications c2.2xlarge.linux.mrs
,c2.4xlarge.linux.mrs
,s1.xlarge.linux.mrs
,s1.4xlarge.linux.mrs
,s1.8xlarge.linux.mrs
,d1.xlarge.linux.mrs
,d1.2xlarge.linux.mrs
,d1.4xlarge.linux.mrs
,d1.8xlarge.linux.mrs
,h1.2xlarge.linux.mrs
,h1.4xlarge.linux.mrs
and h1.8xlarge.linux.mrs
. Task nodes support c2.2xlarge.linux.mrs
,c2.4xlarge.linux.mrs
,s1.xlarge.linux.mrs
,s1.4xlarge.linux.mrs
,s1.8xlarge.linux.mrs
,h1.2xlarge.linux.mrs
,h1.4xlarge.linux.mrs
, and h1.8xlarge.linux.mrs
. - node
Public stringCert Name - Name of a key pair You can use a key to log in to the Master node in the cluster.
- safe
Mode number - MRS cluster running mode
0
: common mode. The value indicates that the Kerberos authentication is disabled. Users can use all functions provided by the cluster. 1
: safe mode. The value indicates that the Kerberos authentication is enabled. Common users cannot use the file management or job management functions of an MRS cluster and cannot view cluster resource usage or the job records of Hadoop and Spark. To use these functions, the users must obtain the relevant permissions from the MRS Manager administrator. The request has the cluster_admin_secret
parameter only when safe_mode
is set to 1
. - subnet
Id string - Subnet ID Obtain the subnet ID from the management console as follows: Register an account and log in to the management console. Click Virtual Private Cloud and select Virtual Private Cloud from the left list. On the Virtual Private Cloud page, obtain the subnet ID from the list.
- vpc
Id string - ID of the VPC where the subnet locates Obtain the VPC ID from the management console as follows: Register an account and log in to the management console. Click Virtual Private Cloud and select Virtual Private Cloud from the left list. On the Virtual Private Cloud page, obtain the VPC ID from the list.
- add
Jobs MrsCluster V1Add Job[] - You can submit a job when you create a cluster to save time and use MRS easily. Only one job can be added.
- bootstrap
Scripts MrsCluster V1Bootstrap Script[] - Bootstrap action scripts. For details, see bootstrap_scripts block below.
- cluster
Admin stringSecret - Indicates the password of the MRS Manager
administrator. The password must contain
8
to 32
characters. Must contain at least two types of the following: Lowercase letters, Uppercase letters, Digits, Special characters ~!@#$%^&*()-_=+\|[{}];:'",<.>/?
and spaces. - cluster
Type number - Type of clusters
0
: analysis cluster, 1
: streaming cluster. The default value is 0
. - core
Data numberVolume Count - Number of data disks of the Core node.
Value range:
1
to 10
. - core
Data numberVolume Size - Data disk size of the Core node.
Value range:
100
GB to 32000
GB. - core
Data stringVolume Type - Data disk storage type of the Core node,
supporting
SATA
,SAS
andSSD
.SATA
: Common I/O,SAS
: High I/O,SSD
: Ultra-high I/O. - log
Collection number - Indicates whether logs are collected when cluster
installation fails.
0
: not collected. 1
: collected. The default value is 0
. If log_collection
is set to 1
, OBS buckets will be created to collect the MRS logs. These buckets will be charged. - master
Data numberVolume Count - Number of data disks of the Master node.
The value can be set to
1
only. - master
Data numberVolume Size - Data disk size of the Master node.
Value range:
100
GB to32000
GB. - master
Data stringVolume Type - Data disk storage type of the Master node,
supporting
SATA
,SAS
andSSD
.SATA
: Common I/O,SAS
: High I/O,SSD
: Ultra-high I/O. - mrs
Cluster stringV1Id - region string
- {[key: string]: string}
- Tags key/value pairs to associate with the cluster.
- timeouts
Mrs
Cluster V1Timeouts - volume
Size number - Data disk storage space of a Core node Users can
add disks to expand storage capacity when creating a cluster. There are the
following scenarios: Separation of data storage and computing: Data is stored
in the OBS system. Costs of clusters are relatively low but computing performance
is poor. The clusters can be deleted at any time. It is recommended when data
computing is not frequently performed. Integration of data storage and computing:
Data is stored in the HDFS system. Costs of clusters are relatively high but
computing performance is good. The clusters cannot be deleted in a short term.
It is recommended when data computing is frequently performed. Value range:
100
GB to32000
GB. - volume
Type string - Type of disks
SATA
,SAS
andSSD
are supported.SATA
: common I/O,SAS
: High I/O,SSD
: Ultra-high I/O.
- available_
zone_ strid - ID of an available zone. Obtain the value from Regions and Endpoints.
- billing_
type float - The value is
12
, indicating on-demand payment. - cluster_
name str - Cluster name, which is globally unique and contains
only
1
to 64
letters, digits, hyphens (-), and underscores (_). - cluster_
version str - Version of the clusters Currently,
MRS 1.6.3
,MRS 1.7.0
,MRS 1.9.2
,MRS 2.1.0
,MRS 3.0.2
are supported. The latest version of MRS is used by default. - component_
lists Sequence[MrsCluster V1Component List Args] - Service component list.
- core_
node_ floatnum - Number of Core nodes Value range:
1
to 500
. A maximum of 500
Core nodes are supported by default. If more than 500
Core nodes are required, contact technical support engineers or invoke background APIs to modify the database. - core_
node_ strsize - Instance specification of a Core node Configuration
method of this parameter is identical to that of
master_node_size
. - master_
node_ floatnum - Number of Master nodes.
- master_
node_ strsize - Best match based on several years of commissioning
experience. MRS supports specifications of hosts, and host specifications are
determined by CPUs, memory, and disks space. Master nodes support
h1.2xlarge.linux.mrs
h1.4xlarge.linux.mrs
,h1.8xlarge.linux.mrs
,s1.4xlarge.linux.mrs
, ands1.8xlarge.linux.mrs
. Core nodes of a streaming cluster support all specifications c2.2xlarge.linux.mrs
,c2.4xlarge.linux.mrs
,s1.xlarge.linux.mrs
,s1.4xlarge.linux.mrs
,s1.8xlarge.linux.mrs
,d1.xlarge.linux.mrs
,d1.2xlarge.linux.mrs
,d1.4xlarge.linux.mrs
,d1.8xlarge.linux.mrs
,h1.2xlarge.linux.mrs
,h1.4xlarge.linux.mrs
and h1.8xlarge.linux.mrs
. Task nodes support c2.2xlarge.linux.mrs
,c2.4xlarge.linux.mrs
,s1.xlarge.linux.mrs
,s1.4xlarge.linux.mrs
,s1.8xlarge.linux.mrs
,h1.2xlarge.linux.mrs
,h1.4xlarge.linux.mrs
, and h1.8xlarge.linux.mrs
. - node_
public_ strcert_ name - Name of a key pair You can use a key to log in to the Master node in the cluster.
- safe_
mode float - MRS cluster running mode
0
: common mode. The value indicates that the Kerberos authentication is disabled. Users can use all functions provided by the cluster. 1
: safe mode. The value indicates that the Kerberos authentication is enabled. Common users cannot use the file management or job management functions of an MRS cluster and cannot view cluster resource usage or the job records of Hadoop and Spark. To use these functions, the users must obtain the relevant permissions from the MRS Manager administrator. The request has the cluster_admin_secret
parameter only when safe_mode
is set to 1
. - subnet_
id str - Subnet ID Obtain the subnet ID from the management console as follows: Register an account and log in to the management console. Click Virtual Private Cloud and select Virtual Private Cloud from the left list. On the Virtual Private Cloud page, obtain the subnet ID from the list.
- vpc_
id str - ID of the VPC where the subnet locates Obtain the VPC ID from the management console as follows: Register an account and log in to the management console. Click Virtual Private Cloud and select Virtual Private Cloud from the left list. On the Virtual Private Cloud page, obtain the VPC ID from the list.
- add_
jobs Sequence[MrsCluster V1Add Job Args] - You can submit a job when you create a cluster to save time and use MRS easily. Only one job can be added.
- bootstrap_
scripts Sequence[MrsCluster V1Bootstrap Script Args] - Bootstrap action scripts. For details, see bootstrap_scripts block below.
- cluster_
admin_ strsecret - Indicates the password of the MRS Manager
administrator. The password must contain
8
to 32
characters. Must contain at least two types of the following: Lowercase letters, Uppercase letters, Digits, Special characters ~!@#$%^&*()-_=+\|[{}];:'",<.>/?
and spaces. - cluster_
type float - Type of clusters
0
: analysis cluster, 1
: streaming cluster. The default value is 0
. - core_
data_ floatvolume_ count - Number of data disks of the Core node.
Value range:
1
to 10
. - core_
data_ floatvolume_ size - Data disk size of the Core node.
Value range:
100
GB to 32000
GB. - core_
data_ strvolume_ type - Data disk storage type of the Core node,
supporting
SATA
,SAS
andSSD
.SATA
: Common I/O,SAS
: High I/O,SSD
: Ultra-high I/O. - log_
collection float - Indicates whether logs are collected when cluster
installation fails.
0
: not collected. 1
: collected. The default value is 0
. If log_collection
is set to 1
, OBS buckets will be created to collect the MRS logs. These buckets will be charged. - master_
data_ floatvolume_ count - Number of data disks of the Master node.
The value can be set to
1
only. - master_
data_ floatvolume_ size - Data disk size of the Master node.
Value range:
100
GB to32000
GB. - master_
data_ strvolume_ type - Data disk storage type of the Master node,
supporting
SATA
,SAS
andSSD
.SATA
: Common I/O,SAS
: High I/O,SSD
: Ultra-high I/O. - mrs_
cluster_ strv1_ id - region str
- Mapping[str, str]
- Tags key/value pairs to associate with the cluster.
- timeouts
Mrs
Cluster V1Timeouts Args - volume_
size float - Data disk storage space of a Core node Users can
add disks to expand storage capacity when creating a cluster. There are the
following scenarios: Separation of data storage and computing: Data is stored
in the OBS system. Costs of clusters are relatively low but computing performance
is poor. The clusters can be deleted at any time. It is recommended when data
computing is not frequently performed. Integration of data storage and computing:
Data is stored in the HDFS system. Costs of clusters are relatively high but
computing performance is good. The clusters cannot be deleted in a short term.
It is recommended when data computing is frequently performed. Value range:
100
GB to32000
GB. - volume_
type str - Type of disks
SATA
,SAS
andSSD
are supported.SATA
: common I/O,SAS
: High I/O,SSD
: Ultra-high I/O.
- available
Zone StringId - ID of an available zone. Obtain the value from Regions and Endpoints.
- billing
Type Number - The value is
12
, indicating on-demand payment. - cluster
Name String - Cluster name, which is globally unique and contains
only
1
to 64
letters, digits, hyphens (-), and underscores (_). - cluster
Version String - Version of the clusters Currently,
MRS 1.6.3
,MRS 1.7.0
,MRS 1.9.2
,MRS 2.1.0
,MRS 3.0.2
are supported. The latest version of MRS is used by default. - component
Lists List<Property Map> - Service component list.
- core
Node NumberNum - Number of Core nodes Value range:
1
to 500
. A maximum of 500
Core nodes are supported by default. If more than 500
Core nodes are required, contact technical support engineers or invoke background APIs to modify the database. - core
Node StringSize - Instance specification of a Core node Configuration
method of this parameter is identical to that of
master_node_size
. - master
Node NumberNum - Number of Master nodes.
- master
Node StringSize - Best match based on several years of commissioning
experience. MRS supports specifications of hosts, and host specifications are
determined by CPUs, memory, and disks space. Master nodes support
h1.2xlarge.linux.mrs
h1.4xlarge.linux.mrs
,h1.8xlarge.linux.mrs
,s1.4xlarge.linux.mrs
, ands1.8xlarge.linux.mrs
. Core nodes of a streaming cluster support all specifications c2.2xlarge.linux.mrs
,c2.4xlarge.linux.mrs
,s1.xlarge.linux.mrs
,s1.4xlarge.linux.mrs
,s1.8xlarge.linux.mrs
,d1.xlarge.linux.mrs
,d1.2xlarge.linux.mrs
,d1.4xlarge.linux.mrs
,d1.8xlarge.linux.mrs
,h1.2xlarge.linux.mrs
,h1.4xlarge.linux.mrs
and h1.8xlarge.linux.mrs
. Task nodes support c2.2xlarge.linux.mrs
,c2.4xlarge.linux.mrs
,s1.xlarge.linux.mrs
,s1.4xlarge.linux.mrs
,s1.8xlarge.linux.mrs
,h1.2xlarge.linux.mrs
,h1.4xlarge.linux.mrs
, and h1.8xlarge.linux.mrs
. - node
Public StringCert Name - Name of a key pair You can use a key to log in to the Master node in the cluster.
- safe
Mode Number - MRS cluster running mode
0
: common mode. The value indicates that the Kerberos authentication is disabled. Users can use all functions provided by the cluster. 1
: safe mode. The value indicates that the Kerberos authentication is enabled. Common users cannot use the file management or job management functions of an MRS cluster and cannot view cluster resource usage or the job records of Hadoop and Spark. To use these functions, the users must obtain the relevant permissions from the MRS Manager administrator. The request has the cluster_admin_secret
parameter only when safe_mode
is set to 1
. - subnet
Id String - Subnet ID Obtain the subnet ID from the management console as follows: Register an account and log in to the management console. Click Virtual Private Cloud and select Virtual Private Cloud from the left list. On the Virtual Private Cloud page, obtain the subnet ID from the list.
- vpc
Id String - ID of the VPC where the subnet locates Obtain the VPC ID from the management console as follows: Register an account and log in to the management console. Click Virtual Private Cloud and select Virtual Private Cloud from the left list. On the Virtual Private Cloud page, obtain the VPC ID from the list.
- add
Jobs List<Property Map> - You can submit a job when you create a cluster to save time and use MRS easily. Only one job can be added.
- bootstrap
Scripts List<Property Map> - Bootstrap action scripts. For details, see bootstrap_scripts block below.
- cluster
Admin StringSecret - Indicates the password of the MRS Manager
administrator. The password must contain
8
to 32
characters. Must contain at least two types of the following: Lowercase letters, Uppercase letters, Digits, Special characters ~!@#$%^&*()-_=+\|[{}];:'",<.>/?
and spaces. - cluster
Type Number - Type of clusters
0
: analysis cluster, 1
: streaming cluster. The default value is 0
. - core
Data NumberVolume Count - Number of data disks of the Core node.
Value range:
1
to 10
. - core
Data NumberVolume Size - Data disk size of the Core node.
Value range:
100
GB to 32000
GB. - core
Data StringVolume Type - Data disk storage type of the Core node,
supporting
SATA
,SAS
andSSD
.SATA
: Common I/O,SAS
: High I/O,SSD
: Ultra-high I/O. - log
Collection Number - Indicates whether logs are collected when cluster
installation fails.
0
: not collected. 1
: collected. The default value is 0
. If log_collection
is set to 1
, OBS buckets will be created to collect the MRS logs. These buckets will be charged. - master
Data NumberVolume Count - Number of data disks of the Master node.
The value can be set to
1
only. - master
Data NumberVolume Size - Data disk size of the Master node.
Value range:
100
GB to32000
GB. - master
Data StringVolume Type - Data disk storage type of the Master node,
supporting
SATA
,SAS
andSSD
.SATA
: Common I/O,SAS
: High I/O,SSD
: Ultra-high I/O. - mrs
Cluster StringV1Id - region String
- Map<String>
- Tags key/value pairs to associate with the cluster.
- timeouts Property Map
- volume
Size Number - Data disk storage space of a Core node Users can
add disks to expand storage capacity when creating a cluster. There are the
following scenarios: Separation of data storage and computing: Data is stored
in the OBS system. Costs of clusters are relatively low but computing performance
is poor. The clusters can be deleted at any time. It is recommended when data
computing is not frequently performed. Integration of data storage and computing:
Data is stored in the HDFS system. Costs of clusters are relatively high but
computing performance is good. The clusters cannot be deleted in a short term.
It is recommended when data computing is frequently performed. Value range:
100
GB to32000
GB. - volume
Type String - Type of disks
SATA
,SAS
andSSD
are supported.SATA
: common I/O,SAS
: High I/O,SSD
: Ultra-high I/O.
Outputs
All input properties are implicitly available as output properties. Additionally, the MrsClusterV1 resource produces the following output properties:
- Available
Zone stringName - Name of an availability zone.
- Charging
Start stringTime - Time when charging starts.
- Cluster
Id string - Cluster ID.
- Cluster
State string - Cluster status. Valid values include: existing history:
starting
,running
,terminated
,failed
,abnormal
,terminating
,rebooting
,shutdown
,frozen
,scaling-out
,scaling-in
,scaling-error
. - Core
Node stringProduct Id - Product ID of a Core node.
- Core
Node stringSpec Id - Specification ID of a Core node.
- Create
At string - Cluster creation time.
- Deployment
Id string - Deployment ID of a cluster.
- Error
Info string - Error information.
- External
Alternate stringIp - Backup external IP address.
- External
Ip string - External IP address.
- Fee string
- Cluster creation fee, which is automatically calculated.
- Hadoop
Version string - Hadoop version.
- Id string
- The provider-assigned unique ID for this managed resource.
- Instance
Id string - Instance ID.
- Internal
Ip string - Master
Node stringIp - IP address of a Master node.
- Master
Node stringProduct Id - Product ID of a Master node.
- Master
Node stringSpec Id - Specification ID of a Master node.
- Order
Id string - Order ID for creating clusters.
- Private
Ip stringFirst - Primary private IP address.
- Remark string
- Remarks of a cluster.
- Security
Groups stringId - Security group ID.
- Slave
Security stringGroups Id - Standby security group ID.
- Tenant
Id string - Project ID.
- Update
At string - Cluster update time.
- Vnc string
- URI address for remote login of the elastic cloud server.
- Available
Zone stringName - Name of an availability zone.
- Charging
Start stringTime - Time when charging starts.
- Cluster
Id string - Cluster ID.
- Cluster
State string - Cluster status. Valid values include: existing history:
starting
,running
,terminated
,failed
,abnormal
,terminating
,rebooting
,shutdown
,frozen
,scaling-out
,scaling-in
,scaling-error
. - Core
Node stringProduct Id - Product ID of a Core node.
- Core
Node stringSpec Id - Specification ID of a Core node.
- Create
At string - Cluster creation time.
- Deployment
Id string - Deployment ID of a cluster.
- Error
Info string - Error information.
- External
Alternate stringIp - Backup external IP address.
- External
Ip string - External IP address.
- Fee string
- Cluster creation fee, which is automatically calculated.
- Hadoop
Version string - Hadoop version.
- Id string
- The provider-assigned unique ID for this managed resource.
- Instance
Id string - Instance ID.
- Internal
Ip string - Master
Node stringIp - IP address of a Master node.
- Master
Node stringProduct Id - Product ID of a Master node.
- Master
Node stringSpec Id - Specification ID of a Master node.
- Order
Id string - Order ID for creating clusters.
- Private
Ip stringFirst - Primary private IP address.
- Remark string
- Remarks of a cluster.
- Security
Groups stringId - Security group ID.
- Slave
Security stringGroups Id - Standby security group ID.
- Tenant
Id string - Project ID.
- Update
At string - Cluster update time.
- Vnc string
- URI address for remote login of the elastic cloud server.
- available
Zone StringName - Name of an availability zone.
- charging
Start StringTime - Time when charging starts.
- cluster
Id String - Cluster ID.
- cluster
State String - Cluster status. Valid values include: existing history:
starting
,running
,terminated
,failed
,abnormal
,terminating
,rebooting
,shutdown
,frozen
,scaling-out
,scaling-in
,scaling-error
. - core
Node StringProduct Id - Product ID of a Core node.
- core
Node StringSpec Id - Specification ID of a Core node.
- create
At String - Cluster creation time.
- deployment
Id String - Deployment ID of a cluster.
- error
Info String - Error information.
- external
Alternate StringIp - Backup external IP address.
- external
Ip String - External IP address.
- fee String
- Cluster creation fee, which is automatically calculated.
- hadoop
Version String - Hadoop version.
- id String
- The provider-assigned unique ID for this managed resource.
- instance
Id String - Instance ID.
- internal
Ip String - master
Node StringIp - IP address of a Master node.
- master
Node StringProduct Id - Product ID of a Master node.
- master
Node StringSpec Id - Specification ID of a Master node.
- order
Id String - Order ID for creating clusters.
- private
Ip StringFirst - Primary private IP address.
- remark String
- Remarks of a cluster.
- security
Groups StringId - Security group ID.
- slave
Security StringGroups Id - Standby security group ID.
- tenant
Id String - Project ID.
- update
At String - Cluster update time.
- vnc String
- URI address for remote login of the elastic cloud server.
- available
Zone stringName - Name of an availability zone.
- charging
Start stringTime - Time when charging starts.
- cluster
Id string - Cluster ID.
- cluster
State string - Cluster status. Valid values include: existing history:
starting
,running
,terminated
,failed
,abnormal
,terminating
,rebooting
,shutdown
,frozen
,scaling-out
,scaling-in
,scaling-error
. - core
Node stringProduct Id - Product ID of a Core node.
- core
Node stringSpec Id - Specification ID of a Core node.
- create
At string - Cluster creation time.
- deployment
Id string - Deployment ID of a cluster.
- error
Info string - Error information.
- external
Alternate stringIp - Backup external IP address.
- external
Ip string - External IP address.
- fee string
- Cluster creation fee, which is automatically calculated.
- hadoop
Version string - Hadoop version.
- id string
- The provider-assigned unique ID for this managed resource.
- instance
Id string - Instance ID.
- internal
Ip string - master
Node stringIp - IP address of a Master node.
- master
Node stringProduct Id - Product ID of a Master node.
- master
Node stringSpec Id - Specification ID of a Master node.
- order
Id string - Order ID for creating clusters.
- private
Ip stringFirst - Primary private IP address.
- remark string
- Remarks of a cluster.
- security
Groups stringId - Security group ID.
- slave
Security stringGroups Id - Standby security group ID.
- tenant
Id string - Project ID.
- update
At string - Cluster update time.
- vnc string
- URI address for remote login of the elastic cloud server.
- available_
zone_ strname - Name of an availability zone.
- charging_
start_ strtime - Time when charging starts.
- cluster_
id str - Cluster ID.
- cluster_
state str - Cluster status. Valid values include: existing history:
starting
,running
,terminated
,failed
,abnormal
,terminating
,rebooting
,shutdown
,frozen
,scaling-out
,scaling-in
,scaling-error
. - core_
node_ strproduct_ id - Product ID of a Core node.
- core_
node_ strspec_ id - Specification ID of a Core node.
- create_
at str - Cluster creation time.
- deployment_
id str - Deployment ID of a cluster.
- error_
info str - Error information.
- external_
alternate_ strip - Backup external IP address.
- external_
ip str - External IP address.
- fee str
- Cluster creation fee, which is automatically calculated.
- hadoop_
version str - Hadoop version.
- id str
- The provider-assigned unique ID for this managed resource.
- instance_
id str - Instance ID.
- internal_
ip str - master_
node_ strip - IP address of a Master node.
- master_
node_ strproduct_ id - Product ID of a Master node.
- master_
node_ strspec_ id - Specification ID of a Master node.
- order_
id str - Order ID for creating clusters.
- private_
ip_ strfirst - Primary private IP address.
- remark str
- Remarks of a cluster.
- security_
groups_ strid - Security group ID.
- slave_
security_ strgroups_ id - Standby security group ID.
- tenant_
id str - Project ID.
- update_
at str - Cluster update time.
- vnc str
- URI address for remote login of the elastic cloud server.
- available
Zone StringName - Name of an availability zone.
- charging
Start StringTime - Time when charging starts.
- cluster
Id String - Cluster ID.
- cluster
State String - Cluster status. Valid values include: existing history:
starting
,running
,terminated
,failed
,abnormal
,terminating
,rebooting
,shutdown
,frozen
,scaling-out
,scaling-in
,scaling-error
. - core
Node StringProduct Id - Product ID of a Core node.
- core
Node StringSpec Id - Specification ID of a Core node.
- create
At String - Cluster creation time.
- deployment
Id String - Deployment ID of a cluster.
- error
Info String - Error information.
- external
Alternate StringIp - Backup external IP address.
- external
Ip String - External IP address.
- fee String
- Cluster creation fee, which is automatically calculated.
- hadoop
Version String - Hadoop version.
- id String
- The provider-assigned unique ID for this managed resource.
- instance
Id String - Instance ID.
- internal
Ip String - master
Node StringIp - IP address of a Master node.
- master
Node StringProduct Id - Product ID of a Master node.
- master
Node StringSpec Id - Specification ID of a Master node.
- order
Id String - Order ID for creating clusters.
- private
Ip StringFirst - Primary private IP address.
- remark String
- Remarks of a cluster.
- security
Groups StringId - Security group ID.
- slave
Security StringGroups Id - Standby security group ID.
- tenant
Id String - Project ID.
- update
At String - Cluster update time.
- vnc String
- URI address for remote login of the elastic cloud server.
Look up Existing MrsClusterV1 Resource
Get an existing MrsClusterV1 resource’s state with the given name, ID, and optional extra properties used to qualify the lookup.
public static get(name: string, id: Input<ID>, state?: MrsClusterV1State, opts?: CustomResourceOptions): MrsClusterV1
@staticmethod
def get(resource_name: str,
id: str,
opts: Optional[ResourceOptions] = None,
add_jobs: Optional[Sequence[MrsClusterV1AddJobArgs]] = None,
available_zone_id: Optional[str] = None,
available_zone_name: Optional[str] = None,
billing_type: Optional[float] = None,
bootstrap_scripts: Optional[Sequence[MrsClusterV1BootstrapScriptArgs]] = None,
charging_start_time: Optional[str] = None,
cluster_admin_secret: Optional[str] = None,
cluster_id: Optional[str] = None,
cluster_name: Optional[str] = None,
cluster_state: Optional[str] = None,
cluster_type: Optional[float] = None,
cluster_version: Optional[str] = None,
component_lists: Optional[Sequence[MrsClusterV1ComponentListArgs]] = None,
core_data_volume_count: Optional[float] = None,
core_data_volume_size: Optional[float] = None,
core_data_volume_type: Optional[str] = None,
core_node_num: Optional[float] = None,
core_node_product_id: Optional[str] = None,
core_node_size: Optional[str] = None,
core_node_spec_id: Optional[str] = None,
create_at: Optional[str] = None,
deployment_id: Optional[str] = None,
error_info: Optional[str] = None,
external_alternate_ip: Optional[str] = None,
external_ip: Optional[str] = None,
fee: Optional[str] = None,
hadoop_version: Optional[str] = None,
instance_id: Optional[str] = None,
internal_ip: Optional[str] = None,
log_collection: Optional[float] = None,
master_data_volume_count: Optional[float] = None,
master_data_volume_size: Optional[float] = None,
master_data_volume_type: Optional[str] = None,
master_node_ip: Optional[str] = None,
master_node_num: Optional[float] = None,
master_node_product_id: Optional[str] = None,
master_node_size: Optional[str] = None,
master_node_spec_id: Optional[str] = None,
mrs_cluster_v1_id: Optional[str] = None,
node_public_cert_name: Optional[str] = None,
order_id: Optional[str] = None,
private_ip_first: Optional[str] = None,
region: Optional[str] = None,
remark: Optional[str] = None,
safe_mode: Optional[float] = None,
security_groups_id: Optional[str] = None,
slave_security_groups_id: Optional[str] = None,
subnet_id: Optional[str] = None,
tags: Optional[Mapping[str, str]] = None,
tenant_id: Optional[str] = None,
timeouts: Optional[MrsClusterV1TimeoutsArgs] = None,
update_at: Optional[str] = None,
vnc: Optional[str] = None,
volume_size: Optional[float] = None,
volume_type: Optional[str] = None,
vpc_id: Optional[str] = None) -> MrsClusterV1
func GetMrsClusterV1(ctx *Context, name string, id IDInput, state *MrsClusterV1State, opts ...ResourceOption) (*MrsClusterV1, error)
public static MrsClusterV1 Get(string name, Input<string> id, MrsClusterV1State? state, CustomResourceOptions? opts = null)
public static MrsClusterV1 get(String name, Output<String> id, MrsClusterV1State state, CustomResourceOptions options)
resources: _: type: opentelekomcloud:MrsClusterV1 get: id: ${id}
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- resource_name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- Add
Jobs List<MrsCluster V1Add Job> - You can submit a job when you create a cluster to save time and use MRS easily. Only one job can be added.
- Available
Zone stringId - ID of an available zone. Obtain the value from Regions and Endpoints.
- Available
Zone stringName - Name of an availability zone.
- Billing
Type double - The value is
12
, indicating on-demand payment. - Bootstrap
Scripts List<MrsCluster V1Bootstrap Script> - Bootstrap action scripts. For details, see bootstrap_scripts block below.
- Charging
Start stringTime - Time when charging starts.
- Cluster
Admin stringSecret - Indicates the password of the MRS Manager
administrator. The password must contain
8
to32
characters. Must contain at least two types of the following: Lowercase letters, Uppercase letters, Digits, Special characters~!@#$%^&*()-_=+\|[{}];:'",<.>/?
and spaces. - Cluster
Id string - Cluster ID.
- Cluster
Name string - Cluster name, which is globally unique and contains
only
1
to64
letters, digits, hyphens (-), and underscores (_). - Cluster
State string - Cluster status. Valid values include: existing history:
starting
,running
,terminated
,failed
,abnormal
,terminating
,rebooting
,shutdown
,frozen
,scaling-out
,scaling-in
,scaling-error
. - Cluster
Type double - Type of clusters
0
: analysis cluster,1
: streaming cluster The default value is0
. - Cluster
Version string - Version of the clusters Currently,
MRS 1.6.3
,MRS 1.7.0
,MRS 1.9.2
,MRS 2.1.0
,MRS 3.0.2
are supported. The latest version of MRS is used by default. - Component
Lists List<MrsCluster V1Component List> - Service component list.
- Core
Data doubleVolume Count - Number of data disks of the Core node.
Value range:
1
to10
. - Core
Data doubleVolume Size - Data disk size of the Core node.
Value range:
100
GB to32000
GB. - Core
Data stringVolume Type - Data disk storage type of the Core node,
supporting
SATA
,SAS
andSSD
.SATA
: Common I/O,SAS
: High I/O,SSD
: Ultra-high I/O. - Core
Node doubleNum - Number of Core nodes Value range:
1
to500
. A maximum of500
Core nodes are supported by default. If more than500
Core nodes are required, contact technical support engineers or invoke background APIs to modify the database. - Core
Node stringProduct Id - Product ID of a Core node.
- Core
Node stringSize - Instance specification of a Core node Configuration
method of this parameter is identical to that of
master_node_size
. - Core
Node stringSpec Id - Specification ID of a Core node.
- Create
At string - Cluster creation time.
- Deployment
Id string - Deployment ID of a cluster.
- Error
Info string - Error information.
- External
Alternate stringIp - Backup external IP address.
- External
Ip string - External IP address.
- Fee string
- Cluster creation fee, which is automatically calculated.
- Hadoop
Version string - Hadoop version.
- Instance
Id string - Instance ID.
- InternalIp string - Internal IP address.
- LogCollection double - Indicates whether logs are collected when cluster
installation fails.
0
: not collected.1
: collected. The default value is0
. Iflog_collection
is set to1
, OBS buckets will be created to collect the MRS logs. These buckets will be charged. - Master
Data doubleVolume Count - Number of data disks of the Master node.
The value can be set to
1
only. - Master
Data doubleVolume Size - Data disk size of the Master node.
Value range:
100
GB to32000
GB. - Master
Data stringVolume Type - Data disk storage type of the Master node,
supporting
SATA
,SAS
andSSD
.SATA
: Common I/O,SAS
: High I/O,SSD
: Ultra-high I/O. - Master
Node stringIp - IP address of a Master node.
- Master
Node doubleNum - Number of Master nodes.
- Master
Node stringProduct Id - Product ID of a Master node.
- Master
Node stringSize - Best match based on several years of commissioning
experience. MRS supports specifications of hosts, and host specifications are
determined by CPUs, memory, and disks space. Master nodes support
h1.2xlarge.linux.mrs
h1.4xlarge.linux.mrs
,h1.8xlarge.linux.mrs
,s1.4xlarge.linux.mrs
, ands1.8xlarge.linux.mrs
. Core nodes of a streaming cluster support all specificationsc2.2xlarge.linux.mrs
,c2.4xlarge.linux.mrs
,s1.xlarge.linux.mrs
,s1.4xlarge.linux.mrs
,s1.8xlarge.linux.mrs
,d1.xlarge.linux.mrs
,d1.2xlarge.linux.mrs
,d1.4xlarge.linux.mrs
,d1.8xlarge.linux.mrs
,h1.2xlarge.linux.mrs
,h1.4xlarge.linux.mrs
andh1.8xlarge.linux.mrs
. Task nodes supportc2.2xlarge.linux.mrs
,c2.4xlarge.linux.mrs
,s1.xlarge.linux.mrs
,s1.4xlarge.linux.mrs
,s1.8xlarge.linux.mrs
,h1.2xlarge.linux.mrs
,h1.4xlarge.linux.mrs
, andh1.8xlarge.linux.mrs
. - Master
Node stringSpec Id - Specification ID of a Master node.
- MrsClusterV1Id string
- NodePublicCertName string - Name of a key pair. You can use a key to log in to the Master node in the cluster.
- Order
Id string - Order ID for creating clusters.
- Private
Ip stringFirst - Primary private IP address.
- Region string
- Remark string
- Remarks of a cluster.
- Safe
Mode double - MRS cluster running mode
0
: common mode. The value indicates that the Kerberos authentication is disabled. Users can use all functions provided by the cluster.1
: safe mode. The value indicates that the Kerberos authentication is enabled. Common users cannot use the file management or job management functions of an MRS cluster and cannot view cluster resource usage or the job records of Hadoop and Spark. To use these functions, the users must obtain the relevant permissions from the MRS Manager administrator. The request has thecluster_admin_secret
parameter only whensafe_mode
is set to1
. - Security
Groups stringId - Security group ID.
- Slave
Security stringGroups Id - Standby security group ID.
- Subnet
Id string - Subnet ID Obtain the subnet ID from the management console as follows: Register an account and log in to the management console. Click Virtual Private Cloud and select Virtual Private Cloud from the left list. On the Virtual Private Cloud page, obtain the subnet ID from the list.
- Dictionary<string, string>
- Tags key/value pairs to associate with the cluster.
- TenantId string - Project ID.
- Timeouts MrsClusterV1Timeouts
- UpdateAt string - Cluster update time.
- Vnc string
- URI address for remote login of the elastic cloud server.
- Volume
Size double - Data disk storage space of a Core node Users can
add disks to expand storage capacity when creating a cluster. There are the
following scenarios: Separation of data storage and computing: Data is stored
in the OBS system. Costs of clusters are relatively low but computing performance
is poor. The clusters can be deleted at any time. It is recommended when data
computing is not frequently performed. Integration of data storage and computing:
Data is stored in the HDFS system. Costs of clusters are relatively high but
computing performance is good. The clusters cannot be deleted in a short term.
It is recommended when data computing is frequently performed. Value range:
100
GB to32000
GB. - Volume
Type string - Type of disks
SATA
,SAS
andSSD
are supported.SATA
: common I/O,SAS
: High I/O,SSD
: Ultra-high I/O. - Vpc
Id string - ID of the VPC where the subnet locates Obtain the VPC ID from the management console as follows: Register an account and log in to the management console. Click Virtual Private Cloud and select Virtual Private Cloud from the left list. On the Virtual Private Cloud page, obtain the VPC ID from the list.
- Add
Jobs []MrsCluster V1Add Job Args - You can submit a job when you create a cluster to save time and use MRS easily. Only one job can be added.
- Available
Zone stringId - ID of an available zone. Obtain the value from Regions and Endpoints.
- Available
Zone stringName - Name of an availability zone.
- Billing
Type float64 - The value is
12
, indicating on-demand payment. - Bootstrap
Scripts []MrsCluster V1Bootstrap Script Args - Bootstrap action scripts. For details, see bootstrap_scripts block below.
- Charging
Start stringTime - Time when charging starts.
- Cluster
Admin stringSecret - Indicates the password of the MRS Manager
administrator. The password must contain
8
to32
characters. Must contain at least two types of the following: Lowercase letters, Uppercase letters, Digits, Special characters~!@#$%^&*()-_=+\|[{}];:'",<.>/?
and spaces. - Cluster
Id string - Cluster ID.
- Cluster
Name string - Cluster name, which is globally unique and contains
only
1
to64
letters, digits, hyphens (-), and underscores (_). - Cluster
State string - Cluster status. Valid values include: existing history:
starting
,running
,terminated
,failed
,abnormal
,terminating
,rebooting
,shutdown
,frozen
,scaling-out
,scaling-in
,scaling-error
. - Cluster
Type float64 - Type of clusters
0
: analysis cluster,1
: streaming cluster The default value is0
. - Cluster
Version string - Version of the clusters Currently,
MRS 1.6.3
,MRS 1.7.0
,MRS 1.9.2
,MRS 2.1.0
,MRS 3.0.2
are supported. The latest version of MRS is used by default. - Component
Lists []MrsCluster V1Component List Args - Service component list.
- Core
Data float64Volume Count - Number of data disks of the Core node.
Value range:
1
to10
. - Core
Data float64Volume Size - Data disk size of the Core node.
Value range:
100
GB to32000
GB. - Core
Data stringVolume Type - Data disk storage type of the Core node,
supporting
SATA
,SAS
andSSD
.SATA
: Common I/O,SAS
: High I/O,SSD
: Ultra-high I/O. - Core
Node float64Num - Number of Core nodes Value range:
1
to500
. A maximum of500
Core nodes are supported by default. If more than500
Core nodes are required, contact technical support engineers or invoke background APIs to modify the database. - Core
Node stringProduct Id - Product ID of a Core node.
- Core
Node stringSize - Instance specification of a Core node Configuration
method of this parameter is identical to that of
master_node_size
. - Core
Node stringSpec Id - Specification ID of a Core node.
- Create
At string - Cluster creation time.
- Deployment
Id string - Deployment ID of a cluster.
- Error
Info string - Error information.
- External
Alternate stringIp - Backup external IP address.
- External
Ip string - External IP address.
- Fee string
- Cluster creation fee, which is automatically calculated.
- Hadoop
Version string - Hadoop version.
- Instance
Id string - Instance ID.
- InternalIp string - Internal IP address.
- LogCollection float64 - Indicates whether logs are collected when cluster
installation fails.
0
: not collected.1
: collected. The default value is0
. Iflog_collection
is set to1
, OBS buckets will be created to collect the MRS logs. These buckets will be charged. - Master
Data float64Volume Count - Number of data disks of the Master node.
The value can be set to
1
only. - Master
Data float64Volume Size - Data disk size of the Master node.
Value range:
100
GB to32000
GB. - Master
Data stringVolume Type - Data disk storage type of the Master node,
supporting
SATA
,SAS
andSSD
.SATA
: Common I/O,SAS
: High I/O,SSD
: Ultra-high I/O. - Master
Node stringIp - IP address of a Master node.
- Master
Node float64Num - Number of Master nodes.
- Master
Node stringProduct Id - Product ID of a Master node.
- Master
Node stringSize - Best match based on several years of commissioning
experience. MRS supports specifications of hosts, and host specifications are
determined by CPUs, memory, and disks space. Master nodes support
h1.2xlarge.linux.mrs
h1.4xlarge.linux.mrs
,h1.8xlarge.linux.mrs
,s1.4xlarge.linux.mrs
, ands1.8xlarge.linux.mrs
. Core nodes of a streaming cluster support all specificationsc2.2xlarge.linux.mrs
,c2.4xlarge.linux.mrs
,s1.xlarge.linux.mrs
,s1.4xlarge.linux.mrs
,s1.8xlarge.linux.mrs
,d1.xlarge.linux.mrs
,d1.2xlarge.linux.mrs
,d1.4xlarge.linux.mrs
,d1.8xlarge.linux.mrs
,h1.2xlarge.linux.mrs
,h1.4xlarge.linux.mrs
andh1.8xlarge.linux.mrs
. Task nodes supportc2.2xlarge.linux.mrs
,c2.4xlarge.linux.mrs
,s1.xlarge.linux.mrs
,s1.4xlarge.linux.mrs
,s1.8xlarge.linux.mrs
,h1.2xlarge.linux.mrs
,h1.4xlarge.linux.mrs
, andh1.8xlarge.linux.mrs
. - Master
Node stringSpec Id - Specification ID of a Master node.
- MrsClusterV1Id string
- NodePublicCertName string - Name of a key pair. You can use a key to log in to the Master node in the cluster.
- Order
Id string - Order ID for creating clusters.
- Private
Ip stringFirst - Primary private IP address.
- Region string
- Remark string
- Remarks of a cluster.
- Safe
Mode float64 - MRS cluster running mode
0
: common mode. The value indicates that the Kerberos authentication is disabled. Users can use all functions provided by the cluster.1
: safe mode. The value indicates that the Kerberos authentication is enabled. Common users cannot use the file management or job management functions of an MRS cluster and cannot view cluster resource usage or the job records of Hadoop and Spark. To use these functions, the users must obtain the relevant permissions from the MRS Manager administrator. The request has thecluster_admin_secret
parameter only whensafe_mode
is set to1
. - Security
Groups stringId - Security group ID.
- Slave
Security stringGroups Id - Standby security group ID.
- Subnet
Id string - Subnet ID Obtain the subnet ID from the management console as follows: Register an account and log in to the management console. Click Virtual Private Cloud and select Virtual Private Cloud from the left list. On the Virtual Private Cloud page, obtain the subnet ID from the list.
- map[string]string
- Tags key/value pairs to associate with the cluster.
- Tenant
Id string - Project ID.
- Timeouts
Mrs
Cluster V1Timeouts Args - Update
At string - Cluster update time.
- Vnc string
- URI address for remote login of the elastic cloud server.
- Volume
Size float64 - Data disk storage space of a Core node Users can
add disks to expand storage capacity when creating a cluster. There are the
following scenarios: Separation of data storage and computing: Data is stored
in the OBS system. Costs of clusters are relatively low but computing performance
is poor. The clusters can be deleted at any time. It is recommended when data
computing is not frequently performed. Integration of data storage and computing:
Data is stored in the HDFS system. Costs of clusters are relatively high but
computing performance is good. The clusters cannot be deleted in a short term.
It is recommended when data computing is frequently performed. Value range:
100
GB to32000
GB. - Volume
Type string - Type of disks
SATA
,SAS
andSSD
are supported.SATA
: common I/O,SAS
: High I/O,SSD
: Ultra-high I/O. - Vpc
Id string - ID of the VPC where the subnet locates Obtain the VPC ID from the management console as follows: Register an account and log in to the management console. Click Virtual Private Cloud and select Virtual Private Cloud from the left list. On the Virtual Private Cloud page, obtain the VPC ID from the list.
- add
Jobs List<MrsCluster V1Add Job> - You can submit a job when you create a cluster to save time and use MRS easily. Only one job can be added.
- available
Zone StringId - ID of an available zone. Obtain the value from Regions and Endpoints.
- available
Zone StringName - Name of an availability zone.
- billing
Type Double - The value is
12
, indicating on-demand payment. - bootstrap
Scripts List<MrsCluster V1Bootstrap Script> - Bootstrap action scripts. For details, see bootstrap_scripts block below.
- charging
Start StringTime - Time when charging starts.
- cluster
Admin StringSecret - Indicates the password of the MRS Manager
administrator. The password must contain
8
to32
characters. Must contain at least two types of the following: Lowercase letters, Uppercase letters, Digits, Special characters~!@#$%^&*()-_=+\|[{}];:'",<.>/?
and spaces. - cluster
Id String - Cluster ID.
- cluster
Name String - Cluster name, which is globally unique and contains
only
1
to64
letters, digits, hyphens (-), and underscores (_). - cluster
State String - Cluster status. Valid values include: existing history:
starting
,running
,terminated
,failed
,abnormal
,terminating
,rebooting
,shutdown
,frozen
,scaling-out
,scaling-in
,scaling-error
. - cluster
Type Double - Type of clusters
0
: analysis cluster,1
: streaming cluster The default value is0
. - cluster
Version String - Version of the clusters Currently,
MRS 1.6.3
,MRS 1.7.0
,MRS 1.9.2
,MRS 2.1.0
,MRS 3.0.2
are supported. The latest version of MRS is used by default. - component
Lists List<MrsCluster V1Component List> - Service component list.
- core
Data DoubleVolume Count - Number of data disks of the Core node.
Value range:
1
to10
. - core
Data DoubleVolume Size - Data disk size of the Core node.
Value range:
100
GB to32000
GB. - core
Data StringVolume Type - Data disk storage type of the Core node,
supporting
SATA
,SAS
andSSD
.SATA
: Common I/O,SAS
: High I/O,SSD
: Ultra-high I/O. - core
Node DoubleNum - Number of Core nodes Value range:
1
to500
. A maximum of500
Core nodes are supported by default. If more than500
Core nodes are required, contact technical support engineers or invoke background APIs to modify the database. - core
Node StringProduct Id - Product ID of a Core node.
- core
Node StringSize - Instance specification of a Core node Configuration
method of this parameter is identical to that of
master_node_size
. - core
Node StringSpec Id - Specification ID of a Core node.
- create
At String - Cluster creation time.
- deployment
Id String - Deployment ID of a cluster.
- error
Info String - Error information.
- external
Alternate StringIp - Backup external IP address.
- external
Ip String - External IP address.
- fee String
- Cluster creation fee, which is automatically calculated.
- hadoop
Version String - Hadoop version.
- instance
Id String - Instance ID.
- internalIp String - Internal IP address.
- logCollection Double - Indicates whether logs are collected when cluster
installation fails.
0
: not collected.1
: collected. The default value is0
. Iflog_collection
is set to1
, OBS buckets will be created to collect the MRS logs. These buckets will be charged. - master
Data DoubleVolume Count - Number of data disks of the Master node.
The value can be set to
1
only. - master
Data DoubleVolume Size - Data disk size of the Master node.
Value range:
100
GB to32000
GB. - master
Data StringVolume Type - Data disk storage type of the Master node,
supporting
SATA
,SAS
andSSD
.SATA
: Common I/O,SAS
: High I/O,SSD
: Ultra-high I/O. - master
Node StringIp - IP address of a Master node.
- master
Node DoubleNum - Number of Master nodes.
- master
Node StringProduct Id - Product ID of a Master node.
- master
Node StringSize - Best match based on several years of commissioning
experience. MRS supports specifications of hosts, and host specifications are
determined by CPUs, memory, and disks space. Master nodes support
h1.2xlarge.linux.mrs
h1.4xlarge.linux.mrs
,h1.8xlarge.linux.mrs
,s1.4xlarge.linux.mrs
, ands1.8xlarge.linux.mrs
. Core nodes of a streaming cluster support all specificationsc2.2xlarge.linux.mrs
,c2.4xlarge.linux.mrs
,s1.xlarge.linux.mrs
,s1.4xlarge.linux.mrs
,s1.8xlarge.linux.mrs
,d1.xlarge.linux.mrs
,d1.2xlarge.linux.mrs
,d1.4xlarge.linux.mrs
,d1.8xlarge.linux.mrs
,h1.2xlarge.linux.mrs
,h1.4xlarge.linux.mrs
andh1.8xlarge.linux.mrs
. Task nodes supportc2.2xlarge.linux.mrs
,c2.4xlarge.linux.mrs
,s1.xlarge.linux.mrs
,s1.4xlarge.linux.mrs
,s1.8xlarge.linux.mrs
,h1.2xlarge.linux.mrs
,h1.4xlarge.linux.mrs
, andh1.8xlarge.linux.mrs
. - master
Node StringSpec Id - Specification ID of a Master node.
- mrsClusterV1Id String
- nodePublicCertName String - Name of a key pair. You can use a key to log in to the Master node in the cluster.
- order
Id String - Order ID for creating clusters.
- private
Ip StringFirst - Primary private IP address.
- region String
- remark String
- Remarks of a cluster.
- safe
Mode Double - MRS cluster running mode
0
: common mode. The value indicates that the Kerberos authentication is disabled. Users can use all functions provided by the cluster.1
: safe mode. The value indicates that the Kerberos authentication is enabled. Common users cannot use the file management or job management functions of an MRS cluster and cannot view cluster resource usage or the job records of Hadoop and Spark. To use these functions, the users must obtain the relevant permissions from the MRS Manager administrator. The request has thecluster_admin_secret
parameter only whensafe_mode
is set to1
. - security
Groups StringId - Security group ID.
- slave
Security StringGroups Id - Standby security group ID.
- subnet
Id String - Subnet ID Obtain the subnet ID from the management console as follows: Register an account and log in to the management console. Click Virtual Private Cloud and select Virtual Private Cloud from the left list. On the Virtual Private Cloud page, obtain the subnet ID from the list.
- Map<String,String>
- Tags key/value pairs to associate with the cluster.
- tenant
Id String - Project ID.
- timeouts
Mrs
Cluster V1Timeouts - update
At String - Cluster update time.
- vnc String
- URI address for remote login of the elastic cloud server.
- volume
Size Double - Data disk storage space of a Core node Users can
add disks to expand storage capacity when creating a cluster. There are the
following scenarios: Separation of data storage and computing: Data is stored
in the OBS system. Costs of clusters are relatively low but computing performance
is poor. The clusters can be deleted at any time. It is recommended when data
computing is not frequently performed. Integration of data storage and computing:
Data is stored in the HDFS system. Costs of clusters are relatively high but
computing performance is good. The clusters cannot be deleted in a short term.
It is recommended when data computing is frequently performed. Value range:
100
GB to32000
GB. - volume
Type String - Type of disks
SATA
,SAS
andSSD
are supported.SATA
: common I/O,SAS
: High I/O,SSD
: Ultra-high I/O. - vpc
Id String - ID of the VPC where the subnet locates Obtain the VPC ID from the management console as follows: Register an account and log in to the management console. Click Virtual Private Cloud and select Virtual Private Cloud from the left list. On the Virtual Private Cloud page, obtain the VPC ID from the list.
- add
Jobs MrsCluster V1Add Job[] - You can submit a job when you create a cluster to save time and use MRS easily. Only one job can be added.
- available
Zone stringId - ID of an available zone. Obtain the value from Regions and Endpoints.
- available
Zone stringName - Name of an availability zone.
- billing
Type number - The value is
12
, indicating on-demand payment. - bootstrap
Scripts MrsCluster V1Bootstrap Script[] - Bootstrap action scripts. For details, see bootstrap_scripts block below.
- charging
Start stringTime - Time when charging starts.
- cluster
Admin stringSecret - Indicates the password of the MRS Manager
administrator. The password must contain
8
to32
characters. Must contain at least two types of the following: Lowercase letters, Uppercase letters, Digits, Special characters~!@#$%^&*()-_=+\|[{}];:'",<.>/?
and spaces. - cluster
Id string - Cluster ID.
- cluster
Name string - Cluster name, which is globally unique and contains
only
1
to64
letters, digits, hyphens (-), and underscores (_). - cluster
State string - Cluster status. Valid values include: existing history:
starting
,running
,terminated
,failed
,abnormal
,terminating
,rebooting
,shutdown
,frozen
,scaling-out
,scaling-in
,scaling-error
. - cluster
Type number - Type of clusters
0
: analysis cluster,1
: streaming cluster The default value is0
. - cluster
Version string - Version of the clusters Currently,
MRS 1.6.3
,MRS 1.7.0
,MRS 1.9.2
,MRS 2.1.0
,MRS 3.0.2
are supported. The latest version of MRS is used by default. - component
Lists MrsCluster V1Component List[] - Service component list.
- core
Data numberVolume Count - Number of data disks of the Core node.
Value range:
1
to10
. - core
Data numberVolume Size - Data disk size of the Core node.
Value range:
100
GB to32000
GB. - core
Data stringVolume Type - Data disk storage type of the Core node,
supporting
SATA
,SAS
andSSD
.SATA
: Common I/O,SAS
: High I/O,SSD
: Ultra-high I/O. - core
Node numberNum - Number of Core nodes Value range:
1
to500
. A maximum of500
Core nodes are supported by default. If more than500
Core nodes are required, contact technical support engineers or invoke background APIs to modify the database. - core
Node stringProduct Id - Product ID of a Core node.
- core
Node stringSize - Instance specification of a Core node Configuration
method of this parameter is identical to that of
master_node_size
. - core
Node stringSpec Id - Specification ID of a Core node.
- create
At string - Cluster creation time.
- deployment
Id string - Deployment ID of a cluster.
- error
Info string - Error information.
- external
Alternate stringIp - Backup external IP address.
- external
Ip string - External IP address.
- fee string
- Cluster creation fee, which is automatically calculated.
- hadoop
Version string - Hadoop version.
- instance
Id string - Instance ID.
- internalIp string - Internal IP address.
- logCollection number - Indicates whether logs are collected when cluster
installation fails.
0
: not collected.1
: collected. The default value is0
. Iflog_collection
is set to1
, OBS buckets will be created to collect the MRS logs. These buckets will be charged. - master
Data numberVolume Count - Number of data disks of the Master node.
The value can be set to
1
only. - master
Data numberVolume Size - Data disk size of the Master node.
Value range:
100
GB to32000
GB. - master
Data stringVolume Type - Data disk storage type of the Master node,
supporting
SATA
,SAS
andSSD
.SATA
: Common I/O,SAS
: High I/O,SSD
: Ultra-high I/O. - master
Node stringIp - IP address of a Master node.
- master
Node numberNum - Number of Master nodes.
- master
Node stringProduct Id - Product ID of a Master node.
- master
Node stringSize - Best match based on several years of commissioning
experience. MRS supports specifications of hosts, and host specifications are
determined by CPUs, memory, and disks space. Master nodes support
h1.2xlarge.linux.mrs
h1.4xlarge.linux.mrs
,h1.8xlarge.linux.mrs
,s1.4xlarge.linux.mrs
, ands1.8xlarge.linux.mrs
. Core nodes of a streaming cluster support all specificationsc2.2xlarge.linux.mrs
,c2.4xlarge.linux.mrs
,s1.xlarge.linux.mrs
,s1.4xlarge.linux.mrs
,s1.8xlarge.linux.mrs
,d1.xlarge.linux.mrs
,d1.2xlarge.linux.mrs
,d1.4xlarge.linux.mrs
,d1.8xlarge.linux.mrs
,h1.2xlarge.linux.mrs
,h1.4xlarge.linux.mrs
andh1.8xlarge.linux.mrs
. Task nodes supportc2.2xlarge.linux.mrs
,c2.4xlarge.linux.mrs
,s1.xlarge.linux.mrs
,s1.4xlarge.linux.mrs
,s1.8xlarge.linux.mrs
,h1.2xlarge.linux.mrs
,h1.4xlarge.linux.mrs
, andh1.8xlarge.linux.mrs
. - master
Node stringSpec Id - Specification ID of a Master node.
- mrsClusterV1Id string
- nodePublicCertName string - Name of a key pair. You can use a key to log in to the Master node in the cluster.
- order
Id string - Order ID for creating clusters.
- private
Ip stringFirst - Primary private IP address.
- region string
- remark string
- Remarks of a cluster.
- safe
Mode number - MRS cluster running mode
0
: common mode. The value indicates that the Kerberos authentication is disabled. Users can use all functions provided by the cluster.1
: safe mode. The value indicates that the Kerberos authentication is enabled. Common users cannot use the file management or job management functions of an MRS cluster and cannot view cluster resource usage or the job records of Hadoop and Spark. To use these functions, the users must obtain the relevant permissions from the MRS Manager administrator. The request has thecluster_admin_secret
parameter only whensafe_mode
is set to1
. - security
Groups stringId - Security group ID.
- slave
Security stringGroups Id - Standby security group ID.
- subnet
Id string - Subnet ID Obtain the subnet ID from the management console as follows: Register an account and log in to the management console. Click Virtual Private Cloud and select Virtual Private Cloud from the left list. On the Virtual Private Cloud page, obtain the subnet ID from the list.
- {[key: string]: string}
- Tags key/value pairs to associate with the cluster.
- tenant
Id string - Project ID.
- timeouts
Mrs
Cluster V1Timeouts - update
At string - Cluster update time.
- vnc string
- URI address for remote login of the elastic cloud server.
- volume
Size number - Data disk storage space of a Core node Users can
add disks to expand storage capacity when creating a cluster. There are the
following scenarios: Separation of data storage and computing: Data is stored
in the OBS system. Costs of clusters are relatively low but computing performance
is poor. The clusters can be deleted at any time. It is recommended when data
computing is not frequently performed. Integration of data storage and computing:
Data is stored in the HDFS system. Costs of clusters are relatively high but
computing performance is good. The clusters cannot be deleted in a short term.
It is recommended when data computing is frequently performed. Value range:
100
GB to32000
GB. - volume
Type string - Type of disks
SATA
,SAS
andSSD
are supported.SATA
: common I/O,SAS
: High I/O,SSD
: Ultra-high I/O. - vpc
Id string - ID of the VPC where the subnet locates Obtain the VPC ID from the management console as follows: Register an account and log in to the management console. Click Virtual Private Cloud and select Virtual Private Cloud from the left list. On the Virtual Private Cloud page, obtain the VPC ID from the list.
- add_
jobs Sequence[MrsCluster V1Add Job Args] - You can submit a job when you create a cluster to save time and use MRS easily. Only one job can be added.
- available_
zone_ strid - ID of an available zone. Obtain the value from Regions and Endpoints.
- available_
zone_ strname - Name of an availability zone.
- billing_
type float - The value is 12, indicating on-demand payment. - bootstrap_
scripts Sequence[MrsCluster V1Bootstrap Script Args] - Bootstrap action scripts. For details, see bootstrap_scripts block below.
- charging_
start_ strtime - Time when charging starts.
- cluster_
admin_ strsecret - Indicates the password of the MRS Manager
administrator. The password must contain
8 to 32 characters. Must contain at least two types of the following: lowercase letters, uppercase letters, digits, special characters ~!@#$%^&*()-_=+\|[{}];:'",<.>/?
and spaces. - cluster_
id str - Cluster ID.
- cluster_
name str - Cluster name, which is globally unique and contains
only
1 to 64 letters, digits, hyphens (-), and underscores (_). - cluster_
state str - Cluster status. Valid values include: existing history:
starting, running, terminated, failed, abnormal, terminating, rebooting,
shutdown, frozen, scaling-out, scaling-in, scaling-error. - cluster_
type float - Type of clusters
0: analysis cluster, 1: streaming cluster. The default value is 0. - cluster_
version str - Version of the clusters Currently,
MRS 1.6.3, MRS 1.7.0, MRS 1.9.2, MRS 2.1.0, MRS 3.0.2
are supported. The latest version of MRS is used by default. - component_
lists Sequence[MrsCluster V1Component List Args] - Service component list.
- core_
data_ floatvolume_ count - Number of data disks of the Core node.
Value range:
1 to 10. - core_
data_ floatvolume_ size - Data disk size of the Core node.
Value range:
100 GB to 32000 GB. - core_
data_ strvolume_ type - Data disk storage type of the Core node,
supporting
SATA
,SAS
andSSD
.SATA
: Common I/O,SAS
: High I/O,SSD
: Ultra-high I/O. - core_
node_ floatnum - Number of Core nodes Value range:
1 to 500. A maximum of 500 Core nodes are supported by default. If more than 500
Core nodes are required, contact technical support engineers or invoke background APIs to modify the database. - core_
node_ strproduct_ id - Product ID of a Core node.
- core_
node_ strsize - Instance specification of a Core node Configuration
method of this parameter is identical to that of
master_node_size
. - core_
node_ strspec_ id - Specification ID of a Core node.
- create_
at str - Cluster creation time.
- deployment_
id str - Deployment ID of a cluster.
- error_
info str - Error information.
- external_
alternate_ strip - Backup external IP address.
- external_
ip str - External IP address.
- fee str
- Cluster creation fee, which is automatically calculated.
- hadoop_
version str - Hadoop version.
- instance_
id str - Instance ID.
- internal_
ip str - log_
collection float - Indicates whether logs are collected when cluster
installation fails.
0: not collected. 1: collected. The default value is 0. If log_collection
is set to 1, OBS buckets will be created to collect the MRS logs. These buckets will be charged. - master_
data_ floatvolume_ count - Number of data disks of the Master node.
The value can be set to
1
only. - master_
data_ floatvolume_ size - Data disk size of the Master node.
Value range:
100
GB to32000
GB. - master_
data_ strvolume_ type - Data disk storage type of the Master node,
supporting
SATA
,SAS
andSSD
.SATA
: Common I/O,SAS
: High I/O,SSD
: Ultra-high I/O. - master_
node_ strip - IP address of a Master node.
- master_
node_ floatnum - Number of Master nodes.
- master_
node_ strproduct_ id - Product ID of a Master node.
- master_
node_ strsize - Best match based on several years of commissioning
experience. MRS supports specifications of hosts, and host specifications are
determined by CPUs, memory, and disks space. Master nodes support
h1.2xlarge.linux.mrs
h1.4xlarge.linux.mrs
,h1.8xlarge.linux.mrs
,s1.4xlarge.linux.mrs
, ands1.8xlarge.linux.mrs
. Core nodes of a streaming cluster support all specificationsc2.2xlarge.linux.mrs
,c2.4xlarge.linux.mrs
,s1.xlarge.linux.mrs
,s1.4xlarge.linux.mrs
,s1.8xlarge.linux.mrs
,d1.xlarge.linux.mrs
,d1.2xlarge.linux.mrs
,d1.4xlarge.linux.mrs
,d1.8xlarge.linux.mrs
,h1.2xlarge.linux.mrs
,h1.4xlarge.linux.mrs
andh1.8xlarge.linux.mrs
. Task nodes supportc2.2xlarge.linux.mrs
,c2.4xlarge.linux.mrs
,s1.xlarge.linux.mrs
,s1.4xlarge.linux.mrs
,s1.8xlarge.linux.mrs
,h1.2xlarge.linux.mrs
,h1.4xlarge.linux.mrs
, andh1.8xlarge.linux.mrs
. - master_
node_ strspec_ id - Specification ID of a Master node.
- mrs_
cluster_ strv1_ id - node_
public_ strcert_ name - Name of a key pair You can use a key to log in to the Master node in the cluster.
- order_
id str - Order ID for creating clusters.
- private_
ip_ strfirst - Primary private IP address.
- region str
- remark str
- Remarks of a cluster.
- safe_
mode float - MRS cluster running mode
0: common mode. The value indicates that the Kerberos authentication is disabled. Users can use all functions provided by the cluster.
1: safe mode. The value indicates that the Kerberos authentication is enabled. Common users cannot use the file management or job management functions of an MRS cluster and cannot view cluster resource usage or the job records of Hadoop and Spark. To use these functions, the users must obtain the relevant permissions from the MRS Manager administrator. The request has the cluster_admin_secret
parameter only when safe_mode is set to 1. - security_
groups_ strid - Security group ID.
- slave_
security_ strgroups_ id - Standby security group ID.
- subnet_
id str - Subnet ID Obtain the subnet ID from the management console as follows: Register an account and log in to the management console. Click Virtual Private Cloud and select Virtual Private Cloud from the left list. On the Virtual Private Cloud page, obtain the subnet ID from the list.
- Mapping[str, str]
- Tags key/value pairs to associate with the cluster.
- tenant_
id str - Project ID.
- timeouts
Mrs
Cluster V1Timeouts Args - update_
at str - Cluster update time.
- vnc str
- URI address for remote login of the elastic cloud server.
- volume_
size float - Data disk storage space of a Core node Users can
add disks to expand storage capacity when creating a cluster. There are the
following scenarios: Separation of data storage and computing: Data is stored
in the OBS system. Costs of clusters are relatively low but computing performance
is poor. The clusters can be deleted at any time. It is recommended when data
computing is not frequently performed. Integration of data storage and computing:
Data is stored in the HDFS system. Costs of clusters are relatively high but
computing performance is good. The clusters cannot be deleted in a short term.
It is recommended when data computing is frequently performed. Value range:
100
GB to32000
GB. - volume_
type str - Type of disks
SATA
,SAS
andSSD
are supported.SATA
: common I/O,SAS
: High I/O,SSD
: Ultra-high I/O. - vpc_
id str - ID of the VPC where the subnet locates Obtain the VPC ID from the management console as follows: Register an account and log in to the management console. Click Virtual Private Cloud and select Virtual Private Cloud from the left list. On the Virtual Private Cloud page, obtain the VPC ID from the list.
- add
Jobs List<Property Map> - You can submit a job when you create a cluster to save time and use MRS easily. Only one job can be added.
- available
Zone StringId - ID of an available zone. Obtain the value from Regions and Endpoints.
- available
Zone StringName - Name of an availability zone.
- billing
Type Number - The value is
12
, indicating on-demand payment. - bootstrap
Scripts List<Property Map> - Bootstrap action scripts. For details, see bootstrap_scripts block below.
- charging
Start StringTime - Time when charging starts.
- cluster
Admin StringSecret - Indicates the password of the MRS Manager
administrator. The password must contain
8
to32
characters. Must contain at least two types of the following: Lowercase letters, Uppercase letters, Digits, Special characters~!@#$%^&*()-_=+\|[{}];:'",<.>/?
and spaces. - cluster
Id String - Cluster ID.
- cluster
Name String - Cluster name, which is globally unique and contains
only
1
to64
letters, digits, hyphens (-), and underscores (_). - cluster
State String - Cluster status. Valid values include: existing history:
starting
,running
,terminated
,failed
,abnormal
,terminating
,rebooting
,shutdown
,frozen
,scaling-out
,scaling-in
,scaling-error
. - cluster
Type Number - Type of clusters
0
: analysis cluster,1
: streaming cluster The default value is0
. - cluster
Version String - Version of the clusters Currently,
MRS 1.6.3
,MRS 1.7.0
,MRS 1.9.2
,MRS 2.1.0
,MRS 3.0.2
are supported. The latest version of MRS is used by default. - component
Lists List<Property Map> - Service component list.
- core
Data NumberVolume Count - Number of data disks of the Core node.
Value range:
1
to10
. - core
Data NumberVolume Size - Data disk size of the Core node.
Value range:
100
GB to32000
GB. - core
Data StringVolume Type - Data disk storage type of the Core node,
supporting
SATA
,SAS
andSSD
.SATA
: Common I/O,SAS
: High I/O,SSD
: Ultra-high I/O. - core
Node NumberNum - Number of Core nodes Value range:
1
to500
. A maximum of500
Core nodes are supported by default. If more than500
Core nodes are required, contact technical support engineers or invoke background APIs to modify the database. - core
Node StringProduct Id - Product ID of a Core node.
- core
Node StringSize - Instance specification of a Core node Configuration
method of this parameter is identical to that of
master_node_size
. - core
Node StringSpec Id - Specification ID of a Core node.
- create
At String - Cluster creation time.
- deployment
Id String - Deployment ID of a cluster.
- error
Info String - Error information.
- external
Alternate StringIp - Backup external IP address.
- external
Ip String - External IP address.
- fee String
- Cluster creation fee, which is automatically calculated.
- hadoop
Version String - Hadoop version.
- instance
Id String - Instance ID.
- internal
Ip String - log
Collection Number - Indicates whether logs are collected when cluster
installation fails.
0
: not collected.1
: collected. The default value is0
. Iflog_collection
is set to1
, OBS buckets will be created to collect the MRS logs. These buckets will be charged. - master
Data NumberVolume Count - Number of data disks of the Master node.
The value can be set to
1
only. - master
Data NumberVolume Size - Data disk size of the Master node.
Value range:
100
GB to32000
GB. - master
Data StringVolume Type - Data disk storage type of the Master node,
supporting
SATA
,SAS
andSSD
.SATA
: Common I/O,SAS
: High I/O,SSD
: Ultra-high I/O. - master
Node StringIp - IP address of a Master node.
- master
Node NumberNum - Number of Master nodes.
- master
Node StringProduct Id - Product ID of a Master node.
- master
Node StringSize - Best match based on several years of commissioning
experience. MRS supports specifications of hosts, and host specifications are
determined by CPUs, memory, and disks space. Master nodes support
h1.2xlarge.linux.mrs
h1.4xlarge.linux.mrs
,h1.8xlarge.linux.mrs
,s1.4xlarge.linux.mrs
, ands1.8xlarge.linux.mrs
. Core nodes of a streaming cluster support all specificationsc2.2xlarge.linux.mrs
,c2.4xlarge.linux.mrs
,s1.xlarge.linux.mrs
,s1.4xlarge.linux.mrs
,s1.8xlarge.linux.mrs
,d1.xlarge.linux.mrs
,d1.2xlarge.linux.mrs
,d1.4xlarge.linux.mrs
,d1.8xlarge.linux.mrs
,h1.2xlarge.linux.mrs
,h1.4xlarge.linux.mrs
andh1.8xlarge.linux.mrs
. Task nodes supportc2.2xlarge.linux.mrs
,c2.4xlarge.linux.mrs
,s1.xlarge.linux.mrs
,s1.4xlarge.linux.mrs
,s1.8xlarge.linux.mrs
,h1.2xlarge.linux.mrs
,h1.4xlarge.linux.mrs
, andh1.8xlarge.linux.mrs
. - master
Node StringSpec Id - Specification ID of a Master node.
- mrs
Cluster StringV1Id - node
Public StringCert Name - Name of a key pair You can use a key to log in to the Master node in the cluster.
- order
Id String - Order ID for creating clusters.
- private
Ip StringFirst - Primary private IP address.
- region String
- remark String
- Remarks of a cluster.
- safe
Mode Number - MRS cluster running mode
0
: common mode. The value indicates that the Kerberos authentication is disabled. Users can use all functions provided by the cluster.1
: safe mode. The value indicates that the Kerberos authentication is enabled. Common users cannot use the file management or job management functions of an MRS cluster and cannot view cluster resource usage or the job records of Hadoop and Spark. To use these functions, the users must obtain the relevant permissions from the MRS Manager administrator. The request has thecluster_admin_secret
parameter only whensafe_mode
is set to1
. - security
Groups StringId - Security group ID.
- slave
Security StringGroups Id - Standby security group ID.
- subnet
Id String - Subnet ID Obtain the subnet ID from the management console as follows: Register an account and log in to the management console. Click Virtual Private Cloud and select Virtual Private Cloud from the left list. On the Virtual Private Cloud page, obtain the subnet ID from the list.
- Map<String>
- Tags key/value pairs to associate with the cluster.
- tenant
Id String - Project ID.
- timeouts Property Map
- update
At String - Cluster update time.
- vnc String
- URI address for remote login of the elastic cloud server.
- volume
Size Number - Data disk storage space of a Core node Users can
add disks to expand storage capacity when creating a cluster. There are the
following scenarios: Separation of data storage and computing: Data is stored
in the OBS system. Costs of clusters are relatively low but computing performance
is poor. The clusters can be deleted at any time. It is recommended when data
computing is not frequently performed. Integration of data storage and computing:
Data is stored in the HDFS system. Costs of clusters are relatively high but
computing performance is good. The clusters cannot be deleted in a short term.
It is recommended when data computing is frequently performed. Value range:
100
GB to32000
GB. - volume
Type String - Type of disks
SATA
,SAS
andSSD
are supported.SATA
: common I/O,SAS
: High I/O,SSD
: Ultra-high I/O. - vpc
Id String - ID of the VPC where the subnet locates Obtain the VPC ID from the management console as follows: Register an account and log in to the management console. Click Virtual Private Cloud and select Virtual Private Cloud from the left list. On the Virtual Private Cloud page, obtain the VPC ID from the list.
Supporting Types
MrsClusterV1AddJob, MrsClusterV1AddJobArgs
- Jar
Path string - Path of the
.jar file or .sql file for program execution. The parameter must meet the following requirements: contains a maximum of 1023
characters, excluding special characters such as ;|&><'$
. The address cannot be empty or full of spaces. Starts with / or s3a://
. Spark Script must end with .sql, while MapReduce and Spark Jar
must end with .jar. .sql and .jar
are case-insensitive. - Job
Name string - It contains only
1
to64
letters, digits, hyphens (-), and underscores (_). - Job
Type double - Type.
1: MapReduce, 2: Spark, 3: Hive Script, 4: HiveQL (not supported currently),
5: DistCp, importing and exporting data (not supported in this API currently),
6: Spark Script, 7: Spark SQL, submitting Spark SQL statements (not supported in this API currently). - Submit
Job boolOnce Cluster Run - Possible values are:
true — a job is submitted when a cluster is created, and false —
a job is submitted separately. - Arguments string
- Key parameter for program execution. The parameter
is specified by the function of the user's program. MRS is only responsible
for loading the parameter. The parameter contains a maximum of
2047
characters, excluding special characters such as;|&>'<$
, and can be empty. - File
Action string - Data import and export import export
- Hive
Script stringPath - SQL program path. This parameter is needed
by Spark Script and Hive Script jobs only and must meet the following requirements:
Contains a maximum of
1023
characters, excluding special characters such as;|&><'$
. The address cannot be empty or full of spaces. Starts with/
ors3a://
. Ends with.sql
.sql
is case-insensitive. - Hql string
- HiveQL statement.
- Input string
- Path for inputting data, which must start with
/
ors3a://
. A correct OBS path is required. The parameter contains a maximum of1023
characters, excluding special characters such as;|&>'<$
, and can be empty. - Job
Log string - Path for storing job logs that record job running status.
This path must start with
/
ors3a://
. A correct OBS path is required. The parameter contains a maximum of1023
characters, excluding special characters such as;|&>'<$
, and can be empty. - Output string
- Path for outputting data, which must start with
/
ors3a://
. A correct OBS path is required. If the path does not exist, the system automatically creates it. The parameter contains a maximum of1023
characters, excluding special characters such as;|&>'<$
, and can be empty. - Shutdown
Cluster bool - Whether to delete the cluster after the jobs
are complete.
true: yes, false: no.
- Jar
Path string - Path of the
.jar
file or.sql
file for program execution The parameter must meet the following requirements: Contains a maximum of1023
characters, excluding special characters such as;|&><'$
. The address cannot be empty or full of spaces. Starts with/
ors3a://
. Spark Script must end with.sql
whileMapReduce
andSpark Jar
must end with.jar
.sql
andjar
are case-insensitive. - Job
Name string - It contains only
1
to64
letters, digits, hyphens (-), and underscores (_). - Job
Type float64 - Type.
1
: MapReduce,2
: Spark,3
: Hive Script,4
: HiveQL (not supported currently),5
: DistCp, importing and exporting data (not supported in this API currently),6
: Spark Script,7
: Spark SQL, submitting Spark SQL statements (not supported in this API currently). - Submit
Job boolOnce Cluster Run - Possible values are:
true
a job is submitted when a cluster is created andfalse
a job is submitted separately. - Arguments string
- Key parameter for program execution. The parameter
is specified by the function of the user's program. MRS is only responsible
for loading the parameter. The parameter contains a maximum of
2047
characters, excluding special characters such as;|&>'<$
, and can be empty. - File
Action string - Data import and export import export
- Hive
Script stringPath - SQL program path. This parameter is needed
by Spark Script and Hive Script jobs only and must meet the following requirements:
Contains a maximum of
1023
characters, excluding special characters such as;|&><'$
. The address cannot be empty or full of spaces. Starts with/
ors3a://
. Ends with.sql
.sql
is case-insensitive. - Hql string
- HiveQL statement.
- Input string
- Path for inputting data, which must start with
/
ors3a://
. A correct OBS path is required. The parameter contains a maximum of1023
characters, excluding special characters such as;|&>'<$
, and can be empty. - Job
Log string - Path for storing job logs that record job running status.
This path must start with
/
ors3a://
. A correct OBS path is required. The parameter contains a maximum of1023
characters, excluding special characters such as;|&>'<$
, and can be empty. - Output string
- Path for outputting data, which must start with
/
ors3a://
. A correct OBS path is required. If the path does not exist, the system automatically creates it. The parameter contains a maximum of1023
characters, excluding special characters such as;|&>'<$
, and can be empty. - Shutdown
Cluster bool - Whether to delete the cluster after the jobs
are complete.
true
: Yes,false
: No.
- jar
Path String - Path of the
.jar
file or.sql
file for program execution The parameter must meet the following requirements: Contains a maximum of1023
characters, excluding special characters such as;|&><'$
. The address cannot be empty or full of spaces. Starts with/
ors3a://
. Spark Script must end with.sql
whileMapReduce
andSpark Jar
must end with.jar
.sql
andjar
are case-insensitive. - job
Name String - It contains only
1
to64
letters, digits, hyphens (-), and underscores (_). - job
Type Double - Type.
1
: MapReduce,2
: Spark,3
: Hive Script,4
: HiveQL (not supported currently),5
: DistCp, importing and exporting data (not supported in this API currently),6
: Spark Script,7
: Spark SQL, submitting Spark SQL statements (not supported in this API currently). - submit
Job BooleanOnce Cluster Run - Possible values are:
true
a job is submitted when a cluster is created andfalse
a job is submitted separately. - arguments String
- Key parameter for program execution. The parameter
is specified by the function of the user's program. MRS is only responsible
for loading the parameter. The parameter contains a maximum of
2047
characters, excluding special characters such as;|&>'<$
, and can be empty. - file
Action String - Data import and export import export
- hive
Script StringPath - SQL program path. This parameter is needed
by Spark Script and Hive Script jobs only and must meet the following requirements:
Contains a maximum of
1023
characters, excluding special characters such as;|&><'$
. The address cannot be empty or full of spaces. Starts with/
ors3a://
. Ends with.sql
.sql
is case-insensitive. - hql String
- HiveQL statement.
- input String
- Path for inputting data, which must start with
/
ors3a://
. A correct OBS path is required. The parameter contains a maximum of1023
characters, excluding special characters such as;|&>'<$
, and can be empty. - job
Log String - Path for storing job logs that record job running status.
This path must start with
/
ors3a://
. A correct OBS path is required. The parameter contains a maximum of1023
characters, excluding special characters such as;|&>'<$
, and can be empty. - output String
- Path for outputting data, which must start with
/
ors3a://
. A correct OBS path is required. If the path does not exist, the system automatically creates it. The parameter contains a maximum of1023
characters, excluding special characters such as;|&>'<$
, and can be empty. - shutdown
Cluster Boolean - Whether to delete the cluster after the jobs
are complete.
true
: Yes,false
: No.
- jar
Path string - Path of the
.jar
file or.sql
file for program execution The parameter must meet the following requirements: Contains a maximum of1023
characters, excluding special characters such as;|&><'$
. The address cannot be empty or full of spaces. Starts with/
ors3a://
. Spark Script must end with.sql
whileMapReduce
andSpark Jar
must end with.jar
.sql
andjar
are case-insensitive. - job
Name string - It contains only
1
to64
letters, digits, hyphens (-), and underscores (_). - job
Type number - Type.
1
: MapReduce,2
: Spark,3
: Hive Script,4
: HiveQL (not supported currently),5
: DistCp, importing and exporting data (not supported in this API currently),6
: Spark Script,7
: Spark SQL, submitting Spark SQL statements (not supported in this API currently). - submit
Job booleanOnce Cluster Run - Possible values are:
true
a job is submitted when a cluster is created andfalse
a job is submitted separately. - arguments string
- Key parameter for program execution. The parameter
is specified by the function of the user's program. MRS is only responsible
for loading the parameter. The parameter contains a maximum of
2047
characters, excluding special characters such as;|&>'<$
, and can be empty. - file
Action string - Data import and export import export
- hive
Script stringPath - SQL program path. This parameter is needed
by Spark Script and Hive Script jobs only and must meet the following requirements:
Contains a maximum of
1023
characters, excluding special characters such as;|&><'$
. The address cannot be empty or full of spaces. Starts with/
ors3a://
. Ends with.sql
.sql
is case-insensitive. - hql string
- HiveQL statement.
- input string
- Path for inputting data, which must start with
/
ors3a://
. A correct OBS path is required. The parameter contains a maximum of1023
characters, excluding special characters such as;|&>'<$
, and can be empty. - job
Log string - Path for storing job logs that record job running status.
This path must start with
/
ors3a://
. A correct OBS path is required. The parameter contains a maximum of1023
characters, excluding special characters such as;|&>'<$
, and can be empty. - output string
- Path for outputting data, which must start with
/
ors3a://
. A correct OBS path is required. If the path does not exist, the system automatically creates it. The parameter contains a maximum of1023
characters, excluding special characters such as;|&>'<$
, and can be empty. - shutdown
Cluster boolean - Whether to delete the cluster after the jobs
are complete.
true
: Yes,false
: No.
- jar_
path str - Path of the
.jar
file or.sql
file for program execution The parameter must meet the following requirements: Contains a maximum of1023
characters, excluding special characters such as;|&><'$
. The address cannot be empty or full of spaces. Starts with/
ors3a://
. Spark Script must end with.sql
whileMapReduce
andSpark Jar
must end with.jar
.sql
andjar
are case-insensitive. - job_
name str - It contains only
1
to64
letters, digits, hyphens (-), and underscores (_). - job_
type float - Type. 1: MapReduce, 2: Spark, 3: Hive Script,
4: HiveQL (not supported currently), 5: DistCp, importing and exporting data
(not supported in this API currently), 6: Spark Script, 7: Spark SQL,
submitting Spark SQL statements (not supported in this API currently).
- submit_job_once_cluster_run bool - Possible values are: true, a job is
submitted when a cluster is created, and false, a job is submitted separately.
- arguments str
- Key parameter for program execution. The parameter is specified by the
function of the user's program. MRS is only responsible for loading the
parameter. The parameter contains a maximum of 2047 characters, excluding
special characters such as `;|&>'<$`, and can be empty.
- file_action str - Data import and export. Possible values: import, export.
- hive_script_path str - SQL program path. This parameter is needed
by Spark Script and Hive Script jobs only and must meet the following requirements:
Contains a maximum of 1023 characters, excluding special characters such as `;|&><'$`.
The address cannot be empty or full of spaces. Starts with `/` or `s3a://`.
Ends with `.sql`; `.sql` is case-insensitive. - hql str
- HiveQL statement.
- input str
- Path for inputting data, which must start with
/
ors3a://
. A correct OBS path is required. The parameter contains a maximum of1023
characters, excluding special characters such as;|&>'<$
, and can be empty. - job_
log str - Path for storing job logs that record job running status.
This path must start with
/
ors3a://
. A correct OBS path is required. The parameter contains a maximum of1023
characters, excluding special characters such as;|&>'<$
, and can be empty. - output str
- Path for outputting data, which must start with `/` or `s3a://`.
A correct OBS path is required. If the path does not exist, the system
automatically creates it. The parameter contains a maximum of 1023 characters,
excluding special characters such as `;|&>'<$`, and can be empty.
- shutdown_cluster bool - Whether to delete the cluster after the jobs
are complete. true: Yes, false: No.
- jar
Path String - Path of the
.jar
file or.sql
file for program execution The parameter must meet the following requirements: Contains a maximum of1023
characters, excluding special characters such as;|&><'$
. The address cannot be empty or full of spaces. Starts with/
ors3a://
. Spark Script must end with.sql
whileMapReduce
andSpark Jar
must end with.jar
.sql
andjar
are case-insensitive. - job
Name String - It contains only
1
to64
letters, digits, hyphens (-), and underscores (_). - job
Type Number - Type.
1
: MapReduce,2
: Spark,3
: Hive Script,4
: HiveQL (not supported currently),5
: DistCp, importing and exporting data (not supported in this API currently),6
: Spark Script,7
: Spark SQL, submitting Spark SQL statements (not supported in this API currently).
- submit Job Once Cluster Run Boolean - Possible values are: true, a job is
submitted when a cluster is created, and false, a job is submitted separately.
- arguments String
- Key parameter for program execution. The parameter
is specified by the function of the user's program. MRS is only responsible
for loading the parameter. The parameter contains a maximum of
2047
characters, excluding special characters such as;|&>'<$
, and can be empty. - file
Action String - Data import and export import export
- hive
Script Path String - SQL program path. This parameter is needed
by Spark Script and Hive Script jobs only and must meet the following requirements:
Contains a maximum of 1023 characters, excluding special characters such as `;|&><'$`.
The address cannot be empty or full of spaces. Starts with `/` or `s3a://`.
Ends with `.sql`; `.sql` is case-insensitive. - hql String
- HiveQL statement.
- input String
- Path for inputting data, which must start with
/
ors3a://
. A correct OBS path is required. The parameter contains a maximum of1023
characters, excluding special characters such as;|&>'<$
, and can be empty. - job
Log String - Path for storing job logs that record job running status.
This path must start with
/
ors3a://
. A correct OBS path is required. The parameter contains a maximum of1023
characters, excluding special characters such as;|&>'<$
, and can be empty. - output String
- Path for outputting data, which must start with
/
ors3a://
. A correct OBS path is required. If the path does not exist, the system automatically creates it. The parameter contains a maximum of1023
characters, excluding special characters such as;|&>'<$
, and can be empty. - shutdown
Cluster Boolean - Whether to delete the cluster after the jobs
are complete.
true
: Yes,false
: No.
MrsClusterV1BootstrapScript, MrsClusterV1BootstrapScriptArgs
- Fail
Action string - Whether to continue to execute subsequent scripts and create a cluster after the
bootstrap action script fails to be executed. continue: Continue to execute
subsequent scripts. errorout: Stop the action. - Name string
- Name of a bootstrap action script.
- Nodes List<string>
- Type of node where the bootstrap action script is executed, including
master, core, and task. - Uri string
- Path of the shell script. Set this parameter to an OBS bucket path or a local VM path.
- Active
Master bool - Whether the bootstrap action script runs only on active Master nodes.
- Before
Component Start bool - Time when the bootstrap action script is executed. Currently, the script can be executed before and after the component is started.
- Parameters string
- Bootstrap action script parameters.
- Fail
Action string - Whether to continue to execute subsequent scripts and create a cluster after the
bootstrap action script fails to be executed.
continue
: Continue to execute subsequent scripts.errorout
: Stop the action. - Name string
- Name of a bootstrap action script.
- Nodes []string
- Type of node where the bootstrap action script is executed, including
master
,core
, andtask
. - Uri string
- Path of the shell script. Set this parameter to an OBS bucket path or a local VM path.
- Active
Master bool - Whether the bootstrap action script runs only on active Master nodes.
- Before
Component boolStart - Time when the bootstrap action script is executed. Currently, the script can be executed before and after the component is started.
- Parameters string
- Bootstrap action script parameters.
- fail
Action String - Whether to continue to execute subsequent scripts and create a cluster after the
bootstrap action script fails to be executed.
continue
: Continue to execute subsequent scripts.errorout
: Stop the action. - name String
- Name of a bootstrap action script.
- nodes List<String>
- Type of node where the bootstrap action script is executed, including
master
,core
, andtask
. - uri String
- Path of the shell script. Set this parameter to an OBS bucket path or a local VM path.
- active
Master Boolean - Whether the bootstrap action script runs only on active Master nodes.
- before
Component BooleanStart - Time when the bootstrap action script is executed. Currently, the script can be executed before and after the component is started.
- parameters String
- Bootstrap action script parameters.
- fail
Action string - Whether to continue to execute subsequent scripts and create a cluster after the
bootstrap action script fails to be executed.
continue
: Continue to execute subsequent scripts.errorout
: Stop the action. - name string
- Name of a bootstrap action script.
- nodes string[]
- Type of node where the bootstrap action script is executed, including
master
,core
, andtask
. - uri string
- Path of the shell script. Set this parameter to an OBS bucket path or a local VM path.
- active
Master boolean - Whether the bootstrap action script runs only on active Master nodes.
- before
Component booleanStart - Time when the bootstrap action script is executed. Currently, the script can be executed before and after the component is started.
- parameters string
- Bootstrap action script parameters.
- fail_
action str - Whether to continue to execute subsequent scripts and create a cluster after the
bootstrap action script fails to be executed.
continue
: Continue to execute subsequent scripts.errorout
: Stop the action. - name str
- Name of a bootstrap action script.
- nodes Sequence[str]
- Type of node where the bootstrap action script is executed, including
master
,core
, andtask
. - uri str
- Path of the shell script. Set this parameter to an OBS bucket path or a local VM path.
- active_
master bool - Whether the bootstrap action script runs only on active Master nodes.
- before_
component_ boolstart - Time when the bootstrap action script is executed. Currently, the script can be executed before and after the component is started.
- parameters str
- Bootstrap action script parameters.
- fail
Action String - Whether to continue to execute subsequent scripts and create a cluster after the
bootstrap action script fails to be executed.
continue
: Continue to execute subsequent scripts.errorout
: Stop the action. - name String
- Name of a bootstrap action script.
- nodes List<String>
- Type of node where the bootstrap action script is executed, including
master
,core
, andtask
. - uri String
- Path of the shell script. Set this parameter to an OBS bucket path or a local VM path.
- active
Master Boolean - Whether the bootstrap action script runs only on active Master nodes.
- before
Component BooleanStart - Time when the bootstrap action script is executed. Currently, the script can be executed before and after the component is started.
- parameters String
- Bootstrap action script parameters.
MrsClusterV1ComponentList, MrsClusterV1ComponentListArgs
- Component
Name string - Component name.
- Component
Desc string - Component description. - Component
Id string - Component ID.
- Component
Version string - Component version.
- Component
Name string - Component name.
- Component
Desc string - Component
Id string - Component ID.
- Component
Version string - Component version.
- component
Name String - Component name.
- component
Desc String - component
Id String - Component ID.
- component
Version String - Component version.
- component
Name string - Component name.
- component
Desc string - component
Id string - Component ID.
- component
Version string - Component version.
- component_
name str - Component name.
- component_
desc str - component_
id str - Component ID.
- component_
version str - Component version.
- component
Name String - Component name.
- component
Desc String - component
Id String - Component ID.
- component
Version String - Component version.
MrsClusterV1Timeouts, MrsClusterV1TimeoutsArgs
Import
Cluster can be imported using the cluster_id
, e.g.
$ pulumi import opentelekomcloud:index/mrsClusterV1:MrsClusterV1 cluster_1 4729ab1c-7c1a-4411-a02e-93dfc361b32d
To learn more about importing existing cloud resources, see Importing resources.
Package Details
- Repository
- opentelekomcloud opentelekomcloud/terraform-provider-opentelekomcloud
- License
- Notes
- This Pulumi package is based on the
opentelekomcloud
Terraform Provider.