flexibleengine.MrsClusterV1
Manages an MRS cluster resource within FlexibleEngine.
!> Warning: This resource has been deprecated; please use flexibleengine.MrsClusterV2 instead.
Example Usage
Creating an MRS Cluster
import * as pulumi from "@pulumi/pulumi";
import * as flexibleengine from "@pulumi/flexibleengine";
const exampleVpc = new flexibleengine.VpcV1("exampleVpc", {cidr: "192.168.0.0/16"});
const exampleSubnet = new flexibleengine.VpcSubnetV1("exampleSubnet", {
cidr: "192.168.0.0/24",
gatewayIp: "192.168.0.1",
vpcId: exampleVpc.vpcV1Id,
});
const cluster1 = new flexibleengine.MrsClusterV1("cluster1", {
region: "eu-west-0",
availableZoneId: "eu-west-0a",
clusterName: "mrs-cluster-test",
clusterType: 0,
clusterVersion: "MRS 2.0.1",
masterNodeNum: 2,
coreNodeNum: 3,
masterNodeSize: "s3.2xlarge.4.linux.mrs",
coreNodeSize: "s3.xlarge.4.linux.mrs",
volumeType: "SATA",
volumeSize: 100,
vpcId: exampleVpc.vpcV1Id,
subnetId: exampleSubnet.vpcSubnetV1Id,
safeMode: 0,
clusterAdminSecret: "{{password_of_mrs_manager}}",
nodePublicCertName: "KeyPair-ci",
componentLists: [
{
componentName: "Hadoop",
},
{
componentName: "Spark",
},
{
componentName: "Hive",
},
{
componentName: "Tez",
},
],
});
import pulumi
import pulumi_flexibleengine as flexibleengine
example_vpc = flexibleengine.VpcV1("exampleVpc", cidr="192.168.0.0/16")
example_subnet = flexibleengine.VpcSubnetV1("exampleSubnet",
cidr="192.168.0.0/24",
gateway_ip="192.168.0.1",
vpc_id=example_vpc.vpc_v1_id)
cluster1 = flexibleengine.MrsClusterV1("cluster1",
region="eu-west-0",
available_zone_id="eu-west-0a",
cluster_name="mrs-cluster-test",
cluster_type=0,
cluster_version="MRS 2.0.1",
master_node_num=2,
core_node_num=3,
master_node_size="s3.2xlarge.4.linux.mrs",
core_node_size="s3.xlarge.4.linux.mrs",
volume_type="SATA",
volume_size=100,
vpc_id=example_vpc.vpc_v1_id,
subnet_id=example_subnet.vpc_subnet_v1_id,
safe_mode=0,
cluster_admin_secret="{{password_of_mrs_manager}}",
node_public_cert_name="KeyPair-ci",
component_lists=[
{
"component_name": "Hadoop",
},
{
"component_name": "Spark",
},
{
"component_name": "Hive",
},
{
"component_name": "Tez",
},
])
package main
import (
"github.com/pulumi/pulumi-terraform-provider/sdks/go/flexibleengine/flexibleengine"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
exampleVpc, err := flexibleengine.NewVpcV1(ctx, "exampleVpc", &flexibleengine.VpcV1Args{
Cidr: pulumi.String("192.168.0.0/16"),
})
if err != nil {
return err
}
exampleSubnet, err := flexibleengine.NewVpcSubnetV1(ctx, "exampleSubnet", &flexibleengine.VpcSubnetV1Args{
Cidr: pulumi.String("192.168.0.0/24"),
GatewayIp: pulumi.String("192.168.0.1"),
VpcId: exampleVpc.VpcV1Id,
})
if err != nil {
return err
}
_, err = flexibleengine.NewMrsClusterV1(ctx, "cluster1", &flexibleengine.MrsClusterV1Args{
Region: pulumi.String("eu-west-0"),
AvailableZoneId: pulumi.String("eu-west-0a"),
ClusterName: pulumi.String("mrs-cluster-test"),
ClusterType: pulumi.Float64(0),
ClusterVersion: pulumi.String("MRS 2.0.1"),
MasterNodeNum: pulumi.Float64(2),
CoreNodeNum: pulumi.Float64(3),
MasterNodeSize: pulumi.String("s3.2xlarge.4.linux.mrs"),
CoreNodeSize: pulumi.String("s3.xlarge.4.linux.mrs"),
VolumeType: pulumi.String("SATA"),
VolumeSize: pulumi.Float64(100),
VpcId: exampleVpc.VpcV1Id,
SubnetId: exampleSubnet.VpcSubnetV1Id,
SafeMode: pulumi.Float64(0),
ClusterAdminSecret: pulumi.String("{{password_of_mrs_manager}}"),
NodePublicCertName: pulumi.String("KeyPair-ci"),
ComponentLists: flexibleengine.MrsClusterV1ComponentListArray{
&flexibleengine.MrsClusterV1ComponentListArgs{
ComponentName: pulumi.String("Hadoop"),
},
&flexibleengine.MrsClusterV1ComponentListArgs{
ComponentName: pulumi.String("Spark"),
},
&flexibleengine.MrsClusterV1ComponentListArgs{
ComponentName: pulumi.String("Hive"),
},
&flexibleengine.MrsClusterV1ComponentListArgs{
ComponentName: pulumi.String("Tez"),
},
},
})
if err != nil {
return err
}
return nil
})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Flexibleengine = Pulumi.Flexibleengine;
return await Deployment.RunAsync(() =>
{
var exampleVpc = new Flexibleengine.VpcV1("exampleVpc", new()
{
Cidr = "192.168.0.0/16",
});
var exampleSubnet = new Flexibleengine.VpcSubnetV1("exampleSubnet", new()
{
Cidr = "192.168.0.0/24",
GatewayIp = "192.168.0.1",
VpcId = exampleVpc.VpcV1Id,
});
var cluster1 = new Flexibleengine.MrsClusterV1("cluster1", new()
{
Region = "eu-west-0",
AvailableZoneId = "eu-west-0a",
ClusterName = "mrs-cluster-test",
ClusterType = 0,
ClusterVersion = "MRS 2.0.1",
MasterNodeNum = 2,
CoreNodeNum = 3,
MasterNodeSize = "s3.2xlarge.4.linux.mrs",
CoreNodeSize = "s3.xlarge.4.linux.mrs",
VolumeType = "SATA",
VolumeSize = 100,
VpcId = exampleVpc.VpcV1Id,
SubnetId = exampleSubnet.VpcSubnetV1Id,
SafeMode = 0,
ClusterAdminSecret = "{{password_of_mrs_manager}}",
NodePublicCertName = "KeyPair-ci",
ComponentLists = new[]
{
new Flexibleengine.Inputs.MrsClusterV1ComponentListArgs
{
ComponentName = "Hadoop",
},
new Flexibleengine.Inputs.MrsClusterV1ComponentListArgs
{
ComponentName = "Spark",
},
new Flexibleengine.Inputs.MrsClusterV1ComponentListArgs
{
ComponentName = "Hive",
},
new Flexibleengine.Inputs.MrsClusterV1ComponentListArgs
{
ComponentName = "Tez",
},
},
});
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.flexibleengine.VpcV1;
import com.pulumi.flexibleengine.VpcV1Args;
import com.pulumi.flexibleengine.VpcSubnetV1;
import com.pulumi.flexibleengine.VpcSubnetV1Args;
import com.pulumi.flexibleengine.MrsClusterV1;
import com.pulumi.flexibleengine.MrsClusterV1Args;
import com.pulumi.flexibleengine.inputs.MrsClusterV1ComponentListArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
var exampleVpc = new VpcV1("exampleVpc", VpcV1Args.builder()
.cidr("192.168.0.0/16")
.build());
var exampleSubnet = new VpcSubnetV1("exampleSubnet", VpcSubnetV1Args.builder()
.cidr("192.168.0.0/24")
.gatewayIp("192.168.0.1")
.vpcId(exampleVpc.vpcV1Id())
.build());
var cluster1 = new MrsClusterV1("cluster1", MrsClusterV1Args.builder()
.region("eu-west-0")
.availableZoneId("eu-west-0a")
.clusterName("mrs-cluster-test")
.clusterType(0)
.clusterVersion("MRS 2.0.1")
.masterNodeNum(2)
.coreNodeNum(3)
.masterNodeSize("s3.2xlarge.4.linux.mrs")
.coreNodeSize("s3.xlarge.4.linux.mrs")
.volumeType("SATA")
.volumeSize(100)
.vpcId(exampleVpc.vpcV1Id())
.subnetId(exampleSubnet.vpcSubnetV1Id())
.safeMode(0)
.clusterAdminSecret("{{password_of_mrs_manager}}")
.nodePublicCertName("KeyPair-ci")
.componentLists(
MrsClusterV1ComponentListArgs.builder()
.componentName("Hadoop")
.build(),
MrsClusterV1ComponentListArgs.builder()
.componentName("Spark")
.build(),
MrsClusterV1ComponentListArgs.builder()
.componentName("Hive")
.build(),
MrsClusterV1ComponentListArgs.builder()
.componentName("Tez")
.build())
.build());
}
}
resources:
exampleVpc:
type: flexibleengine:VpcV1
properties:
cidr: 192.168.0.0/16
exampleSubnet:
type: flexibleengine:VpcSubnetV1
properties:
cidr: 192.168.0.0/24
gatewayIp: 192.168.0.1
vpcId: ${exampleVpc.vpcV1Id}
cluster1:
type: flexibleengine:MrsClusterV1
properties:
region: eu-west-0
availableZoneId: eu-west-0a
clusterName: mrs-cluster-test
clusterType: 0
clusterVersion: MRS 2.0.1
masterNodeNum: 2
coreNodeNum: 3
masterNodeSize: s3.2xlarge.4.linux.mrs
coreNodeSize: s3.xlarge.4.linux.mrs
volumeType: SATA
volumeSize: 100
vpcId: ${exampleVpc.vpcV1Id}
subnetId: ${exampleSubnet.vpcSubnetV1Id}
safeMode: 0
clusterAdminSecret: '{{password_of_mrs_manager}}'
nodePublicCertName: KeyPair-ci
componentLists:
- componentName: Hadoop
- componentName: Spark
- componentName: Hive
- componentName: Tez
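Once the cluster is created, its computed attributes (documented under Outputs below) can be exported as stack outputs. For example, continuing the TypeScript program above:

// Export useful cluster attributes as stack outputs.
export const clusterState = cluster1.clusterState;
export const masterNodeIp = cluster1.masterNodeIp;
export const clusterExternalIp = cluster1.externalIp;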
Create MrsClusterV1 Resource
Resources are created with functions called constructors. To learn more about declaring and configuring resources, see Resources.
Constructor syntax
new MrsClusterV1(name: string, args: MrsClusterV1Args, opts?: CustomResourceOptions);
@overload
def MrsClusterV1(resource_name: str,
args: MrsClusterV1Args,
opts: Optional[ResourceOptions] = None)
@overload
def MrsClusterV1(resource_name: str,
opts: Optional[ResourceOptions] = None,
subnet_id: Optional[str] = None,
available_zone_id: Optional[str] = None,
vpc_id: Optional[str] = None,
volume_type: Optional[str] = None,
cluster_name: Optional[str] = None,
volume_size: Optional[float] = None,
node_public_cert_name: Optional[str] = None,
component_lists: Optional[Sequence[MrsClusterV1ComponentListArgs]] = None,
core_node_num: Optional[float] = None,
core_node_size: Optional[str] = None,
safe_mode: Optional[float] = None,
master_node_num: Optional[float] = None,
master_node_size: Optional[str] = None,
log_collection: Optional[float] = None,
mrs_cluster_v1_id: Optional[str] = None,
region: Optional[str] = None,
add_jobs: Optional[Sequence[MrsClusterV1AddJobArgs]] = None,
cluster_version: Optional[str] = None,
timeouts: Optional[MrsClusterV1TimeoutsArgs] = None,
cluster_type: Optional[float] = None,
cluster_admin_secret: Optional[str] = None,
billing_type: Optional[float] = None)
func NewMrsClusterV1(ctx *Context, name string, args MrsClusterV1Args, opts ...ResourceOption) (*MrsClusterV1, error)
public MrsClusterV1(string name, MrsClusterV1Args args, CustomResourceOptions? opts = null)
public MrsClusterV1(String name, MrsClusterV1Args args)
public MrsClusterV1(String name, MrsClusterV1Args args, CustomResourceOptions options)
type: flexibleengine:MrsClusterV1
properties: # The arguments to resource properties.
options: # Bag of options to control resource's behavior.
Parameters
- name string
- The unique name of the resource.
- args MrsClusterV1Args
- The arguments to resource properties.
- opts CustomResourceOptions
- Bag of options to control resource's behavior.
- resource_name str
- The unique name of the resource.
- args MrsClusterV1Args
- The arguments to resource properties.
- opts ResourceOptions
- Bag of options to control resource's behavior.
- ctx Context
- Context object for the current deployment.
- name string
- The unique name of the resource.
- args MrsClusterV1Args
- The arguments to resource properties.
- opts ResourceOption
- Bag of options to control resource's behavior.
- name string
- The unique name of the resource.
- args MrsClusterV1Args
- The arguments to resource properties.
- opts CustomResourceOptions
- Bag of options to control resource's behavior.
- name String
- The unique name of the resource.
- args MrsClusterV1Args
- The arguments to resource properties.
- options CustomResourceOptions
- Bag of options to control resource's behavior.
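As a concrete illustration of the options bag, the minimal TypeScript sketch below protects a VPC (the same VpcV1 resource used in the examples above) from accidental deletion; any of the standard CustomResourceOptions can be passed the same way.

import * as flexibleengine from "@pulumi/flexibleengine";

// The third constructor argument is the options bag; `protect: true`
// makes `pulumi destroy` refuse to delete the resource until it is unprotected.
const protectedVpc = new flexibleengine.VpcV1("protectedVpc", {
    cidr: "192.168.0.0/16",
}, { protect: true });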
Constructor example
The following reference example uses placeholder values for all input properties.
var mrsClusterV1Resource = new Flexibleengine.MrsClusterV1("mrsClusterV1Resource", new()
{
SubnetId = "string",
AvailableZoneId = "string",
VpcId = "string",
VolumeType = "string",
ClusterName = "string",
VolumeSize = 0,
NodePublicCertName = "string",
ComponentLists = new[]
{
new Flexibleengine.Inputs.MrsClusterV1ComponentListArgs
{
ComponentName = "string",
ComponentDesc = "string",
ComponentId = "string",
ComponentVersion = "string",
},
},
CoreNodeNum = 0,
CoreNodeSize = "string",
SafeMode = 0,
MasterNodeNum = 0,
MasterNodeSize = "string",
LogCollection = 0,
MrsClusterV1Id = "string",
Region = "string",
AddJobs = new[]
{
new Flexibleengine.Inputs.MrsClusterV1AddJobArgs
{
JarPath = "string",
JobName = "string",
JobType = 0,
SubmitJobOnceClusterRun = false,
Arguments = "string",
FileAction = "string",
HiveScriptPath = "string",
Hql = "string",
Input = "string",
JobLog = "string",
Output = "string",
ShutdownCluster = false,
},
},
ClusterVersion = "string",
Timeouts = new Flexibleengine.Inputs.MrsClusterV1TimeoutsArgs
{
Create = "string",
Delete = "string",
},
ClusterType = 0,
ClusterAdminSecret = "string",
BillingType = 0,
});
example, err := flexibleengine.NewMrsClusterV1(ctx, "mrsClusterV1Resource", &flexibleengine.MrsClusterV1Args{
SubnetId: pulumi.String("string"),
AvailableZoneId: pulumi.String("string"),
VpcId: pulumi.String("string"),
VolumeType: pulumi.String("string"),
ClusterName: pulumi.String("string"),
VolumeSize: pulumi.Float64(0),
NodePublicCertName: pulumi.String("string"),
ComponentLists: flexibleengine.MrsClusterV1ComponentListArray{
&flexibleengine.MrsClusterV1ComponentListArgs{
ComponentName: pulumi.String("string"),
ComponentDesc: pulumi.String("string"),
ComponentId: pulumi.String("string"),
ComponentVersion: pulumi.String("string"),
},
},
CoreNodeNum: pulumi.Float64(0),
CoreNodeSize: pulumi.String("string"),
SafeMode: pulumi.Float64(0),
MasterNodeNum: pulumi.Float64(0),
MasterNodeSize: pulumi.String("string"),
LogCollection: pulumi.Float64(0),
MrsClusterV1Id: pulumi.String("string"),
Region: pulumi.String("string"),
AddJobs: flexibleengine.MrsClusterV1AddJobArray{
&flexibleengine.MrsClusterV1AddJobArgs{
JarPath: pulumi.String("string"),
JobName: pulumi.String("string"),
JobType: pulumi.Float64(0),
SubmitJobOnceClusterRun: pulumi.Bool(false),
Arguments: pulumi.String("string"),
FileAction: pulumi.String("string"),
HiveScriptPath: pulumi.String("string"),
Hql: pulumi.String("string"),
Input: pulumi.String("string"),
JobLog: pulumi.String("string"),
Output: pulumi.String("string"),
ShutdownCluster: pulumi.Bool(false),
},
},
ClusterVersion: pulumi.String("string"),
Timeouts: &flexibleengine.MrsClusterV1TimeoutsArgs{
Create: pulumi.String("string"),
Delete: pulumi.String("string"),
},
ClusterType: pulumi.Float64(0),
ClusterAdminSecret: pulumi.String("string"),
BillingType: pulumi.Float64(0),
})
var mrsClusterV1Resource = new MrsClusterV1("mrsClusterV1Resource", MrsClusterV1Args.builder()
.subnetId("string")
.availableZoneId("string")
.vpcId("string")
.volumeType("string")
.clusterName("string")
.volumeSize(0)
.nodePublicCertName("string")
.componentLists(MrsClusterV1ComponentListArgs.builder()
.componentName("string")
.componentDesc("string")
.componentId("string")
.componentVersion("string")
.build())
.coreNodeNum(0)
.coreNodeSize("string")
.safeMode(0)
.masterNodeNum(0)
.masterNodeSize("string")
.logCollection(0)
.mrsClusterV1Id("string")
.region("string")
.addJobs(MrsClusterV1AddJobArgs.builder()
.jarPath("string")
.jobName("string")
.jobType(0)
.submitJobOnceClusterRun(false)
.arguments("string")
.fileAction("string")
.hiveScriptPath("string")
.hql("string")
.input("string")
.jobLog("string")
.output("string")
.shutdownCluster(false)
.build())
.clusterVersion("string")
.timeouts(MrsClusterV1TimeoutsArgs.builder()
.create("string")
.delete("string")
.build())
.clusterType(0)
.clusterAdminSecret("string")
.billingType(0)
.build());
mrs_cluster_v1_resource = flexibleengine.MrsClusterV1("mrsClusterV1Resource",
subnet_id="string",
available_zone_id="string",
vpc_id="string",
volume_type="string",
cluster_name="string",
volume_size=0,
node_public_cert_name="string",
component_lists=[{
"component_name": "string",
"component_desc": "string",
"component_id": "string",
"component_version": "string",
}],
core_node_num=0,
core_node_size="string",
safe_mode=0,
master_node_num=0,
master_node_size="string",
log_collection=0,
mrs_cluster_v1_id="string",
region="string",
add_jobs=[{
"jar_path": "string",
"job_name": "string",
"job_type": 0,
"submit_job_once_cluster_run": False,
"arguments": "string",
"file_action": "string",
"hive_script_path": "string",
"hql": "string",
"input": "string",
"job_log": "string",
"output": "string",
"shutdown_cluster": False,
}],
cluster_version="string",
timeouts={
"create": "string",
"delete": "string",
},
cluster_type=0,
cluster_admin_secret="string",
billing_type=0)
const mrsClusterV1Resource = new flexibleengine.MrsClusterV1("mrsClusterV1Resource", {
subnetId: "string",
availableZoneId: "string",
vpcId: "string",
volumeType: "string",
clusterName: "string",
volumeSize: 0,
nodePublicCertName: "string",
componentLists: [{
componentName: "string",
componentDesc: "string",
componentId: "string",
componentVersion: "string",
}],
coreNodeNum: 0,
coreNodeSize: "string",
safeMode: 0,
masterNodeNum: 0,
masterNodeSize: "string",
logCollection: 0,
mrsClusterV1Id: "string",
region: "string",
addJobs: [{
jarPath: "string",
jobName: "string",
jobType: 0,
submitJobOnceClusterRun: false,
arguments: "string",
fileAction: "string",
hiveScriptPath: "string",
hql: "string",
input: "string",
jobLog: "string",
output: "string",
shutdownCluster: false,
}],
clusterVersion: "string",
timeouts: {
create: "string",
"delete": "string",
},
clusterType: 0,
clusterAdminSecret: "string",
billingType: 0,
});
type: flexibleengine:MrsClusterV1
properties:
addJobs:
- arguments: string
fileAction: string
hiveScriptPath: string
hql: string
input: string
jarPath: string
jobLog: string
jobName: string
jobType: 0
output: string
shutdownCluster: false
submitJobOnceClusterRun: false
availableZoneId: string
billingType: 0
clusterAdminSecret: string
clusterName: string
clusterType: 0
clusterVersion: string
componentLists:
- componentDesc: string
componentId: string
componentName: string
componentVersion: string
coreNodeNum: 0
coreNodeSize: string
logCollection: 0
masterNodeNum: 0
masterNodeSize: string
mrsClusterV1Id: string
nodePublicCertName: string
region: string
safeMode: 0
subnetId: string
timeouts:
create: string
delete: string
volumeSize: 0
volumeType: string
vpcId: string
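The timeouts argument shown in the placeholders above accepts create and delete durations. A hedged TypeScript fragment for the MrsClusterV1 arguments; the duration string format is an assumption (Terraform-style values such as "60m"), not something confirmed by this page:

// Fragment of MrsClusterV1Args: override the default operation timeouts.
timeouts: {
    create: "60m",  // assumed duration format, e.g. "60m" or "1h"
    delete: "30m",
},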
MrsClusterV1 Resource Properties
To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.
Inputs
In Python, inputs that are objects can be passed either as argument classes or as dictionary literals.
The MrsClusterV1 resource accepts the following input properties:
- AvailableZoneId string - ID or name of an available zone. Obtain the value from Regions and Endpoints.
- ClusterName string - Cluster name, which is globally unique and contains only 1 to 64 letters, digits, hyphens (-), and underscores (_).
- ComponentLists List<MrsClusterV1ComponentList> - Service component list. The object structure is documented below.
- CoreNodeNum double - Number of Core nodes. Value range: 3 to 500. A maximum of 500 Core nodes are supported by default. If more than 500 Core nodes are required, contact technical support engineers or invoke background APIs to modify the database.
- CoreNodeSize string - Instance specification of a Core node. The configuration method of this parameter is identical to that of master_node_size.
- MasterNodeNum double - Number of Master nodes. The value is 2.
- MasterNodeSize string - Instance specification of a Master node, best matched based on several years of commissioning experience. MRS supports a fixed set of host specifications, which are determined by CPUs, memory, and disk space.
  - Master nodes support s1.4xlarge, s1.8xlarge, c3.2xlarge.2, c3.xlarge.4, c3.2xlarge.4, c3.4xlarge.2, c3.4xlarge.4, c3.8xlarge.4 and c3.15xlarge.4.
  - Core nodes of a streaming cluster support s1.xlarge, c2.2xlarge, s1.2xlarge, s1.4xlarge, s1.8xlarge, d1.8xlarge, c3.2xlarge.2, c3.xlarge.4, c3.2xlarge.4, c3.4xlarge.2, c3.4xlarge.4, c3.8xlarge.4 and c3.15xlarge.4.
  - Core nodes of an analysis cluster support c2.2xlarge, s1.xlarge, s1.4xlarge, s1.8xlarge, d1.xlarge, d1.2xlarge, d1.4xlarge, d1.8xlarge, c3.2xlarge.2, c3.xlarge.4, c3.2xlarge.4, c3.4xlarge.2, c3.4xlarge.4, c3.8xlarge.4, c3.15xlarge.4, d2.xlarge.8, d2.2xlarge.8, d2.4xlarge.8 and d2.8xlarge.8.

  The following table provides specification details.

  node_size | CPU (cores) | Memory (GB) | System Disk (GB) | Data Disk
  --- | --- | --- | --- | ---
  c2.2xlarge.linux.mrs | 8 | 16 | 40 | -
  cc3.xlarge.4.linux.mrs | 4 | 16 | 40 | -
  cc3.2xlarge.4.linux.mrs | 8 | 32 | 40 | -
  cc3.4xlarge.4.linux.mrs | 16 | 64 | 40 | -
  cc3.8xlarge.4.linux.mrs | 32 | 128 | 40 | -
  s1.xlarge.linux.mrs | 4 | 16 | 40 | -
  s1.4xlarge.linux.mrs | 16 | 64 | 40 | -
  s1.8xlarge.linux.mrs | 32 | 128 | 40 | -
  s3.xlarge.4.linux.mrs | 4 | 16 | 40 | -
  s3.2xlarge.4.linux.mrs | 8 | 32 | 40 | -
  s3.4xlarge.4.linux.mrs | 16 | 64 | 40 | -
  d1.xlarge.linux.mrs | 6 | 55 | 40 | 1.8 TB x 3 HDDs
  d1.2xlarge.linux.mrs | 12 | 110 | 40 | 1.8 TB x 6 HDDs
  d1.4xlarge.linux.mrs | 24 | 220 | 40 | 1.8 TB x 12 HDDs
  d1.8xlarge.linux.mrs | 48 | 440 | 40 | 1.8 TB x 24 HDDs
  d2.xlarge.linux.mrs | 4 | 32 | 40 | -
  d2.2xlarge.linux.mrs | 8 | 64 | 40 | -
  d2.4xlarge.linux.mrs | 16 | 128 | 40 | 1.8 TB x 8 HDDs
  d2.8xlarge.linux.mrs | 32 | 256 | 40 | 1.8 TB x 16 HDDs
- NodePublicCertName string - Name of a key pair. You can use the key pair to log in to the Master node in the cluster.
- SafeMode double - MRS cluster running mode.
  - 0: common mode. Kerberos authentication is disabled, and users can use all functions provided by the cluster.
  - 1: safe mode. Kerberos authentication is enabled; common users cannot use the file management and job management functions of an MRS cluster, or view cluster resource usage and the job records of Hadoop and Spark. To use these functions, the users must obtain the relevant permissions from the MRS Manager administrator.
- SubnetId string - Specifies the ID of the VPC subnet that is bound to the MRS cluster. Changing this will create a new MRS cluster resource.
- VolumeSize double - Data disk storage space of a Core node. Users can add disks to expand storage capacity when creating a cluster. There are two scenarios:
  - Separation of data storage and computing: data is stored in the OBS system. Cluster costs are relatively low but computing performance is poor; the clusters can be deleted at any time. Recommended when data computing is not performed frequently.
  - Integration of data storage and computing: data is stored in the HDFS system. Cluster costs are relatively high but computing performance is good; the clusters cannot be deleted in a short term. Recommended when data computing is performed frequently.

  Value range: 100 GB to 32000 GB.
- VolumeType string - Type of disks. SATA (common I/O) and SSD (super high-speed I/O) are supported.
- VpcId string - ID of the VPC where the subnet is located. Obtain the VPC ID from the management console as follows: register an account and log in to the management console; click Virtual Private Cloud and select Virtual Private Cloud from the left list; on the Virtual Private Cloud page, obtain the VPC ID from the list.
- AddJobs List<MrsClusterV1AddJob> - You can submit a job when you create a cluster to save time and use MRS easily. Only one job can be added. The object structure is documented below (see the fragment after this list for a sketch).
- BillingType double
- ClusterAdminSecret string - Indicates the password of the MRS Manager administrator (see the configuration sketch after this list for handling it as a secret).
  - Must contain 8 to 32 characters.
  - Must contain at least three of the following: lowercase letters, uppercase letters, digits, and special characters: `~!@#$%^&*()-_=+|[{}];:'",<.>/? and space.
  - Cannot be the username or the username spelled backwards.
- ClusterType double - Type of clusters. 0: analysis cluster; 1: streaming cluster. The default value is 0.
- ClusterVersion string - Version of the clusters. Possible values are MRS 1.8.9, MRS 2.0.1, MRS 2.1.0 and MRS 3.1.0-LTS.1. The latest version of MRS is used by default.
- LogCollection double - Indicates whether logs are collected when cluster installation fails. 0: not collected; 1: collected. The default value is 0. If log_collection is set to 1, OBS buckets will be created to collect the MRS logs; these buckets will be charged.
- MrsClusterV1Id string - The resource ID in UUID format.
- Region string - Cluster region information. Obtain the value from Regions and Endpoints.
- Timeouts MrsClusterV1Timeouts
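Because cluster_admin_secret is the MRS Manager administrator password, avoid hard-coding it in the program. A minimal TypeScript sketch, assuming a secret stack configuration value named adminSecret has been set with pulumi config set --secret adminSecret ...:

import * as pulumi from "@pulumi/pulumi";

const config = new pulumi.Config();
// requireSecret returns an Output marked as secret, so the password is
// encrypted in the state file and masked in CLI output.
const adminSecret = config.requireSecret("adminSecret");
// Then pass it in the MrsClusterV1 arguments:
//     clusterAdminSecret: adminSecret,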
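For add_jobs, a hedged TypeScript fragment using only fields from the object structure shown in the constructor example; the bucket paths and the jobType value are illustrative assumptions, not values confirmed by this page:

// Fragment of MrsClusterV1Args: submit one job when the cluster starts.
addJobs: [{
    jobName: "wordcount-once",
    jobType: 1,                                    // assumption: 1 denotes a MapReduce job in the MRS API
    jarPath: "s3a://example-bucket/wordcount.jar", // hypothetical program path
    arguments: "wordcount",
    input: "s3a://example-bucket/input/",          // hypothetical input path
    output: "s3a://example-bucket/output/",        // hypothetical output path
    jobLog: "s3a://example-bucket/log/",           // hypothetical log path
    submitJobOnceClusterRun: true,
    shutdownCluster: false,
}],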
Outputs
All input properties are implicitly available as output properties. Additionally, the MrsClusterV1 resource produces the following output properties:
- AvailableZoneName string - Name of an availability zone.
- ChargingStartTime string - Time when charging starts.
- ClusterId string - Cluster ID.
- ClusterState string - Cluster status. Valid values include: starting, running, terminated, failed, abnormal, terminating, frozen, scaling-out and scaling-in.
- CoreNodeProductId string - Product ID of a Core node.
- CoreNodeSpecId string - Specification ID of a Core node.
- CreateAt string - Cluster creation time.
- DeploymentId string - Deployment ID of a cluster.
- Duration string - Cluster subscription duration.
- ErrorInfo string - Error information.
- ExternalAlternateIp string - Backup external IP address.
- ExternalIp string - External IP address.
- Fee string - Cluster creation fee, which is automatically calculated.
- HadoopVersion string - Hadoop version.
- Id string - The provider-assigned unique ID for this managed resource.
- InstanceId string - Instance ID.
- InternalIp string - Internal IP address.
- MasterNodeIp string - IP address of a Master node.
- MasterNodeProductId string - Product ID of a Master node.
- MasterNodeSpecId string - Specification ID of a Master node.
- OrderId string - Order ID for creating clusters.
- PrivateIpFirst string - Primary private IP address.
- Remark string - Remarks of a cluster.
- SecurityGroupsId string - Security group ID.
- SlaveSecurityGroupsId string - Standby security group ID.
- TenantId string - Project ID.
- UpdateAt string - Cluster update time.
- Vnc string - URI address for remote login of the elastic cloud server.
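In the SDKs these attributes are Pulumi Outputs, so their raw values must be consumed with apply or pulumi.interpolate rather than read directly. A small TypeScript sketch, reusing the cluster1 resource from the example above:

import * as pulumi from "@pulumi/pulumi";

// Derive a human-readable value from the external IP once it resolves.
export const managerHint = pulumi.interpolate`MRS cluster reachable at ${cluster1.externalIp}`;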
- Available
Zone stringName - Name of an availability zone.
- Charging
Start stringTime - Time when charging starts.
- Cluster
Id string - Cluster ID.
- Cluster
State string - Cluster status. Valid values include: starting, running, terminated, failed, abnormal, terminating, frozen, scaling-out and scaling-in.
- Core
Node stringProduct Id - Product ID of a Core node.
- Core
Node stringSpec Id - Specification ID of a Core node.
- Create
At string - Cluster creation time.
- Deployment
Id string - Deployment ID of a cluster.
- Duration string
- Cluster subscription duration.
- Error
Info string - Error information.
- External
Alternate stringIp - Backup external IP address.
- External
Ip string - External IP address.
- Fee string
- Cluster creation fee, which is automatically calculated.
- Hadoop
Version string - Hadoop version.
- Id string
- The provider-assigned unique ID for this managed resource.
- Instance
Id string - Instance ID.
- Internal
Ip string - Master
Node stringIp - IP address of a Master node.
externalIp
- Internal IP address.
- Master
Node stringProduct Id - Product ID of a Master node.
- Master
Node stringSpec Id - Specification ID of a Master node.
- Order
Id string - Order ID for creating clusters.
- Private
Ip stringFirst - Primary private IP address.
- Remark string
- Remarks of a cluster.
- Security
Groups stringId - Security group ID.
- Slave
Security stringGroups Id - Standby security group ID.
- Tenant
Id string - Project ID.
- Update
At string - Cluster update time.
- Vnc string
- URI address for remote login of the elastic cloud server.
- availableZoneName String - Name of an availability zone.
- chargingStartTime String - Time when charging starts.
- clusterId String - Cluster ID.
- clusterState String - Cluster status. Valid values include: starting, running, terminated, failed, abnormal, terminating, frozen, scaling-out and scaling-in.
- coreNodeProductId String - Product ID of a Core node.
- coreNodeSpecId String - Specification ID of a Core node.
- createAt String - Cluster creation time.
- deploymentId String - Deployment ID of a cluster.
- duration String - Cluster subscription duration.
- errorInfo String - Error information.
- externalAlternateIp String - Backup external IP address.
- externalIp String - External IP address.
- fee String - Cluster creation fee, which is automatically calculated.
- hadoopVersion String - Hadoop version.
- id String - The provider-assigned unique ID for this managed resource.
- instanceId String - Instance ID.
- internalIp String - Internal IP address.
- masterNodeIp String - IP address of a Master node.
- masterNodeProductId String - Product ID of a Master node.
- masterNodeSpecId String - Specification ID of a Master node.
- orderId String - Order ID for creating clusters.
- privateIpFirst String - Primary private IP address.
- remark String - Remarks of a cluster.
- securityGroupsId String - Security group ID.
- slaveSecurityGroupsId String - Standby security group ID.
- tenantId String - Project ID.
- updateAt String - Cluster update time.
- vnc String - URI address for remote login of the elastic cloud server.
- availableZoneName string - Name of an availability zone.
- chargingStartTime string - Time when charging starts.
- clusterId string - Cluster ID.
- clusterState string - Cluster status. Valid values include: starting, running, terminated, failed, abnormal, terminating, frozen, scaling-out and scaling-in.
- coreNodeProductId string - Product ID of a Core node.
- coreNodeSpecId string - Specification ID of a Core node.
- createAt string - Cluster creation time.
- deploymentId string - Deployment ID of a cluster.
- duration string - Cluster subscription duration.
- errorInfo string - Error information.
- externalAlternateIp string - Backup external IP address.
- externalIp string - External IP address.
- fee string - Cluster creation fee, which is automatically calculated.
- hadoopVersion string - Hadoop version.
- id string - The provider-assigned unique ID for this managed resource.
- instanceId string - Instance ID.
- internalIp string - Internal IP address.
- masterNodeIp string - IP address of a Master node.
- masterNodeProductId string - Product ID of a Master node.
- masterNodeSpecId string - Specification ID of a Master node.
- orderId string - Order ID for creating clusters.
- privateIpFirst string - Primary private IP address.
- remark string - Remarks of a cluster.
- securityGroupsId string - Security group ID.
- slaveSecurityGroupsId string - Standby security group ID.
- tenantId string - Project ID.
- updateAt string - Cluster update time.
- vnc string - URI address for remote login of the elastic cloud server.
- available_zone_name str - Name of an availability zone.
- charging_start_time str - Time when charging starts.
- cluster_id str - Cluster ID.
- cluster_state str - Cluster status. Valid values include: starting, running, terminated, failed, abnormal, terminating, frozen, scaling-out and scaling-in.
- core_node_product_id str - Product ID of a Core node.
- core_node_spec_id str - Specification ID of a Core node.
- create_at str - Cluster creation time.
- deployment_id str - Deployment ID of a cluster.
- duration str - Cluster subscription duration.
- error_info str - Error information.
- external_alternate_ip str - Backup external IP address.
- external_ip str - External IP address.
- fee str - Cluster creation fee, which is automatically calculated.
- hadoop_version str - Hadoop version.
- id str - The provider-assigned unique ID for this managed resource.
- instance_id str - Instance ID.
- internal_ip str - Internal IP address.
- master_node_ip str - IP address of a Master node.
- master_node_product_id str - Product ID of a Master node.
- master_node_spec_id str - Specification ID of a Master node.
- order_id str - Order ID for creating clusters.
- private_ip_first str - Primary private IP address.
- remark str - Remarks of a cluster.
- security_groups_id str - Security group ID.
- slave_security_groups_id str - Standby security group ID.
- tenant_id str - Project ID.
- update_at str - Cluster update time.
- vnc str - URI address for remote login of the elastic cloud server.
- availableZoneName String - Name of an availability zone.
- chargingStartTime String - Time when charging starts.
- clusterId String - Cluster ID.
- clusterState String - Cluster status. Valid values include: starting, running, terminated, failed, abnormal, terminating, frozen, scaling-out and scaling-in.
- coreNodeProductId String - Product ID of a Core node.
- coreNodeSpecId String - Specification ID of a Core node.
- createAt String - Cluster creation time.
- deploymentId String - Deployment ID of a cluster.
- duration String - Cluster subscription duration.
- errorInfo String - Error information.
- externalAlternateIp String - Backup external IP address.
- externalIp String - External IP address.
- fee String - Cluster creation fee, which is automatically calculated.
- hadoopVersion String - Hadoop version.
- id String - The provider-assigned unique ID for this managed resource.
- instanceId String - Instance ID.
- internalIp String - Internal IP address.
- masterNodeIp String - IP address of a Master node.
- masterNodeProductId String - Product ID of a Master node.
- masterNodeSpecId String - Specification ID of a Master node.
- orderId String - Order ID for creating clusters.
- privateIpFirst String - Primary private IP address.
- remark String - Remarks of a cluster.
- securityGroupsId String - Security group ID.
- slaveSecurityGroupsId String - Standby security group ID.
- tenantId String - Project ID.
- updateAt String - Cluster update time.
- vnc String - URI address for remote login of the elastic cloud server.
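These outputs can be consumed like any other Pulumi output. Below is a minimal TypeScript sketch, assuming the `cluster1` resource declared in the example above, that surfaces a few of them as stack outputs:

```typescript
// Assumes `cluster1` is the flexibleengine.MrsClusterV1 resource
// declared in the example above.
export const clusterState = cluster1.clusterState; // e.g. "running"
export const masterNodeIp = cluster1.masterNodeIp; // IP address of a Master node
export const externalIp = cluster1.externalIp;     // external IP address
```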
Look up Existing MrsClusterV1 Resource
Get an existing MrsClusterV1 resource’s state with the given name, ID, and optional extra properties used to qualify the lookup.
public static get(name: string, id: Input<ID>, state?: MrsClusterV1State, opts?: CustomResourceOptions): MrsClusterV1
@staticmethod
def get(resource_name: str,
id: str,
opts: Optional[ResourceOptions] = None,
add_jobs: Optional[Sequence[MrsClusterV1AddJobArgs]] = None,
available_zone_id: Optional[str] = None,
available_zone_name: Optional[str] = None,
billing_type: Optional[float] = None,
charging_start_time: Optional[str] = None,
cluster_admin_secret: Optional[str] = None,
cluster_id: Optional[str] = None,
cluster_name: Optional[str] = None,
cluster_state: Optional[str] = None,
cluster_type: Optional[float] = None,
cluster_version: Optional[str] = None,
component_lists: Optional[Sequence[MrsClusterV1ComponentListArgs]] = None,
core_node_num: Optional[float] = None,
core_node_product_id: Optional[str] = None,
core_node_size: Optional[str] = None,
core_node_spec_id: Optional[str] = None,
create_at: Optional[str] = None,
deployment_id: Optional[str] = None,
duration: Optional[str] = None,
error_info: Optional[str] = None,
external_alternate_ip: Optional[str] = None,
external_ip: Optional[str] = None,
fee: Optional[str] = None,
hadoop_version: Optional[str] = None,
instance_id: Optional[str] = None,
internal_ip: Optional[str] = None,
log_collection: Optional[float] = None,
master_node_ip: Optional[str] = None,
master_node_num: Optional[float] = None,
master_node_product_id: Optional[str] = None,
master_node_size: Optional[str] = None,
master_node_spec_id: Optional[str] = None,
mrs_cluster_v1_id: Optional[str] = None,
node_public_cert_name: Optional[str] = None,
order_id: Optional[str] = None,
private_ip_first: Optional[str] = None,
region: Optional[str] = None,
remark: Optional[str] = None,
safe_mode: Optional[float] = None,
security_groups_id: Optional[str] = None,
slave_security_groups_id: Optional[str] = None,
subnet_id: Optional[str] = None,
tenant_id: Optional[str] = None,
timeouts: Optional[MrsClusterV1TimeoutsArgs] = None,
update_at: Optional[str] = None,
vnc: Optional[str] = None,
volume_size: Optional[float] = None,
volume_type: Optional[str] = None,
vpc_id: Optional[str] = None) -> MrsClusterV1
func GetMrsClusterV1(ctx *Context, name string, id IDInput, state *MrsClusterV1State, opts ...ResourceOption) (*MrsClusterV1, error)
public static MrsClusterV1 Get(string name, Input<string> id, MrsClusterV1State? state, CustomResourceOptions? opts = null)
public static MrsClusterV1 get(String name, Output<String> id, MrsClusterV1State state, CustomResourceOptions options)
resources:
  _:
    type: flexibleengine:MrsClusterV1
    get:
      id: ${id}
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- resource_name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
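For instance, a minimal TypeScript sketch of such a lookup; the ID string below is a placeholder, not a real cluster ID:

```typescript
import * as flexibleengine from "@pulumi/flexibleengine";

// Adopt an existing cluster's state by its provider ID; this reads
// state only and does not create or modify the cluster.
const existing = flexibleengine.MrsClusterV1.get(
    "existing-cluster",                      // logical name for the looked-up resource
    "11111111-2222-3333-4444-555555555555",  // placeholder cluster ID
);

export const existingState = existing.clusterState;
```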
- AddJobs List<MrsClusterV1AddJob> - You can submit a job when you create a cluster to save time and use MRS easily. Only one job can be added. The object structure is documented below.
- AvailableZoneId string - ID or name of an available zone. Obtain the value from Regions and Endpoints.
- AvailableZoneName string - Name of an availability zone.
- BillingType double
- ChargingStartTime string - Time when charging starts.
- ClusterAdminSecret string - Indicates the password of the MRS Manager administrator.
  - Must contain 8 to 32 characters.
  - Must contain at least three of the following: lowercase letters, uppercase letters, digits and special characters: `~!@#$%^&*()-_=+|[{}];:'",<.>/? and space.
  - Cannot be the username or the username spelled backwards.
- ClusterId string - Cluster ID.
- ClusterName string - Cluster name, which is globally unique and contains only 1 to 64 letters, digits, hyphens (-), and underscores (_).
- ClusterState string - Cluster status. Valid values include: starting, running, terminated, failed, abnormal, terminating, frozen, scaling-out and scaling-in.
- ClusterType double - Type of clusters. 0: analysis cluster; 1: streaming cluster. The default value is 0.
- ClusterVersion string - Version of the clusters. Possible values are as follows: MRS 1.8.9, MRS 2.0.1, MRS 2.1.0 and MRS 3.1.0-LTS.1. The latest version of MRS is used by default.
- ComponentLists List<MrsClusterV1ComponentList> - Service component list. The object structure is documented below.
- CoreNodeNum double - Number of Core nodes. Value range: 3 to 500. A maximum of 500 Core nodes are supported by default. If more than 500 Core nodes are required, contact technical support engineers or invoke background APIs to modify the database.
- CoreNodeProductId string - Product ID of a Core node.
- CoreNodeSize string - Instance specification of a Core node. The configuration method of this parameter is identical to that of master_node_size.
- CoreNodeSpecId string - Specification ID of a Core node.
- CreateAt string - Cluster creation time.
- DeploymentId string - Deployment ID of a cluster.
- Duration string - Cluster subscription duration.
- ErrorInfo string - Error information.
- ExternalAlternateIp string - Backup external IP address.
- ExternalIp string - External IP address.
- Fee string - Cluster creation fee, which is automatically calculated.
- HadoopVersion string - Hadoop version.
- InstanceId string - Instance ID.
- InternalIp string - Internal IP address.
- LogCollection double - Indicates whether logs are collected when cluster installation fails. 0: not collected; 1: collected. The default value is 0. If log_collection is set to 1, OBS buckets will be created to collect the MRS logs. These buckets will be charged.
- MasterNodeIp string - IP address of a Master node.
- MasterNodeNum double - Number of Master nodes. The value is 2.
- MasterNodeProductId string - Product ID of a Master node.
- MasterNodeSize string - Best match based on several years of commissioning experience. MRS supports host specifications, which are determined by CPU, memory, and disk space.
  - Master nodes support s1.4xlarge and s1.8xlarge, c3.2xlarge.2, c3.xlarge.4, c3.2xlarge.4, c3.4xlarge.2, c3.4xlarge.4, c3.8xlarge.4, c3.15xlarge.4.
  - Core nodes of a streaming cluster support s1.xlarge, c2.2xlarge, s1.2xlarge, s1.4xlarge, s1.8xlarge, d1.8xlarge, c3.2xlarge.2, c3.xlarge.4, c3.2xlarge.4, c3.4xlarge.2, c3.4xlarge.4, c3.8xlarge.4, c3.15xlarge.4.
  - Core nodes of an analysis cluster support all specifications: c2.2xlarge, s1.xlarge, s1.4xlarge, s1.8xlarge, d1.xlarge, d1.2xlarge, d1.4xlarge, d1.8xlarge, c3.2xlarge.2, c3.xlarge.4, c3.2xlarge.4, c3.4xlarge.2, c3.4xlarge.4, c3.8xlarge.4, c3.15xlarge.4, d2.xlarge.8, d2.2xlarge.8, d2.4xlarge.8, d2.8xlarge.8.

The following provides specification details.

node_size | CPU (cores) | Memory (GB) | System Disk (GB) | Data Disk
--- | --- | --- | --- | ---
c2.2xlarge.linux.mrs | 8 | 16 | 40 | -
c3.xlarge.4.linux.mrs | 4 | 16 | 40 | -
c3.2xlarge.4.linux.mrs | 8 | 32 | 40 | -
c3.4xlarge.4.linux.mrs | 16 | 64 | 40 | -
c3.8xlarge.4.linux.mrs | 32 | 128 | 40 | -
s1.xlarge.linux.mrs | 4 | 16 | 40 | -
s1.4xlarge.linux.mrs | 16 | 64 | 40 | -
s1.8xlarge.linux.mrs | 32 | 128 | 40 | -
s3.xlarge.4.linux.mrs | 4 | 16 | 40 | -
s3.2xlarge.4.linux.mrs | 8 | 32 | 40 | -
s3.4xlarge.4.linux.mrs | 16 | 64 | 40 | -
d1.xlarge.linux.mrs | 6 | 55 | 40 | 1.8 TB x 3 HDDs
d1.2xlarge.linux.mrs | 12 | 110 | 40 | 1.8 TB x 6 HDDs
d1.4xlarge.linux.mrs | 24 | 220 | 40 | 1.8 TB x 12 HDDs
d1.8xlarge.linux.mrs | 48 | 440 | 40 | 1.8 TB x 24 HDDs
d2.xlarge.linux.mrs | 4 | 32 | 40 | -
d2.2xlarge.linux.mrs | 8 | 64 | 40 | -
d2.4xlarge.linux.mrs | 16 | 128 | 40 | 1.8 TB x 8 HDDs
d2.8xlarge.linux.mrs | 32 | 256 | 40 | 1.8 TB x 16 HDDs

- MasterNodeSpecId string - Specification ID of a Master node.
- MrsClusterV1Id string - The resource ID in UUID format.
- NodePublicCertName string - Name of a key pair. You can use the key to log in to the Master node of the cluster.
- OrderId string - Order ID for creating clusters.
- PrivateIpFirst string - Primary private IP address.
- Region string - Cluster region information. Obtain the value from Regions and Endpoints.
- Remark string - Remarks of a cluster.
- SafeMode double - MRS cluster running mode.
  - 0: common mode. Kerberos authentication is disabled, and users can use all functions provided by the cluster.
  - 1: safe mode. Kerberos authentication is enabled; common users cannot use the file management or job management functions of an MRS cluster, or view cluster resource usage and the job records of Hadoop and Spark. To use these functions, users must obtain the relevant permissions from the MRS Manager administrator.
- SecurityGroupsId string - Security group ID.
- SlaveSecurityGroupsId string - Standby security group ID.
- SubnetId string - Specifies the ID of the VPC subnet bound to the MRS cluster. Changing this will create a new MRS cluster resource.
- TenantId string - Project ID.
- Timeouts MrsClusterV1Timeouts
- UpdateAt string - Cluster update time.
- Vnc string - URI address for remote login of the elastic cloud server.
- VolumeSize double - Data disk storage space of a Core node. Users can add disks to expand storage capacity when creating a cluster. There are two scenarios. Separation of data storage and computing: data is stored in the OBS system; cluster costs are relatively low but computing performance is poor, and the clusters can be deleted at any time. It is recommended when data computing is not frequently performed. Integration of data storage and computing: data is stored in the HDFS system; cluster costs are relatively high but computing performance is good, and the clusters cannot be deleted in a short term. It is recommended when data computing is frequently performed. Value range: 100 GB to 32000 GB.
- VolumeType string - Type of disks. SATA and SSD are supported. SATA: common I/O; SSD: super high-speed I/O.
- VpcId string - ID of the VPC where the subnet is located. Obtain the VPC ID from the management console as follows: register an account and log in to the management console; click Virtual Private Cloud and select Virtual Private Cloud from the left list; on the Virtual Private Cloud page, obtain the VPC ID from the list.
- AddJobs []MrsClusterV1AddJobArgs - You can submit a job when you create a cluster to save time and use MRS easily. Only one job can be added. The object structure is documented below.
- AvailableZoneId string - ID or name of an available zone. Obtain the value from Regions and Endpoints.
- AvailableZoneName string - Name of an availability zone.
- BillingType float64
- ChargingStartTime string - Time when charging starts.
- ClusterAdminSecret string - Indicates the password of the MRS Manager administrator.
  - Must contain 8 to 32 characters.
  - Must contain at least three of the following: lowercase letters, uppercase letters, digits and special characters: `~!@#$%^&*()-_=+|[{}];:'",<.>/? and space.
  - Cannot be the username or the username spelled backwards.
- ClusterId string - Cluster ID.
- ClusterName string - Cluster name, which is globally unique and contains only 1 to 64 letters, digits, hyphens (-), and underscores (_).
- ClusterState string - Cluster status. Valid values include: starting, running, terminated, failed, abnormal, terminating, frozen, scaling-out and scaling-in.
- ClusterType float64 - Type of clusters. 0: analysis cluster; 1: streaming cluster. The default value is 0.
- ClusterVersion string - Version of the clusters. Possible values are as follows: MRS 1.8.9, MRS 2.0.1, MRS 2.1.0 and MRS 3.1.0-LTS.1. The latest version of MRS is used by default.
- ComponentLists []MrsClusterV1ComponentListArgs - Service component list. The object structure is documented below.
- CoreNodeNum float64 - Number of Core nodes. Value range: 3 to 500. A maximum of 500 Core nodes are supported by default. If more than 500 Core nodes are required, contact technical support engineers or invoke background APIs to modify the database.
- CoreNodeProductId string - Product ID of a Core node.
- CoreNodeSize string - Instance specification of a Core node. The configuration method of this parameter is identical to that of master_node_size.
- CoreNodeSpecId string - Specification ID of a Core node.
- CreateAt string - Cluster creation time.
- DeploymentId string - Deployment ID of a cluster.
- Duration string - Cluster subscription duration.
- ErrorInfo string - Error information.
- ExternalAlternateIp string - Backup external IP address.
- ExternalIp string - External IP address.
- Fee string - Cluster creation fee, which is automatically calculated.
- HadoopVersion string - Hadoop version.
- InstanceId string - Instance ID.
- InternalIp string - Internal IP address.
- LogCollection float64 - Indicates whether logs are collected when cluster installation fails. 0: not collected; 1: collected. The default value is 0. If log_collection is set to 1, OBS buckets will be created to collect the MRS logs. These buckets will be charged.
- MasterNodeIp string - IP address of a Master node.
- MasterNodeNum float64 - Number of Master nodes. The value is 2.
- MasterNodeProductId string - Product ID of a Master node.
- MasterNodeSize string - Best match based on several years of commissioning experience. MRS supports host specifications, which are determined by CPU, memory, and disk space.
  - Master nodes support s1.4xlarge and s1.8xlarge, c3.2xlarge.2, c3.xlarge.4, c3.2xlarge.4, c3.4xlarge.2, c3.4xlarge.4, c3.8xlarge.4, c3.15xlarge.4.
  - Core nodes of a streaming cluster support s1.xlarge, c2.2xlarge, s1.2xlarge, s1.4xlarge, s1.8xlarge, d1.8xlarge, c3.2xlarge.2, c3.xlarge.4, c3.2xlarge.4, c3.4xlarge.2, c3.4xlarge.4, c3.8xlarge.4, c3.15xlarge.4.
  - Core nodes of an analysis cluster support all specifications: c2.2xlarge, s1.xlarge, s1.4xlarge, s1.8xlarge, d1.xlarge, d1.2xlarge, d1.4xlarge, d1.8xlarge, c3.2xlarge.2, c3.xlarge.4, c3.2xlarge.4, c3.4xlarge.2, c3.4xlarge.4, c3.8xlarge.4, c3.15xlarge.4, d2.xlarge.8, d2.2xlarge.8, d2.4xlarge.8, d2.8xlarge.8.

The following provides specification details.

node_size | CPU (cores) | Memory (GB) | System Disk (GB) | Data Disk
--- | --- | --- | --- | ---
c2.2xlarge.linux.mrs | 8 | 16 | 40 | -
c3.xlarge.4.linux.mrs | 4 | 16 | 40 | -
c3.2xlarge.4.linux.mrs | 8 | 32 | 40 | -
c3.4xlarge.4.linux.mrs | 16 | 64 | 40 | -
c3.8xlarge.4.linux.mrs | 32 | 128 | 40 | -
s1.xlarge.linux.mrs | 4 | 16 | 40 | -
s1.4xlarge.linux.mrs | 16 | 64 | 40 | -
s1.8xlarge.linux.mrs | 32 | 128 | 40 | -
s3.xlarge.4.linux.mrs | 4 | 16 | 40 | -
s3.2xlarge.4.linux.mrs | 8 | 32 | 40 | -
s3.4xlarge.4.linux.mrs | 16 | 64 | 40 | -
d1.xlarge.linux.mrs | 6 | 55 | 40 | 1.8 TB x 3 HDDs
d1.2xlarge.linux.mrs | 12 | 110 | 40 | 1.8 TB x 6 HDDs
d1.4xlarge.linux.mrs | 24 | 220 | 40 | 1.8 TB x 12 HDDs
d1.8xlarge.linux.mrs | 48 | 440 | 40 | 1.8 TB x 24 HDDs
d2.xlarge.linux.mrs | 4 | 32 | 40 | -
d2.2xlarge.linux.mrs | 8 | 64 | 40 | -
d2.4xlarge.linux.mrs | 16 | 128 | 40 | 1.8 TB x 8 HDDs
d2.8xlarge.linux.mrs | 32 | 256 | 40 | 1.8 TB x 16 HDDs

- MasterNodeSpecId string - Specification ID of a Master node.
- MrsClusterV1Id string - The resource ID in UUID format.
- NodePublicCertName string - Name of a key pair. You can use the key to log in to the Master node of the cluster.
- OrderId string - Order ID for creating clusters.
- PrivateIpFirst string - Primary private IP address.
- Region string - Cluster region information. Obtain the value from Regions and Endpoints.
- Remark string - Remarks of a cluster.
- SafeMode float64 - MRS cluster running mode.
  - 0: common mode. Kerberos authentication is disabled, and users can use all functions provided by the cluster.
  - 1: safe mode. Kerberos authentication is enabled; common users cannot use the file management or job management functions of an MRS cluster, or view cluster resource usage and the job records of Hadoop and Spark. To use these functions, users must obtain the relevant permissions from the MRS Manager administrator.
- SecurityGroupsId string - Security group ID.
- SlaveSecurityGroupsId string - Standby security group ID.
- SubnetId string - Specifies the ID of the VPC subnet bound to the MRS cluster. Changing this will create a new MRS cluster resource.
- TenantId string - Project ID.
- Timeouts MrsClusterV1TimeoutsArgs
- UpdateAt string - Cluster update time.
- Vnc string - URI address for remote login of the elastic cloud server.
- VolumeSize float64 - Data disk storage space of a Core node. Users can add disks to expand storage capacity when creating a cluster. There are two scenarios. Separation of data storage and computing: data is stored in the OBS system; cluster costs are relatively low but computing performance is poor, and the clusters can be deleted at any time. It is recommended when data computing is not frequently performed. Integration of data storage and computing: data is stored in the HDFS system; cluster costs are relatively high but computing performance is good, and the clusters cannot be deleted in a short term. It is recommended when data computing is frequently performed. Value range: 100 GB to 32000 GB.
- VolumeType string - Type of disks. SATA and SSD are supported. SATA: common I/O; SSD: super high-speed I/O.
- VpcId string - ID of the VPC where the subnet is located. Obtain the VPC ID from the management console as follows: register an account and log in to the management console; click Virtual Private Cloud and select Virtual Private Cloud from the left list; on the Virtual Private Cloud page, obtain the VPC ID from the list.
- addJobs List<MrsClusterV1AddJob> - You can submit a job when you create a cluster to save time and use MRS easily. Only one job can be added. The object structure is documented below.
- availableZoneId String - ID or name of an available zone. Obtain the value from Regions and Endpoints.
- availableZoneName String - Name of an availability zone.
- billingType Double
- chargingStartTime String - Time when charging starts.
- clusterAdminSecret String - Indicates the password of the MRS Manager administrator.
  - Must contain 8 to 32 characters.
  - Must contain at least three of the following: lowercase letters, uppercase letters, digits and special characters: `~!@#$%^&*()-_=+|[{}];:'",<.>/? and space.
  - Cannot be the username or the username spelled backwards.
- clusterId String - Cluster ID.
- clusterName String - Cluster name, which is globally unique and contains only 1 to 64 letters, digits, hyphens (-), and underscores (_).
- clusterState String - Cluster status. Valid values include: starting, running, terminated, failed, abnormal, terminating, frozen, scaling-out and scaling-in.
- clusterType Double - Type of clusters. 0: analysis cluster; 1: streaming cluster. The default value is 0.
- clusterVersion String - Version of the clusters. Possible values are as follows: MRS 1.8.9, MRS 2.0.1, MRS 2.1.0 and MRS 3.1.0-LTS.1. The latest version of MRS is used by default.
- componentLists List<MrsClusterV1ComponentList> - Service component list. The object structure is documented below.
- coreNodeNum Double - Number of Core nodes. Value range: 3 to 500. A maximum of 500 Core nodes are supported by default. If more than 500 Core nodes are required, contact technical support engineers or invoke background APIs to modify the database.
- coreNodeProductId String - Product ID of a Core node.
- coreNodeSize String - Instance specification of a Core node. The configuration method of this parameter is identical to that of master_node_size.
- coreNodeSpecId String - Specification ID of a Core node.
- createAt String - Cluster creation time.
- deploymentId String - Deployment ID of a cluster.
- duration String - Cluster subscription duration.
- errorInfo String - Error information.
- externalAlternateIp String - Backup external IP address.
- externalIp String - External IP address.
- fee String - Cluster creation fee, which is automatically calculated.
- hadoopVersion String - Hadoop version.
- instanceId String - Instance ID.
- internalIp String - Internal IP address.
- logCollection Double - Indicates whether logs are collected when cluster installation fails. 0: not collected; 1: collected. The default value is 0. If log_collection is set to 1, OBS buckets will be created to collect the MRS logs. These buckets will be charged.
- masterNodeIp String - IP address of a Master node.
- masterNodeNum Double - Number of Master nodes. The value is 2.
- masterNodeProductId String - Product ID of a Master node.
- masterNodeSize String - Best match based on several years of commissioning experience. MRS supports host specifications, which are determined by CPU, memory, and disk space.
  - Master nodes support s1.4xlarge and s1.8xlarge, c3.2xlarge.2, c3.xlarge.4, c3.2xlarge.4, c3.4xlarge.2, c3.4xlarge.4, c3.8xlarge.4, c3.15xlarge.4.
  - Core nodes of a streaming cluster support s1.xlarge, c2.2xlarge, s1.2xlarge, s1.4xlarge, s1.8xlarge, d1.8xlarge, c3.2xlarge.2, c3.xlarge.4, c3.2xlarge.4, c3.4xlarge.2, c3.4xlarge.4, c3.8xlarge.4, c3.15xlarge.4.
  - Core nodes of an analysis cluster support all specifications: c2.2xlarge, s1.xlarge, s1.4xlarge, s1.8xlarge, d1.xlarge, d1.2xlarge, d1.4xlarge, d1.8xlarge, c3.2xlarge.2, c3.xlarge.4, c3.2xlarge.4, c3.4xlarge.2, c3.4xlarge.4, c3.8xlarge.4, c3.15xlarge.4, d2.xlarge.8, d2.2xlarge.8, d2.4xlarge.8, d2.8xlarge.8.

The following provides specification details.

node_size | CPU (cores) | Memory (GB) | System Disk (GB) | Data Disk
--- | --- | --- | --- | ---
c2.2xlarge.linux.mrs | 8 | 16 | 40 | -
c3.xlarge.4.linux.mrs | 4 | 16 | 40 | -
c3.2xlarge.4.linux.mrs | 8 | 32 | 40 | -
c3.4xlarge.4.linux.mrs | 16 | 64 | 40 | -
c3.8xlarge.4.linux.mrs | 32 | 128 | 40 | -
s1.xlarge.linux.mrs | 4 | 16 | 40 | -
s1.4xlarge.linux.mrs | 16 | 64 | 40 | -
s1.8xlarge.linux.mrs | 32 | 128 | 40 | -
s3.xlarge.4.linux.mrs | 4 | 16 | 40 | -
s3.2xlarge.4.linux.mrs | 8 | 32 | 40 | -
s3.4xlarge.4.linux.mrs | 16 | 64 | 40 | -
d1.xlarge.linux.mrs | 6 | 55 | 40 | 1.8 TB x 3 HDDs
d1.2xlarge.linux.mrs | 12 | 110 | 40 | 1.8 TB x 6 HDDs
d1.4xlarge.linux.mrs | 24 | 220 | 40 | 1.8 TB x 12 HDDs
d1.8xlarge.linux.mrs | 48 | 440 | 40 | 1.8 TB x 24 HDDs
d2.xlarge.linux.mrs | 4 | 32 | 40 | -
d2.2xlarge.linux.mrs | 8 | 64 | 40 | -
d2.4xlarge.linux.mrs | 16 | 128 | 40 | 1.8 TB x 8 HDDs
d2.8xlarge.linux.mrs | 32 | 256 | 40 | 1.8 TB x 16 HDDs

- masterNodeSpecId String - Specification ID of a Master node.
- mrsClusterV1Id String - The resource ID in UUID format.
- nodePublicCertName String - Name of a key pair. You can use the key to log in to the Master node of the cluster.
- orderId String - Order ID for creating clusters.
- privateIpFirst String - Primary private IP address.
- region String - Cluster region information. Obtain the value from Regions and Endpoints.
- remark String - Remarks of a cluster.
- safeMode Double - MRS cluster running mode.
  - 0: common mode. Kerberos authentication is disabled, and users can use all functions provided by the cluster.
  - 1: safe mode. Kerberos authentication is enabled; common users cannot use the file management or job management functions of an MRS cluster, or view cluster resource usage and the job records of Hadoop and Spark. To use these functions, users must obtain the relevant permissions from the MRS Manager administrator.
- securityGroupsId String - Security group ID.
- slaveSecurityGroupsId String - Standby security group ID.
- subnetId String - Specifies the ID of the VPC subnet bound to the MRS cluster. Changing this will create a new MRS cluster resource.
- tenantId String - Project ID.
- timeouts MrsClusterV1Timeouts
- updateAt String - Cluster update time.
- vnc String - URI address for remote login of the elastic cloud server.
- volumeSize Double - Data disk storage space of a Core node. Users can add disks to expand storage capacity when creating a cluster. There are two scenarios. Separation of data storage and computing: data is stored in the OBS system; cluster costs are relatively low but computing performance is poor, and the clusters can be deleted at any time. It is recommended when data computing is not frequently performed. Integration of data storage and computing: data is stored in the HDFS system; cluster costs are relatively high but computing performance is good, and the clusters cannot be deleted in a short term. It is recommended when data computing is frequently performed. Value range: 100 GB to 32000 GB.
- volumeType String - Type of disks. SATA and SSD are supported. SATA: common I/O; SSD: super high-speed I/O.
- vpcId String - ID of the VPC where the subnet is located. Obtain the VPC ID from the management console as follows: register an account and log in to the management console; click Virtual Private Cloud and select Virtual Private Cloud from the left list; on the Virtual Private Cloud page, obtain the VPC ID from the list.
- addJobs MrsClusterV1AddJob[] - You can submit a job when you create a cluster to save time and use MRS easily. Only one job can be added. The object structure is documented below.
- availableZoneId string - ID or name of an available zone. Obtain the value from Regions and Endpoints.
- availableZoneName string - Name of an availability zone.
- billingType number
- chargingStartTime string - Time when charging starts.
- clusterAdminSecret string - Indicates the password of the MRS Manager administrator.
  - Must contain 8 to 32 characters.
  - Must contain at least three of the following: lowercase letters, uppercase letters, digits and special characters: `~!@#$%^&*()-_=+|[{}];:'",<.>/? and space.
  - Cannot be the username or the username spelled backwards.
- clusterId string - Cluster ID.
- clusterName string - Cluster name, which is globally unique and contains only 1 to 64 letters, digits, hyphens (-), and underscores (_).
- clusterState string - Cluster status. Valid values include: starting, running, terminated, failed, abnormal, terminating, frozen, scaling-out and scaling-in.
- clusterType number - Type of clusters. 0: analysis cluster; 1: streaming cluster. The default value is 0.
- clusterVersion string - Version of the clusters. Possible values are as follows: MRS 1.8.9, MRS 2.0.1, MRS 2.1.0 and MRS 3.1.0-LTS.1. The latest version of MRS is used by default.
- componentLists MrsClusterV1ComponentList[] - Service component list. The object structure is documented below.
- coreNodeNum number - Number of Core nodes. Value range: 3 to 500. A maximum of 500 Core nodes are supported by default. If more than 500 Core nodes are required, contact technical support engineers or invoke background APIs to modify the database.
- coreNodeProductId string - Product ID of a Core node.
- coreNodeSize string - Instance specification of a Core node. The configuration method of this parameter is identical to that of master_node_size.
- coreNodeSpecId string - Specification ID of a Core node.
- createAt string - Cluster creation time.
- deploymentId string - Deployment ID of a cluster.
- duration string - Cluster subscription duration.
- errorInfo string - Error information.
- externalAlternateIp string - Backup external IP address.
- externalIp string - External IP address.
- fee string - Cluster creation fee, which is automatically calculated.
- hadoopVersion string - Hadoop version.
- instanceId string - Instance ID.
- internalIp string - Internal IP address.
- logCollection number - Indicates whether logs are collected when cluster installation fails. 0: not collected; 1: collected. The default value is 0. If log_collection is set to 1, OBS buckets will be created to collect the MRS logs. These buckets will be charged.
- masterNodeIp string - IP address of a Master node.
- masterNodeNum number - Number of Master nodes. The value is 2.
- masterNodeProductId string - Product ID of a Master node.
- masterNodeSize string - Best match based on several years of commissioning experience. MRS supports host specifications, which are determined by CPU, memory, and disk space.
  - Master nodes support s1.4xlarge and s1.8xlarge, c3.2xlarge.2, c3.xlarge.4, c3.2xlarge.4, c3.4xlarge.2, c3.4xlarge.4, c3.8xlarge.4, c3.15xlarge.4.
  - Core nodes of a streaming cluster support s1.xlarge, c2.2xlarge, s1.2xlarge, s1.4xlarge, s1.8xlarge, d1.8xlarge, c3.2xlarge.2, c3.xlarge.4, c3.2xlarge.4, c3.4xlarge.2, c3.4xlarge.4, c3.8xlarge.4, c3.15xlarge.4.
  - Core nodes of an analysis cluster support all specifications: c2.2xlarge, s1.xlarge, s1.4xlarge, s1.8xlarge, d1.xlarge, d1.2xlarge, d1.4xlarge, d1.8xlarge, c3.2xlarge.2, c3.xlarge.4, c3.2xlarge.4, c3.4xlarge.2, c3.4xlarge.4, c3.8xlarge.4, c3.15xlarge.4, d2.xlarge.8, d2.2xlarge.8, d2.4xlarge.8, d2.8xlarge.8.

The following provides specification details.

node_size | CPU (cores) | Memory (GB) | System Disk (GB) | Data Disk
--- | --- | --- | --- | ---
c2.2xlarge.linux.mrs | 8 | 16 | 40 | -
c3.xlarge.4.linux.mrs | 4 | 16 | 40 | -
c3.2xlarge.4.linux.mrs | 8 | 32 | 40 | -
c3.4xlarge.4.linux.mrs | 16 | 64 | 40 | -
c3.8xlarge.4.linux.mrs | 32 | 128 | 40 | -
s1.xlarge.linux.mrs | 4 | 16 | 40 | -
s1.4xlarge.linux.mrs | 16 | 64 | 40 | -
s1.8xlarge.linux.mrs | 32 | 128 | 40 | -
s3.xlarge.4.linux.mrs | 4 | 16 | 40 | -
s3.2xlarge.4.linux.mrs | 8 | 32 | 40 | -
s3.4xlarge.4.linux.mrs | 16 | 64 | 40 | -
d1.xlarge.linux.mrs | 6 | 55 | 40 | 1.8 TB x 3 HDDs
d1.2xlarge.linux.mrs | 12 | 110 | 40 | 1.8 TB x 6 HDDs
d1.4xlarge.linux.mrs | 24 | 220 | 40 | 1.8 TB x 12 HDDs
d1.8xlarge.linux.mrs | 48 | 440 | 40 | 1.8 TB x 24 HDDs
d2.xlarge.linux.mrs | 4 | 32 | 40 | -
d2.2xlarge.linux.mrs | 8 | 64 | 40 | -
d2.4xlarge.linux.mrs | 16 | 128 | 40 | 1.8 TB x 8 HDDs
d2.8xlarge.linux.mrs | 32 | 256 | 40 | 1.8 TB x 16 HDDs

- masterNodeSpecId string - Specification ID of a Master node.
- mrsClusterV1Id string - The resource ID in UUID format.
- nodePublicCertName string - Name of a key pair. You can use the key to log in to the Master node of the cluster.
- orderId string - Order ID for creating clusters.
- privateIpFirst string - Primary private IP address.
- region string - Cluster region information. Obtain the value from Regions and Endpoints.
- remark string - Remarks of a cluster.
- safeMode number - MRS cluster running mode.
  - 0: common mode. Kerberos authentication is disabled, and users can use all functions provided by the cluster.
  - 1: safe mode. Kerberos authentication is enabled; common users cannot use the file management or job management functions of an MRS cluster, or view cluster resource usage and the job records of Hadoop and Spark. To use these functions, users must obtain the relevant permissions from the MRS Manager administrator.
- securityGroupsId string - Security group ID.
- slaveSecurityGroupsId string - Standby security group ID.
- subnetId string - Specifies the ID of the VPC subnet bound to the MRS cluster. Changing this will create a new MRS cluster resource.
- tenantId string - Project ID.
- timeouts MrsClusterV1Timeouts
- updateAt string - Cluster update time.
- vnc string - URI address for remote login of the elastic cloud server.
- volumeSize number - Data disk storage space of a Core node. Users can add disks to expand storage capacity when creating a cluster. There are two scenarios. Separation of data storage and computing: data is stored in the OBS system; cluster costs are relatively low but computing performance is poor, and the clusters can be deleted at any time. It is recommended when data computing is not frequently performed. Integration of data storage and computing: data is stored in the HDFS system; cluster costs are relatively high but computing performance is good, and the clusters cannot be deleted in a short term. It is recommended when data computing is frequently performed. Value range: 100 GB to 32000 GB.
- volumeType string - Type of disks. SATA and SSD are supported. SATA: common I/O; SSD: super high-speed I/O.
- vpcId string - ID of the VPC where the subnet is located. Obtain the VPC ID from the management console as follows: register an account and log in to the management console; click Virtual Private Cloud and select Virtual Private Cloud from the left list; on the Virtual Private Cloud page, obtain the VPC ID from the list.
- add_jobs Sequence[MrsClusterV1AddJobArgs] - You can submit a job when you create a cluster to save time and use MRS easily. Only one job can be added. The object structure is documented below.
- available_zone_id str - ID or name of an available zone. Obtain the value from Regions and Endpoints.
- available_zone_name str - Name of an availability zone.
- billing_type float
- charging_start_time str - Time when charging starts.
- cluster_admin_secret str - Indicates the password of the MRS Manager administrator.
  - Must contain 8 to 32 characters.
  - Must contain at least three of the following: lowercase letters, uppercase letters, digits and special characters: `~!@#$%^&*()-_=+|[{}];:'",<.>/? and space.
  - Cannot be the username or the username spelled backwards.
- cluster_id str - Cluster ID.
- cluster_name str - Cluster name, which is globally unique and contains only 1 to 64 letters, digits, hyphens (-), and underscores (_).
- cluster_state str - Cluster status. Valid values include: starting, running, terminated, failed, abnormal, terminating, frozen, scaling-out and scaling-in.
- cluster_type float - Type of clusters. 0: analysis cluster; 1: streaming cluster. The default value is 0.
- cluster_version str - Version of the clusters. Possible values are as follows: MRS 1.8.9, MRS 2.0.1, MRS 2.1.0 and MRS 3.1.0-LTS.1. The latest version of MRS is used by default.
- component_lists Sequence[MrsClusterV1ComponentListArgs] - Service component list. The object structure is documented below.
- core_node_num float - Number of Core nodes. Value range: 3 to 500. A maximum of 500 Core nodes are supported by default. If more than 500 Core nodes are required, contact technical support engineers or invoke background APIs to modify the database.
- core_node_product_id str - Product ID of a Core node.
- core_node_size str - Instance specification of a Core node. The configuration method of this parameter is identical to that of master_node_size.
- core_node_spec_id str - Specification ID of a Core node.
- create_at str - Cluster creation time.
- deployment_id str - Deployment ID of a cluster.
- duration str - Cluster subscription duration.
- error_info str - Error information.
- external_alternate_ip str - Backup external IP address.
- external_ip str - External IP address.
- fee str - Cluster creation fee, which is automatically calculated.
- hadoop_version str - Hadoop version.
- instance_id str - Instance ID.
- internal_ip str - Internal IP address.
- log_collection float - Indicates whether logs are collected when cluster installation fails. 0: not collected; 1: collected. The default value is 0. If log_collection is set to 1, OBS buckets will be created to collect the MRS logs. These buckets will be charged.
- master_node_ip str - IP address of a Master node.
- master_node_num float - Number of Master nodes. The value is 2.
- master_node_product_id str - Product ID of a Master node.
- master_node_size str - Best match based on several years of commissioning experience. MRS supports host specifications, which are determined by CPU, memory, and disk space.
  - Master nodes support s1.4xlarge and s1.8xlarge, c3.2xlarge.2, c3.xlarge.4, c3.2xlarge.4, c3.4xlarge.2, c3.4xlarge.4, c3.8xlarge.4, c3.15xlarge.4.
  - Core nodes of a streaming cluster support s1.xlarge, c2.2xlarge, s1.2xlarge, s1.4xlarge, s1.8xlarge, d1.8xlarge, c3.2xlarge.2, c3.xlarge.4, c3.2xlarge.4, c3.4xlarge.2, c3.4xlarge.4, c3.8xlarge.4, c3.15xlarge.4.
  - Core nodes of an analysis cluster support all specifications: c2.2xlarge, s1.xlarge, s1.4xlarge, s1.8xlarge, d1.xlarge, d1.2xlarge, d1.4xlarge, d1.8xlarge, c3.2xlarge.2, c3.xlarge.4, c3.2xlarge.4, c3.4xlarge.2, c3.4xlarge.4, c3.8xlarge.4, c3.15xlarge.4, d2.xlarge.8, d2.2xlarge.8, d2.4xlarge.8, d2.8xlarge.8.

The following provides specification details.

node_size | CPU (cores) | Memory (GB) | System Disk (GB) | Data Disk
--- | --- | --- | --- | ---
c2.2xlarge.linux.mrs | 8 | 16 | 40 | -
c3.xlarge.4.linux.mrs | 4 | 16 | 40 | -
c3.2xlarge.4.linux.mrs | 8 | 32 | 40 | -
c3.4xlarge.4.linux.mrs | 16 | 64 | 40 | -
c3.8xlarge.4.linux.mrs | 32 | 128 | 40 | -
s1.xlarge.linux.mrs | 4 | 16 | 40 | -
s1.4xlarge.linux.mrs | 16 | 64 | 40 | -
s1.8xlarge.linux.mrs | 32 | 128 | 40 | -
s3.xlarge.4.linux.mrs | 4 | 16 | 40 | -
s3.2xlarge.4.linux.mrs | 8 | 32 | 40 | -
s3.4xlarge.4.linux.mrs | 16 | 64 | 40 | -
d1.xlarge.linux.mrs | 6 | 55 | 40 | 1.8 TB x 3 HDDs
d1.2xlarge.linux.mrs | 12 | 110 | 40 | 1.8 TB x 6 HDDs
d1.4xlarge.linux.mrs | 24 | 220 | 40 | 1.8 TB x 12 HDDs
d1.8xlarge.linux.mrs | 48 | 440 | 40 | 1.8 TB x 24 HDDs
d2.xlarge.linux.mrs | 4 | 32 | 40 | -
d2.2xlarge.linux.mrs | 8 | 64 | 40 | -
d2.4xlarge.linux.mrs | 16 | 128 | 40 | 1.8 TB x 8 HDDs
d2.8xlarge.linux.mrs | 32 | 256 | 40 | 1.8 TB x 16 HDDs

- master_node_spec_id str - Specification ID of a Master node.
- mrs_cluster_v1_id str - The resource ID in UUID format.
- node_public_cert_name str - Name of a key pair. You can use the key to log in to the Master node of the cluster.
- order_id str - Order ID for creating clusters.
- private_ip_first str - Primary private IP address.
- region str - Cluster region information. Obtain the value from Regions and Endpoints.
- remark str - Remarks of a cluster.
- safe_mode float - MRS cluster running mode.
  - 0: common mode. Kerberos authentication is disabled, and users can use all functions provided by the cluster.
  - 1: safe mode. Kerberos authentication is enabled; common users cannot use the file management or job management functions of an MRS cluster, or view cluster resource usage and the job records of Hadoop and Spark. To use these functions, users must obtain the relevant permissions from the MRS Manager administrator.
- security_groups_id str - Security group ID.
- slave_security_groups_id str - Standby security group ID.
- subnet_id str - Specifies the ID of the VPC subnet bound to the MRS cluster. Changing this will create a new MRS cluster resource.
- tenant_id str - Project ID.
- timeouts MrsClusterV1TimeoutsArgs
- update_at str - Cluster update time.
- vnc str - URI address for remote login of the elastic cloud server.
- volume_size float - Data disk storage space of a Core node. Users can add disks to expand storage capacity when creating a cluster. There are two scenarios. Separation of data storage and computing: data is stored in the OBS system; cluster costs are relatively low but computing performance is poor, and the clusters can be deleted at any time. It is recommended when data computing is not frequently performed. Integration of data storage and computing: data is stored in the HDFS system; cluster costs are relatively high but computing performance is good, and the clusters cannot be deleted in a short term. It is recommended when data computing is frequently performed. Value range: 100 GB to 32000 GB.
- volume_type str - Type of disks. SATA and SSD are supported. SATA: common I/O; SSD: super high-speed I/O.
- vpc_id str - ID of the VPC where the subnet is located. Obtain the VPC ID from the management console as follows: register an account and log in to the management console; click Virtual Private Cloud and select Virtual Private Cloud from the left list; on the Virtual Private Cloud page, obtain the VPC ID from the list.
- addJobs List<Property Map> - You can submit a job when you create a cluster to save time and use MRS easily. Only one job can be added. The object structure is documented below.
- availableZoneId String - ID or name of an available zone. Obtain the value from Regions and Endpoints.
- availableZoneName String - Name of an availability zone.
- billingType Number
- chargingStartTime String - Time when charging starts.
- clusterAdminSecret String - Indicates the password of the MRS Manager administrator.
  - Must contain 8 to 32 characters.
  - Must contain at least three of the following: lowercase letters, uppercase letters, digits and special characters: `~!@#$%^&*()-_=+|[{}];:'",<.>/? and space.
  - Cannot be the username or the username spelled backwards.
- clusterId String - Cluster ID.
- clusterName String - Cluster name, which is globally unique and contains only 1 to 64 letters, digits, hyphens (-), and underscores (_).
- clusterState String - Cluster status. Valid values include: starting, running, terminated, failed, abnormal, terminating, frozen, scaling-out and scaling-in.
- clusterType Number - Type of clusters. 0: analysis cluster; 1: streaming cluster. The default value is 0.
- clusterVersion String - Version of the clusters. Possible values are as follows: MRS 1.8.9, MRS 2.0.1, MRS 2.1.0 and MRS 3.1.0-LTS.1. The latest version of MRS is used by default.
- componentLists List<Property Map> - Service component list. The object structure is documented below.
- coreNodeNum Number - Number of Core nodes. Value range: 3 to 500. A maximum of 500 Core nodes are supported by default. If more than 500 Core nodes are required, contact technical support engineers or invoke background APIs to modify the database.
- coreNodeProductId String - Product ID of a Core node.
- coreNodeSize String - Instance specification of a Core node. The configuration method of this parameter is identical to that of master_node_size.
- coreNodeSpecId String - Specification ID of a Core node.
- createAt String - Cluster creation time.
- deploymentId String - Deployment ID of a cluster.
- duration String - Cluster subscription duration.
- errorInfo String - Error information.
- externalAlternateIp String - Backup external IP address.
- externalIp String - External IP address.
- fee String - Cluster creation fee, which is automatically calculated.
- hadoopVersion String - Hadoop version.
- instanceId String - Instance ID.
- internalIp String - Internal IP address.
- logCollection Number - Indicates whether logs are collected when cluster installation fails. 0: not collected; 1: collected. The default value is 0. If log_collection is set to 1, OBS buckets will be created to collect the MRS logs. These buckets will be charged.
- masterNodeIp String - IP address of a Master node.
- masterNodeNum Number - Number of Master nodes. The value is 2.
- masterNodeProductId String - Product ID of a Master node.
- masterNodeSize String - Best match based on several years of commissioning experience. MRS supports host specifications, which are determined by CPU, memory, and disk space.
  - Master nodes support s1.4xlarge and s1.8xlarge, c3.2xlarge.2, c3.xlarge.4, c3.2xlarge.4, c3.4xlarge.2, c3.4xlarge.4, c3.8xlarge.4, c3.15xlarge.4.
  - Core nodes of a streaming cluster support s1.xlarge, c2.2xlarge, s1.2xlarge, s1.4xlarge, s1.8xlarge, d1.8xlarge, c3.2xlarge.2, c3.xlarge.4, c3.2xlarge.4, c3.4xlarge.2, c3.4xlarge.4, c3.8xlarge.4, c3.15xlarge.4.
  - Core nodes of an analysis cluster support all specifications: c2.2xlarge, s1.xlarge, s1.4xlarge, s1.8xlarge, d1.xlarge, d1.2xlarge, d1.4xlarge, d1.8xlarge, c3.2xlarge.2, c3.xlarge.4, c3.2xlarge.4, c3.4xlarge.2, c3.4xlarge.4, c3.8xlarge.4, c3.15xlarge.4, d2.xlarge.8, d2.2xlarge.8, d2.4xlarge.8, d2.8xlarge.8.

The following provides specification details.

node_size | CPU (cores) | Memory (GB) | System Disk (GB) | Data Disk
--- | --- | --- | --- | ---
c2.2xlarge.linux.mrs | 8 | 16 | 40 | -
c3.xlarge.4.linux.mrs | 4 | 16 | 40 | -
c3.2xlarge.4.linux.mrs | 8 | 32 | 40 | -
c3.4xlarge.4.linux.mrs | 16 | 64 | 40 | -
c3.8xlarge.4.linux.mrs | 32 | 128 | 40 | -
s1.xlarge.linux.mrs | 4 | 16 | 40 | -
s1.4xlarge.linux.mrs | 16 | 64 | 40 | -
s1.8xlarge.linux.mrs | 32 | 128 | 40 | -
s3.xlarge.4.linux.mrs | 4 | 16 | 40 | -
s3.2xlarge.4.linux.mrs | 8 | 32 | 40 | -
s3.4xlarge.4.linux.mrs | 16 | 64 | 40 | -
d1.xlarge.linux.mrs | 6 | 55 | 40 | 1.8 TB x 3 HDDs
d1.2xlarge.linux.mrs | 12 | 110 | 40 | 1.8 TB x 6 HDDs
d1.4xlarge.linux.mrs | 24 | 220 | 40 | 1.8 TB x 12 HDDs
d1.8xlarge.linux.mrs | 48 | 440 | 40 | 1.8 TB x 24 HDDs
d2.xlarge.linux.mrs | 4 | 32 | 40 | -
d2.2xlarge.linux.mrs | 8 | 64 | 40 | -
d2.4xlarge.linux.mrs | 16 | 128 | 40 | 1.8 TB x 8 HDDs
d2.8xlarge.linux.mrs | 32 | 256 | 40 | 1.8 TB x 16 HDDs

- masterNodeSpecId String - Specification ID of a Master node.
- mrsClusterV1Id String - The resource ID in UUID format.
- nodePublicCertName String - Name of a key pair. You can use the key to log in to the Master node of the cluster.
- orderId String - Order ID for creating clusters.
- privateIpFirst String - Primary private IP address.
- region String - Cluster region information. Obtain the value from Regions and Endpoints.
- remark String - Remarks of a cluster.
- safeMode Number - MRS cluster running mode.
  - 0: common mode. Kerberos authentication is disabled, and users can use all functions provided by the cluster.
  - 1: safe mode. Kerberos authentication is enabled; common users cannot use the file management or job management functions of an MRS cluster, or view cluster resource usage and the job records of Hadoop and Spark. To use these functions, users must obtain the relevant permissions from the MRS Manager administrator.
- securityGroupsId String - Security group ID.
- slaveSecurityGroupsId String - Standby security group ID.
- subnetId String - Specifies the ID of the VPC subnet bound to the MRS cluster. Changing this will create a new MRS cluster resource.
- tenantId String - Project ID.
- timeouts Property Map
- updateAt String - Cluster update time.
- vnc String - URI address for remote login of the elastic cloud server.
- volumeSize Number - Data disk storage space of a Core node. Users can add disks to expand storage capacity when creating a cluster. There are two scenarios. Separation of data storage and computing: data is stored in the OBS system; cluster costs are relatively low but computing performance is poor, and the clusters can be deleted at any time. It is recommended when data computing is not frequently performed. Integration of data storage and computing: data is stored in the HDFS system; cluster costs are relatively high but computing performance is good, and the clusters cannot be deleted in a short term. It is recommended when data computing is frequently performed. Value range: 100 GB to 32000 GB.
- volumeType String - Type of disks. SATA and SSD are supported. SATA: common I/O; SSD: super high-speed I/O.
- vpcId String - ID of the VPC where the subnet is located. Obtain the VPC ID from the management console as follows: register an account and log in to the management console; click Virtual Private Cloud and select Virtual Private Cloud from the left list; on the Virtual Private Cloud page, obtain the VPC ID from the list.
Supporting Types
MrsClusterV1AddJob, MrsClusterV1AddJobArgs
- JarPath string - Path of the .jar file or .sql file for program execution. The parameter must meet the following requirements: contains a maximum of 1023 characters, excluding special characters such as ;|&><'$; the address cannot be empty or full of spaces; starts with / or s3a://. A Spark Script file must end with .sql, while MapReduce and Spark Jar files must end with .jar. sql and jar are case-insensitive.
- JobName string - Job name. It contains only 1 to 64 letters, digits, hyphens (-), and underscores (_). NOTE: Identical job names are allowed but not recommended.
- JobType double - Job type. 1: MapReduce; 2: Spark; 3: Hive Script; 4: HiveQL (not supported currently); 5: DistCp, importing and exporting data (not supported in this API currently); 6: Spark Script; 7: Spark SQL, submitting Spark SQL statements (not supported in this API currently). NOTE: Spark and Hive jobs can be added only to clusters that include the Spark and Hive components.
- SubmitJobOnceClusterRun bool - true: a job is submitted when the cluster is created; false: a job is submitted separately. The parameter is set to true in this example.
- Arguments string - Key parameter for program execution. The parameter is specified by the function of the user's program; MRS is only responsible for loading the parameter. The parameter contains a maximum of 2047 characters, excluding special characters such as ;|&>'<$, and can be empty.
- FileAction string - Data import and export. Valid values: import, export.
- HiveScriptPath string - SQL program path. This parameter is needed by Spark Script and Hive Script jobs only and must meet the following requirements: contains a maximum of 1023 characters, excluding special characters such as ;|&><'$; the address cannot be empty or full of spaces; starts with / or s3a://; ends with .sql. sql is case-insensitive.
- Hql string - HiveQL statement.
- Input string - Path for inputting data, which must start with / or s3a://. A correct OBS path is required. The parameter contains a maximum of 1023 characters, excluding special characters such as ;|&>'<$, and can be empty.
- JobLog string - Path for storing job logs that record job running status. This path must start with / or s3a://. A correct OBS path is required. The parameter contains a maximum of 1023 characters, excluding special characters such as ;|&>'<$, and can be empty.
- Output string - Path for outputting data, which must start with / or s3a://. A correct OBS path is required. If the path does not exist, the system automatically creates it. The parameter contains a maximum of 1023 characters, excluding special characters such as ;|&>'<$, and can be empty.
- ShutdownCluster bool - Whether to delete the cluster after the jobs are complete. true: yes; false: no.
- JarPath string - Path of the .jar or .sql file for program execution. The path must contain a maximum of 1023 characters, excluding special characters such as ;|&><'$; it cannot be empty or full of spaces; and it must start with / or s3a://. A Spark Script path must end with .sql, while MapReduce and Spark Jar paths must end with .jar; sql and jar are case-insensitive.
- JobName string - Job name. It contains only 1 to 64 letters, digits, hyphens (-), and underscores (_). NOTE: identical job names are allowed but not recommended.
- JobType float64 - Job type. 1: MapReduce; 2: Spark; 3: Hive Script; 4: HiveQL (not supported currently); 5: DistCp, importing and exporting data (not supported in this API currently); 6: Spark Script; 7: Spark SQL, submitting Spark SQL statements (not supported in this API currently). NOTE: Spark and Hive jobs can be added only to clusters that include the Spark and Hive components.
- SubmitJobOnceClusterRun bool - true: the job is submitted when the cluster is created; false: the job is submitted separately.
- Arguments string - Key parameter for program execution. The parameter is specified by the function of the user's program; MRS is only responsible for loading it. It contains a maximum of 2047 characters, excluding special characters such as ;|&>'<$, and can be empty.
- FileAction string - Data import and export action. Valid values: import, export.
- HiveScriptPath string - SQL program path. This parameter is needed by Spark Script and Hive Script jobs only. The path must contain a maximum of 1023 characters, excluding special characters such as ;|&><'$; it cannot be empty or full of spaces; it must start with / or s3a://; and it must end with .sql (case-insensitive).
- Hql string - HiveQL statement.
- Input string - Path for inputting data, which must start with / or s3a://. A correct OBS path is required. It contains a maximum of 1023 characters, excluding special characters such as ;|&>'<$, and can be empty.
- JobLog string - Path for storing job logs that record the job running status. The path must start with / or s3a://. A correct OBS path is required. It contains a maximum of 1023 characters, excluding special characters such as ;|&>'<$, and can be empty.
- Output string - Path for outputting data, which must start with / or s3a://. A correct OBS path is required. If the path does not exist, the system automatically creates it. It contains a maximum of 1023 characters, excluding special characters such as ;|&>'<$, and can be empty.
- ShutdownCluster bool - Whether to delete the cluster after the jobs are complete. true: yes; false: no.
- jarPath String - Path of the .jar or .sql file for program execution. The path must contain a maximum of 1023 characters, excluding special characters such as ;|&><'$; it cannot be empty or full of spaces; and it must start with / or s3a://. A Spark Script path must end with .sql, while MapReduce and Spark Jar paths must end with .jar; sql and jar are case-insensitive.
- jobName String - Job name. It contains only 1 to 64 letters, digits, hyphens (-), and underscores (_). NOTE: identical job names are allowed but not recommended.
- jobType Double - Job type. 1: MapReduce; 2: Spark; 3: Hive Script; 4: HiveQL (not supported currently); 5: DistCp, importing and exporting data (not supported in this API currently); 6: Spark Script; 7: Spark SQL, submitting Spark SQL statements (not supported in this API currently). NOTE: Spark and Hive jobs can be added only to clusters that include the Spark and Hive components.
- submitJobOnceClusterRun Boolean - true: the job is submitted when the cluster is created; false: the job is submitted separately.
- arguments String - Key parameter for program execution. The parameter is specified by the function of the user's program; MRS is only responsible for loading it. It contains a maximum of 2047 characters, excluding special characters such as ;|&>'<$, and can be empty.
- fileAction String - Data import and export action. Valid values: import, export.
- hiveScriptPath String - SQL program path. This parameter is needed by Spark Script and Hive Script jobs only. The path must contain a maximum of 1023 characters, excluding special characters such as ;|&><'$; it cannot be empty or full of spaces; it must start with / or s3a://; and it must end with .sql (case-insensitive).
- hql String - HiveQL statement.
- input String - Path for inputting data, which must start with / or s3a://. A correct OBS path is required. It contains a maximum of 1023 characters, excluding special characters such as ;|&>'<$, and can be empty.
- jobLog String - Path for storing job logs that record the job running status. The path must start with / or s3a://. A correct OBS path is required. It contains a maximum of 1023 characters, excluding special characters such as ;|&>'<$, and can be empty.
- output String - Path for outputting data, which must start with / or s3a://. A correct OBS path is required. If the path does not exist, the system automatically creates it. It contains a maximum of 1023 characters, excluding special characters such as ;|&>'<$, and can be empty.
- shutdownCluster Boolean - Whether to delete the cluster after the jobs are complete. true: yes; false: no.
- jarPath string - Path of the .jar or .sql file for program execution. The path must contain a maximum of 1023 characters, excluding special characters such as ;|&><'$; it cannot be empty or full of spaces; and it must start with / or s3a://. A Spark Script path must end with .sql, while MapReduce and Spark Jar paths must end with .jar; sql and jar are case-insensitive.
- jobName string - Job name. It contains only 1 to 64 letters, digits, hyphens (-), and underscores (_). NOTE: identical job names are allowed but not recommended.
- jobType number - Job type. 1: MapReduce; 2: Spark; 3: Hive Script; 4: HiveQL (not supported currently); 5: DistCp, importing and exporting data (not supported in this API currently); 6: Spark Script; 7: Spark SQL, submitting Spark SQL statements (not supported in this API currently). NOTE: Spark and Hive jobs can be added only to clusters that include the Spark and Hive components.
- submitJobOnceClusterRun boolean - true: the job is submitted when the cluster is created; false: the job is submitted separately.
- arguments string - Key parameter for program execution. The parameter is specified by the function of the user's program; MRS is only responsible for loading it. It contains a maximum of 2047 characters, excluding special characters such as ;|&>'<$, and can be empty.
- fileAction string - Data import and export action. Valid values: import, export.
- hiveScriptPath string - SQL program path. This parameter is needed by Spark Script and Hive Script jobs only. The path must contain a maximum of 1023 characters, excluding special characters such as ;|&><'$; it cannot be empty or full of spaces; it must start with / or s3a://; and it must end with .sql (case-insensitive).
- hql string - HiveQL statement.
- input string - Path for inputting data, which must start with / or s3a://. A correct OBS path is required. It contains a maximum of 1023 characters, excluding special characters such as ;|&>'<$, and can be empty.
- jobLog string - Path for storing job logs that record the job running status. The path must start with / or s3a://. A correct OBS path is required. It contains a maximum of 1023 characters, excluding special characters such as ;|&>'<$, and can be empty.
- output string - Path for outputting data, which must start with / or s3a://. A correct OBS path is required. If the path does not exist, the system automatically creates it. It contains a maximum of 1023 characters, excluding special characters such as ;|&>'<$, and can be empty.
- shutdownCluster boolean - Whether to delete the cluster after the jobs are complete. true: yes; false: no.
- jar_path str - Path of the .jar or .sql file for program execution. The path must contain a maximum of 1023 characters, excluding special characters such as ;|&><'$; it cannot be empty or full of spaces; and it must start with / or s3a://. A Spark Script path must end with .sql, while MapReduce and Spark Jar paths must end with .jar; sql and jar are case-insensitive.
- job_name str - Job name. It contains only 1 to 64 letters, digits, hyphens (-), and underscores (_). NOTE: identical job names are allowed but not recommended.
- job_type float - Job type. 1: MapReduce; 2: Spark; 3: Hive Script; 4: HiveQL (not supported currently); 5: DistCp, importing and exporting data (not supported in this API currently); 6: Spark Script; 7: Spark SQL, submitting Spark SQL statements (not supported in this API currently). NOTE: Spark and Hive jobs can be added only to clusters that include the Spark and Hive components.
- submit_job_once_cluster_run bool - true: the job is submitted when the cluster is created; false: the job is submitted separately.
- arguments str - Key parameter for program execution. The parameter is specified by the function of the user's program; MRS is only responsible for loading it. It contains a maximum of 2047 characters, excluding special characters such as ;|&>'<$, and can be empty.
- file_action str - Data import and export action. Valid values: import, export.
- hive_script_path str - SQL program path. This parameter is needed by Spark Script and Hive Script jobs only. The path must contain a maximum of 1023 characters, excluding special characters such as ;|&><'$; it cannot be empty or full of spaces; it must start with / or s3a://; and it must end with .sql (case-insensitive).
- hql str - HiveQL statement.
- input str - Path for inputting data, which must start with / or s3a://. A correct OBS path is required. It contains a maximum of 1023 characters, excluding special characters such as ;|&>'<$, and can be empty.
- job_log str - Path for storing job logs that record the job running status. The path must start with / or s3a://. A correct OBS path is required. It contains a maximum of 1023 characters, excluding special characters such as ;|&>'<$, and can be empty.
- output str - Path for outputting data, which must start with / or s3a://. A correct OBS path is required. If the path does not exist, the system automatically creates it. It contains a maximum of 1023 characters, excluding special characters such as ;|&>'<$, and can be empty.
- shutdown_cluster bool - Whether to delete the cluster after the jobs are complete. true: yes; false: no.
- jarPath String - Path of the .jar or .sql file for program execution. The path must contain a maximum of 1023 characters, excluding special characters such as ;|&><'$; it cannot be empty or full of spaces; and it must start with / or s3a://. A Spark Script path must end with .sql, while MapReduce and Spark Jar paths must end with .jar; sql and jar are case-insensitive.
- jobName String - Job name. It contains only 1 to 64 letters, digits, hyphens (-), and underscores (_). NOTE: identical job names are allowed but not recommended.
- jobType Number - Job type. 1: MapReduce; 2: Spark; 3: Hive Script; 4: HiveQL (not supported currently); 5: DistCp, importing and exporting data (not supported in this API currently); 6: Spark Script; 7: Spark SQL, submitting Spark SQL statements (not supported in this API currently). NOTE: Spark and Hive jobs can be added only to clusters that include the Spark and Hive components.
- submitJobOnceClusterRun Boolean - true: the job is submitted when the cluster is created; false: the job is submitted separately.
- arguments String - Key parameter for program execution. The parameter is specified by the function of the user's program; MRS is only responsible for loading it. It contains a maximum of 2047 characters, excluding special characters such as ;|&>'<$, and can be empty.
- fileAction String - Data import and export action. Valid values: import, export.
- hiveScriptPath String - SQL program path. This parameter is needed by Spark Script and Hive Script jobs only. The path must contain a maximum of 1023 characters, excluding special characters such as ;|&><'$; it cannot be empty or full of spaces; it must start with / or s3a://; and it must end with .sql (case-insensitive).
- hql String - HiveQL statement.
- input String - Path for inputting data, which must start with / or s3a://. A correct OBS path is required. It contains a maximum of 1023 characters, excluding special characters such as ;|&>'<$, and can be empty.
- jobLog String - Path for storing job logs that record the job running status. The path must start with / or s3a://. A correct OBS path is required. It contains a maximum of 1023 characters, excluding special characters such as ;|&>'<$, and can be empty.
- output String - Path for outputting data, which must start with / or s3a://. A correct OBS path is required. If the path does not exist, the system automatically creates it. It contains a maximum of 1023 characters, excluding special characters such as ;|&>'<$, and can be empty.
- shutdownCluster Boolean - Whether to delete the cluster after the jobs are complete. true: yes; false: no.
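Putting these fields together, the sketch below shows one way an add-job entry might be built in TypeScript and passed to the cluster through the addJobs argument. The bucket paths, job name, and the types.input module path are illustrative assumptions; jobType 2 selects Spark per the table above, so the cluster's component list must include Spark.
import * as flexibleengine from "@pulumi/flexibleengine";

// Hypothetical Spark job submitted once, when the cluster is created.
// All s3a:// paths are placeholders.
const sparkJob: flexibleengine.types.input.MrsClusterV1AddJob = {
    jobType: 2,                        // 2: Spark (see the job type list above)
    jobName: "spark-daily-aggregation",
    jarPath: "s3a://example-bucket/apps/aggregation.jar",
    arguments: "--date 2024-01-01",
    jobLog: "s3a://example-bucket/logs/",
    submitJobOnceClusterRun: true,     // submit as part of cluster creation
    shutdownCluster: false,            // keep the cluster after the job completes
};
// Passed to the resource as: addJobs: [sparkJob]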
MrsClusterV1ComponentList, MrsClusterV1ComponentListArgs
- ComponentName string - The component name.
- MRS 3.1.0-LTS.1 supports the following components:
- The analysis cluster contains the following components: Hadoop, Spark2x, HBase, Hive, Hue, HetuEngine, Loader, Flink, Oozie, ZooKeeper, Ranger, and Tez.
- The streaming cluster contains the following components: Kafka, Flume, ZooKeeper, and Ranger.
- MRS 2.0.1 supports the following components:
- The analysis cluster contains the following components: Presto, Hadoop, Spark, HBase, Hive, Hue, Loader, and Tez.
- The streaming cluster contains the following components: Kafka, Storm, and Flume.
- MRS 1.8.9 supports the following components:
- The analysis cluster contains the following components: Presto, Hadoop, Spark, HBase, Opentsdb, Hive, Hue, Loader, and Flink.
- The streaming cluster contains the following components: Kafka, KafkaManager, Storm, and Flume.
- ComponentDesc string - Component description.
- ComponentId string - Component ID. For example, the component_id of Hadoop is MRS 3.1.0-LTS.1_001, MRS 2.1.0_001, MRS 2.0.1_001, or MRS 1.8.9_001, depending on the cluster version.
- ComponentVersion string - Component version.
- ComponentName string - The component name.
- MRS 3.1.0-LTS.1 supports the following components:
- The analysis cluster contains the following components: Hadoop, Spark2x, HBase, Hive, Hue, HetuEngine, Loader, Flink, Oozie, ZooKeeper, Ranger, and Tez.
- The streaming cluster contains the following components: Kafka, Flume, ZooKeeper, and Ranger.
- MRS 2.0.1 supports the following components:
- The analysis cluster contains the following components: Presto, Hadoop, Spark, HBase, Hive, Hue, Loader, and Tez.
- The streaming cluster contains the following components: Kafka, Storm, and Flume.
- MRS 1.8.9 supports the following components:
- The analysis cluster contains the following components: Presto, Hadoop, Spark, HBase, Opentsdb, Hive, Hue, Loader, and Flink.
- The streaming cluster contains the following components: Kafka, KafkaManager, Storm, and Flume.
- ComponentDesc string - Component description.
- ComponentId string - Component ID. For example, the component_id of Hadoop is MRS 3.1.0-LTS.1_001, MRS 2.1.0_001, MRS 2.0.1_001, or MRS 1.8.9_001, depending on the cluster version.
- ComponentVersion string - Component version.
- componentName String - The component name.
- MRS 3.1.0-LTS.1 supports the following components:
- The analysis cluster contains the following components: Hadoop, Spark2x, HBase, Hive, Hue, HetuEngine, Loader, Flink, Oozie, ZooKeeper, Ranger, and Tez.
- The streaming cluster contains the following components: Kafka, Flume, ZooKeeper, and Ranger.
- MRS 2.0.1 supports the following components:
- The analysis cluster contains the following components: Presto, Hadoop, Spark, HBase, Hive, Hue, Loader, and Tez.
- The streaming cluster contains the following components: Kafka, Storm, and Flume.
- MRS 1.8.9 supports the following components:
- The analysis cluster contains the following components: Presto, Hadoop, Spark, HBase, Opentsdb, Hive, Hue, Loader, and Flink.
- The streaming cluster contains the following components: Kafka, KafkaManager, Storm, and Flume.
- componentDesc String - Component description.
- componentId String - Component ID. For example, the component_id of Hadoop is MRS 3.1.0-LTS.1_001, MRS 2.1.0_001, MRS 2.0.1_001, or MRS 1.8.9_001, depending on the cluster version.
- componentVersion String - Component version.
- componentName string - The component name.
- MRS 3.1.0-LTS.1 supports the following components:
- The analysis cluster contains the following components: Hadoop, Spark2x, HBase, Hive, Hue, HetuEngine, Loader, Flink, Oozie, ZooKeeper, Ranger, and Tez.
- The streaming cluster contains the following components: Kafka, Flume, ZooKeeper, and Ranger.
- MRS 2.0.1 supports the following components:
- The analysis cluster contains the following components: Presto, Hadoop, Spark, HBase, Hive, Hue, Loader, and Tez.
- The streaming cluster contains the following components: Kafka, Storm, and Flume.
- MRS 1.8.9 supports the following components:
- The analysis cluster contains the following components: Presto, Hadoop, Spark, HBase, Opentsdb, Hive, Hue, Loader, and Flink.
- The streaming cluster contains the following components: Kafka, KafkaManager, Storm, and Flume.
- componentDesc string - Component description.
- componentId string - Component ID. For example, the component_id of Hadoop is MRS 3.1.0-LTS.1_001, MRS 2.1.0_001, MRS 2.0.1_001, or MRS 1.8.9_001, depending on the cluster version.
- componentVersion string - Component version.
- component_name str - The component name.
- MRS 3.1.0-LTS.1 supports the following components:
- The analysis cluster contains the following components: Hadoop, Spark2x, HBase, Hive, Hue, HetuEngine, Loader, Flink, Oozie, ZooKeeper, Ranger, and Tez.
- The streaming cluster contains the following components: Kafka, Flume, ZooKeeper, and Ranger.
- MRS 2.0.1 supports the following components:
- The analysis cluster contains the following components: Presto, Hadoop, Spark, HBase, Hive, Hue, Loader, and Tez.
- The streaming cluster contains the following components: Kafka, Storm, and Flume.
- MRS 1.8.9 supports the following components:
- The analysis cluster contains the following components: Presto, Hadoop, Spark, HBase, Opentsdb, Hive, Hue, Loader, and Flink.
- The streaming cluster contains the following components: Kafka, KafkaManager, Storm, and Flume.
- component_desc str - Component description.
- component_id str - Component ID. For example, the component_id of Hadoop is MRS 3.1.0-LTS.1_001, MRS 2.1.0_001, MRS 2.0.1_001, or MRS 1.8.9_001, depending on the cluster version.
- component_version str - Component version.
- componentName String - The component name.
- MRS 3.1.0-LTS.1 supports the following components:
- The analysis cluster contains the following components: Hadoop, Spark2x, HBase, Hive, Hue, HetuEngine, Loader, Flink, Oozie, ZooKeeper, Ranger, and Tez.
- The streaming cluster contains the following components: Kafka, Flume, ZooKeeper, and Ranger.
- MRS 2.0.1 supports the following components:
- The analysis cluster contains the following components: Presto, Hadoop, Spark, HBase, Hive, Hue, Loader, and Tez.
- The streaming cluster contains the following components: Kafka, Storm, and Flume.
- MRS 1.8.9 supports the following components:
- The analysis cluster contains the following components: Presto, Hadoop, Spark, HBase, Opentsdb, Hive, Hue, Loader, and Flink.
- The streaming cluster contains the following components: Kafka, KafkaManager, Storm, and Flume.
- componentDesc String - Component description.
- componentId String - Component ID. For example, the component_id of Hadoop is MRS 3.1.0-LTS.1_001, MRS 2.1.0_001, MRS 2.0.1_001, or MRS 1.8.9_001, depending on the cluster version.
- componentVersion String - Component version.
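The sketch below assembles a component list for an MRS 2.0.1 streaming cluster in TypeScript. Only componentName is set on creation; the component names follow the MRS 2.0.1 streaming list above, and the types.input module path is an assumption.
import * as flexibleengine from "@pulumi/flexibleengine";

// Component selection for an MRS 2.0.1 streaming cluster; the descriptive
// fields (componentDesc, componentId, componentVersion) are returned by the
// service and need not be set here.
const streamingComponents: flexibleengine.types.input.MrsClusterV1ComponentList[] = [
    { componentName: "Kafka" },
    { componentName: "Storm" },
    { componentName: "Flume" },
];
// Passed to the resource as: componentLists: streamingComponents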
MrsClusterV1Timeouts, MrsClusterV1TimeoutsArgs
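The fields of this type are not documented on this page. If the resource follows the usual Terraform timeout shape, an override might look like the sketch below; the create and delete field names and the durations are assumptions, not confirmed by this page.
import * as flexibleengine from "@pulumi/flexibleengine";

// Assumed Terraform-style timeout overrides; field names are not confirmed
// by this page.
const timeouts: flexibleengine.types.input.MrsClusterV1Timeouts = {
    create: "60m",
    delete: "40m",
};
// Passed to the resource via its timeouts argument.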
Package Details
- Repository: flexibleengine (flexibleenginecloud/terraform-provider-flexibleengine)
- License
- Notes: This Pulumi package is based on the flexibleengine Terraform Provider.