Yandex

Pulumi Official · Package maintained by Pulumi
v0.13.0 published on Tuesday, Feb 22, 2022 by Pulumi

MdbClickhouseCluster

Manages a ClickHouse cluster within the Yandex.Cloud. For more information, see the official documentation.

Example Usage

C#

using Pulumi;
using Yandex = Pulumi.Yandex;

class MyStack : Stack
{
    public MyStack()
    {
        var fooVpcNetwork = new Yandex.VpcNetwork("fooVpcNetwork", new Yandex.VpcNetworkArgs
        {
        });
        var fooVpcSubnet = new Yandex.VpcSubnet("fooVpcSubnet", new Yandex.VpcSubnetArgs
        {
            NetworkId = fooVpcNetwork.Id,
            V4CidrBlocks = 
            {
                "10.5.0.0/24",
            },
            Zone = "ru-central1-a",
        });
        var fooMdbClickhouseCluster = new Yandex.MdbClickhouseCluster("fooMdbClickhouseCluster", new Yandex.MdbClickhouseClusterArgs
        {
            Clickhouse = new Yandex.Inputs.MdbClickhouseClusterClickhouseArgs
            {
                Config = new Yandex.Inputs.MdbClickhouseClusterClickhouseConfigArgs
                {
                    BackgroundPoolSize = 16,
                    BackgroundSchedulePoolSize = 16,
                    Compression = 
                    {
                        
                        {
                            { "method", "LZ4" },
                            { "minPartSize", 1024 },
                            { "minPartSizeRatio", 0.5 },
                        },
                        
                        {
                            { "method", "ZSTD" },
                            { "minPartSize", 2048 },
                            { "minPartSizeRatio", 0.7 },
                        },
                    },
                    GeobaseUri = "",
                    GraphiteRollup = 
                    {
                        
                        {
                            { "name", "rollup1" },
                            { "pattern", 
                            {
                                
                                {
                                    { "function", "func1" },
                                    { "regexp", "abc" },
                                    { "retention", 
                                    {
                                        
                                        {
                                            { "age", 1000 },
                                            { "precision", 3 },
                                        },
                                    } },
                                },
                            } },
                        },
                        
                        {
                            { "name", "rollup2" },
                            { "pattern", 
                            {
                                
                                {
                                    { "function", "func2" },
                                    { "retention", 
                                    {
                                        
                                        {
                                            { "age", 2000 },
                                            { "precision", 5 },
                                        },
                                    } },
                                },
                            } },
                        },
                    },
                    Kafka = new Yandex.Inputs.MdbClickhouseClusterClickhouseConfigKafkaArgs
                    {
                        SaslMechanism = "SASL_MECHANISM_GSSAPI",
                        SaslPassword = "pass1",
                        SaslUsername = "user1",
                        SecurityProtocol = "SECURITY_PROTOCOL_PLAINTEXT",
                    },
                    KafkaTopic = 
                    {
                        
                        {
                            { "name", "topic1" },
                            { "settings", 
                            {
                                { "saslMechanism", "SASL_MECHANISM_SCRAM_SHA_256" },
                                { "saslPassword", "pass2" },
                                { "saslUsername", "user2" },
                                { "securityProtocol", "SECURITY_PROTOCOL_SSL" },
                            } },
                        },
                        
                        {
                            { "name", "topic2" },
                            { "settings", 
                            {
                                { "saslMechanism", "SASL_MECHANISM_PLAIN" },
                                { "securityProtocol", "SECURITY_PROTOCOL_SASL_PLAINTEXT" },
                            } },
                        },
                    },
                    KeepAliveTimeout = 3000,
                    LogLevel = "TRACE",
                    MarkCacheSize = 5368709120,
                    MaxConcurrentQueries = 50,
                    MaxConnections = 100,
                    MaxPartitionSizeToDrop = 53687091200,
                    MaxTableSizeToDrop = 53687091200,
                    MergeTree = new Yandex.Inputs.MdbClickhouseClusterClickhouseConfigMergeTreeArgs
                    {
                        MaxBytesToMergeAtMinSpaceInPool = 1048576,
                        MaxReplicatedMergesInQueue = 16,
                        NumberOfFreeEntriesInPoolToLowerMaxSizeOfMerge = 8,
                        PartsToDelayInsert = 150,
                        PartsToThrowInsert = 300,
                        ReplicatedDeduplicationWindow = 100,
                        ReplicatedDeduplicationWindowSeconds = 604800,
                    },
                    MetricLogEnabled = true,
                    MetricLogRetentionSize = 536870912,
                    MetricLogRetentionTime = 2592000,
                    PartLogRetentionSize = 536870912,
                    PartLogRetentionTime = 2592000,
                    QueryLogRetentionSize = 1073741824,
                    QueryLogRetentionTime = 2592000,
                    QueryThreadLogEnabled = true,
                    QueryThreadLogRetentionSize = 536870912,
                    QueryThreadLogRetentionTime = 2592000,
                    Rabbitmq = new Yandex.Inputs.MdbClickhouseClusterClickhouseConfigRabbitmqArgs
                    {
                        Password = "rabbit_pass",
                        Username = "rabbit_user",
                    },
                    TextLogEnabled = true,
                    TextLogLevel = "TRACE",
                    TextLogRetentionSize = 536870912,
                    TextLogRetentionTime = 2592000,
                    Timezone = "UTC",
                    TraceLogEnabled = true,
                    TraceLogRetentionSize = 536870912,
                    TraceLogRetentionTime = 2592000,
                    UncompressedCacheSize = 8589934592,
                },
                Resources = new Yandex.Inputs.MdbClickhouseClusterClickhouseResourcesArgs
                {
                    DiskSize = 32,
                    DiskTypeId = "network-ssd",
                    ResourcePresetId = "s2.micro",
                },
            },
            CloudStorage = new Yandex.Inputs.MdbClickhouseClusterCloudStorageArgs
            {
                Enabled = false,
            },
            Databases = 
            {
                new Yandex.Inputs.MdbClickhouseClusterDatabaseArgs
                {
                    Name = "db_name",
                },
            },
            Environment = "PRESTABLE",
            FormatSchemas = 
            {
                new Yandex.Inputs.MdbClickhouseClusterFormatSchemaArgs
                {
                    Name = "test_schema",
                    Type = "FORMAT_SCHEMA_TYPE_CAPNPROTO",
                    Uri = "https://storage.yandexcloud.net/ch-data/schema.proto",
                },
            },
            Hosts = 
            {
                new Yandex.Inputs.MdbClickhouseClusterHostArgs
                {
                    SubnetId = fooVpcSubnet.Id,
                    Type = "CLICKHOUSE",
                    Zone = "ru-central1-a",
                },
            },
            MaintenanceWindow = new Yandex.Inputs.MdbClickhouseClusterMaintenanceWindowArgs
            {
                Type = "ANYTIME",
            },
            MlModels = 
            {
                new Yandex.Inputs.MdbClickhouseClusterMlModelArgs
                {
                    Name = "test_model",
                    Type = "ML_MODEL_TYPE_CATBOOST",
                    Uri = "https://storage.yandexcloud.net/ch-data/train.csv",
                },
            },
            NetworkId = fooVpcNetwork.Id,
            ServiceAccountId = "your_service_account_id",
            Users = 
            {
                new Yandex.Inputs.MdbClickhouseClusterUserArgs
                {
                    Name = "user",
                    Password = "your_password",
                    Permissions = 
                    {
                        new Yandex.Inputs.MdbClickhouseClusterUserPermissionArgs
                        {
                            DatabaseName = "db_name",
                        },
                    },
                    Quotas = 
                    {
                        new Yandex.Inputs.MdbClickhouseClusterUserQuotaArgs
                        {
                            Errors = 1000,
                            IntervalDuration = 3600000,
                            Queries = 10000,
                        },
                        new Yandex.Inputs.MdbClickhouseClusterUserQuotaArgs
                        {
                            Errors = 5000,
                            IntervalDuration = 79800000,
                            Queries = 50000,
                        },
                    },
                    Settings = new Yandex.Inputs.MdbClickhouseClusterUserSettingsArgs
                    {
                        MaxMemoryUsageForUser = 1000000000,
                        OutputFormatJsonQuote64bitIntegers = true,
                        ReadOverflowMode = "throw",
                    },
                },
            },
        });
    }

}

Go

package main

import (
	"github.com/pulumi/pulumi-yandex/sdk/go/yandex"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		fooVpcNetwork, err := yandex.NewVpcNetwork(ctx, "fooVpcNetwork", nil)
		if err != nil {
			return err
		}
		fooVpcSubnet, err := yandex.NewVpcSubnet(ctx, "fooVpcSubnet", &yandex.VpcSubnetArgs{
			NetworkId: fooVpcNetwork.ID(),
			V4CidrBlocks: pulumi.StringArray{
				pulumi.String("10.5.0.0/24"),
			},
			Zone: pulumi.String("ru-central1-a"),
		})
		if err != nil {
			return err
		}
		_, err = yandex.NewMdbClickhouseCluster(ctx, "fooMdbClickhouseCluster", &yandex.MdbClickhouseClusterArgs{
			Clickhouse: &MdbClickhouseClusterClickhouseArgs{
				Config: &MdbClickhouseClusterClickhouseConfigArgs{
					BackgroundPoolSize:         pulumi.Int(16),
					BackgroundSchedulePoolSize: pulumi.Int(16),
					Compression: []interface{}{
						map[string]interface{}{
							"method":           "LZ4",
							"minPartSize":      1024,
							"minPartSizeRatio": 0.5,
						},
						map[string]interface{}{
							"method":           "ZSTD",
							"minPartSize":      2048,
							"minPartSizeRatio": 0.7,
						},
					},
					GeobaseUri: pulumi.String(""),
					GraphiteRollup: []interface{}{
						map[string]interface{}{
							"name": "rollup1",
							"pattern": []map[string]interface{}{
								map[string]interface{}{
									"function": "func1",
									"regexp":   "abc",
									"retention": []map[string]interface{}{
										map[string]interface{}{
											"age":       1000,
											"precision": 3,
										},
									},
								},
							},
						},
						map[string]interface{}{
							"name": "rollup2",
							"pattern": []map[string]interface{}{
								map[string]interface{}{
									"function": "func2",
									"retention": []map[string]interface{}{
										map[string]interface{}{
											"age":       2000,
											"precision": 5,
										},
									},
								},
							},
						},
					},
					Kafka: &MdbClickhouseClusterClickhouseConfigKafkaArgs{
						SaslMechanism:    pulumi.String("SASL_MECHANISM_GSSAPI"),
						SaslPassword:     pulumi.String("pass1"),
						SaslUsername:     pulumi.String("user1"),
						SecurityProtocol: pulumi.String("SECURITY_PROTOCOL_PLAINTEXT"),
					},
					KafkaTopic: []interface{}{
						map[string]interface{}{
							"name": "topic1",
							"settings": map[string]interface{}{
								"saslMechanism":    "SASL_MECHANISM_SCRAM_SHA_256",
								"saslPassword":     "pass2",
								"saslUsername":     "user2",
								"securityProtocol": "SECURITY_PROTOCOL_SSL",
							},
						},
						map[string]interface{}{
							"name": "topic2",
							"settings": map[string]interface{}{
								"saslMechanism":    "SASL_MECHANISM_PLAIN",
								"securityProtocol": "SECURITY_PROTOCOL_SASL_PLAINTEXT",
							},
						},
					},
					KeepAliveTimeout:       pulumi.Int(3000),
					LogLevel:               pulumi.String("TRACE"),
					MarkCacheSize:          pulumi.Int(5368709120),
					MaxConcurrentQueries:   pulumi.Int(50),
					MaxConnections:         pulumi.Int(100),
					MaxPartitionSizeToDrop: pulumi.Int(53687091200),
					MaxTableSizeToDrop:     pulumi.Int(53687091200),
					MergeTree: &MdbClickhouseClusterClickhouseConfigMergeTreeArgs{
						MaxBytesToMergeAtMinSpaceInPool:                pulumi.Int(1048576),
						MaxReplicatedMergesInQueue:                     pulumi.Int(16),
						NumberOfFreeEntriesInPoolToLowerMaxSizeOfMerge: pulumi.Int(8),
						PartsToDelayInsert:                             pulumi.Int(150),
						PartsToThrowInsert:                             pulumi.Int(300),
						ReplicatedDeduplicationWindow:                  pulumi.Int(100),
						ReplicatedDeduplicationWindowSeconds:           pulumi.Int(604800),
					},
					MetricLogEnabled:            pulumi.Bool(true),
					MetricLogRetentionSize:      pulumi.Int(536870912),
					MetricLogRetentionTime:      pulumi.Int(2592000),
					PartLogRetentionSize:        pulumi.Int(536870912),
					PartLogRetentionTime:        pulumi.Int(2592000),
					QueryLogRetentionSize:       pulumi.Int(1073741824),
					QueryLogRetentionTime:       pulumi.Int(2592000),
					QueryThreadLogEnabled:       pulumi.Bool(true),
					QueryThreadLogRetentionSize: pulumi.Int(536870912),
					QueryThreadLogRetentionTime: pulumi.Int(2592000),
					Rabbitmq: &MdbClickhouseClusterClickhouseConfigRabbitmqArgs{
						Password: pulumi.String("rabbit_pass"),
						Username: pulumi.String("rabbit_user"),
					},
					TextLogEnabled:        pulumi.Bool(true),
					TextLogLevel:          pulumi.String("TRACE"),
					TextLogRetentionSize:  pulumi.Int(536870912),
					TextLogRetentionTime:  pulumi.Int(2592000),
					Timezone:              pulumi.String("UTC"),
					TraceLogEnabled:       pulumi.Bool(true),
					TraceLogRetentionSize: pulumi.Int(536870912),
					TraceLogRetentionTime: pulumi.Int(2592000),
					UncompressedCacheSize: pulumi.Int(8589934592),
				},
				Resources: &MdbClickhouseClusterClickhouseResourcesArgs{
					DiskSize:         pulumi.Int(32),
					DiskTypeId:       pulumi.String("network-ssd"),
					ResourcePresetId: pulumi.String("s2.micro"),
				},
			},
			CloudStorage: &MdbClickhouseClusterCloudStorageArgs{
				Enabled: pulumi.Bool(false),
			},
			Databases: MdbClickhouseClusterDatabaseArray{
				&MdbClickhouseClusterDatabaseArgs{
					Name: pulumi.String("db_name"),
				},
			},
			Environment: pulumi.String("PRESTABLE"),
			FormatSchemas: MdbClickhouseClusterFormatSchemaArray{
				&MdbClickhouseClusterFormatSchemaArgs{
					Name: pulumi.String("test_schema"),
					Type: pulumi.String("FORMAT_SCHEMA_TYPE_CAPNPROTO"),
					Uri:  pulumi.String("https://storage.yandexcloud.net/ch-data/schema.proto"),
				},
			},
			Hosts: MdbClickhouseClusterHostArray{
				&MdbClickhouseClusterHostArgs{
					SubnetId: fooVpcSubnet.ID(),
					Type:     pulumi.String("CLICKHOUSE"),
					Zone:     pulumi.String("ru-central1-a"),
				},
			},
			MaintenanceWindow: &MdbClickhouseClusterMaintenanceWindowArgs{
				Type: pulumi.String("ANYTIME"),
			},
			MlModels: MdbClickhouseClusterMlModelArray{
				&MdbClickhouseClusterMlModelArgs{
					Name: pulumi.String("test_model"),
					Type: pulumi.String("ML_MODEL_TYPE_CATBOOST"),
					Uri:  pulumi.String("https://storage.yandexcloud.net/ch-data/train.csv"),
				},
			},
			NetworkId:        fooVpcNetwork.ID(),
			ServiceAccountId: pulumi.String("your_service_account_id"),
			Users: MdbClickhouseClusterUserArray{
				&MdbClickhouseClusterUserArgs{
					Name:     pulumi.String("user"),
					Password: pulumi.String("your_password"),
					Permissions: MdbClickhouseClusterUserPermissionArray{
						&MdbClickhouseClusterUserPermissionArgs{
							DatabaseName: pulumi.String("db_name"),
						},
					},
					Quotas: MdbClickhouseClusterUserQuotaArray{
						&MdbClickhouseClusterUserQuotaArgs{
							Errors:           pulumi.Int(1000),
							IntervalDuration: pulumi.Int(3600000),
							Queries:          pulumi.Int(10000),
						},
						&MdbClickhouseClusterUserQuotaArgs{
							Errors:           pulumi.Int(5000),
							IntervalDuration: pulumi.Int(79800000),
							Queries:          pulumi.Int(50000),
						},
					},
					Settings: &MdbClickhouseClusterUserSettingsArgs{
						MaxMemoryUsageForUser:              pulumi.Int(1000000000),
						OutputFormatJsonQuote64bitIntegers: pulumi.Bool(true),
						ReadOverflowMode:                   pulumi.String("throw"),
					},
				},
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}

Java

Coming soon!

Python

import pulumi
import pulumi_yandex as yandex

foo_vpc_network = yandex.VpcNetwork("fooVpcNetwork")
foo_vpc_subnet = yandex.VpcSubnet("fooVpcSubnet",
    network_id=foo_vpc_network.id,
    v4_cidr_blocks=["10.5.0.0/24"],
    zone="ru-central1-a")
foo_mdb_clickhouse_cluster = yandex.MdbClickhouseCluster("fooMdbClickhouseCluster",
    clickhouse=yandex.MdbClickhouseClusterClickhouseArgs(
        config=yandex.MdbClickhouseClusterClickhouseConfigArgs(
            background_pool_size=16,
            background_schedule_pool_size=16,
            compression=[
                {
                    "method": "LZ4",
                    "minPartSize": 1024,
                    "minPartSizeRatio": 0.5,
                },
                {
                    "method": "ZSTD",
                    "minPartSize": 2048,
                    "minPartSizeRatio": 0.7,
                },
            ],
            geobase_uri="",
            graphite_rollup=[
                {
                    "name": "rollup1",
                    "pattern": [{
                        "function": "func1",
                        "regexp": "abc",
                        "retention": [{
                            "age": 1000,
                            "precision": 3,
                        }],
                    }],
                },
                {
                    "name": "rollup2",
                    "pattern": [{
                        "function": "func2",
                        "retention": [{
                            "age": 2000,
                            "precision": 5,
                        }],
                    }],
                },
            ],
            kafka=yandex.MdbClickhouseClusterClickhouseConfigKafkaArgs(
                sasl_mechanism="SASL_MECHANISM_GSSAPI",
                sasl_password="pass1",
                sasl_username="user1",
                security_protocol="SECURITY_PROTOCOL_PLAINTEXT",
            ),
            kafka_topic=[
                {
                    "name": "topic1",
                    "settings": {
                        "saslMechanism": "SASL_MECHANISM_SCRAM_SHA_256",
                        "saslPassword": "pass2",
                        "saslUsername": "user2",
                        "securityProtocol": "SECURITY_PROTOCOL_SSL",
                    },
                },
                {
                    "name": "topic2",
                    "settings": {
                        "saslMechanism": "SASL_MECHANISM_PLAIN",
                        "securityProtocol": "SECURITY_PROTOCOL_SASL_PLAINTEXT",
                    },
                },
            ],
            keep_alive_timeout=3000,
            log_level="TRACE",
            mark_cache_size=5368709120,
            max_concurrent_queries=50,
            max_connections=100,
            max_partition_size_to_drop=53687091200,
            max_table_size_to_drop=53687091200,
            merge_tree=yandex.MdbClickhouseClusterClickhouseConfigMergeTreeArgs(
                max_bytes_to_merge_at_min_space_in_pool=1048576,
                max_replicated_merges_in_queue=16,
                number_of_free_entries_in_pool_to_lower_max_size_of_merge=8,
                parts_to_delay_insert=150,
                parts_to_throw_insert=300,
                replicated_deduplication_window=100,
                replicated_deduplication_window_seconds=604800,
            ),
            metric_log_enabled=True,
            metric_log_retention_size=536870912,
            metric_log_retention_time=2592000,
            part_log_retention_size=536870912,
            part_log_retention_time=2592000,
            query_log_retention_size=1073741824,
            query_log_retention_time=2592000,
            query_thread_log_enabled=True,
            query_thread_log_retention_size=536870912,
            query_thread_log_retention_time=2592000,
            rabbitmq=yandex.MdbClickhouseClusterClickhouseConfigRabbitmqArgs(
                password="rabbit_pass",
                username="rabbit_user",
            ),
            text_log_enabled=True,
            text_log_level="TRACE",
            text_log_retention_size=536870912,
            text_log_retention_time=2592000,
            timezone="UTC",
            trace_log_enabled=True,
            trace_log_retention_size=536870912,
            trace_log_retention_time=2592000,
            uncompressed_cache_size=8589934592,
        ),
        resources=yandex.MdbClickhouseClusterClickhouseResourcesArgs(
            disk_size=32,
            disk_type_id="network-ssd",
            resource_preset_id="s2.micro",
        ),
    ),
    cloud_storage=yandex.MdbClickhouseClusterCloudStorageArgs(
        enabled=False,
    ),
    databases=[yandex.MdbClickhouseClusterDatabaseArgs(
        name="db_name",
    )],
    environment="PRESTABLE",
    format_schemas=[yandex.MdbClickhouseClusterFormatSchemaArgs(
        name="test_schema",
        type="FORMAT_SCHEMA_TYPE_CAPNPROTO",
        uri="https://storage.yandexcloud.net/ch-data/schema.proto",
    )],
    hosts=[yandex.MdbClickhouseClusterHostArgs(
        subnet_id=foo_vpc_subnet.id,
        type="CLICKHOUSE",
        zone="ru-central1-a",
    )],
    maintenance_window=yandex.MdbClickhouseClusterMaintenanceWindowArgs(
        type="ANYTIME",
    ),
    ml_models=[yandex.MdbClickhouseClusterMlModelArgs(
        name="test_model",
        type="ML_MODEL_TYPE_CATBOOST",
        uri="https://storage.yandexcloud.net/ch-data/train.csv",
    )],
    network_id=foo_vpc_network.id,
    service_account_id="your_service_account_id",
    users=[yandex.MdbClickhouseClusterUserArgs(
        name="user",
        password="your_password",
        permissions=[yandex.MdbClickhouseClusterUserPermissionArgs(
            database_name="db_name",
        )],
        quotas=[
            yandex.MdbClickhouseClusterUserQuotaArgs(
                errors=1000,
                interval_duration=3600000,
                queries=10000,
            ),
            yandex.MdbClickhouseClusterUserQuotaArgs(
                errors=5000,
                interval_duration=79800000,
                queries=50000,
            ),
        ],
        settings=yandex.MdbClickhouseClusterUserSettingsArgs(
            max_memory_usage_for_user=1000000000,
            output_format_json_quote64bit_integers=True,
            read_overflow_mode="throw",
        ),
    )])

TypeScript

import * as pulumi from "@pulumi/pulumi";
import * as yandex from "@pulumi/yandex";

const fooVpcNetwork = new yandex.VpcNetwork("foo", {});
const fooVpcSubnet = new yandex.VpcSubnet("foo", {
    networkId: fooVpcNetwork.id,
    v4CidrBlocks: ["10.5.0.0/24"],
    zone: "ru-central1-a",
});
const fooMdbClickhouseCluster = new yandex.MdbClickhouseCluster("foo", {
    clickhouse: {
        config: {
            backgroundPoolSize: 16,
            backgroundSchedulePoolSize: 16,
            compressions: [
                {
                    method: "LZ4",
                    minPartSize: 1024,
                    minPartSizeRatio: 0.5,
                },
                {
                    method: "ZSTD",
                    minPartSize: 2048,
                    minPartSizeRatio: 0.7,
                },
            ],
            geobaseUri: "",
            graphiteRollups: [
                {
                    name: "rollup1",
                    patterns: [{
                        function: "func1",
                        regexp: "abc",
                        retentions: [{
                            age: 1000,
                            precision: 3,
                        }],
                    }],
                },
                {
                    name: "rollup2",
                    patterns: [{
                        function: "func2",
                        retentions: [{
                            age: 2000,
                            precision: 5,
                        }],
                    }],
                },
            ],
            kafka: {
                saslMechanism: "SASL_MECHANISM_GSSAPI",
                saslPassword: "pass1",
                saslUsername: "user1",
                securityProtocol: "SECURITY_PROTOCOL_PLAINTEXT",
            },
            kafkaTopics: [
                {
                    name: "topic1",
                    settings: {
                        saslMechanism: "SASL_MECHANISM_SCRAM_SHA_256",
                        saslPassword: "pass2",
                        saslUsername: "user2",
                        securityProtocol: "SECURITY_PROTOCOL_SSL",
                    },
                },
                {
                    name: "topic2",
                    settings: {
                        saslMechanism: "SASL_MECHANISM_PLAIN",
                        securityProtocol: "SECURITY_PROTOCOL_SASL_PLAINTEXT",
                    },
                },
            ],
            keepAliveTimeout: 3000,
            logLevel: "TRACE",
            markCacheSize: 5368709120,
            maxConcurrentQueries: 50,
            maxConnections: 100,
            maxPartitionSizeToDrop: 53687091200,
            maxTableSizeToDrop: 53687091200,
            mergeTree: {
                maxBytesToMergeAtMinSpaceInPool: 1048576,
                maxReplicatedMergesInQueue: 16,
                numberOfFreeEntriesInPoolToLowerMaxSizeOfMerge: 8,
                partsToDelayInsert: 150,
                partsToThrowInsert: 300,
                replicatedDeduplicationWindow: 100,
                replicatedDeduplicationWindowSeconds: 604800,
            },
            metricLogEnabled: true,
            metricLogRetentionSize: 536870912,
            metricLogRetentionTime: 2592000,
            partLogRetentionSize: 536870912,
            partLogRetentionTime: 2592000,
            queryLogRetentionSize: 1073741824,
            queryLogRetentionTime: 2592000,
            queryThreadLogEnabled: true,
            queryThreadLogRetentionSize: 536870912,
            queryThreadLogRetentionTime: 2592000,
            rabbitmq: {
                password: "rabbit_pass",
                username: "rabbit_user",
            },
            textLogEnabled: true,
            textLogLevel: "TRACE",
            textLogRetentionSize: 536870912,
            textLogRetentionTime: 2592000,
            timezone: "UTC",
            traceLogEnabled: true,
            traceLogRetentionSize: 536870912,
            traceLogRetentionTime: 2592000,
            uncompressedCacheSize: 8589934592,
        },
        resources: {
            diskSize: 32,
            diskTypeId: "network-ssd",
            resourcePresetId: "s2.micro",
        },
    },
    cloudStorage: {
        enabled: false,
    },
    databases: [{
        name: "db_name",
    }],
    environment: "PRESTABLE",
    formatSchemas: [{
        name: "test_schema",
        type: "FORMAT_SCHEMA_TYPE_CAPNPROTO",
        uri: "https://storage.yandexcloud.net/ch-data/schema.proto",
    }],
    hosts: [{
        subnetId: fooVpcSubnet.id,
        type: "CLICKHOUSE",
        zone: "ru-central1-a",
    }],
    maintenanceWindow: {
        type: "ANYTIME",
    },
    mlModels: [{
        name: "test_model",
        type: "ML_MODEL_TYPE_CATBOOST",
        uri: "https://storage.yandexcloud.net/ch-data/train.csv",
    }],
    networkId: fooVpcNetwork.id,
    serviceAccountId: "your_service_account_id",
    users: [{
        name: "user",
        password: "your_password",
        permissions: [{
            databaseName: "db_name",
        }],
        quotas: [
            {
                errors: 1000,
                intervalDuration: 3600000,
                queries: 10000,
            },
            {
                errors: 5000,
                intervalDuration: 79800000,
                queries: 50000,
            },
        ],
        settings: {
            maxMemoryUsageForUser: 1000000000,
            outputFormatJsonQuote64bitIntegers: true,
            readOverflowMode: "throw",
        },
    }],
});

YAML

Coming soon!

Create a MdbClickhouseCluster Resource

new MdbClickhouseCluster(name: string, args: MdbClickhouseClusterArgs, opts?: CustomResourceOptions);
@overload
def MdbClickhouseCluster(resource_name: str,
                         opts: Optional[ResourceOptions] = None,
                         access: Optional[MdbClickhouseClusterAccessArgs] = None,
                         admin_password: Optional[str] = None,
                         backup_window_start: Optional[MdbClickhouseClusterBackupWindowStartArgs] = None,
                         clickhouse: Optional[MdbClickhouseClusterClickhouseArgs] = None,
                         cloud_storage: Optional[MdbClickhouseClusterCloudStorageArgs] = None,
                         copy_schema_on_new_hosts: Optional[bool] = None,
                         databases: Optional[Sequence[MdbClickhouseClusterDatabaseArgs]] = None,
                         deletion_protection: Optional[bool] = None,
                         description: Optional[str] = None,
                         environment: Optional[str] = None,
                         folder_id: Optional[str] = None,
                         format_schemas: Optional[Sequence[MdbClickhouseClusterFormatSchemaArgs]] = None,
                         hosts: Optional[Sequence[MdbClickhouseClusterHostArgs]] = None,
                         labels: Optional[Mapping[str, str]] = None,
                         maintenance_window: Optional[MdbClickhouseClusterMaintenanceWindowArgs] = None,
                         ml_models: Optional[Sequence[MdbClickhouseClusterMlModelArgs]] = None,
                         name: Optional[str] = None,
                         network_id: Optional[str] = None,
                         security_group_ids: Optional[Sequence[str]] = None,
                         service_account_id: Optional[str] = None,
                         shard_groups: Optional[Sequence[MdbClickhouseClusterShardGroupArgs]] = None,
                         sql_database_management: Optional[bool] = None,
                         sql_user_management: Optional[bool] = None,
                         users: Optional[Sequence[MdbClickhouseClusterUserArgs]] = None,
                         version: Optional[str] = None,
                         zookeeper: Optional[MdbClickhouseClusterZookeeperArgs] = None)
@overload
def MdbClickhouseCluster(resource_name: str,
                         args: MdbClickhouseClusterArgs,
                         opts: Optional[ResourceOptions] = None)
func NewMdbClickhouseCluster(ctx *Context, name string, args MdbClickhouseClusterArgs, opts ...ResourceOption) (*MdbClickhouseCluster, error)
public MdbClickhouseCluster(string name, MdbClickhouseClusterArgs args, CustomResourceOptions? opts = null)
public MdbClickhouseCluster(String name, MdbClickhouseClusterArgs args)
public MdbClickhouseCluster(String name, MdbClickhouseClusterArgs args, CustomResourceOptions options)
type: yandex:MdbClickhouseCluster
properties: # The arguments to resource properties.
options: # Bag of options to control resource's behavior.

name string
The unique name of the resource.
args MdbClickhouseClusterArgs
The arguments to resource properties.
opts CustomResourceOptions
Bag of options to control resource's behavior.
resource_name str
The unique name of the resource.
args MdbClickhouseClusterArgs
The arguments to resource properties.
opts ResourceOptions
Bag of options to control resource's behavior.
ctx Context
Context object for the current deployment.
name string
The unique name of the resource.
args MdbClickhouseClusterArgs
The arguments to resource properties.
opts ResourceOption
Bag of options to control resource's behavior.
name string
The unique name of the resource.
args MdbClickhouseClusterArgs
The arguments to resource properties.
opts CustomResourceOptions
Bag of options to control resource's behavior.
name String
The unique name of the resource.
args MdbClickhouseClusterArgs
The arguments to resource properties.
options CustomResourceOptions
Bag of options to control resource's behavior.
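
The options bag works the same way in every language. As a minimal TypeScript sketch (all IDs and values are placeholders, and protect and dependsOn are standard Pulumi resource options rather than anything specific to this resource):

import * as yandex from "@pulumi/yandex";

// Placeholder network so the sketch is self-contained.
const network = new yandex.VpcNetwork("example-network", {});

// The third constructor argument is the options bag.
// `protect: true` blocks accidental deletion; `dependsOn` forces explicit ordering.
const cluster = new yandex.MdbClickhouseCluster("example", {
    environment: "PRESTABLE",
    networkId: network.id,
    clickhouse: {
        resources: {
            resourcePresetId: "s2.micro",
            diskTypeId: "network-ssd",
            diskSize: 32,
        },
    },
    hosts: [{
        type: "CLICKHOUSE",
        zone: "ru-central1-a",
        subnetId: "your_subnet_id",
    }],
}, { protect: true, dependsOn: [network] });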

MdbClickhouseCluster Resource Properties

To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.

Inputs

The MdbClickhouseCluster resource accepts the following input properties:

Clickhouse MdbClickhouseClusterClickhouseArgs

Configuration of the ClickHouse subcluster. The structure is documented below.

Environment string

Deployment environment of the ClickHouse cluster. Can be either PRESTABLE or PRODUCTION.

Hosts List<MdbClickhouseClusterHostArgs>

A host of the ClickHouse cluster. The structure is documented below.

NetworkId string

ID of the network, to which the ClickHouse cluster belongs.

Access MdbClickhouseClusterAccessArgs

Access policy to the ClickHouse cluster. The structure is documented below.

AdminPassword string

A password used to authorize as the admin user when sql_user_management is enabled.

BackupWindowStart MdbClickhouseClusterBackupWindowStartArgs

Time to start the daily backup, in the UTC timezone. The structure is documented below.

CloudStorage MdbClickhouseClusterCloudStorageArgs
CopySchemaOnNewHosts bool

Whether to copy schema on new ClickHouse hosts.

Databases List<MdbClickhouseClusterDatabaseArgs>

A database of the ClickHouse cluster. The structure is documented below.

DeletionProtection bool

Inhibits deletion of the cluster. Can be either true or false.

Description string

Description of the ClickHouse cluster.

FolderId string

The ID of the folder that the resource belongs to. If it is not provided, the default provider folder is used.

FormatSchemas List<MdbClickhouseClusterFormatSchemaArgs>

A set of protobuf or capnproto format schemas. The structure is documented below.

Labels Dictionary<string, string>

A set of key/value label pairs to assign to the ClickHouse cluster.

MaintenanceWindow MdbClickhouseClusterMaintenanceWindowArgs
MlModels List<MdbClickhouseClusterMlModelArgs>

A group of machine learning models. The structure is documented below

Name string

Name of the ClickHouse cluster.

SecurityGroupIds List<string>

A set of ids of security groups assigned to hosts of the cluster.

ServiceAccountId string

ID of the service account used for access to Yandex Object Storage.

ShardGroups List<MdbClickhouseClusterShardGroupArgs>

A group of clickhouse shards. The structure is documented below.

SqlDatabaseManagement bool

Grants admin user database management permission.

SqlUserManagement bool

Enables admin user with user management permission.

Users List<MdbClickhouseClusterUserArgs>

A user of the ClickHouse cluster. The structure is documented below.

Version string

Version of the ClickHouse server software.

Zookeeper MdbClickhouseClusterZookeeperArgs

Configuration of the ZooKeeper subcluster. The structure is documented below.

Clickhouse MdbClickhouseClusterClickhouseArgs

Configuration of the ClickHouse subcluster. The structure is documented below.

Environment string

Deployment environment of the ClickHouse cluster. Can be either PRESTABLE or PRODUCTION.

Hosts []MdbClickhouseClusterHostArgs

A host of the ClickHouse cluster. The structure is documented below.

NetworkId string

ID of the network, to which the ClickHouse cluster belongs.

Access MdbClickhouseClusterAccessArgs

Access policy to the ClickHouse cluster. The structure is documented below.

AdminPassword string

A password used to authorize as the admin user when sql_user_management is enabled.

BackupWindowStart MdbClickhouseClusterBackupWindowStartArgs

Time to start the daily backup, in the UTC timezone. The structure is documented below.

CloudStorage MdbClickhouseClusterCloudStorageArgs
CopySchemaOnNewHosts bool

Whether to copy schema on new ClickHouse hosts.

Databases []MdbClickhouseClusterDatabaseArgs

A database of the ClickHouse cluster. The structure is documented below.

DeletionProtection bool

Inhibits deletion of the cluster. Can be either true or false.

Description string

Description of the ClickHouse cluster.

FolderId string

The ID of the folder that the resource belongs to. If it is not provided, the default provider folder is used.

FormatSchemas []MdbClickhouseClusterFormatSchemaArgs

A set of protobuf or capnproto format schemas. The structure is documented below.

Labels map[string]string

A set of key/value label pairs to assign to the ClickHouse cluster.

MaintenanceWindow MdbClickhouseClusterMaintenanceWindowArgs
MlModels []MdbClickhouseClusterMlModelArgs

A group of machine learning models. The structure is documented below

Name string

Name of the ClickHouse cluster.

SecurityGroupIds []string

A set of ids of security groups assigned to hosts of the cluster.

ServiceAccountId string

ID of the service account used for access to Yandex Object Storage.

ShardGroups []MdbClickhouseClusterShardGroupArgs

A group of clickhouse shards. The structure is documented below.

SqlDatabaseManagement bool

Grants admin user database management permission.

SqlUserManagement bool

Enables admin user with user management permission.

Users []MdbClickhouseClusterUserArgs

A user of the ClickHouse cluster. The structure is documented below.

Version string

Version of the ClickHouse server software.

Zookeeper MdbClickhouseClusterZookeeperArgs

Configuration of the ZooKeeper subcluster. The structure is documented below.

clickhouse MdbClickhouseClusterClickhouseArgs

Configuration of the ClickHouse subcluster. The structure is documented below.

environment String

Deployment environment of the ClickHouse cluster. Can be either PRESTABLE or PRODUCTION.

hosts List<MdbClickhouseClusterHostArgs>

A host of the ClickHouse cluster. The structure is documented below.

networkId String

ID of the network, to which the ClickHouse cluster belongs.

access MdbClickhouseClusterAccessArgs

Access policy to the ClickHouse cluster. The structure is documented below.

adminPassword String

A password used to authorize as the admin user when sql_user_management is enabled.

backupWindowStart MdbClickhouseClusterBackupWindowStartArgs

Time to start the daily backup, in the UTC timezone. The structure is documented below.

cloudStorage MdbClickhouseClusterCloudStorageArgs
copySchemaOnNewHosts Boolean

Whether to copy schema on new ClickHouse hosts.

databases List<MdbClickhouseClusterDatabaseArgs>

A database of the ClickHouse cluster. The structure is documented below.

deletionProtection Boolean

Inhibits deletion of the cluster. Can be either true or false.

description String

Description of the ClickHouse cluster.

folderId String

The ID of the folder that the resource belongs to. If it is not provided, the default provider folder is used.

formatSchemas List<MdbClickhouseClusterFormatSchemaArgs>

A set of protobuf or capnproto format schemas. The structure is documented below.

labels Map<String,String>

A set of key/value label pairs to assign to the ClickHouse cluster.

maintenanceWindow MdbClickhouseClusterMaintenanceWindowArgs
mlModels List<MdbClickhouseClusterMlModelArgs>

A group of machine learning models. The structure is documented below

name String

Name of the ClickHouse cluster.

securityGroupIds List<String>

A set of ids of security groups assigned to hosts of the cluster.

serviceAccountId String

ID of the service account used for access to Yandex Object Storage.

shardGroups List<MdbClickhouseClusterShardGroupArgs>

A group of clickhouse shards. The structure is documented below.

sqlDatabaseManagement Boolean

Grants admin user database management permission.

sqlUserManagement Boolean

Enables admin user with user management permission.

users List<MdbClickhouseClusterUserArgs>

A user of the ClickHouse cluster. The structure is documented below.

version String

Version of the ClickHouse server software.

zookeeper MdbClickhouseClusterZookeeperArgs

Configuration of the ZooKeeper subcluster. The structure is documented below.

clickhouse MdbClickhouseClusterClickhouseArgs

Configuration of the ClickHouse subcluster. The structure is documented below.

environment string

Deployment environment of the ClickHouse cluster. Can be either PRESTABLE or PRODUCTION.

hosts MdbClickhouseClusterHostArgs[]

A host of the ClickHouse cluster. The structure is documented below.

networkId string

ID of the network, to which the ClickHouse cluster belongs.

access MdbClickhouseClusterAccessArgs

Access policy to the ClickHouse cluster. The structure is documented below.

adminPassword string

A password used to authorize as the admin user when sql_user_management is enabled.

backupWindowStart MdbClickhouseClusterBackupWindowStartArgs

Time to start the daily backup, in the UTC timezone. The structure is documented below.

cloudStorage MdbClickhouseClusterCloudStorageArgs
copySchemaOnNewHosts boolean

Whether to copy schema on new ClickHouse hosts.

databases MdbClickhouseClusterDatabaseArgs[]

A database of the ClickHouse cluster. The structure is documented below.

deletionProtection boolean

Inhibits deletion of the cluster. Can be either true or false.

description string

Description of the ClickHouse cluster.

folderId string

The ID of the folder that the resource belongs to. If it is not provided, the default provider folder is used.

formatSchemas MdbClickhouseClusterFormatSchemaArgs[]

A set of protobuf or capnproto format schemas. The structure is documented below.

labels {[key: string]: string}

A set of key/value label pairs to assign to the ClickHouse cluster.

maintenanceWindow MdbClickhouseClusterMaintenanceWindowArgs
mlModels MdbClickhouseClusterMlModelArgs[]

A group of machine learning models. The structure is documented below

name string

Name of the ClickHouse cluster.

securityGroupIds string[]

A set of ids of security groups assigned to hosts of the cluster.

serviceAccountId string

ID of the service account used for access to Yandex Object Storage.

shardGroups MdbClickhouseClusterShardGroupArgs[]

A group of clickhouse shards. The structure is documented below.

sqlDatabaseManagement boolean

Grants admin user database management permission.

sqlUserManagement boolean

Enables admin user with user management permission.

users MdbClickhouseClusterUserArgs[]

A user of the ClickHouse cluster. The structure is documented below.

version string

Version of the ClickHouse server software.

zookeeper MdbClickhouseClusterZookeeperArgs

Configuration of the ZooKeeper subcluster. The structure is documented below.

clickhouse MdbClickhouseClusterClickhouseArgs

Configuration of the ClickHouse subcluster. The structure is documented below.

environment str

Deployment environment of the ClickHouse cluster. Can be either PRESTABLE or PRODUCTION.

hosts Sequence[MdbClickhouseClusterHostArgs]

A host of the ClickHouse cluster. The structure is documented below.

network_id str

ID of the network, to which the ClickHouse cluster belongs.

access MdbClickhouseClusterAccessArgs

Access policy to the ClickHouse cluster. The structure is documented below.

admin_password str

A password used to authorize as the admin user when sql_user_management is enabled.

backup_window_start MdbClickhouseClusterBackupWindowStartArgs

Time to start the daily backup, in the UTC timezone. The structure is documented below.

cloud_storage MdbClickhouseClusterCloudStorageArgs
copy_schema_on_new_hosts bool

Whether to copy schema on new ClickHouse hosts.

databases Sequence[MdbClickhouseClusterDatabaseArgs]

A database of the ClickHouse cluster. The structure is documented below.

deletion_protection bool

Inhibits deletion of the cluster. Can be either true or false.

description str

Description of the ClickHouse cluster.

folder_id str

The ID of the folder that the resource belongs to. If it is not provided, the default provider folder is used.

format_schemas Sequence[MdbClickhouseClusterFormatSchemaArgs]

A set of protobuf or capnproto format schemas. The structure is documented below.

labels Mapping[str, str]

A set of key/value label pairs to assign to the ClickHouse cluster.

maintenance_window MdbClickhouseClusterMaintenanceWindowArgs
ml_models Sequence[MdbClickhouseClusterMlModelArgs]

A group of machine learning models. The structure is documented below

name str

Name of the ClickHouse cluster.

security_group_ids Sequence[str]

A set of ids of security groups assigned to hosts of the cluster.

service_account_id str

ID of the service account used for access to Yandex Object Storage.

shard_groups Sequence[MdbClickhouseClusterShardGroupArgs]

A group of clickhouse shards. The structure is documented below.

sql_database_management bool

Grants admin user database management permission.

sql_user_management bool

Enables admin user with user management permission.

users Sequence[MdbClickhouseClusterUserArgs]

A user of the ClickHouse cluster. The structure is documented below.

version str

Version of the ClickHouse server software.

zookeeper MdbClickhouseClusterZookeeperArgs

Configuration of the ZooKeeper subcluster. The structure is documented below.

clickhouse Property Map

Configuration of the ClickHouse subcluster. The structure is documented below.

environment String

Deployment environment of the ClickHouse cluster. Can be either PRESTABLE or PRODUCTION.

hosts List<Property Map>

A host of the ClickHouse cluster. The structure is documented below.

networkId String

ID of the network, to which the ClickHouse cluster belongs.

access Property Map

Access policy to the ClickHouse cluster. The structure is documented below.

adminPassword String

A password used to authorize as the admin user when sql_user_management is enabled.

backupWindowStart Property Map

Time to start the daily backup, in the UTC timezone. The structure is documented below.

cloudStorage Property Map
copySchemaOnNewHosts Boolean

Whether to copy schema on new ClickHouse hosts.

databases List<Property Map>

A database of the ClickHouse cluster. The structure is documented below.

deletionProtection Boolean

Inhibits deletion of the cluster. Can be either true or false.

description String

Description of the ClickHouse cluster.

folderId String

The ID of the folder that the resource belongs to. If it is not provided, the default provider folder is used.

formatSchemas List<Property Map>

A set of protobuf or capnproto format schemas. The structure is documented below.

labels Map<String>

A set of key/value label pairs to assign to the ClickHouse cluster.

maintenanceWindow Property Map
mlModels List<Property Map>

A group of machine learning models. The structure is documented below

name String

Name of the ClickHouse cluster.

securityGroupIds List<String>

A set of ids of security groups assigned to hosts of the cluster.

serviceAccountId String

ID of the service account used for access to Yandex Object Storage.

shardGroups List<Property Map>

A group of clickhouse shards. The structure is documented below.

sqlDatabaseManagement Boolean

Grants admin user database management permission.

sqlUserManagement Boolean

Enables admin user with user management permission.

users List<Property Map>

A user of the ClickHouse cluster. The structure is documented below.

version String

Version of the ClickHouse server software.

zookeeper Property Map

Configuration of the ZooKeeper subcluster. The structure is documented below.
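
As a short TypeScript sketch of how a few of the optional inputs above combine with the required ones (all IDs and values, including the version, are placeholders):

import * as yandex from "@pulumi/yandex";

// Required inputs as in Example Usage, plus a few of the optional flat inputs listed above.
const cluster = new yandex.MdbClickhouseCluster("example", {
    environment: "PRODUCTION",
    networkId: "your_network_id",
    clickhouse: {
        resources: {
            resourcePresetId: "s2.micro",
            diskTypeId: "network-ssd",
            diskSize: 32,
        },
    },
    hosts: [{
        type: "CLICKHOUSE",
        zone: "ru-central1-a",
        subnetId: "your_subnet_id",
    }],
    description: "Example cluster with optional inputs",
    labels: { env: "staging" },
    deletionProtection: true,
    securityGroupIds: ["your_security_group_id"],
    serviceAccountId: "your_service_account_id",
    version: "21.8",
});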

Outputs

All input properties are implicitly available as output properties. Additionally, the MdbClickhouseCluster resource produces the following output properties:

CreatedAt string

Timestamp of cluster creation.

Health string

Aggregated health of the cluster. Can be ALIVE, DEGRADED, DEAD or HEALTH_UNKNOWN. For more information see health field of JSON representation in the official documentation.

Id string

The provider-assigned unique ID for this managed resource.

Status string

Status of the cluster. Can be CREATING, STARTING, RUNNING, UPDATING, STOPPING, STOPPED, ERROR or STATUS_UNKNOWN. For more information see status field of JSON representation in the official documentation.

CreatedAt string

Timestamp of cluster creation.

Health string

Aggregated health of the cluster. Can be ALIVE, DEGRADED, DEAD or HEALTH_UNKNOWN. For more information see health field of JSON representation in the official documentation.

Id string

The provider-assigned unique ID for this managed resource.

Status string

Status of the cluster. Can be CREATING, STARTING, RUNNING, UPDATING, STOPPING, STOPPED, ERROR or STATUS_UNKNOWN. For more information see status field of JSON representation in the official documentation.

createdAt String

Timestamp of cluster creation.

health String

Aggregated health of the cluster. Can be ALIVE, DEGRADED, DEAD or HEALTH_UNKNOWN. For more information see health field of JSON representation in the official documentation.

id String

The provider-assigned unique ID for this managed resource.

status String

Status of the cluster. Can be CREATING, STARTING, RUNNING, UPDATING, STOPPING, STOPPED, ERROR or STATUS_UNKNOWN. For more information see status field of JSON representation in the official documentation.

createdAt string

Timestamp of cluster creation.

health string

Aggregated health of the cluster. Can be ALIVE, DEGRADED, DEAD or HEALTH_UNKNOWN. For more information see health field of JSON representation in the official documentation.

id string

The provider-assigned unique ID for this managed resource.

status string

Status of the cluster. Can be CREATING, STARTING, RUNNING, UPDATING, STOPPING, STOPPED, ERROR or STATUS_UNKNOWN. For more information see status field of JSON representation in the official documentation.

created_at str

Timestamp of cluster creation.

health str

Aggregated health of the cluster. Can be ALIVE, DEGRADED, DEAD or HEALTH_UNKNOWN. For more information see health field of JSON representation in the official documentation.

id str

The provider-assigned unique ID for this managed resource.

status str

Status of the cluster. Can be CREATING, STARTING, RUNNING, UPDATING, STOPPING, STOPPED, ERROR or STATUS_UNKNOWN. For more information see status field of JSON representation in the official documentation.

createdAt String

Timestamp of cluster creation.

health String

Aggregated health of the cluster. Can be ALIVE, DEGRADED, DEAD or HEALTH_UNKNOWN. For more information see health field of JSON representation in the official documentation.

id String

The provider-assigned unique ID for this managed resource.

status String

Status of the cluster. Can be CREATING, STARTING, RUNNING, UPDATING, STOPPING, STOPPED, ERROR or STATUS_UNKNOWN. For more information see status field of JSON representation in the official documentation.
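
For example, continuing the TypeScript program from Example Usage above (which defines fooMdbClickhouseCluster), the computed properties can be exported as stack outputs:

// Assumes the `fooMdbClickhouseCluster` resource from the TypeScript example above.
// Exported values appear in `pulumi stack output` once the update completes.
export const clusterStatus = fooMdbClickhouseCluster.status;
export const clusterHealth = fooMdbClickhouseCluster.health;
export const clusterCreatedAt = fooMdbClickhouseCluster.createdAt;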

Look up an Existing MdbClickhouseCluster Resource

Get an existing MdbClickhouseCluster resource’s state with the given name, ID, and optional extra properties used to qualify the lookup.

public static get(name: string, id: Input<ID>, state?: MdbClickhouseClusterState, opts?: CustomResourceOptions): MdbClickhouseCluster
@staticmethod
def get(resource_name: str,
        id: str,
        opts: Optional[ResourceOptions] = None,
        access: Optional[MdbClickhouseClusterAccessArgs] = None,
        admin_password: Optional[str] = None,
        backup_window_start: Optional[MdbClickhouseClusterBackupWindowStartArgs] = None,
        clickhouse: Optional[MdbClickhouseClusterClickhouseArgs] = None,
        cloud_storage: Optional[MdbClickhouseClusterCloudStorageArgs] = None,
        copy_schema_on_new_hosts: Optional[bool] = None,
        created_at: Optional[str] = None,
        databases: Optional[Sequence[MdbClickhouseClusterDatabaseArgs]] = None,
        deletion_protection: Optional[bool] = None,
        description: Optional[str] = None,
        environment: Optional[str] = None,
        folder_id: Optional[str] = None,
        format_schemas: Optional[Sequence[MdbClickhouseClusterFormatSchemaArgs]] = None,
        health: Optional[str] = None,
        hosts: Optional[Sequence[MdbClickhouseClusterHostArgs]] = None,
        labels: Optional[Mapping[str, str]] = None,
        maintenance_window: Optional[MdbClickhouseClusterMaintenanceWindowArgs] = None,
        ml_models: Optional[Sequence[MdbClickhouseClusterMlModelArgs]] = None,
        name: Optional[str] = None,
        network_id: Optional[str] = None,
        security_group_ids: Optional[Sequence[str]] = None,
        service_account_id: Optional[str] = None,
        shard_groups: Optional[Sequence[MdbClickhouseClusterShardGroupArgs]] = None,
        sql_database_management: Optional[bool] = None,
        sql_user_management: Optional[bool] = None,
        status: Optional[str] = None,
        users: Optional[Sequence[MdbClickhouseClusterUserArgs]] = None,
        version: Optional[str] = None,
        zookeeper: Optional[MdbClickhouseClusterZookeeperArgs] = None) -> MdbClickhouseCluster
func GetMdbClickhouseCluster(ctx *Context, name string, id IDInput, state *MdbClickhouseClusterState, opts ...ResourceOption) (*MdbClickhouseCluster, error)
public static MdbClickhouseCluster Get(string name, Input<string> id, MdbClickhouseClusterState? state, CustomResourceOptions? opts = null)
public static MdbClickhouseCluster get(String name, Output<String> id, MdbClickhouseClusterState state, CustomResourceOptions options)
Resource lookup is not supported in YAML
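
For example, in TypeScript (the cluster ID below is a placeholder for an existing cluster's provider-assigned ID):

import * as yandex from "@pulumi/yandex";

// Look up the state of an existing cluster; this does not create a new resource.
const existing = yandex.MdbClickhouseCluster.get("existing-clickhouse", "your_cluster_id");

// The looked-up resource exposes the same output properties as a managed one.
export const existingHealth = existing.health;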
name
The unique name of the resulting resource.
id
The unique provider ID of the resource to lookup.
state
Any extra arguments used during the lookup.
opts
A bag of options that control this resource's behavior.
resource_name
The unique name of the resulting resource.
id
The unique provider ID of the resource to lookup.
name
The unique name of the resulting resource.
id
The unique provider ID of the resource to lookup.
state
Any extra arguments used during the lookup.
opts
A bag of options that control this resource's behavior.
name
The unique name of the resulting resource.
id
The unique provider ID of the resource to lookup.
state
Any extra arguments used during the lookup.
opts
A bag of options that control this resource's behavior.
name
The unique name of the resulting resource.
id
The unique provider ID of the resource to lookup.
state
Any extra arguments used during the lookup.
opts
A bag of options that control this resource's behavior.
The following state arguments are supported:
Access MdbClickhouseClusterAccessArgs

Access policy to the ClickHouse cluster. The structure is documented below.

AdminPassword string

A password used to authorize as the admin user when sql_user_management is enabled.

BackupWindowStart MdbClickhouseClusterBackupWindowStartArgs

Time to start the daily backup, in the UTC timezone. The structure is documented below.

Clickhouse MdbClickhouseClusterClickhouseArgs

Configuration of the ClickHouse subcluster. The structure is documented below.

CloudStorage MdbClickhouseClusterCloudStorageArgs
CopySchemaOnNewHosts bool

Whether to copy schema on new ClickHouse hosts.

CreatedAt string

Timestamp of cluster creation.

Databases List<MdbClickhouseClusterDatabaseArgs>

A database of the ClickHouse cluster. The structure is documented below.

DeletionProtection bool

Inhibits deletion of the cluster. Can be either true or false.

Description string

Description of the ClickHouse cluster.

Environment string

Deployment environment of the ClickHouse cluster. Can be either PRESTABLE or PRODUCTION.

FolderId string

The ID of the folder that the resource belongs to. If it is not provided, the default provider folder is used.

FormatSchemas List<MdbClickhouseClusterFormatSchemaArgs>

A set of protobuf or capnproto format schemas. The structure is documented below.

Health string

Aggregated health of the cluster. Can be ALIVE, DEGRADED, DEAD or HEALTH_UNKNOWN. For more information see health field of JSON representation in the official documentation.

Hosts List<MdbClickhouseClusterHostArgs>

A host of the ClickHouse cluster. The structure is documented below.

Labels Dictionary<string, string>

A set of key/value label pairs to assign to the ClickHouse cluster.

MaintenanceWindow MdbClickhouseClusterMaintenanceWindowArgs
MlModels List<MdbClickhouseClusterMlModelArgs>

A group of machine learning models. The structure is documented below.

Name string

Name of the ClickHouse cluster.

NetworkId string

ID of the network, to which the ClickHouse cluster belongs.

SecurityGroupIds List<string>

A set of ids of security groups assigned to hosts of the cluster.

ServiceAccountId string

ID of the service account used for access to Yandex Object Storage.

ShardGroups List<MdbClickhouseClusterShardGroupArgs>

A group of clickhouse shards. The structure is documented below.

SqlDatabaseManagement bool

Grants admin user database management permission.

SqlUserManagement bool

Enables admin user with user management permission.

Status string

Status of the cluster. Can be CREATING, STARTING, RUNNING, UPDATING, STOPPING, STOPPED, ERROR or STATUS_UNKNOWN. For more information see status field of JSON representation in the official documentation.

Users List<MdbClickhouseClusterUserArgs>

A user of the ClickHouse cluster. The structure is documented below.

Version string

Version of the ClickHouse server software.

Zookeeper MdbClickhouseClusterZookeeperArgs

Configuration of the ZooKeeper subcluster. The structure is documented below.

Access MdbClickhouseClusterAccessArgs

Access policy to the ClickHouse cluster. The structure is documented below.

AdminPassword string

A password used to authorize as user admin when sql_user_management is enabled.

BackupWindowStart MdbClickhouseClusterBackupWindowStartArgs

Time to start the daily backup, in the UTC timezone. The structure is documented below.

Clickhouse MdbClickhouseClusterClickhouseArgs

Configuration of the ClickHouse subcluster. The structure is documented below.

CloudStorage MdbClickhouseClusterCloudStorageArgs
CopySchemaOnNewHosts bool

Whether to copy schema on new ClickHouse hosts.

CreatedAt string

Timestamp of cluster creation.

Databases []MdbClickhouseClusterDatabaseArgs

A database of the ClickHouse cluster. The structure is documented below.

DeletionProtection bool

Inhibits deletion of the cluster. Can be either true or false.

Description string

Description of the ClickHouse cluster.

Environment string

Deployment environment of the ClickHouse cluster. Can be either PRESTABLE or PRODUCTION.

FolderId string

The ID of the folder that the resource belongs to. If it is not provided, the default provider folder is used.

FormatSchemas []MdbClickhouseClusterFormatSchemaArgs

A set of protobuf or capnproto format schemas. The structure is documented below.

Health string

Aggregated health of the cluster. Can be ALIVE, DEGRADED, DEAD or HEALTH_UNKNOWN. For more information see health field of JSON representation in the official documentation.

Hosts []MdbClickhouseClusterHostArgs

A host of the ClickHouse cluster. The structure is documented below.

Labels map[string]string

A set of key/value label pairs to assign to the ClickHouse cluster.

MaintenanceWindow MdbClickhouseClusterMaintenanceWindowArgs
MlModels []MdbClickhouseClusterMlModelArgs

A group of machine learning models. The structure is documented below.

Name string

Name of the ClickHouse cluster.

NetworkId string

ID of the network, to which the ClickHouse cluster belongs.

SecurityGroupIds []string

A set of ids of security groups assigned to hosts of the cluster.

ServiceAccountId string

ID of the service account used for access to Yandex Object Storage.

ShardGroups []MdbClickhouseClusterShardGroupArgs

A group of clickhouse shards. The structure is documented below.

SqlDatabaseManagement bool

Grants admin user database management permission.

SqlUserManagement bool

Enables admin user with user management permission.

Status string

Status of the cluster. Can be CREATING, STARTING, RUNNING, UPDATING, STOPPING, STOPPED, ERROR or STATUS_UNKNOWN. For more information see status field of JSON representation in the official documentation.

Users []MdbClickhouseClusterUserArgs

A user of the ClickHouse cluster. The structure is documented below.

Version string

Version of the ClickHouse server software.

Zookeeper MdbClickhouseClusterZookeeperArgs

Configuration of the ZooKeeper subcluster. The structure is documented below.

access MdbClickhouseClusterAccessArgs

Access policy to the ClickHouse cluster. The structure is documented below.

adminPassword String

A password used to authorize as user admin when sql_user_management is enabled.

backupWindowStart MdbClickhouseClusterBackupWindowStartArgs

Time to start the daily backup, in the UTC timezone. The structure is documented below.

clickhouse MdbClickhouseClusterClickhouseArgs

Configuration of the ClickHouse subcluster. The structure is documented below.

cloudStorage MdbClickhouseClusterCloudStorageArgs
copySchemaOnNewHosts Boolean

Whether to copy schema on new ClickHouse hosts.

createdAt String

Timestamp of cluster creation.

databases List<MdbClickhouseClusterDatabaseArgs>

A database of the ClickHouse cluster. The structure is documented below.

deletionProtection Boolean

Inhibits deletion of the cluster. Can be either true or false.

description String

Description of the ClickHouse cluster.

environment String

Deployment environment of the ClickHouse cluster. Can be either PRESTABLE or PRODUCTION.

folderId String

The ID of the folder that the resource belongs to. If it is not provided, the default provider folder is used.

formatSchemas List<MdbClickhouseClusterFormatSchemaArgs>

A set of protobuf or capnproto format schemas. The structure is documented below.

health String

Aggregated health of the cluster. Can be ALIVE, DEGRADED, DEAD or HEALTH_UNKNOWN. For more information see health field of JSON representation in the official documentation.

hosts List<MdbClickhouseClusterHostArgs>

A host of the ClickHouse cluster. The structure is documented below.

labels Map<String,String>

A set of key/value label pairs to assign to the ClickHouse cluster.

maintenanceWindow MdbClickhouseClusterMaintenanceWindowArgs
mlModels List<MdbClickhouseClusterMlModelArgs>

A group of machine learning models. The structure is documented below.

name String

Name of the ClickHouse cluster.

networkId String

ID of the network, to which the ClickHouse cluster belongs.

securityGroupIds List<String>

A set of ids of security groups assigned to hosts of the cluster.

serviceAccountId String

ID of the service account used for access to Yandex Object Storage.

shardGroups List<MdbClickhouseClusterShardGroupArgs>

A group of clickhouse shards. The structure is documented below.

sqlDatabaseManagement Boolean

Grants admin user database management permission.

sqlUserManagement Boolean

Enables admin user with user management permission.

status String

Status of the cluster. Can be CREATING, STARTING, RUNNING, UPDATING, STOPPING, STOPPED, ERROR or STATUS_UNKNOWN. For more information see status field of JSON representation in the official documentation.

users List<MdbClickhouseClusterUserArgs>

A user of the ClickHouse cluster. The structure is documented below.

version String

Version of the ClickHouse server software.

zookeeper MdbClickhouseClusterZookeeperArgs

Configuration of the ZooKeeper subcluster. The structure is documented below.

access MdbClickhouseClusterAccessArgs

Access policy to the ClickHouse cluster. The structure is documented below.

adminPassword string

A password used to authorize as user admin when sql_user_management is enabled.

backupWindowStart MdbClickhouseClusterBackupWindowStartArgs

Time to start the daily backup, in the UTC timezone. The structure is documented below.

clickhouse MdbClickhouseClusterClickhouseArgs

Configuration of the ClickHouse subcluster. The structure is documented below.

cloudStorage MdbClickhouseClusterCloudStorageArgs
copySchemaOnNewHosts boolean

Whether to copy schema on new ClickHouse hosts.

createdAt string

Timestamp of cluster creation.

databases MdbClickhouseClusterDatabaseArgs[]

A database of the ClickHouse cluster. The structure is documented below.

deletionProtection boolean

Inhibits deletion of the cluster. Can be either true or false.

description string

Description of the ClickHouse cluster.

environment string

Deployment environment of the ClickHouse cluster. Can be either PRESTABLE or PRODUCTION.

folderId string

The ID of the folder that the resource belongs to. If it is not provided, the default provider folder is used.

formatSchemas MdbClickhouseClusterFormatSchemaArgs[]

A set of protobuf or capnproto format schemas. The structure is documented below.

health string

Aggregated health of the cluster. Can be ALIVE, DEGRADED, DEAD or HEALTH_UNKNOWN. For more information see health field of JSON representation in the official documentation.

hosts MdbClickhouseClusterHostArgs[]

A host of the ClickHouse cluster. The structure is documented below.

labels {[key: string]: string}

A set of key/value label pairs to assign to the ClickHouse cluster.

maintenanceWindow MdbClickhouseClusterMaintenanceWindowArgs
mlModels MdbClickhouseClusterMlModelArgs[]

A group of machine learning models. The structure is documented below.

name string

Name of the ClickHouse cluster.

networkId string

ID of the network, to which the ClickHouse cluster belongs.

securityGroupIds string[]

A set of ids of security groups assigned to hosts of the cluster.

serviceAccountId string

ID of the service account used for access to Yandex Object Storage.

shardGroups MdbClickhouseClusterShardGroupArgs[]

A group of clickhouse shards. The structure is documented below.

sqlDatabaseManagement boolean

Grants admin user database management permission.

sqlUserManagement boolean

Enables admin user with user management permission.

status string

Status of the cluster. Can be CREATING, STARTING, RUNNING, UPDATING, STOPPING, STOPPED, ERROR or STATUS_UNKNOWN. For more information see status field of JSON representation in the official documentation.

users MdbClickhouseClusterUserArgs[]

A user of the ClickHouse cluster. The structure is documented below.

version string

Version of the ClickHouse server software.

zookeeper MdbClickhouseClusterZookeeperArgs

Configuration of the ZooKeeper subcluster. The structure is documented below.

access MdbClickhouseClusterAccessArgs

Access policy to the ClickHouse cluster. The structure is documented below.

admin_password str

A password used to authorize as user admin when sql_user_management is enabled.

backup_window_start MdbClickhouseClusterBackupWindowStartArgs

Time to start the daily backup, in the UTC timezone. The structure is documented below.

clickhouse MdbClickhouseClusterClickhouseArgs

Configuration of the ClickHouse subcluster. The structure is documented below.

cloud_storage MdbClickhouseClusterCloudStorageArgs
copy_schema_on_new_hosts bool

Whether to copy schema on new ClickHouse hosts.

created_at str

Timestamp of cluster creation.

databases Sequence[MdbClickhouseClusterDatabaseArgs]

A database of the ClickHouse cluster. The structure is documented below.

deletion_protection bool

Inhibits deletion of the cluster. Can be either true or false.

description str

Description of the ClickHouse cluster.

environment str

Deployment environment of the ClickHouse cluster. Can be either PRESTABLE or PRODUCTION.

folder_id str

The ID of the folder that the resource belongs to. If it is not provided, the default provider folder is used.

format_schemas Sequence[MdbClickhouseClusterFormatSchemaArgs]

A set of protobuf or capnproto format schemas. The structure is documented below.

health str

Aggregated health of the cluster. Can be ALIVE, DEGRADED, DEAD or HEALTH_UNKNOWN. For more information see health field of JSON representation in the official documentation.

hosts Sequence[MdbClickhouseClusterHostArgs]

A host of the ClickHouse cluster. The structure is documented below.

labels Mapping[str, str]

A set of key/value label pairs to assign to the ClickHouse cluster.

maintenance_window MdbClickhouseClusterMaintenanceWindowArgs
ml_models Sequence[MdbClickhouseClusterMlModelArgs]

A group of machine learning models. The structure is documented below.

name str

Name of the ClickHouse cluster.

network_id str

ID of the network, to which the ClickHouse cluster belongs.

security_group_ids Sequence[str]

A set of ids of security groups assigned to hosts of the cluster.

service_account_id str

ID of the service account used for access to Yandex Object Storage.

shard_groups Sequence[MdbClickhouseClusterShardGroupArgs]

A group of clickhouse shards. The structure is documented below.

sql_database_management bool

Grants admin user database management permission.

sql_user_management bool

Enables admin user with user management permission.

status str

Status of the cluster. Can be CREATING, STARTING, RUNNING, UPDATING, STOPPING, STOPPED, ERROR or STATUS_UNKNOWN. For more information see status field of JSON representation in the official documentation.

users Sequence[MdbClickhouseClusterUserArgs]

A user of the ClickHouse cluster. The structure is documented below.

version str

Version of the ClickHouse server software.

zookeeper MdbClickhouseClusterZookeeperArgs

Configuration of the ZooKeeper subcluster. The structure is documented below.

access Property Map

Access policy to the ClickHouse cluster. The structure is documented below.

adminPassword String

A password used to authorize as user admin when sql_user_management is enabled.

backupWindowStart Property Map

Time to start the daily backup, in the UTC timezone. The structure is documented below.

clickhouse Property Map

Configuration of the ClickHouse subcluster. The structure is documented below.

cloudStorage Property Map
copySchemaOnNewHosts Boolean

Whether to copy schema on new ClickHouse hosts.

createdAt String

Timestamp of cluster creation.

databases List

A database of the ClickHouse cluster. The structure is documented below.

deletionProtection Boolean

Inhibits deletion of the cluster. Can be either true or false.

description String

Description of the ClickHouse cluster.

environment String

Deployment environment of the ClickHouse cluster. Can be either PRESTABLE or PRODUCTION.

folderId String

The ID of the folder that the resource belongs to. If it is not provided, the default provider folder is used.

formatSchemas List

A set of protobuf or capnproto format schemas. The structure is documented below.

health String

Aggregated health of the cluster. Can be ALIVE, DEGRADED, DEAD or HEALTH_UNKNOWN. For more information see health field of JSON representation in the official documentation.

hosts List

A host of the ClickHouse cluster. The structure is documented below.

labels Map

A set of key/value label pairs to assign to the ClickHouse cluster.

maintenanceWindow Property Map
mlModels List

A group of machine learning models. The structure is documented below.

name String

Name of the ClickHouse cluster.

networkId String

ID of the network, to which the ClickHouse cluster belongs.

securityGroupIds List

A set of ids of security groups assigned to hosts of the cluster.

serviceAccountId String

ID of the service account used for access to Yandex Object Storage.

shardGroups List

A group of clickhouse shards. The structure is documented below.

sqlDatabaseManagement Boolean

Grants admin user database management permission.

sqlUserManagement Boolean

Enables admin user with user management permission.

status String

Status of the cluster. Can be CREATING, STARTING, RUNNING, UPDATING, STOPPING, STOPPED, ERROR or STATUS_UNKNOWN. For more information see status field of JSON representation in the official documentation.

users List

A user of the ClickHouse cluster. The structure is documented below.

version String

Version of the ClickHouse server software.

zookeeper Property Map

Configuration of the ZooKeeper subcluster. The structure is documented below.

Supporting Types

MdbClickhouseClusterAccess

DataLens bool

Allow access for DataLens. Can be either true or false.

Metrika bool

Allow access for Yandex.Metrika. Can be either true or false.

Serverless bool

Allow access for Serverless. Can be either true or false.

WebSql bool

Allow access for Web SQL. Can be either true or false.

DataLens bool

Allow access for DataLens. Can be either true or false.

Metrika bool

Allow access for Yandex.Metrika. Can be either true or false.

Serverless bool

Allow access for Serverless. Can be either true or false.

WebSql bool

Allow access for Web SQL. Can be either true or false.

dataLens Boolean

Allow access for DataLens. Can be either true or false.

metrika Boolean

Allow access for Yandex.Metrika. Can be either true or false.

serverless Boolean

Allow access for Serverless. Can be either true or false.

webSql Boolean

Allow access for Web SQL. Can be either true or false.

dataLens boolean

Allow access for DataLens. Can be either true or false.

metrika boolean

Allow access for Yandex.Metrika. Can be either true or false.

serverless boolean

Allow access for Serverless. Can be either true or false.

webSql boolean

Allow access for Web SQL. Can be either true or false.

data_lens bool

Allow access for DataLens. Can be either true or false.

metrika bool

Allow access for Yandex.Metrika. Can be either true or false.

serverless bool

Allow access for Serverless. Can be either true or false.

web_sql bool

Allow access for Web SQL. Can be either true or false.

dataLens Boolean

Allow access for DataLens. Can be either true or false.

metrika Boolean

Allow access for Yandex.Metrika. Can be either true or false.

serverless Boolean

Allow access for Serverless. Can be either true or false.

webSql Boolean

Allow access for Web SQL. Can be either true or false.
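
As a minimal sketch, the access block could be set inside the cluster's Yandex.MdbClickhouseClusterArgs like the fragment below; the flag values are illustrative and the input type follows the provider's Yandex.Inputs naming:

// Fragment: goes inside new Yandex.MdbClickhouseClusterArgs { ... }
Access = new Yandex.Inputs.MdbClickhouseClusterAccessArgs
{
    DataLens = true,      // allow access for DataLens
    WebSql = true,        // allow access for Web SQL
    Metrika = false,      // keep Yandex.Metrika access disabled
    Serverless = false,   // keep Serverless access disabled
},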

MdbClickhouseClusterBackupWindowStart

Hours int

The hour at which backup will be started.

Minutes int

The minute at which backup will be started.

Hours int

The hour at which backup will be started.

Minutes int

The minute at which backup will be started.

hours Integer

The hour at which backup will be started.

minutes Integer

The minute at which backup will be started.

hours number

The hour at which backup will be started.

minutes number

The minute at which backup will be started.

hours int

The hour at which backup will be started.

minutes int

The minute at which backup will be started.

hours Number

The hour at which backup will be started.

minutes Number

The minute at which backup will be started.
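
For instance, a daily backup starting at 03:30 UTC could be configured with a fragment like this (the hour and minute values are illustrative):

// Fragment: goes inside new Yandex.MdbClickhouseClusterArgs { ... }
BackupWindowStart = new Yandex.Inputs.MdbClickhouseClusterBackupWindowStartArgs
{
    Hours = 3,     // hour (UTC) at which the backup starts
    Minutes = 30,  // minute at which the backup starts
},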

MdbClickhouseClusterClickhouse

Resources MdbClickhouseClusterClickhouseResources

Resources allocated to hosts of the ClickHouse subcluster. The structure is documented below.

Config MdbClickhouseClusterClickhouseConfig

Main ClickHouse cluster configuration.

Resources MdbClickhouseClusterClickhouseResources

Resources allocated to hosts of the ClickHouse subcluster. The structure is documented below.

Config MdbClickhouseClusterClickhouseConfig

Main ClickHouse cluster configuration.

resources MdbClickhouseClusterClickhouseResources

Resources allocated to hosts of the ClickHouse subcluster. The structure is documented below.

config MdbClickhouseClusterClickhouseConfig

Main ClickHouse cluster configuration.

resources MdbClickhouseClusterClickhouseResources

Resources allocated to hosts of the ClickHouse subcluster. The structure is documented below.

config MdbClickhouseClusterClickhouseConfig

Main ClickHouse cluster configuration.

resources MdbClickhouseClusterClickhouseResources

Resources allocated to hosts of the ClickHouse subcluster. The structure is documented below.

config MdbClickhouseClusterClickhouseConfig

Main ClickHouse cluster configuration.

resources Property Map

Resources allocated to hosts of the ClickHouse subcluster. The structure is documented below.

config Property Map

Main ClickHouse cluster configuration.

MdbClickhouseClusterClickhouseConfig

BackgroundPoolSize int
BackgroundSchedulePoolSize int
Compressions List<MdbClickhouseClusterClickhouseConfigCompression>

Data compression configuration. The structure is documented below.

GeobaseUri string
GraphiteRollups List<MdbClickhouseClusterClickhouseConfigGraphiteRollup>

Graphite rollup configuration. The structure is documented below.

Kafka MdbClickhouseClusterClickhouseConfigKafka

Kafka connection configuration. The structure is documented below.

KafkaTopics List<MdbClickhouseClusterClickhouseConfigKafkaTopic>

Kafka topic connection configuration. The structure is documented below.

KeepAliveTimeout int
LogLevel string
MarkCacheSize int
MaxConcurrentQueries int
MaxConnections int
MaxPartitionSizeToDrop int
MaxTableSizeToDrop int
MergeTree MdbClickhouseClusterClickhouseConfigMergeTree

MergeTree engine configuration. The structure is documented below.

MetricLogEnabled bool
MetricLogRetentionSize int
MetricLogRetentionTime int
PartLogRetentionSize int
PartLogRetentionTime int
QueryLogRetentionSize int
QueryLogRetentionTime int
QueryThreadLogEnabled bool
QueryThreadLogRetentionSize int
QueryThreadLogRetentionTime int
Rabbitmq MdbClickhouseClusterClickhouseConfigRabbitmq

RabbitMQ connection configuration. The structure is documented below.

TextLogEnabled bool
TextLogLevel string
TextLogRetentionSize int
TextLogRetentionTime int
Timezone string
TraceLogEnabled bool
TraceLogRetentionSize int
TraceLogRetentionTime int
UncompressedCacheSize int
BackgroundPoolSize int
BackgroundSchedulePoolSize int
Compressions []MdbClickhouseClusterClickhouseConfigCompression

Data compression configuration. The structure is documented below.

GeobaseUri string
GraphiteRollups []MdbClickhouseClusterClickhouseConfigGraphiteRollup

Graphite rollup configuration. The structure is documented below.

Kafka MdbClickhouseClusterClickhouseConfigKafka

Kafka connection configuration. The structure is documented below.

KafkaTopics []MdbClickhouseClusterClickhouseConfigKafkaTopic

Kafka topic connection configuration. The structure is documented below.

KeepAliveTimeout int
LogLevel string
MarkCacheSize int
MaxConcurrentQueries int
MaxConnections int
MaxPartitionSizeToDrop int
MaxTableSizeToDrop int
MergeTree MdbClickhouseClusterClickhouseConfigMergeTree

MergeTree engine configuration. The structure is documented below.

MetricLogEnabled bool
MetricLogRetentionSize int
MetricLogRetentionTime int
PartLogRetentionSize int
PartLogRetentionTime int
QueryLogRetentionSize int
QueryLogRetentionTime int
QueryThreadLogEnabled bool
QueryThreadLogRetentionSize int
QueryThreadLogRetentionTime int
Rabbitmq MdbClickhouseClusterClickhouseConfigRabbitmq

RabbitMQ connection configuration. The structure is documented below.

TextLogEnabled bool
TextLogLevel string
TextLogRetentionSize int
TextLogRetentionTime int
Timezone string
TraceLogEnabled bool
TraceLogRetentionSize int
TraceLogRetentionTime int
UncompressedCacheSize int
backgroundPoolSize Integer
backgroundSchedulePoolSize Integer
compressions List<MdbClickhouseClusterClickhouseConfigCompression>

Data compression configuration. The structure is documented below.

geobaseUri String
graphiteRollups List<MdbClickhouseClusterClickhouseConfigGraphiteRollup>

Graphite rollup configuration. The structure is documented below.

kafka MdbClickhouseClusterClickhouseConfigKafka

Kafka connection configuration. The structure is documented below.

kafkaTopics List<MdbClickhouseClusterClickhouseConfigKafkaTopic>

Kafka topic connection configuration. The structure is documented below.

keepAliveTimeout Integer
logLevel String
markCacheSize Integer
maxConcurrentQueries Integer
maxConnections Integer
maxPartitionSizeToDrop Integer
maxTableSizeToDrop Integer
mergeTree MdbClickhouseClusterClickhouseConfigMergeTree

MergeTree engine configuration. The structure is documented below.

metricLogEnabled Boolean
metricLogRetentionSize Integer
metricLogRetentionTime Integer
partLogRetentionSize Integer
partLogRetentionTime Integer
queryLogRetentionSize Integer
queryLogRetentionTime Integer
queryThreadLogEnabled Boolean
queryThreadLogRetentionSize Integer
queryThreadLogRetentionTime Integer
rabbitmq MdbClickhouseClusterClickhouseConfigRabbitmq

RabbitMQ connection configuration. The structure is documented below.

textLogEnabled Boolean
textLogLevel String
textLogRetentionSize Integer
textLogRetentionTime Integer
timezone String
traceLogEnabled Boolean
traceLogRetentionSize Integer
traceLogRetentionTime Integer
uncompressedCacheSize Integer
backgroundPoolSize number
backgroundSchedulePoolSize number
compressions MdbClickhouseClusterClickhouseConfigCompression[]

Data compression configuration. The structure is documented below.

geobaseUri string
graphiteRollups MdbClickhouseClusterClickhouseConfigGraphiteRollup[]

Graphite rollup configuration. The structure is documented below.

kafka MdbClickhouseClusterClickhouseConfigKafka

Kafka connection configuration. The structure is documented below.

kafkaTopics MdbClickhouseClusterClickhouseConfigKafkaTopic[]

Kafka topic connection configuration. The structure is documented below.

keepAliveTimeout number
logLevel string
markCacheSize number
maxConcurrentQueries number
maxConnections number
maxPartitionSizeToDrop number
maxTableSizeToDrop number
mergeTree MdbClickhouseClusterClickhouseConfigMergeTree

MergeTree engine configuration. The structure is documented below.

metricLogEnabled boolean
metricLogRetentionSize number
metricLogRetentionTime number
partLogRetentionSize number
partLogRetentionTime number
queryLogRetentionSize number
queryLogRetentionTime number
queryThreadLogEnabled boolean
queryThreadLogRetentionSize number
queryThreadLogRetentionTime number
rabbitmq MdbClickhouseClusterClickhouseConfigRabbitmq

RabbitMQ connection configuration. The structure is documented below.

textLogEnabled boolean
textLogLevel string
textLogRetentionSize number
textLogRetentionTime number
timezone string
traceLogEnabled boolean
traceLogRetentionSize number
traceLogRetentionTime number
uncompressedCacheSize number
background_pool_size int
background_schedule_pool_size int
compressions Sequence[MdbClickhouseClusterClickhouseConfigCompression]

Data compression configuration. The structure is documented below.

geobase_uri str
graphite_rollups Sequence[MdbClickhouseClusterClickhouseConfigGraphiteRollup]

Graphite rollup configuration. The structure is documented below.

kafka MdbClickhouseClusterClickhouseConfigKafka

Kafka connection configuration. The structure is documented below.

kafka_topics Sequence[MdbClickhouseClusterClickhouseConfigKafkaTopic]

Kafka topic connection configuration. The structure is documented below.

keep_alive_timeout int
log_level str
mark_cache_size int
max_concurrent_queries int
max_connections int
max_partition_size_to_drop int
max_table_size_to_drop int
merge_tree MdbClickhouseClusterClickhouseConfigMergeTree

MergeTree engine configuration. The structure is documented below.

metric_log_enabled bool
metric_log_retention_size int
metric_log_retention_time int
part_log_retention_size int
part_log_retention_time int
query_log_retention_size int
query_log_retention_time int
query_thread_log_enabled bool
query_thread_log_retention_size int
query_thread_log_retention_time int
rabbitmq MdbClickhouseClusterClickhouseConfigRabbitmq

RabbitMQ connection configuration. The structure is documented below.

text_log_enabled bool
text_log_level str
text_log_retention_size int
text_log_retention_time int
timezone str
trace_log_enabled bool
trace_log_retention_size int
trace_log_retention_time int
uncompressed_cache_size int
backgroundPoolSize Number
backgroundSchedulePoolSize Number
compressions List

Data compression configuration. The structure is documented below.

geobaseUri String
graphiteRollups List

Graphite rollup configuration. The structure is documented below.

kafka Property Map

Kafka connection configuration. The structure is documented below.

kafkaTopics List

Kafka topic connection configuration. The structure is documented below.

keepAliveTimeout Number
logLevel String
markCacheSize Number
maxConcurrentQueries Number
maxConnections Number
maxPartitionSizeToDrop Number
maxTableSizeToDrop Number
mergeTree Property Map

MergeTree engine configuration. The structure is documented below.

metricLogEnabled Boolean
metricLogRetentionSize Number
metricLogRetentionTime Number
partLogRetentionSize Number
partLogRetentionTime Number
queryLogRetentionSize Number
queryLogRetentionTime Number
queryThreadLogEnabled Boolean
queryThreadLogRetentionSize Number
queryThreadLogRetentionTime Number
rabbitmq Property Map

RabbitMQ connection configuration. The structure is documented below.

textLogEnabled Boolean
textLogLevel String
textLogRetentionSize Number
textLogRetentionTime Number
timezone String
traceLogEnabled Boolean
traceLogRetentionSize Number
traceLogRetentionTime Number
uncompressedCacheSize Number

MdbClickhouseClusterClickhouseConfigCompression

Method string

Method: Compression method. Two methods are available: LZ4 and zstd.

MinPartSize int

Min part size: Minimum size (in bytes) of a data part in a table. ClickHouse only applies the rule to tables with data parts greater than or equal to the Min part size value.

MinPartSizeRatio double

Min part size ratio: Minimum table part size to total table size ratio. ClickHouse only applies the rule to tables in which this ratio is greater than or equal to the Min part size ratio value.

Method string

Method: Compression method. Two methods are available: LZ4 and zstd.

MinPartSize int

Min part size: Minimum size (in bytes) of a data part in a table. ClickHouse only applies the rule to tables with data parts greater than or equal to the Min part size value.

MinPartSizeRatio float64

Min part size ratio: Minimum table part size to total table size ratio. ClickHouse only applies the rule to tables in which this ratio is greater than or equal to the Min part size ratio value.

method String

Method: Compression method. Two methods are available: LZ4 and zstd.

minPartSize Integer

Min part size: Minimum size (in bytes) of a data part in a table. ClickHouse only applies the rule to tables with data parts greater than or equal to the Min part size value.

minPartSizeRatio Double

Min part size ratio: Minimum table part size to total table size ratio. ClickHouse only applies the rule to tables in which this ratio is greater than or equal to the Min part size ratio value.

method string

Method: Compression method. Two methods are available: LZ4 and zstd.

minPartSize number

Min part size: Minimum size (in bytes) of a data part in a table. ClickHouse only applies the rule to tables with data parts greater than or equal to the Min part size value.

minPartSizeRatio number

Min part size ratio: Minimum table part size to total table size ratio. ClickHouse only applies the rule to tables in which this ratio is greater than or equal to the Min part size ratio value.

method str

Method: Compression method. Two methods are available: LZ4 and zstd.

min_part_size int

Min part size: Minimum size (in bytes) of a data part in a table. ClickHouse only applies the rule to tables with data parts greater than or equal to the Min part size value.

min_part_size_ratio float

Min part size ratio: Minimum table part size to total table size ratio. ClickHouse only applies the rule to tables in which this ratio is greater than or equal to the Min part size ratio value.

method String

Method: Compression method. Two methods are available: LZ4 and zstd.

minPartSize Number

Min part size: Minimum size (in bytes) of a data part in a table. ClickHouse only applies the rule to tables with data parts greater than or equal to the Min part size value.

minPartSizeRatio Number

Min part size ratio: Minimum table part size to total table size ratio. ClickHouse only applies the rule to tables in which this ratio is greater than or equal to the Min part size ratio value.

MdbClickhouseClusterClickhouseConfigGraphiteRollup

Name string

Graphite rollup configuration name.

Patterns List<MdbClickhouseClusterClickhouseConfigGraphiteRollupPattern>

Set of thinning rules.

Name string

Graphite rollup configuration name.

Patterns []MdbClickhouseClusterClickhouseConfigGraphiteRollupPattern

Set of thinning rules.

name String

Graphite rollup configuration name.

patterns List<MdbClickhouseClusterClickhouseConfigGraphiteRollupPattern>

Set of thinning rules.

name string

Graphite rollup configuration name.

patterns MdbClickhouseClusterClickhouseConfigGraphiteRollupPattern[]

Set of thinning rules.

name str

Graphite rollup configuration name.

patterns Sequence[MdbClickhouseClusterClickhouseConfigGraphiteRollupPattern]

Set of thinning rules.

name String

Graphite rollup configuration name.

patterns List

Set of thinning rules.

MdbClickhouseClusterClickhouseConfigGraphiteRollupPattern

Function string

Aggregation function name.

Regexp string

Regular expression that the metric name must match.

Retentions List<MdbClickhouseClusterClickhouseConfigGraphiteRollupPatternRetention>

Retain parameters.

Function string

Aggregation function name.

Regexp string

Regular expression that the metric name must match.

Retentions []MdbClickhouseClusterClickhouseConfigGraphiteRollupPatternRetention

Retain parameters.

function String

Aggregation function name.

regexp String

Regular expression that the metric name must match.

retentions List<MdbClickhouseClusterClickhouseConfigGraphiteRollupPatternRetention>

Retain parameters.

function string

Aggregation function name.

regexp string

Regular expression that the metric name must match.

retentions MdbClickhouseClusterClickhouseConfigGraphiteRollupPatternRetention[]

Retain parameters.

function str

Aggregation function name.

regexp str

Regular expression that the metric name must match.

retentions Sequence[MdbClickhouseClusterClickhouseConfigGraphiteRollupPatternRetention]

Retain parameters.

function String

Aggregation function name.

regexp String

Regular expression that the metric name must match.

retentions List

Retain parameters.

MdbClickhouseClusterClickhouseConfigGraphiteRollupPatternRetention

Age int

Minimum data age in seconds.

Precision int

Accuracy of determining the age of the data in seconds.

Age int

Minimum data age in seconds.

Precision int

Accuracy of determining the age of the data in seconds.

age Integer

Minimum data age in seconds.

precision Integer

Accuracy of determining the age of the data in seconds.

age number

Minimum data age in seconds.

precision number

Accuracy of determining the age of the data in seconds.

age int

Minimum data age in seconds.

precision int

Accuracy of determining the age of the data in seconds.

age Number

Minimum data age in seconds.

precision Number

Accuracy of determining the age of the data in seconds.

MdbClickhouseClusterClickhouseConfigKafka

SaslMechanism string

SASL mechanism used in kafka authentication.

SaslPassword string

User password on kafka server.

SaslUsername string

Username on kafka server.

SecurityProtocol string

Security protocol used to connect to kafka server.

SaslMechanism string

SASL mechanism used in kafka authentication.

SaslPassword string

User password on kafka server.

SaslUsername string

Username on kafka server.

SecurityProtocol string

Security protocol used to connect to kafka server.

saslMechanism String

SASL mechanism used in kafka authentication.

saslPassword String

User password on kafka server.

saslUsername String

Username on kafka server.

securityProtocol String

Security protocol used to connect to kafka server.

saslMechanism string

SASL mechanism used in kafka authentication.

saslPassword string

User password on kafka server.

saslUsername string

Username on kafka server.

securityProtocol string

Security protocol used to connect to kafka server.

sasl_mechanism str

SASL mechanism used in kafka authentication.

sasl_password str

User password on kafka server.

sasl_username str

Username on kafka server.

security_protocol str

Security protocol used to connect to kafka server.

saslMechanism String

SASL mechanism used in kafka authentication.

saslPassword String

User password on kafka server.

saslUsername String

Username on kafka server.

securityProtocol String

Security protocol used to connect to kafka server.

MdbClickhouseClusterClickhouseConfigKafkaTopic

Name string

Kafka topic name.

Settings MdbClickhouseClusterClickhouseConfigKafkaTopicSettings

Kafka connection settings, same as the kafka block.

Name string

Kafka topic name.

Settings MdbClickhouseClusterClickhouseConfigKafkaTopicSettings

Kafka connection settings, same as the kafka block.

name String

Kafka topic name.

settings MdbClickhouseClusterClickhouseConfigKafkaTopicSettings

Kafka connection settings, same as the kafka block.

name string

Kafka topic name.

settings MdbClickhouseClusterClickhouseConfigKafkaTopicSettings

Kafka connection settings, same as the kafka block.

name str

Kafka topic name.

settings MdbClickhouseClusterClickhouseConfigKafkaTopicSettings

Kafka connection settings, same as the kafka block.

name String

Kafka topic name.

settings Property Map

Kafka connection settings, same as the kafka block.

MdbClickhouseClusterClickhouseConfigKafkaTopicSettings

SaslMechanism string

SASL mechanism used in kafka authentication.

SaslPassword string

User password on kafka server.

SaslUsername string

Username on kafka server.

SecurityProtocol string

Security protocol used to connect to kafka server.

SaslMechanism string

SASL mechanism used in kafka authentication.

SaslPassword string

User password on kafka server.

SaslUsername string

Username on kafka server.

SecurityProtocol string

Security protocol used to connect to kafka server.

saslMechanism String

SASL mechanism used in kafka authentication.

saslPassword String

User password on kafka server.

saslUsername String

Username on kafka server.

securityProtocol String

Security protocol used to connect to kafka server.

saslMechanism string

SASL mechanism used in kafka authentication.

saslPassword string

User password on kafka server.

saslUsername string

Username on kafka server.

securityProtocol string

Security protocol used to connect to kafka server.

sasl_mechanism str

SASL mechanism used in kafka authentication.

sasl_password str

User password on kafka server.

sasl_username str

Username on kafka server.

security_protocol str

Security protocol used to connect to kafka server.

saslMechanism String

SASL mechanism used in kafka authentication.

saslPassword String

User password on kafka server.

saslUsername String

Username on kafka server.

securityProtocol String

Security protocol used to connect to kafka server.

MdbClickhouseClusterClickhouseConfigMergeTree

MaxBytesToMergeAtMinSpaceInPool int

Max bytes to merge at min space in pool: Maximum total size of a data part to merge when the number of free threads in the background pool is minimum.

MaxReplicatedMergesInQueue int

Max replicated merges in queue: Maximum number of merge tasks that can be in the ReplicatedMergeTree queue at the same time.

NumberOfFreeEntriesInPoolToLowerMaxSizeOfMerge int

Number of free entries in pool to lower max size of merge: Threshold value of free entries in the pool. If the number of entries in the pool falls below this value, ClickHouse reduces the maximum size of a data part to merge. This helps handle small merges faster, rather than filling the pool with lengthy merges.

PartsToDelayInsert int

Parts to delay insert: Number of active data parts in a table, on exceeding which ClickHouse starts to artificially reduce the rate of inserting data into the table.

PartsToThrowInsert int

Parts to throw insert: Threshold value of active data parts in a table, on exceeding which ClickHouse throws the 'Too many parts ...' exception.

ReplicatedDeduplicationWindow int

Replicated deduplication window: Number of recent hash blocks that ZooKeeper will store (the old ones will be deleted).

ReplicatedDeduplicationWindowSeconds int

Replicated deduplication window seconds: Time during which ZooKeeper stores the hash blocks (the old ones will be deleted).

MaxBytesToMergeAtMinSpaceInPool int

Max bytes to merge at min space in pool: Maximum total size of a data part to merge when the number of free threads in the background pool is minimum.

MaxReplicatedMergesInQueue int

Max replicated merges in queue: Maximum number of merge tasks that can be in the ReplicatedMergeTree queue at the same time.

NumberOfFreeEntriesInPoolToLowerMaxSizeOfMerge int

Number of free entries in pool to lower max size of merge: Threshold value of free entries in the pool. If the number of entries in the pool falls below this value, ClickHouse reduces the maximum size of a data part to merge. This helps handle small merges faster, rather than filling the pool with lengthy merges.

PartsToDelayInsert int

Parts to delay insert: Number of active data parts in a table, on exceeding which ClickHouse starts to artificially reduce the rate of inserting data into the table.

PartsToThrowInsert int

Parts to throw insert: Threshold value of active data parts in a table, on exceeding which ClickHouse throws the 'Too many parts ...' exception.

ReplicatedDeduplicationWindow int

Replicated deduplication window: Number of recent hash blocks that ZooKeeper will store (the old ones will be deleted).

ReplicatedDeduplicationWindowSeconds int

Replicated deduplication window seconds: Time during which ZooKeeper stores the hash blocks (the old ones will be deleted).

maxBytesToMergeAtMinSpaceInPool Integer

Max bytes to merge at min space in pool: Maximum total size of a data part to merge when the number of free threads in the background pool is minimum.

maxReplicatedMergesInQueue Integer

Max replicated merges in queue: Maximum number of merge tasks that can be in the ReplicatedMergeTree queue at the same time.

numberOfFreeEntriesInPoolToLowerMaxSizeOfMerge Integer

Number of free entries in pool to lower max size of merge: Threshold value of free entries in the pool. If the number of entries in the pool falls below this value, ClickHouse reduces the maximum size of a data part to merge. This helps handle small merges faster, rather than filling the pool with lengthy merges.

partsToDelayInsert Integer

Parts to delay insert: Number of active data parts in a table, on exceeding which ClickHouse starts to artificially reduce the rate of inserting data into the table.

partsToThrowInsert Integer

Parts to throw insert: Threshold value of active data parts in a table, on exceeding which ClickHouse throws the 'Too many parts ...' exception.

replicatedDeduplicationWindow Integer

Replicated deduplication window: Number of recent hash blocks that ZooKeeper will store (the old ones will be deleted).

replicatedDeduplicationWindowSeconds Integer

Replicated deduplication window seconds: Time during which ZooKeeper stores the hash blocks (the old ones will be deleted).

maxBytesToMergeAtMinSpaceInPool number

Max bytes to merge at min space in pool: Maximum total size of a data part to merge when the number of free threads in the background pool is minimum.

maxReplicatedMergesInQueue number

Max replicated merges in queue: Maximum number of merge tasks that can be in the ReplicatedMergeTree queue at the same time.

numberOfFreeEntriesInPoolToLowerMaxSizeOfMerge number

Number of free entries in pool to lower max size of merge: Threshold value of free entries in the pool. If the number of entries in the pool falls below this value, ClickHouse reduces the maximum size of a data part to merge. This helps handle small merges faster, rather than filling the pool with lengthy merges.

partsToDelayInsert number

Parts to delay insert: Number of active data parts in a table, on exceeding which ClickHouse starts to artificially reduce the rate of inserting data into the table.

partsToThrowInsert number

Parts to throw insert: Threshold value of active data parts in a table, on exceeding which ClickHouse throws the 'Too many parts ...' exception.

replicatedDeduplicationWindow number

Replicated deduplication window: Number of recent hash blocks that ZooKeeper will store (the old ones will be deleted).

replicatedDeduplicationWindowSeconds number

Replicated deduplication window seconds: Time during which ZooKeeper stores the hash blocks (the old ones will be deleted).

max_bytes_to_merge_at_min_space_in_pool int

Max bytes to merge at min space in pool: Maximum total size of a data part to merge when the number of free threads in the background pool is minimum.

max_replicated_merges_in_queue int

Max replicated merges in queue: Maximum number of merge tasks that can be in the ReplicatedMergeTree queue at the same time.

number_of_free_entries_in_pool_to_lower_max_size_of_merge int

Number of free entries in pool to lower max size of merge: Threshold value of free entries in the pool. If the number of entries in the pool falls below this value, ClickHouse reduces the maximum size of a data part to merge. This helps handle small merges faster, rather than filling the pool with lengthy merges.

parts_to_delay_insert int

Parts to delay insert: Number of active data parts in a table, on exceeding which ClickHouse starts to artificially reduce the rate of inserting data into the table.

parts_to_throw_insert int

Parts to throw insert: Threshold value of active data parts in a table, on exceeding which ClickHouse throws the 'Too many parts ...' exception.

replicated_deduplication_window int

Replicated deduplication window: Number of recent hash blocks that ZooKeeper will store (the old ones will be deleted).

replicated_deduplication_window_seconds int

Replicated deduplication window seconds: Time during which ZooKeeper stores the hash blocks (the old ones will be deleted).

maxBytesToMergeAtMinSpaceInPool Number

Max bytes to merge at min space in pool: Maximum total size of a data part to merge when the number of free threads in the background pool is minimum.

maxReplicatedMergesInQueue Number

Max replicated merges in queue: Maximum number of merge tasks that can be in the ReplicatedMergeTree queue at the same time.

numberOfFreeEntriesInPoolToLowerMaxSizeOfMerge Number

Number of free entries in pool to lower max size of merge: Threshold value of free entries in the pool. If the number of entries in the pool falls below this value, ClickHouse reduces the maximum size of a data part to merge. This helps handle small merges faster, rather than filling the pool with lengthy merges.

partsToDelayInsert Number

Parts to delay insert: Number of active data parts in a table, on exceeding which ClickHouse starts to artificially reduce the rate of inserting data into the table.

partsToThrowInsert Number

Parts to throw insert: Threshold value of active data parts in a table, on exceeding which ClickHouse throws the 'Too many parts ...' exception.

replicatedDeduplicationWindow Number

Replicated deduplication window: Number of recent hash blocks that ZooKeeper will store (the old ones will be deleted).

replicatedDeduplicationWindowSeconds Number

Replicated deduplication window seconds: Time during which ZooKeeper stores the hash blocks (the old ones will be deleted).

MdbClickhouseClusterClickhouseConfigRabbitmq

Password string

RabbitMQ user password.

Username string

RabbitMQ username.

Password string

RabbitMQ user password.

Username string

RabbitMQ username.

password String

RabbitMQ user password.

username String

RabbitMQ username.

password string

RabbitMQ user password.

username string

RabbitMQ username.

password str

RabbitMQ user password.

username str

RabbitMQ username.

password String

RabbitMQ user password.

username String

RabbitMQ username.

MdbClickhouseClusterClickhouseResources

DiskSize int

Volume of the storage available to a ClickHouse host, in gigabytes.

DiskTypeId string

Type of the storage of ClickHouse hosts. For more information see the official documentation.

ResourcePresetId string
DiskSize int

Volume of the storage available to a ClickHouse host, in gigabytes.

DiskTypeId string

Type of the storage of ClickHouse hosts. For more information see the official documentation.

ResourcePresetId string
diskSize Integer

Volume of the storage available to a ClickHouse host, in gigabytes.

diskTypeId String

Type of the storage of ClickHouse hosts. For more information see the official documentation.

resourcePresetId String
diskSize number

Volume of the storage available to a ClickHouse host, in gigabytes.

diskTypeId string

Type of the storage of ClickHouse hosts. For more information see the official documentation.

resourcePresetId string
disk_size int

Volume of the storage available to a ClickHouse host, in gigabytes.

disk_type_id str

Type of the storage of ClickHouse hosts. For more information see the official documentation.

resource_preset_id str
diskSize Number

Volume of the storage available to a ClickHouse host, in gigabytes.

diskTypeId String

Type of the storage of ClickHouse hosts. For more information see the official documentation.

resourcePresetId String

MdbClickhouseClusterCloudStorage

Enabled bool

Whether to use Yandex Object Storage for storing ClickHouse data. Can be either true or false.

Enabled bool

Whether to use Yandex Object Storage for storing ClickHouse data. Can be either true or false.

enabled Boolean

Whether to use Yandex Object Storage for storing ClickHouse data. Can be either true or false.

enabled boolean

Whether to use Yandex Object Storage for storing ClickHouse data. Can be either true or false.

enabled bool

Whether to use Yandex Object Storage for storing ClickHouse data. Can be either true or false.

enabled Boolean

Whether to use Yandex Object Storage for storing ClickHouse data. Can be either true or false.
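
A minimal sketch enabling hybrid storage, assuming the input type follows the provider's Yandex.Inputs naming:

// Fragment: goes inside new Yandex.MdbClickhouseClusterArgs { ... }
CloudStorage = new Yandex.Inputs.MdbClickhouseClusterCloudStorageArgs
{
    Enabled = true,  // store ClickHouse data in Yandex Object Storage
},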

MdbClickhouseClusterDatabase

Name string

The name of the database.

Name string

The name of the database.

name String

The name of the database.

name string

The name of the database.

name str

The name of the database.

name String

The name of the database.

MdbClickhouseClusterFormatSchema

Name string

The name of the format schema.

Type string

Type of the format schema.

Uri string

Format schema file URL. You can only use format schemas stored in Yandex Object Storage.

Name string

The name of the format schema.

Type string

Type of the format schema.

Uri string

Format schema file URL. You can only use format schemas stored in Yandex Object Storage.

name String

The name of the format schema.

type String

Type of the format schema.

uri String

Format schema file URL. You can only use format schemas stored in Yandex Object Storage.

name string

The name of the format schema.

type string

Type of the format schema.

uri string

Format schema file URL. You can only use format schemas stored in Yandex Object Storage.

name str

The name of the format schema.

type str

Type of the format schema.

uri str

Format schema file URL. You can only use format schemas stored in Yandex Object Storage.

name String

The name of the format schema.

type String

Type of the format schema.

uri String

Format schema file URL. You can only use format schemas stored in Yandex Object Storage.
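
As an illustrative sketch, a single format schema entry might look like the fragment below; the name, type value, and URL are placeholders, and the exact type enum should be checked against the official documentation:

// Fragment: goes inside new Yandex.MdbClickhouseClusterArgs { ... }
FormatSchemas =
{
    new Yandex.Inputs.MdbClickhouseClusterFormatSchemaArgs
    {
        Name = "test_schema",                                          // placeholder name
        Type = "FORMAT_SCHEMA_TYPE_CAPNPROTO",                         // assumed enum value
        Uri = "https://storage.yandexcloud.net/<bucket>/schema.capnp", // placeholder Object Storage URL
    },
},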

MdbClickhouseClusterHost

Type string

The type of the host to be deployed. Can be either CLICKHOUSE or ZOOKEEPER.

Zone string

The availability zone where the ClickHouse host will be created. For more information see the official documentation.

AssignPublicIp bool

Sets whether the host should get a public IP address on creation. Can be either true or false.

Fqdn string

The fully qualified domain name of the host.

ShardName string

The name of the shard to which the host belongs.

SubnetId string

The ID of the subnet, to which the host belongs. The subnet must be a part of the network to which the cluster belongs.

Type string

The type of the host to be deployed. Can be either CLICKHOUSE or ZOOKEEPER.

Zone string

The availability zone where the ClickHouse host will be created. For more information see the official documentation.

AssignPublicIp bool

Sets whether the host should get a public IP address on creation. Can be either true or false.

Fqdn string

The fully qualified domain name of the host.

ShardName string

The name of the shard to which the host belongs.

SubnetId string

The ID of the subnet, to which the host belongs. The subnet must be a part of the network to which the cluster belongs.

type String

The type of the host to be deployed. Can be either CLICKHOUSE or ZOOKEEPER.

zone String

The availability zone where the ClickHouse host will be created. For more information see the official documentation.

assignPublicIp Boolean

Sets whether the host should get a public IP address on creation. Can be either true or false.

fqdn String

The fully qualified domain name of the host.

shardName String

The name of the shard to which the host belongs.

subnetId String

The ID of the subnet, to which the host belongs. The subnet must be a part of the network to which the cluster belongs.

type string

The type of the host to be deployed. Can be either CLICKHOUSE or ZOOKEEPER.

zone string

The availability zone where the ClickHouse host will be created. For more information see the official documentation.

assignPublicIp boolean

Sets whether the host should get a public IP address on creation. Can be either true or false.

fqdn string

The fully qualified domain name of the host.

shardName string

The name of the shard to which the host belongs.

subnetId string

The ID of the subnet, to which the host belongs. The subnet must be a part of the network to which the cluster belongs.

type str

The type of the host to be deployed. Can be either CLICKHOUSE or ZOOKEEPER.

zone str

The availability zone where the ClickHouse host will be created. For more information see the official documentation.

assign_public_ip bool

Sets whether the host should get a public IP address on creation. Can be either true or false.

fqdn str

The fully qualified domain name of the host.

shard_name str

The name of the shard to which the host belongs.

subnet_id str

The ID of the subnet, to which the host belongs. The subnet must be a part of the network to which the cluster belongs.

type String

The type of the host to be deployed. Can be either CLICKHOUSE or ZOOKEEPER.

zone String

The availability zone where the ClickHouse host will be created. For more information see the official documentation.

assignPublicIp Boolean

Sets whether the host should get a public IP address on creation. Can be either true or false.

fqdn String

The fully qualified domain name of the host.

shardName String

The name of the shard to which the host belongs.

subnetId String

The ID of the subnet, to which the host belongs. The subnet must be a part of the network to which the cluster belongs.
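
As a minimal sketch, a single ClickHouse host in one availability zone could be declared like this; subnet is an assumed VpcSubnet resource defined elsewhere in the program, and the other values are illustrative:

// Fragment: goes inside new Yandex.MdbClickhouseClusterArgs { ... }
Hosts =
{
    new Yandex.Inputs.MdbClickhouseClusterHostArgs
    {
        Type = "CLICKHOUSE",      // host role in the cluster
        Zone = "ru-central1-a",   // availability zone for the host
        SubnetId = subnet.Id,     // subnet must belong to the cluster's network
        AssignPublicIp = false,   // no public IP on creation
    },
},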

MdbClickhouseClusterMaintenanceWindow

Type string

Type of maintenance window. Can be either ANYTIME or WEEKLY. A day and hour of window need to be specified with weekly window.

Day string

Day of week for maintenance window if window type is weekly. Possible values: MON, TUE, WED, THU, FRI, SAT, SUN.

Hour int

Hour of day in UTC time zone (1-24) for maintenance window if window type is weekly.

Type string

Type of maintenance window. Can be either ANYTIME or WEEKLY. The day and hour of the window must be specified for the weekly window type.

Day string

Day of week for maintenance window if window type is weekly. Possible values: MON, TUE, WED, THU, FRI, SAT, SUN.

Hour int

Hour of day in UTC time zone (1-24) for maintenance window if window type is weekly.

type String

Type of maintenance window. Can be either ANYTIME or WEEKLY. The day and hour of the window must be specified for the weekly window type.

day String

Day of week for maintenance window if window type is weekly. Possible values: MON, TUE, WED, THU, FRI, SAT, SUN.

hour Integer

Hour of day in UTC time zone (1-24) for maintenance window if window type is weekly.

type string

Type of maintenance window. Can be either ANYTIME or WEEKLY. The day and hour of the window must be specified for the weekly window type.

day string

Day of week for maintenance window if window type is weekly. Possible values: MON, TUE, WED, THU, FRI, SAT, SUN.

hour number

Hour of day in UTC time zone (1-24) for maintenance window if window type is weekly.

type str

Type of maintenance window. Can be either ANYTIME or WEEKLY. The day and hour of the window must be specified for the weekly window type.

day str

Day of week for maintenance window if window type is weekly. Possible values: MON, TUE, WED, THU, FRI, SAT, SUN.

hour int

Hour of day in UTC time zone (1-24) for maintenance window if window type is weekly.

type String

Type of maintenance window. Can be either ANYTIME or WEEKLY. The day and hour of the window must be specified for the weekly window type.

day String

Day of week for maintenance window if window type is weekly. Possible values: MON, TUE, WED, THU, FRI, SAT, SUN.

hour Number

Hour of day in UTC time zone (1-24) for maintenance window if window type is weekly.
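
As a sketch only, a weekly maintenance window could be expressed in C# roughly as follows, assuming the generated Yandex.Inputs.MdbClickhouseClusterMaintenanceWindowArgs input type:

// Hypothetical weekly maintenance window; ANYTIME needs no day or hour.
var maintenanceWindow = new Yandex.Inputs.MdbClickhouseClusterMaintenanceWindowArgs
{
    Type = "WEEKLY",
    Day = "SAT",
    Hour = 12,   // 1-24, UTC
};

Such a value would be passed as the cluster's MaintenanceWindow argument.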

MdbClickhouseClusterMlModel

Name string

The name of the ml model.

Type string

Type of the model.

Uri string

Model file URL. You can only use models stored in Yandex Object Storage.

Name string

The name of the ml model.

Type string

Type of the model.

Uri string

Model file URL. You can only use models stored in Yandex Object Storage.

name String

The name of the ml model.

type String

Type of the model.

uri String

Model file URL. You can only use models stored in Yandex Object Storage.

name string

The name of the ml model.

type string

Type of the model.

uri string

Model file URL. You can only use models stored in Yandex Object Storage.

name str

The name of the ml model.

type str

Type of the model.

uri str

Model file URL. You can only use models stored in Yandex Object Storage.

name String

The name of the ml model.

type String

Type of the model.

uri String

Model file URL. You can only use models stored in Yandex Object Storage.
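
A hedged C# sketch of an ml model block, assuming the generated Yandex.Inputs.MdbClickhouseClusterMlModelArgs input type; the model type value and the Object Storage URL below are illustrative assumptions, not values from this documentation:

// Hypothetical ml model definition.
var mlModel = new Yandex.Inputs.MdbClickhouseClusterMlModelArgs
{
    Name = "my_model",                                         // placeholder model name
    Type = "ML_MODEL_TYPE_CATBOOST",                           // assumed model type value
    Uri = "https://storage.yandexcloud.net/bucket/model.bin",  // model stored in Yandex Object Storage
};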

MdbClickhouseClusterShardGroup

Name string

The name of the shard group.

ShardNames List<string>

List of shard names that belong to the shard group.

Description string

Description of the shard group.

Name string

The name of the shard group.

ShardNames []string

List of shard names that belong to the shard group.

Description string

Description of the shard group.

name String

The name of the shard group.

shardNames List<String>

List of shard names that belong to the shard group.

description String

Description of the shard group.

name string

The name of the shard group.

shardNames string[]

List of shard names that belong to the shard group.

description string

Description of the shard group.

name str

The name of the shard group.

shard_names Sequence[str]

List of shard names that belong to the shard group.

description str

Description of the shard group.

name String

The name of the shard group.

shardNames List<String>

List of shard names that belong to the shard group.

description String

Description of the shard group.
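
A minimal C# sketch of a shard group, assuming the generated Yandex.Inputs.MdbClickhouseClusterShardGroupArgs input type and placeholder shard names:

// Hypothetical shard group referencing existing shards by name.
var shardGroup = new Yandex.Inputs.MdbClickhouseClusterShardGroupArgs
{
    Name = "group1",
    Description = "Main shard group",
    ShardNames =
    {
        "shard1",
        "shard2",
    },
};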

MdbClickhouseClusterUser

Name string

The name of the user.

Password string

The password of the user.

Permissions List<MdbClickhouseClusterUserPermission>

Set of permissions granted to the user. The structure is documented below.

Quotas List<MdbClickhouseClusterUserQuota>

Set of user quotas. The structure is documented below.

Settings MdbClickhouseClusterUserSettings

Custom settings for the user. The structure is documented below.

Name string

The name of the user.

Password string

The password of the user.

Permissions []MdbClickhouseClusterUserPermission

Set of permissions granted to the user. The structure is documented below.

Quotas []MdbClickhouseClusterUserQuota

Set of user quotas. The structure is documented below.

Settings MdbClickhouseClusterUserSettings

Custom settings for the user. The structure is documented below.

name String

The name of the user.

password String

The password of the user.

permissions List<MdbClickhouseClusterUserPermission>

Set of permissions granted to the user. The structure is documented below.

quotas List<MdbClickhouseClusterUserQuota>

Set of user quotas. The structure is documented below.

settings MdbClickhouseClusterUserSettings

Custom settings for the user. The structure is documented below.

name string

The name of the user.

password string

The password of the user.

permissions MdbClickhouseClusterUserPermission[]

Set of permissions granted to the user. The structure is documented below.

quotas MdbClickhouseClusterUserQuota[]

Set of user quotas. The structure is documented below.

settings MdbClickhouseClusterUserSettings

Custom settings for the user. The structure is documented below.

name str

The name of the user.

password str

The password of the user.

permissions Sequence[MdbClickhouseClusterUserPermission]

Set of permissions granted to the user. The structure is documented below.

quotas Sequence[MdbClickhouseClusterUserQuota]

Set of user quotas. The structure is documented below.

settings MdbClickhouseClusterUserSettings

Custom settings for the user. The structure is documented below.

name String

The name of the user.

password String

The password of the user.

permissions List<Property Map>

Set of permissions granted to the user. The structure is documented below.

quotas List<Property Map>

Set of user quotas. The structure is documented below.

settings Property Map

Custom settings for the user. The structure is documented below.
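
Putting the user-related blocks together, here is a hedged C# sketch assuming the generated Yandex.Inputs.MdbClickhouseClusterUser*Args input types; the user name, password, database name, and setting values are placeholders:

// Hypothetical user with one permission, one quota, and a couple of custom settings.
var analyticsUser = new Yandex.Inputs.MdbClickhouseClusterUserArgs
{
    Name = "analytics",
    Password = "your_password",
    Permissions =
    {
        new Yandex.Inputs.MdbClickhouseClusterUserPermissionArgs
        {
            DatabaseName = "db_name",   // database this user may access
        },
    },
    Quotas =
    {
        new Yandex.Inputs.MdbClickhouseClusterUserQuotaArgs
        {
            IntervalDuration = 3600000, // quota interval of one hour, in milliseconds
            Queries = 10000,
            Errors = 1000,
        },
    },
    Settings = new Yandex.Inputs.MdbClickhouseClusterUserSettingsArgs
    {
        MaxMemoryUsageForUser = 1000000000,        // bytes
        OutputFormatJsonQuote64bitIntegers = true,
    },
};

Such a value would typically be added to the cluster's Users list.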

MdbClickhouseClusterUserPermission

DatabaseName string

The name of the database that the permission grants access to.

DatabaseName string

The name of the database that the permission grants access to.

databaseName String

The name of the database that the permission grants access to.

databaseName string

The name of the database that the permission grants access to.

database_name str

The name of the database that the permission grants access to.

databaseName String

The name of the database that the permission grants access to.

MdbClickhouseClusterUserQuota

IntervalDuration int

Duration of interval for quota in milliseconds.

Errors int

The number of queries that threw exception.

ExecutionTime int

The total query execution time, in milliseconds (wall time).

Queries int

The total number of queries.

ReadRows int

The total number of source rows read from tables for running the query, on all remote servers.

ResultRows int

The total number of rows given as the result.

IntervalDuration int

Duration of interval for quota in milliseconds.

Errors int

The number of queries that threw exception.

ExecutionTime int

The total query execution time, in milliseconds (wall time).

Queries int

The total number of queries.

ReadRows int

The total number of source rows read from tables for running the query, on all remote servers.

ResultRows int

The total number of rows given as the result.

intervalDuration Integer

Duration of interval for quota in milliseconds.

errors Integer

The number of queries that threw exception.

executionTime Integer

The total query execution time, in milliseconds (wall time).

queries Integer

The total number of queries.

readRows Integer

The total number of source rows read from tables for running the query, on all remote servers.

resultRows Integer

The total number of rows given as the result.

intervalDuration number

Duration of interval for quota in milliseconds.

errors number

The number of queries that threw exception.

executionTime number

The total query execution time, in milliseconds (wall time).

queries number

The total number of queries.

readRows number

The total number of source rows read from tables for running the query, on all remote servers.

resultRows number

The total number of rows given as the result.

interval_duration int

Duration of interval for quota in milliseconds.

errors int

The number of queries that threw exception.

execution_time int

The total query execution time, in milliseconds (wall time).

queries int

The total number of queries.

read_rows int

The total number of source rows read from tables for running the query, on all remote servers.

result_rows int

The total number of rows given as the result.

intervalDuration Number

Duration of interval for quota in milliseconds.

errors Number

The number of queries that threw exception.

executionTime Number

The total query execution time, in milliseconds (wall time).

queries Number

The total number of queries.

readRows Number

The total number of source rows read from tables for running the query, on all remote servers.

resultRows Number

The total number of rows given as the result.

MdbClickhouseClusterUserSettings

AddHttpCorsHeader bool

Include CORS headers in HTTP responses.

AllowDdl bool

Allows or denies DDL queries.

Compile bool

Enable compilation of queries.

CompileExpressions bool

Turn on expression compilation.

ConnectTimeout int

Connect timeout in milliseconds on the socket used for communicating with the client.

CountDistinctImplementation string

Specifies which of the uniq* functions should be used to perform the COUNT(DISTINCT …) construction.

DistinctOverflowMode string

Sets behaviour on overflow when using DISTINCT. Possible values:

DistributedAggregationMemoryEfficient bool

Determine the behavior of distributed subqueries.

DistributedDdlTaskTimeout int

Timeout for DDL queries, in milliseconds.

DistributedProductMode string

Changes the behaviour of distributed subqueries.

EmptyResultForAggregationByEmptySet bool

Allows returning an empty result.

EnableHttpCompression bool

Enables or disables data compression in the response to an HTTP request.

FallbackToStaleReplicasForDistributedQueries bool

Forces a query to an out-of-date replica if updated data is not available.

ForceIndexByDate bool

Disables query execution if the index can’t be used by date.

ForcePrimaryKey bool

Disables query execution if indexing by the primary key is not possible.

GroupByOverflowMode string

Sets behaviour on overflow while GROUP BY operation. Possible values:

GroupByTwoLevelThreshold int

Sets the threshold of the number of keys, after that the two-level aggregation should be used.

GroupByTwoLevelThresholdBytes int

Sets the threshold of the number of bytes, after that the two-level aggregation should be used.

HttpConnectionTimeout int

Timeout for HTTP connection in milliseconds.

HttpHeadersProgressInterval int

Sets minimal interval between notifications about request process in HTTP header X-ClickHouse-Progress.

HttpReceiveTimeout int

Timeout for HTTP connection in milliseconds.

HttpSendTimeout int

Timeout for HTTP connection in milliseconds.

InputFormatDefaultsForOmittedFields bool

When performing INSERT queries, replace omitted input column values with default values of the respective columns.

InputFormatValuesInterpretExpressions bool

Enables or disables the full SQL parser if the fast stream parser can’t parse the data.

InsertQuorum int

Enables the quorum writes.

InsertQuorumTimeout int

Write to a quorum timeout in milliseconds.

JoinOverflowMode string

Sets behaviour on overflow in JOIN. Possible values:

JoinUseNulls bool

Sets the type of JOIN behaviour. When merging tables, empty cells may appear. ClickHouse fills them differently based on this setting.

JoinedSubqueryRequiresAlias bool

Require aliases for subselects and table functions in FROM when more than one table is present.

LowCardinalityAllowInNativeFormat bool

Allows or restricts using the LowCardinality data type with the Native format.

MaxAstDepth int

Maximum abstract syntax tree depth.

MaxAstElements int

Maximum abstract syntax tree elements.

MaxBlockSize int

A recommendation for what size of the block (in a count of rows) to load from tables.

MaxBytesBeforeExternalGroupBy int

Limit in bytes for using memory for GROUP BY before using swap on disk.

MaxBytesBeforeExternalSort int

This setting is the equivalent of max_bytes_before_external_group_by, but it applies to the sort operation (ORDER BY) rather than to aggregation.

MaxBytesInDistinct int

Limits the maximum size of a hash table in bytes (uncompressed data) when using DISTINCT.

MaxBytesInJoin int

Limit on maximum size of the hash table for JOIN, in bytes.

MaxBytesInSet int

Limit on the number of bytes in the set resulting from the execution of the IN section.

MaxBytesToRead int

Limits the maximum number of bytes (uncompressed data) that can be read from a table when running a query.

MaxBytesToSort int

Limits the maximum number of bytes (uncompressed data) that can be read from a table for sorting.

MaxBytesToTransfer int

Limits the maximum number of bytes (uncompressed data) that can be passed to a remote server or saved in a temporary table when using GLOBAL IN.

MaxColumnsToRead int

Limits the maximum number of columns that can be read from a table in a single query.

MaxExecutionTime int

Limits the maximum query execution time in milliseconds.

MaxExpandedAstElements int

Maximum abstract syntax tree depth after expansion of aliases.

MaxInsertBlockSize int

The size of blocks (in a count of rows) to form for insertion into a table.

MaxMemoryUsage int

Limits the maximum memory usage (in bytes) for processing queries on a single server.

MaxMemoryUsageForUser int

Limits the maximum memory usage (in bytes) for processing of user's queries on a single server.

MaxNetworkBandwidth int

Limits the speed of the data exchange over the network in bytes per second.

MaxNetworkBandwidthForUser int

Limits the speed of the data exchange over the network in bytes per second.

MaxQuerySize int

The maximum part of a query that can be taken to RAM for parsing with the SQL parser.

MaxReplicaDelayForDistributedQueries int

Disables lagging replicas for distributed queries.

MaxResultBytes int

Limits the number of bytes in the result.

MaxResultRows int

Limits the number of rows in the result.

MaxRowsInDistinct int

Limits the maximum number of different rows when using DISTINCT.

MaxRowsInJoin int

Limit on maximum size of the hash table for JOIN, in rows.

MaxRowsInSet int

Limit on the number of rows in the set resulting from the execution of the IN section.

MaxRowsToGroupBy int

Limits the maximum number of unique keys received from aggregation function.

MaxRowsToRead int

Limits the maximum number of rows that can be read from a table when running a query.

MaxRowsToSort int

Limits the maximum number of rows that can be read from a table for sorting.

MaxRowsToTransfer int

Limits the maximum number of rows that can be passed to a remote server or saved in a temporary table when using GLOBAL IN.

MaxTemporaryColumns int

Limits the maximum number of temporary columns that must be kept in RAM at the same time when running a query, including constant columns.

MaxTemporaryNonConstColumns int

Limits the maximum number of temporary columns that must be kept in RAM at the same time when running a query, excluding constant columns.

MaxThreads int

The maximum number of query processing threads, excluding threads for retrieving data from remote servers.

MergeTreeMaxBytesToUseCache int

If ClickHouse should read more than merge_tree_max_bytes_to_use_cache bytes in one query, it doesn’t use the cache of uncompressed blocks.

MergeTreeMaxRowsToUseCache int

If ClickHouse should read more than merge_tree_max_rows_to_use_cache rows in one query, it doesn’t use the cache of uncompressed blocks.

MergeTreeMinBytesForConcurrentRead int

If the number of bytes to read from one file of a MergeTree-engine table exceeds merge_tree_min_bytes_for_concurrent_read, then ClickHouse tries to concurrently read from this file in several threads.

MergeTreeMinRowsForConcurrentRead int

If the number of rows to be read from a file of a MergeTree table exceeds merge_tree_min_rows_for_concurrent_read then ClickHouse tries to perform a concurrent reading from this file on several threads.

MinBytesToUseDirectIo int

The minimum data volume required for using direct I/O access to the storage disk.

MinCountToCompile int

How many times to potentially use a compiled chunk of code before running compilation.

MinCountToCompileExpression int

A query waits for expression compilation process to complete prior to continuing execution.

MinExecutionSpeed int

Minimal execution speed in rows per second.

MinExecutionSpeedBytes int

Minimal execution speed in bytes per second.

MinInsertBlockSizeBytes int

Sets the minimum number of bytes in the block which can be inserted into a table by an INSERT query.

MinInsertBlockSizeRows int

Sets the minimum number of rows in the block which can be inserted into a table by an INSERT query.

OutputFormatJsonQuote64bitIntegers bool

If the value is true, integers appear in quotes when using JSON* Int64 and UInt64 formats (for compatibility with most JavaScript implementations); otherwise, integers are output without the quotes.

OutputFormatJsonQuoteDenormals bool

Enables +nan, -nan, +inf, -inf outputs in JSON output format.

Priority int

Query priority.

QuotaMode string

Quota accounting mode.

ReadOverflowMode string

Sets behaviour on overflow while read. Possible values:

Readonly int

Restricts permissions for read data, write data, and change settings queries.

ReceiveTimeout int

Receive timeout in milliseconds on the socket used for communicating with the client.

ReplicationAlterPartitionsSync int

For ALTER ... ATTACH|DETACH|DROP queries, you can use the replication_alter_partitions_sync setting to set up waiting.

ResultOverflowMode string

Sets behaviour on overflow in result. Possible values:

SelectSequentialConsistency bool

Enables or disables sequential consistency for SELECT queries.

SendProgressInHttpHeaders bool

Enables or disables X-ClickHouse-Progress HTTP response headers in clickhouse-server responses.

SendTimeout int

Send timeout in milliseconds on the socket used for communicating with the client.

SetOverflowMode string

Sets behaviour on overflow in the resulting set. Possible values:

SkipUnavailableShards bool

Enables or disables silently skipping of unavailable shards.

SortOverflowMode string

Sets behaviour on overflow while sort. Possible values:

TimeoutOverflowMode string

Sets behaviour on overflow. Possible values:

TransferOverflowMode string

Sets behaviour on overflow. Possible values:

TransformNullIn bool

Enables equality of NULL values for IN operator.

UseUncompressedCache bool

Whether to use a cache of uncompressed blocks.

AddHttpCorsHeader bool

Include CORS headers in HTTP responses.

AllowDdl bool

Allows or denies DDL queries.

Compile bool

Enable compilation of queries.

CompileExpressions bool

Turn on expression compilation.

ConnectTimeout int

Connect timeout in milliseconds on the socket used for communicating with the client.

CountDistinctImplementation string

Specifies which of the uniq* functions should be used to perform the COUNT(DISTINCT …) construction.

DistinctOverflowMode string

Sets behaviour on overflow when using DISTINCT. Possible values:

DistributedAggregationMemoryEfficient bool

Determine the behavior of distributed subqueries.

DistributedDdlTaskTimeout int

Timeout for DDL queries, in milliseconds.

DistributedProductMode string

Changes the behaviour of distributed subqueries.

EmptyResultForAggregationByEmptySet bool

Allows returning an empty result.

EnableHttpCompression bool

Enables or disables data compression in the response to an HTTP request.

FallbackToStaleReplicasForDistributedQueries bool

Forces a query to an out-of-date replica if updated data is not available.

ForceIndexByDate bool

Disables query execution if the index can’t be used by date.

ForcePrimaryKey bool

Disables query execution if indexing by the primary key is not possible.

GroupByOverflowMode string

Sets behaviour on overflow while GROUP BY operation. Possible values:

GroupByTwoLevelThreshold int

Sets the threshold of the number of keys, after that the two-level aggregation should be used.

GroupByTwoLevelThresholdBytes int

Sets the threshold of the number of bytes, after that the two-level aggregation should be used.

HttpConnectionTimeout int

Timeout for HTTP connection in milliseconds.

HttpHeadersProgressInterval int

Sets minimal interval between notifications about request process in HTTP header X-ClickHouse-Progress.

HttpReceiveTimeout int

Timeout for HTTP connection in milliseconds.

HttpSendTimeout int

Timeout for HTTP connection in milliseconds.

InputFormatDefaultsForOmittedFields bool

When performing INSERT queries, replace omitted input column values with default values of the respective columns.

InputFormatValuesInterpretExpressions bool

Enables or disables the full SQL parser if the fast stream parser can’t parse the data.

InsertQuorum int

Enables the quorum writes.

InsertQuorumTimeout int

Write to a quorum timeout in milliseconds.

JoinOverflowMode string

Sets behaviour on overflow in JOIN. Possible values:

JoinUseNulls bool

Sets the type of JOIN behaviour. When merging tables, empty cells may appear. ClickHouse fills them differently based on this setting.

JoinedSubqueryRequiresAlias bool

Require aliases for subselects and table functions in FROM when more than one table is present.

LowCardinalityAllowInNativeFormat bool

Allows or restricts using the LowCardinality data type with the Native format.

MaxAstDepth int

Maximum abstract syntax tree depth.

MaxAstElements int

Maximum abstract syntax tree elements.

MaxBlockSize int

A recommendation for what size of the block (in a count of rows) to load from tables.

MaxBytesBeforeExternalGroupBy int

Limit in bytes for using memory for GROUP BY before using swap on disk.

MaxBytesBeforeExternalSort int

This setting is the equivalent of max_bytes_before_external_group_by, but it applies to the sort operation (ORDER BY) rather than to aggregation.

MaxBytesInDistinct int

Limits the maximum size of a hash table in bytes (uncompressed data) when using DISTINCT.

MaxBytesInJoin int

Limit on maximum size of the hash table for JOIN, in bytes.

MaxBytesInSet int

Limit on the number of bytes in the set resulting from the execution of the IN section.

MaxBytesToRead int

Limits the maximum number of bytes (uncompressed data) that can be read from a table when running a query.

MaxBytesToSort int

Limits the maximum number of bytes (uncompressed data) that can be read from a table for sorting.

MaxBytesToTransfer int

Limits the maximum number of bytes (uncompressed data) that can be passed to a remote server or saved in a temporary table when using GLOBAL IN.

MaxColumnsToRead int

Limits the maximum number of columns that can be read from a table in a single query.

MaxExecutionTime int

Limits the maximum query execution time in milliseconds.

MaxExpandedAstElements int

Maximum abstract syntax tree depth after expansion of aliases.

MaxInsertBlockSize int

The size of blocks (in a count of rows) to form for insertion into a table.

MaxMemoryUsage int

Limits the maximum memory usage (in bytes) for processing queries on a single server.

MaxMemoryUsageForUser int

Limits the maximum memory usage (in bytes) for processing of user's queries on a single server.

MaxNetworkBandwidth int

Limits the speed of the data exchange over the network in bytes per second.

MaxNetworkBandwidthForUser int

Limits the speed of the data exchange over the network in bytes per second.

MaxQuerySize int

The maximum part of a query that can be taken to RAM for parsing with the SQL parser.

MaxReplicaDelayForDistributedQueries int

Disables lagging replicas for distributed queries.

MaxResultBytes int

Limits the number of bytes in the result.

MaxResultRows int

Limits the number of rows in the result.

MaxRowsInDistinct int

Limits the maximum number of different rows when using DISTINCT.

MaxRowsInJoin int

Limit on maximum size of the hash table for JOIN, in rows.

MaxRowsInSet int

Limit on the number of rows in the set resulting from the execution of the IN section.

MaxRowsToGroupBy int

Limits the maximum number of unique keys received from aggregation function.

MaxRowsToRead int

Limits the maximum number of rows that can be read from a table when running a query.

MaxRowsToSort int

Limits the maximum number of rows that can be read from a table for sorting.

MaxRowsToTransfer int

Limits the maximum number of rows that can be passed to a remote server or saved in a temporary table when using GLOBAL IN.

MaxTemporaryColumns int

Limits the maximum number of temporary columns that must be kept in RAM at the same time when running a query, including constant columns.

MaxTemporaryNonConstColumns int

Limits the maximum number of temporary columns that must be kept in RAM at the same time when running a query, excluding constant columns.

MaxThreads int

The maximum number of query processing threads, excluding threads for retrieving data from remote servers.

MergeTreeMaxBytesToUseCache int

If ClickHouse should read more than merge_tree_max_bytes_to_use_cache bytes in one query, it doesn’t use the cache of uncompressed blocks.

MergeTreeMaxRowsToUseCache int

If ClickHouse should read more than merge_tree_max_rows_to_use_cache rows in one query, it doesn’t use the cache of uncompressed blocks.

MergeTreeMinBytesForConcurrentRead int

If the number of bytes to read from one file of a MergeTree-engine table exceeds merge_tree_min_bytes_for_concurrent_read, then ClickHouse tries to concurrently read from this file in several threads.

MergeTreeMinRowsForConcurrentRead int

If the number of rows to be read from a file of a MergeTree table exceeds merge_tree_min_rows_for_concurrent_read then ClickHouse tries to perform a concurrent reading from this file on several threads.

MinBytesToUseDirectIo int

The minimum data volume required for using direct I/O access to the storage disk.

MinCountToCompile int

How many times to potentially use a compiled chunk of code before running compilation.

MinCountToCompileExpression int

A query waits for expression compilation process to complete prior to continuing execution.

MinExecutionSpeed int

Minimal execution speed in rows per second.

MinExecutionSpeedBytes int

Minimal execution speed in bytes per second.

MinInsertBlockSizeBytes int

Sets the minimum number of bytes in the block which can be inserted into a table by an INSERT query.

MinInsertBlockSizeRows int

Sets the minimum number of rows in the block which can be inserted into a table by an INSERT query.

OutputFormatJsonQuote64bitIntegers bool

If the value is true, integers appear in quotes when using JSON* Int64 and UInt64 formats (for compatibility with most JavaScript implementations); otherwise, integers are output without the quotes.

OutputFormatJsonQuoteDenormals bool

Enables +nan, -nan, +inf, -inf outputs in JSON output format.

Priority int

Query priority.

QuotaMode string

Quota accounting mode.

ReadOverflowMode string

Sets behaviour on overflow while read. Possible values:

Readonly int

Restricts permissions for read data, write data, and change settings queries.

ReceiveTimeout int

Receive timeout in milliseconds on the socket used for communicating with the client.

ReplicationAlterPartitionsSync int

For ALTER ... ATTACH|DETACH|DROP queries, you can use the replication_alter_partitions_sync setting to set up waiting.

ResultOverflowMode string

Sets behaviour on overflow in result. Possible values:

SelectSequentialConsistency bool

Enables or disables sequential consistency for SELECT queries.

SendProgressInHttpHeaders bool

Enables or disables X-ClickHouse-Progress HTTP response headers in clickhouse-server responses.

SendTimeout int

Send timeout in milliseconds on the socket used for communicating with the client.

SetOverflowMode string

Sets behaviour on overflow in the resulting set. Possible values:

SkipUnavailableShards bool

Enables or disables silently skipping of unavailable shards.

SortOverflowMode string

Sets behaviour on overflow while sort. Possible values:

TimeoutOverflowMode string

Sets behaviour on overflow. Possible values:

TransferOverflowMode string

Sets behaviour on overflow. Possible values:

TransformNullIn bool

Enables equality of NULL values for IN operator.

UseUncompressedCache bool

Whether to use a cache of uncompressed blocks.

addHttpCorsHeader Boolean

Include CORS headers in HTTP responses.

allowDdl Boolean

Allows or denies DDL queries.

compile Boolean

Enable compilation of queries.

compileExpressions Boolean

Turn on expression compilation.

connectTimeout Integer

Connect timeout in milliseconds on the socket used for communicating with the client.

countDistinctImplementation String

Specifies which of the uniq* functions should be used to perform the COUNT(DISTINCT …) construction.

distinctOverflowMode String

Sets behaviour on overflow when using DISTINCT. Possible values:

distributedAggregationMemoryEfficient Boolean

Determine the behavior of distributed subqueries.

distributedDdlTaskTimeout Integer

Timeout for DDL queries, in milliseconds.

distributedProductMode String

Changes the behaviour of distributed subqueries.

emptyResultForAggregationByEmptySet Boolean

Allows returning an empty result.

enableHttpCompression Boolean

Enables or disables data compression in the response to an HTTP request.

fallbackToStaleReplicasForDistributedQueries Boolean

Forces a query to an out-of-date replica if updated data is not available.

forceIndexByDate Boolean

Disables query execution if the index can’t be used by date.

forcePrimaryKey Boolean

Disables query execution if indexing by the primary key is not possible.

groupByOverflowMode String

Sets behaviour on overflow while GROUP BY operation. Possible values:

groupByTwoLevelThreshold Integer

Sets the threshold of the number of keys, after that the two-level aggregation should be used.

groupByTwoLevelThresholdBytes Integer

Sets the threshold of the number of bytes, after that the two-level aggregation should be used.

httpConnectionTimeout Integer

Timeout for HTTP connection in milliseconds.

httpHeadersProgressInterval Integer

Sets minimal interval between notifications about request process in HTTP header X-ClickHouse-Progress.

httpReceiveTimeout Integer

Timeout for HTTP connection in milliseconds.

httpSendTimeout Integer

Timeout for HTTP connection in milliseconds.

inputFormatDefaultsForOmittedFields Boolean

When performing INSERT queries, replace omitted input column values with default values of the respective columns.

inputFormatValuesInterpretExpressions Boolean

Enables or disables the full SQL parser if the fast stream parser can’t parse the data.

insertQuorum Integer

Enables the quorum writes.

insertQuorumTimeout Integer

Write to a quorum timeout in milliseconds.

joinOverflowMode String

Sets behaviour on overflow in JOIN. Possible values:

joinUseNulls Boolean

Sets the type of JOIN behaviour. When merging tables, empty cells may appear. ClickHouse fills them differently based on this setting.

joinedSubqueryRequiresAlias Boolean

Require aliases for subselects and table functions in FROM when more than one table is present.

lowCardinalityAllowInNativeFormat Boolean

Allows or restricts using the LowCardinality data type with the Native format.

maxAstDepth Integer

Maximum abstract syntax tree depth.

maxAstElements Integer

Maximum abstract syntax tree elements.

maxBlockSize Integer

A recommendation for what size of the block (in a count of rows) to load from tables.

maxBytesBeforeExternalGroupBy Integer

Limit in bytes for using memory for GROUP BY before using swap on disk.

maxBytesBeforeExternalSort Integer

This setting is the equivalent of max_bytes_before_external_group_by, but it applies to the sort operation (ORDER BY) rather than to aggregation.

maxBytesInDistinct Integer

Limits the maximum size of a hash table in bytes (uncompressed data) when using DISTINCT.

maxBytesInJoin Integer

Limit on maximum size of the hash table for JOIN, in bytes.

maxBytesInSet Integer

Limit on the number of bytes in the set resulting from the execution of the IN section.

maxBytesToRead Integer

Limits the maximum number of bytes (uncompressed data) that can be read from a table when running a query.

maxBytesToSort Integer

Limits the maximum number of bytes (uncompressed data) that can be read from a table for sorting.

maxBytesToTransfer Integer

Limits the maximum number of bytes (uncompressed data) that can be passed to a remote server or saved in a temporary table when using GLOBAL IN.

maxColumnsToRead Integer

Limits the maximum number of columns that can be read from a table in a single query.

maxExecutionTime Integer

Limits the maximum query execution time in milliseconds.

maxExpandedAstElements Integer

Maximum abstract syntax tree depth after expansion of aliases.

maxInsertBlockSize Integer

The size of blocks (in a count of rows) to form for insertion into a table.

maxMemoryUsage Integer

Limits the maximum memory usage (in bytes) for processing queries on a single server.

maxMemoryUsageForUser Integer

Limits the maximum memory usage (in bytes) for processing of user's queries on a single server.

maxNetworkBandwidth Integer

Limits the speed of the data exchange over the network in bytes per second.

maxNetworkBandwidthForUser Integer

Limits the speed of the data exchange over the network in bytes per second.

maxQuerySize Integer

The maximum part of a query that can be taken to RAM for parsing with the SQL parser.

maxReplicaDelayForDistributedQueries Integer

Disables lagging replicas for distributed queries.

maxResultBytes Integer

Limits the number of bytes in the result.

maxResultRows Integer

Limits the number of rows in the result.

maxRowsInDistinct Integer

Limits the maximum number of different rows when using DISTINCT.

maxRowsInJoin Integer

Limit on maximum size of the hash table for JOIN, in rows.

maxRowsInSet Integer

Limit on the number of rows in the set resulting from the execution of the IN section.

maxRowsToGroupBy Integer

Limits the maximum number of unique keys received from aggregation function.

maxRowsToRead Integer

Limits the maximum number of rows that can be read from a table when running a query.

maxRowsToSort Integer

Limits the maximum number of rows that can be read from a table for sorting.

maxRowsToTransfer Integer

Limits the maximum number of rows that can be passed to a remote server or saved in a temporary table when using GLOBAL IN.

maxTemporaryColumns Integer

Limits the maximum number of temporary columns that must be kept in RAM at the same time when running a query, including constant columns.

maxTemporaryNonConstColumns Integer

Limits the maximum number of temporary columns that must be kept in RAM at the same time when running a query, excluding constant columns.

maxThreads Integer

The maximum number of query processing threads, excluding threads for retrieving data from remote servers.

mergeTreeMaxBytesToUseCache Integer

If ClickHouse should read more than merge_tree_max_bytes_to_use_cache bytes in one query, it doesn’t use the cache of uncompressed blocks.

mergeTreeMaxRowsToUseCache Integer

If ClickHouse should read more than merge_tree_max_rows_to_use_cache rows in one query, it doesn’t use the cache of uncompressed blocks.

mergeTreeMinBytesForConcurrentRead Integer

If the number of bytes to read from one file of a MergeTree-engine table exceeds merge_tree_min_bytes_for_concurrent_read, then ClickHouse tries to concurrently read from this file in several threads.

mergeTreeMinRowsForConcurrentRead Integer

If the number of rows to be read from a file of a MergeTree table exceeds merge_tree_min_rows_for_concurrent_read then ClickHouse tries to perform a concurrent reading from this file on several threads.

minBytesToUseDirectIo Integer

The minimum data volume required for using direct I/O access to the storage disk.

minCountToCompile Integer

How many times to potentially use a compiled chunk of code before running compilation.

minCountToCompileExpression Integer

A query waits for expression compilation process to complete prior to continuing execution.

minExecutionSpeed Integer

Minimal execution speed in rows per second.

minExecutionSpeedBytes Integer

Minimal execution speed in bytes per second.

minInsertBlockSizeBytes Integer

Sets the minimum number of bytes in the block which can be inserted into a table by an INSERT query.

minInsertBlockSizeRows Integer

Sets the minimum number of rows in the block which can be inserted into a table by an INSERT query.

outputFormatJsonQuote64bitIntegers Boolean

If the value is true, integers appear in quotes when using JSON* Int64 and UInt64 formats (for compatibility with most JavaScript implementations); otherwise, integers are output without the quotes.

outputFormatJsonQuoteDenormals Boolean

Enables +nan, -nan, +inf, -inf outputs in JSON output format.

priority Integer

Query priority.

quotaMode String

Quota accounting mode.

readOverflowMode String

Sets behaviour on overflow while read. Possible values:

readonly Integer

Restricts permissions for read data, write data, and change settings queries.

receiveTimeout Integer

Receive timeout in milliseconds on the socket used for communicating with the client.

replicationAlterPartitionsSync Integer

For ALTER ... ATTACH|DETACH|DROP queries, you can use the replication_alter_partitions_sync setting to set up waiting.

resultOverflowMode String

Sets behaviour on overflow in result. Possible values:

selectSequentialConsistency Boolean

Enables or disables sequential consistency for SELECT queries.

sendProgressInHttpHeaders Boolean

Enables or disables X-ClickHouse-Progress HTTP response headers in clickhouse-server responses.

sendTimeout Integer

Send timeout in milliseconds on the socket used for communicating with the client.

setOverflowMode String

Sets behaviour on overflow in the resulting set. Possible values:

skipUnavailableShards Boolean

Enables or disables silently skipping of unavailable shards.

sortOverflowMode String

Sets behaviour on overflow while sort. Possible values:

timeoutOverflowMode String

Sets behaviour on overflow. Possible values:

transferOverflowMode String

Sets behaviour on overflow. Possible values:

transformNullIn Boolean

Enables equality of NULL values for IN operator.

useUncompressedCache Boolean

Whether to use a cache of uncompressed blocks.

addHttpCorsHeader boolean

Include CORS headers in HTTP responses.

allowDdl boolean

Allows or denies DDL queries.

compile boolean

Enable compilation of queries.

compileExpressions boolean

Turn on expression compilation.

connectTimeout number

Connect timeout in milliseconds on the socket used for communicating with the client.

countDistinctImplementation string

Specifies which of the uniq* functions should be used to perform the COUNT(DISTINCT …) construction.

distinctOverflowMode string

Sets behaviour on overflow when using DISTINCT. Possible values:

distributedAggregationMemoryEfficient boolean

Determine the behavior of distributed subqueries.

distributedDdlTaskTimeout number

Timeout for DDL queries, in milliseconds.

distributedProductMode string

Changes the behaviour of distributed subqueries.

emptyResultForAggregationByEmptySet boolean

Allows returning an empty result.

enableHttpCompression boolean

Enables or disables data compression in the response to an HTTP request.

fallbackToStaleReplicasForDistributedQueries boolean

Forces a query to an out-of-date replica if updated data is not available.

forceIndexByDate boolean

Disables query execution if the index can’t be used by date.

forcePrimaryKey boolean

Disables query execution if indexing by the primary key is not possible.

groupByOverflowMode string

Sets behaviour on overflow while GROUP BY operation. Possible values:

groupByTwoLevelThreshold number

Sets the threshold of the number of keys, after that the two-level aggregation should be used.

groupByTwoLevelThresholdBytes number

Sets the threshold of the number of bytes, after that the two-level aggregation should be used.

httpConnectionTimeout number

Timeout for HTTP connection in milliseconds.

httpHeadersProgressInterval number

Sets minimal interval between notifications about request process in HTTP header X-ClickHouse-Progress.

httpReceiveTimeout number

Timeout for HTTP connection in milliseconds.

httpSendTimeout number

Timeout for HTTP connection in milliseconds.

inputFormatDefaultsForOmittedFields boolean

When performing INSERT queries, replace omitted input column values with default values of the respective columns.

inputFormatValuesInterpretExpressions boolean

Enables or disables the full SQL parser if the fast stream parser can’t parse the data.

insertQuorum number

Enables the quorum writes.

insertQuorumTimeout number

Write to a quorum timeout in milliseconds.

joinOverflowMode string

Sets behaviour on overflow in JOIN. Possible values:

joinUseNulls boolean

Sets the type of JOIN behaviour. When merging tables, empty cells may appear. ClickHouse fills them differently based on this setting.

joinedSubqueryRequiresAlias boolean

Require aliases for subselects and table functions in FROM when more than one table is present.

lowCardinalityAllowInNativeFormat boolean

Allows or restricts using the LowCardinality data type with the Native format.

maxAstDepth number

Maximum abstract syntax tree depth.

maxAstElements number

Maximum abstract syntax tree elements.

maxBlockSize number

A recommendation for what size of the block (in a count of rows) to load from tables.

maxBytesBeforeExternalGroupBy number

Limit in bytes for using memory for GROUP BY before using swap on disk.

maxBytesBeforeExternalSort number

This setting is the equivalent of max_bytes_before_external_group_by, but it applies to the sort operation (ORDER BY) rather than to aggregation.

maxBytesInDistinct number

Limits the maximum size of a hash table in bytes (uncompressed data) when using DISTINCT.

maxBytesInJoin number

Limit on maximum size of the hash table for JOIN, in bytes.

maxBytesInSet number

Limit on the number of bytes in the set resulting from the execution of the IN section.

maxBytesToRead number

Limits the maximum number of bytes (uncompressed data) that can be read from a table when running a query.

maxBytesToSort number

Limits the maximum number of bytes (uncompressed data) that can be read from a table for sorting.

maxBytesToTransfer number

Limits the maximum number of bytes (uncompressed data) that can be passed to a remote server or saved in a temporary table when using GLOBAL IN.

maxColumnsToRead number

Limits the maximum number of columns that can be read from a table in a single query.

maxExecutionTime number

Limits the maximum query execution time in milliseconds.

maxExpandedAstElements number

Maximum abstract syntax tree depth after expansion of aliases.

maxInsertBlockSize number

The size of blocks (in a count of rows) to form for insertion into a table.

maxMemoryUsage number

Limits the maximum memory usage (in bytes) for processing queries on a single server.

maxMemoryUsageForUser number

Limits the maximum memory usage (in bytes) for processing of user's queries on a single server.

maxNetworkBandwidth number

Limits the speed of the data exchange over the network in bytes per second.

maxNetworkBandwidthForUser number

Limits the speed of the data exchange over the network in bytes per second.

maxQuerySize number

The maximum part of a query that can be taken to RAM for parsing with the SQL parser.

maxReplicaDelayForDistributedQueries number

Disables lagging replicas for distributed queries.

maxResultBytes number

Limits the number of bytes in the result.

maxResultRows number

Limits the number of rows in the result.

maxRowsInDistinct number

Limits the maximum number of different rows when using DISTINCT.

maxRowsInJoin number

Limit on maximum size of the hash table for JOIN, in rows.

maxRowsInSet number

Limit on the number of rows in the set resulting from the execution of the IN section.

maxRowsToGroupBy number

Limits the maximum number of unique keys received from aggregation function.

maxRowsToRead number

Limits the maximum number of rows that can be read from a table when running a query.

maxRowsToSort number

Limits the maximum number of rows that can be read from a table for sorting.

maxRowsToTransfer number

Limits the maximum number of rows that can be passed to a remote server or saved in a temporary table when using GLOBAL IN.

maxTemporaryColumns number

Limits the maximum number of temporary columns that must be kept in RAM at the same time when running a query, including constant columns.

maxTemporaryNonConstColumns number

Limits the maximum number of temporary columns that must be kept in RAM at the same time when running a query, excluding constant columns.

maxThreads number

The maximum number of query processing threads, excluding threads for retrieving data from remote servers.

mergeTreeMaxBytesToUseCache number

If ClickHouse should read more than merge_tree_max_bytes_to_use_cache bytes in one query, it doesn’t use the cache of uncompressed blocks.

mergeTreeMaxRowsToUseCache number

If ClickHouse should read more than merge_tree_max_rows_to_use_cache rows in one query, it doesn’t use the cache of uncompressed blocks.

mergeTreeMinBytesForConcurrentRead number

If the number of bytes to read from one file of a MergeTree-engine table exceeds merge_tree_min_bytes_for_concurrent_read, then ClickHouse tries to concurrently read from this file in several threads.

mergeTreeMinRowsForConcurrentRead number

If the number of rows to be read from a file of a MergeTree table exceeds merge_tree_min_rows_for_concurrent_read then ClickHouse tries to perform a concurrent reading from this file on several threads.

minBytesToUseDirectIo number

The minimum data volume required for using direct I/O access to the storage disk.

minCountToCompile number

How many times to potentially use a compiled chunk of code before running compilation.

minCountToCompileExpression number

A query waits for expression compilation process to complete prior to continuing execution.

minExecutionSpeed number

Minimal execution speed in rows per second.

minExecutionSpeedBytes number

Minimal execution speed in bytes per second.

minInsertBlockSizeBytes number

Sets the minimum number of bytes in the block which can be inserted into a table by an INSERT query.

minInsertBlockSizeRows number

Sets the minimum number of rows in the block which can be inserted into a table by an INSERT query.

outputFormatJsonQuote64bitIntegers boolean

If the value is true, integers appear in quotes when using JSON* Int64 and UInt64 formats (for compatibility with most JavaScript implementations); otherwise, integers are output without the quotes.

outputFormatJsonQuoteDenormals boolean

Enables +nan, -nan, +inf, -inf outputs in JSON output format.

priority number

Query priority.

quotaMode string

Quota accounting mode.

readOverflowMode string

Sets behaviour on overflow while read. Possible values:

readonly number

Restricts permissions for read data, write data, and change settings queries.

receiveTimeout number

Receive timeout in milliseconds on the socket used for communicating with the client.

replicationAlterPartitionsSync number

For ALTER ... ATTACH|DETACH|DROP queries, you can use the replication_alter_partitions_sync setting to set up waiting.

resultOverflowMode string

Sets behaviour on overflow in result. Possible values:

selectSequentialConsistency boolean

Enables or disables sequential consistency for SELECT queries.

sendProgressInHttpHeaders boolean

Enables or disables X-ClickHouse-Progress HTTP response headers in clickhouse-server responses.

sendTimeout number

Send timeout in milliseconds on the socket used for communicating with the client.

setOverflowMode string

Sets behaviour on overflow in the resulting set. Possible values:

skipUnavailableShards boolean

Enables or disables silently skipping of unavailable shards.

sortOverflowMode string

Sets behaviour on overflow while sort. Possible values:

timeoutOverflowMode string

Sets behaviour on overflow. Possible values:

transferOverflowMode string

Sets behaviour on overflow. Possible values:

transformNullIn boolean

Enables equality of NULL values for IN operator.

useUncompressedCache boolean

Whether to use a cache of uncompressed blocks.

add_http_cors_header bool

Include CORS headers in HTTP responses.

allow_ddl bool

Allows or denies DDL queries.

compile bool

Enable compilation of queries.

compile_expressions bool

Turn on expression compilation.

connect_timeout int

Connect timeout in milliseconds on the socket used for communicating with the client.

count_distinct_implementation str

Specifies which of the uniq* functions should be used to perform the COUNT(DISTINCT …) construction.

distinct_overflow_mode str

Sets behaviour on overflow when using DISTINCT. Possible values:

distributed_aggregation_memory_efficient bool

Determine the behavior of distributed subqueries.

distributed_ddl_task_timeout int

Timeout for DDL queries, in milliseconds.

distributed_product_mode str

Changes the behaviour of distributed subqueries.

empty_result_for_aggregation_by_empty_set bool

Allows returning an empty result.

enable_http_compression bool

Enables or disables data compression in the response to an HTTP request.

fallback_to_stale_replicas_for_distributed_queries bool

Forces a query to an out-of-date replica if updated data is not available.

force_index_by_date bool

Disables query execution if the index can’t be used by date.

force_primary_key bool

Disables query execution if indexing by the primary key is not possible.

group_by_overflow_mode str

Sets behaviour on overflow while GROUP BY operation. Possible values:

group_by_two_level_threshold int

Sets the threshold of the number of keys, after that the two-level aggregation should be used.

group_by_two_level_threshold_bytes int

Sets the threshold of the number of bytes, after that the two-level aggregation should be used.

http_connection_timeout int

Timeout for HTTP connection in milliseconds.

http_headers_progress_interval int

Sets minimal interval between notifications about request process in HTTP header X-ClickHouse-Progress.

http_receive_timeout int

Timeout for HTTP connection in milliseconds.

http_send_timeout int

Timeout for HTTP connection in milliseconds.

input_format_defaults_for_omitted_fields bool

When performing INSERT queries, replace omitted input column values with default values of the respective columns.

input_format_values_interpret_expressions bool

Enables or disables the full SQL parser if the fast stream parser can’t parse the data.

insert_quorum int

Enables the quorum writes.

insert_quorum_timeout int

Write to a quorum timeout in milliseconds.

join_overflow_mode str

Sets behaviour on overflow in JOIN. Possible values:

join_use_nulls bool

Sets the type of JOIN behaviour. When merging tables, empty cells may appear. ClickHouse fills them differently based on this setting.

joined_subquery_requires_alias bool

Require aliases for subselects and table functions in FROM when more than one table is present.

low_cardinality_allow_in_native_format bool

Allows or restricts using the LowCardinality data type with the Native format.

max_ast_depth int

Maximum abstract syntax tree depth.

max_ast_elements int

Maximum abstract syntax tree elements.

max_block_size int

A recommendation for what size of the block (in a count of rows) to load from tables.

max_bytes_before_external_group_by int

Limit in bytes for using memory for GROUP BY before using swap on disk.

max_bytes_before_external_sort int

This setting is the equivalent of max_bytes_before_external_group_by, but it applies to the sort operation (ORDER BY) rather than to aggregation.

max_bytes_in_distinct int

Limits the maximum size of a hash table in bytes (uncompressed data) when using DISTINCT.

max_bytes_in_join int

Limit on maximum size of the hash table for JOIN, in bytes.

max_bytes_in_set int

Limit on the number of bytes in the set resulting from the execution of the IN section.

max_bytes_to_read int

Limits the maximum number of bytes (uncompressed data) that can be read from a table when running a query.

max_bytes_to_sort int

Limits the maximum number of bytes (uncompressed data) that can be read from a table for sorting.

max_bytes_to_transfer int

Limits the maximum number of bytes (uncompressed data) that can be passed to a remote server or saved in a temporary table when using GLOBAL IN.

max_columns_to_read int

Limits the maximum number of columns that can be read from a table in a single query.

max_execution_time int

Limits the maximum query execution time in milliseconds.

max_expanded_ast_elements int

Maximum abstract syntax tree depth after expansion of aliases.

max_insert_block_size int

The size of blocks (in a count of rows) to form for insertion into a table.

max_memory_usage int

Limits the maximum memory usage (in bytes) for processing queries on a single server.

max_memory_usage_for_user int

Limits the maximum memory usage (in bytes) for processing of user's queries on a single server.

max_network_bandwidth int

Limits the speed of the data exchange over the network in bytes per second.

max_network_bandwidth_for_user int

Limits the speed of the data exchange over the network in bytes per second.

max_query_size int

The maximum part of a query that can be taken to RAM for parsing with the SQL parser.

max_replica_delay_for_distributed_queries int

Disables lagging replicas for distributed queries.

max_result_bytes int

Limits the number of bytes in the result.

max_result_rows int

Limits the number of rows in the result.

max_rows_in_distinct int

Limits the maximum number of different rows when using DISTINCT.

max_rows_in_join int

Limit on maximum size of the hash table for JOIN, in rows.

max_rows_in_set int

Limit on the number of rows in the set resulting from the execution of the IN section.

max_rows_to_group_by int

Limits the maximum number of unique keys received from aggregation function.

max_rows_to_read int

Limits the maximum number of rows that can be read from a table when running a query.

max_rows_to_sort int

Limits the maximum number of rows that can be read from a table for sorting.

max_rows_to_transfer int

Limits the maximum number of rows that can be passed to a remote server or saved in a temporary table when using GLOBAL IN.

max_temporary_columns int

Limits the maximum number of temporary columns that must be kept in RAM at the same time when running a query, including constant columns.

max_temporary_non_const_columns int

Limits the maximum number of temporary columns that must be kept in RAM at the same time when running a query, excluding constant columns.

max_threads int

The maximum number of query processing threads, excluding threads for retrieving data from remote servers.

merge_tree_max_bytes_to_use_cache int

If ClickHouse should read more than merge_tree_max_bytes_to_use_cache bytes in one query, it doesn’t use the cache of uncompressed blocks.

merge_tree_max_rows_to_use_cache int

If ClickHouse should read more than merge_tree_max_rows_to_use_cache rows in one query, it doesn’t use the cache of uncompressed blocks.

merge_tree_min_bytes_for_concurrent_read int

If the number of bytes to read from one file of a MergeTree-engine table exceeds merge_tree_min_bytes_for_concurrent_read, then ClickHouse tries to concurrently read from this file in several threads.

merge_tree_min_rows_for_concurrent_read int

If the number of rows to be read from a file of a MergeTree table exceeds merge_tree_min_rows_for_concurrent_read then ClickHouse tries to perform a concurrent reading from this file on several threads.

min_bytes_to_use_direct_io int

The minimum data volume required for using direct I/O access to the storage disk.

min_count_to_compile int

How many times to potentially use a compiled chunk of code before running compilation.

min_count_to_compile_expression int

A query waits for expression compilation process to complete prior to continuing execution.

min_execution_speed int

Minimal execution speed in rows per second.

min_execution_speed_bytes int

Minimal execution speed in bytes per second.

min_insert_block_size_bytes int

Sets the minimum number of bytes in the block which can be inserted into a table by an INSERT query.

min_insert_block_size_rows int

Sets the minimum number of rows in the block which can be inserted into a table by an INSERT query.

output_format_json_quote64bit_integers bool

If the value is true, integers appear in quotes when using JSON* Int64 and UInt64 formats (for compatibility with most JavaScript implementations); otherwise, integers are output without the quotes.

output_format_json_quote_denormals bool

Enables +nan, -nan, +inf, -inf outputs in JSON output format.

priority int

Query priority.

quota_mode str

Quota accounting mode.

read_overflow_mode str

Sets behaviour on overflow while reading. Possible values:

readonly int

Restricts permissions for read data, write data, and change settings queries.

receive_timeout int

Receive timeout in milliseconds on the socket used for communicating with the client.

replication_alter_partitions_sync int

For ALTER ... ATTACH|DETACH|DROP queries, you can use the replication_alter_partitions_sync setting to set up waiting.

result_overflow_mode str

Sets behaviour on overflow in result. Possible values:

select_sequential_consistency bool

Enables or disables sequential consistency for SELECT queries.

send_progress_in_http_headers bool

Enables or disables X-ClickHouse-Progress HTTP response headers in clickhouse-server responses.

send_timeout int

Send timeout in milliseconds on the socket used for communicating with the client.

set_overflow_mode str

Sets behaviour on overflow in the resulting set. Possible values:

skip_unavailable_shards bool

Enables or disables silently skipping of unavailable shards.

sort_overflow_mode str

Sets behaviour on overflow while sorting. Possible values:

timeout_overflow_mode str

Sets behaviour on overflow. Possible values:

transfer_overflow_mode str

Sets behaviour on overflow. Possible values:

transform_null_in bool

Enables equality of NULL values for IN operator.

use_uncompressed_cache bool

Whether to use a cache of uncompressed blocks.

addHttpCorsHeader Boolean

Include CORS headers in HTTP responses.

allowDdl Boolean

Allows or denies DDL queries.

compile Boolean

Enable compilation of queries.

compileExpressions Boolean

Turn on expression compilation.

connectTimeout Number

Connect timeout in milliseconds on the socket used for communicating with the client.

countDistinctImplementation String

Specifies which of the uniq* functions should be used to perform the COUNT(DISTINCT …) construction.

distinctOverflowMode String

Sets behaviour on overflow when using DISTINCT. Possible values:

distributedAggregationMemoryEfficient Boolean

Determine the behavior of distributed subqueries.

distributedDdlTaskTimeout Number

Timeout for DDL queries, in milliseconds.

distributedProductMode String

Changes the behaviour of distributed subqueries.

emptyResultForAggregationByEmptySet Boolean

Allows returning an empty result when aggregating by an empty set.

enableHttpCompression Boolean

Enables or disables data compression in the response to an HTTP request.

fallbackToStaleReplicasForDistributedQueries Boolean

Forces a query to an out-of-date replica if updated data is not available.

forceIndexByDate Boolean

Disables query execution if the index can’t be used by date.

forcePrimaryKey Boolean

Disables query execution if indexing by the primary key is not possible.

groupByOverflowMode String

Sets behaviour on overflow during a GROUP BY operation. Possible values:

groupByTwoLevelThreshold Number

Sets the threshold of the number of keys, after which two-level aggregation should be used.

groupByTwoLevelThresholdBytes Number

Sets the threshold of the number of bytes, after which two-level aggregation should be used.

httpConnectionTimeout Number

Timeout for HTTP connection in milliseconds.

httpHeadersProgressInterval Number

Sets the minimal interval between notifications about request progress in the X-ClickHouse-Progress HTTP header.

httpReceiveTimeout Number

Timeout for HTTP connection in milliseconds.

httpSendTimeout Number

Timeout for HTTP connection in milliseconds.

inputFormatDefaultsForOmittedFields Boolean

When performing INSERT queries, replace omitted input column values with default values of the respective columns.

inputFormatValuesInterpretExpressions Boolean

Enables or disables the full SQL parser if the fast stream parser can’t parse the data.

insertQuorum Number

Enables the quorum writes.

insertQuorumTimeout Number

Write to a quorum timeout in milliseconds.

joinOverflowMode String

Sets behaviour on overflow in JOIN. Possible values:

joinUseNulls Boolean

Sets the type of JOIN behaviour. When merging tables, empty cells may appear. ClickHouse fills them differently based on this setting.

joinedSubqueryRequiresAlias Boolean

Requires aliases for subselects and table functions in FROM when more than one table is present.

lowCardinalityAllowInNativeFormat Boolean

Allows or restricts using the LowCardinality data type with the Native format.

maxAstDepth Number

Maximum abstract syntax tree depth.

maxAstElements Number

Maximum abstract syntax tree elements.

maxBlockSize Number

A recommendation for what size of the block (in a count of rows) to load from tables.

maxBytesBeforeExternalGroupBy Number

Limit in bytes for using memory for GROUP BY before using swap on disk.

maxBytesBeforeExternalSort Number

This setting is equivalent to the max_bytes_before_external_group_by setting, except that it applies to the sort operation (ORDER BY) rather than aggregation.

maxBytesInDistinct Number

Limits the maximum size of a hash table in bytes (uncompressed data) when using DISTINCT.

maxBytesInJoin Number

Limit on maximum size of the hash table for JOIN, in bytes.

maxBytesInSet Number

Limit on the number of bytes in the set resulting from the execution of the IN section.

maxBytesToRead Number

Limits the maximum number of bytes (uncompressed data) that can be read from a table when running a query.

maxBytesToSort Number

Limits the maximum number of bytes (uncompressed data) that can be read from a table for sorting.

maxBytesToTransfer Number

Limits the maximum number of bytes (uncompressed data) that can be passed to a remote server or saved in a temporary table when using GLOBAL IN.

maxColumnsToRead Number

Limits the maximum number of columns that can be read from a table in a single query.

maxExecutionTime Number

Limits the maximum query execution time in milliseconds.

maxExpandedAstElements Number

Maximum abstract syntax tree elements after expansion of aliases.

maxInsertBlockSize Number

The size of blocks (in a count of rows) to form for insertion into a table.

maxMemoryUsage Number

Limits the maximum memory usage (in bytes) for processing queries on a single server.

maxMemoryUsageForUser Number

Limits the maximum memory usage (in bytes) for processing of user's queries on a single server.

maxNetworkBandwidth Number

Limits the speed of the data exchange over the network in bytes per second.

maxNetworkBandwidthForUser Number

Limits the speed of the data exchange over the network in bytes per second.

maxQuerySize Number

The maximum part of a query that can be taken to RAM for parsing with the SQL parser.

maxReplicaDelayForDistributedQueries Number

Disables lagging replicas for distributed queries.

maxResultBytes Number

Limits the number of bytes in the result.

maxResultRows Number

Limits the number of rows in the result.

maxRowsInDistinct Number

Limits the maximum number of different rows when using DISTINCT.

maxRowsInJoin Number

Limit on maximum size of the hash table for JOIN, in rows.

maxRowsInSet Number

Limit on the number of rows in the set resulting from the execution of the IN section.

maxRowsToGroupBy Number

Limits the maximum number of unique keys received from aggregation function.

maxRowsToRead Number

Limits the maximum number of rows that can be read from a table when running a query.

maxRowsToSort Number

Limits the maximum number of rows that can be read from a table for sorting.

maxRowsToTransfer Number

Limits the maximum number of rows that can be passed to a remote server or saved in a temporary table when using GLOBAL IN.

maxTemporaryColumns Number

Limits the maximum number of temporary columns that must be kept in RAM at the same time when running a query, including constant columns.

maxTemporaryNonConstColumns Number

Limits the maximum number of temporary columns that must be kept in RAM at the same time when running a query, excluding constant columns.

maxThreads Number

The maximum number of query processing threads, excluding threads for retrieving data from remote servers.

mergeTreeMaxBytesToUseCache Number

If ClickHouse should read more than merge_tree_max_bytes_to_use_cache bytes in one query, it doesn’t use the cache of uncompressed blocks.

mergeTreeMaxRowsToUseCache Number

If ClickHouse should read more than merge_tree_max_rows_to_use_cache rows in one query, it doesn’t use the cache of uncompressed blocks.

mergeTreeMinBytesForConcurrentRead Number

If the number of bytes to read from one file of a MergeTree-engine table exceeds merge_tree_min_bytes_for_concurrent_read, then ClickHouse tries to concurrently read from this file in several threads.

mergeTreeMinRowsForConcurrentRead Number

If the number of rows to be read from a file of a MergeTree table exceeds merge_tree_min_rows_for_concurrent_read then ClickHouse tries to perform a concurrent reading from this file on several threads.

minBytesToUseDirectIo Number

The minimum data volume required for using direct I/O access to the storage disk.

minCountToCompile Number

How many times to potentially use a compiled chunk of code before running compilation.

minCountToCompileExpression Number

A query waits for expression compilation process to complete prior to continuing execution.

minExecutionSpeed Number

Minimal execution speed in rows per second.

minExecutionSpeedBytes Number

Minimal execution speed in bytes per second.

minInsertBlockSizeBytes Number

Sets the minimum number of bytes in the block which can be inserted into a table by an INSERT query.

minInsertBlockSizeRows Number

Sets the minimum number of rows in the block which can be inserted into a table by an INSERT query.

outputFormatJsonQuote64bitIntegers Boolean

If the value is true, integers appear in quotes when using JSON* Int64 and UInt64 formats (for compatibility with most JavaScript implementations); otherwise, integers are output without the quotes.

outputFormatJsonQuoteDenormals Boolean

Enables +nan, -nan, +inf, -inf outputs in JSON output format.

priority Number

Query priority.

quotaMode String

Quota accounting mode.

readOverflowMode String

Sets behaviour on overflow while reading. Possible values:

readonly Number

Restricts permissions for read data, write data, and change settings queries.

receiveTimeout Number

Receive timeout in milliseconds on the socket used for communicating with the client.

replicationAlterPartitionsSync Number

For ALTER ... ATTACH|DETACH|DROP queries, you can use the replication_alter_partitions_sync setting to set up waiting.

resultOverflowMode String

Sets behaviour on overflow in result. Possible values:

selectSequentialConsistency Boolean

Enables or disables sequential consistency for SELECT queries.

sendProgressInHttpHeaders Boolean

Enables or disables X-ClickHouse-Progress HTTP response headers in clickhouse-server responses.

sendTimeout Number

Send timeout in milliseconds on the socket used for communicating with the client.

setOverflowMode String

Sets behaviour on overflow in the resulting set. Possible values:

skipUnavailableShards Boolean

Enables or disables silently skipping of unavailable shards.

sortOverflowMode String

Sets behaviour on overflow while sorting. Possible values:

timeoutOverflowMode String

Sets behaviour on overflow. Possible values:

transferOverflowMode String

Sets behaviour on overflow. Possible values:

transformNullIn Boolean

Enables equality of NULL values for IN operator.

useUncompressedCache Boolean

Whether to use a cache of uncompressed blocks.
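
These per-user ClickHouse settings are applied through the settings block of a cluster user. The following is a minimal C# sketch, assuming the input types Yandex.Inputs.MdbClickhouseClusterUserArgs and Yandex.Inputs.MdbClickhouseClusterUserSettingsArgs (following the naming pattern used elsewhere on this page); the cluster's other required arguments and the specific setting values are illustrative only.

using Pulumi;
using Yandex = Pulumi.Yandex;

class MyStack : Stack
{
    public MyStack()
    {
        var cluster = new Yandex.MdbClickhouseCluster("exampleCluster", new Yandex.MdbClickhouseClusterArgs
        {
            // Required arguments such as Environment, NetworkId, Clickhouse,
            // Database and Host blocks are omitted here for brevity.
            Users =
            {
                new Yandex.Inputs.MdbClickhouseClusterUserArgs
                {
                    Name = "analytics",
                    Password = "your_password_here",
                    Settings = new Yandex.Inputs.MdbClickhouseClusterUserSettingsArgs
                    {
                        MaxMemoryUsage = 1073741824,    // 1 GiB per query on a single server
                        MaxThreads = 8,                 // query processing threads
                        JoinUseNulls = true,            // fill empty JOIN cells with NULL
                        ReadOverflowMode = "throw",     // abort the query once the read limit is exceeded
                    },
                },
            },
        });
    }
}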

MdbClickhouseClusterZookeeper

Resources MdbClickhouseClusterZookeeperResources

Resources allocated to hosts of the ZooKeeper subcluster. The structure is documented below.

Resources MdbClickhouseClusterZookeeperResources

Resources allocated to hosts of the ZooKeeper subcluster. The structure is documented below.

resources MdbClickhouseClusterZookeeperResources

Resources allocated to hosts of the ZooKeeper subcluster. The structure is documented below.

resources MdbClickhouseClusterZookeeperResources

Resources allocated to hosts of the ZooKeeper subcluster. The structure is documented below.

resources MdbClickhouseClusterZookeeperResources

Resources allocated to hosts of the ZooKeeper subcluster. The structure is documented below.

resources Property Map

Resources allocated to hosts of the ZooKeeper subcluster. The structure is documented below.

MdbClickhouseClusterZookeeperResources

DiskSize int

Volume of the storage available to a ZooKeeper host, in gigabytes.

DiskTypeId string

Type of the storage of ZooKeeper hosts. For more information see the official documentation.

ResourcePresetId string

The ID of the preset for computational resources (CPU, memory) available to a ZooKeeper host.

DiskSize int

Volume of the storage available to a ZooKeeper host, in gigabytes.

DiskTypeId string

Type of the storage of ZooKeeper hosts. For more information see the official documentation.

ResourcePresetId string

The ID of the preset for computational resources (CPU, memory) available to a ZooKeeper host.

diskSize Integer

Volume of the storage available to a ZooKeeper host, in gigabytes.

diskTypeId String

Type of the storage of ZooKeeper hosts. For more information see the official documentation.

resourcePresetId String

The ID of the preset for computational resources (CPU, memory) available to a ZooKeeper host.

diskSize number

Volume of the storage available to a ZooKeeper host, in gigabytes.

diskTypeId string

Type of the storage of ZooKeeper hosts. For more information see the official documentation.

resourcePresetId string

The ID of the preset for computational resources (CPU, memory) available to a ZooKeeper host.

disk_size int

Volume of the storage available to a ZooKeeper host, in gigabytes.

disk_type_id str

Type of the storage of ZooKeeper hosts. For more information see the official documentation.

resource_preset_id str

The ID of the preset for computational resources (CPU, memory) available to a ZooKeeper host.

diskSize Number

Volume of the storage available to a ZooKeeper host, in gigabytes.

diskTypeId String

Type of the storage of ZooKeeper hosts. For more information see the official documentation.

resourcePresetId String

The ID of the preset for computational resources (CPU, memory) available to a ZooKeeper host.
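
The ZooKeeper subcluster is configured through the cluster's zookeeper block. Below is a minimal C# sketch, assuming the input types Yandex.Inputs.MdbClickhouseClusterZookeeperArgs and Yandex.Inputs.MdbClickhouseClusterZookeeperResourcesArgs (following the naming pattern used elsewhere on this page); the preset and disk type IDs are placeholders, and the cluster's other required arguments are omitted.

using Pulumi;
using Yandex = Pulumi.Yandex;

class MyStack : Stack
{
    public MyStack()
    {
        var cluster = new Yandex.MdbClickhouseCluster("exampleCluster", new Yandex.MdbClickhouseClusterArgs
        {
            // Other required cluster arguments (Environment, NetworkId, Clickhouse,
            // Database, User and Host blocks, etc.) are omitted here for brevity.
            Zookeeper = new Yandex.Inputs.MdbClickhouseClusterZookeeperArgs
            {
                Resources = new Yandex.Inputs.MdbClickhouseClusterZookeeperResourcesArgs
                {
                    ResourcePresetId = "s2.micro",  // placeholder preset ID; see the official documentation for available presets
                    DiskTypeId = "network-ssd",     // placeholder disk type ID
                    DiskSize = 10,                  // gigabytes
                },
            },
        });
    }
}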

Import

A cluster can be imported using the id of the resource, e.g.

 $ pulumi import yandex:index/mdbClickhouseCluster:MdbClickhouseCluster foo cluster_id

Package Details

Repository
https://github.com/pulumi/pulumi-yandex
License
Apache-2.0
Notes

This Pulumi package is based on the yandex Terraform Provider.