MdbKafkaTopic

Manages a topic of a Kafka cluster within Yandex Cloud. For more information, see the official documentation.

Example Usage

using Pulumi;
using Yandex = Pulumi.Yandex;

class MyStack : Stack
{
    public MyStack()
    {
        // Cluster-level settings. UnmanagedTopics = true delegates topic
        // management to separate MdbKafkaTopic resources instead of the
        // cluster's inline topic list.
        var clusterConfig = new Yandex.Inputs.MdbKafkaClusterConfigArgs
        {
            Version = "2.8",
            Zones = 
            {
                "ru-central1-a",
            },
            UnmanagedTopics = true,
            Kafka = new Yandex.Inputs.MdbKafkaClusterConfigKafkaArgs
            {
                Resources = new Yandex.Inputs.MdbKafkaClusterConfigKafkaResourcesArgs
                {
                    ResourcePresetId = "s2.micro",
                    DiskTypeId = "network-hdd",
                    DiskSize = 16,
                },
            },
        };

        var foo = new Yandex.MdbKafkaCluster("foo", new Yandex.MdbKafkaClusterArgs
        {
            NetworkId = "c64vs98keiqc7f24pvkd",
            Config = clusterConfig,
        });

        // Per-topic tuning; all millisecond/byte settings are passed as
        // strings per the provider schema.
        var topicSettings = new Yandex.Inputs.MdbKafkaTopicTopicConfigArgs
        {
            CleanupPolicy = "CLEANUP_POLICY_COMPACT",
            CompressionType = "COMPRESSION_TYPE_LZ4",
            DeleteRetentionMs = "86400000",
            FileDeleteDelayMs = "60000",
            FlushMessages = "128",
            FlushMs = "1000",
            MinCompactionLagMs = "0",
            RetentionBytes = "10737418240",
            RetentionMs = "604800000",
            MaxMessageBytes = "1048588",
            MinInsyncReplicas = "1",
            SegmentBytes = "268435456",
            Preallocate = true,
        };

        var events = new Yandex.MdbKafkaTopic("events", new Yandex.MdbKafkaTopicArgs
        {
            ClusterId = foo.Id,
            Partitions = 4,
            ReplicationFactor = 1,
            TopicConfig = topicSettings,
        });
    }

}
package main

import (
	"github.com/pulumi/pulumi-yandex/sdk/go/yandex"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		// Create the cluster with UnmanagedTopics enabled so topics are
		// managed by separate MdbKafkaTopic resources.
		// NOTE: the nested *Args config types live in the yandex package;
		// the original snippet omitted the "yandex." qualifier and would
		// not compile.
		foo, err := yandex.NewMdbKafkaCluster(ctx, "foo", &yandex.MdbKafkaClusterArgs{
			NetworkId: pulumi.String("c64vs98keiqc7f24pvkd"),
			Config: &yandex.MdbKafkaClusterConfigArgs{
				Version: pulumi.String("2.8"),
				Zones: pulumi.StringArray{
					pulumi.String("ru-central1-a"),
				},
				UnmanagedTopics: pulumi.Bool(true),
				Kafka: &yandex.MdbKafkaClusterConfigKafkaArgs{
					Resources: &yandex.MdbKafkaClusterConfigKafkaResourcesArgs{
						ResourcePresetId: pulumi.String("s2.micro"),
						DiskTypeId:       pulumi.String("network-hdd"),
						DiskSize:         pulumi.Int(16),
					},
				},
			},
		})
		if err != nil {
			return err
		}
		// Topic with explicit per-topic settings; millisecond/byte values
		// are strings per the provider schema.
		_, err = yandex.NewMdbKafkaTopic(ctx, "events", &yandex.MdbKafkaTopicArgs{
			ClusterId:         foo.ID(),
			Partitions:        pulumi.Int(4),
			ReplicationFactor: pulumi.Int(1),
			TopicConfig: &yandex.MdbKafkaTopicTopicConfigArgs{
				CleanupPolicy:      pulumi.String("CLEANUP_POLICY_COMPACT"),
				CompressionType:    pulumi.String("COMPRESSION_TYPE_LZ4"),
				DeleteRetentionMs:  pulumi.String("86400000"),
				FileDeleteDelayMs:  pulumi.String("60000"),
				FlushMessages:      pulumi.String("128"),
				FlushMs:            pulumi.String("1000"),
				MinCompactionLagMs: pulumi.String("0"),
				RetentionBytes:     pulumi.String("10737418240"),
				RetentionMs:        pulumi.String("604800000"),
				MaxMessageBytes:    pulumi.String("1048588"),
				MinInsyncReplicas:  pulumi.String("1"),
				SegmentBytes:       pulumi.String("268435456"),
				Preallocate:        pulumi.Bool(true),
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}
import pulumi
import pulumi_yandex as yandex

# Cluster config: unmanaged_topics=True delegates topic management to
# standalone MdbKafkaTopic resources.
cluster_config = yandex.MdbKafkaClusterConfigArgs(
    version="2.8",
    zones=["ru-central1-a"],
    unmanaged_topics=True,
    kafka=yandex.MdbKafkaClusterConfigKafkaArgs(
        resources=yandex.MdbKafkaClusterConfigKafkaResourcesArgs(
            resource_preset_id="s2.micro",
            disk_type_id="network-hdd",
            disk_size=16,
        ),
    ),
)
foo = yandex.MdbKafkaCluster("foo",
    network_id="c64vs98keiqc7f24pvkd",
    config=cluster_config)

# Per-topic settings; millisecond/byte values are strings per the schema.
topic_settings = yandex.MdbKafkaTopicTopicConfigArgs(
    cleanup_policy="CLEANUP_POLICY_COMPACT",
    compression_type="COMPRESSION_TYPE_LZ4",
    delete_retention_ms="86400000",
    file_delete_delay_ms="60000",
    flush_messages="128",
    flush_ms="1000",
    min_compaction_lag_ms="0",
    retention_bytes="10737418240",
    retention_ms="604800000",
    max_message_bytes="1048588",
    min_insync_replicas="1",
    segment_bytes="268435456",
    preallocate=True,
)
events = yandex.MdbKafkaTopic("events",
    cluster_id=foo.id,
    partitions=4,
    replication_factor=1,
    topic_config=topic_settings)
import * as pulumi from "@pulumi/pulumi";
import * as yandex from "@pulumi/yandex";

// Cluster with unmanagedTopics enabled so topics are managed by
// standalone MdbKafkaTopic resources.
const foo = new yandex.MdbKafkaCluster("foo", {
    networkId: "c64vs98keiqc7f24pvkd",
    config: {
        version: "2.8",
        zones: ["ru-central1-a"],
        unmanagedTopics: true,
        kafka: {
            resources: {
                resourcePresetId: "s2.micro",
                diskTypeId: "network-hdd",
                diskSize: 16,
            },
        },
    },
});
const events = new yandex.MdbKafkaTopic("events", {
    clusterId: foo.id,
    partitions: 4,
    replicationFactor: 1,
    // Millisecond/byte settings are string-typed in the provider schema
    // (the original example used bare numbers, which the SDK's types reject;
    // the C#, Go, and Python examples already pass strings).
    topicConfig: {
        cleanupPolicy: "CLEANUP_POLICY_COMPACT",
        compressionType: "COMPRESSION_TYPE_LZ4",
        deleteRetentionMs: "86400000",
        fileDeleteDelayMs: "60000",
        flushMessages: "128",
        flushMs: "1000",
        minCompactionLagMs: "0",
        retentionBytes: "10737418240",
        retentionMs: "604800000",
        maxMessageBytes: "1048588",
        minInsyncReplicas: "1",
        segmentBytes: "268435456",
        preallocate: true,
    },
});

Create an MdbKafkaTopic Resource

new MdbKafkaTopic(name: string, args: MdbKafkaTopicArgs, opts?: CustomResourceOptions);
@overload
def MdbKafkaTopic(resource_name: str,
                  opts: Optional[ResourceOptions] = None,
                  cluster_id: Optional[str] = None,
                  name: Optional[str] = None,
                  partitions: Optional[int] = None,
                  replication_factor: Optional[int] = None,
                  topic_config: Optional[MdbKafkaTopicTopicConfigArgs] = None)
@overload
def MdbKafkaTopic(resource_name: str,
                  args: MdbKafkaTopicArgs,
                  opts: Optional[ResourceOptions] = None)
func NewMdbKafkaTopic(ctx *Context, name string, args MdbKafkaTopicArgs, opts ...ResourceOption) (*MdbKafkaTopic, error)
public MdbKafkaTopic(string name, MdbKafkaTopicArgs args, CustomResourceOptions? opts = null)
name string
The unique name of the resource.
args MdbKafkaTopicArgs
The arguments to resource properties.
opts CustomResourceOptions
Bag of options to control resource's behavior.
resource_name str
The unique name of the resource.
args MdbKafkaTopicArgs
The arguments to resource properties.
opts ResourceOptions
Bag of options to control resource's behavior.
ctx Context
Context object for the current deployment.
name string
The unique name of the resource.
args MdbKafkaTopicArgs
The arguments to resource properties.
opts ResourceOption
Bag of options to control resource's behavior.
name string
The unique name of the resource.
args MdbKafkaTopicArgs
The arguments to resource properties.
opts CustomResourceOptions
Bag of options to control resource's behavior.

MdbKafkaTopic Resource Properties

To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.

Inputs

The MdbKafkaTopic resource accepts the following input properties:

ClusterId string
Partitions int
The number of the topic’s partitions.
ReplicationFactor int
Amount of data copies (replicas) for the topic in the cluster.
Name string
The name of the topic.
TopicConfig MdbKafkaTopicTopicConfigArgs
User-defined settings for the topic. The structure is documented below.
ClusterId string
Partitions int
The number of the topic’s partitions.
ReplicationFactor int
Amount of data copies (replicas) for the topic in the cluster.
Name string
The name of the topic.
TopicConfig MdbKafkaTopicTopicConfigArgs
User-defined settings for the topic. The structure is documented below.
clusterId string
partitions number
The number of the topic’s partitions.
replicationFactor number
Amount of data copies (replicas) for the topic in the cluster.
name string
The name of the topic.
topicConfig MdbKafkaTopicTopicConfigArgs
User-defined settings for the topic. The structure is documented below.
cluster_id str
partitions int
The number of the topic’s partitions.
replication_factor int
Amount of data copies (replicas) for the topic in the cluster.
name str
The name of the topic.
topic_config MdbKafkaTopicTopicConfigArgs
User-defined settings for the topic. The structure is documented below.

Outputs

All input properties are implicitly available as output properties. Additionally, the MdbKafkaTopic resource produces the following output properties:

Id string
The provider-assigned unique ID for this managed resource.
Id string
The provider-assigned unique ID for this managed resource.
id string
The provider-assigned unique ID for this managed resource.
id str
The provider-assigned unique ID for this managed resource.

Look up an Existing MdbKafkaTopic Resource

Get an existing MdbKafkaTopic resource’s state with the given name, ID, and optional extra properties used to qualify the lookup.

public static get(name: string, id: Input<ID>, state?: MdbKafkaTopicState, opts?: CustomResourceOptions): MdbKafkaTopic
@staticmethod
def get(resource_name: str,
        id: str,
        opts: Optional[ResourceOptions] = None,
        cluster_id: Optional[str] = None,
        name: Optional[str] = None,
        partitions: Optional[int] = None,
        replication_factor: Optional[int] = None,
        topic_config: Optional[MdbKafkaTopicTopicConfigArgs] = None) -> MdbKafkaTopic
func GetMdbKafkaTopic(ctx *Context, name string, id IDInput, state *MdbKafkaTopicState, opts ...ResourceOption) (*MdbKafkaTopic, error)
public static MdbKafkaTopic Get(string name, Input<string> id, MdbKafkaTopicState? state, CustomResourceOptions? opts = null)
name
The unique name of the resulting resource.
id
The unique provider ID of the resource to lookup.
state
Any extra arguments used during the lookup.
opts
A bag of options that control this resource's behavior.
resource_name
The unique name of the resulting resource.
id
The unique provider ID of the resource to lookup.
name
The unique name of the resulting resource.
id
The unique provider ID of the resource to lookup.
state
Any extra arguments used during the lookup.
opts
A bag of options that control this resource's behavior.
name
The unique name of the resulting resource.
id
The unique provider ID of the resource to lookup.
state
Any extra arguments used during the lookup.
opts
A bag of options that control this resource's behavior.

The following state arguments are supported:

ClusterId string
Name string
The name of the topic.
Partitions int
The number of the topic’s partitions.
ReplicationFactor int
Amount of data copies (replicas) for the topic in the cluster.
TopicConfig MdbKafkaTopicTopicConfigArgs
User-defined settings for the topic. The structure is documented below.
ClusterId string
Name string
The name of the topic.
Partitions int
The number of the topic’s partitions.
ReplicationFactor int
Amount of data copies (replicas) for the topic in the cluster.
TopicConfig MdbKafkaTopicTopicConfigArgs
User-defined settings for the topic. The structure is documented below.
clusterId string
name string
The name of the topic.
partitions number
The number of the topic’s partitions.
replicationFactor number
Amount of data copies (replicas) for the topic in the cluster.
topicConfig MdbKafkaTopicTopicConfigArgs
User-defined settings for the topic. The structure is documented below.
cluster_id str
name str
The name of the topic.
partitions int
The number of the topic’s partitions.
replication_factor int
Amount of data copies (replicas) for the topic in the cluster.
topic_config MdbKafkaTopicTopicConfigArgs
User-defined settings for the topic. The structure is documented below.

Supporting Types

MdbKafkaTopicTopicConfig

Import

A Kafka topic can be imported using the following format:

 $ pulumi import yandex:index/mdbKafkaTopic:MdbKafkaTopic foo {{cluster_id}}:{{topic_name}}

Package Details

Repository
https://github.com/pulumi/pulumi-yandex
License
Apache-2.0
Notes
This Pulumi package is based on the yandex Terraform Provider.