
digitalocean.DatabaseKafkaTopic

DigitalOcean v4.28.1 published on Friday, Apr 26, 2024 by Pulumi

    Provides a DigitalOcean Kafka topic for Kafka clusters.

    Example Usage

    Create a new Kafka topic

    TypeScript

    import * as pulumi from "@pulumi/pulumi";
    import * as digitalocean from "@pulumi/digitalocean";
    
    const kafka_example = new digitalocean.DatabaseCluster("kafka-example", {
        engine: "kafka",
        version: "3.5",
        size: "db-s-2vcpu-2gb",
        region: digitalocean.Region.NYC1,
        nodeCount: 3,
        tags: ["production"],
    });
    const topic_01 = new digitalocean.DatabaseKafkaTopic("topic-01", {
        clusterId: kafka_example.id,
        partitionCount: 3,
        replicationFactor: 2,
        configs: [{
            cleanupPolicy: "compact",
            compressionType: "uncompressed",
            deleteRetentionMs: "14000",
            fileDeleteDelayMs: "170000",
            flushMessages: "92233",
            flushMs: "92233720368",
            indexIntervalBytes: "40962",
            maxCompactionLagMs: "9223372036854775807",
            maxMessageBytes: "1048588",
            messageDownConversionEnable: true,
            messageFormatVersion: "3.0-IV1",
            messageTimestampDifferenceMaxMs: "9223372036854775807",
            messageTimestampType: "log_append_time",
            minCleanableDirtyRatio: 0.5,
            minCompactionLagMs: "20000",
            minInsyncReplicas: 2,
            preallocate: false,
            retentionBytes: "-1",
            retentionMs: "-1",
            segmentBytes: "209715200",
            segmentIndexBytes: "10485760",
            segmentJitterMs: "0",
            segmentMs: "604800000",
        }],
    });
    
    Python

    import pulumi
    import pulumi_digitalocean as digitalocean
    
    kafka_example = digitalocean.DatabaseCluster("kafka-example",
        engine="kafka",
        version="3.5",
        size="db-s-2vcpu-2gb",
        region=digitalocean.Region.NYC1,
        node_count=3,
        tags=["production"])
    topic_01 = digitalocean.DatabaseKafkaTopic("topic-01",
        cluster_id=kafka_example.id,
        partition_count=3,
        replication_factor=2,
        configs=[digitalocean.DatabaseKafkaTopicConfigArgs(
            cleanup_policy="compact",
            compression_type="uncompressed",
            delete_retention_ms="14000",
            file_delete_delay_ms="170000",
            flush_messages="92233",
            flush_ms="92233720368",
            index_interval_bytes="40962",
            max_compaction_lag_ms="9223372036854775807",
            max_message_bytes="1048588",
            message_down_conversion_enable=True,
            message_format_version="3.0-IV1",
            message_timestamp_difference_max_ms="9223372036854775807",
            message_timestamp_type="log_append_time",
            min_cleanable_dirty_ratio=0.5,
            min_compaction_lag_ms="20000",
            min_insync_replicas=2,
            preallocate=False,
            retention_bytes="-1",
            retention_ms="-1",
            segment_bytes="209715200",
            segment_index_bytes="10485760",
            segment_jitter_ms="0",
            segment_ms="604800000",
        )])
    
    Go

    package main
    
    import (
    	"github.com/pulumi/pulumi-digitalocean/sdk/v4/go/digitalocean"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		kafkaExample, err := digitalocean.NewDatabaseCluster(ctx, "kafka-example", &digitalocean.DatabaseClusterArgs{
    			Engine:    pulumi.String("kafka"),
    			Version:   pulumi.String("3.5"),
    			Size:      pulumi.String("db-s-2vcpu-2gb"),
    			Region:    pulumi.String(digitalocean.RegionNYC1),
    			NodeCount: pulumi.Int(3),
    			Tags: pulumi.StringArray{
    				pulumi.String("production"),
    			},
    		})
    		if err != nil {
    			return err
    		}
    		_, err = digitalocean.NewDatabaseKafkaTopic(ctx, "topic-01", &digitalocean.DatabaseKafkaTopicArgs{
    			ClusterId:         kafkaExample.ID(),
    			PartitionCount:    pulumi.Int(3),
    			ReplicationFactor: pulumi.Int(2),
    			Configs: digitalocean.DatabaseKafkaTopicConfigArray{
    				&digitalocean.DatabaseKafkaTopicConfigArgs{
    					CleanupPolicy:                   pulumi.String("compact"),
    					CompressionType:                 pulumi.String("uncompressed"),
    					DeleteRetentionMs:               pulumi.String("14000"),
    					FileDeleteDelayMs:               pulumi.String("170000"),
    					FlushMessages:                   pulumi.String("92233"),
    					FlushMs:                         pulumi.String("92233720368"),
    					IndexIntervalBytes:              pulumi.String("40962"),
    					MaxCompactionLagMs:              pulumi.String("9223372036854775807"),
    					MaxMessageBytes:                 pulumi.String("1048588"),
    					MessageDownConversionEnable:     pulumi.Bool(true),
    					MessageFormatVersion:            pulumi.String("3.0-IV1"),
    					MessageTimestampDifferenceMaxMs: pulumi.String("9223372036854775807"),
    					MessageTimestampType:            pulumi.String("log_append_time"),
    					MinCleanableDirtyRatio:          pulumi.Float64(0.5),
    					MinCompactionLagMs:              pulumi.String("20000"),
    					MinInsyncReplicas:               pulumi.Int(2),
    					Preallocate:                     pulumi.Bool(false),
    					RetentionBytes:                  pulumi.String("-1"),
    					RetentionMs:                     pulumi.String("-1"),
    					SegmentBytes:                    pulumi.String("209715200"),
    					SegmentIndexBytes:               pulumi.String("10485760"),
    					SegmentJitterMs:                 pulumi.String("0"),
    					SegmentMs:                       pulumi.String("604800000"),
    				},
    			},
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    C#

    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using DigitalOcean = Pulumi.DigitalOcean;
    
    return await Deployment.RunAsync(() => 
    {
        var kafka_example = new DigitalOcean.DatabaseCluster("kafka-example", new()
        {
            Engine = "kafka",
            Version = "3.5",
            Size = "db-s-2vcpu-2gb",
            Region = DigitalOcean.Region.NYC1,
            NodeCount = 3,
            Tags = new[]
            {
                "production",
            },
        });
    
        var topic_01 = new DigitalOcean.DatabaseKafkaTopic("topic-01", new()
        {
            ClusterId = kafka_example.Id,
            PartitionCount = 3,
            ReplicationFactor = 2,
            Configs = new[]
            {
                new DigitalOcean.Inputs.DatabaseKafkaTopicConfigArgs
                {
                    CleanupPolicy = "compact",
                    CompressionType = "uncompressed",
                    DeleteRetentionMs = "14000",
                    FileDeleteDelayMs = "170000",
                    FlushMessages = "92233",
                    FlushMs = "92233720368",
                    IndexIntervalBytes = "40962",
                    MaxCompactionLagMs = "9223372036854775807",
                    MaxMessageBytes = "1048588",
                    MessageDownConversionEnable = true,
                    MessageFormatVersion = "3.0-IV1",
                    MessageTimestampDifferenceMaxMs = "9223372036854775807",
                    MessageTimestampType = "log_append_time",
                    MinCleanableDirtyRatio = 0.5,
                    MinCompactionLagMs = "20000",
                    MinInsyncReplicas = 2,
                    Preallocate = false,
                    RetentionBytes = "-1",
                    RetentionMs = "-1",
                    SegmentBytes = "209715200",
                    SegmentIndexBytes = "10485760",
                    SegmentJitterMs = "0",
                    SegmentMs = "604800000",
                },
            },
        });
    
    });
    
    Java

    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.digitalocean.DatabaseCluster;
    import com.pulumi.digitalocean.DatabaseClusterArgs;
    import com.pulumi.digitalocean.DatabaseKafkaTopic;
    import com.pulumi.digitalocean.DatabaseKafkaTopicArgs;
    import com.pulumi.digitalocean.inputs.DatabaseKafkaTopicConfigArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            var kafka_example = new DatabaseCluster("kafka-example", DatabaseClusterArgs.builder()        
                .engine("kafka")
                .version("3.5")
                .size("db-s-2vcpu-2gb")
                .region("nyc1")
                .nodeCount(3)
                .tags("production")
                .build());
    
        var topic_01 = new DatabaseKafkaTopic("topic-01", DatabaseKafkaTopicArgs.builder()
            .clusterId(kafka_example.id())
            .partitionCount(3)
            .replicationFactor(2)
            .configs(DatabaseKafkaTopicConfigArgs.builder()
                .cleanupPolicy("compact")
                .compressionType("uncompressed")
                .deleteRetentionMs("14000")
                .fileDeleteDelayMs("170000")
                .flushMessages("92233")
                .flushMs("92233720368")
                .indexIntervalBytes("40962")
                .maxCompactionLagMs("9223372036854775807")
                .maxMessageBytes("1048588")
                .messageDownConversionEnable(true)
                .messageFormatVersion("3.0-IV1")
                .messageTimestampDifferenceMaxMs("9223372036854775807")
                .messageTimestampType("log_append_time")
                .minCleanableDirtyRatio(0.5)
                .minCompactionLagMs("20000")
                .minInsyncReplicas(2)
                .preallocate(false)
                .retentionBytes("-1")
                .retentionMs("-1")
                .segmentBytes("209715200")
                .segmentIndexBytes("10485760")
                .segmentJitterMs("0")
                .segmentMs("604800000")
                .build())
            .build());
    
        }
    }
    
    YAML

    Coming soon!
    

    Create DatabaseKafkaTopic Resource

    Resources are created with functions called constructors. To learn more about declaring and configuring resources, see Resources.

    Constructor syntax

    TypeScript

    new DatabaseKafkaTopic(name: string, args: DatabaseKafkaTopicArgs, opts?: CustomResourceOptions);
    Python

    @overload
    def DatabaseKafkaTopic(resource_name: str,
                           args: DatabaseKafkaTopicArgs,
                           opts: Optional[ResourceOptions] = None)
    
    @overload
    def DatabaseKafkaTopic(resource_name: str,
                           opts: Optional[ResourceOptions] = None,
                           cluster_id: Optional[str] = None,
                           configs: Optional[Sequence[DatabaseKafkaTopicConfigArgs]] = None,
                           name: Optional[str] = None,
                           partition_count: Optional[int] = None,
                           replication_factor: Optional[int] = None)
    Go

    func NewDatabaseKafkaTopic(ctx *Context, name string, args DatabaseKafkaTopicArgs, opts ...ResourceOption) (*DatabaseKafkaTopic, error)
    C#

    public DatabaseKafkaTopic(string name, DatabaseKafkaTopicArgs args, CustomResourceOptions? opts = null)
    Java

    public DatabaseKafkaTopic(String name, DatabaseKafkaTopicArgs args)
    public DatabaseKafkaTopic(String name, DatabaseKafkaTopicArgs args, CustomResourceOptions options)
    
    YAML

    type: digitalocean:DatabaseKafkaTopic
    properties: # The arguments to resource properties.
    options: # Bag of options to control resource's behavior.
    
    

    Parameters

    name string
    The unique name of the resource.
    args DatabaseKafkaTopicArgs
    The arguments to resource properties.
    opts CustomResourceOptions
    Bag of options to control resource's behavior.
    resource_name str
    The unique name of the resource.
    args DatabaseKafkaTopicArgs
    The arguments to resource properties.
    opts ResourceOptions
    Bag of options to control resource's behavior.
    ctx Context
    Context object for the current deployment.
    name string
    The unique name of the resource.
    args DatabaseKafkaTopicArgs
    The arguments to resource properties.
    opts ResourceOption
    Bag of options to control resource's behavior.
    name string
    The unique name of the resource.
    args DatabaseKafkaTopicArgs
    The arguments to resource properties.
    opts CustomResourceOptions
    Bag of options to control resource's behavior.
    name String
    The unique name of the resource.
    args DatabaseKafkaTopicArgs
    The arguments to resource properties.
    options CustomResourceOptions
    Bag of options to control resource's behavior.

    Example

    The following reference example uses placeholder values for all input properties.

    C#

    var databaseKafkaTopicResource = new DigitalOcean.DatabaseKafkaTopic("databaseKafkaTopicResource", new()
    {
        ClusterId = "string",
        Configs = new[]
        {
            new DigitalOcean.Inputs.DatabaseKafkaTopicConfigArgs
            {
                CleanupPolicy = "string",
                CompressionType = "string",
                DeleteRetentionMs = "string",
                FileDeleteDelayMs = "string",
                FlushMessages = "string",
                FlushMs = "string",
                IndexIntervalBytes = "string",
                MaxCompactionLagMs = "string",
                MaxMessageBytes = "string",
                MessageDownConversionEnable = false,
                MessageFormatVersion = "string",
                MessageTimestampDifferenceMaxMs = "string",
                MessageTimestampType = "string",
                MinCleanableDirtyRatio = 0,
                MinCompactionLagMs = "string",
                MinInsyncReplicas = 0,
                Preallocate = false,
                RetentionBytes = "string",
                RetentionMs = "string",
                SegmentBytes = "string",
                SegmentIndexBytes = "string",
                SegmentJitterMs = "string",
                SegmentMs = "string",
            },
        },
        Name = "string",
        PartitionCount = 0,
        ReplicationFactor = 0,
    });
    
    Go

    example, err := digitalocean.NewDatabaseKafkaTopic(ctx, "databaseKafkaTopicResource", &digitalocean.DatabaseKafkaTopicArgs{
    	ClusterId: pulumi.String("string"),
    	Configs: digitalocean.DatabaseKafkaTopicConfigArray{
    		&digitalocean.DatabaseKafkaTopicConfigArgs{
    			CleanupPolicy:                   pulumi.String("string"),
    			CompressionType:                 pulumi.String("string"),
    			DeleteRetentionMs:               pulumi.String("string"),
    			FileDeleteDelayMs:               pulumi.String("string"),
    			FlushMessages:                   pulumi.String("string"),
    			FlushMs:                         pulumi.String("string"),
    			IndexIntervalBytes:              pulumi.String("string"),
    			MaxCompactionLagMs:              pulumi.String("string"),
    			MaxMessageBytes:                 pulumi.String("string"),
    			MessageDownConversionEnable:     pulumi.Bool(false),
    			MessageFormatVersion:            pulumi.String("string"),
    			MessageTimestampDifferenceMaxMs: pulumi.String("string"),
    			MessageTimestampType:            pulumi.String("string"),
    			MinCleanableDirtyRatio:          pulumi.Float64(0),
    			MinCompactionLagMs:              pulumi.String("string"),
    			MinInsyncReplicas:               pulumi.Int(0),
    			Preallocate:                     pulumi.Bool(false),
    			RetentionBytes:                  pulumi.String("string"),
    			RetentionMs:                     pulumi.String("string"),
    			SegmentBytes:                    pulumi.String("string"),
    			SegmentIndexBytes:               pulumi.String("string"),
    			SegmentJitterMs:                 pulumi.String("string"),
    			SegmentMs:                       pulumi.String("string"),
    		},
    	},
    	Name:              pulumi.String("string"),
    	PartitionCount:    pulumi.Int(0),
    	ReplicationFactor: pulumi.Int(0),
    })
    
    Java

    var databaseKafkaTopicResource = new DatabaseKafkaTopic("databaseKafkaTopicResource", DatabaseKafkaTopicArgs.builder()
        .clusterId("string")
        .configs(DatabaseKafkaTopicConfigArgs.builder()
            .cleanupPolicy("string")
            .compressionType("string")
            .deleteRetentionMs("string")
            .fileDeleteDelayMs("string")
            .flushMessages("string")
            .flushMs("string")
            .indexIntervalBytes("string")
            .maxCompactionLagMs("string")
            .maxMessageBytes("string")
            .messageDownConversionEnable(false)
            .messageFormatVersion("string")
            .messageTimestampDifferenceMaxMs("string")
            .messageTimestampType("string")
            .minCleanableDirtyRatio(0)
            .minCompactionLagMs("string")
            .minInsyncReplicas(0)
            .preallocate(false)
            .retentionBytes("string")
            .retentionMs("string")
            .segmentBytes("string")
            .segmentIndexBytes("string")
            .segmentJitterMs("string")
            .segmentMs("string")
            .build())
        .name("string")
        .partitionCount(0)
        .replicationFactor(0)
        .build());
    
    Python

    database_kafka_topic_resource = digitalocean.DatabaseKafkaTopic("databaseKafkaTopicResource",
        cluster_id="string",
        configs=[digitalocean.DatabaseKafkaTopicConfigArgs(
            cleanup_policy="string",
            compression_type="string",
            delete_retention_ms="string",
            file_delete_delay_ms="string",
            flush_messages="string",
            flush_ms="string",
            index_interval_bytes="string",
            max_compaction_lag_ms="string",
            max_message_bytes="string",
            message_down_conversion_enable=False,
            message_format_version="string",
            message_timestamp_difference_max_ms="string",
            message_timestamp_type="string",
            min_cleanable_dirty_ratio=0,
            min_compaction_lag_ms="string",
            min_insync_replicas=0,
            preallocate=False,
            retention_bytes="string",
            retention_ms="string",
            segment_bytes="string",
            segment_index_bytes="string",
            segment_jitter_ms="string",
            segment_ms="string",
        )],
        name="string",
        partition_count=0,
        replication_factor=0)
    
    TypeScript

    const databaseKafkaTopicResource = new digitalocean.DatabaseKafkaTopic("databaseKafkaTopicResource", {
        clusterId: "string",
        configs: [{
            cleanupPolicy: "string",
            compressionType: "string",
            deleteRetentionMs: "string",
            fileDeleteDelayMs: "string",
            flushMessages: "string",
            flushMs: "string",
            indexIntervalBytes: "string",
            maxCompactionLagMs: "string",
            maxMessageBytes: "string",
            messageDownConversionEnable: false,
            messageFormatVersion: "string",
            messageTimestampDifferenceMaxMs: "string",
            messageTimestampType: "string",
            minCleanableDirtyRatio: 0,
            minCompactionLagMs: "string",
            minInsyncReplicas: 0,
            preallocate: false,
            retentionBytes: "string",
            retentionMs: "string",
            segmentBytes: "string",
            segmentIndexBytes: "string",
            segmentJitterMs: "string",
            segmentMs: "string",
        }],
        name: "string",
        partitionCount: 0,
        replicationFactor: 0,
    });
    
    YAML

    type: digitalocean:DatabaseKafkaTopic
    properties:
        clusterId: string
        configs:
            - cleanupPolicy: string
              compressionType: string
              deleteRetentionMs: string
              fileDeleteDelayMs: string
              flushMessages: string
              flushMs: string
              indexIntervalBytes: string
              maxCompactionLagMs: string
              maxMessageBytes: string
              messageDownConversionEnable: false
              messageFormatVersion: string
              messageTimestampDifferenceMaxMs: string
              messageTimestampType: string
              minCleanableDirtyRatio: 0
              minCompactionLagMs: string
              minInsyncReplicas: 0
              preallocate: false
              retentionBytes: string
              retentionMs: string
              segmentBytes: string
              segmentIndexBytes: string
              segmentJitterMs: string
              segmentMs: string
        name: string
        partitionCount: 0
        replicationFactor: 0
    

    DatabaseKafkaTopic Resource Properties

    To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.

    Inputs

    The DatabaseKafkaTopic resource accepts the following input properties:

    ClusterId string
    The ID of the source database cluster. Note: This must be a Kafka cluster.
    Configs List<Pulumi.DigitalOcean.Inputs.DatabaseKafkaTopicConfig>
    A set of advanced configuration parameters. Defaults will be set for any of the parameters that are not included. The config block is documented below.
    Name string
    The name for the topic.
    PartitionCount int
    The number of partitions for the topic. Default and minimum set at 3, maximum is 2048.
    ReplicationFactor int
    The number of nodes that topics are replicated across. Default and minimum set at 2, maximum is the number of nodes in the cluster.
    ClusterId string
    The ID of the source database cluster. Note: This must be a Kafka cluster.
    Configs []DatabaseKafkaTopicConfigArgs
    A set of advanced configuration parameters. Defaults will be set for any of the parameters that are not included. The config block is documented below.
    Name string
    The name for the topic.
    PartitionCount int
    The number of partitions for the topic. Default and minimum set at 3, maximum is 2048.
    ReplicationFactor int
    The number of nodes that topics are replicated across. Default and minimum set at 2, maximum is the number of nodes in the cluster.
    clusterId String
    The ID of the source database cluster. Note: This must be a Kafka cluster.
    configs List<DatabaseKafkaTopicConfig>
    A set of advanced configuration parameters. Defaults will be set for any of the parameters that are not included. The config block is documented below.
    name String
    The name for the topic.
    partitionCount Integer
    The number of partitions for the topic. Default and minimum set at 3, maximum is 2048.
    replicationFactor Integer
    The number of nodes that topics are replicated across. Default and minimum set at 2, maximum is the number of nodes in the cluster.
    clusterId string
    The ID of the source database cluster. Note: This must be a Kafka cluster.
    configs DatabaseKafkaTopicConfig[]
    A set of advanced configuration parameters. Defaults will be set for any of the parameters that are not included. The config block is documented below.
    name string
    The name for the topic.
    partitionCount number
    The number of partitions for the topic. Default and minimum set at 3, maximum is 2048.
    replicationFactor number
    The number of nodes that topics are replicated across. Default and minimum set at 2, maximum is the number of nodes in the cluster.
    cluster_id str
    The ID of the source database cluster. Note: This must be a Kafka cluster.
    configs Sequence[DatabaseKafkaTopicConfigArgs]
    A set of advanced configuration parameters. Defaults will be set for any of the parameters that are not included. The config block is documented below.
    name str
    The name for the topic.
    partition_count int
    The number of partitions for the topic. Default and minimum set at 3, maximum is 2048.
    replication_factor int
    The number of nodes that topics are replicated across. Default and minimum set at 2, maximum is the number of nodes in the cluster.
    clusterId String
    The ID of the source database cluster. Note: This must be a Kafka cluster.
    configs List<Property Map>
    A set of advanced configuration parameters. Defaults will be set for any of the parameters that are not included. The config block is documented below.
    name String
    The name for the topic.
    partitionCount Number
    The number of partitions for the topic. Default and minimum set at 3, maximum is 2048.
    replicationFactor Number
    The number of nodes that topics are replicated across. Default and minimum set at 2, maximum is the number of nodes in the cluster.
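
    Aside from clusterId, the inputs above are optional and fall back to the provider defaults described here, so a minimal declaration can be short. The following TypeScript sketch is illustrative only: the resource name minimalTopic is hypothetical and the cluster ID is a placeholder.

    import * as digitalocean from "@pulumi/digitalocean";

    // Partition count, replication factor, and advanced configs all
    // fall back to the provider defaults described above.
    const minimalTopic = new digitalocean.DatabaseKafkaTopic("minimal-topic", {
        clusterId: "245bcfd0-7f31-4ce6-a2bc-475a116cca97", // placeholder cluster ID
        name: "events", // hypothetical topic name
    });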

    Outputs

    All input properties are implicitly available as output properties. Additionally, the DatabaseKafkaTopic resource produces the following output properties:

    Id string
    The provider-assigned unique ID for this managed resource.
    State string
    The current status of the topic. Possible values are 'active', 'configuring', and 'deleting'.
    Id string
    The provider-assigned unique ID for this managed resource.
    State string
    The current status of the topic. Possible values are 'active', 'configuring', and 'deleting'.
    id String
    The provider-assigned unique ID for this managed resource.
    state String
    The current status of the topic. Possible values are 'active', 'configuring', and 'deleting'.
    id string
    The provider-assigned unique ID for this managed resource.
    state string
    The current status of the topic. Possible values are 'active', 'configuring', and 'deleting'.
    id str
    The provider-assigned unique ID for this managed resource.
    state str
    The current status of the topic. Possible values are 'active', 'configuring', and 'deleting'.
    id String
    The provider-assigned unique ID for this managed resource.
    state String
    The current status of the topic. Possible values are 'active', 'configuring', and 'deleting'.
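
    The state output behaves like any other Pulumi output and can be exported or passed downstream. A brief TypeScript sketch, reusing the topic_01 resource from the example above:

    // Export the topic's provisioning status ("active", "configuring", or "deleting").
    export const topicState = topic_01.state;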

    Look up Existing DatabaseKafkaTopic Resource

    Get an existing DatabaseKafkaTopic resource’s state with the given name, ID, and optional extra properties used to qualify the lookup.

    public static get(name: string, id: Input<ID>, state?: DatabaseKafkaTopicState, opts?: CustomResourceOptions): DatabaseKafkaTopic
    @staticmethod
    def get(resource_name: str,
            id: str,
            opts: Optional[ResourceOptions] = None,
            cluster_id: Optional[str] = None,
            configs: Optional[Sequence[DatabaseKafkaTopicConfigArgs]] = None,
            name: Optional[str] = None,
            partition_count: Optional[int] = None,
            replication_factor: Optional[int] = None,
            state: Optional[str] = None) -> DatabaseKafkaTopic
    func GetDatabaseKafkaTopic(ctx *Context, name string, id IDInput, state *DatabaseKafkaTopicState, opts ...ResourceOption) (*DatabaseKafkaTopic, error)
    public static DatabaseKafkaTopic Get(string name, Input<string> id, DatabaseKafkaTopicState? state, CustomResourceOptions? opts = null)
    public static DatabaseKafkaTopic get(String name, Output<String> id, DatabaseKafkaTopicState state, CustomResourceOptions options)
    Resource lookup is not supported in YAML
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    resource_name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    The following state arguments are supported:
    ClusterId string
    The ID of the source database cluster. Note: This must be a Kafka cluster.
    Configs List<Pulumi.DigitalOcean.Inputs.DatabaseKafkaTopicConfig>
    A set of advanced configuration parameters. Defaults will be set for any of the parameters that are not included. The config block is documented below.
    Name string
    The name for the topic.
    PartitionCount int
    The number of partitions for the topic. Default and minimum set at 3, maximum is 2048.
    ReplicationFactor int
    The number of nodes that topics are replicated across. Default and minimum set at 2, maximum is the number of nodes in the cluster.
    State string
    The current status of the topic. Possible values are 'active', 'configuring', and 'deleting'.
    ClusterId string
    The ID of the source database cluster. Note: This must be a Kafka cluster.
    Configs []DatabaseKafkaTopicConfigArgs
    A set of advanced configuration parameters. Defaults will be set for any of the parameters that are not included. The config block is documented below.
    Name string
    The name for the topic.
    PartitionCount int
    The number of partitions for the topic. Default and minimum set at 3, maximum is 2048.
    ReplicationFactor int
    The number of nodes that topics are replicated across. Default and minimum set at 2, maximum is the number of nodes in the cluster.
    State string
    The current status of the topic. Possible values are 'active', 'configuring', and 'deleting'.
    clusterId String
    The ID of the source database cluster. Note: This must be a Kafka cluster.
    configs List<DatabaseKafkaTopicConfig>
    A set of advanced configuration parameters. Defaults will be set for any of the parameters that are not included. The config block is documented below.
    name String
    The name for the topic.
    partitionCount Integer
    The number of partitions for the topic. Default and minimum set at 3, maximum is 2048.
    replicationFactor Integer
    The number of nodes that topics are replicated across. Default and minimum set at 2, maximum is the number of nodes in the cluster.
    state String
    The current status of the topic. Possible values are 'active', 'configuring', and 'deleting'.
    clusterId string
    The ID of the source database cluster. Note: This must be a Kafka cluster.
    configs DatabaseKafkaTopicConfig[]
    A set of advanced configuration parameters. Defaults will be set for any of the parameters that are not included. The config block is documented below.
    name string
    The name for the topic.
    partitionCount number
    The number of partitions for the topic. Default and minimum set at 3, maximum is 2048.
    replicationFactor number
    The number of nodes that topics are replicated across. Default and minimum set at 2, maximum is the number of nodes in the cluster.
    state string
    The current status of the topic. Possible values are 'active', 'configuring', and 'deleting'.
    cluster_id str
    The ID of the source database cluster. Note: This must be a Kafka cluster.
    configs Sequence[DatabaseKafkaTopicConfigArgs]
    A set of advanced configuration parameters. Defaults will be set for any of the parameters that are not included. The config block is documented below.
    name str
    The name for the topic.
    partition_count int
    The number of partitions for the topic. Default and minimum set at 3, maximum is 2048.
    replication_factor int
    The number of nodes that topics are replicated across. Default and minimum set at 2, maximum is the number of nodes in the cluster.
    state str
    The current status of the topic. Possible values are 'active', 'configuring', and 'deleting'.
    clusterId String
    The ID of the source database cluster. Note: This must be a Kafka cluster.
    configs List<Property Map>
    A set of advanced configuration parameters. Defaults will be set for any of the parameters that are not included. The config block is documented below.
    name String
    The name for the topic.
    partitionCount Number
    The number of partitions for the topic. Default and minimum set at 3, maximum is 2048.
    replicationFactor Number
    The number of nodes that topics are replicated across. Default and minimum set at 2, maximum is the number of nodes in the cluster.
    state String
    The current status of the topic. Possible values are 'active', 'configuring', and 'deleting'.
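
    As a concrete illustration, the TypeScript sketch below adopts an existing topic as a read-only reference. It assumes the resource ID follows the same cluster-ID-and-topic-name format used for import (see the Import section below); both IDs are placeholders.

    import * as digitalocean from "@pulumi/digitalocean";

    // Look up an existing topic without managing it; outputs such as
    // `state` are then available like on any other resource.
    const existing = digitalocean.DatabaseKafkaTopic.get(
        "existing-topic",
        "245bcfd0-7f31-4ce6-a2bc-475a116cca97,topic-01",
    );
    export const existingTopicState = existing.state;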

    Supporting Types

    DatabaseKafkaTopicConfig, DatabaseKafkaTopicConfigArgs

    CleanupPolicy string
    The topic cleanup policy that describes whether messages should be deleted, compacted, or both when retention policies are violated. This may be one of "delete", "compact", or "compact_delete".
    CompressionType string
    The topic compression codecs used for a given topic. This may be one of "uncompressed", "gzip", "snappy", "lz4", "producer", "zstd". "uncompressed" indicates that there is no compression and "producer" retains the original compression codec set by the producer.
    DeleteRetentionMs string
    The amount of time, in ms, that deleted records are retained.
    FileDeleteDelayMs string
    The amount of time, in ms, to wait before deleting a topic log segment from the filesystem.
    FlushMessages string
    The number of messages accumulated on a topic partition before they are flushed to disk.
    FlushMs string
    The maximum time, in ms, that a topic is kept in memory before being flushed to disk.
    IndexIntervalBytes string
    The interval, in bytes, in which entries are added to the offset index.
    MaxCompactionLagMs string
    The maximum time, in ms, that a particular message will remain uncompacted. This will not apply if the compression_type is set to "uncompressed" or it is set to producer and the producer is not using compression.
    MaxMessageBytes string
    The maximum size, in bytes, of a message.
    MessageDownConversionEnable bool
    Determines whether down-conversion of message formats for consumers is enabled.
    MessageFormatVersion string
    The version of the inter-broker protocol that will be used. This may be one of "0.8.0", "0.8.1", "0.8.2", "0.9.0", "0.10.0", "0.10.0-IV0", "0.10.0-IV1", "0.10.1", "0.10.1-IV0", "0.10.1-IV1", "0.10.1-IV2", "0.10.2", "0.10.2-IV0", "0.11.0", "0.11.0-IV0", "0.11.0-IV1", "0.11.0-IV2", "1.0", "1.0-IV0", "1.1", "1.1-IV0", "2.0", "2.0-IV0", "2.0-IV1", "2.1", "2.1-IV0", "2.1-IV1", "2.1-IV2", "2.2", "2.2-IV0", "2.2-IV1", "2.3", "2.3-IV0", "2.3-IV1", "2.4", "2.4-IV0", "2.4-IV1", "2.5", "2.5-IV0", "2.6", "2.6-IV0", "2.7", "2.7-IV0", "2.7-IV1", "2.7-IV2", "2.8", "2.8-IV0", "2.8-IV1", "3.0", "3.0-IV0", "3.0-IV1", "3.1", "3.1-IV0", "3.2", "3.2-IV0", "3.3", "3.3-IV0", "3.3-IV1", "3.3-IV2", "3.3-IV3", "3.4", "3.4-IV0", "3.5", "3.5-IV0", "3.5-IV1", "3.5-IV2", "3.6", "3.6-IV0", "3.6-IV1", "3.6-IV2".
    MessageTimestampDifferenceMaxMs string
    The maximum difference, in ms, between the timestamp specified in a message and when the broker receives the message.
    MessageTimestampType string
    Specifies which timestamp to use for the message. This may be one of "create_time" or "log_append_time".
    MinCleanableDirtyRatio double
    A scale between 0.0 and 1.0 which controls the frequency of the compactor. Larger values mean more frequent compactions. This is often paired with max_compaction_lag_ms to control the compactor frequency.
    MinCompactionLagMs string
    The minimum time, in ms, that a message will remain uncompacted in the log.
    MinInsyncReplicas int
    The number of replicas that must acknowledge a write before it is considered successful. -1 is a special setting to indicate that all nodes must ack a message before a write is considered successful. Default is 1, indicating at least 1 replica must acknowledge a write to be considered successful.
    Preallocate bool
    Determines whether to preallocate a file on disk when creating a new log segment within a topic.
    RetentionBytes string
    The maximum size, in bytes, of a topic before messages are deleted. -1 is a special setting indicating that this setting has no limit.
    RetentionMs string
    The maximum time, in ms, that a topic log file is retained before deleting it. -1 is a special setting indicating that this setting has no limit.
    SegmentBytes string
    The maximum size, in bytes, of a single topic log file.
    SegmentIndexBytes string
    The maximum size, in bytes, of the offset index.
    SegmentJitterMs string
    The maximum time, in ms, subtracted from the scheduled segment disk flush time to avoid the thundering herd problem for segment flushing.
    SegmentMs string
    The maximum time, in ms, before the topic log will flush to disk.
    CleanupPolicy string
    The topic cleanup policy that describes whether messages should be deleted, compacted, or both when retention policies are violated. This may be one of "delete", "compact", or "compact_delete".
    CompressionType string
    The topic compression codecs used for a given topic. This may be one of "uncompressed", "gzip", "snappy", "lz4", "producer", "zstd". "uncompressed" indicates that there is no compression and "producer" retains the original compression codec set by the producer.
    DeleteRetentionMs string
    The amount of time, in ms, that deleted records are retained.
    FileDeleteDelayMs string
    The amount of time, in ms, to wait before deleting a topic log segment from the filesystem.
    FlushMessages string
    The number of messages accumulated on a topic partition before they are flushed to disk.
    FlushMs string
    The maximum time, in ms, that a topic is kept in memory before being flushed to disk.
    IndexIntervalBytes string
    The interval, in bytes, in which entries are added to the offset index.
    MaxCompactionLagMs string
    The maximum time, in ms, that a particular message will remain uncompacted. This will not apply if the compression_type is set to "uncompressed" or it is set to producer and the producer is not using compression.
    MaxMessageBytes string
    The maximum size, in bytes, of a message.
    MessageDownConversionEnable bool
    Determines whether down-conversion of message formats for consumers is enabled.
    MessageFormatVersion string
    The version of the inter-broker protocol that will be used. This may be one of "0.8.0", "0.8.1", "0.8.2", "0.9.0", "0.10.0", "0.10.0-IV0", "0.10.0-IV1", "0.10.1", "0.10.1-IV0", "0.10.1-IV1", "0.10.1-IV2", "0.10.2", "0.10.2-IV0", "0.11.0", "0.11.0-IV0", "0.11.0-IV1", "0.11.0-IV2", "1.0", "1.0-IV0", "1.1", "1.1-IV0", "2.0", "2.0-IV0", "2.0-IV1", "2.1", "2.1-IV0", "2.1-IV1", "2.1-IV2", "2.2", "2.2-IV0", "2.2-IV1", "2.3", "2.3-IV0", "2.3-IV1", "2.4", "2.4-IV0", "2.4-IV1", "2.5", "2.5-IV0", "2.6", "2.6-IV0", "2.7", "2.7-IV0", "2.7-IV1", "2.7-IV2", "2.8", "2.8-IV0", "2.8-IV1", "3.0", "3.0-IV0", "3.0-IV1", "3.1", "3.1-IV0", "3.2", "3.2-IV0", "3.3", "3.3-IV0", "3.3-IV1", "3.3-IV2", "3.3-IV3", "3.4", "3.4-IV0", "3.5", "3.5-IV0", "3.5-IV1", "3.5-IV2", "3.6", "3.6-IV0", "3.6-IV1", "3.6-IV2".
    MessageTimestampDifferenceMaxMs string
    The maximum difference, in ms, between the timestamp specified in a message and when the broker receives the message.
    MessageTimestampType string
    Specifies which timestamp to use for the message. This may be one of "create_time" or "log_append_time".
    MinCleanableDirtyRatio float64
    A scale between 0.0 and 1.0 which controls the frequency of the compactor. Larger values mean more frequent compactions. This is often paired with max_compaction_lag_ms to control the compactor frequency.
    MinCompactionLagMs string
    The minimum time, in ms, that a message will remain uncompacted in the log.
    MinInsyncReplicas int
    The number of replicas that must acknowledge a write before it is considered successful. -1 is a special setting to indicate that all nodes must ack a message before a write is considered successful. Default is 1, indicating at least 1 replica must acknowledge a write to be considered successful.
    Preallocate bool
    Determines whether to preallocate a file on disk when creating a new log segment within a topic.
    RetentionBytes string
    The maximum size, in bytes, of a topic before messages are deleted. -1 is a special setting indicating that this setting has no limit.
    RetentionMs string
    The maximum time, in ms, that a topic log file is retained before deleting it. -1 is a special setting indicating that this setting has no limit.
    SegmentBytes string
    The maximum size, in bytes, of a single topic log file.
    SegmentIndexBytes string
    The maximum size, in bytes, of the offset index.
    SegmentJitterMs string
    The maximum time, in ms, subtracted from the scheduled segment disk flush time to avoid the thundering herd problem for segment flushing.
    SegmentMs string
    The maximum time, in ms, before the topic log will flush to disk.
    cleanupPolicy String
    The topic cleanup policy that describes whether messages should be deleted, compacted, or both when retention policies are violated. This may be one of "delete", "compact", or "compact_delete".
    compressionType String
    The topic compression codecs used for a given topic. This may be one of "uncompressed", "gzip", "snappy", "lz4", "producer", "zstd". "uncompressed" indicates that there is no compression and "producer" retains the original compression codec set by the producer.
    deleteRetentionMs String
    The amount of time, in ms, that deleted records are retained.
    fileDeleteDelayMs String
    The amount of time, in ms, to wait before deleting a topic log segment from the filesystem.
    flushMessages String
    The number of messages accumulated on a topic partition before they are flushed to disk.
    flushMs String
    The maximum time, in ms, that a topic is kept in memory before being flushed to disk.
    indexIntervalBytes String
    The interval, in bytes, in which entries are added to the offset index.
    maxCompactionLagMs String
    The maximum time, in ms, that a particular message will remain uncompacted. This will not apply if the compression_type is set to "uncompressed" or it is set to producer and the producer is not using compression.
    maxMessageBytes String
    The maximum size, in bytes, of a message.
    messageDownConversionEnable Boolean
    Determines whether down-conversion of message formats for consumers is enabled.
    messageFormatVersion String
    The version of the inter-broker protocol that will be used. This may be one of "0.8.0", "0.8.1", "0.8.2", "0.9.0", "0.10.0", "0.10.0-IV0", "0.10.0-IV1", "0.10.1", "0.10.1-IV0", "0.10.1-IV1", "0.10.1-IV2", "0.10.2", "0.10.2-IV0", "0.11.0", "0.11.0-IV0", "0.11.0-IV1", "0.11.0-IV2", "1.0", "1.0-IV0", "1.1", "1.1-IV0", "2.0", "2.0-IV0", "2.0-IV1", "2.1", "2.1-IV0", "2.1-IV1", "2.1-IV2", "2.2", "2.2-IV0", "2.2-IV1", "2.3", "2.3-IV0", "2.3-IV1", "2.4", "2.4-IV0", "2.4-IV1", "2.5", "2.5-IV0", "2.6", "2.6-IV0", "2.7", "2.7-IV0", "2.7-IV1", "2.7-IV2", "2.8", "2.8-IV0", "2.8-IV1", "3.0", "3.0-IV0", "3.0-IV1", "3.1", "3.1-IV0", "3.2", "3.2-IV0", "3.3", "3.3-IV0", "3.3-IV1", "3.3-IV2", "3.3-IV3", "3.4", "3.4-IV0", "3.5", "3.5-IV0", "3.5-IV1", "3.5-IV2", "3.6", "3.6-IV0", "3.6-IV1", "3.6-IV2".
    messageTimestampDifferenceMaxMs String
    The maximum difference, in ms, between the timestamp specified in a message and when the broker receives the message.
    messageTimestampType String
    Specifies which timestamp to use for the message. This may be one of "create_time" or "log_append_time".
    minCleanableDirtyRatio Double
    A scale between 0.0 and 1.0 which controls the frequency of the compactor. Larger values mean more frequent compactions. This is often paired with max_compaction_lag_ms to control the compactor frequency.
    minCompactionLagMs String
    The minimum time, in ms, that a message will remain uncompacted in the log.
    minInsyncReplicas Integer
    The number of replicas that must acknowledge a write before it is considered successful. -1 is a special setting to indicate that all nodes must ack a message before a write is considered successful. Default is 1, indicating at least 1 replica must acknowledge a write to be considered successful.
    preallocate Boolean
    Determines whether to preallocate a file on disk when creating a new log segment within a topic.
    retentionBytes String
    The maximum size, in bytes, of a topic before messages are deleted. -1 is a special setting indicating that this setting has no limit.
    retentionMs String
    The maximum time, in ms, that a topic log file is retained before deleting it. -1 is a special setting indicating that this setting has no limit.
    segmentBytes String
    The maximum size, in bytes, of a single topic log file.
    segmentIndexBytes String
    The maximum size, in bytes, of the offset index.
    segmentJitterMs String
    The maximum time, in ms, subtracted from the scheduled segment disk flush time to avoid the thundering herd problem for segment flushing.
    segmentMs String
    The maximum time, in ms, before the topic log will flush to disk.
    cleanupPolicy string
    The topic cleanup policy that describes whether messages should be deleted, compacted, or both when retention policies are violated. This may be one of "delete", "compact", or "compact_delete".
    compressionType string
    The topic compression codecs used for a given topic. This may be one of "uncompressed", "gzip", "snappy", "lz4", "producer", "zstd". "uncompressed" indicates that there is no compression and "producer" retains the original compression codec set by the producer.
    deleteRetentionMs string
    The amount of time, in ms, that deleted records are retained.
    fileDeleteDelayMs string
    The amount of time, in ms, to wait before deleting a topic log segment from the filesystem.
    flushMessages string
    The number of messages accumulated on a topic partition before they are flushed to disk.
    flushMs string
    The maximum time, in ms, that a topic is kept in memory before being flushed to disk.
    indexIntervalBytes string
    The interval, in bytes, in which entries are added to the offset index.
    maxCompactionLagMs string
    The maximum time, in ms, that a particular message will remain uncompacted. This will not apply if the compression_type is set to "uncompressed" or it is set to producer and the producer is not using compression.
    maxMessageBytes string
    The maximum size, in bytes, of a message.
    messageDownConversionEnable boolean
    Determines whether down-conversion of message formats for consumers is enabled.
    messageFormatVersion string
    The version of the inter-broker protocol that will be used. This may be one of "0.8.0", "0.8.1", "0.8.2", "0.9.0", "0.10.0", "0.10.0-IV0", "0.10.0-IV1", "0.10.1", "0.10.1-IV0", "0.10.1-IV1", "0.10.1-IV2", "0.10.2", "0.10.2-IV0", "0.11.0", "0.11.0-IV0", "0.11.0-IV1", "0.11.0-IV2", "1.0", "1.0-IV0", "1.1", "1.1-IV0", "2.0", "2.0-IV0", "2.0-IV1", "2.1", "2.1-IV0", "2.1-IV1", "2.1-IV2", "2.2", "2.2-IV0", "2.2-IV1", "2.3", "2.3-IV0", "2.3-IV1", "2.4", "2.4-IV0", "2.4-IV1", "2.5", "2.5-IV0", "2.6", "2.6-IV0", "2.7", "2.7-IV0", "2.7-IV1", "2.7-IV2", "2.8", "2.8-IV0", "2.8-IV1", "3.0", "3.0-IV0", "3.0-IV1", "3.1", "3.1-IV0", "3.2", "3.2-IV0", "3.3", "3.3-IV0", "3.3-IV1", "3.3-IV2", "3.3-IV3", "3.4", "3.4-IV0", "3.5", "3.5-IV0", "3.5-IV1", "3.5-IV2", "3.6", "3.6-IV0", "3.6-IV1", "3.6-IV2".
    messageTimestampDifferenceMaxMs string
    The maximum difference, in ms, between the timestamp specified in a message and when the broker receives the message.
    messageTimestampType string
    Specifies which timestamp to use for the message. This may be one of "create_time" or "log_append_time".
    minCleanableDirtyRatio number
    A scale between 0.0 and 1.0 which controls the frequency of the compactor. Larger values mean more frequent compactions. This is often paired with max_compaction_lag_ms to control the compactor frequency.
    minCompactionLagMs string
    The minimum time, in ms, that a message will remain uncompacted in the log.
    minInsyncReplicas number
    The number of replicas that must acknowledge a write before it is considered successful. -1 is a special setting to indicate that all nodes must ack a message before a write is considered successful. Default is 1, indicating at least 1 replica must acknowledge a write to be considered successful.
    preallocate boolean
    Determines whether to preallocate a file on disk when creating a new log segment within a topic.
    retentionBytes string
    The maximum size, in bytes, of a topic before messages are deleted. -1 is a special setting indicating that this setting has no limit.
    retentionMs string
    The maximum time, in ms, that a topic log file is retained before deleting it. -1 is a special setting indicating that this setting has no limit.
    segmentBytes string
    The maximum size, in bytes, of a single topic log file.
    segmentIndexBytes string
    The maximum size, in bytes, of the offset index.
    segmentJitterMs string
    The maximum time, in ms, subtracted from the scheduled segment disk flush time to avoid the thundering herd problem for segment flushing.
    segmentMs string
    The maximum time, in ms, before the topic log will flush to disk.
    cleanup_policy str
    The topic cleanup policy that describes whether messages should be deleted, compacted, or both when retention policies are violated. This may be one of "delete", "compact", or "compact_delete".
    compression_type str
    The topic compression codecs used for a given topic. This may be one of "uncompressed", "gzip", "snappy", "lz4", "producer", "zstd". "uncompressed" indicates that there is no compression and "producer" retains the original compression codec set by the producer.
    delete_retention_ms str
    The amount of time, in ms, that deleted records are retained.
    file_delete_delay_ms str
    The amount of time, in ms, to wait before deleting a topic log segment from the filesystem.
    flush_messages str
    The number of messages accumulated on a topic partition before they are flushed to disk.
    flush_ms str
    The maximum time, in ms, that a topic is kept in memory before being flushed to disk.
    index_interval_bytes str
    The interval, in bytes, in which entries are added to the offset index.
    max_compaction_lag_ms str
    The maximum time, in ms, that a particular message will remain uncompacted. This will not apply if the compression_type is set to "uncompressed" or it is set to producer and the producer is not using compression.
    max_message_bytes str
    The maximum size, in bytes, of a message.
    message_down_conversion_enable bool
    Determines whether down-conversion of message formats for consumers is enabled.
    message_format_version str
    The version of the inter-broker protocol that will be used. This may be one of "0.8.0", "0.8.1", "0.8.2", "0.9.0", "0.10.0", "0.10.0-IV0", "0.10.0-IV1", "0.10.1", "0.10.1-IV0", "0.10.1-IV1", "0.10.1-IV2", "0.10.2", "0.10.2-IV0", "0.11.0", "0.11.0-IV0", "0.11.0-IV1", "0.11.0-IV2", "1.0", "1.0-IV0", "1.1", "1.1-IV0", "2.0", "2.0-IV0", "2.0-IV1", "2.1", "2.1-IV0", "2.1-IV1", "2.1-IV2", "2.2", "2.2-IV0", "2.2-IV1", "2.3", "2.3-IV0", "2.3-IV1", "2.4", "2.4-IV0", "2.4-IV1", "2.5", "2.5-IV0", "2.6", "2.6-IV0", "2.7", "2.7-IV0", "2.7-IV1", "2.7-IV2", "2.8", "2.8-IV0", "2.8-IV1", "3.0", "3.0-IV0", "3.0-IV1", "3.1", "3.1-IV0", "3.2", "3.2-IV0", "3.3", "3.3-IV0", "3.3-IV1", "3.3-IV2", "3.3-IV3", "3.4", "3.4-IV0", "3.5", "3.5-IV0", "3.5-IV1", "3.5-IV2", "3.6", "3.6-IV0", "3.6-IV1", "3.6-IV2".
    message_timestamp_difference_max_ms str
    The maximum difference, in ms, between the timestamp specified in a message and when the broker receives the message.
    message_timestamp_type str
    Specifies which timestamp to use for the message. This may be one of "create_time" or "log_append_time".
    min_cleanable_dirty_ratio float
    A scale between 0.0 and 1.0 which controls the frequency of the compactor. Larger values mean more frequent compactions. This is often paired with max_compaction_lag_ms to control the compactor frequency.
    min_compaction_lag_ms str
    The minimum time, in ms, that a message will remain uncompacted in the log.
    min_insync_replicas int
    The number of replicas that must acknowledge a write before it is considered successful. -1 is a special setting to indicate that all nodes must ack a message before a write is considered successful. Default is 1, indicating at least 1 replica must acknowledge a write to be considered successful.
    preallocate bool
    Determines whether to preallocate a file on disk when creating a new log segment within a topic.
    retention_bytes str
    The maximum size, in bytes, of a topic before messages are deleted. -1 is a special setting indicating that this setting has no limit.
    retention_ms str
    The maximum time, in ms, that a topic log file is retained before deleting it. -1 is a special setting indicating that this setting has no limit.
    segment_bytes str
    The maximum size, in bytes, of a single topic log file.
    segment_index_bytes str
    The maximum size, in bytes, of the offset index.
    segment_jitter_ms str
    The maximum time, in ms, subtracted from the scheduled segment disk flush time to avoid the thundering herd problem for segment flushing.
    segment_ms str
    The maximum time, in ms, before the topic log will flush to disk.
    cleanupPolicy String
    The topic cleanup policy that describes whether messages should be deleted, compacted, or both when retention policies are violated. This may be one of "delete", "compact", or "compact_delete".
    compressionType String
    The topic compression codecs used for a given topic. This may be one of "uncompressed", "gzip", "snappy", "lz4", "producer", "zstd". "uncompressed" indicates that there is no compression and "producer" retains the original compression codec set by the producer.
    deleteRetentionMs String
    The amount of time, in ms, that deleted records are retained.
    fileDeleteDelayMs String
    The amount of time, in ms, to wait before deleting a topic log segment from the filesystem.
    flushMessages String
    The number of messages accumulated on a topic partition before they are flushed to disk.
    flushMs String
    The maximum time, in ms, that a topic is kept in memory before being flushed to disk.
    indexIntervalBytes String
    The interval, in bytes, in which entries are added to the offset index.
    maxCompactionLagMs String
    The maximum time, in ms, that a particular message will remain uncompacted. This will not apply if the compression_type is set to "uncompressed" or it is set to producer and the producer is not using compression.
    maxMessageBytes String
    The maximum size, in bytes, of a message.
    messageDownConversionEnable Boolean
    Determines whether down-conversion of message formats for consumers is enabled.
    messageFormatVersion String
    The version of the inter-broker protocol that will be used. This may be one of "0.8.0", "0.8.1", "0.8.2", "0.9.0", "0.10.0", "0.10.0-IV0", "0.10.0-IV1", "0.10.1", "0.10.1-IV0", "0.10.1-IV1", "0.10.1-IV2", "0.10.2", "0.10.2-IV0", "0.11.0", "0.11.0-IV0", "0.11.0-IV1", "0.11.0-IV2", "1.0", "1.0-IV0", "1.1", "1.1-IV0", "2.0", "2.0-IV0", "2.0-IV1", "2.1", "2.1-IV0", "2.1-IV1", "2.1-IV2", "2.2", "2.2-IV0", "2.2-IV1", "2.3", "2.3-IV0", "2.3-IV1", "2.4", "2.4-IV0", "2.4-IV1", "2.5", "2.5-IV0", "2.6", "2.6-IV0", "2.7", "2.7-IV0", "2.7-IV1", "2.7-IV2", "2.8", "2.8-IV0", "2.8-IV1", "3.0", "3.0-IV0", "3.0-IV1", "3.1", "3.1-IV0", "3.2", "3.2-IV0", "3.3", "3.3-IV0", "3.3-IV1", "3.3-IV2", "3.3-IV3", "3.4", "3.4-IV0", "3.5", "3.5-IV0", "3.5-IV1", "3.5-IV2", "3.6", "3.6-IV0", "3.6-IV1", "3.6-IV2".
    messageTimestampDifferenceMaxMs String
    The maximum difference, in ms, between the timestamp specified in a message and when the broker receives the message.
    messageTimestampType String
    Specifies which timestamp to use for the message. This may be one of "create_time" or "log_append_time".
    minCleanableDirtyRatio Number
    A scale between 0.0 and 1.0 which controls the frequency of the compactor. Larger values mean more frequent compactions. This is often paired with max_compaction_lag_ms to control the compactor frequency.
    minCompactionLagMs String
    The minimum time, in ms, that a message will remain uncompacted in the log.
    minInsyncReplicas Number
    The number of replicas that must acknowledge a write before it is considered successful. -1 is a special setting to indicate that all nodes must ack a message before a write is considered successful. Default is 1, indicating at least 1 replica must acknowledge a write to be considered successful.
    preallocate Boolean
    Determines whether to preallocate a file on disk when creating a new log segment within a topic.
    retentionBytes String
    The maximum size, in bytes, of a topic before messages are deleted. -1 is a special setting indicating that this setting has no limit.
    retentionMs String
    The maximum time, in ms, that a topic log file is retained before deleting it. -1 is a special setting indicating that this setting has no limit.
    segmentBytes String
    The maximum size, in bytes, of a single topic log file.
    segmentIndexBytes String
    The maximum size, in bytes, of the offset index.
    segmentJitterMs String
    The maximum time, in ms, subtracted from the scheduled segment disk flush time to avoid the thundering herd problem for segment flushing.
    segmentMs String
    The maximum time, in ms, before the topic log will flush to disk.
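
    To make these settings concrete, the TypeScript sketch below configures a topic for pure time-based deletion: cleanupPolicy is "delete", retention is capped at seven days (7 * 24 * 60 * 60 * 1000 = 604800000 ms), and the size-based limit is disabled. The resource name weeklyTopic is hypothetical and the cluster ID is a placeholder.

    const weeklyTopic = new digitalocean.DatabaseKafkaTopic("weekly-topic", {
        clusterId: "245bcfd0-7f31-4ce6-a2bc-475a116cca97", // placeholder cluster ID
        configs: [{
            cleanupPolicy: "delete",  // delete old segments rather than compacting
            retentionMs: "604800000", // seven days, expressed in milliseconds
            retentionBytes: "-1",     // -1 disables the size-based limit
        }],
    });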

    Import

    Topics can be imported using the ID of the source cluster and the name of the topic joined with a comma. For example:

    $ pulumi import digitalocean:index/databaseKafkaTopic:DatabaseKafkaTopic topic-01 245bcfd0-7f31-4ce6-a2bc-475a116cca97,topic-01
    

    To learn more about importing existing cloud resources, see Importing resources.
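
    As an alternative to the CLI, Pulumi's import resource option can adopt the topic directly from code. The TypeScript sketch below uses the same placeholder IDs as above; the declared arguments must match the live topic's settings, otherwise Pulumi reports a diff on the next update.

    // Adopt an existing topic into the stack instead of creating a new one.
    const adopted = new digitalocean.DatabaseKafkaTopic("topic-01", {
        clusterId: "245bcfd0-7f31-4ce6-a2bc-475a116cca97",
        name: "topic-01",
    }, { import: "245bcfd0-7f31-4ce6-a2bc-475a116cca97,topic-01" });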

    Package Details

    Repository
    DigitalOcean pulumi/pulumi-digitalocean
    License
    Apache-2.0
    Notes
    This Pulumi package is based on the digitalocean Terraform Provider.