kafka.Topic
Explore with Pulumi AI
The kafka.Topic
resource manages Apache Kafka topics, including their partition count, replication factor, and various configuration parameters. This resource supports non-destructive partition count increases.
Example Usage
Basic Topic
import * as pulumi from "@pulumi/pulumi";
import * as kafka from "@pulumi/kafka";
const example = new kafka.Topic("example", {
name: "example-topic",
replicationFactor: 3,
partitions: 10,
});
import pulumi
import pulumi_kafka as kafka
example = kafka.Topic("example",
name="example-topic",
replication_factor=3,
partitions=10)
package main
import (
"github.com/pulumi/pulumi-kafka/sdk/v3/go/kafka"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
_, err := kafka.NewTopic(ctx, "example", &kafka.TopicArgs{
Name: pulumi.String("example-topic"),
ReplicationFactor: pulumi.Int(3),
Partitions: pulumi.Int(10),
})
if err != nil {
return err
}
return nil
})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Kafka = Pulumi.Kafka;
return await Deployment.RunAsync(() =>
{
var example = new Kafka.Topic("example", new()
{
Name = "example-topic",
ReplicationFactor = 3,
Partitions = 10,
});
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.kafka.Topic;
import com.pulumi.kafka.TopicArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
var example = new Topic("example", TopicArgs.builder()
.name("example-topic")
.replicationFactor(3)
.partitions(10)
.build());
}
}
resources:
example:
type: kafka:Topic
properties:
name: example-topic
replicationFactor: 3
partitions: 10
Topic with Common Configurations
import * as pulumi from "@pulumi/pulumi";
import * as kafka from "@pulumi/kafka";
const logs = new kafka.Topic("logs", {
name: "application-logs",
replicationFactor: 3,
partitions: 50,
config: {
"retention.ms": "604800000",
"segment.ms": "86400000",
"cleanup.policy": "delete",
"compression.type": "gzip",
},
});
import pulumi
import pulumi_kafka as kafka
logs = kafka.Topic("logs",
name="application-logs",
replication_factor=3,
partitions=50,
config={
"retention.ms": "604800000",
"segment.ms": "86400000",
"cleanup.policy": "delete",
"compression.type": "gzip",
})
package main
import (
"github.com/pulumi/pulumi-kafka/sdk/v3/go/kafka"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
_, err := kafka.NewTopic(ctx, "logs", &kafka.TopicArgs{
Name: pulumi.String("application-logs"),
ReplicationFactor: pulumi.Int(3),
Partitions: pulumi.Int(50),
Config: pulumi.StringMap{
"retention.ms": pulumi.String("604800000"),
"segment.ms": pulumi.String("86400000"),
"cleanup.policy": pulumi.String("delete"),
"compression.type": pulumi.String("gzip"),
},
})
if err != nil {
return err
}
return nil
})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Kafka = Pulumi.Kafka;
return await Deployment.RunAsync(() =>
{
var logs = new Kafka.Topic("logs", new()
{
Name = "application-logs",
ReplicationFactor = 3,
Partitions = 50,
Config =
{
{ "retention.ms", "604800000" },
{ "segment.ms", "86400000" },
{ "cleanup.policy", "delete" },
{ "compression.type", "gzip" },
},
});
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.kafka.Topic;
import com.pulumi.kafka.TopicArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
var logs = new Topic("logs", TopicArgs.builder()
.name("application-logs")
.replicationFactor(3)
.partitions(50)
.config(Map.ofEntries(
Map.entry("retention.ms", "604800000"),
Map.entry("segment.ms", "86400000"),
Map.entry("cleanup.policy", "delete"),
Map.entry("compression.type", "gzip")
))
.build());
}
}
resources:
logs:
type: kafka:Topic
properties:
name: application-logs
replicationFactor: 3
partitions: 50
config:
retention.ms: '604800000'
segment.ms: '86400000'
cleanup.policy: delete
compression.type: gzip
Compacted Topic for Event Sourcing
import * as pulumi from "@pulumi/pulumi";
import * as kafka from "@pulumi/kafka";
const events = new kafka.Topic("events", {
name: "user-events",
replicationFactor: 3,
partitions: 100,
config: {
"cleanup.policy": "compact",
"retention.ms": "-1",
"min.compaction.lag.ms": "3600000",
"delete.retention.ms": "86400000",
"compression.type": "lz4",
"segment.bytes": "1073741824",
},
});
import pulumi
import pulumi_kafka as kafka
events = kafka.Topic("events",
name="user-events",
replication_factor=3,
partitions=100,
config={
"cleanup.policy": "compact",
"retention.ms": "-1",
"min.compaction.lag.ms": "3600000",
"delete.retention.ms": "86400000",
"compression.type": "lz4",
"segment.bytes": "1073741824",
})
package main
import (
"github.com/pulumi/pulumi-kafka/sdk/v3/go/kafka"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
_, err := kafka.NewTopic(ctx, "events", &kafka.TopicArgs{
Name: pulumi.String("user-events"),
ReplicationFactor: pulumi.Int(3),
Partitions: pulumi.Int(100),
Config: pulumi.StringMap{
"cleanup.policy": pulumi.String("compact"),
"retention.ms": pulumi.String("-1"),
"min.compaction.lag.ms": pulumi.String("3600000"),
"delete.retention.ms": pulumi.String("86400000"),
"compression.type": pulumi.String("lz4"),
"segment.bytes": pulumi.String("1073741824"),
},
})
if err != nil {
return err
}
return nil
})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Kafka = Pulumi.Kafka;
return await Deployment.RunAsync(() =>
{
var events = new Kafka.Topic("events", new()
{
Name = "user-events",
ReplicationFactor = 3,
Partitions = 100,
Config =
{
{ "cleanup.policy", "compact" },
{ "retention.ms", "-1" },
{ "min.compaction.lag.ms", "3600000" },
{ "delete.retention.ms", "86400000" },
{ "compression.type", "lz4" },
{ "segment.bytes", "1073741824" },
},
});
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.kafka.Topic;
import com.pulumi.kafka.TopicArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
var events = new Topic("events", TopicArgs.builder()
.name("user-events")
.replicationFactor(3)
.partitions(100)
.config(Map.ofEntries(
Map.entry("cleanup.policy", "compact"),
Map.entry("retention.ms", "-1"),
Map.entry("min.compaction.lag.ms", "3600000"),
Map.entry("delete.retention.ms", "86400000"),
Map.entry("compression.type", "lz4"),
Map.entry("segment.bytes", "1073741824")
))
.build());
}
}
resources:
events:
type: kafka:Topic
properties:
name: user-events
replicationFactor: 3
partitions: 100
config:
cleanup.policy: compact
retention.ms: '-1'
min.compaction.lag.ms: '3600000'
delete.retention.ms: '86400000'
compression.type: lz4
segment.bytes: '1073741824'
High-Throughput Topic
import * as pulumi from "@pulumi/pulumi";
import * as kafka from "@pulumi/kafka";
const metrics = new kafka.Topic("metrics", {
name: "system-metrics",
replicationFactor: 2,
partitions: 200,
config: {
"retention.ms": "86400000",
"segment.ms": "3600000",
"compression.type": "lz4",
"max.message.bytes": "1048576",
"min.insync.replicas": "2",
"unclean.leader.election.enable": "false",
},
});
import pulumi
import pulumi_kafka as kafka
metrics = kafka.Topic("metrics",
name="system-metrics",
replication_factor=2,
partitions=200,
config={
"retention.ms": "86400000",
"segment.ms": "3600000",
"compression.type": "lz4",
"max.message.bytes": "1048576",
"min.insync.replicas": "2",
"unclean.leader.election.enable": "false",
})
package main
import (
"github.com/pulumi/pulumi-kafka/sdk/v3/go/kafka"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
_, err := kafka.NewTopic(ctx, "metrics", &kafka.TopicArgs{
Name: pulumi.String("system-metrics"),
ReplicationFactor: pulumi.Int(2),
Partitions: pulumi.Int(200),
Config: pulumi.StringMap{
"retention.ms": pulumi.String("86400000"),
"segment.ms": pulumi.String("3600000"),
"compression.type": pulumi.String("lz4"),
"max.message.bytes": pulumi.String("1048576"),
"min.insync.replicas": pulumi.String("2"),
"unclean.leader.election.enable": pulumi.String("false"),
},
})
if err != nil {
return err
}
return nil
})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Kafka = Pulumi.Kafka;
return await Deployment.RunAsync(() =>
{
var metrics = new Kafka.Topic("metrics", new()
{
Name = "system-metrics",
ReplicationFactor = 2,
Partitions = 200,
Config =
{
{ "retention.ms", "86400000" },
{ "segment.ms", "3600000" },
{ "compression.type", "lz4" },
{ "max.message.bytes", "1048576" },
{ "min.insync.replicas", "2" },
{ "unclean.leader.election.enable", "false" },
},
});
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.kafka.Topic;
import com.pulumi.kafka.TopicArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
var metrics = new Topic("metrics", TopicArgs.builder()
.name("system-metrics")
.replicationFactor(2)
.partitions(200)
.config(Map.ofEntries(
Map.entry("retention.ms", "86400000"),
Map.entry("segment.ms", "3600000"),
Map.entry("compression.type", "lz4"),
Map.entry("max.message.bytes", "1048576"),
Map.entry("min.insync.replicas", "2"),
Map.entry("unclean.leader.election.enable", "false")
))
.build());
}
}
resources:
metrics:
type: kafka:Topic
properties:
name: system-metrics
replicationFactor: 2
partitions: 200
config:
retention.ms: '86400000'
segment.ms: '3600000'
compression.type: lz4
max.message.bytes: '1048576'
min.insync.replicas: '2'
unclean.leader.election.enable: 'false'
Configuration Parameters
The config
map supports all Kafka topic-level configurations. Common configurations include:
Retention Settings
- retention.ms - How long to retain messages (in milliseconds). Default: 604800000 (7 days)
- retention.bytes - Maximum size of the log before deleting old segments. Default: -1 (no limit)
- segment.ms - Time after which a log segment should be rotated. Default: 604800000 (7 days)
- segment.bytes - Maximum size of a single log segment file. Default: 1073741824 (1GB)
Cleanup and Compaction
- cleanup.policy - Either “delete” or “compact”, or both as “compact,delete”. Default: “delete”
- min.compaction.lag.ms - Minimum time a message will remain uncompacted. Default: 0
- delete.retention.ms - How long to retain delete tombstone markers for compacted topics. Default: 86400000 (1 day)
Compression
- compression.type - Compression codec: “uncompressed”, “zstd”, “lz4”, “snappy”, “gzip”, “producer”. Default: “producer”
Replication and Durability
- min.insync.replicas - Minimum number of replicas that must acknowledge a write. Default: 1
- unclean.leader.election.enable - Whether to allow replicas not in the ISR to be elected leader. Default: false
Message Size
- max.message.bytes - Maximum size of a message. Default: 1048588 (~1MB)
- message.timestamp.type - Whether to use CreateTime or LogAppendTime. Default: “CreateTime”
For a complete list of configurations, refer to the Kafka documentation.
Note: Increasing the partition count is supported without recreating the topic. However, decreasing partitions requires topic recreation.
Create Topic Resource
Resources are created with functions called constructors. To learn more about declaring and configuring resources, see Resources.
Constructor syntax
new Topic(name: string, args: TopicArgs, opts?: CustomResourceOptions);
@overload
def Topic(resource_name: str,
args: TopicArgs,
opts: Optional[ResourceOptions] = None)
@overload
def Topic(resource_name: str,
opts: Optional[ResourceOptions] = None,
partitions: Optional[int] = None,
replication_factor: Optional[int] = None,
config: Optional[Mapping[str, str]] = None,
name: Optional[str] = None)
func NewTopic(ctx *Context, name string, args TopicArgs, opts ...ResourceOption) (*Topic, error)
public Topic(string name, TopicArgs args, CustomResourceOptions? opts = null)
type: kafka:Topic
properties: # The arguments to resource properties.
options: # Bag of options to control resource's behavior.
Parameters
- name string
- The unique name of the resource.
- args TopicArgs
- The arguments to resource properties.
- opts CustomResourceOptions
- Bag of options to control resource's behavior.
- resource_name str
- The unique name of the resource.
- args TopicArgs
- The arguments to resource properties.
- opts ResourceOptions
- Bag of options to control resource's behavior.
- ctx Context
- Context object for the current deployment.
- name string
- The unique name of the resource.
- args TopicArgs
- The arguments to resource properties.
- opts ResourceOption
- Bag of options to control resource's behavior.
- name string
- The unique name of the resource.
- args TopicArgs
- The arguments to resource properties.
- opts CustomResourceOptions
- Bag of options to control resource's behavior.
- name String
- The unique name of the resource.
- args TopicArgs
- The arguments to resource properties.
- options CustomResourceOptions
- Bag of options to control resource's behavior.
Constructor example
The following reference example uses placeholder values for all input properties.
var topicResource = new Kafka.Topic("topicResource", new()
{
Partitions = 0,
ReplicationFactor = 0,
Config =
{
{ "string", "string" },
},
Name = "string",
});
example, err := kafka.NewTopic(ctx, "topicResource", &kafka.TopicArgs{
Partitions: pulumi.Int(0),
ReplicationFactor: pulumi.Int(0),
Config: pulumi.StringMap{
"string": pulumi.String("string"),
},
Name: pulumi.String("string"),
})
var topicResource = new Topic("topicResource", TopicArgs.builder()
.partitions(0)
.replicationFactor(0)
.config(Map.of("string", "string"))
.name("string")
.build());
topic_resource = kafka.Topic("topicResource",
partitions=0,
replication_factor=0,
config={
"string": "string",
},
name="string")
const topicResource = new kafka.Topic("topicResource", {
partitions: 0,
replicationFactor: 0,
config: {
string: "string",
},
name: "string",
});
type: kafka:Topic
properties:
config:
string: string
name: string
partitions: 0
replicationFactor: 0
Topic Resource Properties
To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.
Inputs
In Python, inputs that are objects can be passed either as argument classes or as dictionary literals.
The Topic resource accepts the following input properties:
- Partitions int
- Number of partitions.
- Replication
Factor int - Number of replicas.
- Config Dictionary<string, string>
- A map of string k/v attributes.
- Name string
- The name of the topic.
- Partitions int
- Number of partitions.
- Replication
Factor int - Number of replicas.
- Config map[string]string
- A map of string k/v attributes.
- Name string
- The name of the topic.
- partitions Integer
- Number of partitions.
- replication
Factor Integer - Number of replicas.
- config Map<String,String>
- A map of string k/v attributes.
- name String
- The name of the topic.
- partitions number
- Number of partitions.
- replication
Factor number - Number of replicas.
- config {[key: string]: string}
- A map of string k/v attributes.
- name string
- The name of the topic.
- partitions int
- Number of partitions.
- replication_
factor int - Number of replicas.
- config Mapping[str, str]
- A map of string k/v attributes.
- name str
- The name of the topic.
- partitions Number
- Number of partitions.
- replication
Factor Number - Number of replicas.
- config Map<String>
- A map of string k/v attributes.
- name String
- The name of the topic.
Outputs
All input properties are implicitly available as output properties. Additionally, the Topic resource produces the following output properties:
- Id string
- The provider-assigned unique ID for this managed resource.
- Id string
- The provider-assigned unique ID for this managed resource.
- id String
- The provider-assigned unique ID for this managed resource.
- id string
- The provider-assigned unique ID for this managed resource.
- id str
- The provider-assigned unique ID for this managed resource.
- id String
- The provider-assigned unique ID for this managed resource.
Look up Existing Topic Resource
Get an existing Topic resource’s state with the given name, ID, and optional extra properties used to qualify the lookup.
public static get(name: string, id: Input<ID>, state?: TopicState, opts?: CustomResourceOptions): Topic
@staticmethod
def get(resource_name: str,
id: str,
opts: Optional[ResourceOptions] = None,
config: Optional[Mapping[str, str]] = None,
name: Optional[str] = None,
partitions: Optional[int] = None,
replication_factor: Optional[int] = None) -> Topic
func GetTopic(ctx *Context, name string, id IDInput, state *TopicState, opts ...ResourceOption) (*Topic, error)
public static Topic Get(string name, Input<string> id, TopicState? state, CustomResourceOptions? opts = null)
public static Topic get(String name, Output<String> id, TopicState state, CustomResourceOptions options)
resources:
  _:
    type: kafka:Topic
    get:
      id: ${id}
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- resource_name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- Config Dictionary<string, string>
- A map of string k/v attributes.
- Name string
- The name of the topic.
- Partitions int
- Number of partitions.
- Replication
Factor int - Number of replicas.
- Config map[string]string
- A map of string k/v attributes.
- Name string
- The name of the topic.
- Partitions int
- Number of partitions.
- Replication
Factor int - Number of replicas.
- config Map<String,String>
- A map of string k/v attributes.
- name String
- The name of the topic.
- partitions Integer
- Number of partitions.
- replication
Factor Integer - Number of replicas.
- config {[key: string]: string}
- A map of string k/v attributes.
- name string
- The name of the topic.
- partitions number
- Number of partitions.
- replication
Factor number - Number of replicas.
- config Mapping[str, str]
- A map of string k/v attributes.
- name str
- The name of the topic.
- partitions int
- Number of partitions.
- replication_
factor int - Number of replicas.
- config Map<String>
- A map of string k/v attributes.
- name String
- The name of the topic.
- partitions Number
- Number of partitions.
- replication
Factor Number - Number of replicas.
Import
Existing Kafka topics can be imported using the topic name:
$ pulumi import kafka:index/topic:Topic example example-topic
To learn more about importing existing cloud resources, see Importing resources.
Package Details
- Repository
- Kafka pulumi/pulumi-kafka
- License
- Apache-2.0
- Notes
- This Pulumi package is based on the
kafka
Terraform Provider.