aiven.Kafka
The Kafka resource allows the creation and management of Aiven Kafka services.
Example Usage
C#

using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Aiven = Pulumi.Aiven;

return await Deployment.RunAsync(() =>
{
    var kafka1 = new Aiven.Kafka("kafka1", new()
    {
        Project = "my-project", // your Aiven project name, or a reference to an aiven.Project resource
        CloudName = "google-europe-west1",
        Plan = "business-4",
        ServiceName = "my-kafka1",
        MaintenanceWindowDow = "monday",
        MaintenanceWindowTime = "10:00:00",
        KafkaUserConfig = new Aiven.Inputs.KafkaKafkaUserConfigArgs
        {
            KafkaRest = true,
            KafkaConnect = true,
            SchemaRegistry = true,
            KafkaVersion = "3.1",
            Kafka = new Aiven.Inputs.KafkaKafkaUserConfigKafkaArgs
            {
                GroupMaxSessionTimeoutMs = 70000,
                LogRetentionBytes = 1000000000,
            },
            PublicAccess = new Aiven.Inputs.KafkaKafkaUserConfigPublicAccessArgs
            {
                KafkaRest = true,
                KafkaConnect = true,
            },
        },
    });
});
Go

package main

import (
    "github.com/pulumi/pulumi-aiven/sdk/v6/go/aiven"
    "github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
    pulumi.Run(func(ctx *pulumi.Context) error {
        _, err := aiven.NewKafka(ctx, "kafka1", &aiven.KafkaArgs{
            Project:               pulumi.String("my-project"), // your Aiven project name
            CloudName:             pulumi.String("google-europe-west1"),
            Plan:                  pulumi.String("business-4"),
            ServiceName:           pulumi.String("my-kafka1"),
            MaintenanceWindowDow:  pulumi.String("monday"),
            MaintenanceWindowTime: pulumi.String("10:00:00"),
            KafkaUserConfig: &aiven.KafkaKafkaUserConfigArgs{
                KafkaRest:      pulumi.Bool(true),
                KafkaConnect:   pulumi.Bool(true),
                SchemaRegistry: pulumi.Bool(true),
                KafkaVersion:   pulumi.String("3.1"),
                Kafka: &aiven.KafkaKafkaUserConfigKafkaArgs{
                    GroupMaxSessionTimeoutMs: pulumi.Int(70000),
                    LogRetentionBytes:        pulumi.Int(1000000000),
                },
                PublicAccess: &aiven.KafkaKafkaUserConfigPublicAccessArgs{
                    KafkaRest:    pulumi.Bool(true),
                    KafkaConnect: pulumi.Bool(true),
                },
            },
        })
        if err != nil {
            return err
        }
        return nil
    })
}
Java

package generated_program;

import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.aiven.Kafka;
import com.pulumi.aiven.KafkaArgs;
import com.pulumi.aiven.inputs.KafkaKafkaUserConfigArgs;
import com.pulumi.aiven.inputs.KafkaKafkaUserConfigKafkaArgs;
import com.pulumi.aiven.inputs.KafkaKafkaUserConfigPublicAccessArgs;

public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        var kafka1 = new Kafka("kafka1", KafkaArgs.builder()
            .project("my-project") // your Aiven project name
            .cloudName("google-europe-west1")
            .plan("business-4")
            .serviceName("my-kafka1")
            .maintenanceWindowDow("monday")
            .maintenanceWindowTime("10:00:00")
            .kafkaUserConfig(KafkaKafkaUserConfigArgs.builder()
                .kafkaRest(true)
                .kafkaConnect(true)
                .schemaRegistry(true)
                .kafkaVersion("3.1")
                .kafka(KafkaKafkaUserConfigKafkaArgs.builder()
                    .groupMaxSessionTimeoutMs(70000)
                    .logRetentionBytes(1000000000)
                    .build())
                .publicAccess(KafkaKafkaUserConfigPublicAccessArgs.builder()
                    .kafkaRest(true)
                    .kafkaConnect(true)
                    .build())
                .build())
            .build());
    }
}
Python

import pulumi
import pulumi_aiven as aiven

kafka1 = aiven.Kafka("kafka1",
    project="my-project",  # your Aiven project name
    cloud_name="google-europe-west1",
    plan="business-4",
    service_name="my-kafka1",
    maintenance_window_dow="monday",
    maintenance_window_time="10:00:00",
    kafka_user_config=aiven.KafkaKafkaUserConfigArgs(
        kafka_rest=True,
        kafka_connect=True,
        schema_registry=True,
        kafka_version="3.1",
        kafka=aiven.KafkaKafkaUserConfigKafkaArgs(
            group_max_session_timeout_ms=70000,
            log_retention_bytes=1000000000,
        ),
        public_access=aiven.KafkaKafkaUserConfigPublicAccessArgs(
            kafka_rest=True,
            kafka_connect=True,
        ),
    ))
TypeScript

import * as pulumi from "@pulumi/pulumi";
import * as aiven from "@pulumi/aiven";

const kafka1 = new aiven.Kafka("kafka1", {
    project: "my-project", // your Aiven project name
    cloudName: "google-europe-west1",
    plan: "business-4",
    serviceName: "my-kafka1",
    maintenanceWindowDow: "monday",
    maintenanceWindowTime: "10:00:00",
    kafkaUserConfig: {
        kafkaRest: true,
        kafkaConnect: true,
        schemaRegistry: true,
        kafkaVersion: "3.1",
        kafka: {
            groupMaxSessionTimeoutMs: 70000,
            logRetentionBytes: 1000000000,
        },
        publicAccess: {
            kafkaRest: true,
            kafkaConnect: true,
        },
    },
});
YAML

resources:
  kafka1:
    type: aiven:Kafka
    properties:
      project: my-project # your Aiven project name
      cloudName: google-europe-west1
      plan: business-4
      serviceName: my-kafka1
      maintenanceWindowDow: monday
      maintenanceWindowTime: '10:00:00'
      kafkaUserConfig:
        kafkaRest: true
        kafkaConnect: true
        schemaRegistry: true
        kafkaVersion: '3.1'
        kafka:
          groupMaxSessionTimeoutMs: 70000
          logRetentionBytes: 1000000000
        publicAccess:
          kafkaRest: true
          kafkaConnect: true
Create Kafka Resource
new Kafka(name: string, args: KafkaArgs, opts?: CustomResourceOptions);
@overload
def Kafka(resource_name: str,
          opts: Optional[ResourceOptions] = None,
          additional_disk_space: Optional[str] = None,
          cloud_name: Optional[str] = None,
          default_acl: Optional[bool] = None,
          disk_space: Optional[str] = None,
          kafka_user_config: Optional[KafkaKafkaUserConfigArgs] = None,
          karapace: Optional[bool] = None,
          maintenance_window_dow: Optional[str] = None,
          maintenance_window_time: Optional[str] = None,
          plan: Optional[str] = None,
          project: Optional[str] = None,
          project_vpc_id: Optional[str] = None,
          service_integrations: Optional[Sequence[KafkaServiceIntegrationArgs]] = None,
          service_name: Optional[str] = None,
          static_ips: Optional[Sequence[str]] = None,
          tags: Optional[Sequence[KafkaTagArgs]] = None,
          termination_protection: Optional[bool] = None)
@overload
def Kafka(resource_name: str,
          args: KafkaArgs,
          opts: Optional[ResourceOptions] = None)
func NewKafka(ctx *Context, name string, args KafkaArgs, opts ...ResourceOption) (*Kafka, error)
public Kafka(string name, KafkaArgs args, CustomResourceOptions? opts = null)
type: aiven:Kafka
properties: # The arguments to resource properties.
options: # Bag of options to control resource's behavior.
- name string
- The unique name of the resource.
- args KafkaArgs
- The arguments to resource properties.
- opts CustomResourceOptions
- Bag of options to control resource's behavior.
- resource_name str
- The unique name of the resource.
- args KafkaArgs
- The arguments to resource properties.
- opts ResourceOptions
- Bag of options to control resource's behavior.
- ctx Context
- Context object for the current deployment.
- name string
- The unique name of the resource.
- args KafkaArgs
- The arguments to resource properties.
- opts ResourceOption
- Bag of options to control resource's behavior.
- name string
- The unique name of the resource.
- args KafkaArgs
- The arguments to resource properties.
- opts CustomResourceOptions
- Bag of options to control resource's behavior.
- name String
- The unique name of the resource.
- args KafkaArgs
- The arguments to resource properties.
- options CustomResourceOptions
- Bag of options to control resource's behavior.
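The options bag is where cross-cutting Pulumi behavior is configured. For instance, a minimal TypeScript sketch (the project and service names are placeholders) showing how the protect option makes Pulumi refuse to delete the resource:

import * as aiven from "@pulumi/aiven";

// The third constructor argument is the CustomResourceOptions bag.
// "protect: true" makes Pulumi refuse to delete this resource until
// the option is removed and the change is applied.
const kafka = new aiven.Kafka("kafka-protected", {
    project: "my-project",            // placeholder project name
    cloudName: "google-europe-west1",
    plan: "business-4",
    serviceName: "my-kafka-protected",
}, { protect: true });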
Kafka Resource Properties
To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.
Inputs
The Kafka resource accepts the following input properties:
C#
- Plan string
  Defines what kind of computing resources are allocated for the service. It can be changed after creation, though there are some restrictions when moving to a smaller plan: the new plan must have a sufficient amount of disk space to store all current data, and switching to a plan with fewer nodes might not be supported. The basic plan names are hobbyist, startup-x, business-x and premium-x, where x is (roughly) the amount of memory on each node (other attributes such as the number of CPUs and the amount of disk space also vary, but the naming is based on memory). The available options can be seen on the Aiven pricing page.
- Project string
  Identifies the project this resource belongs to. To set up proper dependencies, refer to this variable as a reference. This property cannot be changed; doing so forces recreation of the resource.
- ServiceName string
  Specifies the actual name of the service. The name cannot be changed later without destroying and re-creating the service, so the name should be picked based on intended service usage rather than current attributes.
- AdditionalDiskSpace string
  Additional disk space. Possible values depend on the service type, the cloud provider and the project. Reducing this value will result in the service rebalancing.
- CloudName string
  Defines the cloud provider and region where the service is hosted. This can be changed freely after the service is created; changing the value will trigger a potentially lengthy migration process for the service. The format is the cloud provider name (aws, azure, do, google, upcloud, etc.), a dash, and the cloud-provider-specific region name. These are documented in each cloud provider's own support articles.
- DefaultAcl bool
  Create a default wildcard Kafka ACL.
- DiskSpace string
  Service disk space. Possible values depend on the service type, the cloud provider and the project. Reducing this value will result in the service rebalancing. This will be removed in v5.0.0; please use additional_disk_space to specify the space to be added to the default disk_space defined by the plan.
- KafkaUserConfig KafkaKafkaUserConfig
  Kafka user configurable settings.
- Karapace bool
  Switch the service to use Karapace for schema registry and REST proxy. Usage of this field is discouraged.
- MaintenanceWindowDow string
  Day of week when maintenance operations should be performed. One of monday, tuesday, wednesday, etc.
- MaintenanceWindowTime string
  Time of day when maintenance operations should be performed. UTC time in HH:mm:ss format.
- ProjectVpcId string
  Specifies the VPC the service should run in. If the value is not set, the service does not run inside a VPC. When set, the value should be given as a reference to set up dependencies correctly, and the VPC must be in the same cloud and region as the service itself. The service can be freely moved to and from a VPC after creation, but doing so triggers migration to new servers, so the operation can take a significant amount of time to complete if the service has a lot of data.
- ServiceIntegrations List<KafkaServiceIntegration>
  Service integrations to specify when creating a service. Not applied after initial service creation.
- StaticIps List<string>
  Use static public IP addresses.
- Tags List<KafkaTag>
  Tags are key-value pairs that allow you to categorize services.
- TerminationProtection bool
  Prevents the service from being deleted. It is recommended to set this to true for all production services to prevent unintentional service deletion. This does not shield against deleting databases or topics, but for services with backups much of the content can at least be restored from backup in case of accidental deletion.
Go
- Plan string
  Defines what kind of computing resources are allocated for the service. It can be changed after creation, though there are some restrictions when moving to a smaller plan: the new plan must have a sufficient amount of disk space to store all current data, and switching to a plan with fewer nodes might not be supported. The basic plan names are hobbyist, startup-x, business-x and premium-x, where x is (roughly) the amount of memory on each node (other attributes such as the number of CPUs and the amount of disk space also vary, but the naming is based on memory). The available options can be seen on the Aiven pricing page.
- Project string
  Identifies the project this resource belongs to. To set up proper dependencies, refer to this variable as a reference. This property cannot be changed; doing so forces recreation of the resource.
- ServiceName string
  Specifies the actual name of the service. The name cannot be changed later without destroying and re-creating the service, so the name should be picked based on intended service usage rather than current attributes.
- AdditionalDiskSpace string
  Additional disk space. Possible values depend on the service type, the cloud provider and the project. Reducing this value will result in the service rebalancing.
- CloudName string
  Defines the cloud provider and region where the service is hosted. This can be changed freely after the service is created; changing the value will trigger a potentially lengthy migration process for the service. The format is the cloud provider name (aws, azure, do, google, upcloud, etc.), a dash, and the cloud-provider-specific region name. These are documented in each cloud provider's own support articles.
- DefaultAcl bool
  Create a default wildcard Kafka ACL.
- DiskSpace string
  Service disk space. Possible values depend on the service type, the cloud provider and the project. Reducing this value will result in the service rebalancing. This will be removed in v5.0.0; please use additional_disk_space to specify the space to be added to the default disk_space defined by the plan.
- KafkaUserConfig KafkaKafkaUserConfigArgs
  Kafka user configurable settings.
- Karapace bool
  Switch the service to use Karapace for schema registry and REST proxy. Usage of this field is discouraged.
- MaintenanceWindowDow string
  Day of week when maintenance operations should be performed. One of monday, tuesday, wednesday, etc.
- MaintenanceWindowTime string
  Time of day when maintenance operations should be performed. UTC time in HH:mm:ss format.
- ProjectVpcId string
  Specifies the VPC the service should run in. If the value is not set, the service does not run inside a VPC. When set, the value should be given as a reference to set up dependencies correctly, and the VPC must be in the same cloud and region as the service itself. The service can be freely moved to and from a VPC after creation, but doing so triggers migration to new servers, so the operation can take a significant amount of time to complete if the service has a lot of data.
- ServiceIntegrations []KafkaServiceIntegrationArgs
  Service integrations to specify when creating a service. Not applied after initial service creation.
- StaticIps []string
  Use static public IP addresses.
- Tags []KafkaTagArgs
  Tags are key-value pairs that allow you to categorize services.
- TerminationProtection bool
  Prevents the service from being deleted. It is recommended to set this to true for all production services to prevent unintentional service deletion. This does not shield against deleting databases or topics, but for services with backups much of the content can at least be restored from backup in case of accidental deletion.
Java
- plan String
  Defines what kind of computing resources are allocated for the service. It can be changed after creation, though there are some restrictions when moving to a smaller plan: the new plan must have a sufficient amount of disk space to store all current data, and switching to a plan with fewer nodes might not be supported. The basic plan names are hobbyist, startup-x, business-x and premium-x, where x is (roughly) the amount of memory on each node (other attributes such as the number of CPUs and the amount of disk space also vary, but the naming is based on memory). The available options can be seen on the Aiven pricing page.
- project String
  Identifies the project this resource belongs to. To set up proper dependencies, refer to this variable as a reference. This property cannot be changed; doing so forces recreation of the resource.
- serviceName String
  Specifies the actual name of the service. The name cannot be changed later without destroying and re-creating the service, so the name should be picked based on intended service usage rather than current attributes.
- additionalDiskSpace String
  Additional disk space. Possible values depend on the service type, the cloud provider and the project. Reducing this value will result in the service rebalancing.
- cloudName String
  Defines the cloud provider and region where the service is hosted. This can be changed freely after the service is created; changing the value will trigger a potentially lengthy migration process for the service. The format is the cloud provider name (aws, azure, do, google, upcloud, etc.), a dash, and the cloud-provider-specific region name. These are documented in each cloud provider's own support articles.
- defaultAcl Boolean
  Create a default wildcard Kafka ACL.
- diskSpace String
  Service disk space. Possible values depend on the service type, the cloud provider and the project. Reducing this value will result in the service rebalancing. This will be removed in v5.0.0; please use additional_disk_space to specify the space to be added to the default disk_space defined by the plan.
- kafkaUserConfig KafkaKafkaUserConfig
  Kafka user configurable settings.
- karapace Boolean
  Switch the service to use Karapace for schema registry and REST proxy. Usage of this field is discouraged.
- maintenanceWindowDow String
  Day of week when maintenance operations should be performed. One of monday, tuesday, wednesday, etc.
- maintenanceWindowTime String
  Time of day when maintenance operations should be performed. UTC time in HH:mm:ss format.
- projectVpcId String
  Specifies the VPC the service should run in. If the value is not set, the service does not run inside a VPC. When set, the value should be given as a reference to set up dependencies correctly, and the VPC must be in the same cloud and region as the service itself. The service can be freely moved to and from a VPC after creation, but doing so triggers migration to new servers, so the operation can take a significant amount of time to complete if the service has a lot of data.
- serviceIntegrations List<KafkaServiceIntegration>
  Service integrations to specify when creating a service. Not applied after initial service creation.
- staticIps List<String>
  Use static public IP addresses.
- tags List<KafkaTag>
  Tags are key-value pairs that allow you to categorize services.
- terminationProtection Boolean
  Prevents the service from being deleted. It is recommended to set this to true for all production services to prevent unintentional service deletion. This does not shield against deleting databases or topics, but for services with backups much of the content can at least be restored from backup in case of accidental deletion.
TypeScript
- plan string
  Defines what kind of computing resources are allocated for the service. It can be changed after creation, though there are some restrictions when moving to a smaller plan: the new plan must have a sufficient amount of disk space to store all current data, and switching to a plan with fewer nodes might not be supported. The basic plan names are hobbyist, startup-x, business-x and premium-x, where x is (roughly) the amount of memory on each node (other attributes such as the number of CPUs and the amount of disk space also vary, but the naming is based on memory). The available options can be seen on the Aiven pricing page.
- project string
  Identifies the project this resource belongs to. To set up proper dependencies, refer to this variable as a reference. This property cannot be changed; doing so forces recreation of the resource.
- serviceName string
  Specifies the actual name of the service. The name cannot be changed later without destroying and re-creating the service, so the name should be picked based on intended service usage rather than current attributes.
- additionalDiskSpace string
  Additional disk space. Possible values depend on the service type, the cloud provider and the project. Reducing this value will result in the service rebalancing.
- cloudName string
  Defines the cloud provider and region where the service is hosted. This can be changed freely after the service is created; changing the value will trigger a potentially lengthy migration process for the service. The format is the cloud provider name (aws, azure, do, google, upcloud, etc.), a dash, and the cloud-provider-specific region name. These are documented in each cloud provider's own support articles.
- defaultAcl boolean
  Create a default wildcard Kafka ACL.
- diskSpace string
  Service disk space. Possible values depend on the service type, the cloud provider and the project. Reducing this value will result in the service rebalancing. This will be removed in v5.0.0; please use additional_disk_space to specify the space to be added to the default disk_space defined by the plan.
- kafkaUserConfig KafkaKafkaUserConfig
  Kafka user configurable settings.
- karapace boolean
  Switch the service to use Karapace for schema registry and REST proxy. Usage of this field is discouraged.
- maintenanceWindowDow string
  Day of week when maintenance operations should be performed. One of monday, tuesday, wednesday, etc.
- maintenanceWindowTime string
  Time of day when maintenance operations should be performed. UTC time in HH:mm:ss format.
- projectVpcId string
  Specifies the VPC the service should run in. If the value is not set, the service does not run inside a VPC. When set, the value should be given as a reference to set up dependencies correctly, and the VPC must be in the same cloud and region as the service itself. The service can be freely moved to and from a VPC after creation, but doing so triggers migration to new servers, so the operation can take a significant amount of time to complete if the service has a lot of data.
- serviceIntegrations KafkaServiceIntegration[]
  Service integrations to specify when creating a service. Not applied after initial service creation.
- staticIps string[]
  Use static public IP addresses.
- tags KafkaTag[]
  Tags are key-value pairs that allow you to categorize services.
- terminationProtection boolean
  Prevents the service from being deleted. It is recommended to set this to true for all production services to prevent unintentional service deletion. This does not shield against deleting databases or topics, but for services with backups much of the content can at least be restored from backup in case of accidental deletion.
Python
- plan str
  Defines what kind of computing resources are allocated for the service. It can be changed after creation, though there are some restrictions when moving to a smaller plan: the new plan must have a sufficient amount of disk space to store all current data, and switching to a plan with fewer nodes might not be supported. The basic plan names are hobbyist, startup-x, business-x and premium-x, where x is (roughly) the amount of memory on each node (other attributes such as the number of CPUs and the amount of disk space also vary, but the naming is based on memory). The available options can be seen on the Aiven pricing page.
- project str
  Identifies the project this resource belongs to. To set up proper dependencies, refer to this variable as a reference. This property cannot be changed; doing so forces recreation of the resource.
- service_name str
  Specifies the actual name of the service. The name cannot be changed later without destroying and re-creating the service, so the name should be picked based on intended service usage rather than current attributes.
- additional_disk_space str
  Additional disk space. Possible values depend on the service type, the cloud provider and the project. Reducing this value will result in the service rebalancing.
- cloud_name str
  Defines the cloud provider and region where the service is hosted. This can be changed freely after the service is created; changing the value will trigger a potentially lengthy migration process for the service. The format is the cloud provider name (aws, azure, do, google, upcloud, etc.), a dash, and the cloud-provider-specific region name. These are documented in each cloud provider's own support articles.
- default_acl bool
  Create a default wildcard Kafka ACL.
- disk_space str
  Service disk space. Possible values depend on the service type, the cloud provider and the project. Reducing this value will result in the service rebalancing. This will be removed in v5.0.0; please use additional_disk_space to specify the space to be added to the default disk_space defined by the plan.
- kafka_user_config KafkaKafkaUserConfigArgs
  Kafka user configurable settings.
- karapace bool
  Switch the service to use Karapace for schema registry and REST proxy. Usage of this field is discouraged.
- maintenance_window_dow str
  Day of week when maintenance operations should be performed. One of monday, tuesday, wednesday, etc.
- maintenance_window_time str
  Time of day when maintenance operations should be performed. UTC time in HH:mm:ss format.
- project_vpc_id str
  Specifies the VPC the service should run in. If the value is not set, the service does not run inside a VPC. When set, the value should be given as a reference to set up dependencies correctly, and the VPC must be in the same cloud and region as the service itself. The service can be freely moved to and from a VPC after creation, but doing so triggers migration to new servers, so the operation can take a significant amount of time to complete if the service has a lot of data.
- service_integrations Sequence[KafkaServiceIntegrationArgs]
  Service integrations to specify when creating a service. Not applied after initial service creation.
- static_ips Sequence[str]
  Use static public IP addresses.
- tags Sequence[KafkaTagArgs]
  Tags are key-value pairs that allow you to categorize services.
- termination_protection bool
  Prevents the service from being deleted. It is recommended to set this to true for all production services to prevent unintentional service deletion. This does not shield against deleting databases or topics, but for services with backups much of the content can at least be restored from backup in case of accidental deletion.
YAML
- plan String
  Defines what kind of computing resources are allocated for the service. It can be changed after creation, though there are some restrictions when moving to a smaller plan: the new plan must have a sufficient amount of disk space to store all current data, and switching to a plan with fewer nodes might not be supported. The basic plan names are hobbyist, startup-x, business-x and premium-x, where x is (roughly) the amount of memory on each node (other attributes such as the number of CPUs and the amount of disk space also vary, but the naming is based on memory). The available options can be seen on the Aiven pricing page.
- project String
  Identifies the project this resource belongs to. To set up proper dependencies, refer to this variable as a reference. This property cannot be changed; doing so forces recreation of the resource.
- serviceName String
  Specifies the actual name of the service. The name cannot be changed later without destroying and re-creating the service, so the name should be picked based on intended service usage rather than current attributes.
- additionalDiskSpace String
  Additional disk space. Possible values depend on the service type, the cloud provider and the project. Reducing this value will result in the service rebalancing.
- cloudName String
  Defines the cloud provider and region where the service is hosted. This can be changed freely after the service is created; changing the value will trigger a potentially lengthy migration process for the service. The format is the cloud provider name (aws, azure, do, google, upcloud, etc.), a dash, and the cloud-provider-specific region name. These are documented in each cloud provider's own support articles.
- defaultAcl Boolean
  Create a default wildcard Kafka ACL.
- diskSpace String
  Service disk space. Possible values depend on the service type, the cloud provider and the project. Reducing this value will result in the service rebalancing. This will be removed in v5.0.0; please use additional_disk_space to specify the space to be added to the default disk_space defined by the plan.
- kafkaUserConfig Property Map
  Kafka user configurable settings.
- karapace Boolean
  Switch the service to use Karapace for schema registry and REST proxy. Usage of this field is discouraged.
- maintenanceWindowDow String
  Day of week when maintenance operations should be performed. One of monday, tuesday, wednesday, etc.
- maintenanceWindowTime String
  Time of day when maintenance operations should be performed. UTC time in HH:mm:ss format.
- projectVpcId String
  Specifies the VPC the service should run in. If the value is not set, the service does not run inside a VPC. When set, the value should be given as a reference to set up dependencies correctly, and the VPC must be in the same cloud and region as the service itself. The service can be freely moved to and from a VPC after creation, but doing so triggers migration to new servers, so the operation can take a significant amount of time to complete if the service has a lot of data.
- serviceIntegrations List<Property Map>
  Service integrations to specify when creating a service. Not applied after initial service creation.
- staticIps List<String>
  Use static public IP addresses.
- tags List<Property Map>
  Tags are key-value pairs that allow you to categorize services.
- terminationProtection Boolean
  Prevents the service from being deleted. It is recommended to set this to true for all production services to prevent unintentional service deletion. This does not shield against deleting databases or topics, but for services with backups much of the content can at least be restored from backup in case of accidental deletion.
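Since disk_space is deprecated in favor of additionalDiskSpace, extra capacity is best requested on top of the plan's default allocation. A minimal TypeScript sketch (the names and the size value are placeholders; valid sizes depend on the plan's disk space step):

import * as aiven from "@pulumi/aiven";

// Request extra disk on top of the plan's default allocation instead of
// setting the deprecated diskSpace property, and guard the service against
// accidental deletion on the Aiven side with terminationProtection.
const kafka = new aiven.Kafka("kafka-sized", {
    project: "my-project",           // placeholder project name
    cloudName: "google-europe-west1",
    plan: "business-4",
    serviceName: "my-kafka-sized",
    additionalDiskSpace: "100GiB",   // assumed size string; must match the plan's disk step
    terminationProtection: true,
});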
Outputs
All input properties are implicitly available as output properties. Additionally, the Kafka resource produces the following output properties:
C#
- Components List<KafkaComponent>
  Service component information objects.
- DiskSpaceCap string
  The maximum disk space of the service; possible values depend on the service type, the cloud provider and the project.
- DiskSpaceDefault string
  The default disk space of the service; possible values depend on the service type, the cloud provider and the project. It is also the minimum value for disk_space.
- DiskSpaceStep string
  The default disk space step of the service; possible values depend on the service type, the cloud provider and the project. disk_space must increase from disk_space_default in increments of this size.
- DiskSpaceUsed string
  Disk space that the service is currently using.
- Id string
  The provider-assigned unique ID for this managed resource.
- KafkaServer List<KafkaKafka>
  Kafka broker configuration values.
- ServiceHost string
  The hostname of the service.
- ServicePassword string
  Password used for connecting to the service, if applicable.
- ServicePort int
  The port of the service.
- ServiceType string
  Aiven internal service type code.
- ServiceUri string
  URI for connecting to the service. Service-specific info is under "kafka", "pg", etc.
- ServiceUsername string
  Username used for connecting to the service, if applicable.
- State string
  Service state. One of POWEROFF, REBALANCING, REBUILDING or RUNNING.
Go
- Components []KafkaComponent
  Service component information objects.
- DiskSpaceCap string
  The maximum disk space of the service; possible values depend on the service type, the cloud provider and the project.
- DiskSpaceDefault string
  The default disk space of the service; possible values depend on the service type, the cloud provider and the project. It is also the minimum value for disk_space.
- DiskSpaceStep string
  The default disk space step of the service; possible values depend on the service type, the cloud provider and the project. disk_space must increase from disk_space_default in increments of this size.
- DiskSpaceUsed string
  Disk space that the service is currently using.
- Id string
  The provider-assigned unique ID for this managed resource.
- Kafkas []KafkaKafka
  Kafka broker configuration values.
- ServiceHost string
  The hostname of the service.
- ServicePassword string
  Password used for connecting to the service, if applicable.
- ServicePort int
  The port of the service.
- ServiceType string
  Aiven internal service type code.
- ServiceUri string
  URI for connecting to the service. Service-specific info is under "kafka", "pg", etc.
- ServiceUsername string
  Username used for connecting to the service, if applicable.
- State string
  Service state. One of POWEROFF, REBALANCING, REBUILDING or RUNNING.
Java
- components List<KafkaComponent>
  Service component information objects.
- diskSpaceCap String
  The maximum disk space of the service; possible values depend on the service type, the cloud provider and the project.
- diskSpaceDefault String
  The default disk space of the service; possible values depend on the service type, the cloud provider and the project. It is also the minimum value for disk_space.
- diskSpaceStep String
  The default disk space step of the service; possible values depend on the service type, the cloud provider and the project. disk_space must increase from disk_space_default in increments of this size.
- diskSpaceUsed String
  Disk space that the service is currently using.
- id String
  The provider-assigned unique ID for this managed resource.
- kafkas List<KafkaKafka>
  Kafka broker configuration values.
- serviceHost String
  The hostname of the service.
- servicePassword String
  Password used for connecting to the service, if applicable.
- servicePort Integer
  The port of the service.
- serviceType String
  Aiven internal service type code.
- serviceUri String
  URI for connecting to the service. Service-specific info is under "kafka", "pg", etc.
- serviceUsername String
  Username used for connecting to the service, if applicable.
- state String
  Service state. One of POWEROFF, REBALANCING, REBUILDING or RUNNING.
TypeScript
- components KafkaComponent[]
  Service component information objects.
- diskSpaceCap string
  The maximum disk space of the service; possible values depend on the service type, the cloud provider and the project.
- diskSpaceDefault string
  The default disk space of the service; possible values depend on the service type, the cloud provider and the project. It is also the minimum value for disk_space.
- diskSpaceStep string
  The default disk space step of the service; possible values depend on the service type, the cloud provider and the project. disk_space must increase from disk_space_default in increments of this size.
- diskSpaceUsed string
  Disk space that the service is currently using.
- id string
  The provider-assigned unique ID for this managed resource.
- kafkas KafkaKafka[]
  Kafka broker configuration values.
- serviceHost string
  The hostname of the service.
- servicePassword string
  Password used for connecting to the service, if applicable.
- servicePort number
  The port of the service.
- serviceType string
  Aiven internal service type code.
- serviceUri string
  URI for connecting to the service. Service-specific info is under "kafka", "pg", etc.
- serviceUsername string
  Username used for connecting to the service, if applicable.
- state string
  Service state. One of POWEROFF, REBALANCING, REBUILDING or RUNNING.
Python
- components Sequence[KafkaComponent]
  Service component information objects.
- disk_space_cap str
  The maximum disk space of the service; possible values depend on the service type, the cloud provider and the project.
- disk_space_default str
  The default disk space of the service; possible values depend on the service type, the cloud provider and the project. It is also the minimum value for disk_space.
- disk_space_step str
  The default disk space step of the service; possible values depend on the service type, the cloud provider and the project. disk_space must increase from disk_space_default in increments of this size.
- disk_space_used str
  Disk space that the service is currently using.
- id str
  The provider-assigned unique ID for this managed resource.
- kafkas Sequence[KafkaKafka]
  Kafka broker configuration values.
- service_host str
  The hostname of the service.
- service_password str
  Password used for connecting to the service, if applicable.
- service_port int
  The port of the service.
- service_type str
  Aiven internal service type code.
- service_uri str
  URI for connecting to the service. Service-specific info is under "kafka", "pg", etc.
- service_username str
  Username used for connecting to the service, if applicable.
- state str
  Service state. One of POWEROFF, REBALANCING, REBUILDING or RUNNING.
YAML
- components List<Property Map>
  Service component information objects.
- diskSpaceCap String
  The maximum disk space of the service; possible values depend on the service type, the cloud provider and the project.
- diskSpaceDefault String
  The default disk space of the service; possible values depend on the service type, the cloud provider and the project. It is also the minimum value for disk_space.
- diskSpaceStep String
  The default disk space step of the service; possible values depend on the service type, the cloud provider and the project. disk_space must increase from disk_space_default in increments of this size.
- diskSpaceUsed String
  Disk space that the service is currently using.
- id String
  The provider-assigned unique ID for this managed resource.
- kafkas List<Property Map>
  Kafka broker configuration values.
- serviceHost String
  The hostname of the service.
- servicePassword String
  Password used for connecting to the service, if applicable.
- servicePort Number
  The port of the service.
- serviceType String
  Aiven internal service type code.
- serviceUri String
  URI for connecting to the service. Service-specific info is under "kafka", "pg", etc.
- serviceUsername String
  Username used for connecting to the service, if applicable.
- state String
  Service state. One of POWEROFF, REBALANCING, REBUILDING or RUNNING.
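Output properties are read from the resource like any other Pulumi outputs. A short TypeScript sketch, assuming the kafka1 resource declared in the Example Usage section above:

import * as pulumi from "@pulumi/pulumi";

// kafka1 is the aiven.Kafka resource from the Example Usage section.
export const kafkaHost = kafka1.serviceHost;
export const kafkaPort = kafka1.servicePort;
export const kafkaState = kafka1.state; // e.g. RUNNING
// Outputs can be combined once they resolve, e.g. into a broker address.
export const kafkaBroker = pulumi.interpolate`${kafka1.serviceHost}:${kafka1.servicePort}`;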
Look up Existing Kafka Resource
Get an existing Kafka resource’s state with the given name, ID, and optional extra properties used to qualify the lookup.
public static get(name: string, id: Input<ID>, state?: KafkaState, opts?: CustomResourceOptions): Kafka
@staticmethod
def get(resource_name: str,
        id: str,
        opts: Optional[ResourceOptions] = None,
        additional_disk_space: Optional[str] = None,
        cloud_name: Optional[str] = None,
        components: Optional[Sequence[KafkaComponentArgs]] = None,
        default_acl: Optional[bool] = None,
        disk_space: Optional[str] = None,
        disk_space_cap: Optional[str] = None,
        disk_space_default: Optional[str] = None,
        disk_space_step: Optional[str] = None,
        disk_space_used: Optional[str] = None,
        kafka_user_config: Optional[KafkaKafkaUserConfigArgs] = None,
        kafkas: Optional[Sequence[KafkaKafkaArgs]] = None,
        karapace: Optional[bool] = None,
        maintenance_window_dow: Optional[str] = None,
        maintenance_window_time: Optional[str] = None,
        plan: Optional[str] = None,
        project: Optional[str] = None,
        project_vpc_id: Optional[str] = None,
        service_host: Optional[str] = None,
        service_integrations: Optional[Sequence[KafkaServiceIntegrationArgs]] = None,
        service_name: Optional[str] = None,
        service_password: Optional[str] = None,
        service_port: Optional[int] = None,
        service_type: Optional[str] = None,
        service_uri: Optional[str] = None,
        service_username: Optional[str] = None,
        state: Optional[str] = None,
        static_ips: Optional[Sequence[str]] = None,
        tags: Optional[Sequence[KafkaTagArgs]] = None,
        termination_protection: Optional[bool] = None) -> Kafka
func GetKafka(ctx *Context, name string, id IDInput, state *KafkaState, opts ...ResourceOption) (*Kafka, error)
public static Kafka Get(string name, Input<string> id, KafkaState? state, CustomResourceOptions? opts = null)
public static Kafka get(String name, Output<String> id, KafkaState state, CustomResourceOptions options)
Resource lookup is not supported in YAML
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- resource_name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
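In TypeScript, for example, the lookup takes the logical name and the ID of the existing resource; for Aiven services the ID is typically "<project>/<service_name>" (a sketch with placeholder names):

import * as aiven from "@pulumi/aiven";

// Adopt an existing service into the program state without recreating it.
// The ID of an Aiven service is "<project>/<service_name>".
const existing = aiven.Kafka.get("existing-kafka", "my-project/my-kafka1");

export const existingServiceUri = existing.serviceUri;

The state properties listed below can be supplied to qualify the lookup.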
C#
- AdditionalDiskSpace string
  Additional disk space. Possible values depend on the service type, the cloud provider and the project. Reducing this value will result in the service rebalancing.
- CloudName string
  Defines the cloud provider and region where the service is hosted. This can be changed freely after the service is created; changing the value will trigger a potentially lengthy migration process for the service. The format is the cloud provider name (aws, azure, do, google, upcloud, etc.), a dash, and the cloud-provider-specific region name. These are documented in each cloud provider's own support articles.
- Components List<KafkaComponent>
  Service component information objects.
- DefaultAcl bool
  Create a default wildcard Kafka ACL.
- DiskSpace string
  Service disk space. Possible values depend on the service type, the cloud provider and the project. Reducing this value will result in the service rebalancing. This will be removed in v5.0.0; please use additional_disk_space to specify the space to be added to the default disk_space defined by the plan.
- DiskSpaceCap string
  The maximum disk space of the service; possible values depend on the service type, the cloud provider and the project.
- DiskSpaceDefault string
  The default disk space of the service; possible values depend on the service type, the cloud provider and the project. It is also the minimum value for disk_space.
- DiskSpaceStep string
  The default disk space step of the service; possible values depend on the service type, the cloud provider and the project. disk_space must increase from disk_space_default in increments of this size.
- DiskSpaceUsed string
  Disk space that the service is currently using.
- KafkaServer List<KafkaKafka>
  Kafka broker configuration values.
- KafkaUserConfig KafkaKafkaUserConfig
  Kafka user configurable settings.
- Karapace bool
  Switch the service to use Karapace for schema registry and REST proxy. Usage of this field is discouraged.
- MaintenanceWindowDow string
  Day of week when maintenance operations should be performed. One of monday, tuesday, wednesday, etc.
- MaintenanceWindowTime string
  Time of day when maintenance operations should be performed. UTC time in HH:mm:ss format.
- Plan string
  Defines what kind of computing resources are allocated for the service. It can be changed after creation, though there are some restrictions when moving to a smaller plan: the new plan must have a sufficient amount of disk space to store all current data, and switching to a plan with fewer nodes might not be supported. The basic plan names are hobbyist, startup-x, business-x and premium-x, where x is (roughly) the amount of memory on each node (other attributes such as the number of CPUs and the amount of disk space also vary, but the naming is based on memory). The available options can be seen on the Aiven pricing page.
- Project string
  Identifies the project this resource belongs to. To set up proper dependencies, refer to this variable as a reference. This property cannot be changed; doing so forces recreation of the resource.
- ProjectVpcId string
  Specifies the VPC the service should run in. If the value is not set, the service does not run inside a VPC. When set, the value should be given as a reference to set up dependencies correctly, and the VPC must be in the same cloud and region as the service itself. The service can be freely moved to and from a VPC after creation, but doing so triggers migration to new servers, so the operation can take a significant amount of time to complete if the service has a lot of data.
- ServiceHost string
  The hostname of the service.
- ServiceIntegrations List<KafkaServiceIntegration>
  Service integrations to specify when creating a service. Not applied after initial service creation.
- ServiceName string
  Specifies the actual name of the service. The name cannot be changed later without destroying and re-creating the service, so the name should be picked based on intended service usage rather than current attributes.
- ServicePassword string
  Password used for connecting to the service, if applicable.
- ServicePort int
  The port of the service.
- ServiceType string
  Aiven internal service type code.
- ServiceUri string
  URI for connecting to the service. Service-specific info is under "kafka", "pg", etc.
- ServiceUsername string
  Username used for connecting to the service, if applicable.
- State string
  Service state. One of POWEROFF, REBALANCING, REBUILDING or RUNNING.
- StaticIps List<string>
  Use static public IP addresses.
- Tags List<KafkaTag>
  Tags are key-value pairs that allow you to categorize services.
- TerminationProtection bool
  Prevents the service from being deleted. It is recommended to set this to true for all production services to prevent unintentional service deletion. This does not shield against deleting databases or topics, but for services with backups much of the content can at least be restored from backup in case of accidental deletion.
Go
- AdditionalDiskSpace string
  Additional disk space. Possible values depend on the service type, the cloud provider and the project. Reducing this value will result in the service rebalancing.
- CloudName string
  Defines the cloud provider and region where the service is hosted. This can be changed freely after the service is created; changing the value will trigger a potentially lengthy migration process for the service. The format is the cloud provider name (aws, azure, do, google, upcloud, etc.), a dash, and the cloud-provider-specific region name. These are documented in each cloud provider's own support articles.
- Components []KafkaComponentArgs
  Service component information objects.
- DefaultAcl bool
  Create a default wildcard Kafka ACL.
- DiskSpace string
  Service disk space. Possible values depend on the service type, the cloud provider and the project. Reducing this value will result in the service rebalancing. This will be removed in v5.0.0; please use additional_disk_space to specify the space to be added to the default disk_space defined by the plan.
- DiskSpaceCap string
  The maximum disk space of the service; possible values depend on the service type, the cloud provider and the project.
- DiskSpaceDefault string
  The default disk space of the service; possible values depend on the service type, the cloud provider and the project. It is also the minimum value for disk_space.
- DiskSpaceStep string
  The default disk space step of the service; possible values depend on the service type, the cloud provider and the project. disk_space must increase from disk_space_default in increments of this size.
- DiskSpaceUsed string
  Disk space that the service is currently using.
- KafkaUserConfig KafkaKafkaUserConfigArgs
  Kafka user configurable settings.
- Kafkas []KafkaKafkaArgs
  Kafka broker configuration values.
- Karapace bool
  Switch the service to use Karapace for schema registry and REST proxy. Usage of this field is discouraged.
- MaintenanceWindowDow string
  Day of week when maintenance operations should be performed. One of monday, tuesday, wednesday, etc.
- MaintenanceWindowTime string
  Time of day when maintenance operations should be performed. UTC time in HH:mm:ss format.
- Plan string
  Defines what kind of computing resources are allocated for the service. It can be changed after creation, though there are some restrictions when moving to a smaller plan: the new plan must have a sufficient amount of disk space to store all current data, and switching to a plan with fewer nodes might not be supported. The basic plan names are hobbyist, startup-x, business-x and premium-x, where x is (roughly) the amount of memory on each node (other attributes such as the number of CPUs and the amount of disk space also vary, but the naming is based on memory). The available options can be seen on the Aiven pricing page.
- Project string
  Identifies the project this resource belongs to. To set up proper dependencies, refer to this variable as a reference. This property cannot be changed; doing so forces recreation of the resource.
- ProjectVpcId string
  Specifies the VPC the service should run in. If the value is not set, the service does not run inside a VPC. When set, the value should be given as a reference to set up dependencies correctly, and the VPC must be in the same cloud and region as the service itself. The service can be freely moved to and from a VPC after creation, but doing so triggers migration to new servers, so the operation can take a significant amount of time to complete if the service has a lot of data.
- ServiceHost string
  The hostname of the service.
- ServiceIntegrations []KafkaServiceIntegrationArgs
  Service integrations to specify when creating a service. Not applied after initial service creation.
- ServiceName string
  Specifies the actual name of the service. The name cannot be changed later without destroying and re-creating the service, so the name should be picked based on intended service usage rather than current attributes.
- ServicePassword string
  Password used for connecting to the service, if applicable.
- ServicePort int
  The port of the service.
- ServiceType string
  Aiven internal service type code.
- ServiceUri string
  URI for connecting to the service. Service-specific info is under "kafka", "pg", etc.
- ServiceUsername string
  Username used for connecting to the service, if applicable.
- State string
  Service state. One of POWEROFF, REBALANCING, REBUILDING or RUNNING.
- StaticIps []string
  Use static public IP addresses.
- Tags []KafkaTagArgs
  Tags are key-value pairs that allow you to categorize services.
- TerminationProtection bool
  Prevents the service from being deleted. It is recommended to set this to true for all production services to prevent unintentional service deletion. This does not shield against deleting databases or topics, but for services with backups much of the content can at least be restored from backup in case of accidental deletion.
Java
- additionalDiskSpace String
  Additional disk space. Possible values depend on the service type, the cloud provider and the project. Reducing this value will result in the service rebalancing.
- cloudName String
  Defines the cloud provider and region where the service is hosted. This can be changed freely after the service is created; changing the value will trigger a potentially lengthy migration process for the service. The format is the cloud provider name (aws, azure, do, google, upcloud, etc.), a dash, and the cloud-provider-specific region name. These are documented in each cloud provider's own support articles.
- components List<KafkaComponent>
  Service component information objects.
- defaultAcl Boolean
  Create a default wildcard Kafka ACL.
- diskSpace String
  Service disk space. Possible values depend on the service type, the cloud provider and the project. Reducing this value will result in the service rebalancing. This will be removed in v5.0.0; please use additional_disk_space to specify the space to be added to the default disk_space defined by the plan.
- diskSpaceCap String
  The maximum disk space of the service; possible values depend on the service type, the cloud provider and the project.
- diskSpaceDefault String
  The default disk space of the service; possible values depend on the service type, the cloud provider and the project. It is also the minimum value for disk_space.
- diskSpaceStep String
  The default disk space step of the service; possible values depend on the service type, the cloud provider and the project. disk_space must increase from disk_space_default in increments of this size.
- diskSpaceUsed String
  Disk space that the service is currently using.
- kafkaUserConfig KafkaKafkaUserConfig
  Kafka user configurable settings.
- kafkas List<KafkaKafka>
  Kafka broker configuration values.
- karapace Boolean
  Switch the service to use Karapace for schema registry and REST proxy. Usage of this field is discouraged.
- maintenanceWindowDow String
  Day of week when maintenance operations should be performed. One of monday, tuesday, wednesday, etc.
- maintenanceWindowTime String
  Time of day when maintenance operations should be performed. UTC time in HH:mm:ss format.
- plan String
  Defines what kind of computing resources are allocated for the service. It can be changed after creation, though there are some restrictions when moving to a smaller plan: the new plan must have a sufficient amount of disk space to store all current data, and switching to a plan with fewer nodes might not be supported. The basic plan names are hobbyist, startup-x, business-x and premium-x, where x is (roughly) the amount of memory on each node (other attributes such as the number of CPUs and the amount of disk space also vary, but the naming is based on memory). The available options can be seen on the Aiven pricing page.
- project String
  Identifies the project this resource belongs to. To set up proper dependencies, refer to this variable as a reference. This property cannot be changed; doing so forces recreation of the resource.
- projectVpcId String
  Specifies the VPC the service should run in. If the value is not set, the service does not run inside a VPC. When set, the value should be given as a reference to set up dependencies correctly, and the VPC must be in the same cloud and region as the service itself. The service can be freely moved to and from a VPC after creation, but doing so triggers migration to new servers, so the operation can take a significant amount of time to complete if the service has a lot of data.
- serviceHost String
  The hostname of the service.
- serviceIntegrations List<KafkaServiceIntegration>
  Service integrations to specify when creating a service. Not applied after initial service creation.
- serviceName String
  Specifies the actual name of the service. The name cannot be changed later without destroying and re-creating the service, so the name should be picked based on intended service usage rather than current attributes.
- servicePassword String
  Password used for connecting to the service, if applicable.
- servicePort Integer
  The port of the service.
- serviceType String
  Aiven internal service type code.
- serviceUri String
  URI for connecting to the service. Service-specific info is under "kafka", "pg", etc.
- serviceUsername String
  Username used for connecting to the service, if applicable.
- state String
  Service state. One of POWEROFF, REBALANCING, REBUILDING or RUNNING.
- staticIps List<String>
  Use static public IP addresses.
- tags List<KafkaTag>
  Tags are key-value pairs that allow you to categorize services.
- terminationProtection Boolean
  Prevents the service from being deleted. It is recommended to set this to true for all production services to prevent unintentional service deletion. This does not shield against deleting databases or topics, but for services with backups much of the content can at least be restored from backup in case of accidental deletion.
TypeScript
- additionalDiskSpace string
  Additional disk space. Possible values depend on the service type, the cloud provider and the project. Reducing this value will result in the service rebalancing.
- cloudName string
  Defines the cloud provider and region where the service is hosted. This can be changed freely after the service is created; changing the value will trigger a potentially lengthy migration process for the service. The format is the cloud provider name (aws, azure, do, google, upcloud, etc.), a dash, and the cloud-provider-specific region name. These are documented in each cloud provider's own support articles.
- components KafkaComponent[]
  Service component information objects.
- defaultAcl boolean
  Create a default wildcard Kafka ACL.
- diskSpace string
  Service disk space. Possible values depend on the service type, the cloud provider and the project. Reducing this value will result in the service rebalancing. This will be removed in v5.0.0; please use additional_disk_space to specify the space to be added to the default disk_space defined by the plan.
- diskSpaceCap string
  The maximum disk space of the service; possible values depend on the service type, the cloud provider and the project.
- diskSpaceDefault string
  The default disk space of the service; possible values depend on the service type, the cloud provider and the project. It is also the minimum value for disk_space.
- diskSpaceStep string
  The default disk space step of the service; possible values depend on the service type, the cloud provider and the project. disk_space must increase from disk_space_default in increments of this size.
- diskSpaceUsed string
  Disk space that the service is currently using.
- kafkaUserConfig KafkaKafkaUserConfig
  Kafka user configurable settings.
- kafkas KafkaKafka[]
  Kafka broker configuration values.
- karapace boolean
  Switch the service to use Karapace for schema registry and REST proxy. Usage of this field is discouraged.
- maintenanceWindowDow string
  Day of week when maintenance operations should be performed. One of monday, tuesday, wednesday, etc.
- maintenanceWindowTime string
  Time of day when maintenance operations should be performed. UTC time in HH:mm:ss format.
- plan string
  Defines what kind of computing resources are allocated for the service. It can be changed after creation, though there are some restrictions when moving to a smaller plan: the new plan must have a sufficient amount of disk space to store all current data, and switching to a plan with fewer nodes might not be supported. The basic plan names are hobbyist, startup-x, business-x and premium-x, where x is (roughly) the amount of memory on each node (other attributes such as the number of CPUs and the amount of disk space also vary, but the naming is based on memory). The available options can be seen on the Aiven pricing page.
- project string
  Identifies the project this resource belongs to. To set up proper dependencies, refer to this variable as a reference. This property cannot be changed; doing so forces recreation of the resource.
- projectVpcId string
  Specifies the VPC the service should run in. If the value is not set, the service does not run inside a VPC. When set, the value should be given as a reference to set up dependencies correctly, and the VPC must be in the same cloud and region as the service itself. The service can be freely moved to and from a VPC after creation, but doing so triggers migration to new servers, so the operation can take a significant amount of time to complete if the service has a lot of data.
- serviceHost string
  The hostname of the service.
- serviceIntegrations KafkaServiceIntegration[]
  Service integrations to specify when creating a service. Not applied after initial service creation.
- serviceName string
  Specifies the actual name of the service. The name cannot be changed later without destroying and re-creating the service, so the name should be picked based on intended service usage rather than current attributes.
- servicePassword string
  Password used for connecting to the service, if applicable.
- servicePort number
  The port of the service.
- serviceType string
  Aiven internal service type code.
- serviceUri string
  URI for connecting to the service. Service-specific info is under "kafka", "pg", etc.
- serviceUsername string
  Username used for connecting to the service, if applicable.
- state string
  Service state. One of POWEROFF, REBALANCING, REBUILDING or RUNNING.
- staticIps string[]
  Use static public IP addresses.
- tags KafkaTag[]
  Tags are key-value pairs that allow you to categorize services.
- terminationProtection boolean
  Prevents the service from being deleted. It is recommended to set this to true for all production services to prevent unintentional service deletion. This does not shield against deleting databases or topics, but for services with backups much of the content can at least be restored from backup in case of accidental deletion.
- additional_disk_space str: Additional disk space. Possible values depend on the service type, the cloud provider and the project. Reducing this value will result in the service rebalancing.
- cloud_name str: Defines the cloud provider and region where the service is hosted. This can be changed freely after the service is created, but changing the value triggers a potentially lengthy migration process for the service. The format is the cloud provider name (aws, azure, do, google, upcloud, etc.), a dash, and the cloud provider specific region name; these are documented in each cloud provider's own support articles.
- components Sequence[KafkaComponentArgs]: Service component information objects.
- default_acl bool: Create a default wildcard Kafka ACL.
- disk_space str: Service disk space. Possible values depend on the service type, the cloud provider and the project. Reducing this value will result in the service rebalancing. This will be removed in v5.0.0; use additional_disk_space to specify the space to be added to the default disk_space defined by the plan.
- disk_space_cap str: The maximum disk space of the service; possible values depend on the service type, the cloud provider and the project.
- disk_space_default str: The default disk space of the service; possible values depend on the service type, the cloud provider and the project. It is also the minimum value for disk_space.
- disk_space_step str: The default disk space step of the service; possible values depend on the service type, the cloud provider and the project. disk_space needs to increase from disk_space_default in increments of this size.
- disk_space_used str: Disk space that the service is currently using.
- kafka_user_config KafkaKafkaUserConfigArgs: Kafka user configurable settings.
- kafkas Sequence[KafkaKafkaArgs]: Kafka broker configuration values.
- karapace bool: Switch the service to use Karapace for schema registry and REST proxy. Usage of this field is discouraged.
- maintenance_window_dow str: Day of week when maintenance operations should be performed. One of monday, tuesday, wednesday, etc.
- maintenance_window_time str: Time of day when maintenance operations should be performed. UTC time in HH:mm:ss format.
- plan str: Defines what kind of computing resources are allocated for the service. It can be changed after creation, though there are some restrictions when moving to a smaller plan: the new plan must have a sufficient amount of disk space to store all current data, and switching to a plan with fewer nodes might not be supported. The basic plan names are hobbyist, startup-x, business-x and premium-x, where x is (roughly) the amount of memory on each node (other attributes such as the number of CPUs and the amount of disk space also vary, but the naming is based on memory). The available options can be seen on the Aiven pricing page.
- project str: Identifies the project this resource belongs to. To set up proper dependencies, refer to this variable as a reference. This property cannot be changed; doing so forces recreation of the resource.
- project_vpc_id str: Specifies the VPC the service should run in. If the value is not set, the service does not run inside a VPC. When set, the value should be given as a reference to set up dependencies correctly, and the VPC must be in the same cloud and region as the service itself. A service can be freely moved to and from a VPC after creation, but doing so triggers a migration to new servers, so the operation can take a significant amount of time to complete if the service has a lot of data.
- service_host str: The hostname of the service.
- service_integrations Sequence[KafkaServiceIntegrationArgs]: Service integrations to specify when creating a service. Not applied after initial service creation.
- service_name str: Specifies the actual name of the service. The name cannot be changed later without destroying and re-creating the service, so the name should be picked based on intended service usage rather than current attributes.
- service_password str: Password used for connecting to the service, if applicable.
- service_port int: The port of the service.
- service_type str: Aiven internal service type code.
- service_uri str: URI for connecting to the service. Service-specific info is under "kafka", "pg", etc.
- service_username str: Username used for connecting to the service, if applicable.
- state str: Service state. One of POWEROFF, REBALANCING, REBUILDING or RUNNING.
- static_ips Sequence[str]: Use static public IP addresses.
- tags Sequence[KafkaTagArgs]: Tags are key-value pairs that allow you to categorize services.
- termination_protection bool: Prevents the service from being deleted. It is recommended to set this to true for all production services to prevent unintentional service deletion. This does not shield against deleting databases or topics, but for services with backups much of the content can at least be restored from backup if accidental deletion occurs.
- additionalDiskSpace String: Additional disk space. Possible values depend on the service type, the cloud provider and the project. Reducing this value will result in the service rebalancing.
- cloudName String: Defines the cloud provider and region where the service is hosted. This can be changed freely after the service is created, but changing the value triggers a potentially lengthy migration process for the service. The format is the cloud provider name (aws, azure, do, google, upcloud, etc.), a dash, and the cloud provider specific region name; these are documented in each cloud provider's own support articles.
- components List<Property Map>: Service component information objects.
- defaultAcl Boolean: Create a default wildcard Kafka ACL.
- diskSpace String: Service disk space. Possible values depend on the service type, the cloud provider and the project. Reducing this value will result in the service rebalancing. This will be removed in v5.0.0; use additional_disk_space to specify the space to be added to the default disk_space defined by the plan.
- diskSpaceCap String: The maximum disk space of the service; possible values depend on the service type, the cloud provider and the project.
- diskSpaceDefault String: The default disk space of the service; possible values depend on the service type, the cloud provider and the project. It is also the minimum value for disk_space.
- diskSpaceStep String: The default disk space step of the service; possible values depend on the service type, the cloud provider and the project. disk_space needs to increase from disk_space_default in increments of this size.
- diskSpaceUsed String: Disk space that the service is currently using.
- kafkaUserConfig Property Map: Kafka user configurable settings.
- kafkas List<Property Map>: Kafka broker configuration values.
- karapace Boolean: Switch the service to use Karapace for schema registry and REST proxy. Usage of this field is discouraged.
- maintenanceWindowDow String: Day of week when maintenance operations should be performed. One of monday, tuesday, wednesday, etc.
- maintenanceWindowTime String: Time of day when maintenance operations should be performed. UTC time in HH:mm:ss format.
- plan String: Defines what kind of computing resources are allocated for the service. It can be changed after creation, though there are some restrictions when moving to a smaller plan: the new plan must have a sufficient amount of disk space to store all current data, and switching to a plan with fewer nodes might not be supported. The basic plan names are hobbyist, startup-x, business-x and premium-x, where x is (roughly) the amount of memory on each node (other attributes such as the number of CPUs and the amount of disk space also vary, but the naming is based on memory). The available options can be seen on the Aiven pricing page.
- project String: Identifies the project this resource belongs to. To set up proper dependencies, refer to this variable as a reference. This property cannot be changed; doing so forces recreation of the resource.
- projectVpcId String: Specifies the VPC the service should run in. If the value is not set, the service does not run inside a VPC. When set, the value should be given as a reference to set up dependencies correctly, and the VPC must be in the same cloud and region as the service itself. A service can be freely moved to and from a VPC after creation, but doing so triggers a migration to new servers, so the operation can take a significant amount of time to complete if the service has a lot of data.
- serviceHost String: The hostname of the service.
- serviceIntegrations List<Property Map>: Service integrations to specify when creating a service. Not applied after initial service creation.
- serviceName String: Specifies the actual name of the service. The name cannot be changed later without destroying and re-creating the service, so the name should be picked based on intended service usage rather than current attributes.
- servicePassword String: Password used for connecting to the service, if applicable.
- servicePort Number: The port of the service.
- serviceType String: Aiven internal service type code.
- serviceUri String: URI for connecting to the service. Service-specific info is under "kafka", "pg", etc.
- serviceUsername String: Username used for connecting to the service, if applicable.
- state String: Service state. One of POWEROFF, REBALANCING, REBUILDING or RUNNING.
- staticIps List<String>: Use static public IP addresses.
- tags List<Property Map>: Tags are key-value pairs that allow you to categorize services.
- terminationProtection Boolean: Prevents the service from being deleted. It is recommended to set this to true for all production services to prevent unintentional service deletion. This does not shield against deleting databases or topics, but for services with backups much of the content can at least be restored from backup if accidental deletion occurs.
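As a usage sketch for the arguments above: the maintenance window, extra disk space and termination protection are plain resource arguments and can be combined in one declaration. A minimal Python example, assuming a hypothetical project and service name; the "100GiB" string follows the provider's human-readable disk-size convention, so treat the exact format as an assumption:
import pulumi_aiven as aiven

# Hypothetical project and service names; substitute your own.
kafka_prod = aiven.Kafka("kafka-prod",
    project="my-project",
    cloud_name="google-europe-west1",
    plan="business-4",
    service_name="my-kafka-prod",
    # Run maintenance on Sundays at 03:00 UTC.
    maintenance_window_dow="sunday",
    maintenance_window_time="03:00:00",
    # Grow storage beyond the plan default without changing plans.
    additional_disk_space="100GiB",
    # Production guardrail: deletion fails until this is set back to false.
    termination_protection=True)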
Supporting Types
KafkaComponent, KafkaComponentArgs
KafkaKafka, KafkaKafkaArgs
- AccessCert string
- AccessKey string
- ConnectUri string
- RestUri string
- SchemaRegistryUri string
- AccessCert string
- AccessKey string
- ConnectUri string
- RestUri string
- SchemaRegistryUri string
- accessCert String
- accessKey String
- connectUri String
- restUri String
- schemaRegistryUri String
- accessCert string
- accessKey string
- connectUri string
- restUri string
- schemaRegistryUri string
- access_cert str
- access_key str
- connect_uri str
- rest_uri str
- schema_registry_uri str
- accessCert String
- accessKey String
- connectUri String
- restUri String
- schemaRegistryUri String
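These fields are the per-service connection details and surface as outputs on the resource. A short Python sketch of exporting them, reusing the hypothetical kafka_prod resource from the earlier sketch:
import pulumi

# The service URI embeds credentials, so mark it as a secret.
pulumi.export("kafka_service_uri", pulumi.Output.secret(kafka_prod.service_uri))

# kafkas is a list of KafkaKafka objects; read the URIs off the first entry.
pulumi.export("kafka_rest_uri",
    kafka_prod.kafkas.apply(lambda ks: ks[0].rest_uri if ks else None))
pulumi.export("kafka_schema_registry_uri",
    kafka_prod.kafkas.apply(lambda ks: ks[0].schema_registry_uri if ks else None))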
KafkaKafkaUserConfig, KafkaKafkaUserConfigArgs
- AdditionalBackupRegions string: Additional cloud regions for backup replication.
- CustomDomain string: Serve the web frontend using a custom CNAME pointing to the Aiven DNS name.
- IpFilterObjects List<KafkaKafkaUserConfigIpFilterObject>: Allow incoming connections from CIDR address blocks, e.g. '10.20.0.0/16'.
- IpFilterStrings List<string>: Allow incoming connections from CIDR address blocks, e.g. '10.20.0.0/16'.
- IpFilters List<string>: Allow incoming connections from CIDR address blocks, e.g. '10.20.0.0/16'. This will be removed in v5.0.0 and replaced with ip_filter_string instead.
- Kafka KafkaKafkaUserConfigKafka: Kafka broker configuration values.
- KafkaAuthenticationMethods KafkaKafkaUserConfigKafkaAuthenticationMethods: Kafka authentication methods.
- KafkaConnect bool: Enable the Kafka Connect service. The default value is false.
- KafkaConnectConfig KafkaKafkaUserConfigKafkaConnectConfig: Kafka Connect configuration values.
- KafkaRest bool: Enable the Kafka-REST service. The default value is false.
- KafkaRestAuthorization bool: Enable authorization in the Kafka-REST service.
- KafkaRestConfig KafkaKafkaUserConfigKafkaRestConfig: Kafka REST configuration.
- KafkaVersion string: Kafka major version.
- PrivateAccess KafkaKafkaUserConfigPrivateAccess: Allow access to selected service ports from private networks.
- PrivatelinkAccess KafkaKafkaUserConfigPrivatelinkAccess: Allow access to selected service components through Privatelink.
- PublicAccess KafkaKafkaUserConfigPublicAccess: Allow access to selected service ports from the public Internet.
- SchemaRegistry bool: Enable the Schema-Registry service. The default value is false.
- SchemaRegistryConfig KafkaKafkaUserConfigSchemaRegistryConfig: Schema Registry configuration.
- StaticIps bool: Use static public IP addresses.
- AdditionalBackupRegions string: Additional cloud regions for backup replication.
- CustomDomain string: Serve the web frontend using a custom CNAME pointing to the Aiven DNS name.
- IpFilterObjects []KafkaKafkaUserConfigIpFilterObject: Allow incoming connections from CIDR address blocks, e.g. '10.20.0.0/16'.
- IpFilterStrings []string: Allow incoming connections from CIDR address blocks, e.g. '10.20.0.0/16'.
- IpFilters []string: Allow incoming connections from CIDR address blocks, e.g. '10.20.0.0/16'. This will be removed in v5.0.0 and replaced with ip_filter_string instead.
- Kafka KafkaKafkaUserConfigKafka: Kafka broker configuration values.
- KafkaAuthenticationMethods KafkaKafkaUserConfigKafkaAuthenticationMethods: Kafka authentication methods.
- KafkaConnect bool: Enable the Kafka Connect service. The default value is false.
- KafkaConnectConfig KafkaKafkaUserConfigKafkaConnectConfig: Kafka Connect configuration values.
- KafkaRest bool: Enable the Kafka-REST service. The default value is false.
- KafkaRestAuthorization bool: Enable authorization in the Kafka-REST service.
- KafkaRestConfig KafkaKafkaUserConfigKafkaRestConfig: Kafka REST configuration.
- KafkaVersion string: Kafka major version.
- PrivateAccess KafkaKafkaUserConfigPrivateAccess: Allow access to selected service ports from private networks.
- PrivatelinkAccess KafkaKafkaUserConfigPrivatelinkAccess: Allow access to selected service components through Privatelink.
- PublicAccess KafkaKafkaUserConfigPublicAccess: Allow access to selected service ports from the public Internet.
- SchemaRegistry bool: Enable the Schema-Registry service. The default value is false.
- SchemaRegistryConfig KafkaKafkaUserConfigSchemaRegistryConfig: Schema Registry configuration.
- StaticIps bool: Use static public IP addresses.
- additionalBackupRegions String: Additional cloud regions for backup replication.
- customDomain String: Serve the web frontend using a custom CNAME pointing to the Aiven DNS name.
- ipFilterObjects List<KafkaKafkaUserConfigIpFilterObject>: Allow incoming connections from CIDR address blocks, e.g. '10.20.0.0/16'.
- ipFilterStrings List<String>: Allow incoming connections from CIDR address blocks, e.g. '10.20.0.0/16'.
- ipFilters List<String>: Allow incoming connections from CIDR address blocks, e.g. '10.20.0.0/16'. This will be removed in v5.0.0 and replaced with ip_filter_string instead.
- kafka KafkaKafkaUserConfigKafka: Kafka broker configuration values.
- kafkaAuthenticationMethods KafkaKafkaUserConfigKafkaAuthenticationMethods: Kafka authentication methods.
- kafkaConnect Boolean: Enable the Kafka Connect service. The default value is false.
- kafkaConnectConfig KafkaKafkaUserConfigKafkaConnectConfig: Kafka Connect configuration values.
- kafkaRest Boolean: Enable the Kafka-REST service. The default value is false.
- kafkaRestAuthorization Boolean: Enable authorization in the Kafka-REST service.
- kafkaRestConfig KafkaKafkaUserConfigKafkaRestConfig: Kafka REST configuration.
- kafkaVersion String: Kafka major version.
- privateAccess KafkaKafkaUserConfigPrivateAccess: Allow access to selected service ports from private networks.
- privatelinkAccess KafkaKafkaUserConfigPrivatelinkAccess: Allow access to selected service components through Privatelink.
- publicAccess KafkaKafkaUserConfigPublicAccess: Allow access to selected service ports from the public Internet.
- schemaRegistry Boolean: Enable the Schema-Registry service. The default value is false.
- schemaRegistryConfig KafkaKafkaUserConfigSchemaRegistryConfig: Schema Registry configuration.
- staticIps Boolean: Use static public IP addresses.
- additionalBackupRegions string: Additional cloud regions for backup replication.
- customDomain string: Serve the web frontend using a custom CNAME pointing to the Aiven DNS name.
- ipFilterObjects KafkaKafkaUserConfigIpFilterObject[]: Allow incoming connections from CIDR address blocks, e.g. '10.20.0.0/16'.
- ipFilterStrings string[]: Allow incoming connections from CIDR address blocks, e.g. '10.20.0.0/16'.
- ipFilters string[]: Allow incoming connections from CIDR address blocks, e.g. '10.20.0.0/16'. This will be removed in v5.0.0 and replaced with ip_filter_string instead.
- kafka KafkaKafkaUserConfigKafka: Kafka broker configuration values.
- kafkaAuthenticationMethods KafkaKafkaUserConfigKafkaAuthenticationMethods: Kafka authentication methods.
- kafkaConnect boolean: Enable the Kafka Connect service. The default value is false.
- kafkaConnectConfig KafkaKafkaUserConfigKafkaConnectConfig: Kafka Connect configuration values.
- kafkaRest boolean: Enable the Kafka-REST service. The default value is false.
- kafkaRestAuthorization boolean: Enable authorization in the Kafka-REST service.
- kafkaRestConfig KafkaKafkaUserConfigKafkaRestConfig: Kafka REST configuration.
- kafkaVersion string: Kafka major version.
- privateAccess KafkaKafkaUserConfigPrivateAccess: Allow access to selected service ports from private networks.
- privatelinkAccess KafkaKafkaUserConfigPrivatelinkAccess: Allow access to selected service components through Privatelink.
- publicAccess KafkaKafkaUserConfigPublicAccess: Allow access to selected service ports from the public Internet.
- schemaRegistry boolean: Enable the Schema-Registry service. The default value is false.
- schemaRegistryConfig KafkaKafkaUserConfigSchemaRegistryConfig: Schema Registry configuration.
- staticIps boolean: Use static public IP addresses.
- additional_backup_regions str: Additional cloud regions for backup replication.
- custom_domain str: Serve the web frontend using a custom CNAME pointing to the Aiven DNS name.
- ip_filter_objects Sequence[KafkaKafkaUserConfigIpFilterObject]: Allow incoming connections from CIDR address blocks, e.g. '10.20.0.0/16'.
- ip_filter_strings Sequence[str]: Allow incoming connections from CIDR address blocks, e.g. '10.20.0.0/16'.
- ip_filters Sequence[str]: Allow incoming connections from CIDR address blocks, e.g. '10.20.0.0/16'. This will be removed in v5.0.0 and replaced with ip_filter_string instead.
- kafka KafkaKafkaUserConfigKafka: Kafka broker configuration values.
- kafka_authentication_methods KafkaKafkaUserConfigKafkaAuthenticationMethods: Kafka authentication methods.
- kafka_connect bool: Enable the Kafka Connect service. The default value is false.
- kafka_connect_config KafkaKafkaUserConfigKafkaConnectConfig: Kafka Connect configuration values.
- kafka_rest bool: Enable the Kafka-REST service. The default value is false.
- kafka_rest_authorization bool: Enable authorization in the Kafka-REST service.
- kafka_rest_config KafkaKafkaUserConfigKafkaRestConfig: Kafka REST configuration.
- kafka_version str: Kafka major version.
- private_access KafkaKafkaUserConfigPrivateAccess: Allow access to selected service ports from private networks.
- privatelink_access KafkaKafkaUserConfigPrivatelinkAccess: Allow access to selected service components through Privatelink.
- public_access KafkaKafkaUserConfigPublicAccess: Allow access to selected service ports from the public Internet.
- schema_registry bool: Enable the Schema-Registry service. The default value is false.
- schema_registry_config KafkaKafkaUserConfigSchemaRegistryConfig: Schema Registry configuration.
- static_ips bool: Use static public IP addresses.
- additionalBackupRegions String: Additional cloud regions for backup replication.
- customDomain String: Serve the web frontend using a custom CNAME pointing to the Aiven DNS name.
- ipFilterObjects List<Property Map>: Allow incoming connections from CIDR address blocks, e.g. '10.20.0.0/16'.
- ipFilterStrings List<String>: Allow incoming connections from CIDR address blocks, e.g. '10.20.0.0/16'.
- ipFilters List<String>: Allow incoming connections from CIDR address blocks, e.g. '10.20.0.0/16'. This will be removed in v5.0.0 and replaced with ip_filter_string instead.
- kafka Property Map: Kafka broker configuration values.
- kafkaAuthenticationMethods Property Map: Kafka authentication methods.
- kafkaConnect Boolean: Enable the Kafka Connect service. The default value is false.
- kafkaConnectConfig Property Map: Kafka Connect configuration values.
- kafkaRest Boolean: Enable the Kafka-REST service. The default value is false.
- kafkaRestAuthorization Boolean: Enable authorization in the Kafka-REST service.
- kafkaRestConfig Property Map: Kafka REST configuration.
- kafkaVersion String: Kafka major version.
- privateAccess Property Map: Allow access to selected service ports from private networks.
- privatelinkAccess Property Map: Allow access to selected service components through Privatelink.
- publicAccess Property Map: Allow access to selected service ports from the public Internet.
- schemaRegistry Boolean: Enable the Schema-Registry service. The default value is false.
- schemaRegistryConfig Property Map: Schema Registry configuration.
- staticIps Boolean: Use static public IP addresses.
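The nested blocks above map onto per-language Args classes. A hedged Python sketch enabling static IPs and both authentication methods; the certificate and sasl field names inside kafka_authentication_methods come from Aiven's service documentation and should be treated as assumptions here:
import pulumi_aiven as aiven

user_config = aiven.KafkaKafkaUserConfigArgs(
    kafka_version="3.1",
    static_ips=True,
    # Assumed field names (certificate, sasl) per Aiven's service docs.
    kafka_authentication_methods=aiven.KafkaKafkaUserConfigKafkaAuthenticationMethodsArgs(
        certificate=True,  # mutual-TLS client authentication
        sasl=True,         # additionally expose a SASL port
    ),
)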
KafkaKafkaUserConfigIpFilterObject, KafkaKafkaUserConfigIpFilterObjectArgs
- Network string: CIDR address block.
- Description string: Description for IP filter list entry.
- Network string: CIDR address block.
- Description string: Description for IP filter list entry.
- network String: CIDR address block.
- description String: Description for IP filter list entry.
- network string: CIDR address block.
- description string: Description for IP filter list entry.
- network str: CIDR address block.
- description str: Description for IP filter list entry.
- network String: CIDR address block.
- description String: Description for IP filter list entry.
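Both fields map directly onto ip_filter_objects entries in the user config. A short Python sketch with illustrative CIDR blocks:
import pulumi_aiven as aiven

# Allow traffic only from two example networks.
user_config = aiven.KafkaKafkaUserConfigArgs(
    ip_filter_objects=[
        aiven.KafkaKafkaUserConfigIpFilterObjectArgs(
            network="10.20.0.0/16",
            description="corporate VPN",
        ),
        aiven.KafkaKafkaUserConfigIpFilterObjectArgs(
            network="192.0.2.0/24",
            description="CI runners",
        ),
    ],
)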
KafkaKafkaUserConfigKafka, KafkaKafkaUserConfigKafkaArgs
- AutoCreateTopicsEnable bool: Enable auto-creation of topics.
- CompressionType string: Specify the final compression type for a given topic. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'uncompressed', which is equivalent to no compression, and 'producer', which means retain the original compression codec set by the producer.
- ConnectionsMaxIdleMs int: Idle connections timeout: the server socket processor threads close connections that have been idle for longer than this.
- DefaultReplicationFactor int: Replication factor for auto-created topics.
- GroupInitialRebalanceDelayMs int: The amount of time, in milliseconds, the group coordinator will wait for more consumers to join a new group before performing the first rebalance. A longer delay means potentially fewer rebalances, but increases the time until processing begins. The default value is 3 seconds. During development and testing it might be desirable to set this to 0 so as not to delay test execution.
- GroupMaxSessionTimeoutMs int: The maximum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages between heartbeats at the cost of a longer time to detect failures.
- GroupMinSessionTimeoutMs int: The minimum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages between heartbeats at the cost of a longer time to detect failures.
- LogCleanerDeleteRetentionMs int: How long delete records are retained.
- LogCleanerMaxCompactionLagMs int: The maximum amount of time a message will remain uncompacted. Only applicable for logs that are being compacted.
- LogCleanerMinCleanableRatio double: Controls log compactor frequency. A larger value means more frequent compactions but also more space wasted for logs. Consider setting log.cleaner.max.compaction.lag.ms to enforce compactions sooner, instead of setting a very high value for this option.
- LogCleanerMinCompactionLagMs int: The minimum time a message will remain uncompacted in the log. Only applicable for logs that are being compacted.
- LogCleanupPolicy string: The default cleanup policy for segments beyond the retention window.
- LogFlushIntervalMessages int: The number of messages accumulated on a log partition before messages are flushed to disk.
- LogFlushIntervalMs int: The maximum time in ms that a message in any topic is kept in memory before being flushed to disk. If not set, the value in log.flush.scheduler.interval.ms is used.
- LogIndexIntervalBytes int: The interval with which Kafka adds an entry to the offset index.
- LogIndexSizeMaxBytes int: The maximum size in bytes of the offset index.
- LogMessageDownconversionEnable bool: Controls whether down-conversion of message formats is enabled to satisfy consume requests.
- LogMessageTimestampDifferenceMaxMs int: The maximum difference allowed between the timestamp when a broker receives a message and the timestamp specified in the message.
- LogMessageTimestampType string: Defines whether the timestamp in the message is message create time or log append time.
- LogPreallocate bool: Whether to preallocate the file when creating a new segment.
- LogRetentionBytes int: The maximum size of the log before deleting messages.
- LogRetentionHours int: The number of hours to keep a log file before deleting it.
- LogRetentionMs int: The number of milliseconds to keep a log file before deleting it. If not set, the value in log.retention.minutes is used. If set to -1, no time limit is applied.
- LogRollJitterMs int: The maximum jitter to subtract from logRollTimeMillis (in milliseconds). If not set, the value in log.roll.jitter.hours is used.
- LogRollMs int: The maximum time before a new log segment is rolled out (in milliseconds).
- LogSegmentBytes int: The maximum size of a single log file.
- LogSegmentDeleteDelayMs int: The amount of time to wait before deleting a file from the filesystem.
- MaxConnectionsPerIp int: The maximum number of connections allowed from each IP address (defaults to 2147483647).
- MaxIncrementalFetchSessionCacheSlots int: The maximum number of incremental fetch sessions that the broker will maintain.
- MessageMaxBytes int: The maximum size of message that the server can receive.
- MinInsyncReplicas int: When a producer sets acks to 'all' (or '-1'), min.insync.replicas specifies the minimum number of replicas that must acknowledge a write for the write to be considered successful.
- NumPartitions int: Number of partitions for auto-created topics.
- OffsetsRetentionMinutes int: Log retention window in minutes for the offsets topic.
- ProducerPurgatoryPurgeIntervalRequests int: The purge interval (in number of requests) of the producer request purgatory (defaults to 1000).
- ReplicaFetchMaxBytes int: The number of bytes of messages to attempt to fetch for each partition (defaults to 1048576). This is not an absolute maximum; if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that progress can be made.
- ReplicaFetchResponseMaxBytes int: Maximum bytes expected for the entire fetch response (defaults to 10485760). Records are fetched in batches, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that progress can be made. As such, this is not an absolute maximum.
- SocketRequestMaxBytes int: The maximum number of bytes in a socket request (defaults to 104857600).
- TransactionRemoveExpiredTransactionCleanupIntervalMs int: The interval at which to remove transactions that have expired due to transactional.id.expiration.ms passing (defaults to 3600000, i.e. 1 hour).
- TransactionStateLogSegmentBytes int: The transaction topic segment bytes should be kept relatively small in order to facilitate faster log compaction and cache loads (defaults to 104857600, i.e. 100 mebibytes).
- AutoCreateTopicsEnable bool: Enable auto-creation of topics.
- CompressionType string: Specify the final compression type for a given topic. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'uncompressed', which is equivalent to no compression, and 'producer', which means retain the original compression codec set by the producer.
- ConnectionsMaxIdleMs int: Idle connections timeout: the server socket processor threads close connections that have been idle for longer than this.
- DefaultReplicationFactor int: Replication factor for auto-created topics.
- GroupInitialRebalanceDelayMs int: The amount of time, in milliseconds, the group coordinator will wait for more consumers to join a new group before performing the first rebalance. A longer delay means potentially fewer rebalances, but increases the time until processing begins. The default value is 3 seconds. During development and testing it might be desirable to set this to 0 so as not to delay test execution.
- GroupMaxSessionTimeoutMs int: The maximum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages between heartbeats at the cost of a longer time to detect failures.
- GroupMinSessionTimeoutMs int: The minimum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages between heartbeats at the cost of a longer time to detect failures.
- LogCleanerDeleteRetentionMs int: How long delete records are retained.
- LogCleanerMaxCompactionLagMs int: The maximum amount of time a message will remain uncompacted. Only applicable for logs that are being compacted.
- LogCleanerMinCleanableRatio float64: Controls log compactor frequency. A larger value means more frequent compactions but also more space wasted for logs. Consider setting log.cleaner.max.compaction.lag.ms to enforce compactions sooner, instead of setting a very high value for this option.
- LogCleanerMinCompactionLagMs int: The minimum time a message will remain uncompacted in the log. Only applicable for logs that are being compacted.
- LogCleanupPolicy string: The default cleanup policy for segments beyond the retention window.
- LogFlushIntervalMessages int: The number of messages accumulated on a log partition before messages are flushed to disk.
- LogFlushIntervalMs int: The maximum time in ms that a message in any topic is kept in memory before being flushed to disk. If not set, the value in log.flush.scheduler.interval.ms is used.
- LogIndexIntervalBytes int: The interval with which Kafka adds an entry to the offset index.
- LogIndexSizeMaxBytes int: The maximum size in bytes of the offset index.
- LogMessageDownconversionEnable bool: Controls whether down-conversion of message formats is enabled to satisfy consume requests.
- LogMessageTimestampDifferenceMaxMs int: The maximum difference allowed between the timestamp when a broker receives a message and the timestamp specified in the message.
- LogMessageTimestampType string: Defines whether the timestamp in the message is message create time or log append time.
- LogPreallocate bool: Whether to preallocate the file when creating a new segment.
- LogRetentionBytes int: The maximum size of the log before deleting messages.
- LogRetentionHours int: The number of hours to keep a log file before deleting it.
- LogRetentionMs int: The number of milliseconds to keep a log file before deleting it. If not set, the value in log.retention.minutes is used. If set to -1, no time limit is applied.
- LogRollJitterMs int: The maximum jitter to subtract from logRollTimeMillis (in milliseconds). If not set, the value in log.roll.jitter.hours is used.
- LogRollMs int: The maximum time before a new log segment is rolled out (in milliseconds).
- LogSegmentBytes int: The maximum size of a single log file.
- LogSegmentDeleteDelayMs int: The amount of time to wait before deleting a file from the filesystem.
- MaxConnectionsPerIp int: The maximum number of connections allowed from each IP address (defaults to 2147483647).
- MaxIncrementalFetchSessionCacheSlots int: The maximum number of incremental fetch sessions that the broker will maintain.
- MessageMaxBytes int: The maximum size of message that the server can receive.
- MinInsyncReplicas int: When a producer sets acks to 'all' (or '-1'), min.insync.replicas specifies the minimum number of replicas that must acknowledge a write for the write to be considered successful.
- NumPartitions int: Number of partitions for auto-created topics.
- OffsetsRetentionMinutes int: Log retention window in minutes for the offsets topic.
- ProducerPurgatoryPurgeIntervalRequests int: The purge interval (in number of requests) of the producer request purgatory (defaults to 1000).
- ReplicaFetchMaxBytes int: The number of bytes of messages to attempt to fetch for each partition (defaults to 1048576). This is not an absolute maximum; if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that progress can be made.
- ReplicaFetchResponseMaxBytes int: Maximum bytes expected for the entire fetch response (defaults to 10485760). Records are fetched in batches, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that progress can be made. As such, this is not an absolute maximum.
- SocketRequestMaxBytes int: The maximum number of bytes in a socket request (defaults to 104857600).
- TransactionRemoveExpiredTransactionCleanupIntervalMs int: The interval at which to remove transactions that have expired due to transactional.id.expiration.ms passing (defaults to 3600000, i.e. 1 hour).
- TransactionStateLogSegmentBytes int: The transaction topic segment bytes should be kept relatively small in order to facilitate faster log compaction and cache loads (defaults to 104857600, i.e. 100 mebibytes).
- autoCreateTopicsEnable Boolean: Enable auto-creation of topics.
- compressionType String: Specify the final compression type for a given topic. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'uncompressed', which is equivalent to no compression, and 'producer', which means retain the original compression codec set by the producer.
- connectionsMaxIdleMs Integer: Idle connections timeout: the server socket processor threads close connections that have been idle for longer than this.
- defaultReplicationFactor Integer: Replication factor for auto-created topics.
- groupInitialRebalanceDelayMs Integer: The amount of time, in milliseconds, the group coordinator will wait for more consumers to join a new group before performing the first rebalance. A longer delay means potentially fewer rebalances, but increases the time until processing begins. The default value is 3 seconds. During development and testing it might be desirable to set this to 0 so as not to delay test execution.
- groupMaxSessionTimeoutMs Integer: The maximum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages between heartbeats at the cost of a longer time to detect failures.
- groupMinSessionTimeoutMs Integer: The minimum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages between heartbeats at the cost of a longer time to detect failures.
- logCleanerDeleteRetentionMs Integer: How long delete records are retained.
- logCleanerMaxCompactionLagMs Integer: The maximum amount of time a message will remain uncompacted. Only applicable for logs that are being compacted.
- logCleanerMinCleanableRatio Double: Controls log compactor frequency. A larger value means more frequent compactions but also more space wasted for logs. Consider setting log.cleaner.max.compaction.lag.ms to enforce compactions sooner, instead of setting a very high value for this option.
- logCleanerMinCompactionLagMs Integer: The minimum time a message will remain uncompacted in the log. Only applicable for logs that are being compacted.
- logCleanupPolicy String: The default cleanup policy for segments beyond the retention window.
- logFlushIntervalMessages Integer: The number of messages accumulated on a log partition before messages are flushed to disk.
- logFlushIntervalMs Integer: The maximum time in ms that a message in any topic is kept in memory before being flushed to disk. If not set, the value in log.flush.scheduler.interval.ms is used.
- logIndexIntervalBytes Integer: The interval with which Kafka adds an entry to the offset index.
- logIndexSizeMaxBytes Integer: The maximum size in bytes of the offset index.
- logMessageDownconversionEnable Boolean: Controls whether down-conversion of message formats is enabled to satisfy consume requests.
- logMessageTimestampDifferenceMaxMs Integer: The maximum difference allowed between the timestamp when a broker receives a message and the timestamp specified in the message.
- logMessageTimestampType String: Defines whether the timestamp in the message is message create time or log append time.
- logPreallocate Boolean: Whether to preallocate the file when creating a new segment.
- logRetentionBytes Integer: The maximum size of the log before deleting messages.
- logRetentionHours Integer: The number of hours to keep a log file before deleting it.
- logRetentionMs Integer: The number of milliseconds to keep a log file before deleting it. If not set, the value in log.retention.minutes is used. If set to -1, no time limit is applied.
- logRollJitterMs Integer: The maximum jitter to subtract from logRollTimeMillis (in milliseconds). If not set, the value in log.roll.jitter.hours is used.
- logRollMs Integer: The maximum time before a new log segment is rolled out (in milliseconds).
- logSegmentBytes Integer: The maximum size of a single log file.
- logSegmentDeleteDelayMs Integer: The amount of time to wait before deleting a file from the filesystem.
- maxConnectionsPerIp Integer: The maximum number of connections allowed from each IP address (defaults to 2147483647).
- maxIncrementalFetchSessionCacheSlots Integer: The maximum number of incremental fetch sessions that the broker will maintain.
- messageMaxBytes Integer: The maximum size of message that the server can receive.
- minInsyncReplicas Integer: When a producer sets acks to 'all' (or '-1'), min.insync.replicas specifies the minimum number of replicas that must acknowledge a write for the write to be considered successful.
- numPartitions Integer: Number of partitions for auto-created topics.
- offsetsRetentionMinutes Integer: Log retention window in minutes for the offsets topic.
- producerPurgatoryPurgeIntervalRequests Integer: The purge interval (in number of requests) of the producer request purgatory (defaults to 1000).
- replicaFetchMaxBytes Integer: The number of bytes of messages to attempt to fetch for each partition (defaults to 1048576). This is not an absolute maximum; if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that progress can be made.
- replicaFetchResponseMaxBytes Integer: Maximum bytes expected for the entire fetch response (defaults to 10485760). Records are fetched in batches, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that progress can be made. As such, this is not an absolute maximum.
- socketRequestMaxBytes Integer: The maximum number of bytes in a socket request (defaults to 104857600).
- transactionRemoveExpiredTransactionCleanupIntervalMs Integer: The interval at which to remove transactions that have expired due to transactional.id.expiration.ms passing (defaults to 3600000, i.e. 1 hour).
- transactionStateLogSegmentBytes Integer: The transaction topic segment bytes should be kept relatively small in order to facilitate faster log compaction and cache loads (defaults to 104857600, i.e. 100 mebibytes).
- autoCreateTopicsEnable boolean: Enable auto-creation of topics.
- compressionType string: Specify the final compression type for a given topic. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'uncompressed', which is equivalent to no compression, and 'producer', which means retain the original compression codec set by the producer.
- connectionsMaxIdleMs number: Idle connections timeout: the server socket processor threads close connections that have been idle for longer than this.
- defaultReplicationFactor number: Replication factor for auto-created topics.
- groupInitialRebalanceDelayMs number: The amount of time, in milliseconds, the group coordinator will wait for more consumers to join a new group before performing the first rebalance. A longer delay means potentially fewer rebalances, but increases the time until processing begins. The default value is 3 seconds. During development and testing it might be desirable to set this to 0 so as not to delay test execution.
- groupMaxSessionTimeoutMs number: The maximum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages between heartbeats at the cost of a longer time to detect failures.
- groupMinSessionTimeoutMs number: The minimum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages between heartbeats at the cost of a longer time to detect failures.
- logCleanerDeleteRetentionMs number: How long delete records are retained.
- logCleanerMaxCompactionLagMs number: The maximum amount of time a message will remain uncompacted. Only applicable for logs that are being compacted.
- logCleanerMinCleanableRatio number: Controls log compactor frequency. A larger value means more frequent compactions but also more space wasted for logs. Consider setting log.cleaner.max.compaction.lag.ms to enforce compactions sooner, instead of setting a very high value for this option.
- logCleanerMinCompactionLagMs number: The minimum time a message will remain uncompacted in the log. Only applicable for logs that are being compacted.
- logCleanupPolicy string: The default cleanup policy for segments beyond the retention window.
- logFlushIntervalMessages number: The number of messages accumulated on a log partition before messages are flushed to disk.
- logFlushIntervalMs number: The maximum time in ms that a message in any topic is kept in memory before being flushed to disk. If not set, the value in log.flush.scheduler.interval.ms is used.
- logIndexIntervalBytes number: The interval with which Kafka adds an entry to the offset index.
- logIndexSizeMaxBytes number: The maximum size in bytes of the offset index.
- logMessageDownconversionEnable boolean: Controls whether down-conversion of message formats is enabled to satisfy consume requests.
- logMessageTimestampDifferenceMaxMs number: The maximum difference allowed between the timestamp when a broker receives a message and the timestamp specified in the message.
- logMessageTimestampType string: Defines whether the timestamp in the message is message create time or log append time.
- logPreallocate boolean: Whether to preallocate the file when creating a new segment.
- logRetentionBytes number: The maximum size of the log before deleting messages.
- logRetentionHours number: The number of hours to keep a log file before deleting it.
- logRetentionMs number: The number of milliseconds to keep a log file before deleting it. If not set, the value in log.retention.minutes is used. If set to -1, no time limit is applied.
- logRollJitterMs number: The maximum jitter to subtract from logRollTimeMillis (in milliseconds). If not set, the value in log.roll.jitter.hours is used.
- logRollMs number: The maximum time before a new log segment is rolled out (in milliseconds).
- logSegmentBytes number: The maximum size of a single log file.
- logSegmentDeleteDelayMs number: The amount of time to wait before deleting a file from the filesystem.
- maxConnectionsPerIp number: The maximum number of connections allowed from each IP address (defaults to 2147483647).
- maxIncrementalFetchSessionCacheSlots number: The maximum number of incremental fetch sessions that the broker will maintain.
- messageMaxBytes number: The maximum size of message that the server can receive.
- minInsyncReplicas number: When a producer sets acks to 'all' (or '-1'), min.insync.replicas specifies the minimum number of replicas that must acknowledge a write for the write to be considered successful.
- numPartitions number: Number of partitions for auto-created topics.
- offsetsRetentionMinutes number: Log retention window in minutes for the offsets topic.
- producerPurgatoryPurgeIntervalRequests number: The purge interval (in number of requests) of the producer request purgatory (defaults to 1000).
- replicaFetchMaxBytes number: The number of bytes of messages to attempt to fetch for each partition (defaults to 1048576). This is not an absolute maximum; if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that progress can be made.
- replicaFetchResponseMaxBytes number: Maximum bytes expected for the entire fetch response (defaults to 10485760). Records are fetched in batches, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that progress can be made. As such, this is not an absolute maximum.
- socketRequestMaxBytes number: The maximum number of bytes in a socket request (defaults to 104857600).
- transactionRemoveExpiredTransactionCleanupIntervalMs number: The interval at which to remove transactions that have expired due to transactional.id.expiration.ms passing (defaults to 3600000, i.e. 1 hour).
- transactionStateLogSegmentBytes number: The transaction topic segment bytes should be kept relatively small in order to facilitate faster log compaction and cache loads (defaults to 104857600, i.e. 100 mebibytes).
- auto_create_topics_enable bool: Enable auto-creation of topics.
- compression_type str: Specify the final compression type for a given topic. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'uncompressed', which is equivalent to no compression, and 'producer', which means retain the original compression codec set by the producer.
- connections_max_idle_ms int: Idle connections timeout: the server socket processor threads close connections that have been idle for longer than this.
- default_replication_factor int: Replication factor for auto-created topics.
- group_initial_rebalance_delay_ms int: The amount of time, in milliseconds, the group coordinator will wait for more consumers to join a new group before performing the first rebalance. A longer delay means potentially fewer rebalances, but increases the time until processing begins. The default value is 3 seconds. During development and testing it might be desirable to set this to 0 so as not to delay test execution.
- group_max_session_timeout_ms int: The maximum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages between heartbeats at the cost of a longer time to detect failures.
- group_min_session_timeout_ms int: The minimum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages between heartbeats at the cost of a longer time to detect failures.
- log_cleaner_delete_retention_ms int: How long delete records are retained.
- log_cleaner_max_compaction_lag_ms int: The maximum amount of time a message will remain uncompacted. Only applicable for logs that are being compacted.
- log_cleaner_min_cleanable_ratio float: Controls log compactor frequency. A larger value means more frequent compactions but also more space wasted for logs. Consider setting log.cleaner.max.compaction.lag.ms to enforce compactions sooner, instead of setting a very high value for this option.
- log_cleaner_min_compaction_lag_ms int: The minimum time a message will remain uncompacted in the log. Only applicable for logs that are being compacted.
- log_cleanup_policy str: The default cleanup policy for segments beyond the retention window.
- log_flush_interval_messages int: The number of messages accumulated on a log partition before messages are flushed to disk.
- log_flush_interval_ms int: The maximum time in ms that a message in any topic is kept in memory before being flushed to disk. If not set, the value in log.flush.scheduler.interval.ms is used.
- log_index_interval_bytes int: The interval with which Kafka adds an entry to the offset index.
- log_index_size_max_bytes int: The maximum size in bytes of the offset index.
- log_message_downconversion_enable bool: Controls whether down-conversion of message formats is enabled to satisfy consume requests.
- log_message_timestamp_difference_max_ms int: The maximum difference allowed between the timestamp when a broker receives a message and the timestamp specified in the message.
- log_message_timestamp_type str: Defines whether the timestamp in the message is message create time or log append time.
- log_preallocate bool: Whether to preallocate the file when creating a new segment.
- log_retention_bytes int: The maximum size of the log before deleting messages.
- log_retention_hours int: The number of hours to keep a log file before deleting it.
- log_retention_ms int: The number of milliseconds to keep a log file before deleting it. If not set, the value in log.retention.minutes is used. If set to -1, no time limit is applied.
- log_roll_jitter_ms int: The maximum jitter to subtract from logRollTimeMillis (in milliseconds). If not set, the value in log.roll.jitter.hours is used.
- log_roll_ms int: The maximum time before a new log segment is rolled out (in milliseconds).
- log_segment_bytes int: The maximum size of a single log file.
- log_segment_delete_delay_ms int: The amount of time to wait before deleting a file from the filesystem.
- max_connections_per_ip int: The maximum number of connections allowed from each IP address (defaults to 2147483647).
- max_incremental_fetch_session_cache_slots int: The maximum number of incremental fetch sessions that the broker will maintain.
- message_max_bytes int: The maximum size of message that the server can receive.
- min_insync_replicas int: When a producer sets acks to 'all' (or '-1'), min.insync.replicas specifies the minimum number of replicas that must acknowledge a write for the write to be considered successful.
- num_partitions int: Number of partitions for auto-created topics.
- offsets_retention_minutes int: Log retention window in minutes for the offsets topic.
- producer_purgatory_purge_interval_requests int: The purge interval (in number of requests) of the producer request purgatory (defaults to 1000).
- replica_fetch_max_bytes int: The number of bytes of messages to attempt to fetch for each partition (defaults to 1048576). This is not an absolute maximum; if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that progress can be made.
- replica_fetch_response_max_bytes int: Maximum bytes expected for the entire fetch response (defaults to 10485760). Records are fetched in batches, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that progress can be made. As such, this is not an absolute maximum.
- socket_request_max_bytes int: The maximum number of bytes in a socket request (defaults to 104857600).
- transaction_remove_expired_transaction_cleanup_interval_ms int: The interval at which to remove transactions that have expired due to transactional.id.expiration.ms passing (defaults to 3600000, i.e. 1 hour).
- transaction_state_log_segment_bytes int: The transaction topic segment bytes should be kept relatively small in order to facilitate faster log compaction and cache loads (defaults to 104857600, i.e. 100 mebibytes).
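To make these broker-level knobs concrete, here is a minimal Python sketch of a durability-leaning configuration; the values are illustrative, not recommendations:
import pulumi_aiven as aiven

kafka_tuning = aiven.KafkaKafkaUserConfigKafkaArgs(
    auto_create_topics_enable=False,  # require explicit topic creation
    default_replication_factor=3,
    min_insync_replicas=2,            # with acks=all, tolerate one replica down
    log_retention_ms=7 * 24 * 60 * 60 * 1000,  # keep messages for 7 days
    message_max_bytes=1048576,        # cap records at 1 MiB
)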
- autoCreateTopicsEnable Boolean Enable auto creation of topics.
- compressionType String Specify the final compression type for a given topic. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'uncompressed' which is equivalent to no compression; and 'producer' which means retain the original compression codec set by the producer.
- connectionsMaxIdleMs Number Idle connections timeout: the server socket processor threads close the connections that idle for longer than this.
- defaultReplicationFactor Number Replication factor for autocreated topics.
- groupInitialRebalanceDelayMs Number The amount of time, in milliseconds, the group coordinator will wait for more consumers to join a new group before performing the first rebalance. A longer delay means potentially fewer rebalances, but increases the time until processing begins. The default value for this is 3 seconds. During development and testing it might be desirable to set this to 0 in order not to delay test execution time.
- groupMaxSessionTimeoutMs Number The maximum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures.
- groupMinSessionTimeoutMs Number The minimum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures.
- logCleanerDeleteRetentionMs Number How long delete records are retained.
- logCleanerMaxCompactionLagMs Number The maximum amount of time a message will remain uncompacted. Only applicable for logs that are being compacted.
- logCleanerMinCleanableRatio Number Controls log compactor frequency. A larger value means more frequent compactions but also more space wasted for logs. Consider setting log.cleaner.max.compaction.lag.ms to enforce compactions sooner, instead of setting a very high value for this option.
- logCleanerMinCompactionLagMs Number The minimum time a message will remain uncompacted in the log. Only applicable for logs that are being compacted.
- logCleanupPolicy String The default cleanup policy for segments beyond the retention window.
- logFlushIntervalMessages Number The number of messages accumulated on a log partition before messages are flushed to disk.
- logFlushIntervalMs Number The maximum time in ms that a message in any topic is kept in memory before being flushed to disk. If not set, the value in log.flush.scheduler.interval.ms is used.
- logIndexIntervalBytes Number The interval with which Kafka adds an entry to the offset index.
- logIndexSizeMaxBytes Number The maximum size in bytes of the offset index.
- logMessageDownconversionEnable Boolean This configuration controls whether down-conversion of message formats is enabled to satisfy consume requests.
- logMessageTimestampDifferenceMaxMs Number The maximum difference allowed between the timestamp when a broker receives a message and the timestamp specified in the message.
- logMessageTimestampType String Define whether the timestamp in the message is message create time or log append time.
- logPreallocate Boolean Should pre-allocate file when creating a new segment?
- logRetentionBytes Number The maximum size of the log before deleting messages.
- logRetentionHours Number The number of hours to keep a log file before deleting it.
- logRetentionMs Number The number of milliseconds to keep a log file before deleting it. If not set, the value in log.retention.minutes is used. If set to -1, no time limit is applied.
- logRollJitterMs Number The maximum jitter to subtract from logRollTimeMillis (in milliseconds). If not set, the value in log.roll.jitter.hours is used.
- logRollMs Number The maximum time before a new log segment is rolled out (in milliseconds).
- logSegmentBytes Number The maximum size of a single log file.
- logSegmentDeleteDelayMs Number The amount of time to wait before deleting a file from the filesystem.
- maxConnectionsPerIp Number The maximum number of connections allowed from each IP address (defaults to 2147483647).
- maxIncrementalFetchSessionCacheSlots Number The maximum number of incremental fetch sessions that the broker will maintain.
- messageMaxBytes Number The maximum size of message that the server can receive.
- minInsyncReplicas Number When a producer sets acks to 'all' (or '-1'), min.insync.replicas specifies the minimum number of replicas that must acknowledge a write for the write to be considered successful.
- numPartitions Number Number of partitions for autocreated topics.
- offsetsRetentionMinutes Number Log retention window in minutes for offsets topic.
- producerPurgatoryPurgeIntervalRequests Number The purge interval (in number of requests) of the producer request purgatory (defaults to 1000).
- replicaFetchMaxBytes Number The number of bytes of messages to attempt to fetch for each partition (defaults to 1048576). This is not an absolute maximum; if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that progress can be made.
- replicaFetchResponseMaxBytes Number Maximum bytes expected for the entire fetch response (defaults to 10485760). Records are fetched in batches, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that progress can be made. As such, this is not an absolute maximum.
- socketRequestMaxBytes Number The maximum number of bytes in a socket request (defaults to 104857600).
- transactionRemoveExpiredTransactionCleanupIntervalMs Number The interval at which to remove transactions that have expired due to transactional.id.expiration.ms passing (defaults to 3600000 (1 hour)).
- transactionStateLogSegmentBytes Number The transaction topic segment bytes should be kept relatively small in order to facilitate faster log compaction and cache loads (defaults to 104857600 (100 mebibytes)).
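These broker-level settings are nested under the kafka block of the service user config. A minimal Python sketch (the project and service names are placeholders) overriding a few of the values listed above:
import pulumi_aiven as aiven

# Hypothetical service overriding selected broker settings from the list above.
kafka_tuned = aiven.Kafka("kafka-tuned",
    project="my-project",
    cloud_name="google-europe-west1",
    plan="business-4",
    service_name="my-kafka-tuned",
    kafka_user_config=aiven.KafkaKafkaUserConfigArgs(
        kafka=aiven.KafkaKafkaUserConfigKafkaArgs(
            message_max_bytes=1048576,    # largest message the broker accepts
            min_insync_replicas=2,        # acks=all then requires two in-sync replicas
            num_partitions=6,             # default partition count for autocreated topics
            log_retention_ms=604800000,   # keep log segments for 7 days
        )))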
KafkaKafkaUserConfigKafkaAuthenticationMethods, KafkaKafkaUserConfigKafkaAuthenticationMethodsArgs
- Certificate bool Enable certificate/SSL authentication. The default value is true.
- Sasl bool Enable SASL authentication. The default value is false.
- Certificate bool Enable certificate/SSL authentication. The default value is true.
- Sasl bool Enable SASL authentication. The default value is false.
- certificate Boolean Enable certificate/SSL authentication. The default value is true.
- sasl Boolean Enable SASL authentication. The default value is false.
- certificate boolean Enable certificate/SSL authentication. The default value is true.
- sasl boolean Enable SASL authentication. The default value is false.
- certificate bool Enable certificate/SSL authentication. The default value is true.
- sasl bool Enable SASL authentication. The default value is false.
- certificate Boolean Enable certificate/SSL authentication. The default value is true.
- sasl Boolean Enable SASL authentication. The default value is false.
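As a minimal Python sketch (service and project names are placeholders, and the kafka_authentication_methods field name is assumed from the type name above), SASL can be enabled alongside the default certificate authentication:
import pulumi_aiven as aiven

# Hypothetical service enabling SASL in addition to certificate/SSL auth.
kafka_sasl = aiven.Kafka("kafka-sasl",
    project="my-project",
    cloud_name="google-europe-west1",
    plan="business-4",
    service_name="my-kafka-sasl",
    kafka_user_config=aiven.KafkaKafkaUserConfigArgs(
        kafka_authentication_methods=aiven.KafkaKafkaUserConfigKafkaAuthenticationMethodsArgs(
            certificate=True,  # default is true
            sasl=True,         # default is false
        )))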
KafkaKafkaUserConfigKafkaConnectConfig, KafkaKafkaUserConfigKafkaConnectConfigArgs
- ConnectorClientConfigOverridePolicy string Defines what client configurations can be overridden by the connector. Default is None.
- ConsumerAutoOffsetReset string What to do when there is no initial offset in Kafka or if the current offset does not exist any more on the server. Default is earliest.
- ConsumerFetchMaxBytes int Records are fetched in batches by the consumer, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that the consumer can make progress. As such, this is not an absolute maximum.
- ConsumerIsolationLevel string Transaction read isolation level. read_uncommitted is the default, but read_committed can be used if consume-exactly-once behavior is desired.
- ConsumerMaxPartitionFetchBytes int Records are fetched in batches by the consumer. If the first record batch in the first non-empty partition of the fetch is larger than this limit, the batch will still be returned to ensure that the consumer can make progress.
- ConsumerMaxPollIntervalMs int The maximum delay in milliseconds between invocations of poll() when using consumer group management (defaults to 300000).
- ConsumerMaxPollRecords int The maximum number of records returned in a single call to poll() (defaults to 500).
- OffsetFlushIntervalMs int The interval at which to try committing offsets for tasks (defaults to 60000).
- OffsetFlushTimeoutMs int Maximum number of milliseconds to wait for records to flush and partition offset data to be committed to offset storage before cancelling the process and restoring the offset data to be committed in a future attempt (defaults to 5000).
- ProducerBatchSize int This setting gives the upper bound of the batch size to be sent. If there are fewer than this many bytes accumulated for this partition, the producer will 'linger' for the linger.ms time waiting for more records to show up. A batch size of zero will disable batching entirely (defaults to 16384).
- ProducerBufferMemory int The total bytes of memory the producer can use to buffer records waiting to be sent to the broker (defaults to 33554432).
- ProducerCompressionType string Specify the default compression type for producers. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'none' which is the default and equivalent to no compression.
- ProducerLingerMs int This setting gives the upper bound on the delay for batching: once there is batch.size worth of records for a partition it will be sent immediately regardless of this setting; however, if there are fewer than this many bytes accumulated for this partition the producer will 'linger' for the specified time waiting for more records to show up. Defaults to 0.
- ProducerMaxRequestSize int This setting will limit the number of record batches the producer will send in a single request to avoid sending huge requests.
- ScheduledRebalanceMaxDelayMs int The maximum delay that is scheduled in order to wait for the return of one or more departed workers before rebalancing and reassigning their connectors and tasks to the group. During this period the connectors and tasks of the departed workers remain unassigned. Defaults to 5 minutes.
- SessionTimeoutMs int The timeout in milliseconds used to detect failures when using Kafka’s group management facilities (defaults to 10000).
- ConnectorClientConfigOverridePolicy string Defines what client configurations can be overridden by the connector. Default is None.
- ConsumerAutoOffsetReset string What to do when there is no initial offset in Kafka or if the current offset does not exist any more on the server. Default is earliest.
- ConsumerFetchMaxBytes int Records are fetched in batches by the consumer, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that the consumer can make progress. As such, this is not an absolute maximum.
- ConsumerIsolationLevel string Transaction read isolation level. read_uncommitted is the default, but read_committed can be used if consume-exactly-once behavior is desired.
- ConsumerMaxPartitionFetchBytes int Records are fetched in batches by the consumer. If the first record batch in the first non-empty partition of the fetch is larger than this limit, the batch will still be returned to ensure that the consumer can make progress.
- ConsumerMaxPollIntervalMs int The maximum delay in milliseconds between invocations of poll() when using consumer group management (defaults to 300000).
- ConsumerMaxPollRecords int The maximum number of records returned in a single call to poll() (defaults to 500).
- OffsetFlushIntervalMs int The interval at which to try committing offsets for tasks (defaults to 60000).
- OffsetFlushTimeoutMs int Maximum number of milliseconds to wait for records to flush and partition offset data to be committed to offset storage before cancelling the process and restoring the offset data to be committed in a future attempt (defaults to 5000).
- ProducerBatchSize int This setting gives the upper bound of the batch size to be sent. If there are fewer than this many bytes accumulated for this partition, the producer will 'linger' for the linger.ms time waiting for more records to show up. A batch size of zero will disable batching entirely (defaults to 16384).
- ProducerBufferMemory int The total bytes of memory the producer can use to buffer records waiting to be sent to the broker (defaults to 33554432).
- ProducerCompressionType string Specify the default compression type for producers. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'none' which is the default and equivalent to no compression.
- ProducerLingerMs int This setting gives the upper bound on the delay for batching: once there is batch.size worth of records for a partition it will be sent immediately regardless of this setting; however, if there are fewer than this many bytes accumulated for this partition the producer will 'linger' for the specified time waiting for more records to show up. Defaults to 0.
- ProducerMaxRequestSize int This setting will limit the number of record batches the producer will send in a single request to avoid sending huge requests.
- ScheduledRebalanceMaxDelayMs int The maximum delay that is scheduled in order to wait for the return of one or more departed workers before rebalancing and reassigning their connectors and tasks to the group. During this period the connectors and tasks of the departed workers remain unassigned. Defaults to 5 minutes.
- SessionTimeoutMs int The timeout in milliseconds used to detect failures when using Kafka’s group management facilities (defaults to 10000).
- connectorClientConfigOverridePolicy String Defines what client configurations can be overridden by the connector. Default is None.
- consumerAutoOffsetReset String What to do when there is no initial offset in Kafka or if the current offset does not exist any more on the server. Default is earliest.
- consumerFetchMaxBytes Integer Records are fetched in batches by the consumer, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that the consumer can make progress. As such, this is not an absolute maximum.
- consumerIsolationLevel String Transaction read isolation level. read_uncommitted is the default, but read_committed can be used if consume-exactly-once behavior is desired.
- consumerMaxPartitionFetchBytes Integer Records are fetched in batches by the consumer. If the first record batch in the first non-empty partition of the fetch is larger than this limit, the batch will still be returned to ensure that the consumer can make progress.
- consumerMaxPollIntervalMs Integer The maximum delay in milliseconds between invocations of poll() when using consumer group management (defaults to 300000).
- consumerMaxPollRecords Integer The maximum number of records returned in a single call to poll() (defaults to 500).
- offsetFlushIntervalMs Integer The interval at which to try committing offsets for tasks (defaults to 60000).
- offsetFlushTimeoutMs Integer Maximum number of milliseconds to wait for records to flush and partition offset data to be committed to offset storage before cancelling the process and restoring the offset data to be committed in a future attempt (defaults to 5000).
- producerBatchSize Integer This setting gives the upper bound of the batch size to be sent. If there are fewer than this many bytes accumulated for this partition, the producer will 'linger' for the linger.ms time waiting for more records to show up. A batch size of zero will disable batching entirely (defaults to 16384).
- producerBufferMemory Integer The total bytes of memory the producer can use to buffer records waiting to be sent to the broker (defaults to 33554432).
- producerCompressionType String Specify the default compression type for producers. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'none' which is the default and equivalent to no compression.
- producerLingerMs Integer This setting gives the upper bound on the delay for batching: once there is batch.size worth of records for a partition it will be sent immediately regardless of this setting; however, if there are fewer than this many bytes accumulated for this partition the producer will 'linger' for the specified time waiting for more records to show up. Defaults to 0.
- producerMaxRequestSize Integer This setting will limit the number of record batches the producer will send in a single request to avoid sending huge requests.
- scheduledRebalanceMaxDelayMs Integer The maximum delay that is scheduled in order to wait for the return of one or more departed workers before rebalancing and reassigning their connectors and tasks to the group. During this period the connectors and tasks of the departed workers remain unassigned. Defaults to 5 minutes.
- sessionTimeoutMs Integer The timeout in milliseconds used to detect failures when using Kafka’s group management facilities (defaults to 10000).
- connectorClientConfigOverridePolicy string Defines what client configurations can be overridden by the connector. Default is None.
- consumerAutoOffsetReset string What to do when there is no initial offset in Kafka or if the current offset does not exist any more on the server. Default is earliest.
- consumerFetchMaxBytes number Records are fetched in batches by the consumer, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that the consumer can make progress. As such, this is not an absolute maximum.
- consumerIsolationLevel string Transaction read isolation level. read_uncommitted is the default, but read_committed can be used if consume-exactly-once behavior is desired.
- consumerMaxPartitionFetchBytes number Records are fetched in batches by the consumer. If the first record batch in the first non-empty partition of the fetch is larger than this limit, the batch will still be returned to ensure that the consumer can make progress.
- consumerMaxPollIntervalMs number The maximum delay in milliseconds between invocations of poll() when using consumer group management (defaults to 300000).
- consumerMaxPollRecords number The maximum number of records returned in a single call to poll() (defaults to 500).
- offsetFlushIntervalMs number The interval at which to try committing offsets for tasks (defaults to 60000).
- offsetFlushTimeoutMs number Maximum number of milliseconds to wait for records to flush and partition offset data to be committed to offset storage before cancelling the process and restoring the offset data to be committed in a future attempt (defaults to 5000).
- producerBatchSize number This setting gives the upper bound of the batch size to be sent. If there are fewer than this many bytes accumulated for this partition, the producer will 'linger' for the linger.ms time waiting for more records to show up. A batch size of zero will disable batching entirely (defaults to 16384).
- producerBufferMemory number The total bytes of memory the producer can use to buffer records waiting to be sent to the broker (defaults to 33554432).
- producerCompressionType string Specify the default compression type for producers. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'none' which is the default and equivalent to no compression.
- producerLingerMs number This setting gives the upper bound on the delay for batching: once there is batch.size worth of records for a partition it will be sent immediately regardless of this setting; however, if there are fewer than this many bytes accumulated for this partition the producer will 'linger' for the specified time waiting for more records to show up. Defaults to 0.
- producerMaxRequestSize number This setting will limit the number of record batches the producer will send in a single request to avoid sending huge requests.
- scheduledRebalanceMaxDelayMs number The maximum delay that is scheduled in order to wait for the return of one or more departed workers before rebalancing and reassigning their connectors and tasks to the group. During this period the connectors and tasks of the departed workers remain unassigned. Defaults to 5 minutes.
- sessionTimeoutMs number The timeout in milliseconds used to detect failures when using Kafka’s group management facilities (defaults to 10000).
- connector_client_config_override_policy str Defines what client configurations can be overridden by the connector. Default is None.
- consumer_auto_offset_reset str What to do when there is no initial offset in Kafka or if the current offset does not exist any more on the server. Default is earliest.
- consumer_fetch_max_bytes int Records are fetched in batches by the consumer, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that the consumer can make progress. As such, this is not an absolute maximum.
- consumer_isolation_level str Transaction read isolation level. read_uncommitted is the default, but read_committed can be used if consume-exactly-once behavior is desired.
- consumer_max_partition_fetch_bytes int Records are fetched in batches by the consumer. If the first record batch in the first non-empty partition of the fetch is larger than this limit, the batch will still be returned to ensure that the consumer can make progress.
- consumer_max_poll_interval_ms int The maximum delay in milliseconds between invocations of poll() when using consumer group management (defaults to 300000).
- consumer_max_poll_records int The maximum number of records returned in a single call to poll() (defaults to 500).
- offset_flush_interval_ms int The interval at which to try committing offsets for tasks (defaults to 60000).
- offset_flush_timeout_ms int Maximum number of milliseconds to wait for records to flush and partition offset data to be committed to offset storage before cancelling the process and restoring the offset data to be committed in a future attempt (defaults to 5000).
- producer_batch_size int This setting gives the upper bound of the batch size to be sent. If there are fewer than this many bytes accumulated for this partition, the producer will 'linger' for the linger.ms time waiting for more records to show up. A batch size of zero will disable batching entirely (defaults to 16384).
- producer_buffer_memory int The total bytes of memory the producer can use to buffer records waiting to be sent to the broker (defaults to 33554432).
- producer_compression_type str Specify the default compression type for producers. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'none' which is the default and equivalent to no compression.
- producer_linger_ms int This setting gives the upper bound on the delay for batching: once there is batch.size worth of records for a partition it will be sent immediately regardless of this setting; however, if there are fewer than this many bytes accumulated for this partition the producer will 'linger' for the specified time waiting for more records to show up. Defaults to 0.
- producer_max_request_size int This setting will limit the number of record batches the producer will send in a single request to avoid sending huge requests.
- scheduled_rebalance_max_delay_ms int The maximum delay that is scheduled in order to wait for the return of one or more departed workers before rebalancing and reassigning their connectors and tasks to the group. During this period the connectors and tasks of the departed workers remain unassigned. Defaults to 5 minutes.
- session_timeout_ms int The timeout in milliseconds used to detect failures when using Kafka’s group management facilities (defaults to 10000).
- connectorClientConfigOverridePolicy String Defines what client configurations can be overridden by the connector. Default is None.
- consumerAutoOffsetReset String What to do when there is no initial offset in Kafka or if the current offset does not exist any more on the server. Default is earliest.
- consumerFetchMaxBytes Number Records are fetched in batches by the consumer, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that the consumer can make progress. As such, this is not an absolute maximum.
- consumerIsolationLevel String Transaction read isolation level. read_uncommitted is the default, but read_committed can be used if consume-exactly-once behavior is desired.
- consumerMaxPartitionFetchBytes Number Records are fetched in batches by the consumer. If the first record batch in the first non-empty partition of the fetch is larger than this limit, the batch will still be returned to ensure that the consumer can make progress.
- consumerMaxPollIntervalMs Number The maximum delay in milliseconds between invocations of poll() when using consumer group management (defaults to 300000).
- consumerMaxPollRecords Number The maximum number of records returned in a single call to poll() (defaults to 500).
- offsetFlushIntervalMs Number The interval at which to try committing offsets for tasks (defaults to 60000).
- offsetFlushTimeoutMs Number Maximum number of milliseconds to wait for records to flush and partition offset data to be committed to offset storage before cancelling the process and restoring the offset data to be committed in a future attempt (defaults to 5000).
- producerBatchSize Number This setting gives the upper bound of the batch size to be sent. If there are fewer than this many bytes accumulated for this partition, the producer will 'linger' for the linger.ms time waiting for more records to show up. A batch size of zero will disable batching entirely (defaults to 16384).
- producerBufferMemory Number The total bytes of memory the producer can use to buffer records waiting to be sent to the broker (defaults to 33554432).
- producerCompressionType String Specify the default compression type for producers. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'none' which is the default and equivalent to no compression.
- producerLingerMs Number This setting gives the upper bound on the delay for batching: once there is batch.size worth of records for a partition it will be sent immediately regardless of this setting; however, if there are fewer than this many bytes accumulated for this partition the producer will 'linger' for the specified time waiting for more records to show up. Defaults to 0.
- producerMaxRequestSize Number This setting will limit the number of record batches the producer will send in a single request to avoid sending huge requests.
- scheduledRebalanceMaxDelayMs Number The maximum delay that is scheduled in order to wait for the return of one or more departed workers before rebalancing and reassigning their connectors and tasks to the group. During this period the connectors and tasks of the departed workers remain unassigned. Defaults to 5 minutes.
- sessionTimeoutMs Number The timeout in milliseconds used to detect failures when using Kafka’s group management facilities (defaults to 10000).
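A minimal Python sketch tuning a few of these Kafka Connect worker settings (project and service names are placeholders; the kafka_connect_config field name is assumed from the type name above):
import pulumi_aiven as aiven

# Hypothetical service enabling Kafka Connect with a few worker-level overrides.
kafka_connect = aiven.Kafka("kafka-connect-tuned",
    project="my-project",
    cloud_name="google-europe-west1",
    plan="business-4",
    service_name="my-kafka-connect",
    kafka_user_config=aiven.KafkaKafkaUserConfigArgs(
        kafka_connect=True,
        kafka_connect_config=aiven.KafkaKafkaUserConfigKafkaConnectConfigArgs(
            consumer_isolation_level="read_committed",  # consume-exactly-once behavior
            consumer_max_poll_records=500,              # the documented default
            offset_flush_interval_ms=60000,             # the documented default
        )))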
KafkaKafkaUserConfigKafkaRestConfig, KafkaKafkaUserConfigKafkaRestConfigArgs
- ConsumerEnableAutoCommit bool If true, the consumer's offset will be periodically committed to Kafka in the background. The default value is true.
- ConsumerRequestMaxBytes int Maximum number of bytes in unencoded message keys and values by a single request. The default value is 67108864.
- ConsumerRequestTimeoutMs int The maximum total time to wait for messages for a request if the maximum number of messages has not yet been reached. The default value is 1000.
- ProducerAcks string The number of acknowledgments the producer requires the leader to have received before considering a request complete. If set to 'all' or '-1', the leader will wait for the full set of in-sync replicas to acknowledge the record. The default value is 1.
- ProducerCompressionType string Specify the default compression type for producers. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'none' which is the default and equivalent to no compression.
- ProducerLingerMs int This setting gives the upper bound on the delay for batching: once there is batch.size worth of records for a partition it will be sent immediately regardless of this setting; however, if there are fewer than this many bytes accumulated for this partition the producer will 'linger' for the specified time waiting for more records to show up. Defaults to 0.
- ProducerMaxRequestSize int This setting will limit the number of record batches the producer will send in a single request to avoid sending huge requests.
- SimpleconsumerPoolSizeMax int Maximum number of SimpleConsumers that can be instantiated per broker. The default value is 25.
- ConsumerEnableAutoCommit bool If true, the consumer's offset will be periodically committed to Kafka in the background. The default value is true.
- ConsumerRequestMaxBytes int Maximum number of bytes in unencoded message keys and values by a single request. The default value is 67108864.
- ConsumerRequestTimeoutMs int The maximum total time to wait for messages for a request if the maximum number of messages has not yet been reached. The default value is 1000.
- ProducerAcks string The number of acknowledgments the producer requires the leader to have received before considering a request complete. If set to 'all' or '-1', the leader will wait for the full set of in-sync replicas to acknowledge the record. The default value is 1.
- ProducerCompressionType string Specify the default compression type for producers. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'none' which is the default and equivalent to no compression.
- ProducerLingerMs int This setting gives the upper bound on the delay for batching: once there is batch.size worth of records for a partition it will be sent immediately regardless of this setting; however, if there are fewer than this many bytes accumulated for this partition the producer will 'linger' for the specified time waiting for more records to show up. Defaults to 0.
- ProducerMaxRequestSize int This setting will limit the number of record batches the producer will send in a single request to avoid sending huge requests.
- SimpleconsumerPoolSizeMax int Maximum number of SimpleConsumers that can be instantiated per broker. The default value is 25.
- consumerEnableAutoCommit Boolean If true, the consumer's offset will be periodically committed to Kafka in the background. The default value is true.
- consumerRequestMaxBytes Integer Maximum number of bytes in unencoded message keys and values by a single request. The default value is 67108864.
- consumerRequestTimeoutMs Integer The maximum total time to wait for messages for a request if the maximum number of messages has not yet been reached. The default value is 1000.
- producerAcks String The number of acknowledgments the producer requires the leader to have received before considering a request complete. If set to 'all' or '-1', the leader will wait for the full set of in-sync replicas to acknowledge the record. The default value is 1.
- producerCompressionType String Specify the default compression type for producers. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'none' which is the default and equivalent to no compression.
- producerLingerMs Integer This setting gives the upper bound on the delay for batching: once there is batch.size worth of records for a partition it will be sent immediately regardless of this setting; however, if there are fewer than this many bytes accumulated for this partition the producer will 'linger' for the specified time waiting for more records to show up. Defaults to 0.
- producerMaxRequestSize Integer This setting will limit the number of record batches the producer will send in a single request to avoid sending huge requests.
- simpleconsumerPoolSizeMax Integer Maximum number of SimpleConsumers that can be instantiated per broker. The default value is 25.
- consumerEnableAutoCommit boolean If true, the consumer's offset will be periodically committed to Kafka in the background. The default value is true.
- consumerRequestMaxBytes number Maximum number of bytes in unencoded message keys and values by a single request. The default value is 67108864.
- consumerRequestTimeoutMs number The maximum total time to wait for messages for a request if the maximum number of messages has not yet been reached. The default value is 1000.
- producerAcks string The number of acknowledgments the producer requires the leader to have received before considering a request complete. If set to 'all' or '-1', the leader will wait for the full set of in-sync replicas to acknowledge the record. The default value is 1.
- producerCompressionType string Specify the default compression type for producers. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'none' which is the default and equivalent to no compression.
- producerLingerMs number This setting gives the upper bound on the delay for batching: once there is batch.size worth of records for a partition it will be sent immediately regardless of this setting; however, if there are fewer than this many bytes accumulated for this partition the producer will 'linger' for the specified time waiting for more records to show up. Defaults to 0.
- producerMaxRequestSize number This setting will limit the number of record batches the producer will send in a single request to avoid sending huge requests.
- simpleconsumerPoolSizeMax number Maximum number of SimpleConsumers that can be instantiated per broker. The default value is 25.
- consumer_enable_auto_commit bool If true, the consumer's offset will be periodically committed to Kafka in the background. The default value is true.
- consumer_request_max_bytes int Maximum number of bytes in unencoded message keys and values by a single request. The default value is 67108864.
- consumer_request_timeout_ms int The maximum total time to wait for messages for a request if the maximum number of messages has not yet been reached. The default value is 1000.
- producer_acks str The number of acknowledgments the producer requires the leader to have received before considering a request complete. If set to 'all' or '-1', the leader will wait for the full set of in-sync replicas to acknowledge the record. The default value is 1.
- producer_compression_type str Specify the default compression type for producers. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'none' which is the default and equivalent to no compression.
- producer_linger_ms int This setting gives the upper bound on the delay for batching: once there is batch.size worth of records for a partition it will be sent immediately regardless of this setting; however, if there are fewer than this many bytes accumulated for this partition the producer will 'linger' for the specified time waiting for more records to show up. Defaults to 0.
- producer_max_request_size int This setting will limit the number of record batches the producer will send in a single request to avoid sending huge requests.
- simpleconsumer_pool_size_max int Maximum number of SimpleConsumers that can be instantiated per broker. The default value is 25.
- consumerEnableAutoCommit Boolean If true, the consumer's offset will be periodically committed to Kafka in the background. The default value is true.
- consumerRequestMaxBytes Number Maximum number of bytes in unencoded message keys and values by a single request. The default value is 67108864.
- consumerRequestTimeoutMs Number The maximum total time to wait for messages for a request if the maximum number of messages has not yet been reached. The default value is 1000.
- producerAcks String The number of acknowledgments the producer requires the leader to have received before considering a request complete. If set to 'all' or '-1', the leader will wait for the full set of in-sync replicas to acknowledge the record. The default value is 1.
- producerCompressionType String Specify the default compression type for producers. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'none' which is the default and equivalent to no compression.
- producerLingerMs Number This setting gives the upper bound on the delay for batching: once there is batch.size worth of records for a partition it will be sent immediately regardless of this setting; however, if there are fewer than this many bytes accumulated for this partition the producer will 'linger' for the specified time waiting for more records to show up. Defaults to 0.
- producerMaxRequestSize Number This setting will limit the number of record batches the producer will send in a single request to avoid sending huge requests.
- simpleconsumerPoolSizeMax Number Maximum number of SimpleConsumers that can be instantiated per broker. The default value is 25.
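A minimal Python sketch adjusting the Kafka-REST defaults listed above (names are placeholders; the kafka_rest_config field name is assumed from the type name above):
import pulumi_aiven as aiven

# Hypothetical service enabling Kafka-REST with stricter producer acknowledgments.
kafka_rest = aiven.Kafka("kafka-rest-tuned",
    project="my-project",
    cloud_name="google-europe-west1",
    plan="business-4",
    service_name="my-kafka-rest",
    kafka_user_config=aiven.KafkaKafkaUserConfigArgs(
        kafka_rest=True,
        kafka_rest_config=aiven.KafkaKafkaUserConfigKafkaRestConfigArgs(
            producer_acks="all",               # wait for the full in-sync replica set
            consumer_request_timeout_ms=1000,  # the documented default
        )))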
KafkaKafkaUserConfigPrivateAccess, KafkaKafkaUserConfigPrivateAccessArgs
- Kafka bool Kafka broker configuration values.
- KafkaConnect bool Enable Kafka Connect service. The default value is false.
- KafkaRest bool Enable Kafka-REST service. The default value is false.
- Prometheus bool Allow clients to connect to prometheus with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
- SchemaRegistry bool Allow clients to connect to schema_registry with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
- Kafka bool Kafka broker configuration values.
- KafkaConnect bool Enable Kafka Connect service. The default value is false.
- KafkaRest bool Enable Kafka-REST service. The default value is false.
- Prometheus bool Allow clients to connect to prometheus with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
- SchemaRegistry bool Allow clients to connect to schema_registry with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
- kafka Boolean Kafka broker configuration values.
- kafkaConnect Boolean Enable Kafka Connect service. The default value is false.
- kafkaRest Boolean Enable Kafka-REST service. The default value is false.
- prometheus Boolean Allow clients to connect to prometheus with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
- schemaRegistry Boolean Allow clients to connect to schema_registry with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
- kafka boolean Kafka broker configuration values.
- kafkaConnect boolean Enable Kafka Connect service. The default value is false.
- kafkaRest boolean Enable Kafka-REST service. The default value is false.
- prometheus boolean Allow clients to connect to prometheus with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
- schemaRegistry boolean Allow clients to connect to schema_registry with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
- kafka bool Kafka broker configuration values.
- kafka_connect bool Enable Kafka Connect service. The default value is false.
- kafka_rest bool Enable Kafka-REST service. The default value is false.
- prometheus bool Allow clients to connect to prometheus with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
- schema_registry bool Allow clients to connect to schema_registry with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
- kafka Boolean Kafka broker configuration values.
- kafkaConnect Boolean Enable Kafka Connect service. The default value is false.
- kafkaRest Boolean Enable Kafka-REST service. The default value is false.
- prometheus Boolean Allow clients to connect to prometheus with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
- schemaRegistry Boolean Allow clients to connect to schema_registry with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
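A minimal Python sketch of private access (all names are placeholders; the assumption here is that the service is already placed in a project VPC via project_vpc_id, which private access generally requires):
import pulumi_aiven as aiven

# Hypothetical VPC-peered service exposing Prometheus only on private IPs.
kafka_private = aiven.Kafka("kafka-private",
    project="my-project",
    cloud_name="google-europe-west1",
    plan="business-4",
    service_name="my-kafka-private",
    project_vpc_id="my-project-vpc-id",  # placeholder VPC ID
    kafka_user_config=aiven.KafkaKafkaUserConfigArgs(
        private_access=aiven.KafkaKafkaUserConfigPrivateAccessArgs(
            prometheus=True,
        )))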
KafkaKafkaUserConfigPrivatelinkAccess, KafkaKafkaUserConfigPrivatelinkAccessArgs
- Jolokia bool Enable jolokia.
- Kafka bool Kafka broker configuration values.
- KafkaConnect bool Enable Kafka Connect service. The default value is false.
- KafkaRest bool Enable Kafka-REST service. The default value is false.
- Prometheus bool Allow clients to connect to prometheus with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
- SchemaRegistry bool Allow clients to connect to schema_registry with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
- Jolokia bool Enable jolokia.
- Kafka bool Kafka broker configuration values.
- KafkaConnect bool Enable Kafka Connect service. The default value is false.
- KafkaRest bool Enable Kafka-REST service. The default value is false.
- Prometheus bool Allow clients to connect to prometheus with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
- SchemaRegistry bool Allow clients to connect to schema_registry with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
- jolokia Boolean Enable jolokia.
- kafka Boolean Kafka broker configuration values.
- kafkaConnect Boolean Enable Kafka Connect service. The default value is false.
- kafkaRest Boolean Enable Kafka-REST service. The default value is false.
- prometheus Boolean Allow clients to connect to prometheus with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
- schemaRegistry Boolean Allow clients to connect to schema_registry with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
- jolokia boolean Enable jolokia.
- kafka boolean Kafka broker configuration values.
- kafkaConnect boolean Enable Kafka Connect service. The default value is false.
- kafkaRest boolean Enable Kafka-REST service. The default value is false.
- prometheus boolean Allow clients to connect to prometheus with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
- schemaRegistry boolean Allow clients to connect to schema_registry with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
- jolokia bool Enable jolokia.
- kafka bool Kafka broker configuration values.
- kafka_connect bool Enable Kafka Connect service. The default value is false.
- kafka_rest bool Enable Kafka-REST service. The default value is false.
- prometheus bool Allow clients to connect to prometheus with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
- schema_registry bool Allow clients to connect to schema_registry with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
- jolokia Boolean Enable jolokia.
- kafka Boolean Kafka broker configuration values.
- kafkaConnect Boolean Enable Kafka Connect service. The default value is false.
- kafkaRest Boolean Enable Kafka-REST service. The default value is false.
- prometheus Boolean Allow clients to connect to prometheus with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
- schemaRegistry Boolean Allow clients to connect to schema_registry with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
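A minimal Python sketch of privatelink access (names are placeholders; the privatelink_access field name is assumed from the type name above):
import pulumi_aiven as aiven

# Hypothetical service exposing Kafka and Jolokia over a private link endpoint.
kafka_privatelink = aiven.Kafka("kafka-privatelink",
    project="my-project",
    cloud_name="google-europe-west1",
    plan="business-4",
    service_name="my-kafka-pl",
    kafka_user_config=aiven.KafkaKafkaUserConfigArgs(
        privatelink_access=aiven.KafkaKafkaUserConfigPrivatelinkAccessArgs(
            jolokia=True,
            kafka=True,
        )))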
KafkaKafkaUserConfigPublicAccess, KafkaKafkaUserConfigPublicAccessArgs
- Kafka bool Kafka broker configuration values.
- KafkaConnect bool Enable Kafka Connect service. The default value is false.
- KafkaRest bool Enable Kafka-REST service. The default value is false.
- Prometheus bool Allow clients to connect to prometheus with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
- SchemaRegistry bool Allow clients to connect to schema_registry with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
- Kafka bool Kafka broker configuration values.
- KafkaConnect bool Enable Kafka Connect service. The default value is false.
- KafkaRest bool Enable Kafka-REST service. The default value is false.
- Prometheus bool Allow clients to connect to prometheus with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
- SchemaRegistry bool Allow clients to connect to schema_registry with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
- kafka Boolean Kafka broker configuration values.
- kafkaConnect Boolean Enable Kafka Connect service. The default value is false.
- kafkaRest Boolean Enable Kafka-REST service. The default value is false.
- prometheus Boolean Allow clients to connect to prometheus with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
- schemaRegistry Boolean Allow clients to connect to schema_registry with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
- kafka boolean Kafka broker configuration values.
- kafkaConnect boolean Enable Kafka Connect service. The default value is false.
- kafkaRest boolean Enable Kafka-REST service. The default value is false.
- prometheus boolean Allow clients to connect to prometheus with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
- schemaRegistry boolean Allow clients to connect to schema_registry with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
- kafka bool Kafka broker configuration values.
- kafka_connect bool Enable Kafka Connect service. The default value is false.
- kafka_rest bool Enable Kafka-REST service. The default value is false.
- prometheus bool Allow clients to connect to prometheus with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
- schema_registry bool Allow clients to connect to schema_registry with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
- kafka Boolean Kafka broker configuration values.
- kafkaConnect Boolean Enable Kafka Connect service. The default value is false.
- kafkaRest Boolean Enable Kafka-REST service. The default value is false.
- prometheus Boolean Allow clients to connect to prometheus with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
- schemaRegistry Boolean Allow clients to connect to schema_registry with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
KafkaKafkaUserConfigSchemaRegistryConfig, KafkaKafkaUserConfigSchemaRegistryConfigArgs
- LeaderEligibility bool If true, Karapace / Schema Registry on the service nodes can participate in leader election. It might be needed to disable this when the schemas topic is replicated to a secondary cluster and Karapace / Schema Registry there must not participate in leader election. Defaults to true.
- TopicName string The durable single partition topic that acts as the durable log for the data. This topic must be compacted to avoid losing data due to retention policy. Please note that changing this configuration in an existing Schema Registry / Karapace setup leads to previous schemas being inaccessible, data encoded with them potentially unreadable and schema ID sequence put out of order. It's only possible to do the switch while Schema Registry / Karapace is disabled. Defaults to _schemas.
- LeaderEligibility bool If true, Karapace / Schema Registry on the service nodes can participate in leader election. It might be needed to disable this when the schemas topic is replicated to a secondary cluster and Karapace / Schema Registry there must not participate in leader election. Defaults to true.
- TopicName string The durable single partition topic that acts as the durable log for the data. This topic must be compacted to avoid losing data due to retention policy. Please note that changing this configuration in an existing Schema Registry / Karapace setup leads to previous schemas being inaccessible, data encoded with them potentially unreadable and schema ID sequence put out of order. It's only possible to do the switch while Schema Registry / Karapace is disabled. Defaults to _schemas.
- leaderEligibility Boolean If true, Karapace / Schema Registry on the service nodes can participate in leader election. It might be needed to disable this when the schemas topic is replicated to a secondary cluster and Karapace / Schema Registry there must not participate in leader election. Defaults to true.
- topicName String The durable single partition topic that acts as the durable log for the data. This topic must be compacted to avoid losing data due to retention policy. Please note that changing this configuration in an existing Schema Registry / Karapace setup leads to previous schemas being inaccessible, data encoded with them potentially unreadable and schema ID sequence put out of order. It's only possible to do the switch while Schema Registry / Karapace is disabled. Defaults to _schemas.
- leaderEligibility boolean If true, Karapace / Schema Registry on the service nodes can participate in leader election. It might be needed to disable this when the schemas topic is replicated to a secondary cluster and Karapace / Schema Registry there must not participate in leader election. Defaults to true.
- topicName string The durable single partition topic that acts as the durable log for the data. This topic must be compacted to avoid losing data due to retention policy. Please note that changing this configuration in an existing Schema Registry / Karapace setup leads to previous schemas being inaccessible, data encoded with them potentially unreadable and schema ID sequence put out of order. It's only possible to do the switch while Schema Registry / Karapace is disabled. Defaults to _schemas.
- leader_eligibility bool If true, Karapace / Schema Registry on the service nodes can participate in leader election. It might be needed to disable this when the schemas topic is replicated to a secondary cluster and Karapace / Schema Registry there must not participate in leader election. Defaults to true.
- topic_name str The durable single partition topic that acts as the durable log for the data. This topic must be compacted to avoid losing data due to retention policy. Please note that changing this configuration in an existing Schema Registry / Karapace setup leads to previous schemas being inaccessible, data encoded with them potentially unreadable and schema ID sequence put out of order. It's only possible to do the switch while Schema Registry / Karapace is disabled. Defaults to _schemas.
- leaderEligibility Boolean If true, Karapace / Schema Registry on the service nodes can participate in leader election. It might be needed to disable this when the schemas topic is replicated to a secondary cluster and Karapace / Schema Registry there must not participate in leader election. Defaults to true.
- topicName String The durable single partition topic that acts as the durable log for the data. This topic must be compacted to avoid losing data due to retention policy. Please note that changing this configuration in an existing Schema Registry / Karapace setup leads to previous schemas being inaccessible, data encoded with them potentially unreadable and schema ID sequence put out of order. It's only possible to do the switch while Schema Registry / Karapace is disabled. Defaults to _schemas.
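A minimal Python sketch for a secondary cluster kept out of leader election (names are placeholders; the schema_registry_config field name is assumed from the type name above):
import pulumi_aiven as aiven

# Hypothetical secondary-cluster service with Karapace leader election disabled.
kafka_schemas = aiven.Kafka("kafka-schemas",
    project="my-project",
    cloud_name="google-europe-west1",
    plan="business-4",
    service_name="my-kafka-schemas",
    kafka_user_config=aiven.KafkaKafkaUserConfigArgs(
        schema_registry=True,
        schema_registry_config=aiven.KafkaKafkaUserConfigSchemaRegistryConfigArgs(
            leader_eligibility=False,  # keep this replica out of leader election
            topic_name="_schemas",     # the default; switching it later is destructive
        )))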
KafkaServiceIntegration, KafkaServiceIntegrationArgs
- IntegrationType string Type of the service integration. The only supported value at the moment is read_replica.
- SourceServiceName string Name of the source service.
- IntegrationType string Type of the service integration. The only supported value at the moment is read_replica.
- SourceServiceName string Name of the source service.
- integrationType String Type of the service integration. The only supported value at the moment is read_replica.
- sourceServiceName String Name of the source service.
- integrationType string Type of the service integration. The only supported value at the moment is read_replica.
- sourceServiceName string Name of the source service.
- integration_type str Type of the service integration. The only supported value at the moment is read_replica.
- source_service_name str Name of the source service.
- integrationType String Type of the service integration. The only supported value at the moment is read_replica.
- sourceServiceName String Name of the source service.
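A minimal Python sketch wiring up a read replica (names are placeholders; the service_integrations field name is assumed from the type name above):
import pulumi_aiven as aiven

# Hypothetical read replica of an existing source service.
kafka_replica = aiven.Kafka("kafka-replica",
    project="my-project",
    cloud_name="google-europe-west1",
    plan="business-4",
    service_name="my-kafka-replica",
    service_integrations=[aiven.KafkaServiceIntegrationArgs(
        integration_type="read_replica",   # the only supported value
        source_service_name="my-kafka1",   # placeholder source service
    )])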
KafkaTag, KafkaTagArgs
Import
$ pulumi import aiven:index/kafka:Kafka kafka1 project/service_name
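For example, assuming the service from the usage example above lives in a project named my-project (placeholder names), the command would be:
$ pulumi import aiven:index/kafka:Kafka kafka1 my-project/my-kafka1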
Package Details
- Repository: Aiven pulumi/pulumi-aiven
- License: Apache-2.0
- Notes: This Pulumi package is based on the aiven Terraform Provider.