
# Kafka Resource

The Kafka resource allows the creation and management of Aiven Kafka services.

## Example Usage

```csharp
using Pulumi;
using Aiven = Pulumi.Aiven;

class MyStack : Stack
{
    public MyStack()
    {
        var kafka1 = new Aiven.Kafka("kafka1", new Aiven.KafkaArgs
        {
            Project = data.Aiven_project.Pr1.Project,
            CloudName = "google-europe-west1",
            Plan = "business-4",
            ServiceName = "my-kafka1",
            MaintenanceWindowDow = "monday",
            MaintenanceWindowTime = "10:00:00",
            KafkaUserConfig = new Aiven.Inputs.KafkaKafkaUserConfigArgs
            {
                KafkaRest = "true",
                KafkaConnect = "true",
                SchemaRegistry = "true",
                KafkaVersion = "2.4",
                Kafka = new Aiven.Inputs.KafkaKafkaUserConfigKafkaArgs
                {
                    GroupMaxSessionTimeoutMs = "70000",
                    LogRetentionBytes = "1000000000",
                },
                PublicAccess = new Aiven.Inputs.KafkaKafkaUserConfigPublicAccessArgs
                {
                    KafkaRest = "true",
                    KafkaConnect = "true",
                },
            },
        });
    }
}
```
```go
package main

import (
    "github.com/pulumi/pulumi-aiven/sdk/v3/go/aiven"
    "github.com/pulumi/pulumi/sdk/v2/go/pulumi"
)

func main() {
    pulumi.Run(func(ctx *pulumi.Context) error {
        _, err := aiven.NewKafka(ctx, "kafka1", &aiven.KafkaArgs{
            Project:               pulumi.Any(data.Aiven_project.Pr1.Project),
            CloudName:             pulumi.String("google-europe-west1"),
            Plan:                  pulumi.String("business-4"),
            ServiceName:           pulumi.String("my-kafka1"),
            MaintenanceWindowDow:  pulumi.String("monday"),
            MaintenanceWindowTime: pulumi.String("10:00:00"),
            KafkaUserConfig: &aiven.KafkaKafkaUserConfigArgs{
                KafkaRest:      pulumi.String("true"),
                KafkaConnect:   pulumi.String("true"),
                SchemaRegistry: pulumi.String("true"),
                KafkaVersion:   pulumi.String("2.4"),
                Kafka: &aiven.KafkaKafkaUserConfigKafkaArgs{
                    GroupMaxSessionTimeoutMs: pulumi.String("70000"),
                    LogRetentionBytes:        pulumi.String("1000000000"),
                },
                PublicAccess: &aiven.KafkaKafkaUserConfigPublicAccessArgs{
                    KafkaRest:    pulumi.String("true"),
                    KafkaConnect: pulumi.String("true"),
                },
            },
        })
        if err != nil {
            return err
        }
        return nil
    })
}
```
```python
import pulumi
import pulumi_aiven as aiven

kafka1 = aiven.Kafka("kafka1",
    project=data["aiven_project"]["pr1"]["project"],
    cloud_name="google-europe-west1",
    plan="business-4",
    service_name="my-kafka1",
    maintenance_window_dow="monday",
    maintenance_window_time="10:00:00",
    kafka_user_config=aiven.KafkaKafkaUserConfigArgs(
        kafka_rest="true",
        kafka_connect="true",
        schema_registry="true",
        kafka_version="2.4",
        kafka=aiven.KafkaKafkaUserConfigKafkaArgs(
            group_max_session_timeout_ms="70000",
            log_retention_bytes="1000000000",
        ),
        public_access=aiven.KafkaKafkaUserConfigPublicAccessArgs(
            kafka_rest="true",
            kafka_connect="true",
        ),
    ))
```
```typescript
import * as pulumi from "@pulumi/pulumi";
import * as aiven from "@pulumi/aiven";

const kafka1 = new aiven.Kafka("kafka1", {
    project: data.aiven_project.pr1.project,
    cloudName: "google-europe-west1",
    plan: "business-4",
    serviceName: "my-kafka1",
    maintenanceWindowDow: "monday",
    maintenanceWindowTime: "10:00:00",
    kafkaUserConfig: {
        kafkaRest: "true",
        kafkaConnect: "true",
        schemaRegistry: "true",
        kafkaVersion: "2.4",
        kafka: {
            groupMaxSessionTimeoutMs: "70000",
            logRetentionBytes: "1000000000",
        },
        publicAccess: {
            kafkaRest: "true",
            kafkaConnect: "true",
        },
    },
});
```

## Create a Kafka Resource

new Kafka(name: string, args: KafkaArgs, opts?: CustomResourceOptions);
def Kafka(resource_name: str, opts: Optional[ResourceOptions] = None, cloud_name: Optional[str] = None, default_acl: Optional[bool] = None, kafka: Optional[KafkaKafkaArgs] = None, kafka_user_config: Optional[KafkaKafkaUserConfigArgs] = None, maintenance_window_dow: Optional[str] = None, maintenance_window_time: Optional[str] = None, plan: Optional[str] = None, project: Optional[str] = None, project_vpc_id: Optional[str] = None, service_integrations: Optional[Sequence[KafkaServiceIntegrationArgs]] = None, service_name: Optional[str] = None, termination_protection: Optional[bool] = None)
func NewKafka(ctx *Context, name string, args KafkaArgs, opts ...ResourceOption) (*Kafka, error)
public Kafka(string name, KafkaArgs args, CustomResourceOptions? opts = null)
name string
The unique name of the resource.
args KafkaArgs
The arguments to resource properties.
opts CustomResourceOptions
Bag of options to control resource's behavior.
resource_name str
The unique name of the resource.
opts ResourceOptions
A bag of options that control this resource's behavior.
ctx Context
Context object for the current deployment.
name string
The unique name of the resource.
args KafkaArgs
The arguments to resource properties.
opts ResourceOption
Bag of options to control resource's behavior.
name string
The unique name of the resource.
args KafkaArgs
The arguments to resource properties.
opts CustomResourceOptions
Bag of options to control resource's behavior.
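
For example, the optional `opts` bag can be used to set standard Pulumi resource options such as `protect`, which makes Pulumi refuse to delete the resource. A minimal TypeScript sketch (the project name is a placeholder):

```typescript
import * as aiven from "@pulumi/aiven";

const kafka1 = new aiven.Kafka(
    "kafka1",                       // name: the unique resource name
    {                               // args: the KafkaArgs resource properties
        project: "my-project",      // placeholder project name
        cloudName: "google-europe-west1",
        plan: "business-4",
        serviceName: "my-kafka1",
    },
    {                               // opts: CustomResourceOptions
        protect: true,              // Pulumi will refuse to delete a protected resource
    },
);
```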

## Kafka Resource Properties

To learn more about resource properties and how to use them, see Inputs and Outputs in the Programming Model docs.

### Inputs

The Kafka resource accepts the following input properties:

Project string

identifies the project the service belongs to. To set up proper dependency between the project and the service, refer to the project as shown in the above example. Project cannot be changed later without destroying and re-creating the service.

ServiceName string

specifies the actual name of the service. The name cannot be changed later without destroying and re-creating the service, so the name should be picked based on the intended service usage rather than current attributes.

CloudName string

defines the cloud provider and region where the service is hosted. This can be changed freely after the service is created, but changing the value will trigger a potentially lengthy migration process for the service. The format is the cloud provider name (aws, azure, do, google, upcloud, etc.), a dash, and the cloud provider specific region name. The available regions are documented in each cloud provider's own support articles.

DefaultAcl bool

Create default wildcard Kafka ACL

KafkaServer KafkaKafkaArgs

Enable kafka

KafkaUserConfig KafkaKafkaUserConfigArgs

defines Kafka specific additional configuration options. The following configuration options are available:

MaintenanceWindowDow string

day of week when maintenance operations should be performed. One of monday, tuesday, wednesday, etc.

MaintenanceWindowTime string

time of day when maintenance operations should be performed. UTC time in HH:mm:ss format.

Plan string

defines what kind of computing resources are allocated for the service. It can be changed after creation, though there are some restrictions when moving to a smaller plan: the new plan must have enough disk space to store all current data, and switching to a plan with fewer nodes might not be supported. The basic plan names are hobbyist, startup-x, business-x and premium-x, where x is (roughly) the amount of memory on each node (other attributes such as the number of CPUs and the amount of disk space also vary, but the naming is based on memory). The exact options can be seen in the Aiven web console's Create Service dialog.

ProjectVpcId string

optionally specifies the VPC the service should run in. If the value is not set, the service is not run inside a VPC. When set, the value should be given as a reference (as shown above) to set up dependencies correctly, and the VPC must be in the same cloud and region as the service itself. The service can be freely moved to and from a VPC after creation, but doing so triggers a migration to new servers, so the operation can take a significant amount of time to complete if the service has a lot of data.

ServiceIntegrations List<KafkaServiceIntegrationArgs>

Service integrations to specify when creating a service. Not applied after initial service creation

TerminationProtection bool

prevents the service from being deleted. It is recommended to set this to true for all production services to prevent unintentional service deletion. This does not shield against deleting databases or topics, but for services with backups much of the content can at least be restored from backup if the service is accidentally deleted.

Project string

identifies the project the service belongs to. To set up proper dependency between the project and the service, refer to the project as shown in the above example. Project cannot be changed later without destroying and re-creating the service.

ServiceName string

specifies the actual name of the service. The name cannot be changed later without destroying and re-creating the service, so the name should be picked based on the intended service usage rather than current attributes.

CloudName string

defines the cloud provider and region where the service is hosted. This can be changed freely after the service is created, but changing the value will trigger a potentially lengthy migration process for the service. The format is the cloud provider name (aws, azure, do, google, upcloud, etc.), a dash, and the cloud provider specific region name. The available regions are documented in each cloud provider's own support articles.

DefaultAcl bool

Create default wildcard Kafka ACL

Kafka KafkaKafka

Enable kafka

KafkaUserConfig KafkaKafkaUserConfig

defines Kafka specific additional configuration options. The following configuration options are available:

MaintenanceWindowDow string

day of week when maintenance operations should be performed. One of monday, tuesday, wednesday, etc.

MaintenanceWindowTime string

time of day when maintenance operations should be performed. UTC time in HH:mm:ss format.

Plan string

defines what kind of computing resources are allocated for the service. It can be changed after creation, though there are some restrictions when moving to a smaller plan: the new plan must have enough disk space to store all current data, and switching to a plan with fewer nodes might not be supported. The basic plan names are hobbyist, startup-x, business-x and premium-x, where x is (roughly) the amount of memory on each node (other attributes such as the number of CPUs and the amount of disk space also vary, but the naming is based on memory). The exact options can be seen in the Aiven web console's Create Service dialog.

ProjectVpcId string

optionally specifies the VPC the service should run in. If the value is not set, the service is not run inside a VPC. When set, the value should be given as a reference (as shown above) to set up dependencies correctly, and the VPC must be in the same cloud and region as the service itself. The service can be freely moved to and from a VPC after creation, but doing so triggers a migration to new servers, so the operation can take a significant amount of time to complete if the service has a lot of data.

ServiceIntegrations []KafkaServiceIntegration

Service integrations to specify when creating a service. Not applied after initial service creation

TerminationProtection bool

prevents the service from being deleted. It is recommended to set this to true for all production services to prevent unintentional service deletion. This does not shield against deleting databases or topics, but for services with backups much of the content can at least be restored from backup if the service is accidentally deleted.

project string

identifies the project the service belongs to. To set up proper dependency between the project and the service, refer to the project as shown in the above example. Project cannot be changed later without destroying and re-creating the service.

serviceName string

specifies the actual name of the service. The name cannot be changed later without destroying and re-creating the service, so the name should be picked based on the intended service usage rather than current attributes.

cloudName string

defines the cloud provider and region where the service is hosted. This can be changed freely after the service is created, but changing the value will trigger a potentially lengthy migration process for the service. The format is the cloud provider name (aws, azure, do, google, upcloud, etc.), a dash, and the cloud provider specific region name. The available regions are documented in each cloud provider's own support articles.

defaultAcl boolean

Create default wildcard Kafka ACL

kafka KafkaKafka

Enable kafka

kafkaUserConfig KafkaKafkaUserConfig

defines Kafka specific additional configuration options. The following configuration options are available:

maintenanceWindowDow string

day of week when maintenance operations should be performed. One of monday, tuesday, wednesday, etc.

maintenanceWindowTime string

time of day when maintenance operations should be performed. UTC time in HH:mm:ss format.

plan string

defines what kind of computing resources are allocated for the service. It can be changed after creation, though there are some restrictions when moving to a smaller plan: the new plan must have enough disk space to store all current data, and switching to a plan with fewer nodes might not be supported. The basic plan names are hobbyist, startup-x, business-x and premium-x, where x is (roughly) the amount of memory on each node (other attributes such as the number of CPUs and the amount of disk space also vary, but the naming is based on memory). The exact options can be seen in the Aiven web console's Create Service dialog.

projectVpcId string

optionally specifies the VPC the service should run in. If the value is not set, the service is not run inside a VPC. When set, the value should be given as a reference (as shown above) to set up dependencies correctly, and the VPC must be in the same cloud and region as the service itself. The service can be freely moved to and from a VPC after creation, but doing so triggers a migration to new servers, so the operation can take a significant amount of time to complete if the service has a lot of data.

serviceIntegrations KafkaServiceIntegration[]

Service integrations to specify when creating a service. Not applied after initial service creation

terminationProtection boolean

prevents the service from being deleted. It is recommended to set this to true for all production services to prevent unintentional service deletion. This does not shield against deleting databases or topics, but for services with backups much of the content can at least be restored from backup if the service is accidentally deleted.

project str

identifies the project the service belongs to. To set up proper dependency between the project and the service, refer to the project as shown in the above example. Project cannot be changed later without destroying and re-creating the service.

service_name str

specifies the actual name of the service. The name cannot be changed later without destroying and re-creating the service, so the name should be picked based on the intended service usage rather than current attributes.

cloud_name str

defines the cloud provider and region where the service is hosted. This can be changed freely after the service is created, but changing the value will trigger a potentially lengthy migration process for the service. The format is the cloud provider name (aws, azure, do, google, upcloud, etc.), a dash, and the cloud provider specific region name. The available regions are documented in each cloud provider's own support articles.

default_acl bool

Create default wildcard Kafka ACL

kafka KafkaKafkaArgs

Enable kafka

kafka_user_config KafkaKafkaUserConfigArgs

defines Kafka specific additional configuration options. The following configuration options are available:

maintenance_window_dow str

day of week when maintenance operations should be performed. One of monday, tuesday, wednesday, etc.

maintenance_window_time str

time of day when maintenance operations should be performed. UTC time in HH:mm:ss format.

plan str

defines what kind of computing resources are allocated for the service. It can be changed after creation, though there are some restrictions when moving to a smaller plan: the new plan must have enough disk space to store all current data, and switching to a plan with fewer nodes might not be supported. The basic plan names are hobbyist, startup-x, business-x and premium-x, where x is (roughly) the amount of memory on each node (other attributes such as the number of CPUs and the amount of disk space also vary, but the naming is based on memory). The exact options can be seen in the Aiven web console's Create Service dialog.

project_vpc_id str

optionally specifies the VPC the service should run in. If the value is not set, the service is not run inside a VPC. When set, the value should be given as a reference (as shown above) to set up dependencies correctly, and the VPC must be in the same cloud and region as the service itself. The service can be freely moved to and from a VPC after creation, but doing so triggers a migration to new servers, so the operation can take a significant amount of time to complete if the service has a lot of data.

service_integrations Sequence[KafkaServiceIntegrationArgs]

Service integrations to specify when creating a service. Not applied after initial service creation

termination_protection bool

prevents the service from being deleted. It is recommended to set this to true for all production services to prevent unintentional service deletion. This does not shield against deleting databases or topics, but for services with backups much of the content can at least be restored from backup if the service is accidentally deleted.

### Outputs

All input properties are implicitly available as output properties. Additionally, the Kafka resource produces the following output properties:
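
For instance, once the service has been created you can read these outputs off the resource and export them from the stack. A short TypeScript sketch (the project name is a placeholder):

```typescript
import * as aiven from "@pulumi/aiven";

const kafka1 = new aiven.Kafka("kafka1", {
    project: "my-project",          // placeholder project name
    cloudName: "google-europe-west1",
    plan: "business-4",
    serviceName: "my-kafka1",
});

// Inputs are echoed back as outputs, and the provider adds the computed
// properties listed below once the service has been provisioned.
export const kafkaUri = kafka1.serviceUri;
export const kafkaHost = kafka1.serviceHost;
export const kafkaPort = kafka1.servicePort;
```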

Components List<KafkaComponent>

Service component information objects

Id string
The provider-assigned unique ID for this managed resource.
ServiceHost string

Kafka hostname.

ServicePassword string

Password used for connecting to the Kafka service, if applicable.

ServicePort int

Kafka port.

ServiceType string

Aiven internal service type code

ServiceUri string

URI for connecting to the Kafka service.

ServiceUsername string

Username used for connecting to the Kafka service, if applicable.

State string

Service state.

Components []KafkaComponent

Service component information objects

Id string
The provider-assigned unique ID for this managed resource.
ServiceHost string

Kafka hostname.

ServicePassword string

Password used for connecting to the Kafka service, if applicable.

ServicePort int

Kafka port.

ServiceType string

Aiven internal service type code

ServiceUri string

URI for connecting to the Kafka service.

ServiceUsername string

Username used for connecting to the Kafka service, if applicable.

State string

Service state.

components KafkaComponent[]

Service component information objects

id string
The provider-assigned unique ID for this managed resource.
serviceHost string

Kafka hostname.

servicePassword string

Password used for connecting to the Kafka service, if applicable.

servicePort number

Kafka port.

serviceType string

Aiven internal service type code

serviceUri string

URI for connecting to the Kafka service.

serviceUsername string

Username used for connecting to the Kafka service, if applicable.

state string

Service state.

components Sequence[KafkaComponent]

Service component information objects

id str
The provider-assigned unique ID for this managed resource.
service_host str

Kafka hostname.

service_password str

Password used for connecting to the Kafka service, if applicable.

service_port int

Kafka port.

service_type str

Aiven internal service type code

service_uri str

URI for connecting to the Kafka service.

service_username str

Username used for connecting to the Kafka service, if applicable.

state str

Service state.

## Look up an Existing Kafka Resource

Get an existing Kafka resource’s state with the given name, ID, and optional extra properties used to qualify the lookup.

public static get(name: string, id: Input<ID>, state?: KafkaState, opts?: CustomResourceOptions): Kafka
@staticmethod
def get(resource_name: str, id: str, opts: Optional[ResourceOptions] = None, cloud_name: Optional[str] = None, components: Optional[Sequence[KafkaComponentArgs]] = None, default_acl: Optional[bool] = None, kafka: Optional[KafkaKafkaArgs] = None, kafka_user_config: Optional[KafkaKafkaUserConfigArgs] = None, maintenance_window_dow: Optional[str] = None, maintenance_window_time: Optional[str] = None, plan: Optional[str] = None, project: Optional[str] = None, project_vpc_id: Optional[str] = None, service_host: Optional[str] = None, service_integrations: Optional[Sequence[KafkaServiceIntegrationArgs]] = None, service_name: Optional[str] = None, service_password: Optional[str] = None, service_port: Optional[int] = None, service_type: Optional[str] = None, service_uri: Optional[str] = None, service_username: Optional[str] = None, state: Optional[str] = None, termination_protection: Optional[bool] = None) -> Kafka
func GetKafka(ctx *Context, name string, id IDInput, state *KafkaState, opts ...ResourceOption) (*Kafka, error)
public static Kafka Get(string name, Input<string> id, KafkaState? state, CustomResourceOptions? opts = null)
name
The unique name of the resulting resource.
id
The unique provider ID of the resource to lookup.
state
Any extra arguments used during the lookup.
opts
A bag of options that control this resource's behavior.
resource_name
The unique name of the resulting resource.
id
The unique provider ID of the resource to lookup.
name
The unique name of the resulting resource.
id
The unique provider ID of the resource to lookup.
state
Any extra arguments used during the lookup.
opts
A bag of options that control this resource's behavior.
name
The unique name of the resulting resource.
id
The unique provider ID of the resource to lookup.
state
Any extra arguments used during the lookup.
opts
A bag of options that control this resource's behavior.
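
As a sketch, looking up an already-provisioned service from TypeScript might look like the following. The ID format used here (project name and service name joined by a slash) is an assumption; confirm the exact format for your provider version, for example via `pulumi import`.

```typescript
import * as aiven from "@pulumi/aiven";

// Adopt an existing Kafka service into the program instead of creating a new one.
// "my-project/my-kafka1" is an assumed <project>/<service_name> ID.
const existing = aiven.Kafka.get("existing-kafka", "my-project/my-kafka1");

export const existingKafkaUri = existing.serviceUri;
```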

The following state arguments are supported:

CloudName string

defines the cloud provider and region where the service is hosted. This can be changed freely after the service is created, but changing the value will trigger a potentially lengthy migration process for the service. The format is the cloud provider name (aws, azure, do, google, upcloud, etc.), a dash, and the cloud provider specific region name. The available regions are documented in each cloud provider's own support articles.

Components List<KafkaComponentArgs>

Service component information objects

DefaultAcl bool

Create default wildcard Kafka ACL

KafkaServer KafkaKafkaArgs

Enable kafka

KafkaUserConfig KafkaKafkaUserConfigArgs

defines Kafka specific additional configuration options. The following configuration options are available:

MaintenanceWindowDow string

day of week when maintenance operations should be performed. One of monday, tuesday, wednesday, etc.

MaintenanceWindowTime string

time of day when maintenance operations should be performed. UTC time in HH:mm:ss format.

Plan string

defines what kind of computing resources are allocated for the service. It can be changed after creation, though there are some restrictions when moving to a smaller plan: the new plan must have enough disk space to store all current data, and switching to a plan with fewer nodes might not be supported. The basic plan names are hobbyist, startup-x, business-x and premium-x, where x is (roughly) the amount of memory on each node (other attributes such as the number of CPUs and the amount of disk space also vary, but the naming is based on memory). The exact options can be seen in the Aiven web console's Create Service dialog.

Project string

identifies the project the service belongs to. To set up proper dependency between the project and the service, refer to the project as shown in the above example. Project cannot be changed later without destroying and re-creating the service.

ProjectVpcId string

optionally specifies the VPC the service should run in. If the value is not set, the service is not run inside a VPC. When set, the value should be given as a reference (as shown above) to set up dependencies correctly, and the VPC must be in the same cloud and region as the service itself. The service can be freely moved to and from a VPC after creation, but doing so triggers a migration to new servers, so the operation can take a significant amount of time to complete if the service has a lot of data.

ServiceHost string

Kafka hostname.

ServiceIntegrations List<KafkaServiceIntegrationArgs>

Service integrations to specify when creating a service. Not applied after initial service creation

ServiceName string

specifies the actual name of the service. The name cannot be changed later without destroying and re-creating the service, so the name should be picked based on the intended service usage rather than current attributes.

ServicePassword string

Password used for connecting to the Kafka service, if applicable.

ServicePort int

Kafka port.

ServiceType string

Aiven internal service type code

ServiceUri string

URI for connecting to the Kafka service.

ServiceUsername string

Username used for connecting to the Kafka service, if applicable.

State string

Service state.

TerminationProtection bool

prevents the service from being deleted. It is recommended to set this to true for all production services to prevent unintentional service deletion. This does not shield against deleting databases or topics, but for services with backups much of the content can at least be restored from backup if the service is accidentally deleted.

CloudName string

defines the cloud provider and region where the service is hosted. This can be changed freely after the service is created, but changing the value will trigger a potentially lengthy migration process for the service. The format is the cloud provider name (aws, azure, do, google, upcloud, etc.), a dash, and the cloud provider specific region name. The available regions are documented in each cloud provider's own support articles.

Components []KafkaComponent

Service component information objects

DefaultAcl bool

Create default wildcard Kafka ACL

Kafka KafkaKafka

Enable kafka

KafkaUserConfig KafkaKafkaUserConfig

defines Kafka specific additional configuration options. The following configuration options are available:

MaintenanceWindowDow string

day of week when maintenance operations should be performed. One of monday, tuesday, wednesday, etc.

MaintenanceWindowTime string

time of day when maintenance operations should be performed. UTC time in HH:mm:ss format.

Plan string

defines what kind of computing resources are allocated for the service. It can be changed after creation, though there are some restrictions when moving to a smaller plan: the new plan must have enough disk space to store all current data, and switching to a plan with fewer nodes might not be supported. The basic plan names are hobbyist, startup-x, business-x and premium-x, where x is (roughly) the amount of memory on each node (other attributes such as the number of CPUs and the amount of disk space also vary, but the naming is based on memory). The exact options can be seen in the Aiven web console's Create Service dialog.

Project string

identifies the project the service belongs to. To set up proper dependency between the project and the service, refer to the project as shown in the above example. Project cannot be changed later without destroying and re-creating the service.

ProjectVpcId string

optionally specifies the VPC the service should run in. If the value is not set, the service is not run inside a VPC. When set, the value should be given as a reference (as shown above) to set up dependencies correctly, and the VPC must be in the same cloud and region as the service itself. The service can be freely moved to and from a VPC after creation, but doing so triggers a migration to new servers, so the operation can take a significant amount of time to complete if the service has a lot of data.

ServiceHost string

Kafka hostname.

ServiceIntegrations []KafkaServiceIntegration

Service integrations to specify when creating a service. Not applied after initial service creation

ServiceName string

specifies the actual name of the service. The name cannot be changed later without destroying and re-creating the service, so the name should be picked based on the intended service usage rather than current attributes.

ServicePassword string

Password used for connecting to the Kafka service, if applicable.

ServicePort int

Kafka port.

ServiceType string

Aiven internal service type code

ServiceUri string

URI for connecting to the Kafka service.

ServiceUsername string

Username used for connecting to the Kafka service, if applicable.

State string

Service state.

TerminationProtection bool

prevents the service from being deleted. It is recommended to set this to true for all production services to prevent unintentional service deletion. This does not shield against deleting databases or topics, but for services with backups much of the content can at least be restored from backup if the service is accidentally deleted.

cloudName string

defines the cloud provider and region where the service is hosted. This can be changed freely after the service is created, but changing the value will trigger a potentially lengthy migration process for the service. The format is the cloud provider name (aws, azure, do, google, upcloud, etc.), a dash, and the cloud provider specific region name. The available regions are documented in each cloud provider's own support articles.

components KafkaComponent[]

Service component information objects

defaultAcl boolean

Create default wildcard Kafka ACL

kafka KafkaKafka

Enable kafka

kafkaUserConfig KafkaKafkaUserConfig

defines Kafka specific additional configuration options. The following configuration options are available:

maintenanceWindowDow string

day of week when maintenance operations should be performed. One of monday, tuesday, wednesday, etc.

maintenanceWindowTime string

time of day when maintenance operations should be performed. UTC time in HH:mm:ss format.

plan string

defines what kind of computing resources are allocated for the service. It can be changed after creation, though there are some restrictions when moving to a smaller plan: the new plan must have enough disk space to store all current data, and switching to a plan with fewer nodes might not be supported. The basic plan names are hobbyist, startup-x, business-x and premium-x, where x is (roughly) the amount of memory on each node (other attributes such as the number of CPUs and the amount of disk space also vary, but the naming is based on memory). The exact options can be seen in the Aiven web console's Create Service dialog.

project string

identifies the project the service belongs to. To set up proper dependency between the project and the service, refer to the project as shown in the above example. Project cannot be changed later without destroying and re-creating the service.

projectVpcId string

optionally specifies the VPC the service should run in. If the value is not set, the service is not run inside a VPC. When set, the value should be given as a reference (as shown above) to set up dependencies correctly, and the VPC must be in the same cloud and region as the service itself. The service can be freely moved to and from a VPC after creation, but doing so triggers a migration to new servers, so the operation can take a significant amount of time to complete if the service has a lot of data.

serviceHost string

Kafka hostname.

serviceIntegrations KafkaServiceIntegration[]

Service integrations to specify when creating a service. Not applied after initial service creation

serviceName string

specifies the actual name of the service. The name cannot be changed later without destroying and re-creating the service, so the name should be picked based on the intended service usage rather than current attributes.

servicePassword string

Password used for connecting to the Kafka service, if applicable.

servicePort number

Kafka port.

serviceType string

Aiven internal service type code

serviceUri string

URI for connecting to the Kafka service.

serviceUsername string

Username used for connecting to the Kafka service, if applicable.

state string

Service state.

terminationProtection boolean

prevents the service from being deleted. It is recommended to set this to true for all production services to prevent unintentional service deletion. This does not shield against deleting databases or topics, but for services with backups much of the content can at least be restored from backup if the service is accidentally deleted.

cloud_name str

defines the cloud provider and region where the service is hosted. This can be changed freely after the service is created, but changing the value will trigger a potentially lengthy migration process for the service. The format is the cloud provider name (aws, azure, do, google, upcloud, etc.), a dash, and the cloud provider specific region name. The available regions are documented in each cloud provider's own support articles.

components Sequence[KafkaComponentArgs]

Service component information objects

default_acl bool

Create default wildcard Kafka ACL

kafka KafkaKafkaArgs

Enable kafka

kafka_user_config KafkaKafkaUserConfigArgs

defines Kafka specific additional configuration options. The following configuration options are available:

maintenance_window_dow str

day of week when maintenance operations should be performed. One of monday, tuesday, wednesday, etc.

maintenance_window_time str

time of day when maintenance operations should be performed. UTC time in HH:mm:ss format.

plan str

defines what kind of computing resources are allocated for the service. It can be changed after creation, though there are some restrictions when moving to a smaller plan: the new plan must have enough disk space to store all current data, and switching to a plan with fewer nodes might not be supported. The basic plan names are hobbyist, startup-x, business-x and premium-x, where x is (roughly) the amount of memory on each node (other attributes such as the number of CPUs and the amount of disk space also vary, but the naming is based on memory). The exact options can be seen in the Aiven web console's Create Service dialog.

project str

identifies the project the service belongs to. To set up proper dependency between the project and the service, refer to the project as shown in the above example. Project cannot be changed later without destroying and re-creating the service.

project_vpc_id str

optionally specifies the VPC the service should run in. If the value is not set, the service is not run inside a VPC. When set, the value should be given as a reference (as shown above) to set up dependencies correctly, and the VPC must be in the same cloud and region as the service itself. The service can be freely moved to and from a VPC after creation, but doing so triggers a migration to new servers, so the operation can take a significant amount of time to complete if the service has a lot of data.

service_host str

Kafka hostname.

service_integrations Sequence[KafkaServiceIntegrationArgs]

Service integrations to specify when creating a service. Not applied after initial service creation

service_name str

specifies the actual name of the service. The name cannot be changed later without destroying and re-creating the service, so the name should be picked based on the intended service usage rather than current attributes.

service_password str

Password used for connecting to the Kafka service, if applicable.

service_port int

Kafka port.

service_type str

Aiven internal service type code

service_uri str

URI for connecting to the Kafka service.

service_username str

Username used for connecting to the Kafka service, if applicable.

state str

Service state.

termination_protection bool

prevents the service from being deleted. It is recommended to set this to true for all production services to prevent unintentional service deletion. This does not shield against deleting databases or topics, but for services with backups much of the content can at least be restored from backup if the service is accidentally deleted.

## Supporting Types

### KafkaComponent

Component string
Host string
KafkaAuthenticationMethod string
Port int
Route string
Ssl bool
Usage string
Component string
Host string
KafkaAuthenticationMethod string
Port int
Route string
Ssl bool
Usage string
component string
host string
kafkaAuthenticationMethod string
port number
route string
ssl boolean
usage string
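
The components list can be used to pull out the connection details for a specific service component. A minimal TypeScript sketch (the project name is a placeholder, and the "kafka" component name used in the filter is an assumption; inspect the stack outputs to see what your service actually reports):

```typescript
import * as aiven from "@pulumi/aiven";

const kafka1 = new aiven.Kafka("kafka1", {
    project: "my-project",          // placeholder project name
    cloudName: "google-europe-west1",
    plan: "business-4",
    serviceName: "my-kafka1",
});

// Each entry describes one service component (broker, REST proxy, Connect, ...).
export const brokerEndpoint = kafka1.components.apply(components => {
    const broker = components.find(c => c.component === "kafka");
    return broker ? `${broker.host}:${broker.port}` : undefined;
});
```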

### KafkaKafka

AccessCert string

The Kafka client certificate

AccessKey string

The Kafka client certificate key

ConnectUri string

The Kafka Connect URI, if any

RestUri string

The Kafka REST URI, if any

SchemaRegistryUri string

The Schema Registry URI, if any

AccessCert string

The Kafka client certificate

AccessKey string

The Kafka client certificate key

ConnectUri string

The Kafka Connect URI, if any

RestUri string

The Kafka REST URI, if any

SchemaRegistryUri string

The Schema Registry URI, if any

accessCert string

The Kafka client certificate

accessKey string

The Kafka client certificate key

connectUri string

The Kafka Connect URI, if any

restUri string

The Kafka REST URI, if any

schemaRegistryUri string

The Schema Registry URI, if any

access_cert str

The Kafka client certificate

access_key str

The Kafka client certificate key

connect_uri str

The Kafka Connect URI, if any

rest_uri str

The Kafka REST URI, if any

schema_registry_uri str

The Schema Registry URI, if any

### KafkaKafkaUserConfig

CustomDomain string

Serve the web frontend using a custom CNAME pointing to the Aiven DNS name.

IpFilters List<string>

Allow incoming connections from CIDR address block, e.g. ‘10.20.0.0/16’.

Kafka KafkaKafkaUserConfigKafkaArgs

Enable kafka

KafkaAuthenticationMethods KafkaKafkaUserConfigKafkaAuthenticationMethodsArgs

Kafka authentication methods

KafkaConnect string

Enable kafka_connect

KafkaConnectConfig KafkaKafkaUserConfigKafkaConnectConfigArgs

Kafka Connect configuration values

KafkaRest string

Enable kafka_rest

KafkaRestConfig KafkaKafkaUserConfigKafkaRestConfigArgs

Kafka-REST configuration

KafkaVersion string

Kafka major version

PrivateAccess KafkaKafkaUserConfigPrivateAccessArgs

Allow access to selected service ports from private networks

PrivatelinkAccess KafkaKafkaUserConfigPrivatelinkAccessArgs

Allow access to selected service components through Privatelink

PublicAccess KafkaKafkaUserConfigPublicAccessArgs

Allow access to selected service ports from the public Internet

SchemaRegistry string

Enable Schema-Registry service

SchemaRegistryConfig KafkaKafkaUserConfigSchemaRegistryConfigArgs

Schema Registry configuration

CustomDomain string

Serve the web frontend using a custom CNAME pointing to the Aiven DNS name.

IpFilters []string

Allow incoming connections from CIDR address block, e.g. ‘10.20.0.0/16’.

Kafka KafkaKafkaUserConfigKafka

Enable kafka

KafkaAuthenticationMethods KafkaKafkaUserConfigKafkaAuthenticationMethods

Kafka authentication methods

KafkaConnect string

Enable kafka_connect

KafkaConnectConfig KafkaKafkaUserConfigKafkaConnectConfig

Kafka Connect configuration values

KafkaRest string

Enable kafka_rest

KafkaRestConfig KafkaKafkaUserConfigKafkaRestConfig

Kafka-REST configuration

KafkaVersion string

Kafka major version

PrivateAccess KafkaKafkaUserConfigPrivateAccess

Allow access to selected service ports from private networks

PrivatelinkAccess KafkaKafkaUserConfigPrivatelinkAccess

Allow access to selected service components through Privatelink

PublicAccess KafkaKafkaUserConfigPublicAccess

Allow access to selected service ports from the public Internet

SchemaRegistry string

Enable Schema-Registry service

SchemaRegistryConfig KafkaKafkaUserConfigSchemaRegistryConfig

Schema Registry configuration

customDomain string

Serve the web frontend using a custom CNAME pointing to the Aiven DNS name.

ipFilters string[]

Allow incoming connections from CIDR address block, e.g. ‘10.20.0.0/16’.

kafka KafkaKafkaUserConfigKafka

Enable kafka

kafkaAuthenticationMethods KafkaKafkaUserConfigKafkaAuthenticationMethods

Kafka authentication methods

kafkaConnect string

Enable kafka_connect

kafkaConnectConfig KafkaKafkaUserConfigKafkaConnectConfig

Kafka Connect configuration values

kafkaRest string

Enable kafka_rest

kafkaRestConfig KafkaKafkaUserConfigKafkaRestConfig

Kafka-REST configuration

kafkaVersion string

Kafka major version

privateAccess KafkaKafkaUserConfigPrivateAccess

Allow access to selected service ports from private networks

privatelinkAccess KafkaKafkaUserConfigPrivatelinkAccess

Allow access to selected service components through Privatelink

publicAccess KafkaKafkaUserConfigPublicAccess

Allow access to selected service ports from the public Internet

schemaRegistry string

Enable Schema-Registry service

schemaRegistryConfig KafkaKafkaUserConfigSchemaRegistryConfig

Schema Registry configuration

custom_domain str

Serve the web frontend using a custom CNAME pointing to the Aiven DNS name.

ip_filters Sequence[str]

Allow incoming connections from CIDR address block, e.g. ‘10.20.0.0/16’.

kafka KafkaKafkaUserConfigKafkaArgs

Enable kafka

kafka_authentication_methods KafkaKafkaUserConfigKafkaAuthenticationMethodsArgs

Kafka authentication methods

kafka_connect str

Enable kafka_connect

kafka_connect_config KafkaKafkaUserConfigKafkaConnectConfigArgs

Kafka Connect configuration values

kafka_rest str

Enable kafka_rest

kafka_rest_config KafkaKafkaUserConfigKafkaRestConfigArgs

Kafka-REST configuration

kafka_version str

Kafka major version

private_access KafkaKafkaUserConfigPrivateAccessArgs

Allow access to selected service ports from private networks

privatelink_access KafkaKafkaUserConfigPrivatelinkAccessArgs

Allow access to selected service components through Privatelink

public_access KafkaKafkaUserConfigPublicAccessArgs

Allow access to selected service ports from the public Internet

schema_registry str

Enable Schema-Registry service

schema_registry_config KafkaKafkaUserConfigSchemaRegistryConfigArgs

Schema Registry configuration
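
As an illustration of how these options combine, the sketch below restricts incoming traffic with an IP filter and exposes only Kafka Connect publicly (the project name and CIDR block are placeholders):

```typescript
import * as aiven from "@pulumi/aiven";

const restrictedKafka = new aiven.Kafka("restricted-kafka", {
    project: "my-project",              // placeholder project name
    cloudName: "google-europe-west1",
    plan: "business-4",
    serviceName: "my-kafka-restricted",
    kafkaUserConfig: {
        kafkaVersion: "2.4",
        // Only allow connections from this address block (placeholder value).
        ipFilters: ["10.20.0.0/16"],
        kafkaConnect: "true",
        // Expose only Kafka Connect on the public Internet; the broker itself
        // remains reachable only through the IP filter above.
        publicAccess: {
            kafkaConnect: "true",
        },
    },
});
```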

### KafkaKafkaUserConfigKafka

AutoCreateTopicsEnable string

Enable auto creation of topics

CompressionType string

Specify the final compression type for a given topic. This configuration accepts the standard compression codecs (‘gzip’, ‘snappy’, ‘lz4’, ‘zstd’). It additionally accepts ‘uncompressed’ which is equivalent to no compression; and ‘producer’ which means retain the original compression codec set by the producer.

ConnectionsMaxIdleMs string

Idle connections timeout: the server socket processor threads close the connections that idle for longer than this.

DefaultReplicationFactor string

Replication factor for autocreated topics

GroupMaxSessionTimeoutMs string

The maximum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures.

GroupMinSessionTimeoutMs string

The minimum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures.

LogCleanerDeleteRetentionMs string
LogCleanerMaxCompactionLagMs string

The maximum amount of time a message will remain uncompacted. Only applicable for logs that are being compacted.

LogCleanerMinCleanableRatio string

Controls log compactor frequency. Larger value means more frequent compactions but also more space wasted for logs. Consider setting log.cleaner.max.compaction.lag.ms to enforce compactions sooner, instead of setting a very high value for this option.

LogCleanerMinCompactionLagMs string

The minimum time a message will remain uncompacted in the log. Only applicable for logs that are being compacted.

LogCleanupPolicy string

The default cleanup policy for segments beyond the retention window.

LogFlushIntervalMessages string

The number of messages accumulated on a log partition before messages are flushed to disk.

LogFlushIntervalMs string

The maximum time in ms that a message in any topic is kept in memory before flushed to disk. If not set, the value in log.flush.scheduler.interval.ms is used.

LogIndexIntervalBytes string

The interval with which Kafka adds an entry to the offset index.

LogIndexSizeMaxBytes string

The maximum size in bytes of the offset index.

LogMessageDownconversionEnable string

This configuration controls whether down-conversion of message formats is enabled to satisfy consume requests.

LogMessageTimestampDifferenceMaxMs string

The maximum difference allowed between the timestamp when a broker receives a message and the timestamp specified in the message

LogMessageTimestampType string

Define whether the timestamp in the message is message create time or log append time.

LogPreallocate string

Whether to preallocate the file when creating a new log segment.

LogRetentionBytes string

The maximum size of the log before deleting messages

LogRetentionHours string

The number of hours to keep a log file before deleting it.

LogRetentionMs string

The number of milliseconds to keep a log file before deleting it. If not set, the value in log.retention.minutes is used. If set to -1, no time limit is applied.

LogRollJitterMs string

The maximum jitter to subtract from logRollTimeMillis (in milliseconds). If not set, the value in log.roll.jitter.hours is used.

LogRollMs string

The maximum time before a new log segment is rolled out (in milliseconds).

LogSegmentBytes string

The maximum size of a single log file

LogSegmentDeleteDelayMs string

The amount of time to wait before deleting a file from the filesystem.

MaxConnectionsPerIp string

The maximum number of connections allowed from each ip address (defaults to 2147483647).

MaxIncrementalFetchSessionCacheSlots string

The maximum number of incremental fetch sessions that the broker will maintain.

MessageMaxBytes string

The maximum size of message that the server can receive.

MinInsyncReplicas string

When a producer sets acks to ‘all’ (or ‘-1’), min.insync.replicas specifies the minimum number of replicas that must acknowledge a write for the write to be considered successful.

NumPartitions string

Number of partitions for autocreated topics

OffsetsRetentionMinutes string

Log retention window in minutes for offsets topic.

ProducerPurgatoryPurgeIntervalRequests string

The purge interval (in number of requests) of the producer request purgatory (defaults to 1000).

ReplicaFetchMaxBytes string

The number of bytes of messages to attempt to fetch for each partition (defaults to 1048576). This is not an absolute maximum; if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that progress can be made.

ReplicaFetchResponseMaxBytes string

Maximum bytes expected for the entire fetch response (defaults to 10485760). Records are fetched in batches, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that progress can be made. As such, this is not an absolute maximum.

SocketRequestMaxBytes string

The maximum number of bytes in a socket request (defaults to 104857600).

TransactionRemoveExpiredTransactionCleanupIntervalMs string

The interval at which to remove transactions that have expired due to transactional.id.expiration.ms passing (defaults to 3600000 (1 hour)).

TransactionStateLogSegmentBytes string

The transaction topic segment bytes should be kept relatively small in order to facilitate faster log compaction and cache loads (defaults to 104857600 (100 mebibytes)).

AutoCreateTopicsEnable string

Enable auto creation of topics

CompressionType string

Specify the final compression type for a given topic. This configuration accepts the standard compression codecs (‘gzip’, ‘snappy’, ‘lz4’, ‘zstd’). It additionally accepts ‘uncompressed’ which is equivalent to no compression; and ‘producer’ which means retain the original compression codec set by the producer.

ConnectionsMaxIdleMs string

Idle connections timeout: the server socket processor threads close the connections that idle for longer than this.

DefaultReplicationFactor string

Replication factor for autocreated topics

GroupMaxSessionTimeoutMs string

The maximum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures.

GroupMinSessionTimeoutMs string

The minimum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures.

LogCleanerDeleteRetentionMs string
LogCleanerMaxCompactionLagMs string

The maximum amount of time a message will remain uncompacted. Only applicable for logs that are being compacted.

LogCleanerMinCleanableRatio string

Controls log compactor frequency. Larger value means more frequent compactions but also more space wasted for logs. Consider setting log.cleaner.max.compaction.lag.ms to enforce compactions sooner, instead of setting a very high value for this option.

LogCleanerMinCompactionLagMs string

The minimum time a message will remain uncompacted in the log. Only applicable for logs that are being compacted.

LogCleanupPolicy string

The default cleanup policy for segments beyond the retention window.

LogFlushIntervalMessages string

The number of messages accumulated on a log partition before messages are flushed to disk.

LogFlushIntervalMs string

The maximum time in ms that a message in any topic is kept in memory before flushed to disk. If not set, the value in log.flush.scheduler.interval.ms is used.

LogIndexIntervalBytes string

The interval with which Kafka adds an entry to the offset index.

LogIndexSizeMaxBytes string

The maximum size in bytes of the offset index.

LogMessageDownconversionEnable string

This configuration controls whether down-conversion of message formats is enabled to satisfy consume requests.

LogMessageTimestampDifferenceMaxMs string

The maximum difference allowed between the timestamp when a broker receives a message and the timestamp specified in the message

LogMessageTimestampType string

Define whether the timestamp in the message is message create time or log append time.

LogPreallocate string

Whether to preallocate the file when creating a new log segment.

LogRetentionBytes string

The maximum size of the log before deleting messages

LogRetentionHours string

The number of hours to keep a log file before deleting it.

LogRetentionMs string

The number of milliseconds to keep a log file before deleting it. If not set, the value in log.retention.minutes is used. If set to -1, no time limit is applied.

LogRollJitterMs string

The maximum jitter to subtract from logRollTimeMillis (in milliseconds). If not set, the value in log.roll.jitter.hours is used.

LogRollMs string

The maximum time before a new log segment is rolled out (in milliseconds).

LogSegmentBytes string

The maximum size of a single log file

LogSegmentDeleteDelayMs string

The amount of time to wait before deleting a file from the filesystem.

MaxConnectionsPerIp string

The maximum number of connections allowed from each ip address (defaults to 2147483647).

MaxIncrementalFetchSessionCacheSlots string

The maximum number of incremental fetch sessions that the broker will maintain.

MessageMaxBytes string

The maximum size of message that the server can receive.

MinInsyncReplicas string

When a producer sets acks to ‘all’ (or ‘-1’), min.insync.replicas specifies the minimum number of replicas that must acknowledge a write for the write to be considered successful.

NumPartitions string

Number of partitions for autocreated topics

OffsetsRetentionMinutes string

Log retention window in minutes for offsets topic.

ProducerPurgatoryPurgeIntervalRequests string

The purge interval (in number of requests) of the producer request purgatory (defaults to 1000).

ReplicaFetchMaxBytes string

The number of bytes of messages to attempt to fetch for each partition (defaults to 1048576). This is not an absolute maximum; if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that progress can be made.

ReplicaFetchResponseMaxBytes string

Maximum bytes expected for the entire fetch response (defaults to 10485760). Records are fetched in batches, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that progress can be made. As such, this is not an absolute maximum.

SocketRequestMaxBytes string

The maximum number of bytes in a socket request (defaults to 104857600).

TransactionRemoveExpiredTransactionCleanupIntervalMs string

The interval at which to remove transactions that have expired due to transactional.id.expiration.ms passing (defaults to 3600000 (1 hour)).

TransactionStateLogSegmentBytes string

The transaction topic segment bytes should be kept relatively small in order to facilitate faster log compaction and cache loads (defaults to 104857600 (100 mebibytes)).

autoCreateTopicsEnable string

Enable auto creation of topics

compressionType string

Specify the final compression type for a given topic. This configuration accepts the standard compression codecs (‘gzip’, ‘snappy’, ‘lz4’, ‘zstd’). It additionally accepts ‘uncompressed’ which is equivalent to no compression; and ‘producer’ which means retain the original compression codec set by the producer.

connectionsMaxIdleMs string

Idle connections timeout: the server socket processor threads close the connections that idle for longer than this.

defaultReplicationFactor string

Replication factor for autocreated topics

groupMaxSessionTimeoutMs string

The maximum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures.

groupMinSessionTimeoutMs string

The minimum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures.

logCleanerDeleteRetentionMs string
logCleanerMaxCompactionLagMs string

The maximum amount of time a message will remain uncompacted. Only applicable for logs that are being compacted.

logCleanerMinCleanableRatio string

Controls log compactor frequency. Larger value means more frequent compactions but also more space wasted for logs. Consider setting log.cleaner.max.compaction.lag.ms to enforce compactions sooner, instead of setting a very high value for this option.

logCleanerMinCompactionLagMs string

The minimum time a message will remain uncompacted in the log. Only applicable for logs that are being compacted.

logCleanupPolicy string

The default cleanup policy for segments beyond the retention window.

logFlushIntervalMessages string

The number of messages accumulated on a log partition before messages are flushed to disk.

logFlushIntervalMs string

The maximum time in ms that a message in any topic is kept in memory before flushed to disk. If not set, the value in log.flush.scheduler.interval.ms is used.

logIndexIntervalBytes string

The interval with which Kafka adds an entry to the offset index.

logIndexSizeMaxBytes string

The maximum size in bytes of the offset index.

logMessageDownconversionEnable string

This configuration controls whether down-conversion of message formats is enabled to satisfy consume requests.

logMessageTimestampDifferenceMaxMs string

The maximum difference allowed between the timestamp when a broker receives a message and the timestamp specified in the message

logMessageTimestampType string

Define whether the timestamp in the message is message create time or log append time.

logPreallocate string

Whether to preallocate the file when creating a new segment.

logRetentionBytes string

The maximum size of the log before deleting messages

logRetentionHours string

The number of hours to keep a log file before deleting it.

logRetentionMs string

The number of milliseconds to keep a log file before deleting it. If not set, the value in log.retention.minutes is used. If set to -1, no time limit is applied.

logRollJitterMs string

The maximum jitter to subtract from logRollTimeMillis (in milliseconds). If not set, the value in log.roll.jitter.hours is used.

logRollMs string

The maximum time before a new log segment is rolled out (in milliseconds).

logSegmentBytes string

The maximum size of a single log file

logSegmentDeleteDelayMs string

The amount of time to wait before deleting a file from the filesystem.

maxConnectionsPerIp string

The maximum number of connections allowed from each IP address (defaults to 2147483647).

maxIncrementalFetchSessionCacheSlots string

The maximum number of incremental fetch sessions that the broker will maintain.

messageMaxBytes string

The maximum size of message that the server can receive.

minInsyncReplicas string

When a producer sets acks to ‘all’ (or ‘-1’), min.insync.replicas specifies the minimum number of replicas that must acknowledge a write for the write to be considered successful.

numPartitions string

Number of partitions for autocreated topics

offsetsRetentionMinutes string

Log retention window in minutes for offsets topic.

producerPurgatoryPurgeIntervalRequests string

The purge interval (in number of requests) of the producer request purgatory (defaults to 1000).

replicaFetchMaxBytes string

The number of bytes of messages to attempt to fetch for each partition (defaults to 1048576). This is not an absolute maximum; if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that progress can be made.

replicaFetchResponseMaxBytes string

Maximum bytes expected for the entire fetch response (defaults to 10485760). Records are fetched in batches, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that progress can be made. As such, this is not an absolute maximum.

socketRequestMaxBytes string

The maximum number of bytes in a socket request (defaults to 104857600).

transactionRemoveExpiredTransactionCleanupIntervalMs string

The interval at which to remove transactions that have expired due to transactional.id.expiration.ms passing (defaults to 3600000 (1 hour)).

transactionStateLogSegmentBytes string

The transaction topic segment bytes should be kept relatively small in order to facilitate faster log compaction and cache loads (defaults to 104857600 (100 mebibytes)).

auto_create_topics_enable str

Enable auto creation of topics

compression_type str

Specify the final compression type for a given topic. This configuration accepts the standard compression codecs (‘gzip’, ‘snappy’, ‘lz4’, ‘zstd’). It additionally accepts ‘uncompressed’ which is equivalent to no compression; and ‘producer’ which means retain the original compression codec set by the producer.

connections_max_idle_ms str

Idle connections timeout: the server socket processor threads close the connections that idle for longer than this.

default_replication_factor str

Replication factor for autocreated topics

group_max_session_timeout_ms str

The maximum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures.

group_min_session_timeout_ms str

The minimum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures.

log_cleaner_delete_retention_ms str

How long are delete records retained?

log_cleaner_max_compaction_lag_ms str

The maximum amount of time a message will remain uncompacted. Only applicable for logs that are being compacted.

log_cleaner_min_cleanable_ratio str

Controls log compactor frequency. Larger value means more frequent compactions but also more space wasted for logs. Consider setting log.cleaner.max.compaction.lag.ms to enforce compactions sooner, instead of setting a very high value for this option.

log_cleaner_min_compaction_lag_ms str

The minimum time a message will remain uncompacted in the log. Only applicable for logs that are being compacted.

log_cleanup_policy str

The default cleanup policy for segments beyond the retention window.

log_flush_interval_messages str

The number of messages accumulated on a log partition before messages are flushed to disk.

log_flush_interval_ms str

The maximum time in ms that a message in any topic is kept in memory before being flushed to disk. If not set, the value in log.flush.scheduler.interval.ms is used.

log_index_interval_bytes str

The interval with which Kafka adds an entry to the offset index.

log_index_size_max_bytes str

The maximum size in bytes of the offset index.

log_message_downconversion_enable str

This configuration controls whether down-conversion of message formats is enabled to satisfy consume requests.

log_message_timestamp_difference_max_ms str

The maximum difference allowed between the timestamp when a broker receives a message and the timestamp specified in the message

log_message_timestamp_type str

Define whether the timestamp in the message is message create time or log append time.

log_preallocate str

Whether to preallocate the file when creating a new segment.

log_retention_bytes str

The maximum size of the log before deleting messages

log_retention_hours str

The number of hours to keep a log file before deleting it.

log_retention_ms str

The number of milliseconds to keep a log file before deleting it. If not set, the value in log.retention.minutes is used. If set to -1, no time limit is applied.

log_roll_jitter_ms str

The maximum jitter to subtract from logRollTimeMillis (in milliseconds). If not set, the value in log.roll.jitter.hours is used.

log_roll_ms str

The maximum time before a new log segment is rolled out (in milliseconds).

log_segment_bytes str

The maximum size of a single log file

log_segment_delete_delay_ms str

The amount of time to wait before deleting a file from the filesystem.

max_connections_per_ip str

The maximum number of connections allowed from each IP address (defaults to 2147483647).

max_incremental_fetch_session_cache_slots str

The maximum number of incremental fetch sessions that the broker will maintain.

message_max_bytes str

The maximum size of message that the server can receive.

min_insync_replicas str

When a producer sets acks to ‘all’ (or ‘-1’), min.insync.replicas specifies the minimum number of replicas that must acknowledge a write for the write to be considered successful.

num_partitions str

Number of partitions for autocreated topics

offsets_retention_minutes str

Log retention window in minutes for offsets topic.

producer_purgatory_purge_interval_requests str

The purge interval (in number of requests) of the producer request purgatory (defaults to 1000).

replica_fetch_max_bytes str

The number of bytes of messages to attempt to fetch for each partition (defaults to 1048576). This is not an absolute maximum; if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that progress can be made.

replica_fetch_response_max_bytes str

Maximum bytes expected for the entire fetch response (defaults to 10485760). Records are fetched in batches, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that progress can be made. As such, this is not an absolute maximum.

socket_request_max_bytes str

The maximum number of bytes in a socket request (defaults to 104857600).

transaction_remove_expired_transaction_cleanup_interval_ms str

The interval at which to remove transactions that have expired due to transactional.id.expiration.ms passing (defaults to 3600000 (1 hour)).

transaction_state_log_segment_bytes str

The transaction topic segment bytes should be kept relatively small in order to facilitate faster log compaction and cache loads (defaults to 104857600 (100 mebibytes)).
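
Taken together, these broker-level settings map to the nested kafka block of the service's user config. The snippet below is a minimal sketch, assuming the Python SDK's KafkaKafkaUserConfigKafkaArgs class shown in the example at the top of this page; the values are purely illustrative, and all user config values are passed as strings.

import pulumi_aiven as aiven

# Illustrative values only; every setting below is optional.
kafka_tuning = aiven.KafkaKafkaUserConfigKafkaArgs(
    compression_type="lz4",        # final compression type for topics
    min_insync_replicas="2",       # replicas that must acknowledge when acks=all
    log_retention_ms="604800000",  # keep log data for roughly 7 days
    max_connections_per_ip="1000", # cap connections from a single IP address
)

The resulting object is then supplied as the kafka field of KafkaKafkaUserConfigArgs, as in the example at the top of this page.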

KafkaKafkaUserConfigKafkaAuthenticationMethods

Certificate string

Enable certificate/SSL authentication

Sasl string

Enable SASL authentication

Certificate string

Enable certificate/SSL authentication

Sasl string

Enable SASL authentication

certificate string

Enable certificate/SSL authentication

sasl string

Enable SASL authentication

certificate str

Enable certificate/SSL authentication

sasl str

Enable SASL authentication
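
As a hedged sketch, enabling both authentication methods might look like the following. The Args-suffixed class name and the values-as-strings convention are assumed from the naming pattern used by the other input types on this page.

import pulumi_aiven as aiven

# Hypothetical example: allow both certificate/SSL and SASL authentication.
auth_methods = aiven.KafkaKafkaUserConfigKafkaAuthenticationMethodsArgs(
    certificate="true",
    sasl="true",
)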

KafkaKafkaUserConfigKafkaConnectConfig

ConnectorClientConfigOverridePolicy string

Defines what client configurations can be overridden by the connector. Default is None

ConsumerAutoOffsetReset string

What to do when there is no initial offset in Kafka or if the current offset does not exist any more on the server. Default is earliest.

ConsumerFetchMaxBytes string

Records are fetched in batches by the consumer, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that the consumer can make progress. As such, this is not an absolute maximum.

ConsumerIsolationLevel string

Transaction read isolation level. read_uncommitted is the default, but read_committed can be used if consume-exactly-once behavior is desired.

ConsumerMaxPartitionFetchBytes string

Records are fetched in batches by the consumer. If the first record batch in the first non-empty partition of the fetch is larger than this limit, the batch will still be returned to ensure that the consumer can make progress.

ConsumerMaxPollIntervalMs string

The maximum delay in milliseconds between invocations of poll() when using consumer group management (defaults to 300000).

ConsumerMaxPollRecords string

The maximum number of records returned in a single call to poll() (defaults to 500).

OffsetFlushIntervalMs string

The interval at which to try committing offsets for tasks (defaults to 60000).

OffsetFlushTimeoutMs string

Maximum number of milliseconds to wait for records to flush and partition offset data to be committed to offset storage before cancelling the process and restoring the offset data to be committed in a future attempt (defaults to 5000).

ProducerMaxRequestSize string

This setting will limit the number of record batches the producer will send in a single request to avoid sending huge requests.

SessionTimeoutMs string

The timeout in milliseconds used to detect failures when using Kafka’s group management facilities (defaults to 10000).

ConnectorClientConfigOverridePolicy string

Defines what client configurations can be overridden by the connector. Default is None

ConsumerAutoOffsetReset string

What to do when there is no initial offset in Kafka or if the current offset does not exist any more on the server. Default is earliest.

ConsumerFetchMaxBytes string

Records are fetched in batches by the consumer, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that the consumer can make progress. As such, this is not an absolute maximum.

ConsumerIsolationLevel string

Transaction read isolation level. read_uncommitted is the default, but read_committed can be used if consume-exactly-once behavior is desired.

ConsumerMaxPartitionFetchBytes string

Records are fetched in batches by the consumer. If the first record batch in the first non-empty partition of the fetch is larger than this limit, the batch will still be returned to ensure that the consumer can make progress.

ConsumerMaxPollIntervalMs string

The maximum delay in milliseconds between invocations of poll() when using consumer group management (defaults to 300000).

ConsumerMaxPollRecords string

The maximum number of records returned in a single call to poll() (defaults to 500).

OffsetFlushIntervalMs string

The interval at which to try committing offsets for tasks (defaults to 60000).

OffsetFlushTimeoutMs string

Maximum number of milliseconds to wait for records to flush and partition offset data to be committed to offset storage before cancelling the process and restoring the offset data to be committed in a future attempt (defaults to 5000).

ProducerMaxRequestSize string

This setting will limit the number of record batches the producer will send in a single request to avoid sending huge requests.

SessionTimeoutMs string

The timeout in milliseconds used to detect failures when using Kafka’s group management facilities (defaults to 10000).

connectorClientConfigOverridePolicy string

Defines what client configurations can be overridden by the connector. Default is None

consumerAutoOffsetReset string

What to do when there is no initial offset in Kafka or if the current offset does not exist any more on the server. Default is earliest.

consumerFetchMaxBytes string

Records are fetched in batches by the consumer, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that the consumer can make progress. As such, this is not an absolute maximum.

consumerIsolationLevel string

Transaction read isolation level. read_uncommitted is the default, but read_committed can be used if consume-exactly-once behavior is desired.

consumerMaxPartitionFetchBytes string

Records are fetched in batches by the consumer. If the first record batch in the first non-empty partition of the fetch is larger than this limit, the batch will still be returned to ensure that the consumer can make progress.

consumerMaxPollIntervalMs string

The maximum delay in milliseconds between invocations of poll() when using consumer group management (defaults to 300000).

consumerMaxPollRecords string

The maximum number of records returned in a single call to poll() (defaults to 500).

offsetFlushIntervalMs string

The interval at which to try committing offsets for tasks (defaults to 60000).

offsetFlushTimeoutMs string

Maximum number of milliseconds to wait for records to flush and partition offset data to be committed to offset storage before cancelling the process and restoring the offset data to be committed in a future attempt (defaults to 5000).

producerMaxRequestSize string

This setting will limit the number of record batches the producer will send in a single request to avoid sending huge requests.

sessionTimeoutMs string

The timeout in milliseconds used to detect failures when using Kafka’s group management facilities (defaults to 10000).

connector_client_config_override_policy str

Defines what client configurations can be overridden by the connector. Default is None

consumer_auto_offset_reset str

What to do when there is no initial offset in Kafka or if the current offset does not exist any more on the server. Default is earliest.

consumer_fetch_max_bytes str

Records are fetched in batches by the consumer, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that the consumer can make progress. As such, this is not an absolute maximum.

consumer_isolation_level str

Transaction read isolation level. read_uncommitted is the default, but read_committed can be used if consume-exactly-once behavior is desired.

consumer_max_partition_fetch_bytes str

Records are fetched in batches by the consumer. If the first record batch in the first non-empty partition of the fetch is larger than this limit, the batch will still be returned to ensure that the consumer can make progress.

consumer_max_poll_interval_ms str

The maximum delay in milliseconds between invocations of poll() when using consumer group management (defaults to 300000).

consumer_max_poll_records str

The maximum number of records returned in a single call to poll() (defaults to 500).

offset_flush_interval_ms str

The interval at which to try committing offsets for tasks (defaults to 60000).

offset_flush_timeout_ms str

Maximum number of milliseconds to wait for records to flush and partition offset data to be committed to offset storage before cancelling the process and restoring the offset data to be committed in a future attempt (defaults to 5000).

producer_max_request_size str

This setting will limit the number of record batches the producer will send in a single request to avoid sending huge requests.

session_timeout_ms str

The timeout in milliseconds used to detect failures when using Kafka’s group management facilities (defaults to 10000).
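
A minimal sketch of tuning a few of these Kafka Connect settings is shown below. The KafkaKafkaUserConfigKafkaConnectConfigArgs class name is assumed from the naming conventions used elsewhere on this page, and the values are only examples.

import pulumi_aiven as aiven

# Hypothetical values: read only committed records and return smaller poll batches.
connect_config = aiven.KafkaKafkaUserConfigKafkaConnectConfigArgs(
    consumer_isolation_level="read_committed",
    consumer_max_poll_records="200",
    offset_flush_interval_ms="60000",
)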

KafkaKafkaUserConfigKafkaRestConfig

ConsumerEnableAutoCommit string

If true, the consumer’s offset will be periodically committed to Kafka in the background

ConsumerRequestMaxBytes string

Maximum number of bytes in unencoded message keys and values returned by a single request

ConsumerRequestTimeoutMs string

The maximum total time to wait for messages for a request if the maximum number of messages has not yet been reached

ProducerAcks string

The number of acknowledgments the producer requires the leader to have received before considering a request complete. If set to ‘all’ or ‘-1’, the leader will wait for the full set of in-sync replicas to acknowledge the record.

ProducerLingerMs string

Wait for up to the given delay to allow batching records together

SimpleconsumerPoolSizeMax string

Maximum number of SimpleConsumers that can be instantiated per broker.

ConsumerEnableAutoCommit string

If true, the consumer’s offset will be periodically committed to Kafka in the background

ConsumerRequestMaxBytes string

Maximum number of bytes in unencoded message keys and values returned by a single request

ConsumerRequestTimeoutMs string

The maximum total time to wait for messages for a request if the maximum number of messages has not yet been reached

ProducerAcks string

The number of acknowledgments the producer requires the leader to have received before considering a request complete. If set to ‘all’ or ‘-1’, the leader will wait for the full set of in-sync replicas to acknowledge the record.

ProducerLingerMs string

Wait for up to the given delay to allow batching records together

SimpleconsumerPoolSizeMax string

Maximum number of SimpleConsumers that can be instantiated per broker.

consumerEnableAutoCommit string

If true, the consumer’s offset will be periodically committed to Kafka in the background

consumerRequestMaxBytes string

Maximum number of bytes in unencoded message keys and values returned by a single request

consumerRequestTimeoutMs string

The maximum total time to wait for messages for a request if the maximum number of messages has not yet been reached

producerAcks string

The number of acknowledgments the producer requires the leader to have received before considering a request complete. If set to ‘all’ or ‘-1’, the leader will wait for the full set of in-sync replicas to acknowledge the record.

producerLingerMs string

Wait for up to the given delay to allow batching records together

simpleconsumerPoolSizeMax string

Maximum number of SimpleConsumers that can be instantiated per broker.

consumer_enable_auto_commit str

If true, the consumer’s offset will be periodically committed to Kafka in the background

consumer_request_max_bytes str

Maximum number of bytes in unencoded message keys and values returned by a single request

consumer_request_timeout_ms str

The maximum total time to wait for messages for a request if the maximum number of messages has not yet been reached

producer_acks str

The number of acknowledgments the producer requires the leader to have received before considering a request complete. If set to ‘all’ or ‘-1’, the leader will wait for the full set of in-sync replicas to acknowledge the record.

producer_linger_ms str

Wait for up to the given delay to allow batching records together

simpleconsumer_pool_size_max str

Maximum number of SimpleConsumers that can be instantiated per broker.
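
A comparable sketch for the REST proxy settings, again assuming the Args-suffixed class name (KafkaKafkaUserConfigKafkaRestConfigArgs) from the naming pattern above and using illustrative values:

import pulumi_aiven as aiven

# Hypothetical values: wait for the full ISR and allow a short batching delay.
rest_config = aiven.KafkaKafkaUserConfigKafkaRestConfigArgs(
    producer_acks="all",
    producer_linger_ms="5",
    consumer_enable_auto_commit="true",
)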

KafkaKafkaUserConfigPrivateAccess

Prometheus string

Allow clients to connect to prometheus from the public internet for service nodes that are in a project VPC or another type of private network

Prometheus string

Allow clients to connect to prometheus from the public internet for service nodes that are in a project VPC or another type of private network

prometheus string

Allow clients to connect to prometheus from the public internet for service nodes that are in a project VPC or another type of private network

prometheus str

Allow clients to connect to prometheus from the public internet for service nodes that are in a project VPC or another type of private network
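
Since prometheus is the only property listed for this type, a minimal sketch (class name assumed from the naming pattern above) is simply:

import pulumi_aiven as aiven

# Hypothetical sketch: toggle the Prometheus endpoint for private access.
private_access = aiven.KafkaKafkaUserConfigPrivateAccessArgs(
    prometheus="true",
)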

KafkaKafkaUserConfigPrivatelinkAccess

Kafka string

Enable kafka

KafkaConnect string

Enable kafka_connect

KafkaRest string

Enable kafka_rest

SchemaRegistry string

Enable Schema-Registry service

Kafka string

Enable kafka

KafkaConnect string

Enable kafka_connect

KafkaRest string

Enable kafka_rest

SchemaRegistry string

Enable Schema-Registry service

kafka string

Enable kafka

kafkaConnect string

Enable kafka_connect

kafkaRest string

Enable kafka_rest

schemaRegistry string

Enable Schema-Registry service

kafka str

Enable kafka

kafka_connect str

Enable kafka_connect

kafka_rest str

Enable kafka_rest

schema_registry str

Enable Schema-Registry service
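
A minimal sketch for PrivateLink access follows; the class name is assumed from the naming pattern above, and the values are strings as elsewhere in this user config.

import pulumi_aiven as aiven

# Hypothetical sketch: enable the Kafka and Schema Registry endpoints over PrivateLink.
privatelink_access = aiven.KafkaKafkaUserConfigPrivatelinkAccessArgs(
    kafka="true",
    schema_registry="true",
)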

KafkaKafkaUserConfigPublicAccess

Kafka string

Enable kafka

KafkaConnect string

Enable kafka_connect

KafkaRest string

Enable kafka_rest

Prometheus string

Allow clients to connect to prometheus from the public internet for service nodes that are in a project VPC or another type of private network

SchemaRegistry string

Enable Schema-Registry service

Kafka string

Enable kafka

KafkaConnect string

Enable kafka_connect

KafkaRest string

Enable kafka_rest

Prometheus string

Allow clients to connect to prometheus from the public internet for service nodes that are in a project VPC or another type of private network

SchemaRegistry string

Enable Schema-Registry service

kafka string

Enable kafka

kafkaConnect string

Enable kafka_connect

kafkaRest string

Enable kafka_rest

prometheus string

Allow clients to connect to prometheus from the public internet for service nodes that are in a project VPC or another type of private network

schemaRegistry string

Enable Schema-Registry service

kafka str

Enable kafka

kafka_connect str

Enable kafka_connect

kafka_rest str

Enable kafka_rest

prometheus str

Allow clients to connect to prometheus from the public internet for service nodes that are in a project VPC or another type of private network

schema_registry str

Enable Schema-Registry service
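
The public access block works the same way. The sketch below uses the KafkaKafkaUserConfigPublicAccessArgs class from the example at the top of this page, with illustrative values that enable the Prometheus and Schema Registry endpoints.

import pulumi_aiven as aiven

# Hypothetical sketch: expose Prometheus metrics and Schema Registry publicly.
public_access = aiven.KafkaKafkaUserConfigPublicAccessArgs(
    prometheus="true",
    schema_registry="true",
)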

KafkaKafkaUserConfigSchemaRegistryConfig

LeaderEligibility string

If true, Karapace / Schema Registry on the service nodes can participate in leader election. You may need to disable this when the schemas topic is replicated to a secondary cluster and Karapace / Schema Registry there must not participate in leader election. Defaults to ‘true’.

TopicName string

The durable single partition topic that acts as the durable log for the data. This topic must be compacted to avoid losing data due to retention policy. Please note that changing this configuration in an existing Schema Registry / Karapace setup leads to previous schemas being inaccessible, data encoded with them potentially unreadable and schema ID sequence put out of order. It’s only possible to do the switch while Schema Registry / Karapace is disabled. Defaults to ‘_schemas’.

LeaderEligibility string

If true, Karapace / Schema Registry on the service nodes can participate in leader election. You may need to disable this when the schemas topic is replicated to a secondary cluster and Karapace / Schema Registry there must not participate in leader election. Defaults to ‘true’.

TopicName string

The durable single partition topic that acts as the durable log for the data. This topic must be compacted to avoid losing data due to retention policy. Please note that changing this configuration in an existing Schema Registry / Karapace setup leads to previous schemas being inaccessible, data encoded with them potentially unreadable and schema ID sequence put out of order. It’s only possible to do the switch while Schema Registry / Karapace is disabled. Defaults to ‘_schemas’.

leaderEligibility string

If true, Karapace / Schema Registry on the service nodes can participate in leader election. You may need to disable this when the schemas topic is replicated to a secondary cluster and Karapace / Schema Registry there must not participate in leader election. Defaults to ‘true’.

topicName string

The durable single partition topic that acts as the durable log for the data. This topic must be compacted to avoid losing data due to retention policy. Please note that changing this configuration in an existing Schema Registry / Karapace setup leads to previous schemas being inaccessible, data encoded with them potentially unreadable and schema ID sequence put out of order. It’s only possible to do the switch while Schema Registry / Karapace is disabled. Defaults to ‘_schemas’.

leader_eligibility str

If true, Karapace / Schema Registry on the service nodes can participate in leader election. You may need to disable this when the schemas topic is replicated to a secondary cluster and Karapace / Schema Registry there must not participate in leader election. Defaults to ‘true’.

topic_name str

The durable single partition topic that acts as the durable log for the data. This topic must be compacted to avoid losing data due to retention policy. Please note that changing this configuration in an existing Schema Registry / Karapace setup leads to previous schemas being inaccessible, data encoded with them potentially unreadable and schema ID sequence put out of order. It’s only possible to do the switch while Schema Registry / Karapace is disabled. Defaults to ‘_schemas’.
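
Finally, a sketch of the Schema Registry / Karapace configuration. The class name is assumed from the pattern above, and the values shown are simply the documented defaults.

import pulumi_aiven as aiven

# Hypothetical sketch using the documented defaults; change topic_name only while
# Schema Registry / Karapace is disabled, as noted above.
schema_registry_config = aiven.KafkaKafkaUserConfigSchemaRegistryConfigArgs(
    leader_eligibility="true",
    topic_name="_schemas",
)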

KafkaServiceIntegration

Package Details

Repository
https://github.com/pulumi/pulumi-aiven
License
Apache-2.0
Notes
This Pulumi package is based on the aiven Terraform Provider.