aiven.Kafka

Aiven v6.7.2 published on Tuesday, Oct 31, 2023 by Pulumi

    The Kafka resource allows the creation and management of Aiven Kafka services.

    Example Usage

    C#

    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using Aiven = Pulumi.Aiven;
    
    return await Deployment.RunAsync(() => 
    {
        var kafka1 = new Aiven.Kafka("kafka1", new()
        {
            Project = data.Aiven_project.Pr1.Project,
            CloudName = "google-europe-west1",
            Plan = "business-4",
            ServiceName = "my-kafka1",
            MaintenanceWindowDow = "monday",
            MaintenanceWindowTime = "10:00:00",
            KafkaUserConfig = new Aiven.Inputs.KafkaKafkaUserConfigArgs
            {
                KafkaRest = true,
                KafkaConnect = true,
                SchemaRegistry = true,
                KafkaVersion = "3.1",
                Kafka = new Aiven.Inputs.KafkaKafkaUserConfigKafkaArgs
                {
                    GroupMaxSessionTimeoutMs = 70000,
                    LogRetentionBytes = 1000000000,
                },
                PublicAccess = new Aiven.Inputs.KafkaKafkaUserConfigPublicAccessArgs
                {
                    KafkaRest = true,
                    KafkaConnect = true,
                },
            },
        });
    
    });
    
    Go

    package main
    
    import (
    	"github.com/pulumi/pulumi-aiven/sdk/v6/go/aiven"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		_, err := aiven.NewKafka(ctx, "kafka1", &aiven.KafkaArgs{
    			Project:               pulumi.Any(data.Aiven_project.Pr1.Project),
    			CloudName:             pulumi.String("google-europe-west1"),
    			Plan:                  pulumi.String("business-4"),
    			ServiceName:           pulumi.String("my-kafka1"),
    			MaintenanceWindowDow:  pulumi.String("monday"),
    			MaintenanceWindowTime: pulumi.String("10:00:00"),
    			KafkaUserConfig: &aiven.KafkaKafkaUserConfigArgs{
    				KafkaRest:      pulumi.Bool(true),
    				KafkaConnect:   pulumi.Bool(true),
    				SchemaRegistry: pulumi.Bool(true),
    				KafkaVersion:   pulumi.String("3.1"),
    				Kafka: &aiven.KafkaKafkaUserConfigKafkaArgs{
    					GroupMaxSessionTimeoutMs: pulumi.Int(70000),
    					LogRetentionBytes:        pulumi.Int(1000000000),
    				},
    				PublicAccess: &aiven.KafkaKafkaUserConfigPublicAccessArgs{
    					KafkaRest:    pulumi.Bool(true),
    					KafkaConnect: pulumi.Bool(true),
    				},
    			},
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    Java

    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.aiven.Kafka;
    import com.pulumi.aiven.KafkaArgs;
    import com.pulumi.aiven.inputs.KafkaKafkaUserConfigArgs;
    import com.pulumi.aiven.inputs.KafkaKafkaUserConfigKafkaArgs;
    import com.pulumi.aiven.inputs.KafkaKafkaUserConfigPublicAccessArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            var kafka1 = new Kafka("kafka1", KafkaArgs.builder()        
                .project(data.aiven_project().pr1().project())
                .cloudName("google-europe-west1")
                .plan("business-4")
                .serviceName("my-kafka1")
                .maintenanceWindowDow("monday")
                .maintenanceWindowTime("10:00:00")
                .kafkaUserConfig(KafkaKafkaUserConfigArgs.builder()
                    .kafkaRest(true)
                    .kafkaConnect(true)
                    .schemaRegistry(true)
                    .kafkaVersion("3.1")
                    .kafka(KafkaKafkaUserConfigKafkaArgs.builder()
                        .groupMaxSessionTimeoutMs(70000)
                        .logRetentionBytes(1000000000)
                        .build())
                    .publicAccess(KafkaKafkaUserConfigPublicAccessArgs.builder()
                        .kafkaRest(true)
                        .kafkaConnect(true)
                        .build())
                    .build())
                .build());
    
        }
    }
    
    Python

    import pulumi
    import pulumi_aiven as aiven
    
    kafka1 = aiven.Kafka("kafka1",
        project=data["aiven_project"]["pr1"]["project"],
        cloud_name="google-europe-west1",
        plan="business-4",
        service_name="my-kafka1",
        maintenance_window_dow="monday",
        maintenance_window_time="10:00:00",
        kafka_user_config=aiven.KafkaKafkaUserConfigArgs(
            kafka_rest=True,
            kafka_connect=True,
            schema_registry=True,
            kafka_version="3.1",
            kafka=aiven.KafkaKafkaUserConfigKafkaArgs(
                group_max_session_timeout_ms=70000,
                log_retention_bytes=1000000000,
            ),
            public_access=aiven.KafkaKafkaUserConfigPublicAccessArgs(
                kafka_rest=True,
                kafka_connect=True,
            ),
        ))
    
    TypeScript

    import * as pulumi from "@pulumi/pulumi";
    import * as aiven from "@pulumi/aiven";
    
    const kafka1 = new aiven.Kafka("kafka1", {
        project: data.aiven_project.pr1.project,
        cloudName: "google-europe-west1",
        plan: "business-4",
        serviceName: "my-kafka1",
        maintenanceWindowDow: "monday",
        maintenanceWindowTime: "10:00:00",
        kafkaUserConfig: {
            kafkaRest: true,
            kafkaConnect: true,
            schemaRegistry: true,
            kafkaVersion: "3.1",
            kafka: {
                groupMaxSessionTimeoutMs: 70000,
                logRetentionBytes: 1000000000,
            },
            publicAccess: {
                kafkaRest: true,
                kafkaConnect: true,
            },
        },
    });
    
    YAML

    resources:
      kafka1:
        type: aiven:Kafka
        properties:
          project: ${data.aiven_project.pr1.project}
          cloudName: google-europe-west1
          plan: business-4
          serviceName: my-kafka1
          maintenanceWindowDow: monday
          maintenanceWindowTime: "10:00:00"
          kafkaUserConfig:
            kafkaRest: true
            kafkaConnect: true
            schemaRegistry: true
            kafkaVersion: '3.1'
            kafka:
              groupMaxSessionTimeoutMs: 70000
              logRetentionBytes: 1000000000
            publicAccess:
              kafkaRest: true
              kafkaConnect: true
    

    Create Kafka Resource

    new Kafka(name: string, args: KafkaArgs, opts?: CustomResourceOptions);
    @overload
    def Kafka(resource_name: str,
              opts: Optional[ResourceOptions] = None,
              additional_disk_space: Optional[str] = None,
              cloud_name: Optional[str] = None,
              default_acl: Optional[bool] = None,
              disk_space: Optional[str] = None,
              kafka_user_config: Optional[KafkaKafkaUserConfigArgs] = None,
              karapace: Optional[bool] = None,
              maintenance_window_dow: Optional[str] = None,
              maintenance_window_time: Optional[str] = None,
              plan: Optional[str] = None,
              project: Optional[str] = None,
              project_vpc_id: Optional[str] = None,
              service_integrations: Optional[Sequence[KafkaServiceIntegrationArgs]] = None,
              service_name: Optional[str] = None,
              static_ips: Optional[Sequence[str]] = None,
              tags: Optional[Sequence[KafkaTagArgs]] = None,
              termination_protection: Optional[bool] = None)
    @overload
    def Kafka(resource_name: str,
              args: KafkaArgs,
              opts: Optional[ResourceOptions] = None)
    func NewKafka(ctx *Context, name string, args KafkaArgs, opts ...ResourceOption) (*Kafka, error)
    public Kafka(string name, KafkaArgs args, CustomResourceOptions? opts = null)
    public Kafka(String name, KafkaArgs args)
    public Kafka(String name, KafkaArgs args, CustomResourceOptions options)
    
    type: aiven:Kafka
    properties: # The arguments to resource properties.
    options: # Bag of options to control resource's behavior.
    
    
    name string
    The unique name of the resource.
    args KafkaArgs
    The arguments to resource properties.
    opts CustomResourceOptions
    Bag of options to control the resource's behavior.

    Parameter names and option types vary slightly by SDK: the Python constructor takes resource_name and ResourceOptions, the Go constructor additionally takes a ctx Context for the current deployment and uses ResourceOption, and the Java constructor names its options parameter options.

    Kafka Resource Properties

    To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.

    Inputs

    The Kafka resource accepts the following input properties. Property names are shown in the PascalCase form used by the .NET and Go SDKs; the TypeScript and Java SDKs use camelCase equivalents (for example cloudName) and the Python SDK uses snake_case (cloud_name).

    Plan string

    Defines what kind of computing resources are allocated for the service. It can be changed after creation, though there are some restrictions when moving to a smaller plan: the new plan must have enough disk space to store all current data, and switching to a plan with fewer nodes might not be supported. The basic plan names are hobbyist, startup-x, business-x and premium-x, where x is (roughly) the amount of memory on each node (other attributes such as the number of CPUs and the amount of disk space also vary, but the naming is based on memory). The available options can be seen on the Aiven pricing page.

    Project string

    Identifies the project this resource belongs to. To set up proper dependencies, refer to this property as a reference. Changing this property forces recreation of the resource.

    ServiceName string

    Specifies the actual name of the service. The name cannot be changed later without destroying and re-creating the service, so it should be picked based on the intended service usage rather than current attributes.

    AdditionalDiskSpace string

    Additional disk space. Possible values depend on the service type, the cloud provider and the project. Reducing this value causes the service to rebalance.

    CloudName string

    Defines the cloud provider and region where the service is hosted. This can be changed freely after the service is created, but doing so triggers a potentially lengthy migration process. The format is the cloud provider name (aws, azure, do, google, upcloud, etc.), a dash, and the provider-specific region name; region names are documented in each cloud provider's own support articles.

    DefaultAcl bool

    Create a default wildcard Kafka ACL.

    DiskSpace string

    Service disk space. Possible values depend on the service type, the cloud provider and the project. Reducing this value causes the service to rebalance.

    Deprecated:

    This will be removed in v5.0.0. Please use additional_disk_space to specify the space to be added to the default disk_space defined by the plan.

    KafkaUserConfig KafkaKafkaUserConfig

    Kafka user-configurable settings.

    Karapace bool

    Switch the service to use Karapace for schema registry and REST proxy.

    Deprecated:

    Usage of this field is discouraged.

    MaintenanceWindowDow string

    Day of week when maintenance operations should be performed. One of monday, tuesday, wednesday, etc.

    MaintenanceWindowTime string

    Time of day when maintenance operations should be performed. UTC time in HH:mm:ss format.

    ProjectVpcId string

    Specifies the VPC the service should run in. If the value is not set, the service does not run inside a VPC. When set, the value should be given as a reference to set up dependencies correctly, and the VPC must be in the same cloud and region as the service itself. A service can be freely moved into and out of a VPC after creation, but doing so triggers a migration to new servers, which can take a significant amount of time if the service holds a lot of data.

    ServiceIntegrations List<KafkaServiceIntegration>

    Service integrations to specify when creating the service. Not applied after initial service creation.

    StaticIps List<string>

    Use static public IP addresses.

    Tags List<KafkaTag>

    Tags are key-value pairs that allow you to categorize services.

    TerminationProtection bool

    Prevents the service from being deleted. It is recommended to set this to true for all production services to prevent unintentional deletion. This does not protect against deleting individual topics, but for services with backups much of the content can at least be restored from backup after an accidental deletion. A usage sketch follows this list.
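    As a minimal sketch of how these inputs compose, the TypeScript snippet below enables termination protection and tags a service. The project name and tag values are placeholders; the key/value tag shape follows the KafkaTag description above.

    import * as aiven from "@pulumi/aiven";

    const kafkaProd = new aiven.Kafka("kafka-prod", {
        project: "my-project", // placeholder: reference your own Aiven project here
        cloudName: "google-europe-west1",
        plan: "business-4",
        serviceName: "kafka-prod",
        // Recommended for production: blocks accidental deletion of the service.
        terminationProtection: true,
        tags: [{
            key: "env",
            value: "prod",
        }],
    });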


    Outputs

    All input properties are implicitly available as output properties. Additionally, the Kafka resource produces the following output properties:

    Components List<KafkaComponent>

    Service component information objects.

    DiskSpaceCap string

    The maximum disk space of the service; possible values depend on the service type, the cloud provider and the project.

    DiskSpaceDefault string

    The default disk space of the service; possible values depend on the service type, the cloud provider and the project. It is also the minimum value for disk_space.

    DiskSpaceStep string

    The disk space step of the service; possible values depend on the service type, the cloud provider and the project. disk_space must increase from disk_space_default in increments of this size.

    DiskSpaceUsed string

    Disk space that the service is currently using.

    Id string

    The provider-assigned unique ID for this managed resource.

    Kafkas List<KafkaKafka>

    Kafka broker configuration values.

    ServiceHost string

    The hostname of the service.

    ServicePassword string

    Password used for connecting to the service, if applicable.

    ServicePort int

    The port of the service.

    ServiceType string

    Aiven internal service type code.

    ServiceUri string

    URI for connecting to the service. Service-specific info is under "kafka", "pg", etc.

    ServiceUsername string

    Username used for connecting to the service, if applicable.

    State string

    Service state. One of POWEROFF, REBALANCING, REBUILDING or RUNNING.
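    Because inputs are echoed back as outputs, connection details can be exported straight from the resource. A brief TypeScript sketch, reusing the kafka1 resource from the example above:

    // The URI can embed credentials, so treat it as sensitive.
    export const kafkaServiceUri = kafka1.serviceUri;
    export const kafkaServiceHost = kafka1.serviceHost;
    export const kafkaServicePort = kafka1.servicePort;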


    Look up Existing Kafka Resource

    Get an existing Kafka resource’s state with the given name, ID, and optional extra properties used to qualify the lookup.

    public static get(name: string, id: Input<ID>, state?: KafkaState, opts?: CustomResourceOptions): Kafka
    @staticmethod
    def get(resource_name: str,
            id: str,
            opts: Optional[ResourceOptions] = None,
            additional_disk_space: Optional[str] = None,
            cloud_name: Optional[str] = None,
            components: Optional[Sequence[KafkaComponentArgs]] = None,
            default_acl: Optional[bool] = None,
            disk_space: Optional[str] = None,
            disk_space_cap: Optional[str] = None,
            disk_space_default: Optional[str] = None,
            disk_space_step: Optional[str] = None,
            disk_space_used: Optional[str] = None,
            kafka_user_config: Optional[KafkaKafkaUserConfigArgs] = None,
            kafkas: Optional[Sequence[KafkaKafkaArgs]] = None,
            karapace: Optional[bool] = None,
            maintenance_window_dow: Optional[str] = None,
            maintenance_window_time: Optional[str] = None,
            plan: Optional[str] = None,
            project: Optional[str] = None,
            project_vpc_id: Optional[str] = None,
            service_host: Optional[str] = None,
            service_integrations: Optional[Sequence[KafkaServiceIntegrationArgs]] = None,
            service_name: Optional[str] = None,
            service_password: Optional[str] = None,
            service_port: Optional[int] = None,
            service_type: Optional[str] = None,
            service_uri: Optional[str] = None,
            service_username: Optional[str] = None,
            state: Optional[str] = None,
            static_ips: Optional[Sequence[str]] = None,
            tags: Optional[Sequence[KafkaTagArgs]] = None,
            termination_protection: Optional[bool] = None) -> Kafka
    func GetKafka(ctx *Context, name string, id IDInput, state *KafkaState, opts ...ResourceOption) (*Kafka, error)
    public static Kafka Get(string name, Input<string> id, KafkaState? state, CustomResourceOptions? opts = null)
    public static Kafka get(String name, Output<String> id, KafkaState state, CustomResourceOptions options)
    Resource lookup is not supported in YAML
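    As an illustrative TypeScript sketch, the call below reads the state of an existing service into the program without managing it. The ID format shown (<project>/<service_name>) is an assumption based on the provider's usual import IDs; verify it for your setup.

    import * as aiven from "@pulumi/aiven";

    // Read-only adoption of existing state; Pulumi will not plan changes for it.
    const existing = aiven.Kafka.get("existing-kafka", "my-project/my-kafka1");

    export const existingKafkaHost = existing.serviceHost;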
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to look up.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.

    (The Python SDK names the first parameter resource_name.)
    The following state arguments are supported:
    AdditionalDiskSpace string

    Additional disk space. Possible values depend on the service type, the cloud provider and the project. Reducing this value causes the service to rebalance.

    CloudName string

    Defines the cloud provider and region where the service is hosted. This can be changed freely after the service is created, but doing so triggers a potentially lengthy migration process. The format is the cloud provider name (aws, azure, do, google, upcloud, etc.), a dash, and the provider-specific region name; region names are documented in each cloud provider's own support articles.

    Components List<KafkaComponent>

    Service component information objects.

    DefaultAcl bool

    Create a default wildcard Kafka ACL.

    DiskSpace string

    Service disk space. Possible values depend on the service type, the cloud provider and the project. Reducing this value causes the service to rebalance.

    Deprecated:

    This will be removed in v5.0.0. Please use additional_disk_space to specify the space to be added to the default disk_space defined by the plan.

    DiskSpaceCap string

    The maximum disk space of the service; possible values depend on the service type, the cloud provider and the project.

    DiskSpaceDefault string

    The default disk space of the service; possible values depend on the service type, the cloud provider and the project. It is also the minimum value for disk_space.

    DiskSpaceStep string

    The disk space step of the service; possible values depend on the service type, the cloud provider and the project. disk_space must increase from disk_space_default in increments of this size.

    DiskSpaceUsed string

    Disk space that the service is currently using.

    Kafkas List<KafkaKafka>

    Kafka broker configuration values.

    KafkaUserConfig KafkaKafkaUserConfig

    Kafka user-configurable settings.

    Karapace bool

    Switch the service to use Karapace for schema registry and REST proxy.

    Deprecated:

    Usage of this field is discouraged.

    MaintenanceWindowDow string

    Day of week when maintenance operations should be performed. One of monday, tuesday, wednesday, etc.

    MaintenanceWindowTime string

    Time of day when maintenance operations should be performed. UTC time in HH:mm:ss format.

    Plan string

    Defines what kind of computing resources are allocated for the service. It can be changed after creation, though there are some restrictions when moving to a smaller plan: the new plan must have enough disk space to store all current data, and switching to a plan with fewer nodes might not be supported. The basic plan names are hobbyist, startup-x, business-x and premium-x, where x is (roughly) the amount of memory on each node (other attributes such as the number of CPUs and the amount of disk space also vary, but the naming is based on memory). The available options can be seen on the Aiven pricing page.

    Project string

    Identifies the project this resource belongs to. To set up proper dependencies, refer to this property as a reference. Changing this property forces recreation of the resource.

    ProjectVpcId string

    Specifies the VPC the service should run in. If the value is not set, the service does not run inside a VPC. When set, the value should be given as a reference to set up dependencies correctly, and the VPC must be in the same cloud and region as the service itself. A service can be freely moved into and out of a VPC after creation, but doing so triggers a migration to new servers, which can take a significant amount of time if the service holds a lot of data.

    ServiceHost string

    The hostname of the service.

    ServiceIntegrations List<KafkaServiceIntegration>

    Service integrations to specify when creating the service. Not applied after initial service creation.

    ServiceName string

    Specifies the actual name of the service. The name cannot be changed later without destroying and re-creating the service, so it should be picked based on the intended service usage rather than current attributes.

    ServicePassword string

    Password used for connecting to the service, if applicable.

    ServicePort int

    The port of the service.

    ServiceType string

    Aiven internal service type code.

    ServiceUri string

    URI for connecting to the service. Service-specific info is under "kafka", "pg", etc.

    ServiceUsername string

    Username used for connecting to the service, if applicable.

    State string

    Service state. One of POWEROFF, REBALANCING, REBUILDING or RUNNING.

    StaticIps List<string>

    Use static public IP addresses.

    Tags List<KafkaTag>

    Tags are key-value pairs that allow you to categorize services.

    TerminationProtection bool

    Prevents the service from being deleted. It is recommended to set this to true for all production services to prevent unintentional deletion. This does not protect against deleting individual topics, but for services with backups much of the content can at least be restored from backup after an accidental deletion.

    AdditionalDiskSpace string

    Additional disk space. Possible values depend on the service type, the cloud provider and the project. Therefore, reducing will result in the service rebalancing.

    CloudName string

    Defines where the cloud provider and region where the service is hosted in. This can be changed freely after service is created. Changing the value will trigger a potentially lengthy migration process for the service. Format is cloud provider name (aws, azure, do google, upcloud, etc.), dash, and the cloud provider specific region name. These are documented on each Cloud provider's own support articles, like here for Google and here for AWS.

    Components []KafkaComponentArgs

    Service component information objects

    DefaultAcl bool

    Create default wildcard Kafka ACL

    DiskSpace string

    Service disk space. Possible values depend on the service type, the cloud provider and the project. Therefore, reducing will result in the service rebalancing.

    Deprecated:

    This will be removed in v5.0.0. Please use additional_disk_space to specify the space to be added to the default disk_space defined by the plan.

    DiskSpaceCap string

    The maximum disk space of the service, possible values depend on the service type, the cloud provider and the project.

    DiskSpaceDefault string

    The default disk space of the service, possible values depend on the service type, the cloud provider and the project. Its also the minimum value for disk_space

    DiskSpaceStep string

    The default disk space step of the service, possible values depend on the service type, the cloud provider and the project. disk_space needs to increment from disk_space_default by increments of this size.

    DiskSpaceUsed string

    Disk space that service is currently using

    KafkaUserConfig KafkaKafkaUserConfigArgs

    Kafka user configurable settings

    Kafkas []KafkaKafkaArgs

    Kafka broker configuration values.

    Karapace bool

    Switch the service to use Karapace for schema registry and REST proxy

    Deprecated:

    Usage of this field is discouraged.

    MaintenanceWindowDow string

    Day of week when maintenance operations should be performed. One monday, tuesday, wednesday, etc.

    MaintenanceWindowTime string

    Time of day when maintenance operations should be performed. UTC time in HH:mm:ss format.

    Plan string

    Defines what kind of computing resources are allocated for the service. It can be changed after creation, though there are some restrictions when going to a smaller plan such as the new plan must have sufficient amount of disk space to store all current data and switching to a plan with fewer nodes might not be supported. The basic plan names are hobbyist, startup-x, business-x and premium-x where x is (roughly) the amount of memory on each node (also other attributes like number of CPUs and amount of disk space varies but naming is based on memory). The available options can be seem from the Aiven pricing page.

    Project string

    Identifies the project this resource belongs to. To set up proper dependencies please refer to this variable as a reference. This property cannot be changed, doing so forces recreation of the resource.

    ProjectVpcId string

    Specifies the VPC the service should run in. If the value is not set the service is not run inside a VPC. When set, the value should be given as a reference to set up dependencies correctly and the VPC must be in the same cloud and region as the service itself. Project can be freely moved to and from VPC after creation but doing so triggers migration to new servers so the operation can take significant amount of time to complete if the service has a lot of data.

    ServiceHost string

    The hostname of the service.

    ServiceIntegrations []KafkaServiceIntegrationArgs

    Service integrations to specify when creating a service. Not applied after initial service creation

    ServiceName string

    Specifies the actual name of the service. The name cannot be changed later without destroying and re-creating the service so name should be picked based on intended service usage rather than current attributes.

    ServicePassword string

    Password used for connecting to the service, if applicable

    ServicePort int

    The port of the service

    ServiceType string

    Aiven internal service type code

    ServiceUri string

    URI for connecting to the service. Service specific info is under "kafka", "pg", etc.

    ServiceUsername string

    Username used for connecting to the service, if applicable

    State string

    Service state. One of POWEROFF, REBALANCING, REBUILDING or RUNNING

    StaticIps []string

    Use static public IP addresses.

    Tags []KafkaTagArgs

    Tags are key-value pairs that allow you to categorize services.

    TerminationProtection bool

    Prevents the service from being deleted. It is recommended to set this to true for all production services to prevent unintentional service deletion. This does not shield against deleting databases or topics but for services with backups much of the content can at least be restored from backup in case accidental deletion is done.

    additionalDiskSpace String

    Additional disk space. Possible values depend on the service type, the cloud provider and the project. Therefore, reducing will result in the service rebalancing.

    cloudName String

    Defines where the cloud provider and region where the service is hosted in. This can be changed freely after service is created. Changing the value will trigger a potentially lengthy migration process for the service. Format is cloud provider name (aws, azure, do google, upcloud, etc.), dash, and the cloud provider specific region name. These are documented on each Cloud provider's own support articles, like here for Google and here for AWS.

    components List<KafkaComponent>

    Service component information objects

    defaultAcl Boolean

    Create default wildcard Kafka ACL

    diskSpace String

    Service disk space. Possible values depend on the service type, the cloud provider and the project. Therefore, reducing will result in the service rebalancing.

    Deprecated:

    This will be removed in v5.0.0. Please use additional_disk_space to specify the space to be added to the default disk_space defined by the plan.

    diskSpaceCap String

    The maximum disk space of the service; possible values depend on the service type, the cloud provider and the project.

    diskSpaceDefault String

    The default disk space of the service; possible values depend on the service type, the cloud provider and the project. It is also the minimum value for disk_space.

    diskSpaceStep String

    The default disk space step of the service; possible values depend on the service type, the cloud provider and the project. disk_space must be increased from disk_space_default in increments of this size.
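
    As a worked example of that rule, with a (hypothetical) disk_space_default of 600GiB and a step of 30GiB, the valid sizes are 600GiB, 630GiB, 660GiB, and so on. The short Go sketch below checks a requested size against those two values:

    package main

    import "fmt"

    func main() {
        // Hypothetical plan values; the real ones come from the service's
        // disk_space_default and disk_space_step attributes.
        defaultGiB, stepGiB := 600, 30
        requested := 690
        if requested >= defaultGiB && (requested-defaultGiB)%stepGiB == 0 {
            fmt.Printf("%dGiB is valid: default %dGiB plus %d steps of %dGiB\n",
                requested, defaultGiB, (requested-defaultGiB)/stepGiB, stepGiB)
        } else {
            fmt.Printf("%dGiB is not reachable from %dGiB in %dGiB steps\n",
                requested, defaultGiB, stepGiB)
        }
    }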

    diskSpaceUsed String

    Disk space that the service is currently using

    kafkaUserConfig KafkaKafkaUserConfig

    Kafka user configurable settings

    kafkas List<KafkaKafka>

    Kafka broker configuration values.

    karapace Boolean

    Switch the service to use Karapace for schema registry and REST proxy

    Deprecated:

    Usage of this field is discouraged.

    maintenanceWindowDow String

    Day of the week when maintenance operations should be performed. One of monday, tuesday, wednesday, etc.

    maintenanceWindowTime String

    Time of day when maintenance operations should be performed. UTC time in HH:mm:ss format.

    plan String

    Defines what kind of computing resources are allocated for the service. It can be changed after creation, though there are some restrictions when moving to a smaller plan: the new plan must have enough disk space to store all current data, and switching to a plan with fewer nodes might not be supported. The basic plan names are hobbyist, startup-x, business-x and premium-x, where x is (roughly) the amount of memory on each node (other attributes such as the number of CPUs and the amount of disk space also vary, but the naming is based on memory). The available options can be seen on the Aiven pricing page.

    project String

    Identifies the project this resource belongs to. To set up proper dependencies, pass this property as a reference. This property cannot be changed; changing it forces recreation of the resource.

    projectVpcId String

    Specifies the VPC the service should run in. If the value is not set, the service does not run inside a VPC. When set, the value should be given as a reference to set up dependencies correctly, and the VPC must be in the same cloud and region as the service itself. The service can be freely moved into and out of a VPC after creation, but doing so triggers a migration to new servers, so the operation can take a significant amount of time to complete if the service has a lot of data.

    serviceHost String

    The hostname of the service.

    serviceIntegrations List<KafkaServiceIntegration>

    Service integrations to specify when creating a service. Not applied after initial service creation

    serviceName String

    Specifies the actual name of the service. The name cannot be changed later without destroying and re-creating the service, so the name should be picked based on the intended service usage rather than current attributes.

    servicePassword String

    Password used for connecting to the service, if applicable

    servicePort Integer

    The port of the service

    serviceType String

    Aiven internal service type code

    serviceUri String

    URI for connecting to the service. Service specific info is under "kafka", "pg", etc.

    serviceUsername String

    Username used for connecting to the service, if applicable

    state String

    Service state. One of POWEROFF, REBALANCING, REBUILDING or RUNNING

    staticIps List<String>

    Use static public IP addresses.

    tags List<KafkaTag>

    Tags are key-value pairs that allow you to categorize services.

    terminationProtection Boolean

    Prevents the service from being deleted. It is recommended to set this to true for all production services to prevent unintentional service deletion. This does not shield against deleting databases or topics, but for services with backups much of the content can at least be restored from backup in case of accidental deletion.

    additionalDiskSpace string

    Additional disk space. Possible values depend on the service type, the cloud provider and the project. Reducing the disk space will result in the service rebalancing.

    cloudName string

    Defines the cloud provider and region where the service is hosted. This can be changed freely after the service is created, but changing the value triggers a potentially lengthy migration process for the service. The format is the cloud provider name (aws, azure, do, google, upcloud, etc.), a dash, and the cloud provider-specific region name. These are documented in each cloud provider's own support articles, such as here for Google and here for AWS.

    components KafkaComponent[]

    Service component information objects

    defaultAcl boolean

    Create default wildcard Kafka ACL

    diskSpace string

    Service disk space. Possible values depend on the service type, the cloud provider and the project. Reducing the disk space will result in the service rebalancing.

    Deprecated:

    This will be removed in v5.0.0. Please use additional_disk_space to specify the space to be added to the default disk_space defined by the plan.

    diskSpaceCap string

    The maximum disk space of the service; possible values depend on the service type, the cloud provider and the project.

    diskSpaceDefault string

    The default disk space of the service; possible values depend on the service type, the cloud provider and the project. It is also the minimum value for disk_space.

    diskSpaceStep string

    The default disk space step of the service; possible values depend on the service type, the cloud provider and the project. disk_space must be increased from disk_space_default in increments of this size.

    diskSpaceUsed string

    Disk space that the service is currently using

    kafkaUserConfig KafkaKafkaUserConfig

    Kafka user configurable settings

    kafkas KafkaKafka[]

    Kafka broker configuration values.

    karapace boolean

    Switch the service to use Karapace for schema registry and REST proxy

    Deprecated:

    Usage of this field is discouraged.

    maintenanceWindowDow string

    Day of the week when maintenance operations should be performed. One of monday, tuesday, wednesday, etc.

    maintenanceWindowTime string

    Time of day when maintenance operations should be performed. UTC time in HH:mm:ss format.

    plan string

    Defines what kind of computing resources are allocated for the service. It can be changed after creation, though there are some restrictions when moving to a smaller plan: the new plan must have enough disk space to store all current data, and switching to a plan with fewer nodes might not be supported. The basic plan names are hobbyist, startup-x, business-x and premium-x, where x is (roughly) the amount of memory on each node (other attributes such as the number of CPUs and the amount of disk space also vary, but the naming is based on memory). The available options can be seen on the Aiven pricing page.

    project string

    Identifies the project this resource belongs to. To set up proper dependencies, pass this property as a reference. This property cannot be changed; changing it forces recreation of the resource.

    projectVpcId string

    Specifies the VPC the service should run in. If the value is not set, the service does not run inside a VPC. When set, the value should be given as a reference to set up dependencies correctly, and the VPC must be in the same cloud and region as the service itself. The service can be freely moved into and out of a VPC after creation, but doing so triggers a migration to new servers, so the operation can take a significant amount of time to complete if the service has a lot of data.

    serviceHost string

    The hostname of the service.

    serviceIntegrations KafkaServiceIntegration[]

    Service integrations to specify when creating a service. Not applied after initial service creation

    serviceName string

    Specifies the actual name of the service. The name cannot be changed later without destroying and re-creating the service, so the name should be picked based on the intended service usage rather than current attributes.

    servicePassword string

    Password used for connecting to the service, if applicable

    servicePort number

    The port of the service

    serviceType string

    Aiven internal service type code

    serviceUri string

    URI for connecting to the service. Service specific info is under "kafka", "pg", etc.

    serviceUsername string

    Username used for connecting to the service, if applicable

    state string

    Service state. One of POWEROFF, REBALANCING, REBUILDING or RUNNING

    staticIps string[]

    Use static public IP addresses.

    tags KafkaTag[]

    Tags are key-value pairs that allow you to categorize services.

    terminationProtection boolean

    Prevents the service from being deleted. It is recommended to set this to true for all production services to prevent unintentional service deletion. This does not shield against deleting databases or topics, but for services with backups much of the content can at least be restored from backup in case of accidental deletion.

    additional_disk_space str

    Additional disk space. Possible values depend on the service type, the cloud provider and the project. Reducing the disk space will result in the service rebalancing.

    cloud_name str

    Defines the cloud provider and region where the service is hosted. This can be changed freely after the service is created, but changing the value triggers a potentially lengthy migration process for the service. The format is the cloud provider name (aws, azure, do, google, upcloud, etc.), a dash, and the cloud provider-specific region name. These are documented in each cloud provider's own support articles, such as here for Google and here for AWS.

    components Sequence[KafkaComponentArgs]

    Service component information objects

    default_acl bool

    Create default wildcard Kafka ACL

    disk_space str

    Service disk space. Possible values depend on the service type, the cloud provider and the project. Reducing the disk space will result in the service rebalancing.

    Deprecated:

    This will be removed in v5.0.0. Please use additional_disk_space to specify the space to be added to the default disk_space defined by the plan.

    disk_space_cap str

    The maximum disk space of the service; possible values depend on the service type, the cloud provider and the project.

    disk_space_default str

    The default disk space of the service; possible values depend on the service type, the cloud provider and the project. It is also the minimum value for disk_space.

    disk_space_step str

    The default disk space step of the service; possible values depend on the service type, the cloud provider and the project. disk_space must be increased from disk_space_default in increments of this size.

    disk_space_used str

    Disk space that the service is currently using

    kafka_user_config KafkaKafkaUserConfigArgs

    Kafka user configurable settings

    kafkas Sequence[KafkaKafkaArgs]

    Kafka broker configuration values.

    karapace bool

    Switch the service to use Karapace for schema registry and REST proxy

    Deprecated:

    Usage of this field is discouraged.

    maintenance_window_dow str

    Day of the week when maintenance operations should be performed. One of monday, tuesday, wednesday, etc.

    maintenance_window_time str

    Time of day when maintenance operations should be performed. UTC time in HH:mm:ss format.

    plan str

    Defines what kind of computing resources are allocated for the service. It can be changed after creation, though there are some restrictions when moving to a smaller plan: the new plan must have enough disk space to store all current data, and switching to a plan with fewer nodes might not be supported. The basic plan names are hobbyist, startup-x, business-x and premium-x, where x is (roughly) the amount of memory on each node (other attributes such as the number of CPUs and the amount of disk space also vary, but the naming is based on memory). The available options can be seen on the Aiven pricing page.

    project str

    Identifies the project this resource belongs to. To set up proper dependencies, pass this property as a reference. This property cannot be changed; changing it forces recreation of the resource.

    project_vpc_id str

    Specifies the VPC the service should run in. If the value is not set, the service does not run inside a VPC. When set, the value should be given as a reference to set up dependencies correctly, and the VPC must be in the same cloud and region as the service itself. The service can be freely moved into and out of a VPC after creation, but doing so triggers a migration to new servers, so the operation can take a significant amount of time to complete if the service has a lot of data.

    service_host str

    The hostname of the service.

    service_integrations Sequence[KafkaServiceIntegrationArgs]

    Service integrations to specify when creating a service. Not applied after initial service creation

    service_name str

    Specifies the actual name of the service. The name cannot be changed later without destroying and re-creating the service, so the name should be picked based on the intended service usage rather than current attributes.

    service_password str

    Password used for connecting to the service, if applicable

    service_port int

    The port of the service

    service_type str

    Aiven internal service type code

    service_uri str

    URI for connecting to the service. Service specific info is under "kafka", "pg", etc.

    service_username str

    Username used for connecting to the service, if applicable

    state str

    Service state. One of POWEROFF, REBALANCING, REBUILDING or RUNNING

    static_ips Sequence[str]

    Use static public IP addresses.

    tags Sequence[KafkaTagArgs]

    Tags are key-value pairs that allow you to categorize services.

    termination_protection bool

    Prevents the service from being deleted. It is recommended to set this to true for all production services to prevent unintentional service deletion. This does not shield against deleting databases or topics, but for services with backups much of the content can at least be restored from backup in case of accidental deletion.

    additionalDiskSpace String

    Additional disk space. Possible values depend on the service type, the cloud provider and the project. Reducing the disk space will result in the service rebalancing.

    cloudName String

    Defines the cloud provider and region where the service is hosted. This can be changed freely after the service is created, but changing the value triggers a potentially lengthy migration process for the service. The format is the cloud provider name (aws, azure, do, google, upcloud, etc.), a dash, and the cloud provider-specific region name. These are documented in each cloud provider's own support articles, such as here for Google and here for AWS.

    components List<Property Map>

    Service component information objects

    defaultAcl Boolean

    Create default wildcard Kafka ACL

    diskSpace String

    Service disk space. Possible values depend on the service type, the cloud provider and the project. Reducing the disk space will result in the service rebalancing.

    Deprecated:

    This will be removed in v5.0.0. Please use additional_disk_space to specify the space to be added to the default disk_space defined by the plan.

    diskSpaceCap String

    The maximum disk space of the service; possible values depend on the service type, the cloud provider and the project.

    diskSpaceDefault String

    The default disk space of the service; possible values depend on the service type, the cloud provider and the project. It is also the minimum value for disk_space.

    diskSpaceStep String

    The default disk space step of the service; possible values depend on the service type, the cloud provider and the project. disk_space must be increased from disk_space_default in increments of this size.

    diskSpaceUsed String

    Disk space that the service is currently using

    kafkaUserConfig Property Map

    Kafka user configurable settings

    kafkas List<Property Map>

    Kafka broker configuration values.

    karapace Boolean

    Switch the service to use Karapace for schema registry and REST proxy

    Deprecated:

    Usage of this field is discouraged.

    maintenanceWindowDow String

    Day of the week when maintenance operations should be performed. One of monday, tuesday, wednesday, etc.

    maintenanceWindowTime String

    Time of day when maintenance operations should be performed. UTC time in HH:mm:ss format.

    plan String

    Defines what kind of computing resources are allocated for the service. It can be changed after creation, though there are some restrictions when moving to a smaller plan: the new plan must have enough disk space to store all current data, and switching to a plan with fewer nodes might not be supported. The basic plan names are hobbyist, startup-x, business-x and premium-x, where x is (roughly) the amount of memory on each node (other attributes such as the number of CPUs and the amount of disk space also vary, but the naming is based on memory). The available options can be seen on the Aiven pricing page.

    project String

    Identifies the project this resource belongs to. To set up proper dependencies, pass this property as a reference. This property cannot be changed; changing it forces recreation of the resource.

    projectVpcId String

    Specifies the VPC the service should run in. If the value is not set, the service does not run inside a VPC. When set, the value should be given as a reference to set up dependencies correctly, and the VPC must be in the same cloud and region as the service itself. The service can be freely moved into and out of a VPC after creation, but doing so triggers a migration to new servers, so the operation can take a significant amount of time to complete if the service has a lot of data.

    serviceHost String

    The hostname of the service.

    serviceIntegrations List<Property Map>

    Service integrations to specify when creating a service. Not applied after initial service creation

    serviceName String

    Specifies the actual name of the service. The name cannot be changed later without destroying and re-creating the service, so the name should be picked based on the intended service usage rather than current attributes.

    servicePassword String

    Password used for connecting to the service, if applicable

    servicePort Number

    The port of the service

    serviceType String

    Aiven internal service type code

    serviceUri String

    URI for connecting to the service. Service specific info is under "kafka", "pg", etc.

    serviceUsername String

    Username used for connecting to the service, if applicable

    state String

    Service state. One of POWEROFF, REBALANCING, REBUILDING or RUNNING

    staticIps List<String>

    Use static public IP addresses.

    tags List<Property Map>

    Tags are key-value pairs that allow you to categorize services.

    terminationProtection Boolean

    Prevents the service from being deleted. It is recommended to set this to true for all production services to prevent unintentional service deletion. This does not shield against deleting databases or topics, but for services with backups much of the content can at least be restored from backup in case of accidental deletion.

    Supporting Types

    KafkaComponent, KafkaComponentArgs

    Component string
    Host string
    KafkaAuthenticationMethod string
    Port int
    Route string
    Ssl bool
    Usage string
    Component string
    Host string
    KafkaAuthenticationMethod string
    Port int
    Route string
    Ssl bool
    Usage string
    component String
    host String
    kafkaAuthenticationMethod String
    port Integer
    route String
    ssl Boolean
    usage String
    component string
    host string
    kafkaAuthenticationMethod string
    port number
    route string
    ssl boolean
    usage string
    component String
    host String
    kafkaAuthenticationMethod String
    port Number
    route String
    ssl Boolean
    usage String

    KafkaKafka, KafkaKafkaArgs

    KafkaKafkaUserConfig, KafkaKafkaUserConfigArgs

    AdditionalBackupRegions string

    Additional Cloud Regions for Backup Replication.

    CustomDomain string

    Serve the web frontend using a custom CNAME pointing to the Aiven DNS name.

    IpFilterObjects List<KafkaKafkaUserConfigIpFilterObject>

    Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.

    IpFilterStrings List<string>

    Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.

    IpFilters List<string>

    Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.

    Deprecated:

    This will be removed in v5.0.0 and replaced with ip_filter_string instead.
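
    A hedged sketch of the non-deprecated ip_filter_object form in Go (the CIDR block and names are placeholders; the Array wrapper type follows the usual Pulumi naming convention):

    package main

    import (
        "github.com/pulumi/pulumi-aiven/sdk/v6/go/aiven"
        "github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )

    func main() {
        pulumi.Run(func(ctx *pulumi.Context) error {
            _, err := aiven.NewKafka(ctx, "kafkaFiltered", &aiven.KafkaArgs{
                Project:     pulumi.String("my-project"), // placeholder
                CloudName:   pulumi.String("google-europe-west1"),
                Plan:        pulumi.String("business-4"),
                ServiceName: pulumi.String("my-kafka"),
                KafkaUserConfig: &aiven.KafkaKafkaUserConfigArgs{
                    // Only this CIDR block may connect; other sources are rejected.
                    IpFilterObjects: aiven.KafkaKafkaUserConfigIpFilterObjectArray{
                        &aiven.KafkaKafkaUserConfigIpFilterObjectArgs{
                            Network:     pulumi.String("10.20.0.0/16"),
                            Description: pulumi.String("office network"),
                        },
                    },
                },
            })
            return err
        })
    }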

    Kafka KafkaKafkaUserConfigKafka

    Kafka broker configuration values.

    KafkaAuthenticationMethods KafkaKafkaUserConfigKafkaAuthenticationMethods

    Kafka authentication methods.

    KafkaConnect bool

    Enable Kafka Connect service. The default value is false.

    KafkaConnectConfig KafkaKafkaUserConfigKafkaConnectConfig

    Kafka Connect configuration values.

    KafkaRest bool

    Enable Kafka-REST service. The default value is false.

    KafkaRestAuthorization bool

    Enable authorization in Kafka-REST service.

    KafkaRestConfig KafkaKafkaUserConfigKafkaRestConfig

    Kafka REST configuration.

    KafkaVersion string

    Kafka major version.

    PrivateAccess KafkaKafkaUserConfigPrivateAccess

    Allow access to selected service ports from private networks.

    PrivatelinkAccess KafkaKafkaUserConfigPrivatelinkAccess

    Allow access to selected service components through Privatelink.

    PublicAccess KafkaKafkaUserConfigPublicAccess

    Allow access to selected service ports from the public Internet.

    SchemaRegistry bool

    Enable Schema-Registry service. The default value is false.

    SchemaRegistryConfig KafkaKafkaUserConfigSchemaRegistryConfig

    Schema Registry configuration.

    StaticIps bool

    Use static public IP addresses.

    AdditionalBackupRegions string

    Additional Cloud Regions for Backup Replication.

    CustomDomain string

    Serve the web frontend using a custom CNAME pointing to the Aiven DNS name.

    IpFilterObjects []KafkaKafkaUserConfigIpFilterObject

    Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.

    IpFilterStrings []string

    Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.

    IpFilters []string

    Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.

    Deprecated:

    This will be removed in v5.0.0 and replaced with ip_filter_string instead.

    Kafka KafkaKafkaUserConfigKafka

    Kafka broker configuration values.

    KafkaAuthenticationMethods KafkaKafkaUserConfigKafkaAuthenticationMethods

    Kafka authentication methods.

    KafkaConnect bool

    Enable Kafka Connect service. The default value is false.

    KafkaConnectConfig KafkaKafkaUserConfigKafkaConnectConfig

    Kafka Connect configuration values.

    KafkaRest bool

    Enable Kafka-REST service. The default value is false.

    KafkaRestAuthorization bool

    Enable authorization in Kafka-REST service.

    KafkaRestConfig KafkaKafkaUserConfigKafkaRestConfig

    Kafka REST configuration.

    KafkaVersion string

    Kafka major version.

    PrivateAccess KafkaKafkaUserConfigPrivateAccess

    Allow access to selected service ports from private networks.

    PrivatelinkAccess KafkaKafkaUserConfigPrivatelinkAccess

    Allow access to selected service components through Privatelink.

    PublicAccess KafkaKafkaUserConfigPublicAccess

    Allow access to selected service ports from the public Internet.

    SchemaRegistry bool

    Enable Schema-Registry service. The default value is false.

    SchemaRegistryConfig KafkaKafkaUserConfigSchemaRegistryConfig

    Schema Registry configuration.

    StaticIps bool

    Use static public IP addresses.

    additionalBackupRegions String

    Additional Cloud Regions for Backup Replication.

    customDomain String

    Serve the web frontend using a custom CNAME pointing to the Aiven DNS name.

    ipFilterObjects List<KafkaKafkaUserConfigIpFilterObject>

    Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.

    ipFilterStrings List<String>

    Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.

    ipFilters List<String>

    Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.

    Deprecated:

    This will be removed in v5.0.0 and replaced with ip_filter_string instead.

    kafka KafkaKafkaUserConfigKafka

    Kafka broker configuration values.

    kafkaAuthenticationMethods KafkaKafkaUserConfigKafkaAuthenticationMethods

    Kafka authentication methods.

    kafkaConnect Boolean

    Enable Kafka Connect service. The default value is false.

    kafkaConnectConfig KafkaKafkaUserConfigKafkaConnectConfig

    Kafka Connect configuration values.

    kafkaRest Boolean

    Enable Kafka-REST service. The default value is false.

    kafkaRestAuthorization Boolean

    Enable authorization in Kafka-REST service.

    kafkaRestConfig KafkaKafkaUserConfigKafkaRestConfig

    Kafka REST configuration.

    kafkaVersion String

    Kafka major version.

    privateAccess KafkaKafkaUserConfigPrivateAccess

    Allow access to selected service ports from private networks.

    privatelinkAccess KafkaKafkaUserConfigPrivatelinkAccess

    Allow access to selected service components through Privatelink.

    publicAccess KafkaKafkaUserConfigPublicAccess

    Allow access to selected service ports from the public Internet.

    schemaRegistry Boolean

    Enable Schema-Registry service. The default value is false.

    schemaRegistryConfig KafkaKafkaUserConfigSchemaRegistryConfig

    Schema Registry configuration.

    staticIps Boolean

    Use static public IP addresses.

    additionalBackupRegions string

    Additional Cloud Regions for Backup Replication.

    customDomain string

    Serve the web frontend using a custom CNAME pointing to the Aiven DNS name.

    ipFilterObjects KafkaKafkaUserConfigIpFilterObject[]

    Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.

    ipFilterStrings string[]

    Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.

    ipFilters string[]

    Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.

    Deprecated:

    This will be removed in v5.0.0 and replaced with ip_filter_string instead.

    kafka KafkaKafkaUserConfigKafka

    Kafka broker configuration values.

    kafkaAuthenticationMethods KafkaKafkaUserConfigKafkaAuthenticationMethods

    Kafka authentication methods.

    kafkaConnect boolean

    Enable Kafka Connect service. The default value is false.

    kafkaConnectConfig KafkaKafkaUserConfigKafkaConnectConfig

    Kafka Connect configuration values.

    kafkaRest boolean

    Enable Kafka-REST service. The default value is false.

    kafkaRestAuthorization boolean

    Enable authorization in Kafka-REST service.

    kafkaRestConfig KafkaKafkaUserConfigKafkaRestConfig

    Kafka REST configuration.

    kafkaVersion string

    Kafka major version.

    privateAccess KafkaKafkaUserConfigPrivateAccess

    Allow access to selected service ports from private networks.

    privatelinkAccess KafkaKafkaUserConfigPrivatelinkAccess

    Allow access to selected service components through Privatelink.

    publicAccess KafkaKafkaUserConfigPublicAccess

    Allow access to selected service ports from the public Internet.

    schemaRegistry boolean

    Enable Schema-Registry service. The default value is false.

    schemaRegistryConfig KafkaKafkaUserConfigSchemaRegistryConfig

    Schema Registry configuration.

    staticIps boolean

    Use static public IP addresses.

    additional_backup_regions str

    Additional Cloud Regions for Backup Replication.

    custom_domain str

    Serve the web frontend using a custom CNAME pointing to the Aiven DNS name.

    ip_filter_objects Sequence[KafkaKafkaUserConfigIpFilterObject]

    Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.

    ip_filter_strings Sequence[str]

    Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.

    ip_filters Sequence[str]

    Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.

    Deprecated:

    This will be removed in v5.0.0 and replaced with ip_filter_string instead.

    kafka KafkaKafkaUserConfigKafka

    Kafka broker configuration values.

    kafka_authentication_methods KafkaKafkaUserConfigKafkaAuthenticationMethods

    Kafka authentication methods.

    kafka_connect bool

    Enable Kafka Connect service. The default value is false.

    kafka_connect_config KafkaKafkaUserConfigKafkaConnectConfig

    Kafka Connect configuration values.

    kafka_rest bool

    Enable Kafka-REST service. The default value is false.

    kafka_rest_authorization bool

    Enable authorization in Kafka-REST service.

    kafka_rest_config KafkaKafkaUserConfigKafkaRestConfig

    Kafka REST configuration.

    kafka_version str

    Kafka major version.

    private_access KafkaKafkaUserConfigPrivateAccess

    Allow access to selected service ports from private networks.

    privatelink_access KafkaKafkaUserConfigPrivatelinkAccess

    Allow access to selected service components through Privatelink.

    public_access KafkaKafkaUserConfigPublicAccess

    Allow access to selected service ports from the public Internet.

    schema_registry bool

    Enable Schema-Registry service. The default value is false.

    schema_registry_config KafkaKafkaUserConfigSchemaRegistryConfig

    Schema Registry configuration.

    static_ips bool

    Use static public IP addresses.

    additionalBackupRegions String

    Additional Cloud Regions for Backup Replication.

    customDomain String

    Serve the web frontend using a custom CNAME pointing to the Aiven DNS name.

    ipFilterObjects List<Property Map>

    Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.

    ipFilterStrings List<String>

    Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.

    ipFilters List<String>

    Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.

    Deprecated:

    This will be removed in v5.0.0 and replaced with ip_filter_string instead.

    kafka Property Map

    Kafka broker configuration values.

    kafkaAuthenticationMethods Property Map

    Kafka authentication methods.

    kafkaConnect Boolean

    Enable Kafka Connect service. The default value is false.

    kafkaConnectConfig Property Map

    Kafka Connect configuration values.

    kafkaRest Boolean

    Enable Kafka-REST service. The default value is false.

    kafkaRestAuthorization Boolean

    Enable authorization in Kafka-REST service.

    kafkaRestConfig Property Map

    Kafka REST configuration.

    kafkaVersion String

    Kafka major version.

    privateAccess Property Map

    Allow access to selected service ports from private networks.

    privatelinkAccess Property Map

    Allow access to selected service components through Privatelink.

    publicAccess Property Map

    Allow access to selected service ports from the public Internet.

    schemaRegistry Boolean

    Enable Schema-Registry service. The default value is false.

    schemaRegistryConfig Property Map

    Schema Registry configuration.

    staticIps Boolean

    Use static public IP addresses.

    KafkaKafkaUserConfigIpFilterObject, KafkaKafkaUserConfigIpFilterObjectArgs

    Network string

    CIDR address block.

    Description string

    Description for IP filter list entry.

    Network string

    CIDR address block.

    Description string

    Description for IP filter list entry.

    network String

    CIDR address block.

    description String

    Description for IP filter list entry.

    network string

    CIDR address block.

    description string

    Description for IP filter list entry.

    network str

    CIDR address block.

    description str

    Description for IP filter list entry.

    network String

    CIDR address block.

    description String

    Description for IP filter list entry.

    KafkaKafkaUserConfigKafka, KafkaKafkaUserConfigKafkaArgs

    AutoCreateTopicsEnable bool

    Enable auto creation of topics.

    CompressionType string

    Specify the final compression type for a given topic. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'uncompressed' which is equivalent to no compression; and 'producer' which means retain the original compression codec set by the producer.

    ConnectionsMaxIdleMs int

    Idle connections timeout: the server socket processor threads close the connections that idle for longer than this.

    DefaultReplicationFactor int

    Replication factor for autocreated topics.

    GroupInitialRebalanceDelayMs int

    The amount of time, in milliseconds, the group coordinator will wait for more consumers to join a new group before performing the first rebalance. A longer delay means potentially fewer rebalances, but increases the time until processing begins. The default value for this is 3 seconds. During development and testing it might be desirable to set this to 0 in order to not delay test execution time.
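
    For instance, a test environment might zero the delay so consumer groups start immediately. A sketch using the same placeholder project and service names as the earlier examples:

    package main

    import (
        "github.com/pulumi/pulumi-aiven/sdk/v6/go/aiven"
        "github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )

    func main() {
        pulumi.Run(func(ctx *pulumi.Context) error {
            _, err := aiven.NewKafka(ctx, "kafkaTest", &aiven.KafkaArgs{
                Project:     pulumi.String("my-project"), // placeholder
                CloudName:   pulumi.String("google-europe-west1"),
                Plan:        pulumi.String("startup-2"),
                ServiceName: pulumi.String("my-kafka-test"),
                KafkaUserConfig: &aiven.KafkaKafkaUserConfigArgs{
                    Kafka: &aiven.KafkaKafkaUserConfigKafkaArgs{
                        // Skip the default 3 s delay so integration tests do not
                        // wait for the first consumer group rebalance.
                        GroupInitialRebalanceDelayMs: pulumi.Int(0),
                    },
                },
            })
            return err
        })
    }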

    GroupMaxSessionTimeoutMs int

    The maximum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures.

    GroupMinSessionTimeoutMs int

    The minimum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures.

    LogCleanerDeleteRetentionMs int

    How long delete records are retained.

    LogCleanerMaxCompactionLagMs int

    The maximum amount of time a message will remain uncompacted. Only applicable for logs that are being compacted.

    LogCleanerMinCleanableRatio double

    Controls log compactor frequency. Larger value means more frequent compactions but also more space wasted for logs. Consider setting log.cleaner.max.compaction.lag.ms to enforce compactions sooner, instead of setting a very high value for this option.

    LogCleanerMinCompactionLagMs int

    The minimum time a message will remain uncompacted in the log. Only applicable for logs that are being compacted.

    LogCleanupPolicy string

    The default cleanup policy for segments beyond the retention window.

    LogFlushIntervalMessages int

    The number of messages accumulated on a log partition before messages are flushed to disk.

    LogFlushIntervalMs int

    The maximum time in ms that a message in any topic is kept in memory before being flushed to disk. If not set, the value in log.flush.scheduler.interval.ms is used.

    LogIndexIntervalBytes int

    The interval with which Kafka adds an entry to the offset index.

    LogIndexSizeMaxBytes int

    The maximum size in bytes of the offset index.

    LogMessageDownconversionEnable bool

    This configuration controls whether down-conversion of message formats is enabled to satisfy consume requests.

    LogMessageTimestampDifferenceMaxMs int

    The maximum difference allowed between the timestamp when a broker receives a message and the timestamp specified in the message.

    LogMessageTimestampType string

    Define whether the timestamp in the message is message create time or log append time.

    LogPreallocate bool

    Whether to preallocate the file when creating a new segment.

    LogRetentionBytes int

    The maximum size of the log before deleting messages.

    LogRetentionHours int

    The number of hours to keep a log file before deleting it.

    LogRetentionMs int

    The number of milliseconds to keep a log file before deleting it. If not set, the value in log.retention.minutes is used. If set to -1, no time limit is applied.
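
    As a quick arithmetic check, seven days is 7 * 24 * 60 * 60 * 1000 = 604800000 milliseconds. A hedged sketch combining this time bound with the size bound from log_retention_bytes (the values are illustrative, not recommendations):

    package main

    import (
        "github.com/pulumi/pulumi-aiven/sdk/v6/go/aiven"
        "github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )

    func main() {
        pulumi.Run(func(ctx *pulumi.Context) error {
            _, err := aiven.NewKafka(ctx, "kafkaRetention", &aiven.KafkaArgs{
                Project:     pulumi.String("my-project"), // placeholder
                CloudName:   pulumi.String("google-europe-west1"),
                Plan:        pulumi.String("business-4"),
                ServiceName: pulumi.String("my-kafka"),
                KafkaUserConfig: &aiven.KafkaKafkaUserConfigArgs{
                    Kafka: &aiven.KafkaKafkaUserConfigKafkaArgs{
                        LogRetentionMs:    pulumi.Int(604800000),  // 7 days, in milliseconds
                        LogRetentionBytes: pulumi.Int(1000000000), // size cap before old segments are deleted
                    },
                },
            })
            return err
        })
    }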

    LogRollJitterMs int

    The maximum jitter to subtract from logRollTimeMillis (in milliseconds). If not set, the value in log.roll.jitter.hours is used.

    LogRollMs int

    The maximum time before a new log segment is rolled out (in milliseconds).

    LogSegmentBytes int

    The maximum size of a single log file.

    LogSegmentDeleteDelayMs int

    The amount of time to wait before deleting a file from the filesystem.

    MaxConnectionsPerIp int

    The maximum number of connections allowed from each IP address (defaults to 2147483647).

    MaxIncrementalFetchSessionCacheSlots int

    The maximum number of incremental fetch sessions that the broker will maintain.

    MessageMaxBytes int

    The maximum size of a message that the server can receive.

    MinInsyncReplicas int

    When a producer sets acks to 'all' (or '-1'), min.insync.replicas specifies the minimum number of replicas that must acknowledge a write for the write to be considered successful.
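
    As a worked example: with a replication factor of 3 and min.insync.replicas of 2, an acks=all producer gets its acknowledgement once two replicas have the write, and the partition keeps accepting writes with one replica down. A sketch with the same placeholder names as above:

    package main

    import (
        "github.com/pulumi/pulumi-aiven/sdk/v6/go/aiven"
        "github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )

    func main() {
        pulumi.Run(func(ctx *pulumi.Context) error {
            _, err := aiven.NewKafka(ctx, "kafkaDurable", &aiven.KafkaArgs{
                Project:     pulumi.String("my-project"), // placeholder
                CloudName:   pulumi.String("google-europe-west1"),
                Plan:        pulumi.String("business-4"),
                ServiceName: pulumi.String("my-kafka"),
                KafkaUserConfig: &aiven.KafkaKafkaUserConfigArgs{
                    Kafka: &aiven.KafkaKafkaUserConfigKafkaArgs{
                        // Three copies of each partition; acks=all writes are
                        // acknowledged once two of them have the record.
                        DefaultReplicationFactor: pulumi.Int(3),
                        MinInsyncReplicas:        pulumi.Int(2),
                    },
                },
            })
            return err
        })
    }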

    NumPartitions int

    Number of partitions for autocreated topics.

    OffsetsRetentionMinutes int

    Log retention window in minutes for offsets topic.

    ProducerPurgatoryPurgeIntervalRequests int

    The purge interval (in number of requests) of the producer request purgatory (defaults to 1000).

    ReplicaFetchMaxBytes int

    The number of bytes of messages to attempt to fetch for each partition (defaults to 1048576). This is not an absolute maximum; if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that progress can be made.

    ReplicaFetchResponseMaxBytes int

    Maximum bytes expected for the entire fetch response (defaults to 10485760). Records are fetched in batches, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that progress can be made. As such, this is not an absolute maximum.

    SocketRequestMaxBytes int

    The maximum number of bytes in a socket request (defaults to 104857600).

    TransactionRemoveExpiredTransactionCleanupIntervalMs int

    The interval at which to remove transactions that have expired due to transactional.id.expiration.ms passing (defaults to 3600000 (1 hour)).

    TransactionStateLogSegmentBytes int

    The transaction topic segment bytes should be kept relatively small in order to facilitate faster log compaction and cache loads (defaults to 104857600 (100 mebibytes)).

    AutoCreateTopicsEnable bool

    Enable auto creation of topics.

    CompressionType string

    Specify the final compression type for a given topic. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'uncompressed' which is equivalent to no compression; and 'producer' which means retain the original compression codec set by the producer.

    ConnectionsMaxIdleMs int

    Idle connections timeout: the server socket processor threads close the connections that idle for longer than this.

    DefaultReplicationFactor int

    Replication factor for autocreated topics.

    GroupInitialRebalanceDelayMs int

    The amount of time, in milliseconds, the group coordinator will wait for more consumers to join a new group before performing the first rebalance. A longer delay means potentially fewer rebalances, but increases the time until processing begins. The default value for this is 3 seconds. During development and testing it might be desirable to set this to 0 in order to not delay test execution time.

    GroupMaxSessionTimeoutMs int

    The maximum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures.

    GroupMinSessionTimeoutMs int

    The minimum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures.

    LogCleanerDeleteRetentionMs int

    How long delete records are retained.

    LogCleanerMaxCompactionLagMs int

    The maximum amount of time a message will remain uncompacted. Only applicable for logs that are being compacted.

    LogCleanerMinCleanableRatio float64

    Controls log compactor frequency. Larger value means more frequent compactions but also more space wasted for logs. Consider setting log.cleaner.max.compaction.lag.ms to enforce compactions sooner, instead of setting a very high value for this option.

    LogCleanerMinCompactionLagMs int

    The minimum time a message will remain uncompacted in the log. Only applicable for logs that are being compacted.

    LogCleanupPolicy string

    The default cleanup policy for segments beyond the retention window.

    LogFlushIntervalMessages int

    The number of messages accumulated on a log partition before messages are flushed to disk.

    LogFlushIntervalMs int

    The maximum time in ms that a message in any topic is kept in memory before being flushed to disk. If not set, the value in log.flush.scheduler.interval.ms is used.

    LogIndexIntervalBytes int

    The interval with which Kafka adds an entry to the offset index.

    LogIndexSizeMaxBytes int

    The maximum size in bytes of the offset index.

    LogMessageDownconversionEnable bool

    This configuration controls whether down-conversion of message formats is enabled to satisfy consume requests.

    LogMessageTimestampDifferenceMaxMs int

    The maximum difference allowed between the timestamp when a broker receives a message and the timestamp specified in the message.

    LogMessageTimestampType string

    Define whether the timestamp in the message is message create time or log append time.

    LogPreallocate bool

    Whether to preallocate the file when creating a new segment.

    LogRetentionBytes int

    The maximum size of the log before deleting messages.

    LogRetentionHours int

    The number of hours to keep a log file before deleting it.

    LogRetentionMs int

    The number of milliseconds to keep a log file before deleting it. If not set, the value in log.retention.minutes is used. If set to -1, no time limit is applied.

    LogRollJitterMs int

    The maximum jitter to subtract from logRollTimeMillis (in milliseconds). If not set, the value in log.roll.jitter.hours is used.

    LogRollMs int

    The maximum time before a new log segment is rolled out (in milliseconds).

    LogSegmentBytes int

    The maximum size of a single log file.

    LogSegmentDeleteDelayMs int

    The amount of time to wait before deleting a file from the filesystem.

    MaxConnectionsPerIp int

    The maximum number of connections allowed from each IP address (defaults to 2147483647).

    MaxIncrementalFetchSessionCacheSlots int

    The maximum number of incremental fetch sessions that the broker will maintain.

    MessageMaxBytes int

    The maximum size of a message that the server can receive.

    MinInsyncReplicas int

    When a producer sets acks to 'all' (or '-1'), min.insync.replicas specifies the minimum number of replicas that must acknowledge a write for the write to be considered successful.

    NumPartitions int

    Number of partitions for autocreated topics.

    OffsetsRetentionMinutes int

    Log retention window in minutes for offsets topic.

    ProducerPurgatoryPurgeIntervalRequests int

    The purge interval (in number of requests) of the producer request purgatory (defaults to 1000).

    ReplicaFetchMaxBytes int

    The number of bytes of messages to attempt to fetch for each partition (defaults to 1048576). This is not an absolute maximum; if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that progress can be made.

    ReplicaFetchResponseMaxBytes int

    Maximum bytes expected for the entire fetch response (defaults to 10485760). Records are fetched in batches, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that progress can be made. As such, this is not an absolute maximum.

    SocketRequestMaxBytes int

    The maximum number of bytes in a socket request (defaults to 104857600).

    TransactionRemoveExpiredTransactionCleanupIntervalMs int

    The interval at which to remove transactions that have expired due to transactional.id.expiration.ms passing (defaults to 3600000 (1 hour)).

    TransactionStateLogSegmentBytes int

    The transaction topic segment bytes should be kept relatively small in order to facilitate faster log compaction and cache loads (defaults to 104857600 (100 mebibytes)).

    autoCreateTopicsEnable Boolean

    Enable auto creation of topics.

    compressionType String

    Specify the final compression type for a given topic. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'uncompressed' which is equivalent to no compression; and 'producer' which means retain the original compression codec set by the producer.

    connectionsMaxIdleMs Integer

    Idle connections timeout: the server socket processor threads close the connections that idle for longer than this.

    defaultReplicationFactor Integer

    Replication factor for autocreated topics.

    groupInitialRebalanceDelayMs Integer

    The amount of time, in milliseconds, the group coordinator will wait for more consumers to join a new group before performing the first rebalance. A longer delay means potentially fewer rebalances, but increases the time until processing begins. The default value for this is 3 seconds. During development and testing it might be desirable to set this to 0 in order to not delay test execution time.

    groupMaxSessionTimeoutMs Integer

    The maximum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures.

    groupMinSessionTimeoutMs Integer

    The minimum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures.

    logCleanerDeleteRetentionMs Integer

    How long delete records are retained.

    logCleanerMaxCompactionLagMs Integer

    The maximum amount of time a message will remain uncompacted. Only applicable for logs that are being compacted.

    logCleanerMinCleanableRatio Double

    Controls log compactor frequency. Larger value means more frequent compactions but also more space wasted for logs. Consider setting log.cleaner.max.compaction.lag.ms to enforce compactions sooner, instead of setting a very high value for this option.

    logCleanerMinCompactionLagMs Integer

    The minimum time a message will remain uncompacted in the log. Only applicable for logs that are being compacted.

    logCleanupPolicy String

    The default cleanup policy for segments beyond the retention window.

    logFlushIntervalMessages Integer

    The number of messages accumulated on a log partition before messages are flushed to disk.

    logFlushIntervalMs Integer

    The maximum time in ms that a message in any topic is kept in memory before being flushed to disk. If not set, the value in log.flush.scheduler.interval.ms is used.

    logIndexIntervalBytes Integer

    The interval with which Kafka adds an entry to the offset index.

    logIndexSizeMaxBytes Integer

    The maximum size in bytes of the offset index.

    logMessageDownconversionEnable Boolean

    This configuration controls whether down-conversion of message formats is enabled to satisfy consume requests.

    logMessageTimestampDifferenceMaxMs Integer

    The maximum difference allowed between the timestamp when a broker receives a message and the timestamp specified in the message.

    logMessageTimestampType String

    Define whether the timestamp in the message is message create time or log append time.

    logPreallocate Boolean

    Whether to preallocate the file when creating a new segment.

    logRetentionBytes Integer

    The maximum size of the log before deleting messages.

    logRetentionHours Integer

    The number of hours to keep a log file before deleting it.

    logRetentionMs Integer

    The number of milliseconds to keep a log file before deleting it. If not set, the value in log.retention.minutes is used. If set to -1, no time limit is applied.

    logRollJitterMs Integer

    The maximum jitter to subtract from logRollTimeMillis (in milliseconds). If not set, the value in log.roll.jitter.hours is used.

    logRollMs Integer

    The maximum time before a new log segment is rolled out (in milliseconds).

    logSegmentBytes Integer

    The maximum size of a single log file.

    logSegmentDeleteDelayMs Integer

    The amount of time to wait before deleting a file from the filesystem.

    maxConnectionsPerIp Integer

    The maximum number of connections allowed from each IP address (defaults to 2147483647).

    maxIncrementalFetchSessionCacheSlots Integer

    The maximum number of incremental fetch sessions that the broker will maintain.

    messageMaxBytes Integer

    The maximum size of a message that the server can receive.

    minInsyncReplicas Integer

    When a producer sets acks to 'all' (or '-1'), min.insync.replicas specifies the minimum number of replicas that must acknowledge a write for the write to be considered successful.

    numPartitions Integer

    Number of partitions for autocreated topics.

    offsetsRetentionMinutes Integer

    Log retention window in minutes for offsets topic.

    producerPurgatoryPurgeIntervalRequests Integer

    The purge interval (in number of requests) of the producer request purgatory (defaults to 1000).

    replicaFetchMaxBytes Integer

    The number of bytes of messages to attempt to fetch for each partition (defaults to 1048576). This is not an absolute maximum; if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that progress can be made.

    replicaFetchResponseMaxBytes Integer

    Maximum bytes expected for the entire fetch response (defaults to 10485760). Records are fetched in batches, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that progress can be made. As such, this is not an absolute maximum.

    socketRequestMaxBytes Integer

    The maximum number of bytes in a socket request (defaults to 104857600).

    transactionRemoveExpiredTransactionCleanupIntervalMs Integer

    The interval at which to remove transactions that have expired due to transactional.id.expiration.ms passing (defaults to 3600000 (1 hour)).

    transactionStateLogSegmentBytes Integer

    The transaction topic segment bytes should be kept relatively small in order to facilitate faster log compaction and cache loads (defaults to 104857600 (100 mebibytes)).

    autoCreateTopicsEnable boolean

    Enable auto creation of topics.

    compressionType string

    Specify the final compression type for a given topic. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'uncompressed' which is equivalent to no compression; and 'producer' which means retain the original compression codec set by the producer.

    connectionsMaxIdleMs number

    Idle connections timeout: the server socket processor threads close the connections that idle for longer than this.

    defaultReplicationFactor number

    Replication factor for autocreated topics.

    groupInitialRebalanceDelayMs number

    The amount of time, in milliseconds, the group coordinator will wait for more consumers to join a new group before performing the first rebalance. A longer delay means potentially fewer rebalances, but increases the time until processing begins. The default value for this is 3 seconds. During development and testing it might be desirable to set this to 0 in order to not delay test execution time.

    groupMaxSessionTimeoutMs number

    The maximum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures.

    groupMinSessionTimeoutMs number

    The minimum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures.

    logCleanerDeleteRetentionMs number

    How long delete records are retained.

    logCleanerMaxCompactionLagMs number

    The maximum amount of time a message will remain uncompacted. Only applicable for logs that are being compacted.

    logCleanerMinCleanableRatio number

    Controls log compactor frequency. Larger value means more frequent compactions but also more space wasted for logs. Consider setting log.cleaner.max.compaction.lag.ms to enforce compactions sooner, instead of setting a very high value for this option.

    logCleanerMinCompactionLagMs number

    The minimum time a message will remain uncompacted in the log. Only applicable for logs that are being compacted.

    logCleanupPolicy string

    The default cleanup policy for segments beyond the retention window.

    logFlushIntervalMessages number

    The number of messages accumulated on a log partition before messages are flushed to disk.

    logFlushIntervalMs number

    The maximum time in ms that a message in any topic is kept in memory before being flushed to disk. If not set, the value in log.flush.scheduler.interval.ms is used.

    logIndexIntervalBytes number

    The interval with which Kafka adds an entry to the offset index.

    logIndexSizeMaxBytes number

    The maximum size in bytes of the offset index.

    logMessageDownconversionEnable boolean

    This configuration controls whether down-conversion of message formats is enabled to satisfy consume requests.

    logMessageTimestampDifferenceMaxMs number

    The maximum difference allowed between the timestamp when a broker receives a message and the timestamp specified in the message.

    logMessageTimestampType string

    Define whether the timestamp in the message is message create time or log append time.

    logPreallocate boolean

    Whether to preallocate the file when creating a new segment.

    logRetentionBytes number

    The maximum size of the log before deleting messages.

    logRetentionHours number

    The number of hours to keep a log file before deleting it.

    logRetentionMs number

    The number of milliseconds to keep a log file before deleting it. If not set, the value in log.retention.minutes is used. If set to -1, no time limit is applied.

    logRollJitterMs number

    The maximum jitter to subtract from logRollTimeMillis (in milliseconds). If not set, the value in log.roll.jitter.hours is used.

    logRollMs number

    The maximum time before a new log segment is rolled out (in milliseconds).

    logSegmentBytes number

    The maximum size of a single log file.

    logSegmentDeleteDelayMs number

    The amount of time to wait before deleting a file from the filesystem.

    maxConnectionsPerIp number

    The maximum number of connections allowed from each IP address (defaults to 2147483647).

    maxIncrementalFetchSessionCacheSlots number

    The maximum number of incremental fetch sessions that the broker will maintain.

    messageMaxBytes number

    The maximum size of a message that the server can receive.

    minInsyncReplicas number

    When a producer sets acks to 'all' (or '-1'), min.insync.replicas specifies the minimum number of replicas that must acknowledge a write for the write to be considered successful.

    numPartitions number

    Number of partitions for autocreated topics.

    offsetsRetentionMinutes number

    Log retention window in minutes for offsets topic.

    producerPurgatoryPurgeIntervalRequests number

    The purge interval (in number of requests) of the producer request purgatory (defaults to 1000).

    replicaFetchMaxBytes number

    The number of bytes of messages to attempt to fetch for each partition (defaults to 1048576). This is not an absolute maximum; if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that progress can be made.

    replicaFetchResponseMaxBytes number

    Maximum bytes expected for the entire fetch response (defaults to 10485760). Records are fetched in batches, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that progress can be made. As such, this is not an absolute maximum.

    socketRequestMaxBytes number

    The maximum number of bytes in a socket request (defaults to 104857600).

    transactionRemoveExpiredTransactionCleanupIntervalMs number

    The interval at which to remove transactions that have expired due to transactional.id.expiration.ms passing (defaults to 3600000 (1 hour)).

    transactionStateLogSegmentBytes number

    The transaction topic segment bytes should be kept relatively small in order to facilitate faster log compaction and cache loads (defaults to 104857600 (100 mebibytes)).

    auto_create_topics_enable bool

    Enable auto creation of topics.

    compression_type str

    Specify the final compression type for a given topic. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'uncompressed' which is equivalent to no compression; and 'producer' which means retain the original compression codec set by the producer.

    connections_max_idle_ms int

    Idle connections timeout: the server socket processor threads close the connections that idle for longer than this.

    default_replication_factor int

    Replication factor for autocreated topics.

    group_initial_rebalance_delay_ms int

    The amount of time, in milliseconds, the group coordinator will wait for more consumers to join a new group before performing the first rebalance. A longer delay means potentially fewer rebalances, but increases the time until processing begins. The default value for this is 3 seconds. During development and testing it might be desirable to set this to 0 in order to not delay test execution time.

    group_max_session_timeout_ms int

    The maximum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures.

    group_min_session_timeout_ms int

    The minimum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures.

    log_cleaner_delete_retention_ms int

    How long delete records are retained.

    log_cleaner_max_compaction_lag_ms int

    The maximum amount of time a message will remain uncompacted. Only applicable for logs that are being compacted.

    log_cleaner_min_cleanable_ratio float

    Controls log compactor frequency. Larger value means more frequent compactions but also more space wasted for logs. Consider setting log.cleaner.max.compaction.lag.ms to enforce compactions sooner, instead of setting a very high value for this option.

    log_cleaner_min_compaction_lag_ms int

    The minimum time a message will remain uncompacted in the log. Only applicable for logs that are being compacted.

    log_cleanup_policy str

    The default cleanup policy for segments beyond the retention window.

    log_flush_interval_messages int

    The number of messages accumulated on a log partition before messages are flushed to disk.

    log_flush_interval_ms int

    The maximum time in ms that a message in any topic is kept in memory before being flushed to disk. If not set, the value in log.flush.scheduler.interval.ms is used.

    log_index_interval_bytes int

    The interval with which Kafka adds an entry to the offset index.

    log_index_size_max_bytes int

    The maximum size in bytes of the offset index.

    log_message_downconversion_enable bool

    This configuration controls whether down-conversion of message formats is enabled to satisfy consume requests.

    log_message_timestamp_difference_max_ms int

    The maximum difference allowed between the timestamp when a broker receives a message and the timestamp specified in the message.

    log_message_timestamp_type str

    Define whether the timestamp in the message is message create time or log append time.

    log_preallocate bool

    Whether to preallocate the file when creating a new segment.

    log_retention_bytes int

    The maximum size of the log before deleting messages.

    log_retention_hours int

    The number of hours to keep a log file before deleting it.

    log_retention_ms int

    The number of milliseconds to keep a log file before deleting it. If not set, the value in log.retention.minutes is used. If set to -1, no time limit is applied.

    log_roll_jitter_ms int

    The maximum jitter to subtract from logRollTimeMillis (in milliseconds). If not set, the value in log.roll.jitter.hours is used.

    log_roll_ms int

    The maximum time before a new log segment is rolled out (in milliseconds).

    log_segment_bytes int

    The maximum size of a single log file.

    log_segment_delete_delay_ms int

    The amount of time to wait before deleting a file from the filesystem.

    max_connections_per_ip int

    The maximum number of connections allowed from each IP address (defaults to 2147483647).

    max_incremental_fetch_session_cache_slots int

    The maximum number of incremental fetch sessions that the broker will maintain.

    message_max_bytes int

    The maximum size of a message that the server can receive.

    min_insync_replicas int

    When a producer sets acks to 'all' (or '-1'), min.insync.replicas specifies the minimum number of replicas that must acknowledge a write for the write to be considered successful.

    num_partitions int

    Number of partitions for autocreated topics.

    offsets_retention_minutes int

    Log retention window in minutes for offsets topic.

    producer_purgatory_purge_interval_requests int

    The purge interval (in number of requests) of the producer request purgatory (defaults to 1000).

    replica_fetch_max_bytes int

    The number of bytes of messages to attempt to fetch for each partition (defaults to 1048576). This is not an absolute maximum; if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that progress can be made.

    replica_fetch_response_max_bytes int

    Maximum bytes expected for the entire fetch response (defaults to 10485760). Records are fetched in batches, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that progress can be made. As such, this is not an absolute maximum.

    socket_request_max_bytes int

    The maximum number of bytes in a socket request (defaults to 104857600).

    transaction_remove_expired_transaction_cleanup_interval_ms int

    The interval at which to remove transactions that have expired due to transactional.id.expiration.ms passing (defaults to 3600000 (1 hour)).

    transaction_state_log_segment_bytes int

    The transaction topic segment bytes should be kept relatively small in order to facilitate faster log compaction and cache loads (defaults to 104857600 (100 mebibytes)).

    autoCreateTopicsEnable Boolean

    Enable auto creation of topics.

    compressionType String

    Specify the final compression type for a given topic. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'uncompressed' which is equivalent to no compression; and 'producer' which means retain the original compression codec set by the producer.

    connectionsMaxIdleMs Number

    Idle connections timeout: the server socket processor threads close the connections that idle for longer than this.

    defaultReplicationFactor Number

    Replication factor for autocreated topics.

    groupInitialRebalanceDelayMs Number

    The amount of time, in milliseconds, the group coordinator will wait for more consumers to join a new group before performing the first rebalance. A longer delay means potentially fewer rebalances, but increases the time until processing begins. The default value for this is 3 seconds. During development and testing it might be desirable to set this to 0 in order to not delay test execution time.

    groupMaxSessionTimeoutMs Number

    The maximum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures.

    groupMinSessionTimeoutMs Number

    The minimum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures.

    logCleanerDeleteRetentionMs Number

    How long delete records are retained.

    logCleanerMaxCompactionLagMs Number

    The maximum amount of time a message will remain uncompacted. Only applicable for logs that are being compacted.

    logCleanerMinCleanableRatio Number

    Controls log compactor frequency. Larger value means more frequent compactions but also more space wasted for logs. Consider setting log.cleaner.max.compaction.lag.ms to enforce compactions sooner, instead of setting a very high value for this option.

    logCleanerMinCompactionLagMs Number

    The minimum time a message will remain uncompacted in the log. Only applicable for logs that are being compacted.

    logCleanupPolicy String

    The default cleanup policy for segments beyond the retention window.

    logFlushIntervalMessages Number

    The number of messages accumulated on a log partition before messages are flushed to disk.

    logFlushIntervalMs Number

    The maximum time in ms that a message in any topic is kept in memory before being flushed to disk. If not set, the value in log.flush.scheduler.interval.ms is used.

    logIndexIntervalBytes Number

    The interval with which Kafka adds an entry to the offset index.

    logIndexSizeMaxBytes Number

    The maximum size in bytes of the offset index.

    logMessageDownconversionEnable Boolean

    This configuration controls whether down-conversion of message formats is enabled to satisfy consume requests.

    logMessageTimestampDifferenceMaxMs Number

    The maximum difference allowed between the timestamp when a broker receives a message and the timestamp specified in the message.

    logMessageTimestampType String

    Define whether the timestamp in the message is message create time or log append time.

    logPreallocate Boolean

    Whether to preallocate the file when creating a new segment.

    logRetentionBytes Number

    The maximum size of the log before deleting messages.

    logRetentionHours Number

    The number of hours to keep a log file before deleting it.

    logRetentionMs Number

    The number of milliseconds to keep a log file before deleting it. If not set, the value in log.retention.minutes is used. If set to -1, no time limit is applied.

    logRollJitterMs Number

    The maximum jitter to subtract from logRollTimeMillis (in milliseconds). If not set, the value in log.roll.jitter.hours is used.

    logRollMs Number

    The maximum time before a new log segment is rolled out (in milliseconds).

    logSegmentBytes Number

    The maximum size of a single log file.

    logSegmentDeleteDelayMs Number

    The amount of time to wait before deleting a file from the filesystem.

    maxConnectionsPerIp Number

    The maximum number of connections allowed from each IP address (defaults to 2147483647).

    maxIncrementalFetchSessionCacheSlots Number

    The maximum number of incremental fetch sessions that the broker will maintain.

    messageMaxBytes Number

    The maximum size of a message that the server can receive.

    minInsyncReplicas Number

    When a producer sets acks to 'all' (or '-1'), min.insync.replicas specifies the minimum number of replicas that must acknowledge a write for the write to be considered successful.

    numPartitions Number

    Number of partitions for autocreated topics.

    offsetsRetentionMinutes Number

    Log retention window in minutes for offsets topic.

    producerPurgatoryPurgeIntervalRequests Number

    The purge interval (in number of requests) of the producer request purgatory (defaults to 1000).

    replicaFetchMaxBytes Number

    The number of bytes of messages to attempt to fetch for each partition (defaults to 1048576). This is not an absolute maximum; if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that progress can be made.

    replicaFetchResponseMaxBytes Number

    Maximum bytes expected for the entire fetch response (defaults to 10485760). Records are fetched in batches, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that progress can be made. As such, this is not an absolute maximum.

    socketRequestMaxBytes Number

    The maximum number of bytes in a socket request (defaults to 104857600).

    transactionRemoveExpiredTransactionCleanupIntervalMs Number

    The interval at which to remove transactions that have expired due to transactional.id.expiration.ms passing (defaults to 3600000 (1 hour)).

    transactionStateLogSegmentBytes Number

    The transaction topic segment bytes should be kept relatively small in order to facilitate faster log compaction and cache loads (defaults to 104857600 (100 mebibytes)).
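
    To see how these broker-level options fit together, the following is a minimal TypeScript sketch that applies a few of them through kafkaUserConfig.kafka. The project and service names are placeholders and the values are illustrative, not recommendations.

    import * as aiven from "@pulumi/aiven";
    
    // Sketch: tune a few broker-level settings via kafkaUserConfig.kafka.
    // "my-project" and "my-kafka" are placeholder names.
    const kafka = new aiven.Kafka("kafka-broker-tuning", {
        project: "my-project",
        cloudName: "google-europe-west1",
        plan: "business-4",
        serviceName: "my-kafka",
        kafkaUserConfig: {
            kafka: {
                logRetentionMs: 259200000,     // keep log segments for roughly 3 days
                logCleanupPolicy: "delete",    // cleanup policy beyond the retention window
                messageMaxBytes: 1048576,      // largest message the server will accept
                minInsyncReplicas: 2,          // replicas that must acknowledge when acks is 'all'
                autoCreateTopicsEnable: false, // disable implicit topic creation
            },
        },
    });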

    KafkaKafkaUserConfigKafkaAuthenticationMethods, KafkaKafkaUserConfigKafkaAuthenticationMethodsArgs

    Certificate bool

    Enable certificate/SSL authentication. The default value is true.

    Sasl bool

    Enable SASL authentication. The default value is false.

    Certificate bool

    Enable certificate/SSL authentication. The default value is true.

    Sasl bool

    Enable SASL authentication. The default value is false.

    certificate Boolean

    Enable certificate/SSL authentication. The default value is true.

    sasl Boolean

    Enable SASL authentication. The default value is false.

    certificate boolean

    Enable certificate/SSL authentication. The default value is true.

    sasl boolean

    Enable SASL authentication. The default value is false.

    certificate bool

    Enable certificate/SSL authentication. The default value is true.

    sasl bool

    Enable SASL authentication. The default value is false.

    certificate Boolean

    Enable certificate/SSL authentication. The default value is true.

    sasl Boolean

    Enable SASL authentication. The default value is false.
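
    As a short illustration, the sketch below enables SASL alongside the default certificate authentication; the project and service names are placeholders.

    import * as aiven from "@pulumi/aiven";
    
    // Sketch: enable SASL in addition to certificate/SSL authentication.
    const kafka = new aiven.Kafka("kafka-auth", {
        project: "my-project",   // placeholder
        plan: "business-4",
        serviceName: "my-kafka",
        kafkaUserConfig: {
            kafkaAuthenticationMethods: {
                certificate: true, // default is true
                sasl: true,        // default is false
            },
        },
    });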

    KafkaKafkaUserConfigKafkaConnectConfig, KafkaKafkaUserConfigKafkaConnectConfigArgs

    ConnectorClientConfigOverridePolicy string

    Defines what client configurations can be overridden by the connector. Default is None.

    ConsumerAutoOffsetReset string

    What to do when there is no initial offset in Kafka or if the current offset does not exist any more on the server. Default is earliest.

    ConsumerFetchMaxBytes int

    Records are fetched in batches by the consumer, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that the consumer can make progress. As such, this is not an absolute maximum.

    ConsumerIsolationLevel string

    Transaction read isolation level. read_uncommitted is the default, but read_committed can be used if consume-exactly-once behavior is desired.

    ConsumerMaxPartitionFetchBytes int

    Records are fetched in batches by the consumer. If the first record batch in the first non-empty partition of the fetch is larger than this limit, the batch will still be returned to ensure that the consumer can make progress.

    ConsumerMaxPollIntervalMs int

    The maximum delay in milliseconds between invocations of poll() when using consumer group management (defaults to 300000).

    ConsumerMaxPollRecords int

    The maximum number of records returned in a single call to poll() (defaults to 500).

    OffsetFlushIntervalMs int

    The interval at which to try committing offsets for tasks (defaults to 60000).

    OffsetFlushTimeoutMs int

    Maximum number of milliseconds to wait for records to flush and partition offset data to be committed to offset storage before cancelling the process and restoring the offset data to be committed in a future attempt (defaults to 5000).

    ProducerBatchSize int

    This setting gives the upper bound of the batch size to be sent. If there are fewer than this many bytes accumulated for this partition, the producer will 'linger' for the linger.ms time waiting for more records to show up. A batch size of zero will disable batching entirely (defaults to 16384).

    ProducerBufferMemory int

    The total bytes of memory the producer can use to buffer records waiting to be sent to the broker (defaults to 33554432).

    ProducerCompressionType string

    Specify the default compression type for producers. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'none' which is the default and equivalent to no compression.

    ProducerLingerMs int

    This setting gives the upper bound on the delay for batching: once there is batch.size worth of records for a partition it will be sent immediately regardless of this setting; however, if there are fewer than this many bytes accumulated for this partition the producer will 'linger' for the specified time waiting for more records to show up. Defaults to 0.

    ProducerMaxRequestSize int

    This setting will limit the number of record batches the producer will send in a single request to avoid sending huge requests.

    ScheduledRebalanceMaxDelayMs int

    The maximum delay that is scheduled in order to wait for the return of one or more departed workers before rebalancing and reassigning their connectors and tasks to the group. During this period the connectors and tasks of the departed workers remain unassigned. Defaults to 5 minutes.

    SessionTimeoutMs int

    The timeout in milliseconds used to detect failures when using Kafka’s group management facilities (defaults to 10000).

    ConnectorClientConfigOverridePolicy string

    Defines what client configurations can be overridden by the connector. Default is None.

    ConsumerAutoOffsetReset string

    What to do when there is no initial offset in Kafka or if the current offset does not exist any more on the server. Default is earliest.

    ConsumerFetchMaxBytes int

    Records are fetched in batches by the consumer, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that the consumer can make progress. As such, this is not an absolute maximum.

    ConsumerIsolationLevel string

    Transaction read isolation level. read_uncommitted is the default, but read_committed can be used if consume-exactly-once behavior is desired.

    ConsumerMaxPartitionFetchBytes int

    Records are fetched in batches by the consumer. If the first record batch in the first non-empty partition of the fetch is larger than this limit, the batch will still be returned to ensure that the consumer can make progress.

    ConsumerMaxPollIntervalMs int

    The maximum delay in milliseconds between invocations of poll() when using consumer group management (defaults to 300000).

    ConsumerMaxPollRecords int

    The maximum number of records returned in a single call to poll() (defaults to 500).

    OffsetFlushIntervalMs int

    The interval at which to try committing offsets for tasks (defaults to 60000).

    OffsetFlushTimeoutMs int

    Maximum number of milliseconds to wait for records to flush and partition offset data to be committed to offset storage before cancelling the process and restoring the offset data to be committed in a future attempt (defaults to 5000).

    ProducerBatchSize int

    This setting gives the upper bound of the batch size to be sent. If there are fewer than this many bytes accumulated for this partition, the producer will 'linger' for the linger.ms time waiting for more records to show up. A batch size of zero will disable batching entirely (defaults to 16384).

    ProducerBufferMemory int

    The total bytes of memory the producer can use to buffer records waiting to be sent to the broker (defaults to 33554432).

    ProducerCompressionType string

    Specify the default compression type for producers. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'none' which is the default and equivalent to no compression.

    ProducerLingerMs int

    This setting gives the upper bound on the delay for batching: once there is batch.size worth of records for a partition it will be sent immediately regardless of this setting; however, if there are fewer than this many bytes accumulated for this partition the producer will 'linger' for the specified time waiting for more records to show up. Defaults to 0.

    ProducerMaxRequestSize int

    This setting will limit the number of record batches the producer will send in a single request to avoid sending huge requests.

    ScheduledRebalanceMaxDelayMs int

    The maximum delay that is scheduled in order to wait for the return of one or more departed workers before rebalancing and reassigning their connectors and tasks to the group. During this period the connectors and tasks of the departed workers remain unassigned. Defaults to 5 minutes.

    SessionTimeoutMs int

    The timeout in milliseconds used to detect failures when using Kafka’s group management facilities (defaults to 10000).

    connectorClientConfigOverridePolicy String

    Defines what client configurations can be overridden by the connector. Default is None.

    consumerAutoOffsetReset String

    What to do when there is no initial offset in Kafka or if the current offset does not exist any more on the server. Default is earliest.

    consumerFetchMaxBytes Integer

    Records are fetched in batches by the consumer, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that the consumer can make progress. As such, this is not an absolute maximum.

    consumerIsolationLevel String

    Transaction read isolation level. read_uncommitted is the default, but read_committed can be used if consume-exactly-once behavior is desired.

    consumerMaxPartitionFetchBytes Integer

    Records are fetched in batches by the consumer. If the first record batch in the first non-empty partition of the fetch is larger than this limit, the batch will still be returned to ensure that the consumer can make progress.

    consumerMaxPollIntervalMs Integer

    The maximum delay in milliseconds between invocations of poll() when using consumer group management (defaults to 300000).

    consumerMaxPollRecords Integer

    The maximum number of records returned in a single call to poll() (defaults to 500).

    offsetFlushIntervalMs Integer

    The interval at which to try committing offsets for tasks (defaults to 60000).

    offsetFlushTimeoutMs Integer

    Maximum number of milliseconds to wait for records to flush and partition offset data to be committed to offset storage before cancelling the process and restoring the offset data to be committed in a future attempt (defaults to 5000).

    producerBatchSize Integer

    This setting gives the upper bound of the batch size to be sent. If there are fewer than this many bytes accumulated for this partition, the producer will 'linger' for the linger.ms time waiting for more records to show up. A batch size of zero will disable batching entirely (defaults to 16384).

    producerBufferMemory Integer

    The total bytes of memory the producer can use to buffer records waiting to be sent to the broker (defaults to 33554432).

    producerCompressionType String

    Specify the default compression type for producers. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'none' which is the default and equivalent to no compression.

    producerLingerMs Integer

    This setting gives the upper bound on the delay for batching: once there is batch.size worth of records for a partition it will be sent immediately regardless of this setting; however, if there are fewer than this many bytes accumulated for this partition the producer will 'linger' for the specified time waiting for more records to show up. Defaults to 0.

    producerMaxRequestSize Integer

    This setting will limit the number of record batches the producer will send in a single request to avoid sending huge requests.

    scheduledRebalanceMaxDelayMs Integer

    The maximum delay that is scheduled in order to wait for the return of one or more departed workers before rebalancing and reassigning their connectors and tasks to the group. During this period the connectors and tasks of the departed workers remain unassigned. Defaults to 5 minutes.

    sessionTimeoutMs Integer

    The timeout in milliseconds used to detect failures when using Kafka’s group management facilities (defaults to 10000).

    connectorClientConfigOverridePolicy string

    Defines what client configurations can be overridden by the connector. Default is None.

    consumerAutoOffsetReset string

    What to do when there is no initial offset in Kafka or if the current offset does not exist any more on the server. Default is earliest.

    consumerFetchMaxBytes number

    Records are fetched in batches by the consumer, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that the consumer can make progress. As such, this is not an absolute maximum.

    consumerIsolationLevel string

    Transaction read isolation level. read_uncommitted is the default, but read_committed can be used if consume-exactly-once behavior is desired.

    consumerMaxPartitionFetchBytes number

    Records are fetched in batches by the consumer. If the first record batch in the first non-empty partition of the fetch is larger than this limit, the batch will still be returned to ensure that the consumer can make progress.

    consumerMaxPollIntervalMs number

    The maximum delay in milliseconds between invocations of poll() when using consumer group management (defaults to 300000).

    consumerMaxPollRecords number

    The maximum number of records returned in a single call to poll() (defaults to 500).

    offsetFlushIntervalMs number

    The interval at which to try committing offsets for tasks (defaults to 60000).

    offsetFlushTimeoutMs number

    Maximum number of milliseconds to wait for records to flush and partition offset data to be committed to offset storage before cancelling the process and restoring the offset data to be committed in a future attempt (defaults to 5000).

    producerBatchSize number

    This setting gives the upper bound of the batch size to be sent. If there are fewer than this many bytes accumulated for this partition, the producer will 'linger' for the linger.ms time waiting for more records to show up. A batch size of zero will disable batching entirely (defaults to 16384).

    producerBufferMemory number

    The total bytes of memory the producer can use to buffer records waiting to be sent to the broker (defaults to 33554432).

    producerCompressionType string

    Specify the default compression type for producers. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'none' which is the default and equivalent to no compression.

    producerLingerMs number

    This setting gives the upper bound on the delay for batching: once there is batch.size worth of records for a partition it will be sent immediately regardless of this setting; however, if there are fewer than this many bytes accumulated for this partition the producer will 'linger' for the specified time waiting for more records to show up. Defaults to 0.

    producerMaxRequestSize number

    This setting will limit the number of record batches the producer will send in a single request to avoid sending huge requests.

    scheduledRebalanceMaxDelayMs number

    The maximum delay that is scheduled in order to wait for the return of one or more departed workers before rebalancing and reassigning their connectors and tasks to the group. During this period the connectors and tasks of the departed workers remain unassigned. Defaults to 5 minutes.

    sessionTimeoutMs number

    The timeout in milliseconds used to detect failures when using Kafka’s group management facilities (defaults to 10000).

    connector_client_config_override_policy str

    Defines what client configurations can be overridden by the connector. Default is None.

    consumer_auto_offset_reset str

    What to do when there is no initial offset in Kafka or if the current offset does not exist any more on the server. Default is earliest.

    consumer_fetch_max_bytes int

    Records are fetched in batches by the consumer, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that the consumer can make progress. As such, this is not an absolute maximum.

    consumer_isolation_level str

    Transaction read isolation level. read_uncommitted is the default, but read_committed can be used if consume-exactly-once behavior is desired.

    consumer_max_partition_fetch_bytes int

    Records are fetched in batches by the consumer. If the first record batch in the first non-empty partition of the fetch is larger than this limit, the batch will still be returned to ensure that the consumer can make progress.

    consumer_max_poll_interval_ms int

    The maximum delay in milliseconds between invocations of poll() when using consumer group management (defaults to 300000).

    consumer_max_poll_records int

    The maximum number of records returned in a single call to poll() (defaults to 500).

    offset_flush_interval_ms int

    The interval at which to try committing offsets for tasks (defaults to 60000).

    offset_flush_timeout_ms int

    Maximum number of milliseconds to wait for records to flush and partition offset data to be committed to offset storage before cancelling the process and restoring the offset data to be committed in a future attempt (defaults to 5000).

    producer_batch_size int

    This setting gives the upper bound of the batch size to be sent. If there are fewer than this many bytes accumulated for this partition, the producer will 'linger' for the linger.ms time waiting for more records to show up. A batch size of zero will disable batching entirely (defaults to 16384).

    producer_buffer_memory int

    The total bytes of memory the producer can use to buffer records waiting to be sent to the broker (defaults to 33554432).

    producer_compression_type str

    Specify the default compression type for producers. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'none' which is the default and equivalent to no compression.

    producer_linger_ms int

    This setting gives the upper bound on the delay for batching: once there is batch.size worth of records for a partition it will be sent immediately regardless of this setting; however, if there are fewer than this many bytes accumulated for this partition the producer will 'linger' for the specified time waiting for more records to show up. Defaults to 0.

    producer_max_request_size int

    This setting will limit the number of record batches the producer will send in a single request to avoid sending huge requests.

    scheduled_rebalance_max_delay_ms int

    The maximum delay that is scheduled in order to wait for the return of one or more departed workers before rebalancing and reassigning their connectors and tasks to the group. During this period the connectors and tasks of the departed workers remain unassigned. Defaults to 5 minutes.

    session_timeout_ms int

    The timeout in milliseconds used to detect failures when using Kafka’s group management facilities (defaults to 10000).

    connectorClientConfigOverridePolicy String

    Defines what client configurations can be overridden by the connector. Default is None.

    consumerAutoOffsetReset String

    What to do when there is no initial offset in Kafka or if the current offset does not exist any more on the server. Default is earliest.

    consumerFetchMaxBytes Number

    Records are fetched in batches by the consumer, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that the consumer can make progress. As such, this is not an absolute maximum.

    consumerIsolationLevel String

    Transaction read isolation level. read_uncommitted is the default, but read_committed can be used if consume-exactly-once behavior is desired.

    consumerMaxPartitionFetchBytes Number

    Records are fetched in batches by the consumer. If the first record batch in the first non-empty partition of the fetch is larger than this limit, the batch will still be returned to ensure that the consumer can make progress.

    consumerMaxPollIntervalMs Number

    The maximum delay in milliseconds between invocations of poll() when using consumer group management (defaults to 300000).

    consumerMaxPollRecords Number

    The maximum number of records returned in a single call to poll() (defaults to 500).

    offsetFlushIntervalMs Number

    The interval at which to try committing offsets for tasks (defaults to 60000).

    offsetFlushTimeoutMs Number

    Maximum number of milliseconds to wait for records to flush and partition offset data to be committed to offset storage before cancelling the process and restoring the offset data to be committed in a future attempt (defaults to 5000).

    producerBatchSize Number

    This setting gives the upper bound of the batch size to be sent. If there are fewer than this many bytes accumulated for this partition, the producer will 'linger' for the linger.ms time waiting for more records to show up. A batch size of zero will disable batching entirely (defaults to 16384).

    producerBufferMemory Number

    The total bytes of memory the producer can use to buffer records waiting to be sent to the broker (defaults to 33554432).

    producerCompressionType String

    Specify the default compression type for producers. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'none' which is the default and equivalent to no compression.

    producerLingerMs Number

    This setting gives the upper bound on the delay for batching: once there is batch.size worth of records for a partition it will be sent immediately regardless of this setting; however, if there are fewer than this many bytes accumulated for this partition the producer will 'linger' for the specified time waiting for more records to show up. Defaults to 0.

    producerMaxRequestSize Number

    This setting will limit the number of record batches the producer will send in a single request to avoid sending huge requests.

    scheduledRebalanceMaxDelayMs Number

    The maximum delay that is scheduled in order to wait for the return of one or more departed workers before rebalancing and reassigning their connectors and tasks to the group. During this period the connectors and tasks of the departed workers remain unassigned. Defaults to 5 minutes.

    sessionTimeoutMs Number

    The timeout in milliseconds used to detect failures when using Kafka’s group management facilities (defaults to 10000).
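
    The sketch below shows how a few of these worker-level options might be set when Kafka Connect runs on the same service; names are placeholders and values are examples only.

    import * as aiven from "@pulumi/aiven";
    
    // Sketch: enable Kafka Connect and tune its worker configuration.
    const kafka = new aiven.Kafka("kafka-connect-tuning", {
        project: "my-project",   // placeholder
        plan: "business-4",
        serviceName: "my-kafka",
        kafkaUserConfig: {
            kafkaConnect: true,
            kafkaConnectConfig: {
                consumerIsolationLevel: "read_committed", // only read committed transactional records
                consumerMaxPollRecords: 200,              // records returned per poll() call
                offsetFlushIntervalMs: 30000,             // try committing task offsets every 30s
                sessionTimeoutMs: 10000,                  // group-management failure detection timeout
            },
        },
    });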

    KafkaKafkaUserConfigKafkaRestConfig, KafkaKafkaUserConfigKafkaRestConfigArgs

    ConsumerEnableAutoCommit bool

    If true, the consumer's offset will be periodically committed to Kafka in the background. The default value is true.

    ConsumerRequestMaxBytes int

    Maximum number of bytes in unencoded message keys and values returned by a single request. The default value is 67108864.

    ConsumerRequestTimeoutMs int

    The maximum total time to wait for messages for a request if the maximum number of messages has not yet been reached. The default value is 1000.

    ProducerAcks string

    The number of acknowledgments the producer requires the leader to have received before considering a request complete. If set to 'all' or '-1', the leader will wait for the full set of in-sync replicas to acknowledge the record. The default value is 1.

    ProducerCompressionType string

    Specify the default compression type for producers. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'none' which is the default and equivalent to no compression.

    ProducerLingerMs int

    This setting gives the upper bound on the delay for batching: once there is batch.size worth of records for a partition it will be sent immediately regardless of this setting; however, if there are fewer than this many bytes accumulated for this partition the producer will 'linger' for the specified time waiting for more records to show up. Defaults to 0.

    ProducerMaxRequestSize int

    This setting will limit the number of record batches the producer will send in a single request to avoid sending huge requests.

    SimpleconsumerPoolSizeMax int

    Maximum number of SimpleConsumers that can be instantiated per broker. The default value is 25.

    ConsumerEnableAutoCommit bool

    If true, the consumer's offset will be periodically committed to Kafka in the background. The default value is true.

    ConsumerRequestMaxBytes int

    Maximum number of bytes in unencoded message keys and values returned by a single request. The default value is 67108864.

    ConsumerRequestTimeoutMs int

    The maximum total time to wait for messages for a request if the maximum number of messages has not yet been reached. The default value is 1000.

    ProducerAcks string

    The number of acknowledgments the producer requires the leader to have received before considering a request complete. If set to 'all' or '-1', the leader will wait for the full set of in-sync replicas to acknowledge the record. The default value is 1.

    ProducerCompressionType string

    Specify the default compression type for producers. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'none' which is the default and equivalent to no compression.

    ProducerLingerMs int

    This setting gives the upper bound on the delay for batching: once there is batch.size worth of records for a partition it will be sent immediately regardless of this setting; however, if there are fewer than this many bytes accumulated for this partition the producer will 'linger' for the specified time waiting for more records to show up. Defaults to 0.

    ProducerMaxRequestSize int

    This setting will limit the number of record batches the producer will send in a single request to avoid sending huge requests.

    SimpleconsumerPoolSizeMax int

    Maximum number of SimpleConsumers that can be instantiated per broker. The default value is 25.

    consumerEnableAutoCommit Boolean

    If true, the consumer's offset will be periodically committed to Kafka in the background. The default value is true.

    consumerRequestMaxBytes Integer

    Maximum number of bytes in unencoded message keys and values returned by a single request. The default value is 67108864.

    consumerRequestTimeoutMs Integer

    The maximum total time to wait for messages for a request if the maximum number of messages has not yet been reached. The default value is 1000.

    producerAcks String

    The number of acknowledgments the producer requires the leader to have received before considering a request complete. If set to 'all' or '-1', the leader will wait for the full set of in-sync replicas to acknowledge the record. The default value is 1.

    producerCompressionType String

    Specify the default compression type for producers. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'none' which is the default and equivalent to no compression.

    producerLingerMs Integer

    This setting gives the upper bound on the delay for batching: once there is batch.size worth of records for a partition it will be sent immediately regardless of this setting; however, if there are fewer than this many bytes accumulated for this partition the producer will 'linger' for the specified time waiting for more records to show up. Defaults to 0.

    producerMaxRequestSize Integer

    This setting will limit the number of record batches the producer will send in a single request to avoid sending huge requests.

    simpleconsumerPoolSizeMax Integer

    Maximum number of SimpleConsumers that can be instantiated per broker. The default value is 25.

    consumerEnableAutoCommit boolean

    If true, the consumer's offset will be periodically committed to Kafka in the background. The default value is true.

    consumerRequestMaxBytes number

    Maximum number of bytes in unencoded message keys and values returned by a single request. The default value is 67108864.

    consumerRequestTimeoutMs number

    The maximum total time to wait for messages for a request if the maximum number of messages has not yet been reached. The default value is 1000.

    producerAcks string

    The number of acknowledgments the producer requires the leader to have received before considering a request complete. If set to 'all' or '-1', the leader will wait for the full set of in-sync replicas to acknowledge the record. The default value is 1.

    producerCompressionType string

    Specify the default compression type for producers. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'none' which is the default and equivalent to no compression.

    producerLingerMs number

    This setting gives the upper bound on the delay for batching: once there is batch.size worth of records for a partition it will be sent immediately regardless of this setting; however, if there are fewer than this many bytes accumulated for this partition the producer will 'linger' for the specified time waiting for more records to show up. Defaults to 0.

    producerMaxRequestSize number

    This setting will limit the number of record batches the producer will send in a single request to avoid sending huge requests.

    simpleconsumerPoolSizeMax number

    Maximum number of SimpleConsumers that can be instantiated per broker. The default value is 25.

    consumer_enable_auto_commit bool

    If true, the consumer's offset will be periodically committed to Kafka in the background. The default value is true.

    consumer_request_max_bytes int

    Maximum number of bytes in unencoded message keys and values returned by a single request. The default value is 67108864.

    consumer_request_timeout_ms int

    The maximum total time to wait for messages for a request if the maximum number of messages has not yet been reached. The default value is 1000.

    producer_acks str

    The number of acknowledgments the producer requires the leader to have received before considering a request complete. If set to 'all' or '-1', the leader will wait for the full set of in-sync replicas to acknowledge the record. The default value is 1.

    producer_compression_type str

    Specify the default compression type for producers. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'none' which is the default and equivalent to no compression.

    producer_linger_ms int

    This setting gives the upper bound on the delay for batching: once there is batch.size worth of records for a partition it will be sent immediately regardless of this setting; however, if there are fewer than this many bytes accumulated for this partition the producer will 'linger' for the specified time waiting for more records to show up. Defaults to 0.

    producer_max_request_size int

    This setting will limit the number of record batches the producer will send in a single request to avoid sending huge requests.

    simpleconsumer_pool_size_max int

    Maximum number of SimpleConsumers that can be instantiated per broker. The default value is 25.

    consumerEnableAutoCommit Boolean

    If true, the consumer's offset will be periodically committed to Kafka in the background. The default value is true.

    consumerRequestMaxBytes Number

    Maximum number of bytes in unencoded message keys and values returned by a single request. The default value is 67108864.

    consumerRequestTimeoutMs Number

    The maximum total time to wait for messages for a request if the maximum number of messages has not yet been reached. The default value is 1000.

    producerAcks String

    The number of acknowledgments the producer requires the leader to have received before considering a request complete. If set to 'all' or '-1', the leader will wait for the full set of in-sync replicas to acknowledge the record. The default value is 1.

    producerCompressionType String

    Specify the default compression type for producers. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'none' which is the default and equivalent to no compression.

    producerLingerMs Number

    This setting gives the upper bound on the delay for batching: once there is batch.size worth of records for a partition it will be sent immediately regardless of this setting; however, if there are fewer than this many bytes accumulated for this partition the producer will 'linger' for the specified time waiting for more records to show up. Defaults to 0.

    producerMaxRequestSize Number

    This setting will limit the number of record batches the producer will send in a single request to avoid sending huge requests.

    simpleconsumerPoolSizeMax Number

    Maximum number of SimpleConsumers that can be instantiated per broker. The default value is 25.
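
    For instance, a Kafka REST setup that favors durability over latency might look like the following sketch (placeholder names, illustrative values):

    import * as aiven from "@pulumi/aiven";
    
    // Sketch: enable Kafka REST and adjust producer/consumer behavior.
    const kafka = new aiven.Kafka("kafka-rest-tuning", {
        project: "my-project",   // placeholder
        plan: "business-4",
        serviceName: "my-kafka",
        kafkaUserConfig: {
            kafkaRest: true,
            kafkaRestConfig: {
                producerAcks: "all",             // wait for the full set of in-sync replicas
                producerLingerMs: 50,            // batch records for up to 50 ms before sending
                consumerEnableAutoCommit: false, // commit consumer offsets explicitly
            },
        },
    });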

    KafkaKafkaUserConfigPrivateAccess, KafkaKafkaUserConfigPrivateAccessArgs

    Kafka bool

    Kafka broker configuration values.

    KafkaConnect bool

    Enable Kafka Connect service. The default value is false.

    KafkaRest bool

    Enable Kafka-REST service. The default value is false.

    Prometheus bool

    Allow clients to connect to prometheus with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.

    SchemaRegistry bool

    Allow clients to connect to schema_registry with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.

    Kafka bool

    Kafka broker configuration values.

    KafkaConnect bool

    Enable Kafka Connect service. The default value is false.

    KafkaRest bool

    Enable Kafka-REST service. The default value is false.

    Prometheus bool

    Allow clients to connect to prometheus with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.

    SchemaRegistry bool

    Allow clients to connect to schema_registry with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.

    kafka Boolean

    Kafka broker configuration values.

    kafkaConnect Boolean

    Enable Kafka Connect service. The default value is false.

    kafkaRest Boolean

    Enable Kafka-REST service. The default value is false.

    prometheus Boolean

    Allow clients to connect to prometheus with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.

    schemaRegistry Boolean

    Allow clients to connect to schema_registry with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.

    kafka boolean

    Kafka broker configuration values.

    kafkaConnect boolean

    Enable Kafka Connect service. The default value is false.

    kafkaRest boolean

    Enable Kafka-REST service. The default value is false.

    prometheus boolean

    Allow clients to connect to prometheus with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.

    schemaRegistry boolean

    Allow clients to connect to schema_registry with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.

    kafka bool

    Kafka broker configuration values.

    kafka_connect bool

    Enable Kafka Connect service. The default value is false.

    kafka_rest bool

    Enable Kafka-REST service. The default value is false.

    prometheus bool

    Allow clients to connect to prometheus with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.

    schema_registry bool

    Allow clients to connect to schema_registry with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.

    kafka Boolean

    Kafka broker configuration values.

    kafkaConnect Boolean

    Enable Kafka Connect service. The default value is false.

    kafkaRest Boolean

    Enable Kafka-REST service. The default value is false.

    prometheus Boolean

    Allow clients to connect to prometheus with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.

    schemaRegistry Boolean

    Allow clients to connect to schema_registry with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
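
    A minimal sketch of private access, exposing the broker and Prometheus endpoints only on the service's private IP addresses (placeholder names):

    import * as aiven from "@pulumi/aiven";
    
    // Sketch: expose kafka and prometheus over the private network only.
    const kafka = new aiven.Kafka("kafka-private", {
        project: "my-project",   // placeholder
        plan: "business-4",
        serviceName: "my-kafka",
        kafkaUserConfig: {
            privateAccess: {
                kafka: true,
                prometheus: true,
            },
        },
    });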

    KafkaKafkaUserConfigPrivatelinkAccess, KafkaKafkaUserConfigPrivatelinkAccessArgs

    Jolokia bool

    Enable jolokia.

    Kafka bool

    Kafka broker configuration values.

    KafkaConnect bool

    Enable Kafka Connect service. The default value is false.

    KafkaRest bool

    Enable Kafka-REST service. The default value is false.

    Prometheus bool

    Allow clients to connect to prometheus with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.

    SchemaRegistry bool

    Allow clients to connect to schema_registry with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.

    Jolokia bool

    Enable jolokia.

    Kafka bool

    Kafka broker configuration values.

    KafkaConnect bool

    Enable Kafka Connect service. The default value is false.

    KafkaRest bool

    Enable Kafka-REST service. The default value is false.

    Prometheus bool

    Allow clients to connect to prometheus with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.

    SchemaRegistry bool

    Allow clients to connect to schema_registry with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.

    jolokia Boolean

    Enable jolokia.

    kafka Boolean

    Kafka broker configuration values.

    kafkaConnect Boolean

    Enable Kafka Connect service. The default value is false.

    kafkaRest Boolean

    Enable Kafka-REST service. The default value is false.

    prometheus Boolean

    Allow clients to connect to prometheus with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.

    schemaRegistry Boolean

    Allow clients to connect to schema_registry with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.

    jolokia boolean

    Enable jolokia.

    kafka boolean

    Kafka broker configuration values.

    kafkaConnect boolean

    Enable Kafka Connect service. The default value is false.

    kafkaRest boolean

    Enable Kafka-REST service. The default value is false.

    prometheus boolean

    Allow clients to connect to prometheus with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.

    schemaRegistry boolean

    Allow clients to connect to schema_registry with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.

    jolokia bool

    Enable jolokia.

    kafka bool

    Kafka broker configuration values.

    kafka_connect bool

    Enable Kafka Connect service. The default value is false.

    kafka_rest bool

    Enable Kafka-REST service. The default value is false.

    prometheus bool

    Allow clients to connect to prometheus with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.

    schema_registry bool

    Allow clients to connect to schema_registry with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.

    jolokia Boolean

    Enable jolokia.

    kafka Boolean

    Kafka broker configuration values.

    kafkaConnect Boolean

    Enable Kafka Connect service. The default value is false.

    kafkaRest Boolean

    Enable Kafka-REST service. The default value is false.

    prometheus Boolean

    Allow clients to connect to prometheus with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.

    schemaRegistry Boolean

    Allow clients to connect to schema_registry with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
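
    Over PrivateLink, a similar sketch might enable the broker, Schema Registry, and Jolokia endpoints (placeholder names):

    import * as aiven from "@pulumi/aiven";
    
    // Sketch: expose selected endpoints through PrivateLink.
    const kafka = new aiven.Kafka("kafka-privatelink", {
        project: "my-project",   // placeholder
        plan: "business-4",
        serviceName: "my-kafka",
        kafkaUserConfig: {
            privatelinkAccess: {
                kafka: true,
                schemaRegistry: true,
                jolokia: true,
            },
        },
    });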

    KafkaKafkaUserConfigPublicAccess, KafkaKafkaUserConfigPublicAccessArgs

    Kafka bool

    Kafka broker configuration values.

    KafkaConnect bool

    Enable Kafka Connect service. The default value is false.

    KafkaRest bool

    Enable Kafka-REST service. The default value is false.

    Prometheus bool

    Allow clients to connect to prometheus from the public internet for service nodes that are in a project VPC or another type of private network.

    SchemaRegistry bool

    Allow clients to connect to schema_registry from the public internet for service nodes that are in a project VPC or another type of private network.

    Kafka bool

    Kafka broker configuration values.

    KafkaConnect bool

    Enable Kafka Connect service. The default value is false.

    KafkaRest bool

    Enable Kafka-REST service. The default value is false.

    Prometheus bool

    Allow clients to connect to prometheus from the public internet for service nodes that are in a project VPC or another type of private network.

    SchemaRegistry bool

    Allow clients to connect to schema_registry from the public internet for service nodes that are in a project VPC or another type of private network.

    kafka Boolean

    Allow clients to connect to kafka from the public internet for service nodes that are in a project VPC or another type of private network.

    kafkaConnect Boolean

    Allow clients to connect to kafka_connect from the public internet for service nodes that are in a project VPC or another type of private network.

    kafkaRest Boolean

    Allow clients to connect to kafka_rest from the public internet for service nodes that are in a project VPC or another type of private network.

    prometheus Boolean

    Allow clients to connect to prometheus from the public internet for service nodes that are in a project VPC or another type of private network.

    schemaRegistry Boolean

    Allow clients to connect to schema_registry from the public internet for service nodes that are in a project VPC or another type of private network.

    kafka boolean

    Allow clients to connect to kafka from the public internet for service nodes that are in a project VPC or another type of private network.

    kafkaConnect boolean

    Allow clients to connect to kafka_connect from the public internet for service nodes that are in a project VPC or another type of private network.

    kafkaRest boolean

    Allow clients to connect to kafka_rest from the public internet for service nodes that are in a project VPC or another type of private network.

    prometheus boolean

    Allow clients to connect to prometheus from the public internet for service nodes that are in a project VPC or another type of private network.

    schemaRegistry boolean

    Allow clients to connect to schema_registry from the public internet for service nodes that are in a project VPC or another type of private network.

    kafka bool

    Allow clients to connect to kafka from the public internet for service nodes that are in a project VPC or another type of private network.

    kafka_connect bool

    Allow clients to connect to kafka_connect from the public internet for service nodes that are in a project VPC or another type of private network.

    kafka_rest bool

    Allow clients to connect to kafka_rest from the public internet for service nodes that are in a project VPC or another type of private network.

    prometheus bool

    Allow clients to connect to prometheus from the public internet for service nodes that are in a project VPC or another type of private network.

    schema_registry bool

    Allow clients to connect to schema_registry from the public internet for service nodes that are in a project VPC or another type of private network.

    kafka Boolean

    Allow clients to connect to kafka from the public internet for service nodes that are in a project VPC or another type of private network.

    kafkaConnect Boolean

    Allow clients to connect to kafka_connect from the public internet for service nodes that are in a project VPC or another type of private network.

    kafkaRest Boolean

    Allow clients to connect to kafka_rest from the public internet for service nodes that are in a project VPC or another type of private network.

    prometheus Boolean

    Allow clients to connect to prometheus from the public internet for service nodes that are in a project VPC or another type of private network.

    schemaRegistry Boolean

    Allow clients to connect to schema_registry from the public internet for service nodes that are in a project VPC or another type of private network.
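
    As a minimal TypeScript sketch (placeholder names throughout), enabling public internet access for the kafka and kafka_rest endpoints could look like this:

    import * as aiven from "@pulumi/aiven";

    // Open the kafka and kafka_rest endpoints to the public internet.
    const kafkaPublic = new aiven.Kafka("kafka-public", {
        project: "my-project",            // placeholder project name
        cloudName: "google-europe-west1",
        plan: "business-4",
        serviceName: "my-kafka",
        kafkaUserConfig: {
            kafkaRest: true,              // run the Kafka-REST service ...
            publicAccess: {
                kafka: true,              // ... and expose kafka publicly,
                kafkaRest: true,          // along with kafka_rest
            },
        },
    });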

    KafkaKafkaUserConfigSchemaRegistryConfig, KafkaKafkaUserConfigSchemaRegistryConfigArgs

    LeaderEligibility bool

    If true, Karapace / Schema Registry on the service nodes can participate in leader election. You may need to disable this when the schemas topic is replicated to a secondary cluster where Karapace / Schema Registry must not participate in leader election. Defaults to true.

    TopicName string

    The durable single-partition topic that acts as the durable log for the data. This topic must be compacted to avoid losing data due to the retention policy. Please note that changing this configuration in an existing Schema Registry / Karapace setup leads to previous schemas becoming inaccessible, data encoded with them potentially unreadable, and the schema ID sequence put out of order. It's only possible to do the switch while Schema Registry / Karapace is disabled. Defaults to _schemas.

    LeaderEligibility bool

    If true, Karapace / Schema Registry on the service nodes can participate in leader election. You may need to disable this when the schemas topic is replicated to a secondary cluster where Karapace / Schema Registry must not participate in leader election. Defaults to true.

    TopicName string

    The durable single-partition topic that acts as the durable log for the data. This topic must be compacted to avoid losing data due to the retention policy. Please note that changing this configuration in an existing Schema Registry / Karapace setup leads to previous schemas becoming inaccessible, data encoded with them potentially unreadable, and the schema ID sequence put out of order. It's only possible to do the switch while Schema Registry / Karapace is disabled. Defaults to _schemas.

    leaderEligibility Boolean

    If true, Karapace / Schema Registry on the service nodes can participate in leader election. You may need to disable this when the schemas topic is replicated to a secondary cluster where Karapace / Schema Registry must not participate in leader election. Defaults to true.

    topicName String

    The durable single-partition topic that acts as the durable log for the data. This topic must be compacted to avoid losing data due to the retention policy. Please note that changing this configuration in an existing Schema Registry / Karapace setup leads to previous schemas becoming inaccessible, data encoded with them potentially unreadable, and the schema ID sequence put out of order. It's only possible to do the switch while Schema Registry / Karapace is disabled. Defaults to _schemas.

    leaderEligibility boolean

    If true, Karapace / Schema Registry on the service nodes can participate in leader election. You may need to disable this when the schemas topic is replicated to a secondary cluster where Karapace / Schema Registry must not participate in leader election. Defaults to true.

    topicName string

    The durable single-partition topic that acts as the durable log for the data. This topic must be compacted to avoid losing data due to the retention policy. Please note that changing this configuration in an existing Schema Registry / Karapace setup leads to previous schemas becoming inaccessible, data encoded with them potentially unreadable, and the schema ID sequence put out of order. It's only possible to do the switch while Schema Registry / Karapace is disabled. Defaults to _schemas.

    leader_eligibility bool

    If true, Karapace / Schema Registry on the service nodes can participate in leader election. You may need to disable this when the schemas topic is replicated to a secondary cluster where Karapace / Schema Registry must not participate in leader election. Defaults to true.

    topic_name str

    The durable single-partition topic that acts as the durable log for the data. This topic must be compacted to avoid losing data due to the retention policy. Please note that changing this configuration in an existing Schema Registry / Karapace setup leads to previous schemas becoming inaccessible, data encoded with them potentially unreadable, and the schema ID sequence put out of order. It's only possible to do the switch while Schema Registry / Karapace is disabled. Defaults to _schemas.

    leaderEligibility Boolean

    If true, Karapace / Schema Registry on the service nodes can participate in leader election. You may need to disable this when the schemas topic is replicated to a secondary cluster where Karapace / Schema Registry must not participate in leader election. Defaults to true.

    topicName String

    The durable single-partition topic that acts as the durable log for the data. This topic must be compacted to avoid losing data due to the retention policy. Please note that changing this configuration in an existing Schema Registry / Karapace setup leads to previous schemas becoming inaccessible, data encoded with them potentially unreadable, and the schema ID sequence put out of order. It's only possible to do the switch while Schema Registry / Karapace is disabled. Defaults to _schemas.
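
    For example, a minimal TypeScript sketch of a secondary service whose Karapace / Schema Registry stays out of leader election; all names are placeholders, and the schemaRegistryConfig field name is inferred from the nested type above:

    import * as aiven from "@pulumi/aiven";

    // Run Schema Registry but keep this service's Karapace out of leader
    // election, e.g. when the schemas topic is replicated from a primary.
    const kafkaReplica = new aiven.Kafka("kafka-replica", {
        project: "my-project",
        cloudName: "google-europe-west1",
        plan: "business-4",
        serviceName: "my-kafka-replica",
        kafkaUserConfig: {
            schemaRegistry: true,
            schemaRegistryConfig: {
                leaderEligibility: false,
                topicName: "_schemas", // the documented default, shown explicitly
            },
        },
    });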

    KafkaServiceIntegration, KafkaServiceIntegrationArgs

    IntegrationType string

    Type of the service integration. The only supported value at the moment is read_replica.

    SourceServiceName string

    Name of the source service.

    IntegrationType string

    Type of the service integration. The only supported value at the moment is read_replica.

    SourceServiceName string

    Name of the source service.

    integrationType String

    Type of the service integration. The only supported value at the moment is read_replica.

    sourceServiceName String

    Name of the source service.

    integrationType string

    Type of the service integration. The only supported value at the moment is read_replica.

    sourceServiceName string

    Name of the source service.

    integration_type str

    Type of the service integration. The only supported value at the moment is read_replica.

    source_service_name str

    Name of the source service.

    integrationType String

    Type of the service integration. The only supported value at the moment is read_replica.

    sourceServiceName String

    Name of the source service.
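
    A minimal TypeScript sketch of a read replica; the source service name is a placeholder for an existing Kafka service in the same project:

    import * as aiven from "@pulumi/aiven";

    // Create this Kafka service as a read replica of an existing service.
    const replica = new aiven.Kafka("kafka-read-replica", {
        project: "my-project",
        cloudName: "google-europe-west1",
        plan: "business-4",
        serviceName: "my-kafka-replica",
        serviceIntegrations: [{
            integrationType: "read_replica",   // only supported value
            sourceServiceName: "source-kafka", // placeholder source service
        }],
    });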

    KafkaTag, KafkaTagArgs

    Key string

    Service tag key

    Value string

    Service tag value

    Key string

    Service tag key

    Value string

    Service tag value

    key String

    Service tag key

    value String

    Service tag value

    key string

    Service tag key

    value string

    Service tag value

    key str

    Service tag key

    value str

    Service tag value

    key String

    Service tag key

    value String

    Service tag value
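
    Tags attach free-form key/value metadata to the service. A minimal TypeScript sketch follows; the tag keys and values are arbitrary examples:

    import * as aiven from "@pulumi/aiven";

    // Attach key/value tags to the service for bookkeeping.
    const kafkaTagged = new aiven.Kafka("kafka-tagged", {
        project: "my-project",
        cloudName: "google-europe-west1",
        plan: "business-4",
        serviceName: "my-kafka",
        tags: [
            { key: "environment", value: "production" },
            { key: "team", value: "data-platform" },
        ],
    });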

    Import

     $ pulumi import aiven:index/kafka:Kafka kafka1 project/service_name
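
    For example, assuming a project named my-project and a Kafka service named my-kafka1, the command would be:

     $ pulumi import aiven:index/kafka:Kafka kafka1 my-project/my-kafka1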
    

    Package Details

    Repository
    Aiven pulumi/pulumi-aiven
    License
    Apache-2.0
    Notes

    This Pulumi package is based on the aiven Terraform Provider.
