# Kafka Resource
The Kafka resource allows the creation and management of Aiven Kafka services.
## Example Usage
```csharp
using Pulumi;
using Aiven = Pulumi.Aiven;

class MyStack : Stack
{
    public MyStack()
    {
        var kafka1 = new Aiven.Kafka("kafka1", new Aiven.KafkaArgs
        {
            // Name of an existing Aiven project; reference an Aiven.Project
            // resource instead to set up an explicit dependency.
            Project = "my-project",
            CloudName = "google-europe-west1",
            Plan = "business-4",
            ServiceName = "my-kafka1",
            MaintenanceWindowDow = "monday",
            MaintenanceWindowTime = "10:00:00",
            KafkaUserConfig = new Aiven.Inputs.KafkaKafkaUserConfigArgs
            {
                KafkaRest = "true",
                KafkaConnect = "true",
                SchemaRegistry = "true",
                KafkaVersion = "2.4",
                Kafka = new Aiven.Inputs.KafkaKafkaUserConfigKafkaArgs
                {
                    GroupMaxSessionTimeoutMs = "70000",
                    LogRetentionBytes = "1000000000",
                },
                PublicAccess = new Aiven.Inputs.KafkaKafkaUserConfigPublicAccessArgs
                {
                    KafkaRest = "true",
                    KafkaConnect = "true",
                },
            },
        });
    }
}
```
```go
package main

import (
	"github.com/pulumi/pulumi-aiven/sdk/v3/go/aiven"
	"github.com/pulumi/pulumi/sdk/v2/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		_, err := aiven.NewKafka(ctx, "kafka1", &aiven.KafkaArgs{
			// Name of an existing Aiven project; reference an aiven.Project
			// resource instead to set up an explicit dependency.
			Project:               pulumi.String("my-project"),
			CloudName:             pulumi.String("google-europe-west1"),
			Plan:                  pulumi.String("business-4"),
			ServiceName:           pulumi.String("my-kafka1"),
			MaintenanceWindowDow:  pulumi.String("monday"),
			MaintenanceWindowTime: pulumi.String("10:00:00"),
			KafkaUserConfig: &aiven.KafkaKafkaUserConfigArgs{
				KafkaRest:      pulumi.String("true"),
				KafkaConnect:   pulumi.String("true"),
				SchemaRegistry: pulumi.String("true"),
				KafkaVersion:   pulumi.String("2.4"),
				Kafka: &aiven.KafkaKafkaUserConfigKafkaArgs{
					GroupMaxSessionTimeoutMs: pulumi.String("70000"),
					LogRetentionBytes:        pulumi.String("1000000000"),
				},
				PublicAccess: &aiven.KafkaKafkaUserConfigPublicAccessArgs{
					KafkaRest:    pulumi.String("true"),
					KafkaConnect: pulumi.String("true"),
				},
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}
```
```python
import pulumi
import pulumi_aiven as aiven

kafka1 = aiven.Kafka("kafka1",
    # Name of an existing Aiven project; reference an aiven.Project
    # resource instead to set up an explicit dependency.
    project="my-project",
    cloud_name="google-europe-west1",
    plan="business-4",
    service_name="my-kafka1",
    maintenance_window_dow="monday",
    maintenance_window_time="10:00:00",
    kafka_user_config=aiven.KafkaKafkaUserConfigArgs(
        kafka_rest="true",
        kafka_connect="true",
        schema_registry="true",
        kafka_version="2.4",
        kafka=aiven.KafkaKafkaUserConfigKafkaArgs(
            group_max_session_timeout_ms="70000",
            log_retention_bytes="1000000000",
        ),
        public_access=aiven.KafkaKafkaUserConfigPublicAccessArgs(
            kafka_rest="true",
            kafka_connect="true",
        ),
    ))
```
```typescript
import * as pulumi from "@pulumi/pulumi";
import * as aiven from "@pulumi/aiven";

const kafka1 = new aiven.Kafka("kafka1", {
    // Name of an existing Aiven project; reference an aiven.Project
    // resource instead to set up an explicit dependency.
    project: "my-project",
    cloudName: "google-europe-west1",
    plan: "business-4",
    serviceName: "my-kafka1",
    maintenanceWindowDow: "monday",
    maintenanceWindowTime: "10:00:00",
    kafkaUserConfig: {
        kafkaRest: "true",
        kafkaConnect: "true",
        schemaRegistry: "true",
        kafkaVersion: "2.4",
        kafka: {
            groupMaxSessionTimeoutMs: "70000",
            logRetentionBytes: "1000000000",
        },
        publicAccess: {
            kafkaRest: "true",
            kafkaConnect: "true",
        },
    },
});
```
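The project name in the examples above is a placeholder. Where the property descriptions below say to refer to the project "as shown in the example above", the intent is to reference a project resource so Pulumi creates the project before the service that depends on it. A minimal TypeScript sketch of that pattern, assuming the project is managed in the same stack (the name my-project is illustrative):

```typescript
import * as aiven from "@pulumi/aiven";

// Manage the project in the same stack and reference its name, so Pulumi
// orders project creation before the Kafka service.
const pr1 = new aiven.Project("pr1", { project: "my-project" });

const kafka = new aiven.Kafka("kafka", {
    project: pr1.project, // output reference establishes the dependency
    cloudName: "google-europe-west1",
    plan: "business-4",
    serviceName: "my-kafka",
});
```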
## Create a Kafka Resource
```typescript
new Kafka(name: string, args: KafkaArgs, opts?: CustomResourceOptions);
```

```python
def Kafka(resource_name: str,
          opts: Optional[ResourceOptions] = None,
          cloud_name: Optional[str] = None,
          default_acl: Optional[bool] = None,
          kafka: Optional[KafkaKafkaArgs] = None,
          kafka_user_config: Optional[KafkaKafkaUserConfigArgs] = None,
          maintenance_window_dow: Optional[str] = None,
          maintenance_window_time: Optional[str] = None,
          plan: Optional[str] = None,
          project: Optional[str] = None,
          project_vpc_id: Optional[str] = None,
          service_integrations: Optional[Sequence[KafkaServiceIntegrationArgs]] = None,
          service_name: Optional[str] = None,
          termination_protection: Optional[bool] = None)
```

```go
func NewKafka(ctx *Context, name string, args KafkaArgs, opts ...ResourceOption) (*Kafka, error)
```

```csharp
public Kafka(string name, KafkaArgs args, CustomResourceOptions? opts = null)
```
TypeScript:
- name (string): The unique name of the resource.
- args (KafkaArgs): The arguments used to populate this resource's properties.
- opts (CustomResourceOptions): Bag of options to control this resource's behavior.

Python:
- resource_name (str): The unique name of the resource.
- opts (ResourceOptions): A bag of options that control this resource's behavior.

Go:
- ctx (Context): Context object for the current deployment.
- name (string): The unique name of the resource.
- args (KafkaArgs): The arguments used to populate this resource's properties.
- opts (ResourceOption): Bag of options to control this resource's behavior.

C#:
- name (string): The unique name of the resource.
- args (KafkaArgs): The arguments used to populate this resource's properties.
- opts (CustomResourceOptions): Bag of options to control this resource's behavior.
## Kafka Resource Properties
To learn more about resource properties and how to use them, see Inputs and Outputs in the Programming Model docs.
### Inputs
The Kafka resource accepts the following input properties:
C#:
- Project (string): Identifies the project the service belongs to. To set up a proper dependency between the project and the service, refer to the project as shown in the example above. The project cannot be changed later without destroying and re-creating the service.
- ServiceName (string): Specifies the actual name of the service. The name cannot be changed later without destroying and re-creating the service, so pick the name based on the intended service usage rather than current attributes.
- CloudName (string): Defines the cloud provider and region where the service is hosted. This can be changed freely after the service is created, but changing the value triggers a potentially lengthy migration process for the service. The format is the cloud provider name (aws, azure, do, google, upcloud, etc.), a dash, and the cloud-provider-specific region name; these are documented in each cloud provider's own support articles.
- DefaultAcl (bool): Create a default wildcard Kafka ACL.
- KafkaServer (KafkaKafkaArgs): Enable kafka.
- KafkaUserConfig (KafkaKafkaUserConfigArgs): Defines Kafka-specific additional configuration options (see KafkaKafkaUserConfig under Supporting Types).
- MaintenanceWindowDow (string): Day of week when maintenance operations should be performed: monday, tuesday, wednesday, etc.
- MaintenanceWindowTime (string): Time of day when maintenance operations should be performed, as UTC time in HH:mm:ss format.
- Plan (string): Defines what kind of computing resources are allocated for the service. It can be changed after creation, though there are some restrictions when moving to a smaller plan: the new plan must have enough disk space to store all current data, and switching to a plan with fewer nodes might not be supported. The basic plan names are hobbyist, startup-x, business-x and premium-x, where x is (roughly) the amount of memory on each node (other attributes such as the number of CPUs and the amount of disk space also vary, but the naming is based on memory). The exact options can be seen in the Aiven web console's Create Service dialog.
- ProjectVpcId (string): Optionally specifies the VPC the service should run in. If the value is not set, the service does not run inside a VPC. When set, the value should be given as a reference, as shown above, to set up dependencies correctly, and the VPC must be in the same cloud and region as the service itself. The service can be freely moved into and out of a VPC after creation, but doing so triggers a migration to new servers, so the operation can take a significant amount of time to complete if the service has a lot of data.
- ServiceIntegrations (List<KafkaServiceIntegrationArgs>): Service integrations to specify when creating the service. Not applied after initial service creation.
- TerminationProtection (bool): Prevents the service from being deleted. It is recommended to set this to true for all production services to prevent unintentional deletion. This does not shield against deleting databases or topics, but for services with backups much of the content can at least be restored from backup if it is accidentally deleted.
Go:
- Project (string): Identifies the project the service belongs to. To set up a proper dependency between the project and the service, refer to the project as shown in the example above. The project cannot be changed later without destroying and re-creating the service.
- ServiceName (string): Specifies the actual name of the service. The name cannot be changed later without destroying and re-creating the service, so pick the name based on the intended service usage rather than current attributes.
- CloudName (string): Defines the cloud provider and region where the service is hosted. This can be changed freely after the service is created, but changing the value triggers a potentially lengthy migration process for the service. The format is the cloud provider name (aws, azure, do, google, upcloud, etc.), a dash, and the cloud-provider-specific region name; these are documented in each cloud provider's own support articles.
- DefaultAcl (bool): Create a default wildcard Kafka ACL.
- Kafka (KafkaKafka): Enable kafka.
- KafkaUserConfig (KafkaKafkaUserConfig): Defines Kafka-specific additional configuration options (see KafkaKafkaUserConfig under Supporting Types).
- MaintenanceWindowDow (string): Day of week when maintenance operations should be performed: monday, tuesday, wednesday, etc.
- MaintenanceWindowTime (string): Time of day when maintenance operations should be performed, as UTC time in HH:mm:ss format.
- Plan (string): Defines what kind of computing resources are allocated for the service. It can be changed after creation, though there are some restrictions when moving to a smaller plan: the new plan must have enough disk space to store all current data, and switching to a plan with fewer nodes might not be supported. The basic plan names are hobbyist, startup-x, business-x and premium-x, where x is (roughly) the amount of memory on each node (other attributes such as the number of CPUs and the amount of disk space also vary, but the naming is based on memory). The exact options can be seen in the Aiven web console's Create Service dialog.
- ProjectVpcId (string): Optionally specifies the VPC the service should run in. If the value is not set, the service does not run inside a VPC. When set, the value should be given as a reference, as shown above, to set up dependencies correctly, and the VPC must be in the same cloud and region as the service itself. The service can be freely moved into and out of a VPC after creation, but doing so triggers a migration to new servers, so the operation can take a significant amount of time to complete if the service has a lot of data.
- ServiceIntegrations ([]KafkaServiceIntegration): Service integrations to specify when creating the service. Not applied after initial service creation.
- TerminationProtection (bool): Prevents the service from being deleted. It is recommended to set this to true for all production services to prevent unintentional deletion. This does not shield against deleting databases or topics, but for services with backups much of the content can at least be restored from backup if it is accidentally deleted.
TypeScript:
- project (string): Identifies the project the service belongs to. To set up a proper dependency between the project and the service, refer to the project as shown in the example above. The project cannot be changed later without destroying and re-creating the service.
- serviceName (string): Specifies the actual name of the service. The name cannot be changed later without destroying and re-creating the service, so pick the name based on the intended service usage rather than current attributes.
- cloudName (string): Defines the cloud provider and region where the service is hosted. This can be changed freely after the service is created, but changing the value triggers a potentially lengthy migration process for the service. The format is the cloud provider name (aws, azure, do, google, upcloud, etc.), a dash, and the cloud-provider-specific region name; these are documented in each cloud provider's own support articles.
- defaultAcl (boolean): Create a default wildcard Kafka ACL.
- kafka (KafkaKafka): Enable kafka.
- kafkaUserConfig (KafkaKafkaUserConfig): Defines Kafka-specific additional configuration options (see KafkaKafkaUserConfig under Supporting Types).
- maintenanceWindowDow (string): Day of week when maintenance operations should be performed: monday, tuesday, wednesday, etc.
- maintenanceWindowTime (string): Time of day when maintenance operations should be performed, as UTC time in HH:mm:ss format.
- plan (string): Defines what kind of computing resources are allocated for the service. It can be changed after creation, though there are some restrictions when moving to a smaller plan: the new plan must have enough disk space to store all current data, and switching to a plan with fewer nodes might not be supported. The basic plan names are hobbyist, startup-x, business-x and premium-x, where x is (roughly) the amount of memory on each node (other attributes such as the number of CPUs and the amount of disk space also vary, but the naming is based on memory). The exact options can be seen in the Aiven web console's Create Service dialog.
- projectVpcId (string): Optionally specifies the VPC the service should run in. If the value is not set, the service does not run inside a VPC. When set, the value should be given as a reference, as shown above, to set up dependencies correctly, and the VPC must be in the same cloud and region as the service itself. The service can be freely moved into and out of a VPC after creation, but doing so triggers a migration to new servers, so the operation can take a significant amount of time to complete if the service has a lot of data.
- serviceIntegrations (KafkaServiceIntegration[]): Service integrations to specify when creating the service. Not applied after initial service creation.
- terminationProtection (boolean): Prevents the service from being deleted. It is recommended to set this to true for all production services to prevent unintentional deletion. This does not shield against deleting databases or topics, but for services with backups much of the content can at least be restored from backup if it is accidentally deleted.
Python:
- project (str): Identifies the project the service belongs to. To set up a proper dependency between the project and the service, refer to the project as shown in the example above. The project cannot be changed later without destroying and re-creating the service.
- service_name (str): Specifies the actual name of the service. The name cannot be changed later without destroying and re-creating the service, so pick the name based on the intended service usage rather than current attributes.
- cloud_name (str): Defines the cloud provider and region where the service is hosted. This can be changed freely after the service is created, but changing the value triggers a potentially lengthy migration process for the service. The format is the cloud provider name (aws, azure, do, google, upcloud, etc.), a dash, and the cloud-provider-specific region name; these are documented in each cloud provider's own support articles.
- default_acl (bool): Create a default wildcard Kafka ACL.
- kafka (KafkaKafkaArgs): Enable kafka.
- kafka_user_config (KafkaKafkaUserConfigArgs): Defines Kafka-specific additional configuration options (see KafkaKafkaUserConfig under Supporting Types).
- maintenance_window_dow (str): Day of week when maintenance operations should be performed: monday, tuesday, wednesday, etc.
- maintenance_window_time (str): Time of day when maintenance operations should be performed, as UTC time in HH:mm:ss format.
- plan (str): Defines what kind of computing resources are allocated for the service. It can be changed after creation, though there are some restrictions when moving to a smaller plan: the new plan must have enough disk space to store all current data, and switching to a plan with fewer nodes might not be supported. The basic plan names are hobbyist, startup-x, business-x and premium-x, where x is (roughly) the amount of memory on each node (other attributes such as the number of CPUs and the amount of disk space also vary, but the naming is based on memory). The exact options can be seen in the Aiven web console's Create Service dialog.
- project_vpc_id (str): Optionally specifies the VPC the service should run in. If the value is not set, the service does not run inside a VPC. When set, the value should be given as a reference, as shown above, to set up dependencies correctly, and the VPC must be in the same cloud and region as the service itself. The service can be freely moved into and out of a VPC after creation, but doing so triggers a migration to new servers, so the operation can take a significant amount of time to complete if the service has a lot of data.
- service_integrations (Sequence[KafkaServiceIntegrationArgs]): Service integrations to specify when creating the service. Not applied after initial service creation.
- termination_protection (bool): Prevents the service from being deleted. It is recommended to set this to true for all production services to prevent unintentional deletion. This does not shield against deleting databases or topics, but for services with backups much of the content can at least be restored from backup if it is accidentally deleted.
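For instance, a production service would typically enable termination protection at creation time. A minimal TypeScript sketch (the project and service names are illustrative):

```typescript
import * as aiven from "@pulumi/aiven";

// Termination protection guards the service itself against an accidental
// `pulumi destroy`; it does not protect individual topics or databases.
const prodKafka = new aiven.Kafka("prod-kafka", {
    project: "my-project", // assumed existing Aiven project
    cloudName: "google-europe-west1",
    plan: "business-4",
    serviceName: "prod-kafka",
    terminationProtection: true,
});
```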
### Outputs
All input properties are implicitly available as output properties. Additionally, the Kafka resource produces the following output properties:
C#:
- Components (List<KafkaComponent>): Service component information objects.
- Id (string): The provider-assigned unique ID for this managed resource.
- ServiceHost (string): Kafka hostname.
- ServicePassword (string): Password used for connecting to the Kafka service, if applicable.
- ServicePort (int): Kafka port.
- ServiceType (string): Aiven internal service type code.
- ServiceUri (string): URI for connecting to the Kafka service.
- ServiceUsername (string): Username used for connecting to the Kafka service, if applicable.
- State (string): Service state.
Go:
- Components ([]KafkaComponent): Service component information objects.
- Id (string): The provider-assigned unique ID for this managed resource.
- ServiceHost (string): Kafka hostname.
- ServicePassword (string): Password used for connecting to the Kafka service, if applicable.
- ServicePort (int): Kafka port.
- ServiceType (string): Aiven internal service type code.
- ServiceUri (string): URI for connecting to the Kafka service.
- ServiceUsername (string): Username used for connecting to the Kafka service, if applicable.
- State (string): Service state.
TypeScript:
- components (KafkaComponent[]): Service component information objects.
- id (string): The provider-assigned unique ID for this managed resource.
- serviceHost (string): Kafka hostname.
- servicePassword (string): Password used for connecting to the Kafka service, if applicable.
- servicePort (number): Kafka port.
- serviceType (string): Aiven internal service type code.
- serviceUri (string): URI for connecting to the Kafka service.
- serviceUsername (string): Username used for connecting to the Kafka service, if applicable.
- state (string): Service state.
Python:
- components (Sequence[KafkaComponent]): Service component information objects.
- id (str): The provider-assigned unique ID for this managed resource.
- service_host (str): Kafka hostname.
- service_password (str): Password used for connecting to the Kafka service, if applicable.
- service_port (int): Kafka port.
- service_type (str): Aiven internal service type code.
- service_uri (str): URI for connecting to the Kafka service.
- service_username (str): Username used for connecting to the Kafka service, if applicable.
- state (str): Service state.
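The connection-related outputs can be exported from the stack or fed into other resources. A minimal TypeScript sketch, assuming a kafka1 resource like the one in the example above:

```typescript
import * as pulumi from "@pulumi/pulumi";
import * as aiven from "@pulumi/aiven";

const kafka1 = new aiven.Kafka("kafka1", {
    project: "my-project", // assumed existing Aiven project
    cloudName: "google-europe-west1",
    plan: "business-4",
    serviceName: "my-kafka1",
});

// All inputs are also available as outputs; these are the extra ones.
export const kafkaUri = kafka1.serviceUri;   // URI for connecting to Kafka
export const kafkaHost = kafka1.serviceHost; // Kafka hostname
export const kafkaPort = kafka1.servicePort; // Kafka port
// Wrap the password so it is stored encrypted in the stack state.
export const kafkaPassword = pulumi.secret(kafka1.servicePassword);
```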
## Look up an Existing Kafka Resource
Get an existing Kafka resource’s state with the given name, ID, and optional extra properties used to qualify the lookup.
```typescript
public static get(name: string, id: Input<ID>, state?: KafkaState, opts?: CustomResourceOptions): Kafka
```

```python
@staticmethod
def get(resource_name: str,
        id: str,
        opts: Optional[ResourceOptions] = None,
        cloud_name: Optional[str] = None,
        components: Optional[Sequence[KafkaComponentArgs]] = None,
        default_acl: Optional[bool] = None,
        kafka: Optional[KafkaKafkaArgs] = None,
        kafka_user_config: Optional[KafkaKafkaUserConfigArgs] = None,
        maintenance_window_dow: Optional[str] = None,
        maintenance_window_time: Optional[str] = None,
        plan: Optional[str] = None,
        project: Optional[str] = None,
        project_vpc_id: Optional[str] = None,
        service_host: Optional[str] = None,
        service_integrations: Optional[Sequence[KafkaServiceIntegrationArgs]] = None,
        service_name: Optional[str] = None,
        service_password: Optional[str] = None,
        service_port: Optional[int] = None,
        service_type: Optional[str] = None,
        service_uri: Optional[str] = None,
        service_username: Optional[str] = None,
        state: Optional[str] = None,
        termination_protection: Optional[bool] = None) -> Kafka
```

```go
func GetKafka(ctx *Context, name string, id IDInput, state *KafkaState, opts ...ResourceOption) (*Kafka, error)
```

```csharp
public static Kafka Get(string name, Input<string> id, KafkaState? state, CustomResourceOptions? opts = null)
```
TypeScript:
- name: The unique name of the resulting resource.
- id: The unique provider ID of the resource to look up.
- state: Any extra arguments used during the lookup.
- opts: A bag of options that control this resource's behavior.

Python:
- resource_name: The unique name of the resulting resource.
- id: The unique provider ID of the resource to look up.

Go:
- name: The unique name of the resulting resource.
- id: The unique provider ID of the resource to look up.
- state: Any extra arguments used during the lookup.
- opts: A bag of options that control this resource's behavior.

C#:
- name: The unique name of the resulting resource.
- id: The unique provider ID of the resource to look up.
- state: Any extra arguments used during the lookup.
- opts: A bag of options that control this resource's behavior.
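A minimal TypeScript sketch of such a lookup; the "project/service-name" ID format shown here is an assumption, so use the ID your provider actually reports for the service:

```typescript
import * as aiven from "@pulumi/aiven";

// Adopt the state of an existing service under a new logical name.
// The ID format ("<project>/<service-name>") is illustrative.
const existing = aiven.Kafka.get("existing-kafka", "my-project/my-kafka1");

export const existingUri = existing.serviceUri;
```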
The following state arguments are supported:
C#:
- CloudName (string): Defines the cloud provider and region where the service is hosted. This can be changed freely after the service is created, but changing the value triggers a potentially lengthy migration process for the service. The format is the cloud provider name (aws, azure, do, google, upcloud, etc.), a dash, and the cloud-provider-specific region name; these are documented in each cloud provider's own support articles.
- Components (List<KafkaComponentArgs>): Service component information objects.
- DefaultAcl (bool): Create a default wildcard Kafka ACL.
- KafkaServer (KafkaKafkaArgs): Enable kafka.
- KafkaUserConfig (KafkaKafkaUserConfigArgs): Defines Kafka-specific additional configuration options (see KafkaKafkaUserConfig under Supporting Types).
- MaintenanceWindowDow (string): Day of week when maintenance operations should be performed: monday, tuesday, wednesday, etc.
- MaintenanceWindowTime (string): Time of day when maintenance operations should be performed, as UTC time in HH:mm:ss format.
- Plan (string): Defines what kind of computing resources are allocated for the service. It can be changed after creation, though there are some restrictions when moving to a smaller plan: the new plan must have enough disk space to store all current data, and switching to a plan with fewer nodes might not be supported. The basic plan names are hobbyist, startup-x, business-x and premium-x, where x is (roughly) the amount of memory on each node (other attributes such as the number of CPUs and the amount of disk space also vary, but the naming is based on memory). The exact options can be seen in the Aiven web console's Create Service dialog.
- Project (string): Identifies the project the service belongs to. To set up a proper dependency between the project and the service, refer to the project as shown in the example above. The project cannot be changed later without destroying and re-creating the service.
- ProjectVpcId (string): Optionally specifies the VPC the service should run in. If the value is not set, the service does not run inside a VPC. When set, the value should be given as a reference, as shown above, to set up dependencies correctly, and the VPC must be in the same cloud and region as the service itself. The service can be freely moved into and out of a VPC after creation, but doing so triggers a migration to new servers, so the operation can take a significant amount of time to complete if the service has a lot of data.
- ServiceHost (string): Kafka hostname.
- ServiceIntegrations (List<KafkaServiceIntegrationArgs>): Service integrations to specify when creating the service. Not applied after initial service creation.
- ServiceName (string): Specifies the actual name of the service. The name cannot be changed later without destroying and re-creating the service, so pick the name based on the intended service usage rather than current attributes.
- ServicePassword (string): Password used for connecting to the Kafka service, if applicable.
- ServicePort (int): Kafka port.
- ServiceType (string): Aiven internal service type code.
- ServiceUri (string): URI for connecting to the Kafka service.
- ServiceUsername (string): Username used for connecting to the Kafka service, if applicable.
- State (string): Service state.
- TerminationProtection (bool): Prevents the service from being deleted. It is recommended to set this to true for all production services to prevent unintentional deletion. This does not shield against deleting databases or topics, but for services with backups much of the content can at least be restored from backup if it is accidentally deleted.
Go:
- CloudName (string): Defines the cloud provider and region where the service is hosted. This can be changed freely after the service is created, but changing the value triggers a potentially lengthy migration process for the service. The format is the cloud provider name (aws, azure, do, google, upcloud, etc.), a dash, and the cloud-provider-specific region name; these are documented in each cloud provider's own support articles.
- Components ([]KafkaComponent): Service component information objects.
- DefaultAcl (bool): Create a default wildcard Kafka ACL.
- Kafka (KafkaKafka): Enable kafka.
- KafkaUserConfig (KafkaKafkaUserConfig): Defines Kafka-specific additional configuration options (see KafkaKafkaUserConfig under Supporting Types).
- MaintenanceWindowDow (string): Day of week when maintenance operations should be performed: monday, tuesday, wednesday, etc.
- MaintenanceWindowTime (string): Time of day when maintenance operations should be performed, as UTC time in HH:mm:ss format.
- Plan (string): Defines what kind of computing resources are allocated for the service. It can be changed after creation, though there are some restrictions when moving to a smaller plan: the new plan must have enough disk space to store all current data, and switching to a plan with fewer nodes might not be supported. The basic plan names are hobbyist, startup-x, business-x and premium-x, where x is (roughly) the amount of memory on each node (other attributes such as the number of CPUs and the amount of disk space also vary, but the naming is based on memory). The exact options can be seen in the Aiven web console's Create Service dialog.
- Project (string): Identifies the project the service belongs to. To set up a proper dependency between the project and the service, refer to the project as shown in the example above. The project cannot be changed later without destroying and re-creating the service.
- ProjectVpcId (string): Optionally specifies the VPC the service should run in. If the value is not set, the service does not run inside a VPC. When set, the value should be given as a reference, as shown above, to set up dependencies correctly, and the VPC must be in the same cloud and region as the service itself. The service can be freely moved into and out of a VPC after creation, but doing so triggers a migration to new servers, so the operation can take a significant amount of time to complete if the service has a lot of data.
- ServiceHost (string): Kafka hostname.
- ServiceIntegrations ([]KafkaServiceIntegration): Service integrations to specify when creating the service. Not applied after initial service creation.
- ServiceName (string): Specifies the actual name of the service. The name cannot be changed later without destroying and re-creating the service, so pick the name based on the intended service usage rather than current attributes.
- ServicePassword (string): Password used for connecting to the Kafka service, if applicable.
- ServicePort (int): Kafka port.
- ServiceType (string): Aiven internal service type code.
- ServiceUri (string): URI for connecting to the Kafka service.
- ServiceUsername (string): Username used for connecting to the Kafka service, if applicable.
- State (string): Service state.
- TerminationProtection (bool): Prevents the service from being deleted. It is recommended to set this to true for all production services to prevent unintentional deletion. This does not shield against deleting databases or topics, but for services with backups much of the content can at least be restored from backup if it is accidentally deleted.
TypeScript:
- cloudName (string): Defines the cloud provider and region where the service is hosted. This can be changed freely after the service is created, but changing the value triggers a potentially lengthy migration process for the service. The format is the cloud provider name (aws, azure, do, google, upcloud, etc.), a dash, and the cloud-provider-specific region name; these are documented in each cloud provider's own support articles.
- components (KafkaComponent[]): Service component information objects.
- defaultAcl (boolean): Create a default wildcard Kafka ACL.
- kafka (KafkaKafka): Enable kafka.
- kafkaUserConfig (KafkaKafkaUserConfig): Defines Kafka-specific additional configuration options (see KafkaKafkaUserConfig under Supporting Types).
- maintenanceWindowDow (string): Day of week when maintenance operations should be performed: monday, tuesday, wednesday, etc.
- maintenanceWindowTime (string): Time of day when maintenance operations should be performed, as UTC time in HH:mm:ss format.
- plan (string): Defines what kind of computing resources are allocated for the service. It can be changed after creation, though there are some restrictions when moving to a smaller plan: the new plan must have enough disk space to store all current data, and switching to a plan with fewer nodes might not be supported. The basic plan names are hobbyist, startup-x, business-x and premium-x, where x is (roughly) the amount of memory on each node (other attributes such as the number of CPUs and the amount of disk space also vary, but the naming is based on memory). The exact options can be seen in the Aiven web console's Create Service dialog.
- project (string): Identifies the project the service belongs to. To set up a proper dependency between the project and the service, refer to the project as shown in the example above. The project cannot be changed later without destroying and re-creating the service.
- projectVpcId (string): Optionally specifies the VPC the service should run in. If the value is not set, the service does not run inside a VPC. When set, the value should be given as a reference, as shown above, to set up dependencies correctly, and the VPC must be in the same cloud and region as the service itself. The service can be freely moved into and out of a VPC after creation, but doing so triggers a migration to new servers, so the operation can take a significant amount of time to complete if the service has a lot of data.
- serviceHost (string): Kafka hostname.
- serviceIntegrations (KafkaServiceIntegration[]): Service integrations to specify when creating the service. Not applied after initial service creation.
- serviceName (string): Specifies the actual name of the service. The name cannot be changed later without destroying and re-creating the service, so pick the name based on the intended service usage rather than current attributes.
- servicePassword (string): Password used for connecting to the Kafka service, if applicable.
- servicePort (number): Kafka port.
- serviceType (string): Aiven internal service type code.
- serviceUri (string): URI for connecting to the Kafka service.
- serviceUsername (string): Username used for connecting to the Kafka service, if applicable.
- state (string): Service state.
- terminationProtection (boolean): Prevents the service from being deleted. It is recommended to set this to true for all production services to prevent unintentional deletion. This does not shield against deleting databases or topics, but for services with backups much of the content can at least be restored from backup if it is accidentally deleted.
Python:
- cloud_name (str): Defines the cloud provider and region where the service is hosted. This can be changed freely after the service is created, but changing the value triggers a potentially lengthy migration process for the service. The format is the cloud provider name (aws, azure, do, google, upcloud, etc.), a dash, and the cloud-provider-specific region name; these are documented in each cloud provider's own support articles.
- components (Sequence[KafkaComponentArgs]): Service component information objects.
- default_acl (bool): Create a default wildcard Kafka ACL.
- kafka (KafkaKafkaArgs): Enable kafka.
- kafka_user_config (KafkaKafkaUserConfigArgs): Defines Kafka-specific additional configuration options (see KafkaKafkaUserConfig under Supporting Types).
- maintenance_window_dow (str): Day of week when maintenance operations should be performed: monday, tuesday, wednesday, etc.
- maintenance_window_time (str): Time of day when maintenance operations should be performed, as UTC time in HH:mm:ss format.
- plan (str): Defines what kind of computing resources are allocated for the service. It can be changed after creation, though there are some restrictions when moving to a smaller plan: the new plan must have enough disk space to store all current data, and switching to a plan with fewer nodes might not be supported. The basic plan names are hobbyist, startup-x, business-x and premium-x, where x is (roughly) the amount of memory on each node (other attributes such as the number of CPUs and the amount of disk space also vary, but the naming is based on memory). The exact options can be seen in the Aiven web console's Create Service dialog.
- project (str): Identifies the project the service belongs to. To set up a proper dependency between the project and the service, refer to the project as shown in the example above. The project cannot be changed later without destroying and re-creating the service.
- project_vpc_id (str): Optionally specifies the VPC the service should run in. If the value is not set, the service does not run inside a VPC. When set, the value should be given as a reference, as shown above, to set up dependencies correctly, and the VPC must be in the same cloud and region as the service itself. The service can be freely moved into and out of a VPC after creation, but doing so triggers a migration to new servers, so the operation can take a significant amount of time to complete if the service has a lot of data.
- service_host (str): Kafka hostname.
- service_integrations (Sequence[KafkaServiceIntegrationArgs]): Service integrations to specify when creating the service. Not applied after initial service creation.
- service_name (str): Specifies the actual name of the service. The name cannot be changed later without destroying and re-creating the service, so pick the name based on the intended service usage rather than current attributes.
- service_password (str): Password used for connecting to the Kafka service, if applicable.
- service_port (int): Kafka port.
- service_type (str): Aiven internal service type code.
- service_uri (str): URI for connecting to the Kafka service.
- service_username (str): Username used for connecting to the Kafka service, if applicable.
- state (str): Service state.
- termination_protection (bool): Prevents the service from being deleted. It is recommended to set this to true for all production services to prevent unintentional deletion. This does not shield against deleting databases or topics, but for services with backups much of the content can at least be restored from backup if it is accidentally deleted.
## Supporting Types
### KafkaComponent
### KafkaKafka
C#:
- AccessCert (string): The Kafka client certificate.
- AccessKey (string): The Kafka client certificate key.
- ConnectUri (string): The Kafka Connect URI, if any.
- RestUri (string): The Kafka REST URI, if any.
- SchemaRegistryUri (string): The Schema Registry URI, if any.
Go:
- AccessCert (string): The Kafka client certificate.
- AccessKey (string): The Kafka client certificate key.
- ConnectUri (string): The Kafka Connect URI, if any.
- RestUri (string): The Kafka REST URI, if any.
- SchemaRegistryUri (string): The Schema Registry URI, if any.
TypeScript:
- accessCert (string): The Kafka client certificate.
- accessKey (string): The Kafka client certificate key.
- connectUri (string): The Kafka Connect URI, if any.
- restUri (string): The Kafka REST URI, if any.
- schemaRegistryUri (string): The Schema Registry URI, if any.
Python:
- access_cert (str): The Kafka client certificate.
- access_key (str): The Kafka client certificate key.
- connect_uri (str): The Kafka Connect URI, if any.
- rest_uri (str): The Kafka REST URI, if any.
- schema_registry_uri (str): The Schema Registry URI, if any.
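These values hang off the service's kafka output property. A sketch of pulling the client credentials out of it in TypeScript, with kafka1 defined as in the example above:

```typescript
import * as pulumi from "@pulumi/pulumi";
import * as aiven from "@pulumi/aiven";

const kafka1 = new aiven.Kafka("kafka1", {
    project: "my-project", // assumed existing Aiven project
    cloudName: "google-europe-west1",
    plan: "business-4",
    serviceName: "my-kafka1",
});

// The client certificate and key authenticate TLS client connections;
// keep both encrypted in the stack state.
export const accessCert = pulumi.secret(kafka1.kafka.apply(k => k.accessCert));
export const accessKey = pulumi.secret(kafka1.kafka.apply(k => k.accessKey));
```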
### KafkaKafkaUserConfig
C#:
- CustomDomain (string): Serve the web frontend using a custom CNAME pointing to the Aiven DNS name.
- IpFilters (List<string>): Allow incoming connections from a CIDR address block, e.g. '10.20.0.0/16'.
- Kafka (KafkaKafkaUserConfigKafkaArgs): Enable kafka.
- KafkaAuthenticationMethods (KafkaKafkaUserConfigKafkaAuthenticationMethodsArgs): Kafka authentication methods.
- KafkaConnect (string): Enable kafka_connect.
- KafkaConnectConfig (KafkaKafkaUserConfigKafkaConnectConfigArgs): Kafka Connect configuration values.
- KafkaRest (string): Enable kafka_rest.
- KafkaRestConfig (KafkaKafkaUserConfigKafkaRestConfigArgs): Kafka-REST configuration.
- KafkaVersion (string): Kafka major version.
- PrivateAccess (KafkaKafkaUserConfigPrivateAccessArgs): Allow access to selected service ports from private networks.
- PrivatelinkAccess (KafkaKafkaUserConfigPrivatelinkAccessArgs): Allow access to selected service components through Privatelink.
- PublicAccess (KafkaKafkaUserConfigPublicAccessArgs): Allow access to selected service ports from the public internet.
- SchemaRegistry (string): Enable the Schema-Registry service.
- SchemaRegistryConfig (KafkaKafkaUserConfigSchemaRegistryConfigArgs): Schema Registry configuration.
Go:
- CustomDomain (string): Serve the web frontend using a custom CNAME pointing to the Aiven DNS name.
- IpFilters ([]string): Allow incoming connections from a CIDR address block, e.g. '10.20.0.0/16'.
- Kafka (KafkaKafkaUserConfigKafka): Enable kafka.
- KafkaAuthenticationMethods (KafkaKafkaUserConfigKafkaAuthenticationMethods): Kafka authentication methods.
- KafkaConnect (string): Enable kafka_connect.
- KafkaConnectConfig (KafkaKafkaUserConfigKafkaConnectConfig): Kafka Connect configuration values.
- KafkaRest (string): Enable kafka_rest.
- KafkaRestConfig (KafkaKafkaUserConfigKafkaRestConfig): Kafka-REST configuration.
- KafkaVersion (string): Kafka major version.
- PrivateAccess (KafkaKafkaUserConfigPrivateAccess): Allow access to selected service ports from private networks.
- PrivatelinkAccess (KafkaKafkaUserConfigPrivatelinkAccess): Allow access to selected service components through Privatelink.
- PublicAccess (KafkaKafkaUserConfigPublicAccess): Allow access to selected service ports from the public internet.
- SchemaRegistry (string): Enable the Schema-Registry service.
- SchemaRegistryConfig (KafkaKafkaUserConfigSchemaRegistryConfig): Schema Registry configuration.
TypeScript:
- customDomain (string): Serve the web frontend using a custom CNAME pointing to the Aiven DNS name.
- ipFilters (string[]): Allow incoming connections from a CIDR address block, e.g. '10.20.0.0/16'.
- kafka (KafkaKafkaUserConfigKafka): Enable kafka.
- kafkaAuthenticationMethods (KafkaKafkaUserConfigKafkaAuthenticationMethods): Kafka authentication methods.
- kafkaConnect (string): Enable kafka_connect.
- kafkaConnectConfig (KafkaKafkaUserConfigKafkaConnectConfig): Kafka Connect configuration values.
- kafkaRest (string): Enable kafka_rest.
- kafkaRestConfig (KafkaKafkaUserConfigKafkaRestConfig): Kafka-REST configuration.
- kafkaVersion (string): Kafka major version.
- privateAccess (KafkaKafkaUserConfigPrivateAccess): Allow access to selected service ports from private networks.
- privatelinkAccess (KafkaKafkaUserConfigPrivatelinkAccess): Allow access to selected service components through Privatelink.
- publicAccess (KafkaKafkaUserConfigPublicAccess): Allow access to selected service ports from the public internet.
- schemaRegistry (string): Enable the Schema-Registry service.
- schemaRegistryConfig (KafkaKafkaUserConfigSchemaRegistryConfig): Schema Registry configuration.
Python:
- custom_domain (str): Serve the web frontend using a custom CNAME pointing to the Aiven DNS name.
- ip_filters (Sequence[str]): Allow incoming connections from a CIDR address block, e.g. '10.20.0.0/16'.
- kafka (KafkaKafkaUserConfigKafkaArgs): Enable kafka.
- kafka_authentication_methods (KafkaKafkaUserConfigKafkaAuthenticationMethodsArgs): Kafka authentication methods.
- kafka_connect (str): Enable kafka_connect.
- kafka_connect_config (KafkaKafkaUserConfigKafkaConnectConfigArgs): Kafka Connect configuration values.
- kafka_rest (str): Enable kafka_rest.
- kafka_rest_config (KafkaKafkaUserConfigKafkaRestConfigArgs): Kafka-REST configuration.
- kafka_version (str): Kafka major version.
- private_access (KafkaKafkaUserConfigPrivateAccessArgs): Allow access to selected service ports from private networks.
- privatelink_access (KafkaKafkaUserConfigPrivatelinkAccessArgs): Allow access to selected service components through Privatelink.
- public_access (KafkaKafkaUserConfigPublicAccessArgs): Allow access to selected service ports from the public internet.
- schema_registry (str): Enable the Schema-Registry service.
- schema_registry_config (KafkaKafkaUserConfigSchemaRegistryConfigArgs): Schema Registry configuration.
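As in the first example, these options are passed as the service's kafkaUserConfig input. A TypeScript sketch combining the IP filter with public access to the REST and Connect components (the project and CIDR values are illustrative):

```typescript
import * as aiven from "@pulumi/aiven";

const kafka = new aiven.Kafka("filtered-kafka", {
    project: "my-project", // assumed existing Aiven project
    cloudName: "google-europe-west1",
    plan: "business-4",
    serviceName: "filtered-kafka",
    kafkaUserConfig: {
        kafkaVersion: "2.4",
        // Only allow incoming connections from this CIDR block.
        ipFilters: ["10.20.0.0/16"],
        // Expose the REST and Connect components to the public internet
        // (still subject to the IP filter above).
        publicAccess: {
            kafkaRest: "true",
            kafkaConnect: "true",
        },
    },
});
```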
### KafkaKafkaUserConfigKafka
C#:
- AutoCreateTopicsEnable (string): Enable auto-creation of topics.
- CompressionType (string): Specify the final compression type for a given topic. This configuration accepts the standard compression codecs (gzip, snappy, lz4, zstd). It additionally accepts uncompressed, which is equivalent to no compression, and producer, which means retain the original compression codec set by the producer.
- ConnectionsMaxIdleMs (string): Idle connections timeout: the server socket processor threads close connections that have been idle for longer than this.
- DefaultReplicationFactor (string): Replication factor for auto-created topics.
- GroupMaxSessionTimeoutMs (string): The maximum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures.
- GroupMinSessionTimeoutMs (string): The minimum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures.
- LogCleanerDeleteRetentionMs (string)
- LogCleanerMaxCompactionLagMs (string): The maximum amount of time a message will remain uncompacted. Only applicable for logs that are being compacted.
- LogCleanerMinCleanableRatio (string): Controls log compactor frequency. A larger value means more frequent compactions but also more space wasted for logs. Consider setting log.cleaner.max.compaction.lag.ms to enforce compactions sooner, instead of setting a very high value for this option.
- LogCleanerMinCompactionLagMs (string): The minimum time a message will remain uncompacted in the log. Only applicable for logs that are being compacted.
- LogCleanupPolicy (string): The default cleanup policy for segments beyond the retention window.
- LogFlushIntervalMessages (string): The number of messages accumulated on a log partition before messages are flushed to disk.
- LogFlushIntervalMs (string): The maximum time in ms that a message in any topic is kept in memory before being flushed to disk. If not set, the value in log.flush.scheduler.interval.ms is used.
- LogIndexIntervalBytes (string): The interval with which Kafka adds an entry to the offset index.
- LogIndexSizeMaxBytes (string): The maximum size in bytes of the offset index.
- LogMessageDownconversionEnable (string): Controls whether down-conversion of message formats is enabled to satisfy consume requests.
- LogMessageTimestampDifferenceMaxMs (string): The maximum difference allowed between the timestamp when a broker receives a message and the timestamp specified in the message.
- LogMessageTimestampType (string): Defines whether the timestamp in the message is message create time or log append time.
- LogPreallocate (string): Whether to preallocate the file when creating a new segment.
- LogRetentionBytes (string): The maximum size of the log before deleting messages.
- LogRetentionHours (string): The number of hours to keep a log file before deleting it.
- LogRetentionMs (string): The number of milliseconds to keep a log file before deleting it. If not set, the value in log.retention.minutes is used. If set to -1, no time limit is applied.
- LogRollJitterMs (string): The maximum jitter to subtract from logRollTimeMillis (in milliseconds). If not set, the value in log.roll.jitter.hours is used.
- LogRollMs (string): The maximum time before a new log segment is rolled out (in milliseconds).
- LogSegmentBytes (string): The maximum size of a single log file.
- LogSegmentDeleteDelayMs (string): The amount of time to wait before deleting a file from the filesystem.
- MaxConnectionsPerIp (string): The maximum number of connections allowed from each IP address (defaults to 2147483647).
- MaxIncrementalFetchSessionCacheSlots (string): The maximum number of incremental fetch sessions that the broker will maintain.
- MessageMaxBytes (string): The maximum size of a message that the server can receive.
- MinInsyncReplicas (string): When a producer sets acks to 'all' (or '-1'), min.insync.replicas specifies the minimum number of replicas that must acknowledge a write for the write to be considered successful.
- NumPartitions (string): Number of partitions for auto-created topics.
- OffsetsRetentionMinutes (string): Log retention window in minutes for the offsets topic.
- ProducerPurgatoryPurgeIntervalRequests (string): The purge interval (in number of requests) of the producer request purgatory (defaults to 1000).
- ReplicaFetchMaxBytes (string): The number of bytes of messages to attempt to fetch for each partition (defaults to 1048576). This is not an absolute maximum; if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that progress can be made.
- ReplicaFetchResponseMaxBytes (string): Maximum bytes expected for the entire fetch response (defaults to 10485760). Records are fetched in batches, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that progress can be made. As such, this is not an absolute maximum.
- SocketRequestMaxBytes (string): The maximum number of bytes in a socket request (defaults to 104857600).
- TransactionRemoveExpiredTransactionCleanupIntervalMs (string): The interval at which to remove transactions that have expired due to transactional.id.expiration.ms passing (defaults to 3600000, i.e. 1 hour).
- TransactionStateLogSegmentBytes (string): The transaction topic segment bytes should be kept relatively small in order to facilitate faster log compaction and cache loads (defaults to 104857600, i.e. 100 mebibytes).
Go:
- AutoCreateTopicsEnable (string): Enable auto-creation of topics.
- CompressionType (string): Specify the final compression type for a given topic. This configuration accepts the standard compression codecs (gzip, snappy, lz4, zstd). It additionally accepts uncompressed, which is equivalent to no compression, and producer, which means retain the original compression codec set by the producer.
- ConnectionsMaxIdleMs (string): Idle connections timeout: the server socket processor threads close connections that have been idle for longer than this.
- DefaultReplicationFactor (string): Replication factor for auto-created topics.
- GroupMaxSessionTimeoutMs (string): The maximum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures.
- GroupMinSessionTimeoutMs (string): The minimum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures.
- LogCleanerDeleteRetentionMs (string)
- LogCleanerMaxCompactionLagMs (string): The maximum amount of time a message will remain uncompacted. Only applicable for logs that are being compacted.
- LogCleanerMinCleanableRatio (string): Controls log compactor frequency. A larger value means more frequent compactions but also more space wasted for logs. Consider setting log.cleaner.max.compaction.lag.ms to enforce compactions sooner, instead of setting a very high value for this option.
- LogCleanerMinCompactionLagMs (string): The minimum time a message will remain uncompacted in the log. Only applicable for logs that are being compacted.
- LogCleanupPolicy (string): The default cleanup policy for segments beyond the retention window.
- LogFlushIntervalMessages (string): The number of messages accumulated on a log partition before messages are flushed to disk.
- LogFlushIntervalMs (string): The maximum time in ms that a message in any topic is kept in memory before being flushed to disk. If not set, the value in log.flush.scheduler.interval.ms is used.
- LogIndexIntervalBytes (string): The interval with which Kafka adds an entry to the offset index.
- LogIndexSizeMaxBytes (string): The maximum size in bytes of the offset index.
- LogMessageDownconversionEnable (string): Controls whether down-conversion of message formats is enabled to satisfy consume requests.
- LogMessageTimestampDifferenceMaxMs (string): The maximum difference allowed between the timestamp when a broker receives a message and the timestamp specified in the message.
- LogMessageTimestampType (string): Defines whether the timestamp in the message is message create time or log append time.
- LogPreallocate (string): Whether to preallocate the file when creating a new segment.
- LogRetentionBytes (string): The maximum size of the log before deleting messages.
- LogRetentionHours (string): The number of hours to keep a log file before deleting it.
- LogRetentionMs (string): The number of milliseconds to keep a log file before deleting it. If not set, the value in log.retention.minutes is used. If set to -1, no time limit is applied.
- LogRollJitterMs (string): The maximum jitter to subtract from logRollTimeMillis (in milliseconds). If not set, the value in log.roll.jitter.hours is used.
- LogRollMs (string): The maximum time before a new log segment is rolled out (in milliseconds).
- LogSegmentBytes (string): The maximum size of a single log file.
- LogSegmentDeleteDelayMs (string): The amount of time to wait before deleting a file from the filesystem.
- MaxConnectionsPerIp (string): The maximum number of connections allowed from each IP address (defaults to 2147483647).
- MaxIncrementalFetchSessionCacheSlots (string): The maximum number of incremental fetch sessions that the broker will maintain.
- MessageMaxBytes (string): The maximum size of a message that the server can receive.
- MinInsyncReplicas (string): When a producer sets acks to 'all' (or '-1'), min.insync.replicas specifies the minimum number of replicas that must acknowledge a write for the write to be considered successful.
- NumPartitions (string): Number of partitions for auto-created topics.
- OffsetsRetentionMinutes (string): Log retention window in minutes for the offsets topic.
- ProducerPurgatoryPurgeIntervalRequests (string): The purge interval (in number of requests) of the producer request purgatory (defaults to 1000).
- ReplicaFetchMaxBytes (string): The number of bytes of messages to attempt to fetch for each partition (defaults to 1048576). This is not an absolute maximum; if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that progress can be made.
- ReplicaFetchResponseMaxBytes (string): Maximum bytes expected for the entire fetch response (defaults to 10485760). Records are fetched in batches, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that progress can be made. As such, this is not an absolute maximum.
- SocketRequestMaxBytes (string): The maximum number of bytes in a socket request (defaults to 104857600).
- TransactionRemoveExpiredTransactionCleanupIntervalMs (string): The interval at which to remove transactions that have expired due to transactional.id.expiration.ms passing (defaults to 3600000, i.e. 1 hour).
- TransactionStateLogSegmentBytes (string): The transaction topic segment bytes should be kept relatively small in order to facilitate faster log compaction and cache loads (defaults to 104857600, i.e. 100 mebibytes).
TypeScript:
- autoCreateTopicsEnable (string): Enable auto-creation of topics.
- compressionType (string): Specify the final compression type for a given topic. This configuration accepts the standard compression codecs (gzip, snappy, lz4, zstd). It additionally accepts uncompressed, which is equivalent to no compression, and producer, which means retain the original compression codec set by the producer.
- connectionsMaxIdleMs (string): Idle connections timeout: the server socket processor threads close connections that have been idle for longer than this.
- defaultReplicationFactor (string): Replication factor for auto-created topics.
- groupMaxSessionTimeoutMs (string): The maximum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures.
- groupMinSessionTimeoutMs (string): The minimum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures.
- logCleanerDeleteRetentionMs (string)
- logCleanerMaxCompactionLagMs (string): The maximum amount of time a message will remain uncompacted. Only applicable for logs that are being compacted.
- logCleanerMinCleanableRatio (string): Controls log compactor frequency. A larger value means more frequent compactions but also more space wasted for logs. Consider setting log.cleaner.max.compaction.lag.ms to enforce compactions sooner, instead of setting a very high value for this option.
- logCleanerMinCompactionLagMs (string): The minimum time a message will remain uncompacted in the log. Only applicable for logs that are being compacted.
- logCleanupPolicy (string): The default cleanup policy for segments beyond the retention window.
- logFlushIntervalMessages (string): The number of messages accumulated on a log partition before messages are flushed to disk.
- logFlushIntervalMs (string): The maximum time in ms that a message in any topic is kept in memory before being flushed to disk. If not set, the value in log.flush.scheduler.interval.ms is used.
- logIndexIntervalBytes (string): The interval with which Kafka adds an entry to the offset index.
- logIndexSizeMaxBytes (string): The maximum size in bytes of the offset index.
- logMessageDownconversionEnable (string): Controls whether down-conversion of message formats is enabled to satisfy consume requests.
- logMessageTimestampDifferenceMaxMs (string): The maximum difference allowed between the timestamp when a broker receives a message and the timestamp specified in the message.
- logMessageTimestampType (string): Defines whether the timestamp in the message is message create time or log append time.
- logPreallocate (string): Whether to preallocate the file when creating a new segment.
- logRetentionBytes (string): The maximum size of the log before deleting messages.
- logRetentionHours (string): The number of hours to keep a log file before deleting it.
- logRetentionMs (string): The number of milliseconds to keep a log file before deleting it. If not set, the value in log.retention.minutes is used. If set to -1, no time limit is applied.
- logRollJitterMs (string): The maximum jitter to subtract from logRollTimeMillis (in milliseconds). If not set, the value in log.roll.jitter.hours is used.
- logRollMs (string): The maximum time before a new log segment is rolled out (in milliseconds).
- logSegmentBytes (string): The maximum size of a single log file.
- logSegmentDeleteDelayMs (string): The amount of time to wait before deleting a file from the filesystem.
- maxConnectionsPerIp (string): The maximum number of connections allowed from each IP address (defaults to 2147483647).
- maxIncrementalFetchSessionCacheSlots (string): The maximum number of incremental fetch sessions that the broker will maintain.
- messageMaxBytes (string): The maximum size of a message that the server can receive.
- minInsyncReplicas (string): When a producer sets acks to 'all' (or '-1'), min.insync.replicas specifies the minimum number of replicas that must acknowledge a write for the write to be considered successful.
- numPartitions (string): Number of partitions for auto-created topics.
- offsetsRetentionMinutes (string): Log retention window in minutes for the offsets topic.
- producerPurgatoryPurgeIntervalRequests (string): The purge interval (in number of requests) of the producer request purgatory (defaults to 1000).
- replicaFetchMaxBytes (string): The number of bytes of messages to attempt to fetch for each partition (defaults to 1048576). This is not an absolute maximum; if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that progress can be made.
- replicaFetchResponseMaxBytes (string): Maximum bytes expected for the entire fetch response (defaults to 10485760). Records are fetched in batches, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that progress can be made. As such, this is not an absolute maximum.
- socketRequestMaxBytes (string): The maximum number of bytes in a socket request (defaults to 104857600).
- transactionRemoveExpiredTransactionCleanupIntervalMs (string): The interval at which to remove transactions that have expired due to transactional.id.expiration.ms passing (defaults to 3600000, i.e. 1 hour).
- transactionStateLogSegmentBytes (string): The transaction topic segment bytes should be kept relatively small in order to facilitate faster log compaction and cache loads (defaults to 104857600, i.e. 100 mebibytes).
- auto_create_topics_enable str
Enable auto creation of topics
- compression_type str
Specify the final compression type for a given topic. This configuration accepts the standard compression codecs (‘gzip’, ‘snappy’, ‘lz4’, ‘zstd’). It additionally accepts ‘uncompressed’ which is equivalent to no compression; and ‘producer’ which means retain the original compression codec set by the producer.
- connections_max_idle_ms str
Idle connections timeout: the server socket processor threads close the connections that idle for longer than this.
- default_replication_factor str
Replication factor for autocreated topics
- group_max_session_timeout_ms str
The maximum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures.
- group_min_session_timeout_ms str
The minimum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures.
- log_cleaner_delete_retention_ms str
- log_cleaner_max_compaction_lag_ms str
The maximum amount of time a message will remain uncompacted. Only applicable for logs that are being compacted.
- log_cleaner_min_cleanable_ratio str
Controls log compactor frequency. A larger value means more frequent compactions but also more space wasted for logs. Consider setting log.cleaner.max.compaction.lag.ms to enforce compactions sooner, instead of setting a very high value for this option.
- log_cleaner_min_compaction_lag_ms str
The minimum time a message will remain uncompacted in the log. Only applicable for logs that are being compacted.
- log_cleanup_policy str
The default cleanup policy for segments beyond the retention window.
- log_flush_interval_messages str
The number of messages accumulated on a log partition before messages are flushed to disk.
- log_flush_interval_ms str
The maximum time in ms that a message in any topic is kept in memory before being flushed to disk. If not set, the value in log.flush.scheduler.interval.ms is used.
- log_index_interval_bytes str
The interval with which Kafka adds an entry to the offset index.
- log_index_size_max_bytes str
The maximum size in bytes of the offset index.
- log_message_downconversion_enable str
This configuration controls whether down-conversion of message formats is enabled to satisfy consume requests.
- log_message_timestamp_difference_max_ms str
The maximum difference allowed between the timestamp when a broker receives a message and the timestamp specified in the message.
- log_message_timestamp_type str
Define whether the timestamp in the message is message create time or log append time.
- log_preallocate str
Whether to preallocate the file when creating a new segment.
- log_retention_bytes str
The maximum size of the log before deleting messages.
- log_retention_hours str
The number of hours to keep a log file before deleting it.
- log_retention_ms str
The number of milliseconds to keep a log file before deleting it. If not set, the value in log.retention.minutes is used. If set to -1, no time limit is applied.
- log_roll_jitter_ms str
The maximum jitter to subtract from logRollTimeMillis (in milliseconds). If not set, the value in log.roll.jitter.hours is used.
- log_roll_ms str
The maximum time before a new log segment is rolled out (in milliseconds).
- log_segment_bytes str
The maximum size of a single log file.
- log_segment_delete_delay_ms str
The amount of time to wait before deleting a file from the filesystem.
- max_connections_per_ip str
The maximum number of connections allowed from each IP address (defaults to 2147483647).
- max_incremental_fetch_session_cache_slots str
The maximum number of incremental fetch sessions that the broker will maintain.
- message_max_bytes str
The maximum size of message that the server can receive.
- min_insync_replicas str
When a producer sets acks to ‘all’ (or ‘-1’), min.insync.replicas specifies the minimum number of replicas that must acknowledge a write for the write to be considered successful.
- num_partitions str
Number of partitions for autocreated topics
- offsets_retention_minutes str
Log retention window in minutes for offsets topic.
- producer_purgatory_purge_interval_requests str
The purge interval (in number of requests) of the producer request purgatory (defaults to 1000).
- replica_fetch_max_bytes str
The number of bytes of messages to attempt to fetch for each partition (defaults to 1048576). This is not an absolute maximum; if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that progress can be made.
- replica_fetch_response_max_bytes str
Maximum bytes expected for the entire fetch response (defaults to 10485760). Records are fetched in batches, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that progress can be made. As such, this is not an absolute maximum.
- socket_request_max_bytes str
The maximum number of bytes in a socket request (defaults to 104857600).
- transaction_remove_expired_transaction_cleanup_interval_ms str
The interval at which to remove transactions that have expired due to transactional.id.expiration.ms passing (defaults to 3600000 (1 hour)).
- transaction_state_log_segment_bytes str
The transaction topic segment bytes should be kept relatively small in order to facilitate faster log compaction and cache loads (defaults to 104857600 (100 mebibytes)).
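Taken together, these broker-level options are set through the nested kafka block of kafkaUserConfig. A minimal TypeScript sketch (the project, plan, and service names are placeholders, and the chosen values are illustrative rather than recommendations):

```typescript
import * as aiven from "@pulumi/aiven";

const tunedKafka = new aiven.Kafka("kafka-tuned", {
    project: "my-project", // placeholder
    cloudName: "google-europe-west1",
    plan: "business-4",
    serviceName: "my-kafka-tuned",
    kafkaUserConfig: {
        kafka: {
            // Cap each partition's log at ~1 GB and keep data for 72 hours.
            logRetentionBytes: 1000000000,
            logRetentionHours: 72,
            // With acks=all, require two in-sync replicas per write.
            minInsyncReplicas: 2,
            // Auto-created topics get three partitions.
            numPartitions: 3,
        },
    },
});
```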
KafkaKafkaUserConfigKafkaAuthenticationMethods
- Certificate string
Enable certificate/SSL authentication
- Sasl string
Enable SASL authentication
- certificate string
Enable certificate/SSL authentication
- sasl string
Enable SASL authentication
- certificate str
Enable certificate/SSL authentication
- sasl str
Enable SASL authentication
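For example, certificate and SASL authentication can be enabled side by side through the kafkaAuthenticationMethods block of kafkaUserConfig; a TypeScript fragment with illustrative values:

```typescript
const kafkaUserConfig = {
    kafkaAuthenticationMethods: {
        certificate: true, // certificate/SSL authentication
        sasl: true,        // SASL authentication
    },
    // ...remaining user config as in the full example at the top of the page
};
```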
KafkaKafkaUserConfigKafkaConnectConfig
- ConnectorClientConfigOverridePolicy string
Defines what client configurations can be overridden by the connector. Default is None.
- ConsumerAutoOffsetReset string
What to do when there is no initial offset in Kafka or if the current offset no longer exists on the server. Default is earliest.
- ConsumerFetchMaxBytes string
Records are fetched in batches by the consumer, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that the consumer can make progress. As such, this is not an absolute maximum.
- ConsumerIsolationLevel string
Transaction read isolation level. read_uncommitted is the default, but read_committed can be used if consume-exactly-once behavior is desired.
- ConsumerMaxPartitionFetchBytes string
Records are fetched in batches by the consumer. If the first record batch in the first non-empty partition of the fetch is larger than this limit, the batch will still be returned to ensure that the consumer can make progress.
- ConsumerMaxPollIntervalMs string
The maximum delay in milliseconds between invocations of poll() when using consumer group management (defaults to 300000).
- ConsumerMaxPollRecords string
The maximum number of records returned in a single call to poll() (defaults to 500).
- OffsetFlushIntervalMs string
The interval at which to try committing offsets for tasks (defaults to 60000).
- OffsetFlushTimeoutMs string
Maximum number of milliseconds to wait for records to flush and partition offset data to be committed to offset storage before cancelling the process and restoring the offset data to be committed in a future attempt (defaults to 5000).
- ProducerMaxRequestSize string
This setting will limit the number of record batches the producer will send in a single request to avoid sending huge requests.
- SessionTimeoutMs string
The timeout in milliseconds used to detect failures when using Kafka’s group management facilities (defaults to 10000).
- connectorClientConfigOverridePolicy string
Defines what client configurations can be overridden by the connector. Default is None.
- consumerAutoOffsetReset string
What to do when there is no initial offset in Kafka or if the current offset no longer exists on the server. Default is earliest.
- consumerFetchMaxBytes string
Records are fetched in batches by the consumer, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that the consumer can make progress. As such, this is not an absolute maximum.
- consumerIsolationLevel string
Transaction read isolation level. read_uncommitted is the default, but read_committed can be used if consume-exactly-once behavior is desired.
- consumerMaxPartitionFetchBytes string
Records are fetched in batches by the consumer. If the first record batch in the first non-empty partition of the fetch is larger than this limit, the batch will still be returned to ensure that the consumer can make progress.
- consumerMaxPollIntervalMs string
The maximum delay in milliseconds between invocations of poll() when using consumer group management (defaults to 300000).
- consumerMaxPollRecords string
The maximum number of records returned in a single call to poll() (defaults to 500).
- offsetFlushIntervalMs string
The interval at which to try committing offsets for tasks (defaults to 60000).
- offsetFlushTimeoutMs string
Maximum number of milliseconds to wait for records to flush and partition offset data to be committed to offset storage before cancelling the process and restoring the offset data to be committed in a future attempt (defaults to 5000).
- producerMaxRequestSize string
This setting will limit the number of record batches the producer will send in a single request to avoid sending huge requests.
- sessionTimeoutMs string
The timeout in milliseconds used to detect failures when using Kafka’s group management facilities (defaults to 10000).
- connector_client_config_override_policy str
Defines what client configurations can be overridden by the connector. Default is None.
- consumer_auto_offset_reset str
What to do when there is no initial offset in Kafka or if the current offset no longer exists on the server. Default is earliest.
- consumer_fetch_max_bytes str
Records are fetched in batches by the consumer, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that the consumer can make progress. As such, this is not an absolute maximum.
- consumer_isolation_level str
Transaction read isolation level. read_uncommitted is the default, but read_committed can be used if consume-exactly-once behavior is desired.
- consumer_max_partition_fetch_bytes str
Records are fetched in batches by the consumer. If the first record batch in the first non-empty partition of the fetch is larger than this limit, the batch will still be returned to ensure that the consumer can make progress.
- consumer_max_poll_interval_ms str
The maximum delay in milliseconds between invocations of poll() when using consumer group management (defaults to 300000).
- consumer_max_poll_records str
The maximum number of records returned in a single call to poll() (defaults to 500).
- offset_flush_interval_ms str
The interval at which to try committing offsets for tasks (defaults to 60000).
- offset_flush_timeout_ms str
Maximum number of milliseconds to wait for records to flush and partition offset data to be committed to offset storage before cancelling the process and restoring the offset data to be committed in a future attempt (defaults to 5000).
- producer_max_request_size str
This setting will limit the number of record batches the producer will send in a single request to avoid sending huge requests.
- session_timeout_ms str
The timeout in milliseconds used to detect failures when using Kafka’s group management facilities (defaults to 10000).
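These options map to the kafkaConnectConfig block of kafkaUserConfig, alongside the kafkaConnect flag that enables the Connect service itself. A hedged TypeScript fragment with illustrative values:

```typescript
const kafkaUserConfig = {
    kafkaConnect: true, // run Kafka Connect on the service nodes
    kafkaConnectConfig: {
        // Give slow connectors up to 10 minutes between poll() calls
        // before they are removed from the consumer group.
        consumerMaxPollIntervalMs: 600000,
        // Read only committed records, for consume-exactly-once pipelines.
        consumerIsolationLevel: "read_committed",
    },
};
```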
KafkaKafkaUserConfigKafkaRestConfig
- ConsumerEnableAutoCommit string
If true, the consumer’s offset will be periodically committed to Kafka in the background.
- ConsumerRequestMaxBytes string
Maximum number of bytes in unencoded message keys and values returned by a single request.
- ConsumerRequestTimeoutMs string
The maximum total time to wait for messages for a request if the maximum number of messages has not yet been reached.
- ProducerAcks string
The number of acknowledgments the producer requires the leader to have received before considering a request complete. If set to ‘all’ or ‘-1’, the leader will wait for the full set of in-sync replicas to acknowledge the record.
- ProducerLingerMs string
Wait for up to the given delay to allow batching records together.
- SimpleconsumerPoolSizeMax string
Maximum number of SimpleConsumers that can be instantiated per broker.
- consumerEnableAutoCommit string
If true, the consumer’s offset will be periodically committed to Kafka in the background.
- consumerRequestMaxBytes string
Maximum number of bytes in unencoded message keys and values returned by a single request.
- consumerRequestTimeoutMs string
The maximum total time to wait for messages for a request if the maximum number of messages has not yet been reached.
- producerAcks string
The number of acknowledgments the producer requires the leader to have received before considering a request complete. If set to ‘all’ or ‘-1’, the leader will wait for the full set of in-sync replicas to acknowledge the record.
- producerLingerMs string
Wait for up to the given delay to allow batching records together.
- simpleconsumerPoolSizeMax string
Maximum number of SimpleConsumers that can be instantiated per broker.
- consumer_enable_auto_commit str
If true, the consumer’s offset will be periodically committed to Kafka in the background.
- consumer_request_max_bytes str
Maximum number of bytes in unencoded message keys and values returned by a single request.
- consumer_request_timeout_ms str
The maximum total time to wait for messages for a request if the maximum number of messages has not yet been reached.
- producer_acks str
The number of acknowledgments the producer requires the leader to have received before considering a request complete. If set to ‘all’ or ‘-1’, the leader will wait for the full set of in-sync replicas to acknowledge the record.
- producer_linger_ms str
Wait for up to the given delay to allow batching records together.
- simpleconsumer_pool_size_max str
Maximum number of SimpleConsumers that can be instantiated per broker.
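These settings live under the kafkaRestConfig block of kafkaUserConfig and take effect when kafkaRest is enabled. A sketch in TypeScript (values are illustrative, and exact value types may vary by SDK version):

```typescript
const kafkaUserConfig = {
    kafkaRest: true, // expose the Kafka REST interface
    kafkaRestConfig: {
        consumerEnableAutoCommit: false, // commit offsets explicitly
        producerAcks: "all",             // wait for all in-sync replicas
        producerLingerMs: 5,             // batch records for up to 5 ms
    },
};
```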
KafkaKafkaUserConfigPrivateAccess
- Prometheus string
Allow clients to connect to prometheus from the public internet for service nodes that are in a project VPC or another type of private network
- prometheus string
Allow clients to connect to prometheus from the public internet for service nodes that are in a project VPC or another type of private network
- prometheus str
Allow clients to connect to prometheus from the public internet for service nodes that are in a project VPC or another type of private network
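In TypeScript this is the privateAccess block of kafkaUserConfig; a minimal fragment:

```typescript
const kafkaUserConfig = {
    privateAccess: {
        prometheus: true, // Prometheus endpoint reachable as described above
    },
};
```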
KafkaKafkaUserConfigPrivatelinkAccess
- Kafka string
Enable kafka
- KafkaConnect string
Enable kafka_connect
- KafkaRest string
Enable kafka_rest
- SchemaRegistry string
Enable Schema-Registry service
- kafka string
Enable kafka
- kafkaConnect string
Enable kafka_connect
- kafkaRest string
Enable kafka_rest
- schemaRegistry string
Enable Schema-Registry service
- kafka str
Enable kafka
- kafka_connect str
Enable kafka_connect
- kafka_rest str
Enable kafka_rest
- schema_registry str
Enable Schema-Registry service
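These flags sit under the privatelinkAccess block of kafkaUserConfig and control which components are exposed over a Privatelink connection; for example, in TypeScript:

```typescript
const kafkaUserConfig = {
    privatelinkAccess: {
        kafka: true,
        kafkaConnect: true,
        kafkaRest: true,
        schemaRegistry: true,
    },
};
```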
KafkaKafkaUserConfigPublicAccess
- Kafka string
Enable kafka
- KafkaConnect string
Enable kafka_connect
- KafkaRest string
Enable kafka_rest
- Prometheus string
Allow clients to connect to prometheus from the public internet for service nodes that are in a project VPC or another type of private network
- SchemaRegistry string
Enable Schema-Registry service
- kafka string
Enable kafka
- kafkaConnect string
Enable kafka_connect
- kafkaRest string
Enable kafka_rest
- prometheus string
Allow clients to connect to prometheus from the public internet for service nodes that are in a project VPC or another type of private network
- schemaRegistry string
Enable Schema-Registry service
- kafka str
Enable kafka
- kafka_connect str
Enable kafka_connect
- kafka_rest str
Enable kafka_rest
- prometheus str
Allow clients to connect to prometheus from the public internet for service nodes that are in a project VPC or another type of private network
- schema_registry str
Enable Schema-Registry service
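The full example at the top of the page already toggles kafkaRest and kafkaConnect under publicAccess; the remaining flags follow the same pattern (illustrative TypeScript fragment):

```typescript
const kafkaUserConfig = {
    publicAccess: {
        prometheus: true,     // metrics reachable from the public internet
        schemaRegistry: true, // Schema Registry reachable without private networking
    },
};
```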
KafkaKafkaUserConfigSchemaRegistryConfig
- LeaderEligibility string
If true, Karapace / Schema Registry on the service nodes can participate in leader election. It might be needed to disable this when the schemas topic is replicated to a secondary cluster and Karapace / Schema Registry there must not participate in leader election. Defaults to ‘true’.
- TopicName string
The durable single partition topic that acts as the durable log for the data. This topic must be compacted to avoid losing data due to retention policy. Please note that changing this configuration in an existing Schema Registry / Karapace setup leads to previous schemas becoming inaccessible, data encoded with them potentially unreadable, and the schema ID sequence put out of order. It’s only possible to do the switch while Schema Registry / Karapace is disabled. Defaults to ‘_schemas’.
- leaderEligibility string
If true, Karapace / Schema Registry on the service nodes can participate in leader election. It might be needed to disable this when the schemas topic is replicated to a secondary cluster and Karapace / Schema Registry there must not participate in leader election. Defaults to ‘true’.
- topicName string
The durable single partition topic that acts as the durable log for the data. This topic must be compacted to avoid losing data due to retention policy. Please note that changing this configuration in an existing Schema Registry / Karapace setup leads to previous schemas becoming inaccessible, data encoded with them potentially unreadable, and the schema ID sequence put out of order. It’s only possible to do the switch while Schema Registry / Karapace is disabled. Defaults to ‘_schemas’.
- leader_eligibility str
If true, Karapace / Schema Registry on the service nodes can participate in leader election. It might be needed to disable this when the schemas topic is replicated to a secondary cluster and Karapace / Schema Registry there must not participate in leader election. Defaults to ‘true’.
- topic_name str
The durable single partition topic that acts as the durable log for the data. This topic must be compacted to avoid losing data due to retention policy. Please note that changing this configuration in an existing Schema Registry / Karapace setup leads to previous schemas becoming inaccessible, data encoded with them potentially unreadable, and the schema ID sequence put out of order. It’s only possible to do the switch while Schema Registry / Karapace is disabled. Defaults to ‘_schemas’.
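These map to the schemaRegistryConfig block of kafkaUserConfig; a TypeScript sketch showing the documented defaults (change topicName only while Schema Registry / Karapace is disabled, as noted above):

```typescript
const kafkaUserConfig = {
    schemaRegistry: true,
    schemaRegistryConfig: {
        leaderEligibility: true, // allow these nodes to take part in leader election
        topicName: "_schemas",   // the default schemas storage topic
    },
};
```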
KafkaServiceIntegration
- IntegrationType string
- SourceServiceName string
- integrationType string
- sourceServiceName string
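A hedged TypeScript sketch of attaching an integration when the service is created; the integration type and the service names are placeholders, and the integration types valid for Kafka should be checked against Aiven's documentation:

```typescript
import * as aiven from "@pulumi/aiven";

const kafka = new aiven.Kafka("kafka-integrated", {
    project: "my-project", // placeholder
    cloudName: "google-europe-west1",
    plan: "business-4",
    serviceName: "my-kafka-integrated",
    serviceIntegrations: [{
        integrationType: "dashboard",          // placeholder integration type
        sourceServiceName: "my-other-service", // placeholder service name
    }],
});
```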
Package Details
- Repository: https://github.com/pulumi/pulumi-aiven
- License: Apache-2.0
- Notes: This Pulumi package is based on the aiven Terraform Provider.