Aiven v6.44.0 published on Friday, Oct 10, 2025 by Pulumi
aiven.getServiceIntegration
Gets information about an Aiven service integration.
Example Usage
TypeScript
import * as pulumi from "@pulumi/pulumi";
import * as aiven from "@pulumi/aiven";
const exampleIntegration = aiven.getServiceIntegration({
    project: exampleProject.project,
    destinationServiceName: exampleThanos.serviceName,
    integrationType: "metrics",
    sourceServiceName: exampleKafka.serviceName,
});
Python
import pulumi
import pulumi_aiven as aiven
example_integration = aiven.get_service_integration(project=example_project["project"],
    destination_service_name=example_thanos["serviceName"],
    integration_type="metrics",
    source_service_name=example_kafka["serviceName"])
Go
package main
import (
	"github.com/pulumi/pulumi-aiven/sdk/v6/go/aiven"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		_, err := aiven.LookupServiceIntegration(ctx, &aiven.LookupServiceIntegrationArgs{
			Project:                exampleProject.Project,
			DestinationServiceName: exampleThanos.ServiceName,
			IntegrationType:        "metrics",
			SourceServiceName:      exampleKafka.ServiceName,
		}, nil)
		if err != nil {
			return err
		}
		return nil
	})
}
C#
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Aiven = Pulumi.Aiven;
return await Deployment.RunAsync(() => 
{
    var exampleIntegration = Aiven.GetServiceIntegration.Invoke(new()
    {
        Project = exampleProject.Project,
        DestinationServiceName = exampleThanos.ServiceName,
        IntegrationType = "metrics",
        SourceServiceName = exampleKafka.ServiceName,
    });
});
Java
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.aiven.AivenFunctions;
import com.pulumi.aiven.inputs.GetServiceIntegrationArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }
    public static void stack(Context ctx) {
        final var exampleIntegration = AivenFunctions.getServiceIntegration(GetServiceIntegrationArgs.builder()
            .project(exampleProject.project())
            .destinationServiceName(exampleThanos.serviceName())
            .integrationType("metrics")
            .sourceServiceName(exampleKafka.serviceName())
            .build());
    }
}
YAML
variables:
  exampleIntegration:
    fn::invoke:
      function: aiven:getServiceIntegration
      arguments:
        project: ${exampleProject.project}
        destinationServiceName: ${exampleThanos.serviceName}
        integrationType: metrics
        sourceServiceName: ${exampleKafka.serviceName}
Using getServiceIntegration
Two invocation forms are available. The direct form accepts plain arguments and either blocks until the result value is available, or returns a Promise-wrapped result. The output form accepts Input-wrapped arguments and returns an Output-wrapped result.
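As a quick illustration, here is a minimal TypeScript sketch of both forms; the project and service names are hypothetical placeholders, not values from this page:

import * as aiven from "@pulumi/aiven";

// Direct form: plain string arguments, Promise-wrapped result.
// (Names below are hypothetical placeholders.)
const resultPromise = aiven.getServiceIntegration({
    project: "my-project",
    destinationServiceName: "my-thanos",
    integrationType: "metrics",
    sourceServiceName: "my-kafka",
});

// Output form: the same arguments may be Input-wrapped (for example, outputs
// of other resources), and the result is an Output that can be exported.
const result = aiven.getServiceIntegrationOutput({
    project: "my-project",
    destinationServiceName: "my-thanos",
    integrationType: "metrics",
    sourceServiceName: "my-kafka",
});
export const integrationId = result.integrationId;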
TypeScript
function getServiceIntegration(args: GetServiceIntegrationArgs, opts?: InvokeOptions): Promise<GetServiceIntegrationResult>
function getServiceIntegrationOutput(args: GetServiceIntegrationOutputArgs, opts?: InvokeOptions): Output<GetServiceIntegrationResult>

Python
def get_service_integration(destination_service_name: Optional[str] = None,
                            integration_type: Optional[str] = None,
                            project: Optional[str] = None,
                            source_service_name: Optional[str] = None,
                            opts: Optional[InvokeOptions] = None) -> GetServiceIntegrationResult
def get_service_integration_output(destination_service_name: Optional[pulumi.Input[str]] = None,
                            integration_type: Optional[pulumi.Input[str]] = None,
                            project: Optional[pulumi.Input[str]] = None,
                            source_service_name: Optional[pulumi.Input[str]] = None,
                            opts: Optional[InvokeOptions] = None) -> Output[GetServiceIntegrationResult]

Go
func LookupServiceIntegration(ctx *Context, args *LookupServiceIntegrationArgs, opts ...InvokeOption) (*LookupServiceIntegrationResult, error)
func LookupServiceIntegrationOutput(ctx *Context, args *LookupServiceIntegrationOutputArgs, opts ...InvokeOption) LookupServiceIntegrationResultOutput

> Note: This function is named LookupServiceIntegration in the Go SDK.

C#
public static class GetServiceIntegration
{
    public static Task<GetServiceIntegrationResult> InvokeAsync(GetServiceIntegrationArgs args, InvokeOptions? opts = null)
    public static Output<GetServiceIntegrationResult> Invoke(GetServiceIntegrationInvokeArgs args, InvokeOptions? opts = null)
}

Java
public static CompletableFuture<GetServiceIntegrationResult> getServiceIntegration(GetServiceIntegrationArgs args, InvokeOptions options)
public static Output<GetServiceIntegrationResult> getServiceIntegration(GetServiceIntegrationArgs args, InvokeOptions options)

YAML
fn::invoke:
  function: aiven:index/getServiceIntegration:getServiceIntegration
  arguments:
    # arguments dictionary

The following arguments are supported:
C#
- DestinationServiceName string
- Destination service for the integration.
- IntegrationType string
- Type of the service integration. The possible values are alertmanager, autoscaler, caching, cassandra_cross_service_cluster, clickhouse_credentials, clickhouse_kafka, clickhouse_postgresql, dashboard, datadog, datasource, disaster_recovery, external_aws_cloudwatch_logs, external_aws_cloudwatch_metrics, external_elasticsearch_logs, external_google_cloud_logging, external_opensearch_logs, flink, flink_external_bigquery, flink_external_kafka, flink_external_postgresql, internal_connectivity, jolokia, kafka_connect, kafka_connect_postgresql, kafka_logs, kafka_mirrormaker, logs, m3aggregator, m3coordinator, metrics, opensearch_cross_cluster_replication, opensearch_cross_cluster_search, prometheus, read_replica, rsyslog, schema_registry_proxy, stresstester, thanos_distributed_query, thanos_migrate, thanoscompactor, thanosquery, thanosruler, thanosstore, vector and vmalert.
- Project string
- Project the integration belongs to.
- SourceServiceName string
- Source service for the integration (if any)
Go
- DestinationServiceName string
- Destination service for the integration.
- IntegrationType string
- Type of the service integration. The possible values are alertmanager, autoscaler, caching, cassandra_cross_service_cluster, clickhouse_credentials, clickhouse_kafka, clickhouse_postgresql, dashboard, datadog, datasource, disaster_recovery, external_aws_cloudwatch_logs, external_aws_cloudwatch_metrics, external_elasticsearch_logs, external_google_cloud_logging, external_opensearch_logs, flink, flink_external_bigquery, flink_external_kafka, flink_external_postgresql, internal_connectivity, jolokia, kafka_connect, kafka_connect_postgresql, kafka_logs, kafka_mirrormaker, logs, m3aggregator, m3coordinator, metrics, opensearch_cross_cluster_replication, opensearch_cross_cluster_search, prometheus, read_replica, rsyslog, schema_registry_proxy, stresstester, thanos_distributed_query, thanos_migrate, thanoscompactor, thanosquery, thanosruler, thanosstore, vector and vmalert.
- Project string
- Project the integration belongs to.
- SourceServiceName string
- Source service for the integration (if any)
Java
- destinationServiceName String
- Destination service for the integration.
- integrationType String
- Type of the service integration. The possible values are alertmanager, autoscaler, caching, cassandra_cross_service_cluster, clickhouse_credentials, clickhouse_kafka, clickhouse_postgresql, dashboard, datadog, datasource, disaster_recovery, external_aws_cloudwatch_logs, external_aws_cloudwatch_metrics, external_elasticsearch_logs, external_google_cloud_logging, external_opensearch_logs, flink, flink_external_bigquery, flink_external_kafka, flink_external_postgresql, internal_connectivity, jolokia, kafka_connect, kafka_connect_postgresql, kafka_logs, kafka_mirrormaker, logs, m3aggregator, m3coordinator, metrics, opensearch_cross_cluster_replication, opensearch_cross_cluster_search, prometheus, read_replica, rsyslog, schema_registry_proxy, stresstester, thanos_distributed_query, thanos_migrate, thanoscompactor, thanosquery, thanosruler, thanosstore, vector and vmalert.
- project String
- Project the integration belongs to.
- sourceServiceName String
- Source service for the integration (if any)
TypeScript
- destinationServiceName string
- Destination service for the integration.
- integrationType string
- Type of the service integration. The possible values are alertmanager, autoscaler, caching, cassandra_cross_service_cluster, clickhouse_credentials, clickhouse_kafka, clickhouse_postgresql, dashboard, datadog, datasource, disaster_recovery, external_aws_cloudwatch_logs, external_aws_cloudwatch_metrics, external_elasticsearch_logs, external_google_cloud_logging, external_opensearch_logs, flink, flink_external_bigquery, flink_external_kafka, flink_external_postgresql, internal_connectivity, jolokia, kafka_connect, kafka_connect_postgresql, kafka_logs, kafka_mirrormaker, logs, m3aggregator, m3coordinator, metrics, opensearch_cross_cluster_replication, opensearch_cross_cluster_search, prometheus, read_replica, rsyslog, schema_registry_proxy, stresstester, thanos_distributed_query, thanos_migrate, thanoscompactor, thanosquery, thanosruler, thanosstore, vector and vmalert.
- project string
- Project the integration belongs to.
- sourceServiceName string
- Source service for the integration (if any)
Python
- destination_service_name str
- Destination service for the integration.
- integration_type str
- Type of the service integration. The possible values are alertmanager, autoscaler, caching, cassandra_cross_service_cluster, clickhouse_credentials, clickhouse_kafka, clickhouse_postgresql, dashboard, datadog, datasource, disaster_recovery, external_aws_cloudwatch_logs, external_aws_cloudwatch_metrics, external_elasticsearch_logs, external_google_cloud_logging, external_opensearch_logs, flink, flink_external_bigquery, flink_external_kafka, flink_external_postgresql, internal_connectivity, jolokia, kafka_connect, kafka_connect_postgresql, kafka_logs, kafka_mirrormaker, logs, m3aggregator, m3coordinator, metrics, opensearch_cross_cluster_replication, opensearch_cross_cluster_search, prometheus, read_replica, rsyslog, schema_registry_proxy, stresstester, thanos_distributed_query, thanos_migrate, thanoscompactor, thanosquery, thanosruler, thanosstore, vector and vmalert.
- project str
- Project the integration belongs to.
- source_service_name str
- Source service for the integration (if any)
YAML
- destinationServiceName String
- Destination service for the integration.
- integrationType String
- Type of the service integration. The possible values are alertmanager, autoscaler, caching, cassandra_cross_service_cluster, clickhouse_credentials, clickhouse_kafka, clickhouse_postgresql, dashboard, datadog, datasource, disaster_recovery, external_aws_cloudwatch_logs, external_aws_cloudwatch_metrics, external_elasticsearch_logs, external_google_cloud_logging, external_opensearch_logs, flink, flink_external_bigquery, flink_external_kafka, flink_external_postgresql, internal_connectivity, jolokia, kafka_connect, kafka_connect_postgresql, kafka_logs, kafka_mirrormaker, logs, m3aggregator, m3coordinator, metrics, opensearch_cross_cluster_replication, opensearch_cross_cluster_search, prometheus, read_replica, rsyslog, schema_registry_proxy, stresstester, thanos_distributed_query, thanos_migrate, thanoscompactor, thanosquery, thanosruler, thanosstore, vector and vmalert.
- project String
- Project the integration belongs to.
- sourceServiceName String
- Source service for the integration (if any)
getServiceIntegration Result
The following output properties are available:
C#
- ClickhouseCredentialsUserConfigs List<GetServiceIntegrationClickhouseCredentialsUserConfig>
- ClickhouseCredentials user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- ClickhouseKafkaUserConfigs List<GetServiceIntegrationClickhouseKafkaUserConfig>
- ClickhouseKafka user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- ClickhousePostgresqlUserConfigs List<GetServiceIntegrationClickhousePostgresqlUserConfig>
- ClickhousePostgresql user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- DatadogUserConfigs List<GetServiceIntegrationDatadogUserConfig>
- Datadog user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- DestinationEndpointId string
- Destination endpoint for the integration.
- DestinationServiceName string
- Destination service for the integration.
- DestinationServiceProject string
- Destination service project name
- ExternalAwsCloudwatchLogsUserConfigs List<GetServiceIntegrationExternalAwsCloudwatchLogsUserConfig>
- ExternalAwsCloudwatchLogs user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- ExternalAwsCloudwatchMetricsUserConfigs List<GetServiceIntegrationExternalAwsCloudwatchMetricsUserConfig>
- ExternalAwsCloudwatchMetrics user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- ExternalElasticsearchLogsUserConfigs List<GetServiceIntegrationExternalElasticsearchLogsUserConfig>
- ExternalElasticsearchLogs user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- ExternalOpensearchLogsUserConfigs List<GetServiceIntegrationExternalOpensearchLogsUserConfig>
- ExternalOpensearchLogs user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- FlinkExternalPostgresqlUserConfigs List<GetServiceIntegrationFlinkExternalPostgresqlUserConfig>
- FlinkExternalPostgresql user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- Id string
- The provider-assigned unique ID for this managed resource.
- IntegrationId string
- The ID of the Aiven service integration.
- IntegrationType string
- Type of the service integration. The possible values are alertmanager, autoscaler, caching, cassandra_cross_service_cluster, clickhouse_credentials, clickhouse_kafka, clickhouse_postgresql, dashboard, datadog, datasource, disaster_recovery, external_aws_cloudwatch_logs, external_aws_cloudwatch_metrics, external_elasticsearch_logs, external_google_cloud_logging, external_opensearch_logs, flink, flink_external_bigquery, flink_external_kafka, flink_external_postgresql, internal_connectivity, jolokia, kafka_connect, kafka_connect_postgresql, kafka_logs, kafka_mirrormaker, logs, m3aggregator, m3coordinator, metrics, opensearch_cross_cluster_replication, opensearch_cross_cluster_search, prometheus, read_replica, rsyslog, schema_registry_proxy, stresstester, thanos_distributed_query, thanos_migrate, thanoscompactor, thanosquery, thanosruler, thanosstore, vector and vmalert.
- KafkaConnectUserConfigs List<GetServiceIntegrationKafkaConnectUserConfig>
- KafkaConnect user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- KafkaLogsUserConfigs List<GetServiceIntegrationKafkaLogsUserConfig>
- KafkaLogs user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- KafkaMirrormakerUserConfigs List<GetServiceIntegrationKafkaMirrormakerUserConfig>
- KafkaMirrormaker user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- LogsUserConfigs List<GetServiceIntegrationLogsUserConfig>
- Logs user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- MetricsUserConfigs List<GetServiceIntegrationMetricsUserConfig>
- Metrics user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- Project string
- Project the integration belongs to.
- PrometheusUserConfigs List<GetServiceIntegrationPrometheusUserConfig>
- Prometheus user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- SourceEndpointId string
- Source endpoint for the integration.
- SourceServiceName string
- Source service for the integration (if any)
- SourceServiceProject string
- Source service project name
Go
- ClickhouseCredentialsUserConfigs []GetServiceIntegrationClickhouseCredentialsUserConfig
- ClickhouseCredentials user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- ClickhouseKafkaUserConfigs []GetServiceIntegrationClickhouseKafkaUserConfig
- ClickhouseKafka user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- ClickhousePostgresqlUserConfigs []GetServiceIntegrationClickhousePostgresqlUserConfig
- ClickhousePostgresql user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- DatadogUserConfigs []GetServiceIntegrationDatadogUserConfig
- Datadog user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- DestinationEndpointId string
- Destination endpoint for the integration.
- DestinationServiceName string
- Destination service for the integration.
- DestinationServiceProject string
- Destination service project name
- ExternalAwsCloudwatchLogsUserConfigs []GetServiceIntegrationExternalAwsCloudwatchLogsUserConfig
- ExternalAwsCloudwatchLogs user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- ExternalAwsCloudwatchMetricsUserConfigs []GetServiceIntegrationExternalAwsCloudwatchMetricsUserConfig
- ExternalAwsCloudwatchMetrics user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- ExternalElasticsearchLogsUserConfigs []GetServiceIntegrationExternalElasticsearchLogsUserConfig
- ExternalElasticsearchLogs user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- ExternalOpensearchLogsUserConfigs []GetServiceIntegrationExternalOpensearchLogsUserConfig
- ExternalOpensearchLogs user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- FlinkExternalPostgresqlUserConfigs []GetServiceIntegrationFlinkExternalPostgresqlUserConfig
- FlinkExternalPostgresql user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- Id string
- The provider-assigned unique ID for this managed resource.
- IntegrationId string
- The ID of the Aiven service integration.
- IntegrationType string
- Type of the service integration. The possible values are alertmanager, autoscaler, caching, cassandra_cross_service_cluster, clickhouse_credentials, clickhouse_kafka, clickhouse_postgresql, dashboard, datadog, datasource, disaster_recovery, external_aws_cloudwatch_logs, external_aws_cloudwatch_metrics, external_elasticsearch_logs, external_google_cloud_logging, external_opensearch_logs, flink, flink_external_bigquery, flink_external_kafka, flink_external_postgresql, internal_connectivity, jolokia, kafka_connect, kafka_connect_postgresql, kafka_logs, kafka_mirrormaker, logs, m3aggregator, m3coordinator, metrics, opensearch_cross_cluster_replication, opensearch_cross_cluster_search, prometheus, read_replica, rsyslog, schema_registry_proxy, stresstester, thanos_distributed_query, thanos_migrate, thanoscompactor, thanosquery, thanosruler, thanosstore, vector and vmalert.
- KafkaConnectUserConfigs []GetServiceIntegrationKafkaConnectUserConfig
- KafkaConnect user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- KafkaLogsUserConfigs []GetServiceIntegrationKafkaLogsUserConfig
- KafkaLogs user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- KafkaMirrormakerUserConfigs []GetServiceIntegrationKafkaMirrormakerUserConfig
- KafkaMirrormaker user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- LogsUserConfigs []GetServiceIntegrationLogsUserConfig
- Logs user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- MetricsUserConfigs []GetServiceIntegrationMetricsUserConfig
- Metrics user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- Project string
- Project the integration belongs to.
- PrometheusUserConfigs []GetServiceIntegrationPrometheusUserConfig
- Prometheus user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- SourceEndpointId string
- Source endpoint for the integration.
- SourceServiceName string
- Source service for the integration (if any)
- SourceServiceProject string
- Source service project name
Java
- clickhouseCredentialsUserConfigs List<GetServiceIntegrationClickhouseCredentialsUserConfig>
- ClickhouseCredentials user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- clickhouseKafkaUserConfigs List<GetServiceIntegrationClickhouseKafkaUserConfig>
- ClickhouseKafka user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- clickhousePostgresqlUserConfigs List<GetServiceIntegrationClickhousePostgresqlUserConfig>
- ClickhousePostgresql user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- datadogUserConfigs List<GetServiceIntegrationDatadogUserConfig>
- Datadog user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- destinationEndpointId String
- Destination endpoint for the integration.
- destinationServiceName String
- Destination service for the integration.
- destinationServiceProject String
- Destination service project name
- externalAwsCloudwatchLogsUserConfigs List<GetServiceIntegrationExternalAwsCloudwatchLogsUserConfig>
- ExternalAwsCloudwatchLogs user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- externalAwsCloudwatchMetricsUserConfigs List<GetServiceIntegrationExternalAwsCloudwatchMetricsUserConfig>
- ExternalAwsCloudwatchMetrics user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- externalElasticsearchLogsUserConfigs List<GetServiceIntegrationExternalElasticsearchLogsUserConfig>
- ExternalElasticsearchLogs user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- externalOpensearchLogsUserConfigs List<GetServiceIntegrationExternalOpensearchLogsUserConfig>
- ExternalOpensearchLogs user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- flinkExternalPostgresqlUserConfigs List<GetServiceIntegrationFlinkExternalPostgresqlUserConfig>
- FlinkExternalPostgresql user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- id String
- The provider-assigned unique ID for this managed resource.
- integrationId String
- The ID of the Aiven service integration.
- integrationType String
- Type of the service integration. The possible values are alertmanager, autoscaler, caching, cassandra_cross_service_cluster, clickhouse_credentials, clickhouse_kafka, clickhouse_postgresql, dashboard, datadog, datasource, disaster_recovery, external_aws_cloudwatch_logs, external_aws_cloudwatch_metrics, external_elasticsearch_logs, external_google_cloud_logging, external_opensearch_logs, flink, flink_external_bigquery, flink_external_kafka, flink_external_postgresql, internal_connectivity, jolokia, kafka_connect, kafka_connect_postgresql, kafka_logs, kafka_mirrormaker, logs, m3aggregator, m3coordinator, metrics, opensearch_cross_cluster_replication, opensearch_cross_cluster_search, prometheus, read_replica, rsyslog, schema_registry_proxy, stresstester, thanos_distributed_query, thanos_migrate, thanoscompactor, thanosquery, thanosruler, thanosstore, vector and vmalert.
- kafkaConnectUserConfigs List<GetServiceIntegrationKafkaConnectUserConfig>
- KafkaConnect user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- kafkaLogsUserConfigs List<GetServiceIntegrationKafkaLogsUserConfig>
- KafkaLogs user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- kafkaMirrormakerUserConfigs List<GetServiceIntegrationKafkaMirrormakerUserConfig>
- KafkaMirrormaker user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- logsUserConfigs List<GetServiceIntegrationLogsUserConfig>
- Logs user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- metricsUserConfigs List<GetServiceIntegrationMetricsUserConfig>
- Metrics user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- project String
- Project the integration belongs to.
- prometheusUserConfigs List<GetServiceIntegrationPrometheusUserConfig>
- Prometheus user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- sourceEndpointId String
- Source endpoint for the integration.
- sourceServiceName String
- Source service for the integration (if any)
- sourceServiceProject String
- Source service project name
TypeScript
- clickhouseCredentialsUserConfigs GetServiceIntegrationClickhouseCredentialsUserConfig[]
- ClickhouseCredentials user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- clickhouseKafkaUserConfigs GetServiceIntegrationClickhouseKafkaUserConfig[]
- ClickhouseKafka user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- clickhousePostgresqlUserConfigs GetServiceIntegrationClickhousePostgresqlUserConfig[]
- ClickhousePostgresql user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- datadogUserConfigs GetServiceIntegrationDatadogUserConfig[]
- Datadog user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- destinationEndpointId string
- Destination endpoint for the integration.
- destinationServiceName string
- Destination service for the integration.
- destinationServiceProject string
- Destination service project name
- externalAwsCloudwatchLogsUserConfigs GetServiceIntegrationExternalAwsCloudwatchLogsUserConfig[]
- ExternalAwsCloudwatchLogs user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- externalAwsCloudwatchMetricsUserConfigs GetServiceIntegrationExternalAwsCloudwatchMetricsUserConfig[]
- ExternalAwsCloudwatchMetrics user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- externalElasticsearchLogsUserConfigs GetServiceIntegrationExternalElasticsearchLogsUserConfig[]
- ExternalElasticsearchLogs user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- externalOpensearchLogsUserConfigs GetServiceIntegrationExternalOpensearchLogsUserConfig[]
- ExternalOpensearchLogs user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- flinkExternalPostgresqlUserConfigs GetServiceIntegrationFlinkExternalPostgresqlUserConfig[]
- FlinkExternalPostgresql user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- id string
- The provider-assigned unique ID for this managed resource.
- integrationId string
- The ID of the Aiven service integration.
- integrationType string
- Type of the service integration. The possible values are alertmanager, autoscaler, caching, cassandra_cross_service_cluster, clickhouse_credentials, clickhouse_kafka, clickhouse_postgresql, dashboard, datadog, datasource, disaster_recovery, external_aws_cloudwatch_logs, external_aws_cloudwatch_metrics, external_elasticsearch_logs, external_google_cloud_logging, external_opensearch_logs, flink, flink_external_bigquery, flink_external_kafka, flink_external_postgresql, internal_connectivity, jolokia, kafka_connect, kafka_connect_postgresql, kafka_logs, kafka_mirrormaker, logs, m3aggregator, m3coordinator, metrics, opensearch_cross_cluster_replication, opensearch_cross_cluster_search, prometheus, read_replica, rsyslog, schema_registry_proxy, stresstester, thanos_distributed_query, thanos_migrate, thanoscompactor, thanosquery, thanosruler, thanosstore, vector and vmalert.
- kafkaConnectUserConfigs GetServiceIntegrationKafkaConnectUserConfig[]
- KafkaConnect user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- kafkaLogsUserConfigs GetServiceIntegrationKafkaLogsUserConfig[]
- KafkaLogs user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- kafkaMirrormakerUserConfigs GetServiceIntegrationKafkaMirrormakerUserConfig[]
- KafkaMirrormaker user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- logsUserConfigs GetServiceIntegrationLogsUserConfig[]
- Logs user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- metricsUserConfigs GetServiceIntegrationMetricsUserConfig[]
- Metrics user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- project string
- Project the integration belongs to.
- prometheusUserConfigs GetServiceIntegrationPrometheusUserConfig[]
- Prometheus user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- sourceEndpointId string
- Source endpoint for the integration.
- sourceServiceName string
- Source service for the integration (if any)
- sourceServiceProject string
- Source service project name
Python
- clickhouse_credentials_user_configs Sequence[GetServiceIntegrationClickhouseCredentialsUserConfig]
- ClickhouseCredentials user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- clickhouse_kafka_user_configs Sequence[GetServiceIntegrationClickhouseKafkaUserConfig]
- ClickhouseKafka user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- clickhouse_postgresql_user_configs Sequence[GetServiceIntegrationClickhousePostgresqlUserConfig]
- ClickhousePostgresql user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- datadog_user_configs Sequence[GetServiceIntegrationDatadogUserConfig]
- Datadog user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- destination_endpoint_id str
- Destination endpoint for the integration.
- destination_service_name str
- Destination service for the integration.
- destination_service_project str
- Destination service project name
- external_aws_cloudwatch_logs_user_configs Sequence[GetServiceIntegrationExternalAwsCloudwatchLogsUserConfig]
- ExternalAwsCloudwatchLogs user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- external_aws_cloudwatch_metrics_user_configs Sequence[GetServiceIntegrationExternalAwsCloudwatchMetricsUserConfig]
- ExternalAwsCloudwatchMetrics user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- external_elasticsearch_logs_user_configs Sequence[GetServiceIntegrationExternalElasticsearchLogsUserConfig]
- ExternalElasticsearchLogs user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- external_opensearch_logs_user_configs Sequence[GetServiceIntegrationExternalOpensearchLogsUserConfig]
- ExternalOpensearchLogs user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- flink_external_postgresql_user_configs Sequence[GetServiceIntegrationFlinkExternalPostgresqlUserConfig]
- FlinkExternalPostgresql user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- id str
- The provider-assigned unique ID for this managed resource.
- integration_id str
- The ID of the Aiven service integration.
- integration_type str
- Type of the service integration. The possible values are alertmanager, autoscaler, caching, cassandra_cross_service_cluster, clickhouse_credentials, clickhouse_kafka, clickhouse_postgresql, dashboard, datadog, datasource, disaster_recovery, external_aws_cloudwatch_logs, external_aws_cloudwatch_metrics, external_elasticsearch_logs, external_google_cloud_logging, external_opensearch_logs, flink, flink_external_bigquery, flink_external_kafka, flink_external_postgresql, internal_connectivity, jolokia, kafka_connect, kafka_connect_postgresql, kafka_logs, kafka_mirrormaker, logs, m3aggregator, m3coordinator, metrics, opensearch_cross_cluster_replication, opensearch_cross_cluster_search, prometheus, read_replica, rsyslog, schema_registry_proxy, stresstester, thanos_distributed_query, thanos_migrate, thanoscompactor, thanosquery, thanosruler, thanosstore, vector and vmalert.
- kafka_connect_user_configs Sequence[GetServiceIntegrationKafkaConnectUserConfig]
- KafkaConnect user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- kafka_logs_user_configs Sequence[GetServiceIntegrationKafkaLogsUserConfig]
- KafkaLogs user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- kafka_mirrormaker_user_configs Sequence[GetServiceIntegrationKafkaMirrormakerUserConfig]
- KafkaMirrormaker user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- logs_user_configs Sequence[GetServiceIntegrationLogsUserConfig]
- Logs user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- metrics_user_configs Sequence[GetServiceIntegrationMetricsUserConfig]
- Metrics user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- project str
- Project the integration belongs to.
- prometheus_user_configs Sequence[GetServiceIntegrationPrometheusUserConfig]
- Prometheus user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- source_endpoint_id str
- Source endpoint for the integration.
- source_service_name str
- Source service for the integration (if any)
- source_service_project str
- Source service project name
YAML
- clickhouseCredentialsUserConfigs List<Property Map>
- ClickhouseCredentials user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- clickhouseKafkaUserConfigs List<Property Map>
- ClickhouseKafka user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- clickhousePostgresqlUserConfigs List<Property Map>
- ClickhousePostgresql user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- datadogUserConfigs List<Property Map>
- Datadog user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- destinationEndpointId String
- Destination endpoint for the integration.
- destinationServiceName String
- Destination service for the integration.
- destinationServiceProject String
- Destination service project name
- externalAwsCloudwatchLogsUserConfigs List<Property Map>
- ExternalAwsCloudwatchLogs user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- externalAwsCloudwatchMetricsUserConfigs List<Property Map>
- ExternalAwsCloudwatchMetrics user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- externalElasticsearchLogsUserConfigs List<Property Map>
- ExternalElasticsearchLogs user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- externalOpensearchLogsUserConfigs List<Property Map>
- ExternalOpensearchLogs user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- flinkExternalPostgresqlUserConfigs List<Property Map>
- FlinkExternalPostgresql user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- id String
- The provider-assigned unique ID for this managed resource.
- integrationId String
- The ID of the Aiven service integration.
- integrationType String
- Type of the service integration. The possible values are alertmanager, autoscaler, caching, cassandra_cross_service_cluster, clickhouse_credentials, clickhouse_kafka, clickhouse_postgresql, dashboard, datadog, datasource, disaster_recovery, external_aws_cloudwatch_logs, external_aws_cloudwatch_metrics, external_elasticsearch_logs, external_google_cloud_logging, external_opensearch_logs, flink, flink_external_bigquery, flink_external_kafka, flink_external_postgresql, internal_connectivity, jolokia, kafka_connect, kafka_connect_postgresql, kafka_logs, kafka_mirrormaker, logs, m3aggregator, m3coordinator, metrics, opensearch_cross_cluster_replication, opensearch_cross_cluster_search, prometheus, read_replica, rsyslog, schema_registry_proxy, stresstester, thanos_distributed_query, thanos_migrate, thanoscompactor, thanosquery, thanosruler, thanosstore, vector and vmalert.
- kafkaConnectUserConfigs List<Property Map>
- KafkaConnect user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- kafkaLogsUserConfigs List<Property Map>
- KafkaLogs user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- kafkaMirrormakerUserConfigs List<Property Map>
- KafkaMirrormaker user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- logsUserConfigs List<Property Map>
- Logs user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- metricsUserConfigs List<Property Map>
- Metrics user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- project String
- Project the integration belongs to.
- prometheusUserConfigs List<Property Map>
- Prometheus user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- sourceEndpointId String
- Source endpoint for the integration.
- sourceServiceName String
- Source service for the integration (if any)
- sourceServiceProject String
- Source service project name
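To illustrate consuming these outputs, here is a minimal TypeScript sketch; the project and service names are hypothetical placeholders:

import * as aiven from "@pulumi/aiven";

// Look up an existing metrics integration (hypothetical names).
const integration = aiven.getServiceIntegrationOutput({
    project: "my-project",
    destinationServiceName: "my-thanos",
    integrationType: "metrics",
    sourceServiceName: "my-kafka",
});

// Output properties can be exported or fed into other resources.
export const destinationServiceProject = integration.destinationServiceProject;
// The user config lists are empty unless advanced options were set on the integration.
export const hasCustomMetricsConfig = integration.metricsUserConfigs.apply(c => c.length > 0);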
Supporting Types
GetServiceIntegrationClickhouseCredentialsUserConfig
Go
- Grants []GetServiceIntegrationClickhouseCredentialsUserConfigGrant
- Grants to assign
TypeScript
- grants GetServiceIntegrationClickhouseCredentialsUserConfigGrant[]
- Grants to assign
YAML
- grants List<Property Map>
- Grants to assign
GetServiceIntegrationClickhouseCredentialsUserConfigGrant
C#
- User string
- User or role to assign the grant to. Example: alice.
Go
- User string
- User or role to assign the grant to. Example: alice.
Java
- user String
- User or role to assign the grant to. Example: alice.
TypeScript
- user string
- User or role to assign the grant to. Example: alice.
Python
- user str
- User or role to assign the grant to. Example: alice.
YAML
- user String
- User or role to assign the grant to. Example: alice.
GetServiceIntegrationClickhouseKafkaUserConfig
C#
- Tables List<GetServiceIntegrationClickhouseKafkaUserConfigTable>
- Array of table configurations that define how Kafka topics are mapped to ClickHouse tables. Each table configuration specifies the table structure, associated Kafka topics, and read/write settings (see the sketch after this list)
Go
- Tables []GetServiceIntegrationClickhouseKafkaUserConfigTable
- Array of table configurations that define how Kafka topics are mapped to ClickHouse tables. Each table configuration specifies the table structure, associated Kafka topics, and read/write settings
Java
- tables List<GetServiceIntegrationClickhouseKafkaUserConfigTable>
- Array of table configurations that define how Kafka topics are mapped to ClickHouse tables. Each table configuration specifies the table structure, associated Kafka topics, and read/write settings
TypeScript
- tables GetServiceIntegrationClickhouseKafkaUserConfigTable[]
- Array of table configurations that define how Kafka topics are mapped to ClickHouse tables. Each table configuration specifies the table structure, associated Kafka topics, and read/write settings
Python
- tables Sequence[GetServiceIntegrationClickhouseKafkaUserConfigTable]
- Array of table configurations that define how Kafka topics are mapped to ClickHouse tables. Each table configuration specifies the table structure, associated Kafka topics, and read/write settings
YAML
- tables List<Property Map>
- Array of table configurations that define how Kafka topics are mapped to ClickHouse tables. Each table configuration specifies the table structure, associated Kafka topics, and read/write settings
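As an illustration of the table-to-topic mapping described above, here is a minimal TypeScript sketch that reads the mapping back from a clickhouse_kafka integration; all project and service names are hypothetical:

import * as aiven from "@pulumi/aiven";

// Look up an existing clickhouse_kafka integration (hypothetical names).
const ckIntegration = aiven.getServiceIntegrationOutput({
    project: "my-project",
    sourceServiceName: "my-kafka",
    destinationServiceName: "my-clickhouse",
    integrationType: "clickhouse_kafka",
});

// Each table entry pairs a ClickHouse table name with the Kafka topics it
// reads from or writes to; the lists are empty if no tables are configured.
export const tableTopicMap = ckIntegration.clickhouseKafkaUserConfigs.apply(configs =>
    (configs[0]?.tables ?? []).map(t => ({
        table: t.name,
        topics: (t.topics ?? []).map(topic => topic.name),
    })),
);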
GetServiceIntegrationClickhouseKafkaUserConfigTable
C#
- Columns List<GetServiceIntegrationClickhouseKafkaUserConfigTableColumn>
- Array of column definitions that specify the structure of the ClickHouse table. Each column maps to a field in the Kafka messages
- DataFormat string
- Enum: Avro, AvroConfluent, CSV, JSONAsString, JSONCompactEachRow, JSONCompactStringsEachRow, JSONEachRow, JSONStringsEachRow, MsgPack, Parquet, RawBLOB, TSKV, TSV, TabSeparated. The format of the messages in the Kafka topics. Determines how ClickHouse parses and serializes the data (e.g., JSON, CSV, Avro). Default: JSONEachRow.
- GroupName string
- The Kafka consumer group name. Multiple consumers with the same group name will share the workload and maintain offset positions. Default: clickhouse.
- Name string
- The name of the ClickHouse table to be created. This table can consume data from and write data to the specified Kafka topics. Example: events.
- Topics List<GetServiceIntegrationClickhouseKafkaUserConfigTableTopic>
- Array of Kafka topics that this table will read data from or write data to. Messages from all specified topics will be inserted into this table, and data inserted into this table will be published to the topics
- AutoOffsetReset string
- Enum: beginning, earliest, end, largest, latest, smallest. Determines where to start reading from Kafka when no offset is stored or the stored offset is out of range. earliest starts from the beginning, latest starts from the end. Default: earliest.
- DateTimeInputFormat string
- Enum: basic, best_effort, best_effort_us. Specifies how ClickHouse should parse DateTime values from text-based input formats. basic uses simple parsing, best_effort attempts more flexible parsing. Default: basic.
- HandleErrorMode string
- Enum: default, stream. Defines how ClickHouse should handle errors when processing Kafka messages. default stops on errors, stream continues processing and logs errors. Default: default.
- MaxBlockSize int
- Maximum number of rows to collect before flushing data between Kafka and ClickHouse. Default: 0.
- MaxRowsPerMessage int
- Maximum number of rows that can be processed from a single Kafka message for row-based formats. Useful for controlling memory usage. Default: 1.
- NumConsumers int
- Number of Kafka consumers to run per table per replica. Increasing this can improve throughput but may increase resource usage. Default: 1.
- PollMaxBatchSize int
- Maximum number of messages to fetch in a single Kafka poll operation for reading. Default: 0.
- PollMaxTimeoutMs int
- Timeout in milliseconds for a single poll from Kafka. Takes the value of the stream_flush_interval_ms server setting by default (500ms). Default: 0.
- ProducerBatchNumMessages int
- The maximum number of messages in a batch sent to Kafka. If the number of messages exceeds this value, the batch is sent. Default: 10000.
- ProducerBatchSize int
- The maximum size in bytes of a batch of messages sent to Kafka. If the batch size is exceeded, the batch is sent.
- ProducerCompressionCodec string
- Enum: gzip, lz4, none, snappy, zstd. The compression codec to use when sending a batch of messages to Kafka. Default: none.
- ProducerCompressionLevel int
- The compression level to use when sending a batch of messages to Kafka. Usable range is algorithm-dependent: [0-9] for gzip; [0-12] for lz4; only 0 for snappy; -1 = codec-dependent default compression level. Default: -1.
- ProducerLingerMs int
- The time in milliseconds to wait for additional messages before sending a batch. If the time is exceeded, the batch is sent. Default: 5.
- ProducerQueueBufferingMaxKbytes int
- The maximum size of the buffer in kilobytes before sending.
- ProducerQueueBufferingMaxMessages int
- The maximum number of messages to buffer before sending. Default: 100000.
- ProducerRequestRequiredAcks int
- The number of acknowledgements the leader broker must receive from ISR brokers before responding to the request: 0=Broker does not send any response/ack to client, -1 will block until message is committed by all in sync replicas (ISRs). Default: -1.
- SkipBrokenMessages int
- Number of broken messages to skip before stopping processing when reading from Kafka. Useful for handling corrupted data without failing the entire integration. Default: 0.
- ThreadPerConsumer bool
- When enabled, each consumer runs in its own thread, providing better isolation and potentially better performance for high-throughput scenarios. Default: false.
Go
- Columns []GetServiceIntegrationClickhouseKafkaUserConfigTableColumn
- Array of column definitions that specify the structure of the ClickHouse table. Each column maps to a field in the Kafka messages
- DataFormat string
- Enum: Avro, AvroConfluent, CSV, JSONAsString, JSONCompactEachRow, JSONCompactStringsEachRow, JSONEachRow, JSONStringsEachRow, MsgPack, Parquet, RawBLOB, TSKV, TSV, TabSeparated. The format of the messages in the Kafka topics. Determines how ClickHouse parses and serializes the data (e.g., JSON, CSV, Avro). Default: JSONEachRow.
- GroupName string
- The Kafka consumer group name. Multiple consumers with the same group name will share the workload and maintain offset positions. Default: clickhouse.
- Name string
- The name of the ClickHouse table to be created. This table can consume data from and write data to the specified Kafka topics. Example: events.
- Topics []GetServiceIntegrationClickhouseKafkaUserConfigTableTopic
- Array of Kafka topics that this table will read data from or write data to. Messages from all specified topics will be inserted into this table, and data inserted into this table will be published to the topics
- AutoOffsetReset string
- Enum: beginning, earliest, end, largest, latest, smallest. Determines where to start reading from Kafka when no offset is stored or the stored offset is out of range. earliest starts from the beginning, latest starts from the end. Default: earliest.
- DateTimeInputFormat string
- Enum: basic, best_effort, best_effort_us. Specifies how ClickHouse should parse DateTime values from text-based input formats. basic uses simple parsing, best_effort attempts more flexible parsing. Default: basic.
- HandleErrorMode string
- Enum: default, stream. Defines how ClickHouse should handle errors when processing Kafka messages. default stops on errors, stream continues processing and logs errors. Default: default.
- MaxBlockSize int
- Maximum number of rows to collect before flushing data between Kafka and ClickHouse. Default: 0.
- MaxRowsPerMessage int
- Maximum number of rows that can be processed from a single Kafka message for row-based formats. Useful for controlling memory usage. Default: 1.
- NumConsumers int
- Number of Kafka consumers to run per table per replica. Increasing this can improve throughput but may increase resource usage. Default: 1.
- PollMaxBatchSize int
- Maximum number of messages to fetch in a single Kafka poll operation for reading. Default: 0.
- PollMaxTimeoutMs int
- Timeout in milliseconds for a single poll from Kafka. Takes the value of the stream_flush_interval_ms server setting by default (500ms). Default: 0.
- ProducerBatchNumMessages int
- The maximum number of messages in a batch sent to Kafka. If the number of messages exceeds this value, the batch is sent. Default: 10000.
- ProducerBatchSize int
- The maximum size in bytes of a batch of messages sent to Kafka. If the batch size is exceeded, the batch is sent.
- ProducerCompressionCodec string
- Enum: gzip, lz4, none, snappy, zstd. The compression codec to use when sending a batch of messages to Kafka. Default: none.
- ProducerCompressionLevel int
- The compression level to use when sending a batch of messages to Kafka. Usable range is algorithm-dependent: [0-9] for gzip; [0-12] for lz4; only 0 for snappy; -1 = codec-dependent default compression level. Default: -1.
- ProducerLingerMs int
- The time in milliseconds to wait for additional messages before sending a batch. If the time is exceeded, the batch is sent. Default: 5.
- ProducerQueueBufferingMaxKbytes int
- The maximum size of the buffer in kilobytes before sending.
- ProducerQueueBufferingMaxMessages int
- The maximum number of messages to buffer before sending. Default: 100000.
- ProducerRequestRequiredAcks int
- The number of acknowledgements the leader broker must receive from ISR brokers before responding to the request: 0=Broker does not send any response/ack to client, -1 will block until message is committed by all in sync replicas (ISRs). Default: -1.
- SkipBrokenMessages int
- Number of broken messages to skip before stopping processing when reading from Kafka. Useful for handling corrupted data without failing the entire integration. Default: 0.
- ThreadPerConsumer bool
- When enabled, each consumer runs in its own thread, providing better isolation and potentially better performance for high-throughput scenarios. Default: false.
Java
- columns List<GetServiceIntegrationClickhouseKafkaUserConfigTableColumn>
- Array of column definitions that specify the structure of the ClickHouse table. Each column maps to a field in the Kafka messages
- dataFormat String
- Enum: Avro, AvroConfluent, CSV, JSONAsString, JSONCompactEachRow, JSONCompactStringsEachRow, JSONEachRow, JSONStringsEachRow, MsgPack, Parquet, RawBLOB, TSKV, TSV, TabSeparated. The format of the messages in the Kafka topics. Determines how ClickHouse parses and serializes the data (e.g., JSON, CSV, Avro). Default: JSONEachRow.
- groupName String
- The Kafka consumer group name. Multiple consumers with the same group name will share the workload and maintain offset positions. Default: clickhouse.
- name String
- The name of the ClickHouse table to be created. This table can consume data from and write data to the specified Kafka topics. Example: events.
- topics List<GetServiceIntegrationClickhouseKafkaUserConfigTableTopic>
- Array of Kafka topics that this table will read data from or write data to. Messages from all specified topics will be inserted into this table, and data inserted into this table will be published to the topics
- autoOffsetReset String
- Enum: beginning, earliest, end, largest, latest, smallest. Determines where to start reading from Kafka when no offset is stored or the stored offset is out of range. earliest starts from the beginning, latest starts from the end. Default: earliest.
- dateTimeInputFormat String
- Enum: basic, best_effort, best_effort_us. Specifies how ClickHouse should parse DateTime values from text-based input formats. basic uses simple parsing, best_effort attempts more flexible parsing. Default: basic.
- handleErrorMode String
- Enum: default, stream. Defines how ClickHouse should handle errors when processing Kafka messages. default stops on errors, stream continues processing and logs errors. Default: default.
- maxBlockSize Integer
- Maximum number of rows to collect before flushing data between Kafka and ClickHouse. Default: 0.
- maxRowsPerMessage Integer
- Maximum number of rows that can be processed from a single Kafka message for row-based formats. Useful for controlling memory usage. Default: 1.
- numConsumers Integer
- Number of Kafka consumers to run per table per replica. Increasing this can improve throughput but may increase resource usage. Default: 1.
- pollMaxBatchSize Integer
- Maximum number of messages to fetch in a single Kafka poll operation for reading. Default: 0.
- pollMaxTimeoutMs Integer
- Timeout in milliseconds for a single poll from Kafka. Takes the value of the stream_flush_interval_ms server setting by default (500ms). Default: 0.
- producerBatchNumMessages Integer
- The maximum number of messages in a batch sent to Kafka. If the number of messages exceeds this value, the batch is sent. Default: 10000.
- producerBatchSize Integer
- The maximum size in bytes of a batch of messages sent to Kafka. If the batch size is exceeded, the batch is sent.
- producerCompressionCodec String
- Enum: gzip, lz4, none, snappy, zstd. The compression codec to use when sending a batch of messages to Kafka. Default: none.
- producerCompressionLevel Integer
- The compression level to use when sending a batch of messages to Kafka. Usable range is algorithm-dependent: [0-9] for gzip; [0-12] for lz4; only 0 for snappy; -1 = codec-dependent default compression level. Default: -1.
- producerLingerMs Integer
- The time in milliseconds to wait for additional messages before sending a batch. If the time is exceeded, the batch is sent. Default: 5.
- producerQueueBufferingMaxKbytes Integer
- The maximum size of the buffer in kilobytes before sending.
- producerQueueBufferingMaxMessages Integer
- The maximum number of messages to buffer before sending. Default: 100000.
- producerRequestRequiredAcks Integer
- The number of acknowledgements the leader broker must receive from ISR brokers before responding to the request: 0=Broker does not send any response/ack to client, -1 will block until message is committed by all in sync replicas (ISRs). Default: -1.
- skipBrokenMessages Integer
- Number of broken messages to skip before stopping processing when reading from Kafka. Useful for handling corrupted data without failing the entire integration. Default: 0.
- threadPerConsumer Boolean
- When enabled, each consumer runs in its own thread, providing better isolation and potentially better performance for high-throughput scenarios. Default: false.
- columns GetServiceIntegrationClickhouseKafkaUserConfigTableColumn[]
- Array of column definitions that specify the structure of the ClickHouse table. Each column maps to a field in the Kafka messages.
- dataFormat string
- Enum: Avro, AvroConfluent, CSV, JSONAsString, JSONCompactEachRow, JSONCompactStringsEachRow, JSONEachRow, JSONStringsEachRow, MsgPack, Parquet, RawBLOB, TSKV, TSV, TabSeparated. The format of the messages in the Kafka topics. Determines how ClickHouse parses and serializes the data (e.g., JSON, CSV, Avro). Default: JSONEachRow.
- groupName string
- The Kafka consumer group name. Multiple consumers with the same group name will share the workload and maintain offset positions. Default: clickhouse.
- name string
- The name of the ClickHouse table to be created. This table can consume data from and write data to the specified Kafka topics. Example: events.
- topics GetServiceIntegrationClickhouseKafkaUserConfigTableTopic[]
- Array of Kafka topics that this table will read data from or write data to. Messages from all specified topics will be inserted into this table, and data inserted into this table will be published to the topics.
- autoOffsetReset string
- Enum: beginning, earliest, end, largest, latest, smallest. Determines where to start reading from Kafka when no offset is stored or the stored offset is out of range. earliest starts from the beginning, latest starts from the end. Default: earliest.
- dateTimeInputFormat string
- Enum: basic, best_effort, best_effort_us. Specifies how ClickHouse should parse DateTime values from text-based input formats. basic uses simple parsing, best_effort attempts more flexible parsing. Default: basic.
- handleErrorMode string
- Enum: default, stream. Defines how ClickHouse should handle errors when processing Kafka messages. default stops on errors, stream continues processing and logs errors. Default: default.
- maxBlockSize number
- Maximum number of rows to collect before flushing data between Kafka and ClickHouse. Default: 0.
- maxRowsPerMessage number
- Maximum number of rows that can be processed from a single Kafka message for row-based formats. Useful for controlling memory usage. Default: 1.
- numConsumers number
- Number of Kafka consumers to run per table per replica. Increasing this can improve throughput but may increase resource usage. Default: 1.
- pollMaxBatchSize number
- Maximum number of messages to fetch in a single Kafka poll operation for reading. Default: 0.
- pollMaxTimeoutMs number
- Timeout in milliseconds for a single poll from Kafka. Takes the value of the stream_flush_interval_ms server setting by default (500ms). Default: 0.
- producerBatchNumMessages number
- The maximum number of messages in a batch sent to Kafka. If the number of messages exceeds this value, the batch is sent. Default: 10000.
- producerBatchSize number
- The maximum size in bytes of a batch of messages sent to Kafka. If the batch size is exceeded, the batch is sent.
- producerCompressionCodec string
- Enum: gzip, lz4, none, snappy, zstd. The compression codec to use when sending a batch of messages to Kafka. Default: none.
- producerCompressionLevel number
- The compression level to use when sending a batch of messages to Kafka. Usable range is algorithm-dependent: [0-9] for gzip; [0-12] for lz4; only 0 for snappy; -1 = codec-dependent default compression level. Default: -1.
- producerLingerMs number
- The time in milliseconds to wait for additional messages before sending a batch. If the time is exceeded, the batch is sent. Default: 5.
- producerQueueBufferingMaxKbytes number
- The maximum size of the buffer in kilobytes before sending.
- producerQueueBufferingMaxMessages number
- The maximum number of messages to buffer before sending. Default: 100000.
- producerRequestRequiredAcks number
- The number of acknowledgements the leader broker must receive from ISR brokers before responding to the request: 0=Broker does not send any response/ack to client, -1 will block until message is committed by all in sync replicas (ISRs). Default: -1.
- skipBrokenMessages number
- Number of broken messages to skip before stopping processing when reading from Kafka. Useful for handling corrupted data without failing the entire integration. Default: 0.
- threadPerConsumer boolean
- When enabled, each consumer runs in its own thread, providing better isolation and potentially better performance for high-throughput scenarios. Default: false.
- columns Sequence[GetServiceIntegrationClickhouseKafkaUserConfigTableColumn]
- Array of column definitions that specify the structure of the ClickHouse table. Each column maps to a field in the Kafka messages.
- data_format str
- Enum: Avro, AvroConfluent, CSV, JSONAsString, JSONCompactEachRow, JSONCompactStringsEachRow, JSONEachRow, JSONStringsEachRow, MsgPack, Parquet, RawBLOB, TSKV, TSV, TabSeparated. The format of the messages in the Kafka topics. Determines how ClickHouse parses and serializes the data (e.g., JSON, CSV, Avro). Default: JSONEachRow.
- group_name str
- The Kafka consumer group name. Multiple consumers with the same group name will share the workload and maintain offset positions. Default: clickhouse.
- name str
- The name of the ClickHouse table to be created. This table can consume data from and write data to the specified Kafka topics. Example: events.
- topics Sequence[GetServiceIntegrationClickhouseKafkaUserConfigTableTopic]
- Array of Kafka topics that this table will read data from or write data to. Messages from all specified topics will be inserted into this table, and data inserted into this table will be published to the topics.
- auto_offset_reset str
- Enum: beginning, earliest, end, largest, latest, smallest. Determines where to start reading from Kafka when no offset is stored or the stored offset is out of range. earliest starts from the beginning, latest starts from the end. Default: earliest.
- date_time_input_format str
- Enum: basic, best_effort, best_effort_us. Specifies how ClickHouse should parse DateTime values from text-based input formats. basic uses simple parsing, best_effort attempts more flexible parsing. Default: basic.
- handle_error_mode str
- Enum: default, stream. Defines how ClickHouse should handle errors when processing Kafka messages. default stops on errors, stream continues processing and logs errors. Default: default.
- max_block_size int
- Maximum number of rows to collect before flushing data between Kafka and ClickHouse. Default: 0.
- max_rows_per_message int
- Maximum number of rows that can be processed from a single Kafka message for row-based formats. Useful for controlling memory usage. Default: 1.
- num_consumers int
- Number of Kafka consumers to run per table per replica. Increasing this can improve throughput but may increase resource usage. Default: 1.
- poll_max_batch_size int
- Maximum number of messages to fetch in a single Kafka poll operation for reading. Default: 0.
- poll_max_timeout_ms int
- Timeout in milliseconds for a single poll from Kafka. Takes the value of the stream_flush_interval_ms server setting by default (500ms). Default: 0.
- producer_batch_num_messages int
- The maximum number of messages in a batch sent to Kafka. If the number of messages exceeds this value, the batch is sent. Default: 10000.
- producer_batch_size int
- The maximum size in bytes of a batch of messages sent to Kafka. If the batch size is exceeded, the batch is sent.
- producer_compression_codec str
- Enum: gzip, lz4, none, snappy, zstd. The compression codec to use when sending a batch of messages to Kafka. Default: none.
- producer_compression_level int
- The compression level to use when sending a batch of messages to Kafka. Usable range is algorithm-dependent: [0-9] for gzip; [0-12] for lz4; only 0 for snappy; -1 = codec-dependent default compression level. Default: -1.
- producer_linger_ms int
- The time in milliseconds to wait for additional messages before sending a batch. If the time is exceeded, the batch is sent. Default: 5.
- producer_queue_buffering_max_kbytes int
- The maximum size of the buffer in kilobytes before sending.
- producer_queue_buffering_max_messages int
- The maximum number of messages to buffer before sending. Default: 100000.
- producer_request_required_acks int
- The number of acknowledgements the leader broker must receive from ISR brokers before responding to the request: 0=Broker does not send any response/ack to client, -1 will block until message is committed by all in sync replicas (ISRs). Default: -1.
- skip_broken_messages int
- Number of broken messages to skip before stopping processing when reading from Kafka. Useful for handling corrupted data without failing the entire integration. Default: 0.
- thread_per_consumer bool
- When enabled, each consumer runs in its own thread, providing better isolation and potentially better performance for high-throughput scenarios. Default: false.
- columns List<Property Map>
- Array of column definitions that specify the structure of the ClickHouse table. Each column maps to a field in the Kafka messages.
- dataFormat String
- Enum: Avro, AvroConfluent, CSV, JSONAsString, JSONCompactEachRow, JSONCompactStringsEachRow, JSONEachRow, JSONStringsEachRow, MsgPack, Parquet, RawBLOB, TSKV, TSV, TabSeparated. The format of the messages in the Kafka topics. Determines how ClickHouse parses and serializes the data (e.g., JSON, CSV, Avro). Default: JSONEachRow.
- groupName String
- The Kafka consumer group name. Multiple consumers with the same group name will share the workload and maintain offset positions. Default: clickhouse.
- name String
- The name of the ClickHouse table to be created. This table can consume data from and write data to the specified Kafka topics. Example: events.
- topics List<Property Map>
- Array of Kafka topics that this table will read data from or write data to. Messages from all specified topics will be inserted into this table, and data inserted into this table will be published to the topics.
- autoOffsetReset String
- Enum: beginning, earliest, end, largest, latest, smallest. Determines where to start reading from Kafka when no offset is stored or the stored offset is out of range. earliest starts from the beginning, latest starts from the end. Default: earliest.
- dateTimeInputFormat String
- Enum: basic, best_effort, best_effort_us. Specifies how ClickHouse should parse DateTime values from text-based input formats. basic uses simple parsing, best_effort attempts more flexible parsing. Default: basic.
- handleErrorMode String
- Enum: default, stream. Defines how ClickHouse should handle errors when processing Kafka messages. default stops on errors, stream continues processing and logs errors. Default: default.
- maxBlockSize Number
- Maximum number of rows to collect before flushing data between Kafka and ClickHouse. Default: 0.
- maxRowsPerMessage Number
- Maximum number of rows that can be processed from a single Kafka message for row-based formats. Useful for controlling memory usage. Default: 1.
- numConsumers Number
- Number of Kafka consumers to run per table per replica. Increasing this can improve throughput but may increase resource usage. Default: 1.
- pollMaxBatchSize Number
- Maximum number of messages to fetch in a single Kafka poll operation for reading. Default: 0.
- pollMaxTimeoutMs Number
- Timeout in milliseconds for a single poll from Kafka. Takes the value of the stream_flush_interval_ms server setting by default (500ms). Default: 0.
- producerBatchNumMessages Number
- The maximum number of messages in a batch sent to Kafka. If the number of messages exceeds this value, the batch is sent. Default: 10000.
- producerBatchSize Number
- The maximum size in bytes of a batch of messages sent to Kafka. If the batch size is exceeded, the batch is sent.
- producerCompressionCodec String
- Enum: gzip, lz4, none, snappy, zstd. The compression codec to use when sending a batch of messages to Kafka. Default: none.
- producerCompressionLevel Number
- The compression level to use when sending a batch of messages to Kafka. Usable range is algorithm-dependent: [0-9] for gzip; [0-12] for lz4; only 0 for snappy; -1 = codec-dependent default compression level. Default: -1.
- producerLingerMs Number
- The time in milliseconds to wait for additional messages before sending a batch. If the time is exceeded, the batch is sent. Default: 5.
- producerQueueBufferingMaxKbytes Number
- The maximum size of the buffer in kilobytes before sending.
- producerQueueBufferingMaxMessages Number
- The maximum number of messages to buffer before sending. Default: 100000.
- producerRequestRequiredAcks Number
- The number of acknowledgements the leader broker must receive from ISR brokers before responding to the request: 0=Broker does not send any response/ack to client, -1 will block until message is committed by all in sync replicas (ISRs). Default: -1.
- skipBrokenMessages Number
- Number of broken messages to skip before stopping processing when reading from Kafka. Useful for handling corrupted data without failing the entire integration. Default: 0.
- threadPerConsumer Boolean
- When enabled, each consumer runs in its own thread, providing better isolation and potentially better performance for high-throughput scenarios. Default: false.
GetServiceIntegrationClickhouseKafkaUserConfigTableColumn        
GetServiceIntegrationClickhouseKafkaUserConfigTableTopic        
- Name string
- The name of the Kafka topic to read messages from or write messages to. The topic must exist in the Kafka cluster. Example: topic_name.
- Name string
- The name of the Kafka topic to read messages from or write messages to. The topic must exist in the Kafka cluster. Example: topic_name.
- name String
- The name of the Kafka topic to read messages from or write messages to. The topic must exist in the Kafka cluster. Example: topic_name.
- name string
- The name of the Kafka topic to read messages from or write messages to. The topic must exist in the Kafka cluster. Example: topic_name.
- name str
- The name of the Kafka topic to read messages from or write messages to. The topic must exist in the Kafka cluster. Example: topic_name.
- name String
- The name of the Kafka topic to read messages from or write messages to. The topic must exist in the Kafka cluster. Example: topic_name.
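The table options above are set when the integration is created. As a minimal, non-authoritative sketch (all project, service, topic, and column names here are hypothetical), a clickhouse_kafka integration created with the aiven.ServiceIntegration resource might define one table that consumes JSON messages:

```typescript
import * as aiven from "@pulumi/aiven";

// Sketch: a ClickHouse table fed by one Kafka topic. The column entries
// ({name, type}) mirror the TableColumn type referenced above.
const clickhouseKafka = new aiven.ServiceIntegration("clickhouse-kafka", {
    project: "my-project",
    integrationType: "clickhouse_kafka",
    sourceServiceName: "my-kafka",
    destinationServiceName: "my-clickhouse",
    clickhouseKafkaUserConfig: {
        tables: [{
            name: "events",
            groupName: "clickhouse",
            dataFormat: "JSONEachRow",
            columns: [
                { name: "id", type: "UInt64" },
                { name: "payload", type: "String" },
            ],
            topics: [{ name: "events" }],
        }],
    },
});
```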
GetServiceIntegrationClickhousePostgresqlUserConfig      
- Databases List<GetServiceIntegrationClickhousePostgresqlUserConfigDatabase>
- Databases to expose
- Databases []GetServiceIntegrationClickhousePostgresqlUserConfigDatabase
- Databases to expose
- databases List<GetServiceIntegrationClickhousePostgresqlUserConfigDatabase>
- Databases to expose
- databases GetServiceIntegrationClickhousePostgresqlUserConfigDatabase[]
- Databases to expose
- databases Sequence[GetServiceIntegrationClickhousePostgresqlUserConfigDatabase]
- Databases to expose
- databases List<Property Map>
- Databases to expose
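For context, the databases list is populated when the clickhouse_postgresql integration is created. A minimal sketch, assuming the database entry shape {database, schema} from the provider schema (all names are hypothetical):

```typescript
import * as aiven from "@pulumi/aiven";

// Sketch: expose one PostgreSQL database/schema to ClickHouse.
const clickhousePg = new aiven.ServiceIntegration("clickhouse-pg", {
    project: "my-project",
    integrationType: "clickhouse_postgresql",
    sourceServiceName: "my-postgres",
    destinationServiceName: "my-clickhouse",
    clickhousePostgresqlUserConfig: {
        databases: [{ database: "defaultdb", schema: "public" }],
    },
});
```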
GetServiceIntegrationClickhousePostgresqlUserConfigDatabase       
GetServiceIntegrationDatadogUserConfig     
- DatadogDbmEnabled bool
- Enable Datadog Database Monitoring.
- DatadogPgbouncerEnabled bool
- Enable Datadog PgBouncer Metric Tracking.
- DatadogTags List<GetServiceIntegrationDatadogUserConfigDatadogTag>
- Custom tags provided by user
- ExcludeConsumerGroups List<string>
- List of consumer groups to exclude.
- ExcludeTopics List<string>
- List of topics to exclude.
- IncludeConsumerGroups List<string>
- List of consumer groups to include.
- IncludeTopics List<string>
- List of topics to include.
- KafkaCustomMetrics List<string>
- List of custom metrics.
- MaxJmxMetrics int
- Maximum number of JMX metrics to send. Example: 2000.
- MirrormakerCustomMetrics List<string>
- List of custom metrics.
- Opensearch GetServiceIntegrationDatadogUserConfigOpensearch
- Datadog Opensearch Options
- Redis GetServiceIntegrationDatadogUserConfigRedis
- Datadog Redis Options
- DatadogDbmEnabled bool
- Enable Datadog Database Monitoring.
- DatadogPgbouncerEnabled bool
- Enable Datadog PgBouncer Metric Tracking.
- DatadogTags []GetServiceIntegrationDatadogUserConfigDatadogTag
- Custom tags provided by user
- ExcludeConsumerGroups []string
- List of consumer groups to exclude.
- ExcludeTopics []string
- List of topics to exclude.
- IncludeConsumerGroups []string
- List of consumer groups to include.
- IncludeTopics []string
- List of topics to include.
- KafkaCustomMetrics []string
- List of custom metrics.
- MaxJmxMetrics int
- Maximum number of JMX metrics to send. Example: 2000.
- MirrormakerCustomMetrics []string
- List of custom metrics.
- Opensearch GetServiceIntegrationDatadogUserConfigOpensearch
- Datadog Opensearch Options
- Redis GetServiceIntegrationDatadogUserConfigRedis
- Datadog Redis Options
- datadogDbmEnabled Boolean
- Enable Datadog Database Monitoring.
- datadogPgbouncerEnabled Boolean
- Enable Datadog PgBouncer Metric Tracking.
- datadogTags List<GetServiceIntegrationDatadogUserConfigDatadogTag>
- Custom tags provided by user
- excludeConsumerGroups List<String>
- List of consumer groups to exclude.
- excludeTopics List<String>
- List of topics to exclude.
- includeConsumerGroups List<String>
- List of consumer groups to include.
- includeTopics List<String>
- List of topics to include.
- kafkaCustomMetrics List<String>
- List of custom metrics.
- maxJmxMetrics Integer
- Maximum number of JMX metrics to send. Example: 2000.
- mirrormakerCustomMetrics List<String>
- List of custom metrics.
- opensearch GetServiceIntegrationDatadogUserConfigOpensearch
- Datadog Opensearch Options
- redis GetServiceIntegrationDatadogUserConfigRedis
- Datadog Redis Options
- datadogDbmEnabled boolean
- Enable Datadog Database Monitoring.
- datadogPgbouncerEnabled boolean
- Enable Datadog PgBouncer Metric Tracking.
- datadogTags GetServiceIntegrationDatadogUserConfigDatadogTag[]
- Custom tags provided by user
- excludeConsumerGroups string[]
- List of consumer groups to exclude.
- excludeTopics string[]
- List of topics to exclude.
- includeConsumerGroups string[]
- List of consumer groups to include.
- includeTopics string[]
- List of topics to include.
- kafkaCustomMetrics string[]
- List of custom metrics.
- maxJmxMetrics number
- Maximum number of JMX metrics to send. Example: 2000.
- mirrormakerCustomMetrics string[]
- List of custom metrics.
- opensearch GetServiceIntegrationDatadogUserConfigOpensearch
- Datadog Opensearch Options
- redis GetServiceIntegrationDatadogUserConfigRedis
- Datadog Redis Options
- datadog_dbm_enabled bool
- Enable Datadog Database Monitoring.
- datadog_pgbouncer_enabled bool
- Enable Datadog PgBouncer Metric Tracking.
- datadog_tags Sequence[GetServiceIntegrationDatadogUserConfigDatadogTag]
- Custom tags provided by user
- exclude_consumer_groups Sequence[str]
- List of consumer groups to exclude.
- exclude_topics Sequence[str]
- List of topics to exclude.
- include_consumer_groups Sequence[str]
- List of consumer groups to include.
- include_topics Sequence[str]
- List of topics to include.
- kafka_custom_metrics Sequence[str]
- List of custom metrics.
- max_jmx_metrics int
- Maximum number of JMX metrics to send. Example: 2000.
- mirrormaker_custom_metrics Sequence[str]
- List of custom metrics.
- opensearch GetServiceIntegrationDatadogUserConfigOpensearch
- Datadog Opensearch Options
- redis GetServiceIntegrationDatadogUserConfigRedis
- Datadog Redis Options
- datadogDbmEnabled Boolean
- Enable Datadog Database Monitoring.
- datadogPgbouncerEnabled Boolean
- Enable Datadog PgBouncer Metric Tracking.
- datadogTags List<Property Map>
- Custom tags provided by user
- excludeConsumerGroups List<String>
- List of consumer groups to exclude.
- excludeTopics List<String>
- List of topics to exclude.
- includeConsumerGroups List<String>
- List of consumer groups to include.
- includeTopics List<String>
- List of topics to include.
- kafkaCustomMetrics List<String>
- List of custom metrics.
- maxJmxMetrics Number
- Maximum number of JMX metrics to send. Example: 2000.
- mirrormakerCustomMetrics List<String>
- List of custom metrics.
- opensearch Property Map
- Datadog Opensearch Options
- redis Property Map
- Datadog Redis Options
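As a hedged illustration of how these options fit together, a Datadog integration might combine custom tags with topic and consumer-group filters. The endpoint ID and service names below are hypothetical; the tag entry shape {tag, comment} follows the DatadogTag type referenced above:

```typescript
import * as aiven from "@pulumi/aiven";

// Sketch: Datadog metrics integration with tags and filtering.
const datadog = new aiven.ServiceIntegration("datadog", {
    project: "my-project",
    integrationType: "datadog",
    sourceServiceName: "my-kafka",
    destinationEndpointId: "my-datadog-endpoint-id", // hypothetical endpoint
    datadogUserConfig: {
        datadogTags: [{ tag: "env:prod", comment: "production cluster" }],
        includeTopics: ["orders", "payments"],
        excludeConsumerGroups: ["internal-group"],
        maxJmxMetrics: 2000,
    },
});
```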
GetServiceIntegrationDatadogUserConfigDatadogTag       
GetServiceIntegrationDatadogUserConfigOpensearch      
- ClusterStatsEnabled bool
- Enable Datadog Opensearch Cluster Monitoring.
- IndexStatsEnabled bool
- Enable Datadog Opensearch Index Monitoring.
- PendingTaskStatsEnabled bool
- Enable Datadog Opensearch Pending Task Monitoring.
- PshardStatsEnabled bool
- Enable Datadog Opensearch Primary Shard Monitoring.
- ClusterStatsEnabled bool
- Enable Datadog Opensearch Cluster Monitoring.
- IndexStatsEnabled bool
- Enable Datadog Opensearch Index Monitoring.
- PendingTaskStatsEnabled bool
- Enable Datadog Opensearch Pending Task Monitoring.
- PshardStatsEnabled bool
- Enable Datadog Opensearch Primary Shard Monitoring.
- clusterStatsEnabled Boolean
- Enable Datadog Opensearch Cluster Monitoring.
- indexStatsEnabled Boolean
- Enable Datadog Opensearch Index Monitoring.
- pendingTaskStatsEnabled Boolean
- Enable Datadog Opensearch Pending Task Monitoring.
- pshardStatsEnabled Boolean
- Enable Datadog Opensearch Primary Shard Monitoring.
- clusterStatsEnabled boolean
- Enable Datadog Opensearch Cluster Monitoring.
- indexStatsEnabled boolean
- Enable Datadog Opensearch Index Monitoring.
- pendingTaskStatsEnabled boolean
- Enable Datadog Opensearch Pending Task Monitoring.
- pshardStatsEnabled boolean
- Enable Datadog Opensearch Primary Shard Monitoring.
- cluster_stats_enabled bool
- Enable Datadog Opensearch Cluster Monitoring.
- index_stats_enabled bool
- Enable Datadog Opensearch Index Monitoring.
- pending_task_stats_enabled bool
- Enable Datadog Opensearch Pending Task Monitoring.
- pshard_stats_enabled bool
- Enable Datadog Opensearch Primary Shard Monitoring.
- clusterStatsEnabled Boolean
- Enable Datadog Opensearch Cluster Monitoring.
- indexStatsEnabled Boolean
- Enable Datadog Opensearch Index Monitoring.
- pendingTaskStatsEnabled Boolean
- Enable Datadog Opensearch Pending Task Monitoring.
- pshardStatsEnabled Boolean
- Enable Datadog Opensearch Primary Shard Monitoring.
GetServiceIntegrationDatadogUserConfigRedis      
- CommandStatsEnabled bool
- Enable command_stats option in the agent's configuration. Default: false.
- CommandStatsEnabled bool
- Enable command_stats option in the agent's configuration. Default: false.
- commandStatsEnabled Boolean
- Enable command_stats option in the agent's configuration. Default: false.
- commandStatsEnabled boolean
- Enable command_stats option in the agent's configuration. Default: false.
- command_stats_enabled bool
- Enable command_stats option in the agent's configuration. Default: false.
- commandStatsEnabled Boolean
- Enable command_stats option in the agent's configuration. Default: false.
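Reading these nested values back out of the data source is a matter of walking the result shape documented above. A sketch, assuming the result exposes the config as a datadogUserConfigs list (as the nested type names suggest; service names are hypothetical):

```typescript
import * as aiven from "@pulumi/aiven";

const integration = aiven.getServiceIntegrationOutput({
    project: "my-project",
    integrationType: "datadog",
    sourceServiceName: "my-redis",
    destinationServiceName: "my-datadog", // hypothetical
});

// Whether Redis command_stats collection is enabled on this integration.
export const commandStatsEnabled = integration.apply(
    i => i.datadogUserConfigs[0]?.redis?.commandStatsEnabled);
```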
GetServiceIntegrationExternalAwsCloudwatchLogsUserConfig        
- SelectedLogFields List<string>
- The list of logging fields that will be sent to the integration logging service. The MESSAGE and timestamp fields are always sent.
- SelectedLogFields []string
- The list of logging fields that will be sent to the integration logging service. The MESSAGE and timestamp fields are always sent.
- selectedLogFields List<String>
- The list of logging fields that will be sent to the integration logging service. The MESSAGE and timestamp fields are always sent.
- selectedLogFields string[]
- The list of logging fields that will be sent to the integration logging service. The MESSAGE and timestamp fields are always sent.
- selected_log_fields Sequence[str]
- The list of logging fields that will be sent to the integration logging service. The MESSAGE and timestamp fields are always sent.
- selectedLogFields List<String>
- The list of logging fields that will be sent to the integration logging service. The MESSAGE and timestamp fields are always sent.
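Since MESSAGE and timestamp are always sent, selected_log_fields only controls the extra fields. A sketch (the endpoint ID is hypothetical, and the specific field names shown are assumptions, not an exhaustive list):

```typescript
import * as aiven from "@pulumi/aiven";

// Sketch: ship service logs to CloudWatch with a few extra log fields.
const cloudwatchLogs = new aiven.ServiceIntegration("cloudwatch-logs", {
    project: "my-project",
    integrationType: "external_aws_cloudwatch_logs",
    sourceServiceName: "my-kafka",
    destinationEndpointId: "my-cloudwatch-endpoint-id", // hypothetical
    externalAwsCloudwatchLogsUserConfig: {
        // MESSAGE and timestamp are always sent; these are additions.
        selectedLogFields: ["HOSTNAME", "PRIORITY"],
    },
});
```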
GetServiceIntegrationExternalAwsCloudwatchMetricsUserConfig        
- DroppedMetrics List<GetServiceIntegrationExternalAwsCloudwatchMetricsUserConfigDroppedMetric>
- Metrics to not send to AWS CloudWatch (takes precedence over extra_metrics)
- ExtraMetrics List<GetServiceIntegrationExternalAwsCloudwatchMetricsUserConfigExtraMetric>
- Metrics to allow through to AWS CloudWatch (in addition to default metrics)
- DroppedMetrics []GetServiceIntegrationExternalAwsCloudwatchMetricsUserConfigDroppedMetric
- Metrics to not send to AWS CloudWatch (takes precedence over extra_metrics)
- ExtraMetrics []GetServiceIntegrationExternalAwsCloudwatchMetricsUserConfigExtraMetric
- Metrics to allow through to AWS CloudWatch (in addition to default metrics)
- droppedMetrics List<GetServiceIntegrationExternalAwsCloudwatchMetricsUserConfigDroppedMetric>
- Metrics to not send to AWS CloudWatch (takes precedence over extra_metrics)
- extraMetrics List<GetServiceIntegrationExternalAwsCloudwatchMetricsUserConfigExtraMetric>
- Metrics to allow through to AWS CloudWatch (in addition to default metrics)
- droppedMetrics GetServiceIntegrationExternalAwsCloudwatchMetricsUserConfigDroppedMetric[]
- Metrics to not send to AWS CloudWatch (takes precedence over extra_metrics)
- extraMetrics GetServiceIntegrationExternalAwsCloudwatchMetricsUserConfigExtraMetric[]
- Metrics to allow through to AWS CloudWatch (in addition to default metrics)
- dropped_metrics Sequence[GetServiceIntegrationExternalAwsCloudwatchMetricsUserConfigDroppedMetric]
- Metrics to not send to AWS CloudWatch (takes precedence over extra_metrics)
- extra_metrics Sequence[GetServiceIntegrationExternalAwsCloudwatchMetricsUserConfigExtraMetric]
- Metrics to allow through to AWS CloudWatch (in addition to default metrics)
- droppedMetrics List<Property Map>
- Metrics to not send to AWS CloudWatch (takes precedence over extra_metrics)
- extraMetrics List<Property Map>
- Metrics to allow through to AWS CloudWatch (in addition to default metrics)
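Because dropped metrics take precedence over extra metrics, the two lists can be combined safely. A sketch; the entry shape {metric, field} is an assumption taken from the provider schema (the DroppedMetric/ExtraMetric field listings are not reproduced here), and all identifiers are hypothetical:

```typescript
import * as aiven from "@pulumi/aiven";

// Sketch: tune which metrics reach CloudWatch. Dropped entries win
// over extra entries when both match the same metric.
const cloudwatchMetrics = new aiven.ServiceIntegration("cloudwatch-metrics", {
    project: "my-project",
    integrationType: "external_aws_cloudwatch_metrics",
    sourceServiceName: "my-kafka",
    destinationEndpointId: "my-cloudwatch-endpoint-id", // hypothetical
    externalAwsCloudwatchMetricsUserConfig: {
        droppedMetrics: [{ metric: "java.lang:Memory", field: "used" }],
        extraMetrics: [{ metric: "java.lang:GarbageCollector", field: "count" }],
    },
});
```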
GetServiceIntegrationExternalAwsCloudwatchMetricsUserConfigDroppedMetric          
GetServiceIntegrationExternalAwsCloudwatchMetricsUserConfigExtraMetric          
GetServiceIntegrationExternalElasticsearchLogsUserConfig       
- SelectedLogFields List<string>
- The list of logging fields that will be sent to the integration logging service. The MESSAGE and timestamp fields are always sent.
- SelectedLogFields []string
- The list of logging fields that will be sent to the integration logging service. The MESSAGE and timestamp fields are always sent.
- selectedLogFields List<String>
- The list of logging fields that will be sent to the integration logging service. The MESSAGE and timestamp fields are always sent.
- selectedLogFields string[]
- The list of logging fields that will be sent to the integration logging service. The MESSAGE and timestamp fields are always sent.
- selected_log_fields Sequence[str]
- The list of logging fields that will be sent to the integration logging service. The MESSAGE and timestamp fields are always sent.
- selectedLogFields List<String>
- The list of logging fields that will be sent to the integration logging service. The MESSAGE and timestamp fields are always sent.
GetServiceIntegrationExternalOpensearchLogsUserConfig       
- SelectedLogFields List<string>
- The list of logging fields that will be sent to the integration logging service. The MESSAGE and timestamp fields are always sent.
- SelectedLogFields []string
- The list of logging fields that will be sent to the integration logging service. The MESSAGE and timestamp fields are always sent.
- selectedLogFields List<String>
- The list of logging fields that will be sent to the integration logging service. The MESSAGE and timestamp fields are always sent.
- selectedLogFields string[]
- The list of logging fields that will be sent to the integration logging service. The MESSAGE and timestamp fields are always sent.
- selected_log_fields Sequence[str]
- The list of logging fields that will be sent to the integration logging service. The MESSAGE and timestamp fields are always sent.
- selectedLogFields List<String>
- The list of logging fields that will be sent to the integration logging service. The MESSAGE and timestamp fields are always sent.
GetServiceIntegrationFlinkExternalPostgresqlUserConfig       
- Stringtype string
- Enum: unspecified. If stringtype is set to unspecified, parameters will be sent to the server as untyped values.
- Stringtype string
- Enum: unspecified. If stringtype is set to unspecified, parameters will be sent to the server as untyped values.
- stringtype String
- Enum: unspecified. If stringtype is set to unspecified, parameters will be sent to the server as untyped values.
- stringtype string
- Enum: unspecified. If stringtype is set to unspecified, parameters will be sent to the server as untyped values.
- stringtype str
- Enum: unspecified. If stringtype is set to unspecified, parameters will be sent to the server as untyped values.
- stringtype String
- Enum: unspecified. If stringtype is set to unspecified, parameters will be sent to the server as untyped values.
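A brief, hedged sketch of where this option lives, assuming the flink_external_postgresql integration type pairs a Flink service with an external PostgreSQL endpoint (the endpoint ID is hypothetical):

```typescript
import * as aiven from "@pulumi/aiven";

// Sketch: send JDBC parameters to the server as untyped values.
const flinkPg = new aiven.ServiceIntegration("flink-pg", {
    project: "my-project",
    integrationType: "flink_external_postgresql",
    sourceServiceName: "my-flink",
    destinationEndpointId: "my-postgres-endpoint-id", // hypothetical
    flinkExternalPostgresqlUserConfig: {
        stringtype: "unspecified",
    },
});
```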
GetServiceIntegrationKafkaConnectUserConfig      
- KafkaConnect GetServiceIntegrationKafkaConnectUserConfigKafkaConnect
- Kafka Connect service configuration values
- KafkaConnect GetServiceIntegrationKafkaConnectUserConfigKafkaConnect
- Kafka Connect service configuration values
- kafkaConnect GetServiceIntegrationKafkaConnectUserConfigKafkaConnect
- Kafka Connect service configuration values
- kafkaConnect GetServiceIntegrationKafkaConnectUserConfigKafkaConnect
- Kafka Connect service configuration values
- kafka_connect GetServiceIntegrationKafkaConnectUserConfigKafkaConnect
- Kafka Connect service configuration values
- kafkaConnect Property Map
- Kafka Connect service configuration values
GetServiceIntegrationKafkaConnectUserConfigKafkaConnect        
- ConfigStorageTopic string
- The name of the topic where connector and task configuration data are stored. This must be the same for all workers with the same group_id. Example: __connect_configs.
- GroupId string
- A unique string that identifies the Connect cluster group this worker belongs to. Example: connect.
- OffsetStorageTopic string
- The name of the topic where connector and task configuration offsets are stored. This must be the same for all workers with the same group_id. Example: __connect_offsets.
- StatusStorageTopic string
- The name of the topic where connector and task configuration status updates are stored. This must be the same for all workers with the same group_id. Example: __connect_status.
- ConfigStorageTopic string
- The name of the topic where connector and task configuration data are stored. This must be the same for all workers with the same group_id. Example: __connect_configs.
- GroupId string
- A unique string that identifies the Connect cluster group this worker belongs to. Example: connect.
- OffsetStorageTopic string
- The name of the topic where connector and task configuration offsets are stored. This must be the same for all workers with the same group_id. Example: __connect_offsets.
- StatusStorageTopic string
- The name of the topic where connector and task configuration status updates are stored. This must be the same for all workers with the same group_id. Example: __connect_status.
- configStorageTopic String
- The name of the topic where connector and task configuration data are stored. This must be the same for all workers with the same group_id. Example: __connect_configs.
- groupId String
- A unique string that identifies the Connect cluster group this worker belongs to. Example: connect.
- offsetStorageTopic String
- The name of the topic where connector and task configuration offsets are stored. This must be the same for all workers with the same group_id. Example: __connect_offsets.
- statusStorageTopic String
- The name of the topic where connector and task configuration status updates are stored. This must be the same for all workers with the same group_id. Example: __connect_status.
- configStorageTopic string
- The name of the topic where connector and task configuration data are stored. This must be the same for all workers with the same group_id. Example: __connect_configs.
- groupId string
- A unique string that identifies the Connect cluster group this worker belongs to. Example: connect.
- offsetStorageTopic string
- The name of the topic where connector and task configuration offsets are stored. This must be the same for all workers with the same group_id. Example: __connect_offsets.
- statusStorageTopic string
- The name of the topic where connector and task configuration status updates are stored. This must be the same for all workers with the same group_id. Example: __connect_status.
- config_storage_topic str
- The name of the topic where connector and task configuration data are stored. This must be the same for all workers with the same group_id. Example: __connect_configs.
- group_id str
- A unique string that identifies the Connect cluster group this worker belongs to. Example: connect.
- offset_storage_topic str
- The name of the topic where connector and task configuration offsets are stored. This must be the same for all workers with the same group_id. Example: __connect_offsets.
- status_storage_topic str
- The name of the topic where connector and task configuration status updates are stored. This must be the same for all workers with the same group_id. Example: __connect_status.
- configStorageTopic String
- The name of the topic where connector and task configuration data are stored. This must be the same for all workers with the same group_id. Example: __connect_configs.
- groupId String
- A unique string that identifies the Connect cluster group this worker belongs to. Example: connect.
- offsetStorageTopic String
- The name of the topic where connector and task configuration offsets are stored. This must be the same for all workers with the same group_id. Example: __connect_offsets.
- statusStorageTopic String
- The name of the topic where connector and task configuration status updates are stored. This must be the same for all workers with the same group_id. Example: __connect_status.
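All four settings must agree across workers that share a group_id, so they are naturally set together. A minimal sketch using the example values above (service names are hypothetical):

```typescript
import * as aiven from "@pulumi/aiven";

// Sketch: Kafka Connect worker group and its three storage topics.
const kafkaConnect = new aiven.ServiceIntegration("kafka-connect", {
    project: "my-project",
    integrationType: "kafka_connect",
    sourceServiceName: "my-kafka",
    destinationServiceName: "my-kafka-connect",
    kafkaConnectUserConfig: {
        kafkaConnect: {
            groupId: "connect",
            configStorageTopic: "__connect_configs",
            offsetStorageTopic: "__connect_offsets",
            statusStorageTopic: "__connect_status",
        },
    },
});
```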
GetServiceIntegrationKafkaLogsUserConfig      
- KafkaTopic string
- Topic name. Example: mytopic.
- SelectedLogFields List<string>
- The list of logging fields that will be sent to the integration logging service. The MESSAGE and timestamp fields are always sent.
- KafkaTopic string
- Topic name. Example: mytopic.
- SelectedLogFields []string
- The list of logging fields that will be sent to the integration logging service. The MESSAGE and timestamp fields are always sent.
- kafkaTopic String
- Topic name. Example: mytopic.
- selectedLogFields List<String>
- The list of logging fields that will be sent to the integration logging service. The MESSAGE and timestamp fields are always sent.
- kafkaTopic string
- Topic name. Example: mytopic.
- selectedLogFields string[]
- The list of logging fields that will be sent to the integration logging service. The MESSAGE and timestamp fields are always sent.
- kafka_topic str
- Topic name. Example: mytopic.
- selected_log_fields Sequence[str]
- The list of logging fields that will be sent to the integration logging service. The MESSAGE and timestamp fields are always sent.
- kafkaTopic String
- Topic name. Example: mytopic.
- selectedLogFields List<String>
- The list of logging fields that will be sent to the integration logging service. The MESSAGE and timestamp fields are always sent.
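A minimal sketch of routing one service's logs into a Kafka topic (service names are hypothetical; the selected field names are assumptions):

```typescript
import * as aiven from "@pulumi/aiven";

// Sketch: push logs from a source service into the "mytopic" Kafka topic.
const kafkaLogs = new aiven.ServiceIntegration("kafka-logs", {
    project: "my-project",
    integrationType: "kafka_logs",
    sourceServiceName: "my-postgres",
    destinationServiceName: "my-kafka",
    kafkaLogsUserConfig: {
        kafkaTopic: "mytopic",
        selectedLogFields: ["HOSTNAME", "PRIORITY"],
    },
});
```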
GetServiceIntegrationKafkaMirrormakerUserConfig      
- ClusterAlias string
- The alias under which the Kafka cluster is known to MirrorMaker. Can contain the following symbols: ASCII alphanumerics, ., _, and -. Example: kafka-abc.
- KafkaMirrormaker GetServiceIntegrationKafkaMirrormakerUserConfigKafkaMirrormaker
- Kafka MirrorMaker configuration values
- ClusterAlias string
- The alias under which the Kafka cluster is known to MirrorMaker. Can contain the following symbols: ASCII alphanumerics, ., _, and -. Example: kafka-abc.
- KafkaMirrormaker GetServiceIntegrationKafkaMirrormakerUserConfigKafkaMirrormaker
- Kafka MirrorMaker configuration values
- clusterAlias String
- The alias under which the Kafka cluster is known to MirrorMaker. Can contain the following symbols: ASCII alphanumerics, ., _, and -. Example: kafka-abc.
- kafkaMirrormaker GetServiceIntegrationKafkaMirrormakerUserConfigKafkaMirrormaker
- Kafka MirrorMaker configuration values
- clusterAlias string
- The alias under which the Kafka cluster is known to MirrorMaker. Can contain the following symbols: ASCII alphanumerics, ., _, and -. Example: kafka-abc.
- kafkaMirrormaker GetServiceIntegrationKafkaMirrormakerUserConfigKafkaMirrormaker
- Kafka MirrorMaker configuration values
- cluster_alias str
- The alias under which the Kafka cluster is known to MirrorMaker. Can contain the following symbols: ASCII alphanumerics, ., _, and -. Example: kafka-abc.
- kafka_mirrormaker GetServiceIntegrationKafkaMirrormakerUserConfigKafkaMirrormaker
- Kafka MirrorMaker configuration values
- clusterAlias String
- The alias under which the Kafka cluster is known to MirrorMaker. Can contain the following symbols: ASCII alphanumerics, ., _, and -. Example: kafka-abc.
- kafkaMirrormaker Property Map
- Kafka MirrorMaker configuration values
GetServiceIntegrationKafkaMirrormakerUserConfigKafkaMirrormaker        
- ConsumerAutoOffsetReset string
- Enum: earliest, latest. Set where the consumer starts to consume data. Value earliest: start replication from the earliest offset. Value latest: start replication from the latest offset. Default is earliest.
- ConsumerFetchMinBytes int
- The minimum amount of data the server should return for a fetch request. Example: 1024.
- ConsumerMaxPollRecords int
- Set consumer max.poll.records. The default is 500. Example: 500.
- ProducerBatchSize int
- The batch size in bytes the producer will attempt to collect before publishing to the broker. Example: 1024.
- ProducerBufferMemory int
- The number of bytes the producer can use for buffering data before publishing to the broker.
- ProducerCompressionType string
- Enum: gzip, lz4, none, snappy, zstd. Specify the default compression type for producers. This configuration accepts the standard compression codecs (gzip, snappy, lz4, zstd). It additionally accepts none, which is the default and equivalent to no compression.
- ProducerLingerMs int
- The linger time (ms) to wait for new data to arrive before publishing. Example: 100.
- ProducerMaxRequestSize int
- The maximum request size in bytes.
- ConsumerAutoOffsetReset string
- Enum: earliest, latest. Set where the consumer starts to consume data. Value earliest: start replication from the earliest offset. Value latest: start replication from the latest offset. Default is earliest.
- ConsumerFetchMinBytes int
- The minimum amount of data the server should return for a fetch request. Example: 1024.
- ConsumerMaxPollRecords int
- Set consumer max.poll.records. The default is 500. Example: 500.
- ProducerBatchSize int
- The batch size in bytes the producer will attempt to collect before publishing to the broker. Example: 1024.
- ProducerBufferMemory int
- The number of bytes the producer can use for buffering data before publishing to the broker.
- ProducerCompressionType string
- Enum: gzip, lz4, none, snappy, zstd. Specify the default compression type for producers. This configuration accepts the standard compression codecs (gzip, snappy, lz4, zstd). It additionally accepts none, which is the default and equivalent to no compression.
- ProducerLingerMs int
- The linger time (ms) to wait for new data to arrive before publishing. Example: 100.
- ProducerMaxRequestSize int
- The maximum request size in bytes.
- consumerAutoOffsetReset String
- Enum: earliest, latest. Set where the consumer starts to consume data. Value earliest: start replication from the earliest offset. Value latest: start replication from the latest offset. Default is earliest.
- consumerFetchMinBytes Integer
- The minimum amount of data the server should return for a fetch request. Example: 1024.
- consumerMaxPollRecords Integer
- Set consumer max.poll.records. The default is 500. Example: 500.
- producerBatchSize Integer
- The batch size in bytes the producer will attempt to collect before publishing to the broker. Example: 1024.
- producerBufferMemory Integer
- The number of bytes the producer can use for buffering data before publishing to the broker.
- producerCompressionType String
- Enum: gzip, lz4, none, snappy, zstd. Specify the default compression type for producers. This configuration accepts the standard compression codecs (gzip, snappy, lz4, zstd). It additionally accepts none, which is the default and equivalent to no compression.
- producerLingerMs Integer
- The linger time (ms) to wait for new data to arrive before publishing. Example: 100.
- producerMaxRequestSize Integer
- The maximum request size in bytes.
- consumerAutoOffsetReset string
- Enum: earliest, latest. Set where the consumer starts to consume data. Value earliest: start replication from the earliest offset. Value latest: start replication from the latest offset. Default is earliest.
- consumerFetchMinBytes number
- The minimum amount of data the server should return for a fetch request. Example: 1024.
- consumerMaxPollRecords number
- Set consumer max.poll.records. The default is 500. Example: 500.
- producerBatchSize number
- The batch size in bytes the producer will attempt to collect before publishing to the broker. Example: 1024.
- producerBufferMemory number
- The number of bytes the producer can use for buffering data before publishing to the broker.
- producerCompressionType string
- Enum: gzip, lz4, none, snappy, zstd. Specify the default compression type for producers. This configuration accepts the standard compression codecs (gzip, snappy, lz4, zstd). It additionally accepts none, which is the default and equivalent to no compression.
- producerLingerMs number
- The linger time (ms) to wait for new data to arrive before publishing. Example: 100.
- producerMaxRequestSize number
- The maximum request size in bytes.
- consumer_auto_offset_reset str
- Enum: earliest, latest. Set where the consumer starts to consume data. Value earliest: start replication from the earliest offset. Value latest: start replication from the latest offset. Default is earliest.
- consumer_fetch_min_bytes int
- The minimum amount of data the server should return for a fetch request. Example: 1024.
- consumer_max_poll_records int
- Set consumer max.poll.records. The default is 500. Example: 500.
- producer_batch_size int
- The batch size in bytes the producer will attempt to collect before publishing to the broker. Example: 1024.
- producer_buffer_memory int
- The number of bytes the producer can use for buffering data before publishing to the broker.
- producer_compression_type str
- Enum: gzip, lz4, none, snappy, zstd. Specify the default compression type for producers. This configuration accepts the standard compression codecs (gzip, snappy, lz4, zstd). It additionally accepts none, which is the default and equivalent to no compression.
- producer_linger_ms int
- The linger time (ms) to wait for new data to arrive before publishing. Example: 100.
- producer_max_request_size int
- The maximum request size in bytes.
- consumerAutoOffsetReset String
- Enum: earliest, latest. Set where the consumer starts to consume data. Value earliest: start replication from the earliest offset. Value latest: start replication from the latest offset. Default is earliest.
- consumerFetchMinBytes Number
- The minimum amount of data the server should return for a fetch request. Example: 1024.
- consumerMaxPollRecords Number
- Set consumer max.poll.records. The default is 500. Example: 500.
- producerBatchSize Number
- The batch size in bytes the producer will attempt to collect before publishing to the broker. Example: 1024.
- producerBufferMemory Number
- The number of bytes the producer can use for buffering data before publishing to the broker.
- producerCompressionType String
- Enum: gzip, lz4, none, snappy, zstd. Specify the default compression type for producers. This configuration accepts the standard compression codecs (gzip, snappy, lz4, zstd). It additionally accepts none, which is the default and equivalent to no compression.
- producerLingerMs Number
- The linger time (ms) to wait for new data to arrive before publishing. Example: 100.
- producerMaxRequestSize Number
- The maximum request size in bytes.
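A hedged sketch combining the cluster alias with a few of the MirrorMaker tuning knobs above (service names are hypothetical):

```typescript
import * as aiven from "@pulumi/aiven";

// Sketch: register a Kafka cluster with MirrorMaker under an alias
// and tune its replication consumer/producer.
const mirrormaker = new aiven.ServiceIntegration("mirrormaker", {
    project: "my-project",
    integrationType: "kafka_mirrormaker",
    sourceServiceName: "my-kafka",
    destinationServiceName: "my-mirrormaker",
    kafkaMirrormakerUserConfig: {
        clusterAlias: "kafka-abc",
        kafkaMirrormaker: {
            consumerAutoOffsetReset: "earliest",
            producerCompressionType: "zstd",
            producerLingerMs: 100,
        },
    },
});
```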
GetServiceIntegrationLogsUserConfig     
- ElasticsearchIndexDaysMax int
- Elasticsearch index retention limit. Default: 3.
- ElasticsearchIndexPrefix string
- Elasticsearch index prefix. Default: logs.
- SelectedLogFields List<string>
- The list of logging fields that will be sent to the integration logging service. The MESSAGE and timestamp fields are always sent.
- ElasticsearchIndexDaysMax int
- Elasticsearch index retention limit. Default: 3.
- ElasticsearchIndexPrefix string
- Elasticsearch index prefix. Default: logs.
- SelectedLogFields []string
- The list of logging fields that will be sent to the integration logging service. The MESSAGE and timestamp fields are always sent.
- elasticsearchIndexDaysMax Integer
- Elasticsearch index retention limit. Default: 3.
- elasticsearchIndexPrefix String
- Elasticsearch index prefix. Default: logs.
- selectedLogFields List<String>
- The list of logging fields that will be sent to the integration logging service. The MESSAGE and timestamp fields are always sent.
- elasticsearchIndexDaysMax number
- Elasticsearch index retention limit. Default: 3.
- elasticsearchIndexPrefix string
- Elasticsearch index prefix. Default: logs.
- selectedLogFields string[]
- The list of logging fields that will be sent to the integration logging service. The MESSAGE and timestamp fields are always sent.
- elasticsearch_index_days_max int
- Elasticsearch index retention limit. Default: 3.
- elasticsearch_index_prefix str
- Elasticsearch index prefix. Default: logs.
- selected_log_fields Sequence[str]
- The list of logging fields that will be sent to the integration logging service. The MESSAGE and timestamp fields are always sent.
- elasticsearchIndexDaysMax Number
- Elasticsearch index retention limit. Default: 3.
- elasticsearchIndexPrefix String
- Elasticsearch index prefix. Default: logs.
- selectedLogFields List<String>
- The list of logging fields that will be sent to the integration logging service. The MESSAGE and timestamp fields are always sent.
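A minimal sketch of a logs integration shipping into OpenSearch, with the defaults shown above made explicit (service names are hypothetical):

```typescript
import * as aiven from "@pulumi/aiven";

// Sketch: keep three days of logs under the "logs" index prefix.
const serviceLogs = new aiven.ServiceIntegration("service-logs", {
    project: "my-project",
    integrationType: "logs",
    sourceServiceName: "my-kafka",
    destinationServiceName: "my-opensearch",
    logsUserConfig: {
        elasticsearchIndexDaysMax: 3,
        elasticsearchIndexPrefix: "logs",
    },
});
```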
GetServiceIntegrationMetricsUserConfig     
- Database string
- Name of the database where metric datapoints are stored. Only affects PostgreSQL destinations. Defaults to metrics. Note that this must be the same for all metrics integrations that write data to the same PostgreSQL service.
- RetentionDays int
- Number of days to keep old metrics. Only affects PostgreSQL destinations. Set to 0 for no automatic cleanup. Defaults to 30 days.
- RoUsername string
- Name of a user that can be used to read metrics. This will be used for the Grafana integration (if enabled) to prevent Grafana users from making undesired changes. Only affects PostgreSQL destinations. Defaults to metrics_reader. Note that this must be the same for all metrics integrations that write data to the same PostgreSQL service.
- SourceMysql GetServiceIntegrationMetricsUserConfigSourceMysql
- Configuration options for metrics where the source service is MySQL
- Username string
- Name of the user used to write metrics. Only affects PostgreSQL destinations. Defaults to metrics_writer. Note that this must be the same for all metrics integrations that write data to the same PostgreSQL service.
- Database string
- Name of the database where metric datapoints are stored. Only affects PostgreSQL destinations. Defaults to metrics. Note that this must be the same for all metrics integrations that write data to the same PostgreSQL service.
- RetentionDays int
- Number of days to keep old metrics. Only affects PostgreSQL destinations. Set to 0 for no automatic cleanup. Defaults to 30 days.
- RoUsername string
- Name of a user that can be used to read metrics. This will be used for the Grafana integration (if enabled) to prevent Grafana users from making undesired changes. Only affects PostgreSQL destinations. Defaults to metrics_reader. Note that this must be the same for all metrics integrations that write data to the same PostgreSQL service.
- SourceMysql GetServiceIntegrationMetricsUserConfigSourceMysql
- Configuration options for metrics where the source service is MySQL
- Username string
- Name of the user used to write metrics. Only affects PostgreSQL destinations. Defaults to metrics_writer. Note that this must be the same for all metrics integrations that write data to the same PostgreSQL service.
- database String
- Name of the database where metric datapoints are stored. Only affects PostgreSQL destinations. Defaults to metrics. Note that this must be the same for all metrics integrations that write data to the same PostgreSQL service.
- retentionDays Integer
- Number of days to keep old metrics. Only affects PostgreSQL destinations. Set to 0 for no automatic cleanup. Defaults to 30 days.
- roUsername String
- Name of a user that can be used to read metrics. This will be used for the Grafana integration (if enabled) to prevent Grafana users from making undesired changes. Only affects PostgreSQL destinations. Defaults to metrics_reader. Note that this must be the same for all metrics integrations that write data to the same PostgreSQL service.
- sourceMysql GetServiceIntegrationMetricsUserConfigSourceMysql
- Configuration options for metrics where the source service is MySQL
- username String
- Name of the user used to write metrics. Only affects PostgreSQL destinations. Defaults to metrics_writer. Note that this must be the same for all metrics integrations that write data to the same PostgreSQL service.
- database string
- Name of the database where metric datapoints are stored. Only affects PostgreSQL destinations. Defaults to metrics. Note that this must be the same for all metrics integrations that write data to the same PostgreSQL service.
- retentionDays number
- Number of days to keep old metrics. Only affects PostgreSQL destinations. Set to 0 for no automatic cleanup. Defaults to 30 days.
- roUsername string
- Name of a user that can be used to read metrics. This will be used for the Grafana integration (if enabled) to prevent Grafana users from making undesired changes. Only affects PostgreSQL destinations. Defaults to metrics_reader. Note that this must be the same for all metrics integrations that write data to the same PostgreSQL service.
- sourceMysql GetServiceIntegrationMetricsUserConfigSourceMysql
- Configuration options for metrics where the source service is MySQL
- username string
- Name of the user used to write metrics. Only affects PostgreSQL destinations. Defaults to metrics_writer. Note that this must be the same for all metrics integrations that write data to the same PostgreSQL service.
- database str
- Name of the database where metric datapoints are stored. Only affects PostgreSQL destinations. Defaults to metrics. Note that this must be the same for all metrics integrations that write data to the same PostgreSQL service.
- retention_days int
- Number of days to keep old metrics. Only affects PostgreSQL destinations. Set to 0 for no automatic cleanup. Defaults to 30 days.
- ro_username str
- Name of a user that can be used to read metrics. This will be used for the Grafana integration (if enabled) to prevent Grafana users from making undesired changes. Only affects PostgreSQL destinations. Defaults to metrics_reader. Note that this must be the same for all metrics integrations that write data to the same PostgreSQL service.
- source_mysql GetServiceIntegrationMetricsUserConfigSourceMysql
- Configuration options for metrics where the source service is MySQL
- username str
- Name of the user used to write metrics. Only affects PostgreSQL destinations. Defaults to metrics_writer. Note that this must be the same for all metrics integrations that write data to the same PostgreSQL service.
- database String
- Name of the database where metric datapoints are stored. Only affects PostgreSQL destinations. Defaults to metrics. Note that this must be the same for all metrics integrations that write data to the same PostgreSQL service.
- retentionDays Number
- Number of days to keep old metrics. Only affects PostgreSQL destinations. Set to 0 for no automatic cleanup. Defaults to 30 days.
- roUsername String
- Name of a user that can be used to read metrics. This will be used for the Grafana integration (if enabled) to prevent Grafana users from making undesired changes. Only affects PostgreSQL destinations. Defaults to metrics_reader. Note that this must be the same for all metrics integrations that write data to the same PostgreSQL service.
- sourceMysql Property Map
- Configuration options for metrics where the source service is MySQL
- username String
- Name of the user used to write metrics. Only affects PostgreSQL destinations. Defaults to metrics_writer. Note that this must be the same for all metrics integrations that write data to the same PostgreSQL service.
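When the source service is MySQL, the Telegraf input plugin options (documented below) hang off sourceMysql.telegraf. A hedged sketch with a PostgreSQL destination, making the default names explicit (service names are hypothetical):

```typescript
import * as aiven from "@pulumi/aiven";

// Sketch: MySQL metrics into PostgreSQL, with a couple of Telegraf
// gatherers enabled (see the Telegraf options below).
const mysqlMetrics = new aiven.ServiceIntegration("mysql-metrics", {
    project: "my-project",
    integrationType: "metrics",
    sourceServiceName: "my-mysql",
    destinationServiceName: "my-postgres",
    metricsUserConfig: {
        database: "metrics",
        retentionDays: 30,
        roUsername: "metrics_reader",
        username: "metrics_writer",
        sourceMysql: {
            telegraf: {
                gatherInnodbMetrics: true,
                gatherProcessList: true,
                perfEventsStatementsLimit: 250,
            },
        },
    },
});
```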
GetServiceIntegrationMetricsUserConfigSourceMysql       
- Telegraf GetServiceIntegrationMetricsUserConfigSourceMysqlTelegraf
- Configuration options for Telegraf MySQL input plugin
- Telegraf GetServiceIntegrationMetricsUserConfigSourceMysqlTelegraf
- Configuration options for Telegraf MySQL input plugin
- telegraf GetServiceIntegrationMetricsUserConfigSourceMysqlTelegraf
- Configuration options for Telegraf MySQL input plugin
- telegraf GetServiceIntegrationMetricsUserConfigSourceMysqlTelegraf
- Configuration options for Telegraf MySQL input plugin
- telegraf GetServiceIntegrationMetricsUserConfigSourceMysqlTelegraf
- Configuration options for Telegraf MySQL input plugin
- telegraf Property Map
- Configuration options for Telegraf MySQL input plugin
GetServiceIntegrationMetricsUserConfigSourceMysqlTelegraf
- GatherEventWaits bool
- Gather metrics from PERFORMANCE_SCHEMA.EVENT_WAITS.
- GatherFileEventsStats bool
- Gather metrics from PERFORMANCE_SCHEMA.FILE_SUMMARY_BY_EVENT_NAME.
- GatherIndexIoWaits bool
- Gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_INDEX_USAGE.
- GatherInfoSchemaAutoInc bool
- Gather auto_increment columns and max values from the information schema.
- GatherInnodbMetrics bool
- Gather metrics from INFORMATION_SCHEMA.INNODB_METRICS.
- GatherPerfEventsStatements bool
- Gather metrics from PERFORMANCE_SCHEMA.EVENTS_STATEMENTS_SUMMARY_BY_DIGEST.
- GatherProcessList bool
- Gather thread state counts from INFORMATION_SCHEMA.PROCESSLIST.
- GatherSlaveStatus bool
- Gather metrics from SHOW SLAVE STATUS command output.
- GatherTableIoWaits bool
- Gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_TABLE.
- GatherTableLockWaits bool
- Gather metrics from PERFORMANCE_SCHEMA.TABLE_LOCK_WAITS.
- GatherTableSchema bool
- Gather metrics from INFORMATION_SCHEMA.TABLES.
- PerfEventsStatementsDigestTextLimit int
- Truncates digest text from perf_events_statements to this many characters. Example: 120.
- PerfEventsStatementsLimit int
- Limits metrics gathered from perf_events_statements. Example: 250.
- PerfEventsStatementsTimeLimit int
- Only include perf_events_statements entries last seen within this many seconds. Example: 86400.
- GatherEventWaits bool
- Gather metrics from PERFORMANCE_SCHEMA.EVENT_WAITS.
- GatherFileEventsStats bool
- Gather metrics from PERFORMANCE_SCHEMA.FILE_SUMMARY_BY_EVENT_NAME.
- GatherIndexIoWaits bool
- Gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_INDEX_USAGE.
- GatherInfoSchemaAutoInc bool
- Gather auto_increment columns and max values from the information schema.
- GatherInnodbMetrics bool
- Gather metrics from INFORMATION_SCHEMA.INNODB_METRICS.
- GatherPerfEventsStatements bool
- Gather metrics from PERFORMANCE_SCHEMA.EVENTS_STATEMENTS_SUMMARY_BY_DIGEST.
- GatherProcessList bool
- Gather thread state counts from INFORMATION_SCHEMA.PROCESSLIST.
- GatherSlaveStatus bool
- Gather metrics from SHOW SLAVE STATUS command output.
- GatherTableIoWaits bool
- Gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_TABLE.
- GatherTableLockWaits bool
- Gather metrics from PERFORMANCE_SCHEMA.TABLE_LOCK_WAITS.
- GatherTableSchema bool
- Gather metrics from INFORMATION_SCHEMA.TABLES.
- PerfEventsStatementsDigestTextLimit int
- Truncates digest text from perf_events_statements to this many characters. Example: 120.
- PerfEventsStatementsLimit int
- Limits metrics gathered from perf_events_statements. Example: 250.
- PerfEventsStatementsTimeLimit int
- Only include perf_events_statements entries last seen within this many seconds. Example: 86400.
- gatherEventWaits Boolean
- Gather metrics from PERFORMANCE_SCHEMA.EVENT_WAITS.
- gatherFileEventsStats Boolean
- Gather metrics from PERFORMANCE_SCHEMA.FILE_SUMMARY_BY_EVENT_NAME.
- gatherIndexIoWaits Boolean
- Gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_INDEX_USAGE.
- gatherInfoSchemaAutoInc Boolean
- Gather auto_increment columns and max values from the information schema.
- gatherInnodbMetrics Boolean
- Gather metrics from INFORMATION_SCHEMA.INNODB_METRICS.
- gatherPerfEventsStatements Boolean
- Gather metrics from PERFORMANCE_SCHEMA.EVENTS_STATEMENTS_SUMMARY_BY_DIGEST.
- gatherProcessList Boolean
- Gather thread state counts from INFORMATION_SCHEMA.PROCESSLIST.
- gatherSlaveStatus Boolean
- Gather metrics from SHOW SLAVE STATUS command output.
- gatherTableIoWaits Boolean
- Gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_TABLE.
- gatherTableLockWaits Boolean
- Gather metrics from PERFORMANCE_SCHEMA.TABLE_LOCK_WAITS.
- gatherTableSchema Boolean
- Gather metrics from INFORMATION_SCHEMA.TABLES.
- perfEventsStatementsDigestTextLimit Integer
- Truncates digest text from perf_events_statements to this many characters. Example: 120.
- perfEventsStatementsLimit Integer
- Limits metrics gathered from perf_events_statements. Example: 250.
- perfEventsStatementsTimeLimit Integer
- Only include perf_events_statements entries last seen within this many seconds. Example: 86400.
- gatherEventWaits boolean
- Gather metrics from PERFORMANCE_SCHEMA.EVENT_WAITS.
- gatherFileEventsStats boolean
- Gather metrics from PERFORMANCE_SCHEMA.FILE_SUMMARY_BY_EVENT_NAME.
- gatherIndexIoWaits boolean
- Gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_INDEX_USAGE.
- gatherInfoSchemaAutoInc boolean
- Gather auto_increment columns and max values from the information schema.
- gatherInnodbMetrics boolean
- Gather metrics from INFORMATION_SCHEMA.INNODB_METRICS.
- gatherPerfEventsStatements boolean
- Gather metrics from PERFORMANCE_SCHEMA.EVENTS_STATEMENTS_SUMMARY_BY_DIGEST.
- gatherProcessList boolean
- Gather thread state counts from INFORMATION_SCHEMA.PROCESSLIST.
- gatherSlaveStatus boolean
- Gather metrics from SHOW SLAVE STATUS command output.
- gatherTableIoWaits boolean
- Gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_TABLE.
- gatherTableLockWaits boolean
- Gather metrics from PERFORMANCE_SCHEMA.TABLE_LOCK_WAITS.
- gatherTableSchema boolean
- Gather metrics from INFORMATION_SCHEMA.TABLES.
- perfEventsStatementsDigestTextLimit number
- Truncates digest text from perf_events_statements to this many characters. Example: 120.
- perfEventsStatementsLimit number
- Limits metrics gathered from perf_events_statements. Example: 250.
- perfEventsStatementsTimeLimit number
- Only include perf_events_statements entries last seen within this many seconds. Example: 86400.
- gather_event_waits bool
- Gather metrics from PERFORMANCE_SCHEMA.EVENT_WAITS.
- gather_file_events_stats bool
- Gather metrics from PERFORMANCE_SCHEMA.FILE_SUMMARY_BY_EVENT_NAME.
- gather_index_io_waits bool
- Gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_INDEX_USAGE.
- gather_info_schema_auto_inc bool
- Gather auto_increment columns and max values from the information schema.
- gather_innodb_metrics bool
- Gather metrics from INFORMATION_SCHEMA.INNODB_METRICS.
- gather_perf_events_statements bool
- Gather metrics from PERFORMANCE_SCHEMA.EVENTS_STATEMENTS_SUMMARY_BY_DIGEST.
- gather_process_list bool
- Gather thread state counts from INFORMATION_SCHEMA.PROCESSLIST.
- gather_slave_status bool
- Gather metrics from SHOW SLAVE STATUS command output.
- gather_table_io_waits bool
- Gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_TABLE.
- gather_table_lock_waits bool
- Gather metrics from PERFORMANCE_SCHEMA.TABLE_LOCK_WAITS.
- gather_table_schema bool
- Gather metrics from INFORMATION_SCHEMA.TABLES.
- perf_events_statements_digest_text_limit int
- Truncates digest text from perf_events_statements to this many characters. Example: 120.
- perf_events_statements_limit int
- Limits metrics gathered from perf_events_statements. Example: 250.
- perf_events_statements_time_limit int
- Only include perf_events_statements entries last seen within this many seconds. Example: 86400.
- gatherEventWaits Boolean
- Gather metrics from PERFORMANCE_SCHEMA.EVENT_WAITS.
- gatherFileEventsStats Boolean
- Gather metrics from PERFORMANCE_SCHEMA.FILE_SUMMARY_BY_EVENT_NAME.
- gatherIndexIoWaits Boolean
- Gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_INDEX_USAGE.
- gatherInfoSchemaAutoInc Boolean
- Gather auto_increment columns and max values from the information schema.
- gatherInnodbMetrics Boolean
- Gather metrics from INFORMATION_SCHEMA.INNODB_METRICS.
- gatherPerfEventsStatements Boolean
- Gather metrics from PERFORMANCE_SCHEMA.EVENTS_STATEMENTS_SUMMARY_BY_DIGEST.
- gatherProcessList Boolean
- Gather thread state counts from INFORMATION_SCHEMA.PROCESSLIST.
- gatherSlaveStatus Boolean
- Gather metrics from SHOW SLAVE STATUS command output.
- gatherTableIoWaits Boolean
- Gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_TABLE.
- gatherTableLockWaits Boolean
- Gather metrics from PERFORMANCE_SCHEMA.TABLE_LOCK_WAITS.
- gatherTableSchema Boolean
- Gather metrics from INFORMATION_SCHEMA.TABLES.
- perfEventsStatementsDigestTextLimit Number
- Truncates digest text from perf_events_statements to this many characters. Example: 120.
- perfEventsStatementsLimit Number
- Limits metrics gathered from perf_events_statements. Example: 250.
- perfEventsStatementsTimeLimit Number
- Only include perf_events_statements entries last seen within this many seconds. Example: 86400.
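When the metrics source is MySQL, the telegraf block above selects which collectors the Telegraf MySQL input plugin enables. A short, hedged TypeScript sketch of tuning these settings on the integration resource (the project and service names are hypothetical):

import * as aiven from "@pulumi/aiven";

// Hypothetical MySQL-to-Thanos metrics integration; the telegraf
// settings are the point of this example.
const mysqlMetrics = new aiven.ServiceIntegration("mysql-metrics", {
    project: "example-project",
    integrationType: "metrics",
    sourceServiceName: "example-mysql",
    destinationServiceName: "example-thanos",
    metricsUserConfig: {
        sourceMysql: {
            telegraf: {
                gatherPerfEventsStatements: true,
                perfEventsStatementsLimit: 250,           // cap gathered statements
                perfEventsStatementsDigestTextLimit: 120, // truncate digest text
                perfEventsStatementsTimeLimit: 86400,     // only recently seen statements
                gatherTableIoWaits: true,
                gatherProcessList: true,
            },
        },
    },
});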
GetServiceIntegrationPrometheusUserConfig
- SourceMysql GetServiceIntegrationPrometheusUserConfigSourceMysql
- Configuration options for metrics where the source service is MySQL
- SourceMysql GetServiceIntegrationPrometheusUserConfigSourceMysql
- Configuration options for metrics where the source service is MySQL
- sourceMysql GetServiceIntegrationPrometheusUserConfigSourceMysql
- Configuration options for metrics where the source service is MySQL
- sourceMysql GetServiceIntegrationPrometheusUserConfigSourceMysql
- Configuration options for metrics where the source service is MySQL
- source_mysql GetServiceIntegrationPrometheusUserConfigSourceMysql
- Configuration options for metrics where the source service is MySQL
- sourceMysql Property Map
- Configuration options for metrics where the source service is MySQL
GetServiceIntegrationPrometheusUserConfigSourceMysql
- Telegraf GetServiceIntegrationPrometheusUserConfigSourceMysqlTelegraf
- Configuration options for the Telegraf MySQL input plugin
- Telegraf GetServiceIntegrationPrometheusUserConfigSourceMysqlTelegraf
- Configuration options for the Telegraf MySQL input plugin
- telegraf GetServiceIntegrationPrometheusUserConfigSourceMysqlTelegraf
- Configuration options for the Telegraf MySQL input plugin
- telegraf GetServiceIntegrationPrometheusUserConfigSourceMysqlTelegraf
- Configuration options for the Telegraf MySQL input plugin
- telegraf GetServiceIntegrationPrometheusUserConfigSourceMysqlTelegraf
- Configuration options for the Telegraf MySQL input plugin
- telegraf Property Map
- Configuration options for the Telegraf MySQL input plugin
GetServiceIntegrationPrometheusUserConfigSourceMysqlTelegraf
- GatherEventWaits bool
- Gather metrics from PERFORMANCE_SCHEMA.EVENT_WAITS.
- GatherFileEventsStats bool
- Gather metrics from PERFORMANCE_SCHEMA.FILE_SUMMARY_BY_EVENT_NAME.
- GatherIndexIoWaits bool
- Gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_INDEX_USAGE.
- GatherInfoSchemaAutoInc bool
- Gather auto_increment columns and max values from the information schema.
- GatherInnodbMetrics bool
- Gather metrics from INFORMATION_SCHEMA.INNODB_METRICS.
- GatherPerfEventsStatements bool
- Gather metrics from PERFORMANCE_SCHEMA.EVENTS_STATEMENTS_SUMMARY_BY_DIGEST.
- GatherProcessList bool
- Gather thread state counts from INFORMATION_SCHEMA.PROCESSLIST.
- GatherSlaveStatus bool
- Gather metrics from SHOW SLAVE STATUS command output.
- GatherTableIoWaits bool
- Gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_TABLE.
- GatherTableLockWaits bool
- Gather metrics from PERFORMANCE_SCHEMA.TABLE_LOCK_WAITS.
- GatherTableSchema bool
- Gather metrics from INFORMATION_SCHEMA.TABLES.
- PerfEventsStatementsDigestTextLimit int
- Truncates digest text from perf_events_statements to this many characters. Example: 120.
- PerfEventsStatementsLimit int
- Limits metrics gathered from perf_events_statements. Example: 250.
- PerfEventsStatementsTimeLimit int
- Only include perf_events_statements entries last seen within this many seconds. Example: 86400.
- GatherEventWaits bool
- Gather metrics from PERFORMANCE_SCHEMA.EVENT_WAITS.
- GatherFileEventsStats bool
- Gather metrics from PERFORMANCE_SCHEMA.FILE_SUMMARY_BY_EVENT_NAME.
- GatherIndexIoWaits bool
- Gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_INDEX_USAGE.
- GatherInfoSchemaAutoInc bool
- Gather auto_increment columns and max values from the information schema.
- GatherInnodbMetrics bool
- Gather metrics from INFORMATION_SCHEMA.INNODB_METRICS.
- GatherPerfEventsStatements bool
- Gather metrics from PERFORMANCE_SCHEMA.EVENTS_STATEMENTS_SUMMARY_BY_DIGEST.
- GatherProcessList bool
- Gather thread state counts from INFORMATION_SCHEMA.PROCESSLIST.
- GatherSlaveStatus bool
- Gather metrics from SHOW SLAVE STATUS command output.
- GatherTableIoWaits bool
- Gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_TABLE.
- GatherTableLockWaits bool
- Gather metrics from PERFORMANCE_SCHEMA.TABLE_LOCK_WAITS.
- GatherTableSchema bool
- Gather metrics from INFORMATION_SCHEMA.TABLES.
- PerfEventsStatementsDigestTextLimit int
- Truncates digest text from perf_events_statements to this many characters. Example: 120.
- PerfEventsStatementsLimit int
- Limits metrics gathered from perf_events_statements. Example: 250.
- PerfEventsStatementsTimeLimit int
- Only include perf_events_statements entries last seen within this many seconds. Example: 86400.
- gatherEventWaits Boolean
- Gather metrics from PERFORMANCE_SCHEMA.EVENT_WAITS.
- gatherFileEventsStats Boolean
- Gather metrics from PERFORMANCE_SCHEMA.FILE_SUMMARY_BY_EVENT_NAME.
- gatherIndexIoWaits Boolean
- Gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_INDEX_USAGE.
- gatherInfoSchemaAutoInc Boolean
- Gather auto_increment columns and max values from the information schema.
- gatherInnodbMetrics Boolean
- Gather metrics from INFORMATION_SCHEMA.INNODB_METRICS.
- gatherPerfEventsStatements Boolean
- Gather metrics from PERFORMANCE_SCHEMA.EVENTS_STATEMENTS_SUMMARY_BY_DIGEST.
- gatherProcessList Boolean
- Gather thread state counts from INFORMATION_SCHEMA.PROCESSLIST.
- gatherSlaveStatus Boolean
- Gather metrics from SHOW SLAVE STATUS command output.
- gatherTableIoWaits Boolean
- Gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_TABLE.
- gatherTableLockWaits Boolean
- Gather metrics from PERFORMANCE_SCHEMA.TABLE_LOCK_WAITS.
- gatherTableSchema Boolean
- Gather metrics from INFORMATION_SCHEMA.TABLES.
- perfEventsStatementsDigestTextLimit Integer
- Truncates digest text from perf_events_statements to this many characters. Example: 120.
- perfEventsStatementsLimit Integer
- Limits metrics gathered from perf_events_statements. Example: 250.
- perfEventsStatementsTimeLimit Integer
- Only include perf_events_statements entries last seen within this many seconds. Example: 86400.
- gatherEventWaits boolean
- Gather metrics from PERFORMANCE_SCHEMA.EVENT_WAITS.
- gatherFileEventsStats boolean
- Gather metrics from PERFORMANCE_SCHEMA.FILE_SUMMARY_BY_EVENT_NAME.
- gatherIndexIoWaits boolean
- Gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_INDEX_USAGE.
- gatherInfoSchemaAutoInc boolean
- Gather auto_increment columns and max values from the information schema.
- gatherInnodbMetrics boolean
- Gather metrics from INFORMATION_SCHEMA.INNODB_METRICS.
- gatherPerfEventsStatements boolean
- Gather metrics from PERFORMANCE_SCHEMA.EVENTS_STATEMENTS_SUMMARY_BY_DIGEST.
- gatherProcessList boolean
- Gather thread state counts from INFORMATION_SCHEMA.PROCESSLIST.
- gatherSlaveStatus boolean
- Gather metrics from SHOW SLAVE STATUS command output.
- gatherTableIoWaits boolean
- Gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_TABLE.
- gatherTableLockWaits boolean
- Gather metrics from PERFORMANCE_SCHEMA.TABLE_LOCK_WAITS.
- gatherTableSchema boolean
- Gather metrics from INFORMATION_SCHEMA.TABLES.
- perfEventsStatementsDigestTextLimit number
- Truncates digest text from perf_events_statements to this many characters. Example: 120.
- perfEventsStatementsLimit number
- Limits metrics gathered from perf_events_statements. Example: 250.
- perfEventsStatementsTimeLimit number
- Only include perf_events_statements entries last seen within this many seconds. Example: 86400.
- gather_event_waits bool
- Gather metrics from PERFORMANCE_SCHEMA.EVENT_WAITS.
- gather_file_events_stats bool
- Gather metrics from PERFORMANCE_SCHEMA.FILE_SUMMARY_BY_EVENT_NAME.
- gather_index_io_waits bool
- Gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_INDEX_USAGE.
- gather_info_schema_auto_inc bool
- Gather auto_increment columns and max values from the information schema.
- gather_innodb_metrics bool
- Gather metrics from INFORMATION_SCHEMA.INNODB_METRICS.
- gather_perf_events_statements bool
- Gather metrics from PERFORMANCE_SCHEMA.EVENTS_STATEMENTS_SUMMARY_BY_DIGEST.
- gather_process_list bool
- Gather thread state counts from INFORMATION_SCHEMA.PROCESSLIST.
- gather_slave_status bool
- Gather metrics from SHOW SLAVE STATUS command output.
- gather_table_io_waits bool
- Gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_TABLE.
- gather_table_lock_waits bool
- Gather metrics from PERFORMANCE_SCHEMA.TABLE_LOCK_WAITS.
- gather_table_schema bool
- Gather metrics from INFORMATION_SCHEMA.TABLES.
- perf_events_statements_digest_text_limit int
- Truncates digest text from perf_events_statements to this many characters. Example: 120.
- perf_events_statements_limit int
- Limits metrics gathered from perf_events_statements. Example: 250.
- perf_events_statements_time_limit int
- Only include perf_events_statements entries last seen within this many seconds. Example: 86400.
- gatherEventWaits Boolean
- Gather metrics from PERFORMANCE_SCHEMA.EVENT_WAITS.
- gatherFileEventsStats Boolean
- Gather metrics from PERFORMANCE_SCHEMA.FILE_SUMMARY_BY_EVENT_NAME.
- gatherIndexIoWaits Boolean
- Gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_INDEX_USAGE.
- gatherInfoSchemaAutoInc Boolean
- Gather auto_increment columns and max values from the information schema.
- gatherInnodbMetrics Boolean
- Gather metrics from INFORMATION_SCHEMA.INNODB_METRICS.
- gatherPerfEventsStatements Boolean
- Gather metrics from PERFORMANCE_SCHEMA.EVENTS_STATEMENTS_SUMMARY_BY_DIGEST.
- gatherProcessList Boolean
- Gather thread state counts from INFORMATION_SCHEMA.PROCESSLIST.
- gatherSlaveStatus Boolean
- Gather metrics from SHOW SLAVE STATUS command output.
- gatherTableIoWaits Boolean
- Gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_TABLE.
- gatherTableLockWaits Boolean
- Gather metrics from PERFORMANCE_SCHEMA.TABLE_LOCK_WAITS.
- gatherTableSchema Boolean
- Gather metrics from INFORMATION_SCHEMA.TABLES.
- perfEventsStatementsDigestTextLimit Number
- Truncates digest text from perf_events_statements to this many characters. Example: 120.
- perfEventsStatementsLimit Number
- Limits metrics gathered from perf_events_statements. Example: 250.
- perfEventsStatementsTimeLimit Number
- Only include perf_events_statements entries last seen within this many seconds. Example: 86400.
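To read a resolved configuration back, the datasource can be invoked in its output form; the prometheus user config is read the same way as the metrics one sketched here. Note the assumption that user configs surface on the result as list-valued properties (for example metricsUserConfigs); check the generated SDK for the exact result shape:

import * as aiven from "@pulumi/aiven";

// Hypothetical lookup of the MySQL metrics integration defined earlier.
const integration = aiven.getServiceIntegrationOutput({
    project: "example-project",
    integrationType: "metrics",
    sourceServiceName: "example-mysql",
    destinationServiceName: "example-thanos",
});

// Assumption: the result exposes metricsUserConfigs as a list; adjust
// the property access to match the actual result type.
export const gatherProcessList = integration.metricsUserConfigs.apply(
    cfgs => cfgs?.[0]?.sourceMysql?.telegraf?.gatherProcessList,
);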
Package Details
- Repository
- Aiven pulumi/pulumi-aiven
- License
- Apache-2.0
- Notes
- This Pulumi package is based on the aiven Terraform Provider.
