confluentcloud.Connector

Confluent v1.41.0 published on Saturday, Apr 13, 2024 by Pulumi

    Example Usage

    Example Managed Datagen Source Connector that uses a service account to communicate with your Kafka cluster

    import * as pulumi from "@pulumi/pulumi";
    import * as confluentcloud from "@pulumi/confluentcloud";
    
    const source = new confluentcloud.Connector("source", {
        environment: {
            id: confluent_environment.staging.id,
        },
        kafkaCluster: {
            id: confluent_kafka_cluster.basic.id,
        },
        configSensitive: {},
        configNonsensitive: {
            "connector.class": "DatagenSource",
            name: "DatagenSourceConnector_0",
            "kafka.auth.mode": "SERVICE_ACCOUNT",
            "kafka.service.account.id": confluent_service_account["app-connector"].id,
            "kafka.topic": confluent_kafka_topic.orders.topic_name,
            "output.data.format": "JSON",
            quickstart: "ORDERS",
            "tasks.max": "1",
        },
    }, {
        dependsOn: [
            confluent_kafka_acl["app-connector-describe-on-cluster"],
            confluent_kafka_acl["app-connector-write-on-target-topic"],
            confluent_kafka_acl["app-connector-create-on-data-preview-topics"],
            confluent_kafka_acl["app-connector-write-on-data-preview-topics"],
        ],
    });
    
    import pulumi
    import pulumi_confluentcloud as confluentcloud
    
    source = confluentcloud.Connector("source",
        environment=confluentcloud.ConnectorEnvironmentArgs(
            id=confluent_environment["staging"]["id"],
        ),
        kafka_cluster=confluentcloud.ConnectorKafkaClusterArgs(
            id=confluent_kafka_cluster["basic"]["id"],
        ),
        config_sensitive={},
        config_nonsensitive={
            "connector.class": "DatagenSource",
            "name": "DatagenSourceConnector_0",
            "kafka.auth.mode": "SERVICE_ACCOUNT",
            "kafka.service.account.id": confluent_service_account["app-connector"]["id"],
            "kafka.topic": confluent_kafka_topic["orders"]["topic_name"],
            "output.data.format": "JSON",
            "quickstart": "ORDERS",
            "tasks.max": "1",
        },
        opts=pulumi.ResourceOptions(depends_on=[
                confluent_kafka_acl["app-connector-describe-on-cluster"],
                confluent_kafka_acl["app-connector-write-on-target-topic"],
                confluent_kafka_acl["app-connector-create-on-data-preview-topics"],
                confluent_kafka_acl["app-connector-write-on-data-preview-topics"],
            ]))
    
    package main
    
    import (
    	"github.com/pulumi/pulumi-confluentcloud/sdk/go/confluentcloud"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		_, err := confluentcloud.NewConnector(ctx, "source", &confluentcloud.ConnectorArgs{
    			Environment: &confluentcloud.ConnectorEnvironmentArgs{
    				Id: pulumi.Any(confluent_environment.Staging.Id),
    			},
    			KafkaCluster: &confluentcloud.ConnectorKafkaClusterArgs{
    				Id: pulumi.Any(confluent_kafka_cluster.Basic.Id),
    			},
    			ConfigSensitive: nil,
    			ConfigNonsensitive: pulumi.StringMap{
    				"connector.class":          pulumi.String("DatagenSource"),
    				"name":                     pulumi.String("DatagenSourceConnector_0"),
    				"kafka.auth.mode":          pulumi.String("SERVICE_ACCOUNT"),
    				"kafka.service.account.id": pulumi.Any(confluent_service_account.AppConnector.Id),
    				"kafka.topic":              pulumi.Any(confluent_kafka_topic.Orders.Topic_name),
    				"output.data.format":       pulumi.String("JSON"),
    				"quickstart":               pulumi.String("ORDERS"),
    				"tasks.max":                pulumi.String("1"),
    			},
    		}, pulumi.DependsOn([]pulumi.Resource{
    			confluent_kafka_acl.AppConnectorDescribeOnCluster,
    			confluent_kafka_acl.AppConnectorWriteOnTargetTopic,
    			confluent_kafka_acl.AppConnectorCreateOnDataPreviewTopics,
    			confluent_kafka_acl.AppConnectorWriteOnDataPreviewTopics,
    		}))
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using ConfluentCloud = Pulumi.ConfluentCloud;
    
    return await Deployment.RunAsync(() => 
    {
        var source = new ConfluentCloud.Connector("source", new()
        {
            Environment = new ConfluentCloud.Inputs.ConnectorEnvironmentArgs
            {
                Id = confluent_environment.Staging.Id,
            },
            KafkaCluster = new ConfluentCloud.Inputs.ConnectorKafkaClusterArgs
            {
                Id = confluent_kafka_cluster.Basic.Id,
            },
            ConfigSensitive = null,
            ConfigNonsensitive = 
            {
                { "connector.class", "DatagenSource" },
                { "name", "DatagenSourceConnector_0" },
                { "kafka.auth.mode", "SERVICE_ACCOUNT" },
                { "kafka.service.account.id", confluent_service_account.App_connector.Id },
                { "kafka.topic", confluent_kafka_topic.Orders.Topic_name },
                { "output.data.format", "JSON" },
                { "quickstart", "ORDERS" },
                { "tasks.max", "1" },
            },
        }, new CustomResourceOptions
        {
            DependsOn =
            {
                confluent_kafka_acl.App_connector_describe_on_cluster, 
                confluent_kafka_acl.App_connector_write_on_target_topic, 
                confluent_kafka_acl.App_connector_create_on_data_preview_topics, 
                confluent_kafka_acl.App_connector_write_on_data_preview_topics, 
            },
        });
    
    });
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.confluentcloud.Connector;
    import com.pulumi.confluentcloud.ConnectorArgs;
    import com.pulumi.confluentcloud.inputs.ConnectorEnvironmentArgs;
    import com.pulumi.confluentcloud.inputs.ConnectorKafkaClusterArgs;
    import com.pulumi.resources.CustomResourceOptions;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            var source = new Connector("source", ConnectorArgs.builder()        
                .environment(ConnectorEnvironmentArgs.builder()
                    .id(confluent_environment.staging().id())
                    .build())
                .kafkaCluster(ConnectorKafkaClusterArgs.builder()
                    .id(confluent_kafka_cluster.basic().id())
                    .build())
                .configSensitive(Map.of())
                .configNonsensitive(Map.ofEntries(
                    Map.entry("connector.class", "DatagenSource"),
                    Map.entry("name", "DatagenSourceConnector_0"),
                    Map.entry("kafka.auth.mode", "SERVICE_ACCOUNT"),
                    Map.entry("kafka.service.account.id", confluent_service_account.app-connector().id()),
                    Map.entry("kafka.topic", confluent_kafka_topic.orders().topic_name()),
                    Map.entry("output.data.format", "JSON"),
                    Map.entry("quickstart", "ORDERS"),
                    Map.entry("tasks.max", "1")
                ))
                .build(), CustomResourceOptions.builder()
                    .dependsOn(
                        confluent_kafka_acl.appConnectorDescribeOnCluster(),
                        confluent_kafka_acl.appConnectorWriteOnTargetTopic(),
                        confluent_kafka_acl.appConnectorCreateOnDataPreviewTopics(),
                        confluent_kafka_acl.appConnectorWriteOnDataPreviewTopics())
                    .build());
    
        }
    }
    
    resources:
      source:
        type: confluentcloud:Connector
        properties:
          environment:
            id: ${confluent_environment.staging.id}
          kafkaCluster:
            id: ${confluent_kafka_cluster.basic.id}
          configSensitive: {}
          configNonsensitive:
            connector.class: DatagenSource
            name: DatagenSourceConnector_0
            kafka.auth.mode: SERVICE_ACCOUNT
            kafka.service.account.id: ${confluent_service_account["app-connector"].id}
            kafka.topic: ${confluent_kafka_topic.orders.topic_name}
            output.data.format: JSON
            quickstart: ORDERS
            tasks.max: '1'
        options:
          dependsOn:
            - ${confluent_kafka_acl["app-connector-describe-on-cluster"]}
            - ${confluent_kafka_acl["app-connector-write-on-target-topic"]}
            - ${confluent_kafka_acl["app-connector-create-on-data-preview-topics"]}
            - ${confluent_kafka_acl["app-connector-write-on-data-preview-topics"]}
    

    Example Managed Amazon S3 Sink Connector that uses a service account to communicate with your Kafka cluster

    import * as pulumi from "@pulumi/pulumi";
    import * as confluentcloud from "@pulumi/confluentcloud";
    
    const sink = new confluentcloud.Connector("sink", {
        environment: {
            id: confluent_environment.staging.id,
        },
        kafkaCluster: {
            id: confluent_kafka_cluster.basic.id,
        },
        configSensitive: {
            "aws.access.key.id": "***REDACTED***",
            "aws.secret.access.key": "***REDACTED***",
        },
        configNonsensitive: {
            topics: confluent_kafka_topic.orders.topic_name,
            "input.data.format": "JSON",
            "connector.class": "S3_SINK",
            name: "S3_SINKConnector_0",
            "kafka.auth.mode": "SERVICE_ACCOUNT",
            "kafka.service.account.id": confluent_service_account["app-connector"].id,
            "s3.bucket.name": "<s3-bucket-name>",
            "output.data.format": "JSON",
            "time.interval": "DAILY",
            "flush.size": "1000",
            "tasks.max": "1",
        },
    }, {
        dependsOn: [
            confluent_kafka_acl["app-connector-describe-on-cluster"],
            confluent_kafka_acl["app-connector-read-on-target-topic"],
            confluent_kafka_acl["app-connector-create-on-dlq-lcc-topics"],
            confluent_kafka_acl["app-connector-write-on-dlq-lcc-topics"],
            confluent_kafka_acl["app-connector-create-on-success-lcc-topics"],
            confluent_kafka_acl["app-connector-write-on-success-lcc-topics"],
            confluent_kafka_acl["app-connector-create-on-error-lcc-topics"],
            confluent_kafka_acl["app-connector-write-on-error-lcc-topics"],
            confluent_kafka_acl["app-connector-read-on-connect-lcc-group"],
        ],
    });
    
    import pulumi
    import pulumi_confluentcloud as confluentcloud
    
    sink = confluentcloud.Connector("sink",
        environment=confluentcloud.ConnectorEnvironmentArgs(
            id=confluent_environment["staging"]["id"],
        ),
        kafka_cluster=confluentcloud.ConnectorKafkaClusterArgs(
            id=confluent_kafka_cluster["basic"]["id"],
        ),
        config_sensitive={
            "aws.access.key.id": "***REDACTED***",
            "aws.secret.access.key": "***REDACTED***",
        },
        config_nonsensitive={
            "topics": confluent_kafka_topic["orders"]["topic_name"],
            "input.data.format": "JSON",
            "connector.class": "S3_SINK",
            "name": "S3_SINKConnector_0",
            "kafka.auth.mode": "SERVICE_ACCOUNT",
            "kafka.service.account.id": confluent_service_account["app-connector"]["id"],
            "s3.bucket.name": "<s3-bucket-name>",
            "output.data.format": "JSON",
            "time.interval": "DAILY",
            "flush.size": "1000",
            "tasks.max": "1",
        },
        opts=pulumi.ResourceOptions(depends_on=[
                confluent_kafka_acl["app-connector-describe-on-cluster"],
                confluent_kafka_acl["app-connector-read-on-target-topic"],
                confluent_kafka_acl["app-connector-create-on-dlq-lcc-topics"],
                confluent_kafka_acl["app-connector-write-on-dlq-lcc-topics"],
                confluent_kafka_acl["app-connector-create-on-success-lcc-topics"],
                confluent_kafka_acl["app-connector-write-on-success-lcc-topics"],
                confluent_kafka_acl["app-connector-create-on-error-lcc-topics"],
                confluent_kafka_acl["app-connector-write-on-error-lcc-topics"],
                confluent_kafka_acl["app-connector-read-on-connect-lcc-group"],
            ]))
    
    package main
    
    import (
    	"github.com/pulumi/pulumi-confluentcloud/sdk/go/confluentcloud"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		_, err := confluentcloud.NewConnector(ctx, "sink", &confluentcloud.ConnectorArgs{
    			Environment: &confluentcloud.ConnectorEnvironmentArgs{
    				Id: pulumi.Any(confluent_environment.Staging.Id),
    			},
    			KafkaCluster: &confluentcloud.ConnectorKafkaClusterArgs{
    				Id: pulumi.Any(confluent_kafka_cluster.Basic.Id),
    			},
    			ConfigSensitive: pulumi.StringMap{
    				"aws.access.key.id":     pulumi.String("***REDACTED***"),
    				"aws.secret.access.key": pulumi.String("***REDACTED***"),
    			},
    			ConfigNonsensitive: pulumi.StringMap{
    				"topics":                   pulumi.Any(confluent_kafka_topic.Orders.Topic_name),
    				"input.data.format":        pulumi.String("JSON"),
    				"connector.class":          pulumi.String("S3_SINK"),
    				"name":                     pulumi.String("S3_SINKConnector_0"),
    				"kafka.auth.mode":          pulumi.String("SERVICE_ACCOUNT"),
    				"kafka.service.account.id": pulumi.Any(confluent_service_account.AppConnector.Id),
    				"s3.bucket.name":           pulumi.String("<s3-bucket-name>"),
    				"output.data.format":       pulumi.String("JSON"),
    				"time.interval":            pulumi.String("DAILY"),
    				"flush.size":               pulumi.String("1000"),
    				"tasks.max":                pulumi.String("1"),
    			},
    		}, pulumi.DependsOn([]pulumi.Resource{
    			confluent_kafka_acl.AppConnectorDescribeOnCluster,
    			confluent_kafka_acl.AppConnectorReadOnTargetTopic,
    			confluent_kafka_acl.AppConnectorCreateOnDlqLccTopics,
    			confluent_kafka_acl.AppConnectorWriteOnDlqLccTopics,
    			confluent_kafka_acl.AppConnectorCreateOnSuccessLccTopics,
    			confluent_kafka_acl.AppConnectorWriteOnSuccessLccTopics,
    			confluent_kafka_acl.AppConnectorCreateOnErrorLccTopics,
    			confluent_kafka_acl.AppConnectorWriteOnErrorLccTopics,
    			confluent_kafka_acl.AppConnectorReadOnConnectLccGroup,
    		}))
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using ConfluentCloud = Pulumi.ConfluentCloud;
    
    return await Deployment.RunAsync(() => 
    {
        var sink = new ConfluentCloud.Connector("sink", new()
        {
            Environment = new ConfluentCloud.Inputs.ConnectorEnvironmentArgs
            {
                Id = confluent_environment.Staging.Id,
            },
            KafkaCluster = new ConfluentCloud.Inputs.ConnectorKafkaClusterArgs
            {
                Id = confluent_kafka_cluster.Basic.Id,
            },
            ConfigSensitive = 
            {
                { "aws.access.key.id", "***REDACTED***" },
                { "aws.secret.access.key", "***REDACTED***" },
            },
            ConfigNonsensitive = 
            {
                { "topics", confluent_kafka_topic.Orders.Topic_name },
                { "input.data.format", "JSON" },
                { "connector.class", "S3_SINK" },
                { "name", "S3_SINKConnector_0" },
                { "kafka.auth.mode", "SERVICE_ACCOUNT" },
                { "kafka.service.account.id", confluent_service_account.App_connector.Id },
                { "s3.bucket.name", "<s3-bucket-name>" },
                { "output.data.format", "JSON" },
                { "time.interval", "DAILY" },
                { "flush.size", "1000" },
                { "tasks.max", "1" },
            },
        }, new CustomResourceOptions
        {
            DependsOn =
            {
                confluent_kafka_acl.App_connector_describe_on_cluster, 
                confluent_kafka_acl.App_connector_read_on_target_topic, 
                confluent_kafka_acl.App_connector_create_on_dlq_lcc_topics, 
                confluent_kafka_acl.App_connector_write_on_dlq_lcc_topics, 
                confluent_kafka_acl.App_connector_create_on_success_lcc_topics, 
                confluent_kafka_acl.App_connector_write_on_success_lcc_topics, 
                confluent_kafka_acl.App_connector_create_on_error_lcc_topics, 
                confluent_kafka_acl.App_connector_write_on_error_lcc_topics, 
                confluent_kafka_acl.App_connector_read_on_connect_lcc_group, 
            },
        });
    
    });
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.confluentcloud.Connector;
    import com.pulumi.confluentcloud.ConnectorArgs;
    import com.pulumi.confluentcloud.inputs.ConnectorEnvironmentArgs;
    import com.pulumi.confluentcloud.inputs.ConnectorKafkaClusterArgs;
    import com.pulumi.resources.CustomResourceOptions;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            var sink = new Connector("sink", ConnectorArgs.builder()        
                .environment(ConnectorEnvironmentArgs.builder()
                    .id(confluent_environment.staging().id())
                    .build())
                .kafkaCluster(ConnectorKafkaClusterArgs.builder()
                    .id(confluent_kafka_cluster.basic().id())
                    .build())
                .configSensitive(Map.ofEntries(
                    Map.entry("aws.access.key.id", "***REDACTED***"),
                    Map.entry("aws.secret.access.key", "***REDACTED***")
                ))
                .configNonsensitive(Map.ofEntries(
                    Map.entry("topics", confluent_kafka_topic.orders().topic_name()),
                    Map.entry("input.data.format", "JSON"),
                    Map.entry("connector.class", "S3_SINK"),
                    Map.entry("name", "S3_SINKConnector_0"),
                    Map.entry("kafka.auth.mode", "SERVICE_ACCOUNT"),
                    Map.entry("kafka.service.account.id", confluent_service_account.app-connector().id()),
                    Map.entry("s3.bucket.name", "<s3-bucket-name>"),
                    Map.entry("output.data.format", "JSON"),
                    Map.entry("time.interval", "DAILY"),
                    Map.entry("flush.size", "1000"),
                    Map.entry("tasks.max", "1")
                ))
                .build(), CustomResourceOptions.builder()
                    .dependsOn(
                        confluent_kafka_acl.appConnectorDescribeOnCluster(),
                        confluent_kafka_acl.appConnectorReadOnTargetTopic(),
                        confluent_kafka_acl.appConnectorCreateOnDlqLccTopics(),
                        confluent_kafka_acl.appConnectorWriteOnDlqLccTopics(),
                        confluent_kafka_acl.appConnectorCreateOnSuccessLccTopics(),
                        confluent_kafka_acl.appConnectorWriteOnSuccessLccTopics(),
                        confluent_kafka_acl.appConnectorCreateOnErrorLccTopics(),
                        confluent_kafka_acl.appConnectorWriteOnErrorLccTopics(),
                        confluent_kafka_acl.appConnectorReadOnConnectLccGroup())
                    .build());
    
        }
    }
    
    resources:
      sink:
        type: confluentcloud:Connector
        properties:
          environment:
            id: ${confluent_environment.staging.id}
          kafkaCluster:
            id: ${confluent_kafka_cluster.basic.id}
          # Block for custom *sensitive* configuration properties that are labelled with "Type: password" under "Configuration Properties" section in the docs:
          #   // https://docs.confluent.io/cloud/current/connectors/cc-s3-sink.html#configuration-properties
          configSensitive:
            aws.access.key.id: '***REDACTED***'
            aws.secret.access.key: '***REDACTED***'
          # Block for custom *nonsensitive* configuration properties that are *not* labelled with "Type: password" under "Configuration Properties" section in the docs:
          #   // https://docs.confluent.io/cloud/current/connectors/cc-s3-sink.html#configuration-properties
          configNonsensitive:
            topics: ${confluent_kafka_topic.orders.topic_name}
            input.data.format: JSON
            connector.class: S3_SINK
            name: S3_SINKConnector_0
            kafka.auth.mode: SERVICE_ACCOUNT
            kafka.service.account.id: ${confluent_service_account["app-connector"].id}
            s3.bucket.name: <s3-bucket-name>
            output.data.format: JSON
            time.interval: DAILY
            flush.size: '1000'
            tasks.max: '1'
        options:
          dependsOn:
            - ${confluent_kafka_acl["app-connector-describe-on-cluster"]}
            - ${confluent_kafka_acl["app-connector-read-on-target-topic"]}
            - ${confluent_kafka_acl["app-connector-create-on-dlq-lcc-topics"]}
            - ${confluent_kafka_acl["app-connector-write-on-dlq-lcc-topics"]}
            - ${confluent_kafka_acl["app-connector-create-on-success-lcc-topics"]}
            - ${confluent_kafka_acl["app-connector-write-on-success-lcc-topics"]}
            - ${confluent_kafka_acl["app-connector-create-on-error-lcc-topics"]}
            - ${confluent_kafka_acl["app-connector-write-on-error-lcc-topics"]}
            - ${confluent_kafka_acl["app-connector-read-on-connect-lcc-group"]}
    

    Example Managed Amazon DynamoDB Connector that uses a service account to communicate with your Kafka cluster

    import * as pulumi from "@pulumi/pulumi";
    import * as confluentcloud from "@pulumi/confluentcloud";
    
    const sink = new confluentcloud.Connector("sink", {
        environment: {
            id: confluent_environment.staging.id,
        },
        kafkaCluster: {
            id: confluent_kafka_cluster.basic.id,
        },
        configSensitive: {
            "aws.access.key.id": "***REDACTED***",
            "aws.secret.access.key": "***REDACTED***",
        },
        configNonsensitive: {
            topics: confluent_kafka_topic.orders.topic_name,
            "input.data.format": "JSON",
            "connector.class": "DynamoDbSink",
            name: "DynamoDbSinkConnector_0",
            "kafka.auth.mode": "SERVICE_ACCOUNT",
            "kafka.service.account.id": confluent_service_account["app-connector"].id,
            "aws.dynamodb.pk.hash": "value.userid",
            "aws.dynamodb.pk.sort": "value.pageid",
            "tasks.max": "1",
        },
    }, {
        dependsOn: [
            confluent_kafka_acl["app-connector-describe-on-cluster"],
            confluent_kafka_acl["app-connector-read-on-target-topic"],
            confluent_kafka_acl["app-connector-create-on-dlq-lcc-topics"],
            confluent_kafka_acl["app-connector-write-on-dlq-lcc-topics"],
            confluent_kafka_acl["app-connector-create-on-success-lcc-topics"],
            confluent_kafka_acl["app-connector-write-on-success-lcc-topics"],
            confluent_kafka_acl["app-connector-create-on-error-lcc-topics"],
            confluent_kafka_acl["app-connector-write-on-error-lcc-topics"],
            confluent_kafka_acl["app-connector-read-on-connect-lcc-group"],
        ],
    });
    
    import pulumi
    import pulumi_confluentcloud as confluentcloud
    
    sink = confluentcloud.Connector("sink",
        environment=confluentcloud.ConnectorEnvironmentArgs(
            id=confluent_environment["staging"]["id"],
        ),
        kafka_cluster=confluentcloud.ConnectorKafkaClusterArgs(
            id=confluent_kafka_cluster["basic"]["id"],
        ),
        config_sensitive={
            "aws.access.key.id": "***REDACTED***",
            "aws.secret.access.key": "***REDACTED***",
        },
        config_nonsensitive={
            "topics": confluent_kafka_topic["orders"]["topic_name"],
            "input.data.format": "JSON",
            "connector.class": "DynamoDbSink",
            "name": "DynamoDbSinkConnector_0",
            "kafka.auth.mode": "SERVICE_ACCOUNT",
            "kafka.service.account.id": confluent_service_account["app-connector"]["id"],
            "aws.dynamodb.pk.hash": "value.userid",
            "aws.dynamodb.pk.sort": "value.pageid",
            "tasks.max": "1",
        },
        opts=pulumi.ResourceOptions(depends_on=[
                confluent_kafka_acl["app-connector-describe-on-cluster"],
                confluent_kafka_acl["app-connector-read-on-target-topic"],
                confluent_kafka_acl["app-connector-create-on-dlq-lcc-topics"],
                confluent_kafka_acl["app-connector-write-on-dlq-lcc-topics"],
                confluent_kafka_acl["app-connector-create-on-success-lcc-topics"],
                confluent_kafka_acl["app-connector-write-on-success-lcc-topics"],
                confluent_kafka_acl["app-connector-create-on-error-lcc-topics"],
                confluent_kafka_acl["app-connector-write-on-error-lcc-topics"],
                confluent_kafka_acl["app-connector-read-on-connect-lcc-group"],
            ]))
    
    package main
    
    import (
    	"github.com/pulumi/pulumi-confluentcloud/sdk/go/confluentcloud"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		_, err := confluentcloud.NewConnector(ctx, "sink", &confluentcloud.ConnectorArgs{
    			Environment: &confluentcloud.ConnectorEnvironmentArgs{
    				Id: pulumi.Any(confluent_environment.Staging.Id),
    			},
    			KafkaCluster: &confluentcloud.ConnectorKafkaClusterArgs{
    				Id: pulumi.Any(confluent_kafka_cluster.Basic.Id),
    			},
    			ConfigSensitive: pulumi.StringMap{
    				"aws.access.key.id":     pulumi.String("***REDACTED***"),
    				"aws.secret.access.key": pulumi.String("***REDACTED***"),
    			},
    			ConfigNonsensitive: pulumi.StringMap{
    				"topics":                   pulumi.Any(confluent_kafka_topic.Orders.Topic_name),
    				"input.data.format":        pulumi.String("JSON"),
    				"connector.class":          pulumi.String("DynamoDbSink"),
    				"name":                     pulumi.String("DynamoDbSinkConnector_0"),
    				"kafka.auth.mode":          pulumi.String("SERVICE_ACCOUNT"),
    				"kafka.service.account.id": pulumi.Any(confluent_service_account.AppConnector.Id),
    				"aws.dynamodb.pk.hash":     pulumi.String("value.userid"),
    				"aws.dynamodb.pk.sort":     pulumi.String("value.pageid"),
    				"tasks.max":                pulumi.String("1"),
    			},
    		}, pulumi.DependsOn([]pulumi.Resource{
    			confluent_kafka_acl.AppConnectorDescribeOnCluster,
    			confluent_kafka_acl.AppConnectorReadOnTargetTopic,
    			confluent_kafka_acl.AppConnectorCreateOnDlqLccTopics,
    			confluent_kafka_acl.AppConnectorWriteOnDlqLccTopics,
    			confluent_kafka_acl.AppConnectorCreateOnSuccessLccTopics,
    			confluent_kafka_acl.AppConnectorWriteOnSuccessLccTopics,
    			confluent_kafka_acl.AppConnectorCreateOnErrorLccTopics,
    			confluent_kafka_acl.AppConnectorWriteOnErrorLccTopics,
    			confluent_kafka_acl.AppConnectorReadOnConnectLccGroup,
    		}))
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using ConfluentCloud = Pulumi.ConfluentCloud;
    
    return await Deployment.RunAsync(() => 
    {
        var sink = new ConfluentCloud.Connector("sink", new()
        {
            Environment = new ConfluentCloud.Inputs.ConnectorEnvironmentArgs
            {
                Id = confluent_environment.Staging.Id,
            },
            KafkaCluster = new ConfluentCloud.Inputs.ConnectorKafkaClusterArgs
            {
                Id = confluent_kafka_cluster.Basic.Id,
            },
            ConfigSensitive = 
            {
                { "aws.access.key.id", "***REDACTED***" },
                { "aws.secret.access.key", "***REDACTED***" },
            },
            ConfigNonsensitive = 
            {
                { "topics", confluent_kafka_topic.Orders.Topic_name },
                { "input.data.format", "JSON" },
                { "connector.class", "DynamoDbSink" },
                { "name", "DynamoDbSinkConnector_0" },
                { "kafka.auth.mode", "SERVICE_ACCOUNT" },
                { "kafka.service.account.id", confluent_service_account.App_connector.Id },
                { "aws.dynamodb.pk.hash", "value.userid" },
                { "aws.dynamodb.pk.sort", "value.pageid" },
                { "tasks.max", "1" },
            },
        }, new CustomResourceOptions
        {
            DependsOn =
            {
                confluent_kafka_acl.App_connector_describe_on_cluster, 
                confluent_kafka_acl.App_connector_read_on_target_topic, 
                confluent_kafka_acl.App_connector_create_on_dlq_lcc_topics, 
                confluent_kafka_acl.App_connector_write_on_dlq_lcc_topics, 
                confluent_kafka_acl.App_connector_create_on_success_lcc_topics, 
                confluent_kafka_acl.App_connector_write_on_success_lcc_topics, 
                confluent_kafka_acl.App_connector_create_on_error_lcc_topics, 
                confluent_kafka_acl.App_connector_write_on_error_lcc_topics, 
                confluent_kafka_acl.App_connector_read_on_connect_lcc_group, 
            },
        });
    
    });
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.confluentcloud.Connector;
    import com.pulumi.confluentcloud.ConnectorArgs;
    import com.pulumi.confluentcloud.inputs.ConnectorEnvironmentArgs;
    import com.pulumi.confluentcloud.inputs.ConnectorKafkaClusterArgs;
    import com.pulumi.resources.CustomResourceOptions;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            var sink = new Connector("sink", ConnectorArgs.builder()        
                .environment(ConnectorEnvironmentArgs.builder()
                    .id(confluent_environment.staging().id())
                    .build())
                .kafkaCluster(ConnectorKafkaClusterArgs.builder()
                    .id(confluent_kafka_cluster.basic().id())
                    .build())
                .configSensitive(Map.ofEntries(
                    Map.entry("aws.access.key.id", "***REDACTED***"),
                    Map.entry("aws.secret.access.key", "***REDACTED***")
                ))
                .configNonsensitive(Map.ofEntries(
                    Map.entry("topics", confluent_kafka_topic.orders().topic_name()),
                    Map.entry("input.data.format", "JSON"),
                    Map.entry("connector.class", "DynamoDbSink"),
                    Map.entry("name", "DynamoDbSinkConnector_0"),
                    Map.entry("kafka.auth.mode", "SERVICE_ACCOUNT"),
                    Map.entry("kafka.service.account.id", confluent_service_account.app-connector().id()),
                    Map.entry("aws.dynamodb.pk.hash", "value.userid"),
                    Map.entry("aws.dynamodb.pk.sort", "value.pageid"),
                    Map.entry("tasks.max", "1")
                ))
                .build(), CustomResourceOptions.builder()
                    .dependsOn(
                        confluent_kafka_acl.appConnectorDescribeOnCluster(),
                        confluent_kafka_acl.appConnectorReadOnTargetTopic(),
                        confluent_kafka_acl.appConnectorCreateOnDlqLccTopics(),
                        confluent_kafka_acl.appConnectorWriteOnDlqLccTopics(),
                        confluent_kafka_acl.appConnectorCreateOnSuccessLccTopics(),
                        confluent_kafka_acl.appConnectorWriteOnSuccessLccTopics(),
                        confluent_kafka_acl.appConnectorCreateOnErrorLccTopics(),
                        confluent_kafka_acl.appConnectorWriteOnErrorLccTopics(),
                        confluent_kafka_acl.appConnectorReadOnConnectLccGroup())
                    .build());
    
        }
    }
    
    resources:
      sink:
        type: confluentcloud:Connector
        properties:
          environment:
            id: ${confluent_environment.staging.id}
          kafkaCluster:
            id: ${confluent_kafka_cluster.basic.id}
          # Block for custom *sensitive* configuration properties that are labelled with "Type: password" under "Configuration Properties" section in the docs:
          #   // https://docs.confluent.io/cloud/current/connectors/cc-amazon-dynamo-db-sink.html#configuration-properties
          configSensitive:
            aws.access.key.id: '***REDACTED***'
            aws.secret.access.key: '***REDACTED***'
          # Block for custom *nonsensitive* configuration properties that are *not* labelled with "Type: password" under "Configuration Properties" section in the docs:
          #   // https://docs.confluent.io/cloud/current/connectors/cc-amazon-dynamo-db-sink.html#configuration-properties
          configNonsensitive:
            topics: ${confluent_kafka_topic.orders.topic_name}
            input.data.format: JSON
            connector.class: DynamoDbSink
            name: DynamoDbSinkConnector_0
            kafka.auth.mode: SERVICE_ACCOUNT
            kafka.service.account.id: ${confluent_service_account["app-connector"].id}
            aws.dynamodb.pk.hash: value.userid
            aws.dynamodb.pk.sort: value.pageid
            tasks.max: '1'
        options:
          dependsOn:
            - ${confluent_kafka_acl["app-connector-describe-on-cluster"]}
            - ${confluent_kafka_acl["app-connector-read-on-target-topic"]}
            - ${confluent_kafka_acl["app-connector-create-on-dlq-lcc-topics"]}
            - ${confluent_kafka_acl["app-connector-write-on-dlq-lcc-topics"]}
            - ${confluent_kafka_acl["app-connector-create-on-success-lcc-topics"]}
            - ${confluent_kafka_acl["app-connector-write-on-success-lcc-topics"]}
            - ${confluent_kafka_acl["app-connector-create-on-error-lcc-topics"]}
            - ${confluent_kafka_acl["app-connector-write-on-error-lcc-topics"]}
            - ${confluent_kafka_acl["app-connector-read-on-connect-lcc-group"]}
    

    Example Custom Datagen Source Connector that uses a Kafka API Key to communicate with your Kafka cluster

    import * as pulumi from "@pulumi/pulumi";
    import * as confluentcloud from "@pulumi/confluentcloud";
    
    const source = new confluentcloud.Connector("source", {
        environment: {
            id: confluent_environment.staging.id,
        },
        kafkaCluster: {
            id: confluent_kafka_cluster.basic.id,
        },
        configSensitive: {
            "kafka.api.key": "***REDACTED***",
            "kafka.api.secret": "***REDACTED***",
        },
        configNonsensitive: {
            "confluent.connector.type": "CUSTOM",
            "connector.class": confluent_custom_connector_plugin.source.connector_class,
            name: "DatagenConnectorExampleName",
            "kafka.auth.mode": "KAFKA_API_KEY",
            "kafka.topic": confluent_kafka_topic.orders.topic_name,
            "output.data.format": "JSON",
            quickstart: "ORDERS",
            "confluent.custom.plugin.id": confluent_custom_connector_plugin.source.id,
            "min.interval": "1000",
            "max.interval": "2000",
            "tasks.max": "1",
        },
    }, {
        dependsOn: [confluent_role_binding["app-manager-kafka-cluster-admin"]],
    });
    
    import pulumi
    import pulumi_confluentcloud as confluentcloud
    
    source = confluentcloud.Connector("source",
        environment=confluentcloud.ConnectorEnvironmentArgs(
            id=confluent_environment["staging"]["id"],
        ),
        kafka_cluster=confluentcloud.ConnectorKafkaClusterArgs(
            id=confluent_kafka_cluster["basic"]["id"],
        ),
        config_sensitive={
            "kafka.api.key": "***REDACTED***",
            "kafka.api.secret": "***REDACTED***",
        },
        config_nonsensitive={
            "confluent.connector.type": "CUSTOM",
            "connector.class": confluent_custom_connector_plugin["source"]["connector_class"],
            "name": "DatagenConnectorExampleName",
            "kafka.auth.mode": "KAFKA_API_KEY",
            "kafka.topic": confluent_kafka_topic["orders"]["topic_name"],
            "output.data.format": "JSON",
            "quickstart": "ORDERS",
            "confluent.custom.plugin.id": confluent_custom_connector_plugin["source"]["id"],
            "min.interval": "1000",
            "max.interval": "2000",
            "tasks.max": "1",
        },
        opts=pulumi.ResourceOptions(depends_on=[confluent_role_binding["app-manager-kafka-cluster-admin"]]))
    
    package main
    
    import (
    	"github.com/pulumi/pulumi-confluentcloud/sdk/go/confluentcloud"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		_, err := confluentcloud.NewConnector(ctx, "source", &confluentcloud.ConnectorArgs{
    			Environment: &confluentcloud.ConnectorEnvironmentArgs{
    				Id: pulumi.Any(confluent_environment.Staging.Id),
    			},
    			KafkaCluster: &confluentcloud.ConnectorKafkaClusterArgs{
    				Id: pulumi.Any(confluent_kafka_cluster.Basic.Id),
    			},
    			ConfigSensitive: pulumi.StringMap{
    				"kafka.api.key":    pulumi.String("***REDACTED***"),
    				"kafka.api.secret": pulumi.String("***REDACTED***"),
    			},
    			ConfigNonsensitive: pulumi.StringMap{
    				"confluent.connector.type":   pulumi.String("CUSTOM"),
    				"connector.class":            pulumi.Any(confluent_custom_connector_plugin.Source.Connector_class),
    				"name":                       pulumi.String("DatagenConnectorExampleName"),
    				"kafka.auth.mode":            pulumi.String("KAFKA_API_KEY"),
    				"kafka.topic":                pulumi.Any(confluent_kafka_topic.Orders.Topic_name),
    				"output.data.format":         pulumi.String("JSON"),
    				"quickstart":                 pulumi.String("ORDERS"),
    				"confluent.custom.plugin.id": pulumi.Any(confluent_custom_connector_plugin.Source.Id),
    				"min.interval":               pulumi.String("1000"),
    				"max.interval":               pulumi.String("2000"),
    				"tasks.max":                  pulumi.String("1"),
    			},
    		}, pulumi.DependsOn([]pulumi.Resource{
    			confluent_role_binding.AppManagerKafkaClusterAdmin,
    		}))
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using ConfluentCloud = Pulumi.ConfluentCloud;
    
    return await Deployment.RunAsync(() => 
    {
        var source = new ConfluentCloud.Connector("source", new()
        {
            Environment = new ConfluentCloud.Inputs.ConnectorEnvironmentArgs
            {
                Id = confluent_environment.Staging.Id,
            },
            KafkaCluster = new ConfluentCloud.Inputs.ConnectorKafkaClusterArgs
            {
                Id = confluent_kafka_cluster.Basic.Id,
            },
            ConfigSensitive = 
            {
                { "kafka.api.key", "***REDACTED***" },
                { "kafka.api.secret", "***REDACTED***" },
            },
            ConfigNonsensitive = 
            {
                { "confluent.connector.type", "CUSTOM" },
                { "connector.class", confluent_custom_connector_plugin.Source.Connector_class },
                { "name", "DatagenConnectorExampleName" },
                { "kafka.auth.mode", "KAFKA_API_KEY" },
                { "kafka.topic", confluent_kafka_topic.Orders.Topic_name },
                { "output.data.format", "JSON" },
                { "quickstart", "ORDERS" },
                { "confluent.custom.plugin.id", confluent_custom_connector_plugin.Source.Id },
                { "min.interval", "1000" },
                { "max.interval", "2000" },
                { "tasks.max", "1" },
            },
        }, new CustomResourceOptions
        {
            DependsOn =
            {
                confluent_role_binding.App_manager_kafka_cluster_admin, 
            },
        });
    
    });
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.confluentcloud.Connector;
    import com.pulumi.confluentcloud.ConnectorArgs;
    import com.pulumi.confluentcloud.inputs.ConnectorEnvironmentArgs;
    import com.pulumi.confluentcloud.inputs.ConnectorKafkaClusterArgs;
    import com.pulumi.resources.CustomResourceOptions;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            var source = new Connector("source", ConnectorArgs.builder()        
                .environment(ConnectorEnvironmentArgs.builder()
                    .id(confluent_environment.staging().id())
                    .build())
                .kafkaCluster(ConnectorKafkaClusterArgs.builder()
                    .id(confluent_kafka_cluster.basic().id())
                    .build())
                .configSensitive(Map.ofEntries(
                    Map.entry("kafka.api.key", "***REDACTED***"),
                    Map.entry("kafka.api.secret", "***REDACTED***")
                ))
                .configNonsensitive(Map.ofEntries(
                    Map.entry("confluent.connector.type", "CUSTOM"),
                    Map.entry("connector.class", confluent_custom_connector_plugin.source().connector_class()),
                    Map.entry("name", "DatagenConnectorExampleName"),
                    Map.entry("kafka.auth.mode", "KAFKA_API_KEY"),
                    Map.entry("kafka.topic", confluent_kafka_topic.orders().topic_name()),
                    Map.entry("output.data.format", "JSON"),
                    Map.entry("quickstart", "ORDERS"),
                    Map.entry("confluent.custom.plugin.id", confluent_custom_connector_plugin.source().id()),
                    Map.entry("min.interval", "1000"),
                    Map.entry("max.interval", "2000"),
                    Map.entry("tasks.max", "1")
                ))
                .build(), CustomResourceOptions.builder()
                    .dependsOn(confluent_role_binding.appManagerKafkaClusterAdmin())
                    .build());
    
        }
    }
    
    resources:
      source:
        type: confluentcloud:Connector
        properties:
          environment:
            id: ${confluent_environment.staging.id}
          kafkaCluster:
            id: ${confluent_kafka_cluster.basic.id}
          # Block for custom *sensitive* configuration properties that are labelled with "Type: password" under "Configuration Properties" section in the docs:
          #   // https://docs.confluent.io/platform/current/connect/userguide.html#connect-installing-plugins
          configSensitive:
            kafka.api.key: '***REDACTED***'
            kafka.api.secret: '***REDACTED***'
          # Block for custom *nonsensitive* configuration properties that are *not* labelled with "Type: password" under "Configuration Properties" section in the docs:
          #   // https://docs.confluent.io/platform/current/connect/userguide.html#connect-installing-plugins
          configNonsensitive:
            confluent.connector.type: CUSTOM
            connector.class: ${confluent_custom_connector_plugin.source.connector_class}
            name: DatagenConnectorExampleName
            kafka.auth.mode: KAFKA_API_KEY
            kafka.topic: ${confluent_kafka_topic.orders.topic_name}
            output.data.format: JSON
            quickstart: ORDERS
            confluent.custom.plugin.id: ${confluent_custom_connector_plugin.source.id}
            min.interval: '1000'
            max.interval: '2000'
            tasks.max: '1'
        options:
          dependsOn:
            - ${confluent_role_binding["app-manager-kafka-cluster-admin"]}
    

    Note: Custom connectors are available in Preview for early adopters. Preview features are introduced to gather customer feedback. This feature should be used only for evaluation and non-production testing purposes or to provide feedback to Confluent, particularly as it becomes more widely available in follow-on editions.
    Preview features are intended for evaluation use in development and testing environments only, and not for production use. The warranty, SLA, and Support Services provisions of your agreement with Confluent do not apply to Preview features. Preview features are considered to be a Proof of Concept as defined in the Confluent Cloud Terms of Service. Confluent may discontinue providing preview releases of the Preview features at any time in Confluent’s sole discretion.

    Getting Started

    The following end-to-end examples might help you get started with the confluentcloud.Connector resource:

    • s3-sink-connector
    • snowflake-sink-connector
    • managed-datagen-source-connector
    • elasticsearch-sink-connector
    • dynamo-db-sink-connector
    • mongo-db-source-connector
    • mongo-db-sink-connector
    • sql-server-cdc-debezium-source-connector
    • postgre-sql-cdc-debezium-source-connector
    • custom-datagen-source-connector

    Note: Certain connectors require additional ACL entries. See Additional ACL entries for more details.
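
    The dependsOn references in the examples above point at confluentcloud.KafkaAcl resources such as "app-connector-describe-on-cluster". As a minimal sketch (in TypeScript, with hypothetical stack-config keys such as kafkaClusterId and connectorServiceAccountId standing in for resources defined elsewhere), one such ACL could look like this:

    import * as pulumi from "@pulumi/pulumi";
    import * as confluentcloud from "@pulumi/confluentcloud";

    const cfg = new pulumi.Config();

    // Grants the connector's service account DESCRIBE on the Kafka cluster, mirroring
    // the "app-connector-describe-on-cluster" ACL the connector examples depend on.
    // The config keys below are placeholders for IDs defined elsewhere in your stack.
    const describeOnCluster = new confluentcloud.KafkaAcl("app-connector-describe-on-cluster", {
        kafkaCluster: {
            id: cfg.require("kafkaClusterId"),                          // e.g. "lkc-abc123"
        },
        resourceType: "CLUSTER",
        resourceName: "kafka-cluster",
        patternType: "LITERAL",
        principal: `User:${cfg.require("connectorServiceAccountId")}`,  // e.g. "User:sa-abc123"
        host: "*",
        operation: "DESCRIBE",
        permission: "ALLOW",
        restEndpoint: cfg.require("kafkaRestEndpoint"),
        credentials: {
            key: cfg.require("kafkaApiKey"),
            secret: cfg.requireSecret("kafkaApiSecret"),
        },
    });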

    Create Connector Resource

    Resources are created with functions called constructors. To learn more about declaring and configuring resources, see Resources.

    Constructor syntax

    new Connector(name: string, args: ConnectorArgs, opts?: CustomResourceOptions);
    @overload
    def Connector(resource_name: str,
                  args: ConnectorArgs,
                  opts: Optional[ResourceOptions] = None)
    
    @overload
    def Connector(resource_name: str,
                  opts: Optional[ResourceOptions] = None,
                  config_nonsensitive: Optional[Mapping[str, str]] = None,
                  environment: Optional[ConnectorEnvironmentArgs] = None,
                  kafka_cluster: Optional[ConnectorKafkaClusterArgs] = None,
                  config_sensitive: Optional[Mapping[str, str]] = None,
                  status: Optional[str] = None)
    func NewConnector(ctx *Context, name string, args ConnectorArgs, opts ...ResourceOption) (*Connector, error)
    public Connector(string name, ConnectorArgs args, CustomResourceOptions? opts = null)
    public Connector(String name, ConnectorArgs args)
    public Connector(String name, ConnectorArgs args, CustomResourceOptions options)
    
    type: confluentcloud:Connector
    properties: # The arguments to resource properties.
    options: # Bag of options to control resource's behavior.
    
    

    Parameters

    name string
    The unique name of the resource.
    args ConnectorArgs
    The arguments to resource properties.
    opts CustomResourceOptions
    Bag of options to control resource's behavior.
    resource_name str
    The unique name of the resource.
    args ConnectorArgs
    The arguments to resource properties.
    opts ResourceOptions
    Bag of options to control resource's behavior.
    ctx Context
    Context object for the current deployment.
    name string
    The unique name of the resource.
    args ConnectorArgs
    The arguments to resource properties.
    opts ResourceOption
    Bag of options to control resource's behavior.
    name string
    The unique name of the resource.
    args ConnectorArgs
    The arguments to resource properties.
    opts CustomResourceOptions
    Bag of options to control resource's behavior.
    name String
    The unique name of the resource.
    args ConnectorArgs
    The arguments to resource properties.
    options CustomResourceOptions
    Bag of options to control resource's behavior.

    Example

    The following reference example uses placeholder values for all input properties.

    var connectorResource = new ConfluentCloud.Connector("connectorResource", new()
    {
        ConfigNonsensitive = 
        {
            { "string", "string" },
        },
        Environment = new ConfluentCloud.Inputs.ConnectorEnvironmentArgs
        {
            Id = "string",
        },
        KafkaCluster = new ConfluentCloud.Inputs.ConnectorKafkaClusterArgs
        {
            Id = "string",
        },
        ConfigSensitive = 
        {
            { "string", "string" },
        },
        Status = "string",
    });
    
    example, err := confluentcloud.NewConnector(ctx, "connectorResource", &confluentcloud.ConnectorArgs{
    	ConfigNonsensitive: pulumi.StringMap{
    		"string": pulumi.String("string"),
    	},
    	Environment: &confluentcloud.ConnectorEnvironmentArgs{
    		Id: pulumi.String("string"),
    	},
    	KafkaCluster: &confluentcloud.ConnectorKafkaClusterArgs{
    		Id: pulumi.String("string"),
    	},
    	ConfigSensitive: pulumi.StringMap{
    		"string": pulumi.String("string"),
    	},
    	Status: pulumi.String("string"),
    })
    
    var connectorResource = new Connector("connectorResource", ConnectorArgs.builder()        
        .configNonsensitive(Map.of("string", "string"))
        .environment(ConnectorEnvironmentArgs.builder()
            .id("string")
            .build())
        .kafkaCluster(ConnectorKafkaClusterArgs.builder()
            .id("string")
            .build())
        .configSensitive(Map.of("string", "string"))
        .status("string")
        .build());
    
    connector_resource = confluentcloud.Connector("connectorResource",
        config_nonsensitive={
            "string": "string",
        },
        environment=confluentcloud.ConnectorEnvironmentArgs(
            id="string",
        ),
        kafka_cluster=confluentcloud.ConnectorKafkaClusterArgs(
            id="string",
        ),
        config_sensitive={
            "string": "string",
        },
        status="string")
    
    const connectorResource = new confluentcloud.Connector("connectorResource", {
        configNonsensitive: {
            string: "string",
        },
        environment: {
            id: "string",
        },
        kafkaCluster: {
            id: "string",
        },
        configSensitive: {
            string: "string",
        },
        status: "string",
    });
    
    type: confluentcloud:Connector
    properties:
        configNonsensitive:
            string: string
        configSensitive:
            string: string
        environment:
            id: string
        kafkaCluster:
            id: string
        status: string
    

    Connector Resource Properties

    To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.

    Inputs

    The Connector resource accepts the following input properties:

    ConfigNonsensitive Dictionary<string, string>
    Block for custom nonsensitive configuration properties that are not labelled with "Type: password" under "Configuration Properties" section in the docs:
    Environment Pulumi.ConfluentCloud.Inputs.ConnectorEnvironment
    Environment objects represent an isolated namespace for your Confluent resources for organizational purposes.
    KafkaCluster Pulumi.ConfluentCloud.Inputs.ConnectorKafkaCluster
    ConfigSensitive Dictionary<string, string>
    Block for custom sensitive configuration properties that are labelled with "Type: password" under "Configuration Properties" section in the docs:
    Status string

    The status of the connector (one of "NONE", "PROVISIONING", "RUNNING", "DEGRADED", "FAILED", "PAUSED", "DELETED"). Pausing ("RUNNING" -> "PAUSED") and resuming ("PAUSED" -> "RUNNING") a connector is supported via an update operation.

    Note: If there are no sensitive configuration settings for your connector, set config_sensitive = {} explicitly.

    Note: You may declare sensitive variables for secrets in the config_sensitive block and set them using environment variables (for example, export TF_VAR_aws_access_key_id="foo").

    ConfigNonsensitive map[string]string
    Block for custom nonsensitive configuration properties that are not labelled with "Type: password" under "Configuration Properties" section in the docs:
    Environment ConnectorEnvironmentArgs
    Environment objects represent an isolated namespace for your Confluent resources for organizational purposes.
    KafkaCluster ConnectorKafkaClusterArgs
    ConfigSensitive map[string]string
    Block for custom sensitive configuration properties that are labelled with "Type: password" under "Configuration Properties" section in the docs:
    Status string

    The status of the connector (one of "NONE", "PROVISIONING", "RUNNING", "DEGRADED", "FAILED", "PAUSED", "DELETED"). Pausing ("RUNNING" -> "PAUSED") and resuming ("PAUSED" -> "RUNNING") a connector is supported via an update operation.

    Note: If there are no sensitive configuration settings for your connector, set config_sensitive = {} explicitly.

    Note: You may declare sensitive variables for secrets in the config_sensitive block and set them using environment variables (for example, export TF_VAR_aws_access_key_id="foo").

    configNonsensitive Map<String,String>
    Block for custom nonsensitive configuration properties that are not labelled with "Type: password" under "Configuration Properties" section in the docs:
    environment ConnectorEnvironment
    Environment objects represent an isolated namespace for your Confluent resources for organizational purposes.
    kafkaCluster ConnectorKafkaCluster
    configSensitive Map<String,String>
    Block for custom sensitive configuration properties that are labelled with "Type: password" under "Configuration Properties" section in the docs:
    status String

    The status of the connector (one of "NONE", "PROVISIONING", "RUNNING", "DEGRADED", "FAILED", "PAUSED", "DELETED"). Pausing ("RUNNING" -> "PAUSED") and resuming ("PAUSED" -> "RUNNING") a connector is supported via an update operation.

    Note: If there are no sensitive configuration settings for your connector, set config_sensitive = {} explicitly.

    Note: You may declare sensitive variables for secrets in the config_sensitive block and set them using environment variables (for example, export TF_VAR_aws_access_key_id="foo").

    configNonsensitive {[key: string]: string}
    Block for custom nonsensitive configuration properties that are not labelled with "Type: password" under the "Configuration Properties" section in the docs.
    environment ConnectorEnvironment
    Environment objects represent an isolated namespace for your Confluent resources for organizational purposes.
    kafkaCluster ConnectorKafkaCluster
    configSensitive {[key: string]: string}
    Block for custom sensitive configuration properties that are labelled with "Type: password" under the "Configuration Properties" section in the docs.
    status string

    The status of the connector (one of "NONE", "PROVISIONING", "RUNNING", "DEGRADED", "FAILED", "PAUSED", "DELETED"). Pausing ("RUNNING" > "PAUSED") and resuming ("PAUSED" > "RUNNING") a connector is supported via an update operation.

    Note: If there are no sensitive configuration settings for your connector, set config_sensitive = {} explicitly.

    Note: You may declare sensitive variables for the secrets in the config_sensitive block and set them using environment variables (for example, export TF_VAR_aws_access_key_id="foo").

    config_nonsensitive Mapping[str, str]
    Block for custom nonsensitive configuration properties that are not labelled with "Type: password" under the "Configuration Properties" section in the docs.
    environment ConnectorEnvironmentArgs
    Environment objects represent an isolated namespace for your Confluent resources for organizational purposes.
    kafka_cluster ConnectorKafkaClusterArgs
    config_sensitive Mapping[str, str]
    Block for custom sensitive configuration properties that are labelled with "Type: password" under the "Configuration Properties" section in the docs.
    status str

    The status of the connector (one of "NONE", "PROVISIONING", "RUNNING", "DEGRADED", "FAILED", "PAUSED", "DELETED"). Pausing ("RUNNING" > "PAUSED") and resuming ("PAUSED" > "RUNNING") a connector is supported via an update operation.

    Note: If there are no sensitive configuration settings for your connector, set config_sensitive = {} explicitly.

    Note: You may declare sensitive variables for the secrets in the config_sensitive block and set them using environment variables (for example, export TF_VAR_aws_access_key_id="foo").

    configNonsensitive Map<String>
    Block for custom nonsensitive configuration properties that are not labelled with "Type: password" under the "Configuration Properties" section in the docs.
    environment Property Map
    Environment objects represent an isolated namespace for your Confluent resources for organizational purposes.
    kafkaCluster Property Map
    configSensitive Map<String>
    Block for custom sensitive configuration properties that are labelled with "Type: password" under the "Configuration Properties" section in the docs.
    status String

    The status of the connector (one of "NONE", "PROVISIONING", "RUNNING", "DEGRADED", "FAILED", "PAUSED", "DELETED"). Pausing ("RUNNING" > "PAUSED") and resuming ("PAUSED" > "RUNNING") a connector is supported via an update operation.

    Note: If there are no sensitive configuration settings for your connector, set config_sensitive = {} explicitly.

    Note: You may declare sensitive variables for the secrets in the config_sensitive block and set them using environment variables (for example, export TF_VAR_aws_access_key_id="foo").
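
    For connectors that do need credentials, one approach is to keep them out of configNonsensitive and supply them through configSensitive, sourcing the values from Pulumi secret config so they stay encrypted in state. The TypeScript sketch below is illustrative only: the S3_SINK connector class and the aws.access.key.id / aws.secret.access.key keys are assumptions borrowed from Confluent's managed S3 sink, the IDs are placeholders, and it shows where each kind of key goes rather than a complete connector configuration. Check your connector's "Configuration Properties" docs for the exact keys marked "Type: password".

    import * as pulumi from "@pulumi/pulumi";
    import * as confluentcloud from "@pulumi/confluentcloud";

    const cfg = new pulumi.Config();

    const sink = new confluentcloud.Connector("sink", {
        environment: { id: "env-abc123" },   // placeholder Environment ID
        kafkaCluster: { id: "lkc-abc123" },  // placeholder Kafka cluster ID
        // Values marked "Type: password" in the connector docs belong here,
        // pulled from secret config (pulumi config set --secret awsAccessKeyId ...).
        configSensitive: {
            "aws.access.key.id": cfg.requireSecret("awsAccessKeyId"),
            "aws.secret.access.key": cfg.requireSecret("awsSecretAccessKey"),
        },
        // Everything else stays in the non-sensitive block.
        configNonsensitive: {
            "connector.class": "S3_SINK",
            name: "S3_SINKConnector_0",
            "kafka.auth.mode": "SERVICE_ACCOUNT",
            "kafka.service.account.id": "sa-abc123", // placeholder service account ID
            topics: "orders",
            "tasks.max": "1",
        },
    });

    Because status is itself an input, pausing or resuming an existing connector is just another update: set status to "PAUSED" (or back to "RUNNING") on the same resource and run pulumi up.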

    Outputs

    All input properties are implicitly available as output properties. Additionally, the Connector resource produces the following output properties:

    Id string
    The provider-assigned unique ID for this managed resource.
    Id string
    The provider-assigned unique ID for this managed resource.
    id String
    The provider-assigned unique ID for this managed resource.
    id string
    The provider-assigned unique ID for this managed resource.
    id str
    The provider-assigned unique ID for this managed resource.
    id String
    The provider-assigned unique ID for this managed resource.
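
    In TypeScript, the id output can be exported like any other resource output. For example, assuming a Connector declared as source, as in the usage example earlier on this page:

    export const connectorId = source.id;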

    Look up Existing Connector Resource

    Get an existing Connector resource’s state with the given name, ID, and optional extra properties used to qualify the lookup.

    public static get(name: string, id: Input<ID>, state?: ConnectorState, opts?: CustomResourceOptions): Connector
    @staticmethod
    def get(resource_name: str,
            id: str,
            opts: Optional[ResourceOptions] = None,
            config_nonsensitive: Optional[Mapping[str, str]] = None,
            config_sensitive: Optional[Mapping[str, str]] = None,
            environment: Optional[ConnectorEnvironmentArgs] = None,
            kafka_cluster: Optional[ConnectorKafkaClusterArgs] = None,
            status: Optional[str] = None) -> Connector
    func GetConnector(ctx *Context, name string, id IDInput, state *ConnectorState, opts ...ResourceOption) (*Connector, error)
    public static Connector Get(string name, Input<string> id, ConnectorState? state, CustomResourceOptions? opts = null)
    public static Connector get(String name, Output<String> id, ConnectorState state, CustomResourceOptions options)
    Resource lookup is not supported in YAML
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    resource_name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
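
    As a concrete illustration, a minimal TypeScript lookup might look like the following; the ID shown is a placeholder for the provider-assigned resource ID (for example, taken from a stack export or another stack's output):

    import * as confluentcloud from "@pulumi/confluentcloud";

    // Placeholder ID: substitute the provider-assigned ID of the existing connector.
    const existing = confluentcloud.Connector.get("existing-connector", "lcc-abc123");

    // The looked-up resource exposes the same outputs as a newly created one.
    export const existingStatus = existing.status;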
    The following state arguments are supported:
    ConfigNonsensitive Dictionary<string, string>
    Block for custom nonsensitive configuration properties that are not labelled with "Type: password" under the "Configuration Properties" section in the docs.
    ConfigSensitive Dictionary<string, string>
    Block for custom sensitive configuration properties that are labelled with "Type: password" under the "Configuration Properties" section in the docs.
    Environment Pulumi.ConfluentCloud.Inputs.ConnectorEnvironment
    Environment objects represent an isolated namespace for your Confluent resources for organizational purposes.
    KafkaCluster Pulumi.ConfluentCloud.Inputs.ConnectorKafkaCluster
    Status string

    The status of the connector (one of "NONE", "PROVISIONING", "RUNNING", "DEGRADED", "FAILED", "PAUSED", "DELETED"). Pausing ("RUNNING" > "PAUSED") and resuming ("PAUSED" > "RUNNING") a connector is supported via an update operation.

    Note: If there are no sensitive configuration settings for your connector, set config_sensitive = {} explicitly.

    Note: You may declare sensitive variables for the secrets in the config_sensitive block and set them using environment variables (for example, export TF_VAR_aws_access_key_id="foo").

    ConfigNonsensitive map[string]string
    Block for custom nonsensitive configuration properties that are not labelled with "Type: password" under the "Configuration Properties" section in the docs.
    ConfigSensitive map[string]string
    Block for custom sensitive configuration properties that are labelled with "Type: password" under the "Configuration Properties" section in the docs.
    Environment ConnectorEnvironmentArgs
    Environment objects represent an isolated namespace for your Confluent resources for organizational purposes.
    KafkaCluster ConnectorKafkaClusterArgs
    Status string

    The status of the connector (one of "NONE", "PROVISIONING", "RUNNING", "DEGRADED", "FAILED", "PAUSED", "DELETED"). Pausing ("RUNNING" > "PAUSED") and resuming ("PAUSED" > "RUNNING") a connector is supported via an update operation.

    Note: If there are no sensitive configuration settings for your connector, set config_sensitive = {} explicitly.

    Note: You may declare sensitive variables for the secrets in the config_sensitive block and set them using environment variables (for example, export TF_VAR_aws_access_key_id="foo").

    configNonsensitive Map<String,String>
    Block for custom nonsensitive configuration properties that are not labelled with "Type: password" under the "Configuration Properties" section in the docs.
    configSensitive Map<String,String>
    Block for custom sensitive configuration properties that are labelled with "Type: password" under the "Configuration Properties" section in the docs.
    environment ConnectorEnvironment
    Environment objects represent an isolated namespace for your Confluent resources for organizational purposes.
    kafkaCluster ConnectorKafkaCluster
    status String

    The status of the connector (one of "NONE", "PROVISIONING", "RUNNING", "DEGRADED", "FAILED", "PAUSED", "DELETED"). Pausing ("RUNNING" > "PAUSED") and resuming ("PAUSED" > "RUNNING") a connector is supported via an update operation.

    Note: If there are no sensitive configuration settings for your connector, set config_sensitive = {} explicitly.

    Note: You may declare sensitive variables for the secrets in the config_sensitive block and set them using environment variables (for example, export TF_VAR_aws_access_key_id="foo").

    configNonsensitive {[key: string]: string}
    Block for custom nonsensitive configuration properties that are not labelled with "Type: password" under the "Configuration Properties" section in the docs.
    configSensitive {[key: string]: string}
    Block for custom sensitive configuration properties that are labelled with "Type: password" under the "Configuration Properties" section in the docs.
    environment ConnectorEnvironment
    Environment objects represent an isolated namespace for your Confluent resources for organizational purposes.
    kafkaCluster ConnectorKafkaCluster
    status string

    The status of the connector (one of "NONE", "PROVISIONING", "RUNNING", "DEGRADED", "FAILED", "PAUSED", "DELETED"). Pausing ("RUNNING" > "PAUSED") and resuming ("PAUSED" > "RUNNING") a connector is supported via an update operation.

    Note: If there are no sensitive configuration settings for your connector, set config_sensitive = {} explicitly.

    Note: You may declare sensitive variables for the secrets in the config_sensitive block and set them using environment variables (for example, export TF_VAR_aws_access_key_id="foo").

    config_nonsensitive Mapping[str, str]
    Block for custom nonsensitive configuration properties that are not labelled with "Type: password" under the "Configuration Properties" section in the docs.
    config_sensitive Mapping[str, str]
    Block for custom sensitive configuration properties that are labelled with "Type: password" under the "Configuration Properties" section in the docs.
    environment ConnectorEnvironmentArgs
    Environment objects represent an isolated namespace for your Confluent resources for organizational purposes.
    kafka_cluster ConnectorKafkaClusterArgs
    status str

    The status of the connector (one of "NONE", "PROVISIONING", "RUNNING", "DEGRADED", "FAILED", "PAUSED", "DELETED"). Pausing ("RUNNING" > "PAUSED") and resuming ("PAUSED" > "RUNNING") a connector is supported via an update operation.

    Note: If there are no sensitive configuration settings for your connector, set config_sensitive = {} explicitly.

    Note: You may declare sensitive variables for the secrets in the config_sensitive block and set them using environment variables (for example, export TF_VAR_aws_access_key_id="foo").

    configNonsensitive Map<String>
    Block for custom nonsensitive configuration properties that are not labelled with "Type: password" under the "Configuration Properties" section in the docs.
    configSensitive Map<String>
    Block for custom sensitive configuration properties that are labelled with "Type: password" under the "Configuration Properties" section in the docs.
    environment Property Map
    Environment objects represent an isolated namespace for your Confluent resources for organizational purposes.
    kafkaCluster Property Map
    status String

    The status of the connector (one of "NONE", "PROVISIONING", "RUNNING", "DEGRADED", "FAILED", "PAUSED", "DELETED"). Pausing ("RUNNING" > "PAUSED") and resuming ("PAUSED" > "RUNNING") a connector is supported via an update operation.

    Note: If there are no sensitive configuration settings for your connector, set config_sensitive = {} explicitly.

    Note: You may declare sensitive variables for the secrets in the config_sensitive block and set them using environment variables (for example, export TF_VAR_aws_access_key_id="foo").

    Supporting Types

    ConnectorEnvironment, ConnectorEnvironmentArgs

    Id string
    The ID of the Environment that the connector belongs to, for example, env-abc123.
    Id string
    The ID of the Environment that the connector belongs to, for example, env-abc123.
    id String
    The ID of the Environment that the connector belongs to, for example, env-abc123.
    id string
    The ID of the Environment that the connector belongs to, for example, env-abc123.
    id str
    The ID of the Environment that the connector belongs to, for example, env-abc123.
    id String
    The ID of the Environment that the connector belongs to, for example, env-abc123.

    ConnectorKafkaCluster, ConnectorKafkaClusterArgs

    Id string
    The ID of the Kafka cluster that the connector belongs to, for example, lkc-abc123.
    Id string
    The ID of the Kafka cluster that the connector belongs to, for example, lkc-abc123.
    id String
    The ID of the Kafka cluster that the connector belongs to, for example, lkc-abc123.
    id string
    The ID of the Kafka cluster that the connector belongs to, for example, lkc-abc123.
    id str
    The ID of the Kafka cluster that the connector belongs to, for example, lkc-abc123.
    id String
    The ID of the Kafka cluster that the connector belongs to, for example, lkc-abc123.

    Import

    You can import a connector by using the Environment ID, Kafka cluster ID, and connector name, in the format <Environment ID>/<Kafka cluster ID>/<Connector name>, for example:

    $ export CONFLUENT_CLOUD_API_KEY="<cloud_api_key>"

    $ export CONFLUENT_CLOUD_API_SECRET="<cloud_api_secret>"

    $ pulumi import confluentcloud:index/connector:Connector my_connector "env-abc123/lkc-abc123/S3_SINKConnector_0"
    

    To learn more about importing existing cloud resources, see Importing resources.

    Package Details

    Repository
    Confluent Cloud pulumi/pulumi-confluentcloud
    License
    Apache-2.0
    Notes
    This Pulumi package is based on the confluent Terraform Provider.