1. Packages
  2. Confluent Provider
  3. API Docs
  4. CatalogIntegration
Confluent v2.24.0 published on Saturday, Apr 19, 2025 by Pulumi

confluentcloud.CatalogIntegration

Explore with Pulumi AI

confluentcloud logo
Confluent v2.24.0 published on Saturday, Apr 19, 2025 by Pulumi

    Example Usage

    Option #1: Manage multiple Catalog Integrations in the same Pulumi Stack

    import * as pulumi from "@pulumi/pulumi";
    import * as confluentcloud from "@pulumi/confluentcloud";
    
    const example = new confluentcloud.CatalogIntegration("example", {
        environment: {
            id: staging.id,
        },
        kafkaCluster: {
            id: stagingConfluentKafkaCluster.id,
        },
        displayName: "catalog-integration-1",
        awsGlue: {
            providerIntegrationId: main.id,
        },
        credentials: {
            key: env_admin_tableflow_api_key.id,
            secret: env_admin_tableflow_api_key.secret,
        },
    });
    
    import pulumi
    import pulumi_confluentcloud as confluentcloud
    
    example = confluentcloud.CatalogIntegration("example",
        environment={
            "id": staging["id"],
        },
        kafka_cluster={
            "id": staging_confluent_kafka_cluster["id"],
        },
        display_name="catalog-integration-1",
        aws_glue={
            "provider_integration_id": main["id"],
        },
        credentials={
            "key": env_admin_tableflow_api_key["id"],
            "secret": env_admin_tableflow_api_key["secret"],
        })
    
    package main
    
    import (
    	"github.com/pulumi/pulumi-confluentcloud/sdk/v2/go/confluentcloud"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		_, err := confluentcloud.NewCatalogIntegration(ctx, "example", &confluentcloud.CatalogIntegrationArgs{
    			Environment: &confluentcloud.CatalogIntegrationEnvironmentArgs{
    				Id: pulumi.Any(staging.Id),
    			},
    			KafkaCluster: &confluentcloud.CatalogIntegrationKafkaClusterArgs{
    				Id: pulumi.Any(stagingConfluentKafkaCluster.Id),
    			},
    			DisplayName: pulumi.String("catalog-integration-1"),
    			AwsGlue: &confluentcloud.CatalogIntegrationAwsGlueArgs{
    				ProviderIntegrationId: pulumi.Any(main.Id),
    			},
    			Credentials: &confluentcloud.CatalogIntegrationCredentialsArgs{
    				Key:    pulumi.Any(env_admin_tableflow_api_key.Id),
    				Secret: pulumi.Any(env_admin_tableflow_api_key.Secret),
    			},
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using ConfluentCloud = Pulumi.ConfluentCloud;
    
    return await Deployment.RunAsync(() => 
    {
        var example = new ConfluentCloud.CatalogIntegration("example", new()
        {
            Environment = new ConfluentCloud.Inputs.CatalogIntegrationEnvironmentArgs
            {
                Id = staging.Id,
            },
            KafkaCluster = new ConfluentCloud.Inputs.CatalogIntegrationKafkaClusterArgs
            {
                Id = stagingConfluentKafkaCluster.Id,
            },
            DisplayName = "catalog-integration-1",
            AwsGlue = new ConfluentCloud.Inputs.CatalogIntegrationAwsGlueArgs
            {
                ProviderIntegrationId = main.Id,
            },
            Credentials = new ConfluentCloud.Inputs.CatalogIntegrationCredentialsArgs
            {
                Key = env_admin_tableflow_api_key.Id,
                Secret = env_admin_tableflow_api_key.Secret,
            },
        });
    
    });
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.confluentcloud.CatalogIntegration;
    import com.pulumi.confluentcloud.CatalogIntegrationArgs;
    import com.pulumi.confluentcloud.inputs.CatalogIntegrationEnvironmentArgs;
    import com.pulumi.confluentcloud.inputs.CatalogIntegrationKafkaClusterArgs;
    import com.pulumi.confluentcloud.inputs.CatalogIntegrationAwsGlueArgs;
    import com.pulumi.confluentcloud.inputs.CatalogIntegrationCredentialsArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            var example = new CatalogIntegration("example", CatalogIntegrationArgs.builder()
                .environment(CatalogIntegrationEnvironmentArgs.builder()
                    .id(staging.id())
                    .build())
                .kafkaCluster(CatalogIntegrationKafkaClusterArgs.builder()
                    .id(stagingConfluentKafkaCluster.id())
                    .build())
                .displayName("catalog-integration-1")
                .awsGlue(CatalogIntegrationAwsGlueArgs.builder()
                    .providerIntegrationId(main.id())
                    .build())
                .credentials(CatalogIntegrationCredentialsArgs.builder()
                    .key(env_admin_tableflow_api_key.id())
                    .secret(env_admin_tableflow_api_key.secret())
                    .build())
                .build());
    
        }
    }
    
    resources:
      example:
        type: confluentcloud:CatalogIntegration
        properties:
          environment:
            id: ${staging.id}
          kafkaCluster:
            id: ${stagingConfluentKafkaCluster.id}
          displayName: catalog-integration-1
          awsGlue:
            providerIntegrationId: ${main.id}
          credentials:
            key: ${["env-admin-tableflow-api-key"].id}
            secret: ${["env-admin-tableflow-api-key"].secret}
    

    Option #2: Manage a single Catalog Integration in the same Pulumi Stack

    import * as pulumi from "@pulumi/pulumi";
    import * as confluentcloud from "@pulumi/confluentcloud";
    
    const example = new confluentcloud.CatalogIntegration("example", {
        environment: {
            id: staging.id,
        },
        kafkaCluster: {
            id: stagingConfluentKafkaCluster.id,
        },
        displayName: "catalog-integration-1",
        snowflake: {
            endpoint: "https://vuser1_polaris.snowflakecomputing.com/",
            clientId: "***REDACTED***",
            clientSecret: "***REDACTED***",
            warehouse: "catalog-name",
            allowedScope: "session:role:R1",
        },
    });
    
    import pulumi
    import pulumi_confluentcloud as confluentcloud
    
    example = confluentcloud.CatalogIntegration("example",
        environment={
            "id": staging["id"],
        },
        kafka_cluster={
            "id": staging_confluent_kafka_cluster["id"],
        },
        display_name="catalog-integration-1",
        snowflake={
            "endpoint": "https://vuser1_polaris.snowflakecomputing.com/",
            "client_id": "***REDACTED***",
            "client_secret": "***REDACTED***",
            "warehouse": "catalog-name",
            "allowed_scope": "session:role:R1",
        })
    
    package main
    
    import (
    	"github.com/pulumi/pulumi-confluentcloud/sdk/v2/go/confluentcloud"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		_, err := confluentcloud.NewCatalogIntegration(ctx, "example", &confluentcloud.CatalogIntegrationArgs{
    			Environment: &confluentcloud.CatalogIntegrationEnvironmentArgs{
    				Id: pulumi.Any(staging.Id),
    			},
    			KafkaCluster: &confluentcloud.CatalogIntegrationKafkaClusterArgs{
    				Id: pulumi.Any(stagingConfluentKafkaCluster.Id),
    			},
    			DisplayName: pulumi.String("catalog-integration-1"),
    			Snowflake: &confluentcloud.CatalogIntegrationSnowflakeArgs{
    				Endpoint:     pulumi.String("https://vuser1_polaris.snowflakecomputing.com/"),
    				ClientId:     pulumi.String("***REDACTED***"),
    				ClientSecret: pulumi.String("***REDACTED***"),
    				Warehouse:    pulumi.String("catalog-name"),
    				AllowedScope: pulumi.String("session:role:R1"),
    			},
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using ConfluentCloud = Pulumi.ConfluentCloud;
    
    return await Deployment.RunAsync(() => 
    {
        var example = new ConfluentCloud.CatalogIntegration("example", new()
        {
            Environment = new ConfluentCloud.Inputs.CatalogIntegrationEnvironmentArgs
            {
                Id = staging.Id,
            },
            KafkaCluster = new ConfluentCloud.Inputs.CatalogIntegrationKafkaClusterArgs
            {
                Id = stagingConfluentKafkaCluster.Id,
            },
            DisplayName = "catalog-integration-1",
            Snowflake = new ConfluentCloud.Inputs.CatalogIntegrationSnowflakeArgs
            {
                Endpoint = "https://vuser1_polaris.snowflakecomputing.com/",
                ClientId = "***REDACTED***",
                ClientSecret = "***REDACTED***",
                Warehouse = "catalog-name",
                AllowedScope = "session:role:R1",
            },
        });
    
    });
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.confluentcloud.CatalogIntegration;
    import com.pulumi.confluentcloud.CatalogIntegrationArgs;
    import com.pulumi.confluentcloud.inputs.CatalogIntegrationEnvironmentArgs;
    import com.pulumi.confluentcloud.inputs.CatalogIntegrationKafkaClusterArgs;
    import com.pulumi.confluentcloud.inputs.CatalogIntegrationSnowflakeArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            var example = new CatalogIntegration("example", CatalogIntegrationArgs.builder()
                .environment(CatalogIntegrationEnvironmentArgs.builder()
                    .id(staging.id())
                    .build())
                .kafkaCluster(CatalogIntegrationKafkaClusterArgs.builder()
                    .id(stagingConfluentKafkaCluster.id())
                    .build())
                .displayName("catalog-integration-1")
                .snowflake(CatalogIntegrationSnowflakeArgs.builder()
                    .endpoint("https://vuser1_polaris.snowflakecomputing.com/")
                    .clientId("***REDACTED***")
                    .clientSecret("***REDACTED***")
                    .warehouse("catalog-name")
                    .allowedScope("session:role:R1")
                    .build())
                .build());
    
        }
    }
    
    resources:
      example:
        type: confluentcloud:CatalogIntegration
        properties:
          environment:
            id: ${staging.id}
          kafkaCluster:
            id: ${stagingConfluentKafkaCluster.id}
          displayName: catalog-integration-1
          snowflake:
            endpoint: https://vuser1_polaris.snowflakecomputing.com/
            clientId: '***REDACTED***'
            clientSecret: '***REDACTED***'
            warehouse: catalog-name
            allowedScope: session:role:R1
    

    Create CatalogIntegration Resource

    Resources are created with functions called constructors. To learn more about declaring and configuring resources, see Resources.

    Constructor syntax

    new CatalogIntegration(name: string, args: CatalogIntegrationArgs, opts?: CustomResourceOptions);
    @overload
    def CatalogIntegration(resource_name: str,
                           args: CatalogIntegrationArgs,
                           opts: Optional[ResourceOptions] = None)
    
    @overload
    def CatalogIntegration(resource_name: str,
                           opts: Optional[ResourceOptions] = None,
                           display_name: Optional[str] = None,
                           environment: Optional[CatalogIntegrationEnvironmentArgs] = None,
                           kafka_cluster: Optional[CatalogIntegrationKafkaClusterArgs] = None,
                           aws_glue: Optional[CatalogIntegrationAwsGlueArgs] = None,
                           credentials: Optional[CatalogIntegrationCredentialsArgs] = None,
                           snowflake: Optional[CatalogIntegrationSnowflakeArgs] = None)
    func NewCatalogIntegration(ctx *Context, name string, args CatalogIntegrationArgs, opts ...ResourceOption) (*CatalogIntegration, error)
    public CatalogIntegration(string name, CatalogIntegrationArgs args, CustomResourceOptions? opts = null)
    public CatalogIntegration(String name, CatalogIntegrationArgs args)
    public CatalogIntegration(String name, CatalogIntegrationArgs args, CustomResourceOptions options)
    
    type: confluentcloud:CatalogIntegration
    properties: # The arguments to resource properties.
    options: # Bag of options to control resource's behavior.
    
    

    Parameters

    name string
    The unique name of the resource.
    args CatalogIntegrationArgs
    The arguments to resource properties.
    opts CustomResourceOptions
    Bag of options to control resource's behavior.
    resource_name str
    The unique name of the resource.
    args CatalogIntegrationArgs
    The arguments to resource properties.
    opts ResourceOptions
    Bag of options to control resource's behavior.
    ctx Context
    Context object for the current deployment.
    name string
    The unique name of the resource.
    args CatalogIntegrationArgs
    The arguments to resource properties.
    opts ResourceOption
    Bag of options to control resource's behavior.
    name string
    The unique name of the resource.
    args CatalogIntegrationArgs
    The arguments to resource properties.
    opts CustomResourceOptions
    Bag of options to control resource's behavior.
    name String
    The unique name of the resource.
    args CatalogIntegrationArgs
    The arguments to resource properties.
    options CustomResourceOptions
    Bag of options to control resource's behavior.

    Constructor example

    The following reference example uses placeholder values for all input properties.

    var catalogIntegrationResource = new ConfluentCloud.CatalogIntegration("catalogIntegrationResource", new()
    {
        DisplayName = "string",
        Environment = new ConfluentCloud.Inputs.CatalogIntegrationEnvironmentArgs
        {
            Id = "string",
        },
        KafkaCluster = new ConfluentCloud.Inputs.CatalogIntegrationKafkaClusterArgs
        {
            Id = "string",
        },
        AwsGlue = new ConfluentCloud.Inputs.CatalogIntegrationAwsGlueArgs
        {
            ProviderIntegrationId = "string",
        },
        Credentials = new ConfluentCloud.Inputs.CatalogIntegrationCredentialsArgs
        {
            Key = "string",
            Secret = "string",
        },
        Snowflake = new ConfluentCloud.Inputs.CatalogIntegrationSnowflakeArgs
        {
            AllowedScope = "string",
            ClientId = "string",
            ClientSecret = "string",
            Endpoint = "string",
            Warehouse = "string",
        },
    });
    
    example, err := confluentcloud.NewCatalogIntegration(ctx, "catalogIntegrationResource", &confluentcloud.CatalogIntegrationArgs{
    	DisplayName: pulumi.String("string"),
    	Environment: &confluentcloud.CatalogIntegrationEnvironmentArgs{
    		Id: pulumi.String("string"),
    	},
    	KafkaCluster: &confluentcloud.CatalogIntegrationKafkaClusterArgs{
    		Id: pulumi.String("string"),
    	},
    	AwsGlue: &confluentcloud.CatalogIntegrationAwsGlueArgs{
    		ProviderIntegrationId: pulumi.String("string"),
    	},
    	Credentials: &confluentcloud.CatalogIntegrationCredentialsArgs{
    		Key:    pulumi.String("string"),
    		Secret: pulumi.String("string"),
    	},
    	Snowflake: &confluentcloud.CatalogIntegrationSnowflakeArgs{
    		AllowedScope: pulumi.String("string"),
    		ClientId:     pulumi.String("string"),
    		ClientSecret: pulumi.String("string"),
    		Endpoint:     pulumi.String("string"),
    		Warehouse:    pulumi.String("string"),
    	},
    })
    
    var catalogIntegrationResource = new CatalogIntegration("catalogIntegrationResource", CatalogIntegrationArgs.builder()
        .displayName("string")
        .environment(CatalogIntegrationEnvironmentArgs.builder()
            .id("string")
            .build())
        .kafkaCluster(CatalogIntegrationKafkaClusterArgs.builder()
            .id("string")
            .build())
        .awsGlue(CatalogIntegrationAwsGlueArgs.builder()
            .providerIntegrationId("string")
            .build())
        .credentials(CatalogIntegrationCredentialsArgs.builder()
            .key("string")
            .secret("string")
            .build())
        .snowflake(CatalogIntegrationSnowflakeArgs.builder()
            .allowedScope("string")
            .clientId("string")
            .clientSecret("string")
            .endpoint("string")
            .warehouse("string")
            .build())
        .build());
    
    catalog_integration_resource = confluentcloud.CatalogIntegration("catalogIntegrationResource",
        display_name="string",
        environment={
            "id": "string",
        },
        kafka_cluster={
            "id": "string",
        },
        aws_glue={
            "provider_integration_id": "string",
        },
        credentials={
            "key": "string",
            "secret": "string",
        },
        snowflake={
            "allowed_scope": "string",
            "client_id": "string",
            "client_secret": "string",
            "endpoint": "string",
            "warehouse": "string",
        })
    
    const catalogIntegrationResource = new confluentcloud.CatalogIntegration("catalogIntegrationResource", {
        displayName: "string",
        environment: {
            id: "string",
        },
        kafkaCluster: {
            id: "string",
        },
        awsGlue: {
            providerIntegrationId: "string",
        },
        credentials: {
            key: "string",
            secret: "string",
        },
        snowflake: {
            allowedScope: "string",
            clientId: "string",
            clientSecret: "string",
            endpoint: "string",
            warehouse: "string",
        },
    });
    
    type: confluentcloud:CatalogIntegration
    properties:
        awsGlue:
            providerIntegrationId: string
        credentials:
            key: string
            secret: string
        displayName: string
        environment:
            id: string
        kafkaCluster:
            id: string
        snowflake:
            allowedScope: string
            clientId: string
            clientSecret: string
            endpoint: string
            warehouse: string
    

    CatalogIntegration Resource Properties

    To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.

    Inputs

    In Python, inputs that are objects can be passed either as argument classes or as dictionary literals.

    The CatalogIntegration resource accepts the following input properties:

    DisplayName string
    The name of the catalog integration.
    Environment CatalogIntegrationEnvironmentArgs
    Environment objects represent an isolated namespace for your Confluent resources for organizational purposes.
    KafkaCluster CatalogIntegrationKafkaClusterArgs
    AwsGlue CatalogIntegrationAwsGlueArgs
    supports the following (see Integrate Tableflow with the AWS Glue Catalog in Confluent Cloud for more details):
    Credentials CatalogIntegrationCredentialsArgs
    The Cluster API Credentials.
    Snowflake CatalogIntegrationSnowflakeArgs
    supports the following (see Integrate Tableflow with Snowflake Open Catalog or Apache Polaris in Confluent Cloud for more details):
    displayName String
    The name of the catalog integration.
    environment CatalogIntegrationEnvironment
    Environment objects represent an isolated namespace for your Confluent resources for organizational purposes.
    kafkaCluster CatalogIntegrationKafkaCluster
    awsGlue CatalogIntegrationAwsGlue
    supports the following (see Integrate Tableflow with the AWS Glue Catalog in Confluent Cloud for more details):
    credentials CatalogIntegrationCredentials
    The Cluster API Credentials.
    snowflake CatalogIntegrationSnowflake
    supports the following (see Integrate Tableflow with Snowflake Open Catalog or Apache Polaris in Confluent Cloud for more details):
    displayName string
    The name of the catalog integration.
    environment CatalogIntegrationEnvironment
    Environment objects represent an isolated namespace for your Confluent resources for organizational purposes.
    kafkaCluster CatalogIntegrationKafkaCluster
    awsGlue CatalogIntegrationAwsGlue
    supports the following (see Integrate Tableflow with the AWS Glue Catalog in Confluent Cloud for more details):
    credentials CatalogIntegrationCredentials
    The Cluster API Credentials.
    snowflake CatalogIntegrationSnowflake
    supports the following (see Integrate Tableflow with Snowflake Open Catalog or Apache Polaris in Confluent Cloud for more details):
    display_name str
    The name of the catalog integration.
    environment CatalogIntegrationEnvironmentArgs
    Environment objects represent an isolated namespace for your Confluent resources for organizational purposes.
    kafka_cluster CatalogIntegrationKafkaClusterArgs
    aws_glue CatalogIntegrationAwsGlueArgs
    supports the following (see Integrate Tableflow with the AWS Glue Catalog in Confluent Cloud for more details):
    credentials CatalogIntegrationCredentialsArgs
    The Cluster API Credentials.
    snowflake CatalogIntegrationSnowflakeArgs
    supports the following (see Integrate Tableflow with Snowflake Open Catalog or Apache Polaris in Confluent Cloud for more details):
    displayName String
    The name of the catalog integration.
    environment Property Map
    Environment objects represent an isolated namespace for your Confluent resources for organizational purposes.
    kafkaCluster Property Map
    awsGlue Property Map
    supports the following (see Integrate Tableflow with the AWS Glue Catalog in Confluent Cloud for more details):
    credentials Property Map
    The Cluster API Credentials.
    snowflake Property Map
    supports the following (see Integrate Tableflow with Snowflake Open Catalog or Apache Polaris in Confluent Cloud for more details):

    Outputs

    All input properties are implicitly available as output properties. Additionally, the CatalogIntegration resource produces the following output properties:

    Id string
    The provider-assigned unique ID for this managed resource.
    Suspended bool
    (Optional Boolean) Indicates whether the Catalog Integration should be suspended.
    Id string
    The provider-assigned unique ID for this managed resource.
    Suspended bool
    (Optional Boolean) Indicates whether the Catalog Integration should be suspended.
    id String
    The provider-assigned unique ID for this managed resource.
    suspended Boolean
    (Optional Boolean) Indicates whether the Catalog Integration should be suspended.
    id string
    The provider-assigned unique ID for this managed resource.
    suspended boolean
    (Optional Boolean) Indicates whether the Catalog Integration should be suspended.
    id str
    The provider-assigned unique ID for this managed resource.
    suspended bool
    (Optional Boolean) Indicates whether the Catalog Integration should be suspended.
    id String
    The provider-assigned unique ID for this managed resource.
    suspended Boolean
    (Optional Boolean) Indicates whether the Catalog Integration should be suspended.

    Look up Existing CatalogIntegration Resource

    Get an existing CatalogIntegration resource’s state with the given name, ID, and optional extra properties used to qualify the lookup.

    public static get(name: string, id: Input<ID>, state?: CatalogIntegrationState, opts?: CustomResourceOptions): CatalogIntegration
    @staticmethod
    def get(resource_name: str,
            id: str,
            opts: Optional[ResourceOptions] = None,
            aws_glue: Optional[CatalogIntegrationAwsGlueArgs] = None,
            credentials: Optional[CatalogIntegrationCredentialsArgs] = None,
            display_name: Optional[str] = None,
            environment: Optional[CatalogIntegrationEnvironmentArgs] = None,
            kafka_cluster: Optional[CatalogIntegrationKafkaClusterArgs] = None,
            snowflake: Optional[CatalogIntegrationSnowflakeArgs] = None,
            suspended: Optional[bool] = None) -> CatalogIntegration
    func GetCatalogIntegration(ctx *Context, name string, id IDInput, state *CatalogIntegrationState, opts ...ResourceOption) (*CatalogIntegration, error)
    public static CatalogIntegration Get(string name, Input<string> id, CatalogIntegrationState? state, CustomResourceOptions? opts = null)
    public static CatalogIntegration get(String name, Output<String> id, CatalogIntegrationState state, CustomResourceOptions options)
    resources:
      _:
        type: confluentcloud:CatalogIntegration
        get:
          id: ${id}
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    resource_name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    The following state arguments are supported:
    AwsGlue Pulumi.ConfluentCloud.Inputs.CatalogIntegrationAwsGlue
    supports the following (see Integrate Tableflow with the AWS Glue Catalog in Confluent Cloud for more details):
    Credentials Pulumi.ConfluentCloud.Inputs.CatalogIntegrationCredentials
    The Cluster API Credentials.
    DisplayName string
    The name of the catalog integration.
    Environment Pulumi.ConfluentCloud.Inputs.CatalogIntegrationEnvironment
    Environment objects represent an isolated namespace for your Confluent resources for organizational purposes.
    KafkaCluster Pulumi.ConfluentCloud.Inputs.CatalogIntegrationKafkaCluster
    Snowflake Pulumi.ConfluentCloud.Inputs.CatalogIntegrationSnowflake
    supports the following (see Integrate Tableflow with Snowflake Open Catalog or Apache Polaris in Confluent Cloud for more details):
    Suspended bool
    (Optional Boolean) Indicates whether the Catalog Integration should be suspended.
    AwsGlue CatalogIntegrationAwsGlueArgs
    supports the following (see Integrate Tableflow with the AWS Glue Catalog in Confluent Cloud for more details):
    Credentials CatalogIntegrationCredentialsArgs
    The Cluster API Credentials.
    DisplayName string
    The name of the catalog integration.
    Environment CatalogIntegrationEnvironmentArgs
    Environment objects represent an isolated namespace for your Confluent resources for organizational purposes.
    KafkaCluster CatalogIntegrationKafkaClusterArgs
    Snowflake CatalogIntegrationSnowflakeArgs
    supports the following (see Integrate Tableflow with Snowflake Open Catalog or Apache Polaris in Confluent Cloud for more details):
    Suspended bool
    (Optional Boolean) Indicates whether the Catalog Integration should be suspended.
    awsGlue CatalogIntegrationAwsGlue
    supports the following (see Integrate Tableflow with the AWS Glue Catalog in Confluent Cloud for more details):
    credentials CatalogIntegrationCredentials
    The Cluster API Credentials.
    displayName String
    The name of the catalog integration.
    environment CatalogIntegrationEnvironment
    Environment objects represent an isolated namespace for your Confluent resources for organizational purposes.
    kafkaCluster CatalogIntegrationKafkaCluster
    snowflake CatalogIntegrationSnowflake
    supports the following (see Integrate Tableflow with Snowflake Open Catalog or Apache Polaris in Confluent Cloud for more details):
    suspended Boolean
    (Optional Boolean) Indicates whether the Catalog Integration should be suspended.
    awsGlue CatalogIntegrationAwsGlue
    supports the following (see Integrate Tableflow with the AWS Glue Catalog in Confluent Cloud for more details):
    credentials CatalogIntegrationCredentials
    The Cluster API Credentials.
    displayName string
    The name of the catalog integration.
    environment CatalogIntegrationEnvironment
    Environment objects represent an isolated namespace for your Confluent resources for organizational purposes.
    kafkaCluster CatalogIntegrationKafkaCluster
    snowflake CatalogIntegrationSnowflake
    supports the following (see Integrate Tableflow with Snowflake Open Catalog or Apache Polaris in Confluent Cloud for more details):
    suspended boolean
    (Optional Boolean) Indicates whether the Catalog Integration should be suspended.
    aws_glue CatalogIntegrationAwsGlueArgs
    supports the following (see Integrate Tableflow with the AWS Glue Catalog in Confluent Cloud for more details):
    credentials CatalogIntegrationCredentialsArgs
    The Cluster API Credentials.
    display_name str
    The name of the catalog integration.
    environment CatalogIntegrationEnvironmentArgs
    Environment objects represent an isolated namespace for your Confluent resources for organizational purposes.
    kafka_cluster CatalogIntegrationKafkaClusterArgs
    snowflake CatalogIntegrationSnowflakeArgs
    supports the following (see Integrate Tableflow with Snowflake Open Catalog or Apache Polaris in Confluent Cloud for more details):
    suspended bool
    (Optional Boolean) Indicates whether the Catalog Integration should be suspended.
    awsGlue Property Map
    supports the following (see Integrate Tableflow with the AWS Glue Catalog in Confluent Cloud for more details):
    credentials Property Map
    The Cluster API Credentials.
    displayName String
    The name of the catalog integration.
    environment Property Map
    Environment objects represent an isolated namespace for your Confluent resources for organizational purposes.
    kafkaCluster Property Map
    snowflake Property Map
    supports the following (see Integrate Tableflow with Snowflake Open Catalog or Apache Polaris in Confluent Cloud for more details):
    suspended Boolean
    (Optional Boolean) Indicates whether the Catalog Integration should be suspended.

    Supporting Types

    CatalogIntegrationAwsGlue, CatalogIntegrationAwsGlueArgs

    ProviderIntegrationId string
    The provider integration id.
    ProviderIntegrationId string
    The provider integration id.
    providerIntegrationId String
    The provider integration id.
    providerIntegrationId string
    The provider integration id.
    provider_integration_id str
    The provider integration id.
    providerIntegrationId String
    The provider integration id.

    CatalogIntegrationCredentials, CatalogIntegrationCredentialsArgs

    Key string
    The Tableflow API Key.
    Secret string
    The Tableflow API Secret for your Confluent Cloud cluster.
    Key string
    The Tableflow API Key.
    Secret string
    The Tableflow API Secret for your Confluent Cloud cluster.
    key String
    The Tableflow API Key.
    secret String
    The Tableflow API Secret for your Confluent Cloud cluster.
    key string
    The Tableflow API Key.
    secret string
    The Tableflow API Secret for your Confluent Cloud cluster.
    key str
    The Tableflow API Key.
    secret str
    The Tableflow API Secret for your Confluent Cloud cluster.
    key String
    The Tableflow API Key.
    secret String
    The Tableflow API Secret for your Confluent Cloud cluster.

    CatalogIntegrationEnvironment, CatalogIntegrationEnvironmentArgs

    Id string
    The ID of the Environment, for example, env-abc123.
    Id string
    The ID of the Environment, for example, env-abc123.
    id String
    The ID of the Environment, for example, env-abc123.
    id string
    The ID of the Environment, for example, env-abc123.
    id str
    The ID of the Environment, for example, env-abc123.
    id String
    The ID of the Environment, for example, env-abc123.

    CatalogIntegrationKafkaCluster, CatalogIntegrationKafkaClusterArgs

    Id string
    The ID of the Kafka cluster, for example, lkc-abc123.
    Id string
    The ID of the Kafka cluster, for example, lkc-abc123.
    id String
    The ID of the Kafka cluster, for example, lkc-abc123.
    id string
    The ID of the Kafka cluster, for example, lkc-abc123.
    id str
    The ID of the Kafka cluster, for example, lkc-abc123.
    id String
    The ID of the Kafka cluster, for example, lkc-abc123.

    CatalogIntegrationSnowflake, CatalogIntegrationSnowflakeArgs

    AllowedScope string
    Allowed scope of the Snowflake Open Catalog.
    ClientId string
    The client ID of the catalog integration.
    ClientSecret string
    The client secret of the catalog integration.
    Endpoint string
    The catalog integration connection endpoint for Snowflake Open Catalog.
    Warehouse string
    Warehouse name of the Snowflake Open Catalog, for example, catalog-name.
    AllowedScope string
    Allowed scope of the Snowflake Open Catalog.
    ClientId string
    The client ID of the catalog integration.
    ClientSecret string
    The client secret of the catalog integration.
    Endpoint string
    The catalog integration connection endpoint for Snowflake Open Catalog.
    Warehouse string
    Warehouse name of the Snowflake Open Catalog, for example, catalog-name.
    allowedScope String
    Allowed scope of the Snowflake Open Catalog.
    clientId String
    The client ID of the catalog integration.
    clientSecret String
    The client secret of the catalog integration.
    endpoint String
    The catalog integration connection endpoint for Snowflake Open Catalog.
    warehouse String
    Warehouse name of the Snowflake Open Catalog, for example, catalog-name.
    allowedScope string
    Allowed scope of the Snowflake Open Catalog.
    clientId string
    The client ID of the catalog integration.
    clientSecret string
    The client secret of the catalog integration.
    endpoint string
    The catalog integration connection endpoint for Snowflake Open Catalog.
    warehouse string
    Warehouse name of the Snowflake Open Catalog, for example, catalog-name.
    allowed_scope str
    Allowed scope of the Snowflake Open Catalog.
    client_id str
    The client ID of the catalog integration.
    client_secret str
    The client secret of the catalog integration.
    endpoint str
    The catalog integration connection endpoint for Snowflake Open Catalog.
    warehouse str
    Warehouse name of the Snowflake Open Catalog, for example, catalog-name.
    allowedScope String
    Allowed scope of the Snowflake Open Catalog.
    clientId String
    The client ID of the catalog integration.
    clientSecret String
    The client secret of the catalog integration.
    endpoint String
    The catalog integration connection endpoint for Snowflake Open Catalog.
    warehouse String
    Warehouse name of the Snowflake Open Catalog, for example, catalog-name.

    Import

    You can import a Catalog Integration by using the Environment ID, Kafka Cluster ID, and Catalog Integration ID, in the format <Environment ID>/<Kafka Cluster ID>/<Catalog Integration ID>, for example:

    Option #1: Manage multiple Catalog Integrations in the same Pulumi Stack

    $ export IMPORT_TABLEFLOW_API_KEY="<tableflow_api_key>"

    $ export IMPORT_TABLEFLOW_API_SECRET="<tableflow_api_secret>"

    $ pulumi import confluentcloud:index/catalogIntegration:CatalogIntegration example env-abc123/lkc-abc123/tci-abc123
    

    Option #2: Manage a single Catalog Integration in the same Pulumi Stack

    $ pulumi import confluentcloud:index/catalogIntegration:CatalogIntegration example env-abc123/lkc-abc123/tci-abc123
    

    !> Warning: Do not forget to clear your terminal command history afterwards for security purposes, since it contains the exported Tableflow API key and secret.

    To learn more about importing existing cloud resources, see Importing resources.

    Package Details

    Repository
    Confluent Cloud pulumi/pulumi-confluentcloud
    License
    Apache-2.0
    Notes
    This Pulumi package is based on the confluent Terraform Provider.
    confluentcloud logo
    Confluent v2.24.0 published on Saturday, Apr 19, 2025 by Pulumi