confluentcloud.CatalogIntegration
Example Usage
Option #1: Manage multiple Catalog Integrations in the same Pulumi Stack
import * as pulumi from "@pulumi/pulumi";
import * as confluentcloud from "@pulumi/confluentcloud";
const example = new confluentcloud.CatalogIntegration("example", {
environment: {
id: staging.id,
},
kafkaCluster: {
id: stagingConfluentKafkaCluster.id,
},
displayName: "catalog-integration-1",
awsGlue: {
providerIntegrationId: main.id,
},
credentials: {
key: env_admin_tableflow_api_key.id,
secret: env_admin_tableflow_api_key.secret,
},
});
import pulumi
import pulumi_confluentcloud as confluentcloud
example = confluentcloud.CatalogIntegration("example",
environment={
"id": staging["id"],
},
kafka_cluster={
"id": staging_confluent_kafka_cluster["id"],
},
display_name="catalog-integration-1",
aws_glue={
"provider_integration_id": main["id"],
},
credentials={
"key": env_admin_tableflow_api_key["id"],
"secret": env_admin_tableflow_api_key["secret"],
})
package main
import (
"github.com/pulumi/pulumi-confluentcloud/sdk/v2/go/confluentcloud"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
_, err := confluentcloud.NewCatalogIntegration(ctx, "example", &confluentcloud.CatalogIntegrationArgs{
Environment: &confluentcloud.CatalogIntegrationEnvironmentArgs{
Id: pulumi.Any(staging.Id),
},
KafkaCluster: &confluentcloud.CatalogIntegrationKafkaClusterArgs{
Id: pulumi.Any(stagingConfluentKafkaCluster.Id),
},
DisplayName: pulumi.String("catalog-integration-1"),
AwsGlue: &confluentcloud.CatalogIntegrationAwsGlueArgs{
ProviderIntegrationId: pulumi.Any(main.Id),
},
Credentials: &confluentcloud.CatalogIntegrationCredentialsArgs{
Key: pulumi.Any(env_admin_tableflow_api_key.Id),
Secret: pulumi.Any(env_admin_tableflow_api_key.Secret),
},
})
if err != nil {
return err
}
return nil
})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using ConfluentCloud = Pulumi.ConfluentCloud;
return await Deployment.RunAsync(() =>
{
var example = new ConfluentCloud.CatalogIntegration("example", new()
{
Environment = new ConfluentCloud.Inputs.CatalogIntegrationEnvironmentArgs
{
Id = staging.Id,
},
KafkaCluster = new ConfluentCloud.Inputs.CatalogIntegrationKafkaClusterArgs
{
Id = stagingConfluentKafkaCluster.Id,
},
DisplayName = "catalog-integration-1",
AwsGlue = new ConfluentCloud.Inputs.CatalogIntegrationAwsGlueArgs
{
ProviderIntegrationId = main.Id,
},
Credentials = new ConfluentCloud.Inputs.CatalogIntegrationCredentialsArgs
{
Key = env_admin_tableflow_api_key.Id,
Secret = env_admin_tableflow_api_key.Secret,
},
});
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.confluentcloud.CatalogIntegration;
import com.pulumi.confluentcloud.CatalogIntegrationArgs;
import com.pulumi.confluentcloud.inputs.CatalogIntegrationEnvironmentArgs;
import com.pulumi.confluentcloud.inputs.CatalogIntegrationKafkaClusterArgs;
import com.pulumi.confluentcloud.inputs.CatalogIntegrationAwsGlueArgs;
import com.pulumi.confluentcloud.inputs.CatalogIntegrationCredentialsArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
var example = new CatalogIntegration("example", CatalogIntegrationArgs.builder()
.environment(CatalogIntegrationEnvironmentArgs.builder()
.id(staging.id())
.build())
.kafkaCluster(CatalogIntegrationKafkaClusterArgs.builder()
.id(stagingConfluentKafkaCluster.id())
.build())
.displayName("catalog-integration-1")
.awsGlue(CatalogIntegrationAwsGlueArgs.builder()
.providerIntegrationId(main.id())
.build())
.credentials(CatalogIntegrationCredentialsArgs.builder()
.key(env_admin_tableflow_api_key.id())
.secret(env_admin_tableflow_api_key.secret())
.build())
.build());
}
}
resources:
example:
type: confluentcloud:CatalogIntegration
properties:
environment:
id: ${staging.id}
kafkaCluster:
id: ${stagingConfluentKafkaCluster.id}
displayName: catalog-integration-1
awsGlue:
providerIntegrationId: ${main.id}
credentials:
key: ${["env-admin-tableflow-api-key"].id}
secret: ${["env-admin-tableflow-api-key"].secret}
Option #2: Manage a single Catalog Integration in the same Pulumi Stack
import * as pulumi from "@pulumi/pulumi";
import * as confluentcloud from "@pulumi/confluentcloud";
const example = new confluentcloud.CatalogIntegration("example", {
environment: {
id: staging.id,
},
kafkaCluster: {
id: stagingConfluentKafkaCluster.id,
},
displayName: "catalog-integration-1",
snowflake: {
endpoint: "https://vuser1_polaris.snowflakecomputing.com/",
clientId: "***REDACTED***",
clientSecret: "***REDACTED***",
warehouse: "catalog-name",
allowedScope: "session:role:R1",
},
});
import pulumi
import pulumi_confluentcloud as confluentcloud
example = confluentcloud.CatalogIntegration("example",
environment={
"id": staging["id"],
},
kafka_cluster={
"id": staging_confluent_kafka_cluster["id"],
},
display_name="catalog-integration-1",
snowflake={
"endpoint": "https://vuser1_polaris.snowflakecomputing.com/",
"client_id": "***REDACTED***",
"client_secret": "***REDACTED***",
"warehouse": "catalog-name",
"allowed_scope": "session:role:R1",
})
package main
import (
"github.com/pulumi/pulumi-confluentcloud/sdk/v2/go/confluentcloud"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
_, err := confluentcloud.NewCatalogIntegration(ctx, "example", &confluentcloud.CatalogIntegrationArgs{
Environment: &confluentcloud.CatalogIntegrationEnvironmentArgs{
Id: pulumi.Any(staging.Id),
},
KafkaCluster: &confluentcloud.CatalogIntegrationKafkaClusterArgs{
Id: pulumi.Any(stagingConfluentKafkaCluster.Id),
},
DisplayName: pulumi.String("catalog-integration-1"),
Snowflake: &confluentcloud.CatalogIntegrationSnowflakeArgs{
Endpoint: pulumi.String("https://vuser1_polaris.snowflakecomputing.com/"),
ClientId: pulumi.String("***REDACTED***"),
ClientSecret: pulumi.String("***REDACTED***"),
Warehouse: pulumi.String("catalog-name"),
AllowedScope: pulumi.String("session:role:R1"),
},
})
if err != nil {
return err
}
return nil
})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using ConfluentCloud = Pulumi.ConfluentCloud;
return await Deployment.RunAsync(() =>
{
var example = new ConfluentCloud.CatalogIntegration("example", new()
{
Environment = new ConfluentCloud.Inputs.CatalogIntegrationEnvironmentArgs
{
Id = staging.Id,
},
KafkaCluster = new ConfluentCloud.Inputs.CatalogIntegrationKafkaClusterArgs
{
Id = stagingConfluentKafkaCluster.Id,
},
DisplayName = "catalog-integration-1",
Snowflake = new ConfluentCloud.Inputs.CatalogIntegrationSnowflakeArgs
{
Endpoint = "https://vuser1_polaris.snowflakecomputing.com/",
ClientId = "***REDACTED***",
ClientSecret = "***REDACTED***",
Warehouse = "catalog-name",
AllowedScope = "session:role:R1",
},
});
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.confluentcloud.CatalogIntegration;
import com.pulumi.confluentcloud.CatalogIntegrationArgs;
import com.pulumi.confluentcloud.inputs.CatalogIntegrationEnvironmentArgs;
import com.pulumi.confluentcloud.inputs.CatalogIntegrationKafkaClusterArgs;
import com.pulumi.confluentcloud.inputs.CatalogIntegrationSnowflakeArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
var example = new CatalogIntegration("example", CatalogIntegrationArgs.builder()
.environment(CatalogIntegrationEnvironmentArgs.builder()
.id(staging.id())
.build())
.kafkaCluster(CatalogIntegrationKafkaClusterArgs.builder()
.id(stagingConfluentKafkaCluster.id())
.build())
.displayName("catalog-integration-1")
.snowflake(CatalogIntegrationSnowflakeArgs.builder()
.endpoint("https://vuser1_polaris.snowflakecomputing.com/")
.clientId("***REDACTED***")
.clientSecret("***REDACTED***")
.warehouse("catalog-name")
.allowedScope("session:role:R1")
.build())
.build());
}
}
resources:
example:
type: confluentcloud:CatalogIntegration
properties:
environment:
id: ${staging.id}
kafkaCluster:
id: ${stagingConfluentKafkaCluster.id}
displayName: catalog-integration-1
snowflake:
endpoint: https://vuser1_polaris.snowflakecomputing.com/
clientId: '***REDACTED***'
clientSecret: '***REDACTED***'
warehouse: catalog-name
allowedScope: session:role:R1
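Rather than hardcoding the redacted Snowflake client credentials shown above, you can read them from Pulumi configuration as secrets. A minimal Python sketch, assuming hypothetical config keys snowflakeClientId and snowflakeClientSecret set with pulumi config set --secret, and the same staging environment and cluster references used in the examples above:
import pulumi
import pulumi_confluentcloud as confluentcloud

config = pulumi.Config()
example = confluentcloud.CatalogIntegration("example",
    environment={
        # `staging` is the Environment referenced in the examples above.
        "id": staging["id"],
    },
    kafka_cluster={
        "id": staging_confluent_kafka_cluster["id"],
    },
    display_name="catalog-integration-1",
    snowflake={
        "endpoint": "https://vuser1_polaris.snowflakecomputing.com/",
        # Hypothetical config keys; set with `pulumi config set --secret snowflakeClientId ...`.
        "client_id": config.require_secret("snowflakeClientId"),
        "client_secret": config.require_secret("snowflakeClientSecret"),
        "warehouse": "catalog-name",
        "allowed_scope": "session:role:R1",
    })
Because require_secret returns an Output marked as secret, the client credentials stay encrypted in the stack's state.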
Create CatalogIntegration Resource
Resources are created with functions called constructors. To learn more about declaring and configuring resources, see Resources.
Constructor syntax
new CatalogIntegration(name: string, args: CatalogIntegrationArgs, opts?: CustomResourceOptions);
@overload
def CatalogIntegration(resource_name: str,
args: CatalogIntegrationArgs,
opts: Optional[ResourceOptions] = None)
@overload
def CatalogIntegration(resource_name: str,
opts: Optional[ResourceOptions] = None,
display_name: Optional[str] = None,
environment: Optional[CatalogIntegrationEnvironmentArgs] = None,
kafka_cluster: Optional[CatalogIntegrationKafkaClusterArgs] = None,
aws_glue: Optional[CatalogIntegrationAwsGlueArgs] = None,
credentials: Optional[CatalogIntegrationCredentialsArgs] = None,
snowflake: Optional[CatalogIntegrationSnowflakeArgs] = None)
func NewCatalogIntegration(ctx *Context, name string, args CatalogIntegrationArgs, opts ...ResourceOption) (*CatalogIntegration, error)
public CatalogIntegration(string name, CatalogIntegrationArgs args, CustomResourceOptions? opts = null)
public CatalogIntegration(String name, CatalogIntegrationArgs args)
public CatalogIntegration(String name, CatalogIntegrationArgs args, CustomResourceOptions options)
type: confluentcloud:CatalogIntegration
properties: # The arguments to resource properties.
options: # Bag of options to control resource's behavior.
Parameters
- name string (resource_name str in Python) - The unique name of the resource.
- args CatalogIntegrationArgs - The arguments to resource properties.
- opts CustomResourceOptions (ResourceOptions in Python, ResourceOption in Go) - Bag of options to control the resource's behavior.
- ctx Context (Go only) - Context object for the current deployment.
Constructor example
The following reference example uses placeholder values for all input properties.
var catalogIntegrationResource = new ConfluentCloud.CatalogIntegration("catalogIntegrationResource", new()
{
DisplayName = "string",
Environment = new ConfluentCloud.Inputs.CatalogIntegrationEnvironmentArgs
{
Id = "string",
},
KafkaCluster = new ConfluentCloud.Inputs.CatalogIntegrationKafkaClusterArgs
{
Id = "string",
},
AwsGlue = new ConfluentCloud.Inputs.CatalogIntegrationAwsGlueArgs
{
ProviderIntegrationId = "string",
},
Credentials = new ConfluentCloud.Inputs.CatalogIntegrationCredentialsArgs
{
Key = "string",
Secret = "string",
},
Snowflake = new ConfluentCloud.Inputs.CatalogIntegrationSnowflakeArgs
{
AllowedScope = "string",
ClientId = "string",
ClientSecret = "string",
Endpoint = "string",
Warehouse = "string",
},
});
example, err := confluentcloud.NewCatalogIntegration(ctx, "catalogIntegrationResource", &confluentcloud.CatalogIntegrationArgs{
DisplayName: pulumi.String("string"),
Environment: &confluentcloud.CatalogIntegrationEnvironmentArgs{
Id: pulumi.String("string"),
},
KafkaCluster: &confluentcloud.CatalogIntegrationKafkaClusterArgs{
Id: pulumi.String("string"),
},
AwsGlue: &confluentcloud.CatalogIntegrationAwsGlueArgs{
ProviderIntegrationId: pulumi.String("string"),
},
Credentials: &confluentcloud.CatalogIntegrationCredentialsArgs{
Key: pulumi.String("string"),
Secret: pulumi.String("string"),
},
Snowflake: &confluentcloud.CatalogIntegrationSnowflakeArgs{
AllowedScope: pulumi.String("string"),
ClientId: pulumi.String("string"),
ClientSecret: pulumi.String("string"),
Endpoint: pulumi.String("string"),
Warehouse: pulumi.String("string"),
},
})
var catalogIntegrationResource = new CatalogIntegration("catalogIntegrationResource", CatalogIntegrationArgs.builder()
.displayName("string")
.environment(CatalogIntegrationEnvironmentArgs.builder()
.id("string")
.build())
.kafkaCluster(CatalogIntegrationKafkaClusterArgs.builder()
.id("string")
.build())
.awsGlue(CatalogIntegrationAwsGlueArgs.builder()
.providerIntegrationId("string")
.build())
.credentials(CatalogIntegrationCredentialsArgs.builder()
.key("string")
.secret("string")
.build())
.snowflake(CatalogIntegrationSnowflakeArgs.builder()
.allowedScope("string")
.clientId("string")
.clientSecret("string")
.endpoint("string")
.warehouse("string")
.build())
.build());
catalog_integration_resource = confluentcloud.CatalogIntegration("catalogIntegrationResource",
display_name="string",
environment={
"id": "string",
},
kafka_cluster={
"id": "string",
},
aws_glue={
"provider_integration_id": "string",
},
credentials={
"key": "string",
"secret": "string",
},
snowflake={
"allowed_scope": "string",
"client_id": "string",
"client_secret": "string",
"endpoint": "string",
"warehouse": "string",
})
const catalogIntegrationResource = new confluentcloud.CatalogIntegration("catalogIntegrationResource", {
displayName: "string",
environment: {
id: "string",
},
kafkaCluster: {
id: "string",
},
awsGlue: {
providerIntegrationId: "string",
},
credentials: {
key: "string",
secret: "string",
},
snowflake: {
allowedScope: "string",
clientId: "string",
clientSecret: "string",
endpoint: "string",
warehouse: "string",
},
});
type: confluentcloud:CatalogIntegration
properties:
awsGlue:
providerIntegrationId: string
credentials:
key: string
secret: string
displayName: string
environment:
id: string
kafkaCluster:
id: string
snowflake:
allowedScope: string
clientId: string
clientSecret: string
endpoint: string
warehouse: string
CatalogIntegration Resource Properties
To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.
Inputs
In Python, inputs that are objects can be passed either as argument classes or as dictionary literals.
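For instance, the environment input can be given in either form; a minimal sketch using a placeholder environment ID:
import pulumi_confluentcloud as confluentcloud

# Typed argument class:
env_as_args = confluentcloud.CatalogIntegrationEnvironmentArgs(id="env-abc123")
# Equivalent dictionary literal:
env_as_dict = {"id": "env-abc123"}
Both values are accepted by the environment parameter of CatalogIntegration.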
The CatalogIntegration resource accepts the following input properties:
The properties are the same in every language; names are camelCase in TypeScript, Java, and YAML, PascalCase in C# and Go, and snake_case in Python.
- displayName (string) - The name of the catalog integration.
- environment (CatalogIntegrationEnvironment) - Environment objects represent an isolated namespace for your Confluent resources for organizational purposes.
- kafkaCluster (CatalogIntegrationKafkaCluster) - See CatalogIntegrationKafkaCluster under Supporting Types below.
- awsGlue (CatalogIntegrationAwsGlue) - Supports the properties listed under CatalogIntegrationAwsGlue below (see Integrate Tableflow with the AWS Glue Catalog in Confluent Cloud for more details).
- credentials (CatalogIntegrationCredentials) - The Cluster API Credentials.
- snowflake (CatalogIntegrationSnowflake) - Supports the properties listed under CatalogIntegrationSnowflake below (see Integrate Tableflow with Snowflake Open Catalog or Apache Polaris in Confluent Cloud for more details).
Outputs
All input properties are implicitly available as output properties. Additionally, the CatalogIntegration resource produces the following output properties:
- id (string) - The provider-assigned unique ID for this managed resource.
- suspended (bool) - (Optional Boolean) Indicates whether the Catalog Integration should be suspended.
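Outputs can also be exported as stack outputs; a minimal Python sketch, where example is the CatalogIntegration created in the examples above:
import pulumi

# Surface the integration's ID and suspension state as stack outputs.
pulumi.export("catalogIntegrationId", example.id)
pulumi.export("suspended", example.suspended)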
Look up Existing CatalogIntegration Resource
Get an existing CatalogIntegration resource’s state with the given name, ID, and optional extra properties used to qualify the lookup.
public static get(name: string, id: Input<ID>, state?: CatalogIntegrationState, opts?: CustomResourceOptions): CatalogIntegration
@staticmethod
def get(resource_name: str,
id: str,
opts: Optional[ResourceOptions] = None,
aws_glue: Optional[CatalogIntegrationAwsGlueArgs] = None,
credentials: Optional[CatalogIntegrationCredentialsArgs] = None,
display_name: Optional[str] = None,
environment: Optional[CatalogIntegrationEnvironmentArgs] = None,
kafka_cluster: Optional[CatalogIntegrationKafkaClusterArgs] = None,
snowflake: Optional[CatalogIntegrationSnowflakeArgs] = None,
suspended: Optional[bool] = None) -> CatalogIntegration
func GetCatalogIntegration(ctx *Context, name string, id IDInput, state *CatalogIntegrationState, opts ...ResourceOption) (*CatalogIntegration, error)
public static CatalogIntegration Get(string name, Input<string> id, CatalogIntegrationState? state, CustomResourceOptions? opts = null)
public static CatalogIntegration get(String name, Output<String> id, CatalogIntegrationState state, CustomResourceOptions options)
resources:
  _:
    type: confluentcloud:CatalogIntegration
    get:
      id: ${id}
- name (resource_name in Python) - The unique name of the resulting resource.
- id - The unique provider ID of the resource to look up.
- state - Any extra arguments used during the lookup.
- opts - A bag of options that control this resource's behavior.
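A minimal Python sketch of such a lookup, assuming the resource ID matches the composite import format shown under Import below:
import pulumi_confluentcloud as confluentcloud

# Adopt existing state by ID (<Environment ID>/<Kafka Cluster ID>/<Catalog Integration ID>).
existing = confluentcloud.CatalogIntegration.get("example",
    "env-abc123/lkc-abc123/tci-abc123")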
The following state properties are supported:
- awsGlue (CatalogIntegrationAwsGlue) - Supports the properties listed under CatalogIntegrationAwsGlue below (see Integrate Tableflow with the AWS Glue Catalog in Confluent Cloud for more details).
- credentials (CatalogIntegrationCredentials) - The Cluster API Credentials.
- displayName (string) - The name of the catalog integration.
- environment (CatalogIntegrationEnvironment) - Environment objects represent an isolated namespace for your Confluent resources for organizational purposes.
- kafkaCluster (CatalogIntegrationKafkaCluster) - See CatalogIntegrationKafkaCluster under Supporting Types below.
- snowflake (CatalogIntegrationSnowflake) - Supports the properties listed under CatalogIntegrationSnowflake below (see Integrate Tableflow with Snowflake Open Catalog or Apache Polaris in Confluent Cloud for more details).
- suspended (bool) - (Optional Boolean) Indicates whether the Catalog Integration should be suspended.
Supporting Types
CatalogIntegrationAwsGlue, CatalogIntegrationAwsGlueArgs
- providerIntegrationId (string) - The provider integration ID.
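In the AWS Glue example above, providerIntegrationId is taken from main.id, a confluentcloud.ProviderIntegration resource defined elsewhere in the stack. A minimal Python sketch of how that resource might look, assuming a placeholder IAM role ARN:
import pulumi_confluentcloud as confluentcloud

# Hypothetical provider integration granting Confluent Cloud access to AWS Glue;
# `staging` is the Environment referenced in the examples above.
main = confluentcloud.ProviderIntegration("main",
    environment={
        "id": staging["id"],
    },
    aws={
        "customer_role_arn": "arn:aws:iam::000000000000:role/my-test-aws-role",
    },
    display_name="provider_integration_main")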
CatalogIntegrationCredentials, CatalogIntegrationCredentialsArgs
- key (string) - The Tableflow API Key.
- secret (string) - The Tableflow API Secret.
CatalogIntegrationEnvironment, CatalogIntegrationEnvironmentArgs
- id (string) - The ID of the Environment, for example, env-abc123.
CatalogIntegrationKafkaCluster, CatalogIntegrationKafkaClusterArgs
- id (string) - The ID of the Kafka cluster, for example, lkc-abc123.
CatalogIntegrationSnowflake, CatalogIntegrationSnowflakeArgs
- allowedScope (string) - Allowed scope of the Snowflake Open Catalog.
- clientId (string) - The client ID of the catalog integration.
- clientSecret (string) - The client secret of the catalog integration.
- endpoint (string) - The catalog integration connection endpoint for Snowflake Open Catalog.
- warehouse (string) - Warehouse name of the Snowflake Open Catalog, for example, catalog-name.
Import
You can import a Catalog Integration by using the Environment ID, Kafka Cluster ID, and Catalog Integration ID, in the format <Environment ID>/<Kafka Cluster ID>/<Catalog Integration ID>, for example:
Option #1: Manage multiple Catalog Integrations in the same Pulumi Stack
$ export IMPORT_TABLEFLOW_API_KEY="<tableflow_api_key>"
$ export IMPORT_TABLEFLOW_API_SECRET="<tableflow_api_secret>"
$ pulumi import confluentcloud:index/catalogIntegration:CatalogIntegration example env-abc123/lkc-abc123/tci-abc123
Option #2: Manage a single Catalog Integration in the same Pulumi Stack
$ pulumi import confluentcloud:index/catalogIntegration:CatalogIntegration example env-abc123/lkc-abc123/tci-abc123
Warning: Remember to delete your terminal command history afterwards for security purposes.
To learn more about importing existing cloud resources, see Importing resources.
Package Details
- Repository: Confluent Cloud pulumi/pulumi-confluentcloud
- License: Apache-2.0
- Notes: This Pulumi package is based on the confluent Terraform Provider.