gcp.managedkafka.Connector
Explore with Pulumi AI
Example Usage
Managedkafka Connector Basic
import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";
import * as time from "@pulumi/time";
// Stand-alone project that hosts every resource in this example.
// NOTE(review): GCP project IDs allow only lowercase letters, digits, and
// hyphens — the underscores in "tf-test_80332"/"tf-test_13293" look like
// generated placeholders; replace them with valid values before deploying.
const project = new gcp.organizations.Project("project", {
projectId: "tf-test_80332",
name: "tf-test_13293",
orgId: "123456789",
billingAccount: "000000-0000000-0000000-000000",
deletionPolicy: "DELETE",
});
// Pause so the newly created project has time to propagate before APIs are
// enabled on it.
const wait60Seconds = new time.index.Sleep("wait_60_seconds", {createDuration: "60s"}, {
dependsOn: [project],
});
// Managed Kafka needs the Compute API enabled before its own API.
const compute = new gcp.projects.Service("compute", {
project: project.projectId,
service: "compute.googleapis.com",
}, {
dependsOn: [wait60Seconds],
});
const managedkafka = new gcp.projects.Service("managedkafka", {
project: project.projectId,
service: "managedkafka.googleapis.com",
}, {
dependsOn: [compute],
});
// Second pause: let the service enablement settle before creating resources
// that rely on it.
const wait120Seconds = new time.index.Sleep("wait_120_seconds", {createDuration: "120s"}, {
dependsOn: [managedkafka],
});
// Extra subnet made reachable from the Connect cluster (additionalSubnets).
const mkcSecondarySubnet = new gcp.compute.Subnetwork("mkc_secondary_subnet", {
project: project.projectId,
name: "my-secondary-subnetwork-00",
ipCidrRange: "10.5.0.0/16",
region: "us-central1",
network: "default",
}, {
dependsOn: [wait120Seconds],
});
// Pub/Sub topic that the sink connector below writes Kafka records into.
const cpsTopic = new gcp.pubsub.Topic("cps_topic", {
project: project.projectId,
name: "my-cps-topic",
messageRetentionDuration: "86600s",
});
// The Managed Kafka cluster the connector reads from.
const gmkCluster = new gcp.managedkafka.Cluster("gmk_cluster", {
project: project.projectId,
clusterId: "my-cluster",
location: "us-central1",
capacityConfig: {
vcpuCount: "3",
memoryBytes: "3221225472",
},
gcpConfig: {
accessConfig: {
networkConfigs: [{
subnet: pulumi.interpolate`projects/${project.projectId}/regions/us-central1/subnetworks/default`,
}],
},
},
}, {
dependsOn: [managedkafka],
});
// Source topic consumed by the connector ("topics" config below).
const gmkTopic = new gcp.managedkafka.Topic("gmk_topic", {
project: project.projectId,
topicId: "my-topic",
cluster: gmkCluster.clusterId,
location: "us-central1",
partitionCount: 2,
replicationFactor: 3,
}, {
dependsOn: [managedkafka],
});
// Kafka Connect cluster that hosts the connector; it is attached to the
// Kafka cluster via the fully-qualified kafkaCluster resource name.
const mkcCluster = new gcp.managedkafka.ConnectCluster("mkc_cluster", {
project: project.projectId,
connectClusterId: "my-connect-cluster",
kafkaCluster: pulumi.interpolate`projects/${project.projectId}/locations/us-central1/clusters/${gmkCluster.clusterId}`,
location: "us-central1",
capacityConfig: {
vcpuCount: "12",
memoryBytes: "21474836480",
},
gcpConfig: {
accessConfig: {
networkConfigs: [{
primarySubnet: pulumi.interpolate`projects/${project.projectId}/regions/us-central1/subnetworks/default`,
additionalSubnets: [mkcSecondarySubnet.id],
dnsDomainNames: [pulumi.interpolate`${gmkCluster.clusterId}.us-central1.managedkafka.${project.projectId}.cloud.goog`],
}],
},
},
labels: {
key: "value",
},
}, {
dependsOn: [managedkafka],
});
// The connector itself: a CloudPubSubSinkConnector forwarding records from
// the Kafka topic to the Pub/Sub topic, with exponential task restarts.
const example = new gcp.managedkafka.Connector("example", {
project: project.projectId,
connectorId: "my-connector",
connectCluster: mkcCluster.connectClusterId,
location: "us-central1",
configs: {
"connector.class": "com.google.pubsub.kafka.sink.CloudPubSubSinkConnector",
name: "my-connector",
"tasks.max": "1",
topics: gmkTopic.topicId,
"cps.topic": cpsTopic.name,
"cps.project": project.projectId,
"value.converter": "org.apache.kafka.connect.storage.StringConverter",
"key.converter": "org.apache.kafka.connect.storage.StringConverter",
},
taskRestartPolicy: {
minimumBackoff: "60s",
maximumBackoff: "1800s",
},
}, {
dependsOn: [managedkafka],
});
import pulumi
import pulumi_gcp as gcp
import pulumi_time as time

# Stand-alone project that hosts every resource in this example.
# NOTE(review): GCP project IDs allow only lowercase letters, digits, and
# hyphens - the underscores in these generated test values must be replaced
# before the example can actually deploy.
project = gcp.organizations.Project("project",
    project_id="tf-test_80332",
    name="tf-test_13293",
    org_id="123456789",
    billing_account="000000-0000000-0000000-000000",
    deletion_policy="DELETE")
# Pause so the new project propagates before APIs are enabled on it.
# Fix: create_duration takes a duration *string*; the bare token `60s` in the
# original was a Python syntax error.
wait60_seconds = time.index.Sleep("wait_60_seconds", create_duration="60s",
    opts = pulumi.ResourceOptions(depends_on=[project]))
# Managed Kafka needs the Compute API enabled before its own API.
compute = gcp.projects.Service("compute",
    project=project.project_id,
    service="compute.googleapis.com",
    opts = pulumi.ResourceOptions(depends_on=[wait60_seconds]))
managedkafka = gcp.projects.Service("managedkafka",
    project=project.project_id,
    service="managedkafka.googleapis.com",
    opts = pulumi.ResourceOptions(depends_on=[compute]))
# Fix: "120s" quoted for the same reason as "60s" above.
wait120_seconds = time.index.Sleep("wait_120_seconds", create_duration="120s",
    opts = pulumi.ResourceOptions(depends_on=[managedkafka]))
# Extra subnet made reachable from the Connect cluster (additional_subnets).
mkc_secondary_subnet = gcp.compute.Subnetwork("mkc_secondary_subnet",
    project=project.project_id,
    name="my-secondary-subnetwork-00",
    ip_cidr_range="10.5.0.0/16",
    region="us-central1",
    network="default",
    opts = pulumi.ResourceOptions(depends_on=[wait120_seconds]))
# Pub/Sub topic that the sink connector writes Kafka records into.
cps_topic = gcp.pubsub.Topic("cps_topic",
    project=project.project_id,
    name="my-cps-topic",
    message_retention_duration="86600s")
# The Managed Kafka cluster the connector reads from.
gmk_cluster = gcp.managedkafka.Cluster("gmk_cluster",
    project=project.project_id,
    cluster_id="my-cluster",
    location="us-central1",
    capacity_config={
        "vcpu_count": "3",
        "memory_bytes": "3221225472",
    },
    gcp_config={
        "access_config": {
            "network_configs": [{
                "subnet": project.project_id.apply(lambda project_id: f"projects/{project_id}/regions/us-central1/subnetworks/default"),
            }],
        },
    },
    opts = pulumi.ResourceOptions(depends_on=[managedkafka]))
# Source topic consumed by the connector ("topics" config below).
gmk_topic = gcp.managedkafka.Topic("gmk_topic",
    project=project.project_id,
    topic_id="my-topic",
    cluster=gmk_cluster.cluster_id,
    location="us-central1",
    partition_count=2,
    replication_factor=3,
    opts = pulumi.ResourceOptions(depends_on=[managedkafka]))
# Kafka Connect cluster hosting the connector; attached to the Kafka cluster
# through the fully-qualified kafka_cluster resource name.
mkc_cluster = gcp.managedkafka.ConnectCluster("mkc_cluster",
    project=project.project_id,
    connect_cluster_id="my-connect-cluster",
    kafka_cluster=pulumi.Output.all(
        project_id=project.project_id,
        cluster_id=gmk_cluster.cluster_id
).apply(lambda resolved_outputs: f"projects/{resolved_outputs['project_id']}/locations/us-central1/clusters/{resolved_outputs['cluster_id']}")
,
    location="us-central1",
    capacity_config={
        "vcpu_count": "12",
        "memory_bytes": "21474836480",
    },
    gcp_config={
        "access_config": {
            "network_configs": [{
                "primary_subnet": project.project_id.apply(lambda project_id: f"projects/{project_id}/regions/us-central1/subnetworks/default"),
                "additional_subnets": [mkc_secondary_subnet.id],
                "dns_domain_names": [pulumi.Output.all(
                    cluster_id=gmk_cluster.cluster_id,
                    project_id=project.project_id
).apply(lambda resolved_outputs: f"{resolved_outputs['cluster_id']}.us-central1.managedkafka.{resolved_outputs['project_id']}.cloud.goog")
                ],
            }],
        },
    },
    labels={
        "key": "value",
    },
    opts = pulumi.ResourceOptions(depends_on=[managedkafka]))
# The connector itself: a CloudPubSubSinkConnector forwarding records from the
# Kafka topic to the Pub/Sub topic, with exponential task restarts.
example = gcp.managedkafka.Connector("example",
    project=project.project_id,
    connector_id="my-connector",
    connect_cluster=mkc_cluster.connect_cluster_id,
    location="us-central1",
    configs={
        "connector.class": "com.google.pubsub.kafka.sink.CloudPubSubSinkConnector",
        "name": "my-connector",
        "tasks.max": "1",
        "topics": gmk_topic.topic_id,
        "cps.topic": cps_topic.name,
        "cps.project": project.project_id,
        "value.converter": "org.apache.kafka.connect.storage.StringConverter",
        "key.converter": "org.apache.kafka.connect.storage.StringConverter",
    },
    task_restart_policy={
        "minimum_backoff": "60s",
        "maximum_backoff": "1800s",
    },
    opts = pulumi.ResourceOptions(depends_on=[managedkafka]))
package main
import (
"fmt"
"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/compute"
"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/managedkafka"
"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/organizations"
"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/projects"
"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/pubsub"
"github.com/pulumi/pulumi-time/sdk/go/time"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
project, err := organizations.NewProject(ctx, "project", &organizations.ProjectArgs{
ProjectId: pulumi.String("tf-test_80332"),
Name: pulumi.String("tf-test_13293"),
OrgId: pulumi.String("123456789"),
BillingAccount: pulumi.String("000000-0000000-0000000-000000"),
DeletionPolicy: pulumi.String("DELETE"),
})
if err != nil {
return err
}
wait60Seconds, err := time.NewSleep(ctx, "wait_60_seconds", &time.SleepArgs{
CreateDuration: "60s",
}, pulumi.DependsOn([]pulumi.Resource{
project,
}))
if err != nil {
return err
}
compute, err := projects.NewService(ctx, "compute", &projects.ServiceArgs{
Project: project.ProjectId,
Service: pulumi.String("compute.googleapis.com"),
}, pulumi.DependsOn([]pulumi.Resource{
wait60Seconds,
}))
if err != nil {
return err
}
managedkafka, err := projects.NewService(ctx, "managedkafka", &projects.ServiceArgs{
Project: project.ProjectId,
Service: pulumi.String("managedkafka.googleapis.com"),
}, pulumi.DependsOn([]pulumi.Resource{
compute,
}))
if err != nil {
return err
}
wait120Seconds, err := time.NewSleep(ctx, "wait_120_seconds", &time.SleepArgs{
CreateDuration: "120s",
}, pulumi.DependsOn([]pulumi.Resource{
managedkafka,
}))
if err != nil {
return err
}
mkcSecondarySubnet, err := compute.NewSubnetwork(ctx, "mkc_secondary_subnet", &compute.SubnetworkArgs{
Project: project.ProjectId,
Name: pulumi.String("my-secondary-subnetwork-00"),
IpCidrRange: pulumi.String("10.5.0.0/16"),
Region: pulumi.String("us-central1"),
Network: pulumi.String("default"),
}, pulumi.DependsOn([]pulumi.Resource{
wait120Seconds,
}))
if err != nil {
return err
}
cpsTopic, err := pubsub.NewTopic(ctx, "cps_topic", &pubsub.TopicArgs{
Project: project.ProjectId,
Name: pulumi.String("my-cps-topic"),
MessageRetentionDuration: pulumi.String("86600s"),
})
if err != nil {
return err
}
gmkCluster, err := managedkafka.NewCluster(ctx, "gmk_cluster", &managedkafka.ClusterArgs{
Project: project.ProjectId,
ClusterId: pulumi.String("my-cluster"),
Location: pulumi.String("us-central1"),
CapacityConfig: &managedkafka.ClusterCapacityConfigArgs{
VcpuCount: pulumi.String("3"),
MemoryBytes: pulumi.String("3221225472"),
},
GcpConfig: &managedkafka.ClusterGcpConfigArgs{
AccessConfig: &managedkafka.ClusterGcpConfigAccessConfigArgs{
NetworkConfigs: managedkafka.ClusterGcpConfigAccessConfigNetworkConfigArray{
&managedkafka.ClusterGcpConfigAccessConfigNetworkConfigArgs{
Subnet: project.ProjectId.ApplyT(func(projectId string) (string, error) {
return fmt.Sprintf("projects/%v/regions/us-central1/subnetworks/default", projectId), nil
}).(pulumi.StringOutput),
},
},
},
},
}, pulumi.DependsOn([]pulumi.Resource{
managedkafka,
}))
if err != nil {
return err
}
gmkTopic, err := managedkafka.NewTopic(ctx, "gmk_topic", &managedkafka.TopicArgs{
Project: project.ProjectId,
TopicId: pulumi.String("my-topic"),
Cluster: gmkCluster.ClusterId,
Location: pulumi.String("us-central1"),
PartitionCount: pulumi.Int(2),
ReplicationFactor: pulumi.Int(3),
}, pulumi.DependsOn([]pulumi.Resource{
managedkafka,
}))
if err != nil {
return err
}
mkcCluster, err := managedkafka.NewConnectCluster(ctx, "mkc_cluster", &managedkafka.ConnectClusterArgs{
Project: project.ProjectId,
ConnectClusterId: pulumi.String("my-connect-cluster"),
KafkaCluster: pulumi.All(project.ProjectId, gmkCluster.ClusterId).ApplyT(func(_args []interface{}) (string, error) {
projectId := _args[0].(string)
clusterId := _args[1].(string)
return fmt.Sprintf("projects/%v/locations/us-central1/clusters/%v", projectId, clusterId), nil
}).(pulumi.StringOutput),
Location: pulumi.String("us-central1"),
CapacityConfig: &managedkafka.ConnectClusterCapacityConfigArgs{
VcpuCount: pulumi.String("12"),
MemoryBytes: pulumi.String("21474836480"),
},
GcpConfig: &managedkafka.ConnectClusterGcpConfigArgs{
AccessConfig: &managedkafka.ConnectClusterGcpConfigAccessConfigArgs{
NetworkConfigs: managedkafka.ConnectClusterGcpConfigAccessConfigNetworkConfigArray{
&managedkafka.ConnectClusterGcpConfigAccessConfigNetworkConfigArgs{
PrimarySubnet: project.ProjectId.ApplyT(func(projectId string) (string, error) {
return fmt.Sprintf("projects/%v/regions/us-central1/subnetworks/default", projectId), nil
}).(pulumi.StringOutput),
AdditionalSubnets: pulumi.StringArray{
mkcSecondarySubnet.ID(),
},
DnsDomainNames: pulumi.StringArray{
pulumi.All(gmkCluster.ClusterId, project.ProjectId).ApplyT(func(_args []interface{}) (string, error) {
clusterId := _args[0].(string)
projectId := _args[1].(string)
return fmt.Sprintf("%v.us-central1.managedkafka.%v.cloud.goog", clusterId, projectId), nil
}).(pulumi.StringOutput),
},
},
},
},
},
Labels: pulumi.StringMap{
"key": pulumi.String("value"),
},
}, pulumi.DependsOn([]pulumi.Resource{
managedkafka,
}))
if err != nil {
return err
}
_, err = managedkafka.NewConnector(ctx, "example", &managedkafka.ConnectorArgs{
Project: project.ProjectId,
ConnectorId: pulumi.String("my-connector"),
ConnectCluster: mkcCluster.ConnectClusterId,
Location: pulumi.String("us-central1"),
Configs: pulumi.StringMap{
"connector.class": pulumi.String("com.google.pubsub.kafka.sink.CloudPubSubSinkConnector"),
"name": pulumi.String("my-connector"),
"tasks.max": pulumi.String("1"),
"topics": gmkTopic.TopicId,
"cps.topic": cpsTopic.Name,
"cps.project": project.ProjectId,
"value.converter": pulumi.String("org.apache.kafka.connect.storage.StringConverter"),
"key.converter": pulumi.String("org.apache.kafka.connect.storage.StringConverter"),
},
TaskRestartPolicy: &managedkafka.ConnectorTaskRestartPolicyArgs{
MinimumBackoff: pulumi.String("60s"),
MaximumBackoff: pulumi.String("1800s"),
},
}, pulumi.DependsOn([]pulumi.Resource{
managedkafka,
}))
if err != nil {
return err
}
return nil
})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;
using Time = Pulumi.Time;

// Stand-alone project that hosts every resource in this example.
// NOTE(review): GCP project IDs allow only lowercase letters, digits, and
// hyphens — the underscores in these generated test values must be replaced
// before the example can actually deploy.
return await Deployment.RunAsync(() =>
{
var project = new Gcp.Organizations.Project("project", new()
{
ProjectId = "tf-test_80332",
Name = "tf-test_13293",
OrgId = "123456789",
BillingAccount = "000000-0000000-0000000-000000",
DeletionPolicy = "DELETE",
});
// Pause so the new project propagates before APIs are enabled on it.
var wait60Seconds = new Time.Index.Sleep("wait_60_seconds", new()
{
CreateDuration = "60s",
}, new CustomResourceOptions
{
DependsOn =
{
project,
},
});
// Managed Kafka needs the Compute API enabled before its own API.
var compute = new Gcp.Projects.Service("compute", new()
{
Project = project.ProjectId,
ServiceName = "compute.googleapis.com",
}, new CustomResourceOptions
{
DependsOn =
{
wait60Seconds,
},
});
var managedkafka = new Gcp.Projects.Service("managedkafka", new()
{
Project = project.ProjectId,
ServiceName = "managedkafka.googleapis.com",
}, new CustomResourceOptions
{
DependsOn =
{
compute,
},
});
// Second pause: let the service enablement settle.
var wait120Seconds = new Time.Index.Sleep("wait_120_seconds", new()
{
CreateDuration = "120s",
}, new CustomResourceOptions
{
DependsOn =
{
managedkafka,
},
});
// Extra subnet made reachable from the Connect cluster (AdditionalSubnets).
var mkcSecondarySubnet = new Gcp.Compute.Subnetwork("mkc_secondary_subnet", new()
{
Project = project.ProjectId,
Name = "my-secondary-subnetwork-00",
IpCidrRange = "10.5.0.0/16",
Region = "us-central1",
Network = "default",
}, new CustomResourceOptions
{
DependsOn =
{
wait120Seconds,
},
});
// Pub/Sub topic that the sink connector writes Kafka records into.
var cpsTopic = new Gcp.PubSub.Topic("cps_topic", new()
{
Project = project.ProjectId,
Name = "my-cps-topic",
MessageRetentionDuration = "86600s",
});
// The Managed Kafka cluster the connector reads from.
var gmkCluster = new Gcp.ManagedKafka.Cluster("gmk_cluster", new()
{
Project = project.ProjectId,
ClusterId = "my-cluster",
Location = "us-central1",
CapacityConfig = new Gcp.ManagedKafka.Inputs.ClusterCapacityConfigArgs
{
VcpuCount = "3",
MemoryBytes = "3221225472",
},
GcpConfig = new Gcp.ManagedKafka.Inputs.ClusterGcpConfigArgs
{
AccessConfig = new Gcp.ManagedKafka.Inputs.ClusterGcpConfigAccessConfigArgs
{
NetworkConfigs = new[]
{
new Gcp.ManagedKafka.Inputs.ClusterGcpConfigAccessConfigNetworkConfigArgs
{
Subnet = project.ProjectId.Apply(projectId => $"projects/{projectId}/regions/us-central1/subnetworks/default"),
},
},
},
},
}, new CustomResourceOptions
{
DependsOn =
{
managedkafka,
},
});
// Source topic consumed by the connector ("topics" config below).
var gmkTopic = new Gcp.ManagedKafka.Topic("gmk_topic", new()
{
Project = project.ProjectId,
TopicId = "my-topic",
Cluster = gmkCluster.ClusterId,
Location = "us-central1",
PartitionCount = 2,
ReplicationFactor = 3,
}, new CustomResourceOptions
{
DependsOn =
{
managedkafka,
},
});
// Kafka Connect cluster that hosts the connector; attached to the Kafka
// cluster via the fully-qualified KafkaCluster resource name.
var mkcCluster = new Gcp.ManagedKafka.ConnectCluster("mkc_cluster", new()
{
Project = project.ProjectId,
ConnectClusterId = "my-connect-cluster",
KafkaCluster = Output.Tuple(project.ProjectId, gmkCluster.ClusterId).Apply(values =>
{
var projectId = values.Item1;
var clusterId = values.Item2;
return $"projects/{projectId}/locations/us-central1/clusters/{clusterId}";
}),
Location = "us-central1",
CapacityConfig = new Gcp.ManagedKafka.Inputs.ConnectClusterCapacityConfigArgs
{
VcpuCount = "12",
MemoryBytes = "21474836480",
},
GcpConfig = new Gcp.ManagedKafka.Inputs.ConnectClusterGcpConfigArgs
{
AccessConfig = new Gcp.ManagedKafka.Inputs.ConnectClusterGcpConfigAccessConfigArgs
{
NetworkConfigs = new[]
{
new Gcp.ManagedKafka.Inputs.ConnectClusterGcpConfigAccessConfigNetworkConfigArgs
{
PrimarySubnet = project.ProjectId.Apply(projectId => $"projects/{projectId}/regions/us-central1/subnetworks/default"),
AdditionalSubnets = new[]
{
mkcSecondarySubnet.Id,
},
DnsDomainNames = new[]
{
Output.Tuple(gmkCluster.ClusterId, project.ProjectId).Apply(values =>
{
var clusterId = values.Item1;
var projectId = values.Item2;
return $"{clusterId}.us-central1.managedkafka.{projectId}.cloud.goog";
}),
},
},
},
},
},
Labels =
{
{ "key", "value" },
},
}, new CustomResourceOptions
{
DependsOn =
{
managedkafka,
},
});
// The connector itself: CloudPubSubSinkConnector forwarding records from the
// Kafka topic to the Pub/Sub topic, with exponential task restarts.
var example = new Gcp.ManagedKafka.Connector("example", new()
{
Project = project.ProjectId,
ConnectorId = "my-connector",
ConnectCluster = mkcCluster.ConnectClusterId,
Location = "us-central1",
Configs =
{
{ "connector.class", "com.google.pubsub.kafka.sink.CloudPubSubSinkConnector" },
{ "name", "my-connector" },
{ "tasks.max", "1" },
{ "topics", gmkTopic.TopicId },
{ "cps.topic", cpsTopic.Name },
{ "cps.project", project.ProjectId },
{ "value.converter", "org.apache.kafka.connect.storage.StringConverter" },
{ "key.converter", "org.apache.kafka.connect.storage.StringConverter" },
},
TaskRestartPolicy = new Gcp.ManagedKafka.Inputs.ConnectorTaskRestartPolicyArgs
{
MinimumBackoff = "60s",
MaximumBackoff = "1800s",
},
}, new CustomResourceOptions
{
DependsOn =
{
managedkafka,
},
});
});
package generated_program;

import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcp.organizations.Project;
import com.pulumi.gcp.organizations.ProjectArgs;
// Fix: the Sleep resource and its args class are PascalCase; the original
// lowercase imports (com.pulumi.time.sleep / sleepArgs) did not resolve
// against the `new Sleep(...)` / `SleepArgs.builder()` usages below.
import com.pulumi.time.Sleep;
import com.pulumi.time.SleepArgs;
import com.pulumi.gcp.projects.Service;
import com.pulumi.gcp.projects.ServiceArgs;
import com.pulumi.gcp.compute.Subnetwork;
import com.pulumi.gcp.compute.SubnetworkArgs;
import com.pulumi.gcp.managedkafka.Cluster;
import com.pulumi.gcp.managedkafka.ClusterArgs;
import com.pulumi.gcp.managedkafka.inputs.ClusterCapacityConfigArgs;
import com.pulumi.gcp.managedkafka.inputs.ClusterGcpConfigArgs;
import com.pulumi.gcp.managedkafka.inputs.ClusterGcpConfigAccessConfigArgs;
// Fix: this args class is used in gmkCluster below but was never imported.
import com.pulumi.gcp.managedkafka.inputs.ClusterGcpConfigAccessConfigNetworkConfigArgs;
import com.pulumi.gcp.managedkafka.ConnectCluster;
import com.pulumi.gcp.managedkafka.ConnectClusterArgs;
import com.pulumi.gcp.managedkafka.inputs.ConnectClusterCapacityConfigArgs;
import com.pulumi.gcp.managedkafka.inputs.ConnectClusterGcpConfigArgs;
import com.pulumi.gcp.managedkafka.inputs.ConnectClusterGcpConfigAccessConfigArgs;
// Fix: this args class is used in mkcCluster below but was never imported.
import com.pulumi.gcp.managedkafka.inputs.ConnectClusterGcpConfigAccessConfigNetworkConfigArgs;
import com.pulumi.gcp.managedkafka.Connector;
import com.pulumi.gcp.managedkafka.ConnectorArgs;
import com.pulumi.gcp.managedkafka.inputs.ConnectorTaskRestartPolicyArgs;
import com.pulumi.resources.CustomResourceOptions;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;

public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        // Stand-alone project that hosts every resource in this example.
        var project = new Project("project", ProjectArgs.builder()
            .projectId("tf-test_80332")
            .name("tf-test_13293")
            .orgId("123456789")
            .billingAccount("000000-0000000-0000000-000000")
            .deletionPolicy("DELETE")
            .build());
        // Pause so the new project propagates before APIs are enabled on it.
        var wait60Seconds = new Sleep("wait60Seconds", SleepArgs.builder()
            .createDuration("60s")
            .build(), CustomResourceOptions.builder()
                .dependsOn(List.of(project))
                .build());
        // Managed Kafka needs the Compute API enabled before its own API.
        var compute = new Service("compute", ServiceArgs.builder()
            .project(project.projectId())
            .service("compute.googleapis.com")
            .build(), CustomResourceOptions.builder()
                .dependsOn(wait60Seconds)
                .build());
        var managedkafka = new Service("managedkafka", ServiceArgs.builder()
            .project(project.projectId())
            .service("managedkafka.googleapis.com")
            .build(), CustomResourceOptions.builder()
                .dependsOn(compute)
                .build());
        var wait120Seconds = new Sleep("wait120Seconds", SleepArgs.builder()
            .createDuration("120s")
            .build(), CustomResourceOptions.builder()
                .dependsOn(List.of(managedkafka))
                .build());
        // Extra subnet made reachable from the Connect cluster.
        var mkcSecondarySubnet = new Subnetwork("mkcSecondarySubnet", SubnetworkArgs.builder()
            .project(project.projectId())
            .name("my-secondary-subnetwork-00")
            .ipCidrRange("10.5.0.0/16")
            .region("us-central1")
            .network("default")
            .build(), CustomResourceOptions.builder()
                .dependsOn(wait120Seconds)
                .build());
        // Pub/Sub topic that the sink connector writes Kafka records into.
        var cpsTopic = new com.pulumi.gcp.pubsub.Topic("cpsTopic", com.pulumi.gcp.pubsub.TopicArgs.builder()
            .project(project.projectId())
            .name("my-cps-topic")
            .messageRetentionDuration("86600s")
            .build());
        // The Managed Kafka cluster the connector reads from.
        var gmkCluster = new Cluster("gmkCluster", ClusterArgs.builder()
            .project(project.projectId())
            .clusterId("my-cluster")
            .location("us-central1")
            .capacityConfig(ClusterCapacityConfigArgs.builder()
                .vcpuCount("3")
                .memoryBytes("3221225472")
                .build())
            .gcpConfig(ClusterGcpConfigArgs.builder()
                .accessConfig(ClusterGcpConfigAccessConfigArgs.builder()
                    .networkConfigs(ClusterGcpConfigAccessConfigNetworkConfigArgs.builder()
                        .subnet(project.projectId().applyValue(_projectId -> String.format("projects/%s/regions/us-central1/subnetworks/default", _projectId)))
                        .build())
                    .build())
                .build())
            .build(), CustomResourceOptions.builder()
                .dependsOn(managedkafka)
                .build());
        // Source topic consumed by the connector ("topics" config below).
        var gmkTopic = new com.pulumi.gcp.managedkafka.Topic("gmkTopic", com.pulumi.gcp.managedkafka.TopicArgs.builder()
            .project(project.projectId())
            .topicId("my-topic")
            .cluster(gmkCluster.clusterId())
            .location("us-central1")
            .partitionCount(2)
            .replicationFactor(3)
            .build(), CustomResourceOptions.builder()
                .dependsOn(managedkafka)
                .build());
        // Kafka Connect cluster that hosts the connector.
        var mkcCluster = new ConnectCluster("mkcCluster", ConnectClusterArgs.builder()
            .project(project.projectId())
            .connectClusterId("my-connect-cluster")
            .kafkaCluster(Output.tuple(project.projectId(), gmkCluster.clusterId()).applyValue(values -> {
                var projectId = values.t1;
                var clusterId = values.t2;
                return String.format("projects/%s/locations/us-central1/clusters/%s", projectId,clusterId);
            }))
            .location("us-central1")
            .capacityConfig(ConnectClusterCapacityConfigArgs.builder()
                .vcpuCount("12")
                .memoryBytes("21474836480")
                .build())
            .gcpConfig(ConnectClusterGcpConfigArgs.builder()
                .accessConfig(ConnectClusterGcpConfigAccessConfigArgs.builder()
                    .networkConfigs(ConnectClusterGcpConfigAccessConfigNetworkConfigArgs.builder()
                        .primarySubnet(project.projectId().applyValue(_projectId -> String.format("projects/%s/regions/us-central1/subnetworks/default", _projectId)))
                        .additionalSubnets(mkcSecondarySubnet.id())
                        .dnsDomainNames(Output.tuple(gmkCluster.clusterId(), project.projectId()).applyValue(values -> {
                            var clusterId = values.t1;
                            var projectId = values.t2;
                            return String.format("%s.us-central1.managedkafka.%s.cloud.goog", clusterId,projectId);
                        }))
                        .build())
                    .build())
                .build())
            .labels(Map.of("key", "value"))
            .build(), CustomResourceOptions.builder()
                .dependsOn(managedkafka)
                .build());
        // The connector itself: CloudPubSubSinkConnector forwarding records
        // from the Kafka topic to the Pub/Sub topic.
        var example = new Connector("example", ConnectorArgs.builder()
            .project(project.projectId())
            .connectorId("my-connector")
            .connectCluster(mkcCluster.connectClusterId())
            .location("us-central1")
            .configs(Map.ofEntries(
                Map.entry("connector.class", "com.google.pubsub.kafka.sink.CloudPubSubSinkConnector"),
                Map.entry("name", "my-connector"),
                Map.entry("tasks.max", "1"),
                Map.entry("topics", gmkTopic.topicId()),
                Map.entry("cps.topic", cpsTopic.name()),
                Map.entry("cps.project", project.projectId()),
                Map.entry("value.converter", "org.apache.kafka.connect.storage.StringConverter"),
                Map.entry("key.converter", "org.apache.kafka.connect.storage.StringConverter")
            ))
            .taskRestartPolicy(ConnectorTaskRestartPolicyArgs.builder()
                .minimumBackoff("60s")
                .maximumBackoff("1800s")
                .build())
            .build(), CustomResourceOptions.builder()
                .dependsOn(managedkafka)
                .build());
    }
}
resources:
  # Stand-alone project that hosts every resource in this example.
  project:
    type: gcp:organizations:Project
    properties:
      projectId: tf-test_80332
      name: tf-test_13293
      orgId: '123456789'
      billingAccount: 000000-0000000-0000000-000000
      deletionPolicy: DELETE
  # Pause so the new project propagates before APIs are enabled on it.
  wait60Seconds:
    type: time:sleep
    name: wait_60_seconds
    properties:
      createDuration: 60s
    options:
      dependsOn:
        - ${project}
  # Managed Kafka needs the Compute API enabled before its own API.
  compute:
    type: gcp:projects:Service
    properties:
      project: ${project.projectId}
      service: compute.googleapis.com
    options:
      dependsOn:
        - ${wait60Seconds}
  managedkafka:
    type: gcp:projects:Service
    properties:
      project: ${project.projectId}
      service: managedkafka.googleapis.com
    options:
      dependsOn:
        - ${compute}
  wait120Seconds:
    type: time:sleep
    name: wait_120_seconds
    properties:
      createDuration: 120s
    options:
      dependsOn:
        - ${managedkafka}
  # Extra subnet made reachable from the Connect cluster (additionalSubnets).
  mkcSecondarySubnet:
    type: gcp:compute:Subnetwork
    name: mkc_secondary_subnet
    properties:
      project: ${project.projectId}
      name: my-secondary-subnetwork-00
      ipCidrRange: 10.5.0.0/16
      region: us-central1
      network: default
    options:
      dependsOn:
        - ${wait120Seconds}
  # Pub/Sub topic that the sink connector writes Kafka records into.
  cpsTopic:
    type: gcp:pubsub:Topic
    name: cps_topic
    properties:
      project: ${project.projectId}
      name: my-cps-topic
      messageRetentionDuration: 86600s
  # The Managed Kafka cluster the connector reads from.
  gmkCluster:
    type: gcp:managedkafka:Cluster
    name: gmk_cluster
    properties:
      project: ${project.projectId}
      clusterId: my-cluster
      location: us-central1
      capacityConfig:
        # Fix: vcpuCount/memoryBytes are string-typed; the float rendering
        # (3.221225472e+09) would not round-trip as the intended byte count.
        vcpuCount: '3'
        memoryBytes: '3221225472'
      gcpConfig:
        accessConfig:
          networkConfigs:
            - subnet: projects/${project.projectId}/regions/us-central1/subnetworks/default
    options:
      dependsOn:
        - ${managedkafka}
  # Source topic consumed by the connector ("topics" config below).
  gmkTopic:
    type: gcp:managedkafka:Topic
    name: gmk_topic
    properties:
      project: ${project.projectId}
      topicId: my-topic
      cluster: ${gmkCluster.clusterId}
      location: us-central1
      partitionCount: 2
      replicationFactor: 3
    options:
      dependsOn:
        - ${managedkafka}
  # Kafka Connect cluster that hosts the connector.
  mkcCluster:
    type: gcp:managedkafka:ConnectCluster
    name: mkc_cluster
    properties:
      project: ${project.projectId}
      connectClusterId: my-connect-cluster
      kafkaCluster: projects/${project.projectId}/locations/us-central1/clusters/${gmkCluster.clusterId}
      location: us-central1
      capacityConfig:
        # Fix: quoted strings instead of the float rendering 2.147483648e+10.
        vcpuCount: '12'
        memoryBytes: '21474836480'
      gcpConfig:
        accessConfig:
          networkConfigs:
            - primarySubnet: projects/${project.projectId}/regions/us-central1/subnetworks/default
              additionalSubnets:
                - ${mkcSecondarySubnet.id}
              dnsDomainNames:
                - ${gmkCluster.clusterId}.us-central1.managedkafka.${project.projectId}.cloud.goog
      labels:
        key: value
    options:
      dependsOn:
        - ${managedkafka}
  # The connector itself: CloudPubSubSinkConnector forwarding records from
  # the Kafka topic to the Pub/Sub topic, with exponential task restarts.
  example:
    type: gcp:managedkafka:Connector
    properties:
      project: ${project.projectId}
      connectorId: my-connector
      connectCluster: ${mkcCluster.connectClusterId}
      location: us-central1
      configs:
        connector.class: com.google.pubsub.kafka.sink.CloudPubSubSinkConnector
        name: my-connector
        tasks.max: '1'
        topics: ${gmkTopic.topicId}
        cps.topic: ${cpsTopic.name}
        cps.project: ${project.projectId}
        value.converter: org.apache.kafka.connect.storage.StringConverter
        key.converter: org.apache.kafka.connect.storage.StringConverter
      taskRestartPolicy:
        minimumBackoff: 60s
        maximumBackoff: 1800s
    options:
      dependsOn:
        - ${managedkafka}
Create Connector Resource
Resources are created with functions called constructors. To learn more about declaring and configuring resources, see Resources.
Constructor syntax
new Connector(name: string, args: ConnectorArgs, opts?: CustomResourceOptions);
@overload
def Connector(resource_name: str,
args: ConnectorArgs,
opts: Optional[ResourceOptions] = None)
@overload
def Connector(resource_name: str,
opts: Optional[ResourceOptions] = None,
connect_cluster: Optional[str] = None,
connector_id: Optional[str] = None,
location: Optional[str] = None,
configs: Optional[Mapping[str, str]] = None,
project: Optional[str] = None,
task_restart_policy: Optional[ConnectorTaskRestartPolicyArgs] = None)
func NewConnector(ctx *Context, name string, args ConnectorArgs, opts ...ResourceOption) (*Connector, error)
public Connector(string name, ConnectorArgs args, CustomResourceOptions? opts = null)
public Connector(String name, ConnectorArgs args)
public Connector(String name, ConnectorArgs args, CustomResourceOptions options)
type: gcp:managedkafka:Connector
properties: # The arguments to resource properties.
options: # Bag of options to control resource's behavior.
Parameters
- name string
- The unique name of the resource.
- args ConnectorArgs
- The arguments to resource properties.
- opts CustomResourceOptions
- Bag of options to control resource's behavior.
- resource_name str
- The unique name of the resource.
- args ConnectorArgs
- The arguments to resource properties.
- opts ResourceOptions
- Bag of options to control resource's behavior.
- ctx Context
- Context object for the current deployment.
- name string
- The unique name of the resource.
- args ConnectorArgs
- The arguments to resource properties.
- opts ResourceOption
- Bag of options to control resource's behavior.
- name string
- The unique name of the resource.
- args ConnectorArgs
- The arguments to resource properties.
- opts CustomResourceOptions
- Bag of options to control resource's behavior.
- name String
- The unique name of the resource.
- args ConnectorArgs
- The arguments to resource properties.
- options CustomResourceOptions
- Bag of options to control resource's behavior.
Constructor example
The following reference example uses placeholder values for all input properties.
var connectorResource = new Gcp.ManagedKafka.Connector("connectorResource", new()
{
ConnectCluster = "string",
ConnectorId = "string",
Location = "string",
Configs =
{
{ "string", "string" },
},
Project = "string",
TaskRestartPolicy = new Gcp.ManagedKafka.Inputs.ConnectorTaskRestartPolicyArgs
{
MaximumBackoff = "string",
MinimumBackoff = "string",
},
});
example, err := managedkafka.NewConnector(ctx, "connectorResource", &managedkafka.ConnectorArgs{
ConnectCluster: pulumi.String("string"),
ConnectorId: pulumi.String("string"),
Location: pulumi.String("string"),
Configs: pulumi.StringMap{
"string": pulumi.String("string"),
},
Project: pulumi.String("string"),
TaskRestartPolicy: &managedkafka.ConnectorTaskRestartPolicyArgs{
MaximumBackoff: pulumi.String("string"),
MinimumBackoff: pulumi.String("string"),
},
})
var connectorResource = new com.pulumi.gcp.managedkafka.Connector("connectorResource", com.pulumi.gcp.managedkafka.ConnectorArgs.builder()
.connectCluster("string")
.connectorId("string")
.location("string")
.configs(Map.of("string", "string"))
.project("string")
.taskRestartPolicy(ConnectorTaskRestartPolicyArgs.builder()
.maximumBackoff("string")
.minimumBackoff("string")
.build())
.build());
connector_resource = gcp.managedkafka.Connector("connectorResource",
connect_cluster="string",
connector_id="string",
location="string",
configs={
"string": "string",
},
project="string",
task_restart_policy={
"maximum_backoff": "string",
"minimum_backoff": "string",
})
const connectorResource = new gcp.managedkafka.Connector("connectorResource", {
connectCluster: "string",
connectorId: "string",
location: "string",
configs: {
string: "string",
},
project: "string",
taskRestartPolicy: {
maximumBackoff: "string",
minimumBackoff: "string",
},
});
type: gcp:managedkafka:Connector
properties:
configs:
string: string
connectCluster: string
connectorId: string
location: string
project: string
taskRestartPolicy:
maximumBackoff: string
minimumBackoff: string
Connector Resource Properties
To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.
Inputs
In Python, inputs that are objects can be passed either as argument classes or as dictionary literals.
The Connector resource accepts the following input properties:
- Connect
Cluster string - The connect cluster name.
- Connector
Id string - The ID to use for the connector, which will become the final component of the connector's name. This value is structured like:
my-connector-id
. - Location string
- ID of the location of the Kafka Connect resource. See https://cloud.google.com/managed-kafka/docs/locations for a list of supported locations.
- Configs Dictionary<string, string>
- Connector config as keys/values. The keys of the map are connector property names, for example:
connector.class
,tasks.max
,key.converter
. - Project string
- The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- TaskRestartPolicy ConnectorTaskRestartPolicy
- A policy that specifies how to restart the failed connectors/tasks in a Cluster resource. If not set, the failed connectors/tasks won't be restarted. Structure is documented below.
- Connect
Cluster string - The connect cluster name.
- Connector
Id string - The ID to use for the connector, which will become the final component of the connector's name. This value is structured like:
my-connector-id
. - Location string
- ID of the location of the Kafka Connect resource. See https://cloud.google.com/managed-kafka/docs/locations for a list of supported locations.
- Configs map[string]string
- Connector config as keys/values. The keys of the map are connector property names, for example:
connector.class
,tasks.max
,key.converter
. - Project string
- The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- TaskRestartPolicy ConnectorTaskRestartPolicyArgs
- A policy that specifies how to restart the failed connectors/tasks in a Cluster resource. If not set, the failed connectors/tasks won't be restarted. Structure is documented below.
- connect
Cluster String - The connect cluster name.
- connector
Id String - The ID to use for the connector, which will become the final component of the connector's name. This value is structured like:
my-connector-id
. - location String
- ID of the location of the Kafka Connect resource. See https://cloud.google.com/managed-kafka/docs/locations for a list of supported locations.
- configs Map<String,String>
- Connector config as keys/values. The keys of the map are connector property names, for example:
connector.class
,tasks.max
,key.converter
. - project String
- The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- taskRestartPolicy ConnectorTaskRestartPolicy
- A policy that specifies how to restart the failed connectors/tasks in a Cluster resource. If not set, the failed connectors/tasks won't be restarted. Structure is documented below.
- connect
Cluster string - The connect cluster name.
- connector
Id string - The ID to use for the connector, which will become the final component of the connector's name. This value is structured like:
my-connector-id
. - location string
- ID of the location of the Kafka Connect resource. See https://cloud.google.com/managed-kafka/docs/locations for a list of supported locations.
- configs {[key: string]: string}
- Connector config as keys/values. The keys of the map are connector property names, for example:
connector.class
,tasks.max
,key.converter
. - project string
- The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- taskRestartPolicy ConnectorTaskRestartPolicy
- A policy that specifies how to restart the failed connectors/tasks in a Cluster resource. If not set, the failed connectors/tasks won't be restarted. Structure is documented below.
- connect_
cluster str - The connect cluster name.
- connector_
id str - The ID to use for the connector, which will become the final component of the connector's name. This value is structured like:
my-connector-id
. - location str
- ID of the location of the Kafka Connect resource. See https://cloud.google.com/managed-kafka/docs/locations for a list of supported locations.
- configs Mapping[str, str]
- Connector config as keys/values. The keys of the map are connector property names, for example:
connector.class
,tasks.max
,key.converter
. - project str
- The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- task_restart_policy ConnectorTaskRestartPolicyArgs
- A policy that specifies how to restart the failed connectors/tasks in a Cluster resource. If not set, the failed connectors/tasks won't be restarted. Structure is documented below.
- connect
Cluster String - The connect cluster name.
- connector
Id String - The ID to use for the connector, which will become the final component of the connector's name. This value is structured like:
my-connector-id
. - location String
- ID of the location of the Kafka Connect resource. See https://cloud.google.com/managed-kafka/docs/locations for a list of supported locations.
- configs Map<String>
- Connector config as keys/values. The keys of the map are connector property names, for example:
connector.class
,tasks.max
,key.converter
. - project String
- The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- taskRestartPolicy Property Map
- A policy that specifies how to restart the failed connectors/tasks in a Cluster resource. If not set, the failed connectors/tasks won't be restarted. Structure is documented below.
Outputs
All input properties are implicitly available as output properties. Additionally, the Connector resource produces the following output properties:
- Id string
- The provider-assigned unique ID for this managed resource.
- Name string
- The name of the connector. The
connector
segment is used when connecting directly to the connect cluster. Structured like:projects/PROJECT_ID/locations/LOCATION/connectClusters/CONNECT_CLUSTER/connectors/CONNECTOR_ID
. - State string
- The current state of the connector. Possible values:
STATE_UNSPECIFIED
,UNASSIGNED
,RUNNING
,PAUSED
,FAILED
,RESTARTING
, andSTOPPED
.
- Id string
- The provider-assigned unique ID for this managed resource.
- Name string
- The name of the connector. The
connector
segment is used when connecting directly to the connect cluster. Structured like:projects/PROJECT_ID/locations/LOCATION/connectClusters/CONNECT_CLUSTER/connectors/CONNECTOR_ID
. - State string
- The current state of the connector. Possible values:
STATE_UNSPECIFIED
,UNASSIGNED
,RUNNING
,PAUSED
,FAILED
,RESTARTING
, andSTOPPED
.
- id String
- The provider-assigned unique ID for this managed resource.
- name String
- The name of the connector. The
connector
segment is used when connecting directly to the connect cluster. Structured like:projects/PROJECT_ID/locations/LOCATION/connectClusters/CONNECT_CLUSTER/connectors/CONNECTOR_ID
. - state String
- The current state of the connector. Possible values:
STATE_UNSPECIFIED
,UNASSIGNED
,RUNNING
,PAUSED
,FAILED
,RESTARTING
, andSTOPPED
.
- id string
- The provider-assigned unique ID for this managed resource.
- name string
- The name of the connector. The
connector
segment is used when connecting directly to the connect cluster. Structured like:projects/PROJECT_ID/locations/LOCATION/connectClusters/CONNECT_CLUSTER/connectors/CONNECTOR_ID
. - state string
- The current state of the connector. Possible values:
STATE_UNSPECIFIED
,UNASSIGNED
,RUNNING
,PAUSED
,FAILED
,RESTARTING
, andSTOPPED
.
- id str
- The provider-assigned unique ID for this managed resource.
- name str
- The name of the connector. The
connector
segment is used when connecting directly to the connect cluster. Structured like:projects/PROJECT_ID/locations/LOCATION/connectClusters/CONNECT_CLUSTER/connectors/CONNECTOR_ID
. - state str
- The current state of the connector. Possible values:
STATE_UNSPECIFIED
,UNASSIGNED
,RUNNING
,PAUSED
,FAILED
,RESTARTING
, andSTOPPED
.
- id String
- The provider-assigned unique ID for this managed resource.
- name String
- The name of the connector. The
connector
segment is used when connecting directly to the connect cluster. Structured like:projects/PROJECT_ID/locations/LOCATION/connectClusters/CONNECT_CLUSTER/connectors/CONNECTOR_ID
. - state String
- The current state of the connector. Possible values:
STATE_UNSPECIFIED
,UNASSIGNED
,RUNNING
,PAUSED
,FAILED
,RESTARTING
, andSTOPPED
.
Look up Existing Connector Resource
Get an existing Connector resource’s state with the given name, ID, and optional extra properties used to qualify the lookup.
public static get(name: string, id: Input<ID>, state?: ConnectorState, opts?: CustomResourceOptions): Connector
@staticmethod
def get(resource_name: str,
id: str,
opts: Optional[ResourceOptions] = None,
configs: Optional[Mapping[str, str]] = None,
connect_cluster: Optional[str] = None,
connector_id: Optional[str] = None,
location: Optional[str] = None,
name: Optional[str] = None,
project: Optional[str] = None,
state: Optional[str] = None,
task_restart_policy: Optional[ConnectorTaskRestartPolicyArgs] = None) -> Connector
func GetConnector(ctx *Context, name string, id IDInput, state *ConnectorState, opts ...ResourceOption) (*Connector, error)
public static Connector Get(string name, Input<string> id, ConnectorState? state, CustomResourceOptions? opts = null)
public static Connector get(String name, Output<String> id, ConnectorState state, CustomResourceOptions options)
resources:
  _:
    type: gcp:managedkafka:Connector
    get:
      id: ${id}
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- resource_name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- Configs Dictionary<string, string>
- Connector config as keys/values. The keys of the map are connector property names, for example:
connector.class
,tasks.max
,key.converter
. - Connect
Cluster string - The connect cluster name.
- Connector
Id string - The ID to use for the connector, which will become the final component of the connector's name. This value is structured like:
my-connector-id
. - Location string
- ID of the location of the Kafka Connect resource. See https://cloud.google.com/managed-kafka/docs/locations for a list of supported locations.
- Name string
- The name of the connector. The
connector
segment is used when connecting directly to the connect cluster. Structured like:projects/PROJECT_ID/locations/LOCATION/connectClusters/CONNECT_CLUSTER/connectors/CONNECTOR_ID
. - Project string
- The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- State string
- The current state of the connector. Possible values:
STATE_UNSPECIFIED
,UNASSIGNED
,RUNNING
,PAUSED
,FAILED
,RESTARTING
, andSTOPPED
. - TaskRestartPolicy ConnectorTaskRestartPolicy
- A policy that specifies how to restart the failed connectors/tasks in a Cluster resource. If not set, the failed connectors/tasks won't be restarted. Structure is documented below.
- Configs map[string]string
- Connector config as keys/values. The keys of the map are connector property names, for example:
connector.class
,tasks.max
,key.converter
. - Connect
Cluster string - The connect cluster name.
- Connector
Id string - The ID to use for the connector, which will become the final component of the connector's name. This value is structured like:
my-connector-id
. - Location string
- ID of the location of the Kafka Connect resource. See https://cloud.google.com/managed-kafka/docs/locations for a list of supported locations.
- Name string
- The name of the connector. The
connector
segment is used when connecting directly to the connect cluster. Structured like:projects/PROJECT_ID/locations/LOCATION/connectClusters/CONNECT_CLUSTER/connectors/CONNECTOR_ID
. - Project string
- The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- State string
- The current state of the connector. Possible values:
STATE_UNSPECIFIED
,UNASSIGNED
,RUNNING
,PAUSED
,FAILED
,RESTARTING
, andSTOPPED
. - TaskRestartPolicy ConnectorTaskRestartPolicyArgs
- A policy that specifies how to restart the failed connectors/tasks in a Cluster resource. If not set, the failed connectors/tasks won't be restarted. Structure is documented below.
- configs Map<String,String>
- Connector config as keys/values. The keys of the map are connector property names, for example:
connector.class
,tasks.max
,key.converter
. - connect
Cluster String - The connect cluster name.
- connector
Id String - The ID to use for the connector, which will become the final component of the connector's name. This value is structured like:
my-connector-id
. - location String
- ID of the location of the Kafka Connect resource. See https://cloud.google.com/managed-kafka/docs/locations for a list of supported locations.
- name String
- The name of the connector. The
connector
segment is used when connecting directly to the connect cluster. Structured like:projects/PROJECT_ID/locations/LOCATION/connectClusters/CONNECT_CLUSTER/connectors/CONNECTOR_ID
. - project String
- The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- state String
- The current state of the connector. Possible values:
STATE_UNSPECIFIED
,UNASSIGNED
,RUNNING
,PAUSED
,FAILED
,RESTARTING
, andSTOPPED
. - taskRestartPolicy ConnectorTaskRestartPolicy
- A policy that specifies how to restart the failed connectors/tasks in a Cluster resource. If not set, the failed connectors/tasks won't be restarted. Structure is documented below.
- configs {[key: string]: string}
- Connector config as keys/values. The keys of the map are connector property names, for example:
connector.class
,tasks.max
,key.converter
. - connect
Cluster string - The connect cluster name.
- connector
Id string - The ID to use for the connector, which will become the final component of the connector's name. This value is structured like:
my-connector-id
. - location string
- ID of the location of the Kafka Connect resource. See https://cloud.google.com/managed-kafka/docs/locations for a list of supported locations.
- name string
- The name of the connector. The
connector
segment is used when connecting directly to the connect cluster. Structured like:projects/PROJECT_ID/locations/LOCATION/connectClusters/CONNECT_CLUSTER/connectors/CONNECTOR_ID
. - project string
- The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- state string
- The current state of the connector. Possible values:
STATE_UNSPECIFIED
,UNASSIGNED
,RUNNING
,PAUSED
,FAILED
,RESTARTING
, andSTOPPED
. - taskRestartPolicy ConnectorTaskRestartPolicy
- A policy that specifies how to restart the failed connectors/tasks in a Cluster resource. If not set, the failed connectors/tasks won't be restarted. Structure is documented below.
- configs Mapping[str, str]
- Connector config as keys/values. The keys of the map are connector property names, for example:
connector.class
,tasks.max
,key.converter
. - connect_
cluster str - The connect cluster name.
- connector_
id str - The ID to use for the connector, which will become the final component of the connector's name. This value is structured like:
my-connector-id
. - location str
- ID of the location of the Kafka Connect resource. See https://cloud.google.com/managed-kafka/docs/locations for a list of supported locations.
- name str
- The name of the connector. The
connector
segment is used when connecting directly to the connect cluster. Structured like:projects/PROJECT_ID/locations/LOCATION/connectClusters/CONNECT_CLUSTER/connectors/CONNECTOR_ID
. - project str
- The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- state str
- The current state of the connector. Possible values:
STATE_UNSPECIFIED
,UNASSIGNED
,RUNNING
,PAUSED
,FAILED
,RESTARTING
, andSTOPPED
. - task_restart_policy ConnectorTaskRestartPolicyArgs
- A policy that specifies how to restart the failed connectors/tasks in a Cluster resource. If not set, the failed connectors/tasks won't be restarted. Structure is documented below.
- configs Map<String>
- Connector config as keys/values. The keys of the map are connector property names, for example:
connector.class
,tasks.max
,key.converter
. - connect
Cluster String - The connect cluster name.
- connector
Id String - The ID to use for the connector, which will become the final component of the connector's name. This value is structured like:
my-connector-id
. - location String
- ID of the location of the Kafka Connect resource. See https://cloud.google.com/managed-kafka/docs/locations for a list of supported locations.
- name String
- The name of the connector. The
connector
segment is used when connecting directly to the connect cluster. Structured like:projects/PROJECT_ID/locations/LOCATION/connectClusters/CONNECT_CLUSTER/connectors/CONNECTOR_ID
. - project String
- The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- state String
- The current state of the connector. Possible values:
STATE_UNSPECIFIED
,UNASSIGNED
,RUNNING
,PAUSED
,FAILED
,RESTARTING
, andSTOPPED
. - taskRestartPolicy Property Map
- A policy that specifies how to restart the failed connectors/tasks in a Cluster resource. If not set, the failed connectors/tasks won't be restarted. Structure is documented below.
Supporting Types
ConnectorTaskRestartPolicy, ConnectorTaskRestartPolicyArgs
- Maximum
Backoff string - The maximum amount of time to wait before retrying a failed task. This sets an upper bound for the backoff delay. A duration in seconds with up to nine fractional digits, terminated by 's'. Example: "3.5s".
- Minimum
Backoff string - The minimum amount of time to wait before retrying a failed task. This sets a lower bound for the backoff delay. A duration in seconds with up to nine fractional digits, terminated by 's'. Example: "3.5s".
- Maximum
Backoff string - The maximum amount of time to wait before retrying a failed task. This sets an upper bound for the backoff delay. A duration in seconds with up to nine fractional digits, terminated by 's'. Example: "3.5s".
- Minimum
Backoff string - The minimum amount of time to wait before retrying a failed task. This sets a lower bound for the backoff delay. A duration in seconds with up to nine fractional digits, terminated by 's'. Example: "3.5s".
- maximum
Backoff String - The maximum amount of time to wait before retrying a failed task. This sets an upper bound for the backoff delay. A duration in seconds with up to nine fractional digits, terminated by 's'. Example: "3.5s".
- minimum
Backoff String - The minimum amount of time to wait before retrying a failed task. This sets a lower bound for the backoff delay. A duration in seconds with up to nine fractional digits, terminated by 's'. Example: "3.5s".
- maximum
Backoff string - The maximum amount of time to wait before retrying a failed task. This sets an upper bound for the backoff delay. A duration in seconds with up to nine fractional digits, terminated by 's'. Example: "3.5s".
- minimum
Backoff string - The minimum amount of time to wait before retrying a failed task. This sets a lower bound for the backoff delay. A duration in seconds with up to nine fractional digits, terminated by 's'. Example: "3.5s".
- maximum_
backoff str - The maximum amount of time to wait before retrying a failed task. This sets an upper bound for the backoff delay. A duration in seconds with up to nine fractional digits, terminated by 's'. Example: "3.5s".
- minimum_
backoff str - The minimum amount of time to wait before retrying a failed task. This sets a lower bound for the backoff delay. A duration in seconds with up to nine fractional digits, terminated by 's'. Example: "3.5s".
- maximum
Backoff String - The maximum amount of time to wait before retrying a failed task. This sets an upper bound for the backoff delay. A duration in seconds with up to nine fractional digits, terminated by 's'. Example: "3.5s".
- minimum
Backoff String - The minimum amount of time to wait before retrying a failed task. This sets a lower bound for the backoff delay. A duration in seconds with up to nine fractional digits, terminated by 's'. Example: "3.5s".
Import
Connector can be imported using any of these accepted formats:
projects/{{project}}/locations/{{location}}/connectClusters/{{connect_cluster}}/connectors/{{connector_id}}
{{project}}/{{location}}/{{connect_cluster}}/{{connector_id}}
{{location}}/{{connect_cluster}}/{{connector_id}}
When using the pulumi import
command, Connector can be imported using one of the formats above. For example:
$ pulumi import gcp:managedkafka/connector:Connector default projects/{{project}}/locations/{{location}}/connectClusters/{{connect_cluster}}/connectors/{{connector_id}}
$ pulumi import gcp:managedkafka/connector:Connector default {{project}}/{{location}}/{{connect_cluster}}/{{connector_id}}
$ pulumi import gcp:managedkafka/connector:Connector default {{location}}/{{connect_cluster}}/{{connector_id}}
To learn more about importing existing cloud resources, see Importing resources.
Package Details
- Repository
- Google Cloud (GCP) Classic pulumi/pulumi-gcp
- License
- Apache-2.0
- Notes
- This Pulumi package is based on the
google-beta
Terraform Provider.