The gcp:bigquery/appProfile:AppProfile resource, part of the Pulumi GCP provider, defines how Cloud Bigtable routes traffic from an application to clusters within an instance. This guide focuses on three capabilities: multi-cluster routing with automatic failover, single-cluster routing for consistency, and priority-based traffic isolation.
App profiles belong to Bigtable instances and reference cluster IDs that must already exist in the instance configuration. The examples are intentionally small. Combine them with your own instance and cluster definitions.
Route requests to any available cluster
Applications that prioritize availability over strict consistency route requests to the nearest available cluster, allowing automatic failover across regions.
import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";
// Bigtable instance with three clusters across us-central1 zones; the app
// profile below can fail over between them.
const instance = new gcp.bigtable.Instance("instance", {
name: "bt-instance",
clusters: [
{
clusterId: "cluster-1",
zone: "us-central1-a",
numNodes: 3,
storageType: "HDD",
},
{
clusterId: "cluster-2",
zone: "us-central1-b",
numNodes: 3,
storageType: "HDD",
},
{
clusterId: "cluster-3",
zone: "us-central1-c",
numNodes: 3,
storageType: "HDD",
},
],
deletionProtection: true,
});
// App profile that routes each request to the nearest available cluster with
// automatic failover; ignoreWarnings bypasses the API's routing safety checks.
const ap = new gcp.bigtable.AppProfile("ap", {
instance: instance.name,
appProfileId: "bt-profile",
multiClusterRoutingUseAny: true,
ignoreWarnings: true,
});
import pulumi
import pulumi_gcp as gcp
# Bigtable instance with three clusters across us-central1 zones; the app
# profile below can fail over between them.
instance = gcp.bigtable.Instance("instance",
name="bt-instance",
clusters=[
{
"cluster_id": "cluster-1",
"zone": "us-central1-a",
"num_nodes": 3,
"storage_type": "HDD",
},
{
"cluster_id": "cluster-2",
"zone": "us-central1-b",
"num_nodes": 3,
"storage_type": "HDD",
},
{
"cluster_id": "cluster-3",
"zone": "us-central1-c",
"num_nodes": 3,
"storage_type": "HDD",
},
],
deletion_protection=True)
# App profile that routes each request to the nearest available cluster with
# automatic failover; ignore_warnings bypasses the API's routing safety checks.
ap = gcp.bigtable.AppProfile("ap",
instance=instance.name,
app_profile_id="bt-profile",
multi_cluster_routing_use_any=True,
ignore_warnings=True)
package main
import (
"github.com/pulumi/pulumi-gcp/sdk/v9/go/gcp/bigtable"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
// Bigtable instance with three clusters across us-central1 zones; the
// app profile below can fail over between them.
instance, err := bigtable.NewInstance(ctx, "instance", &bigtable.InstanceArgs{
Name: pulumi.String("bt-instance"),
Clusters: bigtable.InstanceClusterArray{
&bigtable.InstanceClusterArgs{
ClusterId: pulumi.String("cluster-1"),
Zone: pulumi.String("us-central1-a"),
NumNodes: pulumi.Int(3),
StorageType: pulumi.String("HDD"),
},
&bigtable.InstanceClusterArgs{
ClusterId: pulumi.String("cluster-2"),
Zone: pulumi.String("us-central1-b"),
NumNodes: pulumi.Int(3),
StorageType: pulumi.String("HDD"),
},
&bigtable.InstanceClusterArgs{
ClusterId: pulumi.String("cluster-3"),
Zone: pulumi.String("us-central1-c"),
NumNodes: pulumi.Int(3),
StorageType: pulumi.String("HDD"),
},
},
DeletionProtection: pulumi.Bool(true),
})
if err != nil {
return err
}
// App profile routing each request to the nearest available cluster with
// automatic failover; IgnoreWarnings bypasses the API's routing safety checks.
_, err = bigtable.NewAppProfile(ctx, "ap", &bigtable.AppProfileArgs{
Instance: instance.Name,
AppProfileId: pulumi.String("bt-profile"),
MultiClusterRoutingUseAny: pulumi.Bool(true),
IgnoreWarnings: pulumi.Bool(true),
})
if err != nil {
return err
}
return nil
})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;
return await Deployment.RunAsync(() =>
{
// Bigtable instance with three clusters across us-central1 zones; the app
// profile below can fail over between them.
var instance = new Gcp.BigTable.Instance("instance", new()
{
Name = "bt-instance",
Clusters = new[]
{
new Gcp.BigTable.Inputs.InstanceClusterArgs
{
ClusterId = "cluster-1",
Zone = "us-central1-a",
NumNodes = 3,
StorageType = "HDD",
},
new Gcp.BigTable.Inputs.InstanceClusterArgs
{
ClusterId = "cluster-2",
Zone = "us-central1-b",
NumNodes = 3,
StorageType = "HDD",
},
new Gcp.BigTable.Inputs.InstanceClusterArgs
{
ClusterId = "cluster-3",
Zone = "us-central1-c",
NumNodes = 3,
StorageType = "HDD",
},
},
DeletionProtection = true,
});
// App profile routing each request to the nearest available cluster with
// automatic failover; IgnoreWarnings bypasses the API's routing safety checks.
var ap = new Gcp.BigTable.AppProfile("ap", new()
{
Instance = instance.Name,
AppProfileId = "bt-profile",
MultiClusterRoutingUseAny = true,
IgnoreWarnings = true,
});
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcp.bigtable.Instance;
import com.pulumi.gcp.bigtable.InstanceArgs;
import com.pulumi.gcp.bigtable.inputs.InstanceClusterArgs;
import com.pulumi.gcp.bigtable.AppProfile;
import com.pulumi.gcp.bigtable.AppProfileArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
// Bigtable instance with three clusters across us-central1 zones; the
// app profile below can fail over between them.
var instance = new Instance("instance", InstanceArgs.builder()
.name("bt-instance")
.clusters(
InstanceClusterArgs.builder()
.clusterId("cluster-1")
.zone("us-central1-a")
.numNodes(3)
.storageType("HDD")
.build(),
InstanceClusterArgs.builder()
.clusterId("cluster-2")
.zone("us-central1-b")
.numNodes(3)
.storageType("HDD")
.build(),
InstanceClusterArgs.builder()
.clusterId("cluster-3")
.zone("us-central1-c")
.numNodes(3)
.storageType("HDD")
.build())
.deletionProtection(true)
.build());
// App profile routing each request to the nearest available cluster with
// automatic failover; ignoreWarnings bypasses the API's routing safety checks.
var ap = new AppProfile("ap", AppProfileArgs.builder()
.instance(instance.name())
.appProfileId("bt-profile")
.multiClusterRoutingUseAny(true)
.ignoreWarnings(true)
.build());
}
}
resources:
# Bigtable instance with three clusters across us-central1 zones; the app
# profile below can fail over between them.
instance:
type: gcp:bigtable:Instance
properties:
name: bt-instance
clusters:
- clusterId: cluster-1
zone: us-central1-a
numNodes: 3
storageType: HDD
- clusterId: cluster-2
zone: us-central1-b
numNodes: 3
storageType: HDD
- clusterId: cluster-3
zone: us-central1-c
numNodes: 3
storageType: HDD
deletionProtection: true
# App profile routing each request to the nearest available cluster with
# automatic failover; ignoreWarnings bypasses the API's routing safety checks.
ap:
type: gcp:bigtable:AppProfile
properties:
instance: ${instance.name}
appProfileId: bt-profile
multiClusterRoutingUseAny: true
ignoreWarnings: true
When multiClusterRoutingUseAny is true, Bigtable routes read and write requests to the nearest cluster and fails over automatically during transient errors or delays. This sacrifices read-your-writes consistency for improved availability. The instance property references the Bigtable instance, and appProfileId provides a unique identifier for this routing configuration.
Pin requests to a specific cluster
Workloads that require strong consistency or transactional writes route all traffic to a single designated cluster.
import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";
// Single-cluster Bigtable instance; all profile traffic is pinned to it below.
const instance = new gcp.bigtable.Instance("instance", {
name: "bt-instance",
clusters: [{
clusterId: "cluster-1",
zone: "us-central1-b",
numNodes: 3,
storageType: "HDD",
}],
deletionProtection: true,
});
// App profile pinned to cluster-1; allowTransactionalWrites enables
// single-row transactions, which require single-cluster routing.
const ap = new gcp.bigtable.AppProfile("ap", {
instance: instance.name,
appProfileId: "bt-profile",
singleClusterRouting: {
clusterId: "cluster-1",
allowTransactionalWrites: true,
},
ignoreWarnings: true,
});
import pulumi
import pulumi_gcp as gcp
# Single-cluster Bigtable instance; all profile traffic is pinned to it below.
instance = gcp.bigtable.Instance("instance",
name="bt-instance",
clusters=[{
"cluster_id": "cluster-1",
"zone": "us-central1-b",
"num_nodes": 3,
"storage_type": "HDD",
}],
deletion_protection=True)
# App profile pinned to cluster-1; allow_transactional_writes enables
# single-row transactions, which require single-cluster routing.
ap = gcp.bigtable.AppProfile("ap",
instance=instance.name,
app_profile_id="bt-profile",
single_cluster_routing={
"cluster_id": "cluster-1",
"allow_transactional_writes": True,
},
ignore_warnings=True)
package main
import (
"github.com/pulumi/pulumi-gcp/sdk/v9/go/gcp/bigtable"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
// Single-cluster Bigtable instance; all profile traffic is pinned to it below.
instance, err := bigtable.NewInstance(ctx, "instance", &bigtable.InstanceArgs{
Name: pulumi.String("bt-instance"),
Clusters: bigtable.InstanceClusterArray{
&bigtable.InstanceClusterArgs{
ClusterId: pulumi.String("cluster-1"),
Zone: pulumi.String("us-central1-b"),
NumNodes: pulumi.Int(3),
StorageType: pulumi.String("HDD"),
},
},
DeletionProtection: pulumi.Bool(true),
})
if err != nil {
return err
}
// App profile pinned to cluster-1; AllowTransactionalWrites enables
// single-row transactions, which require single-cluster routing.
_, err = bigtable.NewAppProfile(ctx, "ap", &bigtable.AppProfileArgs{
Instance: instance.Name,
AppProfileId: pulumi.String("bt-profile"),
SingleClusterRouting: &bigtable.AppProfileSingleClusterRoutingArgs{
ClusterId: pulumi.String("cluster-1"),
AllowTransactionalWrites: pulumi.Bool(true),
},
IgnoreWarnings: pulumi.Bool(true),
})
if err != nil {
return err
}
return nil
})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;
return await Deployment.RunAsync(() =>
{
// Single-cluster Bigtable instance; all profile traffic is pinned to it below.
var instance = new Gcp.BigTable.Instance("instance", new()
{
Name = "bt-instance",
Clusters = new[]
{
new Gcp.BigTable.Inputs.InstanceClusterArgs
{
ClusterId = "cluster-1",
Zone = "us-central1-b",
NumNodes = 3,
StorageType = "HDD",
},
},
DeletionProtection = true,
});
// App profile pinned to cluster-1; AllowTransactionalWrites enables
// single-row transactions, which require single-cluster routing.
var ap = new Gcp.BigTable.AppProfile("ap", new()
{
Instance = instance.Name,
AppProfileId = "bt-profile",
SingleClusterRouting = new Gcp.BigTable.Inputs.AppProfileSingleClusterRoutingArgs
{
ClusterId = "cluster-1",
AllowTransactionalWrites = true,
},
IgnoreWarnings = true,
});
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcp.bigtable.Instance;
import com.pulumi.gcp.bigtable.InstanceArgs;
import com.pulumi.gcp.bigtable.inputs.InstanceClusterArgs;
import com.pulumi.gcp.bigtable.AppProfile;
import com.pulumi.gcp.bigtable.AppProfileArgs;
import com.pulumi.gcp.bigtable.inputs.AppProfileSingleClusterRoutingArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
// Single-cluster Bigtable instance; all profile traffic is pinned to it below.
var instance = new Instance("instance", InstanceArgs.builder()
.name("bt-instance")
.clusters(InstanceClusterArgs.builder()
.clusterId("cluster-1")
.zone("us-central1-b")
.numNodes(3)
.storageType("HDD")
.build())
.deletionProtection(true)
.build());
// App profile pinned to cluster-1; allowTransactionalWrites enables
// single-row transactions, which require single-cluster routing.
var ap = new AppProfile("ap", AppProfileArgs.builder()
.instance(instance.name())
.appProfileId("bt-profile")
.singleClusterRouting(AppProfileSingleClusterRoutingArgs.builder()
.clusterId("cluster-1")
.allowTransactionalWrites(true)
.build())
.ignoreWarnings(true)
.build());
}
}
resources:
# Single-cluster Bigtable instance; all profile traffic is pinned to it below.
instance:
type: gcp:bigtable:Instance
properties:
name: bt-instance
clusters:
- clusterId: cluster-1
zone: us-central1-b
numNodes: 3
storageType: HDD
deletionProtection: true
# App profile pinned to cluster-1; allowTransactionalWrites enables
# single-row transactions, which require single-cluster routing.
ap:
type: gcp:bigtable:AppProfile
properties:
instance: ${instance.name}
appProfileId: bt-profile
singleClusterRouting:
clusterId: cluster-1
allowTransactionalWrites: true
ignoreWarnings: true
The singleClusterRouting property pins all requests to the specified clusterId. Setting allowTransactionalWrites to true enables strong consistency guarantees for write operations. This configuration ensures predictable behavior but sacrifices automatic failover.
Route to a subset of clusters
Some deployments need multi-cluster routing but want to limit traffic to specific clusters, such as keeping data within certain regions for compliance.
import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";
// Three-cluster Bigtable instance; the profile below routes to only two of them.
const instance = new gcp.bigtable.Instance("instance", {
name: "bt-instance",
clusters: [
{
clusterId: "cluster-1",
zone: "us-central1-a",
numNodes: 3,
storageType: "HDD",
},
{
clusterId: "cluster-2",
zone: "us-central1-b",
numNodes: 3,
storageType: "HDD",
},
{
clusterId: "cluster-3",
zone: "us-central1-c",
numNodes: 3,
storageType: "HDD",
},
],
deletionProtection: true,
});
// Multi-cluster routing restricted to cluster-1 and cluster-2; cluster-3 is
// never used for failover by this profile.
const ap = new gcp.bigtable.AppProfile("ap", {
instance: instance.name,
appProfileId: "bt-profile",
multiClusterRoutingUseAny: true,
multiClusterRoutingClusterIds: [
"cluster-1",
"cluster-2",
],
ignoreWarnings: true,
});
import pulumi
import pulumi_gcp as gcp
# Three-cluster Bigtable instance; the profile below routes to only two of them.
instance = gcp.bigtable.Instance("instance",
name="bt-instance",
clusters=[
{
"cluster_id": "cluster-1",
"zone": "us-central1-a",
"num_nodes": 3,
"storage_type": "HDD",
},
{
"cluster_id": "cluster-2",
"zone": "us-central1-b",
"num_nodes": 3,
"storage_type": "HDD",
},
{
"cluster_id": "cluster-3",
"zone": "us-central1-c",
"num_nodes": 3,
"storage_type": "HDD",
},
],
deletion_protection=True)
# Multi-cluster routing restricted to cluster-1 and cluster-2; cluster-3 is
# never used for failover by this profile.
ap = gcp.bigtable.AppProfile("ap",
instance=instance.name,
app_profile_id="bt-profile",
multi_cluster_routing_use_any=True,
multi_cluster_routing_cluster_ids=[
"cluster-1",
"cluster-2",
],
ignore_warnings=True)
package main
import (
"github.com/pulumi/pulumi-gcp/sdk/v9/go/gcp/bigtable"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
// Three-cluster Bigtable instance; the profile below routes to only two of them.
instance, err := bigtable.NewInstance(ctx, "instance", &bigtable.InstanceArgs{
Name: pulumi.String("bt-instance"),
Clusters: bigtable.InstanceClusterArray{
&bigtable.InstanceClusterArgs{
ClusterId: pulumi.String("cluster-1"),
Zone: pulumi.String("us-central1-a"),
NumNodes: pulumi.Int(3),
StorageType: pulumi.String("HDD"),
},
&bigtable.InstanceClusterArgs{
ClusterId: pulumi.String("cluster-2"),
Zone: pulumi.String("us-central1-b"),
NumNodes: pulumi.Int(3),
StorageType: pulumi.String("HDD"),
},
&bigtable.InstanceClusterArgs{
ClusterId: pulumi.String("cluster-3"),
Zone: pulumi.String("us-central1-c"),
NumNodes: pulumi.Int(3),
StorageType: pulumi.String("HDD"),
},
},
DeletionProtection: pulumi.Bool(true),
})
if err != nil {
return err
}
// Multi-cluster routing restricted to cluster-1 and cluster-2; cluster-3
// is never used for failover by this profile.
_, err = bigtable.NewAppProfile(ctx, "ap", &bigtable.AppProfileArgs{
Instance: instance.Name,
AppProfileId: pulumi.String("bt-profile"),
MultiClusterRoutingUseAny: pulumi.Bool(true),
MultiClusterRoutingClusterIds: pulumi.StringArray{
pulumi.String("cluster-1"),
pulumi.String("cluster-2"),
},
IgnoreWarnings: pulumi.Bool(true),
})
if err != nil {
return err
}
return nil
})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;
return await Deployment.RunAsync(() =>
{
// Three-cluster Bigtable instance; the profile below routes to only two of them.
var instance = new Gcp.BigTable.Instance("instance", new()
{
Name = "bt-instance",
Clusters = new[]
{
new Gcp.BigTable.Inputs.InstanceClusterArgs
{
ClusterId = "cluster-1",
Zone = "us-central1-a",
NumNodes = 3,
StorageType = "HDD",
},
new Gcp.BigTable.Inputs.InstanceClusterArgs
{
ClusterId = "cluster-2",
Zone = "us-central1-b",
NumNodes = 3,
StorageType = "HDD",
},
new Gcp.BigTable.Inputs.InstanceClusterArgs
{
ClusterId = "cluster-3",
Zone = "us-central1-c",
NumNodes = 3,
StorageType = "HDD",
},
},
DeletionProtection = true,
});
// Multi-cluster routing restricted to cluster-1 and cluster-2; cluster-3 is
// never used for failover by this profile.
var ap = new Gcp.BigTable.AppProfile("ap", new()
{
Instance = instance.Name,
AppProfileId = "bt-profile",
MultiClusterRoutingUseAny = true,
MultiClusterRoutingClusterIds = new[]
{
"cluster-1",
"cluster-2",
},
IgnoreWarnings = true,
});
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcp.bigtable.Instance;
import com.pulumi.gcp.bigtable.InstanceArgs;
import com.pulumi.gcp.bigtable.inputs.InstanceClusterArgs;
import com.pulumi.gcp.bigtable.AppProfile;
import com.pulumi.gcp.bigtable.AppProfileArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
// Three-cluster Bigtable instance; the profile below routes to only two of them.
var instance = new Instance("instance", InstanceArgs.builder()
.name("bt-instance")
.clusters(
InstanceClusterArgs.builder()
.clusterId("cluster-1")
.zone("us-central1-a")
.numNodes(3)
.storageType("HDD")
.build(),
InstanceClusterArgs.builder()
.clusterId("cluster-2")
.zone("us-central1-b")
.numNodes(3)
.storageType("HDD")
.build(),
InstanceClusterArgs.builder()
.clusterId("cluster-3")
.zone("us-central1-c")
.numNodes(3)
.storageType("HDD")
.build())
.deletionProtection(true)
.build());
// Multi-cluster routing restricted to cluster-1 and cluster-2; cluster-3 is
// never used for failover by this profile.
var ap = new AppProfile("ap", AppProfileArgs.builder()
.instance(instance.name())
.appProfileId("bt-profile")
.multiClusterRoutingUseAny(true)
.multiClusterRoutingClusterIds(
"cluster-1",
"cluster-2")
.ignoreWarnings(true)
.build());
}
}
resources:
# Three-cluster Bigtable instance; the profile below routes to only two of them.
instance:
type: gcp:bigtable:Instance
properties:
name: bt-instance
clusters:
- clusterId: cluster-1
zone: us-central1-a
numNodes: 3
storageType: HDD
- clusterId: cluster-2
zone: us-central1-b
numNodes: 3
storageType: HDD
- clusterId: cluster-3
zone: us-central1-c
numNodes: 3
storageType: HDD
deletionProtection: true
# Multi-cluster routing restricted to cluster-1 and cluster-2; cluster-3 is
# never used for failover by this profile.
ap:
type: gcp:bigtable:AppProfile
properties:
instance: ${instance.name}
appProfileId: bt-profile
multiClusterRoutingUseAny: true
multiClusterRoutingClusterIds:
- cluster-1
- cluster-2
ignoreWarnings: true
This extends multi-cluster routing by adding multiClusterRoutingClusterIds, which restricts routing to the specified clusters. Bigtable still routes to the nearest available cluster from the subset, but won’t fail over to clusters outside the list. This balances availability with data locality requirements.
Set traffic priority for resource isolation
Applications with different SLA requirements can use priority settings to ensure high-priority workloads get resources during contention.
import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";
// Single-cluster Bigtable instance used by the low-priority profile below.
const instance = new gcp.bigtable.Instance("instance", {
name: "bt-instance",
clusters: [{
clusterId: "cluster-1",
zone: "us-central1-b",
numNodes: 3,
storageType: "HDD",
}],
deletionProtection: true,
});
// App profile pinned to cluster-1 and marked PRIORITY_LOW so its traffic
// yields to higher-priority profiles during resource contention.
const ap = new gcp.bigtable.AppProfile("ap", {
instance: instance.name,
appProfileId: "bt-profile",
singleClusterRouting: {
clusterId: "cluster-1",
allowTransactionalWrites: true,
},
standardIsolation: {
priority: "PRIORITY_LOW",
},
ignoreWarnings: true,
});
import pulumi
import pulumi_gcp as gcp
# Single-cluster Bigtable instance used by the low-priority profile below.
instance = gcp.bigtable.Instance("instance",
name="bt-instance",
clusters=[{
"cluster_id": "cluster-1",
"zone": "us-central1-b",
"num_nodes": 3,
"storage_type": "HDD",
}],
deletion_protection=True)
# App profile pinned to cluster-1 and marked PRIORITY_LOW so its traffic
# yields to higher-priority profiles during resource contention.
ap = gcp.bigtable.AppProfile("ap",
instance=instance.name,
app_profile_id="bt-profile",
single_cluster_routing={
"cluster_id": "cluster-1",
"allow_transactional_writes": True,
},
standard_isolation={
"priority": "PRIORITY_LOW",
},
ignore_warnings=True)
package main
import (
"github.com/pulumi/pulumi-gcp/sdk/v9/go/gcp/bigtable"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
// Single-cluster Bigtable instance used by the low-priority profile below.
instance, err := bigtable.NewInstance(ctx, "instance", &bigtable.InstanceArgs{
Name: pulumi.String("bt-instance"),
Clusters: bigtable.InstanceClusterArray{
&bigtable.InstanceClusterArgs{
ClusterId: pulumi.String("cluster-1"),
Zone: pulumi.String("us-central1-b"),
NumNodes: pulumi.Int(3),
StorageType: pulumi.String("HDD"),
},
},
DeletionProtection: pulumi.Bool(true),
})
if err != nil {
return err
}
// App profile pinned to cluster-1 and marked PRIORITY_LOW so its traffic
// yields to higher-priority profiles during resource contention.
_, err = bigtable.NewAppProfile(ctx, "ap", &bigtable.AppProfileArgs{
Instance: instance.Name,
AppProfileId: pulumi.String("bt-profile"),
SingleClusterRouting: &bigtable.AppProfileSingleClusterRoutingArgs{
ClusterId: pulumi.String("cluster-1"),
AllowTransactionalWrites: pulumi.Bool(true),
},
StandardIsolation: &bigtable.AppProfileStandardIsolationArgs{
Priority: pulumi.String("PRIORITY_LOW"),
},
IgnoreWarnings: pulumi.Bool(true),
})
if err != nil {
return err
}
return nil
})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;
return await Deployment.RunAsync(() =>
{
// Single-cluster Bigtable instance used by the low-priority profile below.
var instance = new Gcp.BigTable.Instance("instance", new()
{
Name = "bt-instance",
Clusters = new[]
{
new Gcp.BigTable.Inputs.InstanceClusterArgs
{
ClusterId = "cluster-1",
Zone = "us-central1-b",
NumNodes = 3,
StorageType = "HDD",
},
},
DeletionProtection = true,
});
// App profile pinned to cluster-1 and marked PRIORITY_LOW so its traffic
// yields to higher-priority profiles during resource contention.
var ap = new Gcp.BigTable.AppProfile("ap", new()
{
Instance = instance.Name,
AppProfileId = "bt-profile",
SingleClusterRouting = new Gcp.BigTable.Inputs.AppProfileSingleClusterRoutingArgs
{
ClusterId = "cluster-1",
AllowTransactionalWrites = true,
},
StandardIsolation = new Gcp.BigTable.Inputs.AppProfileStandardIsolationArgs
{
Priority = "PRIORITY_LOW",
},
IgnoreWarnings = true,
});
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcp.bigtable.Instance;
import com.pulumi.gcp.bigtable.InstanceArgs;
import com.pulumi.gcp.bigtable.inputs.InstanceClusterArgs;
import com.pulumi.gcp.bigtable.AppProfile;
import com.pulumi.gcp.bigtable.AppProfileArgs;
import com.pulumi.gcp.bigtable.inputs.AppProfileSingleClusterRoutingArgs;
import com.pulumi.gcp.bigtable.inputs.AppProfileStandardIsolationArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
// Single-cluster Bigtable instance used by the low-priority profile below.
var instance = new Instance("instance", InstanceArgs.builder()
.name("bt-instance")
.clusters(InstanceClusterArgs.builder()
.clusterId("cluster-1")
.zone("us-central1-b")
.numNodes(3)
.storageType("HDD")
.build())
.deletionProtection(true)
.build());
// App profile pinned to cluster-1 and marked PRIORITY_LOW so its traffic
// yields to higher-priority profiles during resource contention.
var ap = new AppProfile("ap", AppProfileArgs.builder()
.instance(instance.name())
.appProfileId("bt-profile")
.singleClusterRouting(AppProfileSingleClusterRoutingArgs.builder()
.clusterId("cluster-1")
.allowTransactionalWrites(true)
.build())
.standardIsolation(AppProfileStandardIsolationArgs.builder()
.priority("PRIORITY_LOW")
.build())
.ignoreWarnings(true)
.build());
}
}
resources:
# Single-cluster Bigtable instance used by the low-priority profile below.
instance:
type: gcp:bigtable:Instance
properties:
name: bt-instance
clusters:
- clusterId: cluster-1
zone: us-central1-b
numNodes: 3
storageType: HDD
deletionProtection: true
# App profile pinned to cluster-1 and marked PRIORITY_LOW so its traffic
# yields to higher-priority profiles during resource contention.
ap:
type: gcp:bigtable:AppProfile
properties:
instance: ${instance.name}
appProfileId: bt-profile
singleClusterRouting:
clusterId: cluster-1
allowTransactionalWrites: true
standardIsolation:
priority: PRIORITY_LOW
ignoreWarnings: true
The standardIsolation property with priority set to PRIORITY_LOW marks this app profile’s traffic as lower priority. During resource contention, Bigtable allocates resources to higher-priority profiles first. This isolates batch jobs from latency-sensitive traffic while sharing the same instance.
Beyond these examples
These snippets focus on specific app profile features: multi-cluster and single-cluster routing, cluster selection and failover behavior, and priority-based traffic isolation. They’re intentionally minimal rather than full Bigtable deployments.
The examples reference pre-existing infrastructure such as Bigtable instances with configured clusters. They focus on configuring routing policies rather than provisioning instances or clusters.
To keep things focused, common app profile patterns are omitted, including:
- Row affinity for sticky routing (rowAffinity)
- Data Boost read-only isolation (dataBoostIsolationReadOnly)
- Custom descriptions and metadata (description)
These omissions are intentional: the goal is to illustrate how each routing policy is wired, not provide drop-in Bigtable modules. See the Bigtable AppProfile resource reference for all available configuration options.
Let's configure GCP Bigtable App Profiles
Get started with Pulumi Cloud, then follow our quick setup guide to deploy this infrastructure.
Try Pulumi Cloud for FREE.
Frequently Asked Questions
Routing Configuration
Single-cluster routing (singleClusterRouting) directs all traffic to one specific cluster. Multi-cluster routing (multiClusterRoutingUseAny) routes requests to the nearest available cluster and fails over during errors or delays, sacrificing read-your-writes consistency for improved availability. To limit failover, use multiClusterRoutingClusterIds to specify a subset of clusters. Clusters are tried in order of distance regardless of the order you specify. If left empty, all clusters are eligible. The ignoreWarnings property bypasses safety checks when creating, updating, or deleting app profiles. It's commonly used to avoid blocking operations that might trigger warnings.
Consistency & Performance
Enable rowAffinity to use sticky routing based on row keys. Each row key gets assigned to a specific cluster, improving read-your-writes consistency for most requests without sacrificing availability. Note that rowAffinity must be used with multi-cluster routing and consistency is not guaranteed. Setting multiClusterRoutingUseAny sacrifices read-your-writes consistency to improve availability. Requests route to the nearest cluster and fail over during transient errors, which can result in reading stale data.
Configuration & Naming
The appProfileId, instance, and project properties are immutable and cannot be changed after creation. The app profile ID must match the regex [_a-zA-Z0-9][-_.a-zA-Z0-9]*, starting with an underscore, letter, or number, followed by hyphens, underscores, letters, or numbers. The dataBoostIsolationReadOnly property specifies that Data Boost is intended for read-only usage.