The gcp:gkehub/feature:Feature resource, part of the Pulumi GCP provider, enables and configures fleet-wide capabilities for GKE clusters registered in a fleet. This guide focuses on three capabilities: multi-cluster networking, fleet observability, and fleet-wide policy enforcement.
Features operate on GKE clusters that must already be registered as fleet memberships. The examples are intentionally small. Combine them with your own cluster registrations and IAM configuration.
Enable multi-cluster ingress with a config cluster
Teams running workloads across multiple GKE clusters often need a single ingress controller that routes traffic to services in any cluster.
import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";
// Zonal GKE cluster that will be registered into the fleet.
const cluster = new gcp.container.Cluster("cluster", {
name: "my-cluster",
location: "us-central1-a",
initialNodeCount: 1,
});
// Register the cluster as a fleet membership; resourceLink must use the
// //container.googleapis.com/<cluster id> form.
const membership = new gcp.gkehub.Membership("membership", {
membershipId: "my-membership",
endpoint: {
gkeCluster: {
resourceLink: pulumi.interpolate`//container.googleapis.com/${cluster.id}`,
},
},
});
// Enable multi-cluster ingress, designating the membership above as the
// config cluster that holds the fleet's ingress rules.
const feature = new gcp.gkehub.Feature("feature", {
name: "multiclusteringress",
location: "global",
spec: {
multiclusteringress: {
configMembership: membership.id,
},
},
});
import pulumi
import pulumi_gcp as gcp
# Zonal GKE cluster that will be registered into the fleet.
cluster = gcp.container.Cluster("cluster",
name="my-cluster",
location="us-central1-a",
initial_node_count=1)
# Fleet membership for the cluster; resource_link must use the
# //container.googleapis.com/<cluster id> form.
membership = gcp.gkehub.Membership("membership",
membership_id="my-membership",
endpoint={
"gke_cluster": {
"resource_link": cluster.id.apply(lambda id: f"//container.googleapis.com/{id}"),
},
})
# Enable multi-cluster ingress with the membership above as the config cluster.
feature = gcp.gkehub.Feature("feature",
name="multiclusteringress",
location="global",
spec={
"multiclusteringress": {
"config_membership": membership.id,
},
})
package main
import (
"fmt"
"github.com/pulumi/pulumi-gcp/sdk/v9/go/gcp/container"
"github.com/pulumi/pulumi-gcp/sdk/v9/go/gcp/gkehub"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
// Zonal GKE cluster that will be registered into the fleet.
cluster, err := container.NewCluster(ctx, "cluster", &container.ClusterArgs{
Name: pulumi.String("my-cluster"),
Location: pulumi.String("us-central1-a"),
InitialNodeCount: pulumi.Int(1),
})
if err != nil {
return err
}
// Fleet membership for the cluster; ResourceLink must use the
// //container.googleapis.com/<cluster id> form.
membership, err := gkehub.NewMembership(ctx, "membership", &gkehub.MembershipArgs{
MembershipId: pulumi.String("my-membership"),
Endpoint: &gkehub.MembershipEndpointArgs{
GkeCluster: &gkehub.MembershipEndpointGkeClusterArgs{
ResourceLink: cluster.ID().ApplyT(func(id string) (string, error) {
return fmt.Sprintf("//container.googleapis.com/%v", id), nil
}).(pulumi.StringOutput),
},
},
})
if err != nil {
return err
}
// Enable multi-cluster ingress with the membership above as the config cluster.
_, err = gkehub.NewFeature(ctx, "feature", &gkehub.FeatureArgs{
Name: pulumi.String("multiclusteringress"),
Location: pulumi.String("global"),
Spec: &gkehub.FeatureSpecArgs{
Multiclusteringress: &gkehub.FeatureSpecMulticlusteringressArgs{
ConfigMembership: membership.ID(),
},
},
})
if err != nil {
return err
}
return nil
})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;
return await Deployment.RunAsync(() =>
{
// Zonal GKE cluster that will be registered into the fleet.
var cluster = new Gcp.Container.Cluster("cluster", new()
{
Name = "my-cluster",
Location = "us-central1-a",
InitialNodeCount = 1,
});
// Fleet membership for the cluster; ResourceLink must use the
// //container.googleapis.com/<cluster id> form.
var membership = new Gcp.GkeHub.Membership("membership", new()
{
MembershipId = "my-membership",
Endpoint = new Gcp.GkeHub.Inputs.MembershipEndpointArgs
{
GkeCluster = new Gcp.GkeHub.Inputs.MembershipEndpointGkeClusterArgs
{
ResourceLink = cluster.Id.Apply(id => $"//container.googleapis.com/{id}"),
},
},
});
// Enable multi-cluster ingress with the membership above as the config cluster.
var feature = new Gcp.GkeHub.Feature("feature", new()
{
Name = "multiclusteringress",
Location = "global",
Spec = new Gcp.GkeHub.Inputs.FeatureSpecArgs
{
Multiclusteringress = new Gcp.GkeHub.Inputs.FeatureSpecMulticlusteringressArgs
{
ConfigMembership = membership.Id,
},
},
});
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcp.container.Cluster;
import com.pulumi.gcp.container.ClusterArgs;
import com.pulumi.gcp.gkehub.Membership;
import com.pulumi.gcp.gkehub.MembershipArgs;
import com.pulumi.gcp.gkehub.inputs.MembershipEndpointArgs;
import com.pulumi.gcp.gkehub.inputs.MembershipEndpointGkeClusterArgs;
import com.pulumi.gcp.gkehub.Feature;
import com.pulumi.gcp.gkehub.FeatureArgs;
import com.pulumi.gcp.gkehub.inputs.FeatureSpecArgs;
import com.pulumi.gcp.gkehub.inputs.FeatureSpecMulticlusteringressArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
// Zonal GKE cluster that will be registered into the fleet.
var cluster = new Cluster("cluster", ClusterArgs.builder()
.name("my-cluster")
.location("us-central1-a")
.initialNodeCount(1)
.build());
// Fleet membership for the cluster; resourceLink must use the
// //container.googleapis.com/<cluster id> form.
var membership = new Membership("membership", MembershipArgs.builder()
.membershipId("my-membership")
.endpoint(MembershipEndpointArgs.builder()
.gkeCluster(MembershipEndpointGkeClusterArgs.builder()
.resourceLink(cluster.id().applyValue(_id -> String.format("//container.googleapis.com/%s", _id)))
.build())
.build())
.build());
// Enable multi-cluster ingress with the membership above as the config cluster.
var feature = new Feature("feature", FeatureArgs.builder()
.name("multiclusteringress")
.location("global")
.spec(FeatureSpecArgs.builder()
.multiclusteringress(FeatureSpecMulticlusteringressArgs.builder()
.configMembership(membership.id())
.build())
.build())
.build());
}
}
resources:
# Zonal GKE cluster that will be registered into the fleet.
cluster:
type: gcp:container:Cluster
properties:
name: my-cluster
location: us-central1-a
initialNodeCount: 1
# Fleet membership for the cluster; resourceLink must use the
# //container.googleapis.com/<cluster id> form.
membership:
type: gcp:gkehub:Membership
properties:
membershipId: my-membership
endpoint:
gkeCluster:
resourceLink: //container.googleapis.com/${cluster.id}
# Enable multi-cluster ingress with the membership above as the config cluster.
feature:
type: gcp:gkehub:Feature
properties:
name: multiclusteringress
location: global
spec:
multiclusteringress:
configMembership: ${membership.id}
The multiclusteringress spec designates one cluster as the configuration source for ingress rules. The configMembership property references a fleet membership; that cluster’s ingress controller manages routing for the entire fleet. Traffic flows to services across all registered clusters based on ingress rules defined in the config cluster.
Enable cross-cluster service discovery
Applications that span multiple clusters need DNS-based service discovery to locate endpoints across the fleet without hardcoding addresses.
import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";
// The feature name alone turns on cross-cluster service discovery;
// no spec block is needed. Labels are arbitrary metadata.
const feature = new gcp.gkehub.Feature("feature", {
name: "multiclusterservicediscovery",
location: "global",
labels: {
foo: "bar",
},
});
import pulumi
import pulumi_gcp as gcp
# The feature name alone turns on cross-cluster service discovery;
# no spec block is needed. Labels are arbitrary metadata.
feature = gcp.gkehub.Feature("feature",
name="multiclusterservicediscovery",
location="global",
labels={
"foo": "bar",
})
package main
import (
"github.com/pulumi/pulumi-gcp/sdk/v9/go/gcp/gkehub"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
// The feature name alone turns on cross-cluster service discovery;
// no Spec block is needed. Labels are arbitrary metadata.
_, err := gkehub.NewFeature(ctx, "feature", &gkehub.FeatureArgs{
Name: pulumi.String("multiclusterservicediscovery"),
Location: pulumi.String("global"),
Labels: pulumi.StringMap{
"foo": pulumi.String("bar"),
},
})
if err != nil {
return err
}
return nil
})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;
return await Deployment.RunAsync(() =>
{
// The feature name alone turns on cross-cluster service discovery;
// no Spec block is needed. Labels are arbitrary metadata.
var feature = new Gcp.GkeHub.Feature("feature", new()
{
Name = "multiclusterservicediscovery",
Location = "global",
Labels =
{
{ "foo", "bar" },
},
});
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcp.gkehub.Feature;
import com.pulumi.gcp.gkehub.FeatureArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
// The feature name alone turns on cross-cluster service discovery;
// no spec block is needed. Labels are arbitrary metadata.
var feature = new Feature("feature", FeatureArgs.builder()
.name("multiclusterservicediscovery")
.location("global")
.labels(Map.of("foo", "bar"))
.build());
}
}
resources:
# The feature name alone turns on cross-cluster service discovery;
# no spec block is needed. Labels are arbitrary metadata.
feature:
type: gcp:gkehub:Feature
properties:
name: multiclusterservicediscovery
location: global
labels:
foo: bar
Naming the feature multiclusterservicediscovery enables DNS resolution for services across cluster boundaries. Services can reference endpoints in other clusters using standard Kubernetes DNS names. The labels property adds metadata for organization and filtering.
Collect cluster logs with copy mode
Fleet observability aggregates logs from all clusters into a central location for analysis. Copy mode duplicates logs to both local and fleet-level storage.
import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";
// Fleet observability: defaultConfig applies to all clusters; COPY keeps
// logs locally while also sending them to fleet-level aggregation.
const feature = new gcp.gkehub.Feature("feature", {
name: "fleetobservability",
location: "global",
spec: {
fleetobservability: {
loggingConfig: {
defaultConfig: {
mode: "COPY",
},
},
},
},
});
import pulumi
import pulumi_gcp as gcp
# Fleet observability: default_config applies to all clusters; COPY keeps
# logs locally while also sending them to fleet-level aggregation.
feature = gcp.gkehub.Feature("feature",
name="fleetobservability",
location="global",
spec={
"fleetobservability": {
"logging_config": {
"default_config": {
"mode": "COPY",
},
},
},
})
package main
import (
"github.com/pulumi/pulumi-gcp/sdk/v9/go/gcp/gkehub"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
// Fleet observability: DefaultConfig applies to all clusters; COPY keeps
// logs locally while also sending them to fleet-level aggregation.
_, err := gkehub.NewFeature(ctx, "feature", &gkehub.FeatureArgs{
Name: pulumi.String("fleetobservability"),
Location: pulumi.String("global"),
Spec: &gkehub.FeatureSpecArgs{
Fleetobservability: &gkehub.FeatureSpecFleetobservabilityArgs{
LoggingConfig: &gkehub.FeatureSpecFleetobservabilityLoggingConfigArgs{
DefaultConfig: &gkehub.FeatureSpecFleetobservabilityLoggingConfigDefaultConfigArgs{
Mode: pulumi.String("COPY"),
},
},
},
},
})
if err != nil {
return err
}
return nil
})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;
return await Deployment.RunAsync(() =>
{
// Fleet observability: DefaultConfig applies to all clusters; COPY keeps
// logs locally while also sending them to fleet-level aggregation.
var feature = new Gcp.GkeHub.Feature("feature", new()
{
Name = "fleetobservability",
Location = "global",
Spec = new Gcp.GkeHub.Inputs.FeatureSpecArgs
{
Fleetobservability = new Gcp.GkeHub.Inputs.FeatureSpecFleetobservabilityArgs
{
LoggingConfig = new Gcp.GkeHub.Inputs.FeatureSpecFleetobservabilityLoggingConfigArgs
{
DefaultConfig = new Gcp.GkeHub.Inputs.FeatureSpecFleetobservabilityLoggingConfigDefaultConfigArgs
{
Mode = "COPY",
},
},
},
},
});
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcp.gkehub.Feature;
import com.pulumi.gcp.gkehub.FeatureArgs;
import com.pulumi.gcp.gkehub.inputs.FeatureSpecArgs;
import com.pulumi.gcp.gkehub.inputs.FeatureSpecFleetobservabilityArgs;
import com.pulumi.gcp.gkehub.inputs.FeatureSpecFleetobservabilityLoggingConfigArgs;
import com.pulumi.gcp.gkehub.inputs.FeatureSpecFleetobservabilityLoggingConfigDefaultConfigArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
// Fleet observability: defaultConfig applies to all clusters; COPY keeps
// logs locally while also sending them to fleet-level aggregation.
var feature = new Feature("feature", FeatureArgs.builder()
.name("fleetobservability")
.location("global")
.spec(FeatureSpecArgs.builder()
.fleetobservability(FeatureSpecFleetobservabilityArgs.builder()
.loggingConfig(FeatureSpecFleetobservabilityLoggingConfigArgs.builder()
.defaultConfig(FeatureSpecFleetobservabilityLoggingConfigDefaultConfigArgs.builder()
.mode("COPY")
.build())
.build())
.build())
.build())
.build());
}
}
resources:
# Fleet observability: defaultConfig applies to all clusters; COPY keeps
# logs locally while also sending them to fleet-level aggregation.
feature:
type: gcp:gkehub:Feature
properties:
name: fleetobservability
location: global
spec:
fleetobservability:
loggingConfig:
defaultConfig:
mode: COPY
The fleetobservability spec configures logging behavior. The defaultConfig applies to all clusters; mode set to “COPY” sends logs to both the cluster’s local storage and the fleet-level aggregation point. This preserves local troubleshooting while enabling fleet-wide analysis.
Configure logging for both default and scope levels
Organizations with multiple fleet scopes need separate logging configurations for default cluster logs and scope-specific logs.
import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";
// Separate logging modes: COPY duplicates default cluster logs;
// MOVE transfers scope-level logs to fleet storage without a local copy.
const feature = new gcp.gkehub.Feature("feature", {
name: "fleetobservability",
location: "global",
spec: {
fleetobservability: {
loggingConfig: {
defaultConfig: {
mode: "COPY",
},
fleetScopeLogsConfig: {
mode: "MOVE",
},
},
},
},
});
import pulumi
import pulumi_gcp as gcp
# Separate logging modes: COPY duplicates default cluster logs;
# MOVE transfers scope-level logs to fleet storage without a local copy.
feature = gcp.gkehub.Feature("feature",
name="fleetobservability",
location="global",
spec={
"fleetobservability": {
"logging_config": {
"default_config": {
"mode": "COPY",
},
"fleet_scope_logs_config": {
"mode": "MOVE",
},
},
},
})
package main
import (
"github.com/pulumi/pulumi-gcp/sdk/v9/go/gcp/gkehub"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
// Separate logging modes: COPY duplicates default cluster logs;
// MOVE transfers scope-level logs to fleet storage without a local copy.
_, err := gkehub.NewFeature(ctx, "feature", &gkehub.FeatureArgs{
Name: pulumi.String("fleetobservability"),
Location: pulumi.String("global"),
Spec: &gkehub.FeatureSpecArgs{
Fleetobservability: &gkehub.FeatureSpecFleetobservabilityArgs{
LoggingConfig: &gkehub.FeatureSpecFleetobservabilityLoggingConfigArgs{
DefaultConfig: &gkehub.FeatureSpecFleetobservabilityLoggingConfigDefaultConfigArgs{
Mode: pulumi.String("COPY"),
},
FleetScopeLogsConfig: &gkehub.FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfigArgs{
Mode: pulumi.String("MOVE"),
},
},
},
},
})
if err != nil {
return err
}
return nil
})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;
return await Deployment.RunAsync(() =>
{
// Separate logging modes: COPY duplicates default cluster logs;
// MOVE transfers scope-level logs to fleet storage without a local copy.
var feature = new Gcp.GkeHub.Feature("feature", new()
{
Name = "fleetobservability",
Location = "global",
Spec = new Gcp.GkeHub.Inputs.FeatureSpecArgs
{
Fleetobservability = new Gcp.GkeHub.Inputs.FeatureSpecFleetobservabilityArgs
{
LoggingConfig = new Gcp.GkeHub.Inputs.FeatureSpecFleetobservabilityLoggingConfigArgs
{
DefaultConfig = new Gcp.GkeHub.Inputs.FeatureSpecFleetobservabilityLoggingConfigDefaultConfigArgs
{
Mode = "COPY",
},
FleetScopeLogsConfig = new Gcp.GkeHub.Inputs.FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfigArgs
{
Mode = "MOVE",
},
},
},
},
});
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcp.gkehub.Feature;
import com.pulumi.gcp.gkehub.FeatureArgs;
import com.pulumi.gcp.gkehub.inputs.FeatureSpecArgs;
import com.pulumi.gcp.gkehub.inputs.FeatureSpecFleetobservabilityArgs;
import com.pulumi.gcp.gkehub.inputs.FeatureSpecFleetobservabilityLoggingConfigArgs;
import com.pulumi.gcp.gkehub.inputs.FeatureSpecFleetobservabilityLoggingConfigDefaultConfigArgs;
import com.pulumi.gcp.gkehub.inputs.FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfigArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
// Separate logging modes: COPY duplicates default cluster logs;
// MOVE transfers scope-level logs to fleet storage without a local copy.
var feature = new Feature("feature", FeatureArgs.builder()
.name("fleetobservability")
.location("global")
.spec(FeatureSpecArgs.builder()
.fleetobservability(FeatureSpecFleetobservabilityArgs.builder()
.loggingConfig(FeatureSpecFleetobservabilityLoggingConfigArgs.builder()
.defaultConfig(FeatureSpecFleetobservabilityLoggingConfigDefaultConfigArgs.builder()
.mode("COPY")
.build())
.fleetScopeLogsConfig(FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfigArgs.builder()
.mode("MOVE")
.build())
.build())
.build())
.build())
.build());
}
}
resources:
# Separate logging modes: COPY duplicates default cluster logs;
# MOVE transfers scope-level logs to fleet storage without a local copy.
feature:
type: gcp:gkehub:Feature
properties:
name: fleetobservability
location: global
spec:
fleetobservability:
loggingConfig:
defaultConfig:
mode: COPY
fleetScopeLogsConfig:
mode: MOVE
The loggingConfig supports both defaultConfig and fleetScopeLogsConfig. Setting defaultConfig to “COPY” duplicates cluster logs, while fleetScopeLogsConfig with “MOVE” mode transfers scope-specific logs to fleet storage without local copies. This separates logging streams for different organizational boundaries.
Apply automatic mesh management to all clusters
Service mesh features can be enabled fleet-wide so that new clusters automatically join the mesh without per-cluster configuration.
import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";
// fleetDefaultMemberConfig applies to every fleet member; MANAGEMENT_AUTOMATIC
// enables automatic mesh installation and updates for all registered clusters.
const feature = new gcp.gkehub.Feature("feature", {
name: "servicemesh",
location: "global",
fleetDefaultMemberConfig: {
mesh: {
management: "MANAGEMENT_AUTOMATIC",
},
},
});
import pulumi
import pulumi_gcp as gcp
# fleet_default_member_config applies to every fleet member; MANAGEMENT_AUTOMATIC
# enables automatic mesh installation and updates for all registered clusters.
feature = gcp.gkehub.Feature("feature",
name="servicemesh",
location="global",
fleet_default_member_config={
"mesh": {
"management": "MANAGEMENT_AUTOMATIC",
},
})
package main
import (
"github.com/pulumi/pulumi-gcp/sdk/v9/go/gcp/gkehub"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
// FleetDefaultMemberConfig applies to every fleet member; MANAGEMENT_AUTOMATIC
// enables automatic mesh installation and updates for all registered clusters.
_, err := gkehub.NewFeature(ctx, "feature", &gkehub.FeatureArgs{
Name: pulumi.String("servicemesh"),
Location: pulumi.String("global"),
FleetDefaultMemberConfig: &gkehub.FeatureFleetDefaultMemberConfigArgs{
Mesh: &gkehub.FeatureFleetDefaultMemberConfigMeshArgs{
Management: pulumi.String("MANAGEMENT_AUTOMATIC"),
},
},
})
if err != nil {
return err
}
return nil
})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;
return await Deployment.RunAsync(() =>
{
// FleetDefaultMemberConfig applies to every fleet member; MANAGEMENT_AUTOMATIC
// enables automatic mesh installation and updates for all registered clusters.
var feature = new Gcp.GkeHub.Feature("feature", new()
{
Name = "servicemesh",
Location = "global",
FleetDefaultMemberConfig = new Gcp.GkeHub.Inputs.FeatureFleetDefaultMemberConfigArgs
{
Mesh = new Gcp.GkeHub.Inputs.FeatureFleetDefaultMemberConfigMeshArgs
{
Management = "MANAGEMENT_AUTOMATIC",
},
},
});
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcp.gkehub.Feature;
import com.pulumi.gcp.gkehub.FeatureArgs;
import com.pulumi.gcp.gkehub.inputs.FeatureFleetDefaultMemberConfigArgs;
import com.pulumi.gcp.gkehub.inputs.FeatureFleetDefaultMemberConfigMeshArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
// fleetDefaultMemberConfig applies to every fleet member; MANAGEMENT_AUTOMATIC
// enables automatic mesh installation and updates for all registered clusters.
var feature = new Feature("feature", FeatureArgs.builder()
.name("servicemesh")
.location("global")
.fleetDefaultMemberConfig(FeatureFleetDefaultMemberConfigArgs.builder()
.mesh(FeatureFleetDefaultMemberConfigMeshArgs.builder()
.management("MANAGEMENT_AUTOMATIC")
.build())
.build())
.build());
}
}
resources:
# fleetDefaultMemberConfig applies to every fleet member; MANAGEMENT_AUTOMATIC
# enables automatic mesh installation and updates for all registered clusters.
feature:
type: gcp:gkehub:Feature
properties:
name: servicemesh
location: global
fleetDefaultMemberConfig:
mesh:
management: MANAGEMENT_AUTOMATIC
The fleetDefaultMemberConfig applies settings to all fleet members. The mesh property with management set to “MANAGEMENT_AUTOMATIC” enables automatic service mesh installation and updates across all registered clusters. New clusters inherit this configuration when they join the fleet.
Enforce policy bundles across the fleet
Policy Controller validates Kubernetes resources against predefined policy bundles to enforce security and compliance standards.
import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";
// Policy Controller enforcing the policy-essentials-v2022 bundle fleet-wide.
// exemptableNamespaces lists namespaces eligible for exemption at the hub level;
// exemptedNamespaces excludes namespaces from this specific bundle.
const feature = new gcp.gkehub.Feature("feature", {
name: "policycontroller",
location: "global",
fleetDefaultMemberConfig: {
policycontroller: {
policyControllerHubConfig: {
installSpec: "INSTALL_SPEC_ENABLED",
exemptableNamespaces: ["foo"],
policyContent: {
bundles: [{
bundle: "policy-essentials-v2022",
exemptedNamespaces: [
"foo",
"bar",
],
}],
templateLibrary: {
installation: "ALL",
},
},
auditIntervalSeconds: 30,
referentialRulesEnabled: true,
},
},
},
});
import pulumi
import pulumi_gcp as gcp
# Policy Controller enforcing the policy-essentials-v2022 bundle fleet-wide.
# exemptable_namespaces lists namespaces eligible for exemption at the hub level;
# exempted_namespaces excludes namespaces from this specific bundle.
feature = gcp.gkehub.Feature("feature",
name="policycontroller",
location="global",
fleet_default_member_config={
"policycontroller": {
"policy_controller_hub_config": {
"install_spec": "INSTALL_SPEC_ENABLED",
"exemptable_namespaces": ["foo"],
"policy_content": {
"bundles": [{
"bundle": "policy-essentials-v2022",
"exempted_namespaces": [
"foo",
"bar",
],
}],
"template_library": {
"installation": "ALL",
},
},
"audit_interval_seconds": 30,
"referential_rules_enabled": True,
},
},
})
package main
import (
"github.com/pulumi/pulumi-gcp/sdk/v9/go/gcp/gkehub"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
// Policy Controller enforcing the policy-essentials-v2022 bundle fleet-wide.
// ExemptableNamespaces lists namespaces eligible for exemption at the hub level;
// ExemptedNamespaces excludes namespaces from this specific bundle.
_, err := gkehub.NewFeature(ctx, "feature", &gkehub.FeatureArgs{
Name: pulumi.String("policycontroller"),
Location: pulumi.String("global"),
FleetDefaultMemberConfig: &gkehub.FeatureFleetDefaultMemberConfigArgs{
Policycontroller: &gkehub.FeatureFleetDefaultMemberConfigPolicycontrollerArgs{
PolicyControllerHubConfig: &gkehub.FeatureFleetDefaultMemberConfigPolicycontrollerPolicyControllerHubConfigArgs{
InstallSpec: pulumi.String("INSTALL_SPEC_ENABLED"),
ExemptableNamespaces: pulumi.StringArray{
pulumi.String("foo"),
},
PolicyContent: &gkehub.FeatureFleetDefaultMemberConfigPolicycontrollerPolicyControllerHubConfigPolicyContentArgs{
Bundles: gkehub.FeatureFleetDefaultMemberConfigPolicycontrollerPolicyControllerHubConfigPolicyContentBundleArray{
&gkehub.FeatureFleetDefaultMemberConfigPolicycontrollerPolicyControllerHubConfigPolicyContentBundleArgs{
Bundle: pulumi.String("policy-essentials-v2022"),
ExemptedNamespaces: pulumi.StringArray{
pulumi.String("foo"),
pulumi.String("bar"),
},
},
},
TemplateLibrary: &gkehub.FeatureFleetDefaultMemberConfigPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibraryArgs{
Installation: pulumi.String("ALL"),
},
},
AuditIntervalSeconds: pulumi.Int(30),
ReferentialRulesEnabled: pulumi.Bool(true),
},
},
},
})
if err != nil {
return err
}
return nil
})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;
return await Deployment.RunAsync(() =>
{
// Policy Controller enforcing the policy-essentials-v2022 bundle fleet-wide.
// ExemptableNamespaces lists namespaces eligible for exemption at the hub level;
// ExemptedNamespaces excludes namespaces from this specific bundle.
var feature = new Gcp.GkeHub.Feature("feature", new()
{
Name = "policycontroller",
Location = "global",
FleetDefaultMemberConfig = new Gcp.GkeHub.Inputs.FeatureFleetDefaultMemberConfigArgs
{
Policycontroller = new Gcp.GkeHub.Inputs.FeatureFleetDefaultMemberConfigPolicycontrollerArgs
{
PolicyControllerHubConfig = new Gcp.GkeHub.Inputs.FeatureFleetDefaultMemberConfigPolicycontrollerPolicyControllerHubConfigArgs
{
InstallSpec = "INSTALL_SPEC_ENABLED",
ExemptableNamespaces = new[]
{
"foo",
},
PolicyContent = new Gcp.GkeHub.Inputs.FeatureFleetDefaultMemberConfigPolicycontrollerPolicyControllerHubConfigPolicyContentArgs
{
Bundles = new[]
{
new Gcp.GkeHub.Inputs.FeatureFleetDefaultMemberConfigPolicycontrollerPolicyControllerHubConfigPolicyContentBundleArgs
{
Bundle = "policy-essentials-v2022",
ExemptedNamespaces = new[]
{
"foo",
"bar",
},
},
},
TemplateLibrary = new Gcp.GkeHub.Inputs.FeatureFleetDefaultMemberConfigPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibraryArgs
{
Installation = "ALL",
},
},
AuditIntervalSeconds = 30,
ReferentialRulesEnabled = true,
},
},
},
});
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcp.gkehub.Feature;
import com.pulumi.gcp.gkehub.FeatureArgs;
import com.pulumi.gcp.gkehub.inputs.FeatureFleetDefaultMemberConfigArgs;
import com.pulumi.gcp.gkehub.inputs.FeatureFleetDefaultMemberConfigPolicycontrollerArgs;
import com.pulumi.gcp.gkehub.inputs.FeatureFleetDefaultMemberConfigPolicycontrollerPolicyControllerHubConfigArgs;
import com.pulumi.gcp.gkehub.inputs.FeatureFleetDefaultMemberConfigPolicycontrollerPolicyControllerHubConfigPolicyContentArgs;
// Fix: this class is used in stack() below but was missing from the imports,
// so the example did not compile.
import com.pulumi.gcp.gkehub.inputs.FeatureFleetDefaultMemberConfigPolicycontrollerPolicyControllerHubConfigPolicyContentBundleArgs;
import com.pulumi.gcp.gkehub.inputs.FeatureFleetDefaultMemberConfigPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibraryArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
// Policy Controller enforcing the policy-essentials-v2022 bundle fleet-wide.
// exemptableNamespaces lists namespaces eligible for exemption at the hub level;
// exemptedNamespaces excludes namespaces from this specific bundle.
var feature = new Feature("feature", FeatureArgs.builder()
.name("policycontroller")
.location("global")
.fleetDefaultMemberConfig(FeatureFleetDefaultMemberConfigArgs.builder()
.policycontroller(FeatureFleetDefaultMemberConfigPolicycontrollerArgs.builder()
.policyControllerHubConfig(FeatureFleetDefaultMemberConfigPolicycontrollerPolicyControllerHubConfigArgs.builder()
.installSpec("INSTALL_SPEC_ENABLED")
.exemptableNamespaces("foo")
.policyContent(FeatureFleetDefaultMemberConfigPolicycontrollerPolicyControllerHubConfigPolicyContentArgs.builder()
.bundles(FeatureFleetDefaultMemberConfigPolicycontrollerPolicyControllerHubConfigPolicyContentBundleArgs.builder()
.bundle("policy-essentials-v2022")
.exemptedNamespaces(
"foo",
"bar")
.build())
.templateLibrary(FeatureFleetDefaultMemberConfigPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibraryArgs.builder()
.installation("ALL")
.build())
.build())
.auditIntervalSeconds(30)
.referentialRulesEnabled(true)
.build())
.build())
.build())
.build());
}
}
resources:
# Policy Controller enforcing the policy-essentials-v2022 bundle fleet-wide.
# exemptableNamespaces lists namespaces eligible for exemption at the hub level;
# exemptedNamespaces excludes namespaces from this specific bundle.
feature:
type: gcp:gkehub:Feature
properties:
name: policycontroller
location: global
fleetDefaultMemberConfig:
policycontroller:
policyControllerHubConfig:
installSpec: INSTALL_SPEC_ENABLED
exemptableNamespaces:
- foo
policyContent:
bundles:
- bundle: policy-essentials-v2022
exemptedNamespaces:
- foo
- bar
templateLibrary:
installation: ALL
auditIntervalSeconds: 30
referentialRulesEnabled: true
The policycontroller configuration defines which policy bundles to enforce. The bundles array specifies policy sets like “policy-essentials-v2022”; exemptedNamespaces excludes specific namespaces from bundle enforcement. The exemptableNamespaces property at the hub level defines which namespaces can be exempted. The installSpec controls whether the controller is enabled or suspended.
Configure policy controller with resource limits and monitoring
Production policy deployments often require custom resource allocation, pod scheduling rules, and monitoring integration.
import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";
// Policy Controller with multiple bundles, per-component deployment tuning,
// and Prometheus monitoring. INSTALL_SPEC_SUSPENDED keeps the controller
// installed but not enforcing.
const feature = new gcp.gkehub.Feature("feature", {
name: "policycontroller",
location: "global",
fleetDefaultMemberConfig: {
policycontroller: {
policyControllerHubConfig: {
installSpec: "INSTALL_SPEC_SUSPENDED",
policyContent: {
bundles: [
{
bundle: "pci-dss-v3.2.1",
exemptedNamespaces: [
"baz",
"bar",
],
},
{
bundle: "nist-sp-800-190",
exemptedNamespaces: [],
},
],
templateLibrary: {
installation: "ALL",
},
},
constraintViolationLimit: 50,
referentialRulesEnabled: true,
logDeniesEnabled: true,
mutationEnabled: true,
// Per-component overrides: replica count / anti-affinity for the
// admission webhook, resource limits and tolerations for the auditor.
deploymentConfigs: [
{
component: "admission",
replicaCount: 2,
podAffinity: "ANTI_AFFINITY",
},
{
component: "audit",
containerResources: {
limits: {
memory: "1Gi",
cpu: "1.5",
},
requests: {
memory: "500Mi",
cpu: "150m",
},
},
podTolerations: [{
key: "key1",
operator: "Equal",
value: "value1",
effect: "NoSchedule",
}],
},
],
monitoring: {
backends: ["PROMETHEUS"],
},
},
},
},
});
import pulumi
import pulumi_gcp as gcp
# Policy Controller with multiple bundles, per-component deployment tuning,
# and Prometheus monitoring. INSTALL_SPEC_SUSPENDED keeps the controller
# installed but not enforcing.
feature = gcp.gkehub.Feature("feature",
name="policycontroller",
location="global",
fleet_default_member_config={
"policycontroller": {
"policy_controller_hub_config": {
"install_spec": "INSTALL_SPEC_SUSPENDED",
"policy_content": {
"bundles": [
{
"bundle": "pci-dss-v3.2.1",
"exempted_namespaces": [
"baz",
"bar",
],
},
{
"bundle": "nist-sp-800-190",
"exempted_namespaces": [],
},
],
"template_library": {
"installation": "ALL",
},
},
"constraint_violation_limit": 50,
"referential_rules_enabled": True,
"log_denies_enabled": True,
"mutation_enabled": True,
# Per-component overrides: replica count / anti-affinity for the
# admission webhook, resource limits and tolerations for the auditor.
"deployment_configs": [
{
"component": "admission",
"replica_count": 2,
"pod_affinity": "ANTI_AFFINITY",
},
{
"component": "audit",
"container_resources": {
"limits": {
"memory": "1Gi",
"cpu": "1.5",
},
"requests": {
"memory": "500Mi",
"cpu": "150m",
},
},
"pod_tolerations": [{
"key": "key1",
"operator": "Equal",
"value": "value1",
"effect": "NoSchedule",
}],
},
],
"monitoring": {
"backends": ["PROMETHEUS"],
},
},
},
})
package main
import (
"github.com/pulumi/pulumi-gcp/sdk/v9/go/gcp/gkehub"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
// Policy Controller with multiple bundles, per-component deployment tuning,
// and Prometheus monitoring. INSTALL_SPEC_SUSPENDED keeps the controller
// installed but not enforcing.
_, err := gkehub.NewFeature(ctx, "feature", &gkehub.FeatureArgs{
Name: pulumi.String("policycontroller"),
Location: pulumi.String("global"),
FleetDefaultMemberConfig: &gkehub.FeatureFleetDefaultMemberConfigArgs{
Policycontroller: &gkehub.FeatureFleetDefaultMemberConfigPolicycontrollerArgs{
PolicyControllerHubConfig: &gkehub.FeatureFleetDefaultMemberConfigPolicycontrollerPolicyControllerHubConfigArgs{
InstallSpec: pulumi.String("INSTALL_SPEC_SUSPENDED"),
PolicyContent: &gkehub.FeatureFleetDefaultMemberConfigPolicycontrollerPolicyControllerHubConfigPolicyContentArgs{
Bundles: gkehub.FeatureFleetDefaultMemberConfigPolicycontrollerPolicyControllerHubConfigPolicyContentBundleArray{
&gkehub.FeatureFleetDefaultMemberConfigPolicycontrollerPolicyControllerHubConfigPolicyContentBundleArgs{
Bundle: pulumi.String("pci-dss-v3.2.1"),
ExemptedNamespaces: pulumi.StringArray{
pulumi.String("baz"),
pulumi.String("bar"),
},
},
&gkehub.FeatureFleetDefaultMemberConfigPolicycontrollerPolicyControllerHubConfigPolicyContentBundleArgs{
Bundle: pulumi.String("nist-sp-800-190"),
ExemptedNamespaces: pulumi.StringArray{},
},
},
TemplateLibrary: &gkehub.FeatureFleetDefaultMemberConfigPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibraryArgs{
Installation: pulumi.String("ALL"),
},
},
ConstraintViolationLimit: pulumi.Int(50),
ReferentialRulesEnabled: pulumi.Bool(true),
LogDeniesEnabled: pulumi.Bool(true),
MutationEnabled: pulumi.Bool(true),
// Per-component overrides: replica count / anti-affinity for the
// admission webhook, resource limits and tolerations for the auditor.
DeploymentConfigs: gkehub.FeatureFleetDefaultMemberConfigPolicycontrollerPolicyControllerHubConfigDeploymentConfigArray{
&gkehub.FeatureFleetDefaultMemberConfigPolicycontrollerPolicyControllerHubConfigDeploymentConfigArgs{
Component: pulumi.String("admission"),
ReplicaCount: pulumi.Int(2),
PodAffinity: pulumi.String("ANTI_AFFINITY"),
},
&gkehub.FeatureFleetDefaultMemberConfigPolicycontrollerPolicyControllerHubConfigDeploymentConfigArgs{
Component: pulumi.String("audit"),
ContainerResources: &gkehub.FeatureFleetDefaultMemberConfigPolicycontrollerPolicyControllerHubConfigDeploymentConfigContainerResourcesArgs{
Limits: &gkehub.FeatureFleetDefaultMemberConfigPolicycontrollerPolicyControllerHubConfigDeploymentConfigContainerResourcesLimitsArgs{
Memory: pulumi.String("1Gi"),
Cpu: pulumi.String("1.5"),
},
Requests: &gkehub.FeatureFleetDefaultMemberConfigPolicycontrollerPolicyControllerHubConfigDeploymentConfigContainerResourcesRequestsArgs{
Memory: pulumi.String("500Mi"),
Cpu: pulumi.String("150m"),
},
},
PodTolerations: gkehub.FeatureFleetDefaultMemberConfigPolicycontrollerPolicyControllerHubConfigDeploymentConfigPodTolerationArray{
&gkehub.FeatureFleetDefaultMemberConfigPolicycontrollerPolicyControllerHubConfigDeploymentConfigPodTolerationArgs{
Key: pulumi.String("key1"),
Operator: pulumi.String("Equal"),
Value: pulumi.String("value1"),
Effect: pulumi.String("NoSchedule"),
},
},
},
},
Monitoring: &gkehub.FeatureFleetDefaultMemberConfigPolicycontrollerPolicyControllerHubConfigMonitoringArgs{
Backends: pulumi.StringArray{
pulumi.String("PROMETHEUS"),
},
},
},
},
},
})
if err != nil {
return err
}
return nil
})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;

// Enables the Policy Controller fleet feature with a fleet-wide default member
// configuration: two policy bundles, the full constraint template library,
// per-component deployment tuning, and Prometheus metrics export.
return await Deployment.RunAsync(() =>
{
    var feature = new Gcp.GkeHub.Feature("feature", new()
    {
        Name = "policycontroller",
        Location = "global",
        FleetDefaultMemberConfig = new Gcp.GkeHub.Inputs.FeatureFleetDefaultMemberConfigArgs
        {
            Policycontroller = new Gcp.GkeHub.Inputs.FeatureFleetDefaultMemberConfigPolicycontrollerArgs
            {
                PolicyControllerHubConfig = new Gcp.GkeHub.Inputs.FeatureFleetDefaultMemberConfigPolicycontrollerPolicyControllerHubConfigArgs
                {
                    // NOTE(review): presumably installs Policy Controller in a
                    // suspended (non-enforcing) state — confirm against GKE docs.
                    InstallSpec = "INSTALL_SPEC_SUSPENDED",
                    PolicyContent = new Gcp.GkeHub.Inputs.FeatureFleetDefaultMemberConfigPolicycontrollerPolicyControllerHubConfigPolicyContentArgs
                    {
                        Bundles = new[]
                        {
                            // PCI-DSS bundle, with two namespaces exempted from evaluation.
                            new Gcp.GkeHub.Inputs.FeatureFleetDefaultMemberConfigPolicycontrollerPolicyControllerHubConfigPolicyContentBundleArgs
                            {
                                Bundle = "pci-dss-v3.2.1",
                                ExemptedNamespaces = new[]
                                {
                                    "baz",
                                    "bar",
                                },
                            },
                            // NIST bundle applied with no namespace exemptions.
                            new Gcp.GkeHub.Inputs.FeatureFleetDefaultMemberConfigPolicycontrollerPolicyControllerHubConfigPolicyContentBundleArgs
                            {
                                Bundle = "nist-sp-800-190",
                                ExemptedNamespaces = new() { },
                            },
                        },
                        // Install the complete constraint template library.
                        TemplateLibrary = new Gcp.GkeHub.Inputs.FeatureFleetDefaultMemberConfigPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibraryArgs
                        {
                            Installation = "ALL",
                        },
                    },
                    ConstraintViolationLimit = 50,
                    ReferentialRulesEnabled = true,
                    LogDeniesEnabled = true,
                    MutationEnabled = true,
                    // Per-component tuning: replica count / anti-affinity for the
                    // admission component, resources and tolerations for audit.
                    DeploymentConfigs = new[]
                    {
                        new Gcp.GkeHub.Inputs.FeatureFleetDefaultMemberConfigPolicycontrollerPolicyControllerHubConfigDeploymentConfigArgs
                        {
                            Component = "admission",
                            ReplicaCount = 2,
                            PodAffinity = "ANTI_AFFINITY",
                        },
                        new Gcp.GkeHub.Inputs.FeatureFleetDefaultMemberConfigPolicycontrollerPolicyControllerHubConfigDeploymentConfigArgs
                        {
                            Component = "audit",
                            ContainerResources = new Gcp.GkeHub.Inputs.FeatureFleetDefaultMemberConfigPolicycontrollerPolicyControllerHubConfigDeploymentConfigContainerResourcesArgs
                            {
                                Limits = new Gcp.GkeHub.Inputs.FeatureFleetDefaultMemberConfigPolicycontrollerPolicyControllerHubConfigDeploymentConfigContainerResourcesLimitsArgs
                                {
                                    Memory = "1Gi",
                                    Cpu = "1.5",
                                },
                                Requests = new Gcp.GkeHub.Inputs.FeatureFleetDefaultMemberConfigPolicycontrollerPolicyControllerHubConfigDeploymentConfigContainerResourcesRequestsArgs
                                {
                                    Memory = "500Mi",
                                    Cpu = "150m",
                                },
                            },
                            // Allow audit pods onto nodes tainted key1=value1:NoSchedule.
                            PodTolerations = new[]
                            {
                                new Gcp.GkeHub.Inputs.FeatureFleetDefaultMemberConfigPolicycontrollerPolicyControllerHubConfigDeploymentConfigPodTolerationArgs
                                {
                                    Key = "key1",
                                    Operator = "Equal",
                                    Value = "value1",
                                    Effect = "NoSchedule",
                                },
                            },
                        },
                    },
                    // Export policy-violation metrics to the Prometheus backend.
                    Monitoring = new Gcp.GkeHub.Inputs.FeatureFleetDefaultMemberConfigPolicycontrollerPolicyControllerHubConfigMonitoringArgs
                    {
                        Backends = new[]
                        {
                            "PROMETHEUS",
                        },
                    },
                },
            },
        },
    });
});
package generated_program;

import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcp.gkehub.Feature;
import com.pulumi.gcp.gkehub.FeatureArgs;
import com.pulumi.gcp.gkehub.inputs.FeatureFleetDefaultMemberConfigArgs;
import com.pulumi.gcp.gkehub.inputs.FeatureFleetDefaultMemberConfigPolicycontrollerArgs;
import com.pulumi.gcp.gkehub.inputs.FeatureFleetDefaultMemberConfigPolicycontrollerPolicyControllerHubConfigArgs;
import com.pulumi.gcp.gkehub.inputs.FeatureFleetDefaultMemberConfigPolicycontrollerPolicyControllerHubConfigDeploymentConfigArgs;
import com.pulumi.gcp.gkehub.inputs.FeatureFleetDefaultMemberConfigPolicycontrollerPolicyControllerHubConfigDeploymentConfigContainerResourcesArgs;
import com.pulumi.gcp.gkehub.inputs.FeatureFleetDefaultMemberConfigPolicycontrollerPolicyControllerHubConfigDeploymentConfigContainerResourcesLimitsArgs;
import com.pulumi.gcp.gkehub.inputs.FeatureFleetDefaultMemberConfigPolicycontrollerPolicyControllerHubConfigDeploymentConfigContainerResourcesRequestsArgs;
import com.pulumi.gcp.gkehub.inputs.FeatureFleetDefaultMemberConfigPolicycontrollerPolicyControllerHubConfigDeploymentConfigPodTolerationArgs;
import com.pulumi.gcp.gkehub.inputs.FeatureFleetDefaultMemberConfigPolicycontrollerPolicyControllerHubConfigMonitoringArgs;
import com.pulumi.gcp.gkehub.inputs.FeatureFleetDefaultMemberConfigPolicycontrollerPolicyControllerHubConfigPolicyContentArgs;
import com.pulumi.gcp.gkehub.inputs.FeatureFleetDefaultMemberConfigPolicycontrollerPolicyControllerHubConfigPolicyContentBundleArgs;
import com.pulumi.gcp.gkehub.inputs.FeatureFleetDefaultMemberConfigPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibraryArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;

/**
 * Enables the Policy Controller fleet feature with a fleet-wide default member
 * configuration: two policy bundles, the full constraint template library,
 * per-component deployment tuning, and Prometheus metrics export.
 *
 * <p>Fix: the original example used six {@code *Args} input classes (bundle,
 * deployment config, container resources, limits, requests, pod toleration)
 * without importing them, so it did not compile. The missing imports are added
 * above; the resource configuration itself is unchanged.
 */
public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        var feature = new Feature("feature", FeatureArgs.builder()
            .name("policycontroller")
            .location("global")
            .fleetDefaultMemberConfig(FeatureFleetDefaultMemberConfigArgs.builder()
                .policycontroller(FeatureFleetDefaultMemberConfigPolicycontrollerArgs.builder()
                    .policyControllerHubConfig(FeatureFleetDefaultMemberConfigPolicycontrollerPolicyControllerHubConfigArgs.builder()
                        // NOTE(review): presumably installs Policy Controller in a
                        // suspended (non-enforcing) state — confirm against GKE docs.
                        .installSpec("INSTALL_SPEC_SUSPENDED")
                        .policyContent(FeatureFleetDefaultMemberConfigPolicycontrollerPolicyControllerHubConfigPolicyContentArgs.builder()
                            .bundles(
                                // PCI-DSS bundle, with two namespaces exempted from evaluation.
                                FeatureFleetDefaultMemberConfigPolicycontrollerPolicyControllerHubConfigPolicyContentBundleArgs.builder()
                                    .bundle("pci-dss-v3.2.1")
                                    .exemptedNamespaces(
                                        "baz",
                                        "bar")
                                    .build(),
                                // NIST bundle applied with no namespace exemptions.
                                FeatureFleetDefaultMemberConfigPolicycontrollerPolicyControllerHubConfigPolicyContentBundleArgs.builder()
                                    .bundle("nist-sp-800-190")
                                    .exemptedNamespaces()
                                    .build())
                            // Install the complete constraint template library.
                            .templateLibrary(FeatureFleetDefaultMemberConfigPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibraryArgs.builder()
                                .installation("ALL")
                                .build())
                            .build())
                        .constraintViolationLimit(50)
                        .referentialRulesEnabled(true)
                        .logDeniesEnabled(true)
                        .mutationEnabled(true)
                        // Per-component tuning: replica count / anti-affinity for the
                        // admission component, resources and tolerations for audit.
                        .deploymentConfigs(
                            FeatureFleetDefaultMemberConfigPolicycontrollerPolicyControllerHubConfigDeploymentConfigArgs.builder()
                                .component("admission")
                                .replicaCount(2)
                                .podAffinity("ANTI_AFFINITY")
                                .build(),
                            FeatureFleetDefaultMemberConfigPolicycontrollerPolicyControllerHubConfigDeploymentConfigArgs.builder()
                                .component("audit")
                                .containerResources(FeatureFleetDefaultMemberConfigPolicycontrollerPolicyControllerHubConfigDeploymentConfigContainerResourcesArgs.builder()
                                    .limits(FeatureFleetDefaultMemberConfigPolicycontrollerPolicyControllerHubConfigDeploymentConfigContainerResourcesLimitsArgs.builder()
                                        .memory("1Gi")
                                        .cpu("1.5")
                                        .build())
                                    .requests(FeatureFleetDefaultMemberConfigPolicycontrollerPolicyControllerHubConfigDeploymentConfigContainerResourcesRequestsArgs.builder()
                                        .memory("500Mi")
                                        .cpu("150m")
                                        .build())
                                    .build())
                                // Allow audit pods onto nodes tainted key1=value1:NoSchedule.
                                .podTolerations(FeatureFleetDefaultMemberConfigPolicycontrollerPolicyControllerHubConfigDeploymentConfigPodTolerationArgs.builder()
                                    .key("key1")
                                    .operator("Equal")
                                    .value("value1")
                                    .effect("NoSchedule")
                                    .build())
                                .build())
                        // Export policy-violation metrics to the Prometheus backend.
                        .monitoring(FeatureFleetDefaultMemberConfigPolicycontrollerPolicyControllerHubConfigMonitoringArgs.builder()
                            .backends("PROMETHEUS")
                            .build())
                        .build())
                    .build())
                .build())
            .build());
    }
}
resources:
  # Policy Controller fleet feature with a fleet-wide default member config:
  # two policy bundles, the full template library, per-component deployment
  # tuning, and Prometheus metrics export.
  feature:
    type: gcp:gkehub:Feature
    properties:
      name: policycontroller
      location: global
      fleetDefaultMemberConfig:
        policycontroller:
          policyControllerHubConfig:
            # NOTE(review): presumably installs Policy Controller in a
            # suspended (non-enforcing) state — confirm against GKE docs.
            installSpec: INSTALL_SPEC_SUSPENDED
            policyContent:
              bundles:
                # PCI-DSS bundle, with two namespaces exempted from evaluation.
                - bundle: pci-dss-v3.2.1
                  exemptedNamespaces:
                    - baz
                    - bar
                # NIST bundle applied with no namespace exemptions.
                - bundle: nist-sp-800-190
                  exemptedNamespaces: []
              # Install the complete constraint template library.
              templateLibrary:
                installation: ALL
            constraintViolationLimit: 50
            referentialRulesEnabled: true
            logDeniesEnabled: true
            mutationEnabled: true
            # Per-component tuning: replica count / anti-affinity for the
            # admission component, resources and tolerations for audit.
            deploymentConfigs:
              - component: admission
                replicaCount: 2
                podAffinity: ANTI_AFFINITY
              - component: audit
                containerResources:
                  limits:
                    memory: 1Gi
                    cpu: '1.5'
                  requests:
                    memory: 500Mi
                    cpu: 150m
                # Allow audit pods onto nodes tainted key1=value1:NoSchedule.
                podTolerations:
                  - key: key1
                    operator: Equal
                    value: value1
                    effect: NoSchedule
            # Export policy-violation metrics to the Prometheus backend.
            monitoring:
              backends:
                - PROMETHEUS
The deploymentConfigs array tunes policy controller components. Each entry specifies a component (“admission” or “audit”), resource limits, and scheduling constraints. The containerResources block sets CPU and memory limits; podTolerations allows scheduling on tainted nodes. The monitoring property enables backends like Prometheus for policy violation metrics.
Beyond these examples
These snippets focus on specific fleet feature capabilities: multi-cluster networking (ingress and service discovery), fleet observability and logging, and fleet-wide policy enforcement and service mesh. They’re intentionally minimal rather than full fleet configurations.
The examples assume pre-existing infrastructure such as GKE clusters registered as fleet memberships and IAM permissions for fleet management. They focus on enabling features rather than provisioning the underlying clusters.
To keep things focused, common feature patterns are omitted, including:
- Feature-specific state monitoring (resourceStates, states outputs)
- Config management and GitOps integration (configmanagement)
- Cluster upgrade orchestration (clusterupgrade)
- RBAC role binding actuation (rbacrolebindingactuation)
- Workload identity federation (workloadidentity)
These omissions are intentional: the goal is to illustrate how each fleet feature is wired, not provide drop-in fleet modules. See the GKE Hub Feature resource reference for all available configuration options.
Let's configure GCP GKE Hub Features
Get started with Pulumi Cloud, then follow our quick setup guide to deploy this infrastructure.
Try Pulumi Cloud for FREE.
Frequently Asked Questions
Configuration & Feature Types
multiclusteringress, multiclusterservicediscovery, servicemesh, fleetobservability, configmanagement, policycontroller, clusterupgrade, rbacrolebindingactuation, and workloadidentity. Each feature type has a different configuration structure in the spec or fleetDefaultMemberConfig properties. To enable multi-cluster ingress, set name to multiclusteringress, set location to global, and configure spec.multiclusteringress.configMembership with a membership ID that points to your GKE cluster membership.
Labels & Metadata
The labels field is non-authoritative and only manages labels present in your configuration. To see all labels on the resource (including those set by other clients and services), use effectiveLabels. In short: labels contains only the labels you configure directly; effectiveLabels includes all labels on the resource in GCP; pulumiLabels combines your configured labels with default provider labels.
Immutability & Lifecycle
The location, name, and project properties are immutable. Changing any of these requires replacing the resource. Because the name property is immutable, you'll need to delete and recreate the feature with a new name.
Fleet Observability & Specific Features
COPY mode duplicates logs to the destination, while MOVE mode relocates them. You can use COPY for defaultConfig and MOVE for fleetScopeLogsConfig within the same feature. Both defaultConfig and fleetScopeLogsConfig live under spec.fleetobservability.loggingConfig, and each can have its own mode setting. For workload identity, create a gcp.iam.WorkloadIdentityPool with mode set to TRUST_DOMAIN, then reference its name in spec.workloadidentity.scopeTenancyPool.
Using a different cloud?
Explore containers guides for other cloud providers: