The gcp:edgecontainer/cluster:Cluster resource, part of the Pulumi GCP provider, defines a Google Distributed Cloud Edge Kubernetes cluster that runs on customer-owned hardware at edge locations. This guide focuses on one capability: deploying clusters with local control planes at edge sites.
Edge clusters require pre-provisioned edge sites with hardware, Fleet API enablement, and network planning for CIDR blocks and load balancer pools. The example is intentionally minimal. Combine it with your own encryption, maintenance policies, and system add-ons configuration.
Deploy a local control plane cluster at the edge
Google Distributed Cloud Edge runs Kubernetes on hardware at customer locations, with control planes that can run locally on edge infrastructure for reduced latency to workloads.
import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";

// Look up the current project; the fleet membership references its numeric ID.
const project = gcp.organizations.getProject({});

// Fleet memberships are addressed as "projects/<number>", not the project ID.
const fleetProject = project.then((p) => `projects/${p.number}`);

// Distributed Cloud Edge cluster whose control plane runs on edge-site hardware.
const cluster = new gcp.edgecontainer.Cluster("default", {
    name: "local-control-plane-cluster",
    location: "us-central1",
    // Grant cluster-admin access to the listed user.
    authorization: {
        adminUsers: {
            username: "admin@hashicorptest.com",
        },
    },
    // CIDR ranges allocated to pods and services.
    networking: {
        clusterIpv4CidrBlocks: ["10.0.0.0/16"],
        servicesIpv4CidrBlocks: ["10.1.0.0/16"],
    },
    fleet: {
        project: fleetProject,
    },
    // Address range reserved for LoadBalancer-type services.
    externalLoadBalancerIpv4AddressPools: ["10.100.0.0-10.100.0.10"],
    // Place the control plane on machines at the edge site itself.
    controlPlane: {
        local: {
            nodeLocation: "us-central1-edge-example-edgesite",
            nodeCount: 1,
            machineFilter: "machine-name",
            sharedDeploymentPolicy: "ALLOWED",
        },
    },
});
import pulumi
import pulumi_gcp as gcp

# Look up the current project; the fleet block needs its numeric identifier.
project = gcp.organizations.get_project()

# Distributed Cloud Edge cluster whose control plane runs on edge-site hardware.
edge_cluster = gcp.edgecontainer.Cluster(
    "default",
    name="local-control-plane-cluster",
    location="us-central1",
    # Grant cluster-admin access to the listed user.
    authorization={
        "admin_users": {
            "username": "admin@hashicorptest.com",
        },
    },
    # CIDR ranges allocated to pods and services.
    networking={
        "cluster_ipv4_cidr_blocks": ["10.0.0.0/16"],
        "services_ipv4_cidr_blocks": ["10.1.0.0/16"],
    },
    # Fleet memberships are addressed by project *number*, not project ID.
    fleet={
        "project": f"projects/{project.number}",
    },
    # Address range reserved for LoadBalancer-type services.
    external_load_balancer_ipv4_address_pools=["10.100.0.0-10.100.0.10"],
    # Place the control plane on machines at the edge site itself.
    control_plane={
        "local": {
            "node_location": "us-central1-edge-example-edgesite",
            "node_count": 1,
            "machine_filter": "machine-name",
            "shared_deployment_policy": "ALLOWED",
        },
    },
)
package main
import (
"github.com/pulumi/pulumi-gcp/sdk/v9/go/gcp/edgecontainer"
"github.com/pulumi/pulumi-gcp/sdk/v9/go/gcp/organizations"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
project, err := organizations.LookupProject(ctx, &organizations.LookupProjectArgs{}, nil)
if err != nil {
return err
}
_, err = edgecontainer.NewCluster(ctx, "default", &edgecontainer.ClusterArgs{
Name: pulumi.String("local-control-plane-cluster"),
Location: pulumi.String("us-central1"),
Authorization: &edgecontainer.ClusterAuthorizationArgs{
AdminUsers: &edgecontainer.ClusterAuthorizationAdminUsersArgs{
Username: pulumi.String("admin@hashicorptest.com"),
},
},
Networking: &edgecontainer.ClusterNetworkingArgs{
ClusterIpv4CidrBlocks: pulumi.StringArray{
pulumi.String("10.0.0.0/16"),
},
ServicesIpv4CidrBlocks: pulumi.StringArray{
pulumi.String("10.1.0.0/16"),
},
},
Fleet: &edgecontainer.ClusterFleetArgs{
Project: pulumi.Sprintf("projects/%v", project.Number),
},
ExternalLoadBalancerIpv4AddressPools: pulumi.StringArray{
pulumi.String("10.100.0.0-10.100.0.10"),
},
ControlPlane: &edgecontainer.ClusterControlPlaneArgs{
Local: &edgecontainer.ClusterControlPlaneLocalArgs{
NodeLocation: pulumi.String("us-central1-edge-example-edgesite"),
NodeCount: pulumi.Int(1),
MachineFilter: pulumi.String("machine-name"),
SharedDeploymentPolicy: pulumi.String("ALLOWED"),
},
},
})
if err != nil {
return err
}
return nil
})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;

return await Deployment.RunAsync(() =>
{
    // Look up the current project; the fleet block needs its numeric identifier.
    var project = Gcp.Organizations.GetProject.Invoke();

    // Distributed Cloud Edge cluster whose control plane runs on edge-site hardware.
    var @default = new Gcp.EdgeContainer.Cluster("default", new()
    {
        Name = "local-control-plane-cluster",
        Location = "us-central1",
        // Grants cluster-admin access to the listed user.
        Authorization = new Gcp.EdgeContainer.Inputs.ClusterAuthorizationArgs
        {
            AdminUsers = new Gcp.EdgeContainer.Inputs.ClusterAuthorizationAdminUsersArgs
            {
                Username = "admin@hashicorptest.com",
            },
        },
        // CIDR ranges allocated to pods and services.
        Networking = new Gcp.EdgeContainer.Inputs.ClusterNetworkingArgs
        {
            ClusterIpv4CidrBlocks = new[]
            {
                "10.0.0.0/16",
            },
            ServicesIpv4CidrBlocks = new[]
            {
                "10.1.0.0/16",
            },
        },
        Fleet = new Gcp.EdgeContainer.Inputs.ClusterFleetArgs
        {
            // FIX: interpolating an Output<T> directly into a string renders the
            // Output's type name, not its resolved value. Apply formats the project
            // number once it is known; fleets are addressed by number, not ID.
            Project = project.Apply(getProjectResult => $"projects/{getProjectResult.Number}"),
        },
        // Address range reserved for LoadBalancer-type services.
        ExternalLoadBalancerIpv4AddressPools = new[]
        {
            "10.100.0.0-10.100.0.10",
        },
        // Places the control plane on machines at the edge site itself.
        ControlPlane = new Gcp.EdgeContainer.Inputs.ClusterControlPlaneArgs
        {
            Local = new Gcp.EdgeContainer.Inputs.ClusterControlPlaneLocalArgs
            {
                NodeLocation = "us-central1-edge-example-edgesite",
                NodeCount = 1,
                MachineFilter = "machine-name",
                SharedDeploymentPolicy = "ALLOWED",
            },
        },
    });
});
package generated_program;

import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcp.organizations.OrganizationsFunctions;
import com.pulumi.gcp.organizations.inputs.GetProjectArgs;
import com.pulumi.gcp.edgecontainer.Cluster;
import com.pulumi.gcp.edgecontainer.ClusterArgs;
import com.pulumi.gcp.edgecontainer.inputs.ClusterAuthorizationArgs;
import com.pulumi.gcp.edgecontainer.inputs.ClusterAuthorizationAdminUsersArgs;
import com.pulumi.gcp.edgecontainer.inputs.ClusterNetworkingArgs;
import com.pulumi.gcp.edgecontainer.inputs.ClusterFleetArgs;
import com.pulumi.gcp.edgecontainer.inputs.ClusterControlPlaneArgs;
import com.pulumi.gcp.edgecontainer.inputs.ClusterControlPlaneLocalArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;

public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        // Look up the current project; the fleet block needs its numeric identifier.
        final var project = OrganizationsFunctions.getProject(GetProjectArgs.builder()
            .build());

        // Distributed Cloud Edge cluster whose control plane runs on edge-site hardware.
        var default_ = new Cluster("default", ClusterArgs.builder()
            .name("local-control-plane-cluster")
            .location("us-central1")
            // Grants cluster-admin access to the listed user.
            .authorization(ClusterAuthorizationArgs.builder()
                .adminUsers(ClusterAuthorizationAdminUsersArgs.builder()
                    .username("admin@hashicorptest.com")
                    .build())
                .build())
            // CIDR ranges allocated to pods and services.
            .networking(ClusterNetworkingArgs.builder()
                .clusterIpv4CidrBlocks("10.0.0.0/16")
                .servicesIpv4CidrBlocks("10.1.0.0/16")
                .build())
            .fleet(ClusterFleetArgs.builder()
                // FIX: getProject returns Output<GetProjectResult>, so the project
                // number must be extracted with applyValue; calling project.number()
                // on the Output directly does not compile.
                .project(project.applyValue(getProjectResult -> String.format("projects/%s", getProjectResult.number())))
                .build())
            // Address range reserved for LoadBalancer-type services.
            .externalLoadBalancerIpv4AddressPools("10.100.0.0-10.100.0.10")
            // Places the control plane on machines at the edge site itself.
            .controlPlane(ClusterControlPlaneArgs.builder()
                .local(ClusterControlPlaneLocalArgs.builder()
                    .nodeLocation("us-central1-edge-example-edgesite")
                    .nodeCount(1)
                    .machineFilter("machine-name")
                    .sharedDeploymentPolicy("ALLOWED")
                    .build())
                .build())
            .build());
    }
}
resources:
  default:
    type: gcp:edgecontainer:Cluster
    properties:
      name: local-control-plane-cluster
      location: us-central1
      # Grants cluster-admin access to the listed user.
      authorization:
        adminUsers:
          username: admin@hashicorptest.com
      # CIDR ranges allocated to pods and services.
      networking:
        clusterIpv4CidrBlocks:
          - 10.0.0.0/16
        servicesIpv4CidrBlocks:
          - 10.1.0.0/16
      # Fleet memberships are addressed by project *number*, not project ID.
      fleet:
        project: projects/${project.number}
      # Address range reserved for LoadBalancer-type services.
      externalLoadBalancerIpv4AddressPools:
        - 10.100.0.0-10.100.0.10
      # Places the control plane on machines at the edge site itself.
      controlPlane:
        local:
          nodeLocation: us-central1-edge-example-edgesite
          nodeCount: 1
          machineFilter: machine-name
          sharedDeploymentPolicy: ALLOWED
variables:
  # Looks up the current project so the fleet block can use its number.
  project:
    fn::invoke:
      function: gcp:organizations:getProject
      arguments: {}
The controlPlane.local block places the control plane on edge hardware rather than in a Google Cloud region. The nodeLocation specifies which edge site hosts the control plane, and machineFilter selects specific hardware by name. The authorization block grants cluster-admin access, while fleet integration enables multi-cluster management through Google Cloud’s Fleet API. The networking block allocates CIDR ranges for pods and services, and externalLoadBalancerIpv4AddressPools reserves IPs for LoadBalancer-type services.
Beyond these examples
This snippet focuses on specific cluster-level features: local control plane configuration, RBAC authorization and Fleet integration, and network CIDR allocation and load balancer pools. It’s intentionally minimal rather than a full edge deployment.
The example relies on pre-existing infrastructure such as Google Distributed Cloud Edge sites with provisioned hardware, GCP project with Fleet API enabled, and edge machines matching the machineFilter criteria. It focuses on configuring the cluster rather than provisioning the underlying edge infrastructure.
To keep things focused, common cluster patterns are omitted, including:
- Control plane encryption (controlPlaneEncryption)
- Maintenance windows and policies (maintenancePolicy)
- System add-ons configuration (systemAddonsConfig)
- Release channel and version management (releaseChannel, targetVersion)
These omissions are intentional: the goal is to illustrate how the cluster resource is wired, not provide drop-in edge deployment modules. See the Edge Container Cluster resource reference for all available configuration options.
Let's deploy GCP Distributed Cloud Edge Clusters
Get started with Pulumi Cloud, then follow our quick setup guide to deploy this infrastructure.
Try Pulumi Cloud for FREE
Frequently Asked Questions
Configuration & Immutability
Fields such as authorization, fleet, location, name, and project cannot be changed after the cluster is created. The labels field contains only user-defined labels in your configuration. effectiveLabels includes all labels present on the resource in GCP (user-defined plus system labels). pulumiLabels combines labels configured directly on the resource with default labels configured on the provider.
Fleet & Multi-Cluster Management
The fleet project must be referenced as projects/${project.number} (not the project ID). For example: fleet: { project: "projects/123456789" }.
Networking & Load Balancing
Pod and service IP ranges are set with clusterIpv4CidrBlocks (for cluster pods) and servicesIpv4CidrBlocks (for services) in the networking property. For example: ["10.0.0.0/16"] and ["10.1.0.0/16"]. Load balancer addresses are reserved with externalLoadBalancerIpv4AddressPools, for example ["10.100.0.0-10.100.0.10"].
Control Plane & Versioning
A local control plane is configured with controlPlane.local, using nodeLocation (edge site), nodeCount, machineFilter, and sharedDeploymentPolicy. For example: nodeLocation: "us-central1-edge-example-edgesite". targetVersion is the desired cluster version you configure, controlPlaneVersion is the actual control plane release version (output), and nodeVersion is the lowest release version among worker nodes (output). The release channel can be RELEASE_CHANNEL_UNSPECIFIED, NONE, or REGULAR; this determines how the cluster receives updates.
Security & Encryption
The clusterCaCertificate output property contains the PEM-encoded public certificate. Note that this is a sensitive value and won’t be displayed in the plan.
Using a different cloud?
Explore containers guides for other cloud providers: