Deploy GCP Distributed Cloud Edge Clusters

The gcp:edgecontainer/cluster:Cluster resource, part of the Pulumi GCP provider, defines a Google Distributed Cloud Edge Kubernetes cluster: its control plane placement, networking configuration, Fleet registration, and authorization policies. This guide focuses on one capability: deploying clusters with local control planes at edge sites.

Edge clusters require pre-provisioned edge sites with hardware, Fleet-enabled GCP projects, and available IP address pools. The example is intentionally minimal. Extend it with encryption, maintenance policies, and system add-ons for production deployments.

Deploy a local control plane cluster at the edge

Google Distributed Cloud Edge runs Kubernetes on customer hardware with control plane nodes co-located at the edge site, reducing latency and enabling operation during network partitions.

TypeScript

import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";

const project = gcp.organizations.getProject({});
const _default = new gcp.edgecontainer.Cluster("default", {
    name: "local-control-plane-cluster",
    location: "us-central1",
    authorization: {
        adminUsers: {
            username: "admin@hashicorptest.com",
        },
    },
    networking: {
        clusterIpv4CidrBlocks: ["10.0.0.0/16"],
        servicesIpv4CidrBlocks: ["10.1.0.0/16"],
    },
    fleet: {
        project: project.then(project => `projects/${project.number}`),
    },
    externalLoadBalancerIpv4AddressPools: ["10.100.0.0-10.100.0.10"],
    controlPlane: {
        local: {
            nodeLocation: "us-central1-edge-example-edgesite",
            nodeCount: 1,
            machineFilter: "machine-name",
            sharedDeploymentPolicy: "ALLOWED",
        },
    },
});

Python

import pulumi
import pulumi_gcp as gcp

project = gcp.organizations.get_project()
default = gcp.edgecontainer.Cluster("default",
    name="local-control-plane-cluster",
    location="us-central1",
    authorization={
        "admin_users": {
            "username": "admin@hashicorptest.com",
        },
    },
    networking={
        "cluster_ipv4_cidr_blocks": ["10.0.0.0/16"],
        "services_ipv4_cidr_blocks": ["10.1.0.0/16"],
    },
    fleet={
        "project": f"projects/{project.number}",
    },
    external_load_balancer_ipv4_address_pools=["10.100.0.0-10.100.0.10"],
    control_plane={
        "local": {
            "node_location": "us-central1-edge-example-edgesite",
            "node_count": 1,
            "machine_filter": "machine-name",
            "shared_deployment_policy": "ALLOWED",
        },
    })

Go

package main

import (
	"github.com/pulumi/pulumi-gcp/sdk/v9/go/gcp/edgecontainer"
	"github.com/pulumi/pulumi-gcp/sdk/v9/go/gcp/organizations"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		project, err := organizations.LookupProject(ctx, &organizations.LookupProjectArgs{}, nil)
		if err != nil {
			return err
		}
		_, err = edgecontainer.NewCluster(ctx, "default", &edgecontainer.ClusterArgs{
			Name:     pulumi.String("local-control-plane-cluster"),
			Location: pulumi.String("us-central1"),
			Authorization: &edgecontainer.ClusterAuthorizationArgs{
				AdminUsers: &edgecontainer.ClusterAuthorizationAdminUsersArgs{
					Username: pulumi.String("admin@hashicorptest.com"),
				},
			},
			Networking: &edgecontainer.ClusterNetworkingArgs{
				ClusterIpv4CidrBlocks: pulumi.StringArray{
					pulumi.String("10.0.0.0/16"),
				},
				ServicesIpv4CidrBlocks: pulumi.StringArray{
					pulumi.String("10.1.0.0/16"),
				},
			},
			Fleet: &edgecontainer.ClusterFleetArgs{
				Project: pulumi.Sprintf("projects/%v", project.Number),
			},
			ExternalLoadBalancerIpv4AddressPools: pulumi.StringArray{
				pulumi.String("10.100.0.0-10.100.0.10"),
			},
			ControlPlane: &edgecontainer.ClusterControlPlaneArgs{
				Local: &edgecontainer.ClusterControlPlaneLocalArgs{
					NodeLocation:           pulumi.String("us-central1-edge-example-edgesite"),
					NodeCount:              pulumi.Int(1),
					MachineFilter:          pulumi.String("machine-name"),
					SharedDeploymentPolicy: pulumi.String("ALLOWED"),
				},
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}

C#

using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;

return await Deployment.RunAsync(() => 
{
    var project = Gcp.Organizations.GetProject.Invoke();

    var @default = new Gcp.EdgeContainer.Cluster("default", new()
    {
        Name = "local-control-plane-cluster",
        Location = "us-central1",
        Authorization = new Gcp.EdgeContainer.Inputs.ClusterAuthorizationArgs
        {
            AdminUsers = new Gcp.EdgeContainer.Inputs.ClusterAuthorizationAdminUsersArgs
            {
                Username = "admin@hashicorptest.com",
            },
        },
        Networking = new Gcp.EdgeContainer.Inputs.ClusterNetworkingArgs
        {
            ClusterIpv4CidrBlocks = new[]
            {
                "10.0.0.0/16",
            },
            ServicesIpv4CidrBlocks = new[]
            {
                "10.1.0.0/16",
            },
        },
        Fleet = new Gcp.EdgeContainer.Inputs.ClusterFleetArgs
        {
            Project = $"projects/{project.Apply(getProjectResult => getProjectResult.Number)}",
        },
        ExternalLoadBalancerIpv4AddressPools = new[]
        {
            "10.100.0.0-10.100.0.10",
        },
        ControlPlane = new Gcp.EdgeContainer.Inputs.ClusterControlPlaneArgs
        {
            Local = new Gcp.EdgeContainer.Inputs.ClusterControlPlaneLocalArgs
            {
                NodeLocation = "us-central1-edge-example-edgesite",
                NodeCount = 1,
                MachineFilter = "machine-name",
                SharedDeploymentPolicy = "ALLOWED",
            },
        },
    });

});

Java

package generated_program;

import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcp.organizations.OrganizationsFunctions;
import com.pulumi.gcp.organizations.inputs.GetProjectArgs;
import com.pulumi.gcp.edgecontainer.Cluster;
import com.pulumi.gcp.edgecontainer.ClusterArgs;
import com.pulumi.gcp.edgecontainer.inputs.ClusterAuthorizationArgs;
import com.pulumi.gcp.edgecontainer.inputs.ClusterAuthorizationAdminUsersArgs;
import com.pulumi.gcp.edgecontainer.inputs.ClusterNetworkingArgs;
import com.pulumi.gcp.edgecontainer.inputs.ClusterFleetArgs;
import com.pulumi.gcp.edgecontainer.inputs.ClusterControlPlaneArgs;
import com.pulumi.gcp.edgecontainer.inputs.ClusterControlPlaneLocalArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;

public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        final var project = OrganizationsFunctions.getProject(GetProjectArgs.builder()
            .build());

        var default_ = new Cluster("default", ClusterArgs.builder()
            .name("local-control-plane-cluster")
            .location("us-central1")
            .authorization(ClusterAuthorizationArgs.builder()
                .adminUsers(ClusterAuthorizationAdminUsersArgs.builder()
                    .username("admin@hashicorptest.com")
                    .build())
                .build())
            .networking(ClusterNetworkingArgs.builder()
                .clusterIpv4CidrBlocks("10.0.0.0/16")
                .servicesIpv4CidrBlocks("10.1.0.0/16")
                .build())
            .fleet(ClusterFleetArgs.builder()
                .project(project.applyValue(getProjectResult -> String.format("projects/%s", getProjectResult.number())))
                .build())
            .externalLoadBalancerIpv4AddressPools("10.100.0.0-10.100.0.10")
            .controlPlane(ClusterControlPlaneArgs.builder()
                .local(ClusterControlPlaneLocalArgs.builder()
                    .nodeLocation("us-central1-edge-example-edgesite")
                    .nodeCount(1)
                    .machineFilter("machine-name")
                    .sharedDeploymentPolicy("ALLOWED")
                    .build())
                .build())
            .build());

    }
}

YAML

resources:
  default:
    type: gcp:edgecontainer:Cluster
    properties:
      name: local-control-plane-cluster
      location: us-central1
      authorization:
        adminUsers:
          username: admin@hashicorptest.com
      networking:
        clusterIpv4CidrBlocks:
          - 10.0.0.0/16
        servicesIpv4CidrBlocks:
          - 10.1.0.0/16
      fleet:
        project: projects/${project.number}
      externalLoadBalancerIpv4AddressPools:
        - 10.100.0.0-10.100.0.10
      controlPlane:
        local:
          nodeLocation: us-central1-edge-example-edgesite
          nodeCount: 1
          machineFilter: machine-name
          sharedDeploymentPolicy: ALLOWED
variables:
  project:
    fn::invoke:
      function: gcp:organizations:getProject
      arguments: {}

The controlPlane.local block places control plane nodes at the specified edge site (nodeLocation). The machineFilter selects which hardware to use, and nodeCount determines control plane redundancy. The networking block defines pod and service CIDR ranges, while externalLoadBalancerIpv4AddressPools reserves IPs for LoadBalancer services. The fleet block registers the cluster with Google Cloud Fleet for centralized management across multiple clusters. The authorization block grants admin access to specified users.
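
Once deployed, the cluster's connection details surface as resource outputs. A minimal sketch, continuing from the TypeScript example's _default resource (the endpoint and port output names follow the provider schema; verify them against the resource reference):

// Stack outputs for reaching the cluster's Kubernetes API server.
export const clusterEndpoint = _default.endpoint; // API server IP address
export const clusterPort = _default.port;         // API server port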

Beyond these examples

This snippet focuses on local control plane deployment, Fleet integration for multi-cluster management, and network and load balancer configuration. It’s intentionally minimal rather than a full edge deployment.

The example relies on pre-existing infrastructure such as edge sites with provisioned hardware, GCP projects with Fleet API enabled, and available IP address pools for load balancing. It demonstrates cluster configuration without covering the edge site provisioning process.

To keep things focused, common cluster patterns are omitted, including:

  • Control plane encryption (controlPlaneEncryption)
  • Maintenance windows and policies (maintenancePolicy)
  • System add-ons configuration (systemAddonsConfig)
  • Release channel and version management (releaseChannel, targetVersion)

These omissions are intentional: the goal is to illustrate how the cluster resource is wired, not provide a drop-in edge deployment. See the Edge Container Cluster resource reference for all available configuration options.
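
As one hedged illustration of those options, the TypeScript example could be extended with a release channel and a recurring maintenance window. This is a sketch, not a drop-in configuration: the property shapes (releaseChannel, maintenancePolicy.window.recurringWindow) follow the provider schema, but the resource name, timestamps, and RRULE below are placeholders to adapt.

import * as gcp from "@pulumi/gcp";

const project = gcp.organizations.getProject({});
const maintained = new gcp.edgecontainer.Cluster("maintained", {
    name: "local-control-plane-cluster",
    location: "us-central1",
    authorization: {
        adminUsers: { username: "admin@hashicorptest.com" },
    },
    networking: {
        clusterIpv4CidrBlocks: ["10.0.0.0/16"],
        servicesIpv4CidrBlocks: ["10.1.0.0/16"],
    },
    fleet: {
        project: project.then(p => `projects/${p.number}`),
    },
    externalLoadBalancerIpv4AddressPools: ["10.100.0.0-10.100.0.10"],
    controlPlane: {
        local: {
            nodeLocation: "us-central1-edge-example-edgesite",
            nodeCount: 1,
            machineFilter: "machine-name",
            sharedDeploymentPolicy: "ALLOWED",
        },
    },
    releaseChannel: "REGULAR", // or "NONE" to manage versions via targetVersion
    maintenancePolicy: {
        window: {
            recurringWindow: {
                window: {
                    startTime: "2024-01-07T02:00:00Z", // placeholder UTC start
                    endTime: "2024-01-07T06:00:00Z",   // placeholder UTC end
                },
                recurrence: "FREQ=WEEKLY;BYDAY=SU",    // placeholder RRULE
            },
        },
    },
});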

Let's deploy GCP Distributed Cloud Edge Clusters

Get started with Pulumi Cloud, then follow our quick setup guide to deploy this infrastructure.

Frequently Asked Questions

Configuration & Immutability
What properties can't I change after creating a cluster?
The following properties are immutable and require cluster recreation if changed: authorization, fleet, location, name, and project.
How do I configure the fleet for my cluster?
The fleet property is required and immutable. Use the format projects/${project.number} (project number, not project ID) as shown in the example.
Is the control plane configuration required?
No, controlPlane is optional. When configured, you can set up a local control plane with nodeLocation, nodeCount, machineFilter, and sharedDeploymentPolicy.
What release channels are available?
Three options: RELEASE_CHANNEL_UNSPECIFIED, NONE, or REGULAR. The field is optional and computed: if you don't set it, the provider reports the value the service selects.
Networking & Load Balancing
What networking configuration is required for a cluster?
You must configure networking with clusterIpv4CidrBlocks and servicesIpv4CidrBlocks. For example: ["10.0.0.0/16"] for cluster IPs and ["10.1.0.0/16"] for services.
How do I configure external load balancer IP pools?
Set externalLoadBalancerIpv4AddressPools with IP ranges (e.g., ["10.100.0.0-10.100.0.10"]) for cluster data plane external load balancing.
Versioning & Updates
What's the difference between targetVersion, controlPlaneVersion, and nodeVersion?
targetVersion is the desired cluster version you set (e.g., “1.5.0”). controlPlaneVersion is the actual control plane release version (output). nodeVersion is the lowest release version among worker nodes (output).
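
In practice, you set targetVersion (or rely on the release channel) and read the two outputs back. A sketch reusing _default from the TypeScript example:

export const controlPlaneVersion = _default.controlPlaneVersion; // actual control plane release
export const nodeVersion = _default.nodeVersion;                 // lowest release among worker nodes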
Labels & Outputs
Why aren't all my labels showing up in the labels field?
The labels field is non-authoritative and only manages labels in your configuration. Use effectiveLabels to see all labels on the resource, including those set by other clients and services.
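
A quick way to compare the two, reusing _default from the TypeScript example:

export const declaredLabels = _default.labels;     // only labels from your configuration
export const allLabels = _default.effectiveLabels; // merged view, including service-set labels
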
Why can't I see the cluster CA certificate in my plan output?
The clusterCaCertificate output is marked as sensitive and won’t be displayed in plan output for security reasons. Access it programmatically instead.
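
For example, export it as a stack output (a sketch reusing _default; the value stays masked by default):

export const caCert = _default.clusterCaCertificate; // rendered as [secret]

Then pulumi stack output caCert --show-secrets reveals the actual certificate value.
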
What import formats can I use for existing clusters?
Three formats are supported: projects/{{project}}/locations/{{location}}/clusters/{{name}}, {{project}}/{{location}}/{{name}}, or {{location}}/{{name}}.
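
For example, importing an existing cluster under the resource name default using the fully qualified format (the project, location, and cluster names here are hypothetical):

pulumi import gcp:edgecontainer/cluster:Cluster default projects/my-project/locations/us-central1/clusters/local-control-plane-cluster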

Using a different cloud?

Explore container guides for other cloud providers.