Configure GCP Monitoring Alert Policies

The gcp:monitoring/alertPolicy:AlertPolicy resource, part of the Pulumi GCP provider, defines Cloud Monitoring alert policies that evaluate metrics or logs and create incidents when conditions are met. This guide focuses on three capabilities: threshold-based metric monitoring, PromQL and SQL query conditions, and missing data handling and forecasting.

Alert policies evaluate metrics from existing GCP resources and can reference notification channels that must be created separately. The examples are intentionally small. Combine them with your own notification channels, documentation, and severity levels.

Monitor GCE disk writes with threshold conditions

Most monitoring deployments start with threshold-based alerts that fire when a metric exceeds a fixed value for a sustained period.

// Threshold alert: fires when the GCE disk write-byte rate stays above the
// threshold for the full 60s duration.
import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";

const alertPolicy = new gcp.monitoring.AlertPolicy("alert_policy", {
    displayName: "My Alert Policy",
    // "OR": an incident opens when any single condition fires.
    combiner: "OR",
    conditions: [{
        displayName: "test condition",
        conditionThreshold: {
            // Cloud Monitoring filter selecting which time series to watch.
            filter: "metric.type=\"compute.googleapis.com/instance/disk/write_bytes_count\" AND resource.type=\"gce_instance\"",
            duration: "60s",
            comparison: "COMPARISON_GT",
            aggregations: [{
                // Align raw points into 60s buckets as a rate of change.
                alignmentPeriod: "60s",
                perSeriesAligner: "ALIGN_RATE",
            }],
        },
    }],
    userLabels: {
        foo: "bar",
    },
});
# Threshold alert: fires when the GCE disk write-byte rate stays above the
# threshold for the full 60s duration.
import pulumi
import pulumi_gcp as gcp

alert_policy = gcp.monitoring.AlertPolicy("alert_policy",
    display_name="My Alert Policy",
    # "OR": an incident opens when any single condition fires.
    combiner="OR",
    conditions=[{
        "display_name": "test condition",
        "condition_threshold": {
            # Cloud Monitoring filter selecting which time series to watch.
            "filter": "metric.type=\"compute.googleapis.com/instance/disk/write_bytes_count\" AND resource.type=\"gce_instance\"",
            "duration": "60s",
            "comparison": "COMPARISON_GT",
            "aggregations": [{
                # Align raw points into 60s buckets as a rate of change.
                "alignment_period": "60s",
                "per_series_aligner": "ALIGN_RATE",
            }],
        },
    }],
    user_labels={
        "foo": "bar",
    })
package main

import (
	"github.com/pulumi/pulumi-gcp/sdk/v9/go/gcp/monitoring"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

// Threshold alert: fires when the GCE disk write-byte rate stays above the
// threshold for the full 60s duration.
func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		_, err := monitoring.NewAlertPolicy(ctx, "alert_policy", &monitoring.AlertPolicyArgs{
			DisplayName: pulumi.String("My Alert Policy"),
			// "OR": an incident opens when any single condition fires.
			Combiner:    pulumi.String("OR"),
			Conditions: monitoring.AlertPolicyConditionArray{
				&monitoring.AlertPolicyConditionArgs{
					DisplayName: pulumi.String("test condition"),
					ConditionThreshold: &monitoring.AlertPolicyConditionConditionThresholdArgs{
						// Cloud Monitoring filter selecting which time series to watch.
						Filter:     pulumi.String("metric.type=\"compute.googleapis.com/instance/disk/write_bytes_count\" AND resource.type=\"gce_instance\""),
						Duration:   pulumi.String("60s"),
						Comparison: pulumi.String("COMPARISON_GT"),
						Aggregations: monitoring.AlertPolicyConditionConditionThresholdAggregationArray{
							&monitoring.AlertPolicyConditionConditionThresholdAggregationArgs{
								// Align raw points into 60s buckets as a rate of change.
								AlignmentPeriod:  pulumi.String("60s"),
								PerSeriesAligner: pulumi.String("ALIGN_RATE"),
							},
						},
					},
				},
			},
			UserLabels: pulumi.StringMap{
				"foo": pulumi.String("bar"),
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;

// Threshold alert: fires when the GCE disk write-byte rate stays above the
// threshold for the full 60s duration.
return await Deployment.RunAsync(() => 
{
    var alertPolicy = new Gcp.Monitoring.AlertPolicy("alert_policy", new()
    {
        DisplayName = "My Alert Policy",
        // "OR": an incident opens when any single condition fires.
        Combiner = "OR",
        Conditions = new[]
        {
            new Gcp.Monitoring.Inputs.AlertPolicyConditionArgs
            {
                DisplayName = "test condition",
                ConditionThreshold = new Gcp.Monitoring.Inputs.AlertPolicyConditionConditionThresholdArgs
                {
                    // Cloud Monitoring filter selecting which time series to watch.
                    Filter = "metric.type=\"compute.googleapis.com/instance/disk/write_bytes_count\" AND resource.type=\"gce_instance\"",
                    Duration = "60s",
                    Comparison = "COMPARISON_GT",
                    Aggregations = new[]
                    {
                        new Gcp.Monitoring.Inputs.AlertPolicyConditionConditionThresholdAggregationArgs
                        {
                            // Align raw points into 60s buckets as a rate of change.
                            AlignmentPeriod = "60s",
                            PerSeriesAligner = "ALIGN_RATE",
                        },
                    },
                },
            },
        },
        UserLabels = 
        {
            { "foo", "bar" },
        },
    });

});
package generated_program;

import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcp.monitoring.AlertPolicy;
import com.pulumi.gcp.monitoring.AlertPolicyArgs;
import com.pulumi.gcp.monitoring.inputs.AlertPolicyConditionArgs;
// Required for the .aggregations(...) builder below; was missing and broke compilation.
import com.pulumi.gcp.monitoring.inputs.AlertPolicyConditionConditionThresholdAggregationArgs;
import com.pulumi.gcp.monitoring.inputs.AlertPolicyConditionConditionThresholdArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;

public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    // Threshold alert: fires when the GCE disk write-byte rate stays above the
    // threshold for the full 60s duration.
    public static void stack(Context ctx) {
        var alertPolicy = new AlertPolicy("alertPolicy", AlertPolicyArgs.builder()
            .displayName("My Alert Policy")
            // "OR": an incident opens when any single condition fires.
            .combiner("OR")
            .conditions(AlertPolicyConditionArgs.builder()
                .displayName("test condition")
                .conditionThreshold(AlertPolicyConditionConditionThresholdArgs.builder()
                    // Cloud Monitoring filter selecting which time series to watch.
                    .filter("metric.type=\"compute.googleapis.com/instance/disk/write_bytes_count\" AND resource.type=\"gce_instance\"")
                    .duration("60s")
                    .comparison("COMPARISON_GT")
                    // Align raw points into 60s buckets as a rate of change.
                    .aggregations(AlertPolicyConditionConditionThresholdAggregationArgs.builder()
                        .alignmentPeriod("60s")
                        .perSeriesAligner("ALIGN_RATE")
                        .build())
                    .build())
                .build())
            .userLabels(Map.of("foo", "bar"))
            .build());

    }
}
# Threshold alert: fires when the GCE disk write-byte rate stays above the
# threshold for the full 60s duration.
resources:
  alertPolicy:
    type: gcp:monitoring:AlertPolicy
    name: alert_policy
    properties:
      displayName: My Alert Policy
      # "OR": an incident opens when any single condition fires.
      combiner: OR
      conditions:
        - displayName: test condition
          conditionThreshold:
            # Cloud Monitoring filter selecting which time series to watch.
            filter: metric.type="compute.googleapis.com/instance/disk/write_bytes_count" AND resource.type="gce_instance"
            duration: 60s
            comparison: COMPARISON_GT
            aggregations:
              # Align raw points into 60s buckets as a rate of change.
              - alignmentPeriod: 60s
                perSeriesAligner: ALIGN_RATE
      userLabels:
        foo: bar

When the metric breaches the threshold for the specified duration, Cloud Monitoring creates an incident. The filter property selects which time series to monitor using Cloud Monitoring’s filter syntax. The aggregations block defines how data points are aligned and combined: alignmentPeriod sets the time window, and perSeriesAligner specifies the aggregation method (here, ALIGN_RATE calculates the rate of change). The combiner property (OR in this case) determines how multiple conditions are evaluated together.

Handle missing data with evaluation policies

Time-series data can have gaps due to service interruptions or metric collection failures. The evaluationMissingData property controls whether these gaps trigger alerts.

// Same threshold alert, with explicit handling of gaps in the metric:
// missing data is treated as inactive instead of as a violation.
import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";

const alertPolicy = new gcp.monitoring.AlertPolicy("alert_policy", {
    displayName: "My Alert Policy",
    combiner: "OR",
    conditions: [{
        displayName: "test condition",
        conditionThreshold: {
            filter: "metric.type=\"compute.googleapis.com/instance/disk/write_bytes_count\" AND resource.type=\"gce_instance\"",
            duration: "60s",
            comparison: "COMPARISON_GT",
            aggregations: [{
                alignmentPeriod: "60s",
                perSeriesAligner: "ALIGN_RATE",
            }],
            // Do not open incidents for gaps in the time series.
            evaluationMissingData: "EVALUATION_MISSING_DATA_INACTIVE",
        },
    }],
    userLabels: {
        foo: "bar",
    },
});
# Same threshold alert, with explicit handling of gaps in the metric:
# missing data is treated as inactive instead of as a violation.
import pulumi
import pulumi_gcp as gcp

alert_policy = gcp.monitoring.AlertPolicy("alert_policy",
    display_name="My Alert Policy",
    combiner="OR",
    conditions=[{
        "display_name": "test condition",
        "condition_threshold": {
            "filter": "metric.type=\"compute.googleapis.com/instance/disk/write_bytes_count\" AND resource.type=\"gce_instance\"",
            "duration": "60s",
            "comparison": "COMPARISON_GT",
            "aggregations": [{
                "alignment_period": "60s",
                "per_series_aligner": "ALIGN_RATE",
            }],
            # Do not open incidents for gaps in the time series.
            "evaluation_missing_data": "EVALUATION_MISSING_DATA_INACTIVE",
        },
    }],
    user_labels={
        "foo": "bar",
    })
package main

import (
	"github.com/pulumi/pulumi-gcp/sdk/v9/go/gcp/monitoring"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

// Same threshold alert, with explicit handling of gaps in the metric:
// missing data is treated as inactive instead of as a violation.
func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		_, err := monitoring.NewAlertPolicy(ctx, "alert_policy", &monitoring.AlertPolicyArgs{
			DisplayName: pulumi.String("My Alert Policy"),
			Combiner:    pulumi.String("OR"),
			Conditions: monitoring.AlertPolicyConditionArray{
				&monitoring.AlertPolicyConditionArgs{
					DisplayName: pulumi.String("test condition"),
					ConditionThreshold: &monitoring.AlertPolicyConditionConditionThresholdArgs{
						Filter:     pulumi.String("metric.type=\"compute.googleapis.com/instance/disk/write_bytes_count\" AND resource.type=\"gce_instance\""),
						Duration:   pulumi.String("60s"),
						Comparison: pulumi.String("COMPARISON_GT"),
						Aggregations: monitoring.AlertPolicyConditionConditionThresholdAggregationArray{
							&monitoring.AlertPolicyConditionConditionThresholdAggregationArgs{
								AlignmentPeriod:  pulumi.String("60s"),
								PerSeriesAligner: pulumi.String("ALIGN_RATE"),
							},
						},
						// Do not open incidents for gaps in the time series.
						EvaluationMissingData: pulumi.String("EVALUATION_MISSING_DATA_INACTIVE"),
					},
				},
			},
			UserLabels: pulumi.StringMap{
				"foo": pulumi.String("bar"),
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;

// Same threshold alert, with explicit handling of gaps in the metric:
// missing data is treated as inactive instead of as a violation.
return await Deployment.RunAsync(() => 
{
    var alertPolicy = new Gcp.Monitoring.AlertPolicy("alert_policy", new()
    {
        DisplayName = "My Alert Policy",
        Combiner = "OR",
        Conditions = new[]
        {
            new Gcp.Monitoring.Inputs.AlertPolicyConditionArgs
            {
                DisplayName = "test condition",
                ConditionThreshold = new Gcp.Monitoring.Inputs.AlertPolicyConditionConditionThresholdArgs
                {
                    Filter = "metric.type=\"compute.googleapis.com/instance/disk/write_bytes_count\" AND resource.type=\"gce_instance\"",
                    Duration = "60s",
                    Comparison = "COMPARISON_GT",
                    Aggregations = new[]
                    {
                        new Gcp.Monitoring.Inputs.AlertPolicyConditionConditionThresholdAggregationArgs
                        {
                            AlignmentPeriod = "60s",
                            PerSeriesAligner = "ALIGN_RATE",
                        },
                    },
                    // Do not open incidents for gaps in the time series.
                    EvaluationMissingData = "EVALUATION_MISSING_DATA_INACTIVE",
                },
            },
        },
        UserLabels = 
        {
            { "foo", "bar" },
        },
    });

});
package generated_program;

import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcp.monitoring.AlertPolicy;
import com.pulumi.gcp.monitoring.AlertPolicyArgs;
import com.pulumi.gcp.monitoring.inputs.AlertPolicyConditionArgs;
// Required for the .aggregations(...) builder below; was missing and broke compilation.
import com.pulumi.gcp.monitoring.inputs.AlertPolicyConditionConditionThresholdAggregationArgs;
import com.pulumi.gcp.monitoring.inputs.AlertPolicyConditionConditionThresholdArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;

public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    // Same threshold alert, with explicit handling of gaps in the metric:
    // missing data is treated as inactive instead of as a violation.
    public static void stack(Context ctx) {
        var alertPolicy = new AlertPolicy("alertPolicy", AlertPolicyArgs.builder()
            .displayName("My Alert Policy")
            .combiner("OR")
            .conditions(AlertPolicyConditionArgs.builder()
                .displayName("test condition")
                .conditionThreshold(AlertPolicyConditionConditionThresholdArgs.builder()
                    .filter("metric.type=\"compute.googleapis.com/instance/disk/write_bytes_count\" AND resource.type=\"gce_instance\"")
                    .duration("60s")
                    .comparison("COMPARISON_GT")
                    .aggregations(AlertPolicyConditionConditionThresholdAggregationArgs.builder()
                        .alignmentPeriod("60s")
                        .perSeriesAligner("ALIGN_RATE")
                        .build())
                    // Do not open incidents for gaps in the time series.
                    .evaluationMissingData("EVALUATION_MISSING_DATA_INACTIVE")
                    .build())
                .build())
            .userLabels(Map.of("foo", "bar"))
            .build());

    }
}
# Same threshold alert, with explicit handling of gaps in the metric:
# missing data is treated as inactive instead of as a violation.
resources:
  alertPolicy:
    type: gcp:monitoring:AlertPolicy
    name: alert_policy
    properties:
      displayName: My Alert Policy
      combiner: OR
      conditions:
        - displayName: test condition
          conditionThreshold:
            filter: metric.type="compute.googleapis.com/instance/disk/write_bytes_count" AND resource.type="gce_instance"
            duration: 60s
            comparison: COMPARISON_GT
            aggregations:
              - alignmentPeriod: 60s
                perSeriesAligner: ALIGN_RATE
            # Do not open incidents for gaps in the time series.
            evaluationMissingData: EVALUATION_MISSING_DATA_INACTIVE
      userLabels:
        foo: bar

Setting evaluationMissingData to EVALUATION_MISSING_DATA_INACTIVE tells Cloud Monitoring to treat missing data points as inactive rather than as threshold violations. This prevents false alerts during expected downtime or metric collection gaps. Without this property, missing data behavior depends on the comparison operator.

Alert on predicted future threshold breaches

Forecasting alerts predict when metrics will breach thresholds based on historical trends, allowing teams to respond proactively.

// Forecasting alert: fires when the metric is predicted to breach the
// threshold within the next hour, based on its historical trend.
import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";

const alertPolicy = new gcp.monitoring.AlertPolicy("alert_policy", {
    displayName: "My Alert Policy",
    combiner: "OR",
    conditions: [{
        displayName: "test condition",
        conditionThreshold: {
            filter: "metric.type=\"compute.googleapis.com/instance/disk/write_bytes_count\" AND resource.type=\"gce_instance\"",
            duration: "60s",
            forecastOptions: {
                // How far ahead to predict (1 hour).
                forecastHorizon: "3600s",
            },
            comparison: "COMPARISON_GT",
            aggregations: [{
                alignmentPeriod: "60s",
                perSeriesAligner: "ALIGN_RATE",
            }],
        },
    }],
    userLabels: {
        foo: "bar",
    },
});
# Forecasting alert: fires when the metric is predicted to breach the
# threshold within the next hour, based on its historical trend.
import pulumi
import pulumi_gcp as gcp

alert_policy = gcp.monitoring.AlertPolicy("alert_policy",
    display_name="My Alert Policy",
    combiner="OR",
    conditions=[{
        "display_name": "test condition",
        "condition_threshold": {
            "filter": "metric.type=\"compute.googleapis.com/instance/disk/write_bytes_count\" AND resource.type=\"gce_instance\"",
            "duration": "60s",
            "forecast_options": {
                # How far ahead to predict (1 hour).
                "forecast_horizon": "3600s",
            },
            "comparison": "COMPARISON_GT",
            "aggregations": [{
                "alignment_period": "60s",
                "per_series_aligner": "ALIGN_RATE",
            }],
        },
    }],
    user_labels={
        "foo": "bar",
    })
package main

import (
	"github.com/pulumi/pulumi-gcp/sdk/v9/go/gcp/monitoring"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

// Forecasting alert: fires when the metric is predicted to breach the
// threshold within the next hour, based on its historical trend.
func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		_, err := monitoring.NewAlertPolicy(ctx, "alert_policy", &monitoring.AlertPolicyArgs{
			DisplayName: pulumi.String("My Alert Policy"),
			Combiner:    pulumi.String("OR"),
			Conditions: monitoring.AlertPolicyConditionArray{
				&monitoring.AlertPolicyConditionArgs{
					DisplayName: pulumi.String("test condition"),
					ConditionThreshold: &monitoring.AlertPolicyConditionConditionThresholdArgs{
						Filter:   pulumi.String("metric.type=\"compute.googleapis.com/instance/disk/write_bytes_count\" AND resource.type=\"gce_instance\""),
						Duration: pulumi.String("60s"),
						ForecastOptions: &monitoring.AlertPolicyConditionConditionThresholdForecastOptionsArgs{
							// How far ahead to predict (1 hour).
							ForecastHorizon: pulumi.String("3600s"),
						},
						Comparison: pulumi.String("COMPARISON_GT"),
						Aggregations: monitoring.AlertPolicyConditionConditionThresholdAggregationArray{
							&monitoring.AlertPolicyConditionConditionThresholdAggregationArgs{
								AlignmentPeriod:  pulumi.String("60s"),
								PerSeriesAligner: pulumi.String("ALIGN_RATE"),
							},
						},
					},
				},
			},
			UserLabels: pulumi.StringMap{
				"foo": pulumi.String("bar"),
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;

// Forecasting alert: fires when the metric is predicted to breach the
// threshold within the next hour, based on its historical trend.
return await Deployment.RunAsync(() => 
{
    var alertPolicy = new Gcp.Monitoring.AlertPolicy("alert_policy", new()
    {
        DisplayName = "My Alert Policy",
        Combiner = "OR",
        Conditions = new[]
        {
            new Gcp.Monitoring.Inputs.AlertPolicyConditionArgs
            {
                DisplayName = "test condition",
                ConditionThreshold = new Gcp.Monitoring.Inputs.AlertPolicyConditionConditionThresholdArgs
                {
                    Filter = "metric.type=\"compute.googleapis.com/instance/disk/write_bytes_count\" AND resource.type=\"gce_instance\"",
                    Duration = "60s",
                    ForecastOptions = new Gcp.Monitoring.Inputs.AlertPolicyConditionConditionThresholdForecastOptionsArgs
                    {
                        // How far ahead to predict (1 hour).
                        ForecastHorizon = "3600s",
                    },
                    Comparison = "COMPARISON_GT",
                    Aggregations = new[]
                    {
                        new Gcp.Monitoring.Inputs.AlertPolicyConditionConditionThresholdAggregationArgs
                        {
                            AlignmentPeriod = "60s",
                            PerSeriesAligner = "ALIGN_RATE",
                        },
                    },
                },
            },
        },
        UserLabels = 
        {
            { "foo", "bar" },
        },
    });

});
package generated_program;

import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcp.monitoring.AlertPolicy;
import com.pulumi.gcp.monitoring.AlertPolicyArgs;
import com.pulumi.gcp.monitoring.inputs.AlertPolicyConditionArgs;
// Required for the .aggregations(...) builder below; was missing and broke compilation.
import com.pulumi.gcp.monitoring.inputs.AlertPolicyConditionConditionThresholdAggregationArgs;
import com.pulumi.gcp.monitoring.inputs.AlertPolicyConditionConditionThresholdArgs;
import com.pulumi.gcp.monitoring.inputs.AlertPolicyConditionConditionThresholdForecastOptionsArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;

public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    // Forecasting alert: fires when the metric is predicted to breach the
    // threshold within the next hour, based on its historical trend.
    public static void stack(Context ctx) {
        var alertPolicy = new AlertPolicy("alertPolicy", AlertPolicyArgs.builder()
            .displayName("My Alert Policy")
            .combiner("OR")
            .conditions(AlertPolicyConditionArgs.builder()
                .displayName("test condition")
                .conditionThreshold(AlertPolicyConditionConditionThresholdArgs.builder()
                    .filter("metric.type=\"compute.googleapis.com/instance/disk/write_bytes_count\" AND resource.type=\"gce_instance\"")
                    .duration("60s")
                    .forecastOptions(AlertPolicyConditionConditionThresholdForecastOptionsArgs.builder()
                        // How far ahead to predict (1 hour).
                        .forecastHorizon("3600s")
                        .build())
                    .comparison("COMPARISON_GT")
                    .aggregations(AlertPolicyConditionConditionThresholdAggregationArgs.builder()
                        .alignmentPeriod("60s")
                        .perSeriesAligner("ALIGN_RATE")
                        .build())
                    .build())
                .build())
            .userLabels(Map.of("foo", "bar"))
            .build());

    }
}
# Forecasting alert: fires when the metric is predicted to breach the
# threshold within the next hour, based on its historical trend.
resources:
  alertPolicy:
    type: gcp:monitoring:AlertPolicy
    name: alert_policy
    properties:
      displayName: My Alert Policy
      combiner: OR
      conditions:
        - displayName: test condition
          conditionThreshold:
            filter: metric.type="compute.googleapis.com/instance/disk/write_bytes_count" AND resource.type="gce_instance"
            duration: 60s
            forecastOptions:
              # How far ahead to predict (1 hour).
              forecastHorizon: 3600s
            comparison: COMPARISON_GT
            aggregations:
              - alignmentPeriod: 60s
                perSeriesAligner: ALIGN_RATE
      userLabels:
        foo: bar

The forecastOptions block enables predictive alerting. The forecastHorizon property (here, 3600s or 1 hour) specifies how far into the future to predict. Cloud Monitoring uses historical data to forecast the metric’s trajectory and triggers the alert if the prediction exceeds the threshold within the forecast window.

Query metrics with PromQL expressions

Teams using Prometheus-compatible monitoring can write alert conditions using PromQL syntax instead of Cloud Monitoring’s filter language.

// PromQL-based alert: the condition is a Prometheus query instead of a
// Cloud Monitoring filter; incidents auto-close after 30 minutes.
import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";

const alertPolicy = new gcp.monitoring.AlertPolicy("alert_policy", {
    displayName: "My Alert Policy",
    combiner: "OR",
    conditions: [{
        displayName: "test condition",
        conditionPrometheusQueryLanguage: {
            query: "compute_googleapis_com:instance_cpu_usage_time > 0",
            duration: "60s",
            // How often the PromQL query is evaluated.
            evaluationInterval: "60s",
            alertRule: "AlwaysOn",
            ruleGroup: "a test",
        },
    }],
    alertStrategy: {
        // Auto-resolve incidents after 1800s (30 min) of inactivity.
        autoClose: "1800s",
    },
});
# PromQL-based alert: the condition is a Prometheus query instead of a
# Cloud Monitoring filter; incidents auto-close after 30 minutes.
import pulumi
import pulumi_gcp as gcp

alert_policy = gcp.monitoring.AlertPolicy("alert_policy",
    display_name="My Alert Policy",
    combiner="OR",
    conditions=[{
        "display_name": "test condition",
        "condition_prometheus_query_language": {
            "query": "compute_googleapis_com:instance_cpu_usage_time > 0",
            "duration": "60s",
            # How often the PromQL query is evaluated.
            "evaluation_interval": "60s",
            "alert_rule": "AlwaysOn",
            "rule_group": "a test",
        },
    }],
    alert_strategy={
        # Auto-resolve incidents after 1800s (30 min) of inactivity.
        "auto_close": "1800s",
    })
package main

import (
	"github.com/pulumi/pulumi-gcp/sdk/v9/go/gcp/monitoring"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

// PromQL-based alert: the condition is a Prometheus query instead of a
// Cloud Monitoring filter; incidents auto-close after 30 minutes.
func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		_, err := monitoring.NewAlertPolicy(ctx, "alert_policy", &monitoring.AlertPolicyArgs{
			DisplayName: pulumi.String("My Alert Policy"),
			Combiner:    pulumi.String("OR"),
			Conditions: monitoring.AlertPolicyConditionArray{
				&monitoring.AlertPolicyConditionArgs{
					DisplayName: pulumi.String("test condition"),
					ConditionPrometheusQueryLanguage: &monitoring.AlertPolicyConditionConditionPrometheusQueryLanguageArgs{
						Query:              pulumi.String("compute_googleapis_com:instance_cpu_usage_time > 0"),
						Duration:           pulumi.String("60s"),
						// How often the PromQL query is evaluated.
						EvaluationInterval: pulumi.String("60s"),
						AlertRule:          pulumi.String("AlwaysOn"),
						RuleGroup:          pulumi.String("a test"),
					},
				},
			},
			AlertStrategy: &monitoring.AlertPolicyAlertStrategyArgs{
				// Auto-resolve incidents after 1800s (30 min) of inactivity.
				AutoClose: pulumi.String("1800s"),
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;

// PromQL-based alert: the condition is a Prometheus query instead of a
// Cloud Monitoring filter; incidents auto-close after 30 minutes.
return await Deployment.RunAsync(() => 
{
    var alertPolicy = new Gcp.Monitoring.AlertPolicy("alert_policy", new()
    {
        DisplayName = "My Alert Policy",
        Combiner = "OR",
        Conditions = new[]
        {
            new Gcp.Monitoring.Inputs.AlertPolicyConditionArgs
            {
                DisplayName = "test condition",
                ConditionPrometheusQueryLanguage = new Gcp.Monitoring.Inputs.AlertPolicyConditionConditionPrometheusQueryLanguageArgs
                {
                    Query = "compute_googleapis_com:instance_cpu_usage_time > 0",
                    Duration = "60s",
                    // How often the PromQL query is evaluated.
                    EvaluationInterval = "60s",
                    AlertRule = "AlwaysOn",
                    RuleGroup = "a test",
                },
            },
        },
        AlertStrategy = new Gcp.Monitoring.Inputs.AlertPolicyAlertStrategyArgs
        {
            // Auto-resolve incidents after 1800s (30 min) of inactivity.
            AutoClose = "1800s",
        },
    });

});
package generated_program;

import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcp.monitoring.AlertPolicy;
import com.pulumi.gcp.monitoring.AlertPolicyArgs;
import com.pulumi.gcp.monitoring.inputs.AlertPolicyConditionArgs;
import com.pulumi.gcp.monitoring.inputs.AlertPolicyConditionConditionPrometheusQueryLanguageArgs;
import com.pulumi.gcp.monitoring.inputs.AlertPolicyAlertStrategyArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;

public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    // PromQL-based alert: the condition is a Prometheus query instead of a
    // Cloud Monitoring filter; incidents auto-close after 30 minutes.
    public static void stack(Context ctx) {
        var alertPolicy = new AlertPolicy("alertPolicy", AlertPolicyArgs.builder()
            .displayName("My Alert Policy")
            .combiner("OR")
            .conditions(AlertPolicyConditionArgs.builder()
                .displayName("test condition")
                .conditionPrometheusQueryLanguage(AlertPolicyConditionConditionPrometheusQueryLanguageArgs.builder()
                    .query("compute_googleapis_com:instance_cpu_usage_time > 0")
                    .duration("60s")
                    // How often the PromQL query is evaluated.
                    .evaluationInterval("60s")
                    .alertRule("AlwaysOn")
                    .ruleGroup("a test")
                    .build())
                .build())
            .alertStrategy(AlertPolicyAlertStrategyArgs.builder()
                // Auto-resolve incidents after 1800s (30 min) of inactivity.
                .autoClose("1800s")
                .build())
            .build());

    }
}
# PromQL-based alert: the condition is a Prometheus query instead of a
# Cloud Monitoring filter; incidents auto-close after 30 minutes.
resources:
  alertPolicy:
    type: gcp:monitoring:AlertPolicy
    name: alert_policy
    properties:
      displayName: My Alert Policy
      combiner: OR
      conditions:
        - displayName: test condition
          conditionPrometheusQueryLanguage:
            query: compute_googleapis_com:instance_cpu_usage_time > 0
            duration: 60s
            # How often the PromQL query is evaluated.
            evaluationInterval: 60s
            alertRule: AlwaysOn
            ruleGroup: a test
      alertStrategy:
        # Auto-resolve incidents after 1800s (30 min) of inactivity.
        autoClose: 1800s

The conditionPrometheusQueryLanguage block accepts a PromQL query that returns time-series data. The evaluationInterval controls how often the query runs, and duration specifies how long the condition must be true before alerting. The alertRule and ruleGroup properties organize alerts within Prometheus-style recording and alerting rules. The alertStrategy block with autoClose automatically resolves incidents after 1800 seconds (30 minutes) of inactivity.

Alert on log query results with SQL

Log-based alerts use SQL queries against Cloud Logging data to detect patterns in application logs, audit trails, or security events.

// Log-based alert: runs a SQL query against Cloud Logging and fires
// when the query returns any rows.
import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";

const alertPolicy = new gcp.monitoring.AlertPolicy("alert_policy", {
    displayName: "My Alert Policy",
    combiner: "OR",
    conditions: [{
        displayName: "minutes row count",
        conditionSql: {
            query: "SELECT severity, resource FROM my_project.global._Default._AllLogs WHERE severity IS NOT NULL",
            minutes: {
                // Query cadence; NOTE(review): Minutes.periodicity appears to
                // be expressed in minutes in the Monitoring API — confirm.
                periodicity: 600,
            },
            rowCountTest: {
                // Fire when the query returns more than zero rows.
                comparison: "COMPARISON_GT",
                threshold: 0,
            },
        },
    }],
});
# Log-based alert: runs a SQL query against Cloud Logging and fires
# when the query returns any rows.
import pulumi
import pulumi_gcp as gcp

alert_policy = gcp.monitoring.AlertPolicy("alert_policy",
    display_name="My Alert Policy",
    combiner="OR",
    conditions=[{
        "display_name": "minutes row count",
        "condition_sql": {
            "query": "SELECT severity, resource FROM my_project.global._Default._AllLogs WHERE severity IS NOT NULL",
            "minutes": {
                # Query cadence; NOTE(review): Minutes.periodicity appears to
                # be expressed in minutes in the Monitoring API — confirm.
                "periodicity": 600,
            },
            "row_count_test": {
                # Fire when the query returns more than zero rows.
                "comparison": "COMPARISON_GT",
                "threshold": 0,
            },
        },
    }])
package main

import (
	"github.com/pulumi/pulumi-gcp/sdk/v9/go/gcp/monitoring"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

// Log-based alert: runs a SQL query against Cloud Logging and fires
// when the query returns any rows.
func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		_, err := monitoring.NewAlertPolicy(ctx, "alert_policy", &monitoring.AlertPolicyArgs{
			DisplayName: pulumi.String("My Alert Policy"),
			Combiner:    pulumi.String("OR"),
			Conditions: monitoring.AlertPolicyConditionArray{
				&monitoring.AlertPolicyConditionArgs{
					DisplayName: pulumi.String("minutes row count"),
					ConditionSql: &monitoring.AlertPolicyConditionConditionSqlArgs{
						Query: pulumi.String("SELECT severity, resource FROM my_project.global._Default._AllLogs WHERE severity IS NOT NULL"),
						Minutes: &monitoring.AlertPolicyConditionConditionSqlMinutesArgs{
							// Query cadence; NOTE(review): Minutes.periodicity appears
							// to be expressed in minutes in the Monitoring API — confirm.
							Periodicity: pulumi.Int(600),
						},
						RowCountTest: &monitoring.AlertPolicyConditionConditionSqlRowCountTestArgs{
							// Fire when the query returns more than zero rows.
							Comparison: pulumi.String("COMPARISON_GT"),
							Threshold:  pulumi.Int(0),
						},
					},
				},
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;

// Log-based alert: runs a SQL query against Cloud Logging and fires
// when the query returns any rows.
return await Deployment.RunAsync(() => 
{
    var alertPolicy = new Gcp.Monitoring.AlertPolicy("alert_policy", new()
    {
        DisplayName = "My Alert Policy",
        Combiner = "OR",
        Conditions = new[]
        {
            new Gcp.Monitoring.Inputs.AlertPolicyConditionArgs
            {
                DisplayName = "minutes row count",
                ConditionSql = new Gcp.Monitoring.Inputs.AlertPolicyConditionConditionSqlArgs
                {
                    Query = "SELECT severity, resource FROM my_project.global._Default._AllLogs WHERE severity IS NOT NULL",
                    Minutes = new Gcp.Monitoring.Inputs.AlertPolicyConditionConditionSqlMinutesArgs
                    {
                        // Query cadence; NOTE(review): Minutes.periodicity appears
                        // to be expressed in minutes in the Monitoring API — confirm.
                        Periodicity = 600,
                    },
                    RowCountTest = new Gcp.Monitoring.Inputs.AlertPolicyConditionConditionSqlRowCountTestArgs
                    {
                        // Fire when the query returns more than zero rows.
                        Comparison = "COMPARISON_GT",
                        Threshold = 0,
                    },
                },
            },
        },
    });

});
package generated_program;

import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcp.monitoring.AlertPolicy;
import com.pulumi.gcp.monitoring.AlertPolicyArgs;
import com.pulumi.gcp.monitoring.inputs.AlertPolicyConditionArgs;
import com.pulumi.gcp.monitoring.inputs.AlertPolicyConditionConditionSqlArgs;
import com.pulumi.gcp.monitoring.inputs.AlertPolicyConditionConditionSqlMinutesArgs;
import com.pulumi.gcp.monitoring.inputs.AlertPolicyConditionConditionSqlRowCountTestArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;

public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    // Log-based alert: runs a SQL query against Cloud Logging and fires
    // when the query returns any rows.
    public static void stack(Context ctx) {
        var alertPolicy = new AlertPolicy("alertPolicy", AlertPolicyArgs.builder()
            .displayName("My Alert Policy")
            .combiner("OR")
            .conditions(AlertPolicyConditionArgs.builder()
                .displayName("minutes row count")
                .conditionSql(AlertPolicyConditionConditionSqlArgs.builder()
                    .query("SELECT severity, resource FROM my_project.global._Default._AllLogs WHERE severity IS NOT NULL")
                    .minutes(AlertPolicyConditionConditionSqlMinutesArgs.builder()
                        // Query cadence; NOTE(review): Minutes.periodicity appears
                        // to be expressed in minutes in the Monitoring API — confirm.
                        .periodicity(600)
                        .build())
                    .rowCountTest(AlertPolicyConditionConditionSqlRowCountTestArgs.builder()
                        // Fire when the query returns more than zero rows.
                        .comparison("COMPARISON_GT")
                        .threshold(0)
                        .build())
                    .build())
                .build())
            .build());

    }
}
resources:
  alertPolicy:
    type: gcp:monitoring:AlertPolicy
    name: alert_policy
    properties:
      displayName: My Alert Policy
      combiner: OR
      conditions:
        - displayName: minutes row count
          conditionSql:
            query: SELECT severity, resource FROM my_project.global._Default._AllLogs WHERE severity IS NOT NULL
            minutes:
              periodicity: 600
            rowCountTest:
              comparison: COMPARISON_GT
              # Numeric threshold (matches the integer 0 used in the other language
              # examples); fires when the query returns more than zero rows.
              threshold: 0

The conditionSql block runs a SQL query against Cloud Logging. The minutes block with periodicity sets how often the query is evaluated, measured in minutes (600 here, i.e. every 10 hours). The rowCountTest block defines the alert condition: here, it triggers when the query returns more than zero rows. This pattern is useful for detecting specific log entries, such as errors with particular severity levels.

Beyond these examples

These snippets focus on specific alert policy features: threshold and forecast-based conditions, PromQL and SQL query conditions, and missing data evaluation policies. They’re intentionally minimal rather than full monitoring solutions.

The examples may reference pre-existing infrastructure such as GCE instances or other monitored resources, Cloud Logging data for SQL-based alerts, and notification channels (referenced but not created). They focus on configuring alert conditions rather than provisioning the complete monitoring stack.

To keep things focused, common alert policy patterns are omitted, including:

  • Notification channel configuration (notificationChannels)
  • Alert severity levels (severity)
  • Documentation and runbook links (documentation)
  • Multi-condition logic with AND/OR combiners

These omissions are intentional: the goal is to illustrate how each alert condition type is wired, not provide drop-in monitoring modules. See the Alert Policy resource reference for all available configuration options.

Let's configure GCP Monitoring Alert Policies

Get started with Pulumi Cloud, then follow our quick setup guide to deploy this infrastructure.

Try Pulumi Cloud for FREE

Frequently Asked Questions

Policy Configuration & Constraints
What happens if I change the project after creating an alert policy?
The project field is immutable, so changing it requires replacing the entire resource.
How many conditions can I add to a single alert policy?
Alert policies support between 1 and 6 conditions. If you need more complex monitoring, split it across multiple policies.
How does the combiner field work?
The combiner determines how conditions are evaluated: AND requires all conditions to be true, OR requires any condition to be true, and AND_WITH_MATCHING_RESOURCE combines conditions for the same resource.
Condition Types & Monitoring
What types of conditions can I use in an alert policy?

You can use three condition types:

  1. Threshold conditions (conditionThreshold) - Metric-based alerts with filters and aggregations
  2. PromQL conditions (conditionPrometheusQueryLanguage) - Prometheus-style queries
  3. SQL conditions (conditionSql) - Log-based alerts using SQL queries
How do I handle missing metric data in threshold conditions?
Set evaluationMissingData in your threshold condition. For example, EVALUATION_MISSING_DATA_INACTIVE treats missing data as inactive rather than triggering alerts.
Can I create alerts based on forecasted metrics?
Yes, use forecastOptions with a forecastHorizon (like 3600s) in threshold conditions to alert on predicted future values.
How do I create log-based alerts using SQL?
Use conditionSql with a query field for your SQL statement, minutes for periodicity, and rowCountTest to define the threshold comparison.
Notifications & Severity
What severity levels are available for alert policies?
You can set severity to CRITICAL, ERROR, or WARNING. The severity level appears on incident details and in notifications.
How do I control when alerts automatically close?
Use alertStrategy with an autoClose duration (like 1800s) to automatically close incidents after a specified time.
Labels & Metadata Limits
What are the limits on display names and user labels?
Display names are limited to 512 Unicode characters. User labels can have up to 64 entries, with each key and value limited to 63 Unicode characters or 128 bytes. Labels can only contain lowercase letters, numerals, underscores, and dashes, and keys must start with a letter.

Using a different cloud?

Explore monitoring guides for other cloud providers: