Configure GCP Monitoring Alert Policies

The gcp:monitoring/alertPolicy:AlertPolicy resource, part of the Pulumi GCP provider, defines Cloud Monitoring alert policies that evaluate metrics or logs and trigger incidents when conditions are met. This guide focuses on three capabilities: threshold-based metric monitoring, PromQL and SQL query conditions, and missing data and forecast handling.

Alert policies evaluate metrics from existing GCP resources and can send notifications to pre-configured notification channels. The examples are intentionally small. Combine them with your own notification channels, severity levels, and documentation.

Monitor GCE disk writes with threshold conditions

Most monitoring deployments start with threshold-based alerts that track metrics against fixed values.

import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";

const alertPolicy = new gcp.monitoring.AlertPolicy("alert_policy", {
    displayName: "My Alert Policy",
    combiner: "OR",
    conditions: [{
        displayName: "test condition",
        conditionThreshold: {
            filter: "metric.type=\"compute.googleapis.com/instance/disk/write_bytes_count\" AND resource.type=\"gce_instance\"",
            duration: "60s",
            comparison: "COMPARISON_GT",
            aggregations: [{
                alignmentPeriod: "60s",
                perSeriesAligner: "ALIGN_RATE",
            }],
        },
    }],
    userLabels: {
        foo: "bar",
    },
});
import pulumi
import pulumi_gcp as gcp

alert_policy = gcp.monitoring.AlertPolicy("alert_policy",
    display_name="My Alert Policy",
    combiner="OR",
    conditions=[{
        "display_name": "test condition",
        "condition_threshold": {
            "filter": "metric.type=\"compute.googleapis.com/instance/disk/write_bytes_count\" AND resource.type=\"gce_instance\"",
            "duration": "60s",
            "comparison": "COMPARISON_GT",
            "aggregations": [{
                "alignment_period": "60s",
                "per_series_aligner": "ALIGN_RATE",
            }],
        },
    }],
    user_labels={
        "foo": "bar",
    })
package main

import (
	"github.com/pulumi/pulumi-gcp/sdk/v9/go/gcp/monitoring"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		_, err := monitoring.NewAlertPolicy(ctx, "alert_policy", &monitoring.AlertPolicyArgs{
			DisplayName: pulumi.String("My Alert Policy"),
			Combiner:    pulumi.String("OR"),
			Conditions: monitoring.AlertPolicyConditionArray{
				&monitoring.AlertPolicyConditionArgs{
					DisplayName: pulumi.String("test condition"),
					ConditionThreshold: &monitoring.AlertPolicyConditionConditionThresholdArgs{
						Filter:     pulumi.String("metric.type=\"compute.googleapis.com/instance/disk/write_bytes_count\" AND resource.type=\"gce_instance\""),
						Duration:   pulumi.String("60s"),
						Comparison: pulumi.String("COMPARISON_GT"),
						Aggregations: monitoring.AlertPolicyConditionConditionThresholdAggregationArray{
							&monitoring.AlertPolicyConditionConditionThresholdAggregationArgs{
								AlignmentPeriod:  pulumi.String("60s"),
								PerSeriesAligner: pulumi.String("ALIGN_RATE"),
							},
						},
					},
				},
			},
			UserLabels: pulumi.StringMap{
				"foo": pulumi.String("bar"),
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;

return await Deployment.RunAsync(() => 
{
    var alertPolicy = new Gcp.Monitoring.AlertPolicy("alert_policy", new()
    {
        DisplayName = "My Alert Policy",
        Combiner = "OR",
        Conditions = new[]
        {
            new Gcp.Monitoring.Inputs.AlertPolicyConditionArgs
            {
                DisplayName = "test condition",
                ConditionThreshold = new Gcp.Monitoring.Inputs.AlertPolicyConditionConditionThresholdArgs
                {
                    Filter = "metric.type=\"compute.googleapis.com/instance/disk/write_bytes_count\" AND resource.type=\"gce_instance\"",
                    Duration = "60s",
                    Comparison = "COMPARISON_GT",
                    Aggregations = new[]
                    {
                        new Gcp.Monitoring.Inputs.AlertPolicyConditionConditionThresholdAggregationArgs
                        {
                            AlignmentPeriod = "60s",
                            PerSeriesAligner = "ALIGN_RATE",
                        },
                    },
                },
            },
        },
        UserLabels = 
        {
            { "foo", "bar" },
        },
    });

});
package generated_program;

import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcp.monitoring.AlertPolicy;
import com.pulumi.gcp.monitoring.AlertPolicyArgs;
import com.pulumi.gcp.monitoring.inputs.AlertPolicyConditionArgs;
import com.pulumi.gcp.monitoring.inputs.AlertPolicyConditionConditionThresholdArgs;
import com.pulumi.gcp.monitoring.inputs.AlertPolicyConditionConditionThresholdAggregationArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;

public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        // Threshold-based alert: fires when the GCE instance disk write rate
        // stays above the threshold for 60s. ALIGN_RATE converts the cumulative
        // byte counter to a per-second rate before comparison.
        var alertPolicy = new AlertPolicy("alertPolicy", AlertPolicyArgs.builder()
            .displayName("My Alert Policy")
            .combiner("OR")
            .conditions(AlertPolicyConditionArgs.builder()
                .displayName("test condition")
                .conditionThreshold(AlertPolicyConditionConditionThresholdArgs.builder()
                    .filter("metric.type=\"compute.googleapis.com/instance/disk/write_bytes_count\" AND resource.type=\"gce_instance\"")
                    .duration("60s")
                    .comparison("COMPARISON_GT")
                    .aggregations(AlertPolicyConditionConditionThresholdAggregationArgs.builder()
                        .alignmentPeriod("60s")
                        .perSeriesAligner("ALIGN_RATE")
                        .build())
                    .build())
                .build())
            .userLabels(Map.of("foo", "bar"))
            .build());

    }
}
resources:
  alertPolicy:
    type: gcp:monitoring:AlertPolicy
    name: alert_policy
    properties:
      displayName: My Alert Policy
      combiner: OR
      conditions:
        - displayName: test condition
          conditionThreshold:
            filter: metric.type="compute.googleapis.com/instance/disk/write_bytes_count" AND resource.type="gce_instance"
            duration: 60s
            comparison: COMPARISON_GT
            aggregations:
              - alignmentPeriod: 60s
                perSeriesAligner: ALIGN_RATE
      userLabels:
        foo: bar

When the metric breaches the threshold for the specified duration, Cloud Monitoring creates an incident. The filter property selects which time series to monitor using the Cloud Monitoring filter syntax (a distinct language from MQL). The aggregations block defines how to align and reduce data points; here, ALIGN_RATE converts cumulative counters to per-second rates. The combiner property (OR in this case) determines how multiple conditions combine when you add more than one.

Handle missing data with evaluation policies

Metrics don’t always arrive continuously due to network issues, service restarts, or configuration changes.

import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";

const alertPolicy = new gcp.monitoring.AlertPolicy("alert_policy", {
    displayName: "My Alert Policy",
    combiner: "OR",
    conditions: [{
        displayName: "test condition",
        conditionThreshold: {
            filter: "metric.type=\"compute.googleapis.com/instance/disk/write_bytes_count\" AND resource.type=\"gce_instance\"",
            duration: "60s",
            comparison: "COMPARISON_GT",
            aggregations: [{
                alignmentPeriod: "60s",
                perSeriesAligner: "ALIGN_RATE",
            }],
            evaluationMissingData: "EVALUATION_MISSING_DATA_INACTIVE",
        },
    }],
    userLabels: {
        foo: "bar",
    },
});
import pulumi
import pulumi_gcp as gcp

alert_policy = gcp.monitoring.AlertPolicy("alert_policy",
    display_name="My Alert Policy",
    combiner="OR",
    conditions=[{
        "display_name": "test condition",
        "condition_threshold": {
            "filter": "metric.type=\"compute.googleapis.com/instance/disk/write_bytes_count\" AND resource.type=\"gce_instance\"",
            "duration": "60s",
            "comparison": "COMPARISON_GT",
            "aggregations": [{
                "alignment_period": "60s",
                "per_series_aligner": "ALIGN_RATE",
            }],
            "evaluation_missing_data": "EVALUATION_MISSING_DATA_INACTIVE",
        },
    }],
    user_labels={
        "foo": "bar",
    })
package main

import (
	"github.com/pulumi/pulumi-gcp/sdk/v9/go/gcp/monitoring"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		_, err := monitoring.NewAlertPolicy(ctx, "alert_policy", &monitoring.AlertPolicyArgs{
			DisplayName: pulumi.String("My Alert Policy"),
			Combiner:    pulumi.String("OR"),
			Conditions: monitoring.AlertPolicyConditionArray{
				&monitoring.AlertPolicyConditionArgs{
					DisplayName: pulumi.String("test condition"),
					ConditionThreshold: &monitoring.AlertPolicyConditionConditionThresholdArgs{
						Filter:     pulumi.String("metric.type=\"compute.googleapis.com/instance/disk/write_bytes_count\" AND resource.type=\"gce_instance\""),
						Duration:   pulumi.String("60s"),
						Comparison: pulumi.String("COMPARISON_GT"),
						Aggregations: monitoring.AlertPolicyConditionConditionThresholdAggregationArray{
							&monitoring.AlertPolicyConditionConditionThresholdAggregationArgs{
								AlignmentPeriod:  pulumi.String("60s"),
								PerSeriesAligner: pulumi.String("ALIGN_RATE"),
							},
						},
						EvaluationMissingData: pulumi.String("EVALUATION_MISSING_DATA_INACTIVE"),
					},
				},
			},
			UserLabels: pulumi.StringMap{
				"foo": pulumi.String("bar"),
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;

return await Deployment.RunAsync(() => 
{
    var alertPolicy = new Gcp.Monitoring.AlertPolicy("alert_policy", new()
    {
        DisplayName = "My Alert Policy",
        Combiner = "OR",
        Conditions = new[]
        {
            new Gcp.Monitoring.Inputs.AlertPolicyConditionArgs
            {
                DisplayName = "test condition",
                ConditionThreshold = new Gcp.Monitoring.Inputs.AlertPolicyConditionConditionThresholdArgs
                {
                    Filter = "metric.type=\"compute.googleapis.com/instance/disk/write_bytes_count\" AND resource.type=\"gce_instance\"",
                    Duration = "60s",
                    Comparison = "COMPARISON_GT",
                    Aggregations = new[]
                    {
                        new Gcp.Monitoring.Inputs.AlertPolicyConditionConditionThresholdAggregationArgs
                        {
                            AlignmentPeriod = "60s",
                            PerSeriesAligner = "ALIGN_RATE",
                        },
                    },
                    EvaluationMissingData = "EVALUATION_MISSING_DATA_INACTIVE",
                },
            },
        },
        UserLabels = 
        {
            { "foo", "bar" },
        },
    });

});
package generated_program;

import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcp.monitoring.AlertPolicy;
import com.pulumi.gcp.monitoring.AlertPolicyArgs;
import com.pulumi.gcp.monitoring.inputs.AlertPolicyConditionArgs;
import com.pulumi.gcp.monitoring.inputs.AlertPolicyConditionConditionThresholdArgs;
import com.pulumi.gcp.monitoring.inputs.AlertPolicyConditionConditionThresholdAggregationArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;

public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        // Same threshold alert as before, with evaluationMissingData set so that
        // gaps in the metric stream are treated as non-violating (no false alerts
        // during data outages).
        var alertPolicy = new AlertPolicy("alertPolicy", AlertPolicyArgs.builder()
            .displayName("My Alert Policy")
            .combiner("OR")
            .conditions(AlertPolicyConditionArgs.builder()
                .displayName("test condition")
                .conditionThreshold(AlertPolicyConditionConditionThresholdArgs.builder()
                    .filter("metric.type=\"compute.googleapis.com/instance/disk/write_bytes_count\" AND resource.type=\"gce_instance\"")
                    .duration("60s")
                    .comparison("COMPARISON_GT")
                    .aggregations(AlertPolicyConditionConditionThresholdAggregationArgs.builder()
                        .alignmentPeriod("60s")
                        .perSeriesAligner("ALIGN_RATE")
                        .build())
                    .evaluationMissingData("EVALUATION_MISSING_DATA_INACTIVE")
                    .build())
                .build())
            .userLabels(Map.of("foo", "bar"))
            .build());

    }
}
resources:
  alertPolicy:
    type: gcp:monitoring:AlertPolicy
    name: alert_policy
    properties:
      displayName: My Alert Policy
      combiner: OR
      conditions:
        - displayName: test condition
          conditionThreshold:
            filter: metric.type="compute.googleapis.com/instance/disk/write_bytes_count" AND resource.type="gce_instance"
            duration: 60s
            comparison: COMPARISON_GT
            aggregations:
              - alignmentPeriod: 60s
                perSeriesAligner: ALIGN_RATE
            evaluationMissingData: EVALUATION_MISSING_DATA_INACTIVE
      userLabels:
        foo: bar

The evaluationMissingData property controls alert behavior during data gaps. EVALUATION_MISSING_DATA_INACTIVE treats missing data as not violating the condition, preventing false alerts during outages. Other options include treating missing data as breaching or maintaining the previous state.

Alert on predicted future threshold breaches

Some capacity planning scenarios benefit from alerting before a threshold is actually breached.

import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";

const alertPolicy = new gcp.monitoring.AlertPolicy("alert_policy", {
    displayName: "My Alert Policy",
    combiner: "OR",
    conditions: [{
        displayName: "test condition",
        conditionThreshold: {
            filter: "metric.type=\"compute.googleapis.com/instance/disk/write_bytes_count\" AND resource.type=\"gce_instance\"",
            duration: "60s",
            forecastOptions: {
                forecastHorizon: "3600s",
            },
            comparison: "COMPARISON_GT",
            aggregations: [{
                alignmentPeriod: "60s",
                perSeriesAligner: "ALIGN_RATE",
            }],
        },
    }],
    userLabels: {
        foo: "bar",
    },
});
import pulumi
import pulumi_gcp as gcp

alert_policy = gcp.monitoring.AlertPolicy("alert_policy",
    display_name="My Alert Policy",
    combiner="OR",
    conditions=[{
        "display_name": "test condition",
        "condition_threshold": {
            "filter": "metric.type=\"compute.googleapis.com/instance/disk/write_bytes_count\" AND resource.type=\"gce_instance\"",
            "duration": "60s",
            "forecast_options": {
                "forecast_horizon": "3600s",
            },
            "comparison": "COMPARISON_GT",
            "aggregations": [{
                "alignment_period": "60s",
                "per_series_aligner": "ALIGN_RATE",
            }],
        },
    }],
    user_labels={
        "foo": "bar",
    })
package main

import (
	"github.com/pulumi/pulumi-gcp/sdk/v9/go/gcp/monitoring"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		_, err := monitoring.NewAlertPolicy(ctx, "alert_policy", &monitoring.AlertPolicyArgs{
			DisplayName: pulumi.String("My Alert Policy"),
			Combiner:    pulumi.String("OR"),
			Conditions: monitoring.AlertPolicyConditionArray{
				&monitoring.AlertPolicyConditionArgs{
					DisplayName: pulumi.String("test condition"),
					ConditionThreshold: &monitoring.AlertPolicyConditionConditionThresholdArgs{
						Filter:   pulumi.String("metric.type=\"compute.googleapis.com/instance/disk/write_bytes_count\" AND resource.type=\"gce_instance\""),
						Duration: pulumi.String("60s"),
						ForecastOptions: &monitoring.AlertPolicyConditionConditionThresholdForecastOptionsArgs{
							ForecastHorizon: pulumi.String("3600s"),
						},
						Comparison: pulumi.String("COMPARISON_GT"),
						Aggregations: monitoring.AlertPolicyConditionConditionThresholdAggregationArray{
							&monitoring.AlertPolicyConditionConditionThresholdAggregationArgs{
								AlignmentPeriod:  pulumi.String("60s"),
								PerSeriesAligner: pulumi.String("ALIGN_RATE"),
							},
						},
					},
				},
			},
			UserLabels: pulumi.StringMap{
				"foo": pulumi.String("bar"),
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;

return await Deployment.RunAsync(() => 
{
    var alertPolicy = new Gcp.Monitoring.AlertPolicy("alert_policy", new()
    {
        DisplayName = "My Alert Policy",
        Combiner = "OR",
        Conditions = new[]
        {
            new Gcp.Monitoring.Inputs.AlertPolicyConditionArgs
            {
                DisplayName = "test condition",
                ConditionThreshold = new Gcp.Monitoring.Inputs.AlertPolicyConditionConditionThresholdArgs
                {
                    Filter = "metric.type=\"compute.googleapis.com/instance/disk/write_bytes_count\" AND resource.type=\"gce_instance\"",
                    Duration = "60s",
                    ForecastOptions = new Gcp.Monitoring.Inputs.AlertPolicyConditionConditionThresholdForecastOptionsArgs
                    {
                        ForecastHorizon = "3600s",
                    },
                    Comparison = "COMPARISON_GT",
                    Aggregations = new[]
                    {
                        new Gcp.Monitoring.Inputs.AlertPolicyConditionConditionThresholdAggregationArgs
                        {
                            AlignmentPeriod = "60s",
                            PerSeriesAligner = "ALIGN_RATE",
                        },
                    },
                },
            },
        },
        UserLabels = 
        {
            { "foo", "bar" },
        },
    });

});
package generated_program;

import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcp.monitoring.AlertPolicy;
import com.pulumi.gcp.monitoring.AlertPolicyArgs;
import com.pulumi.gcp.monitoring.inputs.AlertPolicyConditionArgs;
import com.pulumi.gcp.monitoring.inputs.AlertPolicyConditionConditionThresholdArgs;
import com.pulumi.gcp.monitoring.inputs.AlertPolicyConditionConditionThresholdForecastOptionsArgs;
import com.pulumi.gcp.monitoring.inputs.AlertPolicyConditionConditionThresholdAggregationArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;

public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        // Predictive alert: forecastOptions makes the condition fire when the
        // metric is predicted to breach the threshold within the next hour
        // (forecastHorizon of 3600s), rather than waiting for an actual breach.
        var alertPolicy = new AlertPolicy("alertPolicy", AlertPolicyArgs.builder()
            .displayName("My Alert Policy")
            .combiner("OR")
            .conditions(AlertPolicyConditionArgs.builder()
                .displayName("test condition")
                .conditionThreshold(AlertPolicyConditionConditionThresholdArgs.builder()
                    .filter("metric.type=\"compute.googleapis.com/instance/disk/write_bytes_count\" AND resource.type=\"gce_instance\"")
                    .duration("60s")
                    .forecastOptions(AlertPolicyConditionConditionThresholdForecastOptionsArgs.builder()
                        .forecastHorizon("3600s")
                        .build())
                    .comparison("COMPARISON_GT")
                    .aggregations(AlertPolicyConditionConditionThresholdAggregationArgs.builder()
                        .alignmentPeriod("60s")
                        .perSeriesAligner("ALIGN_RATE")
                        .build())
                    .build())
                .build())
            .userLabels(Map.of("foo", "bar"))
            .build());

    }
}
resources:
  alertPolicy:
    type: gcp:monitoring:AlertPolicy
    name: alert_policy
    properties:
      displayName: My Alert Policy
      combiner: OR
      conditions:
        - displayName: test condition
          conditionThreshold:
            filter: metric.type="compute.googleapis.com/instance/disk/write_bytes_count" AND resource.type="gce_instance"
            duration: 60s
            forecastOptions:
              forecastHorizon: 3600s
            comparison: COMPARISON_GT
            aggregations:
              - alignmentPeriod: 60s
                perSeriesAligner: ALIGN_RATE
      userLabels:
        foo: bar

The forecastOptions block enables predictive alerting based on trend analysis. The forecastHorizon property specifies how far into the future to predict; here, 3600s (1 hour) means the alert fires if the metric is predicted to breach the threshold within the next hour.

Query metrics with PromQL expressions

Teams familiar with Prometheus can use PromQL to define alert conditions.

import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";

const alertPolicy = new gcp.monitoring.AlertPolicy("alert_policy", {
    displayName: "My Alert Policy",
    combiner: "OR",
    conditions: [{
        displayName: "test condition",
        conditionPrometheusQueryLanguage: {
            query: "compute_googleapis_com:instance_cpu_usage_time > 0",
            duration: "60s",
            evaluationInterval: "60s",
            alertRule: "AlwaysOn",
            ruleGroup: "a test",
        },
    }],
    alertStrategy: {
        autoClose: "1800s",
    },
});
import pulumi
import pulumi_gcp as gcp

alert_policy = gcp.monitoring.AlertPolicy("alert_policy",
    display_name="My Alert Policy",
    combiner="OR",
    conditions=[{
        "display_name": "test condition",
        "condition_prometheus_query_language": {
            "query": "compute_googleapis_com:instance_cpu_usage_time > 0",
            "duration": "60s",
            "evaluation_interval": "60s",
            "alert_rule": "AlwaysOn",
            "rule_group": "a test",
        },
    }],
    alert_strategy={
        "auto_close": "1800s",
    })
package main

import (
	"github.com/pulumi/pulumi-gcp/sdk/v9/go/gcp/monitoring"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		_, err := monitoring.NewAlertPolicy(ctx, "alert_policy", &monitoring.AlertPolicyArgs{
			DisplayName: pulumi.String("My Alert Policy"),
			Combiner:    pulumi.String("OR"),
			Conditions: monitoring.AlertPolicyConditionArray{
				&monitoring.AlertPolicyConditionArgs{
					DisplayName: pulumi.String("test condition"),
					ConditionPrometheusQueryLanguage: &monitoring.AlertPolicyConditionConditionPrometheusQueryLanguageArgs{
						Query:              pulumi.String("compute_googleapis_com:instance_cpu_usage_time > 0"),
						Duration:           pulumi.String("60s"),
						EvaluationInterval: pulumi.String("60s"),
						AlertRule:          pulumi.String("AlwaysOn"),
						RuleGroup:          pulumi.String("a test"),
					},
				},
			},
			AlertStrategy: &monitoring.AlertPolicyAlertStrategyArgs{
				AutoClose: pulumi.String("1800s"),
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;

return await Deployment.RunAsync(() => 
{
    var alertPolicy = new Gcp.Monitoring.AlertPolicy("alert_policy", new()
    {
        DisplayName = "My Alert Policy",
        Combiner = "OR",
        Conditions = new[]
        {
            new Gcp.Monitoring.Inputs.AlertPolicyConditionArgs
            {
                DisplayName = "test condition",
                ConditionPrometheusQueryLanguage = new Gcp.Monitoring.Inputs.AlertPolicyConditionConditionPrometheusQueryLanguageArgs
                {
                    Query = "compute_googleapis_com:instance_cpu_usage_time > 0",
                    Duration = "60s",
                    EvaluationInterval = "60s",
                    AlertRule = "AlwaysOn",
                    RuleGroup = "a test",
                },
            },
        },
        AlertStrategy = new Gcp.Monitoring.Inputs.AlertPolicyAlertStrategyArgs
        {
            AutoClose = "1800s",
        },
    });

});
package generated_program;

import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcp.monitoring.AlertPolicy;
import com.pulumi.gcp.monitoring.AlertPolicyArgs;
import com.pulumi.gcp.monitoring.inputs.AlertPolicyConditionArgs;
import com.pulumi.gcp.monitoring.inputs.AlertPolicyConditionConditionPrometheusQueryLanguageArgs;
import com.pulumi.gcp.monitoring.inputs.AlertPolicyAlertStrategyArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;

public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        var alertPolicy = new AlertPolicy("alertPolicy", AlertPolicyArgs.builder()
            .displayName("My Alert Policy")
            .combiner("OR")
            .conditions(AlertPolicyConditionArgs.builder()
                .displayName("test condition")
                .conditionPrometheusQueryLanguage(AlertPolicyConditionConditionPrometheusQueryLanguageArgs.builder()
                    .query("compute_googleapis_com:instance_cpu_usage_time > 0")
                    .duration("60s")
                    .evaluationInterval("60s")
                    .alertRule("AlwaysOn")
                    .ruleGroup("a test")
                    .build())
                .build())
            .alertStrategy(AlertPolicyAlertStrategyArgs.builder()
                .autoClose("1800s")
                .build())
            .build());

    }
}
resources:
  alertPolicy:
    type: gcp:monitoring:AlertPolicy
    name: alert_policy
    properties:
      displayName: My Alert Policy
      combiner: OR
      conditions:
        - displayName: test condition
          conditionPrometheusQueryLanguage:
            query: compute_googleapis_com:instance_cpu_usage_time > 0
            duration: 60s
            evaluationInterval: 60s
            alertRule: AlwaysOn
            ruleGroup: a test
      alertStrategy:
        autoClose: 1800s

The conditionPrometheusQueryLanguage block accepts PromQL queries instead of Cloud Monitoring filter expressions. The query property defines the expression; duration and evaluationInterval control how long the condition must be true and how often to evaluate it. The alertRule and ruleGroup properties organize alerts for Prometheus-style workflows. The alertStrategy block with autoClose automatically resolves incidents after 1800s (30 minutes) of non-violation.

Alert on log-based metrics with SQL queries

Log-based alerting uses SQL queries against Cloud Logging data to detect patterns in application logs or security events.

import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";

const alertPolicy = new gcp.monitoring.AlertPolicy("alert_policy", {
    displayName: "My Alert Policy",
    combiner: "OR",
    conditions: [{
        displayName: "minutes row count",
        conditionSql: {
            query: "SELECT severity, resource FROM my_project.global._Default._AllLogs WHERE severity IS NOT NULL",
            minutes: {
                periodicity: 600,
            },
            rowCountTest: {
                comparison: "COMPARISON_GT",
                threshold: 0,
            },
        },
    }],
});
import pulumi
import pulumi_gcp as gcp

alert_policy = gcp.monitoring.AlertPolicy("alert_policy",
    display_name="My Alert Policy",
    combiner="OR",
    conditions=[{
        "display_name": "minutes row count",
        "condition_sql": {
            "query": "SELECT severity, resource FROM my_project.global._Default._AllLogs WHERE severity IS NOT NULL",
            "minutes": {
                "periodicity": 600,
            },
            "row_count_test": {
                "comparison": "COMPARISON_GT",
                "threshold": 0,
            },
        },
    }])
package main

import (
	"github.com/pulumi/pulumi-gcp/sdk/v9/go/gcp/monitoring"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		_, err := monitoring.NewAlertPolicy(ctx, "alert_policy", &monitoring.AlertPolicyArgs{
			DisplayName: pulumi.String("My Alert Policy"),
			Combiner:    pulumi.String("OR"),
			Conditions: monitoring.AlertPolicyConditionArray{
				&monitoring.AlertPolicyConditionArgs{
					DisplayName: pulumi.String("minutes row count"),
					ConditionSql: &monitoring.AlertPolicyConditionConditionSqlArgs{
						Query: pulumi.String("SELECT severity, resource FROM my_project.global._Default._AllLogs WHERE severity IS NOT NULL"),
						Minutes: &monitoring.AlertPolicyConditionConditionSqlMinutesArgs{
							Periodicity: pulumi.Int(600),
						},
						RowCountTest: &monitoring.AlertPolicyConditionConditionSqlRowCountTestArgs{
							Comparison: pulumi.String("COMPARISON_GT"),
							Threshold:  pulumi.Int(0),
						},
					},
				},
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;

return await Deployment.RunAsync(() => 
{
    var alertPolicy = new Gcp.Monitoring.AlertPolicy("alert_policy", new()
    {
        DisplayName = "My Alert Policy",
        Combiner = "OR",
        Conditions = new[]
        {
            new Gcp.Monitoring.Inputs.AlertPolicyConditionArgs
            {
                DisplayName = "minutes row count",
                ConditionSql = new Gcp.Monitoring.Inputs.AlertPolicyConditionConditionSqlArgs
                {
                    Query = "SELECT severity, resource FROM my_project.global._Default._AllLogs WHERE severity IS NOT NULL",
                    Minutes = new Gcp.Monitoring.Inputs.AlertPolicyConditionConditionSqlMinutesArgs
                    {
                        Periodicity = 600,
                    },
                    RowCountTest = new Gcp.Monitoring.Inputs.AlertPolicyConditionConditionSqlRowCountTestArgs
                    {
                        Comparison = "COMPARISON_GT",
                        Threshold = 0,
                    },
                },
            },
        },
    });

});
package generated_program;

import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcp.monitoring.AlertPolicy;
import com.pulumi.gcp.monitoring.AlertPolicyArgs;
import com.pulumi.gcp.monitoring.inputs.AlertPolicyConditionArgs;
import com.pulumi.gcp.monitoring.inputs.AlertPolicyConditionConditionSqlArgs;
import com.pulumi.gcp.monitoring.inputs.AlertPolicyConditionConditionSqlMinutesArgs;
import com.pulumi.gcp.monitoring.inputs.AlertPolicyConditionConditionSqlRowCountTestArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;

public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        // Log-based SQL condition: query Cloud Logging and trigger when the row
        // count exceeds the threshold (COMPARISON_GT against 0 rows).
        var sqlCondition = AlertPolicyConditionConditionSqlArgs.builder()
            .query("SELECT severity, resource FROM my_project.global._Default._AllLogs WHERE severity IS NOT NULL")
            // Evaluation cadence for the query (periodicity is expressed in minutes).
            .minutes(AlertPolicyConditionConditionSqlMinutesArgs.builder()
                .periodicity(600)
                .build())
            .rowCountTest(AlertPolicyConditionConditionSqlRowCountTestArgs.builder()
                .comparison("COMPARISON_GT")
                .threshold(0)
                .build())
            .build();

        var condition = AlertPolicyConditionArgs.builder()
            .displayName("minutes row count")
            .conditionSql(sqlCondition)
            .build();

        var policy = new AlertPolicy("alertPolicy", AlertPolicyArgs.builder()
            .displayName("My Alert Policy")
            .combiner("OR")
            .conditions(condition)
            .build());

    }
}
resources:
  alertPolicy:
    type: gcp:monitoring:AlertPolicy
    name: alert_policy
    properties:
      displayName: My Alert Policy
      combiner: OR
      conditions:
        - displayName: minutes row count
          # Log-based SQL condition against Cloud Logging's _AllLogs view.
          conditionSql:
            query: SELECT severity, resource FROM my_project.global._Default._AllLogs WHERE severity IS NOT NULL
            # Evaluation cadence for the query (periodicity is expressed in minutes).
            minutes:
              periodicity: 600
            # Fire when the query returns more than 0 matching rows.
            rowCountTest:
              comparison: COMPARISON_GT
              # Integer, matching the other language examples; was the quoted string '0'.
              threshold: 0

The conditionSql block queries Cloud Logging using SQL syntax. The query property selects log entries; minutes.periodicity defines how often the query is evaluated, expressed in minutes (600 here). The rowCountTest block triggers when the query returns more than the specified threshold of matching rows.

Beyond these examples

These snippets focus on specific alert policy features: threshold and forecast-based conditions, PromQL and SQL query conditions, and missing data handling. They’re intentionally minimal rather than full monitoring solutions.

The examples may reference pre-existing infrastructure such as GCE instances or other metric sources, Cloud Logging data for SQL conditions, and notification channels referenced but not created. They focus on configuring alert conditions rather than provisioning the full monitoring stack.

To keep things focused, common alert policy patterns are omitted, including:

  • Notification channel configuration (notificationChannels)
  • Alert severity levels (severity)
  • Documentation and runbook links (documentation)
  • Multi-condition logic with AND/AND_WITH_MATCHING_RESOURCE combiners
  • Alert auto-close timing (alertStrategy.autoClose, except in PromQL example)

These omissions are intentional: the goal is to illustrate how each alert condition type is wired, not provide drop-in monitoring modules. See the Cloud Monitoring AlertPolicy resource reference for all available configuration options.

Let's configure GCP Monitoring Alert Policies

Get started with Pulumi Cloud, then follow our quick setup guide to deploy this infrastructure.

Try Pulumi Cloud for FREE

Frequently Asked Questions

Conditions & Evaluation
How many conditions can I add to an alert policy?
Alert policies support 1 to 6 conditions, combined using the combiner field logic.
What types of conditions are available?

You can use three condition types:

  1. Threshold conditions - conditionThreshold for metric-based alerts
  2. PromQL conditions - conditionPrometheusQueryLanguage for Prometheus queries
  3. SQL conditions - conditionSql for log-based queries
How do I handle missing metric data in threshold conditions?
Set evaluationMissingData in your conditionThreshold (e.g., EVALUATION_MISSING_DATA_INACTIVE to treat missing data as inactive).
Can I create forecast-based alerts?
Yes, configure forecastOptions with forecastHorizon in your conditionThreshold to alert on predicted future values (e.g., 3600s for 1-hour forecasts).
Configuration & Limits
How does the combiner field work?
The combiner determines how multiple conditions are evaluated: AND (all must be true), OR (any can be true), or AND_WITH_MATCHING_RESOURCE (conditions on same resource must be true).
What are the displayName requirements?
The displayName is limited to 512 Unicode characters and should be unique within your project to avoid confusion in dashboards and notifications.
What happens if I change the project field?
The project field is immutable, so changing it will force Pulumi to replace the entire alert policy resource.
Notifications & Severity
How do I reference notification channels?
Use the format projects/[PROJECT_ID]/notificationChannels/[CHANNEL_ID] in the notificationChannels array.
What severity levels are available?
You can set severity to CRITICAL, ERROR, or WARNING. The severity appears on incident details and in notifications.
Can I automatically close incidents after a certain time?
Yes, configure alertStrategy with an autoClose duration (e.g., 1800s for 30 minutes).
Policy Management
Are alert policies enabled by default?
Yes, the enabled field defaults to true. Set it to false to create a disabled policy.

Using a different cloud?

Explore monitoring guides for other cloud providers: