Published on Tuesday, Mar 3, 2026 by pulumiverse
Manages Prometheus rule configurations through the Grafana Asserts API. Allows creation and management of custom Prometheus recording and alerting rules.
Example Usage
import * as pulumi from "@pulumi/pulumi";
import * as grafana from "@pulumiverse/grafana";

// Basic recording rule for latency metrics: precomputes p95/p99 request latency.
const latencyMetrics = new grafana.assert.PromRuleFile("latency_metrics", {
    name: "custom-latency-metrics",
    active: true,
    groups: [{
        name: "latency_recording_rules",
        interval: "30s",
        rules: [
            {
                record: "custom:latency:p95",
                expr: "histogram_quantile(0.95, rate(http_request_duration_seconds_bucket[5m]))",
                labels: {
                    source: "custom_instrumentation",
                    severity: "info",
                },
            },
            {
                record: "custom:latency:p99",
                expr: "histogram_quantile(0.99, rate(http_request_duration_seconds_bucket[5m]))",
                labels: {
                    source: "custom_instrumentation",
                    severity: "info",
                },
            },
        ],
    }],
});

// Alert rules for high latency: warning above 500ms, critical above 1s.
const latencyAlerts = new grafana.assert.PromRuleFile("latency_alerts", {
    name: "custom-latency-alerts",
    active: true,
    groups: [{
        name: "latency_alerting",
        interval: "30s",
        rules: [
            {
                alert: "HighLatency",
                expr: "histogram_quantile(0.99, rate(http_request_duration_seconds_bucket[5m])) > 0.5",
                duration: "5m",
                labels: {
                    severity: "warning",
                    category: "Latency",
                },
                annotations: {
                    summary: "High latency detected",
                    description: "P99 latency is above 500ms for 5 minutes",
                },
            },
            {
                alert: "VeryHighLatency",
                expr: "histogram_quantile(0.99, rate(http_request_duration_seconds_bucket[5m])) > 1.0",
                duration: "2m",
                labels: {
                    severity: "critical",
                    category: "Latency",
                },
                annotations: {
                    summary: "Very high latency detected",
                    description: "P99 latency is above 1 second",
                },
            },
        ],
    }],
});

// Comprehensive monitoring rules with multiple groups (latency, errors, throughput).
const comprehensiveMonitoring = new grafana.assert.PromRuleFile("comprehensive_monitoring", {
    name: "custom-comprehensive-monitoring",
    active: true,
    groups: [
        {
            name: "latency_monitoring",
            interval: "30s",
            rules: [
                {
                    record: "custom:latency:p99",
                    expr: "histogram_quantile(0.99, rate(http_request_duration_seconds_bucket[5m]))",
                    labels: {
                        source: "custom",
                    },
                },
                {
                    alert: "HighLatency",
                    expr: "custom:latency:p99 > 0.5",
                    duration: "5m",
                    labels: {
                        severity: "warning",
                    },
                    annotations: {
                        summary: "High latency detected",
                    },
                },
            ],
        },
        {
            name: "error_monitoring",
            interval: "1m",
            rules: [
                {
                    record: "custom:error:rate",
                    expr: "rate(http_requests_total{status=~\"5..\"}[5m])",
                    labels: {
                        source: "custom",
                    },
                },
                {
                    alert: "HighErrorRate",
                    expr: "custom:error:rate > 0.1",
                    duration: "10m",
                    labels: {
                        severity: "critical",
                        category: "Errors",
                    },
                    annotations: {
                        summary: "High error rate detected",
                        description: "Error rate is above 10%",
                    },
                },
            ],
        },
        {
            name: "throughput_monitoring",
            interval: "1m",
            rules: [
                {
                    record: "custom:throughput:total",
                    expr: "sum(rate(http_requests_total[5m]))",
                    labels: {
                        source: "custom",
                    },
                },
                {
                    alert: "LowThroughput",
                    expr: "custom:throughput:total < 10",
                    duration: "5m",
                    labels: {
                        severity: "warning",
                        category: "Throughput",
                    },
                    annotations: {
                        summary: "Low throughput detected",
                        description: "Request throughput is below 10 requests/second",
                    },
                },
            ],
        },
    ],
});

// Rules with conditional enablement: disableInGroups suppresses a rule in the named groups.
const conditionalRules = new grafana.assert.PromRuleFile("conditional_rules", {
    name: "custom-conditional-rules",
    active: true,
    groups: [{
        name: "environment_specific_rules",
        interval: "30s",
        rules: [
            {
                alert: "TestAlert",
                expr: "up == 0",
                duration: "1m",
                labels: {
                    severity: "info",
                },
                annotations: {
                    summary: "Test alert that is disabled in production",
                },
                disableInGroups: ["production"],
            },
            {
                alert: "CriticalAlert",
                expr: "up == 0",
                duration: "30s",
                labels: {
                    severity: "critical",
                },
                annotations: {
                    summary: "Critical alert that fires in all environments",
                },
            },
        ],
    }],
});

// Inactive rules (for staging/testing): active: false keeps the file managed but not evaluated.
const stagingRules = new grafana.assert.PromRuleFile("staging_rules", {
    name: "custom-staging-rules",
    active: false,
    groups: [{
        name: "staging_tests",
        interval: "1m",
        rules: [{
            record: "staging:test:metric",
            expr: "up",
            labels: {
                environment: "staging",
            },
        }],
    }],
});

// SLO-based alerting: records the availability ratio and alerts below the 99.5% target.
const sloAlerts = new grafana.assert.PromRuleFile("slo_alerts", {
    name: "custom-slo-alerts",
    active: true,
    groups: [{
        name: "slo_monitoring",
        interval: "1m",
        rules: [
            {
                record: "custom:slo:availability",
                expr: "sum(rate(http_requests_total{status!~\"5..\"}[5m])) / sum(rate(http_requests_total[5m]))",
                labels: {
                    slo_type: "availability",
                },
            },
            {
                alert: "SLOAvailabilityBreach",
                expr: "custom:slo:availability < 0.995",
                duration: "5m",
                labels: {
                    severity: "critical",
                    category: "SLO",
                },
                annotations: {
                    summary: "SLO availability breach",
                    description: "Availability is below 99.5% SLO target",
                    runbook_url: "https://docs.example.com/runbooks/availability-breach",
                },
            },
        ],
    }],
});
import pulumi
import pulumiverse_grafana as grafana

# Basic recording rule for latency metrics: precomputes p95/p99 request latency.
latency_metrics = grafana.assert_.PromRuleFile("latency_metrics",
    name="custom-latency-metrics",
    active=True,
    groups=[{
        "name": "latency_recording_rules",
        "interval": "30s",
        "rules": [
            {
                "record": "custom:latency:p95",
                "expr": "histogram_quantile(0.95, rate(http_request_duration_seconds_bucket[5m]))",
                "labels": {
                    "source": "custom_instrumentation",
                    "severity": "info",
                },
            },
            {
                "record": "custom:latency:p99",
                "expr": "histogram_quantile(0.99, rate(http_request_duration_seconds_bucket[5m]))",
                "labels": {
                    "source": "custom_instrumentation",
                    "severity": "info",
                },
            },
        ],
    }])

# Alert rules for high latency: warning above 500ms, critical above 1s.
latency_alerts = grafana.assert_.PromRuleFile("latency_alerts",
    name="custom-latency-alerts",
    active=True,
    groups=[{
        "name": "latency_alerting",
        "interval": "30s",
        "rules": [
            {
                "alert": "HighLatency",
                "expr": "histogram_quantile(0.99, rate(http_request_duration_seconds_bucket[5m])) > 0.5",
                "duration": "5m",
                "labels": {
                    "severity": "warning",
                    "category": "Latency",
                },
                "annotations": {
                    "summary": "High latency detected",
                    "description": "P99 latency is above 500ms for 5 minutes",
                },
            },
            {
                "alert": "VeryHighLatency",
                "expr": "histogram_quantile(0.99, rate(http_request_duration_seconds_bucket[5m])) > 1.0",
                "duration": "2m",
                "labels": {
                    "severity": "critical",
                    "category": "Latency",
                },
                "annotations": {
                    "summary": "Very high latency detected",
                    "description": "P99 latency is above 1 second",
                },
            },
        ],
    }])

# Comprehensive monitoring rules with multiple groups (latency, errors, throughput).
comprehensive_monitoring = grafana.assert_.PromRuleFile("comprehensive_monitoring",
    name="custom-comprehensive-monitoring",
    active=True,
    groups=[
        {
            "name": "latency_monitoring",
            "interval": "30s",
            "rules": [
                {
                    "record": "custom:latency:p99",
                    "expr": "histogram_quantile(0.99, rate(http_request_duration_seconds_bucket[5m]))",
                    "labels": {
                        "source": "custom",
                    },
                },
                {
                    "alert": "HighLatency",
                    "expr": "custom:latency:p99 > 0.5",
                    "duration": "5m",
                    "labels": {
                        "severity": "warning",
                    },
                    "annotations": {
                        "summary": "High latency detected",
                    },
                },
            ],
        },
        {
            "name": "error_monitoring",
            "interval": "1m",
            "rules": [
                {
                    "record": "custom:error:rate",
                    "expr": "rate(http_requests_total{status=~\"5..\"}[5m])",
                    "labels": {
                        "source": "custom",
                    },
                },
                {
                    "alert": "HighErrorRate",
                    "expr": "custom:error:rate > 0.1",
                    "duration": "10m",
                    "labels": {
                        "severity": "critical",
                        "category": "Errors",
                    },
                    "annotations": {
                        "summary": "High error rate detected",
                        "description": "Error rate is above 10%",
                    },
                },
            ],
        },
        {
            "name": "throughput_monitoring",
            "interval": "1m",
            "rules": [
                {
                    "record": "custom:throughput:total",
                    "expr": "sum(rate(http_requests_total[5m]))",
                    "labels": {
                        "source": "custom",
                    },
                },
                {
                    "alert": "LowThroughput",
                    "expr": "custom:throughput:total < 10",
                    "duration": "5m",
                    "labels": {
                        "severity": "warning",
                        "category": "Throughput",
                    },
                    "annotations": {
                        "summary": "Low throughput detected",
                        "description": "Request throughput is below 10 requests/second",
                    },
                },
            ],
        },
    ])

# Rules with conditional enablement: disable_in_groups suppresses a rule in the named groups.
conditional_rules = grafana.assert_.PromRuleFile("conditional_rules",
    name="custom-conditional-rules",
    active=True,
    groups=[{
        "name": "environment_specific_rules",
        "interval": "30s",
        "rules": [
            {
                "alert": "TestAlert",
                "expr": "up == 0",
                "duration": "1m",
                "labels": {
                    "severity": "info",
                },
                "annotations": {
                    "summary": "Test alert that is disabled in production",
                },
                "disable_in_groups": ["production"],
            },
            {
                "alert": "CriticalAlert",
                "expr": "up == 0",
                "duration": "30s",
                "labels": {
                    "severity": "critical",
                },
                "annotations": {
                    "summary": "Critical alert that fires in all environments",
                },
            },
        ],
    }])

# Inactive rules (for staging/testing): active=False keeps the file managed but not evaluated.
staging_rules = grafana.assert_.PromRuleFile("staging_rules",
    name="custom-staging-rules",
    active=False,
    groups=[{
        "name": "staging_tests",
        "interval": "1m",
        "rules": [{
            "record": "staging:test:metric",
            "expr": "up",
            "labels": {
                "environment": "staging",
            },
        }],
    }])

# SLO-based alerting: records the availability ratio and alerts below the 99.5% target.
slo_alerts = grafana.assert_.PromRuleFile("slo_alerts",
    name="custom-slo-alerts",
    active=True,
    groups=[{
        "name": "slo_monitoring",
        "interval": "1m",
        "rules": [
            {
                "record": "custom:slo:availability",
                "expr": "sum(rate(http_requests_total{status!~\"5..\"}[5m])) / sum(rate(http_requests_total[5m]))",
                "labels": {
                    "slo_type": "availability",
                },
            },
            {
                "alert": "SLOAvailabilityBreach",
                "expr": "custom:slo:availability < 0.995",
                "duration": "5m",
                "labels": {
                    "severity": "critical",
                    "category": "SLO",
                },
                "annotations": {
                    "summary": "SLO availability breach",
                    "description": "Availability is below 99.5% SLO target",
                    "runbook_url": "https://docs.example.com/runbooks/availability-breach",
                },
            },
        ],
    }])
package main

import (
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
	"github.com/pulumiverse/pulumi-grafana/sdk/v2/go/grafana/assert"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		// Basic recording rule for latency metrics: precomputes p95/p99 request latency.
		_, err := assert.NewPromRuleFile(ctx, "latency_metrics", &assert.PromRuleFileArgs{
			Name:   pulumi.String("custom-latency-metrics"),
			Active: pulumi.Bool(true),
			Groups: assert.PromRuleFileGroupArray{
				&assert.PromRuleFileGroupArgs{
					Name:     pulumi.String("latency_recording_rules"),
					Interval: pulumi.String("30s"),
					Rules: assert.PromRuleFileGroupRuleArray{
						&assert.PromRuleFileGroupRuleArgs{
							Record: pulumi.String("custom:latency:p95"),
							Expr:   pulumi.String("histogram_quantile(0.95, rate(http_request_duration_seconds_bucket[5m]))"),
							Labels: pulumi.StringMap{
								"source":   pulumi.String("custom_instrumentation"),
								"severity": pulumi.String("info"),
							},
						},
						&assert.PromRuleFileGroupRuleArgs{
							Record: pulumi.String("custom:latency:p99"),
							Expr:   pulumi.String("histogram_quantile(0.99, rate(http_request_duration_seconds_bucket[5m]))"),
							Labels: pulumi.StringMap{
								"source":   pulumi.String("custom_instrumentation"),
								"severity": pulumi.String("info"),
							},
						},
					},
				},
			},
		})
		if err != nil {
			return err
		}
		// Alert rules for high latency: warning above 500ms, critical above 1s.
		_, err = assert.NewPromRuleFile(ctx, "latency_alerts", &assert.PromRuleFileArgs{
			Name:   pulumi.String("custom-latency-alerts"),
			Active: pulumi.Bool(true),
			Groups: assert.PromRuleFileGroupArray{
				&assert.PromRuleFileGroupArgs{
					Name:     pulumi.String("latency_alerting"),
					Interval: pulumi.String("30s"),
					Rules: assert.PromRuleFileGroupRuleArray{
						&assert.PromRuleFileGroupRuleArgs{
							Alert:    pulumi.String("HighLatency"),
							Expr:     pulumi.String("histogram_quantile(0.99, rate(http_request_duration_seconds_bucket[5m])) > 0.5"),
							Duration: pulumi.String("5m"),
							Labels: pulumi.StringMap{
								"severity": pulumi.String("warning"),
								"category": pulumi.String("Latency"),
							},
							Annotations: pulumi.StringMap{
								"summary":     pulumi.String("High latency detected"),
								"description": pulumi.String("P99 latency is above 500ms for 5 minutes"),
							},
						},
						&assert.PromRuleFileGroupRuleArgs{
							Alert:    pulumi.String("VeryHighLatency"),
							Expr:     pulumi.String("histogram_quantile(0.99, rate(http_request_duration_seconds_bucket[5m])) > 1.0"),
							Duration: pulumi.String("2m"),
							Labels: pulumi.StringMap{
								"severity": pulumi.String("critical"),
								"category": pulumi.String("Latency"),
							},
							Annotations: pulumi.StringMap{
								"summary":     pulumi.String("Very high latency detected"),
								"description": pulumi.String("P99 latency is above 1 second"),
							},
						},
					},
				},
			},
		})
		if err != nil {
			return err
		}
		// Comprehensive monitoring rules with multiple groups (latency, errors, throughput).
		_, err = assert.NewPromRuleFile(ctx, "comprehensive_monitoring", &assert.PromRuleFileArgs{
			Name:   pulumi.String("custom-comprehensive-monitoring"),
			Active: pulumi.Bool(true),
			Groups: assert.PromRuleFileGroupArray{
				&assert.PromRuleFileGroupArgs{
					Name:     pulumi.String("latency_monitoring"),
					Interval: pulumi.String("30s"),
					Rules: assert.PromRuleFileGroupRuleArray{
						&assert.PromRuleFileGroupRuleArgs{
							Record: pulumi.String("custom:latency:p99"),
							Expr:   pulumi.String("histogram_quantile(0.99, rate(http_request_duration_seconds_bucket[5m]))"),
							Labels: pulumi.StringMap{
								"source": pulumi.String("custom"),
							},
						},
						&assert.PromRuleFileGroupRuleArgs{
							Alert:    pulumi.String("HighLatency"),
							Expr:     pulumi.String("custom:latency:p99 > 0.5"),
							Duration: pulumi.String("5m"),
							Labels: pulumi.StringMap{
								"severity": pulumi.String("warning"),
							},
							Annotations: pulumi.StringMap{
								"summary": pulumi.String("High latency detected"),
							},
						},
					},
				},
				&assert.PromRuleFileGroupArgs{
					Name:     pulumi.String("error_monitoring"),
					Interval: pulumi.String("1m"),
					Rules: assert.PromRuleFileGroupRuleArray{
						&assert.PromRuleFileGroupRuleArgs{
							Record: pulumi.String("custom:error:rate"),
							Expr:   pulumi.String("rate(http_requests_total{status=~\"5..\"}[5m])"),
							Labels: pulumi.StringMap{
								"source": pulumi.String("custom"),
							},
						},
						&assert.PromRuleFileGroupRuleArgs{
							Alert:    pulumi.String("HighErrorRate"),
							Expr:     pulumi.String("custom:error:rate > 0.1"),
							Duration: pulumi.String("10m"),
							Labels: pulumi.StringMap{
								"severity": pulumi.String("critical"),
								"category": pulumi.String("Errors"),
							},
							Annotations: pulumi.StringMap{
								"summary":     pulumi.String("High error rate detected"),
								"description": pulumi.String("Error rate is above 10%"),
							},
						},
					},
				},
				&assert.PromRuleFileGroupArgs{
					Name:     pulumi.String("throughput_monitoring"),
					Interval: pulumi.String("1m"),
					Rules: assert.PromRuleFileGroupRuleArray{
						&assert.PromRuleFileGroupRuleArgs{
							Record: pulumi.String("custom:throughput:total"),
							Expr:   pulumi.String("sum(rate(http_requests_total[5m]))"),
							Labels: pulumi.StringMap{
								"source": pulumi.String("custom"),
							},
						},
						&assert.PromRuleFileGroupRuleArgs{
							Alert:    pulumi.String("LowThroughput"),
							Expr:     pulumi.String("custom:throughput:total < 10"),
							Duration: pulumi.String("5m"),
							Labels: pulumi.StringMap{
								"severity": pulumi.String("warning"),
								"category": pulumi.String("Throughput"),
							},
							Annotations: pulumi.StringMap{
								"summary":     pulumi.String("Low throughput detected"),
								"description": pulumi.String("Request throughput is below 10 requests/second"),
							},
						},
					},
				},
			},
		})
		if err != nil {
			return err
		}
		// Rules with conditional enablement: DisableInGroups suppresses a rule in the named groups.
		_, err = assert.NewPromRuleFile(ctx, "conditional_rules", &assert.PromRuleFileArgs{
			Name:   pulumi.String("custom-conditional-rules"),
			Active: pulumi.Bool(true),
			Groups: assert.PromRuleFileGroupArray{
				&assert.PromRuleFileGroupArgs{
					Name:     pulumi.String("environment_specific_rules"),
					Interval: pulumi.String("30s"),
					Rules: assert.PromRuleFileGroupRuleArray{
						&assert.PromRuleFileGroupRuleArgs{
							Alert:    pulumi.String("TestAlert"),
							Expr:     pulumi.String("up == 0"),
							Duration: pulumi.String("1m"),
							Labels: pulumi.StringMap{
								"severity": pulumi.String("info"),
							},
							Annotations: pulumi.StringMap{
								"summary": pulumi.String("Test alert that is disabled in production"),
							},
							DisableInGroups: pulumi.StringArray{
								pulumi.String("production"),
							},
						},
						&assert.PromRuleFileGroupRuleArgs{
							Alert:    pulumi.String("CriticalAlert"),
							Expr:     pulumi.String("up == 0"),
							Duration: pulumi.String("30s"),
							Labels: pulumi.StringMap{
								"severity": pulumi.String("critical"),
							},
							Annotations: pulumi.StringMap{
								"summary": pulumi.String("Critical alert that fires in all environments"),
							},
						},
					},
				},
			},
		})
		if err != nil {
			return err
		}
		// Inactive rules (for staging/testing): Active false keeps the file managed but not evaluated.
		_, err = assert.NewPromRuleFile(ctx, "staging_rules", &assert.PromRuleFileArgs{
			Name:   pulumi.String("custom-staging-rules"),
			Active: pulumi.Bool(false),
			Groups: assert.PromRuleFileGroupArray{
				&assert.PromRuleFileGroupArgs{
					Name:     pulumi.String("staging_tests"),
					Interval: pulumi.String("1m"),
					Rules: assert.PromRuleFileGroupRuleArray{
						&assert.PromRuleFileGroupRuleArgs{
							Record: pulumi.String("staging:test:metric"),
							Expr:   pulumi.String("up"),
							Labels: pulumi.StringMap{
								"environment": pulumi.String("staging"),
							},
						},
					},
				},
			},
		})
		if err != nil {
			return err
		}
		// SLO-based alerting: records the availability ratio and alerts below the 99.5% target.
		_, err = assert.NewPromRuleFile(ctx, "slo_alerts", &assert.PromRuleFileArgs{
			Name:   pulumi.String("custom-slo-alerts"),
			Active: pulumi.Bool(true),
			Groups: assert.PromRuleFileGroupArray{
				&assert.PromRuleFileGroupArgs{
					Name:     pulumi.String("slo_monitoring"),
					Interval: pulumi.String("1m"),
					Rules: assert.PromRuleFileGroupRuleArray{
						&assert.PromRuleFileGroupRuleArgs{
							Record: pulumi.String("custom:slo:availability"),
							Expr:   pulumi.String("sum(rate(http_requests_total{status!~\"5..\"}[5m])) / sum(rate(http_requests_total[5m]))"),
							Labels: pulumi.StringMap{
								"slo_type": pulumi.String("availability"),
							},
						},
						&assert.PromRuleFileGroupRuleArgs{
							Alert:    pulumi.String("SLOAvailabilityBreach"),
							Expr:     pulumi.String("custom:slo:availability < 0.995"),
							Duration: pulumi.String("5m"),
							Labels: pulumi.StringMap{
								"severity": pulumi.String("critical"),
								"category": pulumi.String("SLO"),
							},
							Annotations: pulumi.StringMap{
								"summary":     pulumi.String("SLO availability breach"),
								"description": pulumi.String("Availability is below 99.5% SLO target"),
								"runbook_url": pulumi.String("https://docs.example.com/runbooks/availability-breach"),
							},
						},
					},
				},
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Grafana = Pulumiverse.Grafana;

return await Deployment.RunAsync(() =>
{
    // Basic recording rule for latency metrics: precomputes p95/p99 request latency.
    var latencyMetrics = new Grafana.Assert.PromRuleFile("latency_metrics", new()
    {
        Name = "custom-latency-metrics",
        Active = true,
        Groups = new[]
        {
            new Grafana.Assert.Inputs.PromRuleFileGroupArgs
            {
                Name = "latency_recording_rules",
                Interval = "30s",
                Rules = new[]
                {
                    new Grafana.Assert.Inputs.PromRuleFileGroupRuleArgs
                    {
                        Record = "custom:latency:p95",
                        Expr = "histogram_quantile(0.95, rate(http_request_duration_seconds_bucket[5m]))",
                        Labels =
                        {
                            { "source", "custom_instrumentation" },
                            { "severity", "info" },
                        },
                    },
                    new Grafana.Assert.Inputs.PromRuleFileGroupRuleArgs
                    {
                        Record = "custom:latency:p99",
                        Expr = "histogram_quantile(0.99, rate(http_request_duration_seconds_bucket[5m]))",
                        Labels =
                        {
                            { "source", "custom_instrumentation" },
                            { "severity", "info" },
                        },
                    },
                },
            },
        },
    });

    // Alert rules for high latency: warning above 500ms, critical above 1s.
    var latencyAlerts = new Grafana.Assert.PromRuleFile("latency_alerts", new()
    {
        Name = "custom-latency-alerts",
        Active = true,
        Groups = new[]
        {
            new Grafana.Assert.Inputs.PromRuleFileGroupArgs
            {
                Name = "latency_alerting",
                Interval = "30s",
                Rules = new[]
                {
                    new Grafana.Assert.Inputs.PromRuleFileGroupRuleArgs
                    {
                        Alert = "HighLatency",
                        Expr = "histogram_quantile(0.99, rate(http_request_duration_seconds_bucket[5m])) > 0.5",
                        Duration = "5m",
                        Labels =
                        {
                            { "severity", "warning" },
                            { "category", "Latency" },
                        },
                        Annotations =
                        {
                            { "summary", "High latency detected" },
                            { "description", "P99 latency is above 500ms for 5 minutes" },
                        },
                    },
                    new Grafana.Assert.Inputs.PromRuleFileGroupRuleArgs
                    {
                        Alert = "VeryHighLatency",
                        Expr = "histogram_quantile(0.99, rate(http_request_duration_seconds_bucket[5m])) > 1.0",
                        Duration = "2m",
                        Labels =
                        {
                            { "severity", "critical" },
                            { "category", "Latency" },
                        },
                        Annotations =
                        {
                            { "summary", "Very high latency detected" },
                            { "description", "P99 latency is above 1 second" },
                        },
                    },
                },
            },
        },
    });

    // Comprehensive monitoring rules with multiple groups (latency, errors, throughput).
    var comprehensiveMonitoring = new Grafana.Assert.PromRuleFile("comprehensive_monitoring", new()
    {
        Name = "custom-comprehensive-monitoring",
        Active = true,
        Groups = new[]
        {
            new Grafana.Assert.Inputs.PromRuleFileGroupArgs
            {
                Name = "latency_monitoring",
                Interval = "30s",
                Rules = new[]
                {
                    new Grafana.Assert.Inputs.PromRuleFileGroupRuleArgs
                    {
                        Record = "custom:latency:p99",
                        Expr = "histogram_quantile(0.99, rate(http_request_duration_seconds_bucket[5m]))",
                        Labels =
                        {
                            { "source", "custom" },
                        },
                    },
                    new Grafana.Assert.Inputs.PromRuleFileGroupRuleArgs
                    {
                        Alert = "HighLatency",
                        Expr = "custom:latency:p99 > 0.5",
                        Duration = "5m",
                        Labels =
                        {
                            { "severity", "warning" },
                        },
                        Annotations =
                        {
                            { "summary", "High latency detected" },
                        },
                    },
                },
            },
            new Grafana.Assert.Inputs.PromRuleFileGroupArgs
            {
                Name = "error_monitoring",
                Interval = "1m",
                Rules = new[]
                {
                    new Grafana.Assert.Inputs.PromRuleFileGroupRuleArgs
                    {
                        Record = "custom:error:rate",
                        Expr = "rate(http_requests_total{status=~\"5..\"}[5m])",
                        Labels =
                        {
                            { "source", "custom" },
                        },
                    },
                    new Grafana.Assert.Inputs.PromRuleFileGroupRuleArgs
                    {
                        Alert = "HighErrorRate",
                        Expr = "custom:error:rate > 0.1",
                        Duration = "10m",
                        Labels =
                        {
                            { "severity", "critical" },
                            { "category", "Errors" },
                        },
                        Annotations =
                        {
                            { "summary", "High error rate detected" },
                            { "description", "Error rate is above 10%" },
                        },
                    },
                },
            },
            new Grafana.Assert.Inputs.PromRuleFileGroupArgs
            {
                Name = "throughput_monitoring",
                Interval = "1m",
                Rules = new[]
                {
                    new Grafana.Assert.Inputs.PromRuleFileGroupRuleArgs
                    {
                        Record = "custom:throughput:total",
                        Expr = "sum(rate(http_requests_total[5m]))",
                        Labels =
                        {
                            { "source", "custom" },
                        },
                    },
                    new Grafana.Assert.Inputs.PromRuleFileGroupRuleArgs
                    {
                        Alert = "LowThroughput",
                        Expr = "custom:throughput:total < 10",
                        Duration = "5m",
                        Labels =
                        {
                            { "severity", "warning" },
                            { "category", "Throughput" },
                        },
                        Annotations =
                        {
                            { "summary", "Low throughput detected" },
                            { "description", "Request throughput is below 10 requests/second" },
                        },
                    },
                },
            },
        },
    });

    // Rules with conditional enablement: DisableInGroups suppresses a rule in the named groups.
    var conditionalRules = new Grafana.Assert.PromRuleFile("conditional_rules", new()
    {
        Name = "custom-conditional-rules",
        Active = true,
        Groups = new[]
        {
            new Grafana.Assert.Inputs.PromRuleFileGroupArgs
            {
                Name = "environment_specific_rules",
                Interval = "30s",
                Rules = new[]
                {
                    new Grafana.Assert.Inputs.PromRuleFileGroupRuleArgs
                    {
                        Alert = "TestAlert",
                        Expr = "up == 0",
                        Duration = "1m",
                        Labels =
                        {
                            { "severity", "info" },
                        },
                        Annotations =
                        {
                            { "summary", "Test alert that is disabled in production" },
                        },
                        DisableInGroups = new[]
                        {
                            "production",
                        },
                    },
                    new Grafana.Assert.Inputs.PromRuleFileGroupRuleArgs
                    {
                        Alert = "CriticalAlert",
                        Expr = "up == 0",
                        Duration = "30s",
                        Labels =
                        {
                            { "severity", "critical" },
                        },
                        Annotations =
                        {
                            { "summary", "Critical alert that fires in all environments" },
                        },
                    },
                },
            },
        },
    });

    // Inactive rules (for staging/testing): Active = false keeps the file managed but not evaluated.
    var stagingRules = new Grafana.Assert.PromRuleFile("staging_rules", new()
    {
        Name = "custom-staging-rules",
        Active = false,
        Groups = new[]
        {
            new Grafana.Assert.Inputs.PromRuleFileGroupArgs
            {
                Name = "staging_tests",
                Interval = "1m",
                Rules = new[]
                {
                    new Grafana.Assert.Inputs.PromRuleFileGroupRuleArgs
                    {
                        Record = "staging:test:metric",
                        Expr = "up",
                        Labels =
                        {
                            { "environment", "staging" },
                        },
                    },
                },
            },
        },
    });

    // SLO-based alerting: records the availability ratio and alerts below the 99.5% target.
    var sloAlerts = new Grafana.Assert.PromRuleFile("slo_alerts", new()
    {
        Name = "custom-slo-alerts",
        Active = true,
        Groups = new[]
        {
            new Grafana.Assert.Inputs.PromRuleFileGroupArgs
            {
                Name = "slo_monitoring",
                Interval = "1m",
                Rules = new[]
                {
                    new Grafana.Assert.Inputs.PromRuleFileGroupRuleArgs
                    {
                        Record = "custom:slo:availability",
                        Expr = "sum(rate(http_requests_total{status!~\"5..\"}[5m])) / sum(rate(http_requests_total[5m]))",
                        Labels =
                        {
                            { "slo_type", "availability" },
                        },
                    },
                    new Grafana.Assert.Inputs.PromRuleFileGroupRuleArgs
                    {
                        Alert = "SLOAvailabilityBreach",
                        Expr = "custom:slo:availability < 0.995",
                        Duration = "5m",
                        Labels =
                        {
                            { "severity", "critical" },
                            { "category", "SLO" },
                        },
                        Annotations =
                        {
                            { "summary", "SLO availability breach" },
                            { "description", "Availability is below 99.5% SLO target" },
                            { "runbook_url", "https://docs.example.com/runbooks/availability-breach" },
                        },
                    },
                },
            },
        },
    });
});
package generated_program;

import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.grafana.assert.PromRuleFile;
import com.pulumi.grafana.assert.PromRuleFileArgs;
import com.pulumi.grafana.assert.inputs.PromRuleFileGroupArgs;
// Fix: PromRuleFileGroupRuleArgs is used below but was never imported.
import com.pulumi.grafana.assert.inputs.PromRuleFileGroupRuleArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;

public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        // Basic recording rule for latency metrics: precomputes p95/p99 request latency.
        var latencyMetrics = new PromRuleFile("latencyMetrics", PromRuleFileArgs.builder()
            .name("custom-latency-metrics")
            .active(true)
            .groups(PromRuleFileGroupArgs.builder()
                .name("latency_recording_rules")
                .interval("30s")
                .rules(
                    PromRuleFileGroupRuleArgs.builder()
                        .record("custom:latency:p95")
                        .expr("histogram_quantile(0.95, rate(http_request_duration_seconds_bucket[5m]))")
                        .labels(Map.ofEntries(
                            Map.entry("source", "custom_instrumentation"),
                            Map.entry("severity", "info")
                        ))
                        .build(),
                    PromRuleFileGroupRuleArgs.builder()
                        .record("custom:latency:p99")
                        .expr("histogram_quantile(0.99, rate(http_request_duration_seconds_bucket[5m]))")
                        .labels(Map.ofEntries(
                            Map.entry("source", "custom_instrumentation"),
                            Map.entry("severity", "info")
                        ))
                        .build())
                .build())
            .build());

        // Alert rules for high latency: warning above 500ms, critical above 1s.
        var latencyAlerts = new PromRuleFile("latencyAlerts", PromRuleFileArgs.builder()
            .name("custom-latency-alerts")
            .active(true)
            .groups(PromRuleFileGroupArgs.builder()
                .name("latency_alerting")
                .interval("30s")
                .rules(
                    PromRuleFileGroupRuleArgs.builder()
                        .alert("HighLatency")
                        .expr("histogram_quantile(0.99, rate(http_request_duration_seconds_bucket[5m])) > 0.5")
                        .duration("5m")
                        .labels(Map.ofEntries(
                            Map.entry("severity", "warning"),
                            Map.entry("category", "Latency")
                        ))
                        .annotations(Map.ofEntries(
                            Map.entry("summary", "High latency detected"),
                            Map.entry("description", "P99 latency is above 500ms for 5 minutes")
                        ))
                        .build(),
                    PromRuleFileGroupRuleArgs.builder()
                        .alert("VeryHighLatency")
                        .expr("histogram_quantile(0.99, rate(http_request_duration_seconds_bucket[5m])) > 1.0")
                        .duration("2m")
                        .labels(Map.ofEntries(
                            Map.entry("severity", "critical"),
                            Map.entry("category", "Latency")
                        ))
                        .annotations(Map.ofEntries(
                            Map.entry("summary", "Very high latency detected"),
                            Map.entry("description", "P99 latency is above 1 second")
                        ))
                        .build())
                .build())
            .build());

        // Comprehensive monitoring rules with multiple groups (latency, errors, throughput).
        var comprehensiveMonitoring = new PromRuleFile("comprehensiveMonitoring", PromRuleFileArgs.builder()
            .name("custom-comprehensive-monitoring")
            .active(true)
            .groups(
                PromRuleFileGroupArgs.builder()
                    .name("latency_monitoring")
                    .interval("30s")
                    .rules(
                        PromRuleFileGroupRuleArgs.builder()
                            .record("custom:latency:p99")
                            .expr("histogram_quantile(0.99, rate(http_request_duration_seconds_bucket[5m]))")
                            .labels(Map.of("source", "custom"))
                            .build(),
                        PromRuleFileGroupRuleArgs.builder()
                            .alert("HighLatency")
                            .expr("custom:latency:p99 > 0.5")
                            .duration("5m")
                            .labels(Map.of("severity", "warning"))
                            .annotations(Map.of("summary", "High latency detected"))
                            .build())
                    .build(),
                PromRuleFileGroupArgs.builder()
                    .name("error_monitoring")
                    .interval("1m")
                    .rules(
                        PromRuleFileGroupRuleArgs.builder()
                            .record("custom:error:rate")
                            .expr("rate(http_requests_total{status=~\"5..\"}[5m])")
                            .labels(Map.of("source", "custom"))
                            .build(),
                        PromRuleFileGroupRuleArgs.builder()
                            .alert("HighErrorRate")
                            .expr("custom:error:rate > 0.1")
                            .duration("10m")
                            .labels(Map.ofEntries(
                                Map.entry("severity", "critical"),
                                Map.entry("category", "Errors")
                            ))
                            .annotations(Map.ofEntries(
                                Map.entry("summary", "High error rate detected"),
                                Map.entry("description", "Error rate is above 10%")
                            ))
                            .build())
                    .build(),
                PromRuleFileGroupArgs.builder()
                    .name("throughput_monitoring")
                    .interval("1m")
                    .rules(
                        PromRuleFileGroupRuleArgs.builder()
                            .record("custom:throughput:total")
                            .expr("sum(rate(http_requests_total[5m]))")
                            .labels(Map.of("source", "custom"))
                            .build(),
                        PromRuleFileGroupRuleArgs.builder()
                            .alert("LowThroughput")
                            .expr("custom:throughput:total < 10")
                            .duration("5m")
                            .labels(Map.ofEntries(
                                Map.entry("severity", "warning"),
                                Map.entry("category", "Throughput")
                            ))
                            .annotations(Map.ofEntries(
                                Map.entry("summary", "Low throughput detected"),
                                Map.entry("description", "Request throughput is below 10 requests/second")
                            ))
                            .build())
                    .build())
            .build());

        // Rules with conditional enablement: disableInGroups suppresses a rule in the named groups.
        var conditionalRules = new PromRuleFile("conditionalRules", PromRuleFileArgs.builder()
            .name("custom-conditional-rules")
            .active(true)
            .groups(PromRuleFileGroupArgs.builder()
                .name("environment_specific_rules")
                .interval("30s")
                .rules(
                    PromRuleFileGroupRuleArgs.builder()
                        .alert("TestAlert")
                        .expr("up == 0")
                        .duration("1m")
                        .labels(Map.of("severity", "info"))
                        .annotations(Map.of("summary", "Test alert that is disabled in production"))
                        .disableInGroups("production")
                        .build(),
                    PromRuleFileGroupRuleArgs.builder()
                        .alert("CriticalAlert")
                        .expr("up == 0")
                        .duration("30s")
                        .labels(Map.of("severity", "critical"))
                        .annotations(Map.of("summary", "Critical alert that fires in all environments"))
                        .build())
                .build())
            .build());

        // Inactive rules (for staging/testing): active(false) keeps the file managed but not evaluated.
        var stagingRules = new PromRuleFile("stagingRules", PromRuleFileArgs.builder()
            .name("custom-staging-rules")
            .active(false)
            .groups(PromRuleFileGroupArgs.builder()
                .name("staging_tests")
                .interval("1m")
                .rules(PromRuleFileGroupRuleArgs.builder()
                    .record("staging:test:metric")
                    .expr("up")
                    .labels(Map.of("environment", "staging"))
                    .build())
                .build())
            .build());

        // SLO-based alerting: records the availability ratio and alerts below the 99.5% target.
        var sloAlerts = new PromRuleFile("sloAlerts", PromRuleFileArgs.builder()
            .name("custom-slo-alerts")
            .active(true)
            .groups(PromRuleFileGroupArgs.builder()
                .name("slo_monitoring")
                .interval("1m")
                .rules(
                    PromRuleFileGroupRuleArgs.builder()
                        .record("custom:slo:availability")
                        .expr("sum(rate(http_requests_total{status!~\"5..\"}[5m])) / sum(rate(http_requests_total[5m]))")
                        .labels(Map.of("slo_type", "availability"))
                        .build(),
                    PromRuleFileGroupRuleArgs.builder()
                        .alert("SLOAvailabilityBreach")
                        .expr("custom:slo:availability < 0.995")
                        .duration("5m")
                        .labels(Map.ofEntries(
                            Map.entry("severity", "critical"),
                            Map.entry("category", "SLO")
                        ))
                        .annotations(Map.ofEntries(
                            Map.entry("summary", "SLO availability breach"),
                            Map.entry("description", "Availability is below 99.5% SLO target"),
                            Map.entry("runbook_url", "https://docs.example.com/runbooks/availability-breach")
                        ))
                        .build())
                .build())
            .build());
    }
}
resources:
# Basic recording rule for latency metrics
latencyMetrics:
type: grafana:assert:PromRuleFile
name: latency_metrics
properties:
name: custom-latency-metrics
active: true
groups:
- name: latency_recording_rules
interval: 30s
rules:
- record: custom:latency:p95
expr: histogram_quantile(0.95, rate(http_request_duration_seconds_bucket[5m]))
labels:
source: custom_instrumentation
severity: info
- record: custom:latency:p99
expr: histogram_quantile(0.99, rate(http_request_duration_seconds_bucket[5m]))
labels:
source: custom_instrumentation
severity: info
# Alert rules for high latency
latencyAlerts:
type: grafana:assert:PromRuleFile
name: latency_alerts
properties:
name: custom-latency-alerts
active: true
groups:
- name: latency_alerting
interval: 30s
rules:
- alert: HighLatency
expr: histogram_quantile(0.99, rate(http_request_duration_seconds_bucket[5m])) > 0.5
duration: 5m
labels:
severity: warning
category: Latency
annotations:
summary: High latency detected
description: P99 latency is above 500ms for 5 minutes
- alert: VeryHighLatency
expr: histogram_quantile(0.99, rate(http_request_duration_seconds_bucket[5m])) > 1.0
duration: 2m
labels:
severity: critical
category: Latency
annotations:
summary: Very high latency detected
description: P99 latency is above 1 second
# Comprehensive monitoring rules with multiple groups
comprehensiveMonitoring:
type: grafana:assert:PromRuleFile
name: comprehensive_monitoring
properties:
name: custom-comprehensive-monitoring
active: true
groups:
# Latency monitoring
- name: latency_monitoring
interval: 30s
rules:
- record: custom:latency:p99
expr: histogram_quantile(0.99, rate(http_request_duration_seconds_bucket[5m]))
labels:
source: custom
- alert: HighLatency
expr: custom:latency:p99 > 0.5
duration: 5m
labels:
severity: warning
annotations:
summary: High latency detected
- name: error_monitoring
interval: 1m
rules:
- record: custom:error:rate
expr: rate(http_requests_total{status=~"5.."}[5m])
labels:
source: custom
- alert: HighErrorRate
expr: custom:error:rate > 0.1
duration: 10m
labels:
severity: critical
category: Errors
annotations:
summary: High error rate detected
description: Error rate is above 10%
- name: throughput_monitoring
interval: 1m
rules:
- record: custom:throughput:total
expr: sum(rate(http_requests_total[5m]))
labels:
source: custom
- alert: LowThroughput
expr: custom:throughput:total < 10
duration: 5m
labels:
severity: warning
category: Throughput
annotations:
summary: Low throughput detected
description: Request throughput is below 10 requests/second
# Rules with conditional enablement
conditionalRules:
type: grafana:assert:PromRuleFile
name: conditional_rules
properties:
name: custom-conditional-rules
active: true
groups:
- name: environment_specific_rules
interval: 30s
rules:
- alert: TestAlert
expr: up == 0
duration: 1m
labels:
severity: info
annotations:
summary: Test alert that is disabled in production
disableInGroups:
- production
- alert: CriticalAlert
expr: up == 0
duration: 30s
labels:
severity: critical
annotations:
summary: Critical alert that fires in all environments
# Inactive rules (for staging/testing)
stagingRules:
type: grafana:assert:PromRuleFile
name: staging_rules
properties:
name: custom-staging-rules
active: false # Rules file is inactive
groups:
- name: staging_tests
interval: 1m
rules:
- record: staging:test:metric
expr: up
labels:
environment: staging
# SLO-based alerting
sloAlerts:
type: grafana:assert:PromRuleFile
name: slo_alerts
properties:
name: custom-slo-alerts
active: true
groups:
- name: slo_monitoring
interval: 1m
rules:
- record: custom:slo:availability
expr: sum(rate(http_requests_total{status!~"5.."}[5m])) / sum(rate(http_requests_total[5m]))
labels:
slo_type: availability
- alert: SLOAvailabilityBreach
expr: custom:slo:availability < 0.995
duration: 5m
labels:
severity: critical
category: SLO
annotations:
summary: SLO availability breach
description: Availability is below 99.5% SLO target
runbook_url: https://docs.example.com/runbooks/availability-breach
Create PromRuleFile Resource
Resources are created with functions called constructors. To learn more about declaring and configuring resources, see Resources.
Constructor syntax
new PromRuleFile(name: string, args: PromRuleFileArgs, opts?: CustomResourceOptions);
@overload
def PromRuleFile(resource_name: str,
args: PromRuleFileArgs,
opts: Optional[ResourceOptions] = None)
@overload
def PromRuleFile(resource_name: str,
opts: Optional[ResourceOptions] = None,
groups: Optional[Sequence[PromRuleFileGroupArgs]] = None,
active: Optional[bool] = None,
name: Optional[str] = None)
func NewPromRuleFile(ctx *Context, name string, args PromRuleFileArgs, opts ...ResourceOption) (*PromRuleFile, error)
public PromRuleFile(string name, PromRuleFileArgs args, CustomResourceOptions? opts = null)
public PromRuleFile(String name, PromRuleFileArgs args)
public PromRuleFile(String name, PromRuleFileArgs args, CustomResourceOptions options)
type: grafana:assert:PromRuleFile
properties: # The arguments to resource properties.
options: # Bag of options to control resource's behavior.
Parameters
- name string
- The unique name of the resource.
- args PromRuleFileArgs
- The arguments to resource properties.
- opts CustomResourceOptions
- Bag of options to control resource's behavior.
- resource_name str
- The unique name of the resource.
- args PromRuleFileArgs
- The arguments to resource properties.
- opts ResourceOptions
- Bag of options to control resource's behavior.
- ctx Context
- Context object for the current deployment.
- name string
- The unique name of the resource.
- args PromRuleFileArgs
- The arguments to resource properties.
- opts ResourceOption
- Bag of options to control resource's behavior.
- name string
- The unique name of the resource.
- args PromRuleFileArgs
- The arguments to resource properties.
- opts CustomResourceOptions
- Bag of options to control resource's behavior.
- name String
- The unique name of the resource.
- args PromRuleFileArgs
- The arguments to resource properties.
- options CustomResourceOptions
- Bag of options to control resource's behavior.
Constructor example
The following reference example uses placeholder values for all input properties.
var promRuleFileResource = new Grafana.Assert.PromRuleFile("promRuleFileResource", new()
{
Groups = new[]
{
new Grafana.Assert.Inputs.PromRuleFileGroupArgs
{
Name = "string",
Rules = new[]
{
new Grafana.Assert.Inputs.PromRuleFileGroupRuleArgs
{
Expr = "string",
Active = false,
Alert = "string",
Annotations =
{
{ "string", "string" },
},
DisableInGroups = new[]
{
"string",
},
Duration = "string",
Labels =
{
{ "string", "string" },
},
Record = "string",
},
},
Interval = "string",
},
},
Active = false,
Name = "string",
});
example, err := assert.NewPromRuleFile(ctx, "promRuleFileResource", &assert.PromRuleFileArgs{
Groups: assert.PromRuleFileGroupArray{
&assert.PromRuleFileGroupArgs{
Name: pulumi.String("string"),
Rules: assert.PromRuleFileGroupRuleArray{
&assert.PromRuleFileGroupRuleArgs{
Expr: pulumi.String("string"),
Active: pulumi.Bool(false),
Alert: pulumi.String("string"),
Annotations: pulumi.StringMap{
"string": pulumi.String("string"),
},
DisableInGroups: pulumi.StringArray{
pulumi.String("string"),
},
Duration: pulumi.String("string"),
Labels: pulumi.StringMap{
"string": pulumi.String("string"),
},
Record: pulumi.String("string"),
},
},
Interval: pulumi.String("string"),
},
},
Active: pulumi.Bool(false),
Name: pulumi.String("string"),
})
var promRuleFileResource = new PromRuleFile("promRuleFileResource", PromRuleFileArgs.builder()
.groups(PromRuleFileGroupArgs.builder()
.name("string")
.rules(PromRuleFileGroupRuleArgs.builder()
.expr("string")
.active(false)
.alert("string")
.annotations(Map.of("string", "string"))
.disableInGroups("string")
.duration("string")
.labels(Map.of("string", "string"))
.record("string")
.build())
.interval("string")
.build())
.active(false)
.name("string")
.build());
prom_rule_file_resource = grafana.assert_.PromRuleFile("promRuleFileResource",
groups=[{
"name": "string",
"rules": [{
"expr": "string",
"active": False,
"alert": "string",
"annotations": {
"string": "string",
},
"disable_in_groups": ["string"],
"duration": "string",
"labels": {
"string": "string",
},
"record": "string",
}],
"interval": "string",
}],
active=False,
name="string")
const promRuleFileResource = new grafana.assert.PromRuleFile("promRuleFileResource", {
groups: [{
name: "string",
rules: [{
expr: "string",
active: false,
alert: "string",
annotations: {
string: "string",
},
disableInGroups: ["string"],
duration: "string",
labels: {
string: "string",
},
record: "string",
}],
interval: "string",
}],
active: false,
name: "string",
});
type: grafana:assert:PromRuleFile
properties:
active: false
groups:
- interval: string
name: string
rules:
- active: false
alert: string
annotations:
string: string
disableInGroups:
- string
duration: string
expr: string
labels:
string: string
record: string
name: string
PromRuleFile Resource Properties
To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.
Inputs
In Python, inputs that are objects can be passed either as argument classes or as dictionary literals.
The PromRuleFile resource accepts the following input properties:
- Groups
List<Pulumiverse.Grafana.Assert.Inputs.PromRuleFileGroup>
- List of Prometheus rule groups. Each group contains one or more rules and can have its own evaluation interval.
- Active bool
- Whether the rules file is active. Inactive rules are not evaluated. Defaults to true.
- Name string
- The name of the Prometheus rules file. This will be stored with a .custom extension. Must follow naming validation rules (alphanumeric, hyphens, underscores).
- Groups
[]PromRuleFileGroupArgs
- List of Prometheus rule groups. Each group contains one or more rules and can have its own evaluation interval.
- Active bool
- Whether the rules file is active. Inactive rules are not evaluated. Defaults to true.
- Name string
- The name of the Prometheus rules file. This will be stored with a .custom extension. Must follow naming validation rules (alphanumeric, hyphens, underscores).
- groups
List<PromRuleFileGroup>
- List of Prometheus rule groups. Each group contains one or more rules and can have its own evaluation interval.
- active Boolean
- Whether the rules file is active. Inactive rules are not evaluated. Defaults to true.
- name String
- The name of the Prometheus rules file. This will be stored with a .custom extension. Must follow naming validation rules (alphanumeric, hyphens, underscores).
- groups
PromRuleFileGroup[]
- List of Prometheus rule groups. Each group contains one or more rules and can have its own evaluation interval.
- active boolean
- Whether the rules file is active. Inactive rules are not evaluated. Defaults to true.
- name string
- The name of the Prometheus rules file. This will be stored with a .custom extension. Must follow naming validation rules (alphanumeric, hyphens, underscores).
- groups
Sequence[PromRuleFileGroupArgs]
- List of Prometheus rule groups. Each group contains one or more rules and can have its own evaluation interval.
- active bool
- Whether the rules file is active. Inactive rules are not evaluated. Defaults to true.
- name str
- The name of the Prometheus rules file. This will be stored with a .custom extension. Must follow naming validation rules (alphanumeric, hyphens, underscores).
- groups List<Property Map>
- List of Prometheus rule groups. Each group contains one or more rules and can have its own evaluation interval.
- active Boolean
- Whether the rules file is active. Inactive rules are not evaluated. Defaults to true.
- name String
- The name of the Prometheus rules file. This will be stored with a .custom extension. Must follow naming validation rules (alphanumeric, hyphens, underscores).
Outputs
All input properties are implicitly available as output properties. Additionally, the PromRuleFile resource produces the following output properties:
- Id string
- The provider-assigned unique ID for this managed resource.
- Id string
- The provider-assigned unique ID for this managed resource.
- id String
- The provider-assigned unique ID for this managed resource.
- id string
- The provider-assigned unique ID for this managed resource.
- id str
- The provider-assigned unique ID for this managed resource.
- id String
- The provider-assigned unique ID for this managed resource.
Look up Existing PromRuleFile Resource
Get an existing PromRuleFile resource’s state with the given name, ID, and optional extra properties used to qualify the lookup.
public static get(name: string, id: Input<ID>, state?: PromRuleFileState, opts?: CustomResourceOptions): PromRuleFile
@staticmethod
def get(resource_name: str,
id: str,
opts: Optional[ResourceOptions] = None,
active: Optional[bool] = None,
groups: Optional[Sequence[PromRuleFileGroupArgs]] = None,
name: Optional[str] = None) -> PromRuleFile
func GetPromRuleFile(ctx *Context, name string, id IDInput, state *PromRuleFileState, opts ...ResourceOption) (*PromRuleFile, error)
public static PromRuleFile Get(string name, Input<string> id, PromRuleFileState? state, CustomResourceOptions? opts = null)
public static PromRuleFile get(String name, Output<String> id, PromRuleFileState state, CustomResourceOptions options)
resources:
  _:
    type: grafana:assert:PromRuleFile
    get:
      id: ${id}
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- resource_name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- Active bool
- Whether the rules file is active. Inactive rules are not evaluated. Defaults to true.
- Groups List<Pulumiverse.Grafana.Assert.Inputs.PromRuleFileGroup>
- List of Prometheus rule groups. Each group contains one or more rules and can have its own evaluation interval.
- Name string
- The name of the Prometheus rules file. This will be stored with a .custom extension. Must follow naming validation rules (alphanumeric, hyphens, underscores).
- Active bool
- Whether the rules file is active. Inactive rules are not evaluated. Defaults to true.
- Groups []PromRuleFileGroupArgs
- List of Prometheus rule groups. Each group contains one or more rules and can have its own evaluation interval.
- Name string
- The name of the Prometheus rules file. This will be stored with a .custom extension. Must follow naming validation rules (alphanumeric, hyphens, underscores).
- active Boolean
- Whether the rules file is active. Inactive rules are not evaluated. Defaults to true.
- groups List<PromRuleFileGroup>
- List of Prometheus rule groups. Each group contains one or more rules and can have its own evaluation interval.
- name String
- The name of the Prometheus rules file. This will be stored with a .custom extension. Must follow naming validation rules (alphanumeric, hyphens, underscores).
- active boolean
- Whether the rules file is active. Inactive rules are not evaluated. Defaults to true.
- groups PromRuleFileGroup[]
- List of Prometheus rule groups. Each group contains one or more rules and can have its own evaluation interval.
- name string
- The name of the Prometheus rules file. This will be stored with a .custom extension. Must follow naming validation rules (alphanumeric, hyphens, underscores).
- active bool
- Whether the rules file is active. Inactive rules are not evaluated. Defaults to true.
- groups Sequence[PromRuleFileGroupArgs]
- List of Prometheus rule groups. Each group contains one or more rules and can have its own evaluation interval.
- name str
- The name of the Prometheus rules file. This will be stored with a .custom extension. Must follow naming validation rules (alphanumeric, hyphens, underscores).
- active Boolean
- Whether the rules file is active. Inactive rules are not evaluated. Defaults to true.
- groups List<Property Map>
- List of Prometheus rule groups. Each group contains one or more rules and can have its own evaluation interval.
- name String
- The name of the Prometheus rules file. This will be stored with a .custom extension. Must follow naming validation rules (alphanumeric, hyphens, underscores).
Supporting Types
PromRuleFileGroup, PromRuleFileGroupArgs
- Name string
- The name of the rule group (e.g., 'latency_monitoring').
- Rules
List<Pulumiverse.Grafana.Assert.Inputs.PromRuleFileGroupRule>
- List of Prometheus rules in this group.
- Interval string
- Evaluation interval for this group (e.g., '30s', '1m'). If not specified, uses the global evaluation interval.
- Name string
- The name of the rule group (e.g., 'latency_monitoring').
- Rules
[]PromRuleFileGroupRule
- List of Prometheus rules in this group.
- Interval string
- Evaluation interval for this group (e.g., '30s', '1m'). If not specified, uses the global evaluation interval.
- name String
- The name of the rule group (e.g., 'latency_monitoring').
- rules
List<PromRuleFileGroupRule>
- List of Prometheus rules in this group.
- interval String
- Evaluation interval for this group (e.g., '30s', '1m'). If not specified, uses the global evaluation interval.
- name string
- The name of the rule group (e.g., 'latency_monitoring').
- rules
PromRuleFileGroupRule[]
- List of Prometheus rules in this group.
- interval string
- Evaluation interval for this group (e.g., '30s', '1m'). If not specified, uses the global evaluation interval.
- name str
- The name of the rule group (e.g., 'latency_monitoring').
- rules
Sequence[PromRuleFileGroupRule]
- List of Prometheus rules in this group.
- interval str
- Evaluation interval for this group (e.g., '30s', '1m'). If not specified, uses the global evaluation interval.
- name String
- The name of the rule group (e.g., 'latency_monitoring').
- rules List<Property Map>
- List of Prometheus rules in this group.
- interval String
- Evaluation interval for this group (e.g., '30s', '1m'). If not specified, uses the global evaluation interval.
PromRuleFileGroupRule, PromRuleFileGroupRuleArgs
- Expr string
- The PromQL expression to evaluate.
- Active bool
- Whether this specific rule is active. This field is read-only and controlled by the API.
- Alert string
- The name of the alert for alerting rules. Either 'record' or 'alert' must be specified, but not both.
- Annotations Dictionary<string, string>
- Annotations to add to alerts (e.g., summary, description).
- DisableInGroups List<string>
- List of group names where this rule should be disabled. Useful for conditional rule enablement.
- Duration string
- How long the condition must be true before firing the alert (e.g., '5m'). Only applicable for alerting rules. Maps to 'for' in Prometheus.
- Labels Dictionary<string, string>
- Labels to attach to the resulting time series or alert.
- Record string
- The name of the time series to output for recording rules. Either 'record' or 'alert' must be specified, but not both.
- Expr string
- The PromQL expression to evaluate.
- Active bool
- Whether this specific rule is active. This field is read-only and controlled by the API.
- Alert string
- The name of the alert for alerting rules. Either 'record' or 'alert' must be specified, but not both.
- Annotations map[string]string
- Annotations to add to alerts (e.g., summary, description).
- DisableInGroups []string
- List of group names where this rule should be disabled. Useful for conditional rule enablement.
- Duration string
- How long the condition must be true before firing the alert (e.g., '5m'). Only applicable for alerting rules. Maps to 'for' in Prometheus.
- Labels map[string]string
- Labels to attach to the resulting time series or alert.
- Record string
- The name of the time series to output for recording rules. Either 'record' or 'alert' must be specified, but not both.
- expr String
- The PromQL expression to evaluate.
- active Boolean
- Whether this specific rule is active. This field is read-only and controlled by the API.
- alert String
- The name of the alert for alerting rules. Either 'record' or 'alert' must be specified, but not both.
- annotations Map<String,String>
- Annotations to add to alerts (e.g., summary, description).
- disableInGroups List<String>
- List of group names where this rule should be disabled. Useful for conditional rule enablement.
- duration String
- How long the condition must be true before firing the alert (e.g., '5m'). Only applicable for alerting rules. Maps to 'for' in Prometheus.
- labels Map<String,String>
- Labels to attach to the resulting time series or alert.
- record String
- The name of the time series to output for recording rules. Either 'record' or 'alert' must be specified, but not both.
- expr string
- The PromQL expression to evaluate.
- active boolean
- Whether this specific rule is active. This field is read-only and controlled by the API.
- alert string
- The name of the alert for alerting rules. Either 'record' or 'alert' must be specified, but not both.
- annotations {[key: string]: string}
- Annotations to add to alerts (e.g., summary, description).
- disableInGroups string[]
- List of group names where this rule should be disabled. Useful for conditional rule enablement.
- duration string
- How long the condition must be true before firing the alert (e.g., '5m'). Only applicable for alerting rules. Maps to 'for' in Prometheus.
- labels {[key: string]: string}
- Labels to attach to the resulting time series or alert.
- record string
- The name of the time series to output for recording rules. Either 'record' or 'alert' must be specified, but not both.
- expr str
- The PromQL expression to evaluate.
- active bool
- Whether this specific rule is active. This field is read-only and controlled by the API.
- alert str
- The name of the alert for alerting rules. Either 'record' or 'alert' must be specified, but not both.
- annotations Mapping[str, str]
- Annotations to add to alerts (e.g., summary, description).
- disable_in_groups Sequence[str]
- List of group names where this rule should be disabled. Useful for conditional rule enablement.
- duration str
- How long the condition must be true before firing the alert (e.g., '5m'). Only applicable for alerting rules. Maps to 'for' in Prometheus.
- labels Mapping[str, str]
- Labels to attach to the resulting time series or alert.
- record str
- The name of the time series to output for recording rules. Either 'record' or 'alert' must be specified, but not both.
- expr String
- The PromQL expression to evaluate.
- active Boolean
- Whether this specific rule is active. This field is read-only and controlled by the API.
- alert String
- The name of the alert for alerting rules. Either 'record' or 'alert' must be specified, but not both.
- annotations Map<String>
- Annotations to add to alerts (e.g., summary, description).
- disableInGroups List<String>
- List of group names where this rule should be disabled. Useful for conditional rule enablement.
- duration String
- How long the condition must be true before firing the alert (e.g., '5m'). Only applicable for alerting rules. Maps to 'for' in Prometheus.
- labels Map<String>
- Labels to attach to the resulting time series or alert.
- record String
- The name of the time series to output for recording rules. Either 'record' or 'alert' must be specified, but not both.
Import
$ pulumi import grafana:assert/promRuleFile:PromRuleFile name "{{ name }}"
To learn more about importing existing cloud resources, see Importing resources.
Package Details
- Repository
- grafana pulumiverse/pulumi-grafana
- License
- Apache-2.0
- Notes
- This Pulumi package is based on the grafana Terraform Provider.
published on Tuesday, Mar 3, 2026 by pulumiverse
