1. Packages
  2. Google Cloud (GCP) Classic
  3. API Docs
  4. ces
  5. Guardrail
Google Cloud v9.6.0 published on Wednesday, Nov 26, 2025 by Pulumi
gcp logo
Google Cloud v9.6.0 published on Wednesday, Nov 26, 2025 by Pulumi

    Description

    Example Usage

    Ces Guardrail Basic

    import * as pulumi from "@pulumi/pulumi";
    import * as gcp from "@pulumi/gcp";
    
    const cesAppForGuardrail = new gcp.ces.App("ces_app_for_guardrail", {
        appId: "app-id",
        location: "us",
        description: "App used as parent for CES Toolset example",
        displayName: "my-app",
        languageSettings: {
            defaultLanguageCode: "en-US",
            supportedLanguageCodes: [
                "es-ES",
                "fr-FR",
            ],
            enableMultilingualSupport: true,
            fallbackAction: "escalate",
        },
        timeZoneSettings: {
            timeZone: "America/Los_Angeles",
        },
    });
    const cesGuardrailBasic = new gcp.ces.Guardrail("ces_guardrail_basic", {
        guardrailId: "guardrail-id",
        location: cesAppForGuardrail.location,
        app: cesAppForGuardrail.appId,
        displayName: "my-guardrail",
        description: "Guardrail description",
        action: {
            respondImmediately: {
                responses: [{
                    text: "Text",
                    disabled: false,
                }],
            },
        },
        enabled: true,
        modelSafety: {
            safetySettings: [{
                category: "HARM_CATEGORY_HATE_SPEECH",
                threshold: "BLOCK_NONE",
            }],
        },
    });
    
    import pulumi
    import pulumi_gcp as gcp
    
    ces_app_for_guardrail = gcp.ces.App("ces_app_for_guardrail",
        app_id="app-id",
        location="us",
        description="App used as parent for CES Toolset example",
        display_name="my-app",
        language_settings={
            "default_language_code": "en-US",
            "supported_language_codes": [
                "es-ES",
                "fr-FR",
            ],
            "enable_multilingual_support": True,
            "fallback_action": "escalate",
        },
        time_zone_settings={
            "time_zone": "America/Los_Angeles",
        })
    ces_guardrail_basic = gcp.ces.Guardrail("ces_guardrail_basic",
        guardrail_id="guardrail-id",
        location=ces_app_for_guardrail.location,
        app=ces_app_for_guardrail.app_id,
        display_name="my-guardrail",
        description="Guardrail description",
        action={
            "respond_immediately": {
                "responses": [{
                    "text": "Text",
                    "disabled": False,
                }],
            },
        },
        enabled=True,
        model_safety={
            "safety_settings": [{
                "category": "HARM_CATEGORY_HATE_SPEECH",
                "threshold": "BLOCK_NONE",
            }],
        })
    
    package main
    
    import (
    	"github.com/pulumi/pulumi-gcp/sdk/v9/go/gcp/ces"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		cesAppForGuardrail, err := ces.NewApp(ctx, "ces_app_for_guardrail", &ces.AppArgs{
    			AppId:       pulumi.String("app-id"),
    			Location:    pulumi.String("us"),
    			Description: pulumi.String("App used as parent for CES Toolset example"),
    			DisplayName: pulumi.String("my-app"),
    			LanguageSettings: &ces.AppLanguageSettingsArgs{
    				DefaultLanguageCode: pulumi.String("en-US"),
    				SupportedLanguageCodes: pulumi.StringArray{
    					pulumi.String("es-ES"),
    					pulumi.String("fr-FR"),
    				},
    				EnableMultilingualSupport: pulumi.Bool(true),
    				FallbackAction:            pulumi.String("escalate"),
    			},
    			TimeZoneSettings: &ces.AppTimeZoneSettingsArgs{
    				TimeZone: pulumi.String("America/Los_Angeles"),
    			},
    		})
    		if err != nil {
    			return err
    		}
    		_, err = ces.NewGuardrail(ctx, "ces_guardrail_basic", &ces.GuardrailArgs{
    			GuardrailId: pulumi.String("guardrail-id"),
    			Location:    cesAppForGuardrail.Location,
    			App:         cesAppForGuardrail.AppId,
    			DisplayName: pulumi.String("my-guardrail"),
    			Description: pulumi.String("Guardrail description"),
    			Action: &ces.GuardrailActionArgs{
    				RespondImmediately: &ces.GuardrailActionRespondImmediatelyArgs{
    					Responses: ces.GuardrailActionRespondImmediatelyResponseArray{
    						&ces.GuardrailActionRespondImmediatelyResponseArgs{
    							Text:     pulumi.String("Text"),
    							Disabled: pulumi.Bool(false),
    						},
    					},
    				},
    			},
    			Enabled: pulumi.Bool(true),
    			ModelSafety: &ces.GuardrailModelSafetyArgs{
    				SafetySettings: ces.GuardrailModelSafetySafetySettingArray{
    					&ces.GuardrailModelSafetySafetySettingArgs{
    						Category:  pulumi.String("HARM_CATEGORY_HATE_SPEECH"),
    						Threshold: pulumi.String("BLOCK_NONE"),
    					},
    				},
    			},
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using Gcp = Pulumi.Gcp;
    
    return await Deployment.RunAsync(() => 
    {
        var cesAppForGuardrail = new Gcp.Ces.App("ces_app_for_guardrail", new()
        {
            AppId = "app-id",
            Location = "us",
            Description = "App used as parent for CES Toolset example",
            DisplayName = "my-app",
            LanguageSettings = new Gcp.Ces.Inputs.AppLanguageSettingsArgs
            {
                DefaultLanguageCode = "en-US",
                SupportedLanguageCodes = new[]
                {
                    "es-ES",
                    "fr-FR",
                },
                EnableMultilingualSupport = true,
                FallbackAction = "escalate",
            },
            TimeZoneSettings = new Gcp.Ces.Inputs.AppTimeZoneSettingsArgs
            {
                TimeZone = "America/Los_Angeles",
            },
        });
    
        var cesGuardrailBasic = new Gcp.Ces.Guardrail("ces_guardrail_basic", new()
        {
            GuardrailId = "guardrail-id",
            Location = cesAppForGuardrail.Location,
            App = cesAppForGuardrail.AppId,
            DisplayName = "my-guardrail",
            Description = "Guardrail description",
            Action = new Gcp.Ces.Inputs.GuardrailActionArgs
            {
                RespondImmediately = new Gcp.Ces.Inputs.GuardrailActionRespondImmediatelyArgs
                {
                    Responses = new[]
                    {
                        new Gcp.Ces.Inputs.GuardrailActionRespondImmediatelyResponseArgs
                        {
                            Text = "Text",
                            Disabled = false,
                        },
                    },
                },
            },
            Enabled = true,
            ModelSafety = new Gcp.Ces.Inputs.GuardrailModelSafetyArgs
            {
                SafetySettings = new[]
                {
                    new Gcp.Ces.Inputs.GuardrailModelSafetySafetySettingArgs
                    {
                        Category = "HARM_CATEGORY_HATE_SPEECH",
                        Threshold = "BLOCK_NONE",
                    },
                },
            },
        });
    
    });
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.gcp.ces.App;
    import com.pulumi.gcp.ces.AppArgs;
    import com.pulumi.gcp.ces.inputs.AppLanguageSettingsArgs;
    import com.pulumi.gcp.ces.inputs.AppTimeZoneSettingsArgs;
    import com.pulumi.gcp.ces.Guardrail;
    import com.pulumi.gcp.ces.GuardrailArgs;
    import com.pulumi.gcp.ces.inputs.GuardrailActionArgs;
    import com.pulumi.gcp.ces.inputs.GuardrailActionRespondImmediatelyArgs;
    import com.pulumi.gcp.ces.inputs.GuardrailModelSafetyArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            var cesAppForGuardrail = new App("cesAppForGuardrail", AppArgs.builder()
                .appId("app-id")
                .location("us")
                .description("App used as parent for CES Toolset example")
                .displayName("my-app")
                .languageSettings(AppLanguageSettingsArgs.builder()
                    .defaultLanguageCode("en-US")
                    .supportedLanguageCodes(                
                        "es-ES",
                        "fr-FR")
                    .enableMultilingualSupport(true)
                    .fallbackAction("escalate")
                    .build())
                .timeZoneSettings(AppTimeZoneSettingsArgs.builder()
                    .timeZone("America/Los_Angeles")
                    .build())
                .build());
    
            var cesGuardrailBasic = new Guardrail("cesGuardrailBasic", GuardrailArgs.builder()
                .guardrailId("guardrail-id")
                .location(cesAppForGuardrail.location())
                .app(cesAppForGuardrail.appId())
                .displayName("my-guardrail")
                .description("Guardrail description")
                .action(GuardrailActionArgs.builder()
                    .respondImmediately(GuardrailActionRespondImmediatelyArgs.builder()
                        .responses(GuardrailActionRespondImmediatelyResponseArgs.builder()
                            .text("Text")
                            .disabled(false)
                            .build())
                        .build())
                    .build())
                .enabled(true)
                .modelSafety(GuardrailModelSafetyArgs.builder()
                    .safetySettings(GuardrailModelSafetySafetySettingArgs.builder()
                        .category("HARM_CATEGORY_HATE_SPEECH")
                        .threshold("BLOCK_NONE")
                        .build())
                    .build())
                .build());
    
        }
    }
    
    resources:
      cesAppForGuardrail:
        type: gcp:ces:App
        name: ces_app_for_guardrail
        properties:
          appId: app-id
          location: us
          description: App used as parent for CES Toolset example
          displayName: my-app
          languageSettings:
            defaultLanguageCode: en-US
            supportedLanguageCodes:
              - es-ES
              - fr-FR
            enableMultilingualSupport: true
            fallbackAction: escalate
          timeZoneSettings:
            timeZone: America/Los_Angeles
      cesGuardrailBasic:
        type: gcp:ces:Guardrail
        name: ces_guardrail_basic
        properties:
          guardrailId: guardrail-id
          location: ${cesAppForGuardrail.location}
          app: ${cesAppForGuardrail.appId}
          displayName: my-guardrail
          description: Guardrail description
          action:
            respondImmediately:
              responses:
                - text: Text
                  disabled: false
          enabled: true
          modelSafety:
            safetySettings:
              - category: HARM_CATEGORY_HATE_SPEECH
                threshold: BLOCK_NONE
    

    Ces Guardrail Transfer Agent Content Filter

    import * as pulumi from "@pulumi/pulumi";
    import * as gcp from "@pulumi/gcp";
    
    const cesAppForGuardrail = new gcp.ces.App("ces_app_for_guardrail", {
        appId: "app-id",
        location: "us",
        description: "App used as parent for CES Toolset example",
        displayName: "my-app",
        languageSettings: {
            defaultLanguageCode: "en-US",
            supportedLanguageCodes: [
                "es-ES",
                "fr-FR",
            ],
            enableMultilingualSupport: true,
            fallbackAction: "escalate",
        },
        timeZoneSettings: {
            timeZone: "America/Los_Angeles",
        },
    });
    const cesGuardrailTransferAgentContentFilter = new gcp.ces.Guardrail("ces_guardrail_transfer_agent_content_filter", {
        guardrailId: "guardrail-id",
        location: cesAppForGuardrail.location,
        app: cesAppForGuardrail.appId,
        displayName: "my-guardrail",
        description: "Guardrail description",
        action: {
            transferAgent: {
                agent: pulumi.interpolate`projects/${cesAppForGuardrail.project}/locations/us/apps/${cesAppForGuardrail.appId}/agents/fake-agent`,
            },
        },
        enabled: true,
        contentFilter: {
            bannedContents: ["example"],
            bannedContentsInUserInputs: ["example"],
            bannedContentsInAgentResponses: ["example"],
            matchType: "SIMPLE_STRING_MATCH",
            disregardDiacritics: true,
        },
    });
    
    import pulumi
    import pulumi_gcp as gcp
    
    ces_app_for_guardrail = gcp.ces.App("ces_app_for_guardrail",
        app_id="app-id",
        location="us",
        description="App used as parent for CES Toolset example",
        display_name="my-app",
        language_settings={
            "default_language_code": "en-US",
            "supported_language_codes": [
                "es-ES",
                "fr-FR",
            ],
            "enable_multilingual_support": True,
            "fallback_action": "escalate",
        },
        time_zone_settings={
            "time_zone": "America/Los_Angeles",
        })
    ces_guardrail_transfer_agent_content_filter = gcp.ces.Guardrail("ces_guardrail_transfer_agent_content_filter",
        guardrail_id="guardrail-id",
        location=ces_app_for_guardrail.location,
        app=ces_app_for_guardrail.app_id,
        display_name="my-guardrail",
        description="Guardrail description",
        action={
            "transfer_agent": {
                "agent": pulumi.Output.all(
                    project=ces_app_for_guardrail.project,
                    app_id=ces_app_for_guardrail.app_id
    ).apply(lambda resolved_outputs: f"projects/{resolved_outputs['project']}/locations/us/apps/{resolved_outputs['app_id']}/agents/fake-agent")
    ,
            },
        },
        enabled=True,
        content_filter={
            "banned_contents": ["example"],
            "banned_contents_in_user_inputs": ["example"],
            "banned_contents_in_agent_responses": ["example"],
            "match_type": "SIMPLE_STRING_MATCH",
            "disregard_diacritics": True,
        })
    
    package main
    
    import (
    	"fmt"
    
    	"github.com/pulumi/pulumi-gcp/sdk/v9/go/gcp/ces"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		cesAppForGuardrail, err := ces.NewApp(ctx, "ces_app_for_guardrail", &ces.AppArgs{
    			AppId:       pulumi.String("app-id"),
    			Location:    pulumi.String("us"),
    			Description: pulumi.String("App used as parent for CES Toolset example"),
    			DisplayName: pulumi.String("my-app"),
    			LanguageSettings: &ces.AppLanguageSettingsArgs{
    				DefaultLanguageCode: pulumi.String("en-US"),
    				SupportedLanguageCodes: pulumi.StringArray{
    					pulumi.String("es-ES"),
    					pulumi.String("fr-FR"),
    				},
    				EnableMultilingualSupport: pulumi.Bool(true),
    				FallbackAction:            pulumi.String("escalate"),
    			},
    			TimeZoneSettings: &ces.AppTimeZoneSettingsArgs{
    				TimeZone: pulumi.String("America/Los_Angeles"),
    			},
    		})
    		if err != nil {
    			return err
    		}
    		_, err = ces.NewGuardrail(ctx, "ces_guardrail_transfer_agent_content_filter", &ces.GuardrailArgs{
    			GuardrailId: pulumi.String("guardrail-id"),
    			Location:    cesAppForGuardrail.Location,
    			App:         cesAppForGuardrail.AppId,
    			DisplayName: pulumi.String("my-guardrail"),
    			Description: pulumi.String("Guardrail description"),
    			Action: &ces.GuardrailActionArgs{
    				TransferAgent: &ces.GuardrailActionTransferAgentArgs{
    					Agent: pulumi.All(cesAppForGuardrail.Project, cesAppForGuardrail.AppId).ApplyT(func(_args []interface{}) (string, error) {
    						project := _args[0].(string)
    						appId := _args[1].(string)
    						return fmt.Sprintf("projects/%v/locations/us/apps/%v/agents/fake-agent", project, appId), nil
    					}).(pulumi.StringOutput),
    				},
    			},
    			Enabled: pulumi.Bool(true),
    			ContentFilter: &ces.GuardrailContentFilterArgs{
    				BannedContents: pulumi.StringArray{
    					pulumi.String("example"),
    				},
    				BannedContentsInUserInputs: pulumi.StringArray{
    					pulumi.String("example"),
    				},
    				BannedContentsInAgentResponses: pulumi.StringArray{
    					pulumi.String("example"),
    				},
    				MatchType:           pulumi.String("SIMPLE_STRING_MATCH"),
    				DisregardDiacritics: pulumi.Bool(true),
    			},
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using Gcp = Pulumi.Gcp;
    
    return await Deployment.RunAsync(() => 
    {
        var cesAppForGuardrail = new Gcp.Ces.App("ces_app_for_guardrail", new()
        {
            AppId = "app-id",
            Location = "us",
            Description = "App used as parent for CES Toolset example",
            DisplayName = "my-app",
            LanguageSettings = new Gcp.Ces.Inputs.AppLanguageSettingsArgs
            {
                DefaultLanguageCode = "en-US",
                SupportedLanguageCodes = new[]
                {
                    "es-ES",
                    "fr-FR",
                },
                EnableMultilingualSupport = true,
                FallbackAction = "escalate",
            },
            TimeZoneSettings = new Gcp.Ces.Inputs.AppTimeZoneSettingsArgs
            {
                TimeZone = "America/Los_Angeles",
            },
        });
    
        var cesGuardrailTransferAgentContentFilter = new Gcp.Ces.Guardrail("ces_guardrail_transfer_agent_content_filter", new()
        {
            GuardrailId = "guardrail-id",
            Location = cesAppForGuardrail.Location,
            App = cesAppForGuardrail.AppId,
            DisplayName = "my-guardrail",
            Description = "Guardrail description",
            Action = new Gcp.Ces.Inputs.GuardrailActionArgs
            {
                TransferAgent = new Gcp.Ces.Inputs.GuardrailActionTransferAgentArgs
                {
                    Agent = Output.Tuple(cesAppForGuardrail.Project, cesAppForGuardrail.AppId).Apply(values =>
                    {
                        var project = values.Item1;
                        var appId = values.Item2;
                        return $"projects/{project}/locations/us/apps/{appId}/agents/fake-agent";
                    }),
                },
            },
            Enabled = true,
            ContentFilter = new Gcp.Ces.Inputs.GuardrailContentFilterArgs
            {
                BannedContents = new[]
                {
                    "example",
                },
                BannedContentsInUserInputs = new[]
                {
                    "example",
                },
                BannedContentsInAgentResponses = new[]
                {
                    "example",
                },
                MatchType = "SIMPLE_STRING_MATCH",
                DisregardDiacritics = true,
            },
        });
    
    });
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.gcp.ces.App;
    import com.pulumi.gcp.ces.AppArgs;
    import com.pulumi.gcp.ces.inputs.AppLanguageSettingsArgs;
    import com.pulumi.gcp.ces.inputs.AppTimeZoneSettingsArgs;
    import com.pulumi.gcp.ces.Guardrail;
    import com.pulumi.gcp.ces.GuardrailArgs;
    import com.pulumi.gcp.ces.inputs.GuardrailActionArgs;
    import com.pulumi.gcp.ces.inputs.GuardrailActionTransferAgentArgs;
    import com.pulumi.gcp.ces.inputs.GuardrailContentFilterArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            var cesAppForGuardrail = new App("cesAppForGuardrail", AppArgs.builder()
                .appId("app-id")
                .location("us")
                .description("App used as parent for CES Toolset example")
                .displayName("my-app")
                .languageSettings(AppLanguageSettingsArgs.builder()
                    .defaultLanguageCode("en-US")
                    .supportedLanguageCodes(                
                        "es-ES",
                        "fr-FR")
                    .enableMultilingualSupport(true)
                    .fallbackAction("escalate")
                    .build())
                .timeZoneSettings(AppTimeZoneSettingsArgs.builder()
                    .timeZone("America/Los_Angeles")
                    .build())
                .build());
    
            var cesGuardrailTransferAgentContentFilter = new Guardrail("cesGuardrailTransferAgentContentFilter", GuardrailArgs.builder()
                .guardrailId("guardrail-id")
                .location(cesAppForGuardrail.location())
                .app(cesAppForGuardrail.appId())
                .displayName("my-guardrail")
                .description("Guardrail description")
                .action(GuardrailActionArgs.builder()
                    .transferAgent(GuardrailActionTransferAgentArgs.builder()
                        .agent(Output.tuple(cesAppForGuardrail.project(), cesAppForGuardrail.appId()).applyValue(values -> {
                            var project = values.t1;
                            var appId = values.t2;
                            return String.format("projects/%s/locations/us/apps/%s/agents/fake-agent", project,appId);
                        }))
                        .build())
                    .build())
                .enabled(true)
                .contentFilter(GuardrailContentFilterArgs.builder()
                    .bannedContents("example")
                    .bannedContentsInUserInputs("example")
                    .bannedContentsInAgentResponses("example")
                    .matchType("SIMPLE_STRING_MATCH")
                    .disregardDiacritics(true)
                    .build())
                .build());
    
        }
    }
    
    resources:
      cesAppForGuardrail:
        type: gcp:ces:App
        name: ces_app_for_guardrail
        properties:
          appId: app-id
          location: us
          description: App used as parent for CES Toolset example
          displayName: my-app
          languageSettings:
            defaultLanguageCode: en-US
            supportedLanguageCodes:
              - es-ES
              - fr-FR
            enableMultilingualSupport: true
            fallbackAction: escalate
          timeZoneSettings:
            timeZone: America/Los_Angeles
      cesGuardrailTransferAgentContentFilter:
        type: gcp:ces:Guardrail
        name: ces_guardrail_transfer_agent_content_filter
        properties:
          guardrailId: guardrail-id
          location: ${cesAppForGuardrail.location}
          app: ${cesAppForGuardrail.appId}
          displayName: my-guardrail
          description: Guardrail description
          action:
            transferAgent:
              agent: projects/${cesAppForGuardrail.project}/locations/us/apps/${cesAppForGuardrail.appId}/agents/fake-agent
          enabled: true
          contentFilter:
            bannedContents:
              - example
            bannedContentsInUserInputs:
              - example
            bannedContentsInAgentResponses:
              - example
            matchType: SIMPLE_STRING_MATCH
            disregardDiacritics: true
    

    Ces Guardrail Generative Answer Llm Prompt Security

    import * as pulumi from "@pulumi/pulumi";
    import * as gcp from "@pulumi/gcp";
    
    const cesAppForGuardrail = new gcp.ces.App("ces_app_for_guardrail", {
        appId: "app-id",
        location: "us",
        description: "App used as parent for CES Toolset example",
        displayName: "my-app",
        languageSettings: {
            defaultLanguageCode: "en-US",
            supportedLanguageCodes: [
                "es-ES",
                "fr-FR",
            ],
            enableMultilingualSupport: true,
            fallbackAction: "escalate",
        },
        timeZoneSettings: {
            timeZone: "America/Los_Angeles",
        },
    });
    const cesGuardrailGenerativeAnswerLlmPromptSecurity = new gcp.ces.Guardrail("ces_guardrail_generative_answer_llm_prompt_security", {
        guardrailId: "guardrail-id",
        location: cesAppForGuardrail.location,
        app: cesAppForGuardrail.appId,
        displayName: "my-guardrail",
        description: "Guardrail description",
        action: {
            generativeAnswer: {
                prompt: "example_prompt",
            },
        },
        enabled: true,
        llmPromptSecurity: {
            customPolicy: {
                maxConversationMessages: 10,
                modelSettings: {
                    model: "gemini-2.5-flash",
                    temperature: 50,
                },
                prompt: "example_prompt",
                policyScope: "USER_QUERY",
                failOpen: true,
                allowShortUtterance: true,
            },
        },
    });
    
    import pulumi
    import pulumi_gcp as gcp
    
    ces_app_for_guardrail = gcp.ces.App("ces_app_for_guardrail",
        app_id="app-id",
        location="us",
        description="App used as parent for CES Toolset example",
        display_name="my-app",
        language_settings={
            "default_language_code": "en-US",
            "supported_language_codes": [
                "es-ES",
                "fr-FR",
            ],
            "enable_multilingual_support": True,
            "fallback_action": "escalate",
        },
        time_zone_settings={
            "time_zone": "America/Los_Angeles",
        })
    ces_guardrail_generative_answer_llm_prompt_security = gcp.ces.Guardrail("ces_guardrail_generative_answer_llm_prompt_security",
        guardrail_id="guardrail-id",
        location=ces_app_for_guardrail.location,
        app=ces_app_for_guardrail.app_id,
        display_name="my-guardrail",
        description="Guardrail description",
        action={
            "generative_answer": {
                "prompt": "example_prompt",
            },
        },
        enabled=True,
        llm_prompt_security={
            "custom_policy": {
                "max_conversation_messages": 10,
                "model_settings": {
                    "model": "gemini-2.5-flash",
                    "temperature": 50,
                },
                "prompt": "example_prompt",
                "policy_scope": "USER_QUERY",
                "fail_open": True,
                "allow_short_utterance": True,
            },
        })
    
    package main
    
    import (
    	"github.com/pulumi/pulumi-gcp/sdk/v9/go/gcp/ces"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		cesAppForGuardrail, err := ces.NewApp(ctx, "ces_app_for_guardrail", &ces.AppArgs{
    			AppId:       pulumi.String("app-id"),
    			Location:    pulumi.String("us"),
    			Description: pulumi.String("App used as parent for CES Toolset example"),
    			DisplayName: pulumi.String("my-app"),
    			LanguageSettings: &ces.AppLanguageSettingsArgs{
    				DefaultLanguageCode: pulumi.String("en-US"),
    				SupportedLanguageCodes: pulumi.StringArray{
    					pulumi.String("es-ES"),
    					pulumi.String("fr-FR"),
    				},
    				EnableMultilingualSupport: pulumi.Bool(true),
    				FallbackAction:            pulumi.String("escalate"),
    			},
    			TimeZoneSettings: &ces.AppTimeZoneSettingsArgs{
    				TimeZone: pulumi.String("America/Los_Angeles"),
    			},
    		})
    		if err != nil {
    			return err
    		}
    		_, err = ces.NewGuardrail(ctx, "ces_guardrail_generative_answer_llm_prompt_security", &ces.GuardrailArgs{
    			GuardrailId: pulumi.String("guardrail-id"),
    			Location:    cesAppForGuardrail.Location,
    			App:         cesAppForGuardrail.AppId,
    			DisplayName: pulumi.String("my-guardrail"),
    			Description: pulumi.String("Guardrail description"),
    			Action: &ces.GuardrailActionArgs{
    				GenerativeAnswer: &ces.GuardrailActionGenerativeAnswerArgs{
    					Prompt: pulumi.String("example_prompt"),
    				},
    			},
    			Enabled: pulumi.Bool(true),
    			LlmPromptSecurity: &ces.GuardrailLlmPromptSecurityArgs{
    				CustomPolicy: &ces.GuardrailLlmPromptSecurityCustomPolicyArgs{
    					MaxConversationMessages: pulumi.Int(10),
    					ModelSettings: &ces.GuardrailLlmPromptSecurityCustomPolicyModelSettingsArgs{
    						Model:       pulumi.String("gemini-2.5-flash"),
    						Temperature: pulumi.Float64(50),
    					},
    					Prompt:              pulumi.String("example_prompt"),
    					PolicyScope:         pulumi.String("USER_QUERY"),
    					FailOpen:            pulumi.Bool(true),
    					AllowShortUtterance: pulumi.Bool(true),
    				},
    			},
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using Gcp = Pulumi.Gcp;
    
    return await Deployment.RunAsync(() => 
    {
        var cesAppForGuardrail = new Gcp.Ces.App("ces_app_for_guardrail", new()
        {
            AppId = "app-id",
            Location = "us",
            Description = "App used as parent for CES Toolset example",
            DisplayName = "my-app",
            LanguageSettings = new Gcp.Ces.Inputs.AppLanguageSettingsArgs
            {
                DefaultLanguageCode = "en-US",
                SupportedLanguageCodes = new[]
                {
                    "es-ES",
                    "fr-FR",
                },
                EnableMultilingualSupport = true,
                FallbackAction = "escalate",
            },
            TimeZoneSettings = new Gcp.Ces.Inputs.AppTimeZoneSettingsArgs
            {
                TimeZone = "America/Los_Angeles",
            },
        });
    
        var cesGuardrailGenerativeAnswerLlmPromptSecurity = new Gcp.Ces.Guardrail("ces_guardrail_generative_answer_llm_prompt_security", new()
        {
            GuardrailId = "guardrail-id",
            Location = cesAppForGuardrail.Location,
            App = cesAppForGuardrail.AppId,
            DisplayName = "my-guardrail",
            Description = "Guardrail description",
            Action = new Gcp.Ces.Inputs.GuardrailActionArgs
            {
                GenerativeAnswer = new Gcp.Ces.Inputs.GuardrailActionGenerativeAnswerArgs
                {
                    Prompt = "example_prompt",
                },
            },
            Enabled = true,
            LlmPromptSecurity = new Gcp.Ces.Inputs.GuardrailLlmPromptSecurityArgs
            {
                CustomPolicy = new Gcp.Ces.Inputs.GuardrailLlmPromptSecurityCustomPolicyArgs
                {
                    MaxConversationMessages = 10,
                    ModelSettings = new Gcp.Ces.Inputs.GuardrailLlmPromptSecurityCustomPolicyModelSettingsArgs
                    {
                        Model = "gemini-2.5-flash",
                        Temperature = 50,
                    },
                    Prompt = "example_prompt",
                    PolicyScope = "USER_QUERY",
                    FailOpen = true,
                    AllowShortUtterance = true,
                },
            },
        });
    
    });
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.gcp.ces.App;
    import com.pulumi.gcp.ces.AppArgs;
    import com.pulumi.gcp.ces.inputs.AppLanguageSettingsArgs;
    import com.pulumi.gcp.ces.inputs.AppTimeZoneSettingsArgs;
    import com.pulumi.gcp.ces.Guardrail;
    import com.pulumi.gcp.ces.GuardrailArgs;
    import com.pulumi.gcp.ces.inputs.GuardrailActionArgs;
    import com.pulumi.gcp.ces.inputs.GuardrailActionGenerativeAnswerArgs;
    import com.pulumi.gcp.ces.inputs.GuardrailLlmPromptSecurityArgs;
    import com.pulumi.gcp.ces.inputs.GuardrailLlmPromptSecurityCustomPolicyArgs;
    import com.pulumi.gcp.ces.inputs.GuardrailLlmPromptSecurityCustomPolicyModelSettingsArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            var cesAppForGuardrail = new App("cesAppForGuardrail", AppArgs.builder()
                .appId("app-id")
                .location("us")
                .description("App used as parent for CES Toolset example")
                .displayName("my-app")
                .languageSettings(AppLanguageSettingsArgs.builder()
                    .defaultLanguageCode("en-US")
                    .supportedLanguageCodes(                
                        "es-ES",
                        "fr-FR")
                    .enableMultilingualSupport(true)
                    .fallbackAction("escalate")
                    .build())
                .timeZoneSettings(AppTimeZoneSettingsArgs.builder()
                    .timeZone("America/Los_Angeles")
                    .build())
                .build());
    
            var cesGuardrailGenerativeAnswerLlmPromptSecurity = new Guardrail("cesGuardrailGenerativeAnswerLlmPromptSecurity", GuardrailArgs.builder()
                .guardrailId("guardrail-id")
                .location(cesAppForGuardrail.location())
                .app(cesAppForGuardrail.appId())
                .displayName("my-guardrail")
                .description("Guardrail description")
                .action(GuardrailActionArgs.builder()
                    .generativeAnswer(GuardrailActionGenerativeAnswerArgs.builder()
                        .prompt("example_prompt")
                        .build())
                    .build())
                .enabled(true)
                .llmPromptSecurity(GuardrailLlmPromptSecurityArgs.builder()
                    .customPolicy(GuardrailLlmPromptSecurityCustomPolicyArgs.builder()
                        .maxConversationMessages(10)
                        .modelSettings(GuardrailLlmPromptSecurityCustomPolicyModelSettingsArgs.builder()
                            .model("gemini-2.5-flash")
                            .temperature(50.0)
                            .build())
                        .prompt("example_prompt")
                        .policyScope("USER_QUERY")
                        .failOpen(true)
                        .allowShortUtterance(true)
                        .build())
                    .build())
                .build());
    
        }
    }
    
    resources:
      cesAppForGuardrail:
        type: gcp:ces:App
        name: ces_app_for_guardrail
        properties:
          appId: app-id
          location: us
          description: App used as parent for CES Toolset example
          displayName: my-app
          languageSettings:
            defaultLanguageCode: en-US
            supportedLanguageCodes:
              - es-ES
              - fr-FR
            enableMultilingualSupport: true
            fallbackAction: escalate
          timeZoneSettings:
            timeZone: America/Los_Angeles
      cesGuardrailGenerativeAnswerLlmPromptSecurity:
        type: gcp:ces:Guardrail
        name: ces_guardrail_generative_answer_llm_prompt_security
        properties:
          guardrailId: guardrail-id
          location: ${cesAppForGuardrail.location}
          app: ${cesAppForGuardrail.appId}
          displayName: my-guardrail
          description: Guardrail description
          action:
            generativeAnswer:
              prompt: example_prompt
          enabled: true
          llmPromptSecurity:
            customPolicy:
              maxConversationMessages: 10
              modelSettings:
                model: gemini-2.5-flash
                temperature: 50
              prompt: example_prompt
              policyScope: USER_QUERY
              failOpen: true
              allowShortUtterance: true
    

    Ces Guardrail Code Callback

    import * as pulumi from "@pulumi/pulumi";
    import * as gcp from "@pulumi/gcp";
    
    const cesAppForGuardrail = new gcp.ces.App("ces_app_for_guardrail", {
        appId: "app-id",
        location: "us",
        description: "App used as parent for CES Toolset example",
        displayName: "my-app",
        languageSettings: {
            defaultLanguageCode: "en-US",
            supportedLanguageCodes: [
                "es-ES",
                "fr-FR",
            ],
            enableMultilingualSupport: true,
            fallbackAction: "escalate",
        },
        timeZoneSettings: {
            timeZone: "America/Los_Angeles",
        },
    });
    const cesGuardrailCodeCallback = new gcp.ces.Guardrail("ces_guardrail_code_callback", {
        guardrailId: "guardrail-id",
        location: cesAppForGuardrail.location,
        app: cesAppForGuardrail.appId,
        displayName: "my-guardrail",
        description: "Guardrail description",
        action: {
            generativeAnswer: {
                prompt: "example_prompt",
            },
        },
        enabled: true,
        codeCallback: {
            beforeAgentCallback: {
                description: "Example callback",
                disabled: true,
                pythonCode: `def callback(context):
        return {'override': False}`,
            },
            afterAgentCallback: {
                description: "Example callback",
                disabled: true,
                pythonCode: `def callback(context):
        return {'override': False}`,
            },
            beforeModelCallback: {
                description: "Example callback",
                disabled: true,
                pythonCode: `def callback(context):
        return {'override': False}`,
            },
            afterModelCallback: {
                description: "Example callback",
                disabled: true,
                pythonCode: `def callback(context):
        return {'override': False}`,
            },
        },
    });
    
    import pulumi
    import pulumi_gcp as gcp
    
    ces_app_for_guardrail = gcp.ces.App("ces_app_for_guardrail",
        app_id="app-id",
        location="us",
        description="App used as parent for CES Toolset example",
        display_name="my-app",
        language_settings={
            "default_language_code": "en-US",
            "supported_language_codes": [
                "es-ES",
                "fr-FR",
            ],
            "enable_multilingual_support": True,
            "fallback_action": "escalate",
        },
        time_zone_settings={
            "time_zone": "America/Los_Angeles",
        })
    ces_guardrail_code_callback = gcp.ces.Guardrail("ces_guardrail_code_callback",
        guardrail_id="guardrail-id",
        location=ces_app_for_guardrail.location,
        app=ces_app_for_guardrail.app_id,
        display_name="my-guardrail",
        description="Guardrail description",
        action={
            "generative_answer": {
                "prompt": "example_prompt",
            },
        },
        enabled=True,
        code_callback={
            "before_agent_callback": {
                "description": "Example callback",
                "disabled": True,
                "python_code": """def callback(context):
        return {'override': False}""",
            },
            "after_agent_callback": {
                "description": "Example callback",
                "disabled": True,
                "python_code": """def callback(context):
        return {'override': False}""",
            },
            "before_model_callback": {
                "description": "Example callback",
                "disabled": True,
                "python_code": """def callback(context):
        return {'override': False}""",
            },
            "after_model_callback": {
                "description": "Example callback",
                "disabled": True,
                "python_code": """def callback(context):
        return {'override': False}""",
            },
        })
    
    package main
    
    import (
    	"github.com/pulumi/pulumi-gcp/sdk/v9/go/gcp/ces"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		cesAppForGuardrail, err := ces.NewApp(ctx, "ces_app_for_guardrail", &ces.AppArgs{
    			AppId:       pulumi.String("app-id"),
    			Location:    pulumi.String("us"),
    			Description: pulumi.String("App used as parent for CES Toolset example"),
    			DisplayName: pulumi.String("my-app"),
    			LanguageSettings: &ces.AppLanguageSettingsArgs{
    				DefaultLanguageCode: pulumi.String("en-US"),
    				SupportedLanguageCodes: pulumi.StringArray{
    					pulumi.String("es-ES"),
    					pulumi.String("fr-FR"),
    				},
    				EnableMultilingualSupport: pulumi.Bool(true),
    				FallbackAction:            pulumi.String("escalate"),
    			},
    			TimeZoneSettings: &ces.AppTimeZoneSettingsArgs{
    				TimeZone: pulumi.String("America/Los_Angeles"),
    			},
    		})
    		if err != nil {
    			return err
    		}
    		_, err = ces.NewGuardrail(ctx, "ces_guardrail_code_callback", &ces.GuardrailArgs{
    			GuardrailId: pulumi.String("guardrail-id"),
    			Location:    cesAppForGuardrail.Location,
    			App:         cesAppForGuardrail.AppId,
    			DisplayName: pulumi.String("my-guardrail"),
    			Description: pulumi.String("Guardrail description"),
    			Action: &ces.GuardrailActionArgs{
    				GenerativeAnswer: &ces.GuardrailActionGenerativeAnswerArgs{
    					Prompt: pulumi.String("example_prompt"),
    				},
    			},
    			Enabled: pulumi.Bool(true),
    			CodeCallback: &ces.GuardrailCodeCallbackArgs{
    				BeforeAgentCallback: &ces.GuardrailCodeCallbackBeforeAgentCallbackArgs{
    					Description: pulumi.String("Example callback"),
    					Disabled:    pulumi.Bool(true),
    					PythonCode:  pulumi.String("def callback(context):\n    return {'override': False}"),
    				},
    				AfterAgentCallback: &ces.GuardrailCodeCallbackAfterAgentCallbackArgs{
    					Description: pulumi.String("Example callback"),
    					Disabled:    pulumi.Bool(true),
    					PythonCode:  pulumi.String("def callback(context):\n    return {'override': False}"),
    				},
    				BeforeModelCallback: &ces.GuardrailCodeCallbackBeforeModelCallbackArgs{
    					Description: pulumi.String("Example callback"),
    					Disabled:    pulumi.Bool(true),
    					PythonCode:  pulumi.String("def callback(context):\n    return {'override': False}"),
    				},
    				AfterModelCallback: &ces.GuardrailCodeCallbackAfterModelCallbackArgs{
    					Description: pulumi.String("Example callback"),
    					Disabled:    pulumi.Bool(true),
    					PythonCode:  pulumi.String("def callback(context):\n    return {'override': False}"),
    				},
    			},
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using Gcp = Pulumi.Gcp;
    
    return await Deployment.RunAsync(() => 
    {
        var cesAppForGuardrail = new Gcp.Ces.App("ces_app_for_guardrail", new()
        {
            AppId = "app-id",
            Location = "us",
            Description = "App used as parent for CES Toolset example",
            DisplayName = "my-app",
            LanguageSettings = new Gcp.Ces.Inputs.AppLanguageSettingsArgs
            {
                DefaultLanguageCode = "en-US",
                SupportedLanguageCodes = new[]
                {
                    "es-ES",
                    "fr-FR",
                },
                EnableMultilingualSupport = true,
                FallbackAction = "escalate",
            },
            TimeZoneSettings = new Gcp.Ces.Inputs.AppTimeZoneSettingsArgs
            {
                TimeZone = "America/Los_Angeles",
            },
        });
    
        var cesGuardrailCodeCallback = new Gcp.Ces.Guardrail("ces_guardrail_code_callback", new()
        {
            GuardrailId = "guardrail-id",
            Location = cesAppForGuardrail.Location,
            App = cesAppForGuardrail.AppId,
            DisplayName = "my-guardrail",
            Description = "Guardrail description",
            Action = new Gcp.Ces.Inputs.GuardrailActionArgs
            {
                GenerativeAnswer = new Gcp.Ces.Inputs.GuardrailActionGenerativeAnswerArgs
                {
                    Prompt = "example_prompt",
                },
            },
            Enabled = true,
            CodeCallback = new Gcp.Ces.Inputs.GuardrailCodeCallbackArgs
            {
                BeforeAgentCallback = new Gcp.Ces.Inputs.GuardrailCodeCallbackBeforeAgentCallbackArgs
                {
                    Description = "Example callback",
                    Disabled = true,
                    PythonCode = @"def callback(context):
        return {'override': False}",
                },
                AfterAgentCallback = new Gcp.Ces.Inputs.GuardrailCodeCallbackAfterAgentCallbackArgs
                {
                    Description = "Example callback",
                    Disabled = true,
                    PythonCode = @"def callback(context):
        return {'override': False}",
                },
                BeforeModelCallback = new Gcp.Ces.Inputs.GuardrailCodeCallbackBeforeModelCallbackArgs
                {
                    Description = "Example callback",
                    Disabled = true,
                    PythonCode = @"def callback(context):
        return {'override': False}",
                },
                AfterModelCallback = new Gcp.Ces.Inputs.GuardrailCodeCallbackAfterModelCallbackArgs
                {
                    Description = "Example callback",
                    Disabled = true,
                    PythonCode = @"def callback(context):
        return {'override': False}",
                },
            },
        });
    
    });
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.gcp.ces.App;
    import com.pulumi.gcp.ces.AppArgs;
    import com.pulumi.gcp.ces.inputs.AppLanguageSettingsArgs;
    import com.pulumi.gcp.ces.inputs.AppTimeZoneSettingsArgs;
    import com.pulumi.gcp.ces.Guardrail;
    import com.pulumi.gcp.ces.GuardrailArgs;
    import com.pulumi.gcp.ces.inputs.GuardrailActionArgs;
    import com.pulumi.gcp.ces.inputs.GuardrailActionGenerativeAnswerArgs;
    import com.pulumi.gcp.ces.inputs.GuardrailCodeCallbackArgs;
    import com.pulumi.gcp.ces.inputs.GuardrailCodeCallbackBeforeAgentCallbackArgs;
    import com.pulumi.gcp.ces.inputs.GuardrailCodeCallbackAfterAgentCallbackArgs;
    import com.pulumi.gcp.ces.inputs.GuardrailCodeCallbackBeforeModelCallbackArgs;
    import com.pulumi.gcp.ces.inputs.GuardrailCodeCallbackAfterModelCallbackArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            var cesAppForGuardrail = new App("cesAppForGuardrail", AppArgs.builder()
                .appId("app-id")
                .location("us")
                .description("App used as parent for CES Toolset example")
                .displayName("my-app")
                .languageSettings(AppLanguageSettingsArgs.builder()
                    .defaultLanguageCode("en-US")
                    .supportedLanguageCodes(                
                        "es-ES",
                        "fr-FR")
                    .enableMultilingualSupport(true)
                    .fallbackAction("escalate")
                    .build())
                .timeZoneSettings(AppTimeZoneSettingsArgs.builder()
                    .timeZone("America/Los_Angeles")
                    .build())
                .build());
    
            var cesGuardrailCodeCallback = new Guardrail("cesGuardrailCodeCallback", GuardrailArgs.builder()
                .guardrailId("guardrail-id")
                .location(cesAppForGuardrail.location())
                .app(cesAppForGuardrail.appId())
                .displayName("my-guardrail")
                .description("Guardrail description")
                .action(GuardrailActionArgs.builder()
                    .generativeAnswer(GuardrailActionGenerativeAnswerArgs.builder()
                        .prompt("example_prompt")
                        .build())
                    .build())
                .enabled(true)
                .codeCallback(GuardrailCodeCallbackArgs.builder()
                    .beforeAgentCallback(GuardrailCodeCallbackBeforeAgentCallbackArgs.builder()
                        .description("Example callback")
                        .disabled(true)
                        .pythonCode("""
    def callback(context):
        return {'override': False}                    """)
                        .build())
                    .afterAgentCallback(GuardrailCodeCallbackAfterAgentCallbackArgs.builder()
                        .description("Example callback")
                        .disabled(true)
                        .pythonCode("""
    def callback(context):
        return {'override': False}                    """)
                        .build())
                    .beforeModelCallback(GuardrailCodeCallbackBeforeModelCallbackArgs.builder()
                        .description("Example callback")
                        .disabled(true)
                        .pythonCode("""
    def callback(context):
        return {'override': False}                    """)
                        .build())
                    .afterModelCallback(GuardrailCodeCallbackAfterModelCallbackArgs.builder()
                        .description("Example callback")
                        .disabled(true)
                        .pythonCode("""
    def callback(context):
        return {'override': False}                    """)
                        .build())
                    .build())
                .build());
    
        }
    }
    
    resources:
      cesAppForGuardrail:
        type: gcp:ces:App
        name: ces_app_for_guardrail
        properties:
          appId: app-id
          location: us
          description: App used as parent for CES Toolset example
          displayName: my-app
          languageSettings:
            defaultLanguageCode: en-US
            supportedLanguageCodes:
              - es-ES
              - fr-FR
            enableMultilingualSupport: true
            fallbackAction: escalate
          timeZoneSettings:
            timeZone: America/Los_Angeles
      cesGuardrailCodeCallback:
        type: gcp:ces:Guardrail
        name: ces_guardrail_code_callback
        properties:
          guardrailId: guardrail-id
          location: ${cesAppForGuardrail.location}
          app: ${cesAppForGuardrail.appId}
          displayName: my-guardrail
          description: Guardrail description
          action:
            generativeAnswer:
              prompt: example_prompt
          enabled: true
          codeCallback:
            beforeAgentCallback:
              description: Example callback
              disabled: true
              pythonCode: |-
                def callback(context):
                    return {'override': False}            
            afterAgentCallback:
              description: Example callback
              disabled: true
              pythonCode: |-
                def callback(context):
                    return {'override': False}            
            beforeModelCallback:
              description: Example callback
              disabled: true
              pythonCode: |-
                def callback(context):
                    return {'override': False}            
            afterModelCallback:
              description: Example callback
              disabled: true
              pythonCode: |-
                def callback(context):
                    return {'override': False}            
    

    Ces Guardrail Llm Policy

    import * as pulumi from "@pulumi/pulumi";
    import * as gcp from "@pulumi/gcp";
    
    const cesAppForGuardrail = new gcp.ces.App("ces_app_for_guardrail", {
        appId: "app-id",
        location: "us",
        description: "App used as parent for CES Toolset example",
        displayName: "my-app",
        languageSettings: {
            defaultLanguageCode: "en-US",
            supportedLanguageCodes: [
                "es-ES",
                "fr-FR",
            ],
            enableMultilingualSupport: true,
            fallbackAction: "escalate",
        },
        timeZoneSettings: {
            timeZone: "America/Los_Angeles",
        },
    });
    const cesGuardrailLlmPolicy = new gcp.ces.Guardrail("ces_guardrail_llm_policy", {
        guardrailId: "guardrail-id",
        location: cesAppForGuardrail.location,
        app: cesAppForGuardrail.appId,
        displayName: "my-guardrail",
        description: "Guardrail description",
        action: {
            generativeAnswer: {
                prompt: "example_prompt",
            },
        },
        enabled: true,
        llmPolicy: {
            maxConversationMessages: 10,
            modelSettings: {
                model: "gemini-2.5-flash",
                temperature: 50,
            },
            prompt: "example_prompt",
            policyScope: "USER_QUERY",
            failOpen: true,
            allowShortUtterance: true,
        },
    });
    
    import pulumi
    import pulumi_gcp as gcp
    
    ces_app_for_guardrail = gcp.ces.App("ces_app_for_guardrail",
        app_id="app-id",
        location="us",
        description="App used as parent for CES Toolset example",
        display_name="my-app",
        language_settings={
            "default_language_code": "en-US",
            "supported_language_codes": [
                "es-ES",
                "fr-FR",
            ],
            "enable_multilingual_support": True,
            "fallback_action": "escalate",
        },
        time_zone_settings={
            "time_zone": "America/Los_Angeles",
        })
    ces_guardrail_llm_policy = gcp.ces.Guardrail("ces_guardrail_llm_policy",
        guardrail_id="guardrail-id",
        location=ces_app_for_guardrail.location,
        app=ces_app_for_guardrail.app_id,
        display_name="my-guardrail",
        description="Guardrail description",
        action={
            "generative_answer": {
                "prompt": "example_prompt",
            },
        },
        enabled=True,
        llm_policy={
            "max_conversation_messages": 10,
            "model_settings": {
                "model": "gemini-2.5-flash",
                "temperature": 50,
            },
            "prompt": "example_prompt",
            "policy_scope": "USER_QUERY",
            "fail_open": True,
            "allow_short_utterance": True,
        })
    
    package main
    
    import (
    	"github.com/pulumi/pulumi-gcp/sdk/v9/go/gcp/ces"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		cesAppForGuardrail, err := ces.NewApp(ctx, "ces_app_for_guardrail", &ces.AppArgs{
    			AppId:       pulumi.String("app-id"),
    			Location:    pulumi.String("us"),
    			Description: pulumi.String("App used as parent for CES Toolset example"),
    			DisplayName: pulumi.String("my-app"),
    			LanguageSettings: &ces.AppLanguageSettingsArgs{
    				DefaultLanguageCode: pulumi.String("en-US"),
    				SupportedLanguageCodes: pulumi.StringArray{
    					pulumi.String("es-ES"),
    					pulumi.String("fr-FR"),
    				},
    				EnableMultilingualSupport: pulumi.Bool(true),
    				FallbackAction:            pulumi.String("escalate"),
    			},
    			TimeZoneSettings: &ces.AppTimeZoneSettingsArgs{
    				TimeZone: pulumi.String("America/Los_Angeles"),
    			},
    		})
    		if err != nil {
    			return err
    		}
    		_, err = ces.NewGuardrail(ctx, "ces_guardrail_llm_policy", &ces.GuardrailArgs{
    			GuardrailId: pulumi.String("guardrail-id"),
    			Location:    cesAppForGuardrail.Location,
    			App:         cesAppForGuardrail.AppId,
    			DisplayName: pulumi.String("my-guardrail"),
    			Description: pulumi.String("Guardrail description"),
    			Action: &ces.GuardrailActionArgs{
    				GenerativeAnswer: &ces.GuardrailActionGenerativeAnswerArgs{
    					Prompt: pulumi.String("example_prompt"),
    				},
    			},
    			Enabled: pulumi.Bool(true),
    			LlmPolicy: &ces.GuardrailLlmPolicyArgs{
    				MaxConversationMessages: pulumi.Int(10),
    				ModelSettings: &ces.GuardrailLlmPolicyModelSettingsArgs{
    					Model:       pulumi.String("gemini-2.5-flash"),
    					Temperature: pulumi.Float64(50),
    				},
    				Prompt:              pulumi.String("example_prompt"),
    				PolicyScope:         pulumi.String("USER_QUERY"),
    				FailOpen:            pulumi.Bool(true),
    				AllowShortUtterance: pulumi.Bool(true),
    			},
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using Gcp = Pulumi.Gcp;
    
    return await Deployment.RunAsync(() => 
    {
        var cesAppForGuardrail = new Gcp.Ces.App("ces_app_for_guardrail", new()
        {
            AppId = "app-id",
            Location = "us",
            Description = "App used as parent for CES Toolset example",
            DisplayName = "my-app",
            LanguageSettings = new Gcp.Ces.Inputs.AppLanguageSettingsArgs
            {
                DefaultLanguageCode = "en-US",
                SupportedLanguageCodes = new[]
                {
                    "es-ES",
                    "fr-FR",
                },
                EnableMultilingualSupport = true,
                FallbackAction = "escalate",
            },
            TimeZoneSettings = new Gcp.Ces.Inputs.AppTimeZoneSettingsArgs
            {
                TimeZone = "America/Los_Angeles",
            },
        });
    
        var cesGuardrailLlmPolicy = new Gcp.Ces.Guardrail("ces_guardrail_llm_policy", new()
        {
            GuardrailId = "guardrail-id",
            Location = cesAppForGuardrail.Location,
            App = cesAppForGuardrail.AppId,
            DisplayName = "my-guardrail",
            Description = "Guardrail description",
            Action = new Gcp.Ces.Inputs.GuardrailActionArgs
            {
                GenerativeAnswer = new Gcp.Ces.Inputs.GuardrailActionGenerativeAnswerArgs
                {
                    Prompt = "example_prompt",
                },
            },
            Enabled = true,
            LlmPolicy = new Gcp.Ces.Inputs.GuardrailLlmPolicyArgs
            {
                MaxConversationMessages = 10,
                ModelSettings = new Gcp.Ces.Inputs.GuardrailLlmPolicyModelSettingsArgs
                {
                    Model = "gemini-2.5-flash",
                    Temperature = 50,
                },
                Prompt = "example_prompt",
                PolicyScope = "USER_QUERY",
                FailOpen = true,
                AllowShortUtterance = true,
            },
        });
    
    });
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.gcp.ces.App;
    import com.pulumi.gcp.ces.AppArgs;
    import com.pulumi.gcp.ces.inputs.AppLanguageSettingsArgs;
    import com.pulumi.gcp.ces.inputs.AppTimeZoneSettingsArgs;
    import com.pulumi.gcp.ces.Guardrail;
    import com.pulumi.gcp.ces.GuardrailArgs;
    import com.pulumi.gcp.ces.inputs.GuardrailActionArgs;
    import com.pulumi.gcp.ces.inputs.GuardrailActionGenerativeAnswerArgs;
    import com.pulumi.gcp.ces.inputs.GuardrailLlmPolicyArgs;
    import com.pulumi.gcp.ces.inputs.GuardrailLlmPolicyModelSettingsArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            var cesAppForGuardrail = new App("cesAppForGuardrail", AppArgs.builder()
                .appId("app-id")
                .location("us")
                .description("App used as parent for CES Toolset example")
                .displayName("my-app")
                .languageSettings(AppLanguageSettingsArgs.builder()
                    .defaultLanguageCode("en-US")
                    .supportedLanguageCodes(                
                        "es-ES",
                        "fr-FR")
                    .enableMultilingualSupport(true)
                    .fallbackAction("escalate")
                    .build())
                .timeZoneSettings(AppTimeZoneSettingsArgs.builder()
                    .timeZone("America/Los_Angeles")
                    .build())
                .build());
    
            var cesGuardrailLlmPolicy = new Guardrail("cesGuardrailLlmPolicy", GuardrailArgs.builder()
                .guardrailId("guardrail-id")
                .location(cesAppForGuardrail.location())
                .app(cesAppForGuardrail.appId())
                .displayName("my-guardrail")
                .description("Guardrail description")
                .action(GuardrailActionArgs.builder()
                    .generativeAnswer(GuardrailActionGenerativeAnswerArgs.builder()
                        .prompt("example_prompt")
                        .build())
                    .build())
                .enabled(true)
                .llmPolicy(GuardrailLlmPolicyArgs.builder()
                    .maxConversationMessages(10)
                    .modelSettings(GuardrailLlmPolicyModelSettingsArgs.builder()
                        .model("gemini-2.5-flash")
                        .temperature(50.0)
                        .build())
                    .prompt("example_prompt")
                    .policyScope("USER_QUERY")
                    .failOpen(true)
                    .allowShortUtterance(true)
                    .build())
                .build());
    
        }
    }
    
    resources:
      cesAppForGuardrail:
        type: gcp:ces:App
        name: ces_app_for_guardrail
        properties:
          appId: app-id
          location: us
          description: App used as parent for CES Toolset example
          displayName: my-app
          languageSettings:
            defaultLanguageCode: en-US
            supportedLanguageCodes:
              - es-ES
              - fr-FR
            enableMultilingualSupport: true
            fallbackAction: escalate
          timeZoneSettings:
            timeZone: America/Los_Angeles
      cesGuardrailLlmPolicy:
        type: gcp:ces:Guardrail
        name: ces_guardrail_llm_policy
        properties:
          guardrailId: guardrail-id
          location: ${cesAppForGuardrail.location}
          app: ${cesAppForGuardrail.appId}
          displayName: my-guardrail
          description: Guardrail description
          action:
            generativeAnswer:
              prompt: example_prompt
          enabled: true
          llmPolicy:
            maxConversationMessages: 10
            modelSettings:
              model: gemini-2.5-flash
              temperature: 50
            prompt: example_prompt
            policyScope: USER_QUERY
            failOpen: true
            allowShortUtterance: true
    

    Create Guardrail Resource

    Resources are created with functions called constructors. To learn more about declaring and configuring resources, see Resources.

    Constructor syntax

    new Guardrail(name: string, args: GuardrailArgs, opts?: CustomResourceOptions);
    @overload
    def Guardrail(resource_name: str,
                  args: GuardrailArgs,
                  opts: Optional[ResourceOptions] = None)
    
    @overload
    def Guardrail(resource_name: str,
                  opts: Optional[ResourceOptions] = None,
                  display_name: Optional[str] = None,
                  app: Optional[str] = None,
                  location: Optional[str] = None,
                  guardrail_id: Optional[str] = None,
                  enabled: Optional[bool] = None,
                  description: Optional[str] = None,
                  action: Optional[GuardrailActionArgs] = None,
                  content_filter: Optional[GuardrailContentFilterArgs] = None,
                  llm_policy: Optional[GuardrailLlmPolicyArgs] = None,
                  llm_prompt_security: Optional[GuardrailLlmPromptSecurityArgs] = None,
                  code_callback: Optional[GuardrailCodeCallbackArgs] = None,
                  model_safety: Optional[GuardrailModelSafetyArgs] = None,
                  project: Optional[str] = None)
    func NewGuardrail(ctx *Context, name string, args GuardrailArgs, opts ...ResourceOption) (*Guardrail, error)
    public Guardrail(string name, GuardrailArgs args, CustomResourceOptions? opts = null)
    public Guardrail(String name, GuardrailArgs args)
    public Guardrail(String name, GuardrailArgs args, CustomResourceOptions options)
    
    type: gcp:ces:Guardrail
    properties: # The arguments to resource properties.
    options: # Bag of options to control resource's behavior.
    
    

    Parameters

    name string
    The unique name of the resource.
    args GuardrailArgs
    The arguments to resource properties.
    opts CustomResourceOptions
    Bag of options to control resource's behavior.
    resource_name str
    The unique name of the resource.
    args GuardrailArgs
    The arguments to resource properties.
    opts ResourceOptions
    Bag of options to control resource's behavior.
    ctx Context
    Context object for the current deployment.
    name string
    The unique name of the resource.
    args GuardrailArgs
    The arguments to resource properties.
    opts ResourceOption
    Bag of options to control resource's behavior.
    name string
    The unique name of the resource.
    args GuardrailArgs
    The arguments to resource properties.
    opts CustomResourceOptions
    Bag of options to control resource's behavior.
    name String
    The unique name of the resource.
    args GuardrailArgs
    The arguments to resource properties.
    options CustomResourceOptions
    Bag of options to control resource's behavior.

    Constructor example

    The following reference example uses placeholder values for all input properties.

    var guardrailResource = new Gcp.Ces.Guardrail("guardrailResource", new()
    {
        DisplayName = "string",
        App = "string",
        Location = "string",
        GuardrailId = "string",
        Enabled = false,
        Description = "string",
        Action = new Gcp.Ces.Inputs.GuardrailActionArgs
        {
            GenerativeAnswer = new Gcp.Ces.Inputs.GuardrailActionGenerativeAnswerArgs
            {
                Prompt = "string",
            },
            RespondImmediately = new Gcp.Ces.Inputs.GuardrailActionRespondImmediatelyArgs
            {
                Responses = new[]
                {
                    new Gcp.Ces.Inputs.GuardrailActionRespondImmediatelyResponseArgs
                    {
                        Text = "string",
                        Disabled = false,
                    },
                },
            },
            TransferAgent = new Gcp.Ces.Inputs.GuardrailActionTransferAgentArgs
            {
                Agent = "string",
            },
        },
        ContentFilter = new Gcp.Ces.Inputs.GuardrailContentFilterArgs
        {
            MatchType = "string",
            BannedContents = new[]
            {
                "string",
            },
            BannedContentsInAgentResponses = new[]
            {
                "string",
            },
            BannedContentsInUserInputs = new[]
            {
                "string",
            },
            DisregardDiacritics = false,
        },
        LlmPolicy = new Gcp.Ces.Inputs.GuardrailLlmPolicyArgs
        {
            PolicyScope = "string",
            Prompt = "string",
            AllowShortUtterance = false,
            FailOpen = false,
            MaxConversationMessages = 0,
            ModelSettings = new Gcp.Ces.Inputs.GuardrailLlmPolicyModelSettingsArgs
            {
                Model = "string",
                Temperature = 0,
            },
        },
        LlmPromptSecurity = new Gcp.Ces.Inputs.GuardrailLlmPromptSecurityArgs
        {
            CustomPolicy = new Gcp.Ces.Inputs.GuardrailLlmPromptSecurityCustomPolicyArgs
            {
                PolicyScope = "string",
                Prompt = "string",
                AllowShortUtterance = false,
                FailOpen = false,
                MaxConversationMessages = 0,
                ModelSettings = new Gcp.Ces.Inputs.GuardrailLlmPromptSecurityCustomPolicyModelSettingsArgs
                {
                    Model = "string",
                    Temperature = 0,
                },
            },
            DefaultSettings = new Gcp.Ces.Inputs.GuardrailLlmPromptSecurityDefaultSettingsArgs
            {
                DefaultPromptTemplate = "string",
            },
        },
        CodeCallback = new Gcp.Ces.Inputs.GuardrailCodeCallbackArgs
        {
            AfterAgentCallback = new Gcp.Ces.Inputs.GuardrailCodeCallbackAfterAgentCallbackArgs
            {
                PythonCode = "string",
                Description = "string",
                Disabled = false,
            },
            AfterModelCallback = new Gcp.Ces.Inputs.GuardrailCodeCallbackAfterModelCallbackArgs
            {
                PythonCode = "string",
                Description = "string",
                Disabled = false,
            },
            BeforeAgentCallback = new Gcp.Ces.Inputs.GuardrailCodeCallbackBeforeAgentCallbackArgs
            {
                PythonCode = "string",
                Description = "string",
                Disabled = false,
            },
            BeforeModelCallback = new Gcp.Ces.Inputs.GuardrailCodeCallbackBeforeModelCallbackArgs
            {
                PythonCode = "string",
                Description = "string",
                Disabled = false,
            },
        },
        ModelSafety = new Gcp.Ces.Inputs.GuardrailModelSafetyArgs
        {
            SafetySettings = new[]
            {
                new Gcp.Ces.Inputs.GuardrailModelSafetySafetySettingArgs
                {
                    Category = "string",
                    Threshold = "string",
                },
            },
        },
        Project = "string",
    });
    
    example, err := ces.NewGuardrail(ctx, "guardrailResource", &ces.GuardrailArgs{
    	DisplayName: pulumi.String("string"),
    	App:         pulumi.String("string"),
    	Location:    pulumi.String("string"),
    	GuardrailId: pulumi.String("string"),
    	Enabled:     pulumi.Bool(false),
    	Description: pulumi.String("string"),
    	Action: &ces.GuardrailActionArgs{
    		GenerativeAnswer: &ces.GuardrailActionGenerativeAnswerArgs{
    			Prompt: pulumi.String("string"),
    		},
    		RespondImmediately: &ces.GuardrailActionRespondImmediatelyArgs{
    			Responses: ces.GuardrailActionRespondImmediatelyResponseArray{
    				&ces.GuardrailActionRespondImmediatelyResponseArgs{
    					Text:     pulumi.String("string"),
    					Disabled: pulumi.Bool(false),
    				},
    			},
    		},
    		TransferAgent: &ces.GuardrailActionTransferAgentArgs{
    			Agent: pulumi.String("string"),
    		},
    	},
    	ContentFilter: &ces.GuardrailContentFilterArgs{
    		MatchType: pulumi.String("string"),
    		BannedContents: pulumi.StringArray{
    			pulumi.String("string"),
    		},
    		BannedContentsInAgentResponses: pulumi.StringArray{
    			pulumi.String("string"),
    		},
    		BannedContentsInUserInputs: pulumi.StringArray{
    			pulumi.String("string"),
    		},
    		DisregardDiacritics: pulumi.Bool(false),
    	},
    	LlmPolicy: &ces.GuardrailLlmPolicyArgs{
    		PolicyScope:             pulumi.String("string"),
    		Prompt:                  pulumi.String("string"),
    		AllowShortUtterance:     pulumi.Bool(false),
    		FailOpen:                pulumi.Bool(false),
    		MaxConversationMessages: pulumi.Int(0),
    		ModelSettings: &ces.GuardrailLlmPolicyModelSettingsArgs{
    			Model:       pulumi.String("string"),
    			Temperature: pulumi.Float64(0),
    		},
    	},
    	LlmPromptSecurity: &ces.GuardrailLlmPromptSecurityArgs{
    		CustomPolicy: &ces.GuardrailLlmPromptSecurityCustomPolicyArgs{
    			PolicyScope:             pulumi.String("string"),
    			Prompt:                  pulumi.String("string"),
    			AllowShortUtterance:     pulumi.Bool(false),
    			FailOpen:                pulumi.Bool(false),
    			MaxConversationMessages: pulumi.Int(0),
    			ModelSettings: &ces.GuardrailLlmPromptSecurityCustomPolicyModelSettingsArgs{
    				Model:       pulumi.String("string"),
    				Temperature: pulumi.Float64(0),
    			},
    		},
    		DefaultSettings: &ces.GuardrailLlmPromptSecurityDefaultSettingsArgs{
    			DefaultPromptTemplate: pulumi.String("string"),
    		},
    	},
    	CodeCallback: &ces.GuardrailCodeCallbackArgs{
    		AfterAgentCallback: &ces.GuardrailCodeCallbackAfterAgentCallbackArgs{
    			PythonCode:  pulumi.String("string"),
    			Description: pulumi.String("string"),
    			Disabled:    pulumi.Bool(false),
    		},
    		AfterModelCallback: &ces.GuardrailCodeCallbackAfterModelCallbackArgs{
    			PythonCode:  pulumi.String("string"),
    			Description: pulumi.String("string"),
    			Disabled:    pulumi.Bool(false),
    		},
    		BeforeAgentCallback: &ces.GuardrailCodeCallbackBeforeAgentCallbackArgs{
    			PythonCode:  pulumi.String("string"),
    			Description: pulumi.String("string"),
    			Disabled:    pulumi.Bool(false),
    		},
    		BeforeModelCallback: &ces.GuardrailCodeCallbackBeforeModelCallbackArgs{
    			PythonCode:  pulumi.String("string"),
    			Description: pulumi.String("string"),
    			Disabled:    pulumi.Bool(false),
    		},
    	},
    	ModelSafety: &ces.GuardrailModelSafetyArgs{
    		SafetySettings: ces.GuardrailModelSafetySafetySettingArray{
    			&ces.GuardrailModelSafetySafetySettingArgs{
    				Category:  pulumi.String("string"),
    				Threshold: pulumi.String("string"),
    			},
    		},
    	},
    	Project: pulumi.String("string"),
    })
    
    var guardrailResource = new Guardrail("guardrailResource", GuardrailArgs.builder()
        .displayName("string")
        .app("string")
        .location("string")
        .guardrailId("string")
        .enabled(false)
        .description("string")
        .action(GuardrailActionArgs.builder()
            .generativeAnswer(GuardrailActionGenerativeAnswerArgs.builder()
                .prompt("string")
                .build())
            .respondImmediately(GuardrailActionRespondImmediatelyArgs.builder()
                .responses(GuardrailActionRespondImmediatelyResponseArgs.builder()
                    .text("string")
                    .disabled(false)
                    .build())
                .build())
            .transferAgent(GuardrailActionTransferAgentArgs.builder()
                .agent("string")
                .build())
            .build())
        .contentFilter(GuardrailContentFilterArgs.builder()
            .matchType("string")
            .bannedContents("string")
            .bannedContentsInAgentResponses("string")
            .bannedContentsInUserInputs("string")
            .disregardDiacritics(false)
            .build())
        .llmPolicy(GuardrailLlmPolicyArgs.builder()
            .policyScope("string")
            .prompt("string")
            .allowShortUtterance(false)
            .failOpen(false)
            .maxConversationMessages(0)
            .modelSettings(GuardrailLlmPolicyModelSettingsArgs.builder()
                .model("string")
                .temperature(0.0)
                .build())
            .build())
        .llmPromptSecurity(GuardrailLlmPromptSecurityArgs.builder()
            .customPolicy(GuardrailLlmPromptSecurityCustomPolicyArgs.builder()
                .policyScope("string")
                .prompt("string")
                .allowShortUtterance(false)
                .failOpen(false)
                .maxConversationMessages(0)
                .modelSettings(GuardrailLlmPromptSecurityCustomPolicyModelSettingsArgs.builder()
                    .model("string")
                    .temperature(0.0)
                    .build())
                .build())
            .defaultSettings(GuardrailLlmPromptSecurityDefaultSettingsArgs.builder()
                .defaultPromptTemplate("string")
                .build())
            .build())
        .codeCallback(GuardrailCodeCallbackArgs.builder()
            .afterAgentCallback(GuardrailCodeCallbackAfterAgentCallbackArgs.builder()
                .pythonCode("string")
                .description("string")
                .disabled(false)
                .build())
            .afterModelCallback(GuardrailCodeCallbackAfterModelCallbackArgs.builder()
                .pythonCode("string")
                .description("string")
                .disabled(false)
                .build())
            .beforeAgentCallback(GuardrailCodeCallbackBeforeAgentCallbackArgs.builder()
                .pythonCode("string")
                .description("string")
                .disabled(false)
                .build())
            .beforeModelCallback(GuardrailCodeCallbackBeforeModelCallbackArgs.builder()
                .pythonCode("string")
                .description("string")
                .disabled(false)
                .build())
            .build())
        .modelSafety(GuardrailModelSafetyArgs.builder()
            .safetySettings(GuardrailModelSafetySafetySettingArgs.builder()
                .category("string")
                .threshold("string")
                .build())
            .build())
        .project("string")
        .build());
    
    guardrail_resource = gcp.ces.Guardrail("guardrailResource",
        display_name="string",
        app="string",
        location="string",
        guardrail_id="string",
        enabled=False,
        description="string",
        action={
            "generative_answer": {
                "prompt": "string",
            },
            "respond_immediately": {
                "responses": [{
                    "text": "string",
                    "disabled": False,
                }],
            },
            "transfer_agent": {
                "agent": "string",
            },
        },
        content_filter={
            "match_type": "string",
            "banned_contents": ["string"],
            "banned_contents_in_agent_responses": ["string"],
            "banned_contents_in_user_inputs": ["string"],
            "disregard_diacritics": False,
        },
        llm_policy={
            "policy_scope": "string",
            "prompt": "string",
            "allow_short_utterance": False,
            "fail_open": False,
            "max_conversation_messages": 0,
            "model_settings": {
                "model": "string",
                "temperature": 0,
            },
        },
        llm_prompt_security={
            "custom_policy": {
                "policy_scope": "string",
                "prompt": "string",
                "allow_short_utterance": False,
                "fail_open": False,
                "max_conversation_messages": 0,
                "model_settings": {
                    "model": "string",
                    "temperature": 0,
                },
            },
            "default_settings": {
                "default_prompt_template": "string",
            },
        },
        code_callback={
            "after_agent_callback": {
                "python_code": "string",
                "description": "string",
                "disabled": False,
            },
            "after_model_callback": {
                "python_code": "string",
                "description": "string",
                "disabled": False,
            },
            "before_agent_callback": {
                "python_code": "string",
                "description": "string",
                "disabled": False,
            },
            "before_model_callback": {
                "python_code": "string",
                "description": "string",
                "disabled": False,
            },
        },
        model_safety={
            "safety_settings": [{
                "category": "string",
                "threshold": "string",
            }],
        },
        project="string")
    
    const guardrailResource = new gcp.ces.Guardrail("guardrailResource", {
        displayName: "string",
        app: "string",
        location: "string",
        guardrailId: "string",
        enabled: false,
        description: "string",
        action: {
            generativeAnswer: {
                prompt: "string",
            },
            respondImmediately: {
                responses: [{
                    text: "string",
                    disabled: false,
                }],
            },
            transferAgent: {
                agent: "string",
            },
        },
        contentFilter: {
            matchType: "string",
            bannedContents: ["string"],
            bannedContentsInAgentResponses: ["string"],
            bannedContentsInUserInputs: ["string"],
            disregardDiacritics: false,
        },
        llmPolicy: {
            policyScope: "string",
            prompt: "string",
            allowShortUtterance: false,
            failOpen: false,
            maxConversationMessages: 0,
            modelSettings: {
                model: "string",
                temperature: 0,
            },
        },
        llmPromptSecurity: {
            customPolicy: {
                policyScope: "string",
                prompt: "string",
                allowShortUtterance: false,
                failOpen: false,
                maxConversationMessages: 0,
                modelSettings: {
                    model: "string",
                    temperature: 0,
                },
            },
            defaultSettings: {
                defaultPromptTemplate: "string",
            },
        },
        codeCallback: {
            afterAgentCallback: {
                pythonCode: "string",
                description: "string",
                disabled: false,
            },
            afterModelCallback: {
                pythonCode: "string",
                description: "string",
                disabled: false,
            },
            beforeAgentCallback: {
                pythonCode: "string",
                description: "string",
                disabled: false,
            },
            beforeModelCallback: {
                pythonCode: "string",
                description: "string",
                disabled: false,
            },
        },
        modelSafety: {
            safetySettings: [{
                category: "string",
                threshold: "string",
            }],
        },
        project: "string",
    });
    
    type: gcp:ces:Guardrail
    properties:
        action:
            generativeAnswer:
                prompt: string
            respondImmediately:
                responses:
                    - disabled: false
                      text: string
            transferAgent:
                agent: string
        app: string
        codeCallback:
            afterAgentCallback:
                description: string
                disabled: false
                pythonCode: string
            afterModelCallback:
                description: string
                disabled: false
                pythonCode: string
            beforeAgentCallback:
                description: string
                disabled: false
                pythonCode: string
            beforeModelCallback:
                description: string
                disabled: false
                pythonCode: string
        contentFilter:
            bannedContents:
                - string
            bannedContentsInAgentResponses:
                - string
            bannedContentsInUserInputs:
                - string
            disregardDiacritics: false
            matchType: string
        description: string
        displayName: string
        enabled: false
        guardrailId: string
        llmPolicy:
            allowShortUtterance: false
            failOpen: false
            maxConversationMessages: 0
            modelSettings:
                model: string
                temperature: 0
            policyScope: string
            prompt: string
        llmPromptSecurity:
            customPolicy:
                allowShortUtterance: false
                failOpen: false
                maxConversationMessages: 0
                modelSettings:
                    model: string
                    temperature: 0
                policyScope: string
                prompt: string
            defaultSettings:
                defaultPromptTemplate: string
        location: string
        modelSafety:
            safetySettings:
                - category: string
                  threshold: string
        project: string
    

    Guardrail Resource Properties

    To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.

    Inputs

    In Python, inputs that are objects can be passed either as argument classes or as dictionary literals.

    The Guardrail resource accepts the following input properties:

    App string
    Resource ID segment making up resource name. It identifies the resource within its parent collection as described in https://google.aip.dev/122.
    DisplayName string
    Display name of the guardrail.
    GuardrailId string
    The ID to use for the guardrail, which will become the final component of the guardrail's resource name. If not provided, a unique ID will be automatically assigned for the guardrail.
    Location string
    Resource ID segment making up resource name. It identifies the resource within its parent collection as described in https://google.aip.dev/122.
    Action GuardrailAction
    Action that is taken when a certain precondition is met. Structure is documented below.
    CodeCallback GuardrailCodeCallback
    Guardrail that blocks the conversation based on the code callbacks provided. Structure is documented below.
    ContentFilter GuardrailContentFilter
    Guardrail that bans certain content from being used in the conversation. Structure is documented below.
    Description string
    Description of the guardrail.
    Enabled bool
    Whether the guardrail is enabled.
    LlmPolicy GuardrailLlmPolicy
    Guardrail that blocks the conversation if the LLM response is considered violating the policy based on the LLM classification. Structure is documented below.
    LlmPromptSecurity GuardrailLlmPromptSecurity
    Guardrail that blocks the conversation if the input is considered unsafe based on the LLM classification. Structure is documented below.
    ModelSafety GuardrailModelSafety
    Model safety settings overrides. When this is set, it will override the default settings and trigger the guardrail if the response is considered unsafe. Structure is documented below.
    Project string
    The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
    App string
    Resource ID segment making up resource name. It identifies the resource within its parent collection as described in https://google.aip.dev/122.
    DisplayName string
    Display name of the guardrail.
    GuardrailId string
    The ID to use for the guardrail, which will become the final component of the guardrail's resource name. If not provided, a unique ID will be automatically assigned for the guardrail.
    Location string
    Resource ID segment making up resource name. It identifies the resource within its parent collection as described in https://google.aip.dev/122.
    Action GuardrailActionArgs
    Action that is taken when a certain precondition is met. Structure is documented below.
    CodeCallback GuardrailCodeCallbackArgs
    Guardrail that blocks the conversation based on the code callbacks provided. Structure is documented below.
    ContentFilter GuardrailContentFilterArgs
    Guardrail that bans certain content from being used in the conversation. Structure is documented below.
    Description string
    Description of the guardrail.
    Enabled bool
    Whether the guardrail is enabled.
    LlmPolicy GuardrailLlmPolicyArgs
    Guardrail that blocks the conversation if the LLM response is considered violating the policy based on the LLM classification. Structure is documented below.
    LlmPromptSecurity GuardrailLlmPromptSecurityArgs
    Guardrail that blocks the conversation if the input is considered unsafe based on the LLM classification. Structure is documented below.
    ModelSafety GuardrailModelSafetyArgs
    Model safety settings overrides. When this is set, it will override the default settings and trigger the guardrail if the response is considered unsafe. Structure is documented below.
    Project string
    The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
    app String
    Resource ID segment making up resource name. It identifies the resource within its parent collection as described in https://google.aip.dev/122.
    displayName String
    Display name of the guardrail.
    guardrailId String
    The ID to use for the guardrail, which will become the final component of the guardrail's resource name. If not provided, a unique ID will be automatically assigned for the guardrail.
    location String
    Resource ID segment making up resource name. It identifies the resource within its parent collection as described in https://google.aip.dev/122.
    action GuardrailAction
    Action that is taken when a certain precondition is met. Structure is documented below.
    codeCallback GuardrailCodeCallback
    Guardrail that blocks the conversation based on the code callbacks provided. Structure is documented below.
    contentFilter GuardrailContentFilter
    Guardrail that bans certain content from being used in the conversation. Structure is documented below.
    description String
    Description of the guardrail.
    enabled Boolean
    Whether the guardrail is enabled.
    llmPolicy GuardrailLlmPolicy
    Guardrail that blocks the conversation if the LLM response is considered violating the policy based on the LLM classification. Structure is documented below.
    llmPromptSecurity GuardrailLlmPromptSecurity
    Guardrail that blocks the conversation if the input is considered unsafe based on the LLM classification. Structure is documented below.
    modelSafety GuardrailModelSafety
    Model safety settings overrides. When this is set, it will override the default settings and trigger the guardrail if the response is considered unsafe. Structure is documented below.
    project String
    The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
    app string
    Resource ID segment making up resource name. It identifies the resource within its parent collection as described in https://google.aip.dev/122.
    displayName string
    Display name of the guardrail.
    guardrailId string
    The ID to use for the guardrail, which will become the final component of the guardrail's resource name. If not provided, a unique ID will be automatically assigned for the guardrail.
    location string
    Resource ID segment making up resource name. It identifies the resource within its parent collection as described in https://google.aip.dev/122.
    action GuardrailAction
    Action that is taken when a certain precondition is met. Structure is documented below.
    codeCallback GuardrailCodeCallback
    Guardrail that blocks the conversation based on the code callbacks provided. Structure is documented below.
    contentFilter GuardrailContentFilter
    Guardrail that bans certain content from being used in the conversation. Structure is documented below.
    description string
    Description of the guardrail.
    enabled boolean
    Whether the guardrail is enabled.
    llmPolicy GuardrailLlmPolicy
    Guardrail that blocks the conversation if the LLM response is considered violating the policy based on the LLM classification. Structure is documented below.
    llmPromptSecurity GuardrailLlmPromptSecurity
    Guardrail that blocks the conversation if the input is considered unsafe based on the LLM classification. Structure is documented below.
    modelSafety GuardrailModelSafety
    Model safety settings overrides. When this is set, it will override the default settings and trigger the guardrail if the response is considered unsafe. Structure is documented below.
    project string
    The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
    app str
    Resource ID segment making up resource name. It identifies the resource within its parent collection as described in https://google.aip.dev/122.
    display_name str
    Display name of the guardrail.
    guardrail_id str
    The ID to use for the guardrail, which will become the final component of the guardrail's resource name. If not provided, a unique ID will be automatically assigned for the guardrail.
    location str
    Resource ID segment making up resource name. It identifies the resource within its parent collection as described in https://google.aip.dev/122.
    action GuardrailActionArgs
    Action that is taken when a certain precondition is met. Structure is documented below.
    code_callback GuardrailCodeCallbackArgs
    Guardrail that blocks the conversation based on the code callbacks provided. Structure is documented below.
    content_filter GuardrailContentFilterArgs
    Guardrail that bans certain content from being used in the conversation. Structure is documented below.
    description str
    Description of the guardrail.
    enabled bool
    Whether the guardrail is enabled.
    llm_policy GuardrailLlmPolicyArgs
    Guardrail that blocks the conversation if the LLM response is considered violating the policy based on the LLM classification. Structure is documented below.
    llm_prompt_security GuardrailLlmPromptSecurityArgs
    Guardrail that blocks the conversation if the input is considered unsafe based on the LLM classification. Structure is documented below.
    model_safety GuardrailModelSafetyArgs
    Model safety settings overrides. When this is set, it will override the default settings and trigger the guardrail if the response is considered unsafe. Structure is documented below.
    project str
    The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
    app String
    Resource ID segment making up resource name. It identifies the resource within its parent collection as described in https://google.aip.dev/122.
    displayName String
    Display name of the guardrail.
    guardrailId String
    The ID to use for the guardrail, which will become the final component of the guardrail's resource name. If not provided, a unique ID will be automatically assigned for the guardrail.
    location String
    Resource ID segment making up resource name. It identifies the resource within its parent collection as described in https://google.aip.dev/122.
    action Property Map
    Action that is taken when a certain precondition is met. Structure is documented below.
    codeCallback Property Map
    Guardrail that blocks the conversation based on the code callbacks provided. Structure is documented below.
    contentFilter Property Map
    Guardrail that bans certain content from being used in the conversation. Structure is documented below.
    description String
    Description of the guardrail.
    enabled Boolean
    Whether the guardrail is enabled.
    llmPolicy Property Map
    Guardrail that blocks the conversation if the LLM response is considered violating the policy based on the LLM classification. Structure is documented below.
    llmPromptSecurity Property Map
    Guardrail that blocks the conversation if the input is considered unsafe based on the LLM classification. Structure is documented below.
    modelSafety Property Map
    Model safety settings overrides. When this is set, it will override the default settings and trigger the guardrail if the response is considered unsafe. Structure is documented below.
    project String
    The ID of the project in which the resource belongs. If it is not provided, the provider project is used.

    Outputs

    All input properties are implicitly available as output properties. Additionally, the Guardrail resource produces the following output properties:

    CreateTime string
    Timestamp when the guardrail was created.
    Etag string
    Etag used to ensure the object hasn't changed during a read-modify-write operation. If the etag is empty, the update will overwrite any concurrent changes.
    Id string
    The provider-assigned unique ID for this managed resource.
    Name string
    Identifier. The unique identifier of the guardrail. Format: projects/{project}/locations/{location}/apps/{app}/guardrails/{guardrail}
    UpdateTime string
    Timestamp when the guardrail was last updated.
    CreateTime string
    Timestamp when the guardrail was created.
    Etag string
    Etag used to ensure the object hasn't changed during a read-modify-write operation. If the etag is empty, the update will overwrite any concurrent changes.
    Id string
    The provider-assigned unique ID for this managed resource.
    Name string
    Identifier. The unique identifier of the guardrail. Format: projects/{project}/locations/{location}/apps/{app}/guardrails/{guardrail}
    UpdateTime string
    Timestamp when the guardrail was last updated.
    createTime String
    Timestamp when the guardrail was created.
    etag String
    Etag used to ensure the object hasn't changed during a read-modify-write operation. If the etag is empty, the update will overwrite any concurrent changes.
    id String
    The provider-assigned unique ID for this managed resource.
    name String
    Identifier. The unique identifier of the guardrail. Format: projects/{project}/locations/{location}/apps/{app}/guardrails/{guardrail}
    updateTime String
    Timestamp when the guardrail was last updated.
    createTime string
    Timestamp when the guardrail was created.
    etag string
    Etag used to ensure the object hasn't changed during a read-modify-write operation. If the etag is empty, the update will overwrite any concurrent changes.
    id string
    The provider-assigned unique ID for this managed resource.
    name string
    Identifier. The unique identifier of the guardrail. Format: projects/{project}/locations/{location}/apps/{app}/guardrails/{guardrail}
    updateTime string
    Timestamp when the guardrail was last updated.
    create_time str
    Timestamp when the guardrail was created.
    etag str
    Etag used to ensure the object hasn't changed during a read-modify-write operation. If the etag is empty, the update will overwrite any concurrent changes.
    id str
    The provider-assigned unique ID for this managed resource.
    name str
    Identifier. The unique identifier of the guardrail. Format: projects/{project}/locations/{location}/apps/{app}/guardrails/{guardrail}
    update_time str
    Timestamp when the guardrail was last updated.
    createTime String
    Timestamp when the guardrail was created.
    etag String
    Etag used to ensure the object hasn't changed during a read-modify-write operation. If the etag is empty, the update will overwrite any concurrent changes.
    id String
    The provider-assigned unique ID for this managed resource.
    name String
    Identifier. The unique identifier of the guardrail. Format: projects/{project}/locations/{location}/apps/{app}/guardrails/{guardrail}
    updateTime String
    Timestamp when the guardrail was last updated.

    Look up Existing Guardrail Resource

    Get an existing Guardrail resource’s state with the given name, ID, and optional extra properties used to qualify the lookup.

    public static get(name: string, id: Input<ID>, state?: GuardrailState, opts?: CustomResourceOptions): Guardrail
    @staticmethod
    def get(resource_name: str,
            id: str,
            opts: Optional[ResourceOptions] = None,
            action: Optional[GuardrailActionArgs] = None,
            app: Optional[str] = None,
            code_callback: Optional[GuardrailCodeCallbackArgs] = None,
            content_filter: Optional[GuardrailContentFilterArgs] = None,
            create_time: Optional[str] = None,
            description: Optional[str] = None,
            display_name: Optional[str] = None,
            enabled: Optional[bool] = None,
            etag: Optional[str] = None,
            guardrail_id: Optional[str] = None,
            llm_policy: Optional[GuardrailLlmPolicyArgs] = None,
            llm_prompt_security: Optional[GuardrailLlmPromptSecurityArgs] = None,
            location: Optional[str] = None,
            model_safety: Optional[GuardrailModelSafetyArgs] = None,
            name: Optional[str] = None,
            project: Optional[str] = None,
            update_time: Optional[str] = None) -> Guardrail
    func GetGuardrail(ctx *Context, name string, id IDInput, state *GuardrailState, opts ...ResourceOption) (*Guardrail, error)
    public static Guardrail Get(string name, Input<string> id, GuardrailState? state, CustomResourceOptions? opts = null)
    public static Guardrail get(String name, Output<String> id, GuardrailState state, CustomResourceOptions options)
    resources:  _:    type: gcp:ces:Guardrail    get:      id: ${id}
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    resource_name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    The following state arguments are supported:
    Action GuardrailAction
    Action that is taken when a certain precondition is met. Structure is documented below.
    App string
    Resource ID segment making up resource name. It identifies the resource within its parent collection as described in https://google.aip.dev/122.
    CodeCallback GuardrailCodeCallback
    Guardrail that blocks the conversation based on the code callbacks provided. Structure is documented below.
    ContentFilter GuardrailContentFilter
    Guardrail that bans certain content from being used in the conversation. Structure is documented below.
    CreateTime string
    Timestamp when the guardrail was created.
    Description string
    Description of the guardrail.
    DisplayName string
    Display name of the guardrail.
    Enabled bool
    Whether the guardrail is enabled.
    Etag string
    Etag used to ensure the object hasn't changed during a read-modify-write operation. If the etag is empty, the update will overwrite any concurrent changes.
    GuardrailId string
    The ID to use for the guardrail, which will become the final component of the guardrail's resource name. If not provided, a unique ID will be automatically assigned for the guardrail.
    LlmPolicy GuardrailLlmPolicy
    Guardrail that blocks the conversation if the LLM response is considered violating the policy based on the LLM classification. Structure is documented below.
    LlmPromptSecurity GuardrailLlmPromptSecurity
    Guardrail that blocks the conversation if the input is considered unsafe based on the LLM classification. Structure is documented below.
    Location string
    Resource ID segment making up resource name. It identifies the resource within its parent collection as described in https://google.aip.dev/122.
    ModelSafety GuardrailModelSafety
    Model safety settings overrides. When this is set, it will override the default settings and trigger the guardrail if the response is considered unsafe. Structure is documented below.
    Name string
    Identifier. The unique identifier of the guardrail. Format: projects/{project}/locations/{location}/apps/{app}/guardrails/{guardrail}
    Project string
    The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
    UpdateTime string
    Timestamp when the guardrail was last updated.
    Action GuardrailActionArgs
    Action that is taken when a certain precondition is met. Structure is documented below.
    App string
    Resource ID segment making up resource name. It identifies the resource within its parent collection as described in https://google.aip.dev/122.
    CodeCallback GuardrailCodeCallbackArgs
    Guardrail that blocks the conversation based on the code callbacks provided. Structure is documented below.
    ContentFilter GuardrailContentFilterArgs
    Guardrail that bans certain content from being used in the conversation. Structure is documented below.
    CreateTime string
    Timestamp when the guardrail was created.
    Description string
    Description of the guardrail.
    DisplayName string
    Display name of the guardrail.
    Enabled bool
    Whether the guardrail is enabled.
    Etag string
    Etag used to ensure the object hasn't changed during a read-modify-write operation. If the etag is empty, the update will overwrite any concurrent changes.
    GuardrailId string
    The ID to use for the guardrail, which will become the final component of the guardrail's resource name. If not provided, a unique ID will be automatically assigned for the guardrail.
    LlmPolicy GuardrailLlmPolicyArgs
    Guardrail that blocks the conversation if the LLM response is considered violating the policy based on the LLM classification. Structure is documented below.
    LlmPromptSecurity GuardrailLlmPromptSecurityArgs
    Guardrail that blocks the conversation if the input is considered unsafe based on the LLM classification. Structure is documented below.
    Location string
    Resource ID segment making up resource name. It identifies the resource within its parent collection as described in https://google.aip.dev/122.
    ModelSafety GuardrailModelSafetyArgs
    Model safety settings overrides. When this is set, it will override the default settings and trigger the guardrail if the response is considered unsafe. Structure is documented below.
    Name string
    Identifier. The unique identifier of the guardrail. Format: projects/{project}/locations/{location}/apps/{app}/guardrails/{guardrail}
    Project string
    The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
    UpdateTime string
    Timestamp when the guardrail was last updated.
    action GuardrailAction
    Action that is taken when a certain precondition is met. Structure is documented below.
    app String
    Resource ID segment making up resource name. It identifies the resource within its parent collection as described in https://google.aip.dev/122.
    codeCallback GuardrailCodeCallback
    Guardrail that blocks the conversation based on the code callbacks provided. Structure is documented below.
    contentFilter GuardrailContentFilter
    Guardrail that bans certain content from being used in the conversation. Structure is documented below.
    createTime String
    Timestamp when the guardrail was created.
    description String
    Description of the guardrail.
    displayName String
    Display name of the guardrail.
    enabled Boolean
    Whether the guardrail is enabled.
    etag String
    Etag used to ensure the object hasn't changed during a read-modify-write operation. If the etag is empty, the update will overwrite any concurrent changes.
    guardrailId String
    The ID to use for the guardrail, which will become the final component of the guardrail's resource name. If not provided, a unique ID will be automatically assigned for the guardrail.
    llmPolicy GuardrailLlmPolicy
    Guardrail that blocks the conversation if the LLM response is considered violating the policy based on the LLM classification. Structure is documented below.
    llmPromptSecurity GuardrailLlmPromptSecurity
    Guardrail that blocks the conversation if the input is considered unsafe based on the LLM classification. Structure is documented below.
    location String
    Resource ID segment making up resource name. It identifies the resource within its parent collection as described in https://google.aip.dev/122.
    modelSafety GuardrailModelSafety
    Model safety settings overrides. When this is set, it will override the default settings and trigger the guardrail if the response is considered unsafe. Structure is documented below.
    name String
    Identifier. The unique identifier of the guardrail. Format: projects/{project}/locations/{location}/apps/{app}/guardrails/{guardrail}
    project String
    The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
    updateTime String
    Timestamp when the guardrail was last updated.
    action GuardrailAction
    Action that is taken when a certain precondition is met. Structure is documented below.
    app string
    Resource ID segment making up resource name. It identifies the resource within its parent collection as described in https://google.aip.dev/122.
    codeCallback GuardrailCodeCallback
    Guardrail that blocks the conversation based on the code callbacks provided. Structure is documented below.
    contentFilter GuardrailContentFilter
    Guardrail that bans certain content from being used in the conversation. Structure is documented below.
    createTime string
    Timestamp when the guardrail was created.
    description string
    Description of the guardrail.
    displayName string
    Display name of the guardrail.
    enabled boolean
    Whether the guardrail is enabled.
    etag string
    Etag used to ensure the object hasn't changed during a read-modify-write operation. If the etag is empty, the update will overwrite any concurrent changes.
    guardrailId string
    The ID to use for the guardrail, which will become the final component of the guardrail's resource name. If not provided, a unique ID will be automatically assigned for the guardrail.
    llmPolicy GuardrailLlmPolicy
    Guardrail that blocks the conversation if the LLM response is considered violating the policy based on the LLM classification. Structure is documented below.
    llmPromptSecurity GuardrailLlmPromptSecurity
    Guardrail that blocks the conversation if the input is considered unsafe based on the LLM classification. Structure is documented below.
    location string
    Resource ID segment making up resource name. It identifies the resource within its parent collection as described in https://google.aip.dev/122.
    modelSafety GuardrailModelSafety
    Model safety settings overrides. When this is set, it will override the default settings and trigger the guardrail if the response is considered unsafe. Structure is documented below.
    name string
    Identifier. The unique identifier of the guardrail. Format: projects/{project}/locations/{location}/apps/{app}/guardrails/{guardrail}
    project string
    The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
    updateTime string
    Timestamp when the guardrail was last updated.
    action GuardrailActionArgs
    Action that is taken when a certain precondition is met. Structure is documented below.
    app str
    Resource ID segment making up resource name. It identifies the resource within its parent collection as described in https://google.aip.dev/122.
    code_callback GuardrailCodeCallbackArgs
    Guardrail that blocks the conversation based on the code callbacks provided. Structure is documented below.
    content_filter GuardrailContentFilterArgs
    Guardrail that bans certain content from being used in the conversation. Structure is documented below.
    create_time str
    Timestamp when the guardrail was created.
    description str
    Description of the guardrail.
    display_name str
    Display name of the guardrail.
    enabled bool
    Whether the guardrail is enabled.
    etag str
    Etag used to ensure the object hasn't changed during a read-modify-write operation. If the etag is empty, the update will overwrite any concurrent changes.
    guardrail_id str
    The ID to use for the guardrail, which will become the final component of the guardrail's resource name. If not provided, a unique ID will be automatically assigned for the guardrail.
    llm_policy GuardrailLlmPolicyArgs
    Guardrail that blocks the conversation if the LLM response is considered violating the policy based on the LLM classification. Structure is documented below.
    llm_prompt_security GuardrailLlmPromptSecurityArgs
    Guardrail that blocks the conversation if the input is considered unsafe based on the LLM classification. Structure is documented below.
    location str
    Resource ID segment making up resource name. It identifies the resource within its parent collection as described in https://google.aip.dev/122.
    model_safety GuardrailModelSafetyArgs
    Model safety settings overrides. When this is set, it will override the default settings and trigger the guardrail if the response is considered unsafe. Structure is documented below.
    name str
    Identifier. The unique identifier of the guardrail. Format: projects/{project}/locations/{location}/apps/{app}/guardrails/{guardrail}
    project str
    The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
    update_time str
    Timestamp when the guardrail was last updated.
    action Property Map
    Action that is taken when a certain precondition is met. Structure is documented below.
    app String
    Resource ID segment making up resource name. It identifies the resource within its parent collection as described in https://google.aip.dev/122.
    codeCallback Property Map
    Guardrail that blocks the conversation based on the code callbacks provided. Structure is documented below.
    contentFilter Property Map
    Guardrail that bans certain content from being used in the conversation. Structure is documented below.
    createTime String
    Timestamp when the guardrail was created.
    description String
    Description of the guardrail.
    displayName String
    Display name of the guardrail.
    enabled Boolean
    Whether the guardrail is enabled.
    etag String
    Etag used to ensure the object hasn't changed during a read-modify-write operation. If the etag is empty, the update will overwrite any concurrent changes.
    guardrailId String
    The ID to use for the guardrail, which will become the final component of the guardrail's resource name. If not provided, a unique ID will be automatically assigned for the guardrail.
    llmPolicy Property Map
    Guardrail that blocks the conversation if the LLM response is considered violating the policy based on the LLM classification. Structure is documented below.
    llmPromptSecurity Property Map
    Guardrail that blocks the conversation if the input is considered unsafe based on the LLM classification. Structure is documented below.
    location String
    Resource ID segment making up resource name. It identifies the resource within its parent collection as described in https://google.aip.dev/122.
    modelSafety Property Map
    Model safety settings overrides. When this is set, it will override the default settings and trigger the guardrail if the response is considered unsafe. Structure is documented below.
    name String
    Identifier. The unique identifier of the guardrail. Format: projects/{project}/locations/{location}/apps/{app}/guardrails/{guardrail}
    project String
    The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
    updateTime String
    Timestamp when the guardrail was last updated.

    Supporting Types

    GuardrailAction, GuardrailActionArgs

    GenerativeAnswer GuardrailActionGenerativeAnswer
    The agent will immediately respond with a generative answer. Structure is documented below.
    RespondImmediately GuardrailActionRespondImmediately
    The agent will immediately respond with a preconfigured response. Structure is documented below.
    TransferAgent GuardrailActionTransferAgent
    The agent will transfer the conversation to a different agent. Structure is documented below.
    GenerativeAnswer GuardrailActionGenerativeAnswer
    The agent will immediately respond with a generative answer. Structure is documented below.
    RespondImmediately GuardrailActionRespondImmediately
    The agent will immediately respond with a preconfigured response. Structure is documented below.
    TransferAgent GuardrailActionTransferAgent
    The agent will transfer the conversation to a different agent. Structure is documented below.
    generativeAnswer GuardrailActionGenerativeAnswer
    The agent will immediately respond with a generative answer. Structure is documented below.
    respondImmediately GuardrailActionRespondImmediately
    The agent will immediately respond with a preconfigured response. Structure is documented below.
    transferAgent GuardrailActionTransferAgent
    The agent will transfer the conversation to a different agent. Structure is documented below.
    generativeAnswer GuardrailActionGenerativeAnswer
    The agent will immediately respond with a generative answer. Structure is documented below.
    respondImmediately GuardrailActionRespondImmediately
    The agent will immediately respond with a preconfigured response. Structure is documented below.
    transferAgent GuardrailActionTransferAgent
    The agent will transfer the conversation to a different agent. Structure is documented below.
    generative_answer GuardrailActionGenerativeAnswer
    The agent will immediately respond with a generative answer. Structure is documented below.
    respond_immediately GuardrailActionRespondImmediately
    The agent will immediately respond with a preconfigured response. Structure is documented below.
    transfer_agent GuardrailActionTransferAgent
    The agent will transfer the conversation to a different agent. Structure is documented below.
    generativeAnswer Property Map
    The agent will immediately respond with a generative answer. Structure is documented below.
    respondImmediately Property Map
    The agent will immediately respond with a preconfigured response. Structure is documented below.
    transferAgent Property Map
    The agent will transfer the conversation to a different agent. Structure is documented below.

    GuardrailActionGenerativeAnswer, GuardrailActionGenerativeAnswerArgs

    Prompt string
    The prompt to use for the generative answer.
    Prompt string
    The prompt to use for the generative answer.
    prompt String
    The prompt to use for the generative answer.
    prompt string
    The prompt to use for the generative answer.
    prompt str
    The prompt to use for the generative answer.
    prompt String
    The prompt to use for the generative answer.

    GuardrailActionRespondImmediately, GuardrailActionRespondImmediatelyArgs

    Responses List<GuardrailActionRespondImmediatelyResponse>
    The canned responses for the agent to choose from. The response is chosen randomly. Structure is documented below.
    Responses []GuardrailActionRespondImmediatelyResponse
    The canned responses for the agent to choose from. The response is chosen randomly. Structure is documented below.
    responses List<GuardrailActionRespondImmediatelyResponse>
    The canned responses for the agent to choose from. The response is chosen randomly. Structure is documented below.
    responses GuardrailActionRespondImmediatelyResponse[]
    The canned responses for the agent to choose from. The response is chosen randomly. Structure is documented below.
    responses Sequence[GuardrailActionRespondImmediatelyResponse]
    The canned responses for the agent to choose from. The response is chosen randomly. Structure is documented below.
    responses List<Property Map>
    The canned responses for the agent to choose from. The response is chosen randomly. Structure is documented below.

    GuardrailActionRespondImmediatelyResponse, GuardrailActionRespondImmediatelyResponseArgs

    Text string
    Text for the agent to respond with.
    Disabled bool
    Whether the response is disabled. Disabled responses are not used by the agent.
    Text string
    Text for the agent to respond with.
    Disabled bool
    Whether the response is disabled. Disabled responses are not used by the agent.
    text String
    Text for the agent to respond with.
    disabled Boolean
    Whether the response is disabled. Disabled responses are not used by the agent.
    text string
    Text for the agent to respond with.
    disabled boolean
    Whether the response is disabled. Disabled responses are not used by the agent.
    text str
    Text for the agent to respond with.
    disabled bool
    Whether the response is disabled. Disabled responses are not used by the agent.
    text String
    Text for the agent to respond with.
    disabled Boolean
    Whether the response is disabled. Disabled responses are not used by the agent.

    GuardrailActionTransferAgent, GuardrailActionTransferAgentArgs

    Agent string
    The name of the agent to transfer the conversation to. The agent must be in the same app as the current agent. Format: projects/{project}/locations/{location}/apps/{app}/agents/{agent}
    Agent string
    The name of the agent to transfer the conversation to. The agent must be in the same app as the current agent. Format: projects/{project}/locations/{location}/apps/{app}/agents/{agent}
    agent String
    The name of the agent to transfer the conversation to. The agent must be in the same app as the current agent. Format: projects/{project}/locations/{location}/apps/{app}/agents/{agent}
    agent string
    The name of the agent to transfer the conversation to. The agent must be in the same app as the current agent. Format: projects/{project}/locations/{location}/apps/{app}/agents/{agent}
    agent str
    The name of the agent to transfer the conversation to. The agent must be in the same app as the current agent. Format: projects/{project}/locations/{location}/apps/{app}/agents/{agent}
    agent String
    The name of the agent to transfer the conversation to. The agent must be in the same app as the current agent. Format: projects/{project}/locations/{location}/apps/{app}/agents/{agent}

    GuardrailCodeCallback, GuardrailCodeCallbackArgs

    AfterAgentCallback GuardrailCodeCallbackAfterAgentCallback
    A callback defines the custom logic to be executed at various stages of agent interaction. Structure is documented below.
    AfterModelCallback GuardrailCodeCallbackAfterModelCallback
    A callback defines the custom logic to be executed at various stages of agent interaction. Structure is documented below.
    BeforeAgentCallback GuardrailCodeCallbackBeforeAgentCallback
    A callback defines the custom logic to be executed at various stages of agent interaction. Structure is documented below.
    BeforeModelCallback GuardrailCodeCallbackBeforeModelCallback
    A callback defines the custom logic to be executed at various stages of agent interaction. Structure is documented below.
    AfterAgentCallback GuardrailCodeCallbackAfterAgentCallback
    A callback defines the custom logic to be executed at various stages of agent interaction. Structure is documented below.
    AfterModelCallback GuardrailCodeCallbackAfterModelCallback
    A callback defines the custom logic to be executed at various stages of agent interaction. Structure is documented below.
    BeforeAgentCallback GuardrailCodeCallbackBeforeAgentCallback
    A callback defines the custom logic to be executed at various stages of agent interaction. Structure is documented below.
    BeforeModelCallback GuardrailCodeCallbackBeforeModelCallback
    A callback defines the custom logic to be executed at various stages of agent interaction. Structure is documented below.
    afterAgentCallback GuardrailCodeCallbackAfterAgentCallback
    A callback defines the custom logic to be executed at various stages of agent interaction. Structure is documented below.
    afterModelCallback GuardrailCodeCallbackAfterModelCallback
    A callback defines the custom logic to be executed at various stages of agent interaction. Structure is documented below.
    beforeAgentCallback GuardrailCodeCallbackBeforeAgentCallback
    A callback defines the custom logic to be executed at various stages of agent interaction. Structure is documented below.
    beforeModelCallback GuardrailCodeCallbackBeforeModelCallback
    A callback defines the custom logic to be executed at various stages of agent interaction. Structure is documented below.
    afterAgentCallback GuardrailCodeCallbackAfterAgentCallback
    A callback defines the custom logic to be executed at various stages of agent interaction. Structure is documented below.
    afterModelCallback GuardrailCodeCallbackAfterModelCallback
    A callback defines the custom logic to be executed at various stages of agent interaction. Structure is documented below.
    beforeAgentCallback GuardrailCodeCallbackBeforeAgentCallback
    A callback defines the custom logic to be executed at various stages of agent interaction. Structure is documented below.
    beforeModelCallback GuardrailCodeCallbackBeforeModelCallback
    A callback defines the custom logic to be executed at various stages of agent interaction. Structure is documented below.
    after_agent_callback GuardrailCodeCallbackAfterAgentCallback
    A callback defines the custom logic to be executed at various stages of agent interaction. Structure is documented below.
    after_model_callback GuardrailCodeCallbackAfterModelCallback
    A callback defines the custom logic to be executed at various stages of agent interaction. Structure is documented below.
    before_agent_callback GuardrailCodeCallbackBeforeAgentCallback
    A callback defines the custom logic to be executed at various stages of agent interaction. Structure is documented below.
    before_model_callback GuardrailCodeCallbackBeforeModelCallback
    A callback defines the custom logic to be executed at various stages of agent interaction. Structure is documented below.
    afterAgentCallback Property Map
    A callback defines the custom logic to be executed at various stages of agent interaction. Structure is documented below.
    afterModelCallback Property Map
    A callback defines the custom logic to be executed at various stages of agent interaction. Structure is documented below.
    beforeAgentCallback Property Map
    A callback defines the custom logic to be executed at various stages of agent interaction. Structure is documented below.
    beforeModelCallback Property Map
    A callback defines the custom logic to be executed at various stages of agent interaction. Structure is documented below.

    GuardrailCodeCallbackAfterAgentCallback, GuardrailCodeCallbackAfterAgentCallbackArgs

    PythonCode string
    The python code to execute for the callback.
    Description string
    Human-readable description of the callback.
    Disabled bool
    Whether the callback is disabled. Disabled callbacks are ignored by the agent.
    PythonCode string
    The python code to execute for the callback.
    Description string
    Human-readable description of the callback.
    Disabled bool
    Whether the callback is disabled. Disabled callbacks are ignored by the agent.
    pythonCode String
    The python code to execute for the callback.
    description String
    Human-readable description of the callback.
    disabled Boolean
    Whether the callback is disabled. Disabled callbacks are ignored by the agent.
    pythonCode string
    The python code to execute for the callback.
    description string
    Human-readable description of the callback.
    disabled boolean
    Whether the callback is disabled. Disabled callbacks are ignored by the agent.
    python_code str
    The python code to execute for the callback.
    description str
    Human-readable description of the callback.
    disabled bool
    Whether the callback is disabled. Disabled callbacks are ignored by the agent.
    pythonCode String
    The python code to execute for the callback.
    description String
    Human-readable description of the callback.
    disabled Boolean
    Whether the callback is disabled. Disabled callbacks are ignored by the agent.

    GuardrailCodeCallbackAfterModelCallback, GuardrailCodeCallbackAfterModelCallbackArgs

    PythonCode string
    The python code to execute for the callback.
    Description string
    Human-readable description of the callback.
    Disabled bool
    Whether the callback is disabled. Disabled callbacks are ignored by the agent.
    PythonCode string
    The python code to execute for the callback.
    Description string
    Human-readable description of the callback.
    Disabled bool
    Whether the callback is disabled. Disabled callbacks are ignored by the agent.
    pythonCode String
    The python code to execute for the callback.
    description String
    Human-readable description of the callback.
    disabled Boolean
    Whether the callback is disabled. Disabled callbacks are ignored by the agent.
    pythonCode string
    The python code to execute for the callback.
    description string
    Human-readable description of the callback.
    disabled boolean
    Whether the callback is disabled. Disabled callbacks are ignored by the agent.
    python_code str
    The python code to execute for the callback.
    description str
    Human-readable description of the callback.
    disabled bool
    Whether the callback is disabled. Disabled callbacks are ignored by the agent.
    pythonCode String
    The python code to execute for the callback.
    description String
    Human-readable description of the callback.
    disabled Boolean
    Whether the callback is disabled. Disabled callbacks are ignored by the agent.

    GuardrailCodeCallbackBeforeAgentCallback, GuardrailCodeCallbackBeforeAgentCallbackArgs

    PythonCode string
    The python code to execute for the callback.
    Description string
    Human-readable description of the callback.
    Disabled bool
    Whether the callback is disabled. Disabled callbacks are ignored by the agent.
    PythonCode string
    The python code to execute for the callback.
    Description string
    Human-readable description of the callback.
    Disabled bool
    Whether the callback is disabled. Disabled callbacks are ignored by the agent.
    pythonCode String
    The python code to execute for the callback.
    description String
    Human-readable description of the callback.
    disabled Boolean
    Whether the callback is disabled. Disabled callbacks are ignored by the agent.
    pythonCode string
    The python code to execute for the callback.
    description string
    Human-readable description of the callback.
    disabled boolean
    Whether the callback is disabled. Disabled callbacks are ignored by the agent.
    python_code str
    The python code to execute for the callback.
    description str
    Human-readable description of the callback.
    disabled bool
    Whether the callback is disabled. Disabled callbacks are ignored by the agent.
    pythonCode String
    The python code to execute for the callback.
    description String
    Human-readable description of the callback.
    disabled Boolean
    Whether the callback is disabled. Disabled callbacks are ignored by the agent.

    GuardrailCodeCallbackBeforeModelCallback, GuardrailCodeCallbackBeforeModelCallbackArgs

    PythonCode string
    The python code to execute for the callback.
    Description string
    Human-readable description of the callback.
    Disabled bool
    Whether the callback is disabled. Disabled callbacks are ignored by the agent.
    PythonCode string
    The python code to execute for the callback.
    Description string
    Human-readable description of the callback.
    Disabled bool
    Whether the callback is disabled. Disabled callbacks are ignored by the agent.
    pythonCode String
    The python code to execute for the callback.
    description String
    Human-readable description of the callback.
    disabled Boolean
    Whether the callback is disabled. Disabled callbacks are ignored by the agent.
    pythonCode string
    The python code to execute for the callback.
    description string
    Human-readable description of the callback.
    disabled boolean
    Whether the callback is disabled. Disabled callbacks are ignored by the agent.
    python_code str
    The python code to execute for the callback.
    description str
    Human-readable description of the callback.
    disabled bool
    Whether the callback is disabled. Disabled callbacks are ignored by the agent.
    pythonCode String
    The python code to execute for the callback.
    description String
    Human-readable description of the callback.
    disabled Boolean
    Whether the callback is disabled. Disabled callbacks are ignored by the agent.

    GuardrailContentFilter, GuardrailContentFilterArgs

    MatchType string
    Match type for the content filter. Possible values: SIMPLE_STRING_MATCH WORD_BOUNDARY_STRING_MATCH REGEXP_MATCH
    BannedContents List<string>
    List of banned phrases. Applies to both user inputs and agent responses.
    BannedContentsInAgentResponses List<string>
    List of banned phrases. Applies only to agent responses.
    BannedContentsInUserInputs List<string>
    List of banned phrases. Applies only to user inputs.
    DisregardDiacritics bool
    If true, diacritics are ignored during matching.
    MatchType string
    Match type for the content filter. Possible values: SIMPLE_STRING_MATCH WORD_BOUNDARY_STRING_MATCH REGEXP_MATCH
    BannedContents []string
    List of banned phrases. Applies to both user inputs and agent responses.
    BannedContentsInAgentResponses []string
    List of banned phrases. Applies only to agent responses.
    BannedContentsInUserInputs []string
    List of banned phrases. Applies only to user inputs.
    DisregardDiacritics bool
    If true, diacritics are ignored during matching.
    matchType String
    Match type for the content filter. Possible values: SIMPLE_STRING_MATCH WORD_BOUNDARY_STRING_MATCH REGEXP_MATCH
    bannedContents List<String>
    List of banned phrases. Applies to both user inputs and agent responses.
    bannedContentsInAgentResponses List<String>
    List of banned phrases. Applies only to agent responses.
    bannedContentsInUserInputs List<String>
    List of banned phrases. Applies only to user inputs.
    disregardDiacritics Boolean
    If true, diacritics are ignored during matching.
    matchType string
    Match type for the content filter. Possible values: SIMPLE_STRING_MATCH WORD_BOUNDARY_STRING_MATCH REGEXP_MATCH
    bannedContents string[]
    List of banned phrases. Applies to both user inputs and agent responses.
    bannedContentsInAgentResponses string[]
    List of banned phrases. Applies only to agent responses.
    bannedContentsInUserInputs string[]
    List of banned phrases. Applies only to user inputs.
    disregardDiacritics boolean
    If true, diacritics are ignored during matching.
    match_type str
    Match type for the content filter. Possible values: SIMPLE_STRING_MATCH WORD_BOUNDARY_STRING_MATCH REGEXP_MATCH
    banned_contents Sequence[str]
    List of banned phrases. Applies to both user inputs and agent responses.
    banned_contents_in_agent_responses Sequence[str]
    List of banned phrases. Applies only to agent responses.
    banned_contents_in_user_inputs Sequence[str]
    List of banned phrases. Applies only to user inputs.
    disregard_diacritics bool
    If true, diacritics are ignored during matching.
    matchType String
    Match type for the content filter. Possible values: SIMPLE_STRING_MATCH WORD_BOUNDARY_STRING_MATCH REGEXP_MATCH
    bannedContents List<String>
    List of banned phrases. Applies to both user inputs and agent responses.
    bannedContentsInAgentResponses List<String>
    List of banned phrases. Applies only to agent responses.
    bannedContentsInUserInputs List<String>
    List of banned phrases. Applies only to user inputs.
    disregardDiacritics Boolean
    If true, diacritics are ignored during matching.

    GuardrailLlmPolicy, GuardrailLlmPolicyArgs

    PolicyScope string
    Defines when to apply the policy check during the conversation. If set to POLICY_SCOPE_UNSPECIFIED, the policy will be applied to the user input. When applying the policy to the agent response, additional latency will be introduced before the agent can respond. Possible values: USER_QUERY AGENT_RESPONSE USER_QUERY_AND_AGENT_RESPONSE Possible values are: USER_QUERY, AGENT_RESPONSE, USER_QUERY_AND_AGENT_RESPONSE.
    Prompt string
    Policy prompt.
    AllowShortUtterance bool
    By default, the LLM policy check is bypassed for short utterances. Enabling this setting applies the policy check to all utterances, including those that would normally be skipped.
    FailOpen bool
    If an error occurs during the policy check, fail open and do not trigger the guardrail.
    MaxConversationMessages int
    When checking this policy, consider the last 'n' messages in the conversation. When not set a default value of 10 will be used.
    ModelSettings GuardrailLlmPolicyModelSettings
    Model settings contains various configurations for the LLM model. Structure is documented below.
    PolicyScope string
    Defines when to apply the policy check during the conversation. If set to POLICY_SCOPE_UNSPECIFIED, the policy will be applied to the user input. When applying the policy to the agent response, additional latency will be introduced before the agent can respond. Possible values: USER_QUERY AGENT_RESPONSE USER_QUERY_AND_AGENT_RESPONSE Possible values are: USER_QUERY, AGENT_RESPONSE, USER_QUERY_AND_AGENT_RESPONSE.
    Prompt string
    Policy prompt.
    AllowShortUtterance bool
    By default, the LLM policy check is bypassed for short utterances. Enabling this setting applies the policy check to all utterances, including those that would normally be skipped.
    FailOpen bool
    If an error occurs during the policy check, fail open and do not trigger the guardrail.
    MaxConversationMessages int
    When checking this policy, consider the last 'n' messages in the conversation. When not set a default value of 10 will be used.
    ModelSettings GuardrailLlmPolicyModelSettings
    Model settings contains various configurations for the LLM model. Structure is documented below.
    policyScope String
    Defines when to apply the policy check during the conversation. If set to POLICY_SCOPE_UNSPECIFIED, the policy will be applied to the user input. When applying the policy to the agent response, additional latency will be introduced before the agent can respond. Possible values: USER_QUERY AGENT_RESPONSE USER_QUERY_AND_AGENT_RESPONSE Possible values are: USER_QUERY, AGENT_RESPONSE, USER_QUERY_AND_AGENT_RESPONSE.
    prompt String
    Policy prompt.
    allowShortUtterance Boolean
    By default, the LLM policy check is bypassed for short utterances. Enabling this setting applies the policy check to all utterances, including those that would normally be skipped.
    failOpen Boolean
    If an error occurs during the policy check, fail open and do not trigger the guardrail.
    maxConversationMessages Integer
    When checking this policy, consider the last 'n' messages in the conversation. When not set a default value of 10 will be used.
    modelSettings GuardrailLlmPolicyModelSettings
    Model settings contains various configurations for the LLM model. Structure is documented below.
    policyScope string
    Defines when to apply the policy check during the conversation. If set to POLICY_SCOPE_UNSPECIFIED, the policy will be applied to the user input. When applying the policy to the agent response, additional latency will be introduced before the agent can respond. Possible values: USER_QUERY AGENT_RESPONSE USER_QUERY_AND_AGENT_RESPONSE Possible values are: USER_QUERY, AGENT_RESPONSE, USER_QUERY_AND_AGENT_RESPONSE.
    prompt string
    Policy prompt.
    allowShortUtterance boolean
    By default, the LLM policy check is bypassed for short utterances. Enabling this setting applies the policy check to all utterances, including those that would normally be skipped.
    failOpen boolean
    If an error occurs during the policy check, fail open and do not trigger the guardrail.
    maxConversationMessages number
    When checking this policy, consider the last 'n' messages in the conversation. When not set a default value of 10 will be used.
    modelSettings GuardrailLlmPolicyModelSettings
    Model settings contains various configurations for the LLM model. Structure is documented below.
    policy_scope str
    Defines when to apply the policy check during the conversation. If set to POLICY_SCOPE_UNSPECIFIED, the policy will be applied to the user input. When applying the policy to the agent response, additional latency will be introduced before the agent can respond. Possible values: USER_QUERY AGENT_RESPONSE USER_QUERY_AND_AGENT_RESPONSE Possible values are: USER_QUERY, AGENT_RESPONSE, USER_QUERY_AND_AGENT_RESPONSE.
    prompt str
    Policy prompt.
    allow_short_utterance bool
    By default, the LLM policy check is bypassed for short utterances. Enabling this setting applies the policy check to all utterances, including those that would normally be skipped.
    fail_open bool
    If an error occurs during the policy check, fail open and do not trigger the guardrail.
    max_conversation_messages int
    When checking this policy, consider the last 'n' messages in the conversation. When not set a default value of 10 will be used.
    model_settings GuardrailLlmPolicyModelSettings
    Model settings contains various configurations for the LLM model. Structure is documented below.
    policyScope String
    Defines when to apply the policy check during the conversation. If set to POLICY_SCOPE_UNSPECIFIED, the policy will be applied to the user input. When applying the policy to the agent response, additional latency will be introduced before the agent can respond. Possible values: USER_QUERY AGENT_RESPONSE USER_QUERY_AND_AGENT_RESPONSE Possible values are: USER_QUERY, AGENT_RESPONSE, USER_QUERY_AND_AGENT_RESPONSE.
    prompt String
    Policy prompt.
    allowShortUtterance Boolean
    By default, the LLM policy check is bypassed for short utterances. Enabling this setting applies the policy check to all utterances, including those that would normally be skipped.
    failOpen Boolean
    If an error occurs during the policy check, fail open and do not trigger the guardrail.
    maxConversationMessages Number
    When checking this policy, consider the last 'n' messages in the conversation. When not set a default value of 10 will be used.
    modelSettings Property Map
    Model settings contains various configurations for the LLM model. Structure is documented below.

    GuardrailLlmPolicyModelSettings, GuardrailLlmPolicyModelSettingsArgs

    Model string
    The LLM model that the agent should use. If not set, the agent will inherit the model from its parent agent.
    Temperature double
    If set, this temperature will be used for the LLM model. Temperature controls the randomness of the model's responses. Lower temperatures produce responses that are more predictable. Higher temperatures produce responses that are more creative.
    Model string
    The LLM model that the agent should use. If not set, the agent will inherit the model from its parent agent.
    Temperature float64
    If set, this temperature will be used for the LLM model. Temperature controls the randomness of the model's responses. Lower temperatures produce responses that are more predictable. Higher temperatures produce responses that are more creative.
    model String
    The LLM model that the agent should use. If not set, the agent will inherit the model from its parent agent.
    temperature Double
    If set, this temperature will be used for the LLM model. Temperature controls the randomness of the model's responses. Lower temperatures produce responses that are more predictable. Higher temperatures produce responses that are more creative.
    model string
    The LLM model that the agent should use. If not set, the agent will inherit the model from its parent agent.
    temperature number
    If set, this temperature will be used for the LLM model. Temperature controls the randomness of the model's responses. Lower temperatures produce responses that are more predictable. Higher temperatures produce responses that are more creative.
    model str
    The LLM model that the agent should use. If not set, the agent will inherit the model from its parent agent.
    temperature float
    If set, this temperature will be used for the LLM model. Temperature controls the randomness of the model's responses. Lower temperatures produce responses that are more predictable. Higher temperatures produce responses that are more creative.
    model String
    The LLM model that the agent should use. If not set, the agent will inherit the model from its parent agent.
    temperature Number
    If set, this temperature will be used for the LLM model. Temperature controls the randomness of the model's responses. Lower temperatures produce responses that are more predictable. Higher temperatures produce responses that are more creative.

    GuardrailLlmPromptSecurity, GuardrailLlmPromptSecurityArgs

    CustomPolicy GuardrailLlmPromptSecurityCustomPolicy
    Guardrail that blocks the conversation if the LLM response is considered violating the policy based on the LLM classification. Structure is documented below.
    DefaultSettings GuardrailLlmPromptSecurityDefaultSettings
    Configuration for default system security settings. Structure is documented below.
    CustomPolicy GuardrailLlmPromptSecurityCustomPolicy
    Guardrail that blocks the conversation if the LLM response is considered violating the policy based on the LLM classification. Structure is documented below.
    DefaultSettings GuardrailLlmPromptSecurityDefaultSettings
    Configuration for default system security settings. Structure is documented below.
    customPolicy GuardrailLlmPromptSecurityCustomPolicy
    Guardrail that blocks the conversation if the LLM response is considered violating the policy based on the LLM classification. Structure is documented below.
    defaultSettings GuardrailLlmPromptSecurityDefaultSettings
    Configuration for default system security settings. Structure is documented below.
    customPolicy GuardrailLlmPromptSecurityCustomPolicy
    Guardrail that blocks the conversation if the LLM response is considered violating the policy based on the LLM classification. Structure is documented below.
    defaultSettings GuardrailLlmPromptSecurityDefaultSettings
    Configuration for default system security settings. Structure is documented below.
    custom_policy GuardrailLlmPromptSecurityCustomPolicy
    Guardrail that blocks the conversation if the LLM response is considered violating the policy based on the LLM classification. Structure is documented below.
    default_settings GuardrailLlmPromptSecurityDefaultSettings
    Configuration for default system security settings. Structure is documented below.
    customPolicy Property Map
    Guardrail that blocks the conversation if the LLM response is considered violating the policy based on the LLM classification. Structure is documented below.
    defaultSettings Property Map
    Configuration for default system security settings. Structure is documented below.

    GuardrailLlmPromptSecurityCustomPolicy, GuardrailLlmPromptSecurityCustomPolicyArgs

    PolicyScope string
    Defines when to apply the policy check during the conversation. If set to POLICY_SCOPE_UNSPECIFIED, the policy will be applied to the user input. When applying the policy to the agent response, additional latency will be introduced before the agent can respond. Possible values: USER_QUERY AGENT_RESPONSE USER_QUERY_AND_AGENT_RESPONSE
    Prompt string
    Policy prompt.
    AllowShortUtterance bool
    By default, the LLM policy check is bypassed for short utterances. Enabling this setting applies the policy check to all utterances, including those that would normally be skipped.
    FailOpen bool
    If an error occurs during the policy check, fail open and do not trigger the guardrail.
    MaxConversationMessages int
    When checking this policy, consider the last 'n' messages in the conversation. When not set a default value of 10 will be used.
    ModelSettings GuardrailLlmPromptSecurityCustomPolicyModelSettings
    Model settings contains various configurations for the LLM model. Structure is documented below.
    PolicyScope string
    Defines when to apply the policy check during the conversation. If set to POLICY_SCOPE_UNSPECIFIED, the policy will be applied to the user input. When applying the policy to the agent response, additional latency will be introduced before the agent can respond. Possible values: USER_QUERY AGENT_RESPONSE USER_QUERY_AND_AGENT_RESPONSE
    Prompt string
    Policy prompt.
    AllowShortUtterance bool
    By default, the LLM policy check is bypassed for short utterances. Enabling this setting applies the policy check to all utterances, including those that would normally be skipped.
    FailOpen bool
    If an error occurs during the policy check, fail open and do not trigger the guardrail.
    MaxConversationMessages int
    When checking this policy, consider the last 'n' messages in the conversation. When not set a default value of 10 will be used.
    ModelSettings GuardrailLlmPromptSecurityCustomPolicyModelSettings
    Model settings contains various configurations for the LLM model. Structure is documented below.
    policyScope String
    Defines when to apply the policy check during the conversation. If set to POLICY_SCOPE_UNSPECIFIED, the policy will be applied to the user input. When applying the policy to the agent response, additional latency will be introduced before the agent can respond. Possible values: USER_QUERY AGENT_RESPONSE USER_QUERY_AND_AGENT_RESPONSE
    prompt String
    Policy prompt.
    allowShortUtterance Boolean
    By default, the LLM policy check is bypassed for short utterances. Enabling this setting applies the policy check to all utterances, including those that would normally be skipped.
    failOpen Boolean
    If an error occurs during the policy check, fail open and do not trigger the guardrail.
    maxConversationMessages Integer
    When checking this policy, consider the last 'n' messages in the conversation. When not set a default value of 10 will be used.
    modelSettings GuardrailLlmPromptSecurityCustomPolicyModelSettings
    Model settings contains various configurations for the LLM model. Structure is documented below.
    policyScope string
    Defines when to apply the policy check during the conversation. If set to POLICY_SCOPE_UNSPECIFIED, the policy will be applied to the user input. When applying the policy to the agent response, additional latency will be introduced before the agent can respond. Possible values: USER_QUERY AGENT_RESPONSE USER_QUERY_AND_AGENT_RESPONSE
    prompt string
    Policy prompt.
    allowShortUtterance boolean
    By default, the LLM policy check is bypassed for short utterances. Enabling this setting applies the policy check to all utterances, including those that would normally be skipped.
    failOpen boolean
    If an error occurs during the policy check, fail open and do not trigger the guardrail.
    maxConversationMessages number
    When checking this policy, consider the last 'n' messages in the conversation. When not set a default value of 10 will be used.
    modelSettings GuardrailLlmPromptSecurityCustomPolicyModelSettings
    Model settings contains various configurations for the LLM model. Structure is documented below.
    policy_scope str
    Defines when to apply the policy check during the conversation. If set to POLICY_SCOPE_UNSPECIFIED, the policy will be applied to the user input. When applying the policy to the agent response, additional latency will be introduced before the agent can respond. Possible values: USER_QUERY AGENT_RESPONSE USER_QUERY_AND_AGENT_RESPONSE
    prompt str
    Policy prompt.
    allow_short_utterance bool
    By default, the LLM policy check is bypassed for short utterances. Enabling this setting applies the policy check to all utterances, including those that would normally be skipped.
    fail_open bool
    If an error occurs during the policy check, fail open and do not trigger the guardrail.
    max_conversation_messages int
    When checking this policy, consider the last 'n' messages in the conversation. When not set a default value of 10 will be used.
    model_settings GuardrailLlmPromptSecurityCustomPolicyModelSettings
    Model settings contains various configurations for the LLM model. Structure is documented below.
    policyScope String
    Defines when to apply the policy check during the conversation. If set to POLICY_SCOPE_UNSPECIFIED, the policy will be applied to the user input. When applying the policy to the agent response, additional latency will be introduced before the agent can respond. Possible values: USER_QUERY AGENT_RESPONSE USER_QUERY_AND_AGENT_RESPONSE
    prompt String
    Policy prompt.
    allowShortUtterance Boolean
    By default, the LLM policy check is bypassed for short utterances. Enabling this setting applies the policy check to all utterances, including those that would normally be skipped.
    failOpen Boolean
    If an error occurs during the policy check, fail open and do not trigger the guardrail.
    maxConversationMessages Number
    When checking this policy, consider the last 'n' messages in the conversation. When not set a default value of 10 will be used.
    modelSettings Property Map
    Model settings contains various configurations for the LLM model. Structure is documented below.

    GuardrailLlmPromptSecurityCustomPolicyModelSettings, GuardrailLlmPromptSecurityCustomPolicyModelSettingsArgs

    Model string
    The LLM model that the agent should use. If not set, the agent will inherit the model from its parent agent.
    Temperature double
    If set, this temperature will be used for the LLM model. Temperature controls the randomness of the model's responses. Lower temperatures produce responses that are more predictable. Higher temperatures produce responses that are more creative.
    Model string
    The LLM model that the agent should use. If not set, the agent will inherit the model from its parent agent.
    Temperature float64
    If set, this temperature will be used for the LLM model. Temperature controls the randomness of the model's responses. Lower temperatures produce responses that are more predictable. Higher temperatures produce responses that are more creative.
    model String
    The LLM model that the agent should use. If not set, the agent will inherit the model from its parent agent.
    temperature Double
    If set, this temperature will be used for the LLM model. Temperature controls the randomness of the model's responses. Lower temperatures produce responses that are more predictable. Higher temperatures produce responses that are more creative.
    model string
    The LLM model that the agent should use. If not set, the agent will inherit the model from its parent agent.
    temperature number
    If set, this temperature will be used for the LLM model. Temperature controls the randomness of the model's responses. Lower temperatures produce responses that are more predictable. Higher temperatures produce responses that are more creative.
    model str
    The LLM model that the agent should use. If not set, the agent will inherit the model from its parent agent.
    temperature float
    If set, this temperature will be used for the LLM model. Temperature controls the randomness of the model's responses. Lower temperatures produce responses that are more predictable. Higher temperatures produce responses that are more creative.
    model String
    The LLM model that the agent should use. If not set, the agent will inherit the model from its parent agent.
    temperature Number
    If set, this temperature will be used for the LLM model. Temperature controls the randomness of the model's responses. Lower temperatures produce responses that are more predictable. Higher temperatures produce responses that are more creative.

    GuardrailLlmPromptSecurityDefaultSettings, GuardrailLlmPromptSecurityDefaultSettingsArgs

    DefaultPromptTemplate string
    (Output) The default prompt template used by the system. This field is for display purposes to show the user what prompt the system uses by default. It is OUTPUT_ONLY.
    DefaultPromptTemplate string
    (Output) The default prompt template used by the system. This field is for display purposes to show the user what prompt the system uses by default. It is OUTPUT_ONLY.
    defaultPromptTemplate String
    (Output) The default prompt template used by the system. This field is for display purposes to show the user what prompt the system uses by default. It is OUTPUT_ONLY.
    defaultPromptTemplate string
    (Output) The default prompt template used by the system. This field is for display purposes to show the user what prompt the system uses by default. It is OUTPUT_ONLY.
    default_prompt_template str
    (Output) The default prompt template used by the system. This field is for display purposes to show the user what prompt the system uses by default. It is OUTPUT_ONLY.
    defaultPromptTemplate String
    (Output) The default prompt template used by the system. This field is for display purposes to show the user what prompt the system uses by default. It is OUTPUT_ONLY.

    GuardrailModelSafety, GuardrailModelSafetyArgs

    SafetySettings List<GuardrailModelSafetySafetySetting>
    List of safety settings. Structure is documented below.
    SafetySettings []GuardrailModelSafetySafetySetting
    List of safety settings. Structure is documented below.
    safetySettings List<GuardrailModelSafetySafetySetting>
    List of safety settings. Structure is documented below.
    safetySettings GuardrailModelSafetySafetySetting[]
    List of safety settings. Structure is documented below.
    safety_settings Sequence[GuardrailModelSafetySafetySetting]
    List of safety settings. Structure is documented below.
    safetySettings List<Property Map>
    List of safety settings. Structure is documented below.

    GuardrailModelSafetySafetySetting, GuardrailModelSafetySafetySettingArgs

    Category string
    The harm category. Possible values: HARM_CATEGORY_HATE_SPEECH HARM_CATEGORY_DANGEROUS_CONTENT HARM_CATEGORY_HARASSMENT HARM_CATEGORY_SEXUALLY_EXPLICIT Possible values are: HARM_CATEGORY_HATE_SPEECH, HARM_CATEGORY_DANGEROUS_CONTENT, HARM_CATEGORY_HARASSMENT, HARM_CATEGORY_SEXUALLY_EXPLICIT.
    Threshold string
    The harm block threshold. Possible values: BLOCK_LOW_AND_ABOVE BLOCK_MEDIUM_AND_ABOVE BLOCK_ONLY_HIGH BLOCK_NONE OFF Possible values are: BLOCK_LOW_AND_ABOVE, BLOCK_MEDIUM_AND_ABOVE, BLOCK_ONLY_HIGH, BLOCK_NONE, OFF.
    Category string
    The harm category. Possible values: HARM_CATEGORY_HATE_SPEECH HARM_CATEGORY_DANGEROUS_CONTENT HARM_CATEGORY_HARASSMENT HARM_CATEGORY_SEXUALLY_EXPLICIT Possible values are: HARM_CATEGORY_HATE_SPEECH, HARM_CATEGORY_DANGEROUS_CONTENT, HARM_CATEGORY_HARASSMENT, HARM_CATEGORY_SEXUALLY_EXPLICIT.
    Threshold string
    The harm block threshold. Possible values: BLOCK_LOW_AND_ABOVE BLOCK_MEDIUM_AND_ABOVE BLOCK_ONLY_HIGH BLOCK_NONE OFF Possible values are: BLOCK_LOW_AND_ABOVE, BLOCK_MEDIUM_AND_ABOVE, BLOCK_ONLY_HIGH, BLOCK_NONE, OFF.
    category String
    The harm category. Possible values: HARM_CATEGORY_HATE_SPEECH HARM_CATEGORY_DANGEROUS_CONTENT HARM_CATEGORY_HARASSMENT HARM_CATEGORY_SEXUALLY_EXPLICIT Possible values are: HARM_CATEGORY_HATE_SPEECH, HARM_CATEGORY_DANGEROUS_CONTENT, HARM_CATEGORY_HARASSMENT, HARM_CATEGORY_SEXUALLY_EXPLICIT.
    threshold String
    The harm block threshold. Possible values: BLOCK_LOW_AND_ABOVE BLOCK_MEDIUM_AND_ABOVE BLOCK_ONLY_HIGH BLOCK_NONE OFF Possible values are: BLOCK_LOW_AND_ABOVE, BLOCK_MEDIUM_AND_ABOVE, BLOCK_ONLY_HIGH, BLOCK_NONE, OFF.
    category string
    The harm category. Possible values: HARM_CATEGORY_HATE_SPEECH HARM_CATEGORY_DANGEROUS_CONTENT HARM_CATEGORY_HARASSMENT HARM_CATEGORY_SEXUALLY_EXPLICIT Possible values are: HARM_CATEGORY_HATE_SPEECH, HARM_CATEGORY_DANGEROUS_CONTENT, HARM_CATEGORY_HARASSMENT, HARM_CATEGORY_SEXUALLY_EXPLICIT.
    threshold string
    The harm block threshold. Possible values: BLOCK_LOW_AND_ABOVE BLOCK_MEDIUM_AND_ABOVE BLOCK_ONLY_HIGH BLOCK_NONE OFF Possible values are: BLOCK_LOW_AND_ABOVE, BLOCK_MEDIUM_AND_ABOVE, BLOCK_ONLY_HIGH, BLOCK_NONE, OFF.
    category str
    The harm category. Possible values: HARM_CATEGORY_HATE_SPEECH HARM_CATEGORY_DANGEROUS_CONTENT HARM_CATEGORY_HARASSMENT HARM_CATEGORY_SEXUALLY_EXPLICIT Possible values are: HARM_CATEGORY_HATE_SPEECH, HARM_CATEGORY_DANGEROUS_CONTENT, HARM_CATEGORY_HARASSMENT, HARM_CATEGORY_SEXUALLY_EXPLICIT.
    threshold str
    The harm block threshold. Possible values: BLOCK_LOW_AND_ABOVE BLOCK_MEDIUM_AND_ABOVE BLOCK_ONLY_HIGH BLOCK_NONE OFF Possible values are: BLOCK_LOW_AND_ABOVE, BLOCK_MEDIUM_AND_ABOVE, BLOCK_ONLY_HIGH, BLOCK_NONE, OFF.
    category String
    The harm category. Possible values: HARM_CATEGORY_HATE_SPEECH HARM_CATEGORY_DANGEROUS_CONTENT HARM_CATEGORY_HARASSMENT HARM_CATEGORY_SEXUALLY_EXPLICIT Possible values are: HARM_CATEGORY_HATE_SPEECH, HARM_CATEGORY_DANGEROUS_CONTENT, HARM_CATEGORY_HARASSMENT, HARM_CATEGORY_SEXUALLY_EXPLICIT.
    threshold String
    The harm block threshold. Possible values: BLOCK_LOW_AND_ABOVE BLOCK_MEDIUM_AND_ABOVE BLOCK_ONLY_HIGH BLOCK_NONE OFF Possible values are: BLOCK_LOW_AND_ABOVE, BLOCK_MEDIUM_AND_ABOVE, BLOCK_ONLY_HIGH, BLOCK_NONE, OFF.

    Import

    Guardrail can be imported using any of these accepted formats:

    • projects/{{project}}/locations/{{location}}/apps/{{app}}/guardrails/{{name}}

    • {{project}}/{{location}}/{{app}}/{{name}}

    • {{location}}/{{app}}/{{name}}

    When using the pulumi import command, Guardrail can be imported using one of the formats above. For example:

    $ pulumi import gcp:ces/guardrail:Guardrail default projects/{{project}}/locations/{{location}}/apps/{{app}}/guardrails/{{name}}
    
    $ pulumi import gcp:ces/guardrail:Guardrail default {{project}}/{{location}}/{{app}}/{{name}}
    
    $ pulumi import gcp:ces/guardrail:Guardrail default {{location}}/{{app}}/{{name}}
    

    To learn more about importing existing cloud resources, see Importing resources.

    Package Details

    Repository
    Google Cloud (GCP) Classic pulumi/pulumi-gcp
    License
    Apache-2.0
    Notes
    This Pulumi package is based on the google-beta Terraform Provider.
    gcp logo
    Google Cloud v9.6.0 published on Wednesday, Nov 26, 2025 by Pulumi
      Meet Neo: Your AI Platform Teammate