Description
Example Usage
Ces Guardrail Basic
import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";
const cesAppForGuardrail = new gcp.ces.App("ces_app_for_guardrail", {
appId: "app-id",
location: "us",
description: "App used as parent for CES Toolset example",
displayName: "my-app",
languageSettings: {
defaultLanguageCode: "en-US",
supportedLanguageCodes: [
"es-ES",
"fr-FR",
],
enableMultilingualSupport: true,
fallbackAction: "escalate",
},
timeZoneSettings: {
timeZone: "America/Los_Angeles",
},
});
const cesGuardrailBasic = new gcp.ces.Guardrail("ces_guardrail_basic", {
guardrailId: "guardrail-id",
location: cesAppForGuardrail.location,
app: cesAppForGuardrail.appId,
displayName: "my-guardrail",
description: "Guardrail description",
action: {
respondImmediately: {
responses: [{
text: "Text",
disabled: false,
}],
},
},
enabled: true,
modelSafety: {
safetySettings: [{
category: "HARM_CATEGORY_HATE_SPEECH",
threshold: "BLOCK_NONE",
}],
},
});
import pulumi
import pulumi_gcp as gcp
ces_app_for_guardrail = gcp.ces.App("ces_app_for_guardrail",
app_id="app-id",
location="us",
description="App used as parent for CES Toolset example",
display_name="my-app",
language_settings={
"default_language_code": "en-US",
"supported_language_codes": [
"es-ES",
"fr-FR",
],
"enable_multilingual_support": True,
"fallback_action": "escalate",
},
time_zone_settings={
"time_zone": "America/Los_Angeles",
})
ces_guardrail_basic = gcp.ces.Guardrail("ces_guardrail_basic",
guardrail_id="guardrail-id",
location=ces_app_for_guardrail.location,
app=ces_app_for_guardrail.app_id,
display_name="my-guardrail",
description="Guardrail description",
action={
"respond_immediately": {
"responses": [{
"text": "Text",
"disabled": False,
}],
},
},
enabled=True,
model_safety={
"safety_settings": [{
"category": "HARM_CATEGORY_HATE_SPEECH",
"threshold": "BLOCK_NONE",
}],
})
package main
import (
"github.com/pulumi/pulumi-gcp/sdk/v9/go/gcp/ces"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
cesAppForGuardrail, err := ces.NewApp(ctx, "ces_app_for_guardrail", &ces.AppArgs{
AppId: pulumi.String("app-id"),
Location: pulumi.String("us"),
Description: pulumi.String("App used as parent for CES Toolset example"),
DisplayName: pulumi.String("my-app"),
LanguageSettings: &ces.AppLanguageSettingsArgs{
DefaultLanguageCode: pulumi.String("en-US"),
SupportedLanguageCodes: pulumi.StringArray{
pulumi.String("es-ES"),
pulumi.String("fr-FR"),
},
EnableMultilingualSupport: pulumi.Bool(true),
FallbackAction: pulumi.String("escalate"),
},
TimeZoneSettings: &ces.AppTimeZoneSettingsArgs{
TimeZone: pulumi.String("America/Los_Angeles"),
},
})
if err != nil {
return err
}
_, err = ces.NewGuardrail(ctx, "ces_guardrail_basic", &ces.GuardrailArgs{
GuardrailId: pulumi.String("guardrail-id"),
Location: cesAppForGuardrail.Location,
App: cesAppForGuardrail.AppId,
DisplayName: pulumi.String("my-guardrail"),
Description: pulumi.String("Guardrail description"),
Action: &ces.GuardrailActionArgs{
RespondImmediately: &ces.GuardrailActionRespondImmediatelyArgs{
Responses: ces.GuardrailActionRespondImmediatelyResponseArray{
&ces.GuardrailActionRespondImmediatelyResponseArgs{
Text: pulumi.String("Text"),
Disabled: pulumi.Bool(false),
},
},
},
},
Enabled: pulumi.Bool(true),
ModelSafety: &ces.GuardrailModelSafetyArgs{
SafetySettings: ces.GuardrailModelSafetySafetySettingArray{
&ces.GuardrailModelSafetySafetySettingArgs{
Category: pulumi.String("HARM_CATEGORY_HATE_SPEECH"),
Threshold: pulumi.String("BLOCK_NONE"),
},
},
},
})
if err != nil {
return err
}
return nil
})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;
return await Deployment.RunAsync(() =>
{
var cesAppForGuardrail = new Gcp.Ces.App("ces_app_for_guardrail", new()
{
AppId = "app-id",
Location = "us",
Description = "App used as parent for CES Toolset example",
DisplayName = "my-app",
LanguageSettings = new Gcp.Ces.Inputs.AppLanguageSettingsArgs
{
DefaultLanguageCode = "en-US",
SupportedLanguageCodes = new[]
{
"es-ES",
"fr-FR",
},
EnableMultilingualSupport = true,
FallbackAction = "escalate",
},
TimeZoneSettings = new Gcp.Ces.Inputs.AppTimeZoneSettingsArgs
{
TimeZone = "America/Los_Angeles",
},
});
var cesGuardrailBasic = new Gcp.Ces.Guardrail("ces_guardrail_basic", new()
{
GuardrailId = "guardrail-id",
Location = cesAppForGuardrail.Location,
App = cesAppForGuardrail.AppId,
DisplayName = "my-guardrail",
Description = "Guardrail description",
Action = new Gcp.Ces.Inputs.GuardrailActionArgs
{
RespondImmediately = new Gcp.Ces.Inputs.GuardrailActionRespondImmediatelyArgs
{
Responses = new[]
{
new Gcp.Ces.Inputs.GuardrailActionRespondImmediatelyResponseArgs
{
Text = "Text",
Disabled = false,
},
},
},
},
Enabled = true,
ModelSafety = new Gcp.Ces.Inputs.GuardrailModelSafetyArgs
{
SafetySettings = new[]
{
new Gcp.Ces.Inputs.GuardrailModelSafetySafetySettingArgs
{
Category = "HARM_CATEGORY_HATE_SPEECH",
Threshold = "BLOCK_NONE",
},
},
},
});
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcp.ces.App;
import com.pulumi.gcp.ces.AppArgs;
import com.pulumi.gcp.ces.inputs.AppLanguageSettingsArgs;
import com.pulumi.gcp.ces.inputs.AppTimeZoneSettingsArgs;
import com.pulumi.gcp.ces.Guardrail;
import com.pulumi.gcp.ces.GuardrailArgs;
import com.pulumi.gcp.ces.inputs.GuardrailActionArgs;
import com.pulumi.gcp.ces.inputs.GuardrailActionRespondImmediatelyArgs;
import com.pulumi.gcp.ces.inputs.GuardrailModelSafetyArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
var cesAppForGuardrail = new App("cesAppForGuardrail", AppArgs.builder()
.appId("app-id")
.location("us")
.description("App used as parent for CES Toolset example")
.displayName("my-app")
.languageSettings(AppLanguageSettingsArgs.builder()
.defaultLanguageCode("en-US")
.supportedLanguageCodes(
"es-ES",
"fr-FR")
.enableMultilingualSupport(true)
.fallbackAction("escalate")
.build())
.timeZoneSettings(AppTimeZoneSettingsArgs.builder()
.timeZone("America/Los_Angeles")
.build())
.build());
var cesGuardrailBasic = new Guardrail("cesGuardrailBasic", GuardrailArgs.builder()
.guardrailId("guardrail-id")
.location(cesAppForGuardrail.location())
.app(cesAppForGuardrail.appId())
.displayName("my-guardrail")
.description("Guardrail description")
.action(GuardrailActionArgs.builder()
.respondImmediately(GuardrailActionRespondImmediatelyArgs.builder()
.responses(GuardrailActionRespondImmediatelyResponseArgs.builder()
.text("Text")
.disabled(false)
.build())
.build())
.build())
.enabled(true)
.modelSafety(GuardrailModelSafetyArgs.builder()
.safetySettings(GuardrailModelSafetySafetySettingArgs.builder()
.category("HARM_CATEGORY_HATE_SPEECH")
.threshold("BLOCK_NONE")
.build())
.build())
.build());
}
}
resources:
cesAppForGuardrail:
type: gcp:ces:App
name: ces_app_for_guardrail
properties:
appId: app-id
location: us
description: App used as parent for CES Toolset example
displayName: my-app
languageSettings:
defaultLanguageCode: en-US
supportedLanguageCodes:
- es-ES
- fr-FR
enableMultilingualSupport: true
fallbackAction: escalate
timeZoneSettings:
timeZone: America/Los_Angeles
cesGuardrailBasic:
type: gcp:ces:Guardrail
name: ces_guardrail_basic
properties:
guardrailId: guardrail-id
location: ${cesAppForGuardrail.location}
app: ${cesAppForGuardrail.appId}
displayName: my-guardrail
description: Guardrail description
action:
respondImmediately:
responses:
- text: Text
disabled: false
enabled: true
modelSafety:
safetySettings:
- category: HARM_CATEGORY_HATE_SPEECH
threshold: BLOCK_NONE
Ces Guardrail Transfer Agent Content Filter
import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";
const cesAppForGuardrail = new gcp.ces.App("ces_app_for_guardrail", {
appId: "app-id",
location: "us",
description: "App used as parent for CES Toolset example",
displayName: "my-app",
languageSettings: {
defaultLanguageCode: "en-US",
supportedLanguageCodes: [
"es-ES",
"fr-FR",
],
enableMultilingualSupport: true,
fallbackAction: "escalate",
},
timeZoneSettings: {
timeZone: "America/Los_Angeles",
},
});
const cesGuardrailTransferAgentContentFilter = new gcp.ces.Guardrail("ces_guardrail_transfer_agent_content_filter", {
guardrailId: "guardrail-id",
location: cesAppForGuardrail.location,
app: cesAppForGuardrail.appId,
displayName: "my-guardrail",
description: "Guardrail description",
action: {
transferAgent: {
agent: pulumi.interpolate`projects/${cesAppForGuardrail.project}/locations/us/apps/${cesAppForGuardrail.appId}/agents/fake-agent`,
},
},
enabled: true,
contentFilter: {
bannedContents: ["example"],
bannedContentsInUserInputs: ["example"],
bannedContentsInAgentResponses: ["example"],
matchType: "SIMPLE_STRING_MATCH",
disregardDiacritics: true,
},
});
import pulumi
import pulumi_gcp as gcp
ces_app_for_guardrail = gcp.ces.App("ces_app_for_guardrail",
app_id="app-id",
location="us",
description="App used as parent for CES Toolset example",
display_name="my-app",
language_settings={
"default_language_code": "en-US",
"supported_language_codes": [
"es-ES",
"fr-FR",
],
"enable_multilingual_support": True,
"fallback_action": "escalate",
},
time_zone_settings={
"time_zone": "America/Los_Angeles",
})
ces_guardrail_transfer_agent_content_filter = gcp.ces.Guardrail("ces_guardrail_transfer_agent_content_filter",
guardrail_id="guardrail-id",
location=ces_app_for_guardrail.location,
app=ces_app_for_guardrail.app_id,
display_name="my-guardrail",
description="Guardrail description",
action={
"transfer_agent": {
"agent": pulumi.Output.all(
project=ces_app_for_guardrail.project,
app_id=ces_app_for_guardrail.app_id
).apply(lambda resolved_outputs: f"projects/{resolved_outputs['project']}/locations/us/apps/{resolved_outputs['app_id']}/agents/fake-agent")
,
},
},
enabled=True,
content_filter={
"banned_contents": ["example"],
"banned_contents_in_user_inputs": ["example"],
"banned_contents_in_agent_responses": ["example"],
"match_type": "SIMPLE_STRING_MATCH",
"disregard_diacritics": True,
})
package main
import (
"fmt"
"github.com/pulumi/pulumi-gcp/sdk/v9/go/gcp/ces"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
cesAppForGuardrail, err := ces.NewApp(ctx, "ces_app_for_guardrail", &ces.AppArgs{
AppId: pulumi.String("app-id"),
Location: pulumi.String("us"),
Description: pulumi.String("App used as parent for CES Toolset example"),
DisplayName: pulumi.String("my-app"),
LanguageSettings: &ces.AppLanguageSettingsArgs{
DefaultLanguageCode: pulumi.String("en-US"),
SupportedLanguageCodes: pulumi.StringArray{
pulumi.String("es-ES"),
pulumi.String("fr-FR"),
},
EnableMultilingualSupport: pulumi.Bool(true),
FallbackAction: pulumi.String("escalate"),
},
TimeZoneSettings: &ces.AppTimeZoneSettingsArgs{
TimeZone: pulumi.String("America/Los_Angeles"),
},
})
if err != nil {
return err
}
_, err = ces.NewGuardrail(ctx, "ces_guardrail_transfer_agent_content_filter", &ces.GuardrailArgs{
GuardrailId: pulumi.String("guardrail-id"),
Location: cesAppForGuardrail.Location,
App: cesAppForGuardrail.AppId,
DisplayName: pulumi.String("my-guardrail"),
Description: pulumi.String("Guardrail description"),
Action: &ces.GuardrailActionArgs{
TransferAgent: &ces.GuardrailActionTransferAgentArgs{
Agent: pulumi.All(cesAppForGuardrail.Project, cesAppForGuardrail.AppId).ApplyT(func(_args []interface{}) (string, error) {
project := _args[0].(string)
appId := _args[1].(string)
return fmt.Sprintf("projects/%v/locations/us/apps/%v/agents/fake-agent", project, appId), nil
}).(pulumi.StringOutput),
},
},
Enabled: pulumi.Bool(true),
ContentFilter: &ces.GuardrailContentFilterArgs{
BannedContents: pulumi.StringArray{
pulumi.String("example"),
},
BannedContentsInUserInputs: pulumi.StringArray{
pulumi.String("example"),
},
BannedContentsInAgentResponses: pulumi.StringArray{
pulumi.String("example"),
},
MatchType: pulumi.String("SIMPLE_STRING_MATCH"),
DisregardDiacritics: pulumi.Bool(true),
},
})
if err != nil {
return err
}
return nil
})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;
return await Deployment.RunAsync(() =>
{
var cesAppForGuardrail = new Gcp.Ces.App("ces_app_for_guardrail", new()
{
AppId = "app-id",
Location = "us",
Description = "App used as parent for CES Toolset example",
DisplayName = "my-app",
LanguageSettings = new Gcp.Ces.Inputs.AppLanguageSettingsArgs
{
DefaultLanguageCode = "en-US",
SupportedLanguageCodes = new[]
{
"es-ES",
"fr-FR",
},
EnableMultilingualSupport = true,
FallbackAction = "escalate",
},
TimeZoneSettings = new Gcp.Ces.Inputs.AppTimeZoneSettingsArgs
{
TimeZone = "America/Los_Angeles",
},
});
var cesGuardrailTransferAgentContentFilter = new Gcp.Ces.Guardrail("ces_guardrail_transfer_agent_content_filter", new()
{
GuardrailId = "guardrail-id",
Location = cesAppForGuardrail.Location,
App = cesAppForGuardrail.AppId,
DisplayName = "my-guardrail",
Description = "Guardrail description",
Action = new Gcp.Ces.Inputs.GuardrailActionArgs
{
TransferAgent = new Gcp.Ces.Inputs.GuardrailActionTransferAgentArgs
{
Agent = Output.Tuple(cesAppForGuardrail.Project, cesAppForGuardrail.AppId).Apply(values =>
{
var project = values.Item1;
var appId = values.Item2;
return $"projects/{project}/locations/us/apps/{appId}/agents/fake-agent";
}),
},
},
Enabled = true,
ContentFilter = new Gcp.Ces.Inputs.GuardrailContentFilterArgs
{
BannedContents = new[]
{
"example",
},
BannedContentsInUserInputs = new[]
{
"example",
},
BannedContentsInAgentResponses = new[]
{
"example",
},
MatchType = "SIMPLE_STRING_MATCH",
DisregardDiacritics = true,
},
});
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcp.ces.App;
import com.pulumi.gcp.ces.AppArgs;
import com.pulumi.gcp.ces.inputs.AppLanguageSettingsArgs;
import com.pulumi.gcp.ces.inputs.AppTimeZoneSettingsArgs;
import com.pulumi.gcp.ces.Guardrail;
import com.pulumi.gcp.ces.GuardrailArgs;
import com.pulumi.gcp.ces.inputs.GuardrailActionArgs;
import com.pulumi.gcp.ces.inputs.GuardrailActionTransferAgentArgs;
import com.pulumi.gcp.ces.inputs.GuardrailContentFilterArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
var cesAppForGuardrail = new App("cesAppForGuardrail", AppArgs.builder()
.appId("app-id")
.location("us")
.description("App used as parent for CES Toolset example")
.displayName("my-app")
.languageSettings(AppLanguageSettingsArgs.builder()
.defaultLanguageCode("en-US")
.supportedLanguageCodes(
"es-ES",
"fr-FR")
.enableMultilingualSupport(true)
.fallbackAction("escalate")
.build())
.timeZoneSettings(AppTimeZoneSettingsArgs.builder()
.timeZone("America/Los_Angeles")
.build())
.build());
var cesGuardrailTransferAgentContentFilter = new Guardrail("cesGuardrailTransferAgentContentFilter", GuardrailArgs.builder()
.guardrailId("guardrail-id")
.location(cesAppForGuardrail.location())
.app(cesAppForGuardrail.appId())
.displayName("my-guardrail")
.description("Guardrail description")
.action(GuardrailActionArgs.builder()
.transferAgent(GuardrailActionTransferAgentArgs.builder()
.agent(Output.tuple(cesAppForGuardrail.project(), cesAppForGuardrail.appId()).applyValue(values -> {
var project = values.t1;
var appId = values.t2;
return String.format("projects/%s/locations/us/apps/%s/agents/fake-agent", project,appId);
}))
.build())
.build())
.enabled(true)
.contentFilter(GuardrailContentFilterArgs.builder()
.bannedContents("example")
.bannedContentsInUserInputs("example")
.bannedContentsInAgentResponses("example")
.matchType("SIMPLE_STRING_MATCH")
.disregardDiacritics(true)
.build())
.build());
}
}
resources:
cesAppForGuardrail:
type: gcp:ces:App
name: ces_app_for_guardrail
properties:
appId: app-id
location: us
description: App used as parent for CES Toolset example
displayName: my-app
languageSettings:
defaultLanguageCode: en-US
supportedLanguageCodes:
- es-ES
- fr-FR
enableMultilingualSupport: true
fallbackAction: escalate
timeZoneSettings:
timeZone: America/Los_Angeles
cesGuardrailTransferAgentContentFilter:
type: gcp:ces:Guardrail
name: ces_guardrail_transfer_agent_content_filter
properties:
guardrailId: guardrail-id
location: ${cesAppForGuardrail.location}
app: ${cesAppForGuardrail.appId}
displayName: my-guardrail
description: Guardrail description
action:
transferAgent:
agent: projects/${cesAppForGuardrail.project}/locations/us/apps/${cesAppForGuardrail.appId}/agents/fake-agent
enabled: true
contentFilter:
bannedContents:
- example
bannedContentsInUserInputs:
- example
bannedContentsInAgentResponses:
- example
matchType: SIMPLE_STRING_MATCH
disregardDiacritics: true
Ces Guardrail Generative Answer Llm Prompt Security
import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";
const cesAppForGuardrail = new gcp.ces.App("ces_app_for_guardrail", {
appId: "app-id",
location: "us",
description: "App used as parent for CES Toolset example",
displayName: "my-app",
languageSettings: {
defaultLanguageCode: "en-US",
supportedLanguageCodes: [
"es-ES",
"fr-FR",
],
enableMultilingualSupport: true,
fallbackAction: "escalate",
},
timeZoneSettings: {
timeZone: "America/Los_Angeles",
},
});
const cesGuardrailGenerativeAnswerLlmPromptSecurity = new gcp.ces.Guardrail("ces_guardrail_generative_answer_llm_prompt_security", {
guardrailId: "guardrail-id",
location: cesAppForGuardrail.location,
app: cesAppForGuardrail.appId,
displayName: "my-guardrail",
description: "Guardrail description",
action: {
generativeAnswer: {
prompt: "example_prompt",
},
},
enabled: true,
llmPromptSecurity: {
customPolicy: {
maxConversationMessages: 10,
modelSettings: {
model: "gemini-2.5-flash",
temperature: 50,
},
prompt: "example_prompt",
policyScope: "USER_QUERY",
failOpen: true,
allowShortUtterance: true,
},
},
});
import pulumi
import pulumi_gcp as gcp
ces_app_for_guardrail = gcp.ces.App("ces_app_for_guardrail",
app_id="app-id",
location="us",
description="App used as parent for CES Toolset example",
display_name="my-app",
language_settings={
"default_language_code": "en-US",
"supported_language_codes": [
"es-ES",
"fr-FR",
],
"enable_multilingual_support": True,
"fallback_action": "escalate",
},
time_zone_settings={
"time_zone": "America/Los_Angeles",
})
ces_guardrail_generative_answer_llm_prompt_security = gcp.ces.Guardrail("ces_guardrail_generative_answer_llm_prompt_security",
guardrail_id="guardrail-id",
location=ces_app_for_guardrail.location,
app=ces_app_for_guardrail.app_id,
display_name="my-guardrail",
description="Guardrail description",
action={
"generative_answer": {
"prompt": "example_prompt",
},
},
enabled=True,
llm_prompt_security={
"custom_policy": {
"max_conversation_messages": 10,
"model_settings": {
"model": "gemini-2.5-flash",
"temperature": 50,
},
"prompt": "example_prompt",
"policy_scope": "USER_QUERY",
"fail_open": True,
"allow_short_utterance": True,
},
})
package main
import (
"github.com/pulumi/pulumi-gcp/sdk/v9/go/gcp/ces"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
cesAppForGuardrail, err := ces.NewApp(ctx, "ces_app_for_guardrail", &ces.AppArgs{
AppId: pulumi.String("app-id"),
Location: pulumi.String("us"),
Description: pulumi.String("App used as parent for CES Toolset example"),
DisplayName: pulumi.String("my-app"),
LanguageSettings: &ces.AppLanguageSettingsArgs{
DefaultLanguageCode: pulumi.String("en-US"),
SupportedLanguageCodes: pulumi.StringArray{
pulumi.String("es-ES"),
pulumi.String("fr-FR"),
},
EnableMultilingualSupport: pulumi.Bool(true),
FallbackAction: pulumi.String("escalate"),
},
TimeZoneSettings: &ces.AppTimeZoneSettingsArgs{
TimeZone: pulumi.String("America/Los_Angeles"),
},
})
if err != nil {
return err
}
_, err = ces.NewGuardrail(ctx, "ces_guardrail_generative_answer_llm_prompt_security", &ces.GuardrailArgs{
GuardrailId: pulumi.String("guardrail-id"),
Location: cesAppForGuardrail.Location,
App: cesAppForGuardrail.AppId,
DisplayName: pulumi.String("my-guardrail"),
Description: pulumi.String("Guardrail description"),
Action: &ces.GuardrailActionArgs{
GenerativeAnswer: &ces.GuardrailActionGenerativeAnswerArgs{
Prompt: pulumi.String("example_prompt"),
},
},
Enabled: pulumi.Bool(true),
LlmPromptSecurity: &ces.GuardrailLlmPromptSecurityArgs{
CustomPolicy: &ces.GuardrailLlmPromptSecurityCustomPolicyArgs{
MaxConversationMessages: pulumi.Int(10),
ModelSettings: &ces.GuardrailLlmPromptSecurityCustomPolicyModelSettingsArgs{
Model: pulumi.String("gemini-2.5-flash"),
Temperature: pulumi.Float64(50),
},
Prompt: pulumi.String("example_prompt"),
PolicyScope: pulumi.String("USER_QUERY"),
FailOpen: pulumi.Bool(true),
AllowShortUtterance: pulumi.Bool(true),
},
},
})
if err != nil {
return err
}
return nil
})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;
return await Deployment.RunAsync(() =>
{
var cesAppForGuardrail = new Gcp.Ces.App("ces_app_for_guardrail", new()
{
AppId = "app-id",
Location = "us",
Description = "App used as parent for CES Toolset example",
DisplayName = "my-app",
LanguageSettings = new Gcp.Ces.Inputs.AppLanguageSettingsArgs
{
DefaultLanguageCode = "en-US",
SupportedLanguageCodes = new[]
{
"es-ES",
"fr-FR",
},
EnableMultilingualSupport = true,
FallbackAction = "escalate",
},
TimeZoneSettings = new Gcp.Ces.Inputs.AppTimeZoneSettingsArgs
{
TimeZone = "America/Los_Angeles",
},
});
var cesGuardrailGenerativeAnswerLlmPromptSecurity = new Gcp.Ces.Guardrail("ces_guardrail_generative_answer_llm_prompt_security", new()
{
GuardrailId = "guardrail-id",
Location = cesAppForGuardrail.Location,
App = cesAppForGuardrail.AppId,
DisplayName = "my-guardrail",
Description = "Guardrail description",
Action = new Gcp.Ces.Inputs.GuardrailActionArgs
{
GenerativeAnswer = new Gcp.Ces.Inputs.GuardrailActionGenerativeAnswerArgs
{
Prompt = "example_prompt",
},
},
Enabled = true,
LlmPromptSecurity = new Gcp.Ces.Inputs.GuardrailLlmPromptSecurityArgs
{
CustomPolicy = new Gcp.Ces.Inputs.GuardrailLlmPromptSecurityCustomPolicyArgs
{
MaxConversationMessages = 10,
ModelSettings = new Gcp.Ces.Inputs.GuardrailLlmPromptSecurityCustomPolicyModelSettingsArgs
{
Model = "gemini-2.5-flash",
Temperature = 50,
},
Prompt = "example_prompt",
PolicyScope = "USER_QUERY",
FailOpen = true,
AllowShortUtterance = true,
},
},
});
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcp.ces.App;
import com.pulumi.gcp.ces.AppArgs;
import com.pulumi.gcp.ces.inputs.AppLanguageSettingsArgs;
import com.pulumi.gcp.ces.inputs.AppTimeZoneSettingsArgs;
import com.pulumi.gcp.ces.Guardrail;
import com.pulumi.gcp.ces.GuardrailArgs;
import com.pulumi.gcp.ces.inputs.GuardrailActionArgs;
import com.pulumi.gcp.ces.inputs.GuardrailActionGenerativeAnswerArgs;
import com.pulumi.gcp.ces.inputs.GuardrailLlmPromptSecurityArgs;
import com.pulumi.gcp.ces.inputs.GuardrailLlmPromptSecurityCustomPolicyArgs;
import com.pulumi.gcp.ces.inputs.GuardrailLlmPromptSecurityCustomPolicyModelSettingsArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
var cesAppForGuardrail = new App("cesAppForGuardrail", AppArgs.builder()
.appId("app-id")
.location("us")
.description("App used as parent for CES Toolset example")
.displayName("my-app")
.languageSettings(AppLanguageSettingsArgs.builder()
.defaultLanguageCode("en-US")
.supportedLanguageCodes(
"es-ES",
"fr-FR")
.enableMultilingualSupport(true)
.fallbackAction("escalate")
.build())
.timeZoneSettings(AppTimeZoneSettingsArgs.builder()
.timeZone("America/Los_Angeles")
.build())
.build());
var cesGuardrailGenerativeAnswerLlmPromptSecurity = new Guardrail("cesGuardrailGenerativeAnswerLlmPromptSecurity", GuardrailArgs.builder()
.guardrailId("guardrail-id")
.location(cesAppForGuardrail.location())
.app(cesAppForGuardrail.appId())
.displayName("my-guardrail")
.description("Guardrail description")
.action(GuardrailActionArgs.builder()
.generativeAnswer(GuardrailActionGenerativeAnswerArgs.builder()
.prompt("example_prompt")
.build())
.build())
.enabled(true)
.llmPromptSecurity(GuardrailLlmPromptSecurityArgs.builder()
.customPolicy(GuardrailLlmPromptSecurityCustomPolicyArgs.builder()
.maxConversationMessages(10)
.modelSettings(GuardrailLlmPromptSecurityCustomPolicyModelSettingsArgs.builder()
.model("gemini-2.5-flash")
.temperature(50.0)
.build())
.prompt("example_prompt")
.policyScope("USER_QUERY")
.failOpen(true)
.allowShortUtterance(true)
.build())
.build())
.build());
}
}
resources:
cesAppForGuardrail:
type: gcp:ces:App
name: ces_app_for_guardrail
properties:
appId: app-id
location: us
description: App used as parent for CES Toolset example
displayName: my-app
languageSettings:
defaultLanguageCode: en-US
supportedLanguageCodes:
- es-ES
- fr-FR
enableMultilingualSupport: true
fallbackAction: escalate
timeZoneSettings:
timeZone: America/Los_Angeles
cesGuardrailGenerativeAnswerLlmPromptSecurity:
type: gcp:ces:Guardrail
name: ces_guardrail_generative_answer_llm_prompt_security
properties:
guardrailId: guardrail-id
location: ${cesAppForGuardrail.location}
app: ${cesAppForGuardrail.appId}
displayName: my-guardrail
description: Guardrail description
action:
generativeAnswer:
prompt: example_prompt
enabled: true
llmPromptSecurity:
customPolicy:
maxConversationMessages: 10
modelSettings:
model: gemini-2.5-flash
temperature: 50
prompt: example_prompt
policyScope: USER_QUERY
failOpen: true
allowShortUtterance: true
Ces Guardrail Code Callback
import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";
const cesAppForGuardrail = new gcp.ces.App("ces_app_for_guardrail", {
appId: "app-id",
location: "us",
description: "App used as parent for CES Toolset example",
displayName: "my-app",
languageSettings: {
defaultLanguageCode: "en-US",
supportedLanguageCodes: [
"es-ES",
"fr-FR",
],
enableMultilingualSupport: true,
fallbackAction: "escalate",
},
timeZoneSettings: {
timeZone: "America/Los_Angeles",
},
});
const cesGuardrailCodeCallback = new gcp.ces.Guardrail("ces_guardrail_code_callback", {
guardrailId: "guardrail-id",
location: cesAppForGuardrail.location,
app: cesAppForGuardrail.appId,
displayName: "my-guardrail",
description: "Guardrail description",
action: {
generativeAnswer: {
prompt: "example_prompt",
},
},
enabled: true,
codeCallback: {
beforeAgentCallback: {
description: "Example callback",
disabled: true,
pythonCode: `def callback(context):
return {'override': False}`,
},
afterAgentCallback: {
description: "Example callback",
disabled: true,
pythonCode: `def callback(context):
return {'override': False}`,
},
beforeModelCallback: {
description: "Example callback",
disabled: true,
pythonCode: `def callback(context):
return {'override': False}`,
},
afterModelCallback: {
description: "Example callback",
disabled: true,
pythonCode: `def callback(context):
return {'override': False}`,
},
},
});
import pulumi
import pulumi_gcp as gcp
ces_app_for_guardrail = gcp.ces.App("ces_app_for_guardrail",
app_id="app-id",
location="us",
description="App used as parent for CES Toolset example",
display_name="my-app",
language_settings={
"default_language_code": "en-US",
"supported_language_codes": [
"es-ES",
"fr-FR",
],
"enable_multilingual_support": True,
"fallback_action": "escalate",
},
time_zone_settings={
"time_zone": "America/Los_Angeles",
})
ces_guardrail_code_callback = gcp.ces.Guardrail("ces_guardrail_code_callback",
guardrail_id="guardrail-id",
location=ces_app_for_guardrail.location,
app=ces_app_for_guardrail.app_id,
display_name="my-guardrail",
description="Guardrail description",
action={
"generative_answer": {
"prompt": "example_prompt",
},
},
enabled=True,
code_callback={
"before_agent_callback": {
"description": "Example callback",
"disabled": True,
"python_code": """def callback(context):
return {'override': False}""",
},
"after_agent_callback": {
"description": "Example callback",
"disabled": True,
"python_code": """def callback(context):
return {'override': False}""",
},
"before_model_callback": {
"description": "Example callback",
"disabled": True,
"python_code": """def callback(context):
return {'override': False}""",
},
"after_model_callback": {
"description": "Example callback",
"disabled": True,
"python_code": """def callback(context):
return {'override': False}""",
},
})
package main
import (
"github.com/pulumi/pulumi-gcp/sdk/v9/go/gcp/ces"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
cesAppForGuardrail, err := ces.NewApp(ctx, "ces_app_for_guardrail", &ces.AppArgs{
AppId: pulumi.String("app-id"),
Location: pulumi.String("us"),
Description: pulumi.String("App used as parent for CES Toolset example"),
DisplayName: pulumi.String("my-app"),
LanguageSettings: &ces.AppLanguageSettingsArgs{
DefaultLanguageCode: pulumi.String("en-US"),
SupportedLanguageCodes: pulumi.StringArray{
pulumi.String("es-ES"),
pulumi.String("fr-FR"),
},
EnableMultilingualSupport: pulumi.Bool(true),
FallbackAction: pulumi.String("escalate"),
},
TimeZoneSettings: &ces.AppTimeZoneSettingsArgs{
TimeZone: pulumi.String("America/Los_Angeles"),
},
})
if err != nil {
return err
}
_, err = ces.NewGuardrail(ctx, "ces_guardrail_code_callback", &ces.GuardrailArgs{
GuardrailId: pulumi.String("guardrail-id"),
Location: cesAppForGuardrail.Location,
App: cesAppForGuardrail.AppId,
DisplayName: pulumi.String("my-guardrail"),
Description: pulumi.String("Guardrail description"),
Action: &ces.GuardrailActionArgs{
GenerativeAnswer: &ces.GuardrailActionGenerativeAnswerArgs{
Prompt: pulumi.String("example_prompt"),
},
},
Enabled: pulumi.Bool(true),
CodeCallback: &ces.GuardrailCodeCallbackArgs{
BeforeAgentCallback: &ces.GuardrailCodeCallbackBeforeAgentCallbackArgs{
Description: pulumi.String("Example callback"),
Disabled: pulumi.Bool(true),
PythonCode: pulumi.String("def callback(context):\n return {'override': False}"),
},
AfterAgentCallback: &ces.GuardrailCodeCallbackAfterAgentCallbackArgs{
Description: pulumi.String("Example callback"),
Disabled: pulumi.Bool(true),
PythonCode: pulumi.String("def callback(context):\n return {'override': False}"),
},
BeforeModelCallback: &ces.GuardrailCodeCallbackBeforeModelCallbackArgs{
Description: pulumi.String("Example callback"),
Disabled: pulumi.Bool(true),
PythonCode: pulumi.String("def callback(context):\n return {'override': False}"),
},
AfterModelCallback: &ces.GuardrailCodeCallbackAfterModelCallbackArgs{
Description: pulumi.String("Example callback"),
Disabled: pulumi.Bool(true),
PythonCode: pulumi.String("def callback(context):\n return {'override': False}"),
},
},
})
if err != nil {
return err
}
return nil
})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;
return await Deployment.RunAsync(() =>
{
var cesAppForGuardrail = new Gcp.Ces.App("ces_app_for_guardrail", new()
{
AppId = "app-id",
Location = "us",
Description = "App used as parent for CES Toolset example",
DisplayName = "my-app",
LanguageSettings = new Gcp.Ces.Inputs.AppLanguageSettingsArgs
{
DefaultLanguageCode = "en-US",
SupportedLanguageCodes = new[]
{
"es-ES",
"fr-FR",
},
EnableMultilingualSupport = true,
FallbackAction = "escalate",
},
TimeZoneSettings = new Gcp.Ces.Inputs.AppTimeZoneSettingsArgs
{
TimeZone = "America/Los_Angeles",
},
});
var cesGuardrailCodeCallback = new Gcp.Ces.Guardrail("ces_guardrail_code_callback", new()
{
GuardrailId = "guardrail-id",
Location = cesAppForGuardrail.Location,
App = cesAppForGuardrail.AppId,
DisplayName = "my-guardrail",
Description = "Guardrail description",
Action = new Gcp.Ces.Inputs.GuardrailActionArgs
{
GenerativeAnswer = new Gcp.Ces.Inputs.GuardrailActionGenerativeAnswerArgs
{
Prompt = "example_prompt",
},
},
Enabled = true,
CodeCallback = new Gcp.Ces.Inputs.GuardrailCodeCallbackArgs
{
BeforeAgentCallback = new Gcp.Ces.Inputs.GuardrailCodeCallbackBeforeAgentCallbackArgs
{
Description = "Example callback",
Disabled = true,
PythonCode = @"def callback(context):
return {'override': False}",
},
AfterAgentCallback = new Gcp.Ces.Inputs.GuardrailCodeCallbackAfterAgentCallbackArgs
{
Description = "Example callback",
Disabled = true,
PythonCode = @"def callback(context):
return {'override': False}",
},
BeforeModelCallback = new Gcp.Ces.Inputs.GuardrailCodeCallbackBeforeModelCallbackArgs
{
Description = "Example callback",
Disabled = true,
PythonCode = @"def callback(context):
return {'override': False}",
},
AfterModelCallback = new Gcp.Ces.Inputs.GuardrailCodeCallbackAfterModelCallbackArgs
{
Description = "Example callback",
Disabled = true,
PythonCode = @"def callback(context):
return {'override': False}",
},
},
});
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcp.ces.App;
import com.pulumi.gcp.ces.AppArgs;
import com.pulumi.gcp.ces.inputs.AppLanguageSettingsArgs;
import com.pulumi.gcp.ces.inputs.AppTimeZoneSettingsArgs;
import com.pulumi.gcp.ces.Guardrail;
import com.pulumi.gcp.ces.GuardrailArgs;
import com.pulumi.gcp.ces.inputs.GuardrailActionArgs;
import com.pulumi.gcp.ces.inputs.GuardrailActionGenerativeAnswerArgs;
import com.pulumi.gcp.ces.inputs.GuardrailCodeCallbackArgs;
import com.pulumi.gcp.ces.inputs.GuardrailCodeCallbackBeforeAgentCallbackArgs;
import com.pulumi.gcp.ces.inputs.GuardrailCodeCallbackAfterAgentCallbackArgs;
import com.pulumi.gcp.ces.inputs.GuardrailCodeCallbackBeforeModelCallbackArgs;
import com.pulumi.gcp.ces.inputs.GuardrailCodeCallbackAfterModelCallbackArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
var cesAppForGuardrail = new App("cesAppForGuardrail", AppArgs.builder()
.appId("app-id")
.location("us")
.description("App used as parent for CES Toolset example")
.displayName("my-app")
.languageSettings(AppLanguageSettingsArgs.builder()
.defaultLanguageCode("en-US")
.supportedLanguageCodes(
"es-ES",
"fr-FR")
.enableMultilingualSupport(true)
.fallbackAction("escalate")
.build())
.timeZoneSettings(AppTimeZoneSettingsArgs.builder()
.timeZone("America/Los_Angeles")
.build())
.build());
var cesGuardrailCodeCallback = new Guardrail("cesGuardrailCodeCallback", GuardrailArgs.builder()
.guardrailId("guardrail-id")
.location(cesAppForGuardrail.location())
.app(cesAppForGuardrail.appId())
.displayName("my-guardrail")
.description("Guardrail description")
.action(GuardrailActionArgs.builder()
.generativeAnswer(GuardrailActionGenerativeAnswerArgs.builder()
.prompt("example_prompt")
.build())
.build())
.enabled(true)
.codeCallback(GuardrailCodeCallbackArgs.builder()
.beforeAgentCallback(GuardrailCodeCallbackBeforeAgentCallbackArgs.builder()
.description("Example callback")
.disabled(true)
.pythonCode("""
def callback(context):
return {'override': False} """)
.build())
.afterAgentCallback(GuardrailCodeCallbackAfterAgentCallbackArgs.builder()
.description("Example callback")
.disabled(true)
.pythonCode("""
def callback(context):
return {'override': False} """)
.build())
.beforeModelCallback(GuardrailCodeCallbackBeforeModelCallbackArgs.builder()
.description("Example callback")
.disabled(true)
.pythonCode("""
def callback(context):
return {'override': False} """)
.build())
.afterModelCallback(GuardrailCodeCallbackAfterModelCallbackArgs.builder()
.description("Example callback")
.disabled(true)
.pythonCode("""
def callback(context):
return {'override': False} """)
.build())
.build())
.build());
}
}
resources:
cesAppForGuardrail:
type: gcp:ces:App
name: ces_app_for_guardrail
properties:
appId: app-id
location: us
description: App used as parent for CES Toolset example
displayName: my-app
languageSettings:
defaultLanguageCode: en-US
supportedLanguageCodes:
- es-ES
- fr-FR
enableMultilingualSupport: true
fallbackAction: escalate
timeZoneSettings:
timeZone: America/Los_Angeles
cesGuardrailCodeCallback:
type: gcp:ces:Guardrail
name: ces_guardrail_code_callback
properties:
guardrailId: guardrail-id
location: ${cesAppForGuardrail.location}
app: ${cesAppForGuardrail.appId}
displayName: my-guardrail
description: Guardrail description
action:
generativeAnswer:
prompt: example_prompt
enabled: true
codeCallback:
beforeAgentCallback:
description: Example callback
disabled: true
pythonCode: |-
def callback(context):
return {'override': False}
afterAgentCallback:
description: Example callback
disabled: true
pythonCode: |-
def callback(context):
return {'override': False}
beforeModelCallback:
description: Example callback
disabled: true
pythonCode: |-
def callback(context):
return {'override': False}
afterModelCallback:
description: Example callback
disabled: true
pythonCode: |-
def callback(context):
return {'override': False}
Ces Guardrail Llm Policy
import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";
const cesAppForGuardrail = new gcp.ces.App("ces_app_for_guardrail", {
appId: "app-id",
location: "us",
description: "App used as parent for CES Toolset example",
displayName: "my-app",
languageSettings: {
defaultLanguageCode: "en-US",
supportedLanguageCodes: [
"es-ES",
"fr-FR",
],
enableMultilingualSupport: true,
fallbackAction: "escalate",
},
timeZoneSettings: {
timeZone: "America/Los_Angeles",
},
});
const cesGuardrailLlmPolicy = new gcp.ces.Guardrail("ces_guardrail_llm_policy", {
guardrailId: "guardrail-id",
location: cesAppForGuardrail.location,
app: cesAppForGuardrail.appId,
displayName: "my-guardrail",
description: "Guardrail description",
action: {
generativeAnswer: {
prompt: "example_prompt",
},
},
enabled: true,
llmPolicy: {
maxConversationMessages: 10,
modelSettings: {
model: "gemini-2.5-flash",
temperature: 50,
},
prompt: "example_prompt",
policyScope: "USER_QUERY",
failOpen: true,
allowShortUtterance: true,
},
});
import pulumi
import pulumi_gcp as gcp
ces_app_for_guardrail = gcp.ces.App("ces_app_for_guardrail",
app_id="app-id",
location="us",
description="App used as parent for CES Toolset example",
display_name="my-app",
language_settings={
"default_language_code": "en-US",
"supported_language_codes": [
"es-ES",
"fr-FR",
],
"enable_multilingual_support": True,
"fallback_action": "escalate",
},
time_zone_settings={
"time_zone": "America/Los_Angeles",
})
ces_guardrail_llm_policy = gcp.ces.Guardrail("ces_guardrail_llm_policy",
guardrail_id="guardrail-id",
location=ces_app_for_guardrail.location,
app=ces_app_for_guardrail.app_id,
display_name="my-guardrail",
description="Guardrail description",
action={
"generative_answer": {
"prompt": "example_prompt",
},
},
enabled=True,
llm_policy={
"max_conversation_messages": 10,
"model_settings": {
"model": "gemini-2.5-flash",
"temperature": 50,
},
"prompt": "example_prompt",
"policy_scope": "USER_QUERY",
"fail_open": True,
"allow_short_utterance": True,
})
package main
import (
"github.com/pulumi/pulumi-gcp/sdk/v9/go/gcp/ces"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
cesAppForGuardrail, err := ces.NewApp(ctx, "ces_app_for_guardrail", &ces.AppArgs{
AppId: pulumi.String("app-id"),
Location: pulumi.String("us"),
Description: pulumi.String("App used as parent for CES Toolset example"),
DisplayName: pulumi.String("my-app"),
LanguageSettings: &ces.AppLanguageSettingsArgs{
DefaultLanguageCode: pulumi.String("en-US"),
SupportedLanguageCodes: pulumi.StringArray{
pulumi.String("es-ES"),
pulumi.String("fr-FR"),
},
EnableMultilingualSupport: pulumi.Bool(true),
FallbackAction: pulumi.String("escalate"),
},
TimeZoneSettings: &ces.AppTimeZoneSettingsArgs{
TimeZone: pulumi.String("America/Los_Angeles"),
},
})
if err != nil {
return err
}
_, err = ces.NewGuardrail(ctx, "ces_guardrail_llm_policy", &ces.GuardrailArgs{
GuardrailId: pulumi.String("guardrail-id"),
Location: cesAppForGuardrail.Location,
App: cesAppForGuardrail.AppId,
DisplayName: pulumi.String("my-guardrail"),
Description: pulumi.String("Guardrail description"),
Action: &ces.GuardrailActionArgs{
GenerativeAnswer: &ces.GuardrailActionGenerativeAnswerArgs{
Prompt: pulumi.String("example_prompt"),
},
},
Enabled: pulumi.Bool(true),
LlmPolicy: &ces.GuardrailLlmPolicyArgs{
MaxConversationMessages: pulumi.Int(10),
ModelSettings: &ces.GuardrailLlmPolicyModelSettingsArgs{
Model: pulumi.String("gemini-2.5-flash"),
Temperature: pulumi.Float64(50),
},
Prompt: pulumi.String("example_prompt"),
PolicyScope: pulumi.String("USER_QUERY"),
FailOpen: pulumi.Bool(true),
AllowShortUtterance: pulumi.Bool(true),
},
})
if err != nil {
return err
}
return nil
})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;
return await Deployment.RunAsync(() =>
{
var cesAppForGuardrail = new Gcp.Ces.App("ces_app_for_guardrail", new()
{
AppId = "app-id",
Location = "us",
Description = "App used as parent for CES Toolset example",
DisplayName = "my-app",
LanguageSettings = new Gcp.Ces.Inputs.AppLanguageSettingsArgs
{
DefaultLanguageCode = "en-US",
SupportedLanguageCodes = new[]
{
"es-ES",
"fr-FR",
},
EnableMultilingualSupport = true,
FallbackAction = "escalate",
},
TimeZoneSettings = new Gcp.Ces.Inputs.AppTimeZoneSettingsArgs
{
TimeZone = "America/Los_Angeles",
},
});
var cesGuardrailLlmPolicy = new Gcp.Ces.Guardrail("ces_guardrail_llm_policy", new()
{
GuardrailId = "guardrail-id",
Location = cesAppForGuardrail.Location,
App = cesAppForGuardrail.AppId,
DisplayName = "my-guardrail",
Description = "Guardrail description",
Action = new Gcp.Ces.Inputs.GuardrailActionArgs
{
GenerativeAnswer = new Gcp.Ces.Inputs.GuardrailActionGenerativeAnswerArgs
{
Prompt = "example_prompt",
},
},
Enabled = true,
LlmPolicy = new Gcp.Ces.Inputs.GuardrailLlmPolicyArgs
{
MaxConversationMessages = 10,
ModelSettings = new Gcp.Ces.Inputs.GuardrailLlmPolicyModelSettingsArgs
{
Model = "gemini-2.5-flash",
Temperature = 50,
},
Prompt = "example_prompt",
PolicyScope = "USER_QUERY",
FailOpen = true,
AllowShortUtterance = true,
},
});
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcp.ces.App;
import com.pulumi.gcp.ces.AppArgs;
import com.pulumi.gcp.ces.inputs.AppLanguageSettingsArgs;
import com.pulumi.gcp.ces.inputs.AppTimeZoneSettingsArgs;
import com.pulumi.gcp.ces.Guardrail;
import com.pulumi.gcp.ces.GuardrailArgs;
import com.pulumi.gcp.ces.inputs.GuardrailActionArgs;
import com.pulumi.gcp.ces.inputs.GuardrailActionGenerativeAnswerArgs;
import com.pulumi.gcp.ces.inputs.GuardrailLlmPolicyArgs;
import com.pulumi.gcp.ces.inputs.GuardrailLlmPolicyModelSettingsArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
var cesAppForGuardrail = new App("cesAppForGuardrail", AppArgs.builder()
.appId("app-id")
.location("us")
.description("App used as parent for CES Toolset example")
.displayName("my-app")
.languageSettings(AppLanguageSettingsArgs.builder()
.defaultLanguageCode("en-US")
.supportedLanguageCodes(
"es-ES",
"fr-FR")
.enableMultilingualSupport(true)
.fallbackAction("escalate")
.build())
.timeZoneSettings(AppTimeZoneSettingsArgs.builder()
.timeZone("America/Los_Angeles")
.build())
.build());
var cesGuardrailLlmPolicy = new Guardrail("cesGuardrailLlmPolicy", GuardrailArgs.builder()
.guardrailId("guardrail-id")
.location(cesAppForGuardrail.location())
.app(cesAppForGuardrail.appId())
.displayName("my-guardrail")
.description("Guardrail description")
.action(GuardrailActionArgs.builder()
.generativeAnswer(GuardrailActionGenerativeAnswerArgs.builder()
.prompt("example_prompt")
.build())
.build())
.enabled(true)
.llmPolicy(GuardrailLlmPolicyArgs.builder()
.maxConversationMessages(10)
.modelSettings(GuardrailLlmPolicyModelSettingsArgs.builder()
.model("gemini-2.5-flash")
.temperature(50.0)
.build())
.prompt("example_prompt")
.policyScope("USER_QUERY")
.failOpen(true)
.allowShortUtterance(true)
.build())
.build());
}
}
resources:
cesAppForGuardrail:
type: gcp:ces:App
name: ces_app_for_guardrail
properties:
appId: app-id
location: us
description: App used as parent for CES Toolset example
displayName: my-app
languageSettings:
defaultLanguageCode: en-US
supportedLanguageCodes:
- es-ES
- fr-FR
enableMultilingualSupport: true
fallbackAction: escalate
timeZoneSettings:
timeZone: America/Los_Angeles
cesGuardrailLlmPolicy:
type: gcp:ces:Guardrail
name: ces_guardrail_llm_policy
properties:
guardrailId: guardrail-id
location: ${cesAppForGuardrail.location}
app: ${cesAppForGuardrail.appId}
displayName: my-guardrail
description: Guardrail description
action:
generativeAnswer:
prompt: example_prompt
enabled: true
llmPolicy:
maxConversationMessages: 10
modelSettings:
model: gemini-2.5-flash
temperature: 50
prompt: example_prompt
policyScope: USER_QUERY
failOpen: true
allowShortUtterance: true
Create Guardrail Resource
Resources are created with functions called constructors. To learn more about declaring and configuring resources, see Resources.
Constructor syntax
new Guardrail(name: string, args: GuardrailArgs, opts?: CustomResourceOptions);@overload
def Guardrail(resource_name: str,
args: GuardrailArgs,
opts: Optional[ResourceOptions] = None)
@overload
def Guardrail(resource_name: str,
opts: Optional[ResourceOptions] = None,
display_name: Optional[str] = None,
app: Optional[str] = None,
location: Optional[str] = None,
guardrail_id: Optional[str] = None,
enabled: Optional[bool] = None,
description: Optional[str] = None,
action: Optional[GuardrailActionArgs] = None,
content_filter: Optional[GuardrailContentFilterArgs] = None,
llm_policy: Optional[GuardrailLlmPolicyArgs] = None,
llm_prompt_security: Optional[GuardrailLlmPromptSecurityArgs] = None,
code_callback: Optional[GuardrailCodeCallbackArgs] = None,
model_safety: Optional[GuardrailModelSafetyArgs] = None,
project: Optional[str] = None)func NewGuardrail(ctx *Context, name string, args GuardrailArgs, opts ...ResourceOption) (*Guardrail, error)public Guardrail(string name, GuardrailArgs args, CustomResourceOptions? opts = null)
public Guardrail(String name, GuardrailArgs args)
public Guardrail(String name, GuardrailArgs args, CustomResourceOptions options)
type: gcp:ces:Guardrail
properties: # The arguments to resource properties.
options: # Bag of options to control resource's behavior.
Parameters
- name string
- The unique name of the resource.
- args GuardrailArgs
- The arguments to resource properties.
- opts CustomResourceOptions
- Bag of options to control resource's behavior.
- resource_name str
- The unique name of the resource.
- args GuardrailArgs
- The arguments to resource properties.
- opts ResourceOptions
- Bag of options to control resource's behavior.
- ctx Context
- Context object for the current deployment.
- name string
- The unique name of the resource.
- args GuardrailArgs
- The arguments to resource properties.
- opts ResourceOption
- Bag of options to control resource's behavior.
- name string
- The unique name of the resource.
- args GuardrailArgs
- The arguments to resource properties.
- opts CustomResourceOptions
- Bag of options to control resource's behavior.
- name String
- The unique name of the resource.
- args GuardrailArgs
- The arguments to resource properties.
- options CustomResourceOptions
- Bag of options to control resource's behavior.
Constructor example
The following reference example uses placeholder values for all input properties.
var guardrailResource = new Gcp.Ces.Guardrail("guardrailResource", new()
{
DisplayName = "string",
App = "string",
Location = "string",
GuardrailId = "string",
Enabled = false,
Description = "string",
Action = new Gcp.Ces.Inputs.GuardrailActionArgs
{
GenerativeAnswer = new Gcp.Ces.Inputs.GuardrailActionGenerativeAnswerArgs
{
Prompt = "string",
},
RespondImmediately = new Gcp.Ces.Inputs.GuardrailActionRespondImmediatelyArgs
{
Responses = new[]
{
new Gcp.Ces.Inputs.GuardrailActionRespondImmediatelyResponseArgs
{
Text = "string",
Disabled = false,
},
},
},
TransferAgent = new Gcp.Ces.Inputs.GuardrailActionTransferAgentArgs
{
Agent = "string",
},
},
ContentFilter = new Gcp.Ces.Inputs.GuardrailContentFilterArgs
{
MatchType = "string",
BannedContents = new[]
{
"string",
},
BannedContentsInAgentResponses = new[]
{
"string",
},
BannedContentsInUserInputs = new[]
{
"string",
},
DisregardDiacritics = false,
},
LlmPolicy = new Gcp.Ces.Inputs.GuardrailLlmPolicyArgs
{
PolicyScope = "string",
Prompt = "string",
AllowShortUtterance = false,
FailOpen = false,
MaxConversationMessages = 0,
ModelSettings = new Gcp.Ces.Inputs.GuardrailLlmPolicyModelSettingsArgs
{
Model = "string",
Temperature = 0,
},
},
LlmPromptSecurity = new Gcp.Ces.Inputs.GuardrailLlmPromptSecurityArgs
{
CustomPolicy = new Gcp.Ces.Inputs.GuardrailLlmPromptSecurityCustomPolicyArgs
{
PolicyScope = "string",
Prompt = "string",
AllowShortUtterance = false,
FailOpen = false,
MaxConversationMessages = 0,
ModelSettings = new Gcp.Ces.Inputs.GuardrailLlmPromptSecurityCustomPolicyModelSettingsArgs
{
Model = "string",
Temperature = 0,
},
},
DefaultSettings = new Gcp.Ces.Inputs.GuardrailLlmPromptSecurityDefaultSettingsArgs
{
DefaultPromptTemplate = "string",
},
},
CodeCallback = new Gcp.Ces.Inputs.GuardrailCodeCallbackArgs
{
AfterAgentCallback = new Gcp.Ces.Inputs.GuardrailCodeCallbackAfterAgentCallbackArgs
{
PythonCode = "string",
Description = "string",
Disabled = false,
},
AfterModelCallback = new Gcp.Ces.Inputs.GuardrailCodeCallbackAfterModelCallbackArgs
{
PythonCode = "string",
Description = "string",
Disabled = false,
},
BeforeAgentCallback = new Gcp.Ces.Inputs.GuardrailCodeCallbackBeforeAgentCallbackArgs
{
PythonCode = "string",
Description = "string",
Disabled = false,
},
BeforeModelCallback = new Gcp.Ces.Inputs.GuardrailCodeCallbackBeforeModelCallbackArgs
{
PythonCode = "string",
Description = "string",
Disabled = false,
},
},
ModelSafety = new Gcp.Ces.Inputs.GuardrailModelSafetyArgs
{
SafetySettings = new[]
{
new Gcp.Ces.Inputs.GuardrailModelSafetySafetySettingArgs
{
Category = "string",
Threshold = "string",
},
},
},
Project = "string",
});
example, err := ces.NewGuardrail(ctx, "guardrailResource", &ces.GuardrailArgs{
DisplayName: pulumi.String("string"),
App: pulumi.String("string"),
Location: pulumi.String("string"),
GuardrailId: pulumi.String("string"),
Enabled: pulumi.Bool(false),
Description: pulumi.String("string"),
Action: &ces.GuardrailActionArgs{
GenerativeAnswer: &ces.GuardrailActionGenerativeAnswerArgs{
Prompt: pulumi.String("string"),
},
RespondImmediately: &ces.GuardrailActionRespondImmediatelyArgs{
Responses: ces.GuardrailActionRespondImmediatelyResponseArray{
&ces.GuardrailActionRespondImmediatelyResponseArgs{
Text: pulumi.String("string"),
Disabled: pulumi.Bool(false),
},
},
},
TransferAgent: &ces.GuardrailActionTransferAgentArgs{
Agent: pulumi.String("string"),
},
},
ContentFilter: &ces.GuardrailContentFilterArgs{
MatchType: pulumi.String("string"),
BannedContents: pulumi.StringArray{
pulumi.String("string"),
},
BannedContentsInAgentResponses: pulumi.StringArray{
pulumi.String("string"),
},
BannedContentsInUserInputs: pulumi.StringArray{
pulumi.String("string"),
},
DisregardDiacritics: pulumi.Bool(false),
},
LlmPolicy: &ces.GuardrailLlmPolicyArgs{
PolicyScope: pulumi.String("string"),
Prompt: pulumi.String("string"),
AllowShortUtterance: pulumi.Bool(false),
FailOpen: pulumi.Bool(false),
MaxConversationMessages: pulumi.Int(0),
ModelSettings: &ces.GuardrailLlmPolicyModelSettingsArgs{
Model: pulumi.String("string"),
Temperature: pulumi.Float64(0),
},
},
LlmPromptSecurity: &ces.GuardrailLlmPromptSecurityArgs{
CustomPolicy: &ces.GuardrailLlmPromptSecurityCustomPolicyArgs{
PolicyScope: pulumi.String("string"),
Prompt: pulumi.String("string"),
AllowShortUtterance: pulumi.Bool(false),
FailOpen: pulumi.Bool(false),
MaxConversationMessages: pulumi.Int(0),
ModelSettings: &ces.GuardrailLlmPromptSecurityCustomPolicyModelSettingsArgs{
Model: pulumi.String("string"),
Temperature: pulumi.Float64(0),
},
},
DefaultSettings: &ces.GuardrailLlmPromptSecurityDefaultSettingsArgs{
DefaultPromptTemplate: pulumi.String("string"),
},
},
CodeCallback: &ces.GuardrailCodeCallbackArgs{
AfterAgentCallback: &ces.GuardrailCodeCallbackAfterAgentCallbackArgs{
PythonCode: pulumi.String("string"),
Description: pulumi.String("string"),
Disabled: pulumi.Bool(false),
},
AfterModelCallback: &ces.GuardrailCodeCallbackAfterModelCallbackArgs{
PythonCode: pulumi.String("string"),
Description: pulumi.String("string"),
Disabled: pulumi.Bool(false),
},
BeforeAgentCallback: &ces.GuardrailCodeCallbackBeforeAgentCallbackArgs{
PythonCode: pulumi.String("string"),
Description: pulumi.String("string"),
Disabled: pulumi.Bool(false),
},
BeforeModelCallback: &ces.GuardrailCodeCallbackBeforeModelCallbackArgs{
PythonCode: pulumi.String("string"),
Description: pulumi.String("string"),
Disabled: pulumi.Bool(false),
},
},
ModelSafety: &ces.GuardrailModelSafetyArgs{
SafetySettings: ces.GuardrailModelSafetySafetySettingArray{
&ces.GuardrailModelSafetySafetySettingArgs{
Category: pulumi.String("string"),
Threshold: pulumi.String("string"),
},
},
},
Project: pulumi.String("string"),
})
var guardrailResource = new Guardrail("guardrailResource", GuardrailArgs.builder()
.displayName("string")
.app("string")
.location("string")
.guardrailId("string")
.enabled(false)
.description("string")
.action(GuardrailActionArgs.builder()
.generativeAnswer(GuardrailActionGenerativeAnswerArgs.builder()
.prompt("string")
.build())
.respondImmediately(GuardrailActionRespondImmediatelyArgs.builder()
.responses(GuardrailActionRespondImmediatelyResponseArgs.builder()
.text("string")
.disabled(false)
.build())
.build())
.transferAgent(GuardrailActionTransferAgentArgs.builder()
.agent("string")
.build())
.build())
.contentFilter(GuardrailContentFilterArgs.builder()
.matchType("string")
.bannedContents("string")
.bannedContentsInAgentResponses("string")
.bannedContentsInUserInputs("string")
.disregardDiacritics(false)
.build())
.llmPolicy(GuardrailLlmPolicyArgs.builder()
.policyScope("string")
.prompt("string")
.allowShortUtterance(false)
.failOpen(false)
.maxConversationMessages(0)
.modelSettings(GuardrailLlmPolicyModelSettingsArgs.builder()
.model("string")
.temperature(0.0)
.build())
.build())
.llmPromptSecurity(GuardrailLlmPromptSecurityArgs.builder()
.customPolicy(GuardrailLlmPromptSecurityCustomPolicyArgs.builder()
.policyScope("string")
.prompt("string")
.allowShortUtterance(false)
.failOpen(false)
.maxConversationMessages(0)
.modelSettings(GuardrailLlmPromptSecurityCustomPolicyModelSettingsArgs.builder()
.model("string")
.temperature(0.0)
.build())
.build())
.defaultSettings(GuardrailLlmPromptSecurityDefaultSettingsArgs.builder()
.defaultPromptTemplate("string")
.build())
.build())
.codeCallback(GuardrailCodeCallbackArgs.builder()
.afterAgentCallback(GuardrailCodeCallbackAfterAgentCallbackArgs.builder()
.pythonCode("string")
.description("string")
.disabled(false)
.build())
.afterModelCallback(GuardrailCodeCallbackAfterModelCallbackArgs.builder()
.pythonCode("string")
.description("string")
.disabled(false)
.build())
.beforeAgentCallback(GuardrailCodeCallbackBeforeAgentCallbackArgs.builder()
.pythonCode("string")
.description("string")
.disabled(false)
.build())
.beforeModelCallback(GuardrailCodeCallbackBeforeModelCallbackArgs.builder()
.pythonCode("string")
.description("string")
.disabled(false)
.build())
.build())
.modelSafety(GuardrailModelSafetyArgs.builder()
.safetySettings(GuardrailModelSafetySafetySettingArgs.builder()
.category("string")
.threshold("string")
.build())
.build())
.project("string")
.build());
guardrail_resource = gcp.ces.Guardrail("guardrailResource",
display_name="string",
app="string",
location="string",
guardrail_id="string",
enabled=False,
description="string",
action={
"generative_answer": {
"prompt": "string",
},
"respond_immediately": {
"responses": [{
"text": "string",
"disabled": False,
}],
},
"transfer_agent": {
"agent": "string",
},
},
content_filter={
"match_type": "string",
"banned_contents": ["string"],
"banned_contents_in_agent_responses": ["string"],
"banned_contents_in_user_inputs": ["string"],
"disregard_diacritics": False,
},
llm_policy={
"policy_scope": "string",
"prompt": "string",
"allow_short_utterance": False,
"fail_open": False,
"max_conversation_messages": 0,
"model_settings": {
"model": "string",
"temperature": 0,
},
},
llm_prompt_security={
"custom_policy": {
"policy_scope": "string",
"prompt": "string",
"allow_short_utterance": False,
"fail_open": False,
"max_conversation_messages": 0,
"model_settings": {
"model": "string",
"temperature": 0,
},
},
"default_settings": {
"default_prompt_template": "string",
},
},
code_callback={
"after_agent_callback": {
"python_code": "string",
"description": "string",
"disabled": False,
},
"after_model_callback": {
"python_code": "string",
"description": "string",
"disabled": False,
},
"before_agent_callback": {
"python_code": "string",
"description": "string",
"disabled": False,
},
"before_model_callback": {
"python_code": "string",
"description": "string",
"disabled": False,
},
},
model_safety={
"safety_settings": [{
"category": "string",
"threshold": "string",
}],
},
project="string")
const guardrailResource = new gcp.ces.Guardrail("guardrailResource", {
displayName: "string",
app: "string",
location: "string",
guardrailId: "string",
enabled: false,
description: "string",
action: {
generativeAnswer: {
prompt: "string",
},
respondImmediately: {
responses: [{
text: "string",
disabled: false,
}],
},
transferAgent: {
agent: "string",
},
},
contentFilter: {
matchType: "string",
bannedContents: ["string"],
bannedContentsInAgentResponses: ["string"],
bannedContentsInUserInputs: ["string"],
disregardDiacritics: false,
},
llmPolicy: {
policyScope: "string",
prompt: "string",
allowShortUtterance: false,
failOpen: false,
maxConversationMessages: 0,
modelSettings: {
model: "string",
temperature: 0,
},
},
llmPromptSecurity: {
customPolicy: {
policyScope: "string",
prompt: "string",
allowShortUtterance: false,
failOpen: false,
maxConversationMessages: 0,
modelSettings: {
model: "string",
temperature: 0,
},
},
defaultSettings: {
defaultPromptTemplate: "string",
},
},
codeCallback: {
afterAgentCallback: {
pythonCode: "string",
description: "string",
disabled: false,
},
afterModelCallback: {
pythonCode: "string",
description: "string",
disabled: false,
},
beforeAgentCallback: {
pythonCode: "string",
description: "string",
disabled: false,
},
beforeModelCallback: {
pythonCode: "string",
description: "string",
disabled: false,
},
},
modelSafety: {
safetySettings: [{
category: "string",
threshold: "string",
}],
},
project: "string",
});
type: gcp:ces:Guardrail
properties:
action:
generativeAnswer:
prompt: string
respondImmediately:
responses:
- disabled: false
text: string
transferAgent:
agent: string
app: string
codeCallback:
afterAgentCallback:
description: string
disabled: false
pythonCode: string
afterModelCallback:
description: string
disabled: false
pythonCode: string
beforeAgentCallback:
description: string
disabled: false
pythonCode: string
beforeModelCallback:
description: string
disabled: false
pythonCode: string
contentFilter:
bannedContents:
- string
bannedContentsInAgentResponses:
- string
bannedContentsInUserInputs:
- string
disregardDiacritics: false
matchType: string
description: string
displayName: string
enabled: false
guardrailId: string
llmPolicy:
allowShortUtterance: false
failOpen: false
maxConversationMessages: 0
modelSettings:
model: string
temperature: 0
policyScope: string
prompt: string
llmPromptSecurity:
customPolicy:
allowShortUtterance: false
failOpen: false
maxConversationMessages: 0
modelSettings:
model: string
temperature: 0
policyScope: string
prompt: string
defaultSettings:
defaultPromptTemplate: string
location: string
modelSafety:
safetySettings:
- category: string
threshold: string
project: string
Guardrail Resource Properties
To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.
Inputs
In Python, inputs that are objects can be passed either as argument classes or as dictionary literals.
The Guardrail resource accepts the following input properties:
- App string
- Resource ID segment making up resource
name. It identifies the resource within its parent collection as described in https://google.aip.dev/122. - Display
Name string - Display name of the guardrail.
- Guardrail
Id string - The ID to use for the guardrail, which will become the final component of the guardrail's resource name. If not provided, a unique ID will be automatically assigned for the guardrail.
- Location string
- Resource ID segment making up resource
name. It identifies the resource within its parent collection as described in https://google.aip.dev/122. - Action
Guardrail
Action - Action that is taken when a certain precondition is met. Structure is documented below.
- Code
Callback GuardrailCode Callback - Guardrail that blocks the conversation based on the code callbacks provided. Structure is documented below.
- Content
Filter GuardrailContent Filter - Guardrail that bans certain content from being used in the conversation. Structure is documented below.
- Description string
- Description of the guardrail.
- Enabled bool
- Whether the guardrail is enabled.
- Llm
Policy GuardrailLlm Policy - Guardrail that blocks the conversation if the LLM response is considered violating the policy based on the LLM classification. Structure is documented below.
- Llm
Prompt GuardrailSecurity Llm Prompt Security - Guardrail that blocks the conversation if the input is considered unsafe based on the LLM classification. Structure is documented below.
- Model
Safety GuardrailModel Safety - Model safety settings overrides. When this is set, it will override the default settings and trigger the guardrail if the response is considered unsafe. Structure is documented below.
- Project string
- The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- App string
- Resource ID segment making up resource
name. It identifies the resource within its parent collection as described in https://google.aip.dev/122. - Display
Name string - Display name of the guardrail.
- Guardrail
Id string - The ID to use for the guardrail, which will become the final component of the guardrail's resource name. If not provided, a unique ID will be automatically assigned for the guardrail.
- Location string
- Resource ID segment making up resource
name. It identifies the resource within its parent collection as described in https://google.aip.dev/122. - Action
Guardrail
Action Args - Action that is taken when a certain precondition is met. Structure is documented below.
- Code
Callback GuardrailCode Callback Args - Guardrail that blocks the conversation based on the code callbacks provided. Structure is documented below.
- Content
Filter GuardrailContent Filter Args - Guardrail that bans certain content from being used in the conversation. Structure is documented below.
- Description string
- Description of the guardrail.
- Enabled bool
- Whether the guardrail is enabled.
- Llm
Policy GuardrailLlm Policy Args - Guardrail that blocks the conversation if the LLM response is considered violating the policy based on the LLM classification. Structure is documented below.
- Llm
Prompt GuardrailSecurity Llm Prompt Security Args - Guardrail that blocks the conversation if the input is considered unsafe based on the LLM classification. Structure is documented below.
- Model
Safety GuardrailModel Safety Args - Model safety settings overrides. When this is set, it will override the default settings and trigger the guardrail if the response is considered unsafe. Structure is documented below.
- Project string
- The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- app String
- Resource ID segment making up resource
name. It identifies the resource within its parent collection as described in https://google.aip.dev/122. - display
Name String - Display name of the guardrail.
- guardrail
Id String - The ID to use for the guardrail, which will become the final component of the guardrail's resource name. If not provided, a unique ID will be automatically assigned for the guardrail.
- location String
- Resource ID segment making up resource
name. It identifies the resource within its parent collection as described in https://google.aip.dev/122. - action
Guardrail
Action - Action that is taken when a certain precondition is met. Structure is documented below.
- code
Callback GuardrailCode Callback - Guardrail that blocks the conversation based on the code callbacks provided. Structure is documented below.
- content
Filter GuardrailContent Filter - Guardrail that bans certain content from being used in the conversation. Structure is documented below.
- description String
- Description of the guardrail.
- enabled Boolean
- Whether the guardrail is enabled.
- llm
Policy GuardrailLlm Policy - Guardrail that blocks the conversation if the LLM response is considered violating the policy based on the LLM classification. Structure is documented below.
- llm
Prompt GuardrailSecurity Llm Prompt Security - Guardrail that blocks the conversation if the input is considered unsafe based on the LLM classification. Structure is documented below.
- model
Safety GuardrailModel Safety - Model safety settings overrides. When this is set, it will override the default settings and trigger the guardrail if the response is considered unsafe. Structure is documented below.
- project String
- The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- app string
- Resource ID segment making up resource
name. It identifies the resource within its parent collection as described in https://google.aip.dev/122. - display
Name string - Display name of the guardrail.
- guardrail
Id string - The ID to use for the guardrail, which will become the final component of the guardrail's resource name. If not provided, a unique ID will be automatically assigned for the guardrail.
- location string
- Resource ID segment making up resource
name. It identifies the resource within its parent collection as described in https://google.aip.dev/122. - action
Guardrail
Action - Action that is taken when a certain precondition is met. Structure is documented below.
- code
Callback GuardrailCode Callback - Guardrail that blocks the conversation based on the code callbacks provided. Structure is documented below.
- content
Filter GuardrailContent Filter - Guardrail that bans certain content from being used in the conversation. Structure is documented below.
- description string
- Description of the guardrail.
- enabled boolean
- Whether the guardrail is enabled.
- llm
Policy GuardrailLlm Policy - Guardrail that blocks the conversation if the LLM response is considered violating the policy based on the LLM classification. Structure is documented below.
- llm
Prompt GuardrailSecurity Llm Prompt Security - Guardrail that blocks the conversation if the input is considered unsafe based on the LLM classification. Structure is documented below.
- model
Safety GuardrailModel Safety - Model safety settings overrides. When this is set, it will override the default settings and trigger the guardrail if the response is considered unsafe. Structure is documented below.
- project string
- The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- app str
- Resource ID segment making up resource
name. It identifies the resource within its parent collection as described in https://google.aip.dev/122. - display_
name str - Display name of the guardrail.
- guardrail_
id str - The ID to use for the guardrail, which will become the final component of the guardrail's resource name. If not provided, a unique ID will be automatically assigned for the guardrail.
- location str
- Resource ID segment making up resource
name. It identifies the resource within its parent collection as described in https://google.aip.dev/122. - action
Guardrail
Action Args - Action that is taken when a certain precondition is met. Structure is documented below.
- code_
callback GuardrailCode Callback Args - Guardrail that blocks the conversation based on the code callbacks provided. Structure is documented below.
- content_
filter GuardrailContent Filter Args - Guardrail that bans certain content from being used in the conversation. Structure is documented below.
- description str
- Description of the guardrail.
- enabled bool
- Whether the guardrail is enabled.
- llm_
policy GuardrailLlm Policy Args - Guardrail that blocks the conversation if the LLM response is considered violating the policy based on the LLM classification. Structure is documented below.
- llm_
prompt_ Guardrailsecurity Llm Prompt Security Args - Guardrail that blocks the conversation if the input is considered unsafe based on the LLM classification. Structure is documented below.
- model_
safety GuardrailModel Safety Args - Model safety settings overrides. When this is set, it will override the default settings and trigger the guardrail if the response is considered unsafe. Structure is documented below.
- project str
- The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- app String
- Resource ID segment making up resource
name. It identifies the resource within its parent collection as described in https://google.aip.dev/122. - display
Name String - Display name of the guardrail.
- guardrail
Id String - The ID to use for the guardrail, which will become the final component of the guardrail's resource name. If not provided, a unique ID will be automatically assigned for the guardrail.
- location String
- Resource ID segment making up resource
name. It identifies the resource within its parent collection as described in https://google.aip.dev/122. - action Property Map
- Action that is taken when a certain precondition is met. Structure is documented below.
- code
Callback Property Map - Guardrail that blocks the conversation based on the code callbacks provided. Structure is documented below.
- content
Filter Property Map - Guardrail that bans certain content from being used in the conversation. Structure is documented below.
- description String
- Description of the guardrail.
- enabled Boolean
- Whether the guardrail is enabled.
- llm
Policy Property Map - Guardrail that blocks the conversation if the LLM response is considered violating the policy based on the LLM classification. Structure is documented below.
- llm
Prompt Property MapSecurity - Guardrail that blocks the conversation if the input is considered unsafe based on the LLM classification. Structure is documented below.
- model
Safety Property Map - Model safety settings overrides. When this is set, it will override the default settings and trigger the guardrail if the response is considered unsafe. Structure is documented below.
- project String
- The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
Outputs
All input properties are implicitly available as output properties. Additionally, the Guardrail resource produces the following output properties:
- Create
Time string - Timestamp when the guardrail was created.
- Etag string
- Etag used to ensure the object hasn't changed during a read-modify-write operation. If the etag is empty, the update will overwrite any concurrent changes.
- Id string
- The provider-assigned unique ID for this managed resource.
- Name string
- Identifier. The unique identifier of the guardrail.
Format:
projects/{project}/locations/{location}/apps/{app}/guardrails/{guardrail} - Update
Time string - Timestamp when the guardrail was last updated.
- Create
Time string - Timestamp when the guardrail was created.
- Etag string
- Etag used to ensure the object hasn't changed during a read-modify-write operation. If the etag is empty, the update will overwrite any concurrent changes.
- Id string
- The provider-assigned unique ID for this managed resource.
- Name string
- Identifier. The unique identifier of the guardrail.
Format:
projects/{project}/locations/{location}/apps/{app}/guardrails/{guardrail} - Update
Time string - Timestamp when the guardrail was last updated.
- create
Time String - Timestamp when the guardrail was created.
- etag String
- Etag used to ensure the object hasn't changed during a read-modify-write operation. If the etag is empty, the update will overwrite any concurrent changes.
- id String
- The provider-assigned unique ID for this managed resource.
- name String
- Identifier. The unique identifier of the guardrail.
Format:
projects/{project}/locations/{location}/apps/{app}/guardrails/{guardrail} - update
Time String - Timestamp when the guardrail was last updated.
- create
Time string - Timestamp when the guardrail was created.
- etag string
- Etag used to ensure the object hasn't changed during a read-modify-write operation. If the etag is empty, the update will overwrite any concurrent changes.
- id string
- The provider-assigned unique ID for this managed resource.
- name string
- Identifier. The unique identifier of the guardrail.
Format:
projects/{project}/locations/{location}/apps/{app}/guardrails/{guardrail} - update
Time string - Timestamp when the guardrail was last updated.
- create_
time str - Timestamp when the guardrail was created.
- etag str
- Etag used to ensure the object hasn't changed during a read-modify-write operation. If the etag is empty, the update will overwrite any concurrent changes.
- id str
- The provider-assigned unique ID for this managed resource.
- name str
- Identifier. The unique identifier of the guardrail.
Format:
projects/{project}/locations/{location}/apps/{app}/guardrails/{guardrail} - update_
time str - Timestamp when the guardrail was last updated.
- create
Time String - Timestamp when the guardrail was created.
- etag String
- Etag used to ensure the object hasn't changed during a read-modify-write operation. If the etag is empty, the update will overwrite any concurrent changes.
- id String
- The provider-assigned unique ID for this managed resource.
- name String
- Identifier. The unique identifier of the guardrail.
Format:
projects/{project}/locations/{location}/apps/{app}/guardrails/{guardrail} - update
Time String - Timestamp when the guardrail was last updated.
Look up Existing Guardrail Resource
Get an existing Guardrail resource’s state with the given name, ID, and optional extra properties used to qualify the lookup.
public static get(name: string, id: Input<ID>, state?: GuardrailState, opts?: CustomResourceOptions): Guardrail@staticmethod
def get(resource_name: str,
id: str,
opts: Optional[ResourceOptions] = None,
action: Optional[GuardrailActionArgs] = None,
app: Optional[str] = None,
code_callback: Optional[GuardrailCodeCallbackArgs] = None,
content_filter: Optional[GuardrailContentFilterArgs] = None,
create_time: Optional[str] = None,
description: Optional[str] = None,
display_name: Optional[str] = None,
enabled: Optional[bool] = None,
etag: Optional[str] = None,
guardrail_id: Optional[str] = None,
llm_policy: Optional[GuardrailLlmPolicyArgs] = None,
llm_prompt_security: Optional[GuardrailLlmPromptSecurityArgs] = None,
location: Optional[str] = None,
model_safety: Optional[GuardrailModelSafetyArgs] = None,
name: Optional[str] = None,
project: Optional[str] = None,
update_time: Optional[str] = None) -> Guardrailfunc GetGuardrail(ctx *Context, name string, id IDInput, state *GuardrailState, opts ...ResourceOption) (*Guardrail, error)public static Guardrail Get(string name, Input<string> id, GuardrailState? state, CustomResourceOptions? opts = null)public static Guardrail get(String name, Output<String> id, GuardrailState state, CustomResourceOptions options)resources: _: type: gcp:ces:Guardrail get: id: ${id}- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- resource_name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- Action
Guardrail
Action - Action that is taken when a certain precondition is met. Structure is documented below.
- App string
- Resource ID segment making up resource
name. It identifies the resource within its parent collection as described in https://google.aip.dev/122. - Code
Callback GuardrailCode Callback - Guardrail that blocks the conversation based on the code callbacks provided. Structure is documented below.
- Content
Filter GuardrailContent Filter - Guardrail that bans certain content from being used in the conversation. Structure is documented below.
- Create
Time string - Timestamp when the guardrail was created.
- Description string
- Description of the guardrail.
- Display
Name string - Display name of the guardrail.
- Enabled bool
- Whether the guardrail is enabled.
- Etag string
- Etag used to ensure the object hasn't changed during a read-modify-write operation. If the etag is empty, the update will overwrite any concurrent changes.
- Guardrail
Id string - The ID to use for the guardrail, which will become the final component of the guardrail's resource name. If not provided, a unique ID will be automatically assigned for the guardrail.
- Llm
Policy GuardrailLlm Policy - Guardrail that blocks the conversation if the LLM response is considered violating the policy based on the LLM classification. Structure is documented below.
- Llm
Prompt GuardrailSecurity Llm Prompt Security - Guardrail that blocks the conversation if the input is considered unsafe based on the LLM classification. Structure is documented below.
- Location string
- Resource ID segment making up resource
name. It identifies the resource within its parent collection as described in https://google.aip.dev/122. - Model
Safety GuardrailModel Safety - Model safety settings overrides. When this is set, it will override the default settings and trigger the guardrail if the response is considered unsafe. Structure is documented below.
- Name string
- Identifier. The unique identifier of the guardrail.
Format:
projects/{project}/locations/{location}/apps/{app}/guardrails/{guardrail} - Project string
- The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- Update
Time string - Timestamp when the guardrail was last updated.
- Action
Guardrail
Action Args - Action that is taken when a certain precondition is met. Structure is documented below.
- App string
- Resource ID segment making up resource
name. It identifies the resource within its parent collection as described in https://google.aip.dev/122. - Code
Callback GuardrailCode Callback Args - Guardrail that blocks the conversation based on the code callbacks provided. Structure is documented below.
- Content
Filter GuardrailContent Filter Args - Guardrail that bans certain content from being used in the conversation. Structure is documented below.
- Create
Time string - Timestamp when the guardrail was created.
- Description string
- Description of the guardrail.
- Display
Name string - Display name of the guardrail.
- Enabled bool
- Whether the guardrail is enabled.
- Etag string
- Etag used to ensure the object hasn't changed during a read-modify-write operation. If the etag is empty, the update will overwrite any concurrent changes.
- Guardrail
Id string - The ID to use for the guardrail, which will become the final component of the guardrail's resource name. If not provided, a unique ID will be automatically assigned for the guardrail.
- Llm
Policy GuardrailLlm Policy Args - Guardrail that blocks the conversation if the LLM response is considered violating the policy based on the LLM classification. Structure is documented below.
- Llm
Prompt GuardrailSecurity Llm Prompt Security Args - Guardrail that blocks the conversation if the input is considered unsafe based on the LLM classification. Structure is documented below.
- Location string
- Resource ID segment making up resource
name. It identifies the resource within its parent collection as described in https://google.aip.dev/122. - Model
Safety GuardrailModel Safety Args - Model safety settings overrides. When this is set, it will override the default settings and trigger the guardrail if the response is considered unsafe. Structure is documented below.
- Name string
- Identifier. The unique identifier of the guardrail.
Format:
projects/{project}/locations/{location}/apps/{app}/guardrails/{guardrail} - Project string
- The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- Update
Time string - Timestamp when the guardrail was last updated.
- action
Guardrail
Action - Action that is taken when a certain precondition is met. Structure is documented below.
- app String
- Resource ID segment making up resource
name. It identifies the resource within its parent collection as described in https://google.aip.dev/122. - code
Callback GuardrailCode Callback - Guardrail that blocks the conversation based on the code callbacks provided. Structure is documented below.
- content
Filter GuardrailContent Filter - Guardrail that bans certain content from being used in the conversation. Structure is documented below.
- create
Time String - Timestamp when the guardrail was created.
- description String
- Description of the guardrail.
- display
Name String - Display name of the guardrail.
- enabled Boolean
- Whether the guardrail is enabled.
- etag String
- Etag used to ensure the object hasn't changed during a read-modify-write operation. If the etag is empty, the update will overwrite any concurrent changes.
- guardrail
Id String - The ID to use for the guardrail, which will become the final component of the guardrail's resource name. If not provided, a unique ID will be automatically assigned for the guardrail.
- llm
Policy GuardrailLlm Policy - Guardrail that blocks the conversation if the LLM response is considered violating the policy based on the LLM classification. Structure is documented below.
- llm
Prompt GuardrailSecurity Llm Prompt Security - Guardrail that blocks the conversation if the input is considered unsafe based on the LLM classification. Structure is documented below.
- location String
- Resource ID segment making up resource
name. It identifies the resource within its parent collection as described in https://google.aip.dev/122. - model
Safety GuardrailModel Safety - Model safety settings overrides. When this is set, it will override the default settings and trigger the guardrail if the response is considered unsafe. Structure is documented below.
- name String
- Identifier. The unique identifier of the guardrail.
Format:
projects/{project}/locations/{location}/apps/{app}/guardrails/{guardrail} - project String
- The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- update
Time String - Timestamp when the guardrail was last updated.
- action
Guardrail
Action - Action that is taken when a certain precondition is met. Structure is documented below.
- app string
- Resource ID segment making up resource
name. It identifies the resource within its parent collection as described in https://google.aip.dev/122. - code
Callback GuardrailCode Callback - Guardrail that blocks the conversation based on the code callbacks provided. Structure is documented below.
- content
Filter GuardrailContent Filter - Guardrail that bans certain content from being used in the conversation. Structure is documented below.
- create
Time string - Timestamp when the guardrail was created.
- description string
- Description of the guardrail.
- display
Name string - Display name of the guardrail.
- enabled boolean
- Whether the guardrail is enabled.
- etag string
- Etag used to ensure the object hasn't changed during a read-modify-write operation. If the etag is empty, the update will overwrite any concurrent changes.
- guardrail
Id string - The ID to use for the guardrail, which will become the final component of the guardrail's resource name. If not provided, a unique ID will be automatically assigned for the guardrail.
- llm
Policy GuardrailLlm Policy - Guardrail that blocks the conversation if the LLM response is considered violating the policy based on the LLM classification. Structure is documented below.
- llm
Prompt GuardrailSecurity Llm Prompt Security - Guardrail that blocks the conversation if the input is considered unsafe based on the LLM classification. Structure is documented below.
- location string
- Resource ID segment making up resource
name. It identifies the resource within its parent collection as described in https://google.aip.dev/122. - model
Safety GuardrailModel Safety - Model safety settings overrides. When this is set, it will override the default settings and trigger the guardrail if the response is considered unsafe. Structure is documented below.
- name string
- Identifier. The unique identifier of the guardrail.
Format:
projects/{project}/locations/{location}/apps/{app}/guardrails/{guardrail} - project string
- The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- update
Time string - Timestamp when the guardrail was last updated.
- action
Guardrail
Action Args - Action that is taken when a certain precondition is met. Structure is documented below.
- app str
- Resource ID segment making up resource
name. It identifies the resource within its parent collection as described in https://google.aip.dev/122. - code_
callback GuardrailCode Callback Args - Guardrail that blocks the conversation based on the code callbacks provided. Structure is documented below.
- content_
filter GuardrailContent Filter Args - Guardrail that bans certain content from being used in the conversation. Structure is documented below.
- create_
time str - Timestamp when the guardrail was created.
- description str
- Description of the guardrail.
- display_
name str - Display name of the guardrail.
- enabled bool
- Whether the guardrail is enabled.
- etag str
- Etag used to ensure the object hasn't changed during a read-modify-write operation. If the etag is empty, the update will overwrite any concurrent changes.
- guardrail_
id str - The ID to use for the guardrail, which will become the final component of the guardrail's resource name. If not provided, a unique ID will be automatically assigned for the guardrail.
- llm_
policy GuardrailLlm Policy Args - Guardrail that blocks the conversation if the LLM response is considered violating the policy based on the LLM classification. Structure is documented below.
- llm_
prompt_ Guardrailsecurity Llm Prompt Security Args - Guardrail that blocks the conversation if the input is considered unsafe based on the LLM classification. Structure is documented below.
- location str
- Resource ID segment making up resource
name. It identifies the resource within its parent collection as described in https://google.aip.dev/122. - model_
safety GuardrailModel Safety Args - Model safety settings overrides. When this is set, it will override the default settings and trigger the guardrail if the response is considered unsafe. Structure is documented below.
- name str
- Identifier. The unique identifier of the guardrail.
Format:
projects/{project}/locations/{location}/apps/{app}/guardrails/{guardrail} - project str
- The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- update_
time str - Timestamp when the guardrail was last updated.
- action Property Map
- Action that is taken when a certain precondition is met. Structure is documented below.
- app String
- Resource ID segment making up resource
name. It identifies the resource within its parent collection as described in https://google.aip.dev/122. - code
Callback Property Map - Guardrail that blocks the conversation based on the code callbacks provided. Structure is documented below.
- content
Filter Property Map - Guardrail that bans certain content from being used in the conversation. Structure is documented below.
- create
Time String - Timestamp when the guardrail was created.
- description String
- Description of the guardrail.
- display
Name String - Display name of the guardrail.
- enabled Boolean
- Whether the guardrail is enabled.
- etag String
- Etag used to ensure the object hasn't changed during a read-modify-write operation. If the etag is empty, the update will overwrite any concurrent changes.
- guardrail
Id String - The ID to use for the guardrail, which will become the final component of the guardrail's resource name. If not provided, a unique ID will be automatically assigned for the guardrail.
- llm
Policy Property Map - Guardrail that blocks the conversation if the LLM response is considered violating the policy based on the LLM classification. Structure is documented below.
- llm
Prompt Property MapSecurity - Guardrail that blocks the conversation if the input is considered unsafe based on the LLM classification. Structure is documented below.
- location String
- Resource ID segment making up resource
name. It identifies the resource within its parent collection as described in https://google.aip.dev/122. - model
Safety Property Map - Model safety settings overrides. When this is set, it will override the default settings and trigger the guardrail if the response is considered unsafe. Structure is documented below.
- name String
- Identifier. The unique identifier of the guardrail.
Format:
projects/{project}/locations/{location}/apps/{app}/guardrails/{guardrail} - project String
- The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- update
Time String - Timestamp when the guardrail was last updated.
Supporting Types
GuardrailAction, GuardrailActionArgs
- Generative
Answer GuardrailAction Generative Answer - The agent will immediately respond with a generative answer. Structure is documented below.
- Respond
Immediately GuardrailAction Respond Immediately - The agent will immediately respond with a preconfigured response. Structure is documented below.
- Transfer
Agent GuardrailAction Transfer Agent - The agent will transfer the conversation to a different agent. Structure is documented below.
- Generative
Answer GuardrailAction Generative Answer - The agent will immediately respond with a generative answer. Structure is documented below.
- Respond
Immediately GuardrailAction Respond Immediately - The agent will immediately respond with a preconfigured response. Structure is documented below.
- Transfer
Agent GuardrailAction Transfer Agent - The agent will transfer the conversation to a different agent. Structure is documented below.
- generative
Answer GuardrailAction Generative Answer - The agent will immediately respond with a generative answer. Structure is documented below.
- respond
Immediately GuardrailAction Respond Immediately - The agent will immediately respond with a preconfigured response. Structure is documented below.
- transfer
Agent GuardrailAction Transfer Agent - The agent will transfer the conversation to a different agent. Structure is documented below.
- generative
Answer GuardrailAction Generative Answer - The agent will immediately respond with a generative answer. Structure is documented below.
- respond
Immediately GuardrailAction Respond Immediately - The agent will immediately respond with a preconfigured response. Structure is documented below.
- transfer
Agent GuardrailAction Transfer Agent - The agent will transfer the conversation to a different agent. Structure is documented below.
- generative_
answer GuardrailAction Generative Answer - The agent will immediately respond with a generative answer. Structure is documented below.
- respond_
immediately GuardrailAction Respond Immediately - The agent will immediately respond with a preconfigured response. Structure is documented below.
- transfer_
agent GuardrailAction Transfer Agent - The agent will transfer the conversation to a different agent. Structure is documented below.
- generative
Answer Property Map - The agent will immediately respond with a generative answer. Structure is documented below.
- respond
Immediately Property Map - The agent will immediately respond with a preconfigured response. Structure is documented below.
- transfer
Agent Property Map - The agent will transfer the conversation to a different agent. Structure is documented below.
GuardrailActionGenerativeAnswer, GuardrailActionGenerativeAnswerArgs
- Prompt string
- The prompt to use for the generative answer.
- Prompt string
- The prompt to use for the generative answer.
- prompt String
- The prompt to use for the generative answer.
- prompt string
- The prompt to use for the generative answer.
- prompt str
- The prompt to use for the generative answer.
- prompt String
- The prompt to use for the generative answer.
GuardrailActionRespondImmediately, GuardrailActionRespondImmediatelyArgs
- Responses
List<Guardrail
Action Respond Immediately Response> - The canned responses for the agent to choose from. The response is chosen randomly. Structure is documented below.
- Responses
[]Guardrail
Action Respond Immediately Response - The canned responses for the agent to choose from. The response is chosen randomly. Structure is documented below.
- responses
List<Guardrail
Action Respond Immediately Response> - The canned responses for the agent to choose from. The response is chosen randomly. Structure is documented below.
- responses
Guardrail
Action Respond Immediately Response[] - The canned responses for the agent to choose from. The response is chosen randomly. Structure is documented below.
- responses
Sequence[Guardrail
Action Respond Immediately Response] - The canned responses for the agent to choose from. The response is chosen randomly. Structure is documented below.
- responses List<Property Map>
- The canned responses for the agent to choose from. The response is chosen randomly. Structure is documented below.
GuardrailActionRespondImmediatelyResponse, GuardrailActionRespondImmediatelyResponseArgs
GuardrailActionTransferAgent, GuardrailActionTransferAgentArgs
- Agent string
- The name of the agent to transfer the conversation to. The agent must be
in the same app as the current agent.
Format:
projects/{project}/locations/{location}/apps/{app}/agents/{agent}
- Agent string
- The name of the agent to transfer the conversation to. The agent must be
in the same app as the current agent.
Format:
projects/{project}/locations/{location}/apps/{app}/agents/{agent}
- agent String
- The name of the agent to transfer the conversation to. The agent must be
in the same app as the current agent.
Format:
projects/{project}/locations/{location}/apps/{app}/agents/{agent}
- agent string
- The name of the agent to transfer the conversation to. The agent must be
in the same app as the current agent.
Format:
projects/{project}/locations/{location}/apps/{app}/agents/{agent}
- agent str
- The name of the agent to transfer the conversation to. The agent must be
in the same app as the current agent.
Format:
projects/{project}/locations/{location}/apps/{app}/agents/{agent}
- agent String
- The name of the agent to transfer the conversation to. The agent must be
in the same app as the current agent.
Format:
projects/{project}/locations/{location}/apps/{app}/agents/{agent}
GuardrailCodeCallback, GuardrailCodeCallbackArgs
- After
Agent GuardrailCallback Code Callback After Agent Callback - A callback defines the custom logic to be executed at various stages of agent interaction. Structure is documented below.
- After
Model GuardrailCallback Code Callback After Model Callback - A callback defines the custom logic to be executed at various stages of agent interaction. Structure is documented below.
- Before
Agent GuardrailCallback Code Callback Before Agent Callback - A callback defines the custom logic to be executed at various stages of agent interaction. Structure is documented below.
- Before
Model GuardrailCallback Code Callback Before Model Callback - A callback defines the custom logic to be executed at various stages of agent interaction. Structure is documented below.
- After
Agent GuardrailCallback Code Callback After Agent Callback - A callback defines the custom logic to be executed at various stages of agent interaction. Structure is documented below.
- After
Model GuardrailCallback Code Callback After Model Callback - A callback defines the custom logic to be executed at various stages of agent interaction. Structure is documented below.
- Before
Agent GuardrailCallback Code Callback Before Agent Callback - A callback defines the custom logic to be executed at various stages of agent interaction. Structure is documented below.
- Before
Model GuardrailCallback Code Callback Before Model Callback - A callback defines the custom logic to be executed at various stages of agent interaction. Structure is documented below.
- after
Agent GuardrailCallback Code Callback After Agent Callback - A callback defines the custom logic to be executed at various stages of agent interaction. Structure is documented below.
- after
Model GuardrailCallback Code Callback After Model Callback - A callback defines the custom logic to be executed at various stages of agent interaction. Structure is documented below.
- before
Agent GuardrailCallback Code Callback Before Agent Callback - A callback defines the custom logic to be executed at various stages of agent interaction. Structure is documented below.
- before
Model GuardrailCallback Code Callback Before Model Callback - A callback defines the custom logic to be executed at various stages of agent interaction. Structure is documented below.
- after
Agent GuardrailCallback Code Callback After Agent Callback - A callback defines the custom logic to be executed at various stages of agent interaction. Structure is documented below.
- after
Model GuardrailCallback Code Callback After Model Callback - A callback defines the custom logic to be executed at various stages of agent interaction. Structure is documented below.
- before
Agent GuardrailCallback Code Callback Before Agent Callback - A callback defines the custom logic to be executed at various stages of agent interaction. Structure is documented below.
- before
Model GuardrailCallback Code Callback Before Model Callback - A callback defines the custom logic to be executed at various stages of agent interaction. Structure is documented below.
- after_
agent_ Guardrailcallback Code Callback After Agent Callback - A callback defines the custom logic to be executed at various stages of agent interaction. Structure is documented below.
- after_
model_ Guardrailcallback Code Callback After Model Callback - A callback defines the custom logic to be executed at various stages of agent interaction. Structure is documented below.
- before_
agent_ Guardrailcallback Code Callback Before Agent Callback - A callback defines the custom logic to be executed at various stages of agent interaction. Structure is documented below.
- before_
model_ Guardrailcallback Code Callback Before Model Callback - A callback defines the custom logic to be executed at various stages of agent interaction. Structure is documented below.
- after
Agent Property MapCallback - A callback defines the custom logic to be executed at various stages of agent interaction. Structure is documented below.
- after
Model Property MapCallback - A callback defines the custom logic to be executed at various stages of agent interaction. Structure is documented below.
- before
Agent Property MapCallback - A callback defines the custom logic to be executed at various stages of agent interaction. Structure is documented below.
- before
Model Property MapCallback - A callback defines the custom logic to be executed at various stages of agent interaction. Structure is documented below.
GuardrailCodeCallbackAfterAgentCallback, GuardrailCodeCallbackAfterAgentCallbackArgs
- Python
Code string - The python code to execute for the callback.
- Description string
- Human-readable description of the callback.
- Disabled bool
- Whether the callback is disabled. Disabled callbacks are ignored by the agent.
- Python
Code string - The python code to execute for the callback.
- Description string
- Human-readable description of the callback.
- Disabled bool
- Whether the callback is disabled. Disabled callbacks are ignored by the agent.
- python
Code String - The python code to execute for the callback.
- description String
- Human-readable description of the callback.
- disabled Boolean
- Whether the callback is disabled. Disabled callbacks are ignored by the agent.
- python
Code string - The python code to execute for the callback.
- description string
- Human-readable description of the callback.
- disabled boolean
- Whether the callback is disabled. Disabled callbacks are ignored by the agent.
- python_
code str - The python code to execute for the callback.
- description str
- Human-readable description of the callback.
- disabled bool
- Whether the callback is disabled. Disabled callbacks are ignored by the agent.
- python
Code String - The python code to execute for the callback.
- description String
- Human-readable description of the callback.
- disabled Boolean
- Whether the callback is disabled. Disabled callbacks are ignored by the agent.
GuardrailCodeCallbackAfterModelCallback, GuardrailCodeCallbackAfterModelCallbackArgs
- Python
Code string - The python code to execute for the callback.
- Description string
- Human-readable description of the callback.
- Disabled bool
- Whether the callback is disabled. Disabled callbacks are ignored by the agent.
- Python
Code string - The python code to execute for the callback.
- Description string
- Human-readable description of the callback.
- Disabled bool
- Whether the callback is disabled. Disabled callbacks are ignored by the agent.
- python
Code String - The python code to execute for the callback.
- description String
- Human-readable description of the callback.
- disabled Boolean
- Whether the callback is disabled. Disabled callbacks are ignored by the agent.
- python
Code string - The python code to execute for the callback.
- description string
- Human-readable description of the callback.
- disabled boolean
- Whether the callback is disabled. Disabled callbacks are ignored by the agent.
- python_
code str - The python code to execute for the callback.
- description str
- Human-readable description of the callback.
- disabled bool
- Whether the callback is disabled. Disabled callbacks are ignored by the agent.
- python
Code String - The python code to execute for the callback.
- description String
- Human-readable description of the callback.
- disabled Boolean
- Whether the callback is disabled. Disabled callbacks are ignored by the agent.
GuardrailCodeCallbackBeforeAgentCallback, GuardrailCodeCallbackBeforeAgentCallbackArgs
- Python
Code string - The python code to execute for the callback.
- Description string
- Human-readable description of the callback.
- Disabled bool
- Whether the callback is disabled. Disabled callbacks are ignored by the agent.
- Python
Code string - The python code to execute for the callback.
- Description string
- Human-readable description of the callback.
- Disabled bool
- Whether the callback is disabled. Disabled callbacks are ignored by the agent.
- python
Code String - The python code to execute for the callback.
- description String
- Human-readable description of the callback.
- disabled Boolean
- Whether the callback is disabled. Disabled callbacks are ignored by the agent.
- python
Code string - The python code to execute for the callback.
- description string
- Human-readable description of the callback.
- disabled boolean
- Whether the callback is disabled. Disabled callbacks are ignored by the agent.
- python_
code str - The python code to execute for the callback.
- description str
- Human-readable description of the callback.
- disabled bool
- Whether the callback is disabled. Disabled callbacks are ignored by the agent.
- python
Code String - The python code to execute for the callback.
- description String
- Human-readable description of the callback.
- disabled Boolean
- Whether the callback is disabled. Disabled callbacks are ignored by the agent.
GuardrailCodeCallbackBeforeModelCallback, GuardrailCodeCallbackBeforeModelCallbackArgs
- Python
Code string - The python code to execute for the callback.
- Description string
- Human-readable description of the callback.
- Disabled bool
- Whether the callback is disabled. Disabled callbacks are ignored by the agent.
- Python
Code string - The python code to execute for the callback.
- Description string
- Human-readable description of the callback.
- Disabled bool
- Whether the callback is disabled. Disabled callbacks are ignored by the agent.
- python
Code String - The python code to execute for the callback.
- description String
- Human-readable description of the callback.
- disabled Boolean
- Whether the callback is disabled. Disabled callbacks are ignored by the agent.
- python
Code string - The python code to execute for the callback.
- description string
- Human-readable description of the callback.
- disabled boolean
- Whether the callback is disabled. Disabled callbacks are ignored by the agent.
- python_
code str - The python code to execute for the callback.
- description str
- Human-readable description of the callback.
- disabled bool
- Whether the callback is disabled. Disabled callbacks are ignored by the agent.
- python
Code String - The python code to execute for the callback.
- description String
- Human-readable description of the callback.
- disabled Boolean
- Whether the callback is disabled. Disabled callbacks are ignored by the agent.
GuardrailContentFilter, GuardrailContentFilterArgs
- Match
Type string - Match type for the content filter. Possible values: SIMPLE_STRING_MATCH WORD_BOUNDARY_STRING_MATCH REGEXP_MATCH
- Banned
Contents List<string> - List of banned phrases. Applies to both user inputs and agent responses.
- Banned
Contents List<string>In Agent Responses - List of banned phrases. Applies only to agent responses.
- Banned
Contents List<string>In User Inputs - List of banned phrases. Applies only to user inputs.
- Disregard
Diacritics bool - If true, diacritics are ignored during matching.
- Match
Type string - Match type for the content filter. Possible values: SIMPLE_STRING_MATCH WORD_BOUNDARY_STRING_MATCH REGEXP_MATCH
- Banned
Contents []string - List of banned phrases. Applies to both user inputs and agent responses.
- Banned
Contents []stringIn Agent Responses - List of banned phrases. Applies only to agent responses.
- Banned
Contents []stringIn User Inputs - List of banned phrases. Applies only to user inputs.
- Disregard
Diacritics bool - If true, diacritics are ignored during matching.
- match
Type String - Match type for the content filter. Possible values: SIMPLE_STRING_MATCH WORD_BOUNDARY_STRING_MATCH REGEXP_MATCH
- banned
Contents List<String> - List of banned phrases. Applies to both user inputs and agent responses.
- banned
Contents List<String>In Agent Responses - List of banned phrases. Applies only to agent responses.
- banned
Contents List<String>In User Inputs - List of banned phrases. Applies only to user inputs.
- disregard
Diacritics Boolean - If true, diacritics are ignored during matching.
- match
Type string - Match type for the content filter. Possible values: SIMPLE_STRING_MATCH WORD_BOUNDARY_STRING_MATCH REGEXP_MATCH
- banned
Contents string[] - List of banned phrases. Applies to both user inputs and agent responses.
- banned
Contents string[]In Agent Responses - List of banned phrases. Applies only to agent responses.
- banned
Contents string[]In User Inputs - List of banned phrases. Applies only to user inputs.
- disregard
Diacritics boolean - If true, diacritics are ignored during matching.
- match_
type str - Match type for the content filter. Possible values: SIMPLE_STRING_MATCH WORD_BOUNDARY_STRING_MATCH REGEXP_MATCH
- banned_
contents Sequence[str] - List of banned phrases. Applies to both user inputs and agent responses.
- banned_
contents_ Sequence[str]in_ agent_ responses - List of banned phrases. Applies only to agent responses.
- banned_
contents_ Sequence[str]in_ user_ inputs - List of banned phrases. Applies only to user inputs.
- disregard_
diacritics bool - If true, diacritics are ignored during matching.
- match
Type String - Match type for the content filter. Possible values: SIMPLE_STRING_MATCH WORD_BOUNDARY_STRING_MATCH REGEXP_MATCH
- banned
Contents List<String> - List of banned phrases. Applies to both user inputs and agent responses.
- banned
Contents List<String>In Agent Responses - List of banned phrases. Applies only to agent responses.
- banned
Contents List<String>In User Inputs - List of banned phrases. Applies only to user inputs.
- disregard
Diacritics Boolean - If true, diacritics are ignored during matching.
GuardrailLlmPolicy, GuardrailLlmPolicyArgs
- Policy
Scope string - Defines when to apply the policy check during the conversation. If set to
POLICY_SCOPE_UNSPECIFIED, the policy will be applied to the user input. When applying the policy to the agent response, additional latency will be introduced before the agent can respond. Possible values: USER_QUERY AGENT_RESPONSE USER_QUERY_AND_AGENT_RESPONSE Possible values are:USER_QUERY,AGENT_RESPONSE,USER_QUERY_AND_AGENT_RESPONSE. - Prompt string
- Policy prompt.
- Allow
Short boolUtterance - By default, the LLM policy check is bypassed for short utterances. Enabling this setting applies the policy check to all utterances, including those that would normally be skipped.
- Fail
Open bool - If an error occurs during the policy check, fail open and do not trigger the guardrail.
- Max
Conversation intMessages - When checking this policy, consider the last 'n' messages in the conversation. When not set a default value of 10 will be used.
- Model
Settings GuardrailLlm Policy Model Settings - Model settings contains various configurations for the LLM model. Structure is documented below.
- Policy
Scope string - Defines when to apply the policy check during the conversation. If set to
POLICY_SCOPE_UNSPECIFIED, the policy will be applied to the user input. When applying the policy to the agent response, additional latency will be introduced before the agent can respond. Possible values: USER_QUERY AGENT_RESPONSE USER_QUERY_AND_AGENT_RESPONSE Possible values are:USER_QUERY,AGENT_RESPONSE,USER_QUERY_AND_AGENT_RESPONSE. - Prompt string
- Policy prompt.
- Allow
Short boolUtterance - By default, the LLM policy check is bypassed for short utterances. Enabling this setting applies the policy check to all utterances, including those that would normally be skipped.
- Fail
Open bool - If an error occurs during the policy check, fail open and do not trigger the guardrail.
- Max
Conversation intMessages - When checking this policy, consider the last 'n' messages in the conversation. When not set a default value of 10 will be used.
- Model
Settings GuardrailLlm Policy Model Settings - Model settings contains various configurations for the LLM model. Structure is documented below.
- policy
Scope String - Defines when to apply the policy check during the conversation. If set to
POLICY_SCOPE_UNSPECIFIED, the policy will be applied to the user input. When applying the policy to the agent response, additional latency will be introduced before the agent can respond. Possible values: USER_QUERY AGENT_RESPONSE USER_QUERY_AND_AGENT_RESPONSE Possible values are:USER_QUERY,AGENT_RESPONSE,USER_QUERY_AND_AGENT_RESPONSE. - prompt String
- Policy prompt.
- allow
Short BooleanUtterance - By default, the LLM policy check is bypassed for short utterances. Enabling this setting applies the policy check to all utterances, including those that would normally be skipped.
- fail
Open Boolean - If an error occurs during the policy check, fail open and do not trigger the guardrail.
- max
Conversation IntegerMessages - When checking this policy, consider the last 'n' messages in the conversation. When not set a default value of 10 will be used.
- model
Settings GuardrailLlm Policy Model Settings - Model settings contains various configurations for the LLM model. Structure is documented below.
- policy
Scope string - Defines when to apply the policy check during the conversation. If set to
POLICY_SCOPE_UNSPECIFIED, the policy will be applied to the user input. When applying the policy to the agent response, additional latency will be introduced before the agent can respond. Possible values: USER_QUERY AGENT_RESPONSE USER_QUERY_AND_AGENT_RESPONSE Possible values are:USER_QUERY,AGENT_RESPONSE,USER_QUERY_AND_AGENT_RESPONSE. - prompt string
- Policy prompt.
- allow
Short booleanUtterance - By default, the LLM policy check is bypassed for short utterances. Enabling this setting applies the policy check to all utterances, including those that would normally be skipped.
- fail
Open boolean - If an error occurs during the policy check, fail open and do not trigger the guardrail.
- max
Conversation numberMessages - When checking this policy, consider the last 'n' messages in the conversation. When not set a default value of 10 will be used.
- model
Settings GuardrailLlm Policy Model Settings - Model settings contains various configurations for the LLM model. Structure is documented below.
- policy_
scope str - Defines when to apply the policy check during the conversation. If set to
POLICY_SCOPE_UNSPECIFIED, the policy will be applied to the user input. When applying the policy to the agent response, additional latency will be introduced before the agent can respond. Possible values: USER_QUERY AGENT_RESPONSE USER_QUERY_AND_AGENT_RESPONSE Possible values are:USER_QUERY,AGENT_RESPONSE,USER_QUERY_AND_AGENT_RESPONSE. - prompt str
- Policy prompt.
- allow_
short_ boolutterance - By default, the LLM policy check is bypassed for short utterances. Enabling this setting applies the policy check to all utterances, including those that would normally be skipped.
- fail_
open bool - If an error occurs during the policy check, fail open and do not trigger the guardrail.
- max_
conversation_ intmessages - When checking this policy, consider the last 'n' messages in the conversation. When not set a default value of 10 will be used.
- model_
settings GuardrailLlm Policy Model Settings - Model settings contains various configurations for the LLM model. Structure is documented below.
- policy
Scope String - Defines when to apply the policy check during the conversation. If set to
POLICY_SCOPE_UNSPECIFIED, the policy will be applied to the user input. When applying the policy to the agent response, additional latency will be introduced before the agent can respond. Possible values: USER_QUERY AGENT_RESPONSE USER_QUERY_AND_AGENT_RESPONSE Possible values are:USER_QUERY,AGENT_RESPONSE,USER_QUERY_AND_AGENT_RESPONSE. - prompt String
- Policy prompt.
- allow
Short BooleanUtterance - By default, the LLM policy check is bypassed for short utterances. Enabling this setting applies the policy check to all utterances, including those that would normally be skipped.
- fail
Open Boolean - If an error occurs during the policy check, fail open and do not trigger the guardrail.
- max
Conversation NumberMessages - When checking this policy, consider the last 'n' messages in the conversation. When not set a default value of 10 will be used.
- model
Settings Property Map - Model settings contains various configurations for the LLM model. Structure is documented below.
GuardrailLlmPolicyModelSettings, GuardrailLlmPolicyModelSettingsArgs
- Model string
- The LLM model that the agent should use. If not set, the agent will inherit the model from its parent agent.
- Temperature double
- If set, this temperature will be used for the LLM model. Temperature controls the randomness of the model's responses. Lower temperatures produce responses that are more predictable. Higher temperatures produce responses that are more creative.
- Model string
- The LLM model that the agent should use. If not set, the agent will inherit the model from its parent agent.
- Temperature float64
- If set, this temperature will be used for the LLM model. Temperature controls the randomness of the model's responses. Lower temperatures produce responses that are more predictable. Higher temperatures produce responses that are more creative.
- model String
- The LLM model that the agent should use. If not set, the agent will inherit the model from its parent agent.
- temperature Double
- If set, this temperature will be used for the LLM model. Temperature controls the randomness of the model's responses. Lower temperatures produce responses that are more predictable. Higher temperatures produce responses that are more creative.
- model string
- The LLM model that the agent should use. If not set, the agent will inherit the model from its parent agent.
- temperature number
- If set, this temperature will be used for the LLM model. Temperature controls the randomness of the model's responses. Lower temperatures produce responses that are more predictable. Higher temperatures produce responses that are more creative.
- model str
- The LLM model that the agent should use. If not set, the agent will inherit the model from its parent agent.
- temperature float
- If set, this temperature will be used for the LLM model. Temperature controls the randomness of the model's responses. Lower temperatures produce responses that are more predictable. Higher temperatures produce responses that are more creative.
- model String
- The LLM model that the agent should use. If not set, the agent will inherit the model from its parent agent.
- temperature Number
- If set, this temperature will be used for the LLM model. Temperature controls the randomness of the model's responses. Lower temperatures produce responses that are more predictable. Higher temperatures produce responses that are more creative.
GuardrailLlmPromptSecurity, GuardrailLlmPromptSecurityArgs
- Custom
Policy GuardrailLlm Prompt Security Custom Policy - Guardrail that blocks the conversation if the LLM response is considered violating the policy based on the LLM classification. Structure is documented below.
- Default
Settings GuardrailLlm Prompt Security Default Settings - Configuration for default system security settings. Structure is documented below.
- Custom
Policy GuardrailLlm Prompt Security Custom Policy - Guardrail that blocks the conversation if the LLM response is considered violating the policy based on the LLM classification. Structure is documented below.
- Default
Settings GuardrailLlm Prompt Security Default Settings - Configuration for default system security settings. Structure is documented below.
- custom
Policy GuardrailLlm Prompt Security Custom Policy - Guardrail that blocks the conversation if the LLM response is considered violating the policy based on the LLM classification. Structure is documented below.
- default
Settings GuardrailLlm Prompt Security Default Settings - Configuration for default system security settings. Structure is documented below.
- custom
Policy GuardrailLlm Prompt Security Custom Policy - Guardrail that blocks the conversation if the LLM response is considered violating the policy based on the LLM classification. Structure is documented below.
- default
Settings GuardrailLlm Prompt Security Default Settings - Configuration for default system security settings. Structure is documented below.
- custom_
policy GuardrailLlm Prompt Security Custom Policy - Guardrail that blocks the conversation if the LLM response is considered violating the policy based on the LLM classification. Structure is documented below.
- default_
settings GuardrailLlm Prompt Security Default Settings - Configuration for default system security settings. Structure is documented below.
- custom
Policy Property Map - Guardrail that blocks the conversation if the LLM response is considered violating the policy based on the LLM classification. Structure is documented below.
- default
Settings Property Map - Configuration for default system security settings. Structure is documented below.
GuardrailLlmPromptSecurityCustomPolicy, GuardrailLlmPromptSecurityCustomPolicyArgs
- Policy
Scope string - Defines when to apply the policy check during the conversation. If set to
POLICY_SCOPE_UNSPECIFIED, the policy will be applied to the user input. When applying the policy to the agent response, additional latency will be introduced before the agent can respond. Possible values: USER_QUERY AGENT_RESPONSE USER_QUERY_AND_AGENT_RESPONSE - Prompt string
- Policy prompt.
- Allow
Short boolUtterance - By default, the LLM policy check is bypassed for short utterances. Enabling this setting applies the policy check to all utterances, including those that would normally be skipped.
- Fail
Open bool - If an error occurs during the policy check, fail open and do not trigger the guardrail.
- Max
Conversation intMessages - When checking this policy, consider the last 'n' messages in the conversation. When not set a default value of 10 will be used.
- Model
Settings GuardrailLlm Prompt Security Custom Policy Model Settings - Model settings contains various configurations for the LLM model. Structure is documented below.
- Policy
Scope string - Defines when to apply the policy check during the conversation. If set to
POLICY_SCOPE_UNSPECIFIED, the policy will be applied to the user input. When applying the policy to the agent response, additional latency will be introduced before the agent can respond. Possible values: USER_QUERY AGENT_RESPONSE USER_QUERY_AND_AGENT_RESPONSE - Prompt string
- Policy prompt.
- Allow
Short boolUtterance - By default, the LLM policy check is bypassed for short utterances. Enabling this setting applies the policy check to all utterances, including those that would normally be skipped.
- Fail
Open bool - If an error occurs during the policy check, fail open and do not trigger the guardrail.
- Max
Conversation intMessages - When checking this policy, consider the last 'n' messages in the conversation. When not set a default value of 10 will be used.
- Model
Settings GuardrailLlm Prompt Security Custom Policy Model Settings - Model settings contains various configurations for the LLM model. Structure is documented below.
- policy
Scope String - Defines when to apply the policy check during the conversation. If set to
POLICY_SCOPE_UNSPECIFIED, the policy will be applied to the user input. When applying the policy to the agent response, additional latency will be introduced before the agent can respond. Possible values: USER_QUERY AGENT_RESPONSE USER_QUERY_AND_AGENT_RESPONSE - prompt String
- Policy prompt.
- allow
Short BooleanUtterance - By default, the LLM policy check is bypassed for short utterances. Enabling this setting applies the policy check to all utterances, including those that would normally be skipped.
- fail
Open Boolean - If an error occurs during the policy check, fail open and do not trigger the guardrail.
- max
Conversation IntegerMessages - When checking this policy, consider the last 'n' messages in the conversation. When not set a default value of 10 will be used.
- model
Settings GuardrailLlm Prompt Security Custom Policy Model Settings - Model settings contains various configurations for the LLM model. Structure is documented below.
- policy
Scope string - Defines when to apply the policy check during the conversation. If set to
POLICY_SCOPE_UNSPECIFIED, the policy will be applied to the user input. When applying the policy to the agent response, additional latency will be introduced before the agent can respond. Possible values: USER_QUERY AGENT_RESPONSE USER_QUERY_AND_AGENT_RESPONSE - prompt string
- Policy prompt.
- allow
Short booleanUtterance - By default, the LLM policy check is bypassed for short utterances. Enabling this setting applies the policy check to all utterances, including those that would normally be skipped.
- fail
Open boolean - If an error occurs during the policy check, fail open and do not trigger the guardrail.
- max
Conversation numberMessages - When checking this policy, consider the last 'n' messages in the conversation. When not set a default value of 10 will be used.
- model
Settings GuardrailLlm Prompt Security Custom Policy Model Settings - Model settings contains various configurations for the LLM model. Structure is documented below.
- policy_
scope str - Defines when to apply the policy check during the conversation. If set to
POLICY_SCOPE_UNSPECIFIED, the policy will be applied to the user input. When applying the policy to the agent response, additional latency will be introduced before the agent can respond. Possible values: USER_QUERY AGENT_RESPONSE USER_QUERY_AND_AGENT_RESPONSE - prompt str
- Policy prompt.
- allow_
short_ boolutterance - By default, the LLM policy check is bypassed for short utterances. Enabling this setting applies the policy check to all utterances, including those that would normally be skipped.
- fail_
open bool - If an error occurs during the policy check, fail open and do not trigger the guardrail.
- max_
conversation_ intmessages - When checking this policy, consider the last 'n' messages in the conversation. When not set a default value of 10 will be used.
- model_
settings GuardrailLlm Prompt Security Custom Policy Model Settings - Model settings contains various configurations for the LLM model. Structure is documented below.
- policy
Scope String - Defines when to apply the policy check during the conversation. If set to
POLICY_SCOPE_UNSPECIFIED, the policy will be applied to the user input. When applying the policy to the agent response, additional latency will be introduced before the agent can respond. Possible values: USER_QUERY AGENT_RESPONSE USER_QUERY_AND_AGENT_RESPONSE - prompt String
- Policy prompt.
- allow
Short BooleanUtterance - By default, the LLM policy check is bypassed for short utterances. Enabling this setting applies the policy check to all utterances, including those that would normally be skipped.
- fail
Open Boolean - If an error occurs during the policy check, fail open and do not trigger the guardrail.
- max
Conversation NumberMessages - When checking this policy, consider the last 'n' messages in the conversation. When not set a default value of 10 will be used.
- model
Settings Property Map - Model settings contains various configurations for the LLM model. Structure is documented below.
GuardrailLlmPromptSecurityCustomPolicyModelSettings, GuardrailLlmPromptSecurityCustomPolicyModelSettingsArgs
- Model string
- The LLM model that the agent should use. If not set, the agent will inherit the model from its parent agent.
- Temperature double
- If set, this temperature will be used for the LLM model. Temperature controls the randomness of the model's responses. Lower temperatures produce responses that are more predictable. Higher temperatures produce responses that are more creative.
- Model string
- The LLM model that the agent should use. If not set, the agent will inherit the model from its parent agent.
- Temperature float64
- If set, this temperature will be used for the LLM model. Temperature controls the randomness of the model's responses. Lower temperatures produce responses that are more predictable. Higher temperatures produce responses that are more creative.
- model String
- The LLM model that the agent should use. If not set, the agent will inherit the model from its parent agent.
- temperature Double
- If set, this temperature will be used for the LLM model. Temperature controls the randomness of the model's responses. Lower temperatures produce responses that are more predictable. Higher temperatures produce responses that are more creative.
- model string
- The LLM model that the agent should use. If not set, the agent will inherit the model from its parent agent.
- temperature number
- If set, this temperature will be used for the LLM model. Temperature controls the randomness of the model's responses. Lower temperatures produce responses that are more predictable. Higher temperatures produce responses that are more creative.
- model str
- The LLM model that the agent should use. If not set, the agent will inherit the model from its parent agent.
- temperature float
- If set, this temperature will be used for the LLM model. Temperature controls the randomness of the model's responses. Lower temperatures produce responses that are more predictable. Higher temperatures produce responses that are more creative.
- model String
- The LLM model that the agent should use. If not set, the agent will inherit the model from its parent agent.
- temperature Number
- If set, this temperature will be used for the LLM model. Temperature controls the randomness of the model's responses. Lower temperatures produce responses that are more predictable. Higher temperatures produce responses that are more creative.
GuardrailLlmPromptSecurityDefaultSettings, GuardrailLlmPromptSecurityDefaultSettingsArgs
- Default
Prompt stringTemplate - (Output) The default prompt template used by the system. This field is for display purposes to show the user what prompt the system uses by default. It is OUTPUT_ONLY.
- Default
Prompt stringTemplate - (Output) The default prompt template used by the system. This field is for display purposes to show the user what prompt the system uses by default. It is OUTPUT_ONLY.
- default
Prompt StringTemplate - (Output) The default prompt template used by the system. This field is for display purposes to show the user what prompt the system uses by default. It is OUTPUT_ONLY.
- default
Prompt stringTemplate - (Output) The default prompt template used by the system. This field is for display purposes to show the user what prompt the system uses by default. It is OUTPUT_ONLY.
- default_
prompt_ strtemplate - (Output) The default prompt template used by the system. This field is for display purposes to show the user what prompt the system uses by default. It is OUTPUT_ONLY.
- default
Prompt StringTemplate - (Output) The default prompt template used by the system. This field is for display purposes to show the user what prompt the system uses by default. It is OUTPUT_ONLY.
GuardrailModelSafety, GuardrailModelSafetyArgs
- Safety
Settings List<GuardrailModel Safety Safety Setting> - List of safety settings. Structure is documented below.
- Safety
Settings []GuardrailModel Safety Safety Setting - List of safety settings. Structure is documented below.
- safety
Settings List<GuardrailModel Safety Safety Setting> - List of safety settings. Structure is documented below.
- safety
Settings GuardrailModel Safety Safety Setting[] - List of safety settings. Structure is documented below.
- safety_
settings Sequence[GuardrailModel Safety Safety Setting] - List of safety settings. Structure is documented below.
- safety
Settings List<Property Map> - List of safety settings. Structure is documented below.
GuardrailModelSafetySafetySetting, GuardrailModelSafetySafetySettingArgs
- Category string
- The harm category.
Possible values:
HARM_CATEGORY_HATE_SPEECH
HARM_CATEGORY_DANGEROUS_CONTENT
HARM_CATEGORY_HARASSMENT
HARM_CATEGORY_SEXUALLY_EXPLICIT
Possible values are:
HARM_CATEGORY_HATE_SPEECH,HARM_CATEGORY_DANGEROUS_CONTENT,HARM_CATEGORY_HARASSMENT,HARM_CATEGORY_SEXUALLY_EXPLICIT. - Threshold string
- The harm block threshold.
Possible values:
BLOCK_LOW_AND_ABOVE
BLOCK_MEDIUM_AND_ABOVE
BLOCK_ONLY_HIGH
BLOCK_NONE
OFF
Possible values are:
BLOCK_LOW_AND_ABOVE,BLOCK_MEDIUM_AND_ABOVE,BLOCK_ONLY_HIGH,BLOCK_NONE,OFF.
- Category string
- The harm category.
Possible values:
HARM_CATEGORY_HATE_SPEECH
HARM_CATEGORY_DANGEROUS_CONTENT
HARM_CATEGORY_HARASSMENT
HARM_CATEGORY_SEXUALLY_EXPLICIT
Possible values are:
HARM_CATEGORY_HATE_SPEECH,HARM_CATEGORY_DANGEROUS_CONTENT,HARM_CATEGORY_HARASSMENT,HARM_CATEGORY_SEXUALLY_EXPLICIT. - Threshold string
- The harm block threshold.
Possible values:
BLOCK_LOW_AND_ABOVE
BLOCK_MEDIUM_AND_ABOVE
BLOCK_ONLY_HIGH
BLOCK_NONE
OFF
Possible values are:
BLOCK_LOW_AND_ABOVE,BLOCK_MEDIUM_AND_ABOVE,BLOCK_ONLY_HIGH,BLOCK_NONE,OFF.
- category String
- The harm category.
Possible values:
HARM_CATEGORY_HATE_SPEECH
HARM_CATEGORY_DANGEROUS_CONTENT
HARM_CATEGORY_HARASSMENT
HARM_CATEGORY_SEXUALLY_EXPLICIT
Possible values are:
HARM_CATEGORY_HATE_SPEECH,HARM_CATEGORY_DANGEROUS_CONTENT,HARM_CATEGORY_HARASSMENT,HARM_CATEGORY_SEXUALLY_EXPLICIT. - threshold String
- The harm block threshold.
Possible values:
BLOCK_LOW_AND_ABOVE
BLOCK_MEDIUM_AND_ABOVE
BLOCK_ONLY_HIGH
BLOCK_NONE
OFF
Possible values are:
BLOCK_LOW_AND_ABOVE,BLOCK_MEDIUM_AND_ABOVE,BLOCK_ONLY_HIGH,BLOCK_NONE,OFF.
- category string
- The harm category.
Possible values:
HARM_CATEGORY_HATE_SPEECH
HARM_CATEGORY_DANGEROUS_CONTENT
HARM_CATEGORY_HARASSMENT
HARM_CATEGORY_SEXUALLY_EXPLICIT
Possible values are:
HARM_CATEGORY_HATE_SPEECH,HARM_CATEGORY_DANGEROUS_CONTENT,HARM_CATEGORY_HARASSMENT,HARM_CATEGORY_SEXUALLY_EXPLICIT. - threshold string
- The harm block threshold.
Possible values:
BLOCK_LOW_AND_ABOVE
BLOCK_MEDIUM_AND_ABOVE
BLOCK_ONLY_HIGH
BLOCK_NONE
OFF
Possible values are:
BLOCK_LOW_AND_ABOVE,BLOCK_MEDIUM_AND_ABOVE,BLOCK_ONLY_HIGH,BLOCK_NONE,OFF.
- category str
- The harm category.
Possible values:
HARM_CATEGORY_HATE_SPEECH
HARM_CATEGORY_DANGEROUS_CONTENT
HARM_CATEGORY_HARASSMENT
HARM_CATEGORY_SEXUALLY_EXPLICIT
Possible values are:
HARM_CATEGORY_HATE_SPEECH,HARM_CATEGORY_DANGEROUS_CONTENT,HARM_CATEGORY_HARASSMENT,HARM_CATEGORY_SEXUALLY_EXPLICIT. - threshold str
- The harm block threshold.
Possible values:
BLOCK_LOW_AND_ABOVE
BLOCK_MEDIUM_AND_ABOVE
BLOCK_ONLY_HIGH
BLOCK_NONE
OFF
Possible values are:
BLOCK_LOW_AND_ABOVE,BLOCK_MEDIUM_AND_ABOVE,BLOCK_ONLY_HIGH,BLOCK_NONE,OFF.
- category String
- The harm category.
Possible values:
HARM_CATEGORY_HATE_SPEECH
HARM_CATEGORY_DANGEROUS_CONTENT
HARM_CATEGORY_HARASSMENT
HARM_CATEGORY_SEXUALLY_EXPLICIT
Possible values are:
HARM_CATEGORY_HATE_SPEECH,HARM_CATEGORY_DANGEROUS_CONTENT,HARM_CATEGORY_HARASSMENT,HARM_CATEGORY_SEXUALLY_EXPLICIT. - threshold String
- The harm block threshold.
Possible values:
BLOCK_LOW_AND_ABOVE
BLOCK_MEDIUM_AND_ABOVE
BLOCK_ONLY_HIGH
BLOCK_NONE
OFF
Possible values are:
BLOCK_LOW_AND_ABOVE,BLOCK_MEDIUM_AND_ABOVE,BLOCK_ONLY_HIGH,BLOCK_NONE,OFF.
Import
Guardrail can be imported using any of these accepted formats:
projects/{{project}}/locations/{{location}}/apps/{{app}}/guardrails/{{name}}{{project}}/{{location}}/{{app}}/{{name}}{{location}}/{{app}}/{{name}}
When using the pulumi import command, Guardrail can be imported using one of the formats above. For example:
$ pulumi import gcp:ces/guardrail:Guardrail default projects/{{project}}/locations/{{location}}/apps/{{app}}/guardrails/{{name}}
$ pulumi import gcp:ces/guardrail:Guardrail default {{project}}/{{location}}/{{app}}/{{name}}
$ pulumi import gcp:ces/guardrail:Guardrail default {{location}}/{{app}}/{{name}}
To learn more about importing existing cloud resources, see Importing resources.
Package Details
- Repository
- Google Cloud (GCP) Classic pulumi/pulumi-gcp
- License
- Apache-2.0
- Notes
- This Pulumi package is based on the
google-betaTerraform Provider.
