LLM generator.
To get more information about Generator, see:
- API documentation
- How-to Guides
Example Usage
Dialogflow Generator Basic
import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";
const summarizationBasicGenerator = new gcp.diagflow.Generator("summarization_basic_generator", {
location: "global",
description: "A v4.0 summarization generator.",
inferenceParameter: {
maxOutputTokens: 1024,
temperature: 0,
topK: 40,
topP: 0.95,
},
summarizationContext: {
version: "4.0",
outputLanguageCode: "en",
},
triggerEvent: "MANUAL_CALL",
});
import pulumi
import pulumi_gcp as gcp
summarization_basic_generator = gcp.diagflow.Generator("summarization_basic_generator",
location="global",
description="A v4.0 summarization generator.",
inference_parameter={
"max_output_tokens": 1024,
"temperature": 0,
"top_k": 40,
"top_p": 0.95,
},
summarization_context={
"version": "4.0",
"output_language_code": "en",
},
trigger_event="MANUAL_CALL")
package main
import (
"github.com/pulumi/pulumi-gcp/sdk/v9/go/gcp/diagflow"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
_, err := diagflow.NewGenerator(ctx, "summarization_basic_generator", &diagflow.GeneratorArgs{
Location: pulumi.String("global"),
Description: pulumi.String("A v4.0 summarization generator."),
InferenceParameter: &diagflow.GeneratorInferenceParameterArgs{
MaxOutputTokens: pulumi.Int(1024),
Temperature: pulumi.Float64(0),
TopK: pulumi.Int(40),
TopP: pulumi.Float64(0.95),
},
SummarizationContext: &diagflow.GeneratorSummarizationContextArgs{
Version: pulumi.String("4.0"),
OutputLanguageCode: pulumi.String("en"),
},
TriggerEvent: pulumi.String("MANUAL_CALL"),
})
if err != nil {
return err
}
return nil
})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;
return await Deployment.RunAsync(() =>
{
var summarizationBasicGenerator = new Gcp.Diagflow.Generator("summarization_basic_generator", new()
{
Location = "global",
Description = "A v4.0 summarization generator.",
InferenceParameter = new Gcp.Diagflow.Inputs.GeneratorInferenceParameterArgs
{
MaxOutputTokens = 1024,
Temperature = 0,
TopK = 40,
TopP = 0.95,
},
SummarizationContext = new Gcp.Diagflow.Inputs.GeneratorSummarizationContextArgs
{
Version = "4.0",
OutputLanguageCode = "en",
},
TriggerEvent = "MANUAL_CALL",
});
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcp.diagflow.Generator;
import com.pulumi.gcp.diagflow.GeneratorArgs;
import com.pulumi.gcp.diagflow.inputs.GeneratorInferenceParameterArgs;
import com.pulumi.gcp.diagflow.inputs.GeneratorSummarizationContextArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
var summarizationBasicGenerator = new Generator("summarizationBasicGenerator", GeneratorArgs.builder()
.location("global")
.description("A v4.0 summarization generator.")
.inferenceParameter(GeneratorInferenceParameterArgs.builder()
.maxOutputTokens(1024)
.temperature(0.0)
.topK(40)
.topP(0.95)
.build())
.summarizationContext(GeneratorSummarizationContextArgs.builder()
.version("4.0")
.outputLanguageCode("en")
.build())
.triggerEvent("MANUAL_CALL")
.build());
}
}
resources:
summarizationBasicGenerator:
type: gcp:diagflow:Generator
name: summarization_basic_generator
properties:
location: global
description: A v4.0 summarization generator.
inferenceParameter:
maxOutputTokens: 1024
temperature: 0
topK: 40
topP: 0.95
summarizationContext:
version: '4.0'
outputLanguageCode: en
triggerEvent: MANUAL_CALL
Create Generator Resource
Resources are created with functions called constructors. To learn more about declaring and configuring resources, see Resources.
Constructor syntax
new Generator(name: string, args: GeneratorArgs, opts?: CustomResourceOptions);
@overload
def Generator(resource_name: str,
args: GeneratorArgs,
opts: Optional[ResourceOptions] = None)
@overload
def Generator(resource_name: str,
opts: Optional[ResourceOptions] = None,
location: Optional[str] = None,
summarization_context: Optional[GeneratorSummarizationContextArgs] = None,
description: Optional[str] = None,
generator_id: Optional[str] = None,
inference_parameter: Optional[GeneratorInferenceParameterArgs] = None,
project: Optional[str] = None,
published_model: Optional[str] = None,
trigger_event: Optional[str] = None)
func NewGenerator(ctx *Context, name string, args GeneratorArgs, opts ...ResourceOption) (*Generator, error)
public Generator(string name, GeneratorArgs args, CustomResourceOptions? opts = null)
public Generator(String name, GeneratorArgs args)
public Generator(String name, GeneratorArgs args, CustomResourceOptions options)
type: gcp:diagflow:Generator
properties: # The arguments to resource properties.
options: # Bag of options to control resource's behavior.
Parameters
- name string
- The unique name of the resource.
- args GeneratorArgs
- The arguments to resource properties.
- opts CustomResourceOptions
- Bag of options to control resource's behavior.
- resource_name str
- The unique name of the resource.
- args GeneratorArgs
- The arguments to resource properties.
- opts ResourceOptions
- Bag of options to control resource's behavior.
- ctx Context
- Context object for the current deployment.
- name string
- The unique name of the resource.
- args GeneratorArgs
- The arguments to resource properties.
- opts ResourceOption
- Bag of options to control resource's behavior.
- name string
- The unique name of the resource.
- args GeneratorArgs
- The arguments to resource properties.
- opts CustomResourceOptions
- Bag of options to control resource's behavior.
- name String
- The unique name of the resource.
- args GeneratorArgs
- The arguments to resource properties.
- options CustomResourceOptions
- Bag of options to control resource's behavior.
Constructor example
The following reference example uses placeholder values for all input properties.
var generatorResource = new Gcp.Diagflow.Generator("generatorResource", new()
{
Location = "string",
SummarizationContext = new Gcp.Diagflow.Inputs.GeneratorSummarizationContextArgs
{
FewShotExamples = new[]
{
new Gcp.Diagflow.Inputs.GeneratorSummarizationContextFewShotExampleArgs
{
Output = new Gcp.Diagflow.Inputs.GeneratorSummarizationContextFewShotExampleOutputArgs
{
SummarySuggestion = new Gcp.Diagflow.Inputs.GeneratorSummarizationContextFewShotExampleOutputSummarySuggestionArgs
{
SummarySections = new[]
{
new Gcp.Diagflow.Inputs.GeneratorSummarizationContextFewShotExampleOutputSummarySuggestionSummarySectionArgs
{
Section = "string",
Summary = "string",
},
},
},
},
ConversationContext = new Gcp.Diagflow.Inputs.GeneratorSummarizationContextFewShotExampleConversationContextArgs
{
MessageEntries = new[]
{
new Gcp.Diagflow.Inputs.GeneratorSummarizationContextFewShotExampleConversationContextMessageEntryArgs
{
CreateTime = "string",
LanguageCode = "string",
Role = "string",
Text = "string",
},
},
},
ExtraInfo =
{
{ "string", "string" },
},
SummarizationSectionList = new Gcp.Diagflow.Inputs.GeneratorSummarizationContextFewShotExampleSummarizationSectionListArgs
{
SummarizationSections = new[]
{
new Gcp.Diagflow.Inputs.GeneratorSummarizationContextFewShotExampleSummarizationSectionListSummarizationSectionArgs
{
Definition = "string",
Key = "string",
Type = "string",
},
},
},
},
},
OutputLanguageCode = "string",
SummarizationSections = new[]
{
new Gcp.Diagflow.Inputs.GeneratorSummarizationContextSummarizationSectionArgs
{
Definition = "string",
Key = "string",
Type = "string",
},
},
Version = "string",
},
Description = "string",
GeneratorId = "string",
InferenceParameter = new Gcp.Diagflow.Inputs.GeneratorInferenceParameterArgs
{
MaxOutputTokens = 0,
Temperature = 0,
TopK = 0,
TopP = 0,
},
Project = "string",
PublishedModel = "string",
TriggerEvent = "string",
});
example, err := diagflow.NewGenerator(ctx, "generatorResource", &diagflow.GeneratorArgs{
Location: pulumi.String("string"),
SummarizationContext: &diagflow.GeneratorSummarizationContextArgs{
FewShotExamples: diagflow.GeneratorSummarizationContextFewShotExampleArray{
&diagflow.GeneratorSummarizationContextFewShotExampleArgs{
Output: &diagflow.GeneratorSummarizationContextFewShotExampleOutputTypeArgs{
SummarySuggestion: &diagflow.GeneratorSummarizationContextFewShotExampleOutputSummarySuggestionArgs{
SummarySections: diagflow.GeneratorSummarizationContextFewShotExampleOutputSummarySuggestionSummarySectionArray{
&diagflow.GeneratorSummarizationContextFewShotExampleOutputSummarySuggestionSummarySectionArgs{
Section: pulumi.String("string"),
Summary: pulumi.String("string"),
},
},
},
},
ConversationContext: &diagflow.GeneratorSummarizationContextFewShotExampleConversationContextArgs{
MessageEntries: diagflow.GeneratorSummarizationContextFewShotExampleConversationContextMessageEntryArray{
&diagflow.GeneratorSummarizationContextFewShotExampleConversationContextMessageEntryArgs{
CreateTime: pulumi.String("string"),
LanguageCode: pulumi.String("string"),
Role: pulumi.String("string"),
Text: pulumi.String("string"),
},
},
},
ExtraInfo: pulumi.StringMap{
"string": pulumi.String("string"),
},
SummarizationSectionList: &diagflow.GeneratorSummarizationContextFewShotExampleSummarizationSectionListArgs{
SummarizationSections: diagflow.GeneratorSummarizationContextFewShotExampleSummarizationSectionListSummarizationSectionArray{
&diagflow.GeneratorSummarizationContextFewShotExampleSummarizationSectionListSummarizationSectionArgs{
Definition: pulumi.String("string"),
Key: pulumi.String("string"),
Type: pulumi.String("string"),
},
},
},
},
},
OutputLanguageCode: pulumi.String("string"),
SummarizationSections: diagflow.GeneratorSummarizationContextSummarizationSectionArray{
&diagflow.GeneratorSummarizationContextSummarizationSectionArgs{
Definition: pulumi.String("string"),
Key: pulumi.String("string"),
Type: pulumi.String("string"),
},
},
Version: pulumi.String("string"),
},
Description: pulumi.String("string"),
GeneratorId: pulumi.String("string"),
InferenceParameter: &diagflow.GeneratorInferenceParameterArgs{
MaxOutputTokens: pulumi.Int(0),
Temperature: pulumi.Float64(0),
TopK: pulumi.Int(0),
TopP: pulumi.Float64(0),
},
Project: pulumi.String("string"),
PublishedModel: pulumi.String("string"),
TriggerEvent: pulumi.String("string"),
})
var generatorResource = new Generator("generatorResource", GeneratorArgs.builder()
.location("string")
.summarizationContext(GeneratorSummarizationContextArgs.builder()
.fewShotExamples(GeneratorSummarizationContextFewShotExampleArgs.builder()
.output(GeneratorSummarizationContextFewShotExampleOutputArgs.builder()
.summarySuggestion(GeneratorSummarizationContextFewShotExampleOutputSummarySuggestionArgs.builder()
.summarySections(GeneratorSummarizationContextFewShotExampleOutputSummarySuggestionSummarySectionArgs.builder()
.section("string")
.summary("string")
.build())
.build())
.build())
.conversationContext(GeneratorSummarizationContextFewShotExampleConversationContextArgs.builder()
.messageEntries(GeneratorSummarizationContextFewShotExampleConversationContextMessageEntryArgs.builder()
.createTime("string")
.languageCode("string")
.role("string")
.text("string")
.build())
.build())
.extraInfo(Map.of("string", "string"))
.summarizationSectionList(GeneratorSummarizationContextFewShotExampleSummarizationSectionListArgs.builder()
.summarizationSections(GeneratorSummarizationContextFewShotExampleSummarizationSectionListSummarizationSectionArgs.builder()
.definition("string")
.key("string")
.type("string")
.build())
.build())
.build())
.outputLanguageCode("string")
.summarizationSections(GeneratorSummarizationContextSummarizationSectionArgs.builder()
.definition("string")
.key("string")
.type("string")
.build())
.version("string")
.build())
.description("string")
.generatorId("string")
.inferenceParameter(GeneratorInferenceParameterArgs.builder()
.maxOutputTokens(0)
.temperature(0.0)
.topK(0)
.topP(0.0)
.build())
.project("string")
.publishedModel("string")
.triggerEvent("string")
.build());
generator_resource = gcp.diagflow.Generator("generatorResource",
location="string",
summarization_context={
"few_shot_examples": [{
"output": {
"summary_suggestion": {
"summary_sections": [{
"section": "string",
"summary": "string",
}],
},
},
"conversation_context": {
"message_entries": [{
"create_time": "string",
"language_code": "string",
"role": "string",
"text": "string",
}],
},
"extra_info": {
"string": "string",
},
"summarization_section_list": {
"summarization_sections": [{
"definition": "string",
"key": "string",
"type": "string",
}],
},
}],
"output_language_code": "string",
"summarization_sections": [{
"definition": "string",
"key": "string",
"type": "string",
}],
"version": "string",
},
description="string",
generator_id="string",
inference_parameter={
"max_output_tokens": 0,
"temperature": 0,
"top_k": 0,
"top_p": 0,
},
project="string",
published_model="string",
trigger_event="string")
const generatorResource = new gcp.diagflow.Generator("generatorResource", {
location: "string",
summarizationContext: {
fewShotExamples: [{
output: {
summarySuggestion: {
summarySections: [{
section: "string",
summary: "string",
}],
},
},
conversationContext: {
messageEntries: [{
createTime: "string",
languageCode: "string",
role: "string",
text: "string",
}],
},
extraInfo: {
string: "string",
},
summarizationSectionList: {
summarizationSections: [{
definition: "string",
key: "string",
type: "string",
}],
},
}],
outputLanguageCode: "string",
summarizationSections: [{
definition: "string",
key: "string",
type: "string",
}],
version: "string",
},
description: "string",
generatorId: "string",
inferenceParameter: {
maxOutputTokens: 0,
temperature: 0,
topK: 0,
topP: 0,
},
project: "string",
publishedModel: "string",
triggerEvent: "string",
});
type: gcp:diagflow:Generator
properties:
description: string
generatorId: string
inferenceParameter:
maxOutputTokens: 0
temperature: 0
topK: 0
topP: 0
location: string
project: string
publishedModel: string
summarizationContext:
fewShotExamples:
- conversationContext:
messageEntries:
- createTime: string
languageCode: string
role: string
text: string
extraInfo:
string: string
output:
summarySuggestion:
summarySections:
- section: string
summary: string
summarizationSectionList:
summarizationSections:
- definition: string
key: string
type: string
outputLanguageCode: string
summarizationSections:
- definition: string
key: string
type: string
version: string
triggerEvent: string
Generator Resource Properties
To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.
Inputs
In Python, inputs that are objects can be passed either as argument classes or as dictionary literals.
The Generator resource accepts the following input properties:
- Location string
- The location of the generator.
- Summarization
Context GeneratorSummarization Context - Input of prebuilt Summarization feature. Structure is documented below.
- Description string
- Optional. Human readable description of the generator.
- Generator
Id string - Optional. The ID to use for the generator, which will become the final component of the generator's resource name.
- Inference
Parameter GeneratorInference Parameter - Optional. Inference parameters for this generator. Structure is documented below.
- Project string
- The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- Published
Model string - Optional. The published Large Language Model name. * To use the latest model version, specify the model name without version number. Example: text-bison * To use a stable model version, specify the version number as well. Example: text-bison@002.
- Trigger
Event string - Optional. The trigger event of the generator. It defines when the generator is triggered in a conversation.
Possible values are:
END_OF_UTTERANCE, MANUAL_CALL, CUSTOMER_MESSAGE, AGENT_MESSAGE.
- Location string
- The location of the generator.
- Summarization
Context GeneratorSummarization Context Args - Input of prebuilt Summarization feature. Structure is documented below.
- Description string
- Optional. Human readable description of the generator.
- Generator
Id string - Optional. The ID to use for the generator, which will become the final component of the generator's resource name.
- Inference
Parameter GeneratorInference Parameter Args - Optional. Inference parameters for this generator. Structure is documented below.
- Project string
- The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- Published
Model string - Optional. The published Large Language Model name. * To use the latest model version, specify the model name without version number. Example: text-bison * To use a stable model version, specify the version number as well. Example: text-bison@002.
- Trigger
Event string - Optional. The trigger event of the generator. It defines when the generator is triggered in a conversation.
Possible values are:
END_OF_UTTERANCE, MANUAL_CALL, CUSTOMER_MESSAGE, AGENT_MESSAGE.
- location String
- The location of the generator.
- summarization
Context GeneratorSummarization Context - Input of prebuilt Summarization feature. Structure is documented below.
- description String
- Optional. Human readable description of the generator.
- generator
Id String - Optional. The ID to use for the generator, which will become the final component of the generator's resource name.
- inference
Parameter GeneratorInference Parameter - Optional. Inference parameters for this generator. Structure is documented below.
- project String
- The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- published
Model String - Optional. The published Large Language Model name. * To use the latest model version, specify the model name without version number. Example: text-bison * To use a stable model version, specify the version number as well. Example: text-bison@002.
- trigger
Event String - Optional. The trigger event of the generator. It defines when the generator is triggered in a conversation.
Possible values are:
END_OF_UTTERANCE, MANUAL_CALL, CUSTOMER_MESSAGE, AGENT_MESSAGE.
- location string
- The location of the generator.
- summarization
Context GeneratorSummarization Context - Input of prebuilt Summarization feature. Structure is documented below.
- description string
- Optional. Human readable description of the generator.
- generator
Id string - Optional. The ID to use for the generator, which will become the final component of the generator's resource name.
- inference
Parameter GeneratorInference Parameter - Optional. Inference parameters for this generator. Structure is documented below.
- project string
- The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- published
Model string - Optional. The published Large Language Model name. * To use the latest model version, specify the model name without version number. Example: text-bison * To use a stable model version, specify the version number as well. Example: text-bison@002.
- trigger
Event string - Optional. The trigger event of the generator. It defines when the generator is triggered in a conversation.
Possible values are:
END_OF_UTTERANCE, MANUAL_CALL, CUSTOMER_MESSAGE, AGENT_MESSAGE.
- location str
- The location of the generator.
- summarization_
context GeneratorSummarization Context Args - Input of prebuilt Summarization feature. Structure is documented below.
- description str
- Optional. Human readable description of the generator.
- generator_
id str - Optional. The ID to use for the generator, which will become the final component of the generator's resource name.
- inference_
parameter GeneratorInference Parameter Args - Optional. Inference parameters for this generator. Structure is documented below.
- project str
- The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- published_
model str - Optional. The published Large Language Model name. * To use the latest model version, specify the model name without version number. Example: text-bison * To use a stable model version, specify the version number as well. Example: text-bison@002.
- trigger_
event str - Optional. The trigger event of the generator. It defines when the generator is triggered in a conversation.
Possible values are:
END_OF_UTTERANCE, MANUAL_CALL, CUSTOMER_MESSAGE, AGENT_MESSAGE.
- location String
- The location of the generator.
- summarization
Context Property Map - Input of prebuilt Summarization feature. Structure is documented below.
- description String
- Optional. Human readable description of the generator.
- generator
Id String - Optional. The ID to use for the generator, which will become the final component of the generator's resource name.
- inference
Parameter Property Map - Optional. Inference parameters for this generator. Structure is documented below.
- project String
- The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- published
Model String - Optional. The published Large Language Model name. * To use the latest model version, specify the model name without version number. Example: text-bison * To use a stable model version, specify the version number as well. Example: text-bison@002.
- trigger
Event String - Optional. The trigger event of the generator. It defines when the generator is triggered in a conversation.
Possible values are:
END_OF_UTTERANCE, MANUAL_CALL, CUSTOMER_MESSAGE, AGENT_MESSAGE.
Outputs
All input properties are implicitly available as output properties. Additionally, the Generator resource produces the following output properties:
Look up Existing Generator Resource
Get an existing Generator resource’s state with the given name, ID, and optional extra properties used to qualify the lookup.
public static get(name: string, id: Input&lt;ID&gt;, state?: GeneratorState, opts?: CustomResourceOptions): Generator
@staticmethod
def get(resource_name: str,
id: str,
opts: Optional[ResourceOptions] = None,
description: Optional[str] = None,
generator_id: Optional[str] = None,
inference_parameter: Optional[GeneratorInferenceParameterArgs] = None,
location: Optional[str] = None,
name: Optional[str] = None,
project: Optional[str] = None,
published_model: Optional[str] = None,
summarization_context: Optional[GeneratorSummarizationContextArgs] = None,
trigger_event: Optional[str] = None) -> Generator
func GetGenerator(ctx *Context, name string, id IDInput, state *GeneratorState, opts ...ResourceOption) (*Generator, error)
public static Generator Get(string name, Input&lt;string&gt; id, GeneratorState? state, CustomResourceOptions? opts = null)
public static Generator get(String name, Output&lt;String&gt; id, GeneratorState state, CustomResourceOptions options)
resources:
  _:
    type: gcp:diagflow:Generator
    get:
      id: ${id}
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- resource_name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- Description string
- Optional. Human readable description of the generator.
- Generator
Id string - Optional. The ID to use for the generator, which will become the final component of the generator's resource name.
- Inference
Parameter GeneratorInference Parameter - Optional. Inference parameters for this generator. Structure is documented below.
- Location string
- The location of the generator.
- Name string
- The resource name of the generator.
- Project string
- The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- Published
Model string - Optional. The published Large Language Model name. * To use the latest model version, specify the model name without version number. Example: text-bison * To use a stable model version, specify the version number as well. Example: text-bison@002.
- Summarization
Context GeneratorSummarization Context - Input of prebuilt Summarization feature. Structure is documented below.
- Trigger
Event string - Optional. The trigger event of the generator. It defines when the generator is triggered in a conversation.
Possible values are:
END_OF_UTTERANCE, MANUAL_CALL, CUSTOMER_MESSAGE, AGENT_MESSAGE.
- Description string
- Optional. Human readable description of the generator.
- Generator
Id string - Optional. The ID to use for the generator, which will become the final component of the generator's resource name.
- Inference
Parameter GeneratorInference Parameter Args - Optional. Inference parameters for this generator. Structure is documented below.
- Location string
- The location of the generator.
- Name string
- The resource name of the generator.
- Project string
- The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- Published
Model string - Optional. The published Large Language Model name. * To use the latest model version, specify the model name without version number. Example: text-bison * To use a stable model version, specify the version number as well. Example: text-bison@002.
- Summarization
Context GeneratorSummarization Context Args - Input of prebuilt Summarization feature. Structure is documented below.
- Trigger
Event string - Optional. The trigger event of the generator. It defines when the generator is triggered in a conversation.
Possible values are:
END_OF_UTTERANCE, MANUAL_CALL, CUSTOMER_MESSAGE, AGENT_MESSAGE.
- description String
- Optional. Human readable description of the generator.
- generator
Id String - Optional. The ID to use for the generator, which will become the final component of the generator's resource name.
- inference
Parameter GeneratorInference Parameter - Optional. Inference parameters for this generator. Structure is documented below.
- location String
- The location of the generator.
- name String
- The resource name of the generator.
- project String
- The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- published
Model String - Optional. The published Large Language Model name. * To use the latest model version, specify the model name without version number. Example: text-bison * To use a stable model version, specify the version number as well. Example: text-bison@002.
- summarization
Context GeneratorSummarization Context - Input of prebuilt Summarization feature. Structure is documented below.
- trigger
Event String - Optional. The trigger event of the generator. It defines when the generator is triggered in a conversation.
Possible values are:
END_OF_UTTERANCE, MANUAL_CALL, CUSTOMER_MESSAGE, AGENT_MESSAGE.
- description string
- Optional. Human readable description of the generator.
- generator
Id string - Optional. The ID to use for the generator, which will become the final component of the generator's resource name.
- inference
Parameter GeneratorInference Parameter - Optional. Inference parameters for this generator. Structure is documented below.
- location string
- The location of the generator.
- name string
- The resource name of the generator.
- project string
- The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- published
Model string - Optional. The published Large Language Model name. * To use the latest model version, specify the model name without version number. Example: text-bison * To use a stable model version, specify the version number as well. Example: text-bison@002.
- summarization
Context GeneratorSummarization Context - Input of prebuilt Summarization feature. Structure is documented below.
- trigger
Event string - Optional. The trigger event of the generator. It defines when the generator is triggered in a conversation.
Possible values are:
END_OF_UTTERANCE, MANUAL_CALL, CUSTOMER_MESSAGE, AGENT_MESSAGE.
- description str
- Optional. Human readable description of the generator.
- generator_
id str - Optional. The ID to use for the generator, which will become the final component of the generator's resource name.
- inference_
parameter GeneratorInference Parameter Args - Optional. Inference parameters for this generator. Structure is documented below.
- location str
- The location of the generator.
- name str
- The resource name of the generator.
- project str
- The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- published_
model str - Optional. The published Large Language Model name. * To use the latest model version, specify the model name without version number. Example: text-bison * To use a stable model version, specify the version number as well. Example: text-bison@002.
- summarization_
context GeneratorSummarization Context Args - Input of prebuilt Summarization feature. Structure is documented below.
- trigger_
event str - Optional. The trigger event of the generator. It defines when the generator is triggered in a conversation.
Possible values are:
END_OF_UTTERANCE, MANUAL_CALL, CUSTOMER_MESSAGE, AGENT_MESSAGE.
- description String
- Optional. Human readable description of the generator.
- generator
Id String - Optional. The ID to use for the generator, which will become the final component of the generator's resource name.
- inference
Parameter Property Map - Optional. Inference parameters for this generator. Structure is documented below.
- location String
- The location of the generator.
- name String
- The resource name of the generator.
- project String
- The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- published
Model String - Optional. The published Large Language Model name. * To use the latest model version, specify the model name without version number. Example: text-bison * To use a stable model version, specify the version number as well. Example: text-bison@002.
- summarization
Context Property Map - Input of prebuilt Summarization feature. Structure is documented below.
- trigger
Event String - Optional. The trigger event of the generator. It defines when the generator is triggered in a conversation.
Possible values are:
END_OF_UTTERANCE, MANUAL_CALL, CUSTOMER_MESSAGE, AGENT_MESSAGE.
Supporting Types
GeneratorInferenceParameter, GeneratorInferenceParameterArgs
- Max
Output intTokens - Optional. Maximum number of the output tokens for the generator.
- Temperature double
- Optional. Controls the randomness of LLM predictions. Low temperature = less random. High temperature = more random. If unset (or 0), uses a default value of 0.
- Top
K int - Optional. Top-k changes how the model selects tokens for output. A top-k of 1 means the selected token is the most probable among all tokens in the model's vocabulary (also called greedy decoding), while a top-k of 3 means that the next token is selected from among the 3 most probable tokens (using temperature). For each token selection step, the top K tokens with the highest probabilities are sampled. Then tokens are further filtered based on topP with the final token selected using temperature sampling. Specify a lower value for less random responses and a higher value for more random responses. Acceptable value is [1, 40], default to 40.
- Top
P double - Optional. Top-p changes how the model selects tokens for output. Tokens are selected from the most probable (see the topK parameter) to the least until the sum of their probabilities equals the top-p value. For example, if tokens A, B, and C have a probability of 0.3, 0.2, and 0.1 and the top-p value is 0.5, then the model will select either A or B as the next token (using temperature) and doesn't consider C. The default top-p value is 0.95. Specify a lower value for less random responses and a higher value for more random responses. Acceptable value is [0.0, 1.0], default to 0.95.
- Max
Output Tokens int - Optional. Maximum number of the output tokens for the generator.
- Temperature float64
- Optional. Controls the randomness of LLM predictions. Low temperature = less random. High temperature = more random. If unset (or 0), uses a default value of 0.
- Top
K int - Optional. Top-k changes how the model selects tokens for output. A top-k of 1 means the selected token is the most probable among all tokens in the model's vocabulary (also called greedy decoding), while a top-k of 3 means that the next token is selected from among the 3 most probable tokens (using temperature). For each token selection step, the top K tokens with the highest probabilities are sampled. Then tokens are further filtered based on topP with the final token selected using temperature sampling. Specify a lower value for less random responses and a higher value for more random responses. Acceptable value is [1, 40], default to 40.
- Top
P float64 - Optional. Top-p changes how the model selects tokens for output. Tokens are selected from the most probable (see the topK parameter) to the least until the sum of their probabilities equals the top-p value. For example, if tokens A, B, and C have a probability of 0.3, 0.2, and 0.1 and the top-p value is 0.5, then the model will select either A or B as the next token (using temperature) and doesn't consider C. The default top-p value is 0.95. Specify a lower value for less random responses and a higher value for more random responses. Acceptable value is [0.0, 1.0], default to 0.95.
- max
Output Tokens Integer - Optional. Maximum number of the output tokens for the generator.
- temperature Double
- Optional. Controls the randomness of LLM predictions. Low temperature = less random. High temperature = more random. If unset (or 0), uses a default value of 0.
- top
K Integer - Optional. Top-k changes how the model selects tokens for output. A top-k of 1 means the selected token is the most probable among all tokens in the model's vocabulary (also called greedy decoding), while a top-k of 3 means that the next token is selected from among the 3 most probable tokens (using temperature). For each token selection step, the top K tokens with the highest probabilities are sampled. Then tokens are further filtered based on topP with the final token selected using temperature sampling. Specify a lower value for less random responses and a higher value for more random responses. Acceptable value is [1, 40], default to 40.
- top
P Double - Optional. Top-p changes how the model selects tokens for output. Tokens are selected from the most probable (see the topK parameter) to the least until the sum of their probabilities equals the top-p value. For example, if tokens A, B, and C have a probability of 0.3, 0.2, and 0.1 and the top-p value is 0.5, then the model will select either A or B as the next token (using temperature) and doesn't consider C. The default top-p value is 0.95. Specify a lower value for less random responses and a higher value for more random responses. Acceptable value is [0.0, 1.0], default to 0.95.
- max
Output Tokens number - Optional. Maximum number of the output tokens for the generator.
- temperature number
- Optional. Controls the randomness of LLM predictions. Low temperature = less random. High temperature = more random. If unset (or 0), uses a default value of 0.
- top
K number - Optional. Top-k changes how the model selects tokens for output. A top-k of 1 means the selected token is the most probable among all tokens in the model's vocabulary (also called greedy decoding), while a top-k of 3 means that the next token is selected from among the 3 most probable tokens (using temperature). For each token selection step, the top K tokens with the highest probabilities are sampled. Then tokens are further filtered based on topP with the final token selected using temperature sampling. Specify a lower value for less random responses and a higher value for more random responses. Acceptable value is [1, 40], default to 40.
- top
P number - Optional. Top-p changes how the model selects tokens for output. Tokens are selected from the most probable (see the topK parameter) to the least until the sum of their probabilities equals the top-p value. For example, if tokens A, B, and C have a probability of 0.3, 0.2, and 0.1 and the top-p value is 0.5, then the model will select either A or B as the next token (using temperature) and doesn't consider C. The default top-p value is 0.95. Specify a lower value for less random responses and a higher value for more random responses. Acceptable value is [0.0, 1.0], default to 0.95.
- max_
output_tokens int - Optional. Maximum number of the output tokens for the generator.
- temperature float
- Optional. Controls the randomness of LLM predictions. Low temperature = less random. High temperature = more random. If unset (or 0), uses a default value of 0.
- top_
k int - Optional. Top-k changes how the model selects tokens for output. A top-k of 1 means the selected token is the most probable among all tokens in the model's vocabulary (also called greedy decoding), while a top-k of 3 means that the next token is selected from among the 3 most probable tokens (using temperature). For each token selection step, the top K tokens with the highest probabilities are sampled. Then tokens are further filtered based on topP with the final token selected using temperature sampling. Specify a lower value for less random responses and a higher value for more random responses. Acceptable value is [1, 40], default to 40.
- top_
p float - Optional. Top-p changes how the model selects tokens for output. Tokens are selected from the most probable (see the topK parameter) to the least until the sum of their probabilities equals the top-p value. For example, if tokens A, B, and C have a probability of 0.3, 0.2, and 0.1 and the top-p value is 0.5, then the model will select either A or B as the next token (using temperature) and doesn't consider C. The default top-p value is 0.95. Specify a lower value for less random responses and a higher value for more random responses. Acceptable value is [0.0, 1.0], default to 0.95.
- max
Output Tokens Number - Optional. Maximum number of the output tokens for the generator.
- temperature Number
- Optional. Controls the randomness of LLM predictions. Low temperature = less random. High temperature = more random. If unset (or 0), uses a default value of 0.
- top
K Number - Optional. Top-k changes how the model selects tokens for output. A top-k of 1 means the selected token is the most probable among all tokens in the model's vocabulary (also called greedy decoding), while a top-k of 3 means that the next token is selected from among the 3 most probable tokens (using temperature). For each token selection step, the top K tokens with the highest probabilities are sampled. Then tokens are further filtered based on topP with the final token selected using temperature sampling. Specify a lower value for less random responses and a higher value for more random responses. Acceptable value is [1, 40], default to 40.
- top
P Number - Optional. Top-p changes how the model selects tokens for output. Tokens are selected from the most probable (see the topK parameter) to the least until the sum of their probabilities equals the top-p value. For example, if tokens A, B, and C have a probability of 0.3, 0.2, and 0.1 and the top-p value is 0.5, then the model will select either A or B as the next token (using temperature) and doesn't consider C. The default top-p value is 0.95. Specify a lower value for less random responses and a higher value for more random responses. Acceptable value is [0.0, 1.0], default to 0.95.
GeneratorSummarizationContext, GeneratorSummarizationContextArgs
- Few
Shot Examples List<GeneratorSummarization Context Few Shot Example> - Optional. List of few shot examples. Structure is documented below.
- Output
Language Code string - Optional. The target language of the generated summary. The language code for conversation will be used if this field is empty. Supported 2.0 and later versions.
- Summarization
Sections List<GeneratorSummarization Context Summarization Section> - Optional. List of sections. Note it contains both predefined sections and customer defined sections. Structure is documented below.
- Version string
- Optional. Version of the feature. If not set, default to latest version. Current candidates are ["1.0"].
- Few
Shot Examples []GeneratorSummarization Context Few Shot Example - Optional. List of few shot examples. Structure is documented below.
- Output
Language Code string - Optional. The target language of the generated summary. The language code for conversation will be used if this field is empty. Supported 2.0 and later versions.
- Summarization
Sections []GeneratorSummarization Context Summarization Section - Optional. List of sections. Note it contains both predefined sections and customer defined sections. Structure is documented below.
- Version string
- Optional. Version of the feature. If not set, default to latest version. Current candidates are ["1.0"].
- few
Shot Examples List<GeneratorSummarization Context Few Shot Example> - Optional. List of few shot examples. Structure is documented below.
- output
Language Code String - Optional. The target language of the generated summary. The language code for conversation will be used if this field is empty. Supported 2.0 and later versions.
- summarization
Sections List<GeneratorSummarization Context Summarization Section> - Optional. List of sections. Note it contains both predefined sections and customer defined sections. Structure is documented below.
- version String
- Optional. Version of the feature. If not set, default to latest version. Current candidates are ["1.0"].
- few
Shot Examples GeneratorSummarization Context Few Shot Example[] - Optional. List of few shot examples. Structure is documented below.
- output
Language Code string - Optional. The target language of the generated summary. The language code for conversation will be used if this field is empty. Supported 2.0 and later versions.
- summarization
Sections GeneratorSummarization Context Summarization Section[] - Optional. List of sections. Note it contains both predefined sections and customer defined sections. Structure is documented below.
- version string
- Optional. Version of the feature. If not set, default to latest version. Current candidates are ["1.0"].
- few_
shot_examples Sequence[GeneratorSummarization Context Few Shot Example] - Optional. List of few shot examples. Structure is documented below.
- output_
language_code str - Optional. The target language of the generated summary. The language code for conversation will be used if this field is empty. Supported 2.0 and later versions.
- summarization_
sections Sequence[GeneratorSummarization Context Summarization Section] - Optional. List of sections. Note it contains both predefined sections and customer defined sections. Structure is documented below.
- version str
- Optional. Version of the feature. If not set, default to latest version. Current candidates are ["1.0"].
- few
Shot Examples List<Property Map> - Optional. List of few shot examples. Structure is documented below.
- output
Language Code String - Optional. The target language of the generated summary. The language code for conversation will be used if this field is empty. Supported 2.0 and later versions.
- summarization
Sections List<Property Map> - Optional. List of sections. Note it contains both predefined sections and customer defined sections. Structure is documented below.
- version String
- Optional. Version of the feature. If not set, default to latest version. Current candidates are ["1.0"].
GeneratorSummarizationContextFewShotExample, GeneratorSummarizationContextFewShotExampleArgs
- Output
GeneratorSummarization Context Few Shot Example Output - Required. Example output of the model. Structure is documented below.
- Conversation
Context GeneratorSummarization Context Few Shot Example Conversation Context - Optional. Conversation transcripts. Structure is documented below.
- Extra
Info Dictionary<string, string> - Optional. Key is the placeholder field name in input, value is the value of the placeholder. E.g. instruction contains "@price", and ingested data has <"price", "10">
- Summarization
Section List GeneratorSummarization Context Few Shot Example Summarization Section List - Summarization sections. Structure is documented below.
- Output
GeneratorSummarization Context Few Shot Example Output Type - Required. Example output of the model. Structure is documented below.
- Conversation
Context GeneratorSummarization Context Few Shot Example Conversation Context - Optional. Conversation transcripts. Structure is documented below.
- Extra
Info map[string]string - Optional. Key is the placeholder field name in input, value is the value of the placeholder. E.g. instruction contains "@price", and ingested data has <"price", "10">
- Summarization
Section List GeneratorSummarization Context Few Shot Example Summarization Section List - Summarization sections. Structure is documented below.
- output
GeneratorSummarization Context Few Shot Example Output - Required. Example output of the model. Structure is documented below.
- conversation
Context GeneratorSummarization Context Few Shot Example Conversation Context - Optional. Conversation transcripts. Structure is documented below.
- extra
Info Map<String,String> - Optional. Key is the placeholder field name in input, value is the value of the placeholder. E.g. instruction contains "@price", and ingested data has <"price", "10">
- summarization
Section List GeneratorSummarization Context Few Shot Example Summarization Section List - Summarization sections. Structure is documented below.
- output
GeneratorSummarization Context Few Shot Example Output - Required. Example output of the model. Structure is documented below.
- conversation
Context GeneratorSummarization Context Few Shot Example Conversation Context - Optional. Conversation transcripts. Structure is documented below.
- extra
Info {[key: string]: string} - Optional. Key is the placeholder field name in input, value is the value of the placeholder. E.g. instruction contains "@price", and ingested data has <"price", "10">
- summarization
Section List GeneratorSummarization Context Few Shot Example Summarization Section List - Summarization sections. Structure is documented below.
- output
GeneratorSummarization Context Few Shot Example Output - Required. Example output of the model. Structure is documented below.
- conversation_
context GeneratorSummarization Context Few Shot Example Conversation Context - Optional. Conversation transcripts. Structure is documented below.
- extra_
info Mapping[str, str] - Optional. Key is the placeholder field name in input, value is the value of the placeholder. E.g. instruction contains "@price", and ingested data has <"price", "10">
- summarization_
section_list GeneratorSummarization Context Few Shot Example Summarization Section List - Summarization sections. Structure is documented below.
- output Property Map
- Required. Example output of the model. Structure is documented below.
- conversation
Context Property Map - Optional. Conversation transcripts. Structure is documented below.
- extra
Info Map<String> - Optional. Key is the placeholder field name in input, value is the value of the placeholder. E.g. instruction contains "@price", and ingested data has <"price", "10">
- summarization
Section List Property Map - Summarization sections. Structure is documented below.
GeneratorSummarizationContextFewShotExampleConversationContext, GeneratorSummarizationContextFewShotExampleConversationContextArgs
- Message
Entries List<GeneratorSummarization Context Few Shot Example Conversation Context Message Entry> - Optional. List of message transcripts in the conversation. Structure is documented below.
- Message
Entries []GeneratorSummarization Context Few Shot Example Conversation Context Message Entry - Optional. List of message transcripts in the conversation. Structure is documented below.
- message
Entries List<GeneratorSummarization Context Few Shot Example Conversation Context Message Entry> - Optional. List of message transcripts in the conversation. Structure is documented below.
- message
Entries GeneratorSummarization Context Few Shot Example Conversation Context Message Entry[] - Optional. List of message transcripts in the conversation. Structure is documented below.
- message_
entries Sequence[GeneratorSummarization Context Few Shot Example Conversation Context Message Entry] - Optional. List of message transcripts in the conversation. Structure is documented below.
- message
Entries List<Property Map> - Optional. List of message transcripts in the conversation. Structure is documented below.
GeneratorSummarizationContextFewShotExampleConversationContextMessageEntry, GeneratorSummarizationContextFewShotExampleConversationContextMessageEntryArgs
- Create
Time string - Optional. Create time of the message entry.
- Language
Code string - Optional. The language of the text.
- Role string
- Optional. Participant role of the message.
Possible values are:
HUMAN_AGENT,AUTOMATED_AGENT,END_USER. - Text string
- Optional. Transcript content of the message.
- Create
Time string - Optional. Create time of the message entry.
- Language
Code string - Optional. The language of the text.
- Role string
- Optional. Participant role of the message.
Possible values are:
HUMAN_AGENT,AUTOMATED_AGENT,END_USER. - Text string
- Optional. Transcript content of the message.
- create
Time String - Optional. Create time of the message entry.
- language
Code String - Optional. The language of the text.
- role String
- Optional. Participant role of the message.
Possible values are:
HUMAN_AGENT,AUTOMATED_AGENT,END_USER. - text String
- Optional. Transcript content of the message.
- create
Time string - Optional. Create time of the message entry.
- language
Code string - Optional. The language of the text.
- role string
- Optional. Participant role of the message.
Possible values are:
HUMAN_AGENT,AUTOMATED_AGENT,END_USER. - text string
- Optional. Transcript content of the message.
- create_
time str - Optional. Create time of the message entry.
- language_
code str - Optional. The language of the text.
- role str
- Optional. Participant role of the message.
Possible values are:
HUMAN_AGENT,AUTOMATED_AGENT,END_USER. - text str
- Optional. Transcript content of the message.
- create
Time String - Optional. Create time of the message entry.
- language
Code String - Optional. The language of the text.
- role String
- Optional. Participant role of the message.
Possible values are:
HUMAN_AGENT,AUTOMATED_AGENT,END_USER. - text String
- Optional. Transcript content of the message.
GeneratorSummarizationContextFewShotExampleOutput, GeneratorSummarizationContextFewShotExampleOutputArgs
- Summary
Suggestion GeneratorSummarization Context Few Shot Example Output Summary Suggestion - Optional. Suggested summary. Structure is documented below.
- Summary
Suggestion GeneratorSummarization Context Few Shot Example Output Summary Suggestion - Optional. Suggested summary. Structure is documented below.
- summary
Suggestion GeneratorSummarization Context Few Shot Example Output Summary Suggestion - Optional. Suggested summary. Structure is documented below.
- summary
Suggestion GeneratorSummarization Context Few Shot Example Output Summary Suggestion - Optional. Suggested summary. Structure is documented below.
- summary_
suggestion GeneratorSummarization Context Few Shot Example Output Summary Suggestion - Optional. Suggested summary. Structure is documented below.
- summary
Suggestion Property Map - Optional. Suggested summary. Structure is documented below.
GeneratorSummarizationContextFewShotExampleOutputSummarySuggestion, GeneratorSummarizationContextFewShotExampleOutputSummarySuggestionArgs
- Summary
Sections List<GeneratorSummarization Context Few Shot Example Output Summary Suggestion Summary Section> - Required. All the parts of generated summary. Structure is documented below.
- Summary
Sections []GeneratorSummarization Context Few Shot Example Output Summary Suggestion Summary Section - Required. All the parts of generated summary. Structure is documented below.
- summary
Sections List<GeneratorSummarization Context Few Shot Example Output Summary Suggestion Summary Section> - Required. All the parts of generated summary. Structure is documented below.
- summary
Sections GeneratorSummarization Context Few Shot Example Output Summary Suggestion Summary Section[] - Required. All the parts of generated summary. Structure is documented below.
- summary_
sections Sequence[GeneratorSummarization Context Few Shot Example Output Summary Suggestion Summary Section] - Required. All the parts of generated summary. Structure is documented below.
- summary
Sections List<Property Map> - Required. All the parts of generated summary. Structure is documented below.
GeneratorSummarizationContextFewShotExampleOutputSummarySuggestionSummarySection, GeneratorSummarizationContextFewShotExampleOutputSummarySuggestionSummarySectionArgs
GeneratorSummarizationContextFewShotExampleSummarizationSectionList, GeneratorSummarizationContextFewShotExampleSummarizationSectionListArgs
- Summarization
Sections List<GeneratorSummarization Context Few Shot Example Summarization Section List Summarization Section> - Optional. Summarization sections. Structure is documented below.
- Summarization
Sections []GeneratorSummarization Context Few Shot Example Summarization Section List Summarization Section - Optional. Summarization sections. Structure is documented below.
- summarization
Sections List<GeneratorSummarization Context Few Shot Example Summarization Section List Summarization Section> - Optional. Summarization sections. Structure is documented below.
- summarization
Sections GeneratorSummarization Context Few Shot Example Summarization Section List Summarization Section[] - Optional. Summarization sections. Structure is documented below.
- summarization_
sections Sequence[GeneratorSummarization Context Few Shot Example Summarization Section List Summarization Section] - Optional. Summarization sections. Structure is documented below.
- summarization
Sections List<Property Map> - Optional. Summarization sections. Structure is documented below.
GeneratorSummarizationContextFewShotExampleSummarizationSectionListSummarizationSection, GeneratorSummarizationContextFewShotExampleSummarizationSectionListSummarizationSectionArgs
- Definition string
- Optional. Definition of the section, for example, "what the customer needs help with or has question about."
- Key string
- Optional. Name of the section, for example, "situation".
- Type string
- Optional. Type of the summarization section.
Possible values are:
SITUATION,ACTION,RESOLUTION,REASON_FOR_CANCELLATION,CUSTOMER_SATISFACTION,ENTITIES,CUSTOMER_DEFINED,SITUATION_CONCISE,ACTION_CONCISE.
- Definition string
- Optional. Definition of the section, for example, "what the customer needs help with or has question about."
- Key string
- Optional. Name of the section, for example, "situation".
- Type string
- Optional. Type of the summarization section.
Possible values are:
SITUATION,ACTION,RESOLUTION,REASON_FOR_CANCELLATION,CUSTOMER_SATISFACTION,ENTITIES,CUSTOMER_DEFINED,SITUATION_CONCISE,ACTION_CONCISE.
- definition String
- Optional. Definition of the section, for example, "what the customer needs help with or has question about."
- key String
- Optional. Name of the section, for example, "situation".
- type String
- Optional. Type of the summarization section.
Possible values are:
SITUATION,ACTION,RESOLUTION,REASON_FOR_CANCELLATION,CUSTOMER_SATISFACTION,ENTITIES,CUSTOMER_DEFINED,SITUATION_CONCISE,ACTION_CONCISE.
- definition string
- Optional. Definition of the section, for example, "what the customer needs help with or has question about."
- key string
- Optional. Name of the section, for example, "situation".
- type string
- Optional. Type of the summarization section.
Possible values are:
SITUATION,ACTION,RESOLUTION,REASON_FOR_CANCELLATION,CUSTOMER_SATISFACTION,ENTITIES,CUSTOMER_DEFINED,SITUATION_CONCISE,ACTION_CONCISE.
- definition str
- Optional. Definition of the section, for example, "what the customer needs help with or has question about."
- key str
- Optional. Name of the section, for example, "situation".
- type str
- Optional. Type of the summarization section.
Possible values are:
SITUATION,ACTION,RESOLUTION,REASON_FOR_CANCELLATION,CUSTOMER_SATISFACTION,ENTITIES,CUSTOMER_DEFINED,SITUATION_CONCISE,ACTION_CONCISE.
- definition String
- Optional. Definition of the section, for example, "what the customer needs help with or has question about."
- key String
- Optional. Name of the section, for example, "situation".
- type String
- Optional. Type of the summarization section.
Possible values are:
SITUATION,ACTION,RESOLUTION,REASON_FOR_CANCELLATION,CUSTOMER_SATISFACTION,ENTITIES,CUSTOMER_DEFINED,SITUATION_CONCISE,ACTION_CONCISE.
GeneratorSummarizationContextSummarizationSection, GeneratorSummarizationContextSummarizationSectionArgs
- Definition string
- Optional. Definition of the section, for example, "what the customer needs help with or has question about."
- Key string
- Optional. Name of the section, for example, "situation".
- Type string
- Optional. Type of the summarization section.
Possible values are:
SITUATION,ACTION,RESOLUTION,REASON_FOR_CANCELLATION,CUSTOMER_SATISFACTION,ENTITIES,CUSTOMER_DEFINED,SITUATION_CONCISE,ACTION_CONCISE.
- Definition string
- Optional. Definition of the section, for example, "what the customer needs help with or has question about."
- Key string
- Optional. Name of the section, for example, "situation".
- Type string
- Optional. Type of the summarization section.
Possible values are:
SITUATION,ACTION,RESOLUTION,REASON_FOR_CANCELLATION,CUSTOMER_SATISFACTION,ENTITIES,CUSTOMER_DEFINED,SITUATION_CONCISE,ACTION_CONCISE.
- definition String
- Optional. Definition of the section, for example, "what the customer needs help with or has question about."
- key String
- Optional. Name of the section, for example, "situation".
- type String
- Optional. Type of the summarization section.
Possible values are:
SITUATION,ACTION,RESOLUTION,REASON_FOR_CANCELLATION,CUSTOMER_SATISFACTION,ENTITIES,CUSTOMER_DEFINED,SITUATION_CONCISE,ACTION_CONCISE.
- definition string
- Optional. Definition of the section, for example, "what the customer needs help with or has question about."
- key string
- Optional. Name of the section, for example, "situation".
- type string
- Optional. Type of the summarization section.
Possible values are:
SITUATION,ACTION,RESOLUTION,REASON_FOR_CANCELLATION,CUSTOMER_SATISFACTION,ENTITIES,CUSTOMER_DEFINED,SITUATION_CONCISE,ACTION_CONCISE.
- definition str
- Optional. Definition of the section, for example, "what the customer needs help with or has question about."
- key str
- Optional. Name of the section, for example, "situation".
- type str
- Optional. Type of the summarization section.
Possible values are:
SITUATION,ACTION,RESOLUTION,REASON_FOR_CANCELLATION,CUSTOMER_SATISFACTION,ENTITIES,CUSTOMER_DEFINED,SITUATION_CONCISE,ACTION_CONCISE.
- definition String
- Optional. Definition of the section, for example, "what the customer needs help with or has question about."
- key String
- Optional. Name of the section, for example, "situation".
- type String
- Optional. Type of the summarization section.
Possible values are:
SITUATION,ACTION,RESOLUTION,REASON_FOR_CANCELLATION,CUSTOMER_SATISFACTION,ENTITIES,CUSTOMER_DEFINED,SITUATION_CONCISE,ACTION_CONCISE.
Import
Generator can be imported using any of these accepted formats:
projects/{{project}}/locations/{{location}}/generators/{{name}}
{{project}}/{{location}}/{{name}}
{{location}}/{{name}}
When using the pulumi import command, Generator can be imported using one of the formats above. For example:
$ pulumi import gcp:diagflow/generator:Generator default projects/{{project}}/locations/{{location}}/generators/{{name}}
$ pulumi import gcp:diagflow/generator:Generator default {{project}}/{{location}}/{{name}}
$ pulumi import gcp:diagflow/generator:Generator default {{location}}/{{name}}
To learn more about importing existing cloud resources, see Importing resources.
Package Details
- Repository
- Google Cloud (GCP) Classic pulumi/pulumi-gcp
- License
- Apache-2.0
- Notes
- This Pulumi package is based on the
google-beta Terraform Provider.
