1. Packages
  2. Google Cloud (GCP) Classic
  3. API Docs
  4. diagflow
  5. Generator
Google Cloud v9.7.0 published on Wednesday, Dec 24, 2025 by Pulumi
gcp logo
Google Cloud v9.7.0 published on Wednesday, Dec 24, 2025 by Pulumi

    LLM generator.

    To get more information about Generator, see the official Google Cloud Dialogflow ES API documentation for generators.

    Example Usage

    Dialogflow Generator Basic

    import * as pulumi from "@pulumi/pulumi";
    import * as gcp from "@pulumi/gcp";
    
    const summarizationBasicGenerator = new gcp.diagflow.Generator("summarization_basic_generator", {
        location: "global",
        description: "A v4.0 summarization generator.",
        inferenceParameter: {
            maxOutputTokens: 1024,
            temperature: 0,
            topK: 40,
            topP: 0.95,
        },
        summarizationContext: {
            version: "4.0",
            outputLanguageCode: "en",
        },
        triggerEvent: "MANUAL_CALL",
    });
    
    import pulumi
    import pulumi_gcp as gcp
    
    summarization_basic_generator = gcp.diagflow.Generator("summarization_basic_generator",
        location="global",
        description="A v4.0 summarization generator.",
        inference_parameter={
            "max_output_tokens": 1024,
            "temperature": 0,
            "top_k": 40,
            "top_p": 0.95,
        },
        summarization_context={
            "version": "4.0",
            "output_language_code": "en",
        },
        trigger_event="MANUAL_CALL")
    
    package main
    
    import (
    	"github.com/pulumi/pulumi-gcp/sdk/v9/go/gcp/diagflow"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		_, err := diagflow.NewGenerator(ctx, "summarization_basic_generator", &diagflow.GeneratorArgs{
    			Location:    pulumi.String("global"),
    			Description: pulumi.String("A v4.0 summarization generator."),
    			InferenceParameter: &diagflow.GeneratorInferenceParameterArgs{
    				MaxOutputTokens: pulumi.Int(1024),
    				Temperature:     pulumi.Float64(0),
    				TopK:            pulumi.Int(40),
    				TopP:            pulumi.Float64(0.95),
    			},
    			SummarizationContext: &diagflow.GeneratorSummarizationContextArgs{
    				Version:            pulumi.String("4.0"),
    				OutputLanguageCode: pulumi.String("en"),
    			},
    			TriggerEvent: pulumi.String("MANUAL_CALL"),
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using Gcp = Pulumi.Gcp;
    
    return await Deployment.RunAsync(() => 
    {
        var summarizationBasicGenerator = new Gcp.Diagflow.Generator("summarization_basic_generator", new()
        {
            Location = "global",
            Description = "A v4.0 summarization generator.",
            InferenceParameter = new Gcp.Diagflow.Inputs.GeneratorInferenceParameterArgs
            {
                MaxOutputTokens = 1024,
                Temperature = 0,
                TopK = 40,
                TopP = 0.95,
            },
            SummarizationContext = new Gcp.Diagflow.Inputs.GeneratorSummarizationContextArgs
            {
                Version = "4.0",
                OutputLanguageCode = "en",
            },
            TriggerEvent = "MANUAL_CALL",
        });
    
    });
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.gcp.diagflow.Generator;
    import com.pulumi.gcp.diagflow.GeneratorArgs;
    import com.pulumi.gcp.diagflow.inputs.GeneratorInferenceParameterArgs;
    import com.pulumi.gcp.diagflow.inputs.GeneratorSummarizationContextArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            var summarizationBasicGenerator = new Generator("summarizationBasicGenerator", GeneratorArgs.builder()
                .location("global")
                .description("A v4.0 summarization generator.")
                .inferenceParameter(GeneratorInferenceParameterArgs.builder()
                    .maxOutputTokens(1024)
                    .temperature(0.0)
                    .topK(40)
                    .topP(0.95)
                    .build())
                .summarizationContext(GeneratorSummarizationContextArgs.builder()
                    .version("4.0")
                    .outputLanguageCode("en")
                    .build())
                .triggerEvent("MANUAL_CALL")
                .build());
    
        }
    }
    
    resources:
      summarizationBasicGenerator:
        type: gcp:diagflow:Generator
        name: summarization_basic_generator
        properties:
          location: global
          description: A v4.0 summarization generator.
          inferenceParameter:
            maxOutputTokens: 1024
            temperature: 0
            topK: 40
            topP: 0.95
          summarizationContext:
            version: '4.0'
            outputLanguageCode: en
          triggerEvent: MANUAL_CALL
    

    Create Generator Resource

    Resources are created with functions called constructors. To learn more about declaring and configuring resources, see Resources.

    Constructor syntax

    new Generator(name: string, args: GeneratorArgs, opts?: CustomResourceOptions);
    @overload
    def Generator(resource_name: str,
                  args: GeneratorArgs,
                  opts: Optional[ResourceOptions] = None)
    
    @overload
    def Generator(resource_name: str,
                  opts: Optional[ResourceOptions] = None,
                  location: Optional[str] = None,
                  summarization_context: Optional[GeneratorSummarizationContextArgs] = None,
                  description: Optional[str] = None,
                  generator_id: Optional[str] = None,
                  inference_parameter: Optional[GeneratorInferenceParameterArgs] = None,
                  project: Optional[str] = None,
                  published_model: Optional[str] = None,
                  trigger_event: Optional[str] = None)
    func NewGenerator(ctx *Context, name string, args GeneratorArgs, opts ...ResourceOption) (*Generator, error)
    public Generator(string name, GeneratorArgs args, CustomResourceOptions? opts = null)
    public Generator(String name, GeneratorArgs args)
    public Generator(String name, GeneratorArgs args, CustomResourceOptions options)
    
    type: gcp:diagflow:Generator
    properties: # The arguments to resource properties.
    options: # Bag of options to control resource's behavior.
    
    

    Parameters

    name string
    The unique name of the resource.
    args GeneratorArgs
    The arguments to resource properties.
    opts CustomResourceOptions
    Bag of options to control resource's behavior.
    resource_name str
    The unique name of the resource.
    args GeneratorArgs
    The arguments to resource properties.
    opts ResourceOptions
    Bag of options to control resource's behavior.
    ctx Context
    Context object for the current deployment.
    name string
    The unique name of the resource.
    args GeneratorArgs
    The arguments to resource properties.
    opts ResourceOption
    Bag of options to control resource's behavior.
    name string
    The unique name of the resource.
    args GeneratorArgs
    The arguments to resource properties.
    opts CustomResourceOptions
    Bag of options to control resource's behavior.
    name String
    The unique name of the resource.
    args GeneratorArgs
    The arguments to resource properties.
    options CustomResourceOptions
    Bag of options to control resource's behavior.

    Constructor example

    The following reference example uses placeholder values for all input properties.

    var generatorResource = new Gcp.Diagflow.Generator("generatorResource", new()
    {
        Location = "string",
        SummarizationContext = new Gcp.Diagflow.Inputs.GeneratorSummarizationContextArgs
        {
            FewShotExamples = new[]
            {
                new Gcp.Diagflow.Inputs.GeneratorSummarizationContextFewShotExampleArgs
                {
                    Output = new Gcp.Diagflow.Inputs.GeneratorSummarizationContextFewShotExampleOutputArgs
                    {
                        SummarySuggestion = new Gcp.Diagflow.Inputs.GeneratorSummarizationContextFewShotExampleOutputSummarySuggestionArgs
                        {
                            SummarySections = new[]
                            {
                                new Gcp.Diagflow.Inputs.GeneratorSummarizationContextFewShotExampleOutputSummarySuggestionSummarySectionArgs
                                {
                                    Section = "string",
                                    Summary = "string",
                                },
                            },
                        },
                    },
                    ConversationContext = new Gcp.Diagflow.Inputs.GeneratorSummarizationContextFewShotExampleConversationContextArgs
                    {
                        MessageEntries = new[]
                        {
                            new Gcp.Diagflow.Inputs.GeneratorSummarizationContextFewShotExampleConversationContextMessageEntryArgs
                            {
                                CreateTime = "string",
                                LanguageCode = "string",
                                Role = "string",
                                Text = "string",
                            },
                        },
                    },
                    ExtraInfo = 
                    {
                        { "string", "string" },
                    },
                    SummarizationSectionList = new Gcp.Diagflow.Inputs.GeneratorSummarizationContextFewShotExampleSummarizationSectionListArgs
                    {
                        SummarizationSections = new[]
                        {
                            new Gcp.Diagflow.Inputs.GeneratorSummarizationContextFewShotExampleSummarizationSectionListSummarizationSectionArgs
                            {
                                Definition = "string",
                                Key = "string",
                                Type = "string",
                            },
                        },
                    },
                },
            },
            OutputLanguageCode = "string",
            SummarizationSections = new[]
            {
                new Gcp.Diagflow.Inputs.GeneratorSummarizationContextSummarizationSectionArgs
                {
                    Definition = "string",
                    Key = "string",
                    Type = "string",
                },
            },
            Version = "string",
        },
        Description = "string",
        GeneratorId = "string",
        InferenceParameter = new Gcp.Diagflow.Inputs.GeneratorInferenceParameterArgs
        {
            MaxOutputTokens = 0,
            Temperature = 0,
            TopK = 0,
            TopP = 0,
        },
        Project = "string",
        PublishedModel = "string",
        TriggerEvent = "string",
    });
    
    example, err := diagflow.NewGenerator(ctx, "generatorResource", &diagflow.GeneratorArgs{
    	Location: pulumi.String("string"),
    	SummarizationContext: &diagflow.GeneratorSummarizationContextArgs{
    		FewShotExamples: diagflow.GeneratorSummarizationContextFewShotExampleArray{
    			&diagflow.GeneratorSummarizationContextFewShotExampleArgs{
    				Output: &diagflow.GeneratorSummarizationContextFewShotExampleOutputTypeArgs{
    					SummarySuggestion: &diagflow.GeneratorSummarizationContextFewShotExampleOutputSummarySuggestionArgs{
    						SummarySections: diagflow.GeneratorSummarizationContextFewShotExampleOutputSummarySuggestionSummarySectionArray{
    							&diagflow.GeneratorSummarizationContextFewShotExampleOutputSummarySuggestionSummarySectionArgs{
    								Section: pulumi.String("string"),
    								Summary: pulumi.String("string"),
    							},
    						},
    					},
    				},
    				ConversationContext: &diagflow.GeneratorSummarizationContextFewShotExampleConversationContextArgs{
    					MessageEntries: diagflow.GeneratorSummarizationContextFewShotExampleConversationContextMessageEntryArray{
    						&diagflow.GeneratorSummarizationContextFewShotExampleConversationContextMessageEntryArgs{
    							CreateTime:   pulumi.String("string"),
    							LanguageCode: pulumi.String("string"),
    							Role:         pulumi.String("string"),
    							Text:         pulumi.String("string"),
    						},
    					},
    				},
    				ExtraInfo: pulumi.StringMap{
    					"string": pulumi.String("string"),
    				},
    				SummarizationSectionList: &diagflow.GeneratorSummarizationContextFewShotExampleSummarizationSectionListArgs{
    					SummarizationSections: diagflow.GeneratorSummarizationContextFewShotExampleSummarizationSectionListSummarizationSectionArray{
    						&diagflow.GeneratorSummarizationContextFewShotExampleSummarizationSectionListSummarizationSectionArgs{
    							Definition: pulumi.String("string"),
    							Key:        pulumi.String("string"),
    							Type:       pulumi.String("string"),
    						},
    					},
    				},
    			},
    		},
    		OutputLanguageCode: pulumi.String("string"),
    		SummarizationSections: diagflow.GeneratorSummarizationContextSummarizationSectionArray{
    			&diagflow.GeneratorSummarizationContextSummarizationSectionArgs{
    				Definition: pulumi.String("string"),
    				Key:        pulumi.String("string"),
    				Type:       pulumi.String("string"),
    			},
    		},
    		Version: pulumi.String("string"),
    	},
    	Description: pulumi.String("string"),
    	GeneratorId: pulumi.String("string"),
    	InferenceParameter: &diagflow.GeneratorInferenceParameterArgs{
    		MaxOutputTokens: pulumi.Int(0),
    		Temperature:     pulumi.Float64(0),
    		TopK:            pulumi.Int(0),
    		TopP:            pulumi.Float64(0),
    	},
    	Project:        pulumi.String("string"),
    	PublishedModel: pulumi.String("string"),
    	TriggerEvent:   pulumi.String("string"),
    })
    
    var generatorResource = new Generator("generatorResource", GeneratorArgs.builder()
        .location("string")
        .summarizationContext(GeneratorSummarizationContextArgs.builder()
            .fewShotExamples(GeneratorSummarizationContextFewShotExampleArgs.builder()
                .output(GeneratorSummarizationContextFewShotExampleOutputArgs.builder()
                    .summarySuggestion(GeneratorSummarizationContextFewShotExampleOutputSummarySuggestionArgs.builder()
                        .summarySections(GeneratorSummarizationContextFewShotExampleOutputSummarySuggestionSummarySectionArgs.builder()
                            .section("string")
                            .summary("string")
                            .build())
                        .build())
                    .build())
                .conversationContext(GeneratorSummarizationContextFewShotExampleConversationContextArgs.builder()
                    .messageEntries(GeneratorSummarizationContextFewShotExampleConversationContextMessageEntryArgs.builder()
                        .createTime("string")
                        .languageCode("string")
                        .role("string")
                        .text("string")
                        .build())
                    .build())
                .extraInfo(Map.of("string", "string"))
                .summarizationSectionList(GeneratorSummarizationContextFewShotExampleSummarizationSectionListArgs.builder()
                    .summarizationSections(GeneratorSummarizationContextFewShotExampleSummarizationSectionListSummarizationSectionArgs.builder()
                        .definition("string")
                        .key("string")
                        .type("string")
                        .build())
                    .build())
                .build())
            .outputLanguageCode("string")
            .summarizationSections(GeneratorSummarizationContextSummarizationSectionArgs.builder()
                .definition("string")
                .key("string")
                .type("string")
                .build())
            .version("string")
            .build())
        .description("string")
        .generatorId("string")
        .inferenceParameter(GeneratorInferenceParameterArgs.builder()
            .maxOutputTokens(0)
            .temperature(0.0)
            .topK(0)
            .topP(0.0)
            .build())
        .project("string")
        .publishedModel("string")
        .triggerEvent("string")
        .build());
    
    generator_resource = gcp.diagflow.Generator("generatorResource",
        location="string",
        summarization_context={
            "few_shot_examples": [{
                "output": {
                    "summary_suggestion": {
                        "summary_sections": [{
                            "section": "string",
                            "summary": "string",
                        }],
                    },
                },
                "conversation_context": {
                    "message_entries": [{
                        "create_time": "string",
                        "language_code": "string",
                        "role": "string",
                        "text": "string",
                    }],
                },
                "extra_info": {
                    "string": "string",
                },
                "summarization_section_list": {
                    "summarization_sections": [{
                        "definition": "string",
                        "key": "string",
                        "type": "string",
                    }],
                },
            }],
            "output_language_code": "string",
            "summarization_sections": [{
                "definition": "string",
                "key": "string",
                "type": "string",
            }],
            "version": "string",
        },
        description="string",
        generator_id="string",
        inference_parameter={
            "max_output_tokens": 0,
            "temperature": 0,
            "top_k": 0,
            "top_p": 0,
        },
        project="string",
        published_model="string",
        trigger_event="string")
    
    const generatorResource = new gcp.diagflow.Generator("generatorResource", {
        location: "string",
        summarizationContext: {
            fewShotExamples: [{
                output: {
                    summarySuggestion: {
                        summarySections: [{
                            section: "string",
                            summary: "string",
                        }],
                    },
                },
                conversationContext: {
                    messageEntries: [{
                        createTime: "string",
                        languageCode: "string",
                        role: "string",
                        text: "string",
                    }],
                },
                extraInfo: {
                    string: "string",
                },
                summarizationSectionList: {
                    summarizationSections: [{
                        definition: "string",
                        key: "string",
                        type: "string",
                    }],
                },
            }],
            outputLanguageCode: "string",
            summarizationSections: [{
                definition: "string",
                key: "string",
                type: "string",
            }],
            version: "string",
        },
        description: "string",
        generatorId: "string",
        inferenceParameter: {
            maxOutputTokens: 0,
            temperature: 0,
            topK: 0,
            topP: 0,
        },
        project: "string",
        publishedModel: "string",
        triggerEvent: "string",
    });
    
    type: gcp:diagflow:Generator
    properties:
        description: string
        generatorId: string
        inferenceParameter:
            maxOutputTokens: 0
            temperature: 0
            topK: 0
            topP: 0
        location: string
        project: string
        publishedModel: string
        summarizationContext:
            fewShotExamples:
                - conversationContext:
                    messageEntries:
                        - createTime: string
                          languageCode: string
                          role: string
                          text: string
                  extraInfo:
                    string: string
                  output:
                    summarySuggestion:
                        summarySections:
                            - section: string
                              summary: string
                  summarizationSectionList:
                    summarizationSections:
                        - definition: string
                          key: string
                          type: string
            outputLanguageCode: string
            summarizationSections:
                - definition: string
                  key: string
                  type: string
            version: string
        triggerEvent: string
    

    Generator Resource Properties

    To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.

    Inputs

    In Python, inputs that are objects can be passed either as argument classes or as dictionary literals.

    The Generator resource accepts the following input properties:

    Location string
    The location of this generator.
    SummarizationContext GeneratorSummarizationContext
    Input of prebuilt Summarization feature. Structure is documented below.
    Description string
    Optional. Human readable description of the generator.
    GeneratorId string
    Optional. The ID to use for the generator, which will become the final component of the generator's resource name.
    InferenceParameter GeneratorInferenceParameter
    Optional. Inference parameters for this generator. Structure is documented below.
    Project string
    The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
    PublishedModel string
    Optional. The published Large Language Model name. * To use the latest model version, specify the model name without version number. Example: text-bison * To use a stable model version, specify the version number as well. Example: text-bison@002.
    TriggerEvent string
    Optional. The trigger event of the generator. It defines when the generator is triggered in a conversation. Possible values are: END_OF_UTTERANCE, MANUAL_CALL, CUSTOMER_MESSAGE, AGENT_MESSAGE.
    Location string
    The location of this generator.
    SummarizationContext GeneratorSummarizationContextArgs
    Input of prebuilt Summarization feature. Structure is documented below.
    Description string
    Optional. Human readable description of the generator.
    GeneratorId string
    Optional. The ID to use for the generator, which will become the final component of the generator's resource name.
    InferenceParameter GeneratorInferenceParameterArgs
    Optional. Inference parameters for this generator. Structure is documented below.
    Project string
    The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
    PublishedModel string
    Optional. The published Large Language Model name. * To use the latest model version, specify the model name without version number. Example: text-bison * To use a stable model version, specify the version number as well. Example: text-bison@002.
    TriggerEvent string
    Optional. The trigger event of the generator. It defines when the generator is triggered in a conversation. Possible values are: END_OF_UTTERANCE, MANUAL_CALL, CUSTOMER_MESSAGE, AGENT_MESSAGE.
    location String
    The location of this generator.
    summarizationContext GeneratorSummarizationContext
    Input of prebuilt Summarization feature. Structure is documented below.
    description String
    Optional. Human readable description of the generator.
    generatorId String
    Optional. The ID to use for the generator, which will become the final component of the generator's resource name.
    inferenceParameter GeneratorInferenceParameter
    Optional. Inference parameters for this generator. Structure is documented below.
    project String
    The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
    publishedModel String
    Optional. The published Large Language Model name. * To use the latest model version, specify the model name without version number. Example: text-bison * To use a stable model version, specify the version number as well. Example: text-bison@002.
    triggerEvent String
    Optional. The trigger event of the generator. It defines when the generator is triggered in a conversation. Possible values are: END_OF_UTTERANCE, MANUAL_CALL, CUSTOMER_MESSAGE, AGENT_MESSAGE.
    location string
    The location of this generator.
    summarizationContext GeneratorSummarizationContext
    Input of prebuilt Summarization feature. Structure is documented below.
    description string
    Optional. Human readable description of the generator.
    generatorId string
    Optional. The ID to use for the generator, which will become the final component of the generator's resource name.
    inferenceParameter GeneratorInferenceParameter
    Optional. Inference parameters for this generator. Structure is documented below.
    project string
    The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
    publishedModel string
    Optional. The published Large Language Model name. * To use the latest model version, specify the model name without version number. Example: text-bison * To use a stable model version, specify the version number as well. Example: text-bison@002.
    triggerEvent string
    Optional. The trigger event of the generator. It defines when the generator is triggered in a conversation. Possible values are: END_OF_UTTERANCE, MANUAL_CALL, CUSTOMER_MESSAGE, AGENT_MESSAGE.
    location str
    The location of this generator.
    summarization_context GeneratorSummarizationContextArgs
    Input of prebuilt Summarization feature. Structure is documented below.
    description str
    Optional. Human readable description of the generator.
    generator_id str
    Optional. The ID to use for the generator, which will become the final component of the generator's resource name.
    inference_parameter GeneratorInferenceParameterArgs
    Optional. Inference parameters for this generator. Structure is documented below.
    project str
    The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
    published_model str
    Optional. The published Large Language Model name. * To use the latest model version, specify the model name without version number. Example: text-bison * To use a stable model version, specify the version number as well. Example: text-bison@002.
    trigger_event str
    Optional. The trigger event of the generator. It defines when the generator is triggered in a conversation. Possible values are: END_OF_UTTERANCE, MANUAL_CALL, CUSTOMER_MESSAGE, AGENT_MESSAGE.
    location String
    The location of this generator.
    summarizationContext Property Map
    Input of prebuilt Summarization feature. Structure is documented below.
    description String
    Optional. Human readable description of the generator.
    generatorId String
    Optional. The ID to use for the generator, which will become the final component of the generator's resource name.
    inferenceParameter Property Map
    Optional. Inference parameters for this generator. Structure is documented below.
    project String
    The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
    publishedModel String
    Optional. The published Large Language Model name. * To use the latest model version, specify the model name without version number. Example: text-bison * To use a stable model version, specify the version number as well. Example: text-bison@002.
    triggerEvent String
    Optional. The trigger event of the generator. It defines when the generator is triggered in a conversation. Possible values are: END_OF_UTTERANCE, MANUAL_CALL, CUSTOMER_MESSAGE, AGENT_MESSAGE.

    Outputs

    All input properties are implicitly available as output properties. Additionally, the Generator resource produces the following output properties:

    Id string
    The provider-assigned unique ID for this managed resource.
    Name string
    The resource name of the generator.
    Id string
    The provider-assigned unique ID for this managed resource.
    Name string
    The resource name of the generator.
    id String
    The provider-assigned unique ID for this managed resource.
    name String
    The resource name of the generator.
    id string
    The provider-assigned unique ID for this managed resource.
    name string
    The resource name of the generator.
    id str
    The provider-assigned unique ID for this managed resource.
    name str
    The resource name of the generator.
    id String
    The provider-assigned unique ID for this managed resource.
    name String
    The resource name of the generator.

    Look up Existing Generator Resource

    Get an existing Generator resource’s state with the given name, ID, and optional extra properties used to qualify the lookup.

    public static get(name: string, id: Input<ID>, state?: GeneratorState, opts?: CustomResourceOptions): Generator
    @staticmethod
    def get(resource_name: str,
            id: str,
            opts: Optional[ResourceOptions] = None,
            description: Optional[str] = None,
            generator_id: Optional[str] = None,
            inference_parameter: Optional[GeneratorInferenceParameterArgs] = None,
            location: Optional[str] = None,
            name: Optional[str] = None,
            project: Optional[str] = None,
            published_model: Optional[str] = None,
            summarization_context: Optional[GeneratorSummarizationContextArgs] = None,
            trigger_event: Optional[str] = None) -> Generator
    func GetGenerator(ctx *Context, name string, id IDInput, state *GeneratorState, opts ...ResourceOption) (*Generator, error)
    public static Generator Get(string name, Input<string> id, GeneratorState? state, CustomResourceOptions? opts = null)
    public static Generator get(String name, Output<String> id, GeneratorState state, CustomResourceOptions options)
    resources:
      _:
        type: gcp:diagflow:Generator
        get:
          id: ${id}
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    resource_name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    The following state arguments are supported:
    Description string
    Optional. Human readable description of the generator.
    GeneratorId string
    Optional. The ID to use for the generator, which will become the final component of the generator's resource name.
    InferenceParameter GeneratorInferenceParameter
    Optional. Inference parameters for this generator. Structure is documented below.
    Location string
    The location of this generator.
    Name string
    The resource name of the generator.
    Project string
    The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
    PublishedModel string
    Optional. The published Large Language Model name. * To use the latest model version, specify the model name without version number. Example: text-bison * To use a stable model version, specify the version number as well. Example: text-bison@002.
    SummarizationContext GeneratorSummarizationContext
    Input of prebuilt Summarization feature. Structure is documented below.
    TriggerEvent string
    Optional. The trigger event of the generator. It defines when the generator is triggered in a conversation. Possible values are: END_OF_UTTERANCE, MANUAL_CALL, CUSTOMER_MESSAGE, AGENT_MESSAGE.
    Description string
    Optional. Human readable description of the generator.
    GeneratorId string
    Optional. The ID to use for the generator, which will become the final component of the generator's resource name.
    InferenceParameter GeneratorInferenceParameterArgs
    Optional. Inference parameters for this generator. Structure is documented below.
    Location string
    The location of this generator.
    Name string
    The resource name of the generator.
    Project string
    The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
    PublishedModel string
    Optional. The published Large Language Model name. * To use the latest model version, specify the model name without version number. Example: text-bison * To use a stable model version, specify the version number as well. Example: text-bison@002.
    SummarizationContext GeneratorSummarizationContextArgs
    Input of prebuilt Summarization feature. Structure is documented below.
    TriggerEvent string
    Optional. The trigger event of the generator. It defines when the generator is triggered in a conversation. Possible values are: END_OF_UTTERANCE, MANUAL_CALL, CUSTOMER_MESSAGE, AGENT_MESSAGE.
    description String
    Optional. Human readable description of the generator.
    generatorId String
    Optional. The ID to use for the generator, which will become the final component of the generator's resource name.
    inferenceParameter GeneratorInferenceParameter
    Optional. Inference parameters for this generator. Structure is documented below.
    location String
    The location of this generator.
    name String
    The resource name of the generator.
    project String
    The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
    publishedModel String
    Optional. The published Large Language Model name. * To use the latest model version, specify the model name without version number. Example: text-bison * To use a stable model version, specify the version number as well. Example: text-bison@002.
    summarizationContext GeneratorSummarizationContext
    Input of prebuilt Summarization feature. Structure is documented below.
    triggerEvent String
    Optional. The trigger event of the generator. It defines when the generator is triggered in a conversation. Possible values are: END_OF_UTTERANCE, MANUAL_CALL, CUSTOMER_MESSAGE, AGENT_MESSAGE.
    description string
    Optional. Human readable description of the generator.
    generatorId string
    Optional. The ID to use for the generator, which will become the final component of the generator's resource name.
    inferenceParameter GeneratorInferenceParameter
    Optional. Inference parameters for this generator. Structure is documented below.
    location string
    The location of this generator.
    name string
    The resource name of the generator.
    project string
    The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
    publishedModel string
    Optional. The published Large Language Model name. * To use the latest model version, specify the model name without version number. Example: text-bison * To use a stable model version, specify the version number as well. Example: text-bison@002.
    summarizationContext GeneratorSummarizationContext
    Input of prebuilt Summarization feature. Structure is documented below.
    triggerEvent string
    Optional. The trigger event of the generator. It defines when the generator is triggered in a conversation. Possible values are: END_OF_UTTERANCE, MANUAL_CALL, CUSTOMER_MESSAGE, AGENT_MESSAGE.
    description str
    Optional. Human readable description of the generator.
    generator_id str
    Optional. The ID to use for the generator, which will become the final component of the generator's resource name.
    inference_parameter GeneratorInferenceParameterArgs
    Optional. Inference parameters for this generator. Structure is documented below.
    location str
    The location of this generator.
    name str
    The resource name of the generator.
    project str
    The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
    published_model str
    Optional. The published Large Language Model name. * To use the latest model version, specify the model name without version number. Example: text-bison * To use a stable model version, specify the version number as well. Example: text-bison@002.
    summarization_context GeneratorSummarizationContextArgs
    Input of prebuilt Summarization feature. Structure is documented below.
    trigger_event str
    Optional. The trigger event of the generator. It defines when the generator is triggered in a conversation. Possible values are: END_OF_UTTERANCE, MANUAL_CALL, CUSTOMER_MESSAGE, AGENT_MESSAGE.
    description String
    Optional. Human readable description of the generator.
    generatorId String
    Optional. The ID to use for the generator, which will become the final component of the generator's resource name.
    inferenceParameter Property Map
    Optional. Inference parameters for this generator. Structure is documented below.
    location String
    The location of this generator.
    name String
    The resource name of the generator.
    project String
    The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
    publishedModel String
    Optional. The published Large Language Model name. * To use the latest model version, specify the model name without version number. Example: text-bison * To use a stable model version, specify the version number as well. Example: text-bison@002.
    summarizationContext Property Map
    Input of prebuilt Summarization feature. Structure is documented below.
    triggerEvent String
    Optional. The trigger event of the generator. It defines when the generator is triggered in a conversation. Possible values are: END_OF_UTTERANCE, MANUAL_CALL, CUSTOMER_MESSAGE, AGENT_MESSAGE.

    Supporting Types

    GeneratorInferenceParameter, GeneratorInferenceParameterArgs

    MaxOutputTokens int
    Optional. Maximum number of the output tokens for the generator.
    Temperature double
    Optional. Controls the randomness of LLM predictions. Low temperature = less random. High temperature = more random. If unset (or 0), uses a default value of 0.
    TopK int
    Optional. Top-k changes how the model selects tokens for output. A top-k of 1 means the selected token is the most probable among all tokens in the model's vocabulary (also called greedy decoding), while a top-k of 3 means that the next token is selected from among the 3 most probable tokens (using temperature). For each token selection step, the top K tokens with the highest probabilities are sampled. Then tokens are further filtered based on topP with the final token selected using temperature sampling. Specify a lower value for less random responses and a higher value for more random responses. Acceptable value is [1, 40], default to 40.
    TopP double
    Optional. Top-p changes how the model selects tokens for output. Tokens are selected from most K (see topK parameter) probable to least until the sum of their probabilities equals the top-p value. For example, if tokens A, B, and C have a probability of 0.3, 0.2, and 0.1 and the top-p value is 0.5, then the model will select either A or B as the next token (using temperature) and doesn't consider C. The default top-p value is 0.95. Specify a lower value for less random responses and a higher value for more random responses. Acceptable value is [0.0, 1.0], default to 0.95.
    MaxOutputTokens int
    Optional. Maximum number of the output tokens for the generator.
    Temperature float64
    Optional. Controls the randomness of LLM predictions. Low temperature = less random. High temperature = more random. If unset (or 0), uses a default value of 0.
    TopK int
    Optional. Top-k changes how the model selects tokens for output. A top-k of 1 means the selected token is the most probable among all tokens in the model's vocabulary (also called greedy decoding), while a top-k of 3 means that the next token is selected from among the 3 most probable tokens (using temperature). For each token selection step, the top K tokens with the highest probabilities are sampled. Then tokens are further filtered based on topP with the final token selected using temperature sampling. Specify a lower value for less random responses and a higher value for more random responses. Acceptable value is [1, 40], default to 40.
    TopP float64
    Optional. Top-p changes how the model selects tokens for output. Tokens are selected from most K (see topK parameter) probable to least until the sum of their probabilities equals the top-p value. For example, if tokens A, B, and C have a probability of 0.3, 0.2, and 0.1 and the top-p value is 0.5, then the model will select either A or B as the next token (using temperature) and doesn't consider C. The default top-p value is 0.95. Specify a lower value for less random responses and a higher value for more random responses. Acceptable value is [0.0, 1.0], default to 0.95.
    maxOutputTokens Integer
    Optional. Maximum number of the output tokens for the generator.
    temperature Double
    Optional. Controls the randomness of LLM predictions. Low temperature = less random. High temperature = more random. If unset (or 0), uses a default value of 0.
    topK Integer
    Optional. Top-k changes how the model selects tokens for output. A top-k of 1 means the selected token is the most probable among all tokens in the model's vocabulary (also called greedy decoding), while a top-k of 3 means that the next token is selected from among the 3 most probable tokens (using temperature). For each token selection step, the top K tokens with the highest probabilities are sampled. Then tokens are further filtered based on topP with the final token selected using temperature sampling. Specify a lower value for less random responses and a higher value for more random responses. Acceptable value is [1, 40], default to 40.
    topP Double
    Optional. Top-p changes how the model selects tokens for output. Tokens are selected from most K (see topK parameter) probable to least until the sum of their probabilities equals the top-p value. For example, if tokens A, B, and C have a probability of 0.3, 0.2, and 0.1 and the top-p value is 0.5, then the model will select either A or B as the next token (using temperature) and doesn't consider C. The default top-p value is 0.95. Specify a lower value for less random responses and a higher value for more random responses. Acceptable value is [0.0, 1.0], default to 0.95.
    maxOutputTokens number
    Optional. Maximum number of the output tokens for the generator.
    temperature number
    Optional. Controls the randomness of LLM predictions. Low temperature = less random. High temperature = more random. If unset (or 0), uses a default value of 0.
    topK number
    Optional. Top-k changes how the model selects tokens for output. A top-k of 1 means the selected token is the most probable among all tokens in the model's vocabulary (also called greedy decoding), while a top-k of 3 means that the next token is selected from among the 3 most probable tokens (using temperature). For each token selection step, the top K tokens with the highest probabilities are sampled. Then tokens are further filtered based on topP with the final token selected using temperature sampling. Specify a lower value for less random responses and a higher value for more random responses. Acceptable value is [1, 40], default to 40.
    topP number
    Optional. Top-p changes how the model selects tokens for output. Tokens are selected from most K (see topK parameter) probable to least until the sum of their probabilities equals the top-p value. For example, if tokens A, B, and C have a probability of 0.3, 0.2, and 0.1 and the top-p value is 0.5, then the model will select either A or B as the next token (using temperature) and doesn't consider C. The default top-p value is 0.95. Specify a lower value for less random responses and a higher value for more random responses. Acceptable value is [0.0, 1.0], default to 0.95.
    max_output_tokens int
    Optional. Maximum number of the output tokens for the generator.
    temperature float
    Optional. Controls the randomness of LLM predictions. Low temperature = less random. High temperature = more random. If unset (or 0), uses a default value of 0.
    top_k int
    Optional. Top-k changes how the model selects tokens for output. A top-k of 1 means the selected token is the most probable among all tokens in the model's vocabulary (also called greedy decoding), while a top-k of 3 means that the next token is selected from among the 3 most probable tokens (using temperature). For each token selection step, the top K tokens with the highest probabilities are sampled. Then tokens are further filtered based on topP with the final token selected using temperature sampling. Specify a lower value for less random responses and a higher value for more random responses. Acceptable value is [1, 40], default to 40.
    top_p float
    Optional. Top-p changes how the model selects tokens for output. Tokens are selected from most K (see topK parameter) probable to least until the sum of their probabilities equals the top-p value. For example, if tokens A, B, and C have a probability of 0.3, 0.2, and 0.1 and the top-p value is 0.5, then the model will select either A or B as the next token (using temperature) and doesn't consider C. The default top-p value is 0.95. Specify a lower value for less random responses and a higher value for more random responses. Acceptable value is [0.0, 1.0], default to 0.95.
    maxOutputTokens Number
    Optional. Maximum number of the output tokens for the generator.
    temperature Number
    Optional. Controls the randomness of LLM predictions. Low temperature = less random. High temperature = more random. If unset (or 0), uses a default value of 0.
    topK Number
    Optional. Top-k changes how the model selects tokens for output. A top-k of 1 means the selected token is the most probable among all tokens in the model's vocabulary (also called greedy decoding), while a top-k of 3 means that the next token is selected from among the 3 most probable tokens (using temperature). For each token selection step, the top K tokens with the highest probabilities are sampled. Then tokens are further filtered based on topP with the final token selected using temperature sampling. Specify a lower value for less random responses and a higher value for more random responses. Acceptable value is [1, 40], default to 40.
    topP Number
    Optional. Top-p changes how the model selects tokens for output. Tokens are selected from most K (see topK parameter) probable to least until the sum of their probabilities equals the top-p value. For example, if tokens A, B, and C have a probability of 0.3, 0.2, and 0.1 and the top-p value is 0.5, then the model will select either A or B as the next token (using temperature) and doesn't consider C. The default top-p value is 0.95. Specify a lower value for less random responses and a higher value for more random responses. Acceptable value is [0.0, 1.0], default to 0.95.

    GeneratorSummarizationContext, GeneratorSummarizationContextArgs

    FewShotExamples List<GeneratorSummarizationContextFewShotExample>
    Optional. List of few shot examples. Structure is documented below.
    OutputLanguageCode string
    Optional. The target language of the generated summary. The language code for conversation will be used if this field is empty. Supported 2.0 and later versions.
    SummarizationSections List<GeneratorSummarizationContextSummarizationSection>
    Optional. List of sections. Note it contains both predefined sections and customer-defined sections. Structure is documented below.
    Version string
    Optional. Version of the feature. If not set, default to latest version. Current candidates are ["1.0"].
    FewShotExamples []GeneratorSummarizationContextFewShotExample
    Optional. List of few shot examples. Structure is documented below.
    OutputLanguageCode string
    Optional. The target language of the generated summary. The language code for conversation will be used if this field is empty. Supported 2.0 and later versions.
    SummarizationSections []GeneratorSummarizationContextSummarizationSection
    Optional. List of sections. Note it contains both predefined sections and customer-defined sections. Structure is documented below.
    Version string
    Optional. Version of the feature. If not set, default to latest version. Current candidates are ["1.0"].
    fewShotExamples List<GeneratorSummarizationContextFewShotExample>
    Optional. List of few shot examples. Structure is documented below.
    outputLanguageCode String
    Optional. The target language of the generated summary. The language code for conversation will be used if this field is empty. Supported 2.0 and later versions.
    summarizationSections List<GeneratorSummarizationContextSummarizationSection>
    Optional. List of sections. Note it contains both predefined sections and customer-defined sections. Structure is documented below.
    version String
    Optional. Version of the feature. If not set, default to latest version. Current candidates are ["1.0"].
    fewShotExamples GeneratorSummarizationContextFewShotExample[]
    Optional. List of few shot examples. Structure is documented below.
    outputLanguageCode string
    Optional. The target language of the generated summary. The language code for conversation will be used if this field is empty. Supported 2.0 and later versions.
    summarizationSections GeneratorSummarizationContextSummarizationSection[]
    Optional. List of sections. Note it contains both predefined sections and customer-defined sections. Structure is documented below.
    version string
    Optional. Version of the feature. If not set, default to latest version. Current candidates are ["1.0"].
    few_shot_examples Sequence[GeneratorSummarizationContextFewShotExample]
    Optional. List of few shot examples. Structure is documented below.
    output_language_code str
    Optional. The target language of the generated summary. The language code for conversation will be used if this field is empty. Supported 2.0 and later versions.
    summarization_sections Sequence[GeneratorSummarizationContextSummarizationSection]
    Optional. List of sections. Note it contains both predefined sections and customer-defined sections. Structure is documented below.
    version str
    Optional. Version of the feature. If not set, default to latest version. Current candidates are ["1.0"].
    fewShotExamples List<Property Map>
    Optional. List of few shot examples. Structure is documented below.
    outputLanguageCode String
    Optional. The target language of the generated summary. The language code for conversation will be used if this field is empty. Supported 2.0 and later versions.
    summarizationSections List<Property Map>
    Optional. List of sections. Note it contains both predefined sections and customer-defined sections. Structure is documented below.
    version String
    Optional. Version of the feature. If not set, default to latest version. Current candidates are ["1.0"].

    GeneratorSummarizationContextFewShotExample, GeneratorSummarizationContextFewShotExampleArgs

    Output GeneratorSummarizationContextFewShotExampleOutput
    Required. Example output of the model. Structure is documented below.
    ConversationContext GeneratorSummarizationContextFewShotExampleConversationContext
    Optional. Conversation transcripts. Structure is documented below.
    ExtraInfo Dictionary<string, string>
    Optional. Key is the placeholder field name in input, value is the value of the placeholder. E.g. instruction contains "@price", and ingested data has <"price", "10">
    SummarizationSectionList GeneratorSummarizationContextFewShotExampleSummarizationSectionList
    Summarization sections. Structure is documented below.
    Output GeneratorSummarizationContextFewShotExampleOutputType
    Required. Example output of the model. Structure is documented below.
    ConversationContext GeneratorSummarizationContextFewShotExampleConversationContext
    Optional. Conversation transcripts. Structure is documented below.
    ExtraInfo map[string]string
    Optional. Key is the placeholder field name in input, value is the value of the placeholder. E.g. instruction contains "@price", and ingested data has <"price", "10">
    SummarizationSectionList GeneratorSummarizationContextFewShotExampleSummarizationSectionList
    Summarization sections. Structure is documented below.
    output GeneratorSummarizationContextFewShotExampleOutput
    Required. Example output of the model. Structure is documented below.
    conversationContext GeneratorSummarizationContextFewShotExampleConversationContext
    Optional. Conversation transcripts. Structure is documented below.
    extraInfo Map<String,String>
    Optional. Key is the placeholder field name in input, value is the value of the placeholder. E.g. instruction contains "@price", and ingested data has <"price", "10">
    summarizationSectionList GeneratorSummarizationContextFewShotExampleSummarizationSectionList
    Summarization sections. Structure is documented below.
    output GeneratorSummarizationContextFewShotExampleOutput
    Required. Example output of the model. Structure is documented below.
    conversationContext GeneratorSummarizationContextFewShotExampleConversationContext
    Optional. Conversation transcripts. Structure is documented below.
    extraInfo {[key: string]: string}
    Optional. Key is the placeholder field name in input, value is the value of the placeholder. E.g. instruction contains "@price", and ingested data has <"price", "10">
    summarizationSectionList GeneratorSummarizationContextFewShotExampleSummarizationSectionList
    Summarization sections. Structure is documented below.
    output GeneratorSummarizationContextFewShotExampleOutput
    Required. Example output of the model. Structure is documented below.
    conversation_context GeneratorSummarizationContextFewShotExampleConversationContext
    Optional. Conversation transcripts. Structure is documented below.
    extra_info Mapping[str, str]
    Optional. Key is the placeholder field name in input, value is the value of the placeholder. E.g. instruction contains "@price", and ingested data has <"price", "10">
    summarization_section_list GeneratorSummarizationContextFewShotExampleSummarizationSectionList
    Summarization sections. Structure is documented below.
    output Property Map
    Required. Example output of the model. Structure is documented below.
    conversationContext Property Map
    Optional. Conversation transcripts. Structure is documented below.
    extraInfo Map<String>
    Optional. Key is the placeholder field name in input, value is the value of the placeholder. E.g. instruction contains "@price", and ingested data has <"price", "10">
    summarizationSectionList Property Map
    Summarization sections. Structure is documented below.

    GeneratorSummarizationContextFewShotExampleConversationContext, GeneratorSummarizationContextFewShotExampleConversationContextArgs

    MessageEntries List<GeneratorSummarizationContextFewShotExampleConversationContextMessageEntry>
    Optional. List of message transcripts in the conversation. Structure is documented below.
    MessageEntries []GeneratorSummarizationContextFewShotExampleConversationContextMessageEntry
    Optional. List of message transcripts in the conversation. Structure is documented below.
    messageEntries List<GeneratorSummarizationContextFewShotExampleConversationContextMessageEntry>
    Optional. List of message transcripts in the conversation. Structure is documented below.
    messageEntries GeneratorSummarizationContextFewShotExampleConversationContextMessageEntry[]
    Optional. List of message transcripts in the conversation. Structure is documented below.
    message_entries Sequence[GeneratorSummarizationContextFewShotExampleConversationContextMessageEntry]
    Optional. List of message transcripts in the conversation. Structure is documented below.
    messageEntries List<Property Map>
    Optional. List of message transcripts in the conversation. Structure is documented below.

    GeneratorSummarizationContextFewShotExampleConversationContextMessageEntry, GeneratorSummarizationContextFewShotExampleConversationContextMessageEntryArgs

    CreateTime string
    Optional. Create time of the message entry.
    LanguageCode string
    Optional. The language of the text.
    Role string
    Optional. Participant role of the message. Possible values are: HUMAN_AGENT, AUTOMATED_AGENT, END_USER.
    Text string
    Optional. Transcript content of the message.
    CreateTime string
    Optional. Create time of the message entry.
    LanguageCode string
    Optional. The language of the text.
    Role string
    Optional. Participant role of the message. Possible values are: HUMAN_AGENT, AUTOMATED_AGENT, END_USER.
    Text string
    Optional. Transcript content of the message.
    createTime String
    Optional. Create time of the message entry.
    languageCode String
    Optional. The language of the text.
    role String
    Optional. Participant role of the message. Possible values are: HUMAN_AGENT, AUTOMATED_AGENT, END_USER.
    text String
    Optional. Transcript content of the message.
    createTime string
    Optional. Create time of the message entry.
    languageCode string
    Optional. The language of the text.
    role string
    Optional. Participant role of the message. Possible values are: HUMAN_AGENT, AUTOMATED_AGENT, END_USER.
    text string
    Optional. Transcript content of the message.
    create_time str
    Optional. Create time of the message entry.
    language_code str
    Optional. The language of the text.
    role str
    Optional. Participant role of the message. Possible values are: HUMAN_AGENT, AUTOMATED_AGENT, END_USER.
    text str
    Optional. Transcript content of the message.
    createTime String
    Optional. Create time of the message entry.
    languageCode String
    Optional. The language of the text.
    role String
    Optional. Participant role of the message. Possible values are: HUMAN_AGENT, AUTOMATED_AGENT, END_USER.
    text String
    Optional. Transcript content of the message.

    GeneratorSummarizationContextFewShotExampleOutput, GeneratorSummarizationContextFewShotExampleOutputArgs

    SummarySuggestion GeneratorSummarizationContextFewShotExampleOutputSummarySuggestion
    Optional. Suggested summary. Structure is documented below.
    SummarySuggestion GeneratorSummarizationContextFewShotExampleOutputSummarySuggestion
    Optional. Suggested summary. Structure is documented below.
    summarySuggestion GeneratorSummarizationContextFewShotExampleOutputSummarySuggestion
    Optional. Suggested summary. Structure is documented below.
    summarySuggestion GeneratorSummarizationContextFewShotExampleOutputSummarySuggestion
    Optional. Suggested summary. Structure is documented below.
    summary_suggestion GeneratorSummarizationContextFewShotExampleOutputSummarySuggestion
    Optional. Suggested summary. Structure is documented below.
    summarySuggestion Property Map
    Optional. Suggested summary. Structure is documented below.

    GeneratorSummarizationContextFewShotExampleOutputSummarySuggestion, GeneratorSummarizationContextFewShotExampleOutputSummarySuggestionArgs

    SummarySections List<GeneratorSummarizationContextFewShotExampleOutputSummarySuggestionSummarySection>
    Required. All the parts of generated summary. Structure is documented below.
    SummarySections []GeneratorSummarizationContextFewShotExampleOutputSummarySuggestionSummarySection
    Required. All the parts of generated summary. Structure is documented below.
    summarySections List<GeneratorSummarizationContextFewShotExampleOutputSummarySuggestionSummarySection>
    Required. All the parts of generated summary. Structure is documented below.
    summarySections GeneratorSummarizationContextFewShotExampleOutputSummarySuggestionSummarySection[]
    Required. All the parts of generated summary. Structure is documented below.
    summary_sections Sequence[GeneratorSummarizationContextFewShotExampleOutputSummarySuggestionSummarySection]
    Required. All the parts of generated summary. Structure is documented below.
    summarySections List<Property Map>
    Required. All the parts of generated summary. Structure is documented below.

    GeneratorSummarizationContextFewShotExampleOutputSummarySuggestionSummarySection, GeneratorSummarizationContextFewShotExampleOutputSummarySuggestionSummarySectionArgs

    Section string
    Required. Name of the section.
    Summary string
    Required. Summary text for the section.
    Section string
    Required. Name of the section.
    Summary string
    Required. Summary text for the section.
    section String
    Required. Name of the section.
    summary String
    Required. Summary text for the section.
    section string
    Required. Name of the section.
    summary string
    Required. Summary text for the section.
    section str
    Required. Name of the section.
    summary str
    Required. Summary text for the section.
    section String
    Required. Name of the section.
    summary String
    Required. Summary text for the section.

    GeneratorSummarizationContextFewShotExampleSummarizationSectionList, GeneratorSummarizationContextFewShotExampleSummarizationSectionListArgs

    summarizationSections List<Property Map>
    Optional. Summarization sections. Structure is documented below.

    GeneratorSummarizationContextFewShotExampleSummarizationSectionListSummarizationSection, GeneratorSummarizationContextFewShotExampleSummarizationSectionListSummarizationSectionArgs

    Definition string
    Optional. Definition of the section, for example, "what the customer needs help with or has question about."
    Key string
    Optional. Name of the section, for example, "situation".
    Type string
    Optional. Type of the summarization section. Possible values are: SITUATION, ACTION, RESOLUTION, REASON_FOR_CANCELLATION, CUSTOMER_SATISFACTION, ENTITIES, CUSTOMER_DEFINED, SITUATION_CONCISE, ACTION_CONCISE.
    Definition string
    Optional. Definition of the section, for example, "what the customer needs help with or has a question about."
    Key string
    Optional. Name of the section, for example, "situation".
    Type string
    Optional. Type of the summarization section. Possible values are: SITUATION, ACTION, RESOLUTION, REASON_FOR_CANCELLATION, CUSTOMER_SATISFACTION, ENTITIES, CUSTOMER_DEFINED, SITUATION_CONCISE, ACTION_CONCISE.
    definition String
    Optional. Definition of the section, for example, "what the customer needs help with or has a question about."
    key String
    Optional. Name of the section, for example, "situation".
    type String
    Optional. Type of the summarization section. Possible values are: SITUATION, ACTION, RESOLUTION, REASON_FOR_CANCELLATION, CUSTOMER_SATISFACTION, ENTITIES, CUSTOMER_DEFINED, SITUATION_CONCISE, ACTION_CONCISE.
    definition string
    Optional. Definition of the section, for example, "what the customer needs help with or has a question about."
    key string
    Optional. Name of the section, for example, "situation".
    type string
    Optional. Type of the summarization section. Possible values are: SITUATION, ACTION, RESOLUTION, REASON_FOR_CANCELLATION, CUSTOMER_SATISFACTION, ENTITIES, CUSTOMER_DEFINED, SITUATION_CONCISE, ACTION_CONCISE.
    definition str
    Optional. Definition of the section, for example, "what the customer needs help with or has a question about."
    key str
    Optional. Name of the section, for example, "situation".
    type str
    Optional. Type of the summarization section. Possible values are: SITUATION, ACTION, RESOLUTION, REASON_FOR_CANCELLATION, CUSTOMER_SATISFACTION, ENTITIES, CUSTOMER_DEFINED, SITUATION_CONCISE, ACTION_CONCISE.
    definition String
    Optional. Definition of the section, for example, "what the customer needs help with or has a question about."
    key String
    Optional. Name of the section, for example, "situation".
    type String
    Optional. Type of the summarization section. Possible values are: SITUATION, ACTION, RESOLUTION, REASON_FOR_CANCELLATION, CUSTOMER_SATISFACTION, ENTITIES, CUSTOMER_DEFINED, SITUATION_CONCISE, ACTION_CONCISE.

    GeneratorSummarizationContextSummarizationSection, GeneratorSummarizationContextSummarizationSectionArgs

    Definition string
    Optional. Definition of the section, for example, "what the customer needs help with or has a question about."
    Key string
    Optional. Name of the section, for example, "situation".
    Type string
    Optional. Type of the summarization section. Possible values are: SITUATION, ACTION, RESOLUTION, REASON_FOR_CANCELLATION, CUSTOMER_SATISFACTION, ENTITIES, CUSTOMER_DEFINED, SITUATION_CONCISE, ACTION_CONCISE.
    Definition string
    Optional. Definition of the section, for example, "what the customer needs help with or has a question about."
    Key string
    Optional. Name of the section, for example, "situation".
    Type string
    Optional. Type of the summarization section. Possible values are: SITUATION, ACTION, RESOLUTION, REASON_FOR_CANCELLATION, CUSTOMER_SATISFACTION, ENTITIES, CUSTOMER_DEFINED, SITUATION_CONCISE, ACTION_CONCISE.
    definition String
    Optional. Definition of the section, for example, "what the customer needs help with or has a question about."
    key String
    Optional. Name of the section, for example, "situation".
    type String
    Optional. Type of the summarization section. Possible values are: SITUATION, ACTION, RESOLUTION, REASON_FOR_CANCELLATION, CUSTOMER_SATISFACTION, ENTITIES, CUSTOMER_DEFINED, SITUATION_CONCISE, ACTION_CONCISE.
    definition string
    Optional. Definition of the section, for example, "what the customer needs help with or has a question about."
    key string
    Optional. Name of the section, for example, "situation".
    type string
    Optional. Type of the summarization section. Possible values are: SITUATION, ACTION, RESOLUTION, REASON_FOR_CANCELLATION, CUSTOMER_SATISFACTION, ENTITIES, CUSTOMER_DEFINED, SITUATION_CONCISE, ACTION_CONCISE.
    definition str
    Optional. Definition of the section, for example, "what the customer needs help with or has a question about."
    key str
    Optional. Name of the section, for example, "situation".
    type str
    Optional. Type of the summarization section. Possible values are: SITUATION, ACTION, RESOLUTION, REASON_FOR_CANCELLATION, CUSTOMER_SATISFACTION, ENTITIES, CUSTOMER_DEFINED, SITUATION_CONCISE, ACTION_CONCISE.
    definition String
    Optional. Definition of the section, for example, "what the customer needs help with or has a question about."
    key String
    Optional. Name of the section, for example, "situation".
    type String
    Optional. Type of the summarization section. Possible values are: SITUATION, ACTION, RESOLUTION, REASON_FOR_CANCELLATION, CUSTOMER_SATISFACTION, ENTITIES, CUSTOMER_DEFINED, SITUATION_CONCISE, ACTION_CONCISE.

    Import

    Generator can be imported using any of these accepted formats:

    • projects/{{project}}/locations/{{location}}/generators/{{name}}

    • {{project}}/{{location}}/{{name}}

    • {{location}}/{{name}}

    When using the pulumi import command, Generator can be imported using one of the formats above. For example:

    $ pulumi import gcp:diagflow/generator:Generator default projects/{{project}}/locations/{{location}}/generators/{{name}}
    
    $ pulumi import gcp:diagflow/generator:Generator default {{project}}/{{location}}/{{name}}
    
    $ pulumi import gcp:diagflow/generator:Generator default {{location}}/{{name}}
    

    To learn more about importing existing cloud resources, see Importing resources.

    Package Details

    Repository
    Google Cloud (GCP) Classic pulumi/pulumi-gcp
    License
    Apache-2.0
    Notes
    This Pulumi package is based on the google-beta Terraform Provider.
    gcp logo
    Google Cloud v9.7.0 published on Wednesday, Dec 24, 2025 by Pulumi
      Meet Neo: Your AI Platform Teammate