1. Packages
  2. Konnect Provider
  3. API Docs
  4. GatewayPluginAiLlmAsJudge
konnect 3.4.1 published on Wednesday, Oct 29, 2025 by kong

konnect.GatewayPluginAiLlmAsJudge

Get Started
konnect logo
konnect 3.4.1 published on Wednesday, Oct 29, 2025 by kong

    GatewayPluginAiLlmAsJudge Resource

    Example Usage

    Example coming soon!
    
    Example coming soon!
    
    Example coming soon!
    
    Example coming soon!
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.konnect.GatewayPluginAiLlmAsJudge;
    import com.pulumi.konnect.GatewayPluginAiLlmAsJudgeArgs;
    import com.pulumi.konnect.inputs.GatewayPluginAiLlmAsJudgeConfigArgs;
    import com.pulumi.konnect.inputs.GatewayPluginAiLlmAsJudgeConfigLlmArgs;
    import com.pulumi.konnect.inputs.GatewayPluginAiLlmAsJudgeConfigLlmAuthArgs;
    import com.pulumi.konnect.inputs.GatewayPluginAiLlmAsJudgeConfigLlmLoggingArgs;
    import com.pulumi.konnect.inputs.GatewayPluginAiLlmAsJudgeConfigLlmModelArgs;
    import com.pulumi.konnect.inputs.GatewayPluginAiLlmAsJudgeConfigLlmModelOptionsArgs;
    import com.pulumi.konnect.inputs.GatewayPluginAiLlmAsJudgeConfigLlmModelOptionsBedrockArgs;
    import com.pulumi.konnect.inputs.GatewayPluginAiLlmAsJudgeConfigLlmModelOptionsCohereArgs;
    import com.pulumi.konnect.inputs.GatewayPluginAiLlmAsJudgeConfigLlmModelOptionsGeminiArgs;
    import com.pulumi.konnect.inputs.GatewayPluginAiLlmAsJudgeConfigLlmModelOptionsHuggingfaceArgs;
    import com.pulumi.konnect.inputs.GatewayPluginAiLlmAsJudgeConsumerArgs;
    import com.pulumi.konnect.inputs.GatewayPluginAiLlmAsJudgeConsumerGroupArgs;
    import com.pulumi.konnect.inputs.GatewayPluginAiLlmAsJudgeOrderingArgs;
    import com.pulumi.konnect.inputs.GatewayPluginAiLlmAsJudgeOrderingAfterArgs;
    import com.pulumi.konnect.inputs.GatewayPluginAiLlmAsJudgeOrderingBeforeArgs;
    import com.pulumi.konnect.inputs.GatewayPluginAiLlmAsJudgePartialArgs;
    import com.pulumi.konnect.inputs.GatewayPluginAiLlmAsJudgeRouteArgs;
    import com.pulumi.konnect.inputs.GatewayPluginAiLlmAsJudgeServiceArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    // Example: create a GatewayPluginAiLlmAsJudge resource with every input set to a
    // representative placeholder value.
    //
    // NOTE: the Pulumi Java SDK generates camelCase builder setters (e.g. httpProxyHost),
    // matching the canonical constructor example below. The snake_case method names from
    // the upstream Terraform example (http_proxy_host, message_countback, ...) do not
    // exist on the generated builders and would not compile, so they are corrected here.
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }

        public static void stack(Context ctx) {
            var myGatewaypluginaillmasjudge = new GatewayPluginAiLlmAsJudge("myGatewaypluginaillmasjudge", GatewayPluginAiLlmAsJudgeArgs.builder()
                .config(GatewayPluginAiLlmAsJudgeConfigArgs.builder()
                    .httpProxyHost("...my_http_proxy_host...")
                    .httpProxyPort(8774)
                    .httpTimeout(0)
                    .httpsProxyHost("...my_https_proxy_host...")
                    .httpsProxyPort(29092)
                    .httpsVerify(false)
                    .ignoreAssistantPrompts(true)
                    .ignoreSystemPrompts(true)
                    .ignoreToolPrompts(true)
                    .llm(GatewayPluginAiLlmAsJudgeConfigLlmArgs.builder()
                        .auth(GatewayPluginAiLlmAsJudgeConfigLlmAuthArgs.builder()
                            .allowOverride(false)
                            .awsAccessKeyId("...my_aws_access_key_id...")
                            .awsSecretAccessKey("...my_aws_secret_access_key...")
                            .azureClientId("...my_azure_client_id...")
                            .azureClientSecret("...my_azure_client_secret...")
                            .azureTenantId("...my_azure_tenant_id...")
                            .azureUseManagedIdentity(true)
                            .gcpServiceAccountJson("...my_gcp_service_account_json...")
                            .gcpUseServiceAccount(false)
                            .headerName("...my_header_name...")
                            .headerValue("...my_header_value...")
                            .paramLocation("query")
                            .paramName("...my_param_name...")
                            .paramValue("...my_param_value...")
                            .build())
                        .logging(GatewayPluginAiLlmAsJudgeConfigLlmLoggingArgs.builder()
                            .logPayloads(false)
                            .logStatistics(false)
                            .build())
                        .model(GatewayPluginAiLlmAsJudgeConfigLlmModelArgs.builder()
                            .name("...my_name...")
                            .options(GatewayPluginAiLlmAsJudgeConfigLlmModelOptionsArgs.builder()
                                .anthropicVersion("...my_anthropic_version...")
                                .azureApiVersion("...my_azure_api_version...")
                                .azureDeploymentId("...my_azure_deployment_id...")
                                .azureInstance("...my_azure_instance...")
                                .bedrock(GatewayPluginAiLlmAsJudgeConfigLlmModelOptionsBedrockArgs.builder()
                                    .awsAssumeRoleArn("...my_aws_assume_role_arn...")
                                    .awsRegion("...my_aws_region...")
                                    .awsRoleSessionName("...my_aws_role_session_name...")
                                    .awsStsEndpointUrl("...my_aws_sts_endpoint_url...")
                                    .embeddingsNormalize(false)
                                    .performanceConfigLatency("...my_performance_config_latency...")
                                    .build())
                                .cohere(GatewayPluginAiLlmAsJudgeConfigLlmModelOptionsCohereArgs.builder()
                                    .embeddingInputType("search_document")
                                    .waitForModel(false)
                                    .build())
                                .embeddingsDimensions(3)
                                .gemini(GatewayPluginAiLlmAsJudgeConfigLlmModelOptionsGeminiArgs.builder()
                                    .apiEndpoint("...my_api_endpoint...")
                                    .endpointId("...my_endpoint_id...")
                                    .locationId("...my_location_id...")
                                    .projectId("...my_project_id...")
                                    .build())
                                .huggingface(GatewayPluginAiLlmAsJudgeConfigLlmModelOptionsHuggingfaceArgs.builder()
                                    .useCache(false)
                                    .waitForModel(false)
                                    .build())
                                .inputCost(3.16)
                                .llama2Format("ollama")
                                .maxTokens(0)
                                .mistralFormat("openai")
                                .outputCost(2.67)
                                .temperature(4.9)
                                .topK(155)
                                .topP(0.54)
                                .upstreamPath("...my_upstream_path...")
                                .upstreamUrl("...my_upstream_url...")
                                .build())
                            .provider("gemini")
                            .build())
                        .routeType("llm/v1/completions")
                        .build())
                    .messageCountback(928.19)
                    .prompt("...my_prompt...")
                    .samplingRate(0.51)
                    .build())
                .consumer(GatewayPluginAiLlmAsJudgeConsumerArgs.builder()
                    .id("...my_id...")
                    .build())
                .consumerGroup(GatewayPluginAiLlmAsJudgeConsumerGroupArgs.builder()
                    .id("...my_id...")
                    .build())
                .controlPlaneId("9524ec7d-36d9-465d-a8c5-83a3c9390458")
                .createdAt(10)
                .enabled(true)
                .gatewayPluginAiLlmAsJudgeId("...my_id...")
                .instanceName("...my_instance_name...")
                .ordering(GatewayPluginAiLlmAsJudgeOrderingArgs.builder()
                    // The access-phase list setter is pluralized ("accesses") in the
                    // generated SDK; see the canonical constructor example.
                    .after(GatewayPluginAiLlmAsJudgeOrderingAfterArgs.builder()
                        .accesses("...")
                        .build())
                    .before(GatewayPluginAiLlmAsJudgeOrderingBeforeArgs.builder()
                        .accesses("...")
                        .build())
                    .build())
                .partials(GatewayPluginAiLlmAsJudgePartialArgs.builder()
                    .id("...my_id...")
                    .name("...my_name...")
                    .path("...my_path...")
                    .build())
                .protocols("grpcs")
                .route(GatewayPluginAiLlmAsJudgeRouteArgs.builder()
                    .id("...my_id...")
                    .build())
                .service(GatewayPluginAiLlmAsJudgeServiceArgs.builder()
                    .id("...my_id...")
                    .build())
                .tags("...")
                .updatedAt(0)
                .build());

        }
    }
    
    # Pulumi YAML uses the provider-schema property names, which are camelCase
    # (and pluralized for list properties such as `accesses`) — matching the
    # other constructor examples on this page. The raw Terraform snake_case keys
    # (http_proxy_host, message_countback, ...) are not valid here.
    resources:
      myGatewaypluginaillmasjudge:
        type: konnect:GatewayPluginAiLlmAsJudge
        properties:
          config:
            httpProxyHost: '...my_http_proxy_host...'
            httpProxyPort: 8774
            httpTimeout: 0
            httpsProxyHost: '...my_https_proxy_host...'
            httpsProxyPort: 29092
            httpsVerify: false
            ignoreAssistantPrompts: true
            ignoreSystemPrompts: true
            ignoreToolPrompts: true
            llm:
              auth:
                allowOverride: false
                awsAccessKeyId: '...my_aws_access_key_id...'
                awsSecretAccessKey: '...my_aws_secret_access_key...'
                azureClientId: '...my_azure_client_id...'
                azureClientSecret: '...my_azure_client_secret...'
                azureTenantId: '...my_azure_tenant_id...'
                azureUseManagedIdentity: true
                gcpServiceAccountJson: '...my_gcp_service_account_json...'
                gcpUseServiceAccount: false
                headerName: '...my_header_name...'
                headerValue: '...my_header_value...'
                paramLocation: query
                paramName: '...my_param_name...'
                paramValue: '...my_param_value...'
              logging:
                logPayloads: false
                logStatistics: false
              model:
                name: '...my_name...'
                options:
                  anthropicVersion: '...my_anthropic_version...'
                  azureApiVersion: '...my_azure_api_version...'
                  azureDeploymentId: '...my_azure_deployment_id...'
                  azureInstance: '...my_azure_instance...'
                  bedrock:
                    awsAssumeRoleArn: '...my_aws_assume_role_arn...'
                    awsRegion: '...my_aws_region...'
                    awsRoleSessionName: '...my_aws_role_session_name...'
                    awsStsEndpointUrl: '...my_aws_sts_endpoint_url...'
                    embeddingsNormalize: false
                    performanceConfigLatency: '...my_performance_config_latency...'
                  cohere:
                    embeddingInputType: search_document
                    waitForModel: false
                  embeddingsDimensions: 3
                  gemini:
                    apiEndpoint: '...my_api_endpoint...'
                    endpointId: '...my_endpoint_id...'
                    locationId: '...my_location_id...'
                    projectId: '...my_project_id...'
                  huggingface:
                    useCache: false
                    waitForModel: false
                  inputCost: 3.16
                  llama2Format: ollama
                  maxTokens: 0
                  mistralFormat: openai
                  outputCost: 2.67
                  temperature: 4.9
                  topK: 155
                  topP: 0.54
                  upstreamPath: '...my_upstream_path...'
                  upstreamUrl: '...my_upstream_url...'
                provider: gemini
              routeType: llm/v1/completions
            messageCountback: 928.19
            prompt: '...my_prompt...'
            samplingRate: 0.51
          consumer:
            id: '...my_id...'
          consumerGroup:
            id: '...my_id...'
          controlPlaneId: 9524ec7d-36d9-465d-a8c5-83a3c9390458
          createdAt: 10
          enabled: true
          gatewayPluginAiLlmAsJudgeId: '...my_id...'
          instanceName: '...my_instance_name...'
          ordering:
            after:
              accesses:
                - '...'
            before:
              accesses:
                - '...'
          partials:
            - id: '...my_id...'
              name: '...my_name...'
              path: '...my_path...'
          protocols:
            - grpcs
          route:
            id: '...my_id...'
          service:
            id: '...my_id...'
          tags:
            - '...'
          updatedAt: 0
    

    Create GatewayPluginAiLlmAsJudge Resource

    Resources are created with functions called constructors. To learn more about declaring and configuring resources, see Resources.

    Constructor syntax

    new GatewayPluginAiLlmAsJudge(name: string, args: GatewayPluginAiLlmAsJudgeArgs, opts?: CustomResourceOptions);
    @overload
    def GatewayPluginAiLlmAsJudge(resource_name: str,
                                  args: GatewayPluginAiLlmAsJudgeArgs,
                                  opts: Optional[ResourceOptions] = None)
    
    @overload
    def GatewayPluginAiLlmAsJudge(resource_name: str,
                                  opts: Optional[ResourceOptions] = None,
                                  control_plane_id: Optional[str] = None,
                                  config: Optional[GatewayPluginAiLlmAsJudgeConfigArgs] = None,
                                  gateway_plugin_ai_llm_as_judge_id: Optional[str] = None,
                                  consumer_group: Optional[GatewayPluginAiLlmAsJudgeConsumerGroupArgs] = None,
                                  created_at: Optional[float] = None,
                                  enabled: Optional[bool] = None,
                                  consumer: Optional[GatewayPluginAiLlmAsJudgeConsumerArgs] = None,
                                  instance_name: Optional[str] = None,
                                  ordering: Optional[GatewayPluginAiLlmAsJudgeOrderingArgs] = None,
                                  partials: Optional[Sequence[GatewayPluginAiLlmAsJudgePartialArgs]] = None,
                                  protocols: Optional[Sequence[str]] = None,
                                  route: Optional[GatewayPluginAiLlmAsJudgeRouteArgs] = None,
                                  service: Optional[GatewayPluginAiLlmAsJudgeServiceArgs] = None,
                                  tags: Optional[Sequence[str]] = None,
                                  updated_at: Optional[float] = None)
    func NewGatewayPluginAiLlmAsJudge(ctx *Context, name string, args GatewayPluginAiLlmAsJudgeArgs, opts ...ResourceOption) (*GatewayPluginAiLlmAsJudge, error)
    public GatewayPluginAiLlmAsJudge(string name, GatewayPluginAiLlmAsJudgeArgs args, CustomResourceOptions? opts = null)
    public GatewayPluginAiLlmAsJudge(String name, GatewayPluginAiLlmAsJudgeArgs args)
    public GatewayPluginAiLlmAsJudge(String name, GatewayPluginAiLlmAsJudgeArgs args, CustomResourceOptions options)
    
    type: konnect:GatewayPluginAiLlmAsJudge
    properties: # The arguments to resource properties.
    options: # Bag of options to control resource's behavior.
    
    

    Parameters

    name string
    The unique name of the resource.
    args GatewayPluginAiLlmAsJudgeArgs
    The arguments to resource properties.
    opts CustomResourceOptions
    Bag of options to control resource's behavior.
    resource_name str
    The unique name of the resource.
    args GatewayPluginAiLlmAsJudgeArgs
    The arguments to resource properties.
    opts ResourceOptions
    Bag of options to control resource's behavior.
    ctx Context
    Context object for the current deployment.
    name string
    The unique name of the resource.
    args GatewayPluginAiLlmAsJudgeArgs
    The arguments to resource properties.
    opts ResourceOption
    Bag of options to control resource's behavior.
    name string
    The unique name of the resource.
    args GatewayPluginAiLlmAsJudgeArgs
    The arguments to resource properties.
    opts CustomResourceOptions
    Bag of options to control resource's behavior.
    name String
    The unique name of the resource.
    args GatewayPluginAiLlmAsJudgeArgs
    The arguments to resource properties.
    options CustomResourceOptions
    Bag of options to control resource's behavior.

    Constructor example

    The following reference example uses placeholder values for all input properties.

    // Canonical C# constructor reference: every input property is shown once with a
    // placeholder value ("string", 0, false) to document the full argument shape.
    // Generated from the provider schema — property names and nesting mirror it exactly.
    var gatewayPluginAiLlmAsJudgeResource = new Konnect.GatewayPluginAiLlmAsJudge("gatewayPluginAiLlmAsJudgeResource", new()
    {
        ControlPlaneId = "string",
        Config = new Konnect.Inputs.GatewayPluginAiLlmAsJudgeConfigArgs
        {
            Llm = new Konnect.Inputs.GatewayPluginAiLlmAsJudgeConfigLlmArgs
            {
                Model = new Konnect.Inputs.GatewayPluginAiLlmAsJudgeConfigLlmModelArgs
                {
                    Provider = "string",
                    Name = "string",
                    Options = new Konnect.Inputs.GatewayPluginAiLlmAsJudgeConfigLlmModelOptionsArgs
                    {
                        AnthropicVersion = "string",
                        AzureApiVersion = "string",
                        AzureDeploymentId = "string",
                        AzureInstance = "string",
                        Bedrock = new Konnect.Inputs.GatewayPluginAiLlmAsJudgeConfigLlmModelOptionsBedrockArgs
                        {
                            AwsAssumeRoleArn = "string",
                            AwsRegion = "string",
                            AwsRoleSessionName = "string",
                            AwsStsEndpointUrl = "string",
                            EmbeddingsNormalize = false,
                            PerformanceConfigLatency = "string",
                        },
                        Cohere = new Konnect.Inputs.GatewayPluginAiLlmAsJudgeConfigLlmModelOptionsCohereArgs
                        {
                            EmbeddingInputType = "string",
                            WaitForModel = false,
                        },
                        EmbeddingsDimensions = 0,
                        Gemini = new Konnect.Inputs.GatewayPluginAiLlmAsJudgeConfigLlmModelOptionsGeminiArgs
                        {
                            ApiEndpoint = "string",
                            EndpointId = "string",
                            LocationId = "string",
                            ProjectId = "string",
                        },
                        Huggingface = new Konnect.Inputs.GatewayPluginAiLlmAsJudgeConfigLlmModelOptionsHuggingfaceArgs
                        {
                            UseCache = false,
                            WaitForModel = false,
                        },
                        InputCost = 0,
                        Llama2Format = "string",
                        MaxTokens = 0,
                        MistralFormat = "string",
                        OutputCost = 0,
                        Temperature = 0,
                        TopK = 0,
                        TopP = 0,
                        UpstreamPath = "string",
                        UpstreamUrl = "string",
                    },
                },
                RouteType = "string",
                Auth = new Konnect.Inputs.GatewayPluginAiLlmAsJudgeConfigLlmAuthArgs
                {
                    AllowOverride = false,
                    AwsAccessKeyId = "string",
                    AwsSecretAccessKey = "string",
                    AzureClientId = "string",
                    AzureClientSecret = "string",
                    AzureTenantId = "string",
                    AzureUseManagedIdentity = false,
                    GcpServiceAccountJson = "string",
                    GcpUseServiceAccount = false,
                    HeaderName = "string",
                    HeaderValue = "string",
                    ParamLocation = "string",
                    ParamName = "string",
                    ParamValue = "string",
                },
                Logging = new Konnect.Inputs.GatewayPluginAiLlmAsJudgeConfigLlmLoggingArgs
                {
                    LogPayloads = false,
                    LogStatistics = false,
                },
            },
            HttpsProxyHost = "string",
            HttpTimeout = 0,
            HttpProxyHost = "string",
            HttpsProxyPort = 0,
            HttpsVerify = false,
            IgnoreAssistantPrompts = false,
            IgnoreSystemPrompts = false,
            IgnoreToolPrompts = false,
            HttpProxyPort = 0,
            MessageCountback = 0,
            Prompt = "string",
            SamplingRate = 0,
        },
        GatewayPluginAiLlmAsJudgeId = "string",
        ConsumerGroup = new Konnect.Inputs.GatewayPluginAiLlmAsJudgeConsumerGroupArgs
        {
            Id = "string",
        },
        CreatedAt = 0,
        Enabled = false,
        Consumer = new Konnect.Inputs.GatewayPluginAiLlmAsJudgeConsumerArgs
        {
            Id = "string",
        },
        InstanceName = "string",
        Ordering = new Konnect.Inputs.GatewayPluginAiLlmAsJudgeOrderingArgs
        {
            After = new Konnect.Inputs.GatewayPluginAiLlmAsJudgeOrderingAfterArgs
            {
                Accesses = new[]
                {
                    "string",
                },
            },
            Before = new Konnect.Inputs.GatewayPluginAiLlmAsJudgeOrderingBeforeArgs
            {
                Accesses = new[]
                {
                    "string",
                },
            },
        },
        Partials = new[]
        {
            new Konnect.Inputs.GatewayPluginAiLlmAsJudgePartialArgs
            {
                Id = "string",
                Name = "string",
                Path = "string",
            },
        },
        Protocols = new[]
        {
            "string",
        },
        Route = new Konnect.Inputs.GatewayPluginAiLlmAsJudgeRouteArgs
        {
            Id = "string",
        },
        Service = new Konnect.Inputs.GatewayPluginAiLlmAsJudgeServiceArgs
        {
            Id = "string",
        },
        Tags = new[]
        {
            "string",
        },
        UpdatedAt = 0,
    });
    
    // Canonical Go constructor reference: every input property is shown once with a
    // placeholder value to document the full argument shape. Generated from the
    // provider schema — field names and nesting mirror it exactly.
    example, err := konnect.NewGatewayPluginAiLlmAsJudge(ctx, "gatewayPluginAiLlmAsJudgeResource", &konnect.GatewayPluginAiLlmAsJudgeArgs{
    	ControlPlaneId: pulumi.String("string"),
    	Config: &konnect.GatewayPluginAiLlmAsJudgeConfigArgs{
    		Llm: &konnect.GatewayPluginAiLlmAsJudgeConfigLlmArgs{
    			Model: &konnect.GatewayPluginAiLlmAsJudgeConfigLlmModelArgs{
    				Provider: pulumi.String("string"),
    				Name:     pulumi.String("string"),
    				Options: &konnect.GatewayPluginAiLlmAsJudgeConfigLlmModelOptionsArgs{
    					AnthropicVersion:  pulumi.String("string"),
    					AzureApiVersion:   pulumi.String("string"),
    					AzureDeploymentId: pulumi.String("string"),
    					AzureInstance:     pulumi.String("string"),
    					Bedrock: &konnect.GatewayPluginAiLlmAsJudgeConfigLlmModelOptionsBedrockArgs{
    						AwsAssumeRoleArn:         pulumi.String("string"),
    						AwsRegion:                pulumi.String("string"),
    						AwsRoleSessionName:       pulumi.String("string"),
    						AwsStsEndpointUrl:        pulumi.String("string"),
    						EmbeddingsNormalize:      pulumi.Bool(false),
    						PerformanceConfigLatency: pulumi.String("string"),
    					},
    					Cohere: &konnect.GatewayPluginAiLlmAsJudgeConfigLlmModelOptionsCohereArgs{
    						EmbeddingInputType: pulumi.String("string"),
    						WaitForModel:       pulumi.Bool(false),
    					},
    					EmbeddingsDimensions: pulumi.Float64(0),
    					Gemini: &konnect.GatewayPluginAiLlmAsJudgeConfigLlmModelOptionsGeminiArgs{
    						ApiEndpoint: pulumi.String("string"),
    						EndpointId:  pulumi.String("string"),
    						LocationId:  pulumi.String("string"),
    						ProjectId:   pulumi.String("string"),
    					},
    					Huggingface: &konnect.GatewayPluginAiLlmAsJudgeConfigLlmModelOptionsHuggingfaceArgs{
    						UseCache:     pulumi.Bool(false),
    						WaitForModel: pulumi.Bool(false),
    					},
    					InputCost:     pulumi.Float64(0),
    					Llama2Format:  pulumi.String("string"),
    					MaxTokens:     pulumi.Float64(0),
    					MistralFormat: pulumi.String("string"),
    					OutputCost:    pulumi.Float64(0),
    					Temperature:   pulumi.Float64(0),
    					TopK:          pulumi.Float64(0),
    					TopP:          pulumi.Float64(0),
    					UpstreamPath:  pulumi.String("string"),
    					UpstreamUrl:   pulumi.String("string"),
    				},
    			},
    			RouteType: pulumi.String("string"),
    			Auth: &konnect.GatewayPluginAiLlmAsJudgeConfigLlmAuthArgs{
    				AllowOverride:           pulumi.Bool(false),
    				AwsAccessKeyId:          pulumi.String("string"),
    				AwsSecretAccessKey:      pulumi.String("string"),
    				AzureClientId:           pulumi.String("string"),
    				AzureClientSecret:       pulumi.String("string"),
    				AzureTenantId:           pulumi.String("string"),
    				AzureUseManagedIdentity: pulumi.Bool(false),
    				GcpServiceAccountJson:   pulumi.String("string"),
    				GcpUseServiceAccount:    pulumi.Bool(false),
    				HeaderName:              pulumi.String("string"),
    				HeaderValue:             pulumi.String("string"),
    				ParamLocation:           pulumi.String("string"),
    				ParamName:               pulumi.String("string"),
    				ParamValue:              pulumi.String("string"),
    			},
    			Logging: &konnect.GatewayPluginAiLlmAsJudgeConfigLlmLoggingArgs{
    				LogPayloads:   pulumi.Bool(false),
    				LogStatistics: pulumi.Bool(false),
    			},
    		},
    		HttpsProxyHost:         pulumi.String("string"),
    		HttpTimeout:            pulumi.Float64(0),
    		HttpProxyHost:          pulumi.String("string"),
    		HttpsProxyPort:         pulumi.Float64(0),
    		HttpsVerify:            pulumi.Bool(false),
    		IgnoreAssistantPrompts: pulumi.Bool(false),
    		IgnoreSystemPrompts:    pulumi.Bool(false),
    		IgnoreToolPrompts:      pulumi.Bool(false),
    		HttpProxyPort:          pulumi.Float64(0),
    		MessageCountback:       pulumi.Float64(0),
    		Prompt:                 pulumi.String("string"),
    		SamplingRate:           pulumi.Float64(0),
    	},
    	GatewayPluginAiLlmAsJudgeId: pulumi.String("string"),
    	ConsumerGroup: &konnect.GatewayPluginAiLlmAsJudgeConsumerGroupArgs{
    		Id: pulumi.String("string"),
    	},
    	CreatedAt: pulumi.Float64(0),
    	Enabled:   pulumi.Bool(false),
    	Consumer: &konnect.GatewayPluginAiLlmAsJudgeConsumerArgs{
    		Id: pulumi.String("string"),
    	},
    	InstanceName: pulumi.String("string"),
    	Ordering: &konnect.GatewayPluginAiLlmAsJudgeOrderingArgs{
    		After: &konnect.GatewayPluginAiLlmAsJudgeOrderingAfterArgs{
    			Accesses: pulumi.StringArray{
    				pulumi.String("string"),
    			},
    		},
    		Before: &konnect.GatewayPluginAiLlmAsJudgeOrderingBeforeArgs{
    			Accesses: pulumi.StringArray{
    				pulumi.String("string"),
    			},
    		},
    	},
    	Partials: konnect.GatewayPluginAiLlmAsJudgePartialArray{
    		&konnect.GatewayPluginAiLlmAsJudgePartialArgs{
    			Id:   pulumi.String("string"),
    			Name: pulumi.String("string"),
    			Path: pulumi.String("string"),
    		},
    	},
    	Protocols: pulumi.StringArray{
    		pulumi.String("string"),
    	},
    	Route: &konnect.GatewayPluginAiLlmAsJudgeRouteArgs{
    		Id: pulumi.String("string"),
    	},
    	Service: &konnect.GatewayPluginAiLlmAsJudgeServiceArgs{
    		Id: pulumi.String("string"),
    	},
    	Tags: pulumi.StringArray{
    		pulumi.String("string"),
    	},
    	UpdatedAt: pulumi.Float64(0),
    })
    
    // Canonical Java constructor reference: every input property is shown once with a
    // placeholder value to document the full argument shape. Note the camelCase setter
    // names and the pluralized list setters (accesses, partials, protocols, tags) —
    // these are the names the generated SDK actually exposes.
    var gatewayPluginAiLlmAsJudgeResource = new GatewayPluginAiLlmAsJudge("gatewayPluginAiLlmAsJudgeResource", GatewayPluginAiLlmAsJudgeArgs.builder()
        .controlPlaneId("string")
        .config(GatewayPluginAiLlmAsJudgeConfigArgs.builder()
            .llm(GatewayPluginAiLlmAsJudgeConfigLlmArgs.builder()
                .model(GatewayPluginAiLlmAsJudgeConfigLlmModelArgs.builder()
                    .provider("string")
                    .name("string")
                    .options(GatewayPluginAiLlmAsJudgeConfigLlmModelOptionsArgs.builder()
                        .anthropicVersion("string")
                        .azureApiVersion("string")
                        .azureDeploymentId("string")
                        .azureInstance("string")
                        .bedrock(GatewayPluginAiLlmAsJudgeConfigLlmModelOptionsBedrockArgs.builder()
                            .awsAssumeRoleArn("string")
                            .awsRegion("string")
                            .awsRoleSessionName("string")
                            .awsStsEndpointUrl("string")
                            .embeddingsNormalize(false)
                            .performanceConfigLatency("string")
                            .build())
                        .cohere(GatewayPluginAiLlmAsJudgeConfigLlmModelOptionsCohereArgs.builder()
                            .embeddingInputType("string")
                            .waitForModel(false)
                            .build())
                        .embeddingsDimensions(0.0)
                        .gemini(GatewayPluginAiLlmAsJudgeConfigLlmModelOptionsGeminiArgs.builder()
                            .apiEndpoint("string")
                            .endpointId("string")
                            .locationId("string")
                            .projectId("string")
                            .build())
                        .huggingface(GatewayPluginAiLlmAsJudgeConfigLlmModelOptionsHuggingfaceArgs.builder()
                            .useCache(false)
                            .waitForModel(false)
                            .build())
                        .inputCost(0.0)
                        .llama2Format("string")
                        .maxTokens(0.0)
                        .mistralFormat("string")
                        .outputCost(0.0)
                        .temperature(0.0)
                        .topK(0.0)
                        .topP(0.0)
                        .upstreamPath("string")
                        .upstreamUrl("string")
                        .build())
                    .build())
                .routeType("string")
                .auth(GatewayPluginAiLlmAsJudgeConfigLlmAuthArgs.builder()
                    .allowOverride(false)
                    .awsAccessKeyId("string")
                    .awsSecretAccessKey("string")
                    .azureClientId("string")
                    .azureClientSecret("string")
                    .azureTenantId("string")
                    .azureUseManagedIdentity(false)
                    .gcpServiceAccountJson("string")
                    .gcpUseServiceAccount(false)
                    .headerName("string")
                    .headerValue("string")
                    .paramLocation("string")
                    .paramName("string")
                    .paramValue("string")
                    .build())
                .logging(GatewayPluginAiLlmAsJudgeConfigLlmLoggingArgs.builder()
                    .logPayloads(false)
                    .logStatistics(false)
                    .build())
                .build())
            .httpsProxyHost("string")
            .httpTimeout(0.0)
            .httpProxyHost("string")
            .httpsProxyPort(0.0)
            .httpsVerify(false)
            .ignoreAssistantPrompts(false)
            .ignoreSystemPrompts(false)
            .ignoreToolPrompts(false)
            .httpProxyPort(0.0)
            .messageCountback(0.0)
            .prompt("string")
            .samplingRate(0.0)
            .build())
        .gatewayPluginAiLlmAsJudgeId("string")
        .consumerGroup(GatewayPluginAiLlmAsJudgeConsumerGroupArgs.builder()
            .id("string")
            .build())
        .createdAt(0.0)
        .enabled(false)
        .consumer(GatewayPluginAiLlmAsJudgeConsumerArgs.builder()
            .id("string")
            .build())
        .instanceName("string")
        .ordering(GatewayPluginAiLlmAsJudgeOrderingArgs.builder()
            .after(GatewayPluginAiLlmAsJudgeOrderingAfterArgs.builder()
                .accesses("string")
                .build())
            .before(GatewayPluginAiLlmAsJudgeOrderingBeforeArgs.builder()
                .accesses("string")
                .build())
            .build())
        .partials(GatewayPluginAiLlmAsJudgePartialArgs.builder()
            .id("string")
            .name("string")
            .path("string")
            .build())
        .protocols("string")
        .route(GatewayPluginAiLlmAsJudgeRouteArgs.builder()
            .id("string")
            .build())
        .service(GatewayPluginAiLlmAsJudgeServiceArgs.builder()
            .id("string")
            .build())
        .tags("string")
        .updatedAt(0.0)
        .build());
    
    gateway_plugin_ai_llm_as_judge_resource = konnect.GatewayPluginAiLlmAsJudge("gatewayPluginAiLlmAsJudgeResource",
        control_plane_id="string",
        config={
            "llm": {
                "model": {
                    "provider": "string",
                    "name": "string",
                    "options": {
                        "anthropic_version": "string",
                        "azure_api_version": "string",
                        "azure_deployment_id": "string",
                        "azure_instance": "string",
                        "bedrock": {
                            "aws_assume_role_arn": "string",
                            "aws_region": "string",
                            "aws_role_session_name": "string",
                            "aws_sts_endpoint_url": "string",
                            "embeddings_normalize": False,
                            "performance_config_latency": "string",
                        },
                        "cohere": {
                            "embedding_input_type": "string",
                            "wait_for_model": False,
                        },
                        "embeddings_dimensions": 0,
                        "gemini": {
                            "api_endpoint": "string",
                            "endpoint_id": "string",
                            "location_id": "string",
                            "project_id": "string",
                        },
                        "huggingface": {
                            "use_cache": False,
                            "wait_for_model": False,
                        },
                        "input_cost": 0,
                        "llama2_format": "string",
                        "max_tokens": 0,
                        "mistral_format": "string",
                        "output_cost": 0,
                        "temperature": 0,
                        "top_k": 0,
                        "top_p": 0,
                        "upstream_path": "string",
                        "upstream_url": "string",
                    },
                },
                "route_type": "string",
                "auth": {
                    "allow_override": False,
                    "aws_access_key_id": "string",
                    "aws_secret_access_key": "string",
                    "azure_client_id": "string",
                    "azure_client_secret": "string",
                    "azure_tenant_id": "string",
                    "azure_use_managed_identity": False,
                    "gcp_service_account_json": "string",
                    "gcp_use_service_account": False,
                    "header_name": "string",
                    "header_value": "string",
                    "param_location": "string",
                    "param_name": "string",
                    "param_value": "string",
                },
                "logging": {
                    "log_payloads": False,
                    "log_statistics": False,
                },
            },
            "https_proxy_host": "string",
            "http_timeout": 0,
            "http_proxy_host": "string",
            "https_proxy_port": 0,
            "https_verify": False,
            "ignore_assistant_prompts": False,
            "ignore_system_prompts": False,
            "ignore_tool_prompts": False,
            "http_proxy_port": 0,
            "message_countback": 0,
            "prompt": "string",
            "sampling_rate": 0,
        },
        gateway_plugin_ai_llm_as_judge_id="string",
        consumer_group={
            "id": "string",
        },
        created_at=0,
        enabled=False,
        consumer={
            "id": "string",
        },
        instance_name="string",
        ordering={
            "after": {
                "accesses": ["string"],
            },
            "before": {
                "accesses": ["string"],
            },
        },
        partials=[{
            "id": "string",
            "name": "string",
            "path": "string",
        }],
        protocols=["string"],
        route={
            "id": "string",
        },
        service={
            "id": "string",
        },
        tags=["string"],
        updated_at=0)
    
    const gatewayPluginAiLlmAsJudgeResource = new konnect.GatewayPluginAiLlmAsJudge("gatewayPluginAiLlmAsJudgeResource", {
        controlPlaneId: "string",
        config: {
            llm: {
                model: {
                    provider: "string",
                    name: "string",
                    options: {
                        anthropicVersion: "string",
                        azureApiVersion: "string",
                        azureDeploymentId: "string",
                        azureInstance: "string",
                        bedrock: {
                            awsAssumeRoleArn: "string",
                            awsRegion: "string",
                            awsRoleSessionName: "string",
                            awsStsEndpointUrl: "string",
                            embeddingsNormalize: false,
                            performanceConfigLatency: "string",
                        },
                        cohere: {
                            embeddingInputType: "string",
                            waitForModel: false,
                        },
                        embeddingsDimensions: 0,
                        gemini: {
                            apiEndpoint: "string",
                            endpointId: "string",
                            locationId: "string",
                            projectId: "string",
                        },
                        huggingface: {
                            useCache: false,
                            waitForModel: false,
                        },
                        inputCost: 0,
                        llama2Format: "string",
                        maxTokens: 0,
                        mistralFormat: "string",
                        outputCost: 0,
                        temperature: 0,
                        topK: 0,
                        topP: 0,
                        upstreamPath: "string",
                        upstreamUrl: "string",
                    },
                },
                routeType: "string",
                auth: {
                    allowOverride: false,
                    awsAccessKeyId: "string",
                    awsSecretAccessKey: "string",
                    azureClientId: "string",
                    azureClientSecret: "string",
                    azureTenantId: "string",
                    azureUseManagedIdentity: false,
                    gcpServiceAccountJson: "string",
                    gcpUseServiceAccount: false,
                    headerName: "string",
                    headerValue: "string",
                    paramLocation: "string",
                    paramName: "string",
                    paramValue: "string",
                },
                logging: {
                    logPayloads: false,
                    logStatistics: false,
                },
            },
            httpsProxyHost: "string",
            httpTimeout: 0,
            httpProxyHost: "string",
            httpsProxyPort: 0,
            httpsVerify: false,
            ignoreAssistantPrompts: false,
            ignoreSystemPrompts: false,
            ignoreToolPrompts: false,
            httpProxyPort: 0,
            messageCountback: 0,
            prompt: "string",
            samplingRate: 0,
        },
        gatewayPluginAiLlmAsJudgeId: "string",
        consumerGroup: {
            id: "string",
        },
        createdAt: 0,
        enabled: false,
        consumer: {
            id: "string",
        },
        instanceName: "string",
        ordering: {
            after: {
                accesses: ["string"],
            },
            before: {
                accesses: ["string"],
            },
        },
        partials: [{
            id: "string",
            name: "string",
            path: "string",
        }],
        protocols: ["string"],
        route: {
            id: "string",
        },
        service: {
            id: "string",
        },
        tags: ["string"],
        updatedAt: 0,
    });
    
    type: konnect:GatewayPluginAiLlmAsJudge
    properties:
        config:
            httpProxyHost: string
            httpProxyPort: 0
            httpTimeout: 0
            httpsProxyHost: string
            httpsProxyPort: 0
            httpsVerify: false
            ignoreAssistantPrompts: false
            ignoreSystemPrompts: false
            ignoreToolPrompts: false
            llm:
                auth:
                    allowOverride: false
                    awsAccessKeyId: string
                    awsSecretAccessKey: string
                    azureClientId: string
                    azureClientSecret: string
                    azureTenantId: string
                    azureUseManagedIdentity: false
                    gcpServiceAccountJson: string
                    gcpUseServiceAccount: false
                    headerName: string
                    headerValue: string
                    paramLocation: string
                    paramName: string
                    paramValue: string
                logging:
                    logPayloads: false
                    logStatistics: false
                model:
                    name: string
                    options:
                        anthropicVersion: string
                        azureApiVersion: string
                        azureDeploymentId: string
                        azureInstance: string
                        bedrock:
                            awsAssumeRoleArn: string
                            awsRegion: string
                            awsRoleSessionName: string
                            awsStsEndpointUrl: string
                            embeddingsNormalize: false
                            performanceConfigLatency: string
                        cohere:
                            embeddingInputType: string
                            waitForModel: false
                        embeddingsDimensions: 0
                        gemini:
                            apiEndpoint: string
                            endpointId: string
                            locationId: string
                            projectId: string
                        huggingface:
                            useCache: false
                            waitForModel: false
                        inputCost: 0
                        llama2Format: string
                        maxTokens: 0
                        mistralFormat: string
                        outputCost: 0
                        temperature: 0
                        topK: 0
                        topP: 0
                        upstreamPath: string
                        upstreamUrl: string
                    provider: string
                routeType: string
            messageCountback: 0
            prompt: string
            samplingRate: 0
        consumer:
            id: string
        consumerGroup:
            id: string
        controlPlaneId: string
        createdAt: 0
        enabled: false
        gatewayPluginAiLlmAsJudgeId: string
        instanceName: string
        ordering:
            after:
                accesses:
                    - string
            before:
                accesses:
                    - string
        partials:
            - id: string
              name: string
              path: string
        protocols:
            - string
        route:
            id: string
        service:
            id: string
        tags:
            - string
        updatedAt: 0
    

    GatewayPluginAiLlmAsJudge Resource Properties

    To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.

    Inputs

    In Python, inputs that are objects can be passed either as argument classes or as dictionary literals.

    The GatewayPluginAiLlmAsJudge resource accepts the following input properties:

    Config GatewayPluginAiLlmAsJudgeConfig
    ControlPlaneId string
    The UUID of your control plane. This variable is available in the Konnect manager. Requires replacement if changed.
    Consumer GatewayPluginAiLlmAsJudgeConsumer
    If set, the plugin will activate only for requests where the specified consumer has been authenticated. (Note that some plugins cannot be restricted to consumers this way.) Leave unset for the plugin to activate regardless of the authenticated Consumer.
    ConsumerGroup GatewayPluginAiLlmAsJudgeConsumerGroup
    If set, the plugin will activate only for requests where the specified consumer group has been authenticated. (Note that some plugins cannot be restricted to consumer groups this way.) Leave unset for the plugin to activate regardless of the authenticated Consumer Group.
    CreatedAt double
    Unix epoch when the resource was created.
    Enabled bool
    Whether the plugin is applied. Default: true
    GatewayPluginAiLlmAsJudgeId string
    A string representing a UUID (universally unique identifier).
    InstanceName string
    A unique string representing a UTF-8 encoded name.
    Ordering GatewayPluginAiLlmAsJudgeOrdering
    Partials List<GatewayPluginAiLlmAsJudgePartial>
    A list of partials to be used by the plugin.
    Protocols List<string>
    A set of strings representing HTTP protocols. Default: ["grpc","grpcs","http","https"]
    Route GatewayPluginAiLlmAsJudgeRoute
    If set, the plugin will only activate when receiving requests via the specified route. Leave unset for the plugin to activate regardless of the route being used.
    Service GatewayPluginAiLlmAsJudgeService
    If set, the plugin will only activate when receiving requests via one of the routes belonging to the specified Service. Leave unset for the plugin to activate regardless of the Service being matched.
    Tags List<string>
    An optional set of strings associated with the Plugin for grouping and filtering.
    UpdatedAt double
    Unix epoch when the resource was last updated.
    Config GatewayPluginAiLlmAsJudgeConfigArgs
    ControlPlaneId string
    The UUID of your control plane. This variable is available in the Konnect manager. Requires replacement if changed.
    Consumer GatewayPluginAiLlmAsJudgeConsumerArgs
    If set, the plugin will activate only for requests where the specified consumer has been authenticated. (Note that some plugins cannot be restricted to consumers this way.) Leave unset for the plugin to activate regardless of the authenticated Consumer.
    ConsumerGroup GatewayPluginAiLlmAsJudgeConsumerGroupArgs
    If set, the plugin will activate only for requests where the specified consumer group has been authenticated. (Note that some plugins cannot be restricted to consumer groups this way.) Leave unset for the plugin to activate regardless of the authenticated Consumer Group.
    CreatedAt float64
    Unix epoch when the resource was created.
    Enabled bool
    Whether the plugin is applied. Default: true
    GatewayPluginAiLlmAsJudgeId string
    A string representing a UUID (universally unique identifier).
    InstanceName string
    A unique string representing a UTF-8 encoded name.
    Ordering GatewayPluginAiLlmAsJudgeOrderingArgs
    Partials []GatewayPluginAiLlmAsJudgePartialArgs
    A list of partials to be used by the plugin.
    Protocols []string
    A set of strings representing HTTP protocols. Default: ["grpc","grpcs","http","https"]
    Route GatewayPluginAiLlmAsJudgeRouteArgs
    If set, the plugin will only activate when receiving requests via the specified route. Leave unset for the plugin to activate regardless of the route being used.
    Service GatewayPluginAiLlmAsJudgeServiceArgs
    If set, the plugin will only activate when receiving requests via one of the routes belonging to the specified Service. Leave unset for the plugin to activate regardless of the Service being matched.
    Tags []string
    An optional set of strings associated with the Plugin for grouping and filtering.
    UpdatedAt float64
    Unix epoch when the resource was last updated.
    config GatewayPluginAiLlmAsJudgeConfig
    controlPlaneId String
    The UUID of your control plane. This variable is available in the Konnect manager. Requires replacement if changed.
    consumer GatewayPluginAiLlmAsJudgeConsumer
    If set, the plugin will activate only for requests where the specified consumer has been authenticated. (Note that some plugins cannot be restricted to consumers this way.) Leave unset for the plugin to activate regardless of the authenticated Consumer.
    consumerGroup GatewayPluginAiLlmAsJudgeConsumerGroup
    If set, the plugin will activate only for requests where the specified consumer group has been authenticated. (Note that some plugins cannot be restricted to consumer groups this way.) Leave unset for the plugin to activate regardless of the authenticated Consumer Group.
    createdAt Double
    Unix epoch when the resource was created.
    enabled Boolean
    Whether the plugin is applied. Default: true
    gatewayPluginAiLlmAsJudgeId String
    A string representing a UUID (universally unique identifier).
    instanceName String
    A unique string representing a UTF-8 encoded name.
    ordering GatewayPluginAiLlmAsJudgeOrdering
    partials List<GatewayPluginAiLlmAsJudgePartial>
    A list of partials to be used by the plugin.
    protocols List<String>
    A set of strings representing HTTP protocols. Default: ["grpc","grpcs","http","https"]
    route GatewayPluginAiLlmAsJudgeRoute
    If set, the plugin will only activate when receiving requests via the specified route. Leave unset for the plugin to activate regardless of the route being used.
    service GatewayPluginAiLlmAsJudgeService
    If set, the plugin will only activate when receiving requests via one of the routes belonging to the specified Service. Leave unset for the plugin to activate regardless of the Service being matched.
    tags List<String>
    An optional set of strings associated with the Plugin for grouping and filtering.
    updatedAt Double
    Unix epoch when the resource was last updated.
    config GatewayPluginAiLlmAsJudgeConfig
    controlPlaneId string
    The UUID of your control plane. This variable is available in the Konnect manager. Requires replacement if changed.
    consumer GatewayPluginAiLlmAsJudgeConsumer
    If set, the plugin will activate only for requests where the specified consumer has been authenticated. (Note that some plugins cannot be restricted to consumers this way.) Leave unset for the plugin to activate regardless of the authenticated Consumer.
    consumerGroup GatewayPluginAiLlmAsJudgeConsumerGroup
    If set, the plugin will activate only for requests where the specified consumer group has been authenticated. (Note that some plugins cannot be restricted to consumer groups this way.) Leave unset for the plugin to activate regardless of the authenticated Consumer Group.
    createdAt number
    Unix epoch when the resource was created.
    enabled boolean
    Whether the plugin is applied. Default: true
    gatewayPluginAiLlmAsJudgeId string
    A string representing a UUID (universally unique identifier).
    instanceName string
    A unique string representing a UTF-8 encoded name.
    ordering GatewayPluginAiLlmAsJudgeOrdering
    partials GatewayPluginAiLlmAsJudgePartial[]
    A list of partials to be used by the plugin.
    protocols string[]
    A set of strings representing HTTP protocols. Default: ["grpc","grpcs","http","https"]
    route GatewayPluginAiLlmAsJudgeRoute
    If set, the plugin will only activate when receiving requests via the specified route. Leave unset for the plugin to activate regardless of the route being used.
    service GatewayPluginAiLlmAsJudgeService
    If set, the plugin will only activate when receiving requests via one of the routes belonging to the specified Service. Leave unset for the plugin to activate regardless of the Service being matched.
    tags string[]
    An optional set of strings associated with the Plugin for grouping and filtering.
    updatedAt number
    Unix epoch when the resource was last updated.
    config GatewayPluginAiLlmAsJudgeConfigArgs
    control_plane_id str
    The UUID of your control plane. This variable is available in the Konnect manager. Requires replacement if changed.
    consumer GatewayPluginAiLlmAsJudgeConsumerArgs
    If set, the plugin will activate only for requests where the specified consumer has been authenticated. (Note that some plugins cannot be restricted to consumers this way.) Leave unset for the plugin to activate regardless of the authenticated Consumer.
    consumer_group GatewayPluginAiLlmAsJudgeConsumerGroupArgs
    If set, the plugin will activate only for requests where the specified consumer group has been authenticated. (Note that some plugins cannot be restricted to consumer groups this way.) Leave unset for the plugin to activate regardless of the authenticated Consumer Group.
    created_at float
    Unix epoch when the resource was created.
    enabled bool
    Whether the plugin is applied. Default: true
    gateway_plugin_ai_llm_as_judge_id str
    A string representing a UUID (universally unique identifier).
    instance_name str
    A unique string representing a UTF-8 encoded name.
    ordering GatewayPluginAiLlmAsJudgeOrderingArgs
    partials Sequence[GatewayPluginAiLlmAsJudgePartialArgs]
    A list of partials to be used by the plugin.
    protocols Sequence[str]
    A set of strings representing HTTP protocols. Default: ["grpc","grpcs","http","https"]
    route GatewayPluginAiLlmAsJudgeRouteArgs
    If set, the plugin will only activate when receiving requests via the specified route. Leave unset for the plugin to activate regardless of the route being used.
    service GatewayPluginAiLlmAsJudgeServiceArgs
    If set, the plugin will only activate when receiving requests via one of the routes belonging to the specified Service. Leave unset for the plugin to activate regardless of the Service being matched.
    tags Sequence[str]
    An optional set of strings associated with the Plugin for grouping and filtering.
    updated_at float
    Unix epoch when the resource was last updated.
    config Property Map
    controlPlaneId String
    The UUID of your control plane. This variable is available in the Konnect manager. Requires replacement if changed.
    consumer Property Map
    If set, the plugin will activate only for requests where the specified consumer has been authenticated. (Note that some plugins cannot be restricted to consumers this way.) Leave unset for the plugin to activate regardless of the authenticated Consumer.
    consumerGroup Property Map
    If set, the plugin will activate only for requests where the specified consumer group has been authenticated. (Note that some plugins cannot be restricted to consumer groups this way.) Leave unset for the plugin to activate regardless of the authenticated Consumer Group.
    createdAt Number
    Unix epoch when the resource was created.
    enabled Boolean
    Whether the plugin is applied. Default: true
    gatewayPluginAiLlmAsJudgeId String
    A string representing a UUID (universally unique identifier).
    instanceName String
    A unique string representing a UTF-8 encoded name.
    ordering Property Map
    partials List<Property Map>
    A list of partials to be used by the plugin.
    protocols List<String>
    A set of strings representing HTTP protocols. Default: ["grpc","grpcs","http","https"]
    route Property Map
    If set, the plugin will only activate when receiving requests via the specified route. Leave unset for the plugin to activate regardless of the route being used.
    service Property Map
    If set, the plugin will only activate when receiving requests via one of the routes belonging to the specified Service. Leave unset for the plugin to activate regardless of the Service being matched.
    tags List<String>
    An optional set of strings associated with the Plugin for grouping and filtering.
    updatedAt Number
    Unix epoch when the resource was last updated.

    Outputs

    All input properties are implicitly available as output properties. Additionally, the GatewayPluginAiLlmAsJudge resource produces the following output properties:

    Id string
    The provider-assigned unique ID for this managed resource.
    Id string
    The provider-assigned unique ID for this managed resource.
    id String
    The provider-assigned unique ID for this managed resource.
    id string
    The provider-assigned unique ID for this managed resource.
    id str
    The provider-assigned unique ID for this managed resource.
    id String
    The provider-assigned unique ID for this managed resource.

    Look up Existing GatewayPluginAiLlmAsJudge Resource

    Get an existing GatewayPluginAiLlmAsJudge resource’s state with the given name, ID, and optional extra properties used to qualify the lookup.

    public static get(name: string, id: Input<ID>, state?: GatewayPluginAiLlmAsJudgeState, opts?: CustomResourceOptions): GatewayPluginAiLlmAsJudge
    @staticmethod
    def get(resource_name: str,
            id: str,
            opts: Optional[ResourceOptions] = None,
            config: Optional[GatewayPluginAiLlmAsJudgeConfigArgs] = None,
            consumer: Optional[GatewayPluginAiLlmAsJudgeConsumerArgs] = None,
            consumer_group: Optional[GatewayPluginAiLlmAsJudgeConsumerGroupArgs] = None,
            control_plane_id: Optional[str] = None,
            created_at: Optional[float] = None,
            enabled: Optional[bool] = None,
            gateway_plugin_ai_llm_as_judge_id: Optional[str] = None,
            instance_name: Optional[str] = None,
            ordering: Optional[GatewayPluginAiLlmAsJudgeOrderingArgs] = None,
            partials: Optional[Sequence[GatewayPluginAiLlmAsJudgePartialArgs]] = None,
            protocols: Optional[Sequence[str]] = None,
            route: Optional[GatewayPluginAiLlmAsJudgeRouteArgs] = None,
            service: Optional[GatewayPluginAiLlmAsJudgeServiceArgs] = None,
            tags: Optional[Sequence[str]] = None,
            updated_at: Optional[float] = None) -> GatewayPluginAiLlmAsJudge
    func GetGatewayPluginAiLlmAsJudge(ctx *Context, name string, id IDInput, state *GatewayPluginAiLlmAsJudgeState, opts ...ResourceOption) (*GatewayPluginAiLlmAsJudge, error)
    public static GatewayPluginAiLlmAsJudge Get(string name, Input<string> id, GatewayPluginAiLlmAsJudgeState? state, CustomResourceOptions? opts = null)
    public static GatewayPluginAiLlmAsJudge get(String name, Output<String> id, GatewayPluginAiLlmAsJudgeState state, CustomResourceOptions options)
    resources:
      _:
        type: konnect:GatewayPluginAiLlmAsJudge
        get:
          id: ${id}
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    resource_name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    The following state arguments are supported:
    Config GatewayPluginAiLlmAsJudgeConfig
    Consumer GatewayPluginAiLlmAsJudgeConsumer
    If set, the plugin will activate only for requests where the specified consumer has been authenticated. (Note that some plugins cannot be restricted to consumers this way.) Leave unset for the plugin to activate regardless of the authenticated Consumer.
    ConsumerGroup GatewayPluginAiLlmAsJudgeConsumerGroup
    If set, the plugin will activate only for requests where the specified consumer group has been authenticated. (Note that some plugins cannot be restricted to consumer groups this way.) Leave unset for the plugin to activate regardless of the authenticated Consumer Group.
    ControlPlaneId string
    The UUID of your control plane. This variable is available in the Konnect manager. Requires replacement if changed.
    CreatedAt double
    Unix epoch when the resource was created.
    Enabled bool
    Whether the plugin is applied. Default: true
    GatewayPluginAiLlmAsJudgeId string
    A string representing a UUID (universally unique identifier).
    InstanceName string
    A unique string representing a UTF-8 encoded name.
    Ordering GatewayPluginAiLlmAsJudgeOrdering
    Partials List<GatewayPluginAiLlmAsJudgePartial>
    A list of partials to be used by the plugin.
    Protocols List<string>
    A set of strings representing HTTP protocols. Default: ["grpc","grpcs","http","https"]
    Route GatewayPluginAiLlmAsJudgeRoute
    If set, the plugin will only activate when receiving requests via the specified route. Leave unset for the plugin to activate regardless of the route being used.
    Service GatewayPluginAiLlmAsJudgeService
    If set, the plugin will only activate when receiving requests via one of the routes belonging to the specified Service. Leave unset for the plugin to activate regardless of the Service being matched.
    Tags List<string>
    An optional set of strings associated with the Plugin for grouping and filtering.
    UpdatedAt double
    Unix epoch when the resource was last updated.
    Config GatewayPluginAiLlmAsJudgeConfigArgs
    Consumer GatewayPluginAiLlmAsJudgeConsumerArgs
    If set, the plugin will activate only for requests where the specified consumer has been authenticated. (Note that some plugins cannot be restricted to consumers this way.) Leave unset for the plugin to activate regardless of the authenticated Consumer.
    ConsumerGroup GatewayPluginAiLlmAsJudgeConsumerGroupArgs
    If set, the plugin will activate only for requests where the specified consumer group has been authenticated. (Note that some plugins cannot be restricted to consumer groups this way.) Leave unset for the plugin to activate regardless of the authenticated Consumer Groups.
    ControlPlaneId string
    The UUID of your control plane. This variable is available in the Konnect manager. Requires replacement if changed.
    CreatedAt float64
    Unix epoch when the resource was created.
    Enabled bool
    Whether the plugin is applied. Default: true
    GatewayPluginAiLlmAsJudgeId string
    A string representing a UUID (universally unique identifier).
    InstanceName string
    A unique string representing a UTF-8 encoded name.
    Ordering GatewayPluginAiLlmAsJudgeOrderingArgs
    Partials []GatewayPluginAiLlmAsJudgePartialArgs
    A list of partials to be used by the plugin.
    Protocols []string
    A set of strings representing HTTP protocols. Default: ["grpc","grpcs","http","https"]
    Route GatewayPluginAiLlmAsJudgeRouteArgs
    If set, the plugin will only activate when receiving requests via the specified route. Leave unset for the plugin to activate regardless of the route being used.
    Service GatewayPluginAiLlmAsJudgeServiceArgs
    If set, the plugin will only activate when receiving requests via one of the routes belonging to the specified Service. Leave unset for the plugin to activate regardless of the Service being matched.
    Tags []string
    An optional set of strings associated with the Plugin for grouping and filtering.
    UpdatedAt float64
    Unix epoch when the resource was last updated.
    config GatewayPluginAiLlmAsJudgeConfig
    consumer GatewayPluginAiLlmAsJudgeConsumer
    If set, the plugin will activate only for requests where the specified consumer has been authenticated. (Note that some plugins cannot be restricted to consumers this way.) Leave unset for the plugin to activate regardless of the authenticated Consumer.
    consumerGroup GatewayPluginAiLlmAsJudgeConsumerGroup
    If set, the plugin will activate only for requests where the specified consumer group has been authenticated. (Note that some plugins cannot be restricted to consumer groups this way.) Leave unset for the plugin to activate regardless of the authenticated Consumer Groups.
    controlPlaneId String
    The UUID of your control plane. This variable is available in the Konnect manager. Requires replacement if changed.
    createdAt Double
    Unix epoch when the resource was created.
    enabled Boolean
    Whether the plugin is applied. Default: true
    gatewayPluginAiLlmAsJudgeId String
    A string representing a UUID (universally unique identifier).
    instanceName String
    A unique string representing a UTF-8 encoded name.
    ordering GatewayPluginAiLlmAsJudgeOrdering
    partials List<GatewayPluginAiLlmAsJudgePartial>
    A list of partials to be used by the plugin.
    protocols List<String>
    A set of strings representing HTTP protocols. Default: ["grpc","grpcs","http","https"]
    route GatewayPluginAiLlmAsJudgeRoute
    If set, the plugin will only activate when receiving requests via the specified route. Leave unset for the plugin to activate regardless of the route being used.
    service GatewayPluginAiLlmAsJudgeService
    If set, the plugin will only activate when receiving requests via one of the routes belonging to the specified Service. Leave unset for the plugin to activate regardless of the Service being matched.
    tags List<String>
    An optional set of strings associated with the Plugin for grouping and filtering.
    updatedAt Double
    Unix epoch when the resource was last updated.
    config GatewayPluginAiLlmAsJudgeConfig
    consumer GatewayPluginAiLlmAsJudgeConsumer
    If set, the plugin will activate only for requests where the specified consumer has been authenticated. (Note that some plugins cannot be restricted to consumers this way.) Leave unset for the plugin to activate regardless of the authenticated Consumer.
    consumerGroup GatewayPluginAiLlmAsJudgeConsumerGroup
    If set, the plugin will activate only for requests where the specified consumer group has been authenticated. (Note that some plugins cannot be restricted to consumer groups this way.) Leave unset for the plugin to activate regardless of the authenticated Consumer Groups.
    controlPlaneId string
    The UUID of your control plane. This variable is available in the Konnect manager. Requires replacement if changed.
    createdAt number
    Unix epoch when the resource was created.
    enabled boolean
    Whether the plugin is applied. Default: true
    gatewayPluginAiLlmAsJudgeId string
    A string representing a UUID (universally unique identifier).
    instanceName string
    A unique string representing a UTF-8 encoded name.
    ordering GatewayPluginAiLlmAsJudgeOrdering
    partials GatewayPluginAiLlmAsJudgePartial[]
    A list of partials to be used by the plugin.
    protocols string[]
    A set of strings representing HTTP protocols. Default: ["grpc","grpcs","http","https"]
    route GatewayPluginAiLlmAsJudgeRoute
    If set, the plugin will only activate when receiving requests via the specified route. Leave unset for the plugin to activate regardless of the route being used.
    service GatewayPluginAiLlmAsJudgeService
    If set, the plugin will only activate when receiving requests via one of the routes belonging to the specified Service. Leave unset for the plugin to activate regardless of the Service being matched.
    tags string[]
    An optional set of strings associated with the Plugin for grouping and filtering.
    updatedAt number
    Unix epoch when the resource was last updated.
    config GatewayPluginAiLlmAsJudgeConfigArgs
    consumer GatewayPluginAiLlmAsJudgeConsumerArgs
    If set, the plugin will activate only for requests where the specified consumer has been authenticated. (Note that some plugins cannot be restricted to consumers this way.) Leave unset for the plugin to activate regardless of the authenticated Consumer.
    consumer_group GatewayPluginAiLlmAsJudgeConsumerGroupArgs
    If set, the plugin will activate only for requests where the specified consumer group has been authenticated. (Note that some plugins cannot be restricted to consumer groups this way.) Leave unset for the plugin to activate regardless of the authenticated Consumer Groups.
    control_plane_id str
    The UUID of your control plane. This variable is available in the Konnect manager. Requires replacement if changed.
    created_at float
    Unix epoch when the resource was created.
    enabled bool
    Whether the plugin is applied. Default: true
    gateway_plugin_ai_llm_as_judge_id str
    A string representing a UUID (universally unique identifier).
    instance_name str
    A unique string representing a UTF-8 encoded name.
    ordering GatewayPluginAiLlmAsJudgeOrderingArgs
    partials Sequence[GatewayPluginAiLlmAsJudgePartialArgs]
    A list of partials to be used by the plugin.
    protocols Sequence[str]
    A set of strings representing HTTP protocols. Default: ["grpc","grpcs","http","https"]
    route GatewayPluginAiLlmAsJudgeRouteArgs
    If set, the plugin will only activate when receiving requests via the specified route. Leave unset for the plugin to activate regardless of the route being used.
    service GatewayPluginAiLlmAsJudgeServiceArgs
    If set, the plugin will only activate when receiving requests via one of the routes belonging to the specified Service. Leave unset for the plugin to activate regardless of the Service being matched.
    tags Sequence[str]
    An optional set of strings associated with the Plugin for grouping and filtering.
    updated_at float
    Unix epoch when the resource was last updated.
    config Property Map
    consumer Property Map
    If set, the plugin will activate only for requests where the specified consumer has been authenticated. (Note that some plugins cannot be restricted to consumers this way.) Leave unset for the plugin to activate regardless of the authenticated Consumer.
    consumerGroup Property Map
    If set, the plugin will activate only for requests where the specified consumer group has been authenticated. (Note that some plugins cannot be restricted to consumer groups this way.) Leave unset for the plugin to activate regardless of the authenticated Consumer Groups.
    controlPlaneId String
    The UUID of your control plane. This variable is available in the Konnect manager. Requires replacement if changed.
    createdAt Number
    Unix epoch when the resource was created.
    enabled Boolean
    Whether the plugin is applied. Default: true
    gatewayPluginAiLlmAsJudgeId String
    A string representing a UUID (universally unique identifier).
    instanceName String
    A unique string representing a UTF-8 encoded name.
    ordering Property Map
    partials List<Property Map>
    A list of partials to be used by the plugin.
    protocols List<String>
    A set of strings representing HTTP protocols. Default: ["grpc","grpcs","http","https"]
    route Property Map
    If set, the plugin will only activate when receiving requests via the specified route. Leave unset for the plugin to activate regardless of the route being used.
    service Property Map
    If set, the plugin will only activate when receiving requests via one of the routes belonging to the specified Service. Leave unset for the plugin to activate regardless of the Service being matched.
    tags List<String>
    An optional set of strings associated with the Plugin for grouping and filtering.
    updatedAt Number
    Unix epoch when the resource was last updated.

    Supporting Types

    GatewayPluginAiLlmAsJudgeConfig, GatewayPluginAiLlmAsJudgeConfigArgs

    Llm GatewayPluginAiLlmAsJudgeConfigLlm
    HttpProxyHost string
    A string representing a host name, such as example.com.
    HttpProxyPort double
    An integer representing a port number between 0 and 65535, inclusive.
    HttpTimeout double
    Timeout in milliseconds for the AI upstream service. Default: 60000
    HttpsProxyHost string
    A string representing a host name, such as example.com.
    HttpsProxyPort double
    An integer representing a port number between 0 and 65535, inclusive.
    HttpsVerify bool
    Verify the TLS certificate of the AI upstream service. Default: true
    IgnoreAssistantPrompts bool
    Ignore and discard any assistant prompts when evaluating the request. Default: true
    IgnoreSystemPrompts bool
    Ignore and discard any system prompts when evaluating the request. Default: true
    IgnoreToolPrompts bool
    Ignore and discard any tool prompts when evaluating the request. Default: true
    MessageCountback double
    Number of messages in the chat history to use for evaluating the request. Default: 1
    Prompt string
    Use this prompt to tune the LLM system/assistant message for the llm as a judge prompt. Default: "You are a strict evaluator. You will be given a prompt and a response. Your task is to judge whether the response is correct or incorrect. You must assign a score between 1 and 100, where: 100 represents a completely correct and ideal response, 1 represents a completely incorrect or irrelevant response. Your score must be a single number only — no text, labels, or explanations. Use the full range of values (e.g., 13, 47, 86), not just round numbers like 10, 50, or 100. Be accurate and consistent, as this score will be used by another model for learning and evaluation."
    SamplingRate double
    Judging request sampling rate for configuring the probability-based sampler. Default: 1
    Llm GatewayPluginAiLlmAsJudgeConfigLlm
    HttpProxyHost string
    A string representing a host name, such as example.com.
    HttpProxyPort float64
    An integer representing a port number between 0 and 65535, inclusive.
    HttpTimeout float64
    Timeout in milliseconds for the AI upstream service. Default: 60000
    HttpsProxyHost string
    A string representing a host name, such as example.com.
    HttpsProxyPort float64
    An integer representing a port number between 0 and 65535, inclusive.
    HttpsVerify bool
    Verify the TLS certificate of the AI upstream service. Default: true
    IgnoreAssistantPrompts bool
    Ignore and discard any assistant prompts when evaluating the request. Default: true
    IgnoreSystemPrompts bool
    Ignore and discard any system prompts when evaluating the request. Default: true
    IgnoreToolPrompts bool
    Ignore and discard any tool prompts when evaluating the request. Default: true
    MessageCountback float64
    Number of messages in the chat history to use for evaluating the request. Default: 1
    Prompt string
    Use this prompt to tune the LLM system/assistant message for the llm as a judge prompt. Default: "You are a strict evaluator. You will be given a prompt and a response. Your task is to judge whether the response is correct or incorrect. You must assign a score between 1 and 100, where: 100 represents a completely correct and ideal response, 1 represents a completely incorrect or irrelevant response. Your score must be a single number only — no text, labels, or explanations. Use the full range of values (e.g., 13, 47, 86), not just round numbers like 10, 50, or 100. Be accurate and consistent, as this score will be used by another model for learning and evaluation."
    SamplingRate float64
    Judging request sampling rate for configuring the probability-based sampler. Default: 1
    llm GatewayPluginAiLlmAsJudgeConfigLlm
    httpProxyHost String
    A string representing a host name, such as example.com.
    httpProxyPort Double
    An integer representing a port number between 0 and 65535, inclusive.
    httpTimeout Double
    Timeout in milliseconds for the AI upstream service. Default: 60000
    httpsProxyHost String
    A string representing a host name, such as example.com.
    httpsProxyPort Double
    An integer representing a port number between 0 and 65535, inclusive.
    httpsVerify Boolean
    Verify the TLS certificate of the AI upstream service. Default: true
    ignoreAssistantPrompts Boolean
    Ignore and discard any assistant prompts when evaluating the request. Default: true
    ignoreSystemPrompts Boolean
    Ignore and discard any system prompts when evaluating the request. Default: true
    ignoreToolPrompts Boolean
    Ignore and discard any tool prompts when evaluating the request. Default: true
    messageCountback Double
    Number of messages in the chat history to use for evaluating the request. Default: 1
    prompt String
    Use this prompt to tune the LLM system/assistant message for the llm as a judge prompt. Default: "You are a strict evaluator. You will be given a prompt and a response. Your task is to judge whether the response is correct or incorrect. You must assign a score between 1 and 100, where: 100 represents a completely correct and ideal response, 1 represents a completely incorrect or irrelevant response. Your score must be a single number only — no text, labels, or explanations. Use the full range of values (e.g., 13, 47, 86), not just round numbers like 10, 50, or 100. Be accurate and consistent, as this score will be used by another model for learning and evaluation."
    samplingRate Double
    Judging request sampling rate for configuring the probability-based sampler. Default: 1
    llm GatewayPluginAiLlmAsJudgeConfigLlm
    httpProxyHost string
    A string representing a host name, such as example.com.
    httpProxyPort number
    An integer representing a port number between 0 and 65535, inclusive.
    httpTimeout number
    Timeout in milliseconds for the AI upstream service. Default: 60000
    httpsProxyHost string
    A string representing a host name, such as example.com.
    httpsProxyPort number
    An integer representing a port number between 0 and 65535, inclusive.
    httpsVerify boolean
    Verify the TLS certificate of the AI upstream service. Default: true
    ignoreAssistantPrompts boolean
    Ignore and discard any assistant prompts when evaluating the request. Default: true
    ignoreSystemPrompts boolean
    Ignore and discard any system prompts when evaluating the request. Default: true
    ignoreToolPrompts boolean
    Ignore and discard any tool prompts when evaluating the request. Default: true
    messageCountback number
    Number of messages in the chat history to use for evaluating the request. Default: 1
    prompt string
    Use this prompt to tune the LLM system/assistant message for the llm as a judge prompt. Default: "You are a strict evaluator. You will be given a prompt and a response. Your task is to judge whether the response is correct or incorrect. You must assign a score between 1 and 100, where: 100 represents a completely correct and ideal response, 1 represents a completely incorrect or irrelevant response. Your score must be a single number only — no text, labels, or explanations. Use the full range of values (e.g., 13, 47, 86), not just round numbers like 10, 50, or 100. Be accurate and consistent, as this score will be used by another model for learning and evaluation."
    samplingRate number
    Judging request sampling rate for configuring the probability-based sampler. Default: 1
    llm GatewayPluginAiLlmAsJudgeConfigLlm
    http_proxy_host str
    A string representing a host name, such as example.com.
    http_proxy_port float
    An integer representing a port number between 0 and 65535, inclusive.
    http_timeout float
    Timeout in milliseconds for the AI upstream service. Default: 60000
    https_proxy_host str
    A string representing a host name, such as example.com.
    https_proxy_port float
    An integer representing a port number between 0 and 65535, inclusive.
    https_verify bool
    Verify the TLS certificate of the AI upstream service. Default: true
    ignore_assistant_prompts bool
    Ignore and discard any assistant prompts when evaluating the request. Default: true
    ignore_system_prompts bool
    Ignore and discard any system prompts when evaluating the request. Default: true
    ignore_tool_prompts bool
    Ignore and discard any tool prompts when evaluating the request. Default: true
    message_countback float
    Number of messages in the chat history to use for evaluating the request. Default: 1
    prompt str
    Use this prompt to tune the LLM system/assistant message for the llm as a judge prompt. Default: "You are a strict evaluator. You will be given a prompt and a response. Your task is to judge whether the response is correct or incorrect. You must assign a score between 1 and 100, where: 100 represents a completely correct and ideal response, 1 represents a completely incorrect or irrelevant response. Your score must be a single number only — no text, labels, or explanations. Use the full range of values (e.g., 13, 47, 86), not just round numbers like 10, 50, or 100. Be accurate and consistent, as this score will be used by another model for learning and evaluation."
    sampling_rate float
    Judging request sampling rate for configuring the probability-based sampler. Default: 1
    llm Property Map
    httpProxyHost String
    A string representing a host name, such as example.com.
    httpProxyPort Number
    An integer representing a port number between 0 and 65535, inclusive.
    httpTimeout Number
    Timeout in milliseconds for the AI upstream service. Default: 60000
    httpsProxyHost String
    A string representing a host name, such as example.com.
    httpsProxyPort Number
    An integer representing a port number between 0 and 65535, inclusive.
    httpsVerify Boolean
    Verify the TLS certificate of the AI upstream service. Default: true
    ignoreAssistantPrompts Boolean
    Ignore and discard any assistant prompts when evaluating the request. Default: true
    ignoreSystemPrompts Boolean
    Ignore and discard any system prompts when evaluating the request. Default: true
    ignoreToolPrompts Boolean
    Ignore and discard any tool prompts when evaluating the request. Default: true
    messageCountback Number
    Number of messages in the chat history to use for evaluating the request. Default: 1
    prompt String
    Use this prompt to tune the LLM system/assistant message for the llm as a judge prompt. Default: "You are a strict evaluator. You will be given a prompt and a response. Your task is to judge whether the response is correct or incorrect. You must assign a score between 1 and 100, where: 100 represents a completely correct and ideal response, 1 represents a completely incorrect or irrelevant response. Your score must be a single number only — no text, labels, or explanations. Use the full range of values (e.g., 13, 47, 86), not just round numbers like 10, 50, or 100. Be accurate and consistent, as this score will be used by another model for learning and evaluation."
    samplingRate Number
    Judging request sampling rate for configuring the probability-based sampler. Default: 1

    GatewayPluginAiLlmAsJudgeConfigLlm, GatewayPluginAiLlmAsJudgeConfigLlmArgs

    Model GatewayPluginAiLlmAsJudgeConfigLlmModel
    RouteType string
    The model's operation implementation, for this provider. must be one of ["audio/v1/audio/speech", "audio/v1/audio/transcriptions", "audio/v1/audio/translations", "image/v1/images/edits", "image/v1/images/generations", "llm/v1/assistants", "llm/v1/batches", "llm/v1/chat", "llm/v1/completions", "llm/v1/embeddings", "llm/v1/files", "llm/v1/responses", "preserve", "realtime/v1/realtime"]
    Auth GatewayPluginAiLlmAsJudgeConfigLlmAuth
    Logging GatewayPluginAiLlmAsJudgeConfigLlmLogging
    Model GatewayPluginAiLlmAsJudgeConfigLlmModel
    RouteType string
    The model's operation implementation, for this provider. must be one of ["audio/v1/audio/speech", "audio/v1/audio/transcriptions", "audio/v1/audio/translations", "image/v1/images/edits", "image/v1/images/generations", "llm/v1/assistants", "llm/v1/batches", "llm/v1/chat", "llm/v1/completions", "llm/v1/embeddings", "llm/v1/files", "llm/v1/responses", "preserve", "realtime/v1/realtime"]
    Auth GatewayPluginAiLlmAsJudgeConfigLlmAuth
    Logging GatewayPluginAiLlmAsJudgeConfigLlmLogging
    model GatewayPluginAiLlmAsJudgeConfigLlmModel
    routeType String
    The model's operation implementation, for this provider. must be one of ["audio/v1/audio/speech", "audio/v1/audio/transcriptions", "audio/v1/audio/translations", "image/v1/images/edits", "image/v1/images/generations", "llm/v1/assistants", "llm/v1/batches", "llm/v1/chat", "llm/v1/completions", "llm/v1/embeddings", "llm/v1/files", "llm/v1/responses", "preserve", "realtime/v1/realtime"]
    auth GatewayPluginAiLlmAsJudgeConfigLlmAuth
    logging GatewayPluginAiLlmAsJudgeConfigLlmLogging
    model GatewayPluginAiLlmAsJudgeConfigLlmModel
    routeType string
    The model's operation implementation, for this provider. must be one of ["audio/v1/audio/speech", "audio/v1/audio/transcriptions", "audio/v1/audio/translations", "image/v1/images/edits", "image/v1/images/generations", "llm/v1/assistants", "llm/v1/batches", "llm/v1/chat", "llm/v1/completions", "llm/v1/embeddings", "llm/v1/files", "llm/v1/responses", "preserve", "realtime/v1/realtime"]
    auth GatewayPluginAiLlmAsJudgeConfigLlmAuth
    logging GatewayPluginAiLlmAsJudgeConfigLlmLogging
    model GatewayPluginAiLlmAsJudgeConfigLlmModel
    route_type str
    The model's operation implementation, for this provider. must be one of ["audio/v1/audio/speech", "audio/v1/audio/transcriptions", "audio/v1/audio/translations", "image/v1/images/edits", "image/v1/images/generations", "llm/v1/assistants", "llm/v1/batches", "llm/v1/chat", "llm/v1/completions", "llm/v1/embeddings", "llm/v1/files", "llm/v1/responses", "preserve", "realtime/v1/realtime"]
    auth GatewayPluginAiLlmAsJudgeConfigLlmAuth
    logging GatewayPluginAiLlmAsJudgeConfigLlmLogging
    model Property Map
    routeType String
    The model's operation implementation, for this provider. must be one of ["audio/v1/audio/speech", "audio/v1/audio/transcriptions", "audio/v1/audio/translations", "image/v1/images/edits", "image/v1/images/generations", "llm/v1/assistants", "llm/v1/batches", "llm/v1/chat", "llm/v1/completions", "llm/v1/embeddings", "llm/v1/files", "llm/v1/responses", "preserve", "realtime/v1/realtime"]
    auth Property Map
    logging Property Map

    GatewayPluginAiLlmAsJudgeConfigLlmAuth, GatewayPluginAiLlmAsJudgeConfigLlmAuthArgs

    AllowOverride bool
    If enabled, the authorization header or parameter can be overridden in the request by the value configured in the plugin. Default: false
    AwsAccessKeyId string
    Set this if you are using an AWS provider (Bedrock) and you are authenticating using static IAM User credentials. Setting this will override the AWS_ACCESS_KEY_ID environment variable for this plugin instance.
    AwsSecretAccessKey string
    Set this if you are using an AWS provider (Bedrock) and you are authenticating using static IAM User credentials. Setting this will override the AWS_SECRET_ACCESS_KEY environment variable for this plugin instance.
    AzureClientId string
    If azure_use_managed_identity is set to true, and you need to use a different user-assigned identity for this LLM instance, set the client ID.
    AzureClientSecret string
    If azure_use_managed_identity is set to true, and you need to use a different user-assigned identity for this LLM instance, set the client secret.
    AzureTenantId string
    If azure_use_managed_identity is set to true, and you need to use a different user-assigned identity for this LLM instance, set the tenant ID.
    AzureUseManagedIdentity bool
    Set true to use the Azure Cloud Managed Identity (or user-assigned identity) to authenticate with Azure-provider models. Default: false
    GcpServiceAccountJson string
    Set this field to the full JSON of the GCP service account to authenticate, if required. If null (and gcp_use_service_account is true), Kong will attempt to read from environment variable GCP_SERVICE_ACCOUNT.
    GcpUseServiceAccount bool
    Use service account auth for GCP-based providers and models. Default: false
    HeaderName string
    If AI model requires authentication via Authorization or API key header, specify its name here.
    HeaderValue string
    Specify the full auth header value for 'header_name', for example 'Bearer key' or just 'key'.
    ParamLocation string
    Specify whether the 'param_name' and 'param_value' options go in a query string, or the POST form/JSON body. must be one of ["body", "query"]
    ParamName string
    If AI model requires authentication via query parameter, specify its name here.
    ParamValue string
    Specify the full parameter value for 'param_name'.
    AllowOverride bool
    If enabled, the authorization header or parameter can be overridden in the request by the value configured in the plugin. Default: false
    AwsAccessKeyId string
    Set this if you are using an AWS provider (Bedrock) and you are authenticating using static IAM User credentials. Setting this will override the AWS_ACCESS_KEY_ID environment variable for this plugin instance.
    AwsSecretAccessKey string
    Set this if you are using an AWS provider (Bedrock) and you are authenticating using static IAM User credentials. Setting this will override the AWS_SECRET_ACCESS_KEY environment variable for this plugin instance.
    AzureClientId string
    If azure_use_managed_identity is set to true, and you need to use a different user-assigned identity for this LLM instance, set the client ID.
    AzureClientSecret string
    If azure_use_managed_identity is set to true, and you need to use a different user-assigned identity for this LLM instance, set the client secret.
    AzureTenantId string
    If azure_use_managed_identity is set to true, and you need to use a different user-assigned identity for this LLM instance, set the tenant ID.
    AzureUseManagedIdentity bool
    Set true to use the Azure Cloud Managed Identity (or user-assigned identity) to authenticate with Azure-provider models. Default: false
    GcpServiceAccountJson string
    Set this field to the full JSON of the GCP service account to authenticate, if required. If null (and gcp_use_service_account is true), Kong will attempt to read from environment variable GCP_SERVICE_ACCOUNT.
    GcpUseServiceAccount bool
    Use service account auth for GCP-based providers and models. Default: false
    HeaderName string
    If AI model requires authentication via Authorization or API key header, specify its name here.
    HeaderValue string
    Specify the full auth header value for 'header_name', for example 'Bearer key' or just 'key'.
    ParamLocation string
    Specify whether the 'param_name' and 'param_value' options go in a query string, or the POST form/JSON body. must be one of ["body", "query"]
    ParamName string
    If AI model requires authentication via query parameter, specify its name here.
    ParamValue string
    Specify the full parameter value for 'param_name'.
    allowOverride Boolean
    If enabled, the authorization header or parameter can be overridden in the request by the value configured in the plugin. Default: false
    awsAccessKeyId String
    Set this if you are using an AWS provider (Bedrock) and you are authenticating using static IAM User credentials. Setting this will override the AWS_ACCESS_KEY_ID environment variable for this plugin instance.
    awsSecretAccessKey String
    Set this if you are using an AWS provider (Bedrock) and you are authenticating using static IAM User credentials. Setting this will override the AWS_SECRET_ACCESS_KEY environment variable for this plugin instance.
    azureClientId String
    If azure_use_managed_identity is set to true, and you need to use a different user-assigned identity for this LLM instance, set the client ID.
    azureClientSecret String
    If azure_use_managed_identity is set to true, and you need to use a different user-assigned identity for this LLM instance, set the client secret.
    azureTenantId String
    If azure_use_managed_identity is set to true, and you need to use a different user-assigned identity for this LLM instance, set the tenant ID.
    azureUseManagedIdentity Boolean
    Set true to use the Azure Cloud Managed Identity (or user-assigned identity) to authenticate with Azure-provider models. Default: false
    gcpServiceAccountJson String
    Set this field to the full JSON of the GCP service account to authenticate, if required. If null (and gcp_use_service_account is true), Kong will attempt to read from environment variable GCP_SERVICE_ACCOUNT.
    gcpUseServiceAccount Boolean
    Use service account auth for GCP-based providers and models. Default: false
    headerName String
    If AI model requires authentication via Authorization or API key header, specify its name here.
    headerValue String
    Specify the full auth header value for 'header_name', for example 'Bearer key' or just 'key'.
    paramLocation String
    Specify whether the 'param_name' and 'param_value' options go in a query string, or the POST form/JSON body. must be one of ["body", "query"]
    paramName String
    If AI model requires authentication via query parameter, specify its name here.
    paramValue String
    Specify the full parameter value for 'param_name'.
    allowOverride boolean
    If enabled, the authorization header or parameter can be overridden in the request by the value configured in the plugin. Default: false
    awsAccessKeyId string
    Set this if you are using an AWS provider (Bedrock) and you are authenticating using static IAM User credentials. Setting this will override the AWS_ACCESS_KEY_ID environment variable for this plugin instance.
    awsSecretAccessKey string
    Set this if you are using an AWS provider (Bedrock) and you are authenticating using static IAM User credentials. Setting this will override the AWS_SECRET_ACCESS_KEY environment variable for this plugin instance.
    azureClientId string
    If azure_use_managed_identity is set to true, and you need to use a different user-assigned identity for this LLM instance, set the client ID.
    azureClientSecret string
    If azure_use_managed_identity is set to true, and you need to use a different user-assigned identity for this LLM instance, set the client secret.
    azureTenantId string
    If azure_use_managed_identity is set to true, and you need to use a different user-assigned identity for this LLM instance, set the tenant ID.
    azureUseManagedIdentity boolean
    Set true to use the Azure Cloud Managed Identity (or user-assigned identity) to authenticate with Azure-provider models. Default: false
    gcpServiceAccountJson string
    Set this field to the full JSON of the GCP service account to authenticate, if required. If null (and gcp_use_service_account is true), Kong will attempt to read from environment variable GCP_SERVICE_ACCOUNT.
    gcpUseServiceAccount boolean
    Use service account auth for GCP-based providers and models. Default: false
    headerName string
    If AI model requires authentication via Authorization or API key header, specify its name here.
    headerValue string
    Specify the full auth header value for 'header_name', for example 'Bearer key' or just 'key'.
    paramLocation string
    Specify whether the 'param_name' and 'param_value' options go in a query string, or the POST form/JSON body. must be one of ["body", "query"]
    paramName string
    If AI model requires authentication via query parameter, specify its name here.
    paramValue string
    Specify the full parameter value for 'param_name'.
    allow_override bool
    If enabled, the authorization header or parameter can be overridden in the request by the value configured in the plugin. Default: false
    aws_access_key_id str
    Set this if you are using an AWS provider (Bedrock) and you are authenticating using static IAM User credentials. Setting this will override the AWS_ACCESS_KEY_ID environment variable for this plugin instance.
    aws_secret_access_key str
    Set this if you are using an AWS provider (Bedrock) and you are authenticating using static IAM User credentials. Setting this will override the AWS_SECRET_ACCESS_KEY environment variable for this plugin instance.
    azure_client_id str
    If azure_use_managed_identity is set to true, and you need to use a different user-assigned identity for this LLM instance, set the client ID.
    azure_client_secret str
    If azure_use_managed_identity is set to true, and you need to use a different user-assigned identity for this LLM instance, set the client secret.
    azure_tenant_id str
    If azure_use_managed_identity is set to true, and you need to use a different user-assigned identity for this LLM instance, set the tenant ID.
    azure_use_managed_identity bool
    Set true to use the Azure Cloud Managed Identity (or user-assigned identity) to authenticate with Azure-provider models. Default: false
    gcp_service_account_json str
    Set this field to the full JSON of the GCP service account to authenticate, if required. If null (and gcp_use_service_account is true), Kong will attempt to read from environment variable GCP_SERVICE_ACCOUNT.
    gcp_use_service_account bool
    Use service account auth for GCP-based providers and models. Default: false
    header_name str
    If AI model requires authentication via Authorization or API key header, specify its name here.
    header_value str
    Specify the full auth header value for 'header_name', for example 'Bearer key' or just 'key'.
    param_location str
    Specify whether the 'param_name' and 'param_value' options go in a query string, or the POST form/JSON body. must be one of ["body", "query"]
    param_name str
    If AI model requires authentication via query parameter, specify its name here.
    param_value str
    Specify the full parameter value for 'param_name'.
    allowOverride Boolean
    If enabled, the authorization header or parameter can be overridden in the request by the value configured in the plugin. Default: false
    awsAccessKeyId String
    Set this if you are using an AWS provider (Bedrock) and you are authenticating using static IAM User credentials. Setting this will override the AWS_ACCESS_KEY_ID environment variable for this plugin instance.
    awsSecretAccessKey String
    Set this if you are using an AWS provider (Bedrock) and you are authenticating using static IAM User credentials. Setting this will override the AWS_SECRET_ACCESS_KEY environment variable for this plugin instance.
    azureClientId String
    If azure_use_managed_identity is set to true, and you need to use a different user-assigned identity for this LLM instance, set the client ID.
    azureClientSecret String
    If azure_use_managed_identity is set to true, and you need to use a different user-assigned identity for this LLM instance, set the client secret.
    azureTenantId String
    If azure_use_managed_identity is set to true, and you need to use a different user-assigned identity for this LLM instance, set the tenant ID.
    azureUseManagedIdentity Boolean
    Set true to use the Azure Cloud Managed Identity (or user-assigned identity) to authenticate with Azure-provider models. Default: false
    gcpServiceAccountJson String
    Set this field to the full JSON of the GCP service account to authenticate, if required. If null (and gcp_use_service_account is true), Kong will attempt to read from environment variable GCP_SERVICE_ACCOUNT.
    gcpUseServiceAccount Boolean
    Use service account auth for GCP-based providers and models. Default: false
    headerName String
    If AI model requires authentication via Authorization or API key header, specify its name here.
    headerValue String
    Specify the full auth header value for 'header_name', for example 'Bearer key' or just 'key'.
    paramLocation String
    Specify whether the 'param_name' and 'param_value' options go in a query string, or the POST form/JSON body. must be one of ["body", "query"]
    paramName String
    If AI model requires authentication via query parameter, specify its name here.
    paramValue String
    Specify the full parameter value for 'param_name'.

    GatewayPluginAiLlmAsJudgeConfigLlmLogging, GatewayPluginAiLlmAsJudgeConfigLlmLoggingArgs

    LogPayloads bool
    If enabled, will log the request and response body into the Kong log plugin(s) output. Default: false
    LogStatistics bool
    If enabled and supported by the driver, will add model usage and token metrics into the Kong log plugin(s) output. Default: false
    LogPayloads bool
    If enabled, will log the request and response body into the Kong log plugin(s) output. Default: false
    LogStatistics bool
    If enabled and supported by the driver, will add model usage and token metrics into the Kong log plugin(s) output. Default: false
    logPayloads Boolean
    If enabled, will log the request and response body into the Kong log plugin(s) output. Default: false
    logStatistics Boolean
    If enabled and supported by the driver, will add model usage and token metrics into the Kong log plugin(s) output. Default: false
    logPayloads boolean
    If enabled, will log the request and response body into the Kong log plugin(s) output. Default: false
    logStatistics boolean
    If enabled and supported by the driver, will add model usage and token metrics into the Kong log plugin(s) output. Default: false
    log_payloads bool
    If enabled, will log the request and response body into the Kong log plugin(s) output. Default: false
    log_statistics bool
    If enabled and supported by the driver, will add model usage and token metrics into the Kong log plugin(s) output. Default: false
    logPayloads Boolean
    If enabled, will log the request and response body into the Kong log plugin(s) output. Default: false
    logStatistics Boolean
    If enabled and supported by the driver, will add model usage and token metrics into the Kong log plugin(s) output. Default: false

    GatewayPluginAiLlmAsJudgeConfigLlmModel, GatewayPluginAiLlmAsJudgeConfigLlmModelArgs

    Provider string
    AI provider request format - Kong translates requests to and from the specified backend compatible formats. must be one of ["anthropic", "azure", "bedrock", "cohere", "gemini", "huggingface", "llama2", "mistral", "openai"]
    Name string
    Model name to execute.
    Options GatewayPluginAiLlmAsJudgeConfigLlmModelOptions
    Key/value settings for the model
    Provider string
    AI provider request format - Kong translates requests to and from the specified backend compatible formats. must be one of ["anthropic", "azure", "bedrock", "cohere", "gemini", "huggingface", "llama2", "mistral", "openai"]
    Name string
    Model name to execute.
    Options GatewayPluginAiLlmAsJudgeConfigLlmModelOptions
    Key/value settings for the model
    provider String
    AI provider request format - Kong translates requests to and from the specified backend compatible formats. must be one of ["anthropic", "azure", "bedrock", "cohere", "gemini", "huggingface", "llama2", "mistral", "openai"]
    name String
    Model name to execute.
    options GatewayPluginAiLlmAsJudgeConfigLlmModelOptions
    Key/value settings for the model
    provider string
    AI provider request format - Kong translates requests to and from the specified backend compatible formats. must be one of ["anthropic", "azure", "bedrock", "cohere", "gemini", "huggingface", "llama2", "mistral", "openai"]
    name string
    Model name to execute.
    options GatewayPluginAiLlmAsJudgeConfigLlmModelOptions
    Key/value settings for the model
    provider str
    AI provider request format - Kong translates requests to and from the specified backend compatible formats. must be one of ["anthropic", "azure", "bedrock", "cohere", "gemini", "huggingface", "llama2", "mistral", "openai"]
    name str
    Model name to execute.
    options GatewayPluginAiLlmAsJudgeConfigLlmModelOptions
    Key/value settings for the model
    provider String
    AI provider request format - Kong translates requests to and from the specified backend compatible formats. must be one of ["anthropic", "azure", "bedrock", "cohere", "gemini", "huggingface", "llama2", "mistral", "openai"]
    name String
    Model name to execute.
    options Property Map
    Key/value settings for the model

    GatewayPluginAiLlmAsJudgeConfigLlmModelOptions, GatewayPluginAiLlmAsJudgeConfigLlmModelOptionsArgs

    AnthropicVersion string
    Defines the schema/API version, if using Anthropic provider.
    AzureApiVersion string
    'api-version' for Azure OpenAI instances. Default: "2023-05-15"
    AzureDeploymentId string
    Deployment ID for Azure OpenAI instances.
    AzureInstance string
    Instance name for Azure OpenAI hosted models.
    Bedrock GatewayPluginAiLlmAsJudgeConfigLlmModelOptionsBedrock
    Cohere GatewayPluginAiLlmAsJudgeConfigLlmModelOptionsCohere
    EmbeddingsDimensions double
    If using embeddings models, set the number of dimensions to generate.
    Gemini GatewayPluginAiLlmAsJudgeConfigLlmModelOptionsGemini
    Huggingface GatewayPluginAiLlmAsJudgeConfigLlmModelOptionsHuggingface
    InputCost double
    Defines the cost per 1M tokens in your prompt.
    Llama2Format string
    If using llama2 provider, select the upstream message format. must be one of ["ollama", "openai", "raw"]
    MaxTokens double
    Defines the max_tokens, if using chat or completion models.
    MistralFormat string
    If using mistral provider, select the upstream message format. must be one of ["ollama", "openai"]
    OutputCost double
    Defines the cost per 1M tokens in the output of the AI.
    Temperature double
    Defines the matching temperature, if using chat or completion models.
    TopK double
    Defines the top-k most likely tokens, if supported.
    TopP double
    Defines the top-p probability mass, if supported.
    UpstreamPath string
    Manually specify or override the AI operation path, used when e.g. using the 'preserve' route_type.
    UpstreamUrl string
    Manually specify or override the full URL to the AI operation endpoints, when calling (self-)hosted models, or for running via a private endpoint.
    AnthropicVersion string
    Defines the schema/API version, if using Anthropic provider.
    AzureApiVersion string
    'api-version' for Azure OpenAI instances. Default: "2023-05-15"
    AzureDeploymentId string
    Deployment ID for Azure OpenAI instances.
    AzureInstance string
    Instance name for Azure OpenAI hosted models.
    Bedrock GatewayPluginAiLlmAsJudgeConfigLlmModelOptionsBedrock
    Cohere GatewayPluginAiLlmAsJudgeConfigLlmModelOptionsCohere
    EmbeddingsDimensions float64
    If using embeddings models, set the number of dimensions to generate.
    Gemini GatewayPluginAiLlmAsJudgeConfigLlmModelOptionsGemini
    Huggingface GatewayPluginAiLlmAsJudgeConfigLlmModelOptionsHuggingface
    InputCost float64
    Defines the cost per 1M tokens in your prompt.
    Llama2Format string
    If using llama2 provider, select the upstream message format. must be one of ["ollama", "openai", "raw"]
    MaxTokens float64
    Defines the max_tokens, if using chat or completion models.
    MistralFormat string
    If using mistral provider, select the upstream message format. must be one of ["ollama", "openai"]
    OutputCost float64
    Defines the cost per 1M tokens in the output of the AI.
    Temperature float64
    Defines the matching temperature, if using chat or completion models.
    TopK float64
    Defines the top-k most likely tokens, if supported.
    TopP float64
    Defines the top-p probability mass, if supported.
    UpstreamPath string
    Manually specify or override the AI operation path, used when e.g. using the 'preserve' route_type.
    UpstreamUrl string
    Manually specify or override the full URL to the AI operation endpoints, when calling (self-)hosted models, or for running via a private endpoint.
    anthropicVersion String
    Defines the schema/API version, if using Anthropic provider.
    azureApiVersion String
    'api-version' for Azure OpenAI instances. Default: "2023-05-15"
    azureDeploymentId String
    Deployment ID for Azure OpenAI instances.
    azureInstance String
    Instance name for Azure OpenAI hosted models.
    bedrock GatewayPluginAiLlmAsJudgeConfigLlmModelOptionsBedrock
    cohere GatewayPluginAiLlmAsJudgeConfigLlmModelOptionsCohere
    embeddingsDimensions Double
    If using embeddings models, set the number of dimensions to generate.
    gemini GatewayPluginAiLlmAsJudgeConfigLlmModelOptionsGemini
    huggingface GatewayPluginAiLlmAsJudgeConfigLlmModelOptionsHuggingface
    inputCost Double
    Defines the cost per 1M tokens in your prompt.
    llama2Format String
    If using llama2 provider, select the upstream message format. must be one of ["ollama", "openai", "raw"]
    maxTokens Double
    Defines the max_tokens, if using chat or completion models.
    mistralFormat String
    If using mistral provider, select the upstream message format. must be one of ["ollama", "openai"]
    outputCost Double
    Defines the cost per 1M tokens in the output of the AI.
    temperature Double
    Defines the matching temperature, if using chat or completion models.
    topK Double
    Defines the top-k most likely tokens, if supported.
    topP Double
    Defines the top-p probability mass, if supported.
    upstreamPath String
    Manually specify or override the AI operation path, used when e.g. using the 'preserve' route_type.
    upstreamUrl String
    Manually specify or override the full URL to the AI operation endpoints, when calling (self-)hosted models, or for running via a private endpoint.
    anthropicVersion string
    Defines the schema/API version, if using Anthropic provider.
    azureApiVersion string
    'api-version' for Azure OpenAI instances. Default: "2023-05-15"
    azureDeploymentId string
    Deployment ID for Azure OpenAI instances.
    azureInstance string
    Instance name for Azure OpenAI hosted models.
    bedrock GatewayPluginAiLlmAsJudgeConfigLlmModelOptionsBedrock
    cohere GatewayPluginAiLlmAsJudgeConfigLlmModelOptionsCohere
    embeddingsDimensions number
    If using embeddings models, set the number of dimensions to generate.
    gemini GatewayPluginAiLlmAsJudgeConfigLlmModelOptionsGemini
    huggingface GatewayPluginAiLlmAsJudgeConfigLlmModelOptionsHuggingface
    inputCost number
    Defines the cost per 1M tokens in your prompt.
    llama2Format string
    If using llama2 provider, select the upstream message format. must be one of ["ollama", "openai", "raw"]
    maxTokens number
    Defines the max_tokens, if using chat or completion models.
    mistralFormat string
    If using mistral provider, select the upstream message format. must be one of ["ollama", "openai"]
    outputCost number
    Defines the cost per 1M tokens in the output of the AI.
    temperature number
    Defines the matching temperature, if using chat or completion models.
    topK number
    Defines the top-k most likely tokens, if supported.
    topP number
    Defines the top-p probability mass, if supported.
    upstreamPath string
    Manually specify or override the AI operation path, used when e.g. using the 'preserve' route_type.
    upstreamUrl string
    Manually specify or override the full URL to the AI operation endpoints, when calling (self-)hosted models, or for running via a private endpoint.
    anthropic_version str
    Defines the schema/API version, if using Anthropic provider.
    azure_api_version str
    'api-version' for Azure OpenAI instances. Default: "2023-05-15"
    azure_deployment_id str
    Deployment ID for Azure OpenAI instances.
    azure_instance str
    Instance name for Azure OpenAI hosted models.
    bedrock GatewayPluginAiLlmAsJudgeConfigLlmModelOptionsBedrock
    cohere GatewayPluginAiLlmAsJudgeConfigLlmModelOptionsCohere
    embeddings_dimensions float
    If using embeddings models, set the number of dimensions to generate.
    gemini GatewayPluginAiLlmAsJudgeConfigLlmModelOptionsGemini
    huggingface GatewayPluginAiLlmAsJudgeConfigLlmModelOptionsHuggingface
    input_cost float
    Defines the cost per 1M tokens in your prompt.
    llama2_format str
    If using llama2 provider, select the upstream message format. must be one of ["ollama", "openai", "raw"]
    max_tokens float
    Defines the max_tokens, if using chat or completion models.
    mistral_format str
    If using mistral provider, select the upstream message format. must be one of ["ollama", "openai"]
    output_cost float
    Defines the cost per 1M tokens in the output of the AI.
    temperature float
    Defines the matching temperature, if using chat or completion models.
    top_k float
    Defines the top-k most likely tokens, if supported.
    top_p float
    Defines the top-p probability mass, if supported.
    upstream_path str
    Manually specify or override the AI operation path, used when e.g. using the 'preserve' route_type.
    upstream_url str
    Manually specify or override the full URL to the AI operation endpoints, when calling (self-)hosted models, or for running via a private endpoint.
    anthropicVersion String
    Defines the schema/API version, if using Anthropic provider.
    azureApiVersion String
    'api-version' for Azure OpenAI instances. Default: "2023-05-15"
    azureDeploymentId String
    Deployment ID for Azure OpenAI instances.
    azureInstance String
    Instance name for Azure OpenAI hosted models.
    bedrock Property Map
    cohere Property Map
    embeddingsDimensions Number
    If using embeddings models, set the number of dimensions to generate.
    gemini Property Map
    huggingface Property Map
    inputCost Number
    Defines the cost per 1M tokens in your prompt.
    llama2Format String
    If using llama2 provider, select the upstream message format. must be one of ["ollama", "openai", "raw"]
    maxTokens Number
    Defines the max_tokens, if using chat or completion models.
    mistralFormat String
    If using mistral provider, select the upstream message format. must be one of ["ollama", "openai"]
    outputCost Number
    Defines the cost per 1M tokens in the output of the AI.
    temperature Number
    Defines the matching temperature, if using chat or completion models.
    topK Number
    Defines the top-k most likely tokens, if supported.
    topP Number
    Defines the top-p probability mass, if supported.
    upstreamPath String
    Manually specify or override the AI operation path, used when e.g. using the 'preserve' route_type.
    upstreamUrl String
    Manually specify or override the full URL to the AI operation endpoints, when calling (self-)hosted models, or for running via a private endpoint.

    GatewayPluginAiLlmAsJudgeConfigLlmModelOptionsBedrock, GatewayPluginAiLlmAsJudgeConfigLlmModelOptionsBedrockArgs

    AwsAssumeRoleArn string
    If using AWS providers (Bedrock) you can assume a different role after authentication with the current IAM context is successful.
    AwsRegion string
    If using AWS providers (Bedrock) you can override the AWS_REGION environment variable by setting this option.
    AwsRoleSessionName string
    If using AWS providers (Bedrock), set the identifier of the assumed role session.
    AwsStsEndpointUrl string
    If using AWS providers (Bedrock), override the STS endpoint URL when assuming a different role.
    EmbeddingsNormalize bool
    If using AWS providers (Bedrock), set to true to normalize the embeddings. Default: false
    PerformanceConfigLatency string
    Force the client's performance configuration 'latency' for all requests. Leave empty to let the consumer select the performance configuration.
    AwsAssumeRoleArn string
    If using AWS providers (Bedrock) you can assume a different role after authentication with the current IAM context is successful.
    AwsRegion string
    If using AWS providers (Bedrock) you can override the AWS_REGION environment variable by setting this option.
    AwsRoleSessionName string
    If using AWS providers (Bedrock), set the identifier of the assumed role session.
    AwsStsEndpointUrl string
    If using AWS providers (Bedrock), override the STS endpoint URL when assuming a different role.
    EmbeddingsNormalize bool
    If using AWS providers (Bedrock), set to true to normalize the embeddings. Default: false
    PerformanceConfigLatency string
    Force the client's performance configuration 'latency' for all requests. Leave empty to let the consumer select the performance configuration.
    awsAssumeRoleArn String
    If using AWS providers (Bedrock) you can assume a different role after authentication with the current IAM context is successful.
    awsRegion String
    If using AWS providers (Bedrock) you can override the AWS_REGION environment variable by setting this option.
    awsRoleSessionName String
    If using AWS providers (Bedrock), set the identifier of the assumed role session.
    awsStsEndpointUrl String
    If using AWS providers (Bedrock), override the STS endpoint URL when assuming a different role.
    embeddingsNormalize Boolean
    If using AWS providers (Bedrock), set to true to normalize the embeddings. Default: false
    performanceConfigLatency String
    Force the client's performance configuration 'latency' for all requests. Leave empty to let the consumer select the performance configuration.
    awsAssumeRoleArn string
    If using AWS providers (Bedrock) you can assume a different role after authentication with the current IAM context is successful.
    awsRegion string
    If using AWS providers (Bedrock) you can override the AWS_REGION environment variable by setting this option.
    awsRoleSessionName string
    If using AWS providers (Bedrock), set the identifier of the assumed role session.
    awsStsEndpointUrl string
    If using AWS providers (Bedrock), override the STS endpoint URL when assuming a different role.
    embeddingsNormalize boolean
    If using AWS providers (Bedrock), set to true to normalize the embeddings. Default: false
    performanceConfigLatency string
    Force the client's performance configuration 'latency' for all requests. Leave empty to let the consumer select the performance configuration.
    aws_assume_role_arn str
    If using AWS providers (Bedrock) you can assume a different role after authentication with the current IAM context is successful.
    aws_region str
    If using AWS providers (Bedrock) you can override the AWS_REGION environment variable by setting this option.
    aws_role_session_name str
    If using AWS providers (Bedrock), set the identifier of the assumed role session.
    aws_sts_endpoint_url str
    If using AWS providers (Bedrock), override the STS endpoint URL when assuming a different role.
    embeddings_normalize bool
    If using AWS providers (Bedrock), set to true to normalize the embeddings. Default: false
    performance_config_latency str
    Force the client's performance configuration 'latency' for all requests. Leave empty to let the consumer select the performance configuration.
    awsAssumeRoleArn String
    If using AWS providers (Bedrock) you can assume a different role after authentication with the current IAM context is successful.
    awsRegion String
    If using AWS providers (Bedrock) you can override the AWS_REGION environment variable by setting this option.
    awsRoleSessionName String
    If using AWS providers (Bedrock), set the identifier of the assumed role session.
    awsStsEndpointUrl String
    If using AWS providers (Bedrock), override the STS endpoint URL when assuming a different role.
    embeddingsNormalize Boolean
    If using AWS providers (Bedrock), set to true to normalize the embeddings. Default: false
    performanceConfigLatency String
    Force the client's performance configuration 'latency' for all requests. Leave empty to let the consumer select the performance configuration.

    GatewayPluginAiLlmAsJudgeConfigLlmModelOptionsCohere, GatewayPluginAiLlmAsJudgeConfigLlmModelOptionsCohereArgs

    EmbeddingInputType string
    The purpose of the input text to calculate embedding vectors. Default: "classification"; must be one of ["classification", "clustering", "image", "search_document", "search_query"]
    WaitForModel bool
    Wait for the model if it is not ready
    EmbeddingInputType string
    The purpose of the input text to calculate embedding vectors. Default: "classification"; must be one of ["classification", "clustering", "image", "search_document", "search_query"]
    WaitForModel bool
    Wait for the model if it is not ready
    embeddingInputType String
    The purpose of the input text to calculate embedding vectors. Default: "classification"; must be one of ["classification", "clustering", "image", "search_document", "search_query"]
    waitForModel Boolean
    Wait for the model if it is not ready
    embeddingInputType string
    The purpose of the input text to calculate embedding vectors. Default: "classification"; must be one of ["classification", "clustering", "image", "search_document", "search_query"]
    waitForModel boolean
    Wait for the model if it is not ready
    embedding_input_type str
    The purpose of the input text to calculate embedding vectors. Default: "classification"; must be one of ["classification", "clustering", "image", "search_document", "search_query"]
    wait_for_model bool
    Wait for the model if it is not ready
    embeddingInputType String
    The purpose of the input text to calculate embedding vectors. Default: "classification"; must be one of ["classification", "clustering", "image", "search_document", "search_query"]
    waitForModel Boolean
    Wait for the model if it is not ready

    GatewayPluginAiLlmAsJudgeConfigLlmModelOptionsGemini, GatewayPluginAiLlmAsJudgeConfigLlmModelOptionsGeminiArgs

    ApiEndpoint string
    If running Gemini on Vertex, specify the regional API endpoint (hostname only).
    EndpointId string
    If running Gemini on Vertex Model Garden, specify the endpoint ID.
    LocationId string
    If running Gemini on Vertex, specify the location ID.
    ProjectId string
    If running Gemini on Vertex, specify the project ID.
    ApiEndpoint string
    If running Gemini on Vertex, specify the regional API endpoint (hostname only).
    EndpointId string
    If running Gemini on Vertex Model Garden, specify the endpoint ID.
    LocationId string
    If running Gemini on Vertex, specify the location ID.
    ProjectId string
    If running Gemini on Vertex, specify the project ID.
    apiEndpoint String
    If running Gemini on Vertex, specify the regional API endpoint (hostname only).
    endpointId String
    If running Gemini on Vertex Model Garden, specify the endpoint ID.
    locationId String
    If running Gemini on Vertex, specify the location ID.
    projectId String
    If running Gemini on Vertex, specify the project ID.
    apiEndpoint string
    If running Gemini on Vertex, specify the regional API endpoint (hostname only).
    endpointId string
    If running Gemini on Vertex Model Garden, specify the endpoint ID.
    locationId string
    If running Gemini on Vertex, specify the location ID.
    projectId string
    If running Gemini on Vertex, specify the project ID.
    api_endpoint str
    If running Gemini on Vertex, specify the regional API endpoint (hostname only).
    endpoint_id str
    If running Gemini on Vertex Model Garden, specify the endpoint ID.
    location_id str
    If running Gemini on Vertex, specify the location ID.
    project_id str
    If running Gemini on Vertex, specify the project ID.
    apiEndpoint String
    If running Gemini on Vertex, specify the regional API endpoint (hostname only).
    endpointId String
    If running Gemini on Vertex Model Garden, specify the endpoint ID.
    locationId String
    If running Gemini on Vertex, specify the location ID.
    projectId String
    If running Gemini on Vertex, specify the project ID.

    GatewayPluginAiLlmAsJudgeConfigLlmModelOptionsHuggingface, GatewayPluginAiLlmAsJudgeConfigLlmModelOptionsHuggingfaceArgs

    UseCache bool
    Use the cache layer on the inference API
    WaitForModel bool
    Wait for the model if it is not ready
    UseCache bool
    Use the cache layer on the inference API
    WaitForModel bool
    Wait for the model if it is not ready
    useCache Boolean
    Use the cache layer on the inference API
    waitForModel Boolean
    Wait for the model if it is not ready
    useCache boolean
    Use the cache layer on the inference API
    waitForModel boolean
    Wait for the model if it is not ready
    use_cache bool
    Use the cache layer on the inference API
    wait_for_model bool
    Wait for the model if it is not ready
    useCache Boolean
    Use the cache layer on the inference API
    waitForModel Boolean
    Wait for the model if it is not ready

    GatewayPluginAiLlmAsJudgeConsumer, GatewayPluginAiLlmAsJudgeConsumerArgs

    Id string
    Id string
    id String
    id string
    id str
    id String

    GatewayPluginAiLlmAsJudgeConsumerGroup, GatewayPluginAiLlmAsJudgeConsumerGroupArgs

    Id string
    Id string
    id String
    id string
    id str
    id String

    GatewayPluginAiLlmAsJudgeOrdering, GatewayPluginAiLlmAsJudgeOrderingArgs

    GatewayPluginAiLlmAsJudgeOrderingAfter, GatewayPluginAiLlmAsJudgeOrderingAfterArgs

    Accesses List<string>
    Accesses []string
    accesses List<String>
    accesses string[]
    accesses Sequence[str]
    accesses List<String>

    GatewayPluginAiLlmAsJudgeOrderingBefore, GatewayPluginAiLlmAsJudgeOrderingBeforeArgs

    Accesses List<string>
    Accesses []string
    accesses List<String>
    accesses string[]
    accesses Sequence[str]
    accesses List<String>

    GatewayPluginAiLlmAsJudgePartial, GatewayPluginAiLlmAsJudgePartialArgs

    Id string
    A string representing a UUID (universally unique identifier).
    Name string
    A unique string representing a UTF-8 encoded name.
    Path string
    Id string
    A string representing a UUID (universally unique identifier).
    Name string
    A unique string representing a UTF-8 encoded name.
    Path string
    id String
    A string representing a UUID (universally unique identifier).
    name String
    A unique string representing a UTF-8 encoded name.
    path String
    id string
    A string representing a UUID (universally unique identifier).
    name string
    A unique string representing a UTF-8 encoded name.
    path string
    id str
    A string representing a UUID (universally unique identifier).
    name str
    A unique string representing a UTF-8 encoded name.
    path str
    id String
    A string representing a UUID (universally unique identifier).
    name String
    A unique string representing a UTF-8 encoded name.
    path String

    GatewayPluginAiLlmAsJudgeRoute, GatewayPluginAiLlmAsJudgeRouteArgs

    Id string
    Id string
    id String
    id string
    id str
    id String

    GatewayPluginAiLlmAsJudgeService, GatewayPluginAiLlmAsJudgeServiceArgs

    Id string
    Id string
    id String
    id string
    id str
    id String

    Import

    In Terraform v1.5.0 and later, the import block can be used with the id attribute, for example:

    terraform

    import {
      to = konnect_gateway_plugin_ai_llm_as_judge.my_konnect_gateway_plugin_ai_llm_as_judge
      id = jsonencode({
        control_plane_id = "9524ec7d-36d9-465d-a8c5-83a3c9390458"
        id = "3473c251-5b6c-4f45-b1ff-7ede735a366d"
      })
    }

    The pulumi import command can be used, for example:

    $ pulumi import konnect:index/gatewayPluginAiLlmAsJudge:GatewayPluginAiLlmAsJudge my_konnect_gateway_plugin_ai_llm_as_judge '{"control_plane_id": "9524ec7d-36d9-465d-a8c5-83a3c9390458", "id": "3473c251-5b6c-4f45-b1ff-7ede735a366d"}'
    

    To learn more about importing existing cloud resources, see Importing resources.

    Package Details

    Repository
    konnect kong/terraform-provider-konnect
    License
    Notes
    This Pulumi package is based on the konnect Terraform Provider.
    konnect logo
    konnect 3.4.1 published on Wednesday, Oct 29, 2025 by kong
      Meet Neo: Your AI Platform Teammate