
konnect.GatewayPluginAiProxy

konnect 3.0.0 published on Friday, Aug 22, 2025 by kong

    GatewayPluginAiProxy Resource

    Example Usage

    Example coming soon!
    
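    In the meantime, here is a minimal, hedged TypeScript sketch modeled on the constructor reference below. The import path, control plane ID, API key, and model name are placeholder assumptions; llm/v1/chat and openai are standard values for Kong's AI Proxy plugin.
    
    import * as konnect from "@pulumi/konnect"; // package name assumed; check the provider's install docs
    
    // Minimal sketch: proxy chat-completion traffic to OpenAI.
    // The control plane ID and API key below are placeholders.
    const myAiProxy = new konnect.GatewayPluginAiProxy("my-ai-proxy", {
        controlPlaneId: "9524ec7d-36d9-465d-a8c5-83a3c9390458",
        enabled: true,
        config: {
            routeType: "llm/v1/chat",
            auth: {
                headerName: "Authorization",
                headerValue: "Bearer ...my_openai_api_key...",
            },
            model: {
                provider: "openai",
                name: "gpt-4o",
                options: {
                    maxTokens: 512,
                    temperature: 1.0,
                },
            },
        },
    });
    
    The generated Java program and YAML definition below exercise the full property surface with placeholder values.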
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.konnect.GatewayPluginAiProxy;
    import com.pulumi.konnect.GatewayPluginAiProxyArgs;
    import com.pulumi.konnect.inputs.GatewayPluginAiProxyConfigArgs;
    import com.pulumi.konnect.inputs.GatewayPluginAiProxyConfigAuthArgs;
    import com.pulumi.konnect.inputs.GatewayPluginAiProxyConfigLoggingArgs;
    import com.pulumi.konnect.inputs.GatewayPluginAiProxyConfigModelArgs;
    import com.pulumi.konnect.inputs.GatewayPluginAiProxyConfigModelOptionsArgs;
    import com.pulumi.konnect.inputs.GatewayPluginAiProxyConfigModelOptionsBedrockArgs;
    import com.pulumi.konnect.inputs.GatewayPluginAiProxyConfigModelOptionsCohereArgs;
    import com.pulumi.konnect.inputs.GatewayPluginAiProxyConfigModelOptionsGeminiArgs;
    import com.pulumi.konnect.inputs.GatewayPluginAiProxyConfigModelOptionsHuggingfaceArgs;
    import com.pulumi.konnect.inputs.GatewayPluginAiProxyConsumerArgs;
    import com.pulumi.konnect.inputs.GatewayPluginAiProxyConsumerGroupArgs;
    import com.pulumi.konnect.inputs.GatewayPluginAiProxyOrderingArgs;
    import com.pulumi.konnect.inputs.GatewayPluginAiProxyOrderingAfterArgs;
    import com.pulumi.konnect.inputs.GatewayPluginAiProxyOrderingBeforeArgs;
    import com.pulumi.konnect.inputs.GatewayPluginAiProxyPartialArgs;
    import com.pulumi.konnect.inputs.GatewayPluginAiProxyRouteArgs;
    import com.pulumi.konnect.inputs.GatewayPluginAiProxyServiceArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            var myGatewaypluginaiproxy = new GatewayPluginAiProxy("myGatewaypluginaiproxy", GatewayPluginAiProxyArgs.builder()
                .config(GatewayPluginAiProxyConfigArgs.builder()
                    .auth(GatewayPluginAiProxyConfigAuthArgs.builder()
                        .allowOverride(true)
                        .awsAccessKeyId("...my_aws_access_key_id...")
                        .awsSecretAccessKey("...my_aws_secret_access_key...")
                        .azureClientId("...my_azure_client_id...")
                        .azureClientSecret("...my_azure_client_secret...")
                        .azureTenantId("...my_azure_tenant_id...")
                        .azureUseManagedIdentity(true)
                        .gcpServiceAccountJson("...my_gcp_service_account_json...")
                        .gcpUseServiceAccount(false)
                        .headerName("...my_header_name...")
                        .headerValue("...my_header_value...")
                        .paramLocation("query")
                        .paramName("...my_param_name...")
                        .paramValue("...my_param_value...")
                        .build())
                    .genaiCategory("image/generation")
                    .llmFormat("bedrock")
                    .logging(GatewayPluginAiProxyConfigLoggingArgs.builder()
                        .logPayloads(false)
                        .logStatistics(true)
                        .build())
                    .maxRequestBodySize(10)
                    .model(GatewayPluginAiProxyConfigModelArgs.builder()
                        .name("...my_name...")
                        .options(GatewayPluginAiProxyConfigModelOptionsArgs.builder()
                            .anthropicVersion("...my_anthropic_version...")
                            .azureApiVersion("...my_azure_api_version...")
                            .azureDeploymentId("...my_azure_deployment_id...")
                            .azureInstance("...my_azure_instance...")
                            .bedrock(GatewayPluginAiProxyConfigModelOptionsBedrockArgs.builder()
                                .awsAssumeRoleArn("...my_aws_assume_role_arn...")
                                .awsRegion("...my_aws_region...")
                                .awsRoleSessionName("...my_aws_role_session_name...")
                                .awsStsEndpointUrl("...my_aws_sts_endpoint_url...")
                                .embeddingsNormalize(false)
                                .performanceConfigLatency("...my_performance_config_latency...")
                                .build())
                            .cohere(GatewayPluginAiProxyConfigModelOptionsCohereArgs.builder()
                                .embeddingInputType("image")
                                .waitForModel(false)
                                .build())
                            .embeddingsDimensions(6)
                            .gemini(GatewayPluginAiProxyConfigModelOptionsGeminiArgs.builder()
                                .apiEndpoint("...my_api_endpoint...")
                                .locationId("...my_location_id...")
                                .projectId("...my_project_id...")
                                .build())
                            .huggingface(GatewayPluginAiProxyConfigModelOptionsHuggingfaceArgs.builder()
                                .useCache(true)
                                .waitForModel(false)
                                .build())
                            .inputCost(7.42)
                            .llama2Format("openai")
                            .maxTokens(9)
                            .mistralFormat("ollama")
                            .outputCost(1.81)
                            .temperature(2.26)
                            .topK(359)
                            .topP(0.14)
                            .upstreamPath("...my_upstream_path...")
                            .upstreamUrl("...my_upstream_url...")
                            .build())
                        .provider("anthropic")
                        .build())
                    .modelNameHeader(true)
                    .responseStreaming("allow")
                    .routeType("audio/v1/audio/speech")
                    .build())
                .consumer(GatewayPluginAiProxyConsumerArgs.builder()
                    .id("...my_id...")
                    .build())
                .consumerGroup(GatewayPluginAiProxyConsumerGroupArgs.builder()
                    .id("...my_id...")
                    .build())
                .controlPlaneId("9524ec7d-36d9-465d-a8c5-83a3c9390458")
                .createdAt(9)
                .enabled(true)
                .gatewayPluginAiProxyId("...my_id...")
                .instanceName("...my_instance_name...")
                .ordering(GatewayPluginAiProxyOrderingArgs.builder()
                    .after(GatewayPluginAiProxyOrderingAfterArgs.builder()
                    .accesses("...")
                        .build())
                    .before(GatewayPluginAiProxyOrderingBeforeArgs.builder()
                    .accesses("...")
                        .build())
                    .build())
                .partials(GatewayPluginAiProxyPartialArgs.builder()
                    .id("...my_id...")
                    .name("...my_name...")
                    .path("...my_path...")
                    .build())
                .protocols("http")
                .route(GatewayPluginAiProxyRouteArgs.builder()
                    .id("...my_id...")
                    .build())
                .service(GatewayPluginAiProxyServiceArgs.builder()
                    .id("...my_id...")
                    .build())
                .tags("...")
                .updatedAt(3)
                .build());
    
        }
    }
    
    resources:
      myGatewaypluginaiproxy:
        type: konnect:GatewayPluginAiProxy
        properties:
          config:
            auth:
              allowOverride: true
              awsAccessKeyId: '...my_aws_access_key_id...'
              awsSecretAccessKey: '...my_aws_secret_access_key...'
              azureClientId: '...my_azure_client_id...'
              azureClientSecret: '...my_azure_client_secret...'
              azureTenantId: '...my_azure_tenant_id...'
              azureUseManagedIdentity: true
              gcpServiceAccountJson: '...my_gcp_service_account_json...'
              gcpUseServiceAccount: false
              headerName: '...my_header_name...'
              headerValue: '...my_header_value...'
              paramLocation: query
              paramName: '...my_param_name...'
              paramValue: '...my_param_value...'
            genaiCategory: image/generation
            llmFormat: bedrock
            logging:
              logPayloads: false
              logStatistics: true
            maxRequestBodySize: 10
            model:
              name: '...my_name...'
              options:
                anthropicVersion: '...my_anthropic_version...'
                azureApiVersion: '...my_azure_api_version...'
                azureDeploymentId: '...my_azure_deployment_id...'
                azureInstance: '...my_azure_instance...'
                bedrock:
                  awsAssumeRoleArn: '...my_aws_assume_role_arn...'
                  awsRegion: '...my_aws_region...'
                  awsRoleSessionName: '...my_aws_role_session_name...'
                  awsStsEndpointUrl: '...my_aws_sts_endpoint_url...'
                  embeddingsNormalize: false
                  performanceConfigLatency: '...my_performance_config_latency...'
                cohere:
                  embeddingInputType: image
                  waitForModel: false
                embeddingsDimensions: 6
                gemini:
                  apiEndpoint: '...my_api_endpoint...'
                  locationId: '...my_location_id...'
                  projectId: '...my_project_id...'
                huggingface:
                  useCache: true
                  waitForModel: false
                inputCost: 7.42
                llama2Format: openai
                maxTokens: 9
                mistralFormat: ollama
                outputCost: 1.81
                temperature: 2.26
                topK: 359
                topP: 0.14
                upstreamPath: '...my_upstream_path...'
                upstreamUrl: '...my_upstream_url...'
              provider: anthropic
            modelNameHeader: true
            responseStreaming: allow
            routeType: audio/v1/audio/speech
          consumer:
            id: '...my_id...'
          consumerGroup:
            id: '...my_id...'
          controlPlaneId: 9524ec7d-36d9-465d-a8c5-83a3c9390458
          createdAt: 9
          enabled: true
          gatewayPluginAiProxyId: '...my_id...'
          instanceName: '...my_instance_name...'
          ordering:
            after:
              accesses:
                - '...'
            before:
              accesses:
                - '...'
          partials:
            - id: '...my_id...'
              name: '...my_name...'
              path: '...my_path...'
          protocols:
            - http
          route:
            id: '...my_id...'
          service:
            id: '...my_id...'
          tags:
            - '...'
          updatedAt: 3
    

    Create GatewayPluginAiProxy Resource

    Resources are created with functions called constructors. To learn more about declaring and configuring resources, see Resources.

    Constructor syntax

    new GatewayPluginAiProxy(name: string, args: GatewayPluginAiProxyArgs, opts?: CustomResourceOptions);
    @overload
    def GatewayPluginAiProxy(resource_name: str,
                             args: GatewayPluginAiProxyArgs,
                             opts: Optional[ResourceOptions] = None)
    
    @overload
    def GatewayPluginAiProxy(resource_name: str,
                             opts: Optional[ResourceOptions] = None,
                             control_plane_id: Optional[str] = None,
                             config: Optional[GatewayPluginAiProxyConfigArgs] = None,
                             gateway_plugin_ai_proxy_id: Optional[str] = None,
                             consumer_group: Optional[GatewayPluginAiProxyConsumerGroupArgs] = None,
                             created_at: Optional[float] = None,
                             enabled: Optional[bool] = None,
                             consumer: Optional[GatewayPluginAiProxyConsumerArgs] = None,
                             instance_name: Optional[str] = None,
                             ordering: Optional[GatewayPluginAiProxyOrderingArgs] = None,
                             partials: Optional[Sequence[GatewayPluginAiProxyPartialArgs]] = None,
                             protocols: Optional[Sequence[str]] = None,
                             route: Optional[GatewayPluginAiProxyRouteArgs] = None,
                             service: Optional[GatewayPluginAiProxyServiceArgs] = None,
                             tags: Optional[Sequence[str]] = None,
                             updated_at: Optional[float] = None)
    func NewGatewayPluginAiProxy(ctx *Context, name string, args GatewayPluginAiProxyArgs, opts ...ResourceOption) (*GatewayPluginAiProxy, error)
    public GatewayPluginAiProxy(string name, GatewayPluginAiProxyArgs args, CustomResourceOptions? opts = null)
    public GatewayPluginAiProxy(String name, GatewayPluginAiProxyArgs args)
    public GatewayPluginAiProxy(String name, GatewayPluginAiProxyArgs args, CustomResourceOptions options)
    
    type: konnect:GatewayPluginAiProxy
    properties: # The arguments to resource properties.
    options: # Bag of options to control resource's behavior.
    
    

    Parameters

    name string
    The unique name of the resource.
    args GatewayPluginAiProxyArgs
    The arguments to resource properties.
    opts CustomResourceOptions
    Bag of options to control resource's behavior.
    resource_name str
    The unique name of the resource.
    args GatewayPluginAiProxyArgs
    The arguments to resource properties.
    opts ResourceOptions
    Bag of options to control resource's behavior.
    ctx Context
    Context object for the current deployment.
    name string
    The unique name of the resource.
    args GatewayPluginAiProxyArgs
    The arguments to resource properties.
    opts ResourceOption
    Bag of options to control resource's behavior.
    name string
    The unique name of the resource.
    args GatewayPluginAiProxyArgs
    The arguments to resource properties.
    opts CustomResourceOptions
    Bag of options to control resource's behavior.
    name String
    The unique name of the resource.
    args GatewayPluginAiProxyArgs
    The arguments to resource properties.
    options CustomResourceOptions
    Bag of options to control resource's behavior.

    Constructor example

    The following reference example uses placeholder values for all input properties.

    var gatewayPluginAiProxyResource = new Konnect.GatewayPluginAiProxy("gatewayPluginAiProxyResource", new()
    {
        ControlPlaneId = "string",
        Config = new Konnect.Inputs.GatewayPluginAiProxyConfigArgs
        {
            Model = new Konnect.Inputs.GatewayPluginAiProxyConfigModelArgs
            {
                Provider = "string",
                Name = "string",
                Options = new Konnect.Inputs.GatewayPluginAiProxyConfigModelOptionsArgs
                {
                    AnthropicVersion = "string",
                    AzureApiVersion = "string",
                    AzureDeploymentId = "string",
                    AzureInstance = "string",
                    Bedrock = new Konnect.Inputs.GatewayPluginAiProxyConfigModelOptionsBedrockArgs
                    {
                        AwsAssumeRoleArn = "string",
                        AwsRegion = "string",
                        AwsRoleSessionName = "string",
                        AwsStsEndpointUrl = "string",
                        EmbeddingsNormalize = false,
                        PerformanceConfigLatency = "string",
                    },
                    Cohere = new Konnect.Inputs.GatewayPluginAiProxyConfigModelOptionsCohereArgs
                    {
                        EmbeddingInputType = "string",
                        WaitForModel = false,
                    },
                    EmbeddingsDimensions = 0,
                    Gemini = new Konnect.Inputs.GatewayPluginAiProxyConfigModelOptionsGeminiArgs
                    {
                        ApiEndpoint = "string",
                        LocationId = "string",
                        ProjectId = "string",
                    },
                    Huggingface = new Konnect.Inputs.GatewayPluginAiProxyConfigModelOptionsHuggingfaceArgs
                    {
                        UseCache = false,
                        WaitForModel = false,
                    },
                    InputCost = 0,
                    Llama2Format = "string",
                    MaxTokens = 0,
                    MistralFormat = "string",
                    OutputCost = 0,
                    Temperature = 0,
                    TopK = 0,
                    TopP = 0,
                    UpstreamPath = "string",
                    UpstreamUrl = "string",
                },
            },
            RouteType = "string",
            Auth = new Konnect.Inputs.GatewayPluginAiProxyConfigAuthArgs
            {
                AllowOverride = false,
                AwsAccessKeyId = "string",
                AwsSecretAccessKey = "string",
                AzureClientId = "string",
                AzureClientSecret = "string",
                AzureTenantId = "string",
                AzureUseManagedIdentity = false,
                GcpServiceAccountJson = "string",
                GcpUseServiceAccount = false,
                HeaderName = "string",
                HeaderValue = "string",
                ParamLocation = "string",
                ParamName = "string",
                ParamValue = "string",
            },
            GenaiCategory = "string",
            LlmFormat = "string",
            Logging = new Konnect.Inputs.GatewayPluginAiProxyConfigLoggingArgs
            {
                LogPayloads = false,
                LogStatistics = false,
            },
            MaxRequestBodySize = 0,
            ModelNameHeader = false,
            ResponseStreaming = "string",
        },
        GatewayPluginAiProxyId = "string",
        ConsumerGroup = new Konnect.Inputs.GatewayPluginAiProxyConsumerGroupArgs
        {
            Id = "string",
        },
        CreatedAt = 0,
        Enabled = false,
        Consumer = new Konnect.Inputs.GatewayPluginAiProxyConsumerArgs
        {
            Id = "string",
        },
        InstanceName = "string",
        Ordering = new Konnect.Inputs.GatewayPluginAiProxyOrderingArgs
        {
            After = new Konnect.Inputs.GatewayPluginAiProxyOrderingAfterArgs
            {
                Accesses = new[]
                {
                    "string",
                },
            },
            Before = new Konnect.Inputs.GatewayPluginAiProxyOrderingBeforeArgs
            {
                Accesses = new[]
                {
                    "string",
                },
            },
        },
        Partials = new[]
        {
            new Konnect.Inputs.GatewayPluginAiProxyPartialArgs
            {
                Id = "string",
                Name = "string",
                Path = "string",
            },
        },
        Protocols = new[]
        {
            "string",
        },
        Route = new Konnect.Inputs.GatewayPluginAiProxyRouteArgs
        {
            Id = "string",
        },
        Service = new Konnect.Inputs.GatewayPluginAiProxyServiceArgs
        {
            Id = "string",
        },
        Tags = new[]
        {
            "string",
        },
        UpdatedAt = 0,
    });
    
    example, err := konnect.NewGatewayPluginAiProxy(ctx, "gatewayPluginAiProxyResource", &konnect.GatewayPluginAiProxyArgs{
    	ControlPlaneId: pulumi.String("string"),
    	Config: &konnect.GatewayPluginAiProxyConfigArgs{
    		Model: &konnect.GatewayPluginAiProxyConfigModelArgs{
    			Provider: pulumi.String("string"),
    			Name:     pulumi.String("string"),
    			Options: &konnect.GatewayPluginAiProxyConfigModelOptionsArgs{
    				AnthropicVersion:  pulumi.String("string"),
    				AzureApiVersion:   pulumi.String("string"),
    				AzureDeploymentId: pulumi.String("string"),
    				AzureInstance:     pulumi.String("string"),
    				Bedrock: &konnect.GatewayPluginAiProxyConfigModelOptionsBedrockArgs{
    					AwsAssumeRoleArn:         pulumi.String("string"),
    					AwsRegion:                pulumi.String("string"),
    					AwsRoleSessionName:       pulumi.String("string"),
    					AwsStsEndpointUrl:        pulumi.String("string"),
    					EmbeddingsNormalize:      pulumi.Bool(false),
    					PerformanceConfigLatency: pulumi.String("string"),
    				},
    				Cohere: &konnect.GatewayPluginAiProxyConfigModelOptionsCohereArgs{
    					EmbeddingInputType: pulumi.String("string"),
    					WaitForModel:       pulumi.Bool(false),
    				},
    				EmbeddingsDimensions: pulumi.Float64(0),
    				Gemini: &konnect.GatewayPluginAiProxyConfigModelOptionsGeminiArgs{
    					ApiEndpoint: pulumi.String("string"),
    					LocationId:  pulumi.String("string"),
    					ProjectId:   pulumi.String("string"),
    				},
    				Huggingface: &konnect.GatewayPluginAiProxyConfigModelOptionsHuggingfaceArgs{
    					UseCache:     pulumi.Bool(false),
    					WaitForModel: pulumi.Bool(false),
    				},
    				InputCost:     pulumi.Float64(0),
    				Llama2Format:  pulumi.String("string"),
    				MaxTokens:     pulumi.Float64(0),
    				MistralFormat: pulumi.String("string"),
    				OutputCost:    pulumi.Float64(0),
    				Temperature:   pulumi.Float64(0),
    				TopK:          pulumi.Float64(0),
    				TopP:          pulumi.Float64(0),
    				UpstreamPath:  pulumi.String("string"),
    				UpstreamUrl:   pulumi.String("string"),
    			},
    		},
    		RouteType: pulumi.String("string"),
    		Auth: &konnect.GatewayPluginAiProxyConfigAuthArgs{
    			AllowOverride:           pulumi.Bool(false),
    			AwsAccessKeyId:          pulumi.String("string"),
    			AwsSecretAccessKey:      pulumi.String("string"),
    			AzureClientId:           pulumi.String("string"),
    			AzureClientSecret:       pulumi.String("string"),
    			AzureTenantId:           pulumi.String("string"),
    			AzureUseManagedIdentity: pulumi.Bool(false),
    			GcpServiceAccountJson:   pulumi.String("string"),
    			GcpUseServiceAccount:    pulumi.Bool(false),
    			HeaderName:              pulumi.String("string"),
    			HeaderValue:             pulumi.String("string"),
    			ParamLocation:           pulumi.String("string"),
    			ParamName:               pulumi.String("string"),
    			ParamValue:              pulumi.String("string"),
    		},
    		GenaiCategory: pulumi.String("string"),
    		LlmFormat:     pulumi.String("string"),
    		Logging: &konnect.GatewayPluginAiProxyConfigLoggingArgs{
    			LogPayloads:   pulumi.Bool(false),
    			LogStatistics: pulumi.Bool(false),
    		},
    		MaxRequestBodySize: pulumi.Float64(0),
    		ModelNameHeader:    pulumi.Bool(false),
    		ResponseStreaming:  pulumi.String("string"),
    	},
    	GatewayPluginAiProxyId: pulumi.String("string"),
    	ConsumerGroup: &konnect.GatewayPluginAiProxyConsumerGroupArgs{
    		Id: pulumi.String("string"),
    	},
    	CreatedAt: pulumi.Float64(0),
    	Enabled:   pulumi.Bool(false),
    	Consumer: &konnect.GatewayPluginAiProxyConsumerArgs{
    		Id: pulumi.String("string"),
    	},
    	InstanceName: pulumi.String("string"),
    	Ordering: &konnect.GatewayPluginAiProxyOrderingArgs{
    		After: &konnect.GatewayPluginAiProxyOrderingAfterArgs{
    			Accesses: pulumi.StringArray{
    				pulumi.String("string"),
    			},
    		},
    		Before: &konnect.GatewayPluginAiProxyOrderingBeforeArgs{
    			Accesses: pulumi.StringArray{
    				pulumi.String("string"),
    			},
    		},
    	},
    	Partials: konnect.GatewayPluginAiProxyPartialArray{
    		&konnect.GatewayPluginAiProxyPartialArgs{
    			Id:   pulumi.String("string"),
    			Name: pulumi.String("string"),
    			Path: pulumi.String("string"),
    		},
    	},
    	Protocols: pulumi.StringArray{
    		pulumi.String("string"),
    	},
    	Route: &konnect.GatewayPluginAiProxyRouteArgs{
    		Id: pulumi.String("string"),
    	},
    	Service: &konnect.GatewayPluginAiProxyServiceArgs{
    		Id: pulumi.String("string"),
    	},
    	Tags: pulumi.StringArray{
    		pulumi.String("string"),
    	},
    	UpdatedAt: pulumi.Float64(0),
    })
    
    var gatewayPluginAiProxyResource = new GatewayPluginAiProxy("gatewayPluginAiProxyResource", GatewayPluginAiProxyArgs.builder()
        .controlPlaneId("string")
        .config(GatewayPluginAiProxyConfigArgs.builder()
            .model(GatewayPluginAiProxyConfigModelArgs.builder()
                .provider("string")
                .name("string")
                .options(GatewayPluginAiProxyConfigModelOptionsArgs.builder()
                    .anthropicVersion("string")
                    .azureApiVersion("string")
                    .azureDeploymentId("string")
                    .azureInstance("string")
                    .bedrock(GatewayPluginAiProxyConfigModelOptionsBedrockArgs.builder()
                        .awsAssumeRoleArn("string")
                        .awsRegion("string")
                        .awsRoleSessionName("string")
                        .awsStsEndpointUrl("string")
                        .embeddingsNormalize(false)
                        .performanceConfigLatency("string")
                        .build())
                    .cohere(GatewayPluginAiProxyConfigModelOptionsCohereArgs.builder()
                        .embeddingInputType("string")
                        .waitForModel(false)
                        .build())
                    .embeddingsDimensions(0.0)
                    .gemini(GatewayPluginAiProxyConfigModelOptionsGeminiArgs.builder()
                        .apiEndpoint("string")
                        .locationId("string")
                        .projectId("string")
                        .build())
                    .huggingface(GatewayPluginAiProxyConfigModelOptionsHuggingfaceArgs.builder()
                        .useCache(false)
                        .waitForModel(false)
                        .build())
                    .inputCost(0.0)
                    .llama2Format("string")
                    .maxTokens(0.0)
                    .mistralFormat("string")
                    .outputCost(0.0)
                    .temperature(0.0)
                    .topK(0.0)
                    .topP(0.0)
                    .upstreamPath("string")
                    .upstreamUrl("string")
                    .build())
                .build())
            .routeType("string")
            .auth(GatewayPluginAiProxyConfigAuthArgs.builder()
                .allowOverride(false)
                .awsAccessKeyId("string")
                .awsSecretAccessKey("string")
                .azureClientId("string")
                .azureClientSecret("string")
                .azureTenantId("string")
                .azureUseManagedIdentity(false)
                .gcpServiceAccountJson("string")
                .gcpUseServiceAccount(false)
                .headerName("string")
                .headerValue("string")
                .paramLocation("string")
                .paramName("string")
                .paramValue("string")
                .build())
            .genaiCategory("string")
            .llmFormat("string")
            .logging(GatewayPluginAiProxyConfigLoggingArgs.builder()
                .logPayloads(false)
                .logStatistics(false)
                .build())
            .maxRequestBodySize(0.0)
            .modelNameHeader(false)
            .responseStreaming("string")
            .build())
        .gatewayPluginAiProxyId("string")
        .consumerGroup(GatewayPluginAiProxyConsumerGroupArgs.builder()
            .id("string")
            .build())
        .createdAt(0.0)
        .enabled(false)
        .consumer(GatewayPluginAiProxyConsumerArgs.builder()
            .id("string")
            .build())
        .instanceName("string")
        .ordering(GatewayPluginAiProxyOrderingArgs.builder()
            .after(GatewayPluginAiProxyOrderingAfterArgs.builder()
                .accesses("string")
                .build())
            .before(GatewayPluginAiProxyOrderingBeforeArgs.builder()
                .accesses("string")
                .build())
            .build())
        .partials(GatewayPluginAiProxyPartialArgs.builder()
            .id("string")
            .name("string")
            .path("string")
            .build())
        .protocols("string")
        .route(GatewayPluginAiProxyRouteArgs.builder()
            .id("string")
            .build())
        .service(GatewayPluginAiProxyServiceArgs.builder()
            .id("string")
            .build())
        .tags("string")
        .updatedAt(0.0)
        .build());
    
    gateway_plugin_ai_proxy_resource = konnect.GatewayPluginAiProxy("gatewayPluginAiProxyResource",
        control_plane_id="string",
        config={
            "model": {
                "provider": "string",
                "name": "string",
                "options": {
                    "anthropic_version": "string",
                    "azure_api_version": "string",
                    "azure_deployment_id": "string",
                    "azure_instance": "string",
                    "bedrock": {
                        "aws_assume_role_arn": "string",
                        "aws_region": "string",
                        "aws_role_session_name": "string",
                        "aws_sts_endpoint_url": "string",
                        "embeddings_normalize": False,
                        "performance_config_latency": "string",
                    },
                    "cohere": {
                        "embedding_input_type": "string",
                        "wait_for_model": False,
                    },
                    "embeddings_dimensions": 0,
                    "gemini": {
                        "api_endpoint": "string",
                        "location_id": "string",
                        "project_id": "string",
                    },
                    "huggingface": {
                        "use_cache": False,
                        "wait_for_model": False,
                    },
                    "input_cost": 0,
                    "llama2_format": "string",
                    "max_tokens": 0,
                    "mistral_format": "string",
                    "output_cost": 0,
                    "temperature": 0,
                    "top_k": 0,
                    "top_p": 0,
                    "upstream_path": "string",
                    "upstream_url": "string",
                },
            },
            "route_type": "string",
            "auth": {
                "allow_override": False,
                "aws_access_key_id": "string",
                "aws_secret_access_key": "string",
                "azure_client_id": "string",
                "azure_client_secret": "string",
                "azure_tenant_id": "string",
                "azure_use_managed_identity": False,
                "gcp_service_account_json": "string",
                "gcp_use_service_account": False,
                "header_name": "string",
                "header_value": "string",
                "param_location": "string",
                "param_name": "string",
                "param_value": "string",
            },
            "genai_category": "string",
            "llm_format": "string",
            "logging": {
                "log_payloads": False,
                "log_statistics": False,
            },
            "max_request_body_size": 0,
            "model_name_header": False,
            "response_streaming": "string",
        },
        gateway_plugin_ai_proxy_id="string",
        consumer_group={
            "id": "string",
        },
        created_at=0,
        enabled=False,
        consumer={
            "id": "string",
        },
        instance_name="string",
        ordering={
            "after": {
                "accesses": ["string"],
            },
            "before": {
                "accesses": ["string"],
            },
        },
        partials=[{
            "id": "string",
            "name": "string",
            "path": "string",
        }],
        protocols=["string"],
        route={
            "id": "string",
        },
        service={
            "id": "string",
        },
        tags=["string"],
        updated_at=0)
    
    const gatewayPluginAiProxyResource = new konnect.GatewayPluginAiProxy("gatewayPluginAiProxyResource", {
        controlPlaneId: "string",
        config: {
            model: {
                provider: "string",
                name: "string",
                options: {
                    anthropicVersion: "string",
                    azureApiVersion: "string",
                    azureDeploymentId: "string",
                    azureInstance: "string",
                    bedrock: {
                        awsAssumeRoleArn: "string",
                        awsRegion: "string",
                        awsRoleSessionName: "string",
                        awsStsEndpointUrl: "string",
                        embeddingsNormalize: false,
                        performanceConfigLatency: "string",
                    },
                    cohere: {
                        embeddingInputType: "string",
                        waitForModel: false,
                    },
                    embeddingsDimensions: 0,
                    gemini: {
                        apiEndpoint: "string",
                        locationId: "string",
                        projectId: "string",
                    },
                    huggingface: {
                        useCache: false,
                        waitForModel: false,
                    },
                    inputCost: 0,
                    llama2Format: "string",
                    maxTokens: 0,
                    mistralFormat: "string",
                    outputCost: 0,
                    temperature: 0,
                    topK: 0,
                    topP: 0,
                    upstreamPath: "string",
                    upstreamUrl: "string",
                },
            },
            routeType: "string",
            auth: {
                allowOverride: false,
                awsAccessKeyId: "string",
                awsSecretAccessKey: "string",
                azureClientId: "string",
                azureClientSecret: "string",
                azureTenantId: "string",
                azureUseManagedIdentity: false,
                gcpServiceAccountJson: "string",
                gcpUseServiceAccount: false,
                headerName: "string",
                headerValue: "string",
                paramLocation: "string",
                paramName: "string",
                paramValue: "string",
            },
            genaiCategory: "string",
            llmFormat: "string",
            logging: {
                logPayloads: false,
                logStatistics: false,
            },
            maxRequestBodySize: 0,
            modelNameHeader: false,
            responseStreaming: "string",
        },
        gatewayPluginAiProxyId: "string",
        consumerGroup: {
            id: "string",
        },
        createdAt: 0,
        enabled: false,
        consumer: {
            id: "string",
        },
        instanceName: "string",
        ordering: {
            after: {
                accesses: ["string"],
            },
            before: {
                accesses: ["string"],
            },
        },
        partials: [{
            id: "string",
            name: "string",
            path: "string",
        }],
        protocols: ["string"],
        route: {
            id: "string",
        },
        service: {
            id: "string",
        },
        tags: ["string"],
        updatedAt: 0,
    });
    
    type: konnect:GatewayPluginAiProxy
    properties:
        config:
            auth:
                allowOverride: false
                awsAccessKeyId: string
                awsSecretAccessKey: string
                azureClientId: string
                azureClientSecret: string
                azureTenantId: string
                azureUseManagedIdentity: false
                gcpServiceAccountJson: string
                gcpUseServiceAccount: false
                headerName: string
                headerValue: string
                paramLocation: string
                paramName: string
                paramValue: string
            genaiCategory: string
            llmFormat: string
            logging:
                logPayloads: false
                logStatistics: false
            maxRequestBodySize: 0
            model:
                name: string
                options:
                    anthropicVersion: string
                    azureApiVersion: string
                    azureDeploymentId: string
                    azureInstance: string
                    bedrock:
                        awsAssumeRoleArn: string
                        awsRegion: string
                        awsRoleSessionName: string
                        awsStsEndpointUrl: string
                        embeddingsNormalize: false
                        performanceConfigLatency: string
                    cohere:
                        embeddingInputType: string
                        waitForModel: false
                    embeddingsDimensions: 0
                    gemini:
                        apiEndpoint: string
                        locationId: string
                        projectId: string
                    huggingface:
                        useCache: false
                        waitForModel: false
                    inputCost: 0
                    llama2Format: string
                    maxTokens: 0
                    mistralFormat: string
                    outputCost: 0
                    temperature: 0
                    topK: 0
                    topP: 0
                    upstreamPath: string
                    upstreamUrl: string
                provider: string
            modelNameHeader: false
            responseStreaming: string
            routeType: string
        consumer:
            id: string
        consumerGroup:
            id: string
        controlPlaneId: string
        createdAt: 0
        enabled: false
        gatewayPluginAiProxyId: string
        instanceName: string
        ordering:
            after:
                accesses:
                    - string
            before:
                accesses:
                    - string
        partials:
            - id: string
              name: string
              path: string
        protocols:
            - string
        route:
            id: string
        service:
            id: string
        tags:
            - string
        updatedAt: 0
    

    GatewayPluginAiProxy Resource Properties

    To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.

    Inputs

    In Python, inputs that are objects can be passed either as argument classes or as dictionary literals.

    The GatewayPluginAiProxy resource accepts the following input properties:

    Config GatewayPluginAiProxyConfig
    ControlPlaneId string
    The UUID of your control plane. This variable is available in the Konnect manager. Requires replacement if changed.
    Consumer GatewayPluginAiProxyConsumer
    If set, the plugin will activate only for requests where the specified consumer has been authenticated. (Note that some plugins cannot be restricted to consumers this way.) Leave unset for the plugin to activate regardless of the authenticated Consumer.
    ConsumerGroup GatewayPluginAiProxyConsumerGroup
    If set, the plugin will activate only for requests where the specified consumer group has been authenticated. (Note that some plugins cannot be restricted to consumer groups this way.) Leave unset for the plugin to activate regardless of the authenticated Consumer Group.
    CreatedAt double
    Unix epoch when the resource was created.
    Enabled bool
    Whether the plugin is applied. Default: true
    GatewayPluginAiProxyId string
    A string representing a UUID (universally unique identifier).
    InstanceName string
    A unique string representing a UTF-8 encoded name.
    Ordering GatewayPluginAiProxyOrdering
    Partials List<GatewayPluginAiProxyPartial>
    A list of partials to be used by the plugin.
    Protocols List<string>
    A list of the request protocols that will trigger this plugin. The default value, as well as the possible values allowed on this field, may change depending on the plugin type. For example, plugins that only work in stream mode will only support tcp and tls.
    Route GatewayPluginAiProxyRoute
    If set, the plugin will only activate when receiving requests via the specified route. Leave unset for the plugin to activate regardless of the route being used.
    Service GatewayPluginAiProxyService
    If set, the plugin will only activate when receiving requests via one of the routes belonging to the specified Service. Leave unset for the plugin to activate regardless of the Service being matched.
    Tags List<string>
    An optional set of strings associated with the Plugin for grouping and filtering.
    UpdatedAt double
    Unix epoch when the resource was last updated.
    Config GatewayPluginAiProxyConfigArgs
    ControlPlaneId string
    The UUID of your control plane. This variable is available in the Konnect manager. Requires replacement if changed.
    Consumer GatewayPluginAiProxyConsumerArgs
    If set, the plugin will activate only for requests where the specified consumer has been authenticated. (Note that some plugins cannot be restricted to consumers this way.) Leave unset for the plugin to activate regardless of the authenticated Consumer.
    ConsumerGroup GatewayPluginAiProxyConsumerGroupArgs
    If set, the plugin will activate only for requests where the specified consumer group has been authenticated. (Note that some plugins cannot be restricted to consumer groups this way.) Leave unset for the plugin to activate regardless of the authenticated Consumer Group.
    CreatedAt float64
    Unix epoch when the resource was created.
    Enabled bool
    Whether the plugin is applied. Default: true
    GatewayPluginAiProxyId string
    A string representing a UUID (universally unique identifier).
    InstanceName string
    A unique string representing a UTF-8 encoded name.
    Ordering GatewayPluginAiProxyOrderingArgs
    Partials []GatewayPluginAiProxyPartialArgs
    A list of partials to be used by the plugin.
    Protocols []string
    A list of the request protocols that will trigger this plugin. The default value, as well as the possible values allowed on this field, may change depending on the plugin type. For example, plugins that only work in stream mode will only support tcp and tls.
    Route GatewayPluginAiProxyRouteArgs
    If set, the plugin will only activate when receiving requests via the specified route. Leave unset for the plugin to activate regardless of the route being used.
    Service GatewayPluginAiProxyServiceArgs
    If set, the plugin will only activate when receiving requests via one of the routes belonging to the specified Service. Leave unset for the plugin to activate regardless of the Service being matched.
    Tags []string
    An optional set of strings associated with the Plugin for grouping and filtering.
    UpdatedAt float64
    Unix epoch when the resource was last updated.
    config GatewayPluginAiProxyConfig
    controlPlaneId String
    The UUID of your control plane. This variable is available in the Konnect manager. Requires replacement if changed.
    consumer GatewayPluginAiProxyConsumer
    If set, the plugin will activate only for requests where the specified consumer has been authenticated. (Note that some plugins cannot be restricted to consumers this way.) Leave unset for the plugin to activate regardless of the authenticated Consumer.
    consumerGroup GatewayPluginAiProxyConsumerGroup
    If set, the plugin will activate only for requests where the specified consumer group has been authenticated. (Note that some plugins cannot be restricted to consumer groups this way.) Leave unset for the plugin to activate regardless of the authenticated Consumer Group.
    createdAt Double
    Unix epoch when the resource was created.
    enabled Boolean
    Whether the plugin is applied. Default: true
    gatewayPluginAiProxyId String
    A string representing a UUID (universally unique identifier).
    instanceName String
    A unique string representing a UTF-8 encoded name.
    ordering GatewayPluginAiProxyOrdering
    partials List<GatewayPluginAiProxyPartial>
    A list of partials to be used by the plugin.
    protocols List<String>
    A list of the request protocols that will trigger this plugin. The default value, as well as the possible values allowed on this field, may change depending on the plugin type. For example, plugins that only work in stream mode will only support tcp and tls.
    route GatewayPluginAiProxyRoute
    If set, the plugin will only activate when receiving requests via the specified route. Leave unset for the plugin to activate regardless of the route being used.
    service GatewayPluginAiProxyService
    If set, the plugin will only activate when receiving requests via one of the routes belonging to the specified Service. Leave unset for the plugin to activate regardless of the Service being matched.
    tags List<String>
    An optional set of strings associated with the Plugin for grouping and filtering.
    updatedAt Double
    Unix epoch when the resource was last updated.
    config GatewayPluginAiProxyConfig
    controlPlaneId string
    The UUID of your control plane. This variable is available in the Konnect manager. Requires replacement if changed.
    consumer GatewayPluginAiProxyConsumer
    If set, the plugin will activate only for requests where the specified consumer has been authenticated. (Note that some plugins cannot be restricted to consumers this way.) Leave unset for the plugin to activate regardless of the authenticated Consumer.
    consumerGroup GatewayPluginAiProxyConsumerGroup
    If set, the plugin will activate only for requests where the specified consumer group has been authenticated. (Note that some plugins cannot be restricted to consumer groups this way.) Leave unset for the plugin to activate regardless of the authenticated Consumer Group.
    createdAt number
    Unix epoch when the resource was created.
    enabled boolean
    Whether the plugin is applied. Default: true
    gatewayPluginAiProxyId string
    A string representing a UUID (universally unique identifier).
    instanceName string
    A unique string representing a UTF-8 encoded name.
    ordering GatewayPluginAiProxyOrdering
    partials GatewayPluginAiProxyPartial[]
    A list of partials to be used by the plugin.
    protocols string[]
    A list of the request protocols that will trigger this plugin. The default value, as well as the possible values allowed on this field, may change depending on the plugin type. For example, plugins that only work in stream mode will only support tcp and tls.
    route GatewayPluginAiProxyRoute
    If set, the plugin will only activate when receiving requests via the specified route. Leave unset for the plugin to activate regardless of the route being used.
    service GatewayPluginAiProxyService
    If set, the plugin will only activate when receiving requests via one of the routes belonging to the specified Service. Leave unset for the plugin to activate regardless of the Service being matched.
    tags string[]
    An optional set of strings associated with the Plugin for grouping and filtering.
    updatedAt number
    Unix epoch when the resource was last updated.
    config GatewayPluginAiProxyConfigArgs
    control_plane_id str
    The UUID of your control plane. This variable is available in the Konnect manager. Requires replacement if changed.
    consumer GatewayPluginAiProxyConsumerArgs
    If set, the plugin will activate only for requests where the specified consumer has been authenticated. (Note that some plugins cannot be restricted to consumers this way.) Leave unset for the plugin to activate regardless of the authenticated Consumer.
    consumer_group GatewayPluginAiProxyConsumerGroupArgs
    If set, the plugin will activate only for requests where the specified consumer group has been authenticated. (Note that some plugins cannot be restricted to consumer groups this way.) Leave unset for the plugin to activate regardless of the authenticated Consumer Group.
    created_at float
    Unix epoch when the resource was created.
    enabled bool
    Whether the plugin is applied. Default: true
    gateway_plugin_ai_proxy_id str
    A string representing a UUID (universally unique identifier).
    instance_name str
    A unique string representing a UTF-8 encoded name.
    ordering GatewayPluginAiProxyOrderingArgs
    partials Sequence[GatewayPluginAiProxyPartialArgs]
    A list of partials to be used by the plugin.
    protocols Sequence[str]
    A list of the request protocols that will trigger this plugin. The default value, as well as the possible values allowed on this field, may change depending on the plugin type. For example, plugins that only work in stream mode will only support tcp and tls.
    route GatewayPluginAiProxyRouteArgs
    If set, the plugin will only activate when receiving requests via the specified route. Leave unset for the plugin to activate regardless of the route being used.
    service GatewayPluginAiProxyServiceArgs
    If set, the plugin will only activate when receiving requests via one of the routes belonging to the specified Service. Leave unset for the plugin to activate regardless of the Service being matched.
    tags Sequence[str]
    An optional set of strings associated with the Plugin for grouping and filtering.
    updated_at float
    Unix epoch when the resource was last updated.
    config Property Map
    controlPlaneId String
    The UUID of your control plane. This variable is available in the Konnect manager. Requires replacement if changed.
    consumer Property Map
    If set, the plugin will activate only for requests where the specified consumer has been authenticated. (Note that some plugins cannot be restricted to consumers this way.) Leave unset for the plugin to activate regardless of the authenticated Consumer.
    consumerGroup Property Map
    If set, the plugin will activate only for requests where the specified consumer group has been authenticated. (Note that some plugins cannot be restricted to consumer groups this way.) Leave unset for the plugin to activate regardless of the authenticated Consumer Group.
    createdAt Number
    Unix epoch when the resource was created.
    enabled Boolean
    Whether the plugin is applied. Default: true
    gatewayPluginAiProxyId String
    A string representing a UUID (universally unique identifier).
    instanceName String
    A unique string representing a UTF-8 encoded name.
    ordering Property Map
    partials List<Property Map>
    A list of partials to be used by the plugin.
    protocols List<String>
    A list of the request protocols that will trigger this plugin. The default value, as well as the possible values allowed on this field, may change depending on the plugin type. For example, plugins that only work in stream mode will only support tcp and tls.
    route Property Map
    If set, the plugin will only activate when receiving requests via the specified route. Leave unset for the plugin to activate regardless of the route being used.
    service Property Map
    If set, the plugin will only activate when receiving requests via one of the routes belonging to the specified Service. Leave unset for the plugin to activate regardless of the Service being matched.
    tags List<String>
    An optional set of strings associated with the Plugin for grouping and filtering.
    updatedAt Number
    Unix epoch when the resource was last updated.

    Outputs

    All input properties are implicitly available as output properties. Additionally, the GatewayPluginAiProxy resource produces the following output properties:

    Id string
    The provider-assigned unique ID for this managed resource.
    Id string
    The provider-assigned unique ID for this managed resource.
    id String
    The provider-assigned unique ID for this managed resource.
    id string
    The provider-assigned unique ID for this managed resource.
    id str
    The provider-assigned unique ID for this managed resource.
    id String
    The provider-assigned unique ID for this managed resource.

    Look up Existing GatewayPluginAiProxy Resource

    Get an existing GatewayPluginAiProxy resource’s state with the given name, ID, and optional extra properties used to qualify the lookup.

    public static get(name: string, id: Input<ID>, state?: GatewayPluginAiProxyState, opts?: CustomResourceOptions): GatewayPluginAiProxy
    @staticmethod
    def get(resource_name: str,
            id: str,
            opts: Optional[ResourceOptions] = None,
            config: Optional[GatewayPluginAiProxyConfigArgs] = None,
            consumer: Optional[GatewayPluginAiProxyConsumerArgs] = None,
            consumer_group: Optional[GatewayPluginAiProxyConsumerGroupArgs] = None,
            control_plane_id: Optional[str] = None,
            created_at: Optional[float] = None,
            enabled: Optional[bool] = None,
            gateway_plugin_ai_proxy_id: Optional[str] = None,
            instance_name: Optional[str] = None,
            ordering: Optional[GatewayPluginAiProxyOrderingArgs] = None,
            partials: Optional[Sequence[GatewayPluginAiProxyPartialArgs]] = None,
            protocols: Optional[Sequence[str]] = None,
            route: Optional[GatewayPluginAiProxyRouteArgs] = None,
            service: Optional[GatewayPluginAiProxyServiceArgs] = None,
            tags: Optional[Sequence[str]] = None,
            updated_at: Optional[float] = None) -> GatewayPluginAiProxy
    func GetGatewayPluginAiProxy(ctx *Context, name string, id IDInput, state *GatewayPluginAiProxyState, opts ...ResourceOption) (*GatewayPluginAiProxy, error)
    public static GatewayPluginAiProxy Get(string name, Input<string> id, GatewayPluginAiProxyState? state, CustomResourceOptions? opts = null)
    public static GatewayPluginAiProxy get(String name, Output<String> id, GatewayPluginAiProxyState state, CustomResourceOptions options)
    resources:
      _:
        type: konnect:GatewayPluginAiProxy
        get:
          id: ${id}
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    resource_name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
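
    As a minimal illustration, the TypeScript form of this lookup is sketched below. The Node.js package name (@pulumi/konnect) and the plugin UUID are assumptions for illustration only:

    import * as konnect from "@pulumi/konnect";

    // Adopt an existing plugin instance by its provider-assigned ID;
    // Pulumi reads its state rather than creating a new resource.
    const existing = konnect.GatewayPluginAiProxy.get(
        "existing-ai-proxy",                    // logical name in this program
        "00000000-0000-0000-0000-000000000000", // hypothetical plugin UUID
    );

    // Every state property is then available as an output.
    export const aiProxyEnabled = existing.enabled;
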
    The following state arguments are supported:
    Config GatewayPluginAiProxyConfig
    Consumer GatewayPluginAiProxyConsumer
    If set, the plugin will activate only for requests where the specified consumer has been authenticated. (Note that some plugins cannot be restricted to consumers this way.) Leave unset for the plugin to activate regardless of the authenticated Consumer.
    ConsumerGroup GatewayPluginAiProxyConsumerGroup
    If set, the plugin will activate only for requests where the specified consumer group has been authenticated. (Note that some plugins cannot be restricted to consumer groups this way.) Leave unset for the plugin to activate regardless of the authenticated Consumer Group.
    ControlPlaneId string
    The UUID of your control plane. This variable is available in the Konnect manager. Requires replacement if changed.
    CreatedAt double
    Unix epoch when the resource was created.
    Enabled bool
    Whether the plugin is applied. Default: true
    GatewayPluginAiProxyId string
    A string representing a UUID (universally unique identifier).
    InstanceName string
    A unique string representing a UTF-8 encoded name.
    Ordering GatewayPluginAiProxyOrdering
    Partials List<GatewayPluginAiProxyPartial>
    A list of partials to be used by the plugin.
    Protocols List<string>
    A list of the request protocols that will trigger this plugin. The default value, as well as the possible values allowed on this field, may change depending on the plugin type. For example, plugins that only work in stream mode will only support tcp and tls.
    Route GatewayPluginAiProxyRoute
    If set, the plugin will only activate when receiving requests via the specified route. Leave unset for the plugin to activate regardless of the route being used.
    Service GatewayPluginAiProxyService
    If set, the plugin will only activate when receiving requests via one of the routes belonging to the specified Service. Leave unset for the plugin to activate regardless of the Service being matched.
    Tags List<string>
    An optional set of strings associated with the Plugin for grouping and filtering.
    UpdatedAt double
    Unix epoch when the resource was last updated.
    Config GatewayPluginAiProxyConfigArgs
    Consumer GatewayPluginAiProxyConsumerArgs
    If set, the plugin will activate only for requests where the specified consumer has been authenticated. (Note that some plugins cannot be restricted to consumers this way.) Leave unset for the plugin to activate regardless of the authenticated Consumer.
    ConsumerGroup GatewayPluginAiProxyConsumerGroupArgs
    If set, the plugin will activate only for requests where the specified consumer group has been authenticated. (Note that some plugins cannot be restricted to consumer groups this way.) Leave unset for the plugin to activate regardless of the authenticated Consumer Group.
    ControlPlaneId string
    The UUID of your control plane. This variable is available in the Konnect manager. Requires replacement if changed.
    CreatedAt float64
    Unix epoch when the resource was created.
    Enabled bool
    Whether the plugin is applied. Default: true
    GatewayPluginAiProxyId string
    A string representing a UUID (universally unique identifier).
    InstanceName string
    A unique string representing a UTF-8 encoded name.
    Ordering GatewayPluginAiProxyOrderingArgs
    Partials []GatewayPluginAiProxyPartialArgs
    A list of partials to be used by the plugin.
    Protocols []string
    A list of the request protocols that will trigger this plugin. The default value, as well as the possible values allowed on this field, may change depending on the plugin type. For example, plugins that only work in stream mode will only support tcp and tls.
    Route GatewayPluginAiProxyRouteArgs
    If set, the plugin will only activate when receiving requests via the specified route. Leave unset for the plugin to activate regardless of the route being used.
    Service GatewayPluginAiProxyServiceArgs
    If set, the plugin will only activate when receiving requests via one of the routes belonging to the specified Service. Leave unset for the plugin to activate regardless of the Service being matched.
    Tags []string
    An optional set of strings associated with the Plugin for grouping and filtering.
    UpdatedAt float64
    Unix epoch when the resource was last updated.
    config GatewayPluginAiProxyConfig
    consumer GatewayPluginAiProxyConsumer
    If set, the plugin will activate only for requests where the specified consumer has been authenticated. (Note that some plugins cannot be restricted to consumers this way.) Leave unset for the plugin to activate regardless of the authenticated Consumer.
    consumerGroup GatewayPluginAiProxyConsumerGroup
    If set, the plugin will activate only for requests where the specified consumer group has been authenticated. (Note that some plugins cannot be restricted to consumer groups this way.) Leave unset for the plugin to activate regardless of the authenticated Consumer Group.
    controlPlaneId String
    The UUID of your control plane. This variable is available in the Konnect manager. Requires replacement if changed.
    createdAt Double
    Unix epoch when the resource was created.
    enabled Boolean
    Whether the plugin is applied. Default: true
    gatewayPluginAiProxyId String
    A string representing a UUID (universally unique identifier).
    instanceName String
    A unique string representing a UTF-8 encoded name.
    ordering GatewayPluginAiProxyOrdering
    partials List<GatewayPluginAiProxyPartial>
    A list of partials to be used by the plugin.
    protocols List<String>
    A list of the request protocols that will trigger this plugin. The default value, as well as the possible values allowed on this field, may change depending on the plugin type. For example, plugins that only work in stream mode will only support tcp and tls.
    route GatewayPluginAiProxyRoute
    If set, the plugin will only activate when receiving requests via the specified route. Leave unset for the plugin to activate regardless of the route being used.
    service GatewayPluginAiProxyService
    If set, the plugin will only activate when receiving requests via one of the routes belonging to the specified Service. Leave unset for the plugin to activate regardless of the Service being matched.
    tags List<String>
    An optional set of strings associated with the Plugin for grouping and filtering.
    updatedAt Double
    Unix epoch when the resource was last updated.
    config GatewayPluginAiProxyConfig
    consumer GatewayPluginAiProxyConsumer
    If set, the plugin will activate only for requests where the specified consumer has been authenticated. (Note that some plugins cannot be restricted to consumers this way.) Leave unset for the plugin to activate regardless of the authenticated Consumer.
    consumerGroup GatewayPluginAiProxyConsumerGroup
    If set, the plugin will activate only for requests where the specified consumer group has been authenticated. (Note that some plugins cannot be restricted to consumer groups this way.) Leave unset for the plugin to activate regardless of the authenticated Consumer Group.
    controlPlaneId string
    The UUID of your control plane. This variable is available in the Konnect manager. Requires replacement if changed.
    createdAt number
    Unix epoch when the resource was created.
    enabled boolean
    Whether the plugin is applied. Default: true
    gatewayPluginAiProxyId string
    A string representing a UUID (universally unique identifier).
    instanceName string
    A unique string representing a UTF-8 encoded name.
    ordering GatewayPluginAiProxyOrdering
    partials GatewayPluginAiProxyPartial[]
    A list of partials to be used by the plugin.
    protocols string[]
    A list of the request protocols that will trigger this plugin. The default value, as well as the possible values allowed on this field, may change depending on the plugin type. For example, plugins that only work in stream mode will only support tcp and tls.
    route GatewayPluginAiProxyRoute
    If set, the plugin will only activate when receiving requests via the specified route. Leave unset for the plugin to activate regardless of the route being used.
    service GatewayPluginAiProxyService
    If set, the plugin will only activate when receiving requests via one of the routes belonging to the specified Service. Leave unset for the plugin to activate regardless of the Service being matched.
    tags string[]
    An optional set of strings associated with the Plugin for grouping and filtering.
    updatedAt number
    Unix epoch when the resource was last updated.
    config GatewayPluginAiProxyConfigArgs
    consumer GatewayPluginAiProxyConsumerArgs
    If set, the plugin will activate only for requests where the specified consumer has been authenticated. (Note that some plugins cannot be restricted to consumers this way.) Leave unset for the plugin to activate regardless of the authenticated Consumer.
    consumer_group GatewayPluginAiProxyConsumerGroupArgs
    If set, the plugin will activate only for requests where the specified consumer group has been authenticated. (Note that some plugins cannot be restricted to consumer groups this way.) Leave unset for the plugin to activate regardless of the authenticated Consumer Group.
    control_plane_id str
    The UUID of your control plane. This variable is available in the Konnect manager. Requires replacement if changed.
    created_at float
    Unix epoch when the resource was created.
    enabled bool
    Whether the plugin is applied. Default: true
    gateway_plugin_ai_proxy_id str
    A string representing a UUID (universally unique identifier).
    instance_name str
    A unique string representing a UTF-8 encoded name.
    ordering GatewayPluginAiProxyOrderingArgs
    partials Sequence[GatewayPluginAiProxyPartialArgs]
    A list of partials to be used by the plugin.
    protocols Sequence[str]
    A list of the request protocols that will trigger this plugin. The default value, as well as the possible values allowed on this field, may change depending on the plugin type. For example, plugins that only work in stream mode will only support tcp and tls.
    route GatewayPluginAiProxyRouteArgs
    If set, the plugin will only activate when receiving requests via the specified route. Leave unset for the plugin to activate regardless of the route being used.
    service GatewayPluginAiProxyServiceArgs
    If set, the plugin will only activate when receiving requests via one of the routes belonging to the specified Service. Leave unset for the plugin to activate regardless of the Service being matched.
    tags Sequence[str]
    An optional set of strings associated with the Plugin for grouping and filtering.
    updated_at float
    Unix epoch when the resource was last updated.
    config Property Map
    consumer Property Map
    If set, the plugin will activate only for requests where the specified consumer has been authenticated. (Note that some plugins cannot be restricted to consumers this way.) Leave unset for the plugin to activate regardless of the authenticated Consumer.
    consumerGroup Property Map
    If set, the plugin will activate only for requests where the specified consumer group has been authenticated. (Note that some plugins cannot be restricted to consumer groups this way.) Leave unset for the plugin to activate regardless of the authenticated Consumer Group.
    controlPlaneId String
    The UUID of your control plane. This variable is available in the Konnect manager. Requires replacement if changed.
    createdAt Number
    Unix epoch when the resource was created.
    enabled Boolean
    Whether the plugin is applied. Default: true
    gatewayPluginAiProxyId String
    A string representing a UUID (universally unique identifier).
    instanceName String
    A unique string representing a UTF-8 encoded name.
    ordering Property Map
    partials List<Property Map>
    A list of partials to be used by the plugin.
    protocols List<String>
    A list of the request protocols that will trigger this plugin. The default value, as well as the possible values allowed on this field, may change depending on the plugin type. For example, plugins that only work in stream mode will only support tcp and tls.
    route Property Map
    If set, the plugin will only activate when receiving requests via the specified route. Leave unset for the plugin to activate regardless of the route being used.
    service Property Map
    If set, the plugin will only activate when receiving requests via one of the routes belonging to the specified Service. Leave unset for the plugin to activate regardless of the Service being matched.
    tags List<String>
    An optional set of strings associated with the Plugin for grouping and filtering.
    updatedAt Number
    Unix epoch when the resource was last updated.

    Supporting Types

    GatewayPluginAiProxyConfig, GatewayPluginAiProxyConfigArgs

    Model GatewayPluginAiProxyConfigModel
    RouteType string
    The model's operation implementation for this provider. Must be one of ["audio/v1/audio/speech", "audio/v1/audio/transcriptions", "audio/v1/audio/translations", "image/v1/images/edits", "image/v1/images/generations", "llm/v1/assistants", "llm/v1/batches", "llm/v1/chat", "llm/v1/completions", "llm/v1/embeddings", "llm/v1/files", "llm/v1/responses", "preserve", "realtime/v1/realtime"]
    Auth GatewayPluginAiProxyConfigAuth
    GenaiCategory string
    Generative AI category of the request. Default: "text/generation"; must be one of ["audio/speech", "audio/transcription", "image/generation", "text/embeddings", "text/generation"]
    LlmFormat string
    LLM input and output format and schema to use. Default: "openai"; must be one of ["bedrock", "cohere", "gemini", "huggingface", "openai"]
    Logging GatewayPluginAiProxyConfigLogging
    MaxRequestBodySize double
    Max allowed body size to be introspected. 0 means unlimited, but the size of this body will still be limited by Nginx's client_max_body_size. Default: 8192
    ModelNameHeader bool
    Display the model name selected in the X-Kong-LLM-Model response header. Default: true
    ResponseStreaming string
    Whether to 'optionally allow', 'deny', or 'always' (force) the streaming of answers via server-sent events. Default: "allow"; must be one of ["allow", "always", "deny"]
    Model GatewayPluginAiProxyConfigModel
    RouteType string
    The model's operation implementation for this provider. Must be one of ["audio/v1/audio/speech", "audio/v1/audio/transcriptions", "audio/v1/audio/translations", "image/v1/images/edits", "image/v1/images/generations", "llm/v1/assistants", "llm/v1/batches", "llm/v1/chat", "llm/v1/completions", "llm/v1/embeddings", "llm/v1/files", "llm/v1/responses", "preserve", "realtime/v1/realtime"]
    Auth GatewayPluginAiProxyConfigAuth
    GenaiCategory string
    Generative AI category of the request. Default: "text/generation"; must be one of ["audio/speech", "audio/transcription", "image/generation", "text/embeddings", "text/generation"]
    LlmFormat string
    LLM input and output format and schema to use. Default: "openai"; must be one of ["bedrock", "cohere", "gemini", "huggingface", "openai"]
    Logging GatewayPluginAiProxyConfigLogging
    MaxRequestBodySize float64
    Max allowed body size to be introspected. 0 means unlimited, but the size of this body will still be limited by Nginx's client_max_body_size. Default: 8192
    ModelNameHeader bool
    Display the model name selected in the X-Kong-LLM-Model response header. Default: true
    ResponseStreaming string
    Whether to 'optionally allow', 'deny', or 'always' (force) the streaming of answers via server-sent events. Default: "allow"; must be one of ["allow", "always", "deny"]
    model GatewayPluginAiProxyConfigModel
    routeType String
    The model's operation implementation for this provider. Must be one of ["audio/v1/audio/speech", "audio/v1/audio/transcriptions", "audio/v1/audio/translations", "image/v1/images/edits", "image/v1/images/generations", "llm/v1/assistants", "llm/v1/batches", "llm/v1/chat", "llm/v1/completions", "llm/v1/embeddings", "llm/v1/files", "llm/v1/responses", "preserve", "realtime/v1/realtime"]
    auth GatewayPluginAiProxyConfigAuth
    genaiCategory String
    Generative AI category of the request. Default: "text/generation"; must be one of ["audio/speech", "audio/transcription", "image/generation", "text/embeddings", "text/generation"]
    llmFormat String
    LLM input and output format and schema to use. Default: "openai"; must be one of ["bedrock", "cohere", "gemini", "huggingface", "openai"]
    logging GatewayPluginAiProxyConfigLogging
    maxRequestBodySize Double
    Max allowed body size to be introspected. 0 means unlimited, but the size of this body will still be limited by Nginx's client_max_body_size. Default: 8192
    modelNameHeader Boolean
    Display the model name selected in the X-Kong-LLM-Model response header. Default: true
    responseStreaming String
    Whether to 'optionally allow', 'deny', or 'always' (force) the streaming of answers via server-sent events. Default: "allow"; must be one of ["allow", "always", "deny"]
    model GatewayPluginAiProxyConfigModel
    routeType string
    The model's operation implementation for this provider. Must be one of ["audio/v1/audio/speech", "audio/v1/audio/transcriptions", "audio/v1/audio/translations", "image/v1/images/edits", "image/v1/images/generations", "llm/v1/assistants", "llm/v1/batches", "llm/v1/chat", "llm/v1/completions", "llm/v1/embeddings", "llm/v1/files", "llm/v1/responses", "preserve", "realtime/v1/realtime"]
    auth GatewayPluginAiProxyConfigAuth
    genaiCategory string
    Generative AI category of the request. Default: "text/generation"; must be one of ["audio/speech", "audio/transcription", "image/generation", "text/embeddings", "text/generation"]
    llmFormat string
    LLM input and output format and schema to use. Default: "openai"; must be one of ["bedrock", "cohere", "gemini", "huggingface", "openai"]
    logging GatewayPluginAiProxyConfigLogging
    maxRequestBodySize number
    Max allowed body size to be introspected. 0 means unlimited, but the size of this body will still be limited by Nginx's client_max_body_size. Default: 8192
    modelNameHeader boolean
    Display the model name selected in the X-Kong-LLM-Model response header. Default: true
    responseStreaming string
    Whether to 'optionally allow', 'deny', or 'always' (force) the streaming of answers via server-sent events. Default: "allow"; must be one of ["allow", "always", "deny"]
    model GatewayPluginAiProxyConfigModel
    route_type str
    The model's operation implementation for this provider. Must be one of ["audio/v1/audio/speech", "audio/v1/audio/transcriptions", "audio/v1/audio/translations", "image/v1/images/edits", "image/v1/images/generations", "llm/v1/assistants", "llm/v1/batches", "llm/v1/chat", "llm/v1/completions", "llm/v1/embeddings", "llm/v1/files", "llm/v1/responses", "preserve", "realtime/v1/realtime"]
    auth GatewayPluginAiProxyConfigAuth
    genai_category str
    Generative AI category of the request. Default: "text/generation"; must be one of ["audio/speech", "audio/transcription", "image/generation", "text/embeddings", "text/generation"]
    llm_format str
    LLM input and output format and schema to use. Default: "openai"; must be one of ["bedrock", "cohere", "gemini", "huggingface", "openai"]
    logging GatewayPluginAiProxyConfigLogging
    max_request_body_size float
    Max allowed body size to be introspected. 0 means unlimited, but the size of this body will still be limited by Nginx's client_max_body_size. Default: 8192
    model_name_header bool
    Display the model name selected in the X-Kong-LLM-Model response header. Default: true
    response_streaming str
    Whether to 'optionally allow', 'deny', or 'always' (force) the streaming of answers via server-sent events. Default: "allow"; must be one of ["allow", "always", "deny"]
    model Property Map
    routeType String
    The model's operation implementation for this provider. Must be one of ["audio/v1/audio/speech", "audio/v1/audio/transcriptions", "audio/v1/audio/translations", "image/v1/images/edits", "image/v1/images/generations", "llm/v1/assistants", "llm/v1/batches", "llm/v1/chat", "llm/v1/completions", "llm/v1/embeddings", "llm/v1/files", "llm/v1/responses", "preserve", "realtime/v1/realtime"]
    auth Property Map
    genaiCategory String
    Generative AI category of the request. Default: "text/generation"; must be one of ["audio/speech", "audio/transcription", "image/generation", "text/embeddings", "text/generation"]
    llmFormat String
    LLM input and output format and schema to use. Default: "openai"; must be one of ["bedrock", "cohere", "gemini", "huggingface", "openai"]
    logging Property Map
    maxRequestBodySize Number
    Max allowed body size to be introspected. 0 means unlimited, but the size of this body will still be limited by Nginx's client_max_body_size. Default: 8192
    modelNameHeader Boolean
    Display the model name selected in the X-Kong-LLM-Model response header. Default: true
    responseStreaming String
    Whether to 'optionally allow', 'deny', or 'always' (force) the streaming of answers via server-sent events. Default: "allow"; must be one of ["allow", "always", "deny"]
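
    Read together, these fields form the plugin's top-level config block. The following TypeScript fragment is a minimal sketch, assuming the Node.js package name @pulumi/konnect; the control plane UUID and model name are placeholders:

    import * as konnect from "@pulumi/konnect";

    const plugin = new konnect.GatewayPluginAiProxy("openai-chat-proxy", {
        controlPlaneId: "00000000-0000-0000-0000-000000000000", // hypothetical UUID
        config: {
            routeType: "llm/v1/chat",         // chat-completion operation
            llmFormat: "openai",              // accept/emit OpenAI-style payloads
            genaiCategory: "text/generation",
            responseStreaming: "allow",       // clients may opt in to SSE streaming
            maxRequestBodySize: 8192,         // the documented default
            modelNameHeader: true,            // expose X-Kong-LLM-Model
            model: {
                provider: "openai",
                name: "gpt-4o",               // illustrative model name
            },
        },
    });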

    GatewayPluginAiProxyConfigAuth, GatewayPluginAiProxyConfigAuthArgs

    AllowOverride bool
    If enabled, the authorization header or parameter can be overridden in the request by the value configured in the plugin. Default: false
    AwsAccessKeyId string
    Set this if you are using an AWS provider (Bedrock) and you are authenticating using static IAM User credentials. Setting this will override the AWS_ACCESS_KEY_ID environment variable for this plugin instance.
    AwsSecretAccessKey string
    Set this if you are using an AWS provider (Bedrock) and you are authenticating using static IAM User credentials. Setting this will override the AWS_SECRET_ACCESS_KEY environment variable for this plugin instance.
    AzureClientId string
    If azure_use_managed_identity is set to true, and you need to use a different user-assigned identity for this LLM instance, set the client ID.
    AzureClientSecret string
    If azure_use_managed_identity is set to true, and you need to use a different user-assigned identity for this LLM instance, set the client secret.
    AzureTenantId string
    If azure_use_managed_identity is set to true, and you need to use a different user-assigned identity for this LLM instance, set the tenant ID.
    AzureUseManagedIdentity bool
    Set true to use the Azure Cloud Managed Identity (or user-assigned identity) to authenticate with Azure-provider models. Default: false
    GcpServiceAccountJson string
    Set this field to the full JSON of the GCP service account to authenticate, if required. If null (and gcp_use_service_account is true), Kong will attempt to read from environment variable GCP_SERVICE_ACCOUNT.
    GcpUseServiceAccount bool
    Use service account auth for GCP-based providers and models. Default: false
    HeaderName string
    If the AI model requires authentication via an Authorization or API key header, specify its name here.
    HeaderValue string
    Specify the full auth header value for 'header_name', for example 'Bearer key' or just 'key'.
    ParamLocation string
    Specify whether the 'param_name' and 'param_value' options go in a query string, or the POST form/JSON body. Must be one of ["body", "query"]
    ParamName string
    If the AI model requires authentication via a query parameter, specify its name here.
    ParamValue string
    Specify the full parameter value for 'param_name'.
    AllowOverride bool
    If enabled, the authorization header or parameter can be overridden in the request by the value configured in the plugin. Default: false
    AwsAccessKeyId string
    Set this if you are using an AWS provider (Bedrock) and you are authenticating using static IAM User credentials. Setting this will override the AWS_ACCESS_KEY_ID environment variable for this plugin instance.
    AwsSecretAccessKey string
    Set this if you are using an AWS provider (Bedrock) and you are authenticating using static IAM User credentials. Setting this will override the AWS_SECRET_ACCESS_KEY environment variable for this plugin instance.
    AzureClientId string
    If azure_use_managed_identity is set to true, and you need to use a different user-assigned identity for this LLM instance, set the client ID.
    AzureClientSecret string
    If azure_use_managed_identity is set to true, and you need to use a different user-assigned identity for this LLM instance, set the client secret.
    AzureTenantId string
    If azure_use_managed_identity is set to true, and you need to use a different user-assigned identity for this LLM instance, set the tenant ID.
    AzureUseManagedIdentity bool
    Set true to use the Azure Cloud Managed Identity (or user-assigned identity) to authenticate with Azure-provider models. Default: false
    GcpServiceAccountJson string
    Set this field to the full JSON of the GCP service account to authenticate, if required. If null (and gcp_use_service_account is true), Kong will attempt to read from environment variable GCP_SERVICE_ACCOUNT.
    GcpUseServiceAccount bool
    Use service account auth for GCP-based providers and models. Default: false
    HeaderName string
    If the AI model requires authentication via an Authorization or API key header, specify its name here.
    HeaderValue string
    Specify the full auth header value for 'header_name', for example 'Bearer key' or just 'key'.
    ParamLocation string
    Specify whether the 'param_name' and 'param_value' options go in a query string, or the POST form/JSON body. Must be one of ["body", "query"]
    ParamName string
    If the AI model requires authentication via a query parameter, specify its name here.
    ParamValue string
    Specify the full parameter value for 'param_name'.
    allowOverride Boolean
    If enabled, the authorization header or parameter can be overridden in the request by the value configured in the plugin. Default: false
    awsAccessKeyId String
    Set this if you are using an AWS provider (Bedrock) and you are authenticating using static IAM User credentials. Setting this will override the AWS_ACCESS_KEY_ID environment variable for this plugin instance.
    awsSecretAccessKey String
    Set this if you are using an AWS provider (Bedrock) and you are authenticating using static IAM User credentials. Setting this will override the AWS_SECRET_ACCESS_KEY environment variable for this plugin instance.
    azureClientId String
    If azure_use_managed_identity is set to true, and you need to use a different user-assigned identity for this LLM instance, set the client ID.
    azureClientSecret String
    If azure_use_managed_identity is set to true, and you need to use a different user-assigned identity for this LLM instance, set the client secret.
    azureTenantId String
    If azure_use_managed_identity is set to true, and you need to use a different user-assigned identity for this LLM instance, set the tenant ID.
    azureUseManagedIdentity Boolean
    Set true to use the Azure Cloud Managed Identity (or user-assigned identity) to authenticate with Azure-provider models. Default: false
    gcpServiceAccountJson String
    Set this field to the full JSON of the GCP service account to authenticate, if required. If null (and gcp_use_service_account is true), Kong will attempt to read from environment variable GCP_SERVICE_ACCOUNT.
    gcpUseServiceAccount Boolean
    Use service account auth for GCP-based providers and models. Default: false
    headerName String
    If the AI model requires authentication via an Authorization or API key header, specify its name here.
    headerValue String
    Specify the full auth header value for 'header_name', for example 'Bearer key' or just 'key'.
    paramLocation String
    Specify whether the 'param_name' and 'param_value' options go in a query string, or the POST form/JSON body. Must be one of ["body", "query"]
    paramName String
    If the AI model requires authentication via a query parameter, specify its name here.
    paramValue String
    Specify the full parameter value for 'param_name'.
    allowOverride boolean
    If enabled, the authorization header or parameter can be overridden in the request by the value configured in the plugin. Default: false
    awsAccessKeyId string
    Set this if you are using an AWS provider (Bedrock) and you are authenticating using static IAM User credentials. Setting this will override the AWS_ACCESS_KEY_ID environment variable for this plugin instance.
    awsSecretAccessKey string
    Set this if you are using an AWS provider (Bedrock) and you are authenticating using static IAM User credentials. Setting this will override the AWS_SECRET_ACCESS_KEY environment variable for this plugin instance.
    azureClientId string
    If azure_use_managed_identity is set to true, and you need to use a different user-assigned identity for this LLM instance, set the client ID.
    azureClientSecret string
    If azure_use_managed_identity is set to true, and you need to use a different user-assigned identity for this LLM instance, set the client secret.
    azureTenantId string
    If azure_use_managed_identity is set to true, and you need to use a different user-assigned identity for this LLM instance, set the tenant ID.
    azureUseManagedIdentity boolean
    Set true to use the Azure Cloud Managed Identity (or user-assigned identity) to authenticate with Azure-provider models. Default: false
    gcpServiceAccountJson string
    Set this field to the full JSON of the GCP service account to authenticate, if required. If null (and gcp_use_service_account is true), Kong will attempt to read from environment variable GCP_SERVICE_ACCOUNT.
    gcpUseServiceAccount boolean
    Use service account auth for GCP-based providers and models. Default: false
    headerName string
    If the AI model requires authentication via an Authorization or API key header, specify its name here.
    headerValue string
    Specify the full auth header value for 'header_name', for example 'Bearer key' or just 'key'.
    paramLocation string
    Specify whether the 'param_name' and 'param_value' options go in a query string, or the POST form/JSON body. Must be one of ["body", "query"]
    paramName string
    If the AI model requires authentication via a query parameter, specify its name here.
    paramValue string
    Specify the full parameter value for 'param_name'.
    allow_override bool
    If enabled, the authorization header or parameter can be overridden in the request by the value configured in the plugin. Default: false
    aws_access_key_id str
    Set this if you are using an AWS provider (Bedrock) and you are authenticating using static IAM User credentials. Setting this will override the AWS_ACCESS_KEY_ID environment variable for this plugin instance.
    aws_secret_access_key str
    Set this if you are using an AWS provider (Bedrock) and you are authenticating using static IAM User credentials. Setting this will override the AWS_SECRET_ACCESS_KEY environment variable for this plugin instance.
    azure_client_id str
    If azure_use_managed_identity is set to true, and you need to use a different user-assigned identity for this LLM instance, set the client ID.
    azure_client_secret str
    If azure_use_managed_identity is set to true, and you need to use a different user-assigned identity for this LLM instance, set the client secret.
    azure_tenant_id str
    If azure_use_managed_identity is set to true, and you need to use a different user-assigned identity for this LLM instance, set the tenant ID.
    azure_use_managed_identity bool
    Set true to use the Azure Cloud Managed Identity (or user-assigned identity) to authenticate with Azure-provider models. Default: false
    gcp_service_account_json str
    Set this field to the full JSON of the GCP service account to authenticate, if required. If null (and gcp_use_service_account is true), Kong will attempt to read from environment variable GCP_SERVICE_ACCOUNT.
    gcp_use_service_account bool
    Use service account auth for GCP-based providers and models. Default: false
    header_name str
    If the AI model requires authentication via an Authorization or API key header, specify its name here.
    header_value str
    Specify the full auth header value for 'header_name', for example 'Bearer key' or just 'key'.
    param_location str
    Specify whether the 'param_name' and 'param_value' options go in a query string, or the POST form/JSON body. Must be one of ["body", "query"]
    param_name str
    If the AI model requires authentication via a query parameter, specify its name here.
    param_value str
    Specify the full parameter value for 'param_name'.
    allowOverride Boolean
    If enabled, the authorization header or parameter can be overridden in the request by the value configured in the plugin. Default: false
    awsAccessKeyId String
    Set this if you are using an AWS provider (Bedrock) and you are authenticating using static IAM User credentials. Setting this will override the AWS_ACCESS_KEY_ID environment variable for this plugin instance.
    awsSecretAccessKey String
    Set this if you are using an AWS provider (Bedrock) and you are authenticating using static IAM User credentials. Setting this will override the AWS_SECRET_ACCESS_KEY environment variable for this plugin instance.
    azureClientId String
    If azure_use_managed_identity is set to true, and you need to use a different user-assigned identity for this LLM instance, set the client ID.
    azureClientSecret String
    If azure_use_managed_identity is set to true, and you need to use a different user-assigned identity for this LLM instance, set the client secret.
    azureTenantId String
    If azure_use_managed_identity is set to true, and you need to use a different user-assigned identity for this LLM instance, set the tenant ID.
    azureUseManagedIdentity Boolean
    Set true to use the Azure Cloud Managed Identity (or user-assigned identity) to authenticate with Azure-provider models. Default: false
    gcpServiceAccountJson String
    Set this field to the full JSON of the GCP service account to authenticate, if required. If null (and gcp_use_service_account is true), Kong will attempt to read from environment variable GCP_SERVICE_ACCOUNT.
    gcpUseServiceAccount Boolean
    Use service account auth for GCP-based providers and models. Default: false
    headerName String
    If the AI model requires authentication via an Authorization or API key header, specify its name here.
    headerValue String
    Specify the full auth header value for 'header_name', for example 'Bearer key' or just 'key'.
    paramLocation String
    Specify whether the 'param_name' and 'param_value' options go in a query string, or the POST form/JSON body. Must be one of ["body", "query"]
    paramName String
    If the AI model requires authentication via a query parameter, specify its name here.
    paramValue String
    Specify the full parameter value for 'param_name'.
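
    In practice, the auth block typically carries either header-based or parameter-based credentials. A sketch of a header-based config.auth object in TypeScript (the key value is a placeholder and should come from Pulumi secret config, not a literal):

    const auth = {
        headerName: "Authorization",
        headerValue: "Bearer <api-key>", // placeholder; store as a Pulumi secret
        allowOverride: false,            // callers may not supply their own key
    };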

    GatewayPluginAiProxyConfigLogging, GatewayPluginAiProxyConfigLoggingArgs

    LogPayloads bool
    If enabled, will log the request and response body into the Kong log plugin(s) output. Default: false
    LogStatistics bool
    If enabled and supported by the driver, will add model usage and token metrics into the Kong log plugin(s) output. Default: false
    LogPayloads bool
    If enabled, will log the request and response body into the Kong log plugin(s) output. Default: false
    LogStatistics bool
    If enabled and supported by the driver, will add model usage and token metrics into the Kong log plugin(s) output. Default: false
    logPayloads Boolean
    If enabled, will log the request and response body into the Kong log plugin(s) output. Default: false
    logStatistics Boolean
    If enabled and supported by the driver, will add model usage and token metrics into the Kong log plugin(s) output. Default: false
    logPayloads boolean
    If enabled, will log the request and response body into the Kong log plugin(s) output. Default: false
    logStatistics boolean
    If enabled and supported by the driver, will add model usage and token metrics into the Kong log plugin(s) output. Default: false
    log_payloads bool
    If enabled, will log the request and response body into the Kong log plugin(s) output. Default: false
    log_statistics bool
    If enabled and supported by the driver, will add model usage and token metrics into the Kong log plugin(s) output. Default: false
    logPayloads Boolean
    If enabled, will log the request and response body into the Kong log plugin(s) output. Default: false
    logStatistics Boolean
    If enabled and supported by the driver, will add model usage and token metrics into the Kong log plugin(s) output. Default: false
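
    For example, to collect token metrics while keeping request and response bodies out of the logs, the config.logging object could look like this (a sketch, not the only valid combination):

    const logging = {
        logStatistics: true, // add model usage and token metrics to log output
        logPayloads: false,  // do not log request/response bodies
    };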

    GatewayPluginAiProxyConfigModel, GatewayPluginAiProxyConfigModelArgs

    Provider string
    AI provider request format - Kong translates requests to and from the specified backend-compatible formats. Must be one of ["anthropic", "azure", "bedrock", "cohere", "gemini", "huggingface", "llama2", "mistral", "openai"]
    Name string
    Model name to execute.
    Options GatewayPluginAiProxyConfigModelOptions
    Key/value settings for the model.
    Provider string
    AI provider request format - Kong translates requests to and from the specified backend-compatible formats. Must be one of ["anthropic", "azure", "bedrock", "cohere", "gemini", "huggingface", "llama2", "mistral", "openai"]
    Name string
    Model name to execute.
    Options GatewayPluginAiProxyConfigModelOptions
    Key/value settings for the model.
    provider String
    AI provider request format - Kong translates requests to and from the specified backend-compatible formats. Must be one of ["anthropic", "azure", "bedrock", "cohere", "gemini", "huggingface", "llama2", "mistral", "openai"]
    name String
    Model name to execute.
    options GatewayPluginAiProxyConfigModelOptions
    Key/value settings for the model.
    provider string
    AI provider request format - Kong translates requests to and from the specified backend-compatible formats. Must be one of ["anthropic", "azure", "bedrock", "cohere", "gemini", "huggingface", "llama2", "mistral", "openai"]
    name string
    Model name to execute.
    options GatewayPluginAiProxyConfigModelOptions
    Key/value settings for the model.
    provider str
    AI provider request format - Kong translates requests to and from the specified backend-compatible formats. Must be one of ["anthropic", "azure", "bedrock", "cohere", "gemini", "huggingface", "llama2", "mistral", "openai"]
    name str
    Model name to execute.
    options GatewayPluginAiProxyConfigModelOptions
    Key/value settings for the model.
    provider String
    AI provider request format - Kong translates requests to and from the specified backend-compatible formats. Must be one of ["anthropic", "azure", "bedrock", "cohere", "gemini", "huggingface", "llama2", "mistral", "openai"]
    name String
    Model name to execute.
    options Property Map
    Key/value settings for the model.
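
    As a short sketch of the config.model object, here is an Anthropic chat model; the model name and anthropicVersion value are illustrative, not prescribed:

    const model = {
        provider: "anthropic",
        name: "claude-3-5-sonnet",          // illustrative model identifier
        options: {
            anthropicVersion: "2023-06-01", // assumed schema version for illustration
            maxTokens: 1024,
            temperature: 0.2,
        },
    };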

    GatewayPluginAiProxyConfigModelOptions, GatewayPluginAiProxyConfigModelOptionsArgs

    AnthropicVersion string
    Defines the schema/API version, if using Anthropic provider.
    AzureApiVersion string
    'api-version' for Azure OpenAI instances. Default: "2023-05-15"
    AzureDeploymentId string
    Deployment ID for Azure OpenAI instances.
    AzureInstance string
    Instance name for Azure OpenAI hosted models.
    Bedrock GatewayPluginAiProxyConfigModelOptionsBedrock
    Cohere GatewayPluginAiProxyConfigModelOptionsCohere
    EmbeddingsDimensions double
    If using embeddings models, set the number of dimensions to generate.
    Gemini GatewayPluginAiProxyConfigModelOptionsGemini
    Huggingface GatewayPluginAiProxyConfigModelOptionsHuggingface
    InputCost double
    Defines the cost per 1M tokens in your prompt.
    Llama2Format string
    If using llama2 provider, select the upstream message format. must be one of ["ollama", "openai", "raw"]
    MaxTokens double
    Defines the max_tokens, if using chat or completion models.
    MistralFormat string
    If using mistral provider, select the upstream message format. must be one of ["ollama", "openai"]
    OutputCost double
    Defines the cost per 1M tokens in the output of the AI.
    Temperature double
    Defines the matching temperature, if using chat or completion models.
    TopK double
    Defines the top-k most likely tokens, if supported.
    TopP double
    Defines the top-p probability mass, if supported.
    UpstreamPath string
    Manually specify or override the AI operation path; used, for example, with the 'preserve' route_type.
    UpstreamUrl string
    Manually specify or override the full URL to the AI operation endpoints when calling (self-)hosted models, or for running via a private endpoint.
    AnthropicVersion string
    Defines the schema/API version, if using Anthropic provider.
    AzureApiVersion string
    'api-version' for Azure OpenAI instances. Default: "2023-05-15"
    AzureDeploymentId string
    Deployment ID for Azure OpenAI instances.
    AzureInstance string
    Instance name for Azure OpenAI hosted models.
    Bedrock GatewayPluginAiProxyConfigModelOptionsBedrock
    Cohere GatewayPluginAiProxyConfigModelOptionsCohere
    EmbeddingsDimensions float64
    If using embeddings models, set the number of dimensions to generate.
    Gemini GatewayPluginAiProxyConfigModelOptionsGemini
    Huggingface GatewayPluginAiProxyConfigModelOptionsHuggingface
    InputCost float64
    Defines the cost per 1M tokens in your prompt.
    Llama2Format string
    If using llama2 provider, select the upstream message format. must be one of ["ollama", "openai", "raw"]
    MaxTokens float64
    Defines the max_tokens, if using chat or completion models.
    MistralFormat string
    If using mistral provider, select the upstream message format. must be one of ["ollama", "openai"]
    OutputCost float64
    Defines the cost per 1M tokens in the output of the AI.
    Temperature float64
    Defines the matching temperature, if using chat or completion models.
    TopK float64
    Defines the top-k most likely tokens, if supported.
    TopP float64
    Defines the top-p probability mass, if supported.
    UpstreamPath string
    Manually specify or override the AI operation path; used, for example, with the 'preserve' route_type.
    UpstreamUrl string
    Manually specify or override the full URL to the AI operation endpoints when calling (self-)hosted models, or for running via a private endpoint.
    anthropicVersion String
    Defines the schema/API version, if using Anthropic provider.
    azureApiVersion String
    'api-version' for Azure OpenAI instances. Default: "2023-05-15"
    azureDeploymentId String
    Deployment ID for Azure OpenAI instances.
    azureInstance String
    Instance name for Azure OpenAI hosted models.
    bedrock GatewayPluginAiProxyConfigModelOptionsBedrock
    cohere GatewayPluginAiProxyConfigModelOptionsCohere
    embeddingsDimensions Double
    If using embeddings models, set the number of dimensions to generate.
    gemini GatewayPluginAiProxyConfigModelOptionsGemini
    huggingface GatewayPluginAiProxyConfigModelOptionsHuggingface
    inputCost Double
    Defines the cost per 1M tokens in your prompt.
    llama2Format String
    If using llama2 provider, select the upstream message format. must be one of ["ollama", "openai", "raw"]
    maxTokens Double
    Defines the max_tokens, if using chat or completion models.
    mistralFormat String
    If using mistral provider, select the upstream message format. must be one of ["ollama", "openai"]
    outputCost Double
    Defines the cost per 1M tokens in the output of the AI.
    temperature Double
    Defines the matching temperature, if using chat or completion models.
    topK Double
    Defines the top-k most likely tokens, if supported.
    topP Double
    Defines the top-p probability mass, if supported.
    upstreamPath String
    Manually specify or override the AI operation path; used, for example, with the 'preserve' route_type.
    upstreamUrl String
    Manually specify or override the full URL to the AI operation endpoints when calling (self-)hosted models, or for running via a private endpoint.
    anthropicVersion string
    Defines the schema/API version, if using Anthropic provider.
    azureApiVersion string
    'api-version' for Azure OpenAI instances. Default: "2023-05-15"
    azureDeploymentId string
    Deployment ID for Azure OpenAI instances.
    azureInstance string
    Instance name for Azure OpenAI hosted models.
    bedrock GatewayPluginAiProxyConfigModelOptionsBedrock
    cohere GatewayPluginAiProxyConfigModelOptionsCohere
    embeddingsDimensions number
    If using embeddings models, set the number of dimensions to generate.
    gemini GatewayPluginAiProxyConfigModelOptionsGemini
    huggingface GatewayPluginAiProxyConfigModelOptionsHuggingface
    inputCost number
    Defines the cost per 1M tokens in your prompt.
    llama2Format string
    If using llama2 provider, select the upstream message format. must be one of ["ollama", "openai", "raw"]
    maxTokens number
    Defines the max_tokens, if using chat or completion models.
    mistralFormat string
    If using mistral provider, select the upstream message format. must be one of ["ollama", "openai"]
    outputCost number
    Defines the cost per 1M tokens in the output of the AI.
    temperature number
    Defines the matching temperature, if using chat or completion models.
    topK number
    Defines the top-k most likely tokens, if supported.
    topP number
    Defines the top-p probability mass, if supported.
    upstreamPath string
    Manually specify or override the AI operation path; used, for example, with the 'preserve' route_type.
    upstreamUrl string
    Manually specify or override the full URL to the AI operation endpoints when calling (self-)hosted models, or for running via a private endpoint.
    anthropic_version str
    Defines the schema/API version, if using Anthropic provider.
    azure_api_version str
    'api-version' for Azure OpenAI instances. Default: "2023-05-15"
    azure_deployment_id str
    Deployment ID for Azure OpenAI instances.
    azure_instance str
    Instance name for Azure OpenAI hosted models.
    bedrock GatewayPluginAiProxyConfigModelOptionsBedrock
    cohere GatewayPluginAiProxyConfigModelOptionsCohere
    embeddings_dimensions float
    If using embeddings models, set the number of dimensions to generate.
    gemini GatewayPluginAiProxyConfigModelOptionsGemini
    huggingface GatewayPluginAiProxyConfigModelOptionsHuggingface
    input_cost float
    Defines the cost per 1M tokens in your prompt.
    llama2_format str
    If using llama2 provider, select the upstream message format. must be one of ["ollama", "openai", "raw"]
    max_tokens float
    Defines the max_tokens, if using chat or completion models.
    mistral_format str
    If using mistral provider, select the upstream message format. must be one of ["ollama", "openai"]
    output_cost float
    Defines the cost per 1M tokens in the output of the AI.
    temperature float
    Defines the matching temperature, if using chat or completion models.
    top_k float
    Defines the top-k most likely tokens, if supported.
    top_p float
    Defines the top-p probability mass, if supported.
    upstream_path str
    Manually specify or override the AI operation path; used, for example, with the 'preserve' route_type.
    upstream_url str
    Manually specify or override the full URL to the AI operation endpoints when calling (self-)hosted models, or for running via a private endpoint.
    anthropicVersion String
    Defines the schema/API version, if using Anthropic provider.
    azureApiVersion String
    'api-version' for Azure OpenAI instances. Default: "2023-05-15"
    azureDeploymentId String
    Deployment ID for Azure OpenAI instances.
    azureInstance String
    Instance name for Azure OpenAI hosted models.
    bedrock Property Map
    cohere Property Map
    embeddingsDimensions Number
    If using embeddings models, set the number of dimensions to generate.
    gemini Property Map
    huggingface Property Map
    inputCost Number
    Defines the cost per 1M tokens in your prompt.
    llama2Format String
    If using llama2 provider, select the upstream message format. must be one of ["ollama", "openai", "raw"]
    maxTokens Number
    Defines the max_tokens, if using chat or completion models.
    mistralFormat String
    If using mistral provider, select the upstream message format. must be one of ["ollama", "openai"]
    outputCost Number
    Defines the cost per 1M tokens in the output of the AI.
    temperature Number
    Defines the matching temperature, if using chat or completion models.
    topK Number
    Defines the top-k most likely tokens, if supported.
    topP Number
    Defines the top-p probability mass, if supported.
    upstreamPath String
    Manually specify or override the AI operation path; used, for example, with the 'preserve' route_type.
    upstreamUrl String
    Manually specify or override the full URL to the AI operation endpoints when calling (self-)hosted models, or for running via a private endpoint.
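
    As one worked combination, an Azure OpenAI deployment is addressed through the azure* options. The instance and deployment names below are placeholders:

    const options = {
        azureInstance: "my-openai-instance",    // <instance>.openai.azure.com
        azureDeploymentId: "gpt-4o-deployment", // hypothetical deployment ID
        azureApiVersion: "2023-05-15",          // the documented default
        maxTokens: 2048,
        topP: 0.95,
    };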

    GatewayPluginAiProxyConfigModelOptionsBedrock, GatewayPluginAiProxyConfigModelOptionsBedrockArgs

    AwsAssumeRoleArn string
    If using AWS providers (Bedrock), you can assume a different role after authentication with the current IAM context is successful.
    AwsRegion string
    If using AWS providers (Bedrock), you can override the AWS_REGION environment variable by setting this option.
    AwsRoleSessionName string
    If using AWS providers (Bedrock), set the identifier of the assumed role session.
    AwsStsEndpointUrl string
    If using AWS providers (Bedrock), override the STS endpoint URL when assuming a different role.
    EmbeddingsNormalize bool
    If using AWS providers (Bedrock), set to true to normalize the embeddings. Default: false
    PerformanceConfigLatency string
    Force the client's performance configuration 'latency' for all requests. Leave empty to let the consumer select the performance configuration.
    AwsAssumeRoleArn string
    If using AWS providers (Bedrock), you can assume a different role after authentication with the current IAM context is successful.
    AwsRegion string
    If using AWS providers (Bedrock), you can override the AWS_REGION environment variable by setting this option.
    AwsRoleSessionName string
    If using AWS providers (Bedrock), set the identifier of the assumed role session.
    AwsStsEndpointUrl string
    If using AWS providers (Bedrock), override the STS endpoint URL when assuming a different role.
    EmbeddingsNormalize bool
    If using AWS providers (Bedrock), set to true to normalize the embeddings. Default: false
    PerformanceConfigLatency string
    Force the client's performance configuration 'latency' for all requests. Leave empty to let the consumer select the performance configuration.
    awsAssumeRoleArn String
    If using AWS providers (Bedrock), you can assume a different role after authentication with the current IAM context is successful.
    awsRegion String
    If using AWS providers (Bedrock), you can override the AWS_REGION environment variable by setting this option.
    awsRoleSessionName String
    If using AWS providers (Bedrock), set the identifier of the assumed role session.
    awsStsEndpointUrl String
    If using AWS providers (Bedrock), override the STS endpoint URL when assuming a different role.
    embeddingsNormalize Boolean
    If using AWS providers (Bedrock), set to true to normalize the embeddings. Default: false
    performanceConfigLatency String
    Force the client's performance configuration 'latency' for all requests. Leave empty to let the consumer select the performance configuration.
    awsAssumeRoleArn string
    If using AWS providers (Bedrock), you can assume a different role after authentication with the current IAM context is successful.
    awsRegion string
    If using AWS providers (Bedrock), you can override the AWS_REGION environment variable by setting this option.
    awsRoleSessionName string
    If using AWS providers (Bedrock), set the identifier of the assumed role session.
    awsStsEndpointUrl string
    If using AWS providers (Bedrock), override the STS endpoint URL when assuming a different role.
    embeddingsNormalize boolean
    If using AWS providers (Bedrock), set to true to normalize the embeddings. Default: false
    performanceConfigLatency string
    Force the client's performance configuration 'latency' for all requests. Leave empty to let the consumer select the performance configuration.
    aws_assume_role_arn str
    If using AWS providers (Bedrock), you can assume a different role after authentication with the current IAM context is successful.
    aws_region str
    If using AWS providers (Bedrock), you can override the AWS_REGION environment variable by setting this option.
    aws_role_session_name str
    If using AWS providers (Bedrock), set the identifier of the assumed role session.
    aws_sts_endpoint_url str
    If using AWS providers (Bedrock), override the STS endpoint URL when assuming a different role.
    embeddings_normalize bool
    If using AWS providers (Bedrock), set to true to normalize the embeddings. Default: false
    performance_config_latency str
    Force the client's performance configuration 'latency' for all requests. Leave empty to let the consumer select the performance configuration.
    awsAssumeRoleArn String
    If using AWS providers (Bedrock), you can assume a different role after authentication with the current IAM context is successful.
    awsRegion String
    If using AWS providers (Bedrock), you can override the AWS_REGION environment variable by setting this option.
    awsRoleSessionName String
    If using AWS providers (Bedrock), set the identifier of the assumed role session.
    awsStsEndpointUrl String
    If using AWS providers (Bedrock), override the STS endpoint URL when assuming a different role.
    embeddingsNormalize Boolean
    If using AWS providers (Bedrock), set to true to normalize the embeddings. Default: false
    performanceConfigLatency String
    Force the client's performance configuration 'latency' for all requests. Leave empty to let the consumer select the performance configuration.
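
    A sketch of the bedrock options block for invoking Bedrock through an assumed role; the ARN, region, and session name are placeholders:

    const bedrock = {
        awsRegion: "us-east-1",
        awsAssumeRoleArn: "arn:aws:iam::123456789012:role/bedrock-invoke", // hypothetical
        awsRoleSessionName: "kong-ai-proxy",
    };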

    GatewayPluginAiProxyConfigModelOptionsCohere, GatewayPluginAiProxyConfigModelOptionsCohereArgs

    EmbeddingInputType string
    The purpose of the input text to calculate embedding vectors. Default: "classification"; must be one of ["classification", "clustering", "image", "search_document", "search_query"]
    WaitForModel bool
    Wait for the model if it is not ready.
    EmbeddingInputType string
    The purpose of the input text to calculate embedding vectors. Default: "classification"; must be one of ["classification", "clustering", "image", "search_document", "search_query"]
    WaitForModel bool
    Wait for the model if it is not ready.
    embeddingInputType String
    The purpose of the input text to calculate embedding vectors. Default: "classification"; must be one of ["classification", "clustering", "image", "search_document", "search_query"]
    waitForModel Boolean
    Wait for the model if it is not ready.
    embeddingInputType string
    The purpose of the input text to calculate embedding vectors. Default: "classification"; must be one of ["classification", "clustering", "image", "search_document", "search_query"]
    waitForModel boolean
    Wait for the model if it is not ready.
    embedding_input_type str
    The purpose of the input text to calculate embedding vectors. Default: "classification"; must be one of ["classification", "clustering", "image", "search_document", "search_query"]
    wait_for_model bool
    Wait for the model if it is not ready.
    embeddingInputType String
    The purpose of the input text to calculate embedding vectors. Default: "classification"; must be one of ["classification", "clustering", "image", "search_document", "search_query"]
    waitForModel Boolean
    Wait for the model if it is not ready.
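
    When computing embeddings for semantic search, documents and queries are typically tagged with different input types. A sketch of the cohere options block:

    const cohere = {
        embeddingInputType: "search_document", // use "search_query" on the query side
        waitForModel: true,                    // block until the model is ready
    };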

    GatewayPluginAiProxyConfigModelOptionsGemini, GatewayPluginAiProxyConfigModelOptionsGeminiArgs

    ApiEndpoint string
    If running Gemini on Vertex, specify the regional API endpoint (hostname only).
    LocationId string
    If running Gemini on Vertex, specify the location ID.
    ProjectId string
    If running Gemini on Vertex, specify the project ID.
    ApiEndpoint string
    If running Gemini on Vertex, specify the regional API endpoint (hostname only).
    LocationId string
    If running Gemini on Vertex, specify the location ID.
    ProjectId string
    If running Gemini on Vertex, specify the project ID.
    apiEndpoint String
    If running Gemini on Vertex, specify the regional API endpoint (hostname only).
    locationId String
    If running Gemini on Vertex, specify the location ID.
    projectId String
    If running Gemini on Vertex, specify the project ID.
    apiEndpoint string
    If running Gemini on Vertex, specify the regional API endpoint (hostname only).
    locationId string
    If running Gemini on Vertex, specify the location ID.
    projectId string
    If running Gemini on Vertex, specify the project ID.
    api_endpoint str
    If running Gemini on Vertex, specify the regional API endpoint (hostname only).
    location_id str
    If running Gemini on Vertex, specify the location ID.
    project_id str
    If running Gemini on Vertex, specify the project ID.
    apiEndpoint String
    If running Gemini on Vertex, specify the regional API endpoint (hostname only).
    locationId String
    If running Gemini on Vertex, specify the location ID.
    projectId String
    If running Gemini on Vertex, specify the project ID.
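
    For Gemini on Vertex, the three fields name the regional endpoint and the owning GCP project; a sketch with placeholder values (note the endpoint is a hostname only, no scheme):

        var geminiOptions = GatewayPluginAiProxyConfigModelOptionsGeminiArgs.builder()
            .apiEndpoint("us-central1-aiplatform.googleapis.com") // hostname only, no https:// prefix
            .locationId("us-central1")                            // hypothetical Vertex location
            .projectId("my-gcp-project")                          // hypothetical GCP project ID
            .build();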

    GatewayPluginAiProxyConfigModelOptionsHuggingface, GatewayPluginAiProxyConfigModelOptionsHuggingfaceArgs

    UseCache bool
    Use the cache layer on the inference API.
    WaitForModel bool
    Wait for the model if it is not ready.
    UseCache bool
    Use the cache layer on the inference API.
    WaitForModel bool
    Wait for the model if it is not ready.
    useCache Boolean
    Use the cache layer on the inference API.
    waitForModel Boolean
    Wait for the model if it is not ready.
    useCache boolean
    Use the cache layer on the inference API.
    waitForModel boolean
    Wait for the model if it is not ready.
    use_cache bool
    Use the cache layer on the inference API.
    wait_for_model bool
    Wait for the model if it is not ready.
    useCache Boolean
    Use the cache layer on the inference API.
    waitForModel Boolean
    Wait for the model if it is not ready.
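
    Both Hugging Face flags map directly onto builder calls; a minimal sketch:

        var huggingfaceOptions = GatewayPluginAiProxyConfigModelOptionsHuggingfaceArgs.builder()
            .useCache(true)     // serve responses from the inference API's cache layer
            .waitForModel(true) // wait for a cold model rather than erroring
            .build();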

    GatewayPluginAiProxyConsumer, GatewayPluginAiProxyConsumerArgs

    Id string
    Id string
    id String
    id string
    id str
    id String

    GatewayPluginAiProxyConsumerGroup, GatewayPluginAiProxyConsumerGroupArgs

    Id string
    Id string
    id String
    id string
    id str
    id String
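
    Both references carry only the UUID of an existing Konnect entity; a sketch using the placeholder style of the example above:

        var consumerRef = GatewayPluginAiProxyConsumerArgs.builder()
            .id("...my_consumer_id...")       // UUID of an existing consumer
            .build();
        var consumerGroupRef = GatewayPluginAiProxyConsumerGroupArgs.builder()
            .id("...my_consumer_group_id...") // UUID of an existing consumer group
            .build();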

    GatewayPluginAiProxyOrdering, GatewayPluginAiProxyOrderingArgs

    GatewayPluginAiProxyOrderingAfter, GatewayPluginAiProxyOrderingAfterArgs

    Accesses List<string>
    Accesses []string
    accesses List<String>
    accesses string[]
    accesses Sequence[str]
    accesses List<String>

    GatewayPluginAiProxyOrderingBefore, GatewayPluginAiProxyOrderingBeforeArgs

    Accesses List<string>
    Accesses []string
    accesses List<String>
    accesses string[]
    accesses Sequence[str]
    accesses List<String>
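
    Ordering controls where this plugin runs relative to other plugins in the access phase; a hedged sketch (both plugin names are hypothetical):

        var ordering = GatewayPluginAiProxyOrderingArgs.builder()
            .after(GatewayPluginAiProxyOrderingAfterArgs.builder()
                .accesses("rate-limiting")       // plugins that must run before this one
                .build())
            .before(GatewayPluginAiProxyOrderingBeforeArgs.builder()
                .accesses("request-transformer") // plugins that must run after this one
                .build())
            .build();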

    GatewayPluginAiProxyPartial, GatewayPluginAiProxyPartialArgs

    Id string
    A string representing a UUID (universally unique identifier).
    Name string
    A unique string representing a UTF-8 encoded name.
    Path string
    Id string
    A string representing a UUID (universally unique identifier).
    Name string
    A unique string representing a UTF-8 encoded name.
    Path string
    id String
    A string representing a UUID (universally unique identifier).
    name String
    A unique string representing a UTF-8 encoded name.
    path String
    id string
    A string representing a UUID (universally unique identifier).
    name string
    A unique string representing a UTF-8 encoded name.
    path string
    id str
    A string representing a UUID (universally unique identifier).
    name str
    A unique string representing a UTF-8 encoded name.
    path str
    id String
    A string representing a UUID (universally unique identifier).
    name String
    A unique string representing a UTF-8 encoded name.
    path String
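
    A partial reference combines the UUID with a name and path; a sketch with placeholder values:

        var partialRef = GatewayPluginAiProxyPartialArgs.builder()
            .id("...my_partial_id...")     // UUID of the partial
            .name("...my_partial_name...") // unique UTF-8 encoded name
            .path("...my_partial_path...")
            .build();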

    GatewayPluginAiProxyRoute, GatewayPluginAiProxyRouteArgs

    Id string
    Id string
    id String
    id string
    id str
    id String

    GatewayPluginAiProxyService, GatewayPluginAiProxyServiceArgs

    Id string
    Id string
    id String
    id string
    id str
    id String
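
    Route and service references likewise take a single UUID and scope the plugin to an existing Gateway route or service; a sketch:

        var routeRef = GatewayPluginAiProxyRouteArgs.builder()
            .id("...my_route_id...")   // UUID of an existing route
            .build();
        var serviceRef = GatewayPluginAiProxyServiceArgs.builder()
            .id("...my_service_id...") // UUID of an existing service
            .build();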

    Import

    $ pulumi import konnect:index/gatewayPluginAiProxy:GatewayPluginAiProxy my_konnect_gateway_plugin_ai_proxy '{"control_plane_id": "9524ec7d-36d9-465d-a8c5-83a3c9390458", "id": "3473c251-5b6c-4f45-b1ff-7ede735a366d"}'
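
    Note that the import ID is a JSON object rather than a bare UUID: it must carry both the control_plane_id of the control plane that owns the plugin and the plugin's own id, as shown above.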
    

    To learn more about importing existing cloud resources, see Importing resources.

    Package Details

    Repository
    konnect kong/terraform-provider-konnect
    License
    Notes
    This Pulumi package is based on the konnect Terraform Provider.