1. Packages
  2. Packages
  3. Elasticstack Provider
  4. API Docs
  5. ElasticsearchInferenceEndpoint
Viewing docs for elasticstack 0.15.0
published on Thursday, May 14, 2026 by elastic

    Creates or updates an inference endpoint. See the inference endpoint API documentation for more details.

    Example Usage

    import * as pulumi from "@pulumi/pulumi";
    import * as elasticstack from "@pulumi/elasticstack";
    
    const example = new elasticstack.ElasticsearchInferenceEndpoint("example", {
        inferenceId: "text-embedding-3-large",
        taskType: "text_embedding",
        service: "azureaistudio",
        serviceSettings: JSON.stringify({
            api_key: "example_key",
            target: "https://example.com/openai/deployments/text-embedding-3-large/embeddings?api-version=2023-05-151",
            provider: "openai",
            endpoint_type: "token",
        }),
    });
    
    import pulumi
    import json
    import pulumi_elasticstack as elasticstack
    
    example = elasticstack.ElasticsearchInferenceEndpoint("example",
        inference_id="text-embedding-3-large",
        task_type="text_embedding",
        service="azureaistudio",
        service_settings=json.dumps({
            "api_key": "example_key",
            "target": "https://example.com/openai/deployments/text-embedding-3-large/embeddings?api-version=2023-05-151",
            "provider": "openai",
            "endpoint_type": "token",
        }))
    
    package main
    
    import (
    	"encoding/json"
    
    	"github.com/pulumi/pulumi-terraform-provider/sdks/go/elasticstack/elasticstack"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		tmpJSON0, err := json.Marshal(map[string]interface{}{
    			"api_key":       "example_key",
    			"target":        "https://example.com/openai/deployments/text-embedding-3-large/embeddings?api-version=2023-05-151",
    			"provider":      "openai",
    			"endpoint_type": "token",
    		})
    		if err != nil {
    			return err
    		}
    		json0 := string(tmpJSON0)
    		_, err = elasticstack.NewElasticsearchInferenceEndpoint(ctx, "example", &elasticstack.ElasticsearchInferenceEndpointArgs{
    			InferenceId:     pulumi.String("text-embedding-3-large"),
    			TaskType:        pulumi.String("text_embedding"),
    			Service:         pulumi.String("azureaistudio"),
    			ServiceSettings: pulumi.String(json0),
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    using System.Collections.Generic;
    using System.Linq;
    using System.Text.Json;
    using Pulumi;
    using Elasticstack = Pulumi.Elasticstack;
    
    return await Deployment.RunAsync(() => 
    {
        var example = new Elasticstack.ElasticsearchInferenceEndpoint("example", new()
        {
            InferenceId = "text-embedding-3-large",
            TaskType = "text_embedding",
            Service = "azureaistudio",
            ServiceSettings = JsonSerializer.Serialize(new Dictionary<string, object?>
            {
                ["api_key"] = "example_key",
                ["target"] = "https://example.com/openai/deployments/text-embedding-3-large/embeddings?api-version=2023-05-151",
                ["provider"] = "openai",
                ["endpoint_type"] = "token",
            }),
        });
    
    });
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.elasticstack.ElasticsearchInferenceEndpoint;
    import com.pulumi.elasticstack.ElasticsearchInferenceEndpointArgs;
    import static com.pulumi.codegen.internal.Serialization.*;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            var example = new ElasticsearchInferenceEndpoint("example", ElasticsearchInferenceEndpointArgs.builder()
                .inferenceId("text-embedding-3-large")
                .taskType("text_embedding")
                .service("azureaistudio")
                .serviceSettings(serializeJson(
                    jsonObject(
                        jsonProperty("api_key", "example_key"),
                        jsonProperty("target", "https://example.com/openai/deployments/text-embedding-3-large/embeddings?api-version=2023-05-151"),
                        jsonProperty("provider", "openai"),
                        jsonProperty("endpoint_type", "token")
                    )))
                .build());
    
        }
    }
    
    resources:
      example:
        type: elasticstack:ElasticsearchInferenceEndpoint
        properties:
          inferenceId: text-embedding-3-large
          taskType: text_embedding
          service: azureaistudio
          serviceSettings:
            fn::toJSON:
              api_key: example_key
              target: https://example.com/openai/deployments/text-embedding-3-large/embeddings?api-version=2023-05-151
              provider: openai
              endpoint_type: token
    
    Example coming soon!
    

    Create ElasticsearchInferenceEndpoint Resource

    Resources are created with functions called constructors. To learn more about declaring and configuring resources, see Resources.

    Constructor syntax

    new ElasticsearchInferenceEndpoint(name: string, args: ElasticsearchInferenceEndpointArgs, opts?: CustomResourceOptions);
    @overload
    def ElasticsearchInferenceEndpoint(resource_name: str,
                                       args: ElasticsearchInferenceEndpointArgs,
                                       opts: Optional[ResourceOptions] = None)
    
    @overload
    def ElasticsearchInferenceEndpoint(resource_name: str,
                                       opts: Optional[ResourceOptions] = None,
                                       inference_id: Optional[str] = None,
                                       service: Optional[str] = None,
                                       service_settings: Optional[str] = None,
                                       chunking_settings: Optional[str] = None,
                                       elasticsearch_connections: Optional[Sequence[ElasticsearchInferenceEndpointElasticsearchConnectionArgs]] = None,
                                       task_settings: Optional[str] = None,
                                       task_type: Optional[str] = None)
    func NewElasticsearchInferenceEndpoint(ctx *Context, name string, args ElasticsearchInferenceEndpointArgs, opts ...ResourceOption) (*ElasticsearchInferenceEndpoint, error)
    public ElasticsearchInferenceEndpoint(string name, ElasticsearchInferenceEndpointArgs args, CustomResourceOptions? opts = null)
    public ElasticsearchInferenceEndpoint(String name, ElasticsearchInferenceEndpointArgs args)
    public ElasticsearchInferenceEndpoint(String name, ElasticsearchInferenceEndpointArgs args, CustomResourceOptions options)
    
    type: elasticstack:ElasticsearchInferenceEndpoint
    properties: # The arguments to resource properties.
    options: # Bag of options to control resource's behavior.
    
    
    resource "elasticstack_elasticsearchinferenceendpoint" "name" {
        # resource properties
    }

    Parameters

    name string
    The unique name of the resource.
    args ElasticsearchInferenceEndpointArgs
    The arguments to resource properties.
    opts CustomResourceOptions
    Bag of options to control resource's behavior.
    resource_name str
    The unique name of the resource.
    args ElasticsearchInferenceEndpointArgs
    The arguments to resource properties.
    opts ResourceOptions
    Bag of options to control resource's behavior.
    ctx Context
    Context object for the current deployment.
    name string
    The unique name of the resource.
    args ElasticsearchInferenceEndpointArgs
    The arguments to resource properties.
    opts ResourceOption
    Bag of options to control resource's behavior.
    name string
    The unique name of the resource.
    args ElasticsearchInferenceEndpointArgs
    The arguments to resource properties.
    opts CustomResourceOptions
    Bag of options to control resource's behavior.
    name String
    The unique name of the resource.
    args ElasticsearchInferenceEndpointArgs
    The arguments to resource properties.
    options CustomResourceOptions
    Bag of options to control resource's behavior.

    Constructor example

    The following reference example uses placeholder values for all input properties.

    var elasticsearchInferenceEndpointResource = new Elasticstack.ElasticsearchInferenceEndpoint("elasticsearchInferenceEndpointResource", new()
    {
        InferenceId = "string",
        Service = "string",
        ServiceSettings = "string",
        ChunkingSettings = "string",
        ElasticsearchConnections = new[]
        {
            new Elasticstack.Inputs.ElasticsearchInferenceEndpointElasticsearchConnectionArgs
            {
                ApiKey = "string",
                BearerToken = "string",
                CaData = "string",
                CaFile = "string",
                CertData = "string",
                CertFile = "string",
                Endpoints = new[]
                {
                    "string",
                },
                EsClientAuthentication = "string",
                Headers = 
                {
                    { "string", "string" },
                },
                Insecure = false,
                KeyData = "string",
                KeyFile = "string",
                Password = "string",
                Username = "string",
            },
        },
        TaskSettings = "string",
        TaskType = "string",
    });
    
    example, err := elasticstack.NewElasticsearchInferenceEndpoint(ctx, "elasticsearchInferenceEndpointResource", &elasticstack.ElasticsearchInferenceEndpointArgs{
    	InferenceId:      pulumi.String("string"),
    	Service:          pulumi.String("string"),
    	ServiceSettings:  pulumi.String("string"),
    	ChunkingSettings: pulumi.String("string"),
    	ElasticsearchConnections: elasticstack.ElasticsearchInferenceEndpointElasticsearchConnectionArray{
    		&elasticstack.ElasticsearchInferenceEndpointElasticsearchConnectionArgs{
    			ApiKey:      pulumi.String("string"),
    			BearerToken: pulumi.String("string"),
    			CaData:      pulumi.String("string"),
    			CaFile:      pulumi.String("string"),
    			CertData:    pulumi.String("string"),
    			CertFile:    pulumi.String("string"),
    			Endpoints: pulumi.StringArray{
    				pulumi.String("string"),
    			},
    			EsClientAuthentication: pulumi.String("string"),
    			Headers: pulumi.StringMap{
    				"string": pulumi.String("string"),
    			},
    			Insecure: pulumi.Bool(false),
    			KeyData:  pulumi.String("string"),
    			KeyFile:  pulumi.String("string"),
    			Password: pulumi.String("string"),
    			Username: pulumi.String("string"),
    		},
    	},
    	TaskSettings: pulumi.String("string"),
    	TaskType:     pulumi.String("string"),
    })
    
    resource "elasticstack_elasticsearchinferenceendpoint" "elasticsearchInferenceEndpointResource" {
      inference_id      = "string"
      service           = "string"
      service_settings  = "string"
      chunking_settings = "string"
      elasticsearch_connections {
        api_key                  = "string"
        bearer_token             = "string"
        ca_data                  = "string"
        ca_file                  = "string"
        cert_data                = "string"
        cert_file                = "string"
        endpoints                = ["string"]
        es_client_authentication = "string"
        headers = {
          "string" = "string"
        }
        insecure = false
        key_data = "string"
        key_file = "string"
        password = "string"
        username = "string"
      }
      task_settings = "string"
      task_type     = "string"
    }
    
    var elasticsearchInferenceEndpointResource = new ElasticsearchInferenceEndpoint("elasticsearchInferenceEndpointResource", ElasticsearchInferenceEndpointArgs.builder()
        .inferenceId("string")
        .service("string")
        .serviceSettings("string")
        .chunkingSettings("string")
        .elasticsearchConnections(ElasticsearchInferenceEndpointElasticsearchConnectionArgs.builder()
            .apiKey("string")
            .bearerToken("string")
            .caData("string")
            .caFile("string")
            .certData("string")
            .certFile("string")
            .endpoints("string")
            .esClientAuthentication("string")
            .headers(Map.of("string", "string"))
            .insecure(false)
            .keyData("string")
            .keyFile("string")
            .password("string")
            .username("string")
            .build())
        .taskSettings("string")
        .taskType("string")
        .build());
    
    elasticsearch_inference_endpoint_resource = elasticstack.ElasticsearchInferenceEndpoint("elasticsearchInferenceEndpointResource",
        inference_id="string",
        service="string",
        service_settings="string",
        chunking_settings="string",
        elasticsearch_connections=[{
            "api_key": "string",
            "bearer_token": "string",
            "ca_data": "string",
            "ca_file": "string",
            "cert_data": "string",
            "cert_file": "string",
            "endpoints": ["string"],
            "es_client_authentication": "string",
            "headers": {
                "string": "string",
            },
            "insecure": False,
            "key_data": "string",
            "key_file": "string",
            "password": "string",
            "username": "string",
        }],
        task_settings="string",
        task_type="string")
    
    const elasticsearchInferenceEndpointResource = new elasticstack.ElasticsearchInferenceEndpoint("elasticsearchInferenceEndpointResource", {
        inferenceId: "string",
        service: "string",
        serviceSettings: "string",
        chunkingSettings: "string",
        elasticsearchConnections: [{
            apiKey: "string",
            bearerToken: "string",
            caData: "string",
            caFile: "string",
            certData: "string",
            certFile: "string",
            endpoints: ["string"],
            esClientAuthentication: "string",
            headers: {
                string: "string",
            },
            insecure: false,
            keyData: "string",
            keyFile: "string",
            password: "string",
            username: "string",
        }],
        taskSettings: "string",
        taskType: "string",
    });
    
    type: elasticstack:ElasticsearchInferenceEndpoint
    properties:
        chunkingSettings: string
        elasticsearchConnections:
            - apiKey: string
              bearerToken: string
              caData: string
              caFile: string
              certData: string
              certFile: string
              endpoints:
                - string
              esClientAuthentication: string
              headers:
                string: string
              insecure: false
              keyData: string
              keyFile: string
              password: string
              username: string
        inferenceId: string
        service: string
        serviceSettings: string
        taskSettings: string
        taskType: string
    

    ElasticsearchInferenceEndpoint Resource Properties

    To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.

    Inputs

    In Python, inputs that are objects can be passed either as argument classes or as dictionary literals.

    The ElasticsearchInferenceEndpoint resource accepts the following input properties:

    InferenceId string
    The unique identifier of the inference endpoint.
    Service string
    The service type for the inference endpoint (e.g. openai, cohere, elasticsearch).
    ServiceSettings string
    Settings specific to the service provider, as a JSON object. May include credentials and model identifiers.
    ChunkingSettings string
    Configuration for chunking input text, as a JSON object. Applicable only for embedding task types.
    ElasticsearchConnections List<ElasticsearchInferenceEndpointElasticsearchConnection>
    Elasticsearch connection configuration block.
    TaskSettings string
    Task-specific settings, as a JSON object. Optional and service-dependent. Only keys explicitly set here are tracked; server-applied defaults returned by the API are ignored to avoid perpetual drift.
    TaskType string
    must be one of [sparse_embedding, text_embedding, rerank, completion, chat_completion, embedding]
    InferenceId string
    The unique identifier of the inference endpoint.
    Service string
    The service type for the inference endpoint (e.g. openai, cohere, elasticsearch).
    ServiceSettings string
    Settings specific to the service provider, as a JSON object. May include credentials and model identifiers.
    ChunkingSettings string
    Configuration for chunking input text, as a JSON object. Applicable only for embedding task types.
    ElasticsearchConnections []ElasticsearchInferenceEndpointElasticsearchConnectionArgs
    Elasticsearch connection configuration block.
    TaskSettings string
    Task-specific settings, as a JSON object. Optional and service-dependent. Only keys explicitly set here are tracked; server-applied defaults returned by the API are ignored to avoid perpetual drift.
    TaskType string
    must be one of [sparse_embedding, text_embedding, rerank, completion, chat_completion, embedding]
    inference_id string
    The unique identifier of the inference endpoint.
    service string
    The service type for the inference endpoint (e.g. openai, cohere, elasticsearch).
    service_settings string
    Settings specific to the service provider, as a JSON object. May include credentials and model identifiers.
    chunking_settings string
    Configuration for chunking input text, as a JSON object. Applicable only for embedding task types.
    elasticsearch_connections list(object)
    Elasticsearch connection configuration block.
    task_settings string
    Task-specific settings, as a JSON object. Optional and service-dependent. Only keys explicitly set here are tracked; server-applied defaults returned by the API are ignored to avoid perpetual drift.
    task_type string
    must be one of [sparse_embedding, text_embedding, rerank, completion, chat_completion, embedding]
    inferenceId String
    The unique identifier of the inference endpoint.
    service String
    The service type for the inference endpoint (e.g. openai, cohere, elasticsearch).
    serviceSettings String
    Settings specific to the service provider, as a JSON object. May include credentials and model identifiers.
    chunkingSettings String
    Configuration for chunking input text, as a JSON object. Applicable only for embedding task types.
    elasticsearchConnections List<ElasticsearchInferenceEndpointElasticsearchConnection>
    Elasticsearch connection configuration block.
    taskSettings String
    Task-specific settings, as a JSON object. Optional and service-dependent. Only keys explicitly set here are tracked; server-applied defaults returned by the API are ignored to avoid perpetual drift.
    taskType String
    must be one of [sparse_embedding, text_embedding, rerank, completion, chat_completion, embedding]
    inferenceId string
    The unique identifier of the inference endpoint.
    service string
    The service type for the inference endpoint (e.g. openai, cohere, elasticsearch).
    serviceSettings string
    Settings specific to the service provider, as a JSON object. May include credentials and model identifiers.
    chunkingSettings string
    Configuration for chunking input text, as a JSON object. Applicable only for embedding task types.
    elasticsearchConnections ElasticsearchInferenceEndpointElasticsearchConnection[]
    Elasticsearch connection configuration block.
    taskSettings string
    Task-specific settings, as a JSON object. Optional and service-dependent. Only keys explicitly set here are tracked; server-applied defaults returned by the API are ignored to avoid perpetual drift.
    taskType string
    must be one of [sparse_embedding, text_embedding, rerank, completion, chat_completion, embedding]
    inference_id str
    The unique identifier of the inference endpoint.
    service str
    The service type for the inference endpoint (e.g. openai, cohere, elasticsearch).
    service_settings str
    Settings specific to the service provider, as a JSON object. May include credentials and model identifiers.
    chunking_settings str
    Configuration for chunking input text, as a JSON object. Applicable only for embedding task types.
    elasticsearch_connections Sequence[ElasticsearchInferenceEndpointElasticsearchConnectionArgs]
    Elasticsearch connection configuration block.
    task_settings str
    Task-specific settings, as a JSON object. Optional and service-dependent. Only keys explicitly set here are tracked; server-applied defaults returned by the API are ignored to avoid perpetual drift.
    task_type str
    must be one of [sparse_embedding, text_embedding, rerank, completion, chat_completion, embedding]
    inferenceId String
    The unique identifier of the inference endpoint.
    service String
    The service type for the inference endpoint (e.g. openai, cohere, elasticsearch).
    serviceSettings String
    Settings specific to the service provider, as a JSON object. May include credentials and model identifiers.
    chunkingSettings String
    Configuration for chunking input text, as a JSON object. Applicable only for embedding task types.
    elasticsearchConnections List<Property Map>
    Elasticsearch connection configuration block.
    taskSettings String
    Task-specific settings, as a JSON object. Optional and service-dependent. Only keys explicitly set here are tracked; server-applied defaults returned by the API are ignored to avoid perpetual drift.
    taskType String
    must be one of [sparse_embedding, text_embedding, rerank, completion, chat_completion, embedding]

    Outputs

    All input properties are implicitly available as output properties. Additionally, the ElasticsearchInferenceEndpoint resource produces the following output properties:

    Id string
    The provider-assigned unique ID for this managed resource.
    Id string
    The provider-assigned unique ID for this managed resource.
    id string
    The provider-assigned unique ID for this managed resource.
    id String
    The provider-assigned unique ID for this managed resource.
    id string
    The provider-assigned unique ID for this managed resource.
    id str
    The provider-assigned unique ID for this managed resource.
    id String
    The provider-assigned unique ID for this managed resource.

    Look up Existing ElasticsearchInferenceEndpoint Resource

    Get an existing ElasticsearchInferenceEndpoint resource’s state with the given name, ID, and optional extra properties used to qualify the lookup.

    public static get(name: string, id: Input<ID>, state?: ElasticsearchInferenceEndpointState, opts?: CustomResourceOptions): ElasticsearchInferenceEndpoint
    @staticmethod
    def get(resource_name: str,
            id: str,
            opts: Optional[ResourceOptions] = None,
            chunking_settings: Optional[str] = None,
            elasticsearch_connections: Optional[Sequence[ElasticsearchInferenceEndpointElasticsearchConnectionArgs]] = None,
            inference_id: Optional[str] = None,
            service: Optional[str] = None,
            service_settings: Optional[str] = None,
            task_settings: Optional[str] = None,
            task_type: Optional[str] = None) -> ElasticsearchInferenceEndpoint
    func GetElasticsearchInferenceEndpoint(ctx *Context, name string, id IDInput, state *ElasticsearchInferenceEndpointState, opts ...ResourceOption) (*ElasticsearchInferenceEndpoint, error)
    public static ElasticsearchInferenceEndpoint Get(string name, Input<string> id, ElasticsearchInferenceEndpointState? state, CustomResourceOptions? opts = null)
    public static ElasticsearchInferenceEndpoint get(String name, Output<String> id, ElasticsearchInferenceEndpointState state, CustomResourceOptions options)
    resources:  _:    type: elasticstack:ElasticsearchInferenceEndpoint    get:      id: ${id}
    import {
      to = elasticstack_elasticsearchinferenceendpoint.example
      id = "${id}"
    }
    
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    resource_name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    The following state arguments are supported:
    ChunkingSettings string
    Configuration for chunking input text, as a JSON object. Applicable only for embedding task types.
    ElasticsearchConnections List<ElasticsearchInferenceEndpointElasticsearchConnection>
    Elasticsearch connection configuration block.
    InferenceId string
    The unique identifier of the inference endpoint.
    Service string
    The service type for the inference endpoint (e.g. openai, cohere, elasticsearch).
    ServiceSettings string
    Settings specific to the service provider, as a JSON object. May include credentials and model identifiers.
    TaskSettings string
    Task-specific settings, as a JSON object. Optional and service-dependent. Only keys explicitly set here are tracked; server-applied defaults returned by the API are ignored to avoid perpetual drift.
    TaskType string
    must be one of [sparse_embedding, text_embedding, rerank, completion, chat_completion, embedding]
    ChunkingSettings string
    Configuration for chunking input text, as a JSON object. Applicable only for embedding task types.
    ElasticsearchConnections []ElasticsearchInferenceEndpointElasticsearchConnectionArgs
    Elasticsearch connection configuration block.
    InferenceId string
    The unique identifier of the inference endpoint.
    Service string
    The service type for the inference endpoint (e.g. openai, cohere, elasticsearch).
    ServiceSettings string
    Settings specific to the service provider, as a JSON object. May include credentials and model identifiers.
    TaskSettings string
    Task-specific settings, as a JSON object. Optional and service-dependent. Only keys explicitly set here are tracked; server-applied defaults returned by the API are ignored to avoid perpetual drift.
    TaskType string
    must be one of [sparse_embedding, text_embedding, rerank, completion, chat_completion, embedding]
    chunking_settings string
    Configuration for chunking input text, as a JSON object. Applicable only for embedding task types.
    elasticsearch_connections list(object)
    Elasticsearch connection configuration block.
    inference_id string
    The unique identifier of the inference endpoint.
    service string
    The service type for the inference endpoint (e.g. openai, cohere, elasticsearch).
    service_settings string
    Settings specific to the service provider, as a JSON object. May include credentials and model identifiers.
    task_settings string
    Task-specific settings, as a JSON object. Optional and service-dependent. Only keys explicitly set here are tracked; server-applied defaults returned by the API are ignored to avoid perpetual drift.
    task_type string
    must be one of [sparse_embedding, text_embedding, rerank, completion, chat_completion, embedding]
    chunkingSettings String
    Configuration for chunking input text, as a JSON object. Applicable only for embedding task types.
    elasticsearchConnections List<ElasticsearchInferenceEndpointElasticsearchConnection>
    Elasticsearch connection configuration block.
    inferenceId String
    The unique identifier of the inference endpoint.
    service String
    The service type for the inference endpoint (e.g. openai, cohere, elasticsearch).
    serviceSettings String
    Settings specific to the service provider, as a JSON object. May include credentials and model identifiers.
    taskSettings String
    Task-specific settings, as a JSON object. Optional and service-dependent. Only keys explicitly set here are tracked; server-applied defaults returned by the API are ignored to avoid perpetual drift.
    taskType String
    must be one of [sparse_embedding, text_embedding, rerank, completion, chat_completion, embedding]
    chunkingSettings string
    Configuration for chunking input text, as a JSON object. Applicable only for embedding task types.
    elasticsearchConnections ElasticsearchInferenceEndpointElasticsearchConnection[]
    Elasticsearch connection configuration block.
    inferenceId string
    The unique identifier of the inference endpoint.
    service string
    The service type for the inference endpoint (e.g. openai, cohere, elasticsearch).
    serviceSettings string
    Settings specific to the service provider, as a JSON object. May include credentials and model identifiers.
    taskSettings string
    Task-specific settings, as a JSON object. Optional and service-dependent. Only keys explicitly set here are tracked; server-applied defaults returned by the API are ignored to avoid perpetual drift.
    taskType string
    must be one of [sparse_embedding, text_embedding, rerank, completion, chat_completion, embedding]
    chunking_settings str
    Configuration for chunking input text, as a JSON object. Applicable only for embedding task types.
    elasticsearch_connections Sequence[ElasticsearchInferenceEndpointElasticsearchConnectionArgs]
    Elasticsearch connection configuration block.
    inference_id str
    The unique identifier of the inference endpoint.
    service str
    The service type for the inference endpoint (e.g. openai, cohere, elasticsearch).
    service_settings str
    Settings specific to the service provider, as a JSON object. May include credentials and model identifiers.
    task_settings str
    Task-specific settings, as a JSON object. Optional and service-dependent. Only keys explicitly set here are tracked; server-applied defaults returned by the API are ignored to avoid perpetual drift.
    task_type str
    must be one of [sparse_embedding, text_embedding, rerank, completion, chat_completion, embedding]
    chunkingSettings String
    Configuration for chunking input text, as a JSON object. Applicable only for embedding task types.
    elasticsearchConnections List<Property Map>
    Elasticsearch connection configuration block.
    inferenceId String
    The unique identifier of the inference endpoint.
    service String
    The service type for the inference endpoint (e.g. openai, cohere, elasticsearch).
    serviceSettings String
    Settings specific to the service provider, as a JSON object. May include credentials and model identifiers.
    taskSettings String
    Task-specific settings, as a JSON object. Optional and service-dependent. Only keys explicitly set here are tracked; server-applied defaults returned by the API are ignored to avoid perpetual drift.
    taskType String
    must be one of [sparse_embedding, text_embedding, rerank, completion, chat_completion, embedding]

    Supporting Types

    ElasticsearchInferenceEndpointElasticsearchConnection, ElasticsearchInferenceEndpointElasticsearchConnectionArgs

    ApiKey string
    API Key to use for authentication to Elasticsearch
    BearerToken string
    Bearer Token to use for authentication to Elasticsearch
    CaData string
    PEM-encoded custom Certificate Authority certificate
    CaFile string
    Path to a custom Certificate Authority certificate
    CertData string
    PEM encoded certificate for client auth
    CertFile string
    Path to a file containing the PEM encoded certificate for client auth
    Endpoints List<string>
    EsClientAuthentication string
    ES Client Authentication field to be used with the JWT token
    Headers Dictionary<string, string>
    A map of headers to be sent with each request to Elasticsearch.
    Insecure bool
    Disable TLS certificate validation
    KeyData string
    PEM encoded private key for client auth
    KeyFile string
    Path to a file containing the PEM encoded private key for client auth
    Password string
    Password to use for API authentication to Elasticsearch.
    Username string
    Username to use for API authentication to Elasticsearch.
    ApiKey string
    API Key to use for authentication to Elasticsearch
    BearerToken string
    Bearer Token to use for authentication to Elasticsearch
    CaData string
    PEM-encoded custom Certificate Authority certificate
    CaFile string
    Path to a custom Certificate Authority certificate
    CertData string
    PEM encoded certificate for client auth
    CertFile string
    Path to a file containing the PEM encoded certificate for client auth
    Endpoints []string
    EsClientAuthentication string
    ES Client Authentication field to be used with the JWT token
    Headers map[string]string
    A map of headers to be sent with each request to Elasticsearch.
    Insecure bool
    Disable TLS certificate validation
    KeyData string
    PEM encoded private key for client auth
    KeyFile string
    Path to a file containing the PEM encoded private key for client auth
    Password string
    Password to use for API authentication to Elasticsearch.
    Username string
    Username to use for API authentication to Elasticsearch.
    api_key string
    API Key to use for authentication to Elasticsearch
    bearer_token string
    Bearer Token to use for authentication to Elasticsearch
    ca_data string
    PEM-encoded custom Certificate Authority certificate
    ca_file string
    Path to a custom Certificate Authority certificate
    cert_data string
    PEM encoded certificate for client auth
    cert_file string
    Path to a file containing the PEM encoded certificate for client auth
    endpoints list(string)
    es_client_authentication string
    ES Client Authentication field to be used with the JWT token
    headers map(string)
    A map of headers to be sent with each request to Elasticsearch.
    insecure bool
    Disable TLS certificate validation
    key_data string
    PEM encoded private key for client auth
    key_file string
    Path to a file containing the PEM encoded private key for client auth
    password string
    Password to use for API authentication to Elasticsearch.
    username string
    Username to use for API authentication to Elasticsearch.
    apiKey String
    API Key to use for authentication to Elasticsearch
    bearerToken String
    Bearer Token to use for authentication to Elasticsearch
    caData String
    PEM-encoded custom Certificate Authority certificate
    caFile String
    Path to a custom Certificate Authority certificate
    certData String
    PEM encoded certificate for client auth
    certFile String
    Path to a file containing the PEM encoded certificate for client auth
    endpoints List<String>
    esClientAuthentication String
    ES Client Authentication field to be used with the JWT token
    headers Map<String,String>
    A map of headers to be sent with each request to Elasticsearch.
    insecure Boolean
    Disable TLS certificate validation
    keyData String
    PEM encoded private key for client auth
    keyFile String
    Path to a file containing the PEM encoded private key for client auth
    password String
    Password to use for API authentication to Elasticsearch.
    username String
    Username to use for API authentication to Elasticsearch.
    apiKey string
    API Key to use for authentication to Elasticsearch
    bearerToken string
    Bearer Token to use for authentication to Elasticsearch
    caData string
    PEM-encoded custom Certificate Authority certificate
    caFile string
    Path to a custom Certificate Authority certificate
    certData string
    PEM encoded certificate for client auth
    certFile string
    Path to a file containing the PEM encoded certificate for client auth
    endpoints string[]
    esClientAuthentication string
    ES Client Authentication field to be used with the JWT token
    headers {[key: string]: string}
    A map of headers to be sent with each request to Elasticsearch.
    insecure boolean
    Disable TLS certificate validation
    keyData string
    PEM encoded private key for client auth
    keyFile string
    Path to a file containing the PEM encoded private key for client auth
    password string
    Password to use for API authentication to Elasticsearch.
    username string
    Username to use for API authentication to Elasticsearch.
    api_key str
    API Key to use for authentication to Elasticsearch
    bearer_token str
    Bearer Token to use for authentication to Elasticsearch
    ca_data str
    PEM-encoded custom Certificate Authority certificate
    ca_file str
    Path to a custom Certificate Authority certificate
    cert_data str
    PEM encoded certificate for client auth
    cert_file str
    Path to a file containing the PEM encoded certificate for client auth
    endpoints Sequence[str]
    es_client_authentication str
    ES Client Authentication field to be used with the JWT token
    headers Mapping[str, str]
    A map of headers to be sent with each request to Elasticsearch.
    insecure bool
    Disable TLS certificate validation
    key_data str
    PEM encoded private key for client auth
    key_file str
    Path to a file containing the PEM encoded private key for client auth
    password str
    Password to use for API authentication to Elasticsearch.
    username str
    Username to use for API authentication to Elasticsearch.
    apiKey String
    API Key to use for authentication to Elasticsearch
    bearerToken String
    Bearer Token to use for authentication to Elasticsearch
    caData String
    PEM-encoded custom Certificate Authority certificate
    caFile String
    Path to a custom Certificate Authority certificate
    certData String
    PEM encoded certificate for client auth
    certFile String
    Path to a file containing the PEM encoded certificate for client auth
    endpoints List<String>
    esClientAuthentication String
    ES Client Authentication field to be used with the JWT token
    headers Map<String>
    A map of headers to be sent with each request to Elasticsearch.
    insecure Boolean
    Disable TLS certificate validation
    keyData String
    PEM encoded private key for client auth
    keyFile String
    Path to a file containing the PEM encoded private key for client auth
    password String
    Password to use for API authentication to Elasticsearch.
    username String
    Username to use for API authentication to Elasticsearch.

    Package Details

    Repository
    elasticstack elastic/terraform-provider-elasticstack
    License
    Notes
    This Pulumi package is based on the elasticstack Terraform Provider.
    Viewing docs for elasticstack 0.15.0
    published on Thursday, May 14, 2026 by elastic
      Try Pulumi Cloud free. Your team will thank you.