hsdp.AiInferenceComputeTarget
Manages HSDP AI Inference Compute Targets
Example Usage
import * as pulumi from "@pulumi/pulumi";
import * as hsdp from "@pulumi/hsdp";

// The tenant org ID comes from stack configuration (it was a Terraform variable in the upstream example).
const config = new pulumi.Config();
const inferenceTenantOrgId = config.require("inferenceTenantOrgId");
const inferenceConfig = hsdp.getConfig({
    service: "inference",
});
const inferenceAiInferenceServiceInstance = inferenceConfig.then(inferenceConfig => hsdp.getAiInferenceServiceInstance({
    baseUrl: inferenceConfig.url,
    organizationId: inferenceTenantOrgId,
}));
const target = new hsdp.AiInferenceComputeTarget("target", {
endpoint: inferenceAiInferenceServiceInstance.then(inferenceAiInferenceServiceInstance => inferenceAiInferenceServiceInstance.endpoint),
description: "Tesla v100 GPU based environment with 128MB GPU memory",
instanceType: "ml.p3.16xlarge",
storage: 20,
});
import pulumi
import pulumi_hsdp as hsdp

# The tenant org ID comes from stack configuration (it was a Terraform variable in the upstream example).
config = pulumi.Config()
inference_tenant_org_id = config.require("inference_tenant_org_id")
inference_config = hsdp.get_config(service="inference")
inference_ai_inference_service_instance = hsdp.get_ai_inference_service_instance(base_url=inference_config.url,
    organization_id=inference_tenant_org_id)
target = hsdp.AiInferenceComputeTarget("target",
endpoint=inference_ai_inference_service_instance.endpoint,
description="Tesla v100 GPU based environment with 128MB GPU memory",
instance_type="ml.p3.16xlarge",
storage=20)
package main
import (
	"github.com/pulumi/pulumi-terraform-provider/sdks/go/hsdp/hsdp"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi/config"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
inferenceConfig, err := hsdp.GetConfig(ctx, &hsdp.GetConfigArgs{
Service: "inference",
}, nil)
if err != nil {
return err
}
		// The tenant org ID comes from stack configuration (it was a Terraform variable in the upstream example).
		cfg := config.New(ctx, "")
		inferenceTenantOrgId := cfg.Require("inferenceTenantOrgId")
		inferenceAiInferenceServiceInstance, err := hsdp.GetAiInferenceServiceInstance(ctx, &hsdp.GetAiInferenceServiceInstanceArgs{
			BaseUrl:        inferenceConfig.Url,
			OrganizationId: inferenceTenantOrgId,
		}, nil)
if err != nil {
return err
}
_, err = hsdp.NewAiInferenceComputeTarget(ctx, "target", &hsdp.AiInferenceComputeTargetArgs{
Endpoint: pulumi.String(inferenceAiInferenceServiceInstance.Endpoint),
			Description:  pulumi.String("Tesla V100 GPU based environment with 128GB GPU memory"),
InstanceType: pulumi.String("ml.p3.16xlarge"),
Storage: pulumi.Float64(20),
})
if err != nil {
return err
}
return nil
})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Hsdp = Pulumi.Hsdp;
return await Deployment.RunAsync(() =>
{
var inferenceConfig = Hsdp.GetConfig.Invoke(new()
{
Service = "inference",
});
    // The tenant org ID comes from stack configuration (it was a Terraform variable in the upstream example).
    var config = new Config();
    var inferenceTenantOrgId = config.Require("inferenceTenantOrgId");

    var inferenceAiInferenceServiceInstance = Hsdp.GetAiInferenceServiceInstance.Invoke(new()
    {
        BaseUrl = inferenceConfig.Apply(getConfigResult => getConfigResult.Url),
        OrganizationId = inferenceTenantOrgId,
    });
var target = new Hsdp.AiInferenceComputeTarget("target", new()
{
Endpoint = inferenceAiInferenceServiceInstance.Apply(getAiInferenceServiceInstanceResult => getAiInferenceServiceInstanceResult.Endpoint),
Description = "Tesla v100 GPU based environment with 128MB GPU memory",
InstanceType = "ml.p3.16xlarge",
Storage = 20,
});
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.hsdp.HsdpFunctions;
import com.pulumi.hsdp.inputs.GetConfigArgs;
import com.pulumi.hsdp.inputs.GetAiInferenceServiceInstanceArgs;
import com.pulumi.hsdp.AiInferenceComputeTarget;
import com.pulumi.hsdp.AiInferenceComputeTargetArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
final var inferenceConfig = HsdpFunctions.getConfig(GetConfigArgs.builder()
.service("inference")
.build());
        // The tenant org ID comes from stack configuration (it was a Terraform variable in the upstream example).
        final var config = ctx.config();
        final var inferenceTenantOrgId = config.require("inferenceTenantOrgId");
        final var inferenceAiInferenceServiceInstance = HsdpFunctions.getAiInferenceServiceInstance(GetAiInferenceServiceInstanceArgs.builder()
            .baseUrl(inferenceConfig.applyValue(getConfigResult -> getConfigResult.url()))
            .organizationId(inferenceTenantOrgId)
            .build());
var target = new AiInferenceComputeTarget("target", AiInferenceComputeTargetArgs.builder()
.endpoint(inferenceAiInferenceServiceInstance.applyValue(getAiInferenceServiceInstanceResult -> getAiInferenceServiceInstanceResult.endpoint()))
.description("Tesla v100 GPU based environment with 128MB GPU memory")
.instanceType("ml.p3.16xlarge")
.storage(20)
.build());
}
}
configuration:
  # The tenant org ID comes from stack configuration (it was a Terraform variable in the upstream example).
  inferenceTenantOrgId:
    type: String
resources:
  target:
    type: hsdp:AiInferenceComputeTarget
    properties:
      endpoint: ${inferenceAiInferenceServiceInstance.endpoint}
      description: Tesla V100 GPU based environment with 128GB GPU memory
      instanceType: ml.p3.16xlarge
      storage: 20
variables:
  inferenceConfig:
    fn::invoke:
      function: hsdp:getConfig
      arguments:
        service: inference
  inferenceAiInferenceServiceInstance:
    fn::invoke:
      function: hsdp:getAiInferenceServiceInstance
      arguments:
        baseUrl: ${inferenceConfig.url}
        organizationId: ${inferenceTenantOrgId}
Attributes reference
In addition to all arguments above, the following attributes are exported:
- id - The GUID of the Compute Target
- reference - The reference of this Compute Target
- is_factory - Whether this Compute Environment is a factory one
- created - The date this Compute Environment was created
- created_by - Who created the environment
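These attributes are available as outputs on the resource object. A minimal Python sketch that surfaces them as stack outputs; the endpoint value here is a placeholder, not a real instance endpoint:

import pulumi
import pulumi_hsdp as hsdp

target = hsdp.AiInferenceComputeTarget("target",
    endpoint="https://inference.example.com",  # placeholder endpoint (assumption)
    instance_type="ml.p3.16xlarge",
    storage=20)

# Exported attributes become stack outputs.
pulumi.export("compute_target_id", target.id)
pulumi.export("compute_target_reference", target.reference)
pulumi.export("compute_target_created", target.created)
pulumi.export("compute_target_created_by", target.created_by)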
Create AiInferenceComputeTarget Resource
Resources are created with functions called constructors. To learn more about declaring and configuring resources, see Resources.
Constructor syntax
new AiInferenceComputeTarget(name: string, args: AiInferenceComputeTargetArgs, opts?: CustomResourceOptions);
@overload
def AiInferenceComputeTarget(resource_name: str,
args: AiInferenceComputeTargetArgs,
opts: Optional[ResourceOptions] = None)
@overload
def AiInferenceComputeTarget(resource_name: str,
opts: Optional[ResourceOptions] = None,
endpoint: Optional[str] = None,
instance_type: Optional[str] = None,
storage: Optional[float] = None,
ai_inference_compute_target_id: Optional[str] = None,
description: Optional[str] = None,
name: Optional[str] = None)
func NewAiInferenceComputeTarget(ctx *Context, name string, args AiInferenceComputeTargetArgs, opts ...ResourceOption) (*AiInferenceComputeTarget, error)
public AiInferenceComputeTarget(string name, AiInferenceComputeTargetArgs args, CustomResourceOptions? opts = null)
public AiInferenceComputeTarget(String name, AiInferenceComputeTargetArgs args)
public AiInferenceComputeTarget(String name, AiInferenceComputeTargetArgs args, CustomResourceOptions options)
type: hsdp:AiInferenceComputeTarget
properties: # The arguments to resource properties.
options: # Bag of options to control resource's behavior.
Parameters
- name string
- The unique name of the resource.
- args AiInferenceComputeTargetArgs
- The arguments to resource properties.
- opts CustomResourceOptions
- Bag of options to control resource's behavior.
- resource_name str
- The unique name of the resource.
- args AiInferenceComputeTargetArgs
- The arguments to resource properties.
- opts ResourceOptions
- Bag of options to control resource's behavior.
- ctx Context
- Context object for the current deployment.
- name string
- The unique name of the resource.
- args AiInferenceComputeTargetArgs
- The arguments to resource properties.
- opts ResourceOption
- Bag of options to control resource's behavior.
- name string
- The unique name of the resource.
- args AiInferenceComputeTargetArgs
- The arguments to resource properties.
- opts CustomResourceOptions
- Bag of options to control resource's behavior.
- name String
- The unique name of the resource.
- args AiInferenceComputeTargetArgs
- The arguments to resource properties.
- options CustomResourceOptions
- Bag of options to control resource's behavior.
Constructor example
The following reference example uses placeholder values for all input properties.
var aiInferenceComputeTargetResource = new Hsdp.AiInferenceComputeTarget("aiInferenceComputeTargetResource", new()
{
Endpoint = "string",
InstanceType = "string",
Storage = 0,
AiInferenceComputeTargetId = "string",
Description = "string",
Name = "string",
});
example, err := hsdp.NewAiInferenceComputeTarget(ctx, "aiInferenceComputeTargetResource", &hsdp.AiInferenceComputeTargetArgs{
Endpoint: pulumi.String("string"),
InstanceType: pulumi.String("string"),
Storage: pulumi.Float64(0),
AiInferenceComputeTargetId: pulumi.String("string"),
Description: pulumi.String("string"),
Name: pulumi.String("string"),
})
var aiInferenceComputeTargetResource = new AiInferenceComputeTarget("aiInferenceComputeTargetResource", AiInferenceComputeTargetArgs.builder()
.endpoint("string")
.instanceType("string")
.storage(0)
.aiInferenceComputeTargetId("string")
.description("string")
.name("string")
.build());
ai_inference_compute_target_resource = hsdp.AiInferenceComputeTarget("aiInferenceComputeTargetResource",
endpoint="string",
instance_type="string",
storage=0,
ai_inference_compute_target_id="string",
description="string",
name="string")
const aiInferenceComputeTargetResource = new hsdp.AiInferenceComputeTarget("aiInferenceComputeTargetResource", {
endpoint: "string",
instanceType: "string",
storage: 0,
aiInferenceComputeTargetId: "string",
description: "string",
name: "string",
});
type: hsdp:AiInferenceComputeTarget
properties:
aiInferenceComputeTargetId: string
description: string
endpoint: string
instanceType: string
name: string
storage: 0
AiInferenceComputeTarget Resource Properties
To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.
Inputs
In Python, inputs that are objects can be passed either as argument classes or as dictionary literals.
The AiInferenceComputeTarget resource accepts the following input properties:
- Endpoint string - The AI Inference instance endpoint
- InstanceType string - The instance type to use. For available instance types for Inference, see SageMaker pricing
- Storage double - Additional storage to allocate (in GB). Default: 1
- AiInferenceComputeTargetId string
- Description string - Description of the Compute Target
- Name string - The name of the Compute Environment
- Endpoint string - The AI Inference instance endpoint
- InstanceType string - The instance type to use. For available instance types for Inference, see SageMaker pricing
- Storage float64 - Additional storage to allocate (in GB). Default: 1
- AiInferenceComputeTargetId string
- Description string - Description of the Compute Target
- Name string - The name of the Compute Environment
- endpoint String - The AI Inference instance endpoint
- instanceType String - The instance type to use. For available instance types for Inference, see SageMaker pricing
- storage Double - Additional storage to allocate (in GB). Default: 1
- aiInferenceComputeTargetId String
- description String - Description of the Compute Target
- name String - The name of the Compute Environment
- endpoint string - The AI Inference instance endpoint
- instanceType string - The instance type to use. For available instance types for Inference, see SageMaker pricing
- storage number - Additional storage to allocate (in GB). Default: 1
- aiInferenceComputeTargetId string
- description string - Description of the Compute Target
- name string - The name of the Compute Environment
- endpoint str - The AI Inference instance endpoint
- instance_type str - The instance type to use. For available instance types for Inference, see SageMaker pricing
- storage float - Additional storage to allocate (in GB). Default: 1
- ai_inference_compute_target_id str
- description str - Description of the Compute Target
- name str - The name of the Compute Environment
- endpoint String - The AI Inference instance endpoint
- instanceType String - The instance type to use. For available instance types for Inference, see SageMaker pricing
- storage Number - Additional storage to allocate (in GB). Default: 1
- aiInferenceComputeTargetId String
- description String - Description of the Compute Target
- name String - The name of the Compute Environment
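Since storage defaults to 1 GB when omitted, a target that only needs the default can leave it out entirely. A minimal Python sketch; the endpoint and instance type are placeholders:

import pulumi_hsdp as hsdp

# storage is omitted here, so the provider applies its default of 1 (GB).
small_target = hsdp.AiInferenceComputeTarget("small-target",
    endpoint="https://inference.example.com",  # placeholder endpoint (assumption)
    instance_type="ml.m5.large")               # placeholder instance type (assumption)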
Outputs
All input properties are implicitly available as output properties. Additionally, the AiInferenceComputeTarget resource produces the following output properties:
- created str - The date this Compute Environment was created
- created_by str - Who created the environment
- id str - The provider-assigned unique ID for this managed resource.
- is_factory bool - Whether this Compute Environment is a factory one
- reference str - The reference of this Compute Target
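Because output properties are pulumi.Output values, they are consumed with apply rather than read directly. A short Python sketch with placeholder inputs:

import pulumi
import pulumi_hsdp as hsdp

target = hsdp.AiInferenceComputeTarget("target",
    endpoint="https://inference.example.com",  # placeholder endpoint (assumption)
    instance_type="ml.p3.16xlarge")

# Derive a plain label from the is_factory output; the lambda runs once the value is known.
target_kind = target.is_factory.apply(lambda factory: "factory" if factory else "custom")
pulumi.export("target_kind", target_kind)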
Look up Existing AiInferenceComputeTarget Resource
Get an existing AiInferenceComputeTarget resource’s state with the given name, ID, and optional extra properties used to qualify the lookup.
public static get(name: string, id: Input<ID>, state?: AiInferenceComputeTargetState, opts?: CustomResourceOptions): AiInferenceComputeTarget
@staticmethod
def get(resource_name: str,
id: str,
opts: Optional[ResourceOptions] = None,
ai_inference_compute_target_id: Optional[str] = None,
created: Optional[str] = None,
created_by: Optional[str] = None,
description: Optional[str] = None,
endpoint: Optional[str] = None,
instance_type: Optional[str] = None,
is_factory: Optional[bool] = None,
name: Optional[str] = None,
reference: Optional[str] = None,
storage: Optional[float] = None) -> AiInferenceComputeTarget
func GetAiInferenceComputeTarget(ctx *Context, name string, id IDInput, state *AiInferenceComputeTargetState, opts ...ResourceOption) (*AiInferenceComputeTarget, error)
public static AiInferenceComputeTarget Get(string name, Input<string> id, AiInferenceComputeTargetState? state, CustomResourceOptions? opts = null)
public static AiInferenceComputeTarget get(String name, Output<String> id, AiInferenceComputeTargetState state, CustomResourceOptions options)
resources:
  _:
    type: hsdp:AiInferenceComputeTarget
    get:
      id: ${id}
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- resource_name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- AiInferenceComputeTargetId string
- Created string
- CreatedBy string
- Description string - Description of the Compute Target
- Endpoint string - The AI Inference instance endpoint
- InstanceType string - The instance type to use. For available instance types for Inference, see SageMaker pricing
- IsFactory bool
- Name string - The name of the Compute Environment
- Reference string
- Storage double - Additional storage to allocate (in GB). Default: 1
- AiInferenceComputeTargetId string
- Created string
- CreatedBy string
- Description string - Description of the Compute Target
- Endpoint string - The AI Inference instance endpoint
- InstanceType string - The instance type to use. For available instance types for Inference, see SageMaker pricing
- IsFactory bool
- Name string - The name of the Compute Environment
- Reference string
- Storage float64 - Additional storage to allocate (in GB). Default: 1
- aiInferenceComputeTargetId String
- created String
- createdBy String
- description String - Description of the Compute Target
- endpoint String - The AI Inference instance endpoint
- instanceType String - The instance type to use. For available instance types for Inference, see SageMaker pricing
- isFactory Boolean
- name String - The name of the Compute Environment
- reference String
- storage Double - Additional storage to allocate (in GB). Default: 1
- aiInferenceComputeTargetId string
- created string
- createdBy string
- description string - Description of the Compute Target
- endpoint string - The AI Inference instance endpoint
- instanceType string - The instance type to use. For available instance types for Inference, see SageMaker pricing
- isFactory boolean
- name string - The name of the Compute Environment
- reference string
- storage number - Additional storage to allocate (in GB). Default: 1
- ai_inference_compute_target_id str
- created str
- created_by str
- description str - Description of the Compute Target
- endpoint str - The AI Inference instance endpoint
- instance_type str - The instance type to use. For available instance types for Inference, see SageMaker pricing
- is_factory bool
- name str - The name of the Compute Environment
- reference str
- storage float - Additional storage to allocate (in GB). Default: 1
- aiInferenceComputeTargetId String
- created String
- createdBy String
- description String - Description of the Compute Target
- endpoint String - The AI Inference instance endpoint
- instanceType String - The instance type to use. For available instance types for Inference, see SageMaker pricing
- isFactory Boolean
- name String - The name of the Compute Environment
- reference String
- storage Number - Additional storage to allocate (in GB). Default: 1
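For example, in Python the static get adopts an existing Compute Target into the program by its GUID; the GUID below is a placeholder:

import pulumi
import pulumi_hsdp as hsdp

# Look up an existing Compute Target by its provider-assigned GUID (placeholder value).
existing = hsdp.AiInferenceComputeTarget.get("existing-target", "a-guid")
pulumi.export("existing_endpoint", existing.endpoint)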
Import
An existing Compute Environment can be imported using `hsdp_ai_inference_compute_target`, e.g.
$ pulumi import hsdp:index/aiInferenceComputeTarget:AiInferenceComputeTarget target a-guid
To learn more about importing existing cloud resources, see Importing resources.
Package Details
- Repository - hsdp philips-software/terraform-provider-hsdp
- License
- Notes - This Pulumi package is based on the hsdp Terraform Provider.