We recommend using Azure Native.
published on Monday, Mar 9, 2026 by Pulumi
Manages a Transform.
Example Usage
using Pulumi;
using Azure = Pulumi.Azure;
// Example stack: creates a Media Services account backed by a GRS storage
// account, then defines a Transform with a single built-in preset output.
class MyStack : Stack
{
public MyStack()
{
// Resource group that contains every resource in this example.
var exampleResourceGroup = new Azure.Core.ResourceGroup("exampleResourceGroup", new Azure.Core.ResourceGroupArgs
{
Location = "West Europe",
});
// Storage account used by the Media Services account below.
var exampleAccount = new Azure.Storage.Account("exampleAccount", new Azure.Storage.AccountArgs
{
ResourceGroupName = exampleResourceGroup.Name,
Location = exampleResourceGroup.Location,
AccountTier = "Standard",
AccountReplicationType = "GRS",
});
// Media Services account; IsPrimary marks the storage account above
// as the account's primary storage.
var exampleServiceAccount = new Azure.Media.ServiceAccount("exampleServiceAccount", new Azure.Media.ServiceAccountArgs
{
Location = exampleResourceGroup.Location,
ResourceGroupName = exampleResourceGroup.Name,
StorageAccounts =
{
new Azure.Media.Inputs.ServiceAccountStorageAccountArgs
{
Id = exampleAccount.Id,
IsPrimary = true,
},
},
});
// The Transform: one output that encodes audio with the built-in
// AACGoodQualityAudio preset and continues the job if the output fails.
var exampleTransform = new Azure.Media.Transform("exampleTransform", new Azure.Media.TransformArgs
{
ResourceGroupName = exampleResourceGroup.Name,
MediaServicesAccountName = exampleServiceAccount.Name,
Description = "My transform description",
Outputs =
{
new Azure.Media.Inputs.TransformOutputArgs
{
RelativePriority = "Normal",
OnErrorAction = "ContinueJob",
BuiltinPreset = new Azure.Media.Inputs.TransformOutputBuiltinPresetArgs
{
PresetName = "AACGoodQualityAudio",
},
},
},
});
}
}
package main
import (
"github.com/pulumi/pulumi-azure/sdk/v4/go/azure/core"
"github.com/pulumi/pulumi-azure/sdk/v4/go/azure/media"
"github.com/pulumi/pulumi-azure/sdk/v4/go/azure/storage"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
// main provisions a Media Services Transform with a single built-in preset
// output (AACGoodQualityAudio) inside a freshly created resource group.
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
// Resource group that contains every resource in this example.
exampleResourceGroup, err := core.NewResourceGroup(ctx, "exampleResourceGroup", &core.ResourceGroupArgs{
Location: pulumi.String("West Europe"),
})
if err != nil {
return err
}
// Storage account used by the Media Services account below.
exampleAccount, err := storage.NewAccount(ctx, "exampleAccount", &storage.AccountArgs{
ResourceGroupName: exampleResourceGroup.Name,
Location: exampleResourceGroup.Location,
AccountTier: pulumi.String("Standard"),
AccountReplicationType: pulumi.String("GRS"),
})
if err != nil {
return err
}
// Media Services account; IsPrimary marks the storage account above
// as the account's primary storage.
exampleServiceAccount, err := media.NewServiceAccount(ctx, "exampleServiceAccount", &media.ServiceAccountArgs{
Location: exampleResourceGroup.Location,
ResourceGroupName: exampleResourceGroup.Name,
StorageAccounts: media.ServiceAccountStorageAccountArray{
&media.ServiceAccountStorageAccountArgs{
Id: exampleAccount.ID(),
IsPrimary: pulumi.Bool(true),
},
},
})
if err != nil {
return err
}
// The Transform: one output that encodes audio with the built-in
// AACGoodQualityAudio preset and continues the job if the output fails.
_, err = media.NewTransform(ctx, "exampleTransform", &media.TransformArgs{
ResourceGroupName: exampleResourceGroup.Name,
MediaServicesAccountName: exampleServiceAccount.Name,
Description: pulumi.String("My transform description"),
Outputs: media.TransformOutputArray{
media.TransformOutputArgs{
RelativePriority: pulumi.String("Normal"),
OnErrorAction: pulumi.String("ContinueJob"),
BuiltinPreset: &media.TransformOutputBuiltinPresetArgs{
PresetName: pulumi.String("AACGoodQualityAudio"),
},
},
},
})
if err != nil {
return err
}
return nil
})
}
Example coming soon!
import * as pulumi from "@pulumi/pulumi";
import * as azure from "@pulumi/azure";
// Example: a Media Services Transform with a single built-in preset output
// (AACGoodQualityAudio) inside a freshly created resource group.
const exampleResourceGroup = new azure.core.ResourceGroup("exampleResourceGroup", {location: "West Europe"});
// Storage account used by the Media Services account below.
const exampleAccount = new azure.storage.Account("exampleAccount", {
    resourceGroupName: exampleResourceGroup.name,
    location: exampleResourceGroup.location,
    accountTier: "Standard",
    accountReplicationType: "GRS",
});
// Media Services account; isPrimary marks the storage account above
// as the account's primary storage.
const exampleServiceAccount = new azure.media.ServiceAccount("exampleServiceAccount", {
    location: exampleResourceGroup.location,
    resourceGroupName: exampleResourceGroup.name,
    storageAccounts: [{
        id: exampleAccount.id,
        isPrimary: true,
    }],
});
// The Transform: one output that encodes audio with the built-in
// AACGoodQualityAudio preset and continues the job if the output fails.
const exampleTransform = new azure.media.Transform("exampleTransform", {
    resourceGroupName: exampleResourceGroup.name,
    mediaServicesAccountName: exampleServiceAccount.name,
    description: "My transform description",
    outputs: [{
        relativePriority: "Normal",
        onErrorAction: "ContinueJob",
        builtinPreset: {
            presetName: "AACGoodQualityAudio",
        },
    }],
});
import pulumi
import pulumi_azure as azure
# Example: a Media Services Transform with a single built-in preset output
# (AACGoodQualityAudio) inside a freshly created resource group.
example_resource_group = azure.core.ResourceGroup("exampleResourceGroup", location="West Europe")
# Storage account used by the Media Services account below.
example_account = azure.storage.Account("exampleAccount",
    resource_group_name=example_resource_group.name,
    location=example_resource_group.location,
    account_tier="Standard",
    account_replication_type="GRS")
# Media Services account; is_primary marks the storage account above
# as the account's primary storage.
example_service_account = azure.media.ServiceAccount("exampleServiceAccount",
    location=example_resource_group.location,
    resource_group_name=example_resource_group.name,
    storage_accounts=[azure.media.ServiceAccountStorageAccountArgs(
        id=example_account.id,
        is_primary=True,
    )])
# The Transform: one output that encodes audio with the built-in
# AACGoodQualityAudio preset and continues the job if the output fails.
example_transform = azure.media.Transform("exampleTransform",
    resource_group_name=example_resource_group.name,
    media_services_account_name=example_service_account.name,
    description="My transform description",
    outputs=[azure.media.TransformOutputArgs(
        relative_priority="Normal",
        on_error_action="ContinueJob",
        builtin_preset=azure.media.TransformOutputBuiltinPresetArgs(
            preset_name="AACGoodQualityAudio",
        ),
    )])
Example coming soon!
With Multiple Outputs
using Pulumi;
using Azure = Pulumi.Azure;
// Example stack: a Transform with three outputs — a built-in audio preset,
// an audio analyzer preset, and a face detector preset.
class MyStack : Stack
{
public MyStack()
{
// Resource group that contains every resource in this example.
var exampleResourceGroup = new Azure.Core.ResourceGroup("exampleResourceGroup", new Azure.Core.ResourceGroupArgs
{
Location = "West Europe",
});
// Storage account used by the Media Services account below.
var exampleAccount = new Azure.Storage.Account("exampleAccount", new Azure.Storage.AccountArgs
{
ResourceGroupName = exampleResourceGroup.Name,
Location = exampleResourceGroup.Location,
AccountTier = "Standard",
AccountReplicationType = "GRS",
});
// Media Services account; IsPrimary marks the storage account above
// as the account's primary storage.
var exampleServiceAccount = new Azure.Media.ServiceAccount("exampleServiceAccount", new Azure.Media.ServiceAccountArgs
{
Location = exampleResourceGroup.Location,
ResourceGroupName = exampleResourceGroup.Name,
StorageAccounts =
{
new Azure.Media.Inputs.ServiceAccountStorageAccountArgs
{
Id = exampleAccount.Id,
IsPrimary = true,
},
},
});
// The Transform with three independent outputs. Each output declares its
// own preset, priority, and error behavior.
var exampleTransform = new Azure.Media.Transform("exampleTransform", new Azure.Media.TransformArgs
{
ResourceGroupName = exampleResourceGroup.Name,
MediaServicesAccountName = exampleServiceAccount.Name,
Description = "My transform description",
Outputs =
{
// Output 1: built-in AAC audio encoding preset.
new Azure.Media.Inputs.TransformOutputArgs
{
RelativePriority = "Normal",
OnErrorAction = "ContinueJob",
BuiltinPreset = new Azure.Media.Inputs.TransformOutputBuiltinPresetArgs
{
PresetName = "AACGoodQualityAudio",
},
},
// Output 2: basic audio analysis for US English.
new Azure.Media.Inputs.TransformOutputArgs
{
RelativePriority = "Low",
OnErrorAction = "ContinueJob",
AudioAnalyzerPreset = new Azure.Media.Inputs.TransformOutputAudioAnalyzerPresetArgs
{
AudioLanguage = "en-US",
AudioAnalysisMode = "Basic",
},
},
// Output 3: face detection; a failure here stops the whole job.
new Azure.Media.Inputs.TransformOutputArgs
{
RelativePriority = "Low",
OnErrorAction = "StopProcessingJob",
FaceDetectorPreset = new Azure.Media.Inputs.TransformOutputFaceDetectorPresetArgs
{
AnalysisResolution = "StandardDefinition",
},
},
},
});
}
}
package main
import (
"github.com/pulumi/pulumi-azure/sdk/v4/go/azure/core"
"github.com/pulumi/pulumi-azure/sdk/v4/go/azure/media"
"github.com/pulumi/pulumi-azure/sdk/v4/go/azure/storage"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
// main provisions a Transform with three outputs — a built-in audio preset,
// an audio analyzer preset, and a face detector preset.
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
// Resource group that contains every resource in this example.
exampleResourceGroup, err := core.NewResourceGroup(ctx, "exampleResourceGroup", &core.ResourceGroupArgs{
Location: pulumi.String("West Europe"),
})
if err != nil {
return err
}
// Storage account used by the Media Services account below.
exampleAccount, err := storage.NewAccount(ctx, "exampleAccount", &storage.AccountArgs{
ResourceGroupName: exampleResourceGroup.Name,
Location: exampleResourceGroup.Location,
AccountTier: pulumi.String("Standard"),
AccountReplicationType: pulumi.String("GRS"),
})
if err != nil {
return err
}
// Media Services account; IsPrimary marks the storage account above
// as the account's primary storage.
exampleServiceAccount, err := media.NewServiceAccount(ctx, "exampleServiceAccount", &media.ServiceAccountArgs{
Location: exampleResourceGroup.Location,
ResourceGroupName: exampleResourceGroup.Name,
StorageAccounts: media.ServiceAccountStorageAccountArray{
&media.ServiceAccountStorageAccountArgs{
Id: exampleAccount.ID(),
IsPrimary: pulumi.Bool(true),
},
},
})
if err != nil {
return err
}
// The Transform with three independent outputs. Each output declares its
// own preset, priority, and error behavior.
_, err = media.NewTransform(ctx, "exampleTransform", &media.TransformArgs{
ResourceGroupName: exampleResourceGroup.Name,
MediaServicesAccountName: exampleServiceAccount.Name,
Description: pulumi.String("My transform description"),
Outputs: media.TransformOutputArray{
// Output 1: built-in AAC audio encoding preset.
media.TransformOutputArgs{
RelativePriority: pulumi.String("Normal"),
OnErrorAction: pulumi.String("ContinueJob"),
BuiltinPreset: &media.TransformOutputBuiltinPresetArgs{
PresetName: pulumi.String("AACGoodQualityAudio"),
},
},
// Output 2: basic audio analysis for US English.
media.TransformOutputArgs{
RelativePriority: pulumi.String("Low"),
OnErrorAction: pulumi.String("ContinueJob"),
AudioAnalyzerPreset: &media.TransformOutputAudioAnalyzerPresetArgs{
AudioLanguage: pulumi.String("en-US"),
AudioAnalysisMode: pulumi.String("Basic"),
},
},
// Output 3: face detection; a failure here stops the whole job.
media.TransformOutputArgs{
RelativePriority: pulumi.String("Low"),
OnErrorAction: pulumi.String("StopProcessingJob"),
FaceDetectorPreset: &media.TransformOutputFaceDetectorPresetArgs{
AnalysisResolution: pulumi.String("StandardDefinition"),
},
},
},
})
if err != nil {
return err
}
return nil
})
}
Example coming soon!
import * as pulumi from "@pulumi/pulumi";
import * as azure from "@pulumi/azure";
// Example: a Transform with three outputs — a built-in audio preset, an
// audio analyzer preset, and a face detector preset.
const exampleResourceGroup = new azure.core.ResourceGroup("exampleResourceGroup", {location: "West Europe"});
// Storage account used by the Media Services account below.
const exampleAccount = new azure.storage.Account("exampleAccount", {
    resourceGroupName: exampleResourceGroup.name,
    location: exampleResourceGroup.location,
    accountTier: "Standard",
    accountReplicationType: "GRS",
});
// Media Services account; isPrimary marks the storage account above
// as the account's primary storage.
const exampleServiceAccount = new azure.media.ServiceAccount("exampleServiceAccount", {
    location: exampleResourceGroup.location,
    resourceGroupName: exampleResourceGroup.name,
    storageAccounts: [{
        id: exampleAccount.id,
        isPrimary: true,
    }],
});
// The Transform with three independent outputs. Each output declares its
// own preset, priority, and error behavior.
const exampleTransform = new azure.media.Transform("exampleTransform", {
    resourceGroupName: exampleResourceGroup.name,
    mediaServicesAccountName: exampleServiceAccount.name,
    description: "My transform description",
    outputs: [
        // Output 1: built-in AAC audio encoding preset.
        {
            relativePriority: "Normal",
            onErrorAction: "ContinueJob",
            builtinPreset: {
                presetName: "AACGoodQualityAudio",
            },
        },
        // Output 2: basic audio analysis for US English.
        {
            relativePriority: "Low",
            onErrorAction: "ContinueJob",
            audioAnalyzerPreset: {
                audioLanguage: "en-US",
                audioAnalysisMode: "Basic",
            },
        },
        // Output 3: face detection; a failure here stops the whole job.
        {
            relativePriority: "Low",
            onErrorAction: "StopProcessingJob",
            faceDetectorPreset: {
                analysisResolution: "StandardDefinition",
            },
        },
    ],
});
import pulumi
import pulumi_azure as azure
# Example: a Transform with three outputs — a built-in audio preset, an
# audio analyzer preset, and a face detector preset.
example_resource_group = azure.core.ResourceGroup("exampleResourceGroup", location="West Europe")
# Storage account used by the Media Services account below.
example_account = azure.storage.Account("exampleAccount",
    resource_group_name=example_resource_group.name,
    location=example_resource_group.location,
    account_tier="Standard",
    account_replication_type="GRS")
# Media Services account; is_primary marks the storage account above
# as the account's primary storage.
example_service_account = azure.media.ServiceAccount("exampleServiceAccount",
    location=example_resource_group.location,
    resource_group_name=example_resource_group.name,
    storage_accounts=[azure.media.ServiceAccountStorageAccountArgs(
        id=example_account.id,
        is_primary=True,
    )])
# The Transform with three independent outputs. Each output declares its
# own preset, priority, and error behavior.
example_transform = azure.media.Transform("exampleTransform",
    resource_group_name=example_resource_group.name,
    media_services_account_name=example_service_account.name,
    description="My transform description",
    outputs=[
        # Output 1: built-in AAC audio encoding preset.
        azure.media.TransformOutputArgs(
            relative_priority="Normal",
            on_error_action="ContinueJob",
            builtin_preset=azure.media.TransformOutputBuiltinPresetArgs(
                preset_name="AACGoodQualityAudio",
            ),
        ),
        # Output 2: basic audio analysis for US English.
        azure.media.TransformOutputArgs(
            relative_priority="Low",
            on_error_action="ContinueJob",
            audio_analyzer_preset=azure.media.TransformOutputAudioAnalyzerPresetArgs(
                audio_language="en-US",
                audio_analysis_mode="Basic",
            ),
        ),
        # Output 3: face detection; a failure here stops the whole job.
        azure.media.TransformOutputArgs(
            relative_priority="Low",
            on_error_action="StopProcessingJob",
            face_detector_preset=azure.media.TransformOutputFaceDetectorPresetArgs(
                analysis_resolution="StandardDefinition",
            ),
        ),
    ])
Example coming soon!
Create Transform Resource
Resources are created with functions called constructors. To learn more about declaring and configuring resources, see Resources.
Constructor syntax
new Transform(name: string, args: TransformArgs, opts?: CustomResourceOptions);@overload
def Transform(resource_name: str,
args: TransformArgs,
opts: Optional[ResourceOptions] = None)
@overload
def Transform(resource_name: str,
opts: Optional[ResourceOptions] = None,
media_services_account_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
description: Optional[str] = None,
name: Optional[str] = None,
outputs: Optional[Sequence[TransformOutputArgs]] = None)func NewTransform(ctx *Context, name string, args TransformArgs, opts ...ResourceOption) (*Transform, error)public Transform(string name, TransformArgs args, CustomResourceOptions? opts = null)
public Transform(String name, TransformArgs args)
public Transform(String name, TransformArgs args, CustomResourceOptions options)
type: azure:media:Transform
properties: # The arguments to resource properties.
options: # Bag of options to control resource's behavior.
Parameters
- name string
- The unique name of the resource.
- args TransformArgs
- The arguments to resource properties.
- opts CustomResourceOptions
- Bag of options to control resource's behavior.
- resource_name str
- The unique name of the resource.
- args TransformArgs
- The arguments to resource properties.
- opts ResourceOptions
- Bag of options to control resource's behavior.
- ctx Context
- Context object for the current deployment.
- name string
- The unique name of the resource.
- args TransformArgs
- The arguments to resource properties.
- opts ResourceOption
- Bag of options to control resource's behavior.
- name string
- The unique name of the resource.
- args TransformArgs
- The arguments to resource properties.
- opts CustomResourceOptions
- Bag of options to control resource's behavior.
- name String
- The unique name of the resource.
- args TransformArgs
- The arguments to resource properties.
- options CustomResourceOptions
- Bag of options to control resource's behavior.
Constructor example
The following reference example uses placeholder values for all input properties.
var transformResource = new Azure.Media.Transform("transformResource", new()
{
MediaServicesAccountName = "string",
ResourceGroupName = "string",
Description = "string",
Name = "string",
Outputs = new[]
{
new Azure.Media.Inputs.TransformOutputArgs
{
AudioAnalyzerPreset = new Azure.Media.Inputs.TransformOutputAudioAnalyzerPresetArgs
{
AudioAnalysisMode = "string",
AudioLanguage = "string",
},
BuiltinPreset = new Azure.Media.Inputs.TransformOutputBuiltinPresetArgs
{
PresetName = "string",
},
FaceDetectorPreset = new Azure.Media.Inputs.TransformOutputFaceDetectorPresetArgs
{
AnalysisResolution = "string",
},
OnErrorAction = "string",
RelativePriority = "string",
VideoAnalyzerPreset = new Azure.Media.Inputs.TransformOutputVideoAnalyzerPresetArgs
{
AudioAnalysisMode = "string",
AudioLanguage = "string",
InsightsType = "string",
},
},
},
});
example, err := media.NewTransform(ctx, "transformResource", &media.TransformArgs{
MediaServicesAccountName: pulumi.String("string"),
ResourceGroupName: pulumi.String("string"),
Description: pulumi.String("string"),
Name: pulumi.String("string"),
Outputs: media.TransformOutputTypeArray{
&media.TransformOutputTypeArgs{
AudioAnalyzerPreset: &media.TransformOutputAudioAnalyzerPresetArgs{
AudioAnalysisMode: pulumi.String("string"),
AudioLanguage: pulumi.String("string"),
},
BuiltinPreset: &media.TransformOutputBuiltinPresetArgs{
PresetName: pulumi.String("string"),
},
FaceDetectorPreset: &media.TransformOutputFaceDetectorPresetArgs{
AnalysisResolution: pulumi.String("string"),
},
OnErrorAction: pulumi.String("string"),
RelativePriority: pulumi.String("string"),
VideoAnalyzerPreset: &media.TransformOutputVideoAnalyzerPresetArgs{
AudioAnalysisMode: pulumi.String("string"),
AudioLanguage: pulumi.String("string"),
InsightsType: pulumi.String("string"),
},
},
},
})
var transformResource = new Transform("transformResource", TransformArgs.builder()
.mediaServicesAccountName("string")
.resourceGroupName("string")
.description("string")
.name("string")
.outputs(TransformOutputArgs.builder()
.audioAnalyzerPreset(TransformOutputAudioAnalyzerPresetArgs.builder()
.audioAnalysisMode("string")
.audioLanguage("string")
.build())
.builtinPreset(TransformOutputBuiltinPresetArgs.builder()
.presetName("string")
.build())
.faceDetectorPreset(TransformOutputFaceDetectorPresetArgs.builder()
.analysisResolution("string")
.build())
.onErrorAction("string")
.relativePriority("string")
.videoAnalyzerPreset(TransformOutputVideoAnalyzerPresetArgs.builder()
.audioAnalysisMode("string")
.audioLanguage("string")
.insightsType("string")
.build())
.build())
.build());
# Reference placeholder: every input property shown with a "string" value;
# substitute real values before use.
transform_resource = azure.media.Transform("transformResource",
    media_services_account_name="string",
    resource_group_name="string",
    description="string",
    name="string",
    outputs=[{
        "audio_analyzer_preset": {
            "audio_analysis_mode": "string",
            "audio_language": "string",
        },
        "builtin_preset": {
            "preset_name": "string",
        },
        "face_detector_preset": {
            "analysis_resolution": "string",
        },
        "on_error_action": "string",
        "relative_priority": "string",
        "video_analyzer_preset": {
            "audio_analysis_mode": "string",
            "audio_language": "string",
            "insights_type": "string",
        },
    }])
// Reference placeholder: every input property shown with a "string" value;
// substitute real values before use.
const transformResource = new azure.media.Transform("transformResource", {
    mediaServicesAccountName: "string",
    resourceGroupName: "string",
    description: "string",
    name: "string",
    outputs: [{
        audioAnalyzerPreset: {
            audioAnalysisMode: "string",
            audioLanguage: "string",
        },
        builtinPreset: {
            presetName: "string",
        },
        faceDetectorPreset: {
            analysisResolution: "string",
        },
        onErrorAction: "string",
        relativePriority: "string",
        videoAnalyzerPreset: {
            audioAnalysisMode: "string",
            audioLanguage: "string",
            insightsType: "string",
        },
    }],
});
type: azure:media:Transform
properties:
description: string
mediaServicesAccountName: string
name: string
outputs:
- audioAnalyzerPreset:
audioAnalysisMode: string
audioLanguage: string
builtinPreset:
presetName: string
faceDetectorPreset:
analysisResolution: string
onErrorAction: string
relativePriority: string
videoAnalyzerPreset:
audioAnalysisMode: string
audioLanguage: string
insightsType: string
resourceGroupName: string
Transform Resource Properties
To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.
Inputs
In Python, inputs that are objects can be passed either as argument classes or as dictionary literals.
The Transform resource accepts the following input properties:
- Media
Services stringAccount Name - The Media Services account name. Changing this forces a new Transform to be created.
- Resource
Group stringName - The name of the Resource Group where the Transform should exist. Changing this forces a new Transform to be created.
- Description string
- An optional verbose description of the Transform.
- Name string
- The name which should be used for this Transform. Changing this forces a new Transform to be created.
- Outputs
List<Transform
Output> - One or more
outputblocks as defined below. At least oneoutputmust be defined.
- Media
Services stringAccount Name - The Media Services account name. Changing this forces a new Transform to be created.
- Resource
Group stringName - The name of the Resource Group where the Transform should exist. Changing this forces a new Transform to be created.
- Description string
- An optional verbose description of the Transform.
- Name string
- The name which should be used for this Transform. Changing this forces a new Transform to be created.
- Outputs
[]Transform
Output Type Args - One or more
outputblocks as defined below. At least oneoutputmust be defined.
- media
Services StringAccount Name - The Media Services account name. Changing this forces a new Transform to be created.
- resource
Group StringName - The name of the Resource Group where the Transform should exist. Changing this forces a new Transform to be created.
- description String
- An optional verbose description of the Transform.
- name String
- The name which should be used for this Transform. Changing this forces a new Transform to be created.
- outputs
List<Transform
Output> - One or more
outputblocks as defined below. At least oneoutputmust be defined.
- media
Services stringAccount Name - The Media Services account name. Changing this forces a new Transform to be created.
- resource
Group stringName - The name of the Resource Group where the Transform should exist. Changing this forces a new Transform to be created.
- description string
- An optional verbose description of the Transform.
- name string
- The name which should be used for this Transform. Changing this forces a new Transform to be created.
- outputs
Transform
Output[] - One or more
outputblocks as defined below. At least oneoutputmust be defined.
- media_
services_ straccount_ name - The Media Services account name. Changing this forces a new Transform to be created.
- resource_
group_ strname - The name of the Resource Group where the Transform should exist. Changing this forces a new Transform to be created.
- description str
- An optional verbose description of the Transform.
- name str
- The name which should be used for this Transform. Changing this forces a new Transform to be created.
- outputs
Sequence[Transform
Output Args] - One or more
outputblocks as defined below. At least oneoutputmust be defined.
- media
Services StringAccount Name - The Media Services account name. Changing this forces a new Transform to be created.
- resource
Group StringName - The name of the Resource Group where the Transform should exist. Changing this forces a new Transform to be created.
- description String
- An optional verbose description of the Transform.
- name String
- The name which should be used for this Transform. Changing this forces a new Transform to be created.
- outputs List<Property Map>
- One or more
outputblocks as defined below. At least oneoutputmust be defined.
Outputs
All input properties are implicitly available as output properties. Additionally, the Transform resource produces the following output properties:
- Id string
- The provider-assigned unique ID for this managed resource.
- Id string
- The provider-assigned unique ID for this managed resource.
- id String
- The provider-assigned unique ID for this managed resource.
- id string
- The provider-assigned unique ID for this managed resource.
- id str
- The provider-assigned unique ID for this managed resource.
- id String
- The provider-assigned unique ID for this managed resource.
Look up Existing Transform Resource
Get an existing Transform resource’s state with the given name, ID, and optional extra properties used to qualify the lookup.
public static get(name: string, id: Input<ID>, state?: TransformState, opts?: CustomResourceOptions): Transform@staticmethod
def get(resource_name: str,
id: str,
opts: Optional[ResourceOptions] = None,
description: Optional[str] = None,
media_services_account_name: Optional[str] = None,
name: Optional[str] = None,
outputs: Optional[Sequence[TransformOutputArgs]] = None,
resource_group_name: Optional[str] = None) -> Transformfunc GetTransform(ctx *Context, name string, id IDInput, state *TransformState, opts ...ResourceOption) (*Transform, error)public static Transform Get(string name, Input<string> id, TransformState? state, CustomResourceOptions? opts = null)public static Transform get(String name, Output<String> id, TransformState state, CustomResourceOptions options)resources: _: type: azure:media:Transform get: id: ${id}- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- resource_name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- Description string
- An optional verbose description of the Transform.
- Media
Services stringAccount Name - The Media Services account name. Changing this forces a new Transform to be created.
- Name string
- The name which should be used for this Transform. Changing this forces a new Transform to be created.
- Outputs
List<Transform
Output> - One or more
outputblocks as defined below. At least oneoutputmust be defined. - Resource
Group stringName - The name of the Resource Group where the Transform should exist. Changing this forces a new Transform to be created.
- Description string
- An optional verbose description of the Transform.
- Media
Services stringAccount Name - The Media Services account name. Changing this forces a new Transform to be created.
- Name string
- The name which should be used for this Transform. Changing this forces a new Transform to be created.
- Outputs
[]Transform
Output Type Args - One or more
outputblocks as defined below. At least oneoutputmust be defined. - Resource
Group stringName - The name of the Resource Group where the Transform should exist. Changing this forces a new Transform to be created.
- description String
- An optional verbose description of the Transform.
- media
Services StringAccount Name - The Media Services account name. Changing this forces a new Transform to be created.
- name String
- The name which should be used for this Transform. Changing this forces a new Transform to be created.
- outputs
List<Transform
Output> - One or more
outputblocks as defined below. At least oneoutputmust be defined. - resource
Group StringName - The name of the Resource Group where the Transform should exist. Changing this forces a new Transform to be created.
- description string
- An optional verbose description of the Transform.
- media
Services stringAccount Name - The Media Services account name. Changing this forces a new Transform to be created.
- name string
- The name which should be used for this Transform. Changing this forces a new Transform to be created.
- outputs
Transform
Output[] - One or more
outputblocks as defined below. At least oneoutputmust be defined. - resource
Group stringName - The name of the Resource Group where the Transform should exist. Changing this forces a new Transform to be created.
- description str
- An optional verbose description of the Transform.
- media_
services_ straccount_ name - The Media Services account name. Changing this forces a new Transform to be created.
- name str
- The name which should be used for this Transform. Changing this forces a new Transform to be created.
- outputs
Sequence[Transform
Output Args] - One or more
outputblocks as defined below. At least oneoutputmust be defined. - resource_
group_ strname - The name of the Resource Group where the Transform should exist. Changing this forces a new Transform to be created.
- description String
- An optional verbose description of the Transform.
- media
Services StringAccount Name - The Media Services account name. Changing this forces a new Transform to be created.
- name String
- The name which should be used for this Transform. Changing this forces a new Transform to be created.
- outputs List<Property Map>
- One or more
outputblocks as defined below. At least oneoutputmust be defined. - resource
Group StringName - The name of the Resource Group where the Transform should exist. Changing this forces a new Transform to be created.
Supporting Types
TransformOutput, TransformOutputArgs
- Audio
Analyzer TransformPreset Output Audio Analyzer Preset - A
audio_analyzer_presetblock as defined below. - Builtin
Preset TransformOutput Builtin Preset - A
builtin_presetblock as defined below. - Face
Detector TransformPreset Output Face Detector Preset - A
face_detector_presetblock as defined below. - On
Error stringAction - A Transform can define more than one output. This property defines what the service should do when one output fails - either continue to produce other outputs, or stop the other outputs. The overall Job state will not reflect failures of outputs that are specified with
ContinueJob. Possible values are StopProcessingJob or ContinueJob. - Relative
Priority string - Sets the relative priority of the TransformOutputs within a Transform. This sets the priority that the service uses for processing Transform Outputs. Possible values are
High, Normal or Low. - Video
Analyzer TransformPreset Output Video Analyzer Preset - A
video_analyzer_presetblock as defined below.
- Audio
Analyzer TransformPreset Output Audio Analyzer Preset - A
audio_analyzer_presetblock as defined below. - Builtin
Preset TransformOutput Builtin Preset - A
builtin_presetblock as defined below. - Face
Detector TransformPreset Output Face Detector Preset - A
face_detector_presetblock as defined below. - On
Error stringAction - A Transform can define more than one output. This property defines what the service should do when one output fails - either continue to produce other outputs, or stop the other outputs. The overall Job state will not reflect failures of outputs that are specified with
ContinueJob. Possible values are StopProcessingJob or ContinueJob. - Relative
Priority string - Sets the relative priority of the TransformOutputs within a Transform. This sets the priority that the service uses for processing Transform Outputs. Possible values are
High, Normal or Low. - Video
Analyzer TransformPreset Output Video Analyzer Preset - A
video_analyzer_presetblock as defined below.
- audio
Analyzer TransformPreset Output Audio Analyzer Preset - A
audio_analyzer_presetblock as defined below. - builtin
Preset TransformOutput Builtin Preset - A
builtin_presetblock as defined below. - face
Detector TransformPreset Output Face Detector Preset - A
face_detector_presetblock as defined below. - on
Error StringAction - A Transform can define more than one output. This property defines what the service should do when one output fails - either continue to produce other outputs, or stop the other outputs. The overall Job state will not reflect failures of outputs that are specified with
ContinueJob. Possible values are StopProcessingJob or ContinueJob. - relative
Priority String - Sets the relative priority of the TransformOutputs within a Transform. This sets the priority that the service uses for processing Transform Outputs. Possible values are
High, Normal or Low. - video
Analyzer TransformPreset Output Video Analyzer Preset - A
video_analyzer_presetblock as defined below.
- audio
Analyzer TransformPreset Output Audio Analyzer Preset - A
audio_analyzer_presetblock as defined below. - builtin
Preset TransformOutput Builtin Preset - A
builtin_presetblock as defined below. - face
Detector TransformPreset Output Face Detector Preset - A
face_detector_presetblock as defined below. - on
Error stringAction - A Transform can define more than one output. This property defines what the service should do when one output fails - either continue to produce other outputs, or stop the other outputs. The overall Job state will not reflect failures of outputs that are specified with
ContinueJob. Possible values are StopProcessingJob or ContinueJob. - relative
Priority string - Sets the relative priority of the TransformOutputs within a Transform. This sets the priority that the service uses for processing Transform Outputs. Possible values are
High, Normal or Low. - video
Analyzer TransformPreset Output Video Analyzer Preset - A
video_analyzer_presetblock as defined below.
- audio_analyzer_preset TransformOutputAudioAnalyzerPreset - A audio_analyzer_preset block as defined below. - builtin_preset TransformOutputBuiltinPreset - A builtin_preset block as defined below. - face_detector_preset TransformOutputFaceDetectorPreset - A face_detector_preset block as defined below. - on_error_action str - A Transform can define more than one output. This property defines what the service should do when one output fails - either continue to produce other outputs, or stop the other outputs. The overall Job state will not reflect failures of outputs that are specified with ContinueJob. Possible values are StopProcessingJob or ContinueJob. - relative_priority str - Sets the relative priority of the TransformOutputs within a Transform. This sets the priority that the service uses for processing Transform Outputs. Possible values are High, Normal or Low. - video_analyzer_preset TransformOutputVideoAnalyzerPreset - A video_analyzer_preset block as defined below.
- audioAnalyzerPreset Property Map - A audio_analyzer_preset block as defined below. - builtinPreset Property Map - A builtin_preset block as defined below. - faceDetectorPreset Property Map - A face_detector_preset block as defined below. - onErrorAction String - A Transform can define more than one output. This property defines what the service should do when one output fails - either continue to produce other outputs, or stop the other outputs. The overall Job state will not reflect failures of outputs that are specified with ContinueJob. Possible values are StopProcessingJob or ContinueJob. - relativePriority String - Sets the relative priority of the TransformOutputs within a Transform. This sets the priority that the service uses for processing Transform Outputs. Possible values are High, Normal or Low. - videoAnalyzerPreset Property Map - A video_analyzer_preset block as defined below.
TransformOutputAudioAnalyzerPreset, TransformOutputAudioAnalyzerPresetArgs
- AudioAnalysisMode string - Possible values are Basic or Standard. Determines the set of audio analysis operations to be performed. - AudioLanguage string - The language for the audio payload in the input using the BCP-47 format of 'language tag-region' (e.g: 'en-US'). If you know the language of your content, it is recommended that you specify it. The language must be specified explicitly for AudioAnalysisMode:Basic, since automatic language detection is not included in basic mode. If the language isn't specified, automatic language detection will choose the first language detected and process with the selected language for the duration of the file. It does not currently support dynamically switching between languages after the first language is detected. The automatic detection works best with audio recordings with clearly discernible speech. If automatic detection fails to find the language, transcription would fall back to 'en-US'. The list of supported languages is available here: https://go.microsoft.com/fwlink/?linkid=2109463.
- AudioAnalysisMode string - Possible values are Basic or Standard. Determines the set of audio analysis operations to be performed. - AudioLanguage string - The language for the audio payload in the input using the BCP-47 format of 'language tag-region' (e.g: 'en-US'). If you know the language of your content, it is recommended that you specify it. The language must be specified explicitly for AudioAnalysisMode:Basic, since automatic language detection is not included in basic mode. If the language isn't specified, automatic language detection will choose the first language detected and process with the selected language for the duration of the file. It does not currently support dynamically switching between languages after the first language is detected. The automatic detection works best with audio recordings with clearly discernible speech. If automatic detection fails to find the language, transcription would fall back to 'en-US'. The list of supported languages is available here: https://go.microsoft.com/fwlink/?linkid=2109463.
- audioAnalysisMode String - Possible values are Basic or Standard. Determines the set of audio analysis operations to be performed. - audioLanguage String - The language for the audio payload in the input using the BCP-47 format of 'language tag-region' (e.g: 'en-US'). If you know the language of your content, it is recommended that you specify it. The language must be specified explicitly for AudioAnalysisMode:Basic, since automatic language detection is not included in basic mode. If the language isn't specified, automatic language detection will choose the first language detected and process with the selected language for the duration of the file. It does not currently support dynamically switching between languages after the first language is detected. The automatic detection works best with audio recordings with clearly discernible speech. If automatic detection fails to find the language, transcription would fall back to 'en-US'. The list of supported languages is available here: https://go.microsoft.com/fwlink/?linkid=2109463.
- audioAnalysisMode string - Possible values are Basic or Standard. Determines the set of audio analysis operations to be performed. - audioLanguage string - The language for the audio payload in the input using the BCP-47 format of 'language tag-region' (e.g: 'en-US'). If you know the language of your content, it is recommended that you specify it. The language must be specified explicitly for AudioAnalysisMode:Basic, since automatic language detection is not included in basic mode. If the language isn't specified, automatic language detection will choose the first language detected and process with the selected language for the duration of the file. It does not currently support dynamically switching between languages after the first language is detected. The automatic detection works best with audio recordings with clearly discernible speech. If automatic detection fails to find the language, transcription would fall back to 'en-US'. The list of supported languages is available here: https://go.microsoft.com/fwlink/?linkid=2109463.
- audio_analysis_mode str - Possible values are Basic or Standard. Determines the set of audio analysis operations to be performed. - audio_language str - The language for the audio payload in the input using the BCP-47 format of 'language tag-region' (e.g: 'en-US'). If you know the language of your content, it is recommended that you specify it. The language must be specified explicitly for AudioAnalysisMode:Basic, since automatic language detection is not included in basic mode. If the language isn't specified, automatic language detection will choose the first language detected and process with the selected language for the duration of the file. It does not currently support dynamically switching between languages after the first language is detected. The automatic detection works best with audio recordings with clearly discernible speech. If automatic detection fails to find the language, transcription would fall back to 'en-US'. The list of supported languages is available here: https://go.microsoft.com/fwlink/?linkid=2109463.
- audioAnalysisMode String - Possible values are Basic or Standard. Determines the set of audio analysis operations to be performed. - audioLanguage String - The language for the audio payload in the input using the BCP-47 format of 'language tag-region' (e.g: 'en-US'). If you know the language of your content, it is recommended that you specify it. The language must be specified explicitly for AudioAnalysisMode:Basic, since automatic language detection is not included in basic mode. If the language isn't specified, automatic language detection will choose the first language detected and process with the selected language for the duration of the file. It does not currently support dynamically switching between languages after the first language is detected. The automatic detection works best with audio recordings with clearly discernible speech. If automatic detection fails to find the language, transcription would fall back to 'en-US'. The list of supported languages is available here: https://go.microsoft.com/fwlink/?linkid=2109463.
TransformOutputBuiltinPreset, TransformOutputBuiltinPresetArgs
- PresetName string - The built-in preset to be used for encoding videos. The allowed values are AACGoodQualityAudio, AdaptiveStreaming, ContentAwareEncoding, ContentAwareEncodingExperimental, CopyAllBitrateNonInterleaved, H264MultipleBitrate1080p, H264MultipleBitrate720p, H264MultipleBitrateSD, H264SingleBitrate1080p, H264SingleBitrate720p and H264SingleBitrateSD.
- PresetName string - The built-in preset to be used for encoding videos. The allowed values are AACGoodQualityAudio, AdaptiveStreaming, ContentAwareEncoding, ContentAwareEncodingExperimental, CopyAllBitrateNonInterleaved, H264MultipleBitrate1080p, H264MultipleBitrate720p, H264MultipleBitrateSD, H264SingleBitrate1080p, H264SingleBitrate720p and H264SingleBitrateSD.
- presetName String - The built-in preset to be used for encoding videos. The allowed values are AACGoodQualityAudio, AdaptiveStreaming, ContentAwareEncoding, ContentAwareEncodingExperimental, CopyAllBitrateNonInterleaved, H264MultipleBitrate1080p, H264MultipleBitrate720p, H264MultipleBitrateSD, H264SingleBitrate1080p, H264SingleBitrate720p and H264SingleBitrateSD.
- presetName string - The built-in preset to be used for encoding videos. The allowed values are AACGoodQualityAudio, AdaptiveStreaming, ContentAwareEncoding, ContentAwareEncodingExperimental, CopyAllBitrateNonInterleaved, H264MultipleBitrate1080p, H264MultipleBitrate720p, H264MultipleBitrateSD, H264SingleBitrate1080p, H264SingleBitrate720p and H264SingleBitrateSD.
- preset_name str - The built-in preset to be used for encoding videos. The allowed values are AACGoodQualityAudio, AdaptiveStreaming, ContentAwareEncoding, ContentAwareEncodingExperimental, CopyAllBitrateNonInterleaved, H264MultipleBitrate1080p, H264MultipleBitrate720p, H264MultipleBitrateSD, H264SingleBitrate1080p, H264SingleBitrate720p and H264SingleBitrateSD.
- presetName String - The built-in preset to be used for encoding videos. The allowed values are AACGoodQualityAudio, AdaptiveStreaming, ContentAwareEncoding, ContentAwareEncodingExperimental, CopyAllBitrateNonInterleaved, H264MultipleBitrate1080p, H264MultipleBitrate720p, H264MultipleBitrateSD, H264SingleBitrate1080p, H264SingleBitrate720p and H264SingleBitrateSD.
TransformOutputFaceDetectorPreset, TransformOutputFaceDetectorPresetArgs
- AnalysisResolution string - Possible values are SourceResolution or StandardDefinition. Specifies the maximum resolution at which your video is analyzed. The default behavior is SourceResolution, which will keep the input video at its original resolution when analyzed. Using StandardDefinition will resize input videos to standard definition while preserving the appropriate aspect ratio. It will only resize if the video is of higher resolution. For example, a 1920x1080 input would be scaled to 640x360 before processing. Switching to StandardDefinition will reduce the time it takes to process high resolution video. It may also reduce the cost of using this component (see https://azure.microsoft.com/en-us/pricing/details/media-services/#analytics for details). However, faces that end up being too small in the resized video may not be detected.
- AnalysisResolution string - Possible values are SourceResolution or StandardDefinition. Specifies the maximum resolution at which your video is analyzed. The default behavior is SourceResolution, which will keep the input video at its original resolution when analyzed. Using StandardDefinition will resize input videos to standard definition while preserving the appropriate aspect ratio. It will only resize if the video is of higher resolution. For example, a 1920x1080 input would be scaled to 640x360 before processing. Switching to StandardDefinition will reduce the time it takes to process high resolution video. It may also reduce the cost of using this component (see https://azure.microsoft.com/en-us/pricing/details/media-services/#analytics for details). However, faces that end up being too small in the resized video may not be detected.
- analysisResolution String - Possible values are SourceResolution or StandardDefinition. Specifies the maximum resolution at which your video is analyzed. The default behavior is SourceResolution, which will keep the input video at its original resolution when analyzed. Using StandardDefinition will resize input videos to standard definition while preserving the appropriate aspect ratio. It will only resize if the video is of higher resolution. For example, a 1920x1080 input would be scaled to 640x360 before processing. Switching to StandardDefinition will reduce the time it takes to process high resolution video. It may also reduce the cost of using this component (see https://azure.microsoft.com/en-us/pricing/details/media-services/#analytics for details). However, faces that end up being too small in the resized video may not be detected.
- analysisResolution string - Possible values are SourceResolution or StandardDefinition. Specifies the maximum resolution at which your video is analyzed. The default behavior is SourceResolution, which will keep the input video at its original resolution when analyzed. Using StandardDefinition will resize input videos to standard definition while preserving the appropriate aspect ratio. It will only resize if the video is of higher resolution. For example, a 1920x1080 input would be scaled to 640x360 before processing. Switching to StandardDefinition will reduce the time it takes to process high resolution video. It may also reduce the cost of using this component (see https://azure.microsoft.com/en-us/pricing/details/media-services/#analytics for details). However, faces that end up being too small in the resized video may not be detected.
- analysis_resolution str - Possible values are SourceResolution or StandardDefinition. Specifies the maximum resolution at which your video is analyzed. The default behavior is SourceResolution, which will keep the input video at its original resolution when analyzed. Using StandardDefinition will resize input videos to standard definition while preserving the appropriate aspect ratio. It will only resize if the video is of higher resolution. For example, a 1920x1080 input would be scaled to 640x360 before processing. Switching to StandardDefinition will reduce the time it takes to process high resolution video. It may also reduce the cost of using this component (see https://azure.microsoft.com/en-us/pricing/details/media-services/#analytics for details). However, faces that end up being too small in the resized video may not be detected.
- analysisResolution String - Possible values are SourceResolution or StandardDefinition. Specifies the maximum resolution at which your video is analyzed. The default behavior is SourceResolution, which will keep the input video at its original resolution when analyzed. Using StandardDefinition will resize input videos to standard definition while preserving the appropriate aspect ratio. It will only resize if the video is of higher resolution. For example, a 1920x1080 input would be scaled to 640x360 before processing. Switching to StandardDefinition will reduce the time it takes to process high resolution video. It may also reduce the cost of using this component (see https://azure.microsoft.com/en-us/pricing/details/media-services/#analytics for details). However, faces that end up being too small in the resized video may not be detected.
TransformOutputVideoAnalyzerPreset, TransformOutputVideoAnalyzerPresetArgs
- AudioAnalysisMode string - Possible values are Basic or Standard. Determines the set of audio analysis operations to be performed. - AudioLanguage string - The language for the audio payload in the input using the BCP-47 format of 'language tag-region' (e.g: 'en-US'). If you know the language of your content, it is recommended that you specify it. The language must be specified explicitly for AudioAnalysisMode:Basic, since automatic language detection is not included in basic mode. If the language isn't specified, automatic language detection will choose the first language detected and process with the selected language for the duration of the file. It does not currently support dynamically switching between languages after the first language is detected. The automatic detection works best with audio recordings with clearly discernible speech. If automatic detection fails to find the language, transcription would fall back to 'en-US'. The list of supported languages is available here: https://go.microsoft.com/fwlink/?linkid=2109463. - InsightsType string - Defines the type of insights that you want the service to generate. The allowed values are AudioInsightsOnly, VideoInsightsOnly, and AllInsights. If you set this to AllInsights and the input is audio only, then only audio insights are generated. Similarly, if the input is video only, then only video insights are generated. It is recommended that you not use AudioInsightsOnly if you expect some of your inputs to be video only; or use VideoInsightsOnly if you expect some of your inputs to be audio only. Your Jobs in such conditions would error out.
- AudioAnalysisMode string - Possible values are Basic or Standard. Determines the set of audio analysis operations to be performed. - AudioLanguage string - The language for the audio payload in the input using the BCP-47 format of 'language tag-region' (e.g: 'en-US'). If you know the language of your content, it is recommended that you specify it. The language must be specified explicitly for AudioAnalysisMode:Basic, since automatic language detection is not included in basic mode. If the language isn't specified, automatic language detection will choose the first language detected and process with the selected language for the duration of the file. It does not currently support dynamically switching between languages after the first language is detected. The automatic detection works best with audio recordings with clearly discernible speech. If automatic detection fails to find the language, transcription would fall back to 'en-US'. The list of supported languages is available here: https://go.microsoft.com/fwlink/?linkid=2109463. - InsightsType string - Defines the type of insights that you want the service to generate. The allowed values are AudioInsightsOnly, VideoInsightsOnly, and AllInsights. If you set this to AllInsights and the input is audio only, then only audio insights are generated. Similarly, if the input is video only, then only video insights are generated. It is recommended that you not use AudioInsightsOnly if you expect some of your inputs to be video only; or use VideoInsightsOnly if you expect some of your inputs to be audio only. Your Jobs in such conditions would error out.
- audioAnalysisMode String - Possible values are Basic or Standard. Determines the set of audio analysis operations to be performed. - audioLanguage String - The language for the audio payload in the input using the BCP-47 format of 'language tag-region' (e.g: 'en-US'). If you know the language of your content, it is recommended that you specify it. The language must be specified explicitly for AudioAnalysisMode:Basic, since automatic language detection is not included in basic mode. If the language isn't specified, automatic language detection will choose the first language detected and process with the selected language for the duration of the file. It does not currently support dynamically switching between languages after the first language is detected. The automatic detection works best with audio recordings with clearly discernible speech. If automatic detection fails to find the language, transcription would fall back to 'en-US'. The list of supported languages is available here: https://go.microsoft.com/fwlink/?linkid=2109463. - insightsType String - Defines the type of insights that you want the service to generate. The allowed values are AudioInsightsOnly, VideoInsightsOnly, and AllInsights. If you set this to AllInsights and the input is audio only, then only audio insights are generated. Similarly, if the input is video only, then only video insights are generated. It is recommended that you not use AudioInsightsOnly if you expect some of your inputs to be video only; or use VideoInsightsOnly if you expect some of your inputs to be audio only. Your Jobs in such conditions would error out.
- audioAnalysisMode string - Possible values are Basic or Standard. Determines the set of audio analysis operations to be performed. - audioLanguage string - The language for the audio payload in the input using the BCP-47 format of 'language tag-region' (e.g: 'en-US'). If you know the language of your content, it is recommended that you specify it. The language must be specified explicitly for AudioAnalysisMode:Basic, since automatic language detection is not included in basic mode. If the language isn't specified, automatic language detection will choose the first language detected and process with the selected language for the duration of the file. It does not currently support dynamically switching between languages after the first language is detected. The automatic detection works best with audio recordings with clearly discernible speech. If automatic detection fails to find the language, transcription would fall back to 'en-US'. The list of supported languages is available here: https://go.microsoft.com/fwlink/?linkid=2109463. - insightsType string - Defines the type of insights that you want the service to generate. The allowed values are AudioInsightsOnly, VideoInsightsOnly, and AllInsights. If you set this to AllInsights and the input is audio only, then only audio insights are generated. Similarly, if the input is video only, then only video insights are generated. It is recommended that you not use AudioInsightsOnly if you expect some of your inputs to be video only; or use VideoInsightsOnly if you expect some of your inputs to be audio only. Your Jobs in such conditions would error out.
- audio_analysis_mode str - Possible values are Basic or Standard. Determines the set of audio analysis operations to be performed. - audio_language str - The language for the audio payload in the input using the BCP-47 format of 'language tag-region' (e.g: 'en-US'). If you know the language of your content, it is recommended that you specify it. The language must be specified explicitly for AudioAnalysisMode:Basic, since automatic language detection is not included in basic mode. If the language isn't specified, automatic language detection will choose the first language detected and process with the selected language for the duration of the file. It does not currently support dynamically switching between languages after the first language is detected. The automatic detection works best with audio recordings with clearly discernible speech. If automatic detection fails to find the language, transcription would fall back to 'en-US'. The list of supported languages is available here: https://go.microsoft.com/fwlink/?linkid=2109463. - insights_type str - Defines the type of insights that you want the service to generate. The allowed values are AudioInsightsOnly, VideoInsightsOnly, and AllInsights. If you set this to AllInsights and the input is audio only, then only audio insights are generated. Similarly, if the input is video only, then only video insights are generated. It is recommended that you not use AudioInsightsOnly if you expect some of your inputs to be video only; or use VideoInsightsOnly if you expect some of your inputs to be audio only. Your Jobs in such conditions would error out.
- audioAnalysisMode String - Possible values are Basic or Standard. Determines the set of audio analysis operations to be performed. - audioLanguage String - The language for the audio payload in the input using the BCP-47 format of 'language tag-region' (e.g: 'en-US'). If you know the language of your content, it is recommended that you specify it. The language must be specified explicitly for AudioAnalysisMode:Basic, since automatic language detection is not included in basic mode. If the language isn't specified, automatic language detection will choose the first language detected and process with the selected language for the duration of the file. It does not currently support dynamically switching between languages after the first language is detected. The automatic detection works best with audio recordings with clearly discernible speech. If automatic detection fails to find the language, transcription would fall back to 'en-US'. The list of supported languages is available here: https://go.microsoft.com/fwlink/?linkid=2109463. - insightsType String - Defines the type of insights that you want the service to generate. The allowed values are AudioInsightsOnly, VideoInsightsOnly, and AllInsights. If you set this to AllInsights and the input is audio only, then only audio insights are generated. Similarly, if the input is video only, then only video insights are generated. It is recommended that you not use AudioInsightsOnly if you expect some of your inputs to be video only; or use VideoInsightsOnly if you expect some of your inputs to be audio only. Your Jobs in such conditions would error out.
Import
Transforms can be imported using the resource id, e.g.
$ pulumi import azure:media/transform:Transform example /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/group1/providers/Microsoft.Media/mediaservices/media1/transforms/transform1
To learn more about importing existing cloud resources, see Importing resources.
Package Details
- Repository
- Azure Classic pulumi/pulumi-azure
- License
- Apache-2.0
- Notes
- This Pulumi package is based on the azurerm Terraform Provider.
We recommend using Azure Native.
published on Monday, Mar 9, 2026 by Pulumi
