Packages › Azure Classic › API Docs › media › Transform

We recommend using Azure Native.

Viewing docs for Azure v4.42.0 (Older version)
published on Monday, Mar 9, 2026 by Pulumi
azure logo

We recommend using Azure Native.

Viewing docs for Azure v4.42.0 (Older version)
published on Monday, Mar 9, 2026 by Pulumi

    Manages a Transform.

    Example Usage

    using Pulumi;
    using Azure = Pulumi.Azure;
    
    class MyStack : Stack
    {
        public MyStack()
        {
            var exampleResourceGroup = new Azure.Core.ResourceGroup("exampleResourceGroup", new Azure.Core.ResourceGroupArgs
            {
                Location = "West Europe",
            });
            var exampleAccount = new Azure.Storage.Account("exampleAccount", new Azure.Storage.AccountArgs
            {
                ResourceGroupName = exampleResourceGroup.Name,
                Location = exampleResourceGroup.Location,
                AccountTier = "Standard",
                AccountReplicationType = "GRS",
            });
            var exampleServiceAccount = new Azure.Media.ServiceAccount("exampleServiceAccount", new Azure.Media.ServiceAccountArgs
            {
                Location = exampleResourceGroup.Location,
                ResourceGroupName = exampleResourceGroup.Name,
                StorageAccounts = 
                {
                    new Azure.Media.Inputs.ServiceAccountStorageAccountArgs
                    {
                        Id = exampleAccount.Id,
                        IsPrimary = true,
                    },
                },
            });
            var exampleTransform = new Azure.Media.Transform("exampleTransform", new Azure.Media.TransformArgs
            {
                ResourceGroupName = exampleResourceGroup.Name,
                MediaServicesAccountName = exampleServiceAccount.Name,
                Description = "My transform description",
                Outputs = 
                {
                    new Azure.Media.Inputs.TransformOutputArgs
                    {
                        RelativePriority = "Normal",
                        OnErrorAction = "ContinueJob",
                        BuiltinPreset = new Azure.Media.Inputs.TransformOutputBuiltinPresetArgs
                        {
                            PresetName = "AACGoodQualityAudio",
                        },
                    },
                },
            });
        }
    
    }
    
    package main
    
    import (
    	"github.com/pulumi/pulumi-azure/sdk/v4/go/azure/core"
    	"github.com/pulumi/pulumi-azure/sdk/v4/go/azure/media"
    	"github.com/pulumi/pulumi-azure/sdk/v4/go/azure/storage"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		exampleResourceGroup, err := core.NewResourceGroup(ctx, "exampleResourceGroup", &core.ResourceGroupArgs{
    			Location: pulumi.String("West Europe"),
    		})
    		if err != nil {
    			return err
    		}
    		exampleAccount, err := storage.NewAccount(ctx, "exampleAccount", &storage.AccountArgs{
    			ResourceGroupName:      exampleResourceGroup.Name,
    			Location:               exampleResourceGroup.Location,
    			AccountTier:            pulumi.String("Standard"),
    			AccountReplicationType: pulumi.String("GRS"),
    		})
    		if err != nil {
    			return err
    		}
    		exampleServiceAccount, err := media.NewServiceAccount(ctx, "exampleServiceAccount", &media.ServiceAccountArgs{
    			Location:          exampleResourceGroup.Location,
    			ResourceGroupName: exampleResourceGroup.Name,
    			StorageAccounts: media.ServiceAccountStorageAccountArray{
    				&media.ServiceAccountStorageAccountArgs{
    					Id:        exampleAccount.ID(),
    					IsPrimary: pulumi.Bool(true),
    				},
    			},
    		})
    		if err != nil {
    			return err
    		}
    		_, err = media.NewTransform(ctx, "exampleTransform", &media.TransformArgs{
    			ResourceGroupName:        exampleResourceGroup.Name,
    			MediaServicesAccountName: exampleServiceAccount.Name,
    			Description:              pulumi.String("My transform description"),
    			Outputs: media.TransformOutputArray{
    				media.TransformOutputArgs{
    					RelativePriority: pulumi.String("Normal"),
    					OnErrorAction:    pulumi.String("ContinueJob"),
    					BuiltinPreset: &media.TransformOutputBuiltinPresetArgs{
    						PresetName: pulumi.String("AACGoodQualityAudio"),
    					},
    				},
    			},
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    

    Example coming soon!

    import * as pulumi from "@pulumi/pulumi";
    import * as azure from "@pulumi/azure";
    
    const exampleResourceGroup = new azure.core.ResourceGroup("exampleResourceGroup", {location: "West Europe"});
    const exampleAccount = new azure.storage.Account("exampleAccount", {
        resourceGroupName: exampleResourceGroup.name,
        location: exampleResourceGroup.location,
        accountTier: "Standard",
        accountReplicationType: "GRS",
    });
    const exampleServiceAccount = new azure.media.ServiceAccount("exampleServiceAccount", {
        location: exampleResourceGroup.location,
        resourceGroupName: exampleResourceGroup.name,
        storageAccounts: [{
            id: exampleAccount.id,
            isPrimary: true,
        }],
    });
    const exampleTransform = new azure.media.Transform("exampleTransform", {
        resourceGroupName: exampleResourceGroup.name,
        mediaServicesAccountName: exampleServiceAccount.name,
        description: "My transform description",
        outputs: [{
            relativePriority: "Normal",
            onErrorAction: "ContinueJob",
            builtinPreset: {
                presetName: "AACGoodQualityAudio",
            },
        }],
    });
    
    import pulumi
    import pulumi_azure as azure
    
    example_resource_group = azure.core.ResourceGroup("exampleResourceGroup", location="West Europe")
    example_account = azure.storage.Account("exampleAccount",
        resource_group_name=example_resource_group.name,
        location=example_resource_group.location,
        account_tier="Standard",
        account_replication_type="GRS")
    example_service_account = azure.media.ServiceAccount("exampleServiceAccount",
        location=example_resource_group.location,
        resource_group_name=example_resource_group.name,
        storage_accounts=[azure.media.ServiceAccountStorageAccountArgs(
            id=example_account.id,
            is_primary=True,
        )])
    example_transform = azure.media.Transform("exampleTransform",
        resource_group_name=example_resource_group.name,
        media_services_account_name=example_service_account.name,
        description="My transform description",
        outputs=[azure.media.TransformOutputArgs(
            relative_priority="Normal",
            on_error_action="ContinueJob",
            builtin_preset=azure.media.TransformOutputBuiltinPresetArgs(
                preset_name="AACGoodQualityAudio",
            ),
        )])
    

    Example coming soon!

    With Multiple Outputs

    using Pulumi;
    using Azure = Pulumi.Azure;
    
    class MyStack : Stack
    {
        public MyStack()
        {
            var exampleResourceGroup = new Azure.Core.ResourceGroup("exampleResourceGroup", new Azure.Core.ResourceGroupArgs
            {
                Location = "West Europe",
            });
            var exampleAccount = new Azure.Storage.Account("exampleAccount", new Azure.Storage.AccountArgs
            {
                ResourceGroupName = exampleResourceGroup.Name,
                Location = exampleResourceGroup.Location,
                AccountTier = "Standard",
                AccountReplicationType = "GRS",
            });
            var exampleServiceAccount = new Azure.Media.ServiceAccount("exampleServiceAccount", new Azure.Media.ServiceAccountArgs
            {
                Location = exampleResourceGroup.Location,
                ResourceGroupName = exampleResourceGroup.Name,
                StorageAccounts = 
                {
                    new Azure.Media.Inputs.ServiceAccountStorageAccountArgs
                    {
                        Id = exampleAccount.Id,
                        IsPrimary = true,
                    },
                },
            });
            var exampleTransform = new Azure.Media.Transform("exampleTransform", new Azure.Media.TransformArgs
            {
                ResourceGroupName = exampleResourceGroup.Name,
                MediaServicesAccountName = exampleServiceAccount.Name,
                Description = "My transform description",
                Outputs = 
                {
                    new Azure.Media.Inputs.TransformOutputArgs
                    {
                        RelativePriority = "Normal",
                        OnErrorAction = "ContinueJob",
                        BuiltinPreset = new Azure.Media.Inputs.TransformOutputBuiltinPresetArgs
                        {
                            PresetName = "AACGoodQualityAudio",
                        },
                    },
                    new Azure.Media.Inputs.TransformOutputArgs
                    {
                        RelativePriority = "Low",
                        OnErrorAction = "ContinueJob",
                        AudioAnalyzerPreset = new Azure.Media.Inputs.TransformOutputAudioAnalyzerPresetArgs
                        {
                            AudioLanguage = "en-US",
                            AudioAnalysisMode = "Basic",
                        },
                    },
                    new Azure.Media.Inputs.TransformOutputArgs
                    {
                        RelativePriority = "Low",
                        OnErrorAction = "StopProcessingJob",
                        FaceDetectorPreset = new Azure.Media.Inputs.TransformOutputFaceDetectorPresetArgs
                        {
                            AnalysisResolution = "StandardDefinition",
                        },
                    },
                },
            });
        }
    
    }
    
    package main
    
    import (
    	"github.com/pulumi/pulumi-azure/sdk/v4/go/azure/core"
    	"github.com/pulumi/pulumi-azure/sdk/v4/go/azure/media"
    	"github.com/pulumi/pulumi-azure/sdk/v4/go/azure/storage"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		exampleResourceGroup, err := core.NewResourceGroup(ctx, "exampleResourceGroup", &core.ResourceGroupArgs{
    			Location: pulumi.String("West Europe"),
    		})
    		if err != nil {
    			return err
    		}
    		exampleAccount, err := storage.NewAccount(ctx, "exampleAccount", &storage.AccountArgs{
    			ResourceGroupName:      exampleResourceGroup.Name,
    			Location:               exampleResourceGroup.Location,
    			AccountTier:            pulumi.String("Standard"),
    			AccountReplicationType: pulumi.String("GRS"),
    		})
    		if err != nil {
    			return err
    		}
    		exampleServiceAccount, err := media.NewServiceAccount(ctx, "exampleServiceAccount", &media.ServiceAccountArgs{
    			Location:          exampleResourceGroup.Location,
    			ResourceGroupName: exampleResourceGroup.Name,
    			StorageAccounts: media.ServiceAccountStorageAccountArray{
    				&media.ServiceAccountStorageAccountArgs{
    					Id:        exampleAccount.ID(),
    					IsPrimary: pulumi.Bool(true),
    				},
    			},
    		})
    		if err != nil {
    			return err
    		}
    		_, err = media.NewTransform(ctx, "exampleTransform", &media.TransformArgs{
    			ResourceGroupName:        exampleResourceGroup.Name,
    			MediaServicesAccountName: exampleServiceAccount.Name,
    			Description:              pulumi.String("My transform description"),
    			Outputs: media.TransformOutputArray{
    				media.TransformOutputArgs{
    					RelativePriority: pulumi.String("Normal"),
    					OnErrorAction:    pulumi.String("ContinueJob"),
    					BuiltinPreset: &media.TransformOutputBuiltinPresetArgs{
    						PresetName: pulumi.String("AACGoodQualityAudio"),
    					},
    				},
    				media.TransformOutputArgs{
    					RelativePriority: pulumi.String("Low"),
    					OnErrorAction:    pulumi.String("ContinueJob"),
    					AudioAnalyzerPreset: &media.TransformOutputAudioAnalyzerPresetArgs{
    						AudioLanguage:     pulumi.String("en-US"),
    						AudioAnalysisMode: pulumi.String("Basic"),
    					},
    				},
    				media.TransformOutputArgs{
    					RelativePriority: pulumi.String("Low"),
    					OnErrorAction:    pulumi.String("StopProcessingJob"),
    					FaceDetectorPreset: &media.TransformOutputFaceDetectorPresetArgs{
    						AnalysisResolution: pulumi.String("StandardDefinition"),
    					},
    				},
    			},
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    

    Example coming soon!

    import * as pulumi from "@pulumi/pulumi";
    import * as azure from "@pulumi/azure";
    
    const exampleResourceGroup = new azure.core.ResourceGroup("exampleResourceGroup", {location: "West Europe"});
    const exampleAccount = new azure.storage.Account("exampleAccount", {
        resourceGroupName: exampleResourceGroup.name,
        location: exampleResourceGroup.location,
        accountTier: "Standard",
        accountReplicationType: "GRS",
    });
    const exampleServiceAccount = new azure.media.ServiceAccount("exampleServiceAccount", {
        location: exampleResourceGroup.location,
        resourceGroupName: exampleResourceGroup.name,
        storageAccounts: [{
            id: exampleAccount.id,
            isPrimary: true,
        }],
    });
    const exampleTransform = new azure.media.Transform("exampleTransform", {
        resourceGroupName: exampleResourceGroup.name,
        mediaServicesAccountName: exampleServiceAccount.name,
        description: "My transform description",
        outputs: [
            {
                relativePriority: "Normal",
                onErrorAction: "ContinueJob",
                builtinPreset: {
                    presetName: "AACGoodQualityAudio",
                },
            },
            {
                relativePriority: "Low",
                onErrorAction: "ContinueJob",
                audioAnalyzerPreset: {
                    audioLanguage: "en-US",
                    audioAnalysisMode: "Basic",
                },
            },
            {
                relativePriority: "Low",
                onErrorAction: "StopProcessingJob",
                faceDetectorPreset: {
                    analysisResolution: "StandardDefinition",
                },
            },
        ],
    });
    
    import pulumi
    import pulumi_azure as azure
    
    example_resource_group = azure.core.ResourceGroup("exampleResourceGroup", location="West Europe")
    example_account = azure.storage.Account("exampleAccount",
        resource_group_name=example_resource_group.name,
        location=example_resource_group.location,
        account_tier="Standard",
        account_replication_type="GRS")
    example_service_account = azure.media.ServiceAccount("exampleServiceAccount",
        location=example_resource_group.location,
        resource_group_name=example_resource_group.name,
        storage_accounts=[azure.media.ServiceAccountStorageAccountArgs(
            id=example_account.id,
            is_primary=True,
        )])
    example_transform = azure.media.Transform("exampleTransform",
        resource_group_name=example_resource_group.name,
        media_services_account_name=example_service_account.name,
        description="My transform description",
        outputs=[
            azure.media.TransformOutputArgs(
                relative_priority="Normal",
                on_error_action="ContinueJob",
                builtin_preset=azure.media.TransformOutputBuiltinPresetArgs(
                    preset_name="AACGoodQualityAudio",
                ),
            ),
            azure.media.TransformOutputArgs(
                relative_priority="Low",
                on_error_action="ContinueJob",
                audio_analyzer_preset=azure.media.TransformOutputAudioAnalyzerPresetArgs(
                    audio_language="en-US",
                    audio_analysis_mode="Basic",
                ),
            ),
            azure.media.TransformOutputArgs(
                relative_priority="Low",
                on_error_action="StopProcessingJob",
                face_detector_preset=azure.media.TransformOutputFaceDetectorPresetArgs(
                    analysis_resolution="StandardDefinition",
                ),
            ),
        ])
    

    Example coming soon!

    Create Transform Resource

    Resources are created with functions called constructors. To learn more about declaring and configuring resources, see Resources.

    Constructor syntax

    new Transform(name: string, args: TransformArgs, opts?: CustomResourceOptions);
    @overload
    def Transform(resource_name: str,
                  args: TransformArgs,
                  opts: Optional[ResourceOptions] = None)
    
    @overload
    def Transform(resource_name: str,
                  opts: Optional[ResourceOptions] = None,
                  media_services_account_name: Optional[str] = None,
                  resource_group_name: Optional[str] = None,
                  description: Optional[str] = None,
                  name: Optional[str] = None,
                  outputs: Optional[Sequence[TransformOutputArgs]] = None)
    func NewTransform(ctx *Context, name string, args TransformArgs, opts ...ResourceOption) (*Transform, error)
    public Transform(string name, TransformArgs args, CustomResourceOptions? opts = null)
    public Transform(String name, TransformArgs args)
    public Transform(String name, TransformArgs args, CustomResourceOptions options)
    
    type: azure:media:Transform
    properties: # The arguments to resource properties.
    options: # Bag of options to control resource's behavior.
    
    

    Parameters

    name string
    The unique name of the resource.
    args TransformArgs
    The arguments to resource properties.
    opts CustomResourceOptions
    Bag of options to control resource's behavior.
    resource_name str
    The unique name of the resource.
    args TransformArgs
    The arguments to resource properties.
    opts ResourceOptions
    Bag of options to control resource's behavior.
    ctx Context
    Context object for the current deployment.
    name string
    The unique name of the resource.
    args TransformArgs
    The arguments to resource properties.
    opts ResourceOption
    Bag of options to control resource's behavior.
    name string
    The unique name of the resource.
    args TransformArgs
    The arguments to resource properties.
    opts CustomResourceOptions
    Bag of options to control resource's behavior.
    name String
    The unique name of the resource.
    args TransformArgs
    The arguments to resource properties.
    options CustomResourceOptions
    Bag of options to control resource's behavior.

    Constructor example

    The following reference example uses placeholder values for all input properties.

    var transformResource = new Azure.Media.Transform("transformResource", new()
    {
        MediaServicesAccountName = "string",
        ResourceGroupName = "string",
        Description = "string",
        Name = "string",
        Outputs = new[]
        {
            new Azure.Media.Inputs.TransformOutputArgs
            {
                AudioAnalyzerPreset = new Azure.Media.Inputs.TransformOutputAudioAnalyzerPresetArgs
                {
                    AudioAnalysisMode = "string",
                    AudioLanguage = "string",
                },
                BuiltinPreset = new Azure.Media.Inputs.TransformOutputBuiltinPresetArgs
                {
                    PresetName = "string",
                },
                FaceDetectorPreset = new Azure.Media.Inputs.TransformOutputFaceDetectorPresetArgs
                {
                    AnalysisResolution = "string",
                },
                OnErrorAction = "string",
                RelativePriority = "string",
                VideoAnalyzerPreset = new Azure.Media.Inputs.TransformOutputVideoAnalyzerPresetArgs
                {
                    AudioAnalysisMode = "string",
                    AudioLanguage = "string",
                    InsightsType = "string",
                },
            },
        },
    });
    
    example, err := media.NewTransform(ctx, "transformResource", &media.TransformArgs{
    	MediaServicesAccountName: pulumi.String("string"),
    	ResourceGroupName:        pulumi.String("string"),
    	Description:              pulumi.String("string"),
    	Name:                     pulumi.String("string"),
    	Outputs: media.TransformOutputTypeArray{
    		&media.TransformOutputTypeArgs{
    			AudioAnalyzerPreset: &media.TransformOutputAudioAnalyzerPresetArgs{
    				AudioAnalysisMode: pulumi.String("string"),
    				AudioLanguage:     pulumi.String("string"),
    			},
    			BuiltinPreset: &media.TransformOutputBuiltinPresetArgs{
    				PresetName: pulumi.String("string"),
    			},
    			FaceDetectorPreset: &media.TransformOutputFaceDetectorPresetArgs{
    				AnalysisResolution: pulumi.String("string"),
    			},
    			OnErrorAction:    pulumi.String("string"),
    			RelativePriority: pulumi.String("string"),
    			VideoAnalyzerPreset: &media.TransformOutputVideoAnalyzerPresetArgs{
    				AudioAnalysisMode: pulumi.String("string"),
    				AudioLanguage:     pulumi.String("string"),
    				InsightsType:      pulumi.String("string"),
    			},
    		},
    	},
    })
    
    var transformResource = new Transform("transformResource", TransformArgs.builder()
        .mediaServicesAccountName("string")
        .resourceGroupName("string")
        .description("string")
        .name("string")
        .outputs(TransformOutputArgs.builder()
            .audioAnalyzerPreset(TransformOutputAudioAnalyzerPresetArgs.builder()
                .audioAnalysisMode("string")
                .audioLanguage("string")
                .build())
            .builtinPreset(TransformOutputBuiltinPresetArgs.builder()
                .presetName("string")
                .build())
            .faceDetectorPreset(TransformOutputFaceDetectorPresetArgs.builder()
                .analysisResolution("string")
                .build())
            .onErrorAction("string")
            .relativePriority("string")
            .videoAnalyzerPreset(TransformOutputVideoAnalyzerPresetArgs.builder()
                .audioAnalysisMode("string")
                .audioLanguage("string")
                .insightsType("string")
                .build())
            .build())
        .build());
    
    transform_resource = azure.media.Transform("transformResource",
        media_services_account_name="string",
        resource_group_name="string",
        description="string",
        name="string",
        outputs=[{
            "audio_analyzer_preset": {
                "audio_analysis_mode": "string",
                "audio_language": "string",
            },
            "builtin_preset": {
                "preset_name": "string",
            },
            "face_detector_preset": {
                "analysis_resolution": "string",
            },
            "on_error_action": "string",
            "relative_priority": "string",
            "video_analyzer_preset": {
                "audio_analysis_mode": "string",
                "audio_language": "string",
                "insights_type": "string",
            },
        }])
    
    const transformResource = new azure.media.Transform("transformResource", {
        mediaServicesAccountName: "string",
        resourceGroupName: "string",
        description: "string",
        name: "string",
        outputs: [{
            audioAnalyzerPreset: {
                audioAnalysisMode: "string",
                audioLanguage: "string",
            },
            builtinPreset: {
                presetName: "string",
            },
            faceDetectorPreset: {
                analysisResolution: "string",
            },
            onErrorAction: "string",
            relativePriority: "string",
            videoAnalyzerPreset: {
                audioAnalysisMode: "string",
                audioLanguage: "string",
                insightsType: "string",
            },
        }],
    });
    
    type: azure:media:Transform
    properties:
        description: string
        mediaServicesAccountName: string
        name: string
        outputs:
            - audioAnalyzerPreset:
                audioAnalysisMode: string
                audioLanguage: string
              builtinPreset:
                presetName: string
              faceDetectorPreset:
                analysisResolution: string
              onErrorAction: string
              relativePriority: string
              videoAnalyzerPreset:
                audioAnalysisMode: string
                audioLanguage: string
                insightsType: string
        resourceGroupName: string
    

    Transform Resource Properties

    To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.

    Inputs

    In Python, inputs that are objects can be passed either as argument classes or as dictionary literals.

    The Transform resource accepts the following input properties:

    MediaServicesAccountName string
    The Media Services account name. Changing this forces a new Transform to be created.
    ResourceGroupName string
    The name of the Resource Group where the Transform should exist. Changing this forces a new Transform to be created.
    Description string
    An optional verbose description of the Transform.
    Name string
    The name which should be used for this Transform. Changing this forces a new Transform to be created.
    Outputs List<TransformOutput>
    One or more output blocks as defined below. At least one output must be defined.
    MediaServicesAccountName string
    The Media Services account name. Changing this forces a new Transform to be created.
    ResourceGroupName string
    The name of the Resource Group where the Transform should exist. Changing this forces a new Transform to be created.
    Description string
    An optional verbose description of the Transform.
    Name string
    The name which should be used for this Transform. Changing this forces a new Transform to be created.
    Outputs []TransformOutputTypeArgs
    One or more output blocks as defined below. At least one output must be defined.
    mediaServicesAccountName String
    The Media Services account name. Changing this forces a new Transform to be created.
    resourceGroupName String
    The name of the Resource Group where the Transform should exist. Changing this forces a new Transform to be created.
    description String
    An optional verbose description of the Transform.
    name String
    The name which should be used for this Transform. Changing this forces a new Transform to be created.
    outputs List<TransformOutput>
    One or more output blocks as defined below. At least one output must be defined.
    mediaServicesAccountName string
    The Media Services account name. Changing this forces a new Transform to be created.
    resourceGroupName string
    The name of the Resource Group where the Transform should exist. Changing this forces a new Transform to be created.
    description string
    An optional verbose description of the Transform.
    name string
    The name which should be used for this Transform. Changing this forces a new Transform to be created.
    outputs TransformOutput[]
    One or more output blocks as defined below. At least one output must be defined.
    media_services_account_name str
    The Media Services account name. Changing this forces a new Transform to be created.
    resource_group_name str
    The name of the Resource Group where the Transform should exist. Changing this forces a new Transform to be created.
    description str
    An optional verbose description of the Transform.
    name str
    The name which should be used for this Transform. Changing this forces a new Transform to be created.
    outputs Sequence[TransformOutputArgs]
    One or more output blocks as defined below. At least one output must be defined.
    mediaServicesAccountName String
    The Media Services account name. Changing this forces a new Transform to be created.
    resourceGroupName String
    The name of the Resource Group where the Transform should exist. Changing this forces a new Transform to be created.
    description String
    An optional verbose description of the Transform.
    name String
    The name which should be used for this Transform. Changing this forces a new Transform to be created.
    outputs List<Property Map>
    One or more output blocks as defined below. At least one output must be defined.

    Outputs

    All input properties are implicitly available as output properties. Additionally, the Transform resource produces the following output properties:

    Id string
    The provider-assigned unique ID for this managed resource.
    Id string
    The provider-assigned unique ID for this managed resource.
    id String
    The provider-assigned unique ID for this managed resource.
    id string
    The provider-assigned unique ID for this managed resource.
    id str
    The provider-assigned unique ID for this managed resource.
    id String
    The provider-assigned unique ID for this managed resource.

    Look up Existing Transform Resource

    Get an existing Transform resource’s state with the given name, ID, and optional extra properties used to qualify the lookup.

    public static get(name: string, id: Input<ID>, state?: TransformState, opts?: CustomResourceOptions): Transform
    @staticmethod
    def get(resource_name: str,
            id: str,
            opts: Optional[ResourceOptions] = None,
            description: Optional[str] = None,
            media_services_account_name: Optional[str] = None,
            name: Optional[str] = None,
            outputs: Optional[Sequence[TransformOutputArgs]] = None,
            resource_group_name: Optional[str] = None) -> Transform
    func GetTransform(ctx *Context, name string, id IDInput, state *TransformState, opts ...ResourceOption) (*Transform, error)
    public static Transform Get(string name, Input<string> id, TransformState? state, CustomResourceOptions? opts = null)
    public static Transform get(String name, Output<String> id, TransformState state, CustomResourceOptions options)
    resources:
      _:
        type: azure:media:Transform
        get:
          id: ${id}
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    resource_name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    The following state arguments are supported:
    Description string
    An optional verbose description of the Transform.
    MediaServicesAccountName string
    The Media Services account name. Changing this forces a new Transform to be created.
    Name string
    The name which should be used for this Transform. Changing this forces a new Transform to be created.
    Outputs List<TransformOutput>
    One or more output blocks as defined below. At least one output must be defined.
    ResourceGroupName string
    The name of the Resource Group where the Transform should exist. Changing this forces a new Transform to be created.
    Description string
    An optional verbose description of the Transform.
    MediaServicesAccountName string
    The Media Services account name. Changing this forces a new Transform to be created.
    Name string
    The name which should be used for this Transform. Changing this forces a new Transform to be created.
    Outputs []TransformOutputTypeArgs
    One or more output blocks as defined below. At least one output must be defined.
    ResourceGroupName string
    The name of the Resource Group where the Transform should exist. Changing this forces a new Transform to be created.
    description String
    An optional verbose description of the Transform.
    mediaServicesAccountName String
    The Media Services account name. Changing this forces a new Transform to be created.
    name String
    The name which should be used for this Transform. Changing this forces a new Transform to be created.
    outputs List<TransformOutput>
    One or more output blocks as defined below. At least one output must be defined.
    resourceGroupName String
    The name of the Resource Group where the Transform should exist. Changing this forces a new Transform to be created.
    description string
    An optional verbose description of the Transform.
    mediaServicesAccountName string
    The Media Services account name. Changing this forces a new Transform to be created.
    name string
    The name which should be used for this Transform. Changing this forces a new Transform to be created.
    outputs TransformOutput[]
    One or more output blocks as defined below. At least one output must be defined.
    resourceGroupName string
    The name of the Resource Group where the Transform should exist. Changing this forces a new Transform to be created.
    description str
    An optional verbose description of the Transform.
    media_services_account_name str
    The Media Services account name. Changing this forces a new Transform to be created.
    name str
    The name which should be used for this Transform. Changing this forces a new Transform to be created.
    outputs Sequence[TransformOutputArgs]
    One or more output blocks as defined below. At least one output must be defined.
    resource_group_name str
    The name of the Resource Group where the Transform should exist. Changing this forces a new Transform to be created.
    description String
    An optional verbose description of the Transform.
    mediaServicesAccountName String
    The Media Services account name. Changing this forces a new Transform to be created.
    name String
    The name which should be used for this Transform. Changing this forces a new Transform to be created.
    outputs List<Property Map>
    One or more output blocks as defined below. At least one output must be defined.
    resourceGroupName String
    The name of the Resource Group where the Transform should exist. Changing this forces a new Transform to be created.

    Supporting Types

    TransformOutput, TransformOutputArgs

    AudioAnalyzerPreset TransformOutputAudioAnalyzerPreset
    An audio_analyzer_preset block as defined below.
    BuiltinPreset TransformOutputBuiltinPreset
    A builtin_preset block as defined below.
    FaceDetectorPreset TransformOutputFaceDetectorPreset
    A face_detector_preset block as defined below.
    OnErrorAction string
    A Transform can define more than one output. This property defines what the service should do when one output fails - either continue to produce other outputs, or stop the other outputs. The overall Job state will not reflect failures of outputs that are specified with ContinueJob. Possible values are StopProcessingJob or ContinueJob.
    RelativePriority string
    Sets the relative priority of the TransformOutputs within a Transform. This sets the priority that the service uses for processing Transform Outputs. Possible values are High, Normal or Low.
    VideoAnalyzerPreset TransformOutputVideoAnalyzerPreset
    A video_analyzer_preset block as defined below.
    AudioAnalyzerPreset TransformOutputAudioAnalyzerPreset
    An audio_analyzer_preset block as defined below.
    BuiltinPreset TransformOutputBuiltinPreset
    A builtin_preset block as defined below.
    FaceDetectorPreset TransformOutputFaceDetectorPreset
    A face_detector_preset block as defined below.
    OnErrorAction string
    A Transform can define more than one output. This property defines what the service should do when one output fails - either continue to produce other outputs, or stop the other outputs. The overall Job state will not reflect failures of outputs that are specified with ContinueJob. Possible values are StopProcessingJob or ContinueJob.
    RelativePriority string
    Sets the relative priority of the TransformOutputs within a Transform. This sets the priority that the service uses for processing Transform Outputs. Possible values are High, Normal or Low.
    VideoAnalyzerPreset TransformOutputVideoAnalyzerPreset
    A video_analyzer_preset block as defined below.
    audioAnalyzerPreset TransformOutputAudioAnalyzerPreset
    An audio_analyzer_preset block as defined below.
    builtinPreset TransformOutputBuiltinPreset
    A builtin_preset block as defined below.
    faceDetectorPreset TransformOutputFaceDetectorPreset
    A face_detector_preset block as defined below.
    onErrorAction String
    A Transform can define more than one output. This property defines what the service should do when one output fails - either continue to produce other outputs, or stop the other outputs. The overall Job state will not reflect failures of outputs that are specified with ContinueJob. Possible values are StopProcessingJob or ContinueJob.
    relativePriority String
    Sets the relative priority of the TransformOutputs within a Transform. This sets the priority that the service uses for processing Transform Outputs. Possible values are High, Normal or Low.
    videoAnalyzerPreset TransformOutputVideoAnalyzerPreset
    A video_analyzer_preset block as defined below.
    audioAnalyzerPreset TransformOutputAudioAnalyzerPreset
    An audio_analyzer_preset block as defined below.
    builtinPreset TransformOutputBuiltinPreset
    A builtin_preset block as defined below.
    faceDetectorPreset TransformOutputFaceDetectorPreset
    A face_detector_preset block as defined below.
    onErrorAction string
    A Transform can define more than one output. This property defines what the service should do when one output fails - either continue to produce other outputs, or stop the other outputs. The overall Job state will not reflect failures of outputs that are specified with ContinueJob. Possible values are StopProcessingJob or ContinueJob.
    relativePriority string
    Sets the relative priority of the TransformOutputs within a Transform. This sets the priority that the service uses for processing Transform Outputs. Possible values are High, Normal or Low.
    videoAnalyzerPreset TransformOutputVideoAnalyzerPreset
    A video_analyzer_preset block as defined below.
    audio_analyzer_preset TransformOutputAudioAnalyzerPreset
    An audio_analyzer_preset block as defined below.
    builtin_preset TransformOutputBuiltinPreset
    A builtin_preset block as defined below.
    face_detector_preset TransformOutputFaceDetectorPreset
    A face_detector_preset block as defined below.
    on_error_action str
    A Transform can define more than one output. This property defines what the service should do when one output fails - either continue to produce other outputs, or stop the other outputs. The overall Job state will not reflect failures of outputs that are specified with ContinueJob. Possible values are StopProcessingJob or ContinueJob.
    relative_priority str
    Sets the relative priority of the TransformOutputs within a Transform. This sets the priority that the service uses for processing Transform Outputs. Possible values are High, Normal or Low.
    video_analyzer_preset TransformOutputVideoAnalyzerPreset
    A video_analyzer_preset block as defined below.
    audioAnalyzerPreset Property Map
    An audio_analyzer_preset block as defined below.
    builtinPreset Property Map
    A builtin_preset block as defined below.
    faceDetectorPreset Property Map
    A face_detector_preset block as defined below.
    onErrorAction String
    A Transform can define more than one output. This property defines what the service should do when one output fails - either continue to produce other outputs, or stop the other outputs. The overall Job state will not reflect failures of outputs that are specified with ContinueJob. Possible values are StopProcessingJob or ContinueJob.
    relativePriority String
    Sets the relative priority of the TransformOutputs within a Transform. This sets the priority that the service uses for processing Transform Outputs. Possible values are High, Normal or Low.
    videoAnalyzerPreset Property Map
    A video_analyzer_preset block as defined below.

    TransformOutputAudioAnalyzerPreset, TransformOutputAudioAnalyzerPresetArgs

    AudioAnalysisMode string
    Possible values are Basic or Standard. Determines the set of audio analysis operations to be performed.
    AudioLanguage string
    The language for the audio payload in the input using the BCP-47 format of 'language tag-region' (e.g: 'en-US'). If you know the language of your content, it is recommended that you specify it. The language must be specified explicitly for AudioAnalysisMode:Basic, since automatic language detection is not included in basic mode. If the language isn't specified, automatic language detection will choose the first language detected and process with the selected language for the duration of the file. It does not currently support dynamically switching between languages after the first language is detected. The automatic detection works best with audio recordings with clearly discernible speech. If automatic detection fails to find the language, transcription would fall back to 'en-US'. The list of supported languages is available here: https://go.microsoft.com/fwlink/?linkid=2109463.
    AudioAnalysisMode string
    Possible values are Basic or Standard. Determines the set of audio analysis operations to be performed.
    AudioLanguage string
    The language for the audio payload in the input using the BCP-47 format of 'language tag-region' (e.g: 'en-US'). If you know the language of your content, it is recommended that you specify it. The language must be specified explicitly for AudioAnalysisMode:Basic, since automatic language detection is not included in basic mode. If the language isn't specified, automatic language detection will choose the first language detected and process with the selected language for the duration of the file. It does not currently support dynamically switching between languages after the first language is detected. The automatic detection works best with audio recordings with clearly discernible speech. If automatic detection fails to find the language, transcription would fall back to 'en-US'. The list of supported languages is available here: https://go.microsoft.com/fwlink/?linkid=2109463.
    audioAnalysisMode String
    Possible values are Basic or Standard. Determines the set of audio analysis operations to be performed.
    audioLanguage String
    The language for the audio payload in the input using the BCP-47 format of 'language tag-region' (e.g: 'en-US'). If you know the language of your content, it is recommended that you specify it. The language must be specified explicitly for AudioAnalysisMode:Basic, since automatic language detection is not included in basic mode. If the language isn't specified, automatic language detection will choose the first language detected and process with the selected language for the duration of the file. It does not currently support dynamically switching between languages after the first language is detected. The automatic detection works best with audio recordings with clearly discernible speech. If automatic detection fails to find the language, transcription would fall back to 'en-US'. The list of supported languages is available here: https://go.microsoft.com/fwlink/?linkid=2109463.
    audioAnalysisMode string
    Possible values are Basic or Standard. Determines the set of audio analysis operations to be performed.
    audioLanguage string
    The language for the audio payload in the input using the BCP-47 format of 'language tag-region' (e.g: 'en-US'). If you know the language of your content, it is recommended that you specify it. The language must be specified explicitly for AudioAnalysisMode:Basic, since automatic language detection is not included in basic mode. If the language isn't specified, automatic language detection will choose the first language detected and process with the selected language for the duration of the file. It does not currently support dynamically switching between languages after the first language is detected. The automatic detection works best with audio recordings with clearly discernible speech. If automatic detection fails to find the language, transcription would fall back to 'en-US'. The list of supported languages is available here: https://go.microsoft.com/fwlink/?linkid=2109463.
    audio_analysis_mode str
    Possible values are Basic or Standard. Determines the set of audio analysis operations to be performed.
    audio_language str
    The language for the audio payload in the input using the BCP-47 format of 'language tag-region' (e.g: 'en-US'). If you know the language of your content, it is recommended that you specify it. The language must be specified explicitly for AudioAnalysisMode:Basic, since automatic language detection is not included in basic mode. If the language isn't specified, automatic language detection will choose the first language detected and process with the selected language for the duration of the file. It does not currently support dynamically switching between languages after the first language is detected. The automatic detection works best with audio recordings with clearly discernible speech. If automatic detection fails to find the language, transcription would fall back to 'en-US'. The list of supported languages is available here: https://go.microsoft.com/fwlink/?linkid=2109463.
    audioAnalysisMode String
    Possible values are Basic or Standard. Determines the set of audio analysis operations to be performed.
    audioLanguage String
    The language for the audio payload in the input using the BCP-47 format of 'language tag-region' (e.g: 'en-US'). If you know the language of your content, it is recommended that you specify it. The language must be specified explicitly for AudioAnalysisMode:Basic, since automatic language detection is not included in basic mode. If the language isn't specified, automatic language detection will choose the first language detected and process with the selected language for the duration of the file. It does not currently support dynamically switching between languages after the first language is detected. The automatic detection works best with audio recordings with clearly discernible speech. If automatic detection fails to find the language, transcription would fall back to 'en-US'. The list of supported languages is available here: https://go.microsoft.com/fwlink/?linkid=2109463.

    TransformOutputBuiltinPreset, TransformOutputBuiltinPresetArgs

    PresetName string
    The built-in preset to be used for encoding videos. The allowed values are AACGoodQualityAudio, AdaptiveStreaming, ContentAwareEncoding, ContentAwareEncodingExperimental, CopyAllBitrateNonInterleaved, H264MultipleBitrate1080p, H264MultipleBitrate720p, H264MultipleBitrateSD, H264SingleBitrate1080p, H264SingleBitrate720p and H264SingleBitrateSD.
    PresetName string
    The built-in preset to be used for encoding videos. The allowed values are AACGoodQualityAudio, AdaptiveStreaming, ContentAwareEncoding, ContentAwareEncodingExperimental, CopyAllBitrateNonInterleaved, H264MultipleBitrate1080p, H264MultipleBitrate720p, H264MultipleBitrateSD, H264SingleBitrate1080p, H264SingleBitrate720p and H264SingleBitrateSD.
    presetName String
    The built-in preset to be used for encoding videos. The allowed values are AACGoodQualityAudio, AdaptiveStreaming, ContentAwareEncoding, ContentAwareEncodingExperimental, CopyAllBitrateNonInterleaved, H264MultipleBitrate1080p, H264MultipleBitrate720p, H264MultipleBitrateSD, H264SingleBitrate1080p, H264SingleBitrate720p and H264SingleBitrateSD.
    presetName string
    The built-in preset to be used for encoding videos. The allowed values are AACGoodQualityAudio, AdaptiveStreaming, ContentAwareEncoding, ContentAwareEncodingExperimental, CopyAllBitrateNonInterleaved, H264MultipleBitrate1080p, H264MultipleBitrate720p, H264MultipleBitrateSD, H264SingleBitrate1080p, H264SingleBitrate720p and H264SingleBitrateSD.
    preset_name str
    The built-in preset to be used for encoding videos. The allowed values are AACGoodQualityAudio, AdaptiveStreaming, ContentAwareEncoding, ContentAwareEncodingExperimental, CopyAllBitrateNonInterleaved, H264MultipleBitrate1080p, H264MultipleBitrate720p, H264MultipleBitrateSD, H264SingleBitrate1080p, H264SingleBitrate720p and H264SingleBitrateSD.
    presetName String
    The built-in preset to be used for encoding videos. The allowed values are AACGoodQualityAudio, AdaptiveStreaming, ContentAwareEncoding, ContentAwareEncodingExperimental, CopyAllBitrateNonInterleaved, H264MultipleBitrate1080p, H264MultipleBitrate720p, H264MultipleBitrateSD, H264SingleBitrate1080p, H264SingleBitrate720p and H264SingleBitrateSD.

    TransformOutputFaceDetectorPreset, TransformOutputFaceDetectorPresetArgs

    AnalysisResolution string
    Possible values are SourceResolution or StandardDefinition. Specifies the maximum resolution at which your video is analyzed. The default behavior is SourceResolution which will keep the input video at its original resolution when analyzed. Using StandardDefinition will resize input videos to standard definition while preserving the appropriate aspect ratio. It will only resize if the video is of higher resolution. For example, a 1920x1080 input would be scaled to 640x360 before processing. Switching to StandardDefinition will reduce the time it takes to process high resolution video. It may also reduce the cost of using this component (see https://azure.microsoft.com/en-us/pricing/details/media-services/#analytics for details). However, faces that end up being too small in the resized video may not be detected.
    AnalysisResolution string
    Possible values are SourceResolution or StandardDefinition. Specifies the maximum resolution at which your video is analyzed. The default behavior is SourceResolution which will keep the input video at its original resolution when analyzed. Using StandardDefinition will resize input videos to standard definition while preserving the appropriate aspect ratio. It will only resize if the video is of higher resolution. For example, a 1920x1080 input would be scaled to 640x360 before processing. Switching to StandardDefinition will reduce the time it takes to process high resolution video. It may also reduce the cost of using this component (see https://azure.microsoft.com/en-us/pricing/details/media-services/#analytics for details). However, faces that end up being too small in the resized video may not be detected.
    analysisResolution String
    Possible values are SourceResolution or StandardDefinition. Specifies the maximum resolution at which your video is analyzed. The default behavior is SourceResolution which will keep the input video at its original resolution when analyzed. Using StandardDefinition will resize input videos to standard definition while preserving the appropriate aspect ratio. It will only resize if the video is of higher resolution. For example, a 1920x1080 input would be scaled to 640x360 before processing. Switching to StandardDefinition will reduce the time it takes to process high resolution video. It may also reduce the cost of using this component (see https://azure.microsoft.com/en-us/pricing/details/media-services/#analytics for details). However, faces that end up being too small in the resized video may not be detected.
    analysisResolution string
    Possible values are SourceResolution or StandardDefinition. Specifies the maximum resolution at which your video is analyzed. The default behavior is SourceResolution which will keep the input video at its original resolution when analyzed. Using StandardDefinition will resize input videos to standard definition while preserving the appropriate aspect ratio. It will only resize if the video is of higher resolution. For example, a 1920x1080 input would be scaled to 640x360 before processing. Switching to StandardDefinition will reduce the time it takes to process high resolution video. It may also reduce the cost of using this component (see https://azure.microsoft.com/en-us/pricing/details/media-services/#analytics for details). However, faces that end up being too small in the resized video may not be detected.
    analysis_resolution str
    Possible values are SourceResolution or StandardDefinition. Specifies the maximum resolution at which your video is analyzed. The default behavior is SourceResolution which will keep the input video at its original resolution when analyzed. Using StandardDefinition will resize input videos to standard definition while preserving the appropriate aspect ratio. It will only resize if the video is of higher resolution. For example, a 1920x1080 input would be scaled to 640x360 before processing. Switching to StandardDefinition will reduce the time it takes to process high resolution video. It may also reduce the cost of using this component (see https://azure.microsoft.com/en-us/pricing/details/media-services/#analytics for details). However, faces that end up being too small in the resized video may not be detected.
    analysisResolution String
    Possible values are SourceResolution or StandardDefinition. Specifies the maximum resolution at which your video is analyzed. The default behavior is SourceResolution which will keep the input video at its original resolution when analyzed. Using StandardDefinition will resize input videos to standard definition while preserving the appropriate aspect ratio. It will only resize if the video is of higher resolution. For example, a 1920x1080 input would be scaled to 640x360 before processing. Switching to StandardDefinition will reduce the time it takes to process high resolution video. It may also reduce the cost of using this component (see https://azure.microsoft.com/en-us/pricing/details/media-services/#analytics for details). However, faces that end up being too small in the resized video may not be detected.

    TransformOutputVideoAnalyzerPreset, TransformOutputVideoAnalyzerPresetArgs

    AudioAnalysisMode string
    Possibles value are Basic or Standard. Determines the set of audio analysis operations to be performed.
    AudioLanguage string
    The language for the audio payload in the input using the BCP-47 format of 'language tag-region' (e.g: 'en-US'). If you know the language of your content, it is recommended that you specify it. The language must be specified explicitly for AudioAnalysisMode:Basic, since automatic language detection is not included in basic mode. If the language isn't specified, automatic language detection will choose the first language detected and process with the selected language for the duration of the file. It does not currently support dynamically switching between languages after the first language is detected. The automatic detection works best with audio recordings with clearly discernible speech. If automatic detection fails to find the language, transcription would fallback to 'en-US'." The list of supported languages is available here: https://go.microsoft.com/fwlink/?linkid=2109463.
    InsightsType string
    Defines the type of insights that you want the service to generate. The allowed values are AudioInsightsOnly, VideoInsightsOnly, and AllInsights. If you set this to AllInsights and the input is audio only, then only audio insights are generated. Similarly if the input is video only, then only video insights are generated. It is recommended that you not use AudioInsightsOnly if you expect some of your inputs to be video only; or use VideoInsightsOnly if you expect some of your inputs to be audio only. Your Jobs in such conditions would error out.
    AudioAnalysisMode string
    Possibles value are Basic or Standard. Determines the set of audio analysis operations to be performed.
    AudioLanguage string
    The language for the audio payload in the input using the BCP-47 format of 'language tag-region' (e.g: 'en-US'). If you know the language of your content, it is recommended that you specify it. The language must be specified explicitly for AudioAnalysisMode:Basic, since automatic language detection is not included in basic mode. If the language isn't specified, automatic language detection will choose the first language detected and process with the selected language for the duration of the file. It does not currently support dynamically switching between languages after the first language is detected. The automatic detection works best with audio recordings with clearly discernible speech. If automatic detection fails to find the language, transcription would fallback to 'en-US'." The list of supported languages is available here: https://go.microsoft.com/fwlink/?linkid=2109463.
    InsightsType string
    Defines the type of insights that you want the service to generate. The allowed values are AudioInsightsOnly, VideoInsightsOnly, and AllInsights. If you set this to AllInsights and the input is audio only, then only audio insights are generated. Similarly if the input is video only, then only video insights are generated. It is recommended that you not use AudioInsightsOnly if you expect some of your inputs to be video only; or use VideoInsightsOnly if you expect some of your inputs to be audio only. Your Jobs in such conditions would error out.
    audioAnalysisMode String
    Possibles value are Basic or Standard. Determines the set of audio analysis operations to be performed.
    audioLanguage String
    The language for the audio payload in the input using the BCP-47 format of 'language tag-region' (e.g: 'en-US'). If you know the language of your content, it is recommended that you specify it. The language must be specified explicitly for AudioAnalysisMode:Basic, since automatic language detection is not included in basic mode. If the language isn't specified, automatic language detection will choose the first language detected and process with the selected language for the duration of the file. It does not currently support dynamically switching between languages after the first language is detected. The automatic detection works best with audio recordings with clearly discernible speech. If automatic detection fails to find the language, transcription would fallback to 'en-US'." The list of supported languages is available here: https://go.microsoft.com/fwlink/?linkid=2109463.
    insightsType String
    Defines the type of insights that you want the service to generate. The allowed values are AudioInsightsOnly, VideoInsightsOnly, and AllInsights. If you set this to AllInsights and the input is audio only, then only audio insights are generated. Similarly if the input is video only, then only video insights are generated. It is recommended that you not use AudioInsightsOnly if you expect some of your inputs to be video only; or use VideoInsightsOnly if you expect some of your inputs to be audio only. Your Jobs in such conditions would error out.
    audioAnalysisMode string
    Possibles value are Basic or Standard. Determines the set of audio analysis operations to be performed.
    audioLanguage string
    The language for the audio payload in the input using the BCP-47 format of 'language tag-region' (e.g: 'en-US'). If you know the language of your content, it is recommended that you specify it. The language must be specified explicitly for AudioAnalysisMode:Basic, since automatic language detection is not included in basic mode. If the language isn't specified, automatic language detection will choose the first language detected and process with the selected language for the duration of the file. It does not currently support dynamically switching between languages after the first language is detected. The automatic detection works best with audio recordings with clearly discernible speech. If automatic detection fails to find the language, transcription would fallback to 'en-US'." The list of supported languages is available here: https://go.microsoft.com/fwlink/?linkid=2109463.
    insightsType string
    Defines the type of insights that you want the service to generate. The allowed values are AudioInsightsOnly, VideoInsightsOnly, and AllInsights. If you set this to AllInsights and the input is audio only, then only audio insights are generated. Similarly if the input is video only, then only video insights are generated. It is recommended that you not use AudioInsightsOnly if you expect some of your inputs to be video only; or use VideoInsightsOnly if you expect some of your inputs to be audio only. Your Jobs in such conditions would error out.
    audio_analysis_mode str
    Possibles value are Basic or Standard. Determines the set of audio analysis operations to be performed.
    audio_language str
    The language for the audio payload in the input using the BCP-47 format of 'language tag-region' (e.g: 'en-US'). If you know the language of your content, it is recommended that you specify it. The language must be specified explicitly for AudioAnalysisMode:Basic, since automatic language detection is not included in basic mode. If the language isn't specified, automatic language detection will choose the first language detected and process with the selected language for the duration of the file. It does not currently support dynamically switching between languages after the first language is detected. The automatic detection works best with audio recordings with clearly discernible speech. If automatic detection fails to find the language, transcription would fallback to 'en-US'." The list of supported languages is available here: https://go.microsoft.com/fwlink/?linkid=2109463.
    insights_type str
    Defines the type of insights that you want the service to generate. The allowed values are AudioInsightsOnly, VideoInsightsOnly, and AllInsights. If you set this to AllInsights and the input is audio only, then only audio insights are generated. Similarly if the input is video only, then only video insights are generated. It is recommended that you not use AudioInsightsOnly if you expect some of your inputs to be video only; or use VideoInsightsOnly if you expect some of your inputs to be audio only. Your Jobs in such conditions would error out.
    audioAnalysisMode String
    Possible values are Basic or Standard. Determines the set of audio analysis operations to be performed.
    audioLanguage String
    The language for the audio payload in the input using the BCP-47 format of 'language tag-region' (e.g: 'en-US'). If you know the language of your content, it is recommended that you specify it. The language must be specified explicitly for AudioAnalysisMode:Basic, since automatic language detection is not included in basic mode. If the language isn't specified, automatic language detection will choose the first language detected and process with the selected language for the duration of the file. It does not currently support dynamically switching between languages after the first language is detected. The automatic detection works best with audio recordings with clearly discernible speech. If automatic detection fails to find the language, transcription would fall back to 'en-US'. The list of supported languages is available here: https://go.microsoft.com/fwlink/?linkid=2109463.
    insightsType String
    Defines the type of insights that you want the service to generate. The allowed values are AudioInsightsOnly, VideoInsightsOnly, and AllInsights. If you set this to AllInsights and the input is audio only, then only audio insights are generated. Similarly if the input is video only, then only video insights are generated. It is recommended that you not use AudioInsightsOnly if you expect some of your inputs to be video only; or use VideoInsightsOnly if you expect some of your inputs to be audio only. Your Jobs in such conditions would error out.

    Import

    Transforms can be imported using the resource id, e.g.

     $ pulumi import azure:media/transform:Transform example /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/group1/providers/Microsoft.Media/mediaservices/media1/transforms/transform1
    

    To learn more about importing existing cloud resources, see Importing resources.

    Package Details

    Repository
    Azure Classic pulumi/pulumi-azure
    License
    Apache-2.0
    Notes
    This Pulumi package is based on the azurerm Terraform Provider.
    azure logo

    We recommend using Azure Native.

    Viewing docs for Azure v4.42.0 (Older version)
    published on Monday, Mar 9, 2026 by Pulumi
      Try Pulumi Cloud free. Your team will thank you.