Ingest Client - Multichannel support added #2186
base: master
**TranscriptionDefinition:**

```diff
@@ -15,7 +15,7 @@ private TranscriptionDefinition(
     string description,
     string locale,
     IEnumerable<string> contentUrls,
-    Dictionary<string, string> properties,
+    Dictionary<string, object> properties,
     ModelIdentity model)
 {
     this.DisplayName = name;
```

> **Reviewer:** This looks weird. Why do we need to change this to `object`?
>
> **Author:** Properties for a transcription request are not always just strings; for example, the `channels` value is an array of integers.
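To illustrate the author's point, a minimal sketch of a mixed-type properties map (the property names `punctuationMode` and `channels` follow the batch transcription v3 API; the values are illustrative): with `Dictionary<string, object>`, a string property and an integer-array property can share one map, which `Dictionary<string, string>` cannot express.

```csharp
using System;
using System.Collections.Generic;
using System.Text.Json;

// With object-typed values, string and array properties coexist in one map.
var properties = new Dictionary<string, object>
{
    ["punctuationMode"] = "DictatedAndAutomatic", // a plain string property
    ["channels"] = new[] { 0, 1 },                // an int[] property, not representable as a string value
};

// Serialized into a transcription request body, the array stays a JSON array:
// {"punctuationMode":"DictatedAndAutomatic","channels":[0,1]}
Console.WriteLine(JsonSerializer.Serialize(properties));
```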
```diff
@@ -36,14 +36,14 @@ private TranscriptionDefinition(
 
     public ModelIdentity Model { get; set; }
 
-    public IDictionary<string, string> Properties { get; }
+    public IDictionary<string, object> Properties { get; }
 
     public static TranscriptionDefinition Create(
         string name,
         string description,
         string locale,
         string contentUrl,
-        Dictionary<string, string> properties,
+        Dictionary<string, object> properties,
         ModelIdentity model)
     {
         return new TranscriptionDefinition(name, description, locale, new[] { contentUrl }.ToList(), properties, model);

@@ -54,7 +54,7 @@ public static TranscriptionDefinition Create(
     string description,
     string locale,
     IEnumerable<string> contentUrls,
-    Dictionary<string, string> properties,
+    Dictionary<string, object> properties,
     ModelIdentity model)
 {
     return new TranscriptionDefinition(name, description, locale, contentUrls, properties, model);
```
**ARM deployment template:**

```diff
@@ -136,6 +136,13 @@
         "description": "A value indicating whether diarization (speaker separation) is requested."
       }
     },
+    "Channels": {
+      "defaultValue": "1",
+      "type": "int",
+      "metadata": {
+        "description": "A value indicating whether multiple channels are processed. 0 - Mono, 1 - Stereo."
+      }
+    },
     "AddWordLevelTimestamps": {
       "defaultValue": false,
       "type": "bool",
```

> **Reviewer:** The default in batch transcription is [0,1], i.e. if no channels value is specified, both channel 0 and channel 1 of a stereo file will be transcribed.
>
> **Author:** That is not what happens if we do not pass `Channels` in the transcription request properties; it results in an "Invalid Data" error. Tested on multichannel audios.
```diff
@@ -150,35 +157,11 @@
         "description": "The key for the Text Analytics subscription."
       }
     },
-    "TextAnalyticsRegion": {
-      "defaultValue": "None",
+    "TextAnalyticsEndpoint": {
+      "defaultValue": "",
       "type": "String",
-      "allowedValues": [
-        "None",
-        "centralus",
-        "eastus",
-        "eastus2",
-        "northcentralus",
-        "southcentralus",
-        "westcentralus",
-        "westus",
-        "westus2",
-        "canadacentral",
-        "brazilsouth",
-        "eastasia",
-        "southeastasia",
-        "australiaeast",
-        "centralindia",
-        "japaneast",
-        "japanwest",
-        "koreacentral",
-        "northeurope",
-        "westeurope",
-        "francecentral",
-        "uksouth"
-      ],
       "metadata": {
-        "description": "The region the Text Analytics subscription is associated with. If none is selected, no text analysis will be performed."
+        "description": "The endpoint the Text Analytics subscription is associated with (format should be like https://{resourceName}.cognitiveservices.azure.com or https://{region}.api.cognitive.microsoft.com or similar). If empty, no text analysis will be performed."
       }
     },
     "SentimentAnalysis": {
```

> **Reviewer:** So we support Text Analytics in any region now?
>
> **Author:** Please refer to the latest release of the ARM template in this repo; yes, an endpoint is supported, specifically to support Private Endpoints, which are not regional. Not sure why the latest tag is not merged into master.
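As background for the region-to-endpoint switch, a small sketch of how a client targets an explicit endpoint (this uses the Azure.AI.TextAnalytics SDK for illustration; this repo's actual Text Analytics integration is not shown in the diff, and the resource name and key are placeholders):

```csharp
using System;
using Azure;
using Azure.AI.TextAnalytics;

// An explicit endpoint addresses both regional resources and Private
// Endpoints, which are not regional; a bare region name cannot reach
// the latter. Endpoint and key below are placeholders.
var endpoint = new Uri("https://my-resource.cognitiveservices.azure.com");
var credential = new AzureKeyCredential("<text-analytics-key>");
var client = new TextAnalyticsClient(endpoint, credential);

DocumentSentiment sentiment = client.AnalyzeSentiment("The call went well.");
Console.WriteLine(sentiment.Sentiment); // e.g. Positive
```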
```diff
@@ -194,15 +177,15 @@
       }
     },
     "PiiRedaction": {
-        "defaultValue": "None",
-        "type": "String",
-        "allowedValues": [
-          "None",
-          "UtteranceAndAudioLevel"
-        ],
-        "metadata": {
-          "description": "A value indicating whether personally identifiable information (PII) redaction is requested. Will only be performed if a Text Analytics Key and Region is provided."
-        }
+      "defaultValue": "None",
+      "type": "String",
+      "allowedValues": [
+        "None",
+        "UtteranceAndAudioLevel"
+      ],
+      "metadata": {
+        "description": "A value indicating whether personally identifiable information (PII) redaction is requested. Will only be performed if a Text Analytics Key and Region is provided."
+      }
     },
     "SqlAdministratorLogin": {
       "type": "string",
```

(Indentation-only change.)
```diff
@@ -227,7 +210,7 @@
       }
     },
     "variables": {
-      "Version": "v2.0.5",
+      "Version": "v2.0.12",
       "AudioInputContainer": "audio-input",
       "AudioProcessedContainer": "audio-processed",
       "ErrorFilesOutputContainer": "audio-failed",
```
```diff
@@ -844,6 +827,7 @@
       },
       "properties": {
         "AddDiarization": "[parameters('AddDiarization')]",
+        "Channels": "[parameters('Channels')]",
         "AddWordLevelTimestamps": "[parameters('AddWordLevelTimestamps')]",
         "APPLICATIONINSIGHTS_CONNECTION_STRING": "[reference(resourceId('Microsoft.Insights/components', variables('AppInsightsName')), '2020-02-02-preview').ConnectionString]",
         "AudioInputContainer": "[variables('AudioInputContainer')]",
```
```diff
@@ -932,7 +916,7 @@
         "RetryLimit": "[variables('RetryLimit')]",
         "StartTranscriptionServiceBusConnectionString": "[listKeys(variables('AuthRuleCT'),'2015-08-01').primaryConnectionString]",
         "TextAnalyticsKey": "[concat('@Microsoft.KeyVault(VaultName=', variables('KeyVaultName'), ';SecretName=', variables('TextAnalyticsKeySecretName'), ')')]",
-        "TextAnalyticsRegion": "[parameters('TextAnalyticsRegion')]",
+        "TextAnalyticsEndpoint": "[parameters('TextAnalyticsEndpoint')]",
         "UseSqlDatabase": "[variables('UseSqlDatabase')]",
         "WEBSITE_RUN_FROM_PACKAGE": "[variables('FetchTranscriptionBinary')]",
         "CreateConsolidatedOutputFiles": "[variables('CreateConsolidatedOutputFiles')]",
```
```diff
@@ -942,8 +926,8 @@
         "PiiCategories": "[variables('PiiCategories')]",
         "ConversationPiiCategories": "[variables('ConversationPiiCategories')]",
         "ConversationPiiInferenceSource": "[variables('ConversationPiiInferenceSource')]",
-        "ConversationPiiSetting":"[variables('ConversationPiiRedaction')]",
-        "ConversationSummarizationOptions":"[variables('ConversationSummarizationOptions')]"
+        "ConversationPiiSetting": "[variables('ConversationPiiRedaction')]",
+        "ConversationSummarizationOptions": "[variables('ConversationSummarizationOptions')]"
       }
     }
   ],
```

(Spacing-only change.)
**StartTranscriptionEnvironmentVariables:**

```diff
@@ -55,5 +55,7 @@ public static class StartTranscriptionEnvironmentVariables
     public static readonly string PunctuationMode = Environment.GetEnvironmentVariable(nameof(PunctuationMode), EnvironmentVariableTarget.Process);
 
     public static readonly string StartTranscriptionServiceBusConnectionString = Environment.GetEnvironmentVariable(nameof(StartTranscriptionServiceBusConnectionString), EnvironmentVariableTarget.Process);
+
+    public static readonly int[] Channels = int.TryParse(Environment.GetEnvironmentVariable(nameof(Channels), EnvironmentVariableTarget.Process), out int result) && result == 1 ? Constants.Channels : new int[] { result };
 }
 }
```

> **Reviewer:** Reading channels from environment variables looks weird. Do we ask customers to change the environment variable value for each audio to be transcribed? What about if multiple audios are submitted but with different channel settings?
>
> **Author:** It is a global setting for the Azure Function, applicable to all audios.
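A standalone sketch of the mapping that ternary implements, assuming `Constants.Channels` equals `{ 0, 1 }` (its value is not shown in this diff, so that is an assumption):

```csharp
using System;

// A standalone sketch of the Channels environment-variable mapping.
// Assumption: Constants.Channels == new[] { 0, 1 } (not shown in the diff).
static int[] ResolveChannels(string? raw)
{
    // "1" (the ARM default, meaning "Stereo") selects both channels;
    // any other parseable value selects that single channel index.
    if (int.TryParse(raw, out int value) && value == 1)
    {
        return new[] { 0, 1 }; // stand-in for Constants.Channels
    }

    // If parsing fails, `value` is 0, so a missing or malformed setting
    // falls through to a single-channel { 0 } (mono) request.
    return new[] { value };
}

Console.WriteLine(string.Join(",", ResolveChannels("1")));  // 0,1
Console.WriteLine(string.Join(",", ResolveChannels("0")));  // 0
Console.WriteLine(string.Join(",", ResolveChannels(null))); // 0 (TryParse fails, value defaults to 0)
```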
} |
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Not sure that I understand the purpose of this PR. Multichannel support should be enabled by default in batch transcription. Only if a customer wants to transcribe a specific channel, he needs to specify the channel number in request property. For example, he only wants to transcribe channel 1, but not channel 0, of a stereo audio.
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Please test the default settings - it always returns "Invalid Data" for multi Channel audios, it does not do that by default.
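For the single-channel scenario the reviewer describes, a hedged sketch using the `Create` overload from this PR (the URL and names are placeholders, and the `channels` property name is assumed from the batch transcription API rather than taken from this diff):

```csharp
using System.Collections.Generic;

// Sketch: select a single channel explicitly via request properties.
var properties = new Dictionary<string, object>
{
    ["channels"] = new[] { 1 }, // transcribe channel 1 only, skip channel 0
};

var definition = TranscriptionDefinition.Create(
    name: "stereo-call-channel-1",
    description: "Right channel of a stereo call recording",
    locale: "en-US",
    contentUrl: "https://example.blob.core.windows.net/audio-input/call.wav", // placeholder URL
    properties: properties,
    model: null); // null -> service default model (placeholder)
```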