-
Notifications
You must be signed in to change notification settings - Fork 85
/
MicDemo.cs
342 lines (319 loc) · 14.3 KB
/
MicDemo.cs
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
/*
Copyright 2020-2023 Picovoice Inc.
You may not use this file except in compliance with the license. A copy of the license is located in the "LICENSE"
file accompanying this source.
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
*/
using System;
using System.Collections.Generic;
using System.Globalization;
using System.IO;
using System.Text;
using System.Threading;
using Pv;
namespace RhinoDemo
{
/// <summary>
/// Microphone Demo for Rhino Speech-to-Intent engine. It creates an input audio stream from a microphone, monitors it,
/// and extracts the intent from the speech command.It optionally saves the recorded audio into a file for further debugging.
/// </summary>
public class MicDemo
{
/// <summary>
/// Creates an input audio stream, instantiates an instance of Rhino object, and infers the intent from spoken commands.
/// </summary>
/// <param name="accessKey">AccessKey obtained from Picovoice Console (https://console.picovoice.ai/).</param>
/// <param name="contextPath">
/// Absolute path to file containing context model (file with `.rhn` extension. A context represents the set of
/// expressions(spoken commands), intents, and intent arguments(slots) within a domain of interest.
/// </param>
/// <param name="modelPath">
/// Absolute path to the file containing model parameters. If not set it will be set to the
/// default location.
/// </param>
/// <param name="sensitivity">
/// Inference sensitivity expressed as floating point value within [0,1]. A higher sensitivity value results in fewer misses
/// at the cost of (potentially) increasing the erroneous inference rate.
/// </param>
/// <param name="endpointDurationSec">
/// Endpoint duration in seconds. An endpoint is a chunk of silence at the end of an
/// utterance that marks the end of spoken command. It should be a positive number within [0.5, 5]. A lower endpoint
/// duration reduces delay and improves responsiveness. A higher endpoint duration assures Rhino doesn't return inference
/// preemptively in case the user pauses before finishing the request.
/// </param>
/// <param name="requireEndpoint">
/// If set to `true`, Rhino requires an endpoint (a chunk of silence) after the spoken command.
/// If set to `false`, Rhino tries to detect silence, but if it cannot, it still will provide inference regardless. Set
/// to `false` only if operating in an environment with overlapping speech (e.g. people talking in the background).
/// </param>
/// <param name="audioDeviceIndex">Optional argument. If provided, audio is recorded from this input device. Otherwise, the default audio input device is used.</param>
/// <param name="outputPath">Optional argument. If provided, recorded audio will be stored in this location at the end of the run.</param>
public static void RunDemo(
    string accessKey,
    string contextPath,
    string modelPath,
    float sensitivity,
    float endpointDurationSec,
    bool requireEndpoint,
    int audioDeviceIndex,
    string outputPath = null)
{
    // init rhino speech-to-intent engine
    using (Rhino rhino = Rhino.Create(
        accessKey,
        contextPath,
        modelPath,
        sensitivity,
        endpointDurationSec,
        requireEndpoint))
    // create recorder whose frame size matches what Rhino expects per Process() call
    using (PvRecorder recorder = PvRecorder.Create(rhino.FrameLength, audioDeviceIndex))
    {
        Console.WriteLine($"Using device: {recorder.SelectedDevice}");

        // Ctrl+C stops the recorder, which ends the processing loop below instead of killing the process.
        Console.CancelKeyPress += delegate (object sender, ConsoleCancelEventArgs e)
        {
            e.Cancel = true;
            recorder.Stop();
            Console.WriteLine("Stopping...");
        };

        // open stream to output file (optional)
        BinaryWriter outputFileWriter = null;
        int totalSamplesWritten = 0;
        try
        {
            if (!string.IsNullOrWhiteSpace(outputPath))
            {
                // FileMode.Create truncates an existing file. The previous FileMode.OpenOrCreate left
                // stale audio bytes beyond the newly written data when overwriting a longer recording,
                // producing a WAV whose "data" chunk was followed by garbage.
                outputFileWriter = new BinaryWriter(new FileStream(outputPath, FileMode.Create, FileAccess.Write));
                // Placeholder header; the real sample count is patched in after recording stops.
                WriteWavHeader(outputFileWriter, 1, 16, recorder.SampleRate, 0);
            }

            // create and start recording
            recorder.Start();
            Console.WriteLine(rhino.ContextInfo);
            Console.WriteLine("Listening...\n");

            while (recorder.IsRecording)
            {
                short[] frame = recorder.Read();

                // Process() returns true once Rhino has finalized an inference for the current utterance.
                bool isFinalized = rhino.Process(frame);
                if (isFinalized)
                {
                    Inference inference = rhino.GetInference();
                    if (inference.IsUnderstood)
                    {
                        Console.WriteLine("{");
                        Console.WriteLine($" intent : '{inference.Intent}'");
                        Console.WriteLine(" slots : {");
                        foreach (KeyValuePair<string, string> slot in inference.Slots)
                        {
                            Console.WriteLine($" {slot.Key} : '{slot.Value}'");
                        }
                        Console.WriteLine(" }");
                        Console.WriteLine("}");
                    }
                    else
                    {
                        Console.WriteLine("Didn't understand the command.");
                    }
                }

                // append the raw PCM frame to the debug recording, if enabled
                if (outputFileWriter != null)
                {
                    foreach (short sample in frame)
                    {
                        outputFileWriter.Write(sample);
                    }
                    totalSamplesWritten += frame.Length;
                }
                Thread.Yield();
            }

            if (outputFileWriter != null)
            {
                // write the final sample count into the header now that recording is done
                WriteWavHeader(outputFileWriter, 1, 16, recorder.SampleRate, totalSamplesWritten);
                Console.Write($"Wrote audio to '{outputPath}'");
            }
        }
        finally
        {
            // Dispose flushes and closes the stream; finally guarantees cleanup even if
            // recorder.Read()/rhino.Process() throws mid-loop (the original leaked the writer then).
            outputFileWriter?.Dispose();
        }
    }
}
/// <summary>
/// Writes the 44-byte RIFF/WAVE header (PCM format) at the start of the output stream.
/// Seeks back to offset 0 first, so it can be called again after recording to patch in
/// the final sample count.
/// </summary>
/// <param name="writer">Output stream to WAV file</param>
/// <param name="channelCount">Number of channels</param>
/// <param name="bitDepth">Number of bits per sample</param>
/// <param name="sampleRate">Sampling rate in Hz</param>
/// <param name="totalSampleCount">Total number of samples written to the file</param>
private static void WriteWavHeader(BinaryWriter writer, ushort channelCount, ushort bitDepth, int sampleRate, int totalSampleCount)
{
    // Nothing to do when audio capture to file is disabled.
    if (writer == null)
    {
        return;
    }

    // Sizes and rates derived from the PCM parameters (expressions kept in the
    // same left-to-right integer-division order as the on-disk format expects).
    int dataChunkByteCount = bitDepth / 8 * totalSampleCount;
    int averageByteRate = sampleRate * channelCount * bitDepth / 8;
    ushort blockAlignment = (ushort)(channelCount * bitDepth / 8);

    writer.Seek(0, SeekOrigin.Begin);

    // "RIFF" chunk descriptor: file size minus the 8 bytes of this tag + size field.
    writer.Write(Encoding.ASCII.GetBytes("RIFF"));
    writer.Write(dataChunkByteCount + 36);
    writer.Write(Encoding.ASCII.GetBytes("WAVE"));

    // "fmt " sub-chunk: 16-byte PCM format description (format code 1 = uncompressed PCM).
    writer.Write(Encoding.ASCII.GetBytes("fmt "));
    writer.Write(16);
    writer.Write((ushort)1);
    writer.Write(channelCount);
    writer.Write(sampleRate);
    writer.Write(averageByteRate);
    writer.Write(blockAlignment);
    writer.Write(bitDepth);

    // "data" sub-chunk header; the PCM samples follow immediately after.
    writer.Write(Encoding.ASCII.GetBytes("data"));
    writer.Write(dataChunkByteCount);
}
/// <summary>
/// Lists available audio input devices.
/// </summary>
public static void ShowAudioDevices()
{
    // Print one line per capture device; the printed index is what the user
    // passes back via --audio_device_index.
    int deviceIndex = 0;
    foreach (string deviceName in PvRecorder.GetAvailableDevices())
    {
        Console.WriteLine($"index: {deviceIndex}, device name: {deviceName}");
        deviceIndex++;
    }
}
/// <summary>
/// Entry point: parses command-line options and dispatches to help text, device listing,
/// or the microphone demo itself. Unknown options are skipped; for value-taking options
/// whose value is missing or unparsable, the built-in default is kept.
/// </summary>
/// <param name="args">Raw command-line arguments.</param>
public static void Main(string[] args)
{
    AppDomain.CurrentDomain.UnhandledException += OnUnhandledException;

    if (args.Length == 0)
    {
        Console.WriteLine(HELP_STR);
        Console.Read();
        return;
    }

    string accessKey = null;
    string contextPath = null;
    string modelPath = null;
    int audioDeviceIndex = -1;
    float sensitivity = 0.5f;
    float endpointDurationSec = 1.0f;
    bool requireEndpoint = true;
    string outputPath = null;
    bool showAudioDevices = false;
    bool showHelp = false;

    // parse command line arguments
    int argIndex = 0;
    while (argIndex < args.Length)
    {
        if (args[argIndex] == "--access_key")
        {
            if (++argIndex < args.Length)
            {
                accessKey = args[argIndex++];
            }
        }
        else if (args[argIndex] == "--context_path")
        {
            if (++argIndex < args.Length)
            {
                contextPath = args[argIndex++];
            }
        }
        else if (args[argIndex] == "--model_path")
        {
            if (++argIndex < args.Length)
            {
                modelPath = args[argIndex++];
            }
        }
        else if (args[argIndex] == "--sensitivity")
        {
            argIndex++;
            // Parse with the invariant culture so "0.5" is accepted regardless of the OS
            // locale; the culture-sensitive overload silently rejects '.' decimals on
            // comma-decimal locales (e.g. de-DE), leaving the default value in place.
            if (argIndex < args.Length
                && float.TryParse(args[argIndex], NumberStyles.Float, CultureInfo.InvariantCulture, out sensitivity))
            {
                argIndex++;
            }
        }
        else if (args[argIndex] == "--endpoint_duration")
        {
            argIndex++;
            // Invariant-culture parse for the same reason as --sensitivity above.
            if (argIndex < args.Length
                && float.TryParse(args[argIndex], NumberStyles.Float, CultureInfo.InvariantCulture, out endpointDurationSec))
            {
                argIndex++;
            }
        }
        else if (args[argIndex] == "--require_endpoint")
        {
            if (++argIndex < args.Length)
            {
                // Case-insensitive ordinal compare instead of ToLower(): same accepted
                // values ("false" in any casing) without culture-dependent lowercasing.
                if (string.Equals(args[argIndex++], "false", StringComparison.OrdinalIgnoreCase))
                {
                    requireEndpoint = false;
                }
            }
        }
        else if (args[argIndex] == "--show_audio_devices")
        {
            showAudioDevices = true;
            argIndex++;
        }
        else if (args[argIndex] == "--audio_device_index")
        {
            if (++argIndex < args.Length && int.TryParse(args[argIndex], out int deviceIndex))
            {
                audioDeviceIndex = deviceIndex;
                argIndex++;
            }
        }
        else if (args[argIndex] == "--output_path")
        {
            if (++argIndex < args.Length)
            {
                outputPath = args[argIndex++];
            }
        }
        else if (args[argIndex] == "-h" || args[argIndex] == "--help")
        {
            showHelp = true;
            argIndex++;
        }
        else
        {
            // unrecognized token: skip it
            argIndex++;
        }
    }

    // print help text and exit
    if (showHelp)
    {
        Console.WriteLine(HELP_STR);
        Console.Read();
        return;
    }

    // print audio device info and exit
    if (showAudioDevices)
    {
        ShowAudioDevices();
        Console.Read();
        return;
    }

    // run demo with parsed arguments (Rhino.Create reports invalid/missing values)
    RunDemo(
        accessKey,
        contextPath,
        modelPath,
        sensitivity,
        endpointDurationSec,
        requireEndpoint,
        audioDeviceIndex,
        outputPath);
}
/// <summary>
/// Last-chance handler for unhandled exceptions: prints the failure, waits for a
/// keypress so the console window stays readable, then exits with a non-zero code.
/// </summary>
private static void OnUnhandledException(object sender, UnhandledExceptionEventArgs e)
{
    string details = e.ExceptionObject.ToString();
    Console.WriteLine(details);
    Console.Read();
    Environment.Exit(1);
}
// Usage text shown when the demo is run with no arguments or with -h/--help.
// (Kept as a single concatenated literal; the exact spacing/newlines are user-facing output.)
private static readonly string HELP_STR = "Available options: \n " +
"\t--access_key (required): AccessKey obtained from Picovoice Console (https://console.picovoice.ai/)\n" +
"\t--context_path (required): Absolute path to context file.\n" +
"\t--model_path: Absolute path to the file containing model parameters.\n" +
"\t--sensitivity: Inference sensitivity. It should be a number within [0, 1]. A higher sensitivity value results in " +
"fewer misses at the cost of (potentially) increasing the erroneous inference rate.\n" +
"\t--endpoint_duration: Endpoint duration in seconds. It should be a positive number within [0.5, 5].\n" +
"\t--require_endpoint: ['true'|'false'] If set to 'false', Rhino does not require an endpoint (chunk of silence) before finishing inference.\n" +
"\t--audio_device_index: Index of input audio device.\n" +
"\t--output_path: Absolute path to recorded audio for debugging.\n" +
"\t--show_audio_devices: Print available recording devices.\n";
}
}