Commit 0.26.0
dankinsoid committed Mar 15, 2024
1 parent 0d19a27 commit 46293fe
Showing 85 changed files with 6,569 additions and 6,427 deletions.
2 changes: 1 addition & 1 deletion Examples/Search/Search/SearchView.swift
@@ -23,7 +23,7 @@ struct SearchView: View {
  Image(systemName: "magnifyingglass")
  TextField(
    "New York, San Francisco, ...",
-   text: Binding {
+   text: Binding {
      state.searchQuery
    } set: { text in
      $state.searchQueryChanged(query: text)
144 changes: 72 additions & 72 deletions Examples/SpeechRecognition/SpeechRecognition/SpeechClient/Client.swift
@@ -1,90 +1,90 @@
-import VDStore
import Speech
+import VDStore

struct SpeechClient {

-  var finishTask: @Sendable () async -> Void = { }
-  var requestAuthorization: @Sendable () async -> SFSpeechRecognizerAuthorizationStatus = { .notDetermined }
-  var startTask:
-    @Sendable (_ request: SFSpeechAudioBufferRecognitionRequest) async -> AsyncThrowingStream<
-      SpeechRecognitionResult, Error
-    > = { _ in
-      AsyncThrowingStream { nil }
-    }
+  var finishTask: @Sendable () async -> Void = {}
+  var requestAuthorization: @Sendable () async -> SFSpeechRecognizerAuthorizationStatus = { .notDetermined }
+  var startTask:
+    @Sendable (_ request: SFSpeechAudioBufferRecognitionRequest) async -> AsyncThrowingStream<
+      SpeechRecognitionResult, Error
+    > = { _ in
+      AsyncThrowingStream { nil }
+    }

-  enum Failure: Error, Equatable {
-    case taskError
-    case couldntStartAudioEngine
-    case couldntConfigureAudioSession
-  }
+  enum Failure: Error, Equatable {
+    case taskError
+    case couldntStartAudioEngine
+    case couldntConfigureAudioSession
+  }
}

extension SpeechClient {

-  static let previewValue: Self = {
-    let isRecording = ActorIsolated(false)
+  static let previewValue: Self = {
+    let isRecording = ActorIsolated(false)

-    return Self(
-      finishTask: { await isRecording.set(false) },
-      requestAuthorization: { .authorized },
-      startTask: { _ in
-        AsyncThrowingStream { continuation in
-          Task {
-            await isRecording.set(true)
-            var finalText = """
-            Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor \
-            incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud \
-            exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute \
-            irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla \
-            pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui \
-            officia deserunt mollit anim id est laborum.
-            """
-            var text = ""
-            while await isRecording.value {
-              let word = finalText.prefix { $0 != " " }
-              try await Task.sleep(for: .milliseconds(word.count * 50 + .random(in: 0...200)))
-              finalText.removeFirst(word.count)
-              if finalText.first == " " {
-                finalText.removeFirst()
-              }
-              text += word + " "
-              continuation.yield(
-                SpeechRecognitionResult(
-                  bestTranscription: Transcription(
-                    formattedString: text,
-                    segments: []
-                  ),
-                  isFinal: false,
-                  transcriptions: []
-                )
-              )
-            }
-          }
-        }
-      }
-    )
-  }()
+    return Self(
+      finishTask: { await isRecording.set(false) },
+      requestAuthorization: { .authorized },
+      startTask: { _ in
+        AsyncThrowingStream { continuation in
+          Task {
+            await isRecording.set(true)
+            var finalText = """
+            Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor \
+            incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud \
+            exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute \
+            irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla \
+            pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui \
+            officia deserunt mollit anim id est laborum.
+            """
+            var text = ""
+            while await isRecording.value {
+              let word = finalText.prefix { $0 != " " }
+              try await Task.sleep(for: .milliseconds(word.count * 50 + .random(in: 0 ... 200)))
+              finalText.removeFirst(word.count)
+              if finalText.first == " " {
+                finalText.removeFirst()
+              }
+              text += word + " "
+              continuation.yield(
+                SpeechRecognitionResult(
+                  bestTranscription: Transcription(
+                    formattedString: text,
+                    segments: []
+                  ),
+                  isFinal: false,
+                  transcriptions: []
+                )
+              )
+            }
+          }
+        }
+      }
+    )
+  }()
}

final actor ActorIsolated<T> {
-  var value: T
-  init(_ value: T) {
-    self.value = value
-  }
-  func `set`(_ value: T) {
-    self.value = value
-  }
+
+  var value: T
+
+  init(_ value: T) {
+    self.value = value
+  }
+
+  func set(_ value: T) {
+    self.value = value
+  }
}

extension StoreDIValues {

-  @StoreDIValue
-  var speechClient: SpeechClient = valueFor(
-    live: .liveValue,
-    test: SpeechClient(),
-    preview: .previewValue
-  )
+  @StoreDIValue
+  var speechClient: SpeechClient = valueFor(
+    live: .liveValue,
+    test: SpeechClient(),
+    preview: .previewValue
+  )
}
162 changes: 81 additions & 81 deletions Examples/SpeechRecognition/SpeechRecognition/SpeechClient/Live.swift
@@ -1,97 +1,97 @@
-import VDStore
import Speech
+import VDStore

extension SpeechClient {

-  static let liveValue: Self = {
-    let speech = Speech()
-    return Self(
-      finishTask: {
-        await speech.finishTask()
-      },
-      requestAuthorization: {
-        await withCheckedContinuation { continuation in
-          SFSpeechRecognizer.requestAuthorization { status in
-            continuation.resume(returning: status)
-          }
-        }
-      },
-      startTask: { request in
-        await speech.startTask(request: request)
-      }
-    )
-  }()
+  static let liveValue: Self = {
+    let speech = Speech()
+    return Self(
+      finishTask: {
+        await speech.finishTask()
+      },
+      requestAuthorization: {
+        await withCheckedContinuation { continuation in
+          SFSpeechRecognizer.requestAuthorization { status in
+            continuation.resume(returning: status)
+          }
+        }
+      },
+      startTask: { request in
+        await speech.startTask(request: request)
+      }
    )
+  }()
}

private actor Speech {
-  var audioEngine: AVAudioEngine? = nil
-  var recognitionTask: SFSpeechRecognitionTask? = nil
-  var recognitionContinuation: AsyncThrowingStream<SpeechRecognitionResult, Error>.Continuation?
+  var audioEngine: AVAudioEngine?
+  var recognitionTask: SFSpeechRecognitionTask?
+  var recognitionContinuation: AsyncThrowingStream<SpeechRecognitionResult, Error>.Continuation?

-  func finishTask() {
-    self.audioEngine?.stop()
-    self.audioEngine?.inputNode.removeTap(onBus: 0)
-    self.recognitionTask?.finish()
-    self.recognitionContinuation?.finish()
-  }
+  func finishTask() {
+    audioEngine?.stop()
+    audioEngine?.inputNode.removeTap(onBus: 0)
+    recognitionTask?.finish()
+    recognitionContinuation?.finish()
+  }

-  func startTask(
-    request: SFSpeechAudioBufferRecognitionRequest
-  ) -> AsyncThrowingStream<SpeechRecognitionResult, Error> {
+  func startTask(
+    request: SFSpeechAudioBufferRecognitionRequest
+  ) -> AsyncThrowingStream<SpeechRecognitionResult, Error> {

-    return AsyncThrowingStream { continuation in
-      self.recognitionContinuation = continuation
-      let audioSession = AVAudioSession.sharedInstance()
-      do {
-        try audioSession.setCategory(.record, mode: .measurement, options: .duckOthers)
-        try audioSession.setActive(true, options: .notifyOthersOnDeactivation)
-      } catch {
-        continuation.finish(throwing: SpeechClient.Failure.couldntConfigureAudioSession)
-        return
-      }
+    AsyncThrowingStream { continuation in
+      self.recognitionContinuation = continuation
+      let audioSession = AVAudioSession.sharedInstance()
+      do {
+        try audioSession.setCategory(.record, mode: .measurement, options: .duckOthers)
+        try audioSession.setActive(true, options: .notifyOthersOnDeactivation)
+      } catch {
+        continuation.finish(throwing: SpeechClient.Failure.couldntConfigureAudioSession)
+        return
+      }

-      self.audioEngine = AVAudioEngine()
-      let speechRecognizer = SFSpeechRecognizer(locale: Locale(identifier: "en-US"))!
-      self.recognitionTask = speechRecognizer.recognitionTask(with: request) { result, error in
-        switch (result, error) {
-        case let (.some(result), _):
-          continuation.yield(SpeechRecognitionResult(result))
-        case (_, .some):
-          continuation.finish(throwing: SpeechClient.Failure.taskError)
-        case (.none, .none):
-          fatalError("It should not be possible to have both a nil result and nil error.")
-        }
-      }
+      self.audioEngine = AVAudioEngine()
+      let speechRecognizer = SFSpeechRecognizer(locale: Locale(identifier: "en-US"))!
+      self.recognitionTask = speechRecognizer.recognitionTask(with: request) { result, error in
+        switch (result, error) {
+        case let (.some(result), _):
+          continuation.yield(SpeechRecognitionResult(result))
+        case (_, .some):
+          continuation.finish(throwing: SpeechClient.Failure.taskError)
+        case (.none, .none):
+          fatalError("It should not be possible to have both a nil result and nil error.")
+        }
+      }

-      continuation.onTermination = {
-        [
-          speechRecognizer = speechRecognizer,
-          audioEngine = audioEngine,
-          recognitionTask = recognitionTask
-        ]
-        _ in
+      continuation.onTermination = {
+        [
+          speechRecognizer = speechRecognizer,
+          audioEngine = audioEngine,
+          recognitionTask = recognitionTask
+        ]
+        _ in

-        _ = speechRecognizer
-        audioEngine?.stop()
-        audioEngine?.inputNode.removeTap(onBus: 0)
-        recognitionTask?.finish()
-      }
+        _ = speechRecognizer
+        audioEngine?.stop()
+        audioEngine?.inputNode.removeTap(onBus: 0)
+        recognitionTask?.finish()
+      }

-      self.audioEngine?.inputNode.installTap(
-        onBus: 0,
-        bufferSize: 1024,
-        format: self.audioEngine?.inputNode.outputFormat(forBus: 0)
-      ) { buffer, when in
-        request.append(buffer)
-      }
+      self.audioEngine?.inputNode.installTap(
+        onBus: 0,
+        bufferSize: 1024,
+        format: self.audioEngine?.inputNode.outputFormat(forBus: 0)
+      ) { buffer, _ in
+        request.append(buffer)
+      }

-      self.audioEngine?.prepare()
-      do {
-        try self.audioEngine?.start()
-      } catch {
-        continuation.finish(throwing: SpeechClient.Failure.couldntStartAudioEngine)
-        return
-      }
-    }
-  }
+      self.audioEngine?.prepare()
+      do {
+        try self.audioEngine?.start()
+      } catch {
+        continuation.finish(throwing: SpeechClient.Failure.couldntStartAudioEngine)
+        return
+      }
+    }
+  }
}
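
For orientation, here is a minimal, hypothetical call-site sketch (not part of this commit) showing how the SpeechClient closures reformatted above can be driven. It relies only on the interface defined in Client.swift and on the preview client's simulated recording; the function name runPreviewTranscription is illustrative.

import Speech

// Hypothetical usage sketch: drive the preview client, which yields a canned
// transcription while its internal isRecording flag is set.
func runPreviewTranscription() async throws {
  let client = SpeechClient.previewValue
  guard await client.requestAuthorization() == .authorized else { return }

  // Stop the simulated recording after a couple of seconds so the loop can end.
  Task {
    try await Task.sleep(for: .seconds(2))
    await client.finishTask()
  }

  let results = await client.startTask(SFSpeechAudioBufferRecognitionRequest())
  for try await result in results {
    print(result.bestTranscription.formattedString)
  }
}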

0 comments on commit 46293fe
