Group Group Group Group Group Group Group Group Group

Speech Framework Crash

Speech framework crash required condition is false: nullptr == Tap() when the view appears the second time;
as per the internet, audioEngine.inputNode.removeTap(onBus: 0) should resolve the issue but doesn’t . . .

import Speech

class SpeechRecognizer: ObservableObject {
 let audioEngine = AVAudioEngine()
 let speechRecognizer: SFSpeechRecognizer? = SFSpeechRecognizer()
 let request = SFSpeechAudioBufferRecognitionRequest()
 var recognitionTask: SFSpeechRecognitionTask?

 func instantiateSpeechRecognition() {
   SFSpeechRecognizer.requestAuthorization { authStatus in
     switch authStatus {
     case .authorized:
       do {
         try self.startRecording()
       } catch let error {
         print("There was a problem starting recording: \(error.localizedDescription)")
       }
     case .denied:
       print("Speech recognition authorization denied")
     case .restricted:
       print("Not available on this device")
     case .notDetermined:
       print("Not determined")
     default:
       print("xxxx")
     }
   }
 }
 
 func startRecording() throws {
   let node = audioEngine.inputNode
   let recordingFormat = node.outputFormat(forBus: 0)
   node.installTap(onBus: 0, bufferSize: 1024, format: recordingFormat) { buffer, _ in
     self.request.append(buffer)
   }
   audioEngine.prepare()
   try audioEngine.start()
   self.recognize()
 }
 
 func recognize() {
   recognitionTask = speechRecognizer?.recognitionTask(with: request, resultHandler: { result, error in
     if let result = result {
       
       let bestString = result.bestTranscription.formattedString
       var lastString: String = ""
       for segment in result.bestTranscription.segments {
         let indexTo = bestString.index(bestString.startIndex, offsetBy: segment.substringRange.location)
         lastString = String(bestString[indexTo...].lowercased())
       }
       self.triggerButton(resultString: lastString)
     } else if let error = error {
       print(error)
     }
   })
 }
 
 func stopRecording() {
   audioEngine.stop()
   request.endAudio()
   audioEngine.inputNode.removeTap(onBus: 0)
   recognitionTask?.cancel()
   recognitionTask = nil
 }
 
 // TODO in training view
 func triggerButton(resultString: String) {
   print(resultString)
 }
}

struct TrainingView: View {
  @State private var recognizeSpeech = true
  @EnvironmentObject var speechRecognizer: SpeechRecognizer
  
  var body: some View {
    VStack(spacing: 20) {
      Toggle(isOn: $recognizeSpeech) {
        if recognizeSpeech {
          Text("Speech recognition for data collection")
            .fixedSize(horizontal: false, vertical: true)
            .font(.subheadline)
        } else {
          Text("XXX")
        }
      }
      .padding()
      .onReceive([self.recognizeSpeech].publisher.first()) { value in
        if value {
          speechRecognizer.instantiateSpeechRecognition()
        } else {
          speechRecognizer.stopRecording()
        }
      }
    }
  }
}