Ios speech recognition Error Domain = kAFAssistantErrorDomain Code = 216 "(null)"
I am mainly learning ios speech recognition module by following this tutorial: https://medium.com/ios-os-x-development/speech-recognition-with-swift-in-ios-10-50d5f4e59c48
But when I tested it on my iphone6 I always got this error: Error Domain = kAFAssistantErrorDomain Code = 216 "(null)"
I searched the internet for this but found very rare information on this.
Here is my code:
//
// ViewController.swift
// speech_sample
//
// Created by Peizheng Ma on 6/22/17.
// Copyright © 2017 Peizheng Ma. All rights reserved.
//
import UIKit
import AVFoundation
import Speech
class ViewController: UIViewController, SFSpeechRecognizerDelegate {
//MARK: speech recognize variables
let audioEngine = AVAudioEngine()
let speechRecognizer: SFSpeechRecognizer? = SFSpeechRecognizer(locale: Locale.init(identifier: "en-US"))
var request = SFSpeechAudioBufferRecognitionRequest()
var recognitionTask: SFSpeechRecognitionTask?
var isRecording = false
override func viewDidLoad() {
// super.viewDidLoad()
// get Authorization
self.requestSpeechAuthorization()
}
override func didReceiveMemoryWarning() {
super.didReceiveMemoryWarning()
// Dispose of any resources that can be recreated.
}
//MARK: properties
@IBOutlet weak var detectText: UILabel!
@IBOutlet weak var startButton: UIButton!
//MARK: actions
@IBAction func startButtonTapped(_ sender: UIButton) {
if isRecording == true {
audioEngine.stop()
// if let node = audioEngine.inputNode {
// node.removeTap(onBus: 0)
// }
audioEngine.inputNode?.removeTap(onBus: 0)
// Indicate that the audio source is finished and no more audio will be appended
self.request.endAudio()
// Cancel the previous task if it running
if let recognitionTask = recognitionTask {
recognitionTask.cancel()
self.recognitionTask = nil
}
//recognitionTask?.cancel()
//self.recognitionTask = nil
isRecording = false
startButton.backgroundColor = UIColor.gray
} else {
self.recordAndRecognizeSpeech()
isRecording = true
startButton.backgroundColor = UIColor.red
}
}
//MARK: show alert
func showAlert(title: String, message: String, handler: ((UIAlertAction) -> Swift.Void)? = nil) {
DispatchQueue.main.async { [unowned self] in
let alertController = UIAlertController(title: title, message: message, preferredStyle: .alert)
alertController.addAction(UIAlertAction(title: "OK", style: .cancel, handler: handler))
self.present(alertController, animated: true, completion: nil)
}
}
//MARK: Recognize Speech
func recordAndRecognizeSpeech() {
// Setup Audio Session
guard let node = audioEngine.inputNode else { return }
let recordingFormat = node.outputFormat(forBus: 0)
node.installTap(onBus: 0, bufferSize: 1024, format: recordingFormat) { buffer, _ in
self.request.append(buffer)
}
audioEngine.prepare()
do {
try audioEngine.start()
} catch {
self.showAlert(title: "SpeechNote", message: "There has been an audio engine error.", handler: nil)
return print(error)
}
guard let myRecognizer = SFSpeechRecognizer() else {
self.showAlert(title: "SpeechNote", message: "Speech recognition is not supported for your current locale.", handler: nil)
return
}
if !myRecognizer.isAvailable {
self.showAlert(title: "SpeechNote", message: "Speech recognition is not currently available. Check back at a later time.", handler: nil)
// Recognizer is not available right now
return
}
recognitionTask = speechRecognizer?.recognitionTask(with: request, resultHandler: { result, error in
if let result = result {
let bestString = result.bestTranscription.formattedString
self.detectText.text = bestString
// var lastString: String = ""
// for segment in result.bestTranscription.segments {
// let indexTo = bestString.index(bestString.startIndex, offsetBy: segment.substringRange.location)
// lastString = bestString.substring(from: indexTo)
// }
// self.checkForColorsSaid(resultString: lastString)
} else if let error = error {
self.showAlert(title: "SpeechNote", message: "There has been a speech recognition error.", handler: nil)
print(error)
}
})
}
//MARK: - Check Authorization Status
func requestSpeechAuthorization() {
SFSpeechRecognizer.requestAuthorization { authStatus in
OperationQueue.main.addOperation {
switch authStatus {
case .authorized:
self.startButton.isEnabled = true
case .denied:
self.startButton.isEnabled = false
self.detectText.text = "User denied access to speech recognition"
case .restricted:
self.startButton.isEnabled = false
self.detectText.text = "Speech recognition restricted on this device"
case .notDetermined:
self.startButton.isEnabled = false
self.detectText.text = "Speech recognition not yet authorized"
}
}
}
}
}
Many thanks.
source to share
I had the same problem following the same (excellent) tutorial, even using the example code on GitHub. To solve this problem, I had to do two things:
First, add request.endAudio()
at the beginning of the code to stop recording in the startButtonTapped action. This marks the end of the recording. I see you have already done this in your example code.
Secondly, in the recordAndRecognizeSpeech function, when the "registrationTask" function is run, if no speech was found, then the "result" will be zero and an error will occur. So, I tested for result != nil
before trying to assign the result.
So, the code for these two functions looks like this: 1. Updated startButtonTapped:
@IBAction func startButtonTapped(_ sender: UIButton) {
if isRecording {
request.endAudio() // Added line to mark end of recording
audioEngine.stop()
if let node = audioEngine.inputNode {
node.removeTap(onBus: 0)
}
recognitionTask?.cancel()
isRecording = false
startButton.backgroundColor = UIColor.gray
} else {
self.recordAndRecognizeSpeech()
isRecording = true
startButton.backgroundColor = UIColor.red
}
}
And 2. Update the line recordAndRecognizeSpeech
in the line recognitionTask = ...
:
recognitionTask = speechRecognizer?.recognitionTask(with: request, resultHandler: { (result, error) in
if result != nil { // check to see if result is empty (i.e. no speech found)
if let result = result {
let bestString = result.bestTranscription.formattedString
self.detectedTextLabel.text = bestString
var lastString: String = ""
for segment in result.bestTranscription.segments {
let indexTo = bestString.index(bestString.startIndex, offsetBy: segment.substringRange.location)
lastString = bestString.substring(from: indexTo)
}
self.checkForColoursSaid(resultString: lastString)
} else if let error = error {
self.sendAlert(message: "There has been a speech recognition error")
print(error)
}
}
})
I hope this helps you.
source to share
Hey, I was getting the same error, but now her work is absurdly fine. Hope this code helps you too :).
import UIKit
import Speech
class SpeechVC: UIViewController {
@IBOutlet weak var slabel: UILabel!
@IBOutlet weak var sbutton: UIButton!
let audioEngine = AVAudioEngine()
let SpeechRecognizer : SFSpeechRecognizer? = SFSpeechRecognizer()
let request = SFSpeechAudioBufferRecognitionRequest()
var recognitionTask:SFSpeechRecognitionTask?
var isRecording = false
override func viewDidLoad() {
super.viewDidLoad()
self.requestSpeechAuthorization()
// Do any additional setup after loading the view, typically from a nib.
}
func recordAndRecognizeSpeech()
{
guard let node = audioEngine.inputNode else { return }
let recordingFormat = node.outputFormat(forBus: 0)
node.installTap(onBus: 0, bufferSize: 1024, format: recordingFormat) { buffer , _ in
self.request.append(buffer)
}
audioEngine.prepare()
do
{
try audioEngine.start()
}catch
{
return print(error)
}
guard let myRecognizer = SFSpeechRecognizer() else {
return
}
if !myRecognizer.isAvailable
{
return
}
recognitionTask = SpeechRecognizer?.recognitionTask(with: request, resultHandler: { result, error in
if let result = result
{
let bestString = result.bestTranscription.formattedString
self.slabel.text = bestString
var lastString : String = ""
for segment in result.bestTranscription.segments
{
let indexTo = bestString.index(bestString.startIndex, offsetBy: segment.substringRange.location)
lastString = bestString.substring(from: indexTo)
}
}else if let error = error
{
print(error)
}
})
}
@IBAction func startAction(_ sender: Any) {
if isRecording == true
{
audioEngine.stop()
recognitionTask?.cancel()
isRecording = false
sbutton.backgroundColor = UIColor.gray
}
else{
self.recordAndRecognizeSpeech()
isRecording = true
sbutton.backgroundColor = UIColor.red
}
}
func cancelRecording()
{
audioEngine.stop()
if let node = audioEngine.inputNode
{
audioEngine.inputNode?.removeTap(onBus: 0)
}
recognitionTask?.cancel()
}
func requestSpeechAuthorization()
{
SFSpeechRecognizer.requestAuthorization { authStatus in
OperationQueue.main.addOperation {
switch authStatus
{
case .authorized :
self.sbutton.isEnabled = true
case .denied :
self.sbutton.isEnabled = false
self.slabel.text = "User denied access to speech recognition"
case .restricted :
self.sbutton.isEnabled = false
self.slabel.text = "Speech Recognition is restricted on this Device"
case .notDetermined :
self.sbutton.isEnabled = false
self.slabel.text = "Speech Recognition not yet authorized"
}
}
}
}
}
source to share
This will prevent two errors: the aforementioned Code = 216 and the "SFSpeechAudioBufferRecognitionRequest cannot be reused" error .
-
Stop recognition with completion, not canceled
-
Stop audio
like this:
// stop recognition
recognitionTask?.finish()
recognitionTask = nil
// stop audio
request.endAudio()
audioEngine.stop()
audioEngine.inputNode.removeTap(onBus: 0) // Remove tap on bus when stopping recording.
PS audioEngine.inputNode doesn't seem to be an optional value anymore, so I used no if I could construct.
source to share