Question
Microsoft Project Oxford has a nice Speech Recognition API with instructions for Objective-C on iOS. I built it easily by following the getting-started instructions. However, I am having a hard time converting it to Swift.
I created a Swift project first. Then I created the bridging header file (ProjectName-Bridging-Header.h) and inserted the following code into it:
#import "SpeechRecognitionService.h"
I want to convert both the Objective-C header and implementation files into ViewController.swift.
Contents of ViewController.h:
#import <UIKit/UIKit.h>
#import "SpeechRecognitionService.h"
@interface ViewController : UIViewController<SpeechRecognitionProtocol>
{
    NSMutableString* textOnScreen;
    DataRecognitionClient* dataClient;
    MicrophoneRecognitionClient* micClient;
    SpeechRecognitionMode recoMode;
    bool isMicrophoneReco;
    bool isIntent;
    int waitSeconds;
}
@property (nonatomic, strong) IBOutlet UIButton* startButton;
/* In our UI, we have a text box to show the reco results.*/
@property (nonatomic, strong) IBOutlet UITextView* quoteText;
/* Action for pressing the "Start" button */
-(IBAction)startButtonTapped:(id)sender;
@end
Contents of ViewController.m:
#import "ViewController.h"
#import <AVFoundation/AVAudioSession.h>
@interface ViewController (/*private*/)
/* Create a recognition request to interact with the Speech Service.*/
-(void)initializeRecoClient;
@end
NSString* ConvertSpeechRecoConfidenceEnumToString(Confidence confidence);
/* The Main App */
@implementation ViewController
/* Initialization to be done when app starts. */
-(void)viewDidLoad
{
    [super viewDidLoad];
    textOnScreen = [NSMutableString stringWithCapacity: 1000];
    recoMode = SpeechRecognitionMode_ShortPhrase;
    isMicrophoneReco = true;
    isIntent = false;
    waitSeconds = recoMode == SpeechRecognitionMode_ShortPhrase ? 20 : 200;
    [self initializeRecoClient];
}
/* Called when a partial response is received. */
-(void)onPartialResponseReceived:(NSString*) response
{
    dispatch_async(dispatch_get_main_queue(), ^{
        [textOnScreen appendFormat:(@"%@\n"), response];
        self.quoteText.text = response;
    });
}
/* Called when a final response is received. */
-(void)onFinalResponseReceived:(RecognitionResult*)response
{
    bool isFinalDicationMessage = recoMode == SpeechRecognitionMode_LongDictation &&
        (response.RecognitionStatus == RecognitionStatus_EndOfDictation ||
         response.RecognitionStatus == RecognitionStatus_DictationEndSilenceTimeout);
    if (isMicrophoneReco && ((recoMode == SpeechRecognitionMode_ShortPhrase) || isFinalDicationMessage)) {
        [micClient endMicAndRecognition];
    }
    if ((recoMode == SpeechRecognitionMode_ShortPhrase) || isFinalDicationMessage) {
        dispatch_async(dispatch_get_main_queue(), ^{
            [[self startButton] setEnabled:YES];
        });
    }
}
NSString* ConvertSpeechErrorToString(int errorCode)
{
    switch ((SpeechClientStatus)errorCode) {
        case SpeechClientStatus_SecurityFailed: return @"SpeechClientStatus_SecurityFailed";
        case SpeechClientStatus_LoginFailed: return @"SpeechClientStatus_LoginFailed";
        case SpeechClientStatus_Timeout: return @"SpeechClientStatus_Timeout";
        case SpeechClientStatus_ConnectionFailed: return @"SpeechClientStatus_ConnectionFailed";
        case SpeechClientStatus_NameNotFound: return @"SpeechClientStatus_NameNotFound";
        case SpeechClientStatus_InvalidService: return @"SpeechClientStatus_InvalidService";
        case SpeechClientStatus_InvalidProxy: return @"SpeechClientStatus_InvalidProxy";
        case SpeechClientStatus_BadResponse: return @"SpeechClientStatus_BadResponse";
        case SpeechClientStatus_InternalError: return @"SpeechClientStatus_InternalError";
        case SpeechClientStatus_AuthenticationError: return @"SpeechClientStatus_AuthenticationError";
        case SpeechClientStatus_AuthenticationExpired: return @"SpeechClientStatus_AuthenticationExpired";
        case SpeechClientStatus_LimitsExceeded: return @"SpeechClientStatus_LimitsExceeded";
        case SpeechClientStatus_AudioOutputFailed: return @"SpeechClientStatus_AudioOutputFailed";
        case SpeechClientStatus_MicrophoneInUse: return @"SpeechClientStatus_MicrophoneInUse";
        case SpeechClientStatus_MicrophoneUnavailable: return @"SpeechClientStatus_MicrophoneUnavailable";
        case SpeechClientStatus_MicrophoneStatusUnknown: return @"SpeechClientStatus_MicrophoneStatusUnknown";
        case SpeechClientStatus_InvalidArgument: return @"SpeechClientStatus_InvalidArgument";
    }
    return [[NSString alloc] initWithFormat:@"Unknown error: %d\n", errorCode];
}
/* Called when an error is received. */
-(void)onError:(NSString*)errorMessage withErrorCode:(int)errorCode
{
    dispatch_async(dispatch_get_main_queue(), ^{
        [[self startButton] setEnabled:YES];
        [textOnScreen appendString:(@"********* Error Detected *********\n")];
        [textOnScreen appendFormat:(@"%@ %@\n"), errorMessage, ConvertSpeechErrorToString(errorCode)];
        self.quoteText.text = textOnScreen;
    });
}
/* Event fired when the microphone recording status has changed. */
-(void)onMicrophoneStatus:(Boolean)recording
{
    if (!recording) {
        [micClient endMicAndRecognition];
    }
    dispatch_async(dispatch_get_main_queue(), ^{
        if (!recording) {
            [[self startButton] setEnabled:YES];
        }
        self.quoteText.text = textOnScreen;
    });
}
/* Create a recognition request to interact with the Speech Recognition Service.*/
-(void)initializeRecoClient
{
    NSString* language = @"en-us";
    NSString* path = [[NSBundle mainBundle] pathForResource:@"settings" ofType:@"plist"];
    NSDictionary* settings = [[NSDictionary alloc] initWithContentsOfFile:path];
    NSString* primaryOrSecondaryKey = [settings objectForKey:(@"primaryKey")];
    NSString* luisAppID = [settings objectForKey:(@"luisAppID")];
    NSString* luisSubscriptionID = [settings objectForKey:(@"luisSubscriptionID")];
    if (isMicrophoneReco) {
        if (!isIntent) {
            micClient = [SpeechRecognitionServiceFactory createMicrophoneClient:(recoMode)
                                                                   withLanguage:(language)
                                                                        withKey:(primaryOrSecondaryKey)
                                                                   withProtocol:(self)];
        }
        else {
            MicrophoneRecognitionClientWithIntent* micIntentClient;
            micIntentClient = [SpeechRecognitionServiceFactory createMicrophoneClientWithIntent:(language)
                                                                                        withKey:(primaryOrSecondaryKey)
                                                                                  withLUISAppID:(luisAppID)
                                                                                 withLUISSecret:(luisSubscriptionID)
                                                                                   withProtocol:(self)];
            micClient = micIntentClient;
        }
    }
    else {
        if (!isIntent) {
            dataClient = [SpeechRecognitionServiceFactory createDataClient:(recoMode)
                                                              withLanguage:(language)
                                                                   withKey:(primaryOrSecondaryKey)
                                                              withProtocol:(self)];
        }
        else {
            DataRecognitionClientWithIntent* dataIntentClient;
            dataIntentClient = [SpeechRecognitionServiceFactory createDataClientWithIntent:(language)
                                                                                   withKey:(primaryOrSecondaryKey)
                                                                             withLUISAppID:(luisAppID)
                                                                            withLUISSecret:(luisSubscriptionID)
                                                                              withProtocol:(self)];
            dataClient = dataIntentClient;
        }
    }
}
/* Take enum value and produce NSString */
NSString* ConvertSpeechRecoConfidenceEnumToString(Confidence confidence)
{
    switch (confidence) {
        case SpeechRecoConfidence_None:
            return @"None";
        case SpeechRecoConfidence_Low:
            return @"Low";
        case SpeechRecoConfidence_Normal:
            return @"Normal";
        case SpeechRecoConfidence_High:
            return @"High";
    }
}
/* Action for pressing the "Start" button */
-(IBAction)startButtonTapped:(id)sender
{
    [textOnScreen setString:(@"")];
    self.quoteText.text = textOnScreen;
    [[self startButton] setEnabled:NO];
    if (isMicrophoneReco) {
        OSStatus status = [micClient startMicAndRecognition];
        if (status) {
            [textOnScreen appendFormat:(@"Error starting audio. %@\n"), ConvertSpeechErrorToString(status)];
        }
    }
}
/* Action for low memory */
-(void)didReceiveMemoryWarning
{
    [super didReceiveMemoryWarning];
}
@end
I am new to iOS programming. I would appreciate any help with this. Thanks.
Answer 1:
1. Convert your Objective-C view controller to Swift; don't import it via the bridging header.
2. Use the newly converted class the same way you were using it in the Objective-C version.
3. Import only the framework header files in the bridging header (a minimal sketch follows below).
To convert Objective-C code to Swift, you can use Swiftify.
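For step 3, the bridging header then keeps only the SDK's headers; your Swift view controller is no longer exposed there. A minimal sketch of ProjectName-Bridging-Header.h, reusing the header the question already imports:
// ProjectName-Bridging-Header.h
// Expose only the Project Oxford SDK headers to Swift;
// the converted ViewController.swift is not imported here.
#import "SpeechRecognitionService.h"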
EDIT
Here is the converted code, with both files combined:
class ViewController: UIViewController, SpeechRecognitionProtocol {
    // Variable declarations; the reference types are implicitly unwrapped
    // optionals because they are assigned in viewDidLoad / initializeRecoClient
    // before first use.
    var textOnScreen: NSMutableString!
    var dataClient: DataRecognitionClient!
    var micClient: MicrophoneRecognitionClient!
    var recoMode: SpeechRecognitionMode!
    var isMicrophoneReco = false
    var isIntent = false
    var waitSeconds = 0
    // IBOutlets
    @IBOutlet var startButton: UIButton!
    /* In our UI, we have a text box to show the reco results. */
    @IBOutlet var quoteText: UITextView!
    // IBAction
    /* Action for pressing the "Start" button */
    @IBAction func startButtonTapped(sender: AnyObject) {
        textOnScreen.setString("")
        self.quoteText.text = textOnScreen as String
        self.startButton.enabled = false
        if isMicrophoneReco {
            // startMicAndRecognition returns an OSStatus; non-zero indicates failure.
            let status: OSStatus = micClient.startMicAndRecognition()
            if status != 0 {
                textOnScreen.appendFormat("Error starting audio. %@\n",
                                          ConvertSpeechErrorToString(Int(status)))
            }
        }
    }
    /* Initialization to be done when app starts. */
    override func viewDidLoad() {
        super.viewDidLoad()
        textOnScreen = NSMutableString(capacity: 1000)
        recoMode = SpeechRecognitionMode_ShortPhrase
        isMicrophoneReco = true
        isIntent = false
        waitSeconds = recoMode == SpeechRecognitionMode_ShortPhrase ? 20 : 200
        self.initializeRecoClient()
    }
    /* Action for low memory */
    override func didReceiveMemoryWarning() {
        super.didReceiveMemoryWarning()
    }
    /* Called when a partial response is received. */
    func onPartialResponseReceived(response: String) {
        dispatch_async(dispatch_get_main_queue(), {() -> Void in
            self.textOnScreen.appendFormat("%@\n", response)
            self.quoteText.text = response
        })
    }
    /* Called when a final response is received. */
    func onFinalResponseReceived(response: RecognitionResult) {
        let isFinalDicationMessage = recoMode == SpeechRecognitionMode_LongDictation &&
            (response.RecognitionStatus == RecognitionStatus_EndOfDictation ||
             response.RecognitionStatus == RecognitionStatus_DictationEndSilenceTimeout)
        if isMicrophoneReco && ((recoMode == SpeechRecognitionMode_ShortPhrase) || isFinalDicationMessage) {
            micClient.endMicAndRecognition()
        }
        if (recoMode == SpeechRecognitionMode_ShortPhrase) || isFinalDicationMessage {
            dispatch_async(dispatch_get_main_queue(), {() -> Void in
                self.startButton.enabled = true
            })
        }
    }
    /* Take a SpeechClientStatus error code and produce a String.
       The SDK's C enum constants are matched through their raw values,
       since casting an Int with as! (as in the machine-translated version)
       does not compile; the default case handles unknown codes. */
    func ConvertSpeechErrorToString(errorCode: Int) -> String {
        switch errorCode {
        case Int(SpeechClientStatus_SecurityFailed.rawValue): return "SpeechClientStatus_SecurityFailed"
        case Int(SpeechClientStatus_LoginFailed.rawValue): return "SpeechClientStatus_LoginFailed"
        case Int(SpeechClientStatus_Timeout.rawValue): return "SpeechClientStatus_Timeout"
        case Int(SpeechClientStatus_ConnectionFailed.rawValue): return "SpeechClientStatus_ConnectionFailed"
        case Int(SpeechClientStatus_NameNotFound.rawValue): return "SpeechClientStatus_NameNotFound"
        case Int(SpeechClientStatus_InvalidService.rawValue): return "SpeechClientStatus_InvalidService"
        case Int(SpeechClientStatus_InvalidProxy.rawValue): return "SpeechClientStatus_InvalidProxy"
        case Int(SpeechClientStatus_BadResponse.rawValue): return "SpeechClientStatus_BadResponse"
        case Int(SpeechClientStatus_InternalError.rawValue): return "SpeechClientStatus_InternalError"
        case Int(SpeechClientStatus_AuthenticationError.rawValue): return "SpeechClientStatus_AuthenticationError"
        case Int(SpeechClientStatus_AuthenticationExpired.rawValue): return "SpeechClientStatus_AuthenticationExpired"
        case Int(SpeechClientStatus_LimitsExceeded.rawValue): return "SpeechClientStatus_LimitsExceeded"
        case Int(SpeechClientStatus_AudioOutputFailed.rawValue): return "SpeechClientStatus_AudioOutputFailed"
        case Int(SpeechClientStatus_MicrophoneInUse.rawValue): return "SpeechClientStatus_MicrophoneInUse"
        case Int(SpeechClientStatus_MicrophoneUnavailable.rawValue): return "SpeechClientStatus_MicrophoneUnavailable"
        case Int(SpeechClientStatus_MicrophoneStatusUnknown.rawValue): return "SpeechClientStatus_MicrophoneStatusUnknown"
        case Int(SpeechClientStatus_InvalidArgument.rawValue): return "SpeechClientStatus_InvalidArgument"
        default: return String(format: "Unknown error: %d\n", errorCode)
        }
    }
    /* Called when an error is received. */
    func onError(errorMessage: String, withErrorCode errorCode: Int) {
        dispatch_async(dispatch_get_main_queue(), {() -> Void in
            self.startButton.enabled = true
            self.textOnScreen.appendString("********* Error Detected *********\n")
            self.textOnScreen.appendFormat("%@ %@\n", errorMessage, self.ConvertSpeechErrorToString(errorCode))
            self.quoteText.text = self.textOnScreen as String
        })
    }
    /* Event fired when the microphone recording status has changed.
       Boolean here is the C type from the SDK header (imported as UInt8),
       so it is converted to a Swift Bool before use. */
    func onMicrophoneStatus(recording: Boolean) {
        let isRecording = recording != 0
        if !isRecording {
            micClient.endMicAndRecognition()
        }
        dispatch_async(dispatch_get_main_queue(), {() -> Void in
            if !isRecording {
                self.startButton.enabled = true
            }
            self.quoteText.text = self.textOnScreen as String
        })
    }
    /* Take enum value and produce String. As above, the C enum is matched
       through raw values, and Swift requires the switch to be exhaustive. */
    func ConvertSpeechRecoConfidenceEnumToString(confidence: Confidence) -> String {
        switch confidence.rawValue {
        case SpeechRecoConfidence_None.rawValue:
            return "None"
        case SpeechRecoConfidence_Low.rawValue:
            return "Low"
        case SpeechRecoConfidence_Normal.rawValue:
            return "Normal"
        case SpeechRecoConfidence_High.rawValue:
            return "High"
        default:
            return "Unknown"
        }
    }
    /* Create a recognition request to interact with the Speech Recognition Service.
       Note this is a plain method, not an override: it does not exist on UIViewController. */
    func initializeRecoClient() {
        let language = "en-us"
        // Force-unwrapping assumes settings.plist ships in the app bundle.
        let path = NSBundle.mainBundle().pathForResource("settings", ofType: "plist")!
        let settings = NSDictionary(contentsOfFile: path)!
        let primaryOrSecondaryKey = settings["primaryKey"] as! String
        let luisAppID = settings["luisAppID"] as! String
        let luisSubscriptionID = settings["luisSubscriptionID"] as! String
        if isMicrophoneReco {
            if !isIntent {
                micClient = SpeechRecognitionServiceFactory.createMicrophoneClient(recoMode,
                    withLanguage: language,
                    withKey: primaryOrSecondaryKey,
                    withProtocol: self)
            }
            else {
                let micIntentClient = SpeechRecognitionServiceFactory.createMicrophoneClientWithIntent(language,
                    withKey: primaryOrSecondaryKey,
                    withLUISAppID: luisAppID,
                    withLUISSecret: luisSubscriptionID,
                    withProtocol: self)
                micClient = micIntentClient
            }
        }
        else if !isIntent {
            dataClient = SpeechRecognitionServiceFactory.createDataClient(recoMode,
                withLanguage: language,
                withKey: primaryOrSecondaryKey,
                withProtocol: self)
        }
        else {
            let dataIntentClient = SpeechRecognitionServiceFactory.createDataClientWithIntent(language,
                withKey: primaryOrSecondaryKey,
                withLUISAppID: luisAppID,
                withLUISSecret: luisSubscriptionID,
                withProtocol: self)
            dataClient = dataIntentClient
        }
    }
}
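Note that initializeRecoClient reads its credentials from a settings.plist in the app bundle. A minimal sketch of that file, assuming it holds exactly the three keys the code looks up (the values below are placeholders for your own subscription data):
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
    <key>primaryKey</key>
    <string>YOUR_SPEECH_API_KEY</string>
    <key>luisAppID</key>
    <string>YOUR_LUIS_APP_ID</string>
    <key>luisSubscriptionID</key>
    <string>YOUR_LUIS_SUBSCRIPTION_ID</string>
</dict>
</plist>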
Source: https://stackoverflow.com/questions/35471902/convert-microsoft-project-oxford-speech-recognition-from-objective-c-to-swift