Merge branch 'speech_to_text' of https://gitlab.com/Cloud_Solution/doctor_app_flutter into sultan

pull/159/head
Sultan Khan 4 years ago
commit dca7cf9b05

@@ -155,6 +155,13 @@ packages:
      url: "https://pub.dartlang.org"
    source: hosted
    version: "1.0.2"
  clock:
    dependency: transitive
    description:
      name: clock
      url: "https://pub.dartlang.org"
    source: hosted
    version: "1.0.1"
  code_builder:
    dependency: transitive
    description:
@@ -635,6 +642,13 @@ packages:
      url: "https://pub.dartlang.org"
    source: hosted
    version: "1.7.0"
  speech_to_text:
    dependency: "direct main"
    description:
      path: speech_to_text
      relative: true
    source: path
    version: "0.0.0"
  stack_trace:
    dependency: transitive
    description:

@@ -52,6 +52,10 @@ dependencies:
  #flutter_svg: ^0.17.4
  percent_indicator: "^2.1.1"

  #speech to text
  speech_to_text:
    path: speech_to_text

dev_dependencies:
  flutter_test:
    sdk: flutter

@@ -0,0 +1,19 @@
name: build

on:
  push:
    branches:
    - master

jobs:
  test:
    name: Test on Ubuntu
    runs-on: ubuntu-latest
    steps:
    - uses: actions/checkout@v1
    - uses: subosito/flutter-action@v1.3.2
      with:
        flutter-version: '1.17.1'
        channel: 'stable'
    - run: flutter pub get
    - run: flutter test

@@ -0,0 +1,11 @@
.DS_Store
.dart_tool/
.packages
.pub/
build/
coverage/
example/.flutter-plugins-dependencies
**/ios/Flutter/flutter_export_environment.sh
android/.idea/

@@ -0,0 +1,10 @@
# This file tracks properties of this Flutter project.
# Used by Flutter tool to assess capabilities and perform upgrades etc.
#
# This file should be version controlled and should not be manually edited.

version:
  revision: 2d2a1ffec95cc70a3218872a2cd3f8de4933c42f
  channel: stable

project_type: plugin

@@ -0,0 +1,166 @@
# Changelog
## 2.3.0
### New
* new parameter `onDevice` on the `listen` method enforces on-device recognition for sensitive content
* `onSoundLevelChange` is now supported on iOS
* added compile troubleshooting help to README.md
* `SpeechToTextProvider` is an alternate and simpler way to interact with the `SpeechToText` plugin.
* new `provider_example.dart` example for usage of `SpeechToTextProvider`.
### Fix
* iOS now handles some conflicts with other applications better, for example keeping speech working after phone calls
## 2.2.0
### New
* improved error handling and logging in the iOS implementation
* added general guides for iOS to the README
* moved stress testing out of the main example
* iOS now defaults to using the speaker rather than the receiver for start/stop sounds when no headphones are connected
### Fix
* iOS now properly deactivates the audio session when no longer listening
* start and stop sounds on iOS should be more reliable when available
## 2.1.0
### Breaking
* `listenFor` now calls `stop` rather than `cancel` as this seems like more useful behaviour
### Fix
* Android no longer stops or cancels the speech recognizer if it has already been shut down by a
timeout or other platform behaviour.
* Android no longer tries to restart the listener when it is already active
* Now properly notifies errors that happen after listening stops due to a platform callback rather than a
client request. See https://github.com/csdcorp/speech_to_text/issues/51
## 2.0.1
### Fix
* Resolves an issue with the Android implementation not handling permission requests properly in apps
that didn't use the 1.12.x plugin APIs for registration. The permission dialog would not appear and
permission was denied.
## 2.0.0
### Breaking
* Upgraded to the new Flutter 1.12 plugin structure, may work with older Flutter versions but not guaranteed
### New
* the plugin now requests both speech and microphone permission on initialize on iOS
* added `debugLogging` parameter to the `initialize` method to control native logging
### Fix
* The Android implementation now blocks duplicate results notifications. It appears that at least on some
Android versions the final `onResults` notification is sent twice when Android automatically
terminates the session due to a pause. The de-duplication looks for successive notifications
with < 100 ms between them and blocks the second. If you miss any `onResult` notifications please post
an issue.
## 1.1.0
### New
* `error_timeout` has been separated into `error_network_timeout` and `error_speech_timeout`
## 1.0.0
### New
* `hasPermission` to check for the current permission without bringing up the system dialog
* `listen` has a new optional `cancelOnError` parameter to support automatically canceling
a listening session on a permanent error.
* `listen` has a new optional `partialResults` parameter that controls whether the callback
receives partial or only final results.
## 0.8.0
### New
* speech recognizer now exposes multiple possible transcriptions for each recognized phrase
* `alternates` list on `SpeechRecognitionResult` exposes alternate transcriptions of the voice input
* `confidence` on `SpeechRecognitionResult` gives an estimate of confidence in the transcription
* `isConfident` on `SpeechRecognitionResult` supports testing confidence
* `hasConfidenceRating` on `SpeechRecognitionResult` indicates if confidence was provided from the device
* new `SpeechRecognitionWords` class gives details on per-transcription words and confidence
### Fix
* `speechRecognizer availabilityDidChange` was crashing if invoked, due to an invalid parameter type
* Added iOS platform 10 to example Podfile to resolve compilation warnings
## 0.7.2
### Breaking
* Upgrade Swift to version 5 to match Flutter. Projects using this plugin must now switch to 5.
## 0.7.1
### Fix
* Upgrade Kotlin to 1.3.50 to match the Flutter 1.12 version
* Upgrade Gradle build to 3.5.0 to match the Flutter 1.12 version
* Android version of the plugin was repeating the system default locale in the `locales` list
## 0.7.0
### New
* `locales` method returns the list of available languages for speech
* new optional `localeId` parameter on the `listen` method supports choosing the comprehension language separately from the current system locale.
### Breaking
* `cancel` and `stop` are now async
## 0.6.3
### Fix
* request permission fix on Android to ensure it doesn't conflict with other requests
## 0.6.2
### Fix
* channel invoke wasn't being done on the main thread in iOS
## 0.6.1
### Fix
* listening sound was failing due to timing, now uses play and record mode on iOS.
## 0.6.0
### Breaking
* The filenames for the optional sounds for iOS have changed.
### New
* Added an optional `listenFor` parameter to set a max duration to listen for speech and then automatically cancel.
### Fix
* Was failing to play sounds because of record mode. Now plays sounds before going into record mode and after coming out.
* Status listener was being ignored, now properly notifies on status changes.
## 0.5.1
* Fixes a problem where the recognizer left the AVAudioSession in record mode which meant that subsequent sounds couldn't be played.
## 0.5.0
Initial draft with limited functionality, supports:
* initializing speech recognition
* asking the user for permission if required
* listening for recognized speech
* canceling the current recognition session
* stopping the current recognition session
* Android and iOS 10+ support
Missing:
* some error handling
* testing across multiple OS versions
* and more, to be discovered...

@@ -0,0 +1,29 @@
BSD 3-Clause License
Copyright (c) 2019, Corner Software Development Corp.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

@@ -0,0 +1,150 @@
# speech_to_text
[![pub package](https://img.shields.io/badge/pub-v2.3.0-blue)](https://pub.dartlang.org/packages/speech_to_text) [![build status](https://github.com/csdcorp/speech_to_text/workflows/build/badge.svg)](https://github.com/csdcorp/speech_to_text/actions?query=workflow%3Abuild)
A library that exposes device specific speech recognition capability.
This plugin contains a set of classes that make it easy to use the speech recognition
capabilities of the mobile device in Flutter. It supports both Android and iOS. The
target use cases for this library are commands and short phrases, not continuous spoken
conversation or always-on listening.
## Recent Updates
The 2.3.0 version adds `SpeechToTextProvider` as a simpler way to interact with the plugin. Check out
the new `provider_example.dart` for intended usage.
The 2.2.0 version improves audio session handling and start / stop sound playback on iOS.
*Note*: Feedback from any test devices is welcome.
## Using
To recognize text from the microphone import the package and call the plugin, like so:
```dart
import 'package:speech_to_text/speech_to_text.dart' as stt;

stt.SpeechToText speech = stt.SpeechToText();
bool available = await speech.initialize( onStatus: statusListener, onError: errorListener );
if ( available ) {
    speech.listen( onResult: resultListener );
}
else {
    print("The user has denied the use of speech recognition.");
}
// some time later...
speech.stop();
```
### Initialize once
The `initialize` method only needs to be called once per application session. After that `listen`,
`stop`, and `cancel` can be used to interact with the plugin. Subsequent calls to `initialize`
are ignored, which is safe but does mean that the `onStatus` and `onError` callbacks cannot be reset after
the first call to `initialize`. For that reason there should be only one instance of the plugin per
application. The `SpeechToTextProvider` is one way to create a single instance and easily reuse it in
multiple widgets.
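As a sketch of that pattern (the import path and member names below are recalled from the 2.3.0 API, so treat them as assumptions and check `provider_example.dart` for the canonical usage):
```dart
import 'package:speech_to_text/speech_to_text.dart';
import 'package:speech_to_text/speech_to_text_provider.dart'; // assumed path

// One plugin instance and one provider wrapping it, shared by all widgets.
final SpeechToText speech = SpeechToText();
final SpeechToTextProvider speechProvider = SpeechToTextProvider(speech);

Future<void> setupSpeech() async {
  // Forwards to SpeechToText.initialize, so this should also run only once.
  bool available = await speechProvider.initialize();
  if (available) {
    // Widgets share speechProvider instead of creating their own SpeechToText,
    // avoiding the callback-reset limitation described above.
    speechProvider.listen(partialResults: true);
  }
}
```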
## Permissions
Applications using this plugin require user permissions.
### iOS
Add the following keys to your _Info.plist_ file, located in `<project root>/ios/Runner/Info.plist`:
* `NSSpeechRecognitionUsageDescription` - describe why your app uses speech recognition. This is called _Privacy - Speech Recognition Usage Description_ in the visual editor.
* `NSMicrophoneUsageDescription` - describe why your app needs access to the microphone. This is called _Privacy - Microphone Usage Description_ in the visual editor.
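For example, the relevant _Info.plist_ entries might look like the following sketch (the description strings are placeholders; supply your own wording):
```xml
<key>NSSpeechRecognitionUsageDescription</key>
<string>Recognizes words as you speak them and displays them.</string>
<key>NSMicrophoneUsageDescription</key>
<string>Listens for speech on the device microphone.</string>
```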
### Android
Add the record audio permission to your _AndroidManifest.xml_ file, located in `<project root>/android/app/src/main/AndroidManifest.xml`.
* `android.permission.RECORD_AUDIO` - this permission is required for microphone access.
* `android.permission.INTERNET` - this permission is required because speech recognition may use remote services.
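Both entries go directly inside the `<manifest>` element, exactly as in the example app's manifest later in this commit:
```xml
<uses-permission android:name="android.permission.RECORD_AUDIO"/>
<uses-permission android:name="android.permission.INTERNET"/>
```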
## Adding Sounds for iOS (optional)
Android automatically plays system sounds when speech listening starts or stops but iOS does not. This plugin supports playing sounds to indicate listening status on iOS if sound files are available as assets in the application. To enable sounds in an application using this plugin, add the sound files to the project and reference them in the assets section of the application `pubspec.yaml`. The location and filenames of the sound files must exactly match what
is shown below or they will not be found. The example application for the plugin shows the usage. *Note*: these files should be very short as they delay
the start / end of the speech recognizer until the sound playback is complete.
```yaml
assets:
- assets/sounds/speech_to_text_listening.m4r
- assets/sounds/speech_to_text_cancel.m4r
- assets/sounds/speech_to_text_stop.m4r
```
* `speech_to_text_listening.m4r` - played when the `listen` method is called.
* `speech_to_text_cancel.m4r` - played when the `cancel` method is called.
* `speech_to_text_stop.m4r` - played when the `stop` method is called.
## Troubleshooting
### SDK version error trying to compile for Android
```
Manifest merger failed : uses-sdk:minSdkVersion 16 cannot be smaller than version 21 declared in library [:speech_to_text]
```
The speech_to_text plugin requires at least Android SDK 21 because some of the speech functions in Android
were only introduced in that version. To fix this error, change the `android/app/build.gradle` entry to reflect
this version. Here's what the relevant part of that file looked like as of this writing:
```
defaultConfig {
    applicationId "com.example.app"
    minSdkVersion 21
    targetSdkVersion 28
    versionCode flutterVersionCode.toInteger()
    versionName flutterVersionName
    testInstrumentationRunner "androidx.test.runner.AndroidJUnitRunner"
}
```
### Incorrect Swift version trying to compile for iOS
```
/Users/markvandergon/flutter/.pub-cache/hosted/pub.dartlang.org/speech_to_text-1.1.0/ios/Classes/SwiftSpeechToTextPlugin.swift:224:44: error: value of type 'SwiftSpeechToTextPlugin' has no member 'AVAudioSession'
rememberedAudioCategory = self.AVAudioSession.Category
~~~~ ^~~~~~~~~~~~~~
/Users/markvandergon/flutter/.pub-cache/hosted/pub.dartlang.org/speech_to_text-1.1.0/ios/Classes/SwiftSpeechToTextPlugin.swift:227:63: error: type 'Int' has no member 'notifyOthersOnDeactivation'
try self.audioSession.setActive(true, withFlags: .notifyOthersOnDeactivation)
```
This happens when the Swift language version is not set correctly. See this thread for help: https://github.com/csdcorp/speech_to_text/issues/45.
### Swift not supported trying to compile for iOS
```
`speech_to_text` does not specify a Swift version and none of the targets (`Runner`) integrating it have the `SWIFT_VERSION` attribute set.
```
This usually happens for older projects that only support Objective-C. See this thread for help: https://github.com/csdcorp/speech_to_text/issues/88.
### Not working on a particular Android device
The symptom for this issue is that the `initialize` method will always fail. If you turn on debug logging
using the `debugLogging: true` flag on the `initialize` method you'll see `'Speech recognition unavailable'`
in the Android log. There's a lengthy discussion of this in https://github.com/csdcorp/speech_to_text/issues/36.
The issue seems to be that the recognizer is not always automatically enabled on the device. Two
key things helped resolve the issue, in this case at least.
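For reference, turning that logging on is just one extra argument to `initialize` (a sketch reusing the listener names from the usage example above):
```dart
bool available = await speech.initialize(
  onStatus: statusListener,
  onError: errorListener,
  debugLogging: true, // emits plugin diagnostics to the Android log
);
```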
#### First
1. Go to Google Play
2. Search for 'Google'
3. You should find this app: https://play.google.com/store/apps/details?id=com.google.android.googlequicksearchbox
4. If it is 'Disabled', enable it

This is the SO post that helped: https://stackoverflow.com/questions/28769320/how-to-check-wether-speech-recognition-is-available-or-not
#### Second
Ensure the app has the required permissions. The symptom for this is that you get a permanent error notification
`error_audio_error` when starting a listen session. Here's a Stack Overflow post that addresses it:
https://stackoverflow.com/questions/46376193/android-speechrecognizer-audio-recording-error
Here's the important excerpt:
>You should go to system setting, Apps, Google app, then enable its permission of microphone.
### iOS recognition guidelines
Apple has quite a good guide on the user experience for using speech; the original is here:
https://developer.apple.com/documentation/speech/sfspeechrecognizer. This is the section that I think is particularly relevant:
>#### Create a Great User Experience for Speech Recognition
>Here are some tips to consider when adding speech recognition support to your app.
>**Be prepared to handle failures caused by speech recognition limits.** Because speech recognition is a network-based service, limits are enforced so that the service can remain freely available to all apps. Individual devices may be limited in the number of recognitions that can be performed per day, and each app may be throttled globally based on the number of requests it makes per day. If a recognition request fails quickly (within a second or two of starting), check to see if the recognition service became unavailable. If it is, you may want to ask users to try again later.
>**Plan for a one-minute limit on audio duration.** Speech recognition places a relatively high burden on battery life and network usage. To minimize this burden, the framework stops speech recognition tasks that last longer than one minute. This limit is similar to the one for keyboard-related dictation.
>**Remind the user when your app is recording.** For example, display a visual indicator and play sounds at the beginning and end of speech recognition to help users understand that they're being actively recorded. You can also display speech as it is being recognized so that users understand what your app is doing and see any mistakes made during the recognition process.
>**Do not perform speech recognition on private or sensitive information.** Some speech is not appropriate for recognition. Don't send passwords, health or financial data, and other sensitive speech for recognition.
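As a final note on the one-minute limit above, the plugin's `listenFor` parameter (added in 0.6.0, and calling `stop` rather than `cancel` as of 2.1.0) can end a session before the platform cuts it off. A sketch, assuming `listenFor` takes a `Duration`:
```dart
speech.listen(
  onResult: resultListener,
  // Stop well before the platform-imposed one minute cutoff.
  listenFor: Duration(seconds: 30),
);
```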

@@ -0,0 +1,6 @@
<?xml version="1.0" encoding="UTF-8"?>
<classpath>
<classpathentry kind="con" path="org.eclipse.jdt.launching.JRE_CONTAINER/org.eclipse.jdt.internal.debug.ui.launcher.StandardVMType/JavaSE-1.8/"/>
<classpathentry kind="con" path="org.eclipse.buildship.core.gradleclasspathcontainer"/>
<classpathentry kind="output" path="bin/default"/>
</classpath>

@@ -0,0 +1,8 @@
*.iml
.gradle
/local.properties
/.idea/workspace.xml
/.idea/libraries
.DS_Store
/build
/captures

@@ -0,0 +1,23 @@
<?xml version="1.0" encoding="UTF-8"?>
<projectDescription>
<name>speech_to_text</name>
<comment>Project android_____ created by Buildship.</comment>
<projects>
</projects>
<buildSpec>
<buildCommand>
<name>org.eclipse.jdt.core.javabuilder</name>
<arguments>
</arguments>
</buildCommand>
<buildCommand>
<name>org.eclipse.buildship.core.gradleprojectbuilder</name>
<arguments>
</arguments>
</buildCommand>
</buildSpec>
<natures>
<nature>org.eclipse.jdt.core.javanature</nature>
<nature>org.eclipse.buildship.core.gradleprojectnature</nature>
</natures>
</projectDescription>

@@ -0,0 +1,13 @@
arguments=
auto.sync=false
build.scans.enabled=false
connection.gradle.distribution=GRADLE_DISTRIBUTION(VERSION(5.6.1))
connection.project.dir=
eclipse.preferences.version=1
gradle.user.home=
java.home=
jvm.arguments=
offline.mode=false
override.workspace.settings=true
show.console.view=true
show.executions.view=true

@@ -0,0 +1,44 @@
group 'com.csdcorp.speech_to_text'
version '1.0-SNAPSHOT'
buildscript {
ext.kotlin_version = '1.3.50'
repositories {
google()
jcenter()
}
dependencies {
classpath 'com.android.tools.build:gradle:3.5.0'
classpath "org.jetbrains.kotlin:kotlin-gradle-plugin:$kotlin_version"
}
}
rootProject.allprojects {
repositories {
google()
jcenter()
}
}
apply plugin: 'com.android.library'
apply plugin: 'kotlin-android'
android {
compileSdkVersion 28
sourceSets {
main.java.srcDirs += 'src/main/kotlin'
}
defaultConfig {
minSdkVersion 21
testInstrumentationRunner "androidx.test.runner.AndroidJUnitRunner"
}
lintOptions {
disable 'InvalidPackage'
}
}
dependencies {
implementation "org.jetbrains.kotlin:kotlin-stdlib-jdk7:$kotlin_version"
}

@@ -0,0 +1,3 @@
org.gradle.jvmargs=-Xmx1536M
android.useAndroidX=true
android.enableJetifier=true

@@ -0,0 +1,2 @@
sdk.dir=/Users/stephen.owens/Library/Android/sdk
flutter.sdk=/Users/stephen.owens/Documents/dev/flutter/sdk/flutter

@@ -0,0 +1 @@
rootProject.name = 'speech_to_text'

@@ -0,0 +1,5 @@
distributionBase=GRADLE_USER_HOME
distributionPath=wrapper/dists
zipStoreBase=GRADLE_USER_HOME
zipStorePath=wrapper/dists
distributionUrl=https\://services.gradle.org/distributions/gradle-5.6.4-all.zip

@@ -0,0 +1,3 @@
<manifest xmlns:android="http://schemas.android.com/apk/res/android"
package="com.csdcorp.speech_to_text">
</manifest>

@@ -0,0 +1,595 @@
package com.csdcorp.speech_to_text
import androidx.annotation.NonNull;
import io.flutter.embedding.engine.plugins.FlutterPlugin
import android.Manifest
import android.annotation.TargetApi
import android.app.Activity
import android.content.Intent
import android.content.pm.PackageManager
import android.os.Build
import android.os.Bundle
import android.speech.RecognitionListener
import android.speech.SpeechRecognizer.createSpeechRecognizer
import android.speech.RecognizerIntent
import android.speech.SpeechRecognizer
import androidx.core.app.ActivityCompat
import androidx.core.content.ContextCompat
import io.flutter.plugin.common.MethodCall
import io.flutter.plugin.common.MethodChannel
import io.flutter.plugin.common.MethodChannel.MethodCallHandler
import io.flutter.plugin.common.MethodChannel.Result
import io.flutter.plugin.common.PluginRegistry
import io.flutter.plugin.common.PluginRegistry.Registrar
import org.json.JSONObject
import android.content.Context
import android.content.BroadcastReceiver
import android.os.Handler
import android.os.Looper
import android.util.Log
import io.flutter.embedding.engine.plugins.activity.ActivityAware
import io.flutter.embedding.engine.plugins.activity.ActivityPluginBinding
import io.flutter.plugin.common.BinaryMessenger
import org.json.JSONArray
import java.util.*
enum class SpeechToTextErrors {
multipleRequests,
unimplemented,
noLanguageIntent,
recognizerNotAvailable,
missingOrInvalidArg,
unknown
}
enum class SpeechToTextCallbackMethods {
textRecognition,
notifyStatus,
notifyError,
soundLevelChange,
}
enum class SpeechToTextStatus {
listening,
notListening,
unavailable,
available,
}
enum class ListenMode {
deviceDefault,
dictation,
search,
confirmation,
}
const val pluginChannelName = "plugin.csdcorp.com/speech_to_text"
@TargetApi(8)
/** SpeechToTextPlugin */
public class SpeechToTextPlugin :
MethodCallHandler, RecognitionListener,
PluginRegistry.RequestPermissionsResultListener, FlutterPlugin,
ActivityAware {
private var pluginContext: Context? = null
private var channel: MethodChannel? = null
private val minSdkForSpeechSupport = 21
private val speechToTextPermissionCode = 28521
private val missingConfidence: Double = -1.0
private val logTag = "SpeechToTextPlugin"
private var currentActivity: Activity? = null
private var activeResult: Result? = null
private var initializedSuccessfully: Boolean = false
private var permissionToRecordAudio: Boolean = false
private var listening = false
private var debugLogging: Boolean = false
private var speechRecognizer: SpeechRecognizer? = null
private var recognizerIntent: Intent? = null
private var previousRecognizerLang: String? = null
private var previousPartialResults: Boolean = true
private var previousListenMode: ListenMode = ListenMode.deviceDefault
private var lastFinalTime: Long = 0
private val handler: Handler = Handler(Looper.getMainLooper())
private val defaultLanguageTag: String = Locale.getDefault().toLanguageTag()
override fun onAttachedToEngine(@NonNull flutterPluginBinding: FlutterPlugin.FlutterPluginBinding) {
onAttachedToEngine(flutterPluginBinding.getApplicationContext(), flutterPluginBinding.getBinaryMessenger());
}
// This static function is optional and equivalent to onAttachedToEngine. It supports the old
// pre-Flutter-1.12 Android projects. You are encouraged to continue supporting
// plugin registration via this function while apps migrate to use the new Android APIs
// post-flutter-1.12 via https://flutter.dev/go/android-project-migration.
//
// It is encouraged to share logic between onAttachedToEngine and registerWith to keep
// them functionally equivalent. Only one of onAttachedToEngine or registerWith will be called
// depending on the user's project. onAttachedToEngine or registerWith must both be defined
// in the same class.
companion object {
@JvmStatic
fun registerWith(registrar: Registrar) {
val speechPlugin = SpeechToTextPlugin()
speechPlugin.currentActivity = registrar.activity()
registrar.addRequestPermissionsResultListener(speechPlugin)
speechPlugin.onAttachedToEngine(registrar.context(), registrar.messenger())
}
}
private fun onAttachedToEngine(applicationContext: Context, messenger: BinaryMessenger) {
this.pluginContext = applicationContext;
channel = MethodChannel(messenger, pluginChannelName)
channel?.setMethodCallHandler(this)
}
override fun onDetachedFromEngine(@NonNull binding: FlutterPlugin.FlutterPluginBinding) {
this.pluginContext = null;
channel?.setMethodCallHandler(null)
channel = null
}
override fun onDetachedFromActivity() {
currentActivity = null
}
override fun onReattachedToActivityForConfigChanges(binding: ActivityPluginBinding) {
currentActivity = binding.activity
binding.addRequestPermissionsResultListener(this)
}
override fun onAttachedToActivity(binding: ActivityPluginBinding) {
currentActivity = binding.activity
binding.addRequestPermissionsResultListener(this)
}
override fun onDetachedFromActivityForConfigChanges() {
currentActivity = null
}
override fun onMethodCall(@NonNull call: MethodCall, @NonNull rawrResult: Result) {
val result = ChannelResultWrapper(rawrResult)
try {
when (call.method) {
"has_permission" -> hasPermission(result)
"initialize" -> {
var dlog = call.argument<Boolean>("debugLogging")
if (null != dlog) {
debugLogging = dlog
}
initialize(result)
}
"listen" -> {
var localeId = call.argument<String>("localeId")
if (null == localeId) {
localeId = defaultLanguageTag
}
var partialResults = call.argument<Boolean>("partialResults")
if (null == partialResults) {
partialResults = true
}
val listenModeIndex = call.argument<Int>("listenMode")
if ( null == listenModeIndex ) {
result.error(SpeechToTextErrors.missingOrInvalidArg.name,
"listenMode is required", null)
return
}
startListening(result, localeId, partialResults, listenModeIndex )
}
"stop" -> stopListening(result)
"cancel" -> cancelListening(result)
"locales" -> locales(result)
else -> result.notImplemented()
}
} catch (exc: Exception) {
Log.e(logTag, "Unexpected exception", exc)
result.error(SpeechToTextErrors.unknown.name,
"Unexpected exception", exc.localizedMessage)
}
}
private fun hasPermission(result: Result) {
if (sdkVersionTooLow(result)) {
return
}
debugLog("Start has_permission")
val localContext = pluginContext
if (localContext != null) {
val hasPerm = ContextCompat.checkSelfPermission(localContext,
Manifest.permission.RECORD_AUDIO) == PackageManager.PERMISSION_GRANTED
result.success(hasPerm)
}
}
private fun initialize(result: Result) {
if (sdkVersionTooLow(result)) {
return
}
debugLog("Start initialize")
if (null != activeResult) {
result.error(SpeechToTextErrors.multipleRequests.name,
"Only one initialize at a time", null)
return
}
activeResult = result
val localContext = pluginContext
initializeIfPermitted(pluginContext)
}
private fun sdkVersionTooLow(result: Result): Boolean {
if (Build.VERSION.SDK_INT < minSdkForSpeechSupport) {
result.success(false)
return true;
}
return false;
}
private fun isNotInitialized(result: Result): Boolean {
if (!initializedSuccessfully || null == pluginContext) {
result.success(false)
}
return !initializedSuccessfully
}
private fun isListening(): Boolean {
return listening
}
private fun isNotListening(): Boolean {
return !listening
}
private fun startListening(result: Result, languageTag: String, partialResults: Boolean,
listenModeIndex: Int) {
if (sdkVersionTooLow(result) || isNotInitialized(result) || isListening()) {
return
}
debugLog("Start listening")
var listenMode = ListenMode.deviceDefault
if ( listenModeIndex == ListenMode.dictation.ordinal) {
listenMode = ListenMode.dictation
}
setupRecognizerIntent(languageTag, partialResults, listenMode)
handler.post {
run {
speechRecognizer?.startListening(recognizerIntent)
}
}
notifyListening(isRecording = true)
result.success(true)
debugLog("Start listening done")
}
private fun stopListening(result: Result) {
if (sdkVersionTooLow(result) || isNotInitialized(result) || isNotListening()) {
return
}
debugLog("Stop listening")
handler.post {
run {
speechRecognizer?.stopListening()
}
}
notifyListening(isRecording = false)
result.success(true)
debugLog("Stop listening done")
}
private fun cancelListening(result: Result) {
if (sdkVersionTooLow(result) || isNotInitialized(result) || isNotListening()) {
return
}
debugLog("Cancel listening")
handler.post {
run {
speechRecognizer?.cancel()
}
}
notifyListening(isRecording = false)
result.success(true)
debugLog("Cancel listening done")
}
private fun locales(result: Result) {
if (sdkVersionTooLow(result) || isNotInitialized(result)) {
return
}
var detailsIntent = RecognizerIntent.getVoiceDetailsIntent(pluginContext)
if (null == detailsIntent) {
detailsIntent = Intent(RecognizerIntent.ACTION_GET_LANGUAGE_DETAILS)
}
if (null == detailsIntent) {
result.error(SpeechToTextErrors.noLanguageIntent.name,
"Could not get voice details", null)
return
}
pluginContext?.sendOrderedBroadcast(
detailsIntent, null, LanguageDetailsChecker(result),
null, Activity.RESULT_OK, null, null)
}
private fun notifyListening(isRecording: Boolean) {
debugLog("Notify listening")
listening = isRecording
val status = when (isRecording) {
true -> SpeechToTextStatus.listening.name
false -> SpeechToTextStatus.notListening.name
}
channel?.invokeMethod(SpeechToTextCallbackMethods.notifyStatus.name, status)
debugLog("Notify listening done")
}
private fun updateResults(speechBundle: Bundle?, isFinal: Boolean) {
if (isDuplicateFinal( isFinal )) {
debugLog("Discarding duplicate final")
return
}
val userSaid = speechBundle?.getStringArrayList(SpeechRecognizer.RESULTS_RECOGNITION)
if (null != userSaid && userSaid.isNotEmpty()) {
val speechResult = JSONObject()
speechResult.put("finalResult", isFinal)
val confidence = speechBundle?.getFloatArray(SpeechRecognizer.CONFIDENCE_SCORES)
val alternates = JSONArray()
for (resultIndex in 0..userSaid.size - 1) {
val speechWords = JSONObject()
speechWords.put("recognizedWords", userSaid[resultIndex])
if (null != confidence && confidence.size >= userSaid.size) {
speechWords.put("confidence", confidence[resultIndex])
} else {
speechWords.put("confidence", missingConfidence)
}
alternates.put(speechWords)
}
speechResult.put("alternates", alternates)
val jsonResult = speechResult.toString()
debugLog("Calling results callback")
channel?.invokeMethod(SpeechToTextCallbackMethods.textRecognition.name,
jsonResult)
}
}
private fun isDuplicateFinal( isFinal: Boolean ) : Boolean {
if ( !isFinal ) {
return false
}
val delta = System.currentTimeMillis() - lastFinalTime
lastFinalTime = System.currentTimeMillis()
return delta >= 0 && delta < 100
}
private fun initializeIfPermitted(context: Context?) {
val localContext = context
if (null == localContext) {
completeInitialize()
return
}
permissionToRecordAudio = ContextCompat.checkSelfPermission(localContext,
Manifest.permission.RECORD_AUDIO) == PackageManager.PERMISSION_GRANTED
debugLog("Checked permission")
if (!permissionToRecordAudio) {
val localActivity = currentActivity
if (null != localActivity) {
debugLog("Requesting permission")
ActivityCompat.requestPermissions(localActivity,
arrayOf(Manifest.permission.RECORD_AUDIO), speechToTextPermissionCode)
} else {
debugLog("no permission, no activity, completing")
completeInitialize()
}
} else {
debugLog("has permission, completing")
completeInitialize()
}
debugLog("leaving initializeIfPermitted")
}
private fun completeInitialize() {
debugLog("completeInitialize")
if (permissionToRecordAudio) {
debugLog("Testing recognition availability")
if (!SpeechRecognizer.isRecognitionAvailable(pluginContext)) {
Log.e(logTag, "Speech recognition not available on this device")
activeResult?.error(SpeechToTextErrors.recognizerNotAvailable.name,
"Speech recognition not available on this device", "")
activeResult = null
return
}
debugLog("Creating recognizer")
speechRecognizer = createSpeechRecognizer(pluginContext).apply {
debugLog("Setting listener")
setRecognitionListener(this@SpeechToTextPlugin)
}
if (null == speechRecognizer) {
Log.e(logTag, "Speech recognizer null")
activeResult?.error(
SpeechToTextErrors.recognizerNotAvailable.name,
"Speech recognizer null", "")
activeResult = null
}
debugLog("before setup intent")
setupRecognizerIntent(defaultLanguageTag, true, ListenMode.deviceDefault)
debugLog("after setup intent")
}
initializedSuccessfully = permissionToRecordAudio
debugLog("sending result")
activeResult?.success(permissionToRecordAudio)
debugLog("leaving complete")
activeResult = null
}
private fun setupRecognizerIntent(languageTag: String, partialResults: Boolean, listenMode: ListenMode) {
debugLog("setupRecognizerIntent")
if (previousRecognizerLang == null ||
previousRecognizerLang != languageTag ||
partialResults != previousPartialResults || previousListenMode != listenMode ) {
previousRecognizerLang = languageTag;
previousPartialResults = partialResults
previousListenMode = listenMode
handler.post {
run {
recognizerIntent = Intent(RecognizerIntent.ACTION_RECOGNIZE_SPEECH).apply {
debugLog("In RecognizerIntent apply")
putExtra(RecognizerIntent.EXTRA_LANGUAGE_MODEL, RecognizerIntent.LANGUAGE_MODEL_FREE_FORM)
debugLog("put model")
val localContext = pluginContext
if (null != localContext) {
putExtra(RecognizerIntent.EXTRA_CALLING_PACKAGE,
localContext.applicationInfo.packageName)
}
debugLog("put package")
putExtra(RecognizerIntent.EXTRA_PARTIAL_RESULTS, partialResults)
debugLog("put partial")
if (languageTag != Locale.getDefault().toLanguageTag()) {
putExtra(RecognizerIntent.EXTRA_LANGUAGE, languageTag);
debugLog("put languageTag")
}
}
}
}
}
}
override fun onRequestPermissionsResult(requestCode: Int, permissions: Array<out String>?,
grantResults: IntArray?): Boolean {
when (requestCode) {
speechToTextPermissionCode -> {
if (null != grantResults) {
permissionToRecordAudio = grantResults.isNotEmpty() &&
grantResults.get(0) == PackageManager.PERMISSION_GRANTED
}
completeInitialize()
return true
}
}
return false
}
override fun onPartialResults(results: Bundle?) = updateResults(results, false)
override fun onResults(results: Bundle?) = updateResults(results, true)
override fun onEndOfSpeech() = notifyListening(isRecording = false)
override fun onError(errorCode: Int) {
val errorMsg = when (errorCode) {
SpeechRecognizer.ERROR_AUDIO -> "error_audio_error"
SpeechRecognizer.ERROR_CLIENT -> "error_client"
SpeechRecognizer.ERROR_INSUFFICIENT_PERMISSIONS -> "error_permission"
SpeechRecognizer.ERROR_NETWORK -> "error_network"
SpeechRecognizer.ERROR_NETWORK_TIMEOUT -> "error_network_timeout"
SpeechRecognizer.ERROR_NO_MATCH -> "error_no_match"
SpeechRecognizer.ERROR_RECOGNIZER_BUSY -> "error_busy"
SpeechRecognizer.ERROR_SERVER -> "error_server"
SpeechRecognizer.ERROR_SPEECH_TIMEOUT -> "error_speech_timeout"
else -> "error_unknown"
}
sendError(errorMsg)
}
private fun debugLog( msg: String ) {
if ( debugLogging ) {
Log.d( logTag, msg )
}
}
private fun sendError(errorMsg: String) {
val speechError = JSONObject()
speechError.put("errorMsg", errorMsg)
speechError.put("permanent", true)
handler.post {
run {
channel?.invokeMethod(SpeechToTextCallbackMethods.notifyError.name, speechError.toString())
}
}
}
override fun onRmsChanged(rmsdB: Float) {
handler.post {
run {
channel?.invokeMethod(SpeechToTextCallbackMethods.soundLevelChange.name, rmsdB)
}
}
}
override fun onReadyForSpeech(p0: Bundle?) {}
override fun onBufferReceived(p0: ByteArray?) {}
override fun onEvent(p0: Int, p1: Bundle?) {}
override fun onBeginningOfSpeech() {}
}
// See https://stackoverflow.com/questions/10538791/how-to-set-the-language-in-speech-recognition-on-android/10548680#10548680
class LanguageDetailsChecker(flutterResult: Result) : BroadcastReceiver() {
private val result: Result = flutterResult
private var supportedLanguages: List<String>? = null
private var languagePreference: String? = null
override fun onReceive(context: Context, intent: Intent) {
val results = getResultExtras(true)
if (results.containsKey(RecognizerIntent.EXTRA_LANGUAGE_PREFERENCE)) {
languagePreference = results.getString(RecognizerIntent.EXTRA_LANGUAGE_PREFERENCE)
}
if (results.containsKey(RecognizerIntent.EXTRA_SUPPORTED_LANGUAGES)) {
supportedLanguages = results.getStringArrayList(
RecognizerIntent.EXTRA_SUPPORTED_LANGUAGES)
createResponse(supportedLanguages)
}
}
private fun createResponse(supportedLanguages: List<String>?) {
val currentLocale = Locale.getDefault()
val localeNames = ArrayList<String>()
localeNames.add(buildIdNameForLocale(currentLocale))
if (null != supportedLanguages) {
for (lang in supportedLanguages) {
if (currentLocale.toLanguageTag() == lang) {
continue
}
val locale = Locale.forLanguageTag(lang)
localeNames.add(buildIdNameForLocale(locale))
}
}
result.success(localeNames)
}
private fun buildIdNameForLocale(locale: Locale): String {
val name = locale.displayName.replace(':', ' ')
return "${locale.language}_${locale.country}:$name"
}
}
private class ChannelResultWrapper(result: Result) : Result {
// Caller handler
val handler: Handler = Handler(Looper.getMainLooper())
val result: Result = result
// make sure to respond in the caller thread
override fun success(results: Any?) {
handler.post {
run {
result.success(results);
}
}
}
override fun error(errorCode: String?, errorMessage: String?, data: Any?) {
handler.post {
run {
result.error(errorCode, errorMessage, data);
}
}
}
override fun notImplemented() {
handler.post {
run {
result.notImplemented();
}
}
}
}

@@ -0,0 +1,73 @@
# Miscellaneous
*.class
*.log
*.pyc
*.swp
.DS_Store
.atom/
.buildlog/
.history
.svn/
# IntelliJ related
*.iml
*.ipr
*.iws
.idea/
# The .vscode folder contains launch configuration and tasks you configure in
# VS Code which you may wish to be included in version control, so this line
# is commented out by default.
#.vscode/
# Flutter/Dart/Pub related
**/doc/api/
.dart_tool/
.flutter-plugins
.packages
.pub-cache/
.pub/
/build/
# Android related
**/android/**/gradle-wrapper.jar
**/android/.gradle
**/android/captures/
**/android/gradlew
**/android/gradlew.bat
**/android/local.properties
**/android/**/GeneratedPluginRegistrant.java
# iOS/XCode related
**/ios/**/*.mode1v3
**/ios/**/*.mode2v3
**/ios/**/*.moved-aside
**/ios/**/*.pbxuser
**/ios/**/*.perspectivev3
**/ios/**/*sync/
**/ios/**/.sconsign.dblite
**/ios/**/.tags*
**/ios/**/.vagrant/
**/ios/**/DerivedData/
**/ios/**/Icon?
**/ios/**/Pods/
**/ios/**/.symlinks/
**/ios/**/profile
**/ios/**/xcuserdata
**/ios/.generated/
**/ios/Flutter/App.framework
**/ios/Flutter/Flutter.framework
**/ios/Flutter/Generated.xcconfig
**/ios/Flutter/app.flx
**/ios/Flutter/app.zip
**/ios/Flutter/flutter_assets/
**/ios/Flutter/flutter_export_environment.sh
**/ios/ServiceDefinitions.json
**/ios/Runner/GeneratedPluginRegistrant.*
# Exceptions to above rules.
!**/ios/**/default.mode1v3
!**/ios/**/default.mode2v3
!**/ios/**/default.pbxuser
!**/ios/**/default.perspectivev3
!/packages/flutter_tools/test/data/dart_dependencies_test/**/.packages

@@ -0,0 +1,10 @@
# This file tracks properties of this Flutter project.
# Used by Flutter tool to assess capabilities and perform upgrades etc.
#
# This file should be version controlled and should not be manually edited.

version:
  revision: 2d2a1ffec95cc70a3218872a2cd3f8de4933c42f
  channel: stable

project_type: app

@@ -0,0 +1,155 @@
# speech_to_text_example
Demonstrates how to use the speech_to_text plugin. This example requires
that the plugin has been installed. It initializes speech recognition,
listens for words and prints them.
## Source
```dart
import 'dart:async';

import 'package:flutter/material.dart';
import 'package:speech_to_text/speech_to_text.dart';
import 'package:speech_to_text/speech_recognition_result.dart';
import 'package:speech_to_text/speech_recognition_error.dart';

void main() => runApp(MyApp());

class MyApp extends StatefulWidget {
  @override
  _MyAppState createState() => _MyAppState();
}

class _MyAppState extends State<MyApp> {
  bool _hasSpeech = false;
  String lastWords = "";
  String lastError = "";
  String lastStatus = "";
  final SpeechToText speech = SpeechToText();

  @override
  void initState() {
    super.initState();
    initSpeechState();
  }

  Future<void> initSpeechState() async {
    bool hasSpeech = await speech.initialize(onError: errorListener, onStatus: statusListener);
    if (!mounted) return;
    setState(() {
      _hasSpeech = hasSpeech;
    });
  }

  @override
  Widget build(BuildContext context) {
    return MaterialApp(
      home: Scaffold(
        appBar: AppBar(
          title: const Text('Speech to Text Example'),
        ),
        body: _hasSpeech
            ? Column(children: [
                Expanded(
                  child: Center(
                    child: Text('Speech recognition available'),
                  ),
                ),
                Expanded(
                  child: Row(
                    mainAxisAlignment: MainAxisAlignment.center,
                    children: <Widget>[
                      FlatButton(
                        child: Text('Start'),
                        onPressed: startListening,
                      ),
                      FlatButton(
                        child: Text('Stop'),
                        onPressed: stopListening,
                      ),
                      FlatButton(
                        child: Text('Cancel'),
                        onPressed: cancelListening,
                      ),
                    ],
                  ),
                ),
                Expanded(
                  child: Column(
                    children: <Widget>[
                      Center(
                        child: Text('Recognized Words'),
                      ),
                      Center(
                        child: Text(lastWords),
                      ),
                    ],
                  ),
                ),
                Expanded(
                  child: Column(
                    children: <Widget>[
                      Center(
                        child: Text('Error'),
                      ),
                      Center(
                        child: Text(lastError),
                      ),
                    ],
                  ),
                ),
                Expanded(
                  child: Center(
                    child: speech.isListening ? Text("I'm listening...") : Text('Not listening'),
                  ),
                ),
              ])
            : Center(
                child: Text('Speech recognition unavailable',
                    style: TextStyle(fontSize: 20.0, fontWeight: FontWeight.bold))),
      ),
    );
  }

  void startListening() {
    lastWords = "";
    lastError = "";
    speech.listen(onResult: resultListener);
    setState(() {});
  }

  void stopListening() {
    speech.stop();
    setState(() {});
  }

  void cancelListening() {
    speech.cancel();
    setState(() {});
  }

  void resultListener(SpeechRecognitionResult result) {
    setState(() {
      lastWords = "${result.recognizedWords} - ${result.finalResult}";
    });
  }

  void errorListener(SpeechRecognitionError error) {
    setState(() {
      lastError = "${error.errorMsg} - ${error.permanent}";
    });
  }

  void statusListener(String status) {
    setState(() {
      lastStatus = "$status";
    });
  }
}
```

@@ -0,0 +1,17 @@
<?xml version="1.0" encoding="UTF-8"?>
<projectDescription>
<name>android___</name>
<comment>Project android___ created by Buildship.</comment>
<projects>
</projects>
<buildSpec>
<buildCommand>
<name>org.eclipse.buildship.core.gradleprojectbuilder</name>
<arguments>
</arguments>
</buildCommand>
</buildSpec>
<natures>
<nature>org.eclipse.buildship.core.gradleprojectnature</nature>
</natures>
</projectDescription>

@@ -0,0 +1,2 @@
connection.project.dir=
eclipse.preferences.version=1

@@ -0,0 +1,67 @@
def localProperties = new Properties()
def localPropertiesFile = rootProject.file('local.properties')
if (localPropertiesFile.exists()) {
localPropertiesFile.withReader('UTF-8') { reader ->
localProperties.load(reader)
}
}
def flutterRoot = localProperties.getProperty('flutter.sdk')
if (flutterRoot == null) {
throw new GradleException("Flutter SDK not found. Define location with flutter.sdk in the local.properties file.")
}
def flutterVersionCode = localProperties.getProperty('flutter.versionCode')
if (flutterVersionCode == null) {
flutterVersionCode = '1'
}
def flutterVersionName = localProperties.getProperty('flutter.versionName')
if (flutterVersionName == null) {
flutterVersionName = '1.0'
}
apply plugin: 'com.android.application'
apply plugin: 'kotlin-android'
apply from: "$flutterRoot/packages/flutter_tools/gradle/flutter.gradle"
android {
compileSdkVersion 28
sourceSets {
main.java.srcDirs += 'src/main/kotlin'
}
lintOptions {
disable 'InvalidPackage'
}
defaultConfig {
// TODO: Specify your own unique Application ID (https://developer.android.com/studio/build/application-id.html).
applicationId "com.csdcorp.speech_to_text_example"
minSdkVersion 21
targetSdkVersion 28
versionCode flutterVersionCode.toInteger()
versionName flutterVersionName
testInstrumentationRunner "androidx.test.runner.AndroidJUnitRunner"
}
buildTypes {
release {
// TODO: Add your own signing config for the release build.
// Signing with the debug keys for now, so `flutter run --release` works.
signingConfig signingConfigs.debug
}
}
}
flutter {
source '../..'
}
dependencies {
implementation "org.jetbrains.kotlin:kotlin-stdlib-jdk7:$kotlin_version"
testImplementation 'junit:junit:4.12'
androidTestImplementation 'androidx.test:runner:1.1.1'
androidTestImplementation 'androidx.test.espresso:espresso-core:3.1.1'
}

@@ -0,0 +1,7 @@
<manifest xmlns:android="http://schemas.android.com/apk/res/android"
package="com.csdcorp.speech_to_text_example">
<!-- Flutter needs it to communicate with the running application
to allow setting breakpoints, to provide hot reload, etc.
-->
<uses-permission android:name="android.permission.INTERNET"/>
</manifest>

@@ -0,0 +1,32 @@
<manifest xmlns:android="http://schemas.android.com/apk/res/android"
package="com.csdcorp.speech_to_text_example">
<uses-permission android:name="android.permission.RECORD_AUDIO"/>
<uses-permission android:name="android.permission.INTERNET"/>
<!-- io.flutter.app.FlutterApplication is an android.app.Application that
calls FlutterMain.startInitialization(this); in its onCreate method.
In most cases you can leave this as-is, but you if you want to provide
additional functionality it is fine to subclass or reimplement
FlutterApplication and put your custom class here. -->
<application
android:name="io.flutter.app.FlutterApplication"
android:label="speech_to_text_example"
android:icon="@mipmap/ic_launcher">
<activity
android:name=".MainActivity"
android:launchMode="singleTop"
android:theme="@style/LaunchTheme"
android:configChanges="orientation|keyboardHidden|keyboard|screenSize|smallestScreenSize|locale|layoutDirection|fontScale|screenLayout|density|uiMode"
android:hardwareAccelerated="true"
android:windowSoftInputMode="adjustResize">
<intent-filter>
<action android:name="android.intent.action.MAIN"/>
<category android:name="android.intent.category.LAUNCHER"/>
</intent-filter>
</activity>
<!-- Don't delete the meta-data below.
This is used by the Flutter tool to generate GeneratedPluginRegistrant.java -->
<meta-data
android:name="flutterEmbedding"
android:value="2" />
</application>
</manifest>

@@ -0,0 +1,12 @@
package com.csdcorp.speech_to_text_example
import androidx.annotation.NonNull;
import io.flutter.embedding.android.FlutterActivity
import io.flutter.embedding.engine.FlutterEngine
import io.flutter.plugins.GeneratedPluginRegistrant
class MainActivity: FlutterActivity() {
override fun configureFlutterEngine(@NonNull flutterEngine: FlutterEngine) {
GeneratedPluginRegistrant.registerWith(flutterEngine);
}
}

@@ -0,0 +1,12 @@
<?xml version="1.0" encoding="utf-8"?>
<!-- Modify this file to customize your launch splash screen -->
<layer-list xmlns:android="http://schemas.android.com/apk/res/android">
<item android:drawable="@android:color/white" />
<!-- You can insert your own image assets here -->
<!-- <item>
<bitmap
android:gravity="center"
android:src="@mipmap/launch_image" />
</item> -->
</layer-list>

Binary files not shown: five launcher icon images added (presumably the `mipmap-*` `ic_launcher.png` set), sizes 544 B, 442 B, 721 B, 1.0 KiB, and 1.4 KiB.

@@ -0,0 +1,8 @@
<?xml version="1.0" encoding="utf-8"?>
<resources>
<style name="LaunchTheme" parent="@android:style/Theme.Black.NoTitleBar">
<!-- Show a splash screen on the activity. Automatically removed when
Flutter draws its first frame -->
<item name="android:windowBackground">@drawable/launch_background</item>
</style>
</resources>

@@ -0,0 +1,7 @@
<manifest xmlns:android="http://schemas.android.com/apk/res/android"
package="com.csdcorp.speech_to_text_example">
<!-- Flutter needs it to communicate with the running application
to allow setting breakpoints, to provide hot reload, etc.
-->
<uses-permission android:name="android.permission.INTERNET"/>
</manifest>

@@ -0,0 +1,31 @@
buildscript {
ext.kotlin_version = '1.3.50'
repositories {
google()
jcenter()
}
dependencies {
classpath 'com.android.tools.build:gradle:3.6.1'
classpath "org.jetbrains.kotlin:kotlin-gradle-plugin:$kotlin_version"
}
}
allprojects {
repositories {
google()
jcenter()
}
}
rootProject.buildDir = '../build'
subprojects {
project.buildDir = "${rootProject.buildDir}/${project.name}"
}
subprojects {
project.evaluationDependsOn(':app')
}
task clean(type: Delete) {
delete rootProject.buildDir
}

@@ -0,0 +1,4 @@
org.gradle.jvmargs=-Xmx1536M
android.useAndroidX=true
android.enableJetifier=true
android.enableR8=true

@@ -0,0 +1,6 @@
#Mon Mar 16 08:57:32 EDT 2020
distributionBase=GRADLE_USER_HOME
distributionPath=wrapper/dists
zipStoreBase=GRADLE_USER_HOME
zipStorePath=wrapper/dists
distributionUrl=https\://services.gradle.org/distributions/gradle-5.6.4-all.zip

@@ -0,0 +1,15 @@
include ':app'
def flutterProjectRoot = rootProject.projectDir.parentFile.toPath()
def plugins = new Properties()
def pluginsFile = new File(flutterProjectRoot.toFile(), '.flutter-plugins')
if (pluginsFile.exists()) {
pluginsFile.withReader('UTF-8') { reader -> plugins.load(reader) }
}
plugins.each { name, path ->
def pluginDirectory = flutterProjectRoot.resolve(path).resolve('android').toFile()
include ":$name"
project(":$name").projectDir = pluginDirectory
}

@@ -0,0 +1,26 @@
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
<key>CFBundleDevelopmentRegion</key>
<string>$(DEVELOPMENT_LANGUAGE)</string>
<key>CFBundleExecutable</key>
<string>App</string>
<key>CFBundleIdentifier</key>
<string>io.flutter.flutter.app</string>
<key>CFBundleInfoDictionaryVersion</key>
<string>6.0</string>
<key>CFBundleName</key>
<string>App</string>
<key>CFBundlePackageType</key>
<string>FMWK</string>
<key>CFBundleShortVersionString</key>
<string>1.0</string>
<key>CFBundleSignature</key>
<string>????</string>
<key>CFBundleVersion</key>
<string>1.0</string>
<key>MinimumOSVersion</key>
<string>8.0</string>
</dict>
</plist>

@@ -0,0 +1,2 @@
#include "Pods/Target Support Files/Pods-Runner/Pods-Runner.debug.xcconfig"
#include "Generated.xcconfig"

@@ -0,0 +1,18 @@
#
# NOTE: This podspec is NOT to be published. It is only used as a local source!
#
Pod::Spec.new do |s|
s.name = 'Flutter'
s.version = '1.0.0'
s.summary = 'High-performance, high-fidelity mobile apps.'
s.description = <<-DESC
Flutter provides an easy and productive way to build and deploy high-performance mobile apps for Android and iOS.
DESC
s.homepage = 'https://flutter.io'
s.license = { :type => 'MIT' }
s.author = { 'Flutter Dev Team' => 'flutter-dev@googlegroups.com' }
s.source = { :git => 'https://github.com/flutter/engine', :tag => s.version.to_s }
s.ios.deployment_target = '8.0'
s.vendored_frameworks = 'Flutter.framework'
end

@@ -0,0 +1,2 @@
#include "Pods/Target Support Files/Pods-Runner/Pods-Runner.release.xcconfig"
#include "Generated.xcconfig"

@@ -0,0 +1,90 @@
# Uncomment this line to define a global platform for your project
platform :ios, '10.0'
# CocoaPods analytics sends network stats synchronously affecting flutter build latency.
ENV['COCOAPODS_DISABLE_STATS'] = 'true'
project 'Runner', {
'Debug' => :debug,
'Profile' => :release,
'Release' => :release,
}
def parse_KV_file(file, separator='=')
file_abs_path = File.expand_path(file)
if !File.exists? file_abs_path
return [];
end
generated_key_values = {}
skip_line_start_symbols = ["#", "/"]
File.foreach(file_abs_path) do |line|
next if skip_line_start_symbols.any? { |symbol| line =~ /^\s*#{symbol}/ }
plugin = line.split(pattern=separator)
if plugin.length == 2
podname = plugin[0].strip()
path = plugin[1].strip()
podpath = File.expand_path("#{path}", file_abs_path)
generated_key_values[podname] = podpath
else
puts "Invalid plugin specification: #{line}"
end
end
generated_key_values
end
target 'Runner' do
use_frameworks!
use_modular_headers!
# Flutter Pod
copied_flutter_dir = File.join(__dir__, 'Flutter')
copied_framework_path = File.join(copied_flutter_dir, 'Flutter.framework')
copied_podspec_path = File.join(copied_flutter_dir, 'Flutter.podspec')
unless File.exist?(copied_framework_path) && File.exist?(copied_podspec_path)
# Copy Flutter.framework and Flutter.podspec to Flutter/ to have something to link against if the xcode backend script has not run yet.
# That script will copy the correct debug/profile/release version of the framework based on the currently selected Xcode configuration.
# CocoaPods will not embed the framework on pod install (before any build phases can generate) if the dylib does not exist.
generated_xcode_build_settings_path = File.join(copied_flutter_dir, 'Generated.xcconfig')
unless File.exist?(generated_xcode_build_settings_path)
raise "Generated.xcconfig must exist. If you're running pod install manually, make sure flutter pub get is executed first"
end
generated_xcode_build_settings = parse_KV_file(generated_xcode_build_settings_path)
cached_framework_dir = generated_xcode_build_settings['FLUTTER_FRAMEWORK_DIR'];
unless File.exist?(copied_framework_path)
FileUtils.cp_r(File.join(cached_framework_dir, 'Flutter.framework'), copied_flutter_dir)
end
unless File.exist?(copied_podspec_path)
FileUtils.cp(File.join(cached_framework_dir, 'Flutter.podspec'), copied_flutter_dir)
end
end
# Keep pod path relative so it can be checked into Podfile.lock.
pod 'Flutter', :path => 'Flutter'
# Plugin Pods
# Prepare symlinks folder. We use symlinks to avoid having Podfile.lock
# referring to absolute paths on developers' machines.
system('rm -rf .symlinks')
system('mkdir -p .symlinks/plugins')
plugin_pods = parse_KV_file('../.flutter-plugins')
plugin_pods.each do |name, path|
symlink = File.join('.symlinks', 'plugins', name)
File.symlink(path, symlink)
pod name, :path => File.join(symlink, 'ios')
end
end
# Prevent Cocoapods from embedding a second Flutter framework and causing an error with the new Xcode build system.
install! 'cocoapods', :disable_input_output_paths => true
post_install do |installer|
installer.pods_project.targets.each do |target|
target.build_configurations.each do |config|
config.build_settings['ENABLE_BITCODE'] = 'NO'
end
end
end

@@ -0,0 +1,29 @@
PODS:
  - Flutter (1.0.0)
  - speech_to_text (0.0.1):
    - Flutter
    - Try
  - Try (2.1.1)

DEPENDENCIES:
  - Flutter (from `Flutter`)
  - speech_to_text (from `.symlinks/plugins/speech_to_text/ios`)

SPEC REPOS:
  trunk:
    - Try

EXTERNAL SOURCES:
  Flutter:
    :path: Flutter
  speech_to_text:
    :path: ".symlinks/plugins/speech_to_text/ios"

SPEC CHECKSUMS:
  Flutter: 0e3d915762c693b495b44d77113d4970485de6ec
  speech_to_text: b43a7d99aef037bd758ed8e45d79bbac035d2dfe
  Try: 5ef669ae832617b3cee58cb2c6f99fb767a4ff96

PODFILE CHECKSUM: 0ba44ad07df4ab62269dc769727cf0f12b1e453d

COCOAPODS: 1.9.3

@@ -0,0 +1,578 @@
// !$*UTF8*$!
{
archiveVersion = 1;
classes = {
};
objectVersion = 46;
objects = {
/* Begin PBXBuildFile section */
1498D2341E8E89220040F4C2 /* GeneratedPluginRegistrant.m in Sources */ = {isa = PBXBuildFile; fileRef = 1498D2331E8E89220040F4C2 /* GeneratedPluginRegistrant.m */; };
3B3967161E833CAA004F5970 /* AppFrameworkInfo.plist in Resources */ = {isa = PBXBuildFile; fileRef = 3B3967151E833CAA004F5970 /* AppFrameworkInfo.plist */; };
74858FAF1ED2DC5600515810 /* AppDelegate.swift in Sources */ = {isa = PBXBuildFile; fileRef = 74858FAE1ED2DC5600515810 /* AppDelegate.swift */; };
9740EEB41CF90195004384FC /* Debug.xcconfig in Resources */ = {isa = PBXBuildFile; fileRef = 9740EEB21CF90195004384FC /* Debug.xcconfig */; };
97C146FC1CF9000F007C117D /* Main.storyboard in Resources */ = {isa = PBXBuildFile; fileRef = 97C146FA1CF9000F007C117D /* Main.storyboard */; };
97C146FE1CF9000F007C117D /* Assets.xcassets in Resources */ = {isa = PBXBuildFile; fileRef = 97C146FD1CF9000F007C117D /* Assets.xcassets */; };
97C147011CF9000F007C117D /* LaunchScreen.storyboard in Resources */ = {isa = PBXBuildFile; fileRef = 97C146FF1CF9000F007C117D /* LaunchScreen.storyboard */; };
C446300A034BF27D9F1ACEF9 /* Pods_Runner.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = E76E9615C6B4FABD88067D55 /* Pods_Runner.framework */; };
/* End PBXBuildFile section */
/* Begin PBXCopyFilesBuildPhase section */
9705A1C41CF9048500538489 /* Embed Frameworks */ = {
isa = PBXCopyFilesBuildPhase;
buildActionMask = 2147483647;
dstPath = "";
dstSubfolderSpec = 10;
files = (
);
name = "Embed Frameworks";
runOnlyForDeploymentPostprocessing = 0;
};
/* End PBXCopyFilesBuildPhase section */
/* Begin PBXFileReference section */
1498D2321E8E86230040F4C2 /* GeneratedPluginRegistrant.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = GeneratedPluginRegistrant.h; sourceTree = "<group>"; };
1498D2331E8E89220040F4C2 /* GeneratedPluginRegistrant.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = GeneratedPluginRegistrant.m; sourceTree = "<group>"; };
3B3967151E833CAA004F5970 /* AppFrameworkInfo.plist */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.plist.xml; name = AppFrameworkInfo.plist; path = Flutter/AppFrameworkInfo.plist; sourceTree = "<group>"; };
59AFE6BB0B596A0E0811BDFF /* Pods-Runner.debug.xcconfig */ = {isa = PBXFileReference; includeInIndex = 1; lastKnownFileType = text.xcconfig; name = "Pods-Runner.debug.xcconfig"; path = "Target Support Files/Pods-Runner/Pods-Runner.debug.xcconfig"; sourceTree = "<group>"; };
6280E2A777726D2043BF80B7 /* Pods-Runner.release.xcconfig */ = {isa = PBXFileReference; includeInIndex = 1; lastKnownFileType = text.xcconfig; name = "Pods-Runner.release.xcconfig"; path = "Target Support Files/Pods-Runner/Pods-Runner.release.xcconfig"; sourceTree = "<group>"; };
74858FAD1ED2DC5600515810 /* Runner-Bridging-Header.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = "Runner-Bridging-Header.h"; sourceTree = "<group>"; };
74858FAE1ED2DC5600515810 /* AppDelegate.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = AppDelegate.swift; sourceTree = "<group>"; };
7AFA3C8E1D35360C0083082E /* Release.xcconfig */ = {isa = PBXFileReference; lastKnownFileType = text.xcconfig; name = Release.xcconfig; path = Flutter/Release.xcconfig; sourceTree = "<group>"; };
9740EEB21CF90195004384FC /* Debug.xcconfig */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.xcconfig; name = Debug.xcconfig; path = Flutter/Debug.xcconfig; sourceTree = "<group>"; };
9740EEB31CF90195004384FC /* Generated.xcconfig */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.xcconfig; name = Generated.xcconfig; path = Flutter/Generated.xcconfig; sourceTree = "<group>"; };
97C146EE1CF9000F007C117D /* Runner.app */ = {isa = PBXFileReference; explicitFileType = wrapper.application; includeInIndex = 0; path = Runner.app; sourceTree = BUILT_PRODUCTS_DIR; };
97C146FB1CF9000F007C117D /* Base */ = {isa = PBXFileReference; lastKnownFileType = file.storyboard; name = Base; path = Base.lproj/Main.storyboard; sourceTree = "<group>"; };
97C146FD1CF9000F007C117D /* Assets.xcassets */ = {isa = PBXFileReference; lastKnownFileType = folder.assetcatalog; path = Assets.xcassets; sourceTree = "<group>"; };
97C147001CF9000F007C117D /* Base */ = {isa = PBXFileReference; lastKnownFileType = file.storyboard; name = Base; path = Base.lproj/LaunchScreen.storyboard; sourceTree = "<group>"; };
97C147021CF9000F007C117D /* Info.plist */ = {isa = PBXFileReference; lastKnownFileType = text.plist.xml; path = Info.plist; sourceTree = "<group>"; };
C3909A4B7EC98A20255210E3 /* Pods-Runner.profile.xcconfig */ = {isa = PBXFileReference; includeInIndex = 1; lastKnownFileType = text.xcconfig; name = "Pods-Runner.profile.xcconfig"; path = "Target Support Files/Pods-Runner/Pods-Runner.profile.xcconfig"; sourceTree = "<group>"; };
E76E9615C6B4FABD88067D55 /* Pods_Runner.framework */ = {isa = PBXFileReference; explicitFileType = wrapper.framework; includeInIndex = 0; path = Pods_Runner.framework; sourceTree = BUILT_PRODUCTS_DIR; };
/* End PBXFileReference section */
/* Begin PBXFrameworksBuildPhase section */
97C146EB1CF9000F007C117D /* Frameworks */ = {
isa = PBXFrameworksBuildPhase;
buildActionMask = 2147483647;
files = (
C446300A034BF27D9F1ACEF9 /* Pods_Runner.framework in Frameworks */,
);
runOnlyForDeploymentPostprocessing = 0;
};
/* End PBXFrameworksBuildPhase section */
/* Begin PBXGroup section */
7937AF765430D66F28F7FEEF /* Frameworks */ = {
isa = PBXGroup;
children = (
E76E9615C6B4FABD88067D55 /* Pods_Runner.framework */,
);
name = Frameworks;
sourceTree = "<group>";
};
9740EEB11CF90186004384FC /* Flutter */ = {
isa = PBXGroup;
children = (
3B3967151E833CAA004F5970 /* AppFrameworkInfo.plist */,
9740EEB21CF90195004384FC /* Debug.xcconfig */,
7AFA3C8E1D35360C0083082E /* Release.xcconfig */,
9740EEB31CF90195004384FC /* Generated.xcconfig */,
);
name = Flutter;
sourceTree = "<group>";
};
97C146E51CF9000F007C117D = {
isa = PBXGroup;
children = (
9740EEB11CF90186004384FC /* Flutter */,
97C146F01CF9000F007C117D /* Runner */,
97C146EF1CF9000F007C117D /* Products */,
A68CCF1640763A551D35BD31 /* Pods */,
7937AF765430D66F28F7FEEF /* Frameworks */,
);
sourceTree = "<group>";
};
97C146EF1CF9000F007C117D /* Products */ = {
isa = PBXGroup;
children = (
97C146EE1CF9000F007C117D /* Runner.app */,
);
name = Products;
sourceTree = "<group>";
};
97C146F01CF9000F007C117D /* Runner */ = {
isa = PBXGroup;
children = (
97C146FA1CF9000F007C117D /* Main.storyboard */,
97C146FD1CF9000F007C117D /* Assets.xcassets */,
97C146FF1CF9000F007C117D /* LaunchScreen.storyboard */,
97C147021CF9000F007C117D /* Info.plist */,
97C146F11CF9000F007C117D /* Supporting Files */,
1498D2321E8E86230040F4C2 /* GeneratedPluginRegistrant.h */,
1498D2331E8E89220040F4C2 /* GeneratedPluginRegistrant.m */,
74858FAE1ED2DC5600515810 /* AppDelegate.swift */,
74858FAD1ED2DC5600515810 /* Runner-Bridging-Header.h */,
);
path = Runner;
sourceTree = "<group>";
};
97C146F11CF9000F007C117D /* Supporting Files */ = {
isa = PBXGroup;
children = (
);
name = "Supporting Files";
sourceTree = "<group>";
};
A68CCF1640763A551D35BD31 /* Pods */ = {
isa = PBXGroup;
children = (
59AFE6BB0B596A0E0811BDFF /* Pods-Runner.debug.xcconfig */,
6280E2A777726D2043BF80B7 /* Pods-Runner.release.xcconfig */,
C3909A4B7EC98A20255210E3 /* Pods-Runner.profile.xcconfig */,
);
path = Pods;
sourceTree = "<group>";
};
/* End PBXGroup section */
/* Begin PBXNativeTarget section */
97C146ED1CF9000F007C117D /* Runner */ = {
isa = PBXNativeTarget;
buildConfigurationList = 97C147051CF9000F007C117D /* Build configuration list for PBXNativeTarget "Runner" */;
buildPhases = (
949FCB95217187F2C022D6A9 /* [CP] Check Pods Manifest.lock */,
9740EEB61CF901F6004384FC /* Run Script */,
97C146EA1CF9000F007C117D /* Sources */,
97C146EB1CF9000F007C117D /* Frameworks */,
97C146EC1CF9000F007C117D /* Resources */,
9705A1C41CF9048500538489 /* Embed Frameworks */,
3B06AD1E1E4923F5004D2608 /* Thin Binary */,
8B0988F04B6AE44AA0304FEF /* [CP] Embed Pods Frameworks */,
);
buildRules = (
);
dependencies = (
);
name = Runner;
productName = Runner;
productReference = 97C146EE1CF9000F007C117D /* Runner.app */;
productType = "com.apple.product-type.application";
};
/* End PBXNativeTarget section */
/* Begin PBXProject section */
97C146E61CF9000F007C117D /* Project object */ = {
isa = PBXProject;
attributes = {
LastUpgradeCheck = 1020;
ORGANIZATIONNAME = "The Chromium Authors";
TargetAttributes = {
97C146ED1CF9000F007C117D = {
CreatedOnToolsVersion = 7.3.1;
DevelopmentTeam = 3X949YE9K2;
LastSwiftMigration = 0910;
};
};
};
buildConfigurationList = 97C146E91CF9000F007C117D /* Build configuration list for PBXProject "Runner" */;
compatibilityVersion = "Xcode 3.2";
developmentRegion = en;
hasScannedForEncodings = 0;
knownRegions = (
en,
Base,
);
mainGroup = 97C146E51CF9000F007C117D;
productRefGroup = 97C146EF1CF9000F007C117D /* Products */;
projectDirPath = "";
projectRoot = "";
targets = (
97C146ED1CF9000F007C117D /* Runner */,
);
};
/* End PBXProject section */
/* Begin PBXResourcesBuildPhase section */
97C146EC1CF9000F007C117D /* Resources */ = {
isa = PBXResourcesBuildPhase;
buildActionMask = 2147483647;
files = (
97C147011CF9000F007C117D /* LaunchScreen.storyboard in Resources */,
3B3967161E833CAA004F5970 /* AppFrameworkInfo.plist in Resources */,
9740EEB41CF90195004384FC /* Debug.xcconfig in Resources */,
97C146FE1CF9000F007C117D /* Assets.xcassets in Resources */,
97C146FC1CF9000F007C117D /* Main.storyboard in Resources */,
);
runOnlyForDeploymentPostprocessing = 0;
};
/* End PBXResourcesBuildPhase section */
/* Begin PBXShellScriptBuildPhase section */
3B06AD1E1E4923F5004D2608 /* Thin Binary */ = {
isa = PBXShellScriptBuildPhase;
buildActionMask = 2147483647;
files = (
);
inputPaths = (
);
name = "Thin Binary";
outputPaths = (
);
runOnlyForDeploymentPostprocessing = 0;
shellPath = /bin/sh;
shellScript = "/bin/sh \"$FLUTTER_ROOT/packages/flutter_tools/bin/xcode_backend.sh\" embed_and_thin";
};
8B0988F04B6AE44AA0304FEF /* [CP] Embed Pods Frameworks */ = {
isa = PBXShellScriptBuildPhase;
buildActionMask = 2147483647;
files = (
);
inputPaths = (
);
name = "[CP] Embed Pods Frameworks";
outputPaths = (
);
runOnlyForDeploymentPostprocessing = 0;
shellPath = /bin/sh;
shellScript = "\"${PODS_ROOT}/Target Support Files/Pods-Runner/Pods-Runner-frameworks.sh\"\n";
showEnvVarsInLog = 0;
};
949FCB95217187F2C022D6A9 /* [CP] Check Pods Manifest.lock */ = {
isa = PBXShellScriptBuildPhase;
buildActionMask = 2147483647;
files = (
);
inputFileListPaths = (
);
inputPaths = (
"${PODS_PODFILE_DIR_PATH}/Podfile.lock",
"${PODS_ROOT}/Manifest.lock",
);
name = "[CP] Check Pods Manifest.lock";
outputFileListPaths = (
);
outputPaths = (
"$(DERIVED_FILE_DIR)/Pods-Runner-checkManifestLockResult.txt",
);
runOnlyForDeploymentPostprocessing = 0;
shellPath = /bin/sh;
shellScript = "diff \"${PODS_PODFILE_DIR_PATH}/Podfile.lock\" \"${PODS_ROOT}/Manifest.lock\" > /dev/null\nif [ $? != 0 ] ; then\n # print error to STDERR\n echo \"error: The sandbox is not in sync with the Podfile.lock. Run 'pod install' or update your CocoaPods installation.\" >&2\n exit 1\nfi\n# This output is used by Xcode 'outputs' to avoid re-running this script phase.\necho \"SUCCESS\" > \"${SCRIPT_OUTPUT_FILE_0}\"\n";
showEnvVarsInLog = 0;
};
9740EEB61CF901F6004384FC /* Run Script */ = {
isa = PBXShellScriptBuildPhase;
buildActionMask = 2147483647;
files = (
);
inputPaths = (
);
name = "Run Script";
outputPaths = (
);
runOnlyForDeploymentPostprocessing = 0;
shellPath = /bin/sh;
shellScript = "/bin/sh \"$FLUTTER_ROOT/packages/flutter_tools/bin/xcode_backend.sh\" build";
};
/* End PBXShellScriptBuildPhase section */
/* Begin PBXSourcesBuildPhase section */
97C146EA1CF9000F007C117D /* Sources */ = {
isa = PBXSourcesBuildPhase;
buildActionMask = 2147483647;
files = (
74858FAF1ED2DC5600515810 /* AppDelegate.swift in Sources */,
1498D2341E8E89220040F4C2 /* GeneratedPluginRegistrant.m in Sources */,
);
runOnlyForDeploymentPostprocessing = 0;
};
/* End PBXSourcesBuildPhase section */
/* Begin PBXVariantGroup section */
97C146FA1CF9000F007C117D /* Main.storyboard */ = {
isa = PBXVariantGroup;
children = (
97C146FB1CF9000F007C117D /* Base */,
);
name = Main.storyboard;
sourceTree = "<group>";
};
97C146FF1CF9000F007C117D /* LaunchScreen.storyboard */ = {
isa = PBXVariantGroup;
children = (
97C147001CF9000F007C117D /* Base */,
);
name = LaunchScreen.storyboard;
sourceTree = "<group>";
};
/* End PBXVariantGroup section */
/* Begin XCBuildConfiguration section */
249021D3217E4FDB00AE95B9 /* Profile */ = {
isa = XCBuildConfiguration;
baseConfigurationReference = 7AFA3C8E1D35360C0083082E /* Release.xcconfig */;
buildSettings = {
ALWAYS_SEARCH_USER_PATHS = NO;
CLANG_ANALYZER_NONNULL = YES;
CLANG_CXX_LANGUAGE_STANDARD = "gnu++0x";
CLANG_CXX_LIBRARY = "libc++";
CLANG_ENABLE_MODULES = YES;
CLANG_ENABLE_OBJC_ARC = YES;
CLANG_WARN_BLOCK_CAPTURE_AUTORELEASING = YES;
CLANG_WARN_BOOL_CONVERSION = YES;
CLANG_WARN_COMMA = YES;
CLANG_WARN_CONSTANT_CONVERSION = YES;
CLANG_WARN_DEPRECATED_OBJC_IMPLEMENTATIONS = YES;
CLANG_WARN_DIRECT_OBJC_ISA_USAGE = YES_ERROR;
CLANG_WARN_EMPTY_BODY = YES;
CLANG_WARN_ENUM_CONVERSION = YES;
CLANG_WARN_INFINITE_RECURSION = YES;
CLANG_WARN_INT_CONVERSION = YES;
CLANG_WARN_NON_LITERAL_NULL_CONVERSION = YES;
CLANG_WARN_OBJC_IMPLICIT_RETAIN_SELF = YES;
CLANG_WARN_OBJC_LITERAL_CONVERSION = YES;
CLANG_WARN_OBJC_ROOT_CLASS = YES_ERROR;
CLANG_WARN_RANGE_LOOP_ANALYSIS = YES;
CLANG_WARN_STRICT_PROTOTYPES = YES;
CLANG_WARN_SUSPICIOUS_MOVE = YES;
CLANG_WARN_UNREACHABLE_CODE = YES;
CLANG_WARN__DUPLICATE_METHOD_MATCH = YES;
"CODE_SIGN_IDENTITY[sdk=iphoneos*]" = "iPhone Developer";
COPY_PHASE_STRIP = NO;
DEBUG_INFORMATION_FORMAT = "dwarf-with-dsym";
ENABLE_NS_ASSERTIONS = NO;
ENABLE_STRICT_OBJC_MSGSEND = YES;
GCC_C_LANGUAGE_STANDARD = gnu99;
GCC_NO_COMMON_BLOCKS = YES;
GCC_WARN_64_TO_32_BIT_CONVERSION = YES;
GCC_WARN_ABOUT_RETURN_TYPE = YES_ERROR;
GCC_WARN_UNDECLARED_SELECTOR = YES;
GCC_WARN_UNINITIALIZED_AUTOS = YES_AGGRESSIVE;
GCC_WARN_UNUSED_FUNCTION = YES;
GCC_WARN_UNUSED_VARIABLE = YES;
IPHONEOS_DEPLOYMENT_TARGET = 8.0;
MTL_ENABLE_DEBUG_INFO = NO;
SDKROOT = iphoneos;
TARGETED_DEVICE_FAMILY = "1,2";
VALIDATE_PRODUCT = YES;
};
name = Profile;
};
249021D4217E4FDB00AE95B9 /* Profile */ = {
isa = XCBuildConfiguration;
baseConfigurationReference = 7AFA3C8E1D35360C0083082E /* Release.xcconfig */;
buildSettings = {
ASSETCATALOG_COMPILER_APPICON_NAME = AppIcon;
CLANG_ENABLE_MODULES = YES;
CURRENT_PROJECT_VERSION = "$(FLUTTER_BUILD_NUMBER)";
DEVELOPMENT_TEAM = 3X949YE9K2;
ENABLE_BITCODE = NO;
FRAMEWORK_SEARCH_PATHS = (
"$(inherited)",
"$(PROJECT_DIR)/Flutter",
);
INFOPLIST_FILE = Runner/Info.plist;
IPHONEOS_DEPLOYMENT_TARGET = 10.0;
LD_RUNPATH_SEARCH_PATHS = "$(inherited) @executable_path/Frameworks";
LIBRARY_SEARCH_PATHS = (
"$(inherited)",
"$(PROJECT_DIR)/Flutter",
);
PRODUCT_BUNDLE_IDENTIFIER = com.csdcorp.speechToTextExample;
PRODUCT_NAME = "$(TARGET_NAME)";
SWIFT_OBJC_BRIDGING_HEADER = "Runner/Runner-Bridging-Header.h";
SWIFT_VERSION = 5.0;
VERSIONING_SYSTEM = "apple-generic";
};
name = Profile;
};
97C147031CF9000F007C117D /* Debug */ = {
isa = XCBuildConfiguration;
baseConfigurationReference = 9740EEB21CF90195004384FC /* Debug.xcconfig */;
buildSettings = {
ALWAYS_SEARCH_USER_PATHS = NO;
CLANG_ANALYZER_NONNULL = YES;
CLANG_CXX_LANGUAGE_STANDARD = "gnu++0x";
CLANG_CXX_LIBRARY = "libc++";
CLANG_ENABLE_MODULES = YES;
CLANG_ENABLE_OBJC_ARC = YES;
CLANG_WARN_BLOCK_CAPTURE_AUTORELEASING = YES;
CLANG_WARN_BOOL_CONVERSION = YES;
CLANG_WARN_COMMA = YES;
CLANG_WARN_CONSTANT_CONVERSION = YES;
CLANG_WARN_DEPRECATED_OBJC_IMPLEMENTATIONS = YES;
CLANG_WARN_DIRECT_OBJC_ISA_USAGE = YES_ERROR;
CLANG_WARN_EMPTY_BODY = YES;
CLANG_WARN_ENUM_CONVERSION = YES;
CLANG_WARN_INFINITE_RECURSION = YES;
CLANG_WARN_INT_CONVERSION = YES;
CLANG_WARN_NON_LITERAL_NULL_CONVERSION = YES;
CLANG_WARN_OBJC_IMPLICIT_RETAIN_SELF = YES;
CLANG_WARN_OBJC_LITERAL_CONVERSION = YES;
CLANG_WARN_OBJC_ROOT_CLASS = YES_ERROR;
CLANG_WARN_RANGE_LOOP_ANALYSIS = YES;
CLANG_WARN_STRICT_PROTOTYPES = YES;
CLANG_WARN_SUSPICIOUS_MOVE = YES;
CLANG_WARN_UNREACHABLE_CODE = YES;
CLANG_WARN__DUPLICATE_METHOD_MATCH = YES;
"CODE_SIGN_IDENTITY[sdk=iphoneos*]" = "iPhone Developer";
COPY_PHASE_STRIP = NO;
DEBUG_INFORMATION_FORMAT = dwarf;
ENABLE_STRICT_OBJC_MSGSEND = YES;
ENABLE_TESTABILITY = YES;
GCC_C_LANGUAGE_STANDARD = gnu99;
GCC_DYNAMIC_NO_PIC = NO;
GCC_NO_COMMON_BLOCKS = YES;
GCC_OPTIMIZATION_LEVEL = 0;
GCC_PREPROCESSOR_DEFINITIONS = (
"DEBUG=1",
"$(inherited)",
);
GCC_WARN_64_TO_32_BIT_CONVERSION = YES;
GCC_WARN_ABOUT_RETURN_TYPE = YES_ERROR;
GCC_WARN_UNDECLARED_SELECTOR = YES;
GCC_WARN_UNINITIALIZED_AUTOS = YES_AGGRESSIVE;
GCC_WARN_UNUSED_FUNCTION = YES;
GCC_WARN_UNUSED_VARIABLE = YES;
IPHONEOS_DEPLOYMENT_TARGET = 8.0;
MTL_ENABLE_DEBUG_INFO = YES;
ONLY_ACTIVE_ARCH = YES;
SDKROOT = iphoneos;
TARGETED_DEVICE_FAMILY = "1,2";
};
name = Debug;
};
97C147041CF9000F007C117D /* Release */ = {
isa = XCBuildConfiguration;
baseConfigurationReference = 7AFA3C8E1D35360C0083082E /* Release.xcconfig */;
buildSettings = {
ALWAYS_SEARCH_USER_PATHS = NO;
CLANG_ANALYZER_NONNULL = YES;
CLANG_CXX_LANGUAGE_STANDARD = "gnu++0x";
CLANG_CXX_LIBRARY = "libc++";
CLANG_ENABLE_MODULES = YES;
CLANG_ENABLE_OBJC_ARC = YES;
CLANG_WARN_BLOCK_CAPTURE_AUTORELEASING = YES;
CLANG_WARN_BOOL_CONVERSION = YES;
CLANG_WARN_COMMA = YES;
CLANG_WARN_CONSTANT_CONVERSION = YES;
CLANG_WARN_DEPRECATED_OBJC_IMPLEMENTATIONS = YES;
CLANG_WARN_DIRECT_OBJC_ISA_USAGE = YES_ERROR;
CLANG_WARN_EMPTY_BODY = YES;
CLANG_WARN_ENUM_CONVERSION = YES;
CLANG_WARN_INFINITE_RECURSION = YES;
CLANG_WARN_INT_CONVERSION = YES;
CLANG_WARN_NON_LITERAL_NULL_CONVERSION = YES;
CLANG_WARN_OBJC_IMPLICIT_RETAIN_SELF = YES;
CLANG_WARN_OBJC_LITERAL_CONVERSION = YES;
CLANG_WARN_OBJC_ROOT_CLASS = YES_ERROR;
CLANG_WARN_RANGE_LOOP_ANALYSIS = YES;
CLANG_WARN_STRICT_PROTOTYPES = YES;
CLANG_WARN_SUSPICIOUS_MOVE = YES;
CLANG_WARN_UNREACHABLE_CODE = YES;
CLANG_WARN__DUPLICATE_METHOD_MATCH = YES;
"CODE_SIGN_IDENTITY[sdk=iphoneos*]" = "iPhone Developer";
COPY_PHASE_STRIP = NO;
DEBUG_INFORMATION_FORMAT = "dwarf-with-dsym";
ENABLE_NS_ASSERTIONS = NO;
ENABLE_STRICT_OBJC_MSGSEND = YES;
GCC_C_LANGUAGE_STANDARD = gnu99;
GCC_NO_COMMON_BLOCKS = YES;
GCC_WARN_64_TO_32_BIT_CONVERSION = YES;
GCC_WARN_ABOUT_RETURN_TYPE = YES_ERROR;
GCC_WARN_UNDECLARED_SELECTOR = YES;
GCC_WARN_UNINITIALIZED_AUTOS = YES_AGGRESSIVE;
GCC_WARN_UNUSED_FUNCTION = YES;
GCC_WARN_UNUSED_VARIABLE = YES;
IPHONEOS_DEPLOYMENT_TARGET = 8.0;
MTL_ENABLE_DEBUG_INFO = NO;
SDKROOT = iphoneos;
SWIFT_OPTIMIZATION_LEVEL = "-Owholemodule";
TARGETED_DEVICE_FAMILY = "1,2";
VALIDATE_PRODUCT = YES;
};
name = Release;
};
97C147061CF9000F007C117D /* Debug */ = {
isa = XCBuildConfiguration;
baseConfigurationReference = 9740EEB21CF90195004384FC /* Debug.xcconfig */;
buildSettings = {
ASSETCATALOG_COMPILER_APPICON_NAME = AppIcon;
CLANG_ENABLE_MODULES = YES;
CURRENT_PROJECT_VERSION = "$(FLUTTER_BUILD_NUMBER)";
DEVELOPMENT_TEAM = 3X949YE9K2;
ENABLE_BITCODE = NO;
FRAMEWORK_SEARCH_PATHS = (
"$(inherited)",
"$(PROJECT_DIR)/Flutter",
);
INFOPLIST_FILE = Runner/Info.plist;
IPHONEOS_DEPLOYMENT_TARGET = 10.0;
LD_RUNPATH_SEARCH_PATHS = "$(inherited) @executable_path/Frameworks";
LIBRARY_SEARCH_PATHS = (
"$(inherited)",
"$(PROJECT_DIR)/Flutter",
);
PRODUCT_BUNDLE_IDENTIFIER = com.csdcorp.speechToTextExample;
PRODUCT_NAME = "$(TARGET_NAME)";
SWIFT_OBJC_BRIDGING_HEADER = "Runner/Runner-Bridging-Header.h";
SWIFT_OPTIMIZATION_LEVEL = "-Onone";
SWIFT_VERSION = 5.0;
VERSIONING_SYSTEM = "apple-generic";
};
name = Debug;
};
97C147071CF9000F007C117D /* Release */ = {
isa = XCBuildConfiguration;
baseConfigurationReference = 7AFA3C8E1D35360C0083082E /* Release.xcconfig */;
buildSettings = {
ASSETCATALOG_COMPILER_APPICON_NAME = AppIcon;
CLANG_ENABLE_MODULES = YES;
CURRENT_PROJECT_VERSION = "$(FLUTTER_BUILD_NUMBER)";
DEVELOPMENT_TEAM = 3X949YE9K2;
ENABLE_BITCODE = NO;
FRAMEWORK_SEARCH_PATHS = (
"$(inherited)",
"$(PROJECT_DIR)/Flutter",
);
INFOPLIST_FILE = Runner/Info.plist;
IPHONEOS_DEPLOYMENT_TARGET = 10.0;
LD_RUNPATH_SEARCH_PATHS = "$(inherited) @executable_path/Frameworks";
LIBRARY_SEARCH_PATHS = (
"$(inherited)",
"$(PROJECT_DIR)/Flutter",
);
PRODUCT_BUNDLE_IDENTIFIER = com.csdcorp.speechToTextExample;
PRODUCT_NAME = "$(TARGET_NAME)";
SWIFT_OBJC_BRIDGING_HEADER = "Runner/Runner-Bridging-Header.h";
SWIFT_VERSION = 5.0;
VERSIONING_SYSTEM = "apple-generic";
};
name = Release;
};
/* End XCBuildConfiguration section */
/* Begin XCConfigurationList section */
97C146E91CF9000F007C117D /* Build configuration list for PBXProject "Runner" */ = {
isa = XCConfigurationList;
buildConfigurations = (
97C147031CF9000F007C117D /* Debug */,
97C147041CF9000F007C117D /* Release */,
249021D3217E4FDB00AE95B9 /* Profile */,
);
defaultConfigurationIsVisible = 0;
defaultConfigurationName = Release;
};
97C147051CF9000F007C117D /* Build configuration list for PBXNativeTarget "Runner" */ = {
isa = XCConfigurationList;
buildConfigurations = (
97C147061CF9000F007C117D /* Debug */,
97C147071CF9000F007C117D /* Release */,
249021D4217E4FDB00AE95B9 /* Profile */,
);
defaultConfigurationIsVisible = 0;
defaultConfigurationName = Release;
};
/* End XCConfigurationList section */
};
rootObject = 97C146E61CF9000F007C117D /* Project object */;
}

@@ -0,0 +1,7 @@
<?xml version="1.0" encoding="UTF-8"?>
<Workspace
version = "1.0">
<FileRef
location = "group:Runner.xcodeproj">
</FileRef>
</Workspace>

@@ -0,0 +1,91 @@
<?xml version="1.0" encoding="UTF-8"?>
<Scheme
LastUpgradeVersion = "1020"
version = "1.3">
<BuildAction
parallelizeBuildables = "YES"
buildImplicitDependencies = "YES">
<BuildActionEntries>
<BuildActionEntry
buildForTesting = "YES"
buildForRunning = "YES"
buildForProfiling = "YES"
buildForArchiving = "YES"
buildForAnalyzing = "YES">
<BuildableReference
BuildableIdentifier = "primary"
BlueprintIdentifier = "97C146ED1CF9000F007C117D"
BuildableName = "Runner.app"
BlueprintName = "Runner"
ReferencedContainer = "container:Runner.xcodeproj">
</BuildableReference>
</BuildActionEntry>
</BuildActionEntries>
</BuildAction>
<TestAction
buildConfiguration = "Debug"
selectedDebuggerIdentifier = "Xcode.DebuggerFoundation.Debugger.LLDB"
selectedLauncherIdentifier = "Xcode.DebuggerFoundation.Launcher.LLDB"
shouldUseLaunchSchemeArgsEnv = "YES">
<Testables>
</Testables>
<MacroExpansion>
<BuildableReference
BuildableIdentifier = "primary"
BlueprintIdentifier = "97C146ED1CF9000F007C117D"
BuildableName = "Runner.app"
BlueprintName = "Runner"
ReferencedContainer = "container:Runner.xcodeproj">
</BuildableReference>
</MacroExpansion>
<AdditionalOptions>
</AdditionalOptions>
</TestAction>
<LaunchAction
buildConfiguration = "Debug"
selectedDebuggerIdentifier = "Xcode.DebuggerFoundation.Debugger.LLDB"
selectedLauncherIdentifier = "Xcode.DebuggerFoundation.Launcher.LLDB"
launchStyle = "0"
useCustomWorkingDirectory = "NO"
ignoresPersistentStateOnLaunch = "NO"
debugDocumentVersioning = "YES"
debugServiceExtension = "internal"
allowLocationSimulation = "YES">
<BuildableProductRunnable
runnableDebuggingMode = "0">
<BuildableReference
BuildableIdentifier = "primary"
BlueprintIdentifier = "97C146ED1CF9000F007C117D"
BuildableName = "Runner.app"
BlueprintName = "Runner"
ReferencedContainer = "container:Runner.xcodeproj">
</BuildableReference>
</BuildableProductRunnable>
<AdditionalOptions>
</AdditionalOptions>
</LaunchAction>
<ProfileAction
buildConfiguration = "Profile"
shouldUseLaunchSchemeArgsEnv = "YES"
savedToolIdentifier = ""
useCustomWorkingDirectory = "NO"
debugDocumentVersioning = "YES">
<BuildableProductRunnable
runnableDebuggingMode = "0">
<BuildableReference
BuildableIdentifier = "primary"
BlueprintIdentifier = "97C146ED1CF9000F007C117D"
BuildableName = "Runner.app"
BlueprintName = "Runner"
ReferencedContainer = "container:Runner.xcodeproj">
</BuildableReference>
</BuildableProductRunnable>
</ProfileAction>
<AnalyzeAction
buildConfiguration = "Debug">
</AnalyzeAction>
<ArchiveAction
buildConfiguration = "Release"
revealArchiveInOrganizer = "YES">
</ArchiveAction>
</Scheme>

@@ -0,0 +1,10 @@
<?xml version="1.0" encoding="UTF-8"?>
<Workspace
version = "1.0">
<FileRef
location = "group:Runner.xcodeproj">
</FileRef>
<FileRef
location = "group:Pods/Pods.xcodeproj">
</FileRef>
</Workspace>

@@ -0,0 +1,8 @@
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
<key>IDEDidComputeMac32BitWarning</key>
<true/>
</dict>
</plist>

@@ -0,0 +1,13 @@
import UIKit
import Flutter
@UIApplicationMain
@objc class AppDelegate: FlutterAppDelegate {
override func application(
_ application: UIApplication,
didFinishLaunchingWithOptions launchOptions: [UIApplication.LaunchOptionsKey: Any]?
) -> Bool {
GeneratedPluginRegistrant.register(with: self)
return super.application(application, didFinishLaunchingWithOptions: launchOptions)
}
}

@@ -0,0 +1,122 @@
{
"images" : [
{
"size" : "20x20",
"idiom" : "iphone",
"filename" : "Icon-App-20x20@2x.png",
"scale" : "2x"
},
{
"size" : "20x20",
"idiom" : "iphone",
"filename" : "Icon-App-20x20@3x.png",
"scale" : "3x"
},
{
"size" : "29x29",
"idiom" : "iphone",
"filename" : "Icon-App-29x29@1x.png",
"scale" : "1x"
},
{
"size" : "29x29",
"idiom" : "iphone",
"filename" : "Icon-App-29x29@2x.png",
"scale" : "2x"
},
{
"size" : "29x29",
"idiom" : "iphone",
"filename" : "Icon-App-29x29@3x.png",
"scale" : "3x"
},
{
"size" : "40x40",
"idiom" : "iphone",
"filename" : "Icon-App-40x40@2x.png",
"scale" : "2x"
},
{
"size" : "40x40",
"idiom" : "iphone",
"filename" : "Icon-App-40x40@3x.png",
"scale" : "3x"
},
{
"size" : "60x60",
"idiom" : "iphone",
"filename" : "Icon-App-60x60@2x.png",
"scale" : "2x"
},
{
"size" : "60x60",
"idiom" : "iphone",
"filename" : "Icon-App-60x60@3x.png",
"scale" : "3x"
},
{
"size" : "20x20",
"idiom" : "ipad",
"filename" : "Icon-App-20x20@1x.png",
"scale" : "1x"
},
{
"size" : "20x20",
"idiom" : "ipad",
"filename" : "Icon-App-20x20@2x.png",
"scale" : "2x"
},
{
"size" : "29x29",
"idiom" : "ipad",
"filename" : "Icon-App-29x29@1x.png",
"scale" : "1x"
},
{
"size" : "29x29",
"idiom" : "ipad",
"filename" : "Icon-App-29x29@2x.png",
"scale" : "2x"
},
{
"size" : "40x40",
"idiom" : "ipad",
"filename" : "Icon-App-40x40@1x.png",
"scale" : "1x"
},
{
"size" : "40x40",
"idiom" : "ipad",
"filename" : "Icon-App-40x40@2x.png",
"scale" : "2x"
},
{
"size" : "76x76",
"idiom" : "ipad",
"filename" : "Icon-App-76x76@1x.png",
"scale" : "1x"
},
{
"size" : "76x76",
"idiom" : "ipad",
"filename" : "Icon-App-76x76@2x.png",
"scale" : "2x"
},
{
"size" : "83.5x83.5",
"idiom" : "ipad",
"filename" : "Icon-App-83.5x83.5@2x.png",
"scale" : "2x"
},
{
"size" : "1024x1024",
"idiom" : "ios-marketing",
"filename" : "Icon-App-1024x1024@1x.png",
"scale" : "1x"
}
],
"info" : {
"version" : 1,
"author" : "xcode"
}
}

Binary app icon images (Icon-App-*.png, all sizes listed above) added; previews not shown.

@@ -0,0 +1,23 @@
{
"images" : [
{
"idiom" : "universal",
"filename" : "LaunchImage.png",
"scale" : "1x"
},
{
"idiom" : "universal",
"filename" : "LaunchImage@2x.png",
"scale" : "2x"
},
{
"idiom" : "universal",
"filename" : "LaunchImage@3x.png",
"scale" : "3x"
}
],
"info" : {
"version" : 1,
"author" : "xcode"
}
}

Binary launch images (LaunchImage.png at 1x, 2x, and 3x) added; previews not shown.

@@ -0,0 +1,5 @@
# Launch Screen Assets
You can customize the launch screen with your own desired assets by replacing the image files in this directory.
You can also do it by opening your Flutter project's Xcode project with `open ios/Runner.xcworkspace`, selecting `Runner/Assets.xcassets` in the Project Navigator and dropping in the desired images.
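For example, keeping the default file names, you would replace `LaunchImage.png`, `LaunchImage@2x.png`, and `LaunchImage@3x.png` (the files listed in `Contents.json`) with your own images at the matching scales.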

@@ -0,0 +1,37 @@
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<document type="com.apple.InterfaceBuilder3.CocoaTouch.Storyboard.XIB" version="3.0" toolsVersion="12121" systemVersion="16G29" targetRuntime="iOS.CocoaTouch" propertyAccessControl="none" useAutolayout="YES" launchScreen="YES" colorMatched="YES" initialViewController="01J-lp-oVM">
<dependencies>
<deployment identifier="iOS"/>
<plugIn identifier="com.apple.InterfaceBuilder.IBCocoaTouchPlugin" version="12089"/>
</dependencies>
<scenes>
<!--View Controller-->
<scene sceneID="EHf-IW-A2E">
<objects>
<viewController id="01J-lp-oVM" sceneMemberID="viewController">
<layoutGuides>
<viewControllerLayoutGuide type="top" id="Ydg-fD-yQy"/>
<viewControllerLayoutGuide type="bottom" id="xbc-2k-c8Z"/>
</layoutGuides>
<view key="view" contentMode="scaleToFill" id="Ze5-6b-2t3">
<autoresizingMask key="autoresizingMask" widthSizable="YES" heightSizable="YES"/>
<subviews>
<imageView opaque="NO" clipsSubviews="YES" multipleTouchEnabled="YES" contentMode="center" image="LaunchImage" translatesAutoresizingMaskIntoConstraints="NO" id="YRO-k0-Ey4">
</imageView>
</subviews>
<color key="backgroundColor" red="1" green="1" blue="1" alpha="1" colorSpace="custom" customColorSpace="sRGB"/>
<constraints>
<constraint firstItem="YRO-k0-Ey4" firstAttribute="centerX" secondItem="Ze5-6b-2t3" secondAttribute="centerX" id="1a2-6s-vTC"/>
<constraint firstItem="YRO-k0-Ey4" firstAttribute="centerY" secondItem="Ze5-6b-2t3" secondAttribute="centerY" id="4X2-HB-R7a"/>
</constraints>
</view>
</viewController>
<placeholder placeholderIdentifier="IBFirstResponder" id="iYj-Kq-Ea1" userLabel="First Responder" sceneMemberID="firstResponder"/>
</objects>
<point key="canvasLocation" x="53" y="375"/>
</scene>
</scenes>
<resources>
<image name="LaunchImage" width="168" height="185"/>
</resources>
</document>

@@ -0,0 +1,26 @@
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<document type="com.apple.InterfaceBuilder3.CocoaTouch.Storyboard.XIB" version="3.0" toolsVersion="10117" systemVersion="15F34" targetRuntime="iOS.CocoaTouch" propertyAccessControl="none" useAutolayout="YES" useTraitCollections="YES" initialViewController="BYZ-38-t0r">
<dependencies>
<deployment identifier="iOS"/>
<plugIn identifier="com.apple.InterfaceBuilder.IBCocoaTouchPlugin" version="10085"/>
</dependencies>
<scenes>
<!--Flutter View Controller-->
<scene sceneID="tne-QT-ifu">
<objects>
<viewController id="BYZ-38-t0r" customClass="FlutterViewController" sceneMemberID="viewController">
<layoutGuides>
<viewControllerLayoutGuide type="top" id="y3c-jy-aDJ"/>
<viewControllerLayoutGuide type="bottom" id="wfy-db-euE"/>
</layoutGuides>
<view key="view" contentMode="scaleToFill" id="8bC-Xf-vdC">
<rect key="frame" x="0.0" y="0.0" width="600" height="600"/>
<autoresizingMask key="autoresizingMask" widthSizable="YES" heightSizable="YES"/>
<color key="backgroundColor" white="1" alpha="1" colorSpace="custom" customColorSpace="calibratedWhite"/>
</view>
</viewController>
<placeholder placeholderIdentifier="IBFirstResponder" id="dkx-z0-nzr" sceneMemberID="firstResponder"/>
</objects>
</scene>
</scenes>
</document>

@@ -0,0 +1,49 @@
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
<key>NSMicrophoneUsageDescription</key>
<string>This example listens for speech on the device microphone on your request.</string>
<key>NSSpeechRecognitionUsageDescription</key>
<string>This example recognizes words as you speak them and displays them. </string>
<key>CFBundleDevelopmentRegion</key>
<string>$(DEVELOPMENT_LANGUAGE)</string>
<key>CFBundleExecutable</key>
<string>$(EXECUTABLE_NAME)</string>
<key>CFBundleIdentifier</key>
<string>$(PRODUCT_BUNDLE_IDENTIFIER)</string>
<key>CFBundleInfoDictionaryVersion</key>
<string>6.0</string>
<key>CFBundleName</key>
<string>speech_to_text_example</string>
<key>CFBundlePackageType</key>
<string>APPL</string>
<key>CFBundleShortVersionString</key>
<string>$(FLUTTER_BUILD_NAME)</string>
<key>CFBundleSignature</key>
<string>????</string>
<key>CFBundleVersion</key>
<string>$(FLUTTER_BUILD_NUMBER)</string>
<key>LSRequiresIPhoneOS</key>
<true/>
<key>UILaunchStoryboardName</key>
<string>LaunchScreen</string>
<key>UIMainStoryboardFile</key>
<string>Main</string>
<key>UISupportedInterfaceOrientations</key>
<array>
<string>UIInterfaceOrientationPortrait</string>
<string>UIInterfaceOrientationLandscapeLeft</string>
<string>UIInterfaceOrientationLandscapeRight</string>
</array>
<key>UISupportedInterfaceOrientations~ipad</key>
<array>
<string>UIInterfaceOrientationPortrait</string>
<string>UIInterfaceOrientationPortraitUpsideDown</string>
<string>UIInterfaceOrientationLandscapeLeft</string>
<string>UIInterfaceOrientationLandscapeRight</string>
</array>
<key>UIViewControllerBasedStatusBarAppearance</key>
<false/>
</dict>
</plist>

@@ -0,0 +1 @@
#import "GeneratedPluginRegistrant.h"

@@ -0,0 +1,275 @@
import 'dart:async';
import 'dart:math';
import 'package:flutter/material.dart';
import 'package:permission_handler/permission_handler.dart';
import 'package:speech_to_text/speech_recognition_error.dart';
import 'package:speech_to_text/speech_recognition_result.dart';
import 'package:speech_to_text/speech_to_text.dart';
void main() => runApp(MyApp());
class MyApp extends StatefulWidget {
@override
_MyAppState createState() => _MyAppState();
}
class _MyAppState extends State<MyApp> {
bool _hasSpeech = false;
double level = 0.0;
double minSoundLevel = 50000;
double maxSoundLevel = -50000;
String lastWords = "";
String lastError = "";
String lastStatus = "";
String _currentLocaleId = "";
List<LocaleName> _localeNames = [];
final SpeechToText speech = SpeechToText();
@override
void initState() {
requestPermissions();
super.initState();
}
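// Sets up the plugin and loads the available locales. Wired to the
// Initialize button below; only needs to succeed once per session.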
Future<void> initSpeechState() async {
bool hasSpeech = await speech.initialize(
onError: errorListener, onStatus: statusListener);
if (hasSpeech) {
_localeNames = await speech.locales();
var systemLocale = await speech.systemLocale();
_currentLocaleId = systemLocale.localeId;
}
if (!mounted) return;
setState(() {
_hasSpeech = hasSpeech;
});
}
Future<void> requestPermissions() async {
// Prompt for the microphone permission up front; the speech recognition
// permission is requested by the plugin itself during initialize.
await [
Permission.microphone,
].request();
}
@override
Widget build(BuildContext context) {
return MaterialApp(
home: Scaffold(
appBar: AppBar(
title: const Text('Speech to Text CloudSolution'),
),
body: Column(children: [
Center(
child: Text(
_hasSpeech
? 'Speech recognition available'
: 'Speech recognition not available',
style: TextStyle(fontSize: 22.0),
),
),
Container(
child: Column(
children: <Widget>[
Row(
mainAxisAlignment: MainAxisAlignment.spaceAround,
children: <Widget>[
FlatButton(
child: Text('Initialize'),
onPressed: _hasSpeech ? null : initSpeechState,
),
],
),
Row(
mainAxisAlignment: MainAxisAlignment.spaceAround,
children: <Widget>[
FlatButton(
child: Text('Start'),
onPressed: !_hasSpeech || speech.isListening
? null
: startListening,
),
FlatButton(
child: Text('Stop'),
onPressed: speech.isListening ? stopListening : null,
),
FlatButton(
child: Text('Cancel'),
onPressed: speech.isListening ? cancelListening : null,
),
],
),
Row(
mainAxisAlignment: MainAxisAlignment.spaceAround,
children: <Widget>[
DropdownButton(
onChanged: (selectedVal) => _switchLang(selectedVal),
value: _currentLocaleId,
items: _localeNames
.map(
(localeName) => DropdownMenuItem(
value: localeName.localeId,
child: Text(localeName.name),
),
)
.toList(),
),
],
)
],
),
),
Expanded(
flex: 4,
child: Column(
children: <Widget>[
Center(
child: Text(
'Recognized Words',
style: TextStyle(fontSize: 22.0),
),
),
Expanded(
child: Stack(
children: <Widget>[
Container(
color: Theme.of(context).selectedRowColor,
child: Center(
child: Text(
lastWords,
textAlign: TextAlign.center,
),
),
),
Positioned.fill(
bottom: 10,
child: Align(
alignment: Alignment.bottomCenter,
child: Container(
width: 40,
height: 40,
alignment: Alignment.center,
decoration: BoxDecoration(
boxShadow: [
BoxShadow(
blurRadius: .26,
spreadRadius: level * 1.5,
color: Colors.black.withOpacity(.05))
],
color: Colors.white,
borderRadius:
BorderRadius.all(Radius.circular(50)),
),
child: IconButton(icon: Icon(Icons.mic), onPressed: null),
),
),
),
],
),
),
],
),
),
Expanded(
flex: 1,
child: Column(
children: <Widget>[
Center(
child: Text(
'Error Status',
style: TextStyle(fontSize: 22.0),
),
),
Center(
child: Text(lastError),
),
],
),
),
Container(
padding: EdgeInsets.symmetric(vertical: 20),
color: Theme.of(context).backgroundColor,
child: Center(
child: speech.isListening
? Text(
"I'm listening...",
style: TextStyle(fontWeight: FontWeight.bold),
)
: Text(
'Not listening',
style: TextStyle(fontWeight: FontWeight.bold),
),
),
),
]),
),
);
}
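// Starts a listen session of up to ten seconds in the selected locale.
// Note: `onDevice: true` requires on-device recognition support (iOS 13+).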
void startListening() {
lastWords = "";
lastError = "";
speech.listen(
onResult: resultListener,
listenFor: Duration(seconds: 10),
localeId: _currentLocaleId,
onSoundLevelChange: soundLevelListener,
cancelOnError: true,
partialResults: true,
onDevice: true,
listenMode: ListenMode.confirmation);
setState(() {});
}
void stopListening() {
speech.stop();
setState(() {
level = 0.0;
});
}
void cancelListening() {
speech.cancel();
setState(() {
level = 0.0;
});
}
void resultListener(SpeechRecognitionResult result) {
setState(() {
lastWords = "${result.recognizedWords} - ${result.finalResult}";
});
}
void soundLevelListener(double level) {
minSoundLevel = min(minSoundLevel, level);
maxSoundLevel = max(maxSoundLevel, level);
// print("sound level $level: $minSoundLevel - $maxSoundLevel ");
setState(() {
this.level = level;
});
}
void errorListener(SpeechRecognitionError error) {
// print("Received error status: $error, listening: ${speech.isListening}");
setState(() {
lastError = "${error.errorMsg} - ${error.permanent}";
});
}
void statusListener(String status) {
// print(
// "Received listener status: $status, listening: ${speech.isListening}");
setState(() {
lastStatus = "$status";
});
}
void _switchLang(selectedVal) {
setState(() {
_currentLocaleId = selectedVal;
});
print(selectedVal);
}
}

@@ -0,0 +1,245 @@
# Generated by pub
# See https://dart.dev/tools/pub/glossary#lockfile
packages:
archive:
dependency: transitive
description:
name: archive
url: "https://pub.dartlang.org"
source: hosted
version: "2.0.13"
args:
dependency: transitive
description:
name: args
url: "https://pub.dartlang.org"
source: hosted
version: "1.6.0"
async:
dependency: transitive
description:
name: async
url: "https://pub.dartlang.org"
source: hosted
version: "2.4.1"
boolean_selector:
dependency: transitive
description:
name: boolean_selector
url: "https://pub.dartlang.org"
source: hosted
version: "2.0.0"
charcode:
dependency: transitive
description:
name: charcode
url: "https://pub.dartlang.org"
source: hosted
version: "1.1.3"
clock:
dependency: transitive
description:
name: clock
url: "https://pub.dartlang.org"
source: hosted
version: "1.0.1"
collection:
dependency: transitive
description:
name: collection
url: "https://pub.dartlang.org"
source: hosted
version: "1.14.12"
convert:
dependency: transitive
description:
name: convert
url: "https://pub.dartlang.org"
source: hosted
version: "2.1.1"
crypto:
dependency: transitive
description:
name: crypto
url: "https://pub.dartlang.org"
source: hosted
version: "2.1.4"
cupertino_icons:
dependency: "direct main"
description:
name: cupertino_icons
url: "https://pub.dartlang.org"
source: hosted
version: "0.1.3"
flutter:
dependency: "direct main"
description: flutter
source: sdk
version: "0.0.0"
flutter_test:
dependency: "direct dev"
description: flutter
source: sdk
version: "0.0.0"
image:
dependency: transitive
description:
name: image
url: "https://pub.dartlang.org"
source: hosted
version: "2.1.12"
json_annotation:
dependency: transitive
description:
name: json_annotation
url: "https://pub.dartlang.org"
source: hosted
version: "3.0.1"
matcher:
dependency: transitive
description:
name: matcher
url: "https://pub.dartlang.org"
source: hosted
version: "0.12.6"
meta:
dependency: transitive
description:
name: meta
url: "https://pub.dartlang.org"
source: hosted
version: "1.1.8"
nested:
dependency: transitive
description:
name: nested
url: "https://pub.dartlang.org"
source: hosted
version: "0.0.4"
path:
dependency: transitive
description:
name: path
url: "https://pub.dartlang.org"
source: hosted
version: "1.6.4"
permission_handler:
dependency: "direct main"
description:
name: permission_handler
url: "https://pub.dartlang.org"
source: hosted
version: "5.0.1+1"
permission_handler_platform_interface:
dependency: transitive
description:
name: permission_handler_platform_interface
url: "https://pub.dartlang.org"
source: hosted
version: "2.0.1"
petitparser:
dependency: transitive
description:
name: petitparser
url: "https://pub.dartlang.org"
source: hosted
version: "2.4.0"
plugin_platform_interface:
dependency: transitive
description:
name: plugin_platform_interface
url: "https://pub.dartlang.org"
source: hosted
version: "1.0.2"
provider:
dependency: "direct main"
description:
name: provider
url: "https://pub.dartlang.org"
source: hosted
version: "4.3.1"
quiver:
dependency: transitive
description:
name: quiver
url: "https://pub.dartlang.org"
source: hosted
version: "2.1.3"
sky_engine:
dependency: transitive
description: flutter
source: sdk
version: "0.0.99"
source_span:
dependency: transitive
description:
name: source_span
url: "https://pub.dartlang.org"
source: hosted
version: "1.7.0"
speech_to_text:
dependency: "direct dev"
description:
path: ".."
relative: true
source: path
version: "0.0.0"
stack_trace:
dependency: transitive
description:
name: stack_trace
url: "https://pub.dartlang.org"
source: hosted
version: "1.9.3"
stream_channel:
dependency: transitive
description:
name: stream_channel
url: "https://pub.dartlang.org"
source: hosted
version: "2.0.0"
string_scanner:
dependency: transitive
description:
name: string_scanner
url: "https://pub.dartlang.org"
source: hosted
version: "1.0.5"
term_glyph:
dependency: transitive
description:
name: term_glyph
url: "https://pub.dartlang.org"
source: hosted
version: "1.1.0"
test_api:
dependency: transitive
description:
name: test_api
url: "https://pub.dartlang.org"
source: hosted
version: "0.2.15"
typed_data:
dependency: transitive
description:
name: typed_data
url: "https://pub.dartlang.org"
source: hosted
version: "1.1.6"
vector_math:
dependency: transitive
description:
name: vector_math
url: "https://pub.dartlang.org"
source: hosted
version: "2.0.8"
xml:
dependency: transitive
description:
name: xml
url: "https://pub.dartlang.org"
source: hosted
version: "3.6.1"
sdks:
dart: ">=2.7.0 <3.0.0"
flutter: ">=1.16.0 <2.0.0"

@@ -0,0 +1,33 @@
name: speech_to_text_example
description: Demonstrates how to use the speech_to_text plugin.
version: 1.1.0
publish_to: 'none'
environment:
sdk: ">=2.1.0 <3.0.0"
dependencies:
flutter:
sdk: flutter
cupertino_icons: ^0.1.2
permission_handler: ^5.0.1+1
provider:
dev_dependencies:
flutter_test:
sdk: flutter
speech_to_text:
path: ../
# The following section is specific to Flutter.
flutter:
uses-material-design: true
assets:
- assets/sounds/speech_to_text_listening.m4r
- assets/sounds/speech_to_text_cancel.m4r
- assets/sounds/speech_to_text_stop.m4r

@@ -0,0 +1,27 @@
// This is a basic Flutter widget test.
//
// To perform an interaction with a widget in your test, use the WidgetTester
// utility that Flutter provides. For example, you can send tap and scroll
// gestures. You can also use WidgetTester to find child widgets in the widget
// tree, read text, and verify that the values of widget properties are correct.
import 'package:flutter/material.dart';
import 'package:flutter_test/flutter_test.dart';
import 'package:speech_to_text_example/main.dart';
void main() {
testWidgets('App builds and shows its title', (WidgetTester tester) async {
// Build our app and trigger a frame.
await tester.pumpWidget(MyApp());
// Verify that the main screen renders its app bar title.
expect(find.text('Speech to Text CloudSolution'), findsOneWidget);
});
}

@@ -0,0 +1,37 @@
.idea/
.vagrant/
.sconsign.dblite
.svn/
.DS_Store
*.swp
profile
DerivedData/
build/
GeneratedPluginRegistrant.h
GeneratedPluginRegistrant.m
.generated/
*.pbxuser
*.mode1v3
*.mode2v3
*.perspectivev3
!default.pbxuser
!default.mode1v3
!default.mode2v3
!default.perspectivev3
xcuserdata
*.moved-aside
*.pyc
*sync/
Icon?
.tags*
/Flutter/Generated.xcconfig
/Flutter/flutter_export_environment.sh

@@ -0,0 +1,4 @@
#import <Flutter/Flutter.h>
@interface SpeechToTextPlugin : NSObject<FlutterPlugin>
@end

@@ -0,0 +1,8 @@
#import "SpeechToTextPlugin.h"
#import <speech_to_text/speech_to_text-Swift.h>
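// Forwards plugin registration to the Swift implementation.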
@implementation SpeechToTextPlugin
+ (void)registerWithRegistrar:(NSObject<FlutterPluginRegistrar>*)registrar {
[SwiftSpeechToTextPlugin registerWithRegistrar:registrar];
}
@end

@@ -0,0 +1,580 @@
import Flutter
import UIKit
import Speech
import os.log
import Try
public enum SwiftSpeechToTextMethods: String {
case has_permission
case initialize
case listen
case stop
case cancel
case locales
case unknown // just for testing
}
public enum SwiftSpeechToTextCallbackMethods: String {
case textRecognition
case notifyStatus
case notifyError
case soundLevelChange
}
public enum SpeechToTextStatus: String {
case listening
case notListening
case unavailable
case available
}
public enum SpeechToTextErrors: String {
case onDeviceError
case noRecognizerError
case listenFailedError
case missingOrInvalidArg
}
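/// Listen modes passed from the Dart side; mapped onto SFSpeechRecognitionTaskHint when a session starts.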
public enum ListenMode: Int {
case deviceDefault = 0
case dictation = 1
case search = 2
case confirmation = 3
}
struct SpeechRecognitionWords : Codable {
let recognizedWords: String
let confidence: Decimal
}
struct SpeechRecognitionResult : Codable {
let alternates: [SpeechRecognitionWords]
let finalResult: Bool
}
struct SpeechRecognitionError : Codable {
let errorMsg: String
let permanent: Bool
}
enum SpeechToTextError: Error {
case runtimeError(String)
}
@available(iOS 10.0, *)
public class SwiftSpeechToTextPlugin: NSObject, FlutterPlugin {
private var channel: FlutterMethodChannel
private var registrar: FlutterPluginRegistrar
private var recognizer: SFSpeechRecognizer?
private var currentRequest: SFSpeechAudioBufferRecognitionRequest?
private var currentTask: SFSpeechRecognitionTask?
private var listeningSound: AVAudioPlayer?
private var successSound: AVAudioPlayer?
private var cancelSound: AVAudioPlayer?
private var rememberedAudioCategory: AVAudioSession.Category?
private var previousLocale: Locale?
private var onPlayEnd: (() -> Void)?
private var returnPartialResults: Bool = true
private var failedListen: Bool = false
private var listening = false
private let audioSession = AVAudioSession.sharedInstance()
private let audioEngine = AVAudioEngine()
private let jsonEncoder = JSONEncoder()
private let busForNodeTap = 0
private let speechBufferSize: AVAudioFrameCount = 1024
private static var subsystem = Bundle.main.bundleIdentifier!
private let pluginLog = OSLog(subsystem: "com.csdcorp.speechToText", category: "plugin")
public static func register(with registrar: FlutterPluginRegistrar) {
let channel = FlutterMethodChannel(name: "plugin.csdcorp.com/speech_to_text", binaryMessenger: registrar.messenger())
let instance = SwiftSpeechToTextPlugin( channel, registrar: registrar )
registrar.addMethodCallDelegate(instance, channel: channel )
}
init( _ channel: FlutterMethodChannel, registrar: FlutterPluginRegistrar ) {
self.channel = channel
self.registrar = registrar
}
public func handle(_ call: FlutterMethodCall, result: @escaping FlutterResult) {
switch call.method {
case SwiftSpeechToTextMethods.has_permission.rawValue:
hasPermission( result )
case SwiftSpeechToTextMethods.initialize.rawValue:
initialize( result )
case SwiftSpeechToTextMethods.listen.rawValue:
guard let argsArr = call.arguments as? Dictionary<String,AnyObject>,
let partialResults = argsArr["partialResults"] as? Bool, let onDevice = argsArr["onDevice"] as? Bool, let listenModeIndex = argsArr["listenMode"] as? Int
else {
DispatchQueue.main.async {
result(FlutterError( code: SpeechToTextErrors.missingOrInvalidArg.rawValue,
message:"Missing arg partialResults, onDevice, and listenMode are required",
details: nil ))
}
return
}
var localeStr: String? = nil
if let localeParam = argsArr["localeId"] as? String {
localeStr = localeParam
}
guard let listenMode = ListenMode(rawValue: listenModeIndex) else {
DispatchQueue.main.async {
result(FlutterError( code: SpeechToTextErrors.missingOrInvalidArg.rawValue,
message:"invalid value for listenMode, must be 0-2, was \(listenModeIndex)",
details: nil ))
}
return
}
listenForSpeech( result, localeStr: localeStr, partialResults: partialResults, onDevice: onDevice, listenMode: listenMode )
case SwiftSpeechToTextMethods.stop.rawValue:
stopSpeech( result )
case SwiftSpeechToTextMethods.cancel.rawValue:
cancelSpeech( result )
case SwiftSpeechToTextMethods.locales.rawValue:
locales( result )
default:
os_log("Unrecognized method: %{PUBLIC}@", log: pluginLog, type: .error, call.method)
DispatchQueue.main.async {
result( FlutterMethodNotImplemented)
}
}
}
private func hasPermission( _ result: @escaping FlutterResult) {
let has = SFSpeechRecognizer.authorizationStatus() == SFSpeechRecognizerAuthorizationStatus.authorized &&
AVAudioSession.sharedInstance().recordPermission == AVAudioSession.RecordPermission.granted
DispatchQueue.main.async {
result( has )
}
}
private func initialize( _ result: @escaping FlutterResult) {
var success = false
let status = SFSpeechRecognizer.authorizationStatus()
switch status {
case SFSpeechRecognizerAuthorizationStatus.notDetermined:
SFSpeechRecognizer.requestAuthorization({(status)->Void in
success = status == SFSpeechRecognizerAuthorizationStatus.authorized
if ( success ) {
AVAudioSession.sharedInstance().requestRecordPermission({(granted: Bool)-> Void in
if granted {
self.setupSpeechRecognition(result)
} else{
self.sendBoolResult( false, result );
os_log("User denied permission", log: self.pluginLog, type: .info)
}
})
}
else {
self.sendBoolResult( false, result );
}
});
case SFSpeechRecognizerAuthorizationStatus.denied:
os_log("Permission permanently denied", log: self.pluginLog, type: .info)
sendBoolResult( false, result );
case SFSpeechRecognizerAuthorizationStatus.restricted:
os_log("Device restriction prevented initialize", log: self.pluginLog, type: .info)
sendBoolResult( false, result );
default:
os_log("Has permissions continuing with setup", log: self.pluginLog, type: .debug)
setupSpeechRecognition(result)
}
}
fileprivate func sendBoolResult( _ value: Bool, _ result: @escaping FlutterResult) {
DispatchQueue.main.async {
result( value )
}
}
fileprivate func setupListeningSound() {
listeningSound = loadSound("assets/sounds/speech_to_text_listening.m4r")
successSound = loadSound("assets/sounds/speech_to_text_stop.m4r")
cancelSound = loadSound("assets/sounds/speech_to_text_cancel.m4r")
}
fileprivate func loadSound( _ assetPath: String ) -> AVAudioPlayer? {
var player: AVAudioPlayer? = nil
let soundKey = registrar.lookupKey(forAsset: assetPath )
guard !soundKey.isEmpty else {
return player
}
if let soundPath = Bundle.main.path(forResource: soundKey, ofType:nil) {
let soundUrl = URL(fileURLWithPath: soundPath )
do {
player = try AVAudioPlayer(contentsOf: soundUrl )
player?.delegate = self
} catch {
// no audio
}
}
return player
}
private func setupSpeechRecognition( _ result: @escaping FlutterResult) {
setupRecognizerForLocale( locale: Locale.current )
guard recognizer != nil else {
sendBoolResult( false, result );
return
}
recognizer?.delegate = self
setupListeningSound()
sendBoolResult( true, result );
}
private func setupRecognizerForLocale( locale: Locale ) {
if ( previousLocale == locale ) {
return
}
previousLocale = locale
recognizer = SFSpeechRecognizer( locale: locale )
}
private func getLocale( _ localeStr: String? ) -> Locale {
guard let aLocaleStr = localeStr else {
return Locale.current
}
let locale = Locale(identifier: aLocaleStr)
return locale
}
private func stopSpeech( _ result: @escaping FlutterResult) {
if ( !listening ) {
sendBoolResult( false, result );
return
}
stopAllPlayers()
if let sound = successSound {
onPlayEnd = {() -> Void in
self.currentTask?.finish()
self.stopCurrentListen( )
self.sendBoolResult( true, result )
return
}
sound.play()
}
else {
stopCurrentListen( )
sendBoolResult( true, result );
}
}
private func cancelSpeech( _ result: @escaping FlutterResult) {
if ( !listening ) {
sendBoolResult( false, result );
return
}
stopAllPlayers()
if let sound = cancelSound {
onPlayEnd = {() -> Void in
self.currentTask?.cancel()
self.stopCurrentListen( )
self.sendBoolResult( true, result )
return
}
sound.play()
}
else {
self.currentTask?.cancel()
stopCurrentListen( )
sendBoolResult( true, result );
}
}
private func stopAllPlayers() {
cancelSound?.stop()
successSound?.stop()
listeningSound?.stop()
}
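/// Tears down the current session: ends the audio request, stops the engine, removes the input tap, and restores the previous audio session category.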
private func stopCurrentListen( ) {
stopAllPlayers()
currentRequest?.endAudio()
do {
try trap {
self.audioEngine.stop()
}
}
catch {
os_log("Error stopping engine: %{PUBLIC}@", log: pluginLog, type: .error, error.localizedDescription)
}
do {
try trap {
let inputNode = self.audioEngine.inputNode
inputNode.removeTap(onBus: self.busForNodeTap);
}
}
catch {
os_log("Error removing trap: %{PUBLIC}@", log: pluginLog, type: .error, error.localizedDescription)
}
do {
if let rememberedAudioCategory = rememberedAudioCategory {
try self.audioSession.setCategory(rememberedAudioCategory)
}
}
catch {
os_log("Error stopping listen: %{PUBLIC}@", log: pluginLog, type: .error, error.localizedDescription)
}
do {
try self.audioSession.setActive(false, options: .notifyOthersOnDeactivation)
}
catch {
os_log("Error deactivation: %{PUBLIC}@", log: pluginLog, type: .info, error.localizedDescription)
}
currentRequest = nil
currentTask = nil
onPlayEnd = nil
listening = false
}
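/// Starts a new session: configures the audio session, optionally plays the listening sound, installs a tap on the input node, and starts the recognition task.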
private func listenForSpeech( _ result: @escaping FlutterResult, localeStr: String?, partialResults: Bool, onDevice: Bool, listenMode: ListenMode ) {
if ( nil != currentTask || listening ) {
sendBoolResult( false, result );
return
}
do {
// let inErrorTest = true
failedListen = false
returnPartialResults = partialResults
setupRecognizerForLocale(locale: getLocale(localeStr))
guard let localRecognizer = recognizer else {
result(FlutterError( code: SpeechToTextErrors.noRecognizerError.rawValue,
message:"Failed to create speech recognizer",
details: nil ))
return
}
if ( onDevice ) {
if #available(iOS 13.0, *), !localRecognizer.supportsOnDeviceRecognition {
result(FlutterError( code: SpeechToTextErrors.onDeviceError.rawValue,
message:"on device recognition is not supported on this device",
details: nil ))
return
}
}
rememberedAudioCategory = self.audioSession.category
try self.audioSession.setCategory(AVAudioSession.Category.playAndRecord, options: .defaultToSpeaker)
// try self.audioSession.setMode(AVAudioSession.Mode.measurement)
try self.audioSession.setMode(AVAudioSession.Mode.default)
try self.audioSession.setActive(true, options: .notifyOthersOnDeactivation)
if let sound = listeningSound {
self.onPlayEnd = {()->Void in
if ( !self.failedListen ) {
self.listening = true
self.invokeFlutter( SwiftSpeechToTextCallbackMethods.notifyStatus, arguments: SpeechToTextStatus.listening.rawValue )
}
}
sound.play()
}
self.audioEngine.reset();
let inputNode = self.audioEngine.inputNode
if(inputNode.inputFormat(forBus: 0).channelCount == 0){
throw SpeechToTextError.runtimeError("Not enough available inputs.")
}
self.currentRequest = SFSpeechAudioBufferRecognitionRequest()
guard let currentRequest = self.currentRequest else {
sendBoolResult( false, result );
return
}
currentRequest.shouldReportPartialResults = true
if #available(iOS 13.0, *), onDevice {
currentRequest.requiresOnDeviceRecognition = true
}
switch listenMode {
case ListenMode.dictation:
currentRequest.taskHint = SFSpeechRecognitionTaskHint.dictation
break
case ListenMode.search:
currentRequest.taskHint = SFSpeechRecognitionTaskHint.search
break
case ListenMode.confirmation:
currentRequest.taskHint = SFSpeechRecognitionTaskHint.confirmation
break
default:
break
}
self.currentTask = self.recognizer?.recognitionTask(with: currentRequest, delegate: self )
let recordingFormat = inputNode.outputFormat(forBus: self.busForNodeTap)
try trap {
inputNode.installTap(onBus: self.busForNodeTap, bufferSize: self.speechBufferSize, format: recordingFormat) { (buffer: AVAudioPCMBuffer, when: AVAudioTime) in
currentRequest.append(buffer)
self.updateSoundLevel( buffer: buffer )
}
}
// if ( inErrorTest ){
// throw SpeechToTextError.runtimeError("for testing only")
// }
self.audioEngine.prepare()
try self.audioEngine.start()
if nil == listeningSound {
listening = true
self.invokeFlutter( SwiftSpeechToTextCallbackMethods.notifyStatus, arguments: SpeechToTextStatus.listening.rawValue )
}
sendBoolResult( true, result );
}
catch {
failedListen = true
os_log("Error starting listen: %{PUBLIC}@", log: pluginLog, type: .error, error.localizedDescription)
stopCurrentListen()
sendBoolResult( false, result );
invokeFlutter( SwiftSpeechToTextCallbackMethods.notifyStatus, arguments: SpeechToTextStatus.notListening.rawValue )
let speechError = SpeechRecognitionError(errorMsg: "error_listen_failed", permanent: true )
do {
let errorResult = try jsonEncoder.encode(speechError)
invokeFlutter( SwiftSpeechToTextCallbackMethods.notifyError, arguments: String( data:errorResult, encoding: .utf8) )
} catch {
os_log("Could not encode JSON", log: pluginLog, type: .error)
}
}
}
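/// Computes the RMS of the PCM buffer and reports the average power in dB to Flutter as a sound level change.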
private func updateSoundLevel( buffer: AVAudioPCMBuffer) {
guard
let channelData = buffer.floatChannelData
else {
return
}
let channelDataValue = channelData.pointee
let channelDataValueArray = stride(from: 0,
to: Int(buffer.frameLength),
by: buffer.stride).map{ channelDataValue[$0] }
let frameLength = Float(buffer.frameLength)
let rms = sqrt(channelDataValueArray.map{ $0 * $0 }.reduce(0, +) / frameLength )
let avgPower = 20 * log10(rms)
self.invokeFlutter( SwiftSpeechToTextCallbackMethods.soundLevelChange, arguments: avgPower )
}
/// Build a list of localId:name with the current locale first
private func locales( _ result: @escaping FlutterResult ) {
var localeNames = [String]();
let locales = SFSpeechRecognizer.supportedLocales();
let currentLocale = Locale.current
if let idName = buildIdNameForLocale(forIdentifier: currentLocale.identifier ) {
localeNames.append(idName)
}
for locale in locales {
if ( locale.identifier == currentLocale.identifier) {
continue
}
if let idName = buildIdNameForLocale(forIdentifier: locale.identifier ) {
localeNames.append(idName)
}
}
DispatchQueue.main.async {
result(localeNames)
}
}
private func buildIdNameForLocale( forIdentifier: String ) -> String? {
var idName: String?
if let name = Locale.current.localizedString(forIdentifier: forIdentifier ) {
let sanitizedName = name.replacingOccurrences(of: ":", with: " ")
idName = "\(forIdentifier):\(sanitizedName)"
}
return idName
}
private func handleResult( _ transcriptions: [SFTranscription], isFinal: Bool ) {
if ( !isFinal && !returnPartialResults ) {
return
}
var speechWords: [SpeechRecognitionWords] = []
for transcription in transcriptions {
let words: SpeechRecognitionWords = SpeechRecognitionWords(recognizedWords: transcription.formattedString, confidence: confidenceIn( transcription))
speechWords.append( words )
}
let speechInfo = SpeechRecognitionResult(alternates: speechWords, finalResult: isFinal )
do {
let speechMsg = try jsonEncoder.encode(speechInfo)
if let speechStr = String( data:speechMsg, encoding: .utf8) {
os_log("Encoded JSON result: %{PUBLIC}@", log: pluginLog, type: .debug, speechStr )
invokeFlutter( SwiftSpeechToTextCallbackMethods.textRecognition, arguments: speechStr )
}
} catch {
os_log("Could not encode JSON", log: pluginLog, type: .error)
}
}
/// Average confidence across all segments, rounded to three decimal places.
private func confidenceIn( _ transcription: SFTranscription ) -> Decimal {
guard ( transcription.segments.count > 0 ) else {
return 0;
}
var totalConfidence: Float = 0.0;
for segment in transcription.segments {
totalConfidence += segment.confidence
}
let avgConfidence: Float = totalConfidence / Float(transcription.segments.count )
let confidence: Float = (avgConfidence * 1000).rounded() / 1000
return Decimal( string: String( describing: confidence ) )!
}
private func invokeFlutter( _ method: SwiftSpeechToTextCallbackMethods, arguments: Any? ) {
DispatchQueue.main.async {
self.channel.invokeMethod( method.rawValue, arguments: arguments )
}
}
}
@available(iOS 10.0, *)
extension SwiftSpeechToTextPlugin : SFSpeechRecognizerDelegate {
public func speechRecognizer(_ speechRecognizer: SFSpeechRecognizer, availabilityDidChange available: Bool) {
let availability = available ? SpeechToTextStatus.available.rawValue : SpeechToTextStatus.unavailable.rawValue
os_log("Availability changed: %{PUBLIC}@", log: pluginLog, type: .debug, availability)
invokeFlutter( SwiftSpeechToTextCallbackMethods.notifyStatus, arguments: availability )
}
}
@available(iOS 10.0, *)
extension SwiftSpeechToTextPlugin : SFSpeechRecognitionTaskDelegate {
public func speechRecognitionDidDetectSpeech(_ task: SFSpeechRecognitionTask) {
// Do nothing for now
}
public func speechRecognitionTaskFinishedReadingAudio(_ task: SFSpeechRecognitionTask) {
reportError(source: "FinishedReadingAudio", error: task.error)
invokeFlutter( SwiftSpeechToTextCallbackMethods.notifyStatus, arguments: SpeechToTextStatus.notListening.rawValue )
}
public func speechRecognitionTaskWasCancelled(_ task: SFSpeechRecognitionTask) {
reportError(source: "TaskWasCancelled", error: task.error)
invokeFlutter( SwiftSpeechToTextCallbackMethods.notifyStatus, arguments: SpeechToTextStatus.notListening.rawValue )
}
public func speechRecognitionTask(_ task: SFSpeechRecognitionTask, didFinishSuccessfully successfully: Bool) {
reportError(source: "FinishSuccessfully", error: task.error)
stopCurrentListen( )
}
public func speechRecognitionTask(_ task: SFSpeechRecognitionTask, didHypothesizeTranscription transcription: SFTranscription) {
reportError(source: "HypothesizeTranscription", error: task.error)
handleResult( [transcription], isFinal: false )
}
public func speechRecognitionTask(_ task: SFSpeechRecognitionTask, didFinishRecognition recognitionResult: SFSpeechRecognitionResult) {
reportError(source: "FinishRecognition", error: task.error)
let isFinal = recognitionResult.isFinal
handleResult( recognitionResult.transcriptions, isFinal: isFinal )
}
private func reportError( source: String, error: Error?) {
if ( nil != error) {
os_log("%{PUBLIC}@ with error: %{PUBLIC}@", log: pluginLog, type: .debug, source, error.debugDescription)
}
}
}
@available(iOS 10.0, *)
extension SwiftSpeechToTextPlugin : AVAudioPlayerDelegate {
public func audioPlayerDidFinishPlaying(_ player: AVAudioPlayer,
successfully flag: Bool) {
if let playEnd = self.onPlayEnd {
playEnd()
}
}
}

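On iOS the sound level sent to `onSoundLevelChange` is the `20 * log10(rms)` value computed in `updateSoundLevel` above, so silence (an RMS of 0) arrives as negative infinity. A minimal Dart sketch of mapping that reading onto a 0..1 UI meter; `normalizedLevel` and the -50 dB floor are illustrative assumptions, not plugin values:

```dart
/// Hypothetical helper: maps the decibel level reported to
/// onSoundLevelChange on iOS into a 0..1 range for a level meter.
/// The -50 dB floor is an assumed calibration, not a plugin value.
double normalizedLevel(double db, {double floorDb = -50.0}) {
  // log10(0) on a silent buffer yields -infinity; treat it as silence.
  if (db.isNaN || db.isInfinite) return 0.0;
  final clamped = db.clamp(floorDb, 0.0).toDouble();
  return (clamped - floorDb) / -floorDb;
}
```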
@@ -0,0 +1,22 @@
#
# To learn more about a Podspec see http://guides.cocoapods.org/syntax/podspec.html
#
Pod::Spec.new do |s|
s.name = 'speech_to_text'
s.version = '0.0.1'
s.summary = 'A new flutter plugin project.'
s.description = <<-DESC
A new flutter plugin project.
DESC
s.homepage = 'http://example.com'
s.license = { :file => '../LICENSE' }
s.author = { 'Your Company' => 'email@example.com' }
s.source = { :path => '.' }
s.source_files = 'Classes/**/*'
s.public_header_files = 'Classes/**/*.h'
s.dependency 'Flutter'
s.dependency 'Try'
s.ios.deployment_target = '8.0'
end

@@ -0,0 +1,44 @@
import 'package:json_annotation/json_annotation.dart';
part 'speech_recognition_error.g.dart';
/// A single error returned from the underlying speech services.
///
/// Errors are either transient or permanent. Permanent errors
/// block speech recognition from continuing and must be
/// addressed before recognition will work. Transient errors
/// cause individual recognition sessions to fail but subsequent
/// attempts may well succeed.
@JsonSerializable()
class SpeechRecognitionError {
/// Use this to differentiate the various error conditions.
///
/// Not meant for display to the user.
final String errorMsg;
/// True means that recognition cannot continue until
/// the error is resolved.
final bool permanent;
SpeechRecognitionError(this.errorMsg, this.permanent);
factory SpeechRecognitionError.fromJson(Map<String, dynamic> json) =>
_$SpeechRecognitionErrorFromJson(json);
Map<String, dynamic> toJson() => _$SpeechRecognitionErrorToJson(this);
@override
String toString() {
return "SpeechRecognitionError msg: $errorMsg, permanent: $permanent";
}
@override
bool operator ==(Object other) {
return identical(this, other) ||
other is SpeechRecognitionError &&
errorMsg == other.errorMsg &&
permanent == other.permanent;
}
@override
int get hashCode => errorMsg.hashCode;
}

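Because permanent errors block recognition until addressed while transient ones may clear on retry, an error listener usually branches on [permanent]. A sketch, assuming a hypothetical `onSpeechError` handler registered via `initialize(onError: ...)`:

```dart
import 'package:speech_to_text/speech_recognition_error.dart';

/// Illustrative handler only, not part of the plugin.
void onSpeechError(SpeechRecognitionError error) {
  if (error.permanent) {
    // e.g. "error_permission": recognition cannot continue until the
    // underlying problem is addressed.
    print('Permanent error: ${error.errorMsg}');
  } else {
    // A subsequent listen attempt may well succeed.
    print('Transient error: ${error.errorMsg}');
  }
}
```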
@@ -0,0 +1,22 @@
// GENERATED CODE - DO NOT MODIFY BY HAND
part of 'speech_recognition_error.dart';
// **************************************************************************
// JsonSerializableGenerator
// **************************************************************************
SpeechRecognitionError _$SpeechRecognitionErrorFromJson(
Map<String, dynamic> json) {
return SpeechRecognitionError(
json['errorMsg'] as String,
json['permanent'] as bool,
);
}
Map<String, dynamic> _$SpeechRecognitionErrorToJson(
SpeechRecognitionError instance) =>
<String, dynamic>{
'errorMsg': instance.errorMsg,
'permanent': instance.permanent,
};

@@ -0,0 +1,30 @@
import 'package:speech_to_text/speech_recognition_error.dart';
import 'package:speech_to_text/speech_recognition_result.dart';
enum SpeechRecognitionEventType {
finalRecognitionEvent,
partialRecognitionEvent,
errorEvent,
statusChangeEvent,
soundLevelChangeEvent,
}
/// A single event in a stream of speech recognition events.
///
/// Use [eventType] to determine what type of event it is and depending on that
/// use the other properties to get information about it.
class SpeechRecognitionEvent {
final SpeechRecognitionEventType eventType;
final SpeechRecognitionError _error;
final SpeechRecognitionResult _result;
final bool _listening;
final double _level;
SpeechRecognitionEvent(
this.eventType, this._result, this._error, this._listening, this._level);
bool get isListening => _listening;
double get level => _level;
SpeechRecognitionResult get recognitionResult => _result;
SpeechRecognitionError get error => _error;
}

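As the doc comment says, dispatch on [eventType] before reading the type-specific properties, since the members for other event types are null. A sketch with a hypothetical `handleEvent` function:

```dart
import 'package:speech_to_text/speech_recognition_event.dart';

/// Illustrative only: read just the properties relevant to each type.
void handleEvent(SpeechRecognitionEvent event) {
  switch (event.eventType) {
    case SpeechRecognitionEventType.finalRecognitionEvent:
    case SpeechRecognitionEventType.partialRecognitionEvent:
      print('words: ${event.recognitionResult.recognizedWords}');
      break;
    case SpeechRecognitionEventType.errorEvent:
      print('error: ${event.error.errorMsg}');
      break;
    case SpeechRecognitionEventType.statusChangeEvent:
      print('listening: ${event.isListening}');
      break;
    case SpeechRecognitionEventType.soundLevelChangeEvent:
      print('level: ${event.level}');
      break;
  }
}
```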
@@ -0,0 +1,140 @@
import 'dart:collection';
import 'package:json_annotation/json_annotation.dart';
part 'speech_recognition_result.g.dart';
/// A sequence of recognized words from the speech recognition
/// service.
///
/// Depending on the platform behaviour the words may come in all
/// at once at the end or as partial results as each word is
/// recognized. Use the [finalResult] flag to determine if the
/// result is considered final by the platform.
@JsonSerializable(explicitToJson: true)
class SpeechRecognitionResult {
List<SpeechRecognitionWords> _alternates;
/// Returns a list of possible transcriptions of the speech.
///
/// The first value is always the same as the [recognizedWords]
/// value. Use the confidence for each alternate transcription
/// to determine how likely it is. Note that not all platforms
/// report confidence well; there are convenience methods on
/// [SpeechRecognitionWords] to work with possibly missing
/// confidence values.
List<SpeechRecognitionWords> get alternates =>
UnmodifiableListView(_alternates);
/// The sequence of words that is the best transcription of
/// what was said.
///
/// This is the same as the first value of [alternates].
String get recognizedWords =>
_alternates.isNotEmpty ? _alternates.first.recognizedWords : "";
/// False means the words are an interim result, true means
/// they are the final recognition.
final bool finalResult;
/// The confidence that the [recognizedWords] are correct.
///
/// Confidence is expressed as a value between 0 and 1. -1
/// means that the confidence value was not available.
double get confidence =>
_alternates.isNotEmpty ? _alternates.first.confidence : 0;
/// true if there is confidence in this recognition, false otherwise.
///
/// There are two separate ways for there to be confidence: the first
/// is if the confidence is missing, which is indicated by a value of
/// -1. The second is if the confidence is greater than or equal to
/// [threshold]. If [threshold] is not provided it defaults to 0.8.
bool isConfident(
{double threshold = SpeechRecognitionWords.confidenceThreshold}) =>
_alternates.isNotEmpty
? _alternates.first.isConfident(threshold: threshold)
: false;
/// true if [confidence] is not the [missingConfidence] value, false
/// otherwise.
bool get hasConfidenceRating =>
_alternates.isNotEmpty ? _alternates.first.hasConfidenceRating : false;
SpeechRecognitionResult(this._alternates, this.finalResult);
@override
String toString() {
return "SpeechRecognitionResult words: $_alternates, final: $finalResult";
}
@override
bool operator ==(Object other) {
return identical(this, other) ||
other is SpeechRecognitionResult &&
recognizedWords == other.recognizedWords &&
finalResult == other.finalResult;
}
@override
int get hashCode => recognizedWords.hashCode;
factory SpeechRecognitionResult.fromJson(Map<String, dynamic> json) =>
_$SpeechRecognitionResultFromJson(json);
Map<String, dynamic> toJson() => _$SpeechRecognitionResultToJson(this);
}
/// A set of words recognized in a [SpeechRecognitionResult].
///
/// Each result will have one or more [SpeechRecognitionWords]
/// with a varying degree of confidence about each set of words.
@JsonSerializable()
class SpeechRecognitionWords {
/// The sequence of words recognized
final String recognizedWords;
/// The confidence that the [recognizedWords] are correct.
///
/// Confidence is expressed as a value between 0 and 1. A value of
/// [missingConfidence] (-1) means that the confidence value was not
/// available. Use [isConfident], which handles missing values
/// automatically.
final double confidence;
static const double confidenceThreshold = 0.8;
static const double missingConfidence = -1;
const SpeechRecognitionWords(this.recognizedWords, this.confidence);
/// true if there is confidence in this recognition, false otherwise.
///
/// There are two separate ways for there to be confidence: the first
/// is if the confidence is missing, which is indicated by a value of
/// -1. The second is if the confidence is greater than or equal to
/// [threshold]. If [threshold] is not provided it defaults to 0.8.
bool isConfident({double threshold = confidenceThreshold}) =>
confidence == missingConfidence || confidence >= threshold;
/// true if [confidence] is not the [missingConfidence] value, false
/// otherwise.
bool get hasConfidenceRating => confidence != missingConfidence;
@override
String toString() {
return "SpeechRecognitionWords words: $recognizedWords, confidence: $confidence";
}
@override
bool operator ==(Object other) {
return identical(this, other) ||
other is SpeechRecognitionWords &&
recognizedWords == other.recognizedWords &&
confidence == other.confidence;
}
@override
int get hashCode => recognizedWords.hashCode;
factory SpeechRecognitionWords.fromJson(Map<String, dynamic> json) =>
_$SpeechRecognitionWordsFromJson(json);
Map<String, dynamic> toJson() => _$SpeechRecognitionWordsToJson(this);
}

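A short sketch of how [alternates], [hasConfidenceRating], and [isConfident] combine when inspecting a result; `inspect` is a hypothetical helper and the 0.7 threshold is arbitrary:

```dart
import 'package:speech_to_text/speech_recognition_result.dart';

/// Illustrative only: the first alternate is always the best guess.
void inspect(SpeechRecognitionResult result) {
  print('best: ${result.recognizedWords} (final: ${result.finalResult})');
  for (var words in result.alternates) {
    final note = words.hasConfidenceRating
        ? words.confidence.toStringAsFixed(3)
        : 'not reported';
    print('"${words.recognizedWords}" confidence: $note');
  }
  if (result.isConfident(threshold: 0.7)) {
    // Confident enough to act on, e.g. submit as a search query.
  }
}
```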
@@ -0,0 +1,41 @@
// GENERATED CODE - DO NOT MODIFY BY HAND
part of 'speech_recognition_result.dart';
// **************************************************************************
// JsonSerializableGenerator
// **************************************************************************
SpeechRecognitionResult _$SpeechRecognitionResultFromJson(
Map<String, dynamic> json) {
return SpeechRecognitionResult(
(json['alternates'] as List)
?.map((e) => e == null
? null
: SpeechRecognitionWords.fromJson(e as Map<String, dynamic>))
?.toList(),
json['finalResult'] as bool,
);
}
Map<String, dynamic> _$SpeechRecognitionResultToJson(
SpeechRecognitionResult instance) =>
<String, dynamic>{
'alternates': instance.alternates?.map((e) => e?.toJson())?.toList(),
'finalResult': instance.finalResult,
};
SpeechRecognitionWords _$SpeechRecognitionWordsFromJson(
Map<String, dynamic> json) {
return SpeechRecognitionWords(
json['recognizedWords'] as String,
(json['confidence'] as num)?.toDouble(),
);
}
Map<String, dynamic> _$SpeechRecognitionWordsToJson(
SpeechRecognitionWords instance) =>
<String, dynamic>{
'recognizedWords': instance.recognizedWords,
'confidence': instance.confidence,
};

@@ -0,0 +1,511 @@
import 'dart:async';
import 'dart:convert';
import 'dart:math';
import 'package:clock/clock.dart';
import 'package:flutter/foundation.dart';
import 'package:flutter/services.dart';
import 'package:speech_to_text/speech_recognition_error.dart';
import 'package:speech_to_text/speech_recognition_result.dart';
enum ListenMode {
deviceDefault,
dictation,
search,
confirmation,
}
/// Notified as words are recognized with the current set of recognized words.
///
/// See the [onResult] argument on the [listen] method for use.
typedef SpeechResultListener = void Function(SpeechRecognitionResult result);
/// Notified if errors occur during recognition or initialization.
///
/// Possible errors per the Android docs are described here:
/// https://developer.android.com/reference/android/speech/SpeechRecognizer
/// "error_audio_error"
/// "error_client"
/// "error_permission"
/// "error_network"
/// "error_network_timeout"
/// "error_no_match"
/// "error_busy"
/// "error_server"
/// "error_speech_timeout"
/// See the [onError] argument on the [initialize] method for use.
typedef SpeechErrorListener = void Function(
SpeechRecognitionError errorNotification);
/// Notified when recognition status changes.
///
/// See the [onStatus] argument on the [initialize] method for use.
typedef SpeechStatusListener = void Function(String status);
/// Notified when the sound level changes during a listen method.
///
/// [level] is a measure of the decibels of the current sound on
/// the recognition input. See the [onSoundLevelChange] argument on
/// the [listen] method for use.
typedef SpeechSoundLevelChange = Function(double level);
/// An interface to device specific speech recognition services.
///
/// The general flow of a speech recognition session is as follows:
/// ```Dart
/// SpeechToText speech = SpeechToText();
/// bool isReady = await speech.initialize();
/// if ( isReady ) {
/// await speech.listen( resultListener: resultListener );
/// }
/// ...
/// // At some point later
/// speech.stop();
/// ```
class SpeechToText {
static const String listenMethod = 'listen';
static const String textRecognitionMethod = 'textRecognition';
static const String notifyErrorMethod = 'notifyError';
static const String notifyStatusMethod = 'notifyStatus';
static const String soundLevelChangeMethod = "soundLevelChange";
static const String notListeningStatus = "notListening";
static const String listeningStatus = "listening";
static const MethodChannel speechChannel =
const MethodChannel('plugin.csdcorp.com/speech_to_text');
static final SpeechToText _instance =
SpeechToText.withMethodChannel(speechChannel);
bool _initWorked = false;
bool _recognized = false;
bool _listening = false;
bool _cancelOnError = false;
bool _partialResults = false;
int _listenStartedAt = 0;
int _lastSpeechEventAt = 0;
Duration _pauseFor;
Duration _listenFor;
/// True if not listening or the user called cancel / stop, false
/// if cancel/stop were invoked by timeout or error condition.
bool _userEnded = false;
String _lastRecognized = "";
String _lastStatus = "";
double _lastSoundLevel = 0;
Timer _listenTimer;
LocaleName _systemLocale;
SpeechRecognitionError _lastError;
SpeechResultListener _resultListener;
SpeechErrorListener errorListener;
SpeechStatusListener statusListener;
SpeechSoundLevelChange _soundLevelChange;
final MethodChannel channel;
factory SpeechToText() => _instance;
@visibleForTesting
SpeechToText.withMethodChannel(this.channel);
/// True if words have been recognized during the current [listen] call.
///
/// Goes false as soon as [cancel] is called.
bool get hasRecognized => _recognized;
/// The last set of recognized words received.
///
/// This is maintained across [cancel] calls but cleared on the next
/// [listen].
String get lastRecognizedWords => _lastRecognized;
/// The last status update received, see [initialize] to register
/// an optional listener to be notified when this changes.
String get lastStatus => _lastStatus;
/// The last sound level received during a listen event.
///
/// The sound level is a measure of how loud the current
/// input is during listening. Use the [onSoundLevelChange]
/// argument in the [listen] method to get notified of
/// changes.
double get lastSoundLevel => _lastSoundLevel;
/// True if [initialize] succeeded
bool get isAvailable => _initWorked;
/// True if [listen] succeeded and [stop] or [cancel] has not been called.
///
/// Also goes false when listening times out if listenFor was set.
bool get isListening => _listening;
bool get isNotListening => !isListening;
/// The last error received or null if none, see [initialize] to
/// register an optional listener to be notified of errors.
SpeechRecognitionError get lastError => _lastError;
/// True if an error has been received, see [lastError] for details
bool get hasError => null != lastError;
/// Returns true if the user has already granted permission to access the
/// microphone, does not prompt the user.
///
/// This method can be called before [initialize] to check if permission
/// has already been granted. If this returns false then the [initialize]
/// call will prompt the user for permission if it is allowed to do so.
/// Note that applications cannot ask for permission again if the user has
/// denied them permission in the past.
Future<bool> get hasPermission async {
bool hasPermission = await channel.invokeMethod('has_permission');
return hasPermission;
}
/// Initialize speech recognition services, returns true if
/// successful, false if failed.
///
/// This method must be called before any other speech functions.
/// If this method returns false no further [SpeechToText] methods
/// should be used. Should only be called once if successful but does protect
/// itself if called repeatedly. False usually means that the user has denied
/// permission to use speech. The usual option in that case is to give them
/// instructions on how to open system settings and grant permission.
///
/// [onError] is an optional listener for errors like
/// timeout, or failure of the device speech recognition.
/// [onStatus] is an optional listener for status changes from
/// listening to not listening.
/// [debugLogging] controls whether there is detailed logging from the underlying
/// plugins. It is off by default and usually only useful for troubleshooting
/// issues with a particular OS version or device; the output is fairly verbose.
Future<bool> initialize(
{SpeechErrorListener onError,
SpeechStatusListener onStatus,
debugLogging = false}) async {
if (_initWorked) {
return Future.value(_initWorked);
}
errorListener = onError;
statusListener = onStatus;
channel.setMethodCallHandler(_handleCallbacks);
_initWorked = await channel
.invokeMethod('initialize', {"debugLogging": debugLogging});
return _initWorked;
}
/// Stops the current listen for speech if active, does nothing if not.
///
/// Stopping a listen session will cause a final result to be sent. Each
/// listen session should be ended with either [stop] or [cancel], for
/// example in the dispose method of a Widget. [cancel] is automatically
/// invoked by a permanent error if [cancelOnError] is set to true in the
/// [listen] call.
///
/// *Note:* Cannot be used until a successful [initialize] call. Should
/// only be used after a successful [listen] call.
Future<void> stop() async {
_userEnded = true;
return _stop();
}
Future<void> _stop() async {
if (!_initWorked) {
return;
}
_shutdownListener();
await channel.invokeMethod('stop');
}
/// Cancels the current listen for speech if active, does nothing if not.
///
/// Canceling means that there will be no final result returned from the
/// recognizer. Each listen session should be ended with either [stop] or
/// [cancel], for example in the dispose method of a Widget. [cancel] is
/// automatically invoked by a permanent error if [cancelOnError] is set
/// to true in the [listen] call.
///
/// *Note* Cannot be used until a successful [initialize] call. Should only
/// be used after a successful [listen] call.
Future<void> cancel() async {
_userEnded = true;
return _cancel();
}
Future<void> _cancel() async {
if (!_initWorked) {
return;
}
_shutdownListener();
await channel.invokeMethod('cancel');
}
/// Starts a listening session for speech and converts it to text,
/// invoking the provided [onResult] method as words are recognized.
///
/// Cannot be used until a successful [initialize] call. There is a
/// time limit on listening imposed by both Android and iOS. The time
/// depends on the device, network, etc. Android is usually quite short,
/// especially if there is no active speech event detected, on the order
/// of ten seconds or so.
///
/// When listening is done always invoke either [cancel] or [stop] to
/// end the session, even if it times out. [cancelOnError] provides an
/// automatic way to ensure this happens.
///
/// [onResult] is an optional listener that is notified when words
/// are recognized.
///
/// [listenFor] sets the maximum duration that it will listen for, after
/// that it automatically stops the listen for you.
///
/// [pauseFor] sets the maximum duration of a pause in speech with no words
/// detected, after that it automatically stops the listen for you.
///
/// [localeId] is an optional locale that can be used to listen in a language
/// other than the current system default. See [locales] to find the list of
/// supported languages for listening.
///
/// [onSoundLevelChange] is an optional listener that is notified when the
/// sound level of the input changes. Use this to update the UI in response to
/// more or less input. The values currently differ between Android and iOS;
/// the Android documentation does not specify what the value means, while on
/// iOS the value returned is in decibels.
///
/// [cancelOnError] if true then listening is automatically canceled on a
/// permanent error. This defaults to false. When false cancel should be
/// called from the error handler.
///
/// [partialResults] if true the listen reports results as they are recognized,
/// when false only final results are reported. Defaults to true.
///
/// [onDevice] if true the listen attempts to recognize locally with speech never
/// leaving the device. If it cannot do this the listen attempt will fail. This is
/// usually only needed for sensitive content where privacy or security is a concern.
Future listen(
{SpeechResultListener onResult,
Duration listenFor,
Duration pauseFor,
String localeId,
SpeechSoundLevelChange onSoundLevelChange,
cancelOnError = false,
partialResults = true,
onDevice = false,
ListenMode listenMode = ListenMode.confirmation}) async {
if (!_initWorked) {
throw SpeechToTextNotInitializedException();
}
_userEnded = false;
_cancelOnError = cancelOnError;
_recognized = false;
_resultListener = onResult;
_soundLevelChange = onSoundLevelChange;
_partialResults = partialResults;
Map<String, dynamic> listenParams = {
"partialResults": partialResults || null != pauseFor,
"onDevice": onDevice,
"listenMode": listenMode.index,
};
if (null != localeId) {
listenParams["localeId"] = localeId;
}
try {
bool started = await channel.invokeMethod(listenMethod, listenParams);
if (started) {
_listenStartedAt = clock.now().millisecondsSinceEpoch;
_setupListenAndPause(pauseFor, listenFor);
}
} on PlatformException catch (e) {
throw ListenFailedException(e.details);
}
}
// Restartable timer: fires after the sooner of the remaining listenFor
// time and the pauseFor window, then either stops or re-arms.
void _setupListenAndPause(Duration pauseFor, Duration listenFor) {
_pauseFor = null;
_listenFor = null;
if (null == pauseFor && null == listenFor) {
return;
}
Duration minDuration;
if (null == pauseFor) {
_listenFor = Duration(milliseconds: listenFor.inMilliseconds);
minDuration = listenFor;
} else if (null == listenFor) {
_pauseFor = Duration(milliseconds: pauseFor.inMilliseconds);
minDuration = pauseFor;
} else {
_listenFor = Duration(milliseconds: listenFor.inMilliseconds);
_pauseFor = Duration(milliseconds: pauseFor.inMilliseconds);
var minMillis = min(listenFor.inMilliseconds - _elapsedListenMillis,
pauseFor.inMilliseconds);
minDuration = Duration(milliseconds: minMillis);
}
_listenTimer = Timer(minDuration, _stopOnPauseOrListen);
}
int get _elapsedListenMillis =>
clock.now().millisecondsSinceEpoch - _listenStartedAt;
int get _elapsedSinceSpeechEvent =>
clock.now().millisecondsSinceEpoch - _lastSpeechEventAt;
void _stopOnPauseOrListen() {
if (null != _listenFor &&
_elapsedListenMillis >= _listenFor.inMilliseconds) {
_stop();
} else if (null != _pauseFor &&
_elapsedSinceSpeechEvent >= _pauseFor.inMilliseconds) {
_stop();
} else {
_setupListenAndPause(_pauseFor, _listenFor);
}
}
/// Returns the list of speech locales available on the device.
///
/// This method is useful to find the identifier to use
/// for the [listen] method; it is the [localeId] member of the
/// [LocaleName].
///
/// Each [LocaleName] in the returned list has the
/// identifier for the locale as well as a name for
/// display. The name is localized for the system locale on
/// the device.
Future<List<LocaleName>> locales() async {
if (!_initWorked) {
throw SpeechToTextNotInitializedException();
}
final List<dynamic> locales = await channel.invokeMethod('locales');
List<LocaleName> filteredLocales = locales
.map((locale) {
var components = locale.split(":");
if (components.length != 2) {
return null;
}
return LocaleName(components[0], components[1]);
})
.where((item) => item != null)
.toList();
if (filteredLocales.isNotEmpty) {
_systemLocale = filteredLocales.first;
} else {
_systemLocale = null;
}
filteredLocales.sort((ln1, ln2) => ln1.name.compareTo(ln2.name));
return filteredLocales;
}
/// Returns the locale that will be used if no localeId is passed
/// to the [listen] method.
Future<LocaleName> systemLocale() async {
if (null == _systemLocale) {
await locales();
}
return Future.value(_systemLocale);
}
Future _handleCallbacks(MethodCall call) async {
// print("SpeechToText call: ${call.method} ${call.arguments}");
switch (call.method) {
case textRecognitionMethod:
if (call.arguments is String) {
_onTextRecognition(call.arguments);
}
break;
case notifyErrorMethod:
if (call.arguments is String) {
await _onNotifyError(call.arguments);
}
break;
case notifyStatusMethod:
if (call.arguments is String) {
_onNotifyStatus(call.arguments);
}
break;
case soundLevelChangeMethod:
if (call.arguments is double) {
_onSoundLevelChange(call.arguments);
}
break;
default:
}
}
void _onTextRecognition(String resultJson) {
_lastSpeechEventAt = clock.now().millisecondsSinceEpoch;
Map<String, dynamic> resultMap = jsonDecode(resultJson);
SpeechRecognitionResult speechResult =
SpeechRecognitionResult.fromJson(resultMap);
if (!_partialResults && !speechResult.finalResult) {
return;
}
_recognized = true;
// print("Recognized text $resultJson");
_lastRecognized = speechResult.recognizedWords;
if (null != _resultListener) {
_resultListener(speechResult);
}
}
Future<void> _onNotifyError(String errorJson) async {
if (isNotListening && _userEnded) {
return;
}
Map<String, dynamic> errorMap = jsonDecode(errorJson);
SpeechRecognitionError speechError =
SpeechRecognitionError.fromJson(errorMap);
_lastError = speechError;
if (null != errorListener) {
errorListener(speechError);
}
if (_cancelOnError && speechError.permanent) {
await _cancel();
}
}
void _onNotifyStatus(String status) {
_lastStatus = status;
_listening = status == listeningStatus;
// print(status);
if (null != statusListener) {
statusListener(status);
}
}
void _onSoundLevelChange(double level) {
if (isNotListening) {
return;
}
_lastSoundLevel = level;
if (null != _soundLevelChange) {
_soundLevelChange(level);
}
}
_shutdownListener() {
_listening = false;
_recognized = false;
_listenTimer?.cancel();
_listenTimer = null;
}
@visibleForTesting
Future processMethodCall(MethodCall call) async {
return await _handleCallbacks(call);
}
}
/// A single locale with a [name], localized to the current system locale,
/// and a [localeId] which can be used in the [listen] method to choose a
/// locale for speech recognition.
class LocaleName {
final String localeId;
final String name;
LocaleName(this.localeId, this.name);
}
/// Thrown when a method is called that requires successful
/// initialization first.
class SpeechToTextNotInitializedException implements Exception {}
/// Thrown when listen fails to properly start a speech listening session
/// on the device
class ListenFailedException implements Exception {
final String details;
ListenFailedException(this.details);
}

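Pulling the API together: initialize once, listen with the timeout parameters documented on [listen], and always end the session with [stop] or [cancel]. A minimal sketch; the function name and durations are illustrative:

```dart
import 'package:speech_to_text/speech_to_text.dart';

/// Hypothetical end-to-end session: stops after 30s total or a 3s pause.
Future<void> dictate() async {
  final speech = SpeechToText();
  final available = await speech.initialize(
      onStatus: (status) => print('status: $status'),
      onError: (error) => print('error: ${error.errorMsg}'));
  if (!available) return;
  await speech.listen(
      listenFor: Duration(seconds: 30),
      pauseFor: Duration(seconds: 3),
      partialResults: true,
      onResult: (result) =>
          print('${result.recognizedWords} (final: ${result.finalResult})'));
  // ... later, whether or not listening timed out:
  await speech.stop();
}
```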
@@ -0,0 +1,200 @@
import 'dart:async';
import 'package:flutter/material.dart';
import 'package:speech_to_text/speech_recognition_error.dart';
import 'package:speech_to_text/speech_recognition_event.dart';
import 'package:speech_to_text/speech_recognition_result.dart';
import 'package:speech_to_text/speech_to_text.dart';
/// Simplifies interaction with [SpeechToText] by handling all the callbacks and notifying
/// listeners as events happen.
///
/// Here's an example of using the [SpeechToTextProvider]
/// ```
/// var speechProvider = SpeechToTextProvider( SpeechToText());
/// var available = await speechProvider.initialize();
/// StreamSubscription<SpeechRecognitionEvent> _subscription;
/// _subscription = speechProvider.stream.listen((recognitionEvent) {
/// if (recognitionEvent.eventType == SpeechRecognitionEventType.finalRecognitionEvent ) {
/// print("I heard: ${recognitionEvent.recognitionResult.recognizedWords}");
/// }
/// });
/// speechProvider.addListener(() {
/// var words = speechProvider.lastResult?.recognizedWords;
/// });
/// ```
class SpeechToTextProvider extends ChangeNotifier {
final StreamController<SpeechRecognitionEvent> _recognitionController =
StreamController.broadcast();
final SpeechToText _speechToText;
SpeechRecognitionResult _lastResult;
double _lastLevel = 0;
List<LocaleName> _locales = [];
LocaleName _systemLocale;
/// Only construct one instance in an application.
///
/// Do not call `initialize` on the [SpeechToText] that is passed as a parameter, instead
/// call the [initialize] method on this class.
SpeechToTextProvider(this._speechToText);
Stream<SpeechRecognitionEvent> get stream => _recognitionController.stream;
/// Returns the last result received, may be null.
SpeechRecognitionResult get lastResult => _lastResult;
/// Returns the last error received, may be null.
SpeechRecognitionError get lastError => _speechToText.lastError;
/// Returns the last sound level received.
///
/// Note this is only available when `soundLevel` is set to true on
/// a call to [listen]; it will be 0 at all other times.
double get lastLevel => _lastLevel;
/// Initializes the provider and the contained [SpeechToText] instance.
///
/// Returns true if [SpeechToText] was initialized successfully and can now
/// be used, false otherwise.
Future<bool> initialize() async {
if (isAvailable) {
return isAvailable;
}
bool availableBefore = _speechToText.isAvailable;
bool available =
await _speechToText.initialize(onStatus: _onStatus, onError: _onError);
if (available) {
_locales = [];
_locales.addAll(await _speechToText.locales());
_systemLocale = await _speechToText.systemLocale();
}
if (availableBefore != available) {
notifyListeners();
}
return available;
}
/// Returns true if the provider has been initialized and can be used to recognize speech.
bool get isAvailable => _speechToText.isAvailable;
/// Returns true if the provider cannot be used to recognize speech, either because it has not
/// yet been initialized or because initialization failed.
bool get isNotAvailable => !_speechToText.isAvailable;
/// Returns true if [SpeechToText] is listening for new speech.
bool get isListening => _speechToText.isListening;
/// Returns true if [SpeechToText] is not listening for new speech.
bool get isNotListening => _speechToText.isNotListening;
/// Returns true if [SpeechToText] has a previous error.
bool get hasError => _speechToText.hasError;
/// Returns true if [lastResult] has a last result.
bool get hasResults => null != _lastResult;
/// Returns the list of locales that are available on the device for speech recognition.
List<LocaleName> get locales => _locales;
/// Returns the locale that is currently set as active on the device.
LocaleName get systemLocale => _systemLocale;
/// Start listening for new events, set [partialResults] to true to receive interim
/// recognition results.
///
/// [soundLevel] set to true to be notified on changes to the input sound level
/// on the microphone.
///
/// [listenFor] sets the maximum duration that it will listen for, after
/// that it automatically stops the listen for you.
///
/// [pauseFor] sets the maximum duration of a pause in speech with no words
/// detected, after that it automatically stops the listen for you.
///
/// Call this only after a successful [initialize] call.
void listen(
{bool partialResults = false,
bool soundLevel = false,
Duration listenFor,
Duration pauseFor}) {
_lastLevel = 0;
_lastResult = null;
if (soundLevel) {
_speechToText.listen(
partialResults: partialResults,
listenFor: listenFor,
pauseFor: pauseFor,
cancelOnError: true,
onResult: _onListenResult,
onSoundLevelChange: _onSoundLevelChange);
} else {
_speechToText.listen(
partialResults: partialResults,
listenFor: listenFor,
pauseFor: pauseFor,
cancelOnError: true,
onResult: _onListenResult);
}
}
/// Stops a current active listening session.
///
/// Call this after calling [listen] to stop the recognizer from listening further
/// and return the current result as final.
void stop() {
_speechToText.stop();
notifyListeners();
}
/// Cancel a current active listening session.
///
/// Call this after calling [listen] to stop the recognizer from listening further
/// and ignore any results recognized so far.
void cancel() {
_speechToText.cancel();
notifyListeners();
}
void _onError(SpeechRecognitionError errorNotification) {
_recognitionController.add(SpeechRecognitionEvent(
SpeechRecognitionEventType.errorEvent,
null,
errorNotification,
isListening,
null));
notifyListeners();
}
void _onStatus(String status) {
_recognitionController.add(SpeechRecognitionEvent(
SpeechRecognitionEventType.statusChangeEvent,
null,
null,
isListening,
null));
notifyListeners();
}
void _onListenResult(SpeechRecognitionResult result) {
_lastResult = result;
_recognitionController.add(SpeechRecognitionEvent(
result.finalResult
? SpeechRecognitionEventType.finalRecognitionEvent
: SpeechRecognitionEventType.partialRecognitionEvent,
result,
null,
isListening,
null));
notifyListeners();
}
void _onSoundLevelChange(double level) {
_lastLevel = level;
_recognitionController.add(SpeechRecognitionEvent(
SpeechRecognitionEventType.soundLevelChangeEvent,
null,
null,
null,
level));
notifyListeners();
}
}

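For the sound-level path through the provider: subscribe to [stream], enable `soundLevel`, and stop when done. `listenWithProvider` is a hypothetical wrapper:

```dart
import 'dart:async';
import 'package:speech_to_text/speech_recognition_event.dart';
import 'package:speech_to_text/speech_to_text.dart';
import 'package:speech_to_text/speech_to_text_provider.dart';

/// Illustrative only; mirrors the class-level example above.
Future<void> listenWithProvider() async {
  final provider = SpeechToTextProvider(SpeechToText());
  if (!await provider.initialize()) return;
  StreamSubscription<SpeechRecognitionEvent> sub =
      provider.stream.listen((event) {
    if (event.eventType == SpeechRecognitionEventType.soundLevelChangeEvent) {
      print('level: ${event.level}');
    }
  });
  provider.listen(partialResults: true, soundLevel: true);
  // ... later:
  provider.stop();
  await sub.cancel();
}
```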
@@ -0,0 +1,483 @@
# Generated by pub
# See https://dart.dev/tools/pub/glossary#lockfile
packages:
_fe_analyzer_shared:
dependency: transitive
description:
name: _fe_analyzer_shared
url: "https://pub.dartlang.org"
source: hosted
version: "5.0.0"
analyzer:
dependency: transitive
description:
name: analyzer
url: "https://pub.dartlang.org"
source: hosted
version: "0.39.13"
archive:
dependency: transitive
description:
name: archive
url: "https://pub.dartlang.org"
source: hosted
version: "2.0.13"
args:
dependency: transitive
description:
name: args
url: "https://pub.dartlang.org"
source: hosted
version: "1.6.0"
async:
dependency: transitive
description:
name: async
url: "https://pub.dartlang.org"
source: hosted
version: "2.4.1"
boolean_selector:
dependency: transitive
description:
name: boolean_selector
url: "https://pub.dartlang.org"
source: hosted
version: "2.0.0"
build:
dependency: transitive
description:
name: build
url: "https://pub.dartlang.org"
source: hosted
version: "1.3.0"
build_config:
dependency: transitive
description:
name: build_config
url: "https://pub.dartlang.org"
source: hosted
version: "0.4.2"
build_daemon:
dependency: transitive
description:
name: build_daemon
url: "https://pub.dartlang.org"
source: hosted
version: "2.1.4"
build_resolvers:
dependency: transitive
description:
name: build_resolvers
url: "https://pub.dartlang.org"
source: hosted
version: "1.3.10"
build_runner:
dependency: "direct dev"
description:
name: build_runner
url: "https://pub.dartlang.org"
source: hosted
version: "1.10.0"
build_runner_core:
dependency: transitive
description:
name: build_runner_core
url: "https://pub.dartlang.org"
source: hosted
version: "5.2.0"
built_collection:
dependency: transitive
description:
name: built_collection
url: "https://pub.dartlang.org"
source: hosted
version: "4.3.2"
built_value:
dependency: transitive
description:
name: built_value
url: "https://pub.dartlang.org"
source: hosted
version: "7.1.0"
charcode:
dependency: transitive
description:
name: charcode
url: "https://pub.dartlang.org"
source: hosted
version: "1.1.3"
checked_yaml:
dependency: transitive
description:
name: checked_yaml
url: "https://pub.dartlang.org"
source: hosted
version: "1.0.2"
clock:
dependency: "direct main"
description:
name: clock
url: "https://pub.dartlang.org"
source: hosted
version: "1.0.1"
code_builder:
dependency: transitive
description:
name: code_builder
url: "https://pub.dartlang.org"
source: hosted
version: "3.4.0"
collection:
dependency: transitive
description:
name: collection
url: "https://pub.dartlang.org"
source: hosted
version: "1.14.12"
convert:
dependency: transitive
description:
name: convert
url: "https://pub.dartlang.org"
source: hosted
version: "2.1.1"
crypto:
dependency: transitive
description:
name: crypto
url: "https://pub.dartlang.org"
source: hosted
version: "2.1.4"
csslib:
dependency: transitive
description:
name: csslib
url: "https://pub.dartlang.org"
source: hosted
version: "0.16.1"
dart_style:
dependency: transitive
description:
name: dart_style
url: "https://pub.dartlang.org"
source: hosted
version: "1.3.6"
fake_async:
dependency: "direct dev"
description:
name: fake_async
url: "https://pub.dartlang.org"
source: hosted
version: "1.1.0"
fixnum:
dependency: transitive
description:
name: fixnum
url: "https://pub.dartlang.org"
source: hosted
version: "0.10.11"
flutter:
dependency: "direct main"
description: flutter
source: sdk
version: "0.0.0"
flutter_test:
dependency: "direct dev"
description: flutter
source: sdk
version: "0.0.0"
glob:
dependency: transitive
description:
name: glob
url: "https://pub.dartlang.org"
source: hosted
version: "1.2.0"
graphs:
dependency: transitive
description:
name: graphs
url: "https://pub.dartlang.org"
source: hosted
version: "0.2.0"
html:
dependency: transitive
description:
name: html
url: "https://pub.dartlang.org"
source: hosted
version: "0.14.0+3"
http_multi_server:
dependency: transitive
description:
name: http_multi_server
url: "https://pub.dartlang.org"
source: hosted
version: "2.2.0"
http_parser:
dependency: transitive
description:
name: http_parser
url: "https://pub.dartlang.org"
source: hosted
version: "3.1.4"
image:
dependency: transitive
description:
name: image
url: "https://pub.dartlang.org"
source: hosted
version: "2.1.12"
io:
dependency: transitive
description:
name: io
url: "https://pub.dartlang.org"
source: hosted
version: "0.3.4"
js:
dependency: transitive
description:
name: js
url: "https://pub.dartlang.org"
source: hosted
version: "0.6.2"
json_annotation:
dependency: "direct main"
description:
name: json_annotation
url: "https://pub.dartlang.org"
source: hosted
version: "3.0.1"
json_serializable:
dependency: "direct dev"
description:
name: json_serializable
url: "https://pub.dartlang.org"
source: hosted
version: "3.3.0"
logging:
dependency: transitive
description:
name: logging
url: "https://pub.dartlang.org"
source: hosted
version: "0.11.4"
matcher:
dependency: transitive
description:
name: matcher
url: "https://pub.dartlang.org"
source: hosted
version: "0.12.6"
meta:
dependency: transitive
description:
name: meta
url: "https://pub.dartlang.org"
source: hosted
version: "1.1.8"
mime:
dependency: transitive
description:
name: mime
url: "https://pub.dartlang.org"
source: hosted
version: "0.9.6+3"
node_interop:
dependency: transitive
description:
name: node_interop
url: "https://pub.dartlang.org"
source: hosted
version: "1.1.1"
node_io:
dependency: transitive
description:
name: node_io
url: "https://pub.dartlang.org"
source: hosted
version: "1.1.1"
package_config:
dependency: transitive
description:
name: package_config
url: "https://pub.dartlang.org"
source: hosted
version: "1.9.3"
path:
dependency: transitive
description:
name: path
url: "https://pub.dartlang.org"
source: hosted
version: "1.6.4"
pedantic:
dependency: transitive
description:
name: pedantic
url: "https://pub.dartlang.org"
source: hosted
version: "1.9.0"
petitparser:
dependency: transitive
description:
name: petitparser
url: "https://pub.dartlang.org"
source: hosted
version: "2.4.0"
pool:
dependency: transitive
description:
name: pool
url: "https://pub.dartlang.org"
source: hosted
version: "1.4.0"
pub_semver:
dependency: transitive
description:
name: pub_semver
url: "https://pub.dartlang.org"
source: hosted
version: "1.4.4"
pubspec_parse:
dependency: transitive
description:
name: pubspec_parse
url: "https://pub.dartlang.org"
source: hosted
version: "0.1.5"
quiver:
dependency: transitive
description:
name: quiver
url: "https://pub.dartlang.org"
source: hosted
version: "2.1.3"
shelf:
dependency: transitive
description:
name: shelf
url: "https://pub.dartlang.org"
source: hosted
version: "0.7.7"
shelf_web_socket:
dependency: transitive
description:
name: shelf_web_socket
url: "https://pub.dartlang.org"
source: hosted
version: "0.2.3"
sky_engine:
dependency: transitive
description: flutter
source: sdk
version: "0.0.99"
source_gen:
dependency: transitive
description:
name: source_gen
url: "https://pub.dartlang.org"
source: hosted
version: "0.9.6"
source_span:
dependency: transitive
description:
name: source_span
url: "https://pub.dartlang.org"
source: hosted
version: "1.7.0"
stack_trace:
dependency: transitive
description:
name: stack_trace
url: "https://pub.dartlang.org"
source: hosted
version: "1.9.3"
stream_channel:
dependency: transitive
description:
name: stream_channel
url: "https://pub.dartlang.org"
source: hosted
version: "2.0.0"
stream_transform:
dependency: transitive
description:
name: stream_transform
url: "https://pub.dartlang.org"
source: hosted
version: "1.2.0"
string_scanner:
dependency: transitive
description:
name: string_scanner
url: "https://pub.dartlang.org"
source: hosted
version: "1.0.5"
term_glyph:
dependency: transitive
description:
name: term_glyph
url: "https://pub.dartlang.org"
source: hosted
version: "1.1.0"
test_api:
dependency: transitive
description:
name: test_api
url: "https://pub.dartlang.org"
source: hosted
version: "0.2.15"
timing:
dependency: transitive
description:
name: timing
url: "https://pub.dartlang.org"
source: hosted
version: "0.1.1+2"
typed_data:
dependency: transitive
description:
name: typed_data
url: "https://pub.dartlang.org"
source: hosted
version: "1.1.6"
vector_math:
dependency: transitive
description:
name: vector_math
url: "https://pub.dartlang.org"
source: hosted
version: "2.0.8"
watcher:
dependency: transitive
description:
name: watcher
url: "https://pub.dartlang.org"
source: hosted
version: "0.9.7+15"
web_socket_channel:
dependency: transitive
description:
name: web_socket_channel
url: "https://pub.dartlang.org"
source: hosted
version: "1.1.0"
xml:
dependency: transitive
description:
name: xml
url: "https://pub.dartlang.org"
source: hosted
version: "3.6.1"
yaml:
dependency: transitive
description:
name: yaml
url: "https://pub.dartlang.org"
source: hosted
version: "2.2.1"
sdks:
dart: ">=2.7.0 <3.0.0"
flutter: ">=1.10.0"

@@ -0,0 +1,31 @@
name: speech_to_text
description: A Flutter plugin that exposes device specific speech to text recognition capability.
environment:
sdk: ">=2.1.0 <3.0.0"
flutter: ">=1.10.0"
dependencies:
flutter:
sdk: flutter
json_annotation: ^3.0.0
clock: ^1.0.1
dev_dependencies:
flutter_test:
sdk: flutter
build_runner: ^1.0.0
json_serializable: ^3.0.0
fake_async: ^1.0.1
flutter:
plugin:
platforms:
android:
package: com.csdcorp.speech_to_text
pluginClass: SpeechToTextPlugin
ios:
pluginClass: SpeechToTextPlugin

@@ -0,0 +1,65 @@
import 'dart:convert';
import 'package:flutter_test/flutter_test.dart';
import 'package:speech_to_text/speech_recognition_error.dart';
void main() {
const String msg1 = "msg1";
setUp(() {});
group('properties', () {
test('equals true for same object', () {
SpeechRecognitionError error = SpeechRecognitionError(msg1, false);
expect(error, error);
});
test('equals true for different object same values', () {
SpeechRecognitionError error1 = SpeechRecognitionError(msg1, false);
SpeechRecognitionError error2 = SpeechRecognitionError(msg1, false);
expect(error1, error2);
});
test('equals false for different object', () {
SpeechRecognitionError error1 = SpeechRecognitionError(msg1, false);
SpeechRecognitionError error2 = SpeechRecognitionError("msg2", false);
expect(error1, isNot(error2));
});
test('hash same for same object', () {
SpeechRecognitionError error = SpeechRecognitionError(msg1, false);
expect(error.hashCode, error.hashCode);
});
test('hash same for different object same values', () {
SpeechRecognitionError error1 = SpeechRecognitionError(msg1, false);
SpeechRecognitionError error2 = SpeechRecognitionError(msg1, false);
expect(error1.hashCode, error2.hashCode);
});
test('hash different for different object', () {
SpeechRecognitionError error1 = SpeechRecognitionError(msg1, false);
SpeechRecognitionError error2 = SpeechRecognitionError("msg2", false);
expect(error1.hashCode, isNot(error2.hashCode));
});
test('toString as expected', () {
SpeechRecognitionError error1 = SpeechRecognitionError(msg1, false);
expect(error1.toString(),
"SpeechRecognitionError msg: $msg1, permanent: false");
});
});
group('json', () {
test('loads properly', () {
var json = jsonDecode('{"errorMsg":"$msg1","permanent":true}');
SpeechRecognitionError error = SpeechRecognitionError.fromJson(json);
expect(error.errorMsg, msg1);
expect(error.permanent, isTrue);
json = jsonDecode('{"errorMsg":"$msg1","permanent":false}');
error = SpeechRecognitionError.fromJson(json);
expect(error.permanent, isFalse);
});
test('roundtrips properly', () {
var json = jsonDecode('{"errorMsg":"$msg1","permanent":true}');
SpeechRecognitionError error = SpeechRecognitionError.fromJson(json);
var roundtripJson = error.toJson();
SpeechRecognitionError roundtripError =
SpeechRecognitionError.fromJson(roundtripJson);
expect(error, roundtripError);
});
});
}

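A hypothetical companion test, mirroring the error roundtrip test above for [SpeechRecognitionResult]; it is not part of the plugin's suite and assumes the generated fromJson/toJson shown earlier:

```dart
import 'dart:convert';
import 'package:flutter_test/flutter_test.dart';
import 'package:speech_to_text/speech_recognition_result.dart';

void main() {
  test('result roundtrips properly', () {
    var json = jsonDecode(
        '{"alternates":[{"recognizedWords":"hello","confidence":0.9}],'
        '"finalResult":true}');
    SpeechRecognitionResult result = SpeechRecognitionResult.fromJson(json);
    expect(result.recognizedWords, "hello");
    // Equality compares recognizedWords and finalResult, so a decode of
    // the encoded form should match the original.
    expect(SpeechRecognitionResult.fromJson(result.toJson()), result);
  });
}
```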