Open
Description
Steps to reproduce
- Run the app on a real iOS device in profile or release mode
- Accept the microphone and speech permissions
- Use the voice-activated commands "start", "stop" to control the timer (not the UI buttons)
Code sample
See this repo for full instructions and source code:
https://github.com/bizz84/speech_to_text_merged_threads
Code sample
import 'dart:async';
import 'dart:io';
import 'package:collection/collection.dart';
import 'package:flutter/material.dart';
import 'package:flutter/scheduler.dart';
import 'package:flutter_tts/flutter_tts.dart';
import 'package:just_audio/just_audio.dart';
import 'package:permission_handler/permission_handler.dart';
import 'package:speech_to_text/speech_recognition_error.dart';
import 'package:speech_to_text/speech_recognition_result.dart';
import 'package:speech_to_text/speech_to_text.dart';
/// App entry point: boots the Flutter framework with the root widget.
void main() => runApp(const MainApp());
/// Root widget: configures a dark, neon-accented [MaterialApp] whose home is
/// the stopwatch page.
class MainApp extends StatelessWidget {
  const MainApp({super.key});

  @override
  Widget build(BuildContext context) {
    // Dark theme with a neon-green primary on a black background.
    final darkTheme = ThemeData.dark().copyWith(
      primaryColor: const Color(0xFF8FFF00),
      colorScheme: const ColorScheme.dark(
        primary: Color(0xFF8FFF00),
        secondary: Color(0xFFFF4444),
        surface: Color(0xFF1E1E1E),
      ),
      scaffoldBackgroundColor: Colors.black,
      appBarTheme: const AppBarTheme(
        backgroundColor: Color(0xFF1E1E1E),
        elevation: 0,
      ),
    );
    return MaterialApp(
      title: 'Voice Timer',
      debugShowCheckedModeBanner: false,
      themeMode: ThemeMode.dark,
      theme: darkTheme,
      home: const StopwatchPage(),
    );
  }
}
/// The single page of the app; all state lives in [_StopwatchPageState].
class StopwatchPage extends StatefulWidget {
  const StopwatchPage({super.key});

  @override
  State<StopwatchPage> createState() {
    return _StopwatchPageState();
  }
}
/// Hosts the stopwatch UI and wires it to the voice-recognition, audio
/// feedback, and text-to-speech managers.
///
/// The page listens continuously for the spoken commands "start", "stop",
/// "restart", "reset" and "time", and mirrors them onto the stopwatch
/// display alongside the manual buttons.
class _StopwatchPageState extends State<StopwatchPage> {
  // Key gives imperative access to the display's start/stop/reset methods.
  final GlobalKey<StopwatchDisplayState> _stopwatchKey =
      GlobalKey<StopwatchDisplayState>();
  static const log = Logger(name: 'UI');

  // Stopwatch
  final _stopwatch = Stopwatch();
  Duration get _elapsed => _stopwatch.elapsed;

  // Managers
  final _voiceRecognitionManager = VoiceRecognitionManager();
  final _audioFeedbackManager = AudioFeedbackManager();
  final _textToSpeechManager = TextToSpeechManager();
  final _debouncer = Debouncer(milliseconds: 300);

  // Local state
  bool _isRunning = false;
  bool _isListeningForVoice = false;
  String _inlineLog = '';

  @override
  void initState() {
    super.initState();
    // Fire-and-forget: initState cannot await. Marked explicitly so the
    // dropped Future is intentional (matches unawaited() usage below).
    unawaited(_initialize());
  }

  @override
  void dispose() {
    // NOTE(review): _debouncer's pending Timer is not cancelled here, so a
    // debounced error callback may still fire after this State is disposed.
    _voiceRecognitionManager.dispose();
    _textToSpeechManager.dispose();
    _audioFeedbackManager.dispose();
    super.dispose();
  }

  /// One-time startup sequence: audio player, permissions, TTS, then voice
  /// recognition. Bails out (leaving manual controls working) if the
  /// microphone/speech permissions are denied.
  Future<void> _initialize() async {
    // * Initialize the audio player before starting the voice recognition,
    //   otherwise playback will fail
    await _audioFeedbackManager.initialize();
    // * Permissions
    final hasPermission =
        await PermissionManager.requestMicrophoneAndSpeechPermissions();
    if (!hasPermission) {
      if (mounted) {
        await PermissionManager.showPermissionDeniedDialog(context);
      }
      return;
    }
    await _textToSpeechManager.initialize();
    // * Voice recognition
    final didInitialize = await _initializeVoiceRecognition();
    if (didInitialize) {
      setState(() {
        _isListeningForVoice = true;
      });
    }
  }

  /// Initializes the speech recognizer and, on success, kicks off the first
  /// listening session. Returns whether initialization succeeded.
  Future<bool> _initializeVoiceRecognition() async {
    final didInitialize = await _voiceRecognitionManager.initialize(
      onError: (error) {
        // * Usually, two errors are received in quick succession
        // * This debouncer ensures that only the last error is processed
        _debouncer.run(() {
          if (error.permanent) {
            // SpeechRecognitionError msg: error_listen_failed, permanent: true
            unawaited(_initializeVoiceRecognition());
          } else {
            // Restart listening
            unawaited(_startListeningSession());
          }
        });
      },
    );
    if (didInitialize) {
      // Start listening
      unawaited(_startListeningSession());
      return true;
    } else {
      return false;
    }
  }

  /// Waits for one voice command, gives audio feedback, applies the command
  /// to the UI, then re-arms listening for the next command.
  Future<void> _startListeningSession() async {
    try {
      // Listen for a command
      final command = await _voiceRecognitionManager.startListening(
        onResult: (result) {
          setState(() {
            final lastWord =
                result.recognizedWords.toLowerCase().split(' ').last;
            _inlineLog = '$lastWord (${result.confidence})';
          });
        },
      );
      // Beep only when the command actually changes the timer state
      // (e.g. no beep for "stop" while already stopped).
      if (command == VoiceCommand.start && !_isRunning ||
          command == VoiceCommand.stop && _isRunning ||
          command == VoiceCommand.restart ||
          (command == VoiceCommand.reset && _elapsed.inMilliseconds > 0)) {
        await _audioFeedbackManager.playBeep();
      }
      // Update UI (exhaustive over VoiceCommand, so no default needed)
      switch (command) {
        case VoiceCommand.start:
          _start();
        case VoiceCommand.stop:
          _stop();
        case VoiceCommand.reset:
          _reset();
        case VoiceCommand.restart:
          _restart();
        case VoiceCommand.time:
          await _speakTime();
      }
      // Restart listening. Fire-and-forget (marked with unawaited, matching
      // the calls above) so this call stack unwinds instead of growing.
      unawaited(_startListeningSession());
    } catch (e) {
      log('Error in voice recognition: $e');
    }
  }

  // State and UI updates

  void _start() {
    _toggle(true);
  }

  void _stop() {
    _toggle(false);
  }

  /// Stops the timer and zeroes the display.
  void _reset() {
    setState(() {
      _isRunning = false;
      _stopwatchKey.currentState?.reset();
    });
  }

  /// Zeroes the display and immediately starts running again.
  void _restart() {
    setState(() {
      _stopwatchKey.currentState?.reset();
      _isRunning = true;
      _stopwatchKey.currentState?.toggleRunning(_isRunning);
    });
  }

  /// Starts or stops both the local flag and the display's stopwatch.
  void _toggle(bool isRunning) {
    setState(() {
      _isRunning = isRunning;
      _stopwatchKey.currentState?.toggleRunning(_isRunning);
    });
  }

  /// Speaks the current elapsed time via TTS.
  Future<void> _speakTime() async {
    await _textToSpeechManager.speakElapsedTime(_elapsed);
  }

  @override
  Widget build(BuildContext context) {
    final theme = Theme.of(context);
    return Scaffold(
      backgroundColor: Colors.black,
      appBar: AppBar(
        title: const Text('Voice Timer'),
        actions: [
          // Mic icon with an activity spinner while voice listening is on.
          Stack(
            alignment: Alignment.center,
            children: [
              Icon(
                _isListeningForVoice ? Icons.mic : Icons.mic_off,
                color: _isListeningForVoice
                    ? theme.colorScheme.primary
                    : Colors.grey,
              ),
              if (_isListeningForVoice)
                Positioned.fill(
                  child: CircularProgressIndicator(
                    strokeWidth: 2,
                    valueColor: AlwaysStoppedAnimation<Color>(
                      theme.colorScheme.primary.withOpacity(0.7),
                    ),
                  ),
                ),
            ],
          ),
          const SizedBox(width: 16),
        ],
      ),
      body: Column(
        crossAxisAlignment: CrossAxisAlignment.stretch,
        children: [
          // Stopwatch
          Expanded(
            child: Center(
              child: StopwatchDisplay(
                key: _stopwatchKey,
                stopwatch: _stopwatch,
              ),
            ),
          ),
          // Controls
          StopwatchControls(
            isRunning: _isRunning,
            onStartStop: () => _toggle(!_isRunning),
            onReset: _reset,
            inlineLog: _inlineLog,
          ),
        ],
      ),
    );
  }
}
/// Widget that owns the frame [Ticker] so that per-frame rebuilds are
/// confined to the time readout rather than the whole page.
class StopwatchDisplay extends StatefulWidget {
  const StopwatchDisplay({super.key, required this.stopwatch});

  /// The stopwatch whose elapsed time is rendered.
  final Stopwatch stopwatch;

  @override
  StopwatchDisplayState createState() {
    return StopwatchDisplayState();
  }
}
class StopwatchDisplayState extends State<StopwatchDisplay>
    with SingleTickerProviderStateMixin {
  Stopwatch get _stopwatch => widget.stopwatch;

  late final Ticker _ticker;
  late String _formattedTime = formatTime(Duration.zero);

  /// Formats [duration] as "SS.hh": zero-padded total seconds, then
  /// hundredths of a second.
  static String formatTime(Duration duration) {
    final secs = duration.inSeconds.toString().padLeft(2, '0');
    final hundredths = (duration.inMilliseconds.remainder(1000) ~/ 10)
        .toString()
        .padLeft(2, '0');
    return '$secs.$hundredths';
  }

  @override
  void initState() {
    super.initState();
    // The ticker's own elapsed value is ignored; we read the stopwatch.
    _ticker = createTicker((_) => _updateFormattedTime());
  }

  @override
  void dispose() {
    _ticker.dispose();
    super.dispose();
  }

  /// Recomputes the formatted time and rebuilds only when the visible
  /// string actually changed (avoids redundant setState every frame).
  void _updateFormattedTime() {
    final next = formatTime(_stopwatch.elapsed);
    if (next == _formattedTime) return;
    setState(() => _formattedTime = next);
  }

  /// Starts or stops both the stopwatch and the UI ticker together.
  void toggleRunning(bool isRunning) {
    if (isRunning) {
      _stopwatch.start();
      if (!_ticker.isTicking) _ticker.start();
    } else {
      _stopwatch.stop();
      if (_ticker.isTicking) _ticker.stop();
    }
    _updateFormattedTime();
  }

  /// Stops and zeroes the stopwatch, halts the ticker, and refreshes
  /// the readout.
  void reset() {
    _stopwatch
      ..stop()
      ..reset();
    _ticker.stop();
    _updateFormattedTime();
  }

  @override
  Widget build(BuildContext context) {
    return Text(
      _formattedTime,
      style: const TextStyle(
        fontSize: 100,
        // Tabular figures keep the digits from jittering horizontally.
        fontFeatures: [FontFeature.tabularFigures()],
      ),
    );
  }
}
/// Manual START/STOP and RESET buttons shown below the stopwatch.
class StopwatchControls extends StatelessWidget {
  const StopwatchControls({
    super.key,
    required this.isRunning,
    required this.onStartStop,
    required this.onReset,
    required this.inlineLog,
  });

  final bool isRunning;
  final VoidCallback onStartStop;
  final VoidCallback onReset;
  // NOTE: accepted but not currently rendered by this widget.
  final String inlineLog;

  @override
  Widget build(BuildContext context) {
    final startStopButton = ElevatedButton(
      onPressed: onStartStop,
      child: Text(
        isRunning ? 'STOP' : 'START',
        style: const TextStyle(fontSize: 40),
      ),
    );
    final resetButton = ElevatedButton(
      onPressed: onReset,
      child: const Text(
        'RESET',
        style: TextStyle(fontSize: 40),
      ),
    );
    return Padding(
      padding: const EdgeInsets.symmetric(vertical: 40),
      child: Column(
        crossAxisAlignment: CrossAxisAlignment.stretch,
        children: [
          Row(
            mainAxisAlignment: MainAxisAlignment.center,
            children: [startStopButton, resetButton],
          ),
        ],
      ),
    );
  }
}
/// Static helpers for requesting and explaining the microphone and
/// speech-recognition permissions the voice features need.
class PermissionManager {
  static const log = Logger(name: 'PERMISSIONS');

  /// Requests both permissions; returns true only when BOTH are granted.
  static Future<bool> requestMicrophoneAndSpeechPermissions() async {
    final micStatus = await Permission.microphone.request();
    final speechStatus = await Permission.speech.request();
    // String interpolation calls toString() implicitly.
    log('Mic status: $micStatus, speech status: $speechStatus');
    return micStatus == PermissionStatus.granted &&
        speechStatus == PermissionStatus.granted;
  }

  /// Shows a dialog explaining that voice commands are disabled and offers
  /// to open the system settings app.
  static Future<void> showPermissionDeniedDialog(BuildContext context) async {
    final result = await showDialog<bool>(
      context: context,
      builder: (context) => AlertDialog(
        title: const Text('Access Denied'),
        content: const Text(
            'Voice commands are disabled. Go to Settings > Privacy & Security and enable "Microphone" and "Speech Recognition" to activate them.\n\nThe timer will work normally with manual controls.'),
        actions: [
          TextButton(
            onPressed: () => Navigator.pop(context, false),
            child: const Text('OK'),
          ),
          TextButton(
            onPressed: () => Navigator.pop(context, true),
            child: const Text('Open Settings'),
          ),
        ],
      ),
    );
    // true means 'Open Settings' was tapped.
    if (result == true) {
      await openAppSettings();
    }
  }
}
/// The set of spoken commands the app understands.
enum VoiceCommand {
  start,
  stop,
  restart,
  reset,
  time;

  /// Looks up a command by its lowercase [Enum.name], returning null when
  /// [value] is not a recognized command word.
  static VoiceCommand? fromString(String value) {
    // asNameMap() is a dart:core enum helper (no package:collection needed)
    // and gives a map lookup instead of a linear scan.
    return values.asNameMap()[value];
  }
}
/// Thin wrapper around the speech_to_text plugin that turns one listening
/// session into a Future of a single [VoiceCommand].
class VoiceRecognitionManager {
  // Note this is a singleton
  static final _speech = SpeechToText();
  static const log = Logger(name: 'STT');
  // When true, logs how long the plugin's listen/stop calls take.
  static const logPerformance = true;

  // Call just once during app initialization
  /// Initializes the plugin. [onError] receives recognition errors so the
  /// caller can decide whether to re-initialize (permanent errors) or just
  /// restart listening. Returns whether initialization succeeded.
  Future<bool> initialize(
      {required void Function(SpeechRecognitionError) onError}) async {
    log('Initializing...');
    return await _speech.initialize(
      debugLogging: true,
      onStatus: (status) {
        log('Speech recognition status: $status');
        // Triggered as soon as a word is received when partialResults == true
        if (status == 'done') {
          // no-op
        }
      },
      onError: (error) {
        log('Speech recognition error: $error');
        onError(error);
      },
    );
  }

  /// Starts a listening session and completes with the first recognized
  /// command word (last word of the recognized phrase).
  ///
  /// NOTE(review): the returned Future never completes if no known command
  /// word is ever recognized in this session — confirm callers tolerate that.
  Future<VoiceCommand> _listen(
      {void Function(SpeechRecognitionResult)? onResult}) async {
    log('Listening...');
    final completer = Completer<VoiceCommand>();
    final stopwatch = Stopwatch()..start();
    await _speech.listen(
      onResult: (result) {
        log('Recognized: "${result.recognizedWords}"');
        onResult?.call(result);
        final recognizedWords = result.recognizedWords.toLowerCase().split(' ');
        final lastWord = recognizedWords.last;
        // Check for start commands
        final voiceCommand = VoiceCommand.fromString(lastWord);
        if (voiceCommand != null) {
          // Partial results can repeat the same word; only complete once.
          if (!completer.isCompleted) {
            completer.complete(voiceCommand);
          } else {
            log('Command already completed: $voiceCommand');
          }
        }
      },
      // listenFor: const Duration(seconds: 30),
      // pauseFor: const Duration(seconds: 3),
      localeId: 'en_US',
      listenOptions: SpeechListenOptions(
        partialResults: true,
        listenMode: ListenMode.confirmation,
        //cancelOnError: true,
      ),
    );
    if (logPerformance) {
      log('_speech.listen time: ${stopwatch.elapsed}');
    }
    return completer.future;
  }

  /// Waits for one command, then stops AND cancels the plugin session so the
  /// next call starts from a clean state. Returns the recognized command.
  Future<VoiceCommand> startListening(
      {void Function(SpeechRecognitionResult)? onResult}) async {
    final voiceCommand = await _listen(onResult: onResult);
    final stopwatch = Stopwatch()..start();
    await _speech.stop();
    await _speech.cancel();
    if (logPerformance) {
      log('_speech.stop time: ${stopwatch.elapsed}');
    }
    log('Command: ${voiceCommand.name}');
    return voiceCommand;
  }

  /// Cancels any in-flight recognition session.
  Future<void> dispose() async {
    await _speech.cancel();
  }
}
/// Wraps flutter_tts with app defaults (language, rate, volume, pitch, and
/// iOS audio-session settings) plus convenience speak methods.
class TextToSpeechManager {
  final FlutterTts _flutterTts = FlutterTts();
  // Guards the one-time plugin configuration in initialize().
  bool _isInitialized = false;
  static const log = Logger(name: 'TTS');

  /// Configures the TTS engine once; safe to call repeatedly.
  /// Failures are logged, not rethrown, so speak() can retry later.
  Future<void> initialize() async {
    if (_isInitialized) return;
    try {
      // Set language
      await _flutterTts.setLanguage('en-US');
      // Set speech rate (0.0 to 1.0)
      await _flutterTts.setSpeechRate(0.55);
      // Set volume (0.0 to 1.0)
      await _flutterTts.setVolume(1.0);
      // Set pitch (0.5 to 2.0)
      await _flutterTts.setPitch(1.0);
      // iOS specific settings
      if (Platform.isIOS) {
        await _flutterTts.setSharedInstance(true);
        await _flutterTts.setIosAudioCategory(
          IosTextToSpeechAudioCategory.playback,
          [
            IosTextToSpeechAudioCategoryOptions.allowBluetooth,
            IosTextToSpeechAudioCategoryOptions.allowBluetoothA2DP,
            IosTextToSpeechAudioCategoryOptions.mixWithOthers,
          ],
          IosTextToSpeechAudioMode.voicePrompt,
        );
      }
      // Only marked initialized after every configuration call succeeded.
      _isInitialized = true;
      log('TextToSpeech initialized successfully');
    } catch (e) {
      log('Failed to initialize TextToSpeech: $e');
    }
  }

  /// Speaks [text], interrupting any speech already in progress.
  Future<void> speak(String text) async {
    try {
      await initialize();
      await _flutterTts.stop(); // Stop any ongoing speech
      await _flutterTts.speak(text);
      log('Speaking: "$text"');
    } catch (e) {
      log('Failed to speak: $e');
    }
  }

  /// Speaks [elapsed] truncated (not rounded) to tenths of a second,
  /// e.g. "3.5 seconds", or "1 second" for whole values.
  Future<void> speakElapsedTime(Duration elapsed) async {
    final totalSeconds = elapsed.inMilliseconds / 1000.0;
    // Truncate to one decimal place.
    final tenths = (totalSeconds * 10).truncate() / 10.0;
    String timeText;
    if (tenths == tenths.toInt()) {
      // Whole number, no decimal
      timeText =
          '${tenths.toInt()} ${tenths.toInt() == 1 ? "second" : "seconds"}';
    } else {
      // Has decimal
      timeText = '${tenths.toStringAsFixed(1)} seconds';
    }
    await speak(timeText);
  }

  /// Stops any ongoing speech.
  Future<void> stop() async {
    await _flutterTts.stop();
  }

  /// Best-effort stop; fire-and-forget because dispose cannot await.
  void dispose() {
    _flutterTts.stop();
  }
}
/// Plays a short beep asset as audible feedback for recognized commands.
class AudioFeedbackManager {
  final AudioPlayer _player = AudioPlayer();
  bool _isInitialized = false;
  static const log = Logger(name: 'PLAYER');

  /// Loads the beep asset once; later calls are no-ops.
  Future<void> initialize() async {
    if (_isInitialized) return;
    log('Initializing audio player...');
    // Load the beep sound from assets
    await _player.setAsset('assets/beep-sound-8333.mp3');
    _isInitialized = true;
  }

  /// Plays the beep from the start at full volume. Failures are logged and
  /// swallowed so audio problems never break the timer.
  Future<void> playBeep() async {
    try {
      await initialize();
      log('Playing beep sound...');
      // Set volume to maximum
      await _player.setVolume(1.0);
      // Seek to beginning and play
      await _player.seek(Duration.zero);
      // Doesn't play nice with speech_to_text
      // https://github.com/csdcorp/speech_to_text/issues/470
      await _player.play();
    } catch (e) {
      // Silently fail if audio playback fails
      log('Failed to play beep sound: $e');
    }
  }

  /// Releases the underlying player's resources.
  Future<void> dispose() async {
    await _player.dispose();
  }
}
/// Minimal const-constructible logger: prints messages prefixed with a
/// timestamp and a category [name].
class Logger {
  const Logger({required this.name});

  /// Category tag prepended to every message (e.g. 'STT', 'TTS').
  final String name;

  /// Callable shorthand: `log('message')`.
  void call(String message) {
    // ignore: avoid_print
    print('[${DateTime.now()} $name] $message');
  }
}
/// Coalesces rapid repeated calls: only the action passed to the most
/// recent [run] within the [milliseconds] window is executed.
class Debouncer {
  Debouncer({required this.milliseconds});

  /// Delay before the most recently scheduled action fires.
  final int milliseconds;

  // NOTE(review): never read or written by Debouncer itself; kept only for
  // backward compatibility in case external code assigns it.
  VoidCallback? action;

  Timer? _timer;

  /// Schedules [action] to run after [milliseconds], replacing any
  /// previously scheduled action that has not fired yet.
  void run(VoidCallback action) {
    _timer?.cancel();
    _timer = Timer(Duration(milliseconds: milliseconds), action);
  }

  /// Cancels any pending action. Call from the owner's dispose() so the
  /// callback cannot fire after the owner is gone.
  void cancel() {
    _timer?.cancel();
    _timer = null;
  }
}
pubspec.yaml:
name: speech_to_text_merged_threads
description: "A new Flutter project."
publish_to: 'none'
version: 0.1.0
environment:
sdk: ^3.5.0
dependencies:
flutter:
sdk: flutter
speech_to_text: ^7.0.0
permission_handler: ^12.0.0+1
collection: ^1.18.0
flutter_tts: ^4.2.0
just_audio: ^0.9.46
dev_dependencies:
flutter_test:
sdk: flutter
flutter_lints: ^5.0.0
flutter:
uses-material-design: true
assets:
- assets/beep-sound-8333.mp3
Additional iOS configuration:
ios/Runner/Info.plist:
<key>NSMicrophoneUsageDescription</key>
<string>This app needs microphone access to recognize voice commands like "start" and "stop" for hands-free timer control during exercise.</string>
<key>NSSpeechRecognitionUsageDescription</key>
<string>This app needs speech recognition access to recognize voice commands like "start" and "stop" for hands-free timer control during exercise.</string>
<key>FLTEnableMergedPlatformUIThread</key>
<false/>
ios/Podfile:
# CocoaPods hook that runs after `pod install`: applies Flutter's standard
# iOS build settings to every pod target, then defines the preprocessor
# macros permission_handler uses to compile in only the permissions this
# app needs (microphone + speech recognition).
post_install do |installer|
  installer.pods_project.targets.each do |target|
    flutter_additional_ios_build_settings(target)
    # https://github.com/Baseflow/flutter-permission-handler/issues/1391#issuecomment-2392231125
    target.build_configurations.each do |config|
      # You can remove unused permissions here
      # for more information: https://github.com/Baseflow/flutter-permission-handler/blob/main/permission_handler_apple/ios/Classes/PermissionHandlerEnums.h
      # e.g. when you don't need camera permission, just add 'PERMISSION_CAMERA=0'
      config.build_settings['GCC_PREPROCESSOR_DEFINITIONS'] ||= [
        '$(inherited)',
        ## dart: PermissionGroup.microphone
        'PERMISSION_MICROPHONE=1',
        ## dart: PermissionGroup.speech
        'PERMISSION_SPEECH_RECOGNIZER=1',
      ]
    end
  end
end
Performance profiling on master channel
- The issue still persists on the master channel
Timeline Traces
Timeline Traces JSON
[Paste the Timeline Traces here]
Video demonstration
This video shows dropped frames when using the voice-activated commands ("start", "stop"):
Video demonstration
RPReplay_Final1749464142.mp4
What target platforms are you seeing this bug on?
iOS
OS/Browser name and version | Device information
iOS 17.6.1
Does the problem occur on emulator/simulator as well as on physical devices?
Can't test on simulator since microphone and speech recognition access is required.
Is the problem only reproducible with Impeller?
Yes
Logs
Logs
A Dart VM Service on Andrea iPhone XR is available at: http://127.0.0.1:57894/uV5VanZjXHU=/
flutter: [2025-06-09 11:15:28.748106 STT] Speech recognition status: available
flutter: [2025-06-09 11:15:28.748699 STT] Speech recognition status: listening
flutter: [2025-06-09 11:15:28.748885 STT] _speech.listen time: 0:00:00.628928
The Flutter DevTools debugger and profiler on Andrea iPhone XR is available at: http://127.0.0.1:57900?uri=http://127.0.0.1:57894/uV5VanZjXHU=/
flutter: [2025-06-09 11:15:31.333639 STT] Recognized: "Start"
flutter: [2025-06-09 11:15:31.678930 STT] _speech.stop time: 0:00:00.343885
flutter: [2025-06-09 11:15:31.679048 STT] Command: start
flutter: [2025-06-09 11:15:31.679108 PLAYER] Playing beep sound...
flutter: [2025-06-09 11:15:31.686721 STT] Recognized: "Start"
flutter: [2025-06-09 11:15:31.687308 STT] Command already completed: VoiceCommand.start
flutter: [2025-06-09 11:15:31.687388 STT] Speech recognition status: done
flutter: [2025-06-09 11:15:31.690578 STT] Speech recognition status: done
flutter: [2025-06-09 11:15:32.082403 STT] Listening...
flutter: [2025-06-09 11:15:34.542229 STT] Recognized: "Stop"
flutter: [2025-06-09 11:15:34.819032 STT] _speech.stop time: 0:00:00.276019
flutter: [2025-06-09 11:15:34.819148 STT] Command: stop
flutter: [2025-06-09 11:15:34.819200 PLAYER] Playing beep sound...
flutter: [2025-06-09 11:15:34.827244 STT] Recognized: "Stop"
flutter: [2025-06-09 11:15:34.827392 STT] Command already completed: VoiceCommand.stop
flutter: [2025-06-09 11:15:34.838595 STT] Speech recognition status: done
flutter: [2025-06-09 11:15:34.842238 STT] Speech recognition status: done
flutter: [2025-06-09 11:15:34.843391 STT] Listening...
flutter: [2025-06-09 11:15:37.153565 STT] Recognized: "Start"
flutter: [2025-06-09 11:15:37.427188 STT] _speech.stop time: 0:00:00.273151
flutter: [2025-06-09 11:15:37.427324 STT] Command: start
flutter: [2025-06-09 11:15:37.427409 PLAYER] Playing beep sound...
flutter: [2025-06-09 11:15:37.429235 STT] Recognized: "Start"
flutter: [2025-06-09 11:15:37.429409 STT] Command already completed: VoiceCommand.start
flutter: [2025-06-09 11:15:37.431020 STT] Speech recognition status: done
flutter: [2025-06-09 11:15:37.436981 STT] Speech recognition status: done
flutter: [2025-06-09 11:15:37.443257 STT] Listening...
flutter: [2025-06-09 11:15:38.100515 STT] Speech recognition status: listening
flutter: [2025-06-09 11:15:38.101618 STT] _speech.listen time: 0:00:00.655650
flutter: [2025-06-09 11:15:39.952882 STT] Recognized: "Stop"
flutter: [2025-06-09 11:15:40.217044 STT] _speech.stop time: 0:00:00.263760
flutter: [2025-06-09 11:15:40.217394 STT] Command: stop
flutter: [2025-06-09 11:15:40.217492 PLAYER] Playing beep sound...
flutter: [2025-06-09 11:15:40.220486 STT] Recognized: "Stop"
flutter: [2025-06-09 11:15:40.220600 STT] Command already completed: VoiceCommand.stop
flutter: [2025-06-09 11:15:40.220652 STT] Speech recognition status: done
flutter: [2025-06-09 11:15:40.229423 STT] Speech recognition status: done
flutter: [2025-06-09 11:15:40.241623 STT] Listening...
Flutter Doctor output
Doctor output
Doctor summary (to see all details, run flutter doctor -v):
[!] Flutter (Channel [user-branch], 3.33.0-1.0.pre.417, on macOS 14.6.1 23G93 darwin-arm64, locale en-GB)
! Flutter version 3.33.0-1.0.pre.417 on channel [user-branch] at /Users/andrea/.puro/envs/master/flutter
Currently on an unknown channel. Run `flutter channel` to switch to an official channel.
If that doesn't fix the issue, reinstall Flutter by following instructions at https://flutter.dev/setup.
! Warning: `flutter` on your path resolves to /Users/andrea/.puro/envs/stable/flutter/bin/flutter, which is not inside your current Flutter SDK checkout at /Users/andrea/.puro/envs/master/flutter. Consider adding
/Users/andrea/.puro/envs/master/flutter/bin to the front of your path.
! Warning: `dart` on your path resolves to /Users/andrea/.puro/envs/stable/flutter/bin/dart, which is not inside your current Flutter SDK checkout at /Users/andrea/.puro/envs/master/flutter. Consider adding
/Users/andrea/.puro/envs/master/flutter/bin to the front of your path.
! Upstream repository unknown source is not a standard remote.
Set environment variable "FLUTTER_GIT_URL" to unknown source to dismiss this error.
[✓] Android toolchain - develop for Android devices (Android SDK version 35.0.0)
[✓] Xcode - develop for iOS and macOS (Xcode 16.1)
[✓] Chrome - develop for the web
[✓] Android Studio (version 2024.2)
[✓] VS Code (version 1.99.3)
[✓] Connected device (5 available)
[✓] Network resources
! Doctor found issues in 1 category.