speech to text fully drafted

This commit is contained in:
William Jordan-Cooley 2024-04-11 16:31:10 -04:00
parent 9501f6f964
commit b1c26f0572
18 changed files with 1467 additions and 217 deletions

View file

@ -145,7 +145,8 @@ class Message extends StatelessWidget {
controller.getPangeaMessageEvent(event.eventId);
ToolbarDisplayController? toolbarController;
if (event.messageType == MessageTypes.Text ||
event.messageType == MessageTypes.Notice) {
event.messageType == MessageTypes.Notice ||
event.messageType == MessageTypes.Audio) {
toolbarController = controller.getToolbarDisplayController(event.eventId);
}
// Pangea#

View file

@ -1,5 +1,6 @@
import 'package:fluffychat/pages/chat/events/html_message.dart';
import 'package:fluffychat/pages/chat/events/video_player.dart';
import 'package:fluffychat/pangea/enum/message_mode_enum.dart';
import 'package:fluffychat/pangea/models/pangea_message_event.dart';
import 'package:fluffychat/pangea/widgets/chat/message_context_menu.dart';
import 'package:fluffychat/pangea/widgets/chat/message_toolbar.dart';
@ -340,7 +341,7 @@ class MessageContent extends StatelessWidget {
),
onListen: () => toolbarController?.showToolbar(
context,
mode: MessageMode.play,
mode: MessageMode.conversion,
),
),
enableInteractiveSelection:

View file

@ -10,6 +10,7 @@ import 'package:fluffychat/pangea/controllers/local_settings.dart';
import 'package:fluffychat/pangea/controllers/message_data_controller.dart';
import 'package:fluffychat/pangea/controllers/my_analytics_controller.dart';
import 'package:fluffychat/pangea/controllers/permissions_controller.dart';
import 'package:fluffychat/pangea/controllers/speech_to_text_controller.dart';
import 'package:fluffychat/pangea/controllers/subscription_controller.dart';
import 'package:fluffychat/pangea/controllers/text_to_speech_controller.dart';
import 'package:fluffychat/pangea/controllers/user_controller.dart';
@ -47,6 +48,7 @@ class PangeaController {
late InstructionsController instructions;
late SubscriptionController subscriptionController;
late TextToSpeechController textToSpeech;
late SpeechToTextController speechToText;
///store Services
late PLocalStore pStoreService;
@ -93,6 +95,7 @@ class PangeaController {
subscriptionController = SubscriptionController(this);
itFeedback = ITFeedbackController(this);
textToSpeech = TextToSpeechController(this);
speechToText = SpeechToTextController(this);
PAuthGaurd.pController = this;
}

View file

@ -0,0 +1,81 @@
import 'dart:async';
import 'dart:convert';
import 'package:fluffychat/pangea/controllers/pangea_controller.dart';
import 'package:fluffychat/pangea/models/speech_to_text_models.dart';
import 'package:flutter/foundation.dart';
import 'package:http/http.dart';
import '../config/environment.dart';
import '../network/requests.dart';
import '../network/urls.dart';
// Request/response models (SpeechToTextRequestModel, SpeechToTextResponseModel)
// are defined in models/speech_to_text_models.dart.
/// Internal cache entry holding the (possibly still in-flight) API response.
class _SpeechToTextCacheItem {
  // Final: the entry is never reassigned after insertion into the cache.
  final Future<SpeechToTextResponseModel> data;

  _SpeechToTextCacheItem({required this.data});
}
/// Calls the choreo speech-to-text endpoint and caches responses in memory.
class SpeechToTextController {
  // Static: the cache is shared by all controller instances.
  // NOTE(review): keyed by SpeechToTextRequestModel.hashCode, so colliding
  // requests would share a response — unlikely but worth confirming.
  static final Map<int, _SpeechToTextCacheItem> _cache = {};

  late final PangeaController _pangeaController;
  Timer? _cacheClearTimer;

  SpeechToTextController(this._pangeaController) {
    _initializeCacheClearing();
  }

  /// Starts a periodic timer that empties the response cache every 15 minutes.
  void _initializeCacheClearing() {
    const duration = Duration(minutes: 15);
    _cacheClearTimer = Timer.periodic(duration, (Timer t) => _clearCache());
  }

  void _clearCache() {
    _cache.clear();
  }

  /// Cancels the periodic cache-clearing timer.
  void dispose() {
    _cacheClearTimer?.cancel();
  }

  /// Returns the (possibly cached) transcription for [requestModel].
  ///
  /// The pending future is inserted into the cache synchronously, so
  /// concurrent calls for the same request share one API call instead of
  /// firing duplicates. Failed requests are evicted so the next call
  /// can retry rather than replaying a cached error for 15 minutes.
  Future<SpeechToTextResponseModel> get(
    SpeechToTextRequestModel requestModel,
  ) async {
    final int cacheKey = requestModel.hashCode;
    final _SpeechToTextCacheItem? existing = _cache[cacheKey];
    if (existing != null) return existing.data;

    final Future<SpeechToTextResponseModel> response = _fetch(requestModel);
    _cache[cacheKey] = _SpeechToTextCacheItem(data: response);
    try {
      return await response;
    } catch (_) {
      // Don't cache failures.
      _cache.remove(cacheKey);
      rethrow;
    }
  }

  // Resolves the access token, then performs the fetch.
  Future<SpeechToTextResponseModel> _fetch(
    SpeechToTextRequestModel requestModel,
  ) async =>
      _fetchResponse(
        accessToken: await _pangeaController.userController.accessToken,
        requestModel: requestModel,
      );

  /// POSTs [requestModel] to the speech-to-text endpoint.
  ///
  /// Throws an [Exception] on any non-200 response.
  static Future<SpeechToTextResponseModel> _fetchResponse({
    required String accessToken,
    required SpeechToTextRequestModel requestModel,
  }) async {
    final Requests request = Requests(
      choreoApiKey: Environment.choreoApiKey,
      accessToken: accessToken,
    );
    final Response res = await request.post(
      url: PApiUrls.speechToText,
      body: requestModel.toJson(),
    );
    if (res.statusCode == 200) {
      final Map<String, dynamic> json = jsonDecode(utf8.decode(res.bodyBytes));
      return SpeechToTextResponseModel.fromJson(json);
    } else {
      debugPrint('Error converting speech to text: ${res.body}');
      throw Exception(
        'Failed to convert speech to text (status ${res.statusCode})',
      );
    }
  }
}

View file

@ -126,25 +126,6 @@ class TextToSpeechController {
return TextToSpeechResponse.fromJson(json);
}
// if (json["wave_form"] == null) {
// json["wave_form"] = getWaveForm();
// }
// return TextToSpeechResponse(
// audioContent: String.fromCharCodes(base64Decode(json["audio_content"])),
// mediaType: json["media_type"],
// durationMillis: durationMillis(json["duration_millis"]),
// waveform: getWaveForm(json["audio_content"]),
// );
// }
// static List<int> getWaveForm(audioContent) {
// return [];
// }
// static int durationMillis(audioContent) {
// return 0;
// }
static bool isOggFile(Uint8List bytes) {
// Check if the file has enough bytes for the header

View file

@ -0,0 +1,56 @@
/// Maps an audio mime type to the matching [AudioEncodingEnum].
///
/// Mime parameters (e.g. "audio/ogg; codecs=opus") and letter case are
/// normalized away before matching. Unrecognized types map to
/// [AudioEncodingEnum.encodingUnspecified].
AudioEncodingEnum mimeTypeToAudioEncoding(String mimeType) {
  // Keep only the type/subtype portion and lowercase it.
  final String normalized = mimeType.split(';').first.trim().toLowerCase();
  switch (normalized) {
    case 'audio/mpeg':
      return AudioEncodingEnum.mp3;
    case 'audio/mp4':
      return AudioEncodingEnum.mp4;
    case 'audio/ogg':
      return AudioEncodingEnum.oggOpus;
    case 'audio/webm':
      return AudioEncodingEnum.webmOpus;
    default:
      return AudioEncodingEnum.encodingUnspecified;
  }
}
/// Audio encodings accepted by the speech-to-text API.
///
/// The wire-format names (see [AudioEncodingExtension.value]) appear to
/// mirror Google Cloud Speech-to-Text's `AudioEncoding` values — confirm
/// against the backend.
enum AudioEncodingEnum {
  encodingUnspecified,
  linear16,
  flac,
  mulaw,
  amr,
  amrWb,
  oggOpus,
  speexWithHeaderByte,
  mp3,
  mp4,
  webmOpus,
}
// Utility extension to map enum values to their corresponding string value as used by the API
// Utility extension mapping enum values to the string names used by the API.
extension AudioEncodingExtension on AudioEncodingEnum {
  /// The API wire-format name for this encoding.
  String get value {
    const wireNames = <AudioEncodingEnum, String>{
      AudioEncodingEnum.linear16: 'LINEAR16',
      AudioEncodingEnum.flac: 'FLAC',
      AudioEncodingEnum.mulaw: 'MULAW',
      AudioEncodingEnum.amr: 'AMR',
      AudioEncodingEnum.amrWb: 'AMR_WB',
      AudioEncodingEnum.oggOpus: 'OGG_OPUS',
      AudioEncodingEnum.speexWithHeaderByte: 'SPEEX_WITH_HEADER_BYTE',
      AudioEncodingEnum.mp3: 'MP3',
      AudioEncodingEnum.mp4: 'MP4',
      AudioEncodingEnum.webmOpus: 'WEBM_OPUS',
    };
    // encodingUnspecified (and anything unmapped) falls through here.
    return wireNames[this] ?? 'ENCODING_UNSPECIFIED';
  }
}

View file

@ -0,0 +1,48 @@
import 'package:flutter/material.dart';
import 'package:flutter_gen/gen_l10n/l10n.dart';
/// Toolbar modes for a message: audio conversion (text-to-speech or
/// speech-to-text), translation, and word definitions.
enum MessageMode { conversion, translation, definition }
/// UI helpers (icon, title, tooltip) for each [MessageMode].
///
/// All switches below are exhaustive over [MessageMode], so the previous
/// `default` error branches were unreachable and have been removed.
extension MessageModeExtension on MessageMode {
  /// The toolbar icon for this mode.
  ///
  /// [isAudioMessage] is accepted so the conversion icon can eventually
  /// differ for audio messages; it is currently unused.
  IconData icon(bool isAudioMessage) {
    switch (this) {
      case MessageMode.translation:
        return Icons.g_translate;
      case MessageMode.conversion:
        //TODO change icon for audio messages
        return Icons.play_arrow;
      case MessageMode.definition:
        return Icons.book;
    }
  }

  /// Localized title shown for this mode.
  String title(BuildContext context) {
    switch (this) {
      case MessageMode.translation:
        return L10n.of(context)!.translations;
      case MessageMode.conversion:
        return L10n.of(context)!.messageAudio;
      case MessageMode.definition:
        return L10n.of(context)!.definitions;
    }
  }

  /// Localized tooltip for this mode's toolbar button.
  String tooltip(BuildContext context) {
    switch (this) {
      case MessageMode.translation:
        return L10n.of(context)!.translationTooltip;
      case MessageMode.conversion:
        return L10n.of(context)!.audioTooltip;
      case MessageMode.definition:
        return L10n.of(context)!.define;
    }
  }
}

View file

@ -148,7 +148,7 @@ class PangeaMessageEvent {
},
);
debugPrint("eventId in getAudioGlobal $eventId");
debugPrint("eventId in getTextToSpeechGlobal $eventId");
final Event? audioEvent =
eventId != null ? await room.getEventById(eventId) : null;
@ -162,10 +162,10 @@ class PangeaMessageEvent {
//get audio for text and language
//if no audio exists, create it
//if audio exists, return it
Future<Event?> getAudioGlobal(String langCode) async {
Future<Event?> getTextToSpeechGlobal(String langCode) async {
final String text = representationByLanguage(langCode)?.text ?? body;
final local = getAudioLocal(langCode, text);
final local = getTextToSpeechLocal(langCode, text);
if (local != null) return Future.value(local);
@ -223,16 +223,16 @@ class PangeaMessageEvent {
// .timeout(
// Durations.long4,
// onTimeout: () {
// debugPrint("timeout in getAudioGlobal");
// debugPrint("timeout in getTextToSpeechGlobal");
// return null;
// },
// );
debugPrint("eventId in getAudioGlobal $eventId");
debugPrint("eventId in getTextToSpeechGlobal $eventId");
return eventId != null ? room.getEventById(eventId) : null;
}
Event? getAudioLocal(String langCode, String text) {
Event? getTextToSpeechLocal(String langCode, String text) {
return allAudio.firstWhereOrNull(
(element) {
// Safely access the transcription map

View file

@ -0,0 +1,137 @@
import 'dart:convert';
import 'package:fluffychat/pangea/enum/audio_encoding_enum.dart';
import 'package:flutter/foundation.dart';
/// Recognition configuration sent with a speech-to-text request.
class SpeechToTextAudioConfigModel {
  final AudioEncodingEnum encoding;
  final int sampleRateHertz;
  final bool enableWordConfidence;
  final bool enableAutomaticPunctuation;

  // The user's first and second (target) language codes.
  final String userL1;
  final String userL2;

  SpeechToTextAudioConfigModel({
    required this.encoding,
    required this.userL1,
    required this.userL2,
    this.sampleRateHertz = 16000,
    this.enableWordConfidence = true,
    this.enableAutomaticPunctuation = true,
  });

  Map<String, dynamic> toJson() => {
        "encoding": encoding.value,
        "sample_rate_hertz": sampleRateHertz,
        "user_l1": userL1,
        "user_l2": userL2,
        "enable_word_confidence": enableWordConfidence,
        "enable_automatic_punctuation": enableAutomaticPunctuation,
      };

  // Value equality is required because SpeechToTextRequestModel compares and
  // hashes its config; without this override that fell back to identity,
  // defeating the request cache for structurally-equal configs.
  @override
  bool operator ==(Object other) =>
      identical(this, other) ||
      (other is SpeechToTextAudioConfigModel &&
          encoding == other.encoding &&
          sampleRateHertz == other.sampleRateHertz &&
          enableWordConfidence == other.enableWordConfidence &&
          enableAutomaticPunctuation == other.enableAutomaticPunctuation &&
          userL1 == other.userL1 &&
          userL2 == other.userL2);

  @override
  int get hashCode => Object.hash(
        encoding,
        sampleRateHertz,
        enableWordConfidence,
        enableAutomaticPunctuation,
        userL1,
        userL2,
      );
}
/// Request payload for the speech-to-text endpoint.
class SpeechToTextRequestModel {
  // Raw audio bytes; base64-encoded in toJson().
  final Uint8List audioContent;
  final SpeechToTextAudioConfigModel config;

  SpeechToTextRequestModel({
    required this.audioContent,
    required this.config,
  });

  Map<String, dynamic> toJson() => {
        "audio_content": base64Encode(audioContent),
        "config": config.toJson(),
      };

  @override
  bool operator ==(Object other) {
    if (identical(this, other)) return true;
    if (other is! SpeechToTextRequestModel) return false;
    return listEquals(audioContent, other.audioContent) &&
        config == other.config;
  }

  /// Cheap hash mixing the audio length, a short byte prefix, and the config.
  ///
  /// Including the length fixes collisions between different-length audios
  /// that share the same first 10 bytes. Equal requests always hash equally;
  /// unequal requests may still collide, which matters to callers using the
  /// hash alone as a cache key.
  @override
  int get hashCode {
    final Uint8List bytesSample =
        audioContent.length > 10 ? audioContent.sublist(0, 10) : audioContent;
    return Object.hash(
      audioContent.length,
      Object.hashAll(bytesSample),
      config,
    );
  }
}
/// A single recognized word with optional timing and confidence data.
class WordInfo {
  final String word;
  final Duration? startTime;
  final Duration? endTime;
  final double? confidence;

  WordInfo({
    required this.word,
    this.startTime,
    this.endTime,
    this.confidence,
  });

  /// Builds a [WordInfo] from the API response JSON.
  ///
  /// Times are interpreted as milliseconds; casting through `num` accepts
  /// both int and double payloads (a double previously crashed the
  /// `Duration(milliseconds:)` constructor, which requires an int).
  factory WordInfo.fromJson(Map<String, dynamic> json) => WordInfo(
        word: json['word'],
        startTime: json['start_time'] != null
            ? Duration(milliseconds: (json['start_time'] as num).round())
            : null,
        endTime: json['end_time'] != null
            ? Duration(milliseconds: (json['end_time'] as num).round())
            : null,
        confidence: (json['confidence'] as num?)?.toDouble(),
      );
}
/// One transcription hypothesis: its text, overall confidence, and
/// per-word details.
class Transcript {
  final String transcript;
  final double confidence;
  final List<WordInfo> words;

  Transcript({
    required this.transcript,
    required this.confidence,
    required this.words,
  });

  /// Builds a [Transcript] from the API response JSON.
  ///
  /// Tolerates absent fields: missing `confidence` defaults to 0.0 and a
  /// missing `words` list becomes empty (previously both crashed with a
  /// null dereference).
  factory Transcript.fromJson(Map<String, dynamic> json) => Transcript(
        transcript: json['transcript'] ?? '',
        confidence: (json['confidence'] as num?)?.toDouble() ?? 0.0,
        words: (json['words'] as List? ?? const [])
            .map((e) => WordInfo.fromJson(e))
            .toList(),
      );
}
/// Container for the candidate transcripts produced for one audio segment.
class SpeechToTextResult {
  final List<Transcript> transcripts;

  SpeechToTextResult({required this.transcripts});

  /// Builds a [SpeechToTextResult] from the API response JSON.
  factory SpeechToTextResult.fromJson(Map<String, dynamic> json) =>
      SpeechToTextResult(
        transcripts: [
          for (final item in json['transcripts'] as List)
            Transcript.fromJson(item),
        ],
      );
}
/// Top-level response from the speech-to-text endpoint.
class SpeechToTextResponseModel {
  final List<SpeechToTextResult> results;

  SpeechToTextResponseModel({
    required this.results,
  });

  /// Builds a [SpeechToTextResponseModel] from the API response JSON.
  factory SpeechToTextResponseModel.fromJson(Map<String, dynamic> json) =>
      SpeechToTextResponseModel(
        results: [
          for (final item in json['results'] as List)
            SpeechToTextResult.fromJson(item),
        ],
      );
}

View file

@ -51,6 +51,7 @@ class PApiUrls {
static String subseqStep = "/it_step";
static String textToSpeech = "${Environment.choreoApi}/text_to_speech";
static String speechToText = "${Environment.choreoApi}/speech_to_text";
///-------------------------------- revenue cat --------------------------
static String rcApiV1 = "https://api.revenuecat.com/v1";

View file

@ -1,66 +0,0 @@
// import 'dart:async';
// import 'dart:convert';
// import 'package:fluffychat/pangea/config/environment.dart';
// import 'package:fluffychat/pangea/constants/model_keys.dart';
// import 'package:fluffychat/pangea/network/urls.dart';
// import 'package:http/http.dart';
// import '../network/requests.dart';
// class TextToSpeechRequest {
// String text;
// String langCode;
// TextToSpeechRequest({required this.text, required this.langCode});
// Map<String, dynamic> toJson() => {
// ModelKey.text: text,
// ModelKey.langCode: langCode,
// };
// }
// class TextToSpeechResponse {
// String audioContent;
// String mediaType;
// int durationMillis;
// List<int> waveform;
// TextToSpeechResponse({
// required this.audioContent,
// required this.mediaType,
// required this.durationMillis,
// required this.waveform,
// });
// factory TextToSpeechResponse.fromJson(
// Map<String, dynamic> json,
// ) =>
// TextToSpeechResponse(
// audioContent: json["audio_content"],
// mediaType: json["media_type"],
// durationMillis: json["duration_millis"],
// waveform: List<int>.from(json["wave_form"]),
// );
// }
// class TextToSpeechService {
// static Future<TextToSpeechResponse> get({
// required String accessToken,
// required TextToSpeechRequest params,
// }) async {
// final Requests request = Requests(
// choreoApiKey: Environment.choreoApiKey,
// accessToken: accessToken,
// );
// final Response res = await request.post(
// url: PApiUrls.textToSpeech,
// body: params.toJson(),
// );
// final Map<String, dynamic> json = jsonDecode(res.body);
// return TextToSpeechResponse.fromJson(json);
// }
// }

View file

@ -33,7 +33,7 @@ class MessageAudioCardState extends State<MessageAudioCard> {
widget.messageEvent.representationByLanguage(langCode)?.text;
if (text != null) {
final Event? localEvent =
widget.messageEvent.getAudioLocal(langCode, text);
widget.messageEvent.getTextToSpeechLocal(langCode, text);
if (localEvent != null) {
localAudioEvent = localEvent;
if (mounted) setState(() => _isLoading = false);

View file

@ -0,0 +1,187 @@
import 'dart:developer';
import 'package:fluffychat/pangea/enum/audio_encoding_enum.dart';
import 'package:fluffychat/pangea/models/message_data_models.dart';
import 'package:fluffychat/pangea/models/pangea_message_event.dart';
import 'package:fluffychat/pangea/models/speech_to_text_models.dart';
import 'package:fluffychat/pangea/utils/bot_style.dart';
import 'package:fluffychat/pangea/utils/error_handler.dart';
import 'package:fluffychat/utils/localized_exception_extension.dart';
import 'package:fluffychat/widgets/matrix.dart';
import 'package:flutter/foundation.dart';
import 'package:flutter/material.dart';
import 'package:matrix/matrix.dart';
/// Toolbar card that displays a speech-to-text transcription of an
/// audio message.
class MessageSpeechToTextCard extends StatefulWidget {
  /// The audio message to transcribe.
  final PangeaMessageEvent messageEvent;

  const MessageSpeechToTextCard({
    super.key,
    required this.messageEvent,
  });

  @override
  MessageSpeechToTextCardState createState() => MessageSpeechToTextCardState();
}
/// Download lifecycle of the audio attachment backing the card.
enum AudioFileStatus { notDownloaded, downloading, downloaded }
class MessageSpeechToTextCardState extends State<MessageSpeechToTextCard> {
  PangeaRepresentation? repEvent;
  String? transcription;
  bool _fetchingTranscription = true;
  AudioFileStatus status = AudioFileStatus.notDownloaded;
  MatrixFile? matrixFile;

  String? get l1Code =>
      MatrixState.pangeaController.languageController.activeL1Code(
        roomID: widget.messageEvent.room.id,
      );

  String? get l2Code =>
      MatrixState.pangeaController.languageController.activeL2Code(
        roomID: widget.messageEvent.room.id,
      );

  // get transcription from local events
  Future<String> getLocalTranscription() async {
    return "This is a dummy transcription";
  }

  // Downloads (and decrypts) the audio attachment into [matrixFile].
  // This code is duplicated from audio_player.dart. Is there some way to reuse that code?
  Future<void> _downloadAction() async {
    if (status != AudioFileStatus.notDownloaded) {
      return;
    }
    setState(() => status = AudioFileStatus.downloading);
    try {
      final file =
          await widget.messageEvent.event.downloadAndDecryptAttachment();
      // TODO: Test on mobile; the bytes may need to be written to a temp
      // file there (see the equivalent logic in audio_player.dart).
      matrixFile = file;
      status = AudioFileStatus.downloaded;
    } catch (e, s) {
      Logs().v('Could not download audio file', e, s);
      // Reset so a later attempt can retry instead of staying stuck in the
      // `downloading` state forever.
      status = AudioFileStatus.notDownloaded;
      // The await above may have outlived this widget; don't touch context.
      if (!mounted) return;
      ScaffoldMessenger.of(context).showSnackBar(
        SnackBar(
          content: Text(e.toLocalizedString(context)),
        ),
      );
    }
  }

  /// Encoding inferred from the downloaded file's mime type, or null if the
  /// file has not been downloaded yet.
  AudioEncodingEnum? get encoding {
    final file = matrixFile;
    if (file == null) return null;
    return mimeTypeToAudioEncoding(file.mimeType);
  }

  // call API to transcribe audio; returns null when prerequisites are
  // missing or the API returns no transcripts
  Future<String?> transcribeAudio() async {
    await _downloadAction();
    // Copy to a local so the nullable field promotes.
    final file = matrixFile;
    if (file == null) {
      debugger(when: kDebugMode);
      ErrorHandler.logError(
        e: 'Audio file or matrix file is null ${widget.messageEvent.eventId}',
        s: StackTrace.current,
        data: widget.messageEvent.event.content,
      );
      return null;
    }
    final l1 = l1Code;
    final l2 = l2Code;
    if (l1 == null || l2 == null) {
      debugger(when: kDebugMode);
      ErrorHandler.logError(
        e: 'Language codes are null ${widget.messageEvent.eventId}',
        s: StackTrace.current,
        data: widget.messageEvent.event.content,
      );
      return null;
    }
    final SpeechToTextResponseModel response =
        await MatrixState.pangeaController.speechToText.get(
      SpeechToTextRequestModel(
        audioContent: file.bytes,
        config: SpeechToTextAudioConfigModel(
          encoding: encoding ?? AudioEncodingEnum.encodingUnspecified,
          //this is the default in the RecordConfig in record package
          sampleRateHertz: 44100,
          userL1: l1,
          userL2: l2,
        ),
      ),
    );
    // Guard against an empty result set (previously `.first` could throw).
    if (response.results.isEmpty ||
        response.results.first.transcripts.isEmpty) {
      return null;
    }
    return response.results.first.transcripts.first.transcript;
  }

  // look for transcription in message event
  // if not found, call API to transcribe audio
  Future<void> loadTranscription() async {
    try {
      // transcription ??= await getLocalTranscription();
      transcription ??= await transcribeAudio();
    } catch (e, s) {
      // Without this catch, a failed API call would leave the loading
      // spinner showing forever.
      ErrorHandler.logError(
        e: e,
        s: s,
        data: widget.messageEvent.event.content,
      );
    }
    if (!mounted) return;
    setState(() => _fetchingTranscription = false);
  }

  @override
  void initState() {
    super.initState();
    loadTranscription();
  }

  @override
  Widget build(BuildContext context) {
    // Prefer the API transcription, then any local representation text.
    final String? displayText = transcription ?? repEvent?.text;
    return Padding(
      padding: const EdgeInsets.all(8),
      child: _fetchingTranscription
          ? SizedBox(
              height: 14,
              width: 14,
              child: CircularProgressIndicator(
                strokeWidth: 2.0,
                color: Theme.of(context).colorScheme.primary,
              ),
            )
          : displayText != null
              ? Text(
                  displayText,
                  style: BotStyle.text(context),
                )
              // Previously this dereferenced `repEvent!` and crashed when
              // both transcription and repEvent were null.
              : const SizedBox.shrink(),
    );
  }
}

View file

@ -1,25 +1,27 @@
import 'dart:async';
import 'dart:developer';
import 'package:fluffychat/config/themes.dart';
import 'package:fluffychat/pages/chat/chat.dart';
import 'package:fluffychat/pangea/constants/local.key.dart';
import 'package:fluffychat/pangea/enum/message_mode_enum.dart';
import 'package:fluffychat/pangea/models/pangea_message_event.dart';
import 'package:fluffychat/pangea/utils/any_state_holder.dart';
import 'package:fluffychat/pangea/utils/error_handler.dart';
import 'package:fluffychat/pangea/utils/overlay.dart';
import 'package:fluffychat/pangea/widgets/chat/message_audio_card.dart';
import 'package:fluffychat/pangea/widgets/chat/message_speech_to_text_card.dart';
import 'package:fluffychat/pangea/widgets/chat/message_text_selection.dart';
import 'package:fluffychat/pangea/widgets/chat/message_translation_card.dart';
import 'package:fluffychat/pangea/widgets/chat/message_unsubscribed_card.dart';
import 'package:fluffychat/pangea/widgets/chat/overlay_message.dart';
import 'package:fluffychat/pangea/widgets/igc/word_data_card.dart';
import 'package:fluffychat/widgets/matrix.dart';
import 'package:flutter/foundation.dart';
import 'package:flutter/material.dart';
import 'package:flutter_gen/gen_l10n/l10n.dart';
import 'package:matrix/matrix.dart';
enum MessageMode { translation, play, definition }
class ToolbarDisplayController {
final PangeaMessageEvent pangeaMessageEvent;
final String targetId;
@ -90,6 +92,7 @@ class ToolbarDisplayController {
],
);
} catch (err) {
debugger(when: kDebugMode);
ErrorHandler.logError(e: err, s: StackTrace.current);
return;
}
@ -147,53 +150,12 @@ class MessageToolbar extends StatefulWidget {
}
class MessageToolbarState extends State<MessageToolbar> {
Widget? child;
Widget? toolbarContent;
MessageMode? currentMode;
bool updatingMode = false;
late StreamSubscription<String?> selectionStream;
late StreamSubscription<MessageMode> toolbarModeStream;
IconData getIconData(MessageMode mode) {
switch (mode) {
case MessageMode.translation:
return Icons.g_translate;
case MessageMode.play:
return Icons.play_arrow;
case MessageMode.definition:
return Icons.book;
default:
return Icons.error; // Icon to indicate an error or unsupported mode
}
}
String getModeTitle(MessageMode mode) {
switch (mode) {
case MessageMode.translation:
return L10n.of(context)!.translations;
case MessageMode.play:
return L10n.of(context)!.messageAudio;
case MessageMode.definition:
return L10n.of(context)!.definitions;
default:
return L10n.of(context)!
.oopsSomethingWentWrong; // Title to indicate an error or unsupported mode
}
}
String getModeTooltip(MessageMode mode) {
switch (mode) {
case MessageMode.translation:
return L10n.of(context)!.translationTooltip;
case MessageMode.play:
return L10n.of(context)!.audioTooltip;
case MessageMode.definition:
return L10n.of(context)!.define;
default:
return L10n.of(context)!
.oopsSomethingWentWrong; // Title to indicate an error or unsupported mode
}
}
void updateMode(MessageMode newMode) {
if (updatingMode) return;
debugPrint("updating toolbar mode");
@ -204,8 +166,8 @@ class MessageToolbarState extends State<MessageToolbar> {
updatingMode = true;
});
if (!subscribed) {
child = MessageUnsubscribedCard(
languageTool: getModeTitle(newMode),
toolbarContent = MessageUnsubscribedCard(
languageTool: newMode.title(context),
mode: newMode,
toolbarModeStream: widget.toolbarModeStream,
);
@ -214,8 +176,8 @@ class MessageToolbarState extends State<MessageToolbar> {
case MessageMode.translation:
showTranslation();
break;
case MessageMode.play:
playAudio();
case MessageMode.conversion:
showConversion();
break;
case MessageMode.definition:
showDefinition();
@ -231,28 +193,35 @@ class MessageToolbarState extends State<MessageToolbar> {
void showTranslation() {
debugPrint("show translation");
child = MessageTranslationCard(
toolbarContent = MessageTranslationCard(
messageEvent: widget.pangeaMessageEvent,
immersionMode: widget.immersionMode,
selection: widget.textSelection,
);
}
void playAudio() {
debugPrint("play audio");
child = MessageAudioCard(
messageEvent: widget.pangeaMessageEvent,
);
void showConversion() {
debugPrint("show conversion");
if (isAudioMessage) {
debugPrint("is audio message");
toolbarContent = MessageSpeechToTextCard(
messageEvent: widget.pangeaMessageEvent,
);
} else {
toolbarContent = MessageAudioCard(
messageEvent: widget.pangeaMessageEvent,
);
}
}
void showDefinition() {
if (widget.textSelection.selectedText == null ||
widget.textSelection.selectedText!.isEmpty) {
child = const SelectToDefine();
toolbarContent = const SelectToDefine();
return;
}
child = WordDataCard(
toolbarContent = WordDataCard(
word: widget.textSelection.selectedText!,
wordLang: widget.pangeaMessageEvent.messageDisplayLangCode,
fullText: widget.textSelection.messageText,
@ -262,6 +231,10 @@ class MessageToolbarState extends State<MessageToolbar> {
);
}
bool get isAudioMessage {
return widget.pangeaMessageEvent.event.messageType == MessageTypes.Audio;
}
void showImage() {}
void spellCheck() {}
@ -286,7 +259,7 @@ class MessageToolbarState extends State<MessageToolbar> {
) ??
true;
autoplay
? updateMode(MessageMode.play)
? updateMode(MessageMode.conversion)
: updateMode(MessageMode.translation);
});
@ -339,8 +312,8 @@ class MessageToolbarState extends State<MessageToolbar> {
duration: FluffyThemes.animationDuration,
child: Column(
children: [
child ?? const SizedBox(),
SizedBox(height: child == null ? 0 : 20),
toolbarContent ?? const SizedBox(),
SizedBox(height: toolbarContent == null ? 0 : 20),
],
),
),
@ -349,10 +322,13 @@ class MessageToolbarState extends State<MessageToolbar> {
Row(
mainAxisSize: MainAxisSize.min,
children: MessageMode.values.map((mode) {
if (mode == MessageMode.definition && isAudioMessage) {
return const SizedBox.shrink();
}
return Tooltip(
message: getModeTooltip(mode),
message: mode.tooltip(context),
child: IconButton(
icon: Icon(getIconData(mode)),
icon: Icon(mode.icon(isAudioMessage)),
color: currentMode == mode
? Theme.of(context).colorScheme.primary
: null,

View file

@ -2,11 +2,12 @@ import 'dart:async';
import 'package:fluffychat/config/app_config.dart';
import 'package:fluffychat/pangea/utils/bot_style.dart';
import 'package:fluffychat/pangea/widgets/chat/message_toolbar.dart';
import 'package:fluffychat/widgets/matrix.dart';
import 'package:flutter/material.dart';
import 'package:flutter_gen/gen_l10n/l10n.dart';
import '../../enum/message_mode_enum.dart';
class MessageUnsubscribedCard extends StatelessWidget {
final String languageTool;
final MessageMode mode;

View file

@ -50,7 +50,7 @@ class _TextToSpeechButtonState extends State<TextToSpeechButton> {
Event? get localAudioEvent =>
langCode != null && text != null && text!.isNotEmpty
? _pangeaMessageEvent.getAudioLocal(langCode!, text!)
? _pangeaMessageEvent.getTextToSpeechLocal(langCode!, text!)
: null;
String? get langCode =>
@ -69,7 +69,7 @@ class _TextToSpeechButtonState extends State<TextToSpeechButton> {
if (langCode == null || langCode!.isEmpty) return;
setState(() => _isLoading = true);
await _pangeaMessageEvent.getAudioGlobal(langCode!);
await _pangeaMessageEvent.getTextToSpeechGlobal(langCode!);
setState(() => _isLoading = false);
} catch (e) {
setState(() => _isLoading = false);

View file

@ -14,6 +14,7 @@ import 'package:fluffychat/widgets/matrix.dart';
import 'package:flutter/foundation.dart';
import 'package:flutter/material.dart';
import '../../enum/message_mode_enum.dart';
import '../../models/pangea_match_model.dart';
class PangeaRichText extends StatefulWidget {
@ -157,7 +158,7 @@ class PangeaRichTextState extends State<PangeaRichText> {
),
onListen: () => widget.toolbarController?.showToolbar(
context,
mode: MessageMode.play,
mode: MessageMode.conversion,
),
),
TextSpan(

File diff suppressed because it is too large Load diff