fluffychat/lib/pangea/token_info_feedback/token_info_feedback_dialog.dart
wcjord 0e681c4d68
feat: phonetic transcription v2 migration (#5640)
* docs: add PT v2 and token-info-feedback design docs

- Add phonetic-transcription-v2-design.instructions.md (client PT v2 migration)
- Add token-info-feedback-v2.instructions.md (client token feedback v2 migration)

* fix: update applyTo path for token info feedback v2 migration

* feat: Refactor phonetic transcription to v2 models and repository (in progress)

* feat: PT v2 migration - tts_phoneme rename, v1 cleanup, disambiguation, TTS integration

* feat: Update phonetic transcription v2 design document for endpoint changes and response structure

* docs: fix stale _storageKeys claim in pt-v2 design doc

* style: reformat PT v2 files with Dart 3.10 formatter (Flutter 3.38)

* feat: add speakingRate to TTS request model (default 0.85)

Passes speaking_rate to the choreo TTS endpoint. Default preserves
current behavior; can be overridden for single-word playback later.

* feat: use normal speed (1.0) for single-word TTS playback

The 0.85x slowdown is helpful for full sentences but makes single
words sound unnaturally slow. tts_controller._speakFromChoreo now
sends speakingRate=1.0. Full-sentence TTS via pangea_message_event
still defaults to 0.85.

* style: clean up formatting and reduce line breaks in TtsController

* fix: env goofiness

* formatting, fix linter issues

* don't return widgets from functions

---------

Co-authored-by: ggurdin <ggurdin@gmail.com>
Co-authored-by: ggurdin <46800240+ggurdin@users.noreply.github.com>
2026-02-10 16:29:26 -05:00

147 lines
5.2 KiB
Dart

import 'package:flutter/material.dart';
import 'package:fluffychat/l10n/l10n.dart';
import 'package:fluffychat/pangea/common/constants/model_keys.dart';
import 'package:fluffychat/pangea/common/widgets/feedback_dialog.dart';
import 'package:fluffychat/pangea/events/event_wrappers/pangea_message_event.dart';
import 'package:fluffychat/pangea/events/models/language_detection_model.dart';
import 'package:fluffychat/pangea/events/models/pangea_token_model.dart';
import 'package:fluffychat/pangea/events/models/tokens_event_content_model.dart';
import 'package:fluffychat/pangea/extensions/pangea_room_extension.dart';
import 'package:fluffychat/pangea/lemmas/lemma_info_repo.dart';
import 'package:fluffychat/pangea/lemmas/lemma_info_response.dart';
import 'package:fluffychat/pangea/phonetic_transcription/pt_v2_models.dart';
import 'package:fluffychat/pangea/phonetic_transcription/pt_v2_repo.dart';
import 'package:fluffychat/pangea/token_info_feedback/token_info_feedback_repo.dart';
import 'package:fluffychat/pangea/token_info_feedback/token_info_feedback_request.dart';
import 'package:fluffychat/pangea/token_info_feedback/token_info_feedback_response.dart';
import 'package:fluffychat/pangea/toolbar/word_card/word_zoom_widget.dart';
import 'package:fluffychat/widgets/future_loading_dialog.dart';
/// A dialog that collects user feedback about possibly-incorrect token info
/// and applies any corrections returned by the feedback endpoint.
///
/// Corrections may include updated lemma info, updated phonetic
/// transcription, an updated token, and/or an updated detected language.
/// Token/language updates are propagated by re-sending the message as an
/// edit (tagged [ModelKey.tokenFeedbackEdit]) when [event] is available.
class TokenInfoFeedbackDialog extends StatelessWidget {
  /// The tokens, selected token index, and request context the feedback is about.
  final TokenInfoFeedbackRequestData requestData;

  /// Language code passed to the word card for display.
  final String langCode;

  /// The message event the selected token belongs to, if any.
  final PangeaMessageEvent? event;

  const TokenInfoFeedbackDialog({
    super.key,
    required this.requestData,
    required this.langCode,
    this.event,
  });

  /// Submits [feedback] to the server, applies any returned updates, and
  /// returns the server's user-friendly confirmation message.
  Future<String> _submitFeedback(String feedback) async {
    final request = TokenInfoFeedbackRequest(
      userFeedback: feedback,
      data: requestData,
    );
    final TokenInfoFeedbackResponse response =
        await TokenInfoFeedbackRepo.submitFeedback(request);

    final originalToken = requestData.tokens[requestData.selectedToken];
    final token = response.updatedToken ?? originalToken;

    // First, update cached lemma info if the server changed it.
    if (response.updatedLemmaInfo != null) {
      await _updateLemmaInfo(token, response.updatedLemmaInfo!);
    }

    // Second, update the cached phonetic transcription if changed.
    if (response.updatedPhonetics != null) {
      await _updatePhoneticTranscription(response.updatedPhonetics!);
    }

    final originalSent = event?.originalSent;

    // If neither the token nor the detected language changed, there is
    // nothing to edit in the message — just return the server's message.
    final hasTokenUpdate = response.updatedToken != null;
    final hasLangUpdate =
        originalSent != null &&
        response.updatedLanguage != null &&
        response.updatedLanguage != originalSent.langCode;
    if (!hasTokenUpdate && !hasLangUpdate) {
      return response.userFriendlyMessage;
    }

    // Build the token list to be sent in the message edit, swapping in the
    // server's updated token when present.
    final tokens = List<PangeaToken>.from(requestData.tokens);
    if (hasTokenUpdate) {
      tokens[requestData.selectedToken] = response.updatedToken!;
    }

    // Prefer the server's updated language; fall back to the originally
    // detected language of the sent message.
    final updatedLanguage = response.updatedLanguage ?? originalSent?.langCode;
    final tokensSent = PangeaMessageTokens(
      tokens: tokens,
      detections: [
        if (updatedLanguage != null)
          LanguageDetectionModel(langCode: updatedLanguage, confidence: 1),
      ],
    );

    // Re-send the message as an edit carrying the corrected tokens.
    if (requestData.fullText != null && event != null) {
      await event!.room.pangeaSendTextEvent(
        requestData.fullText!,
        editEventId: event!.eventId,
        originalWritten: event!.originalWritten?.content,
        tokensSent: tokensSent,
        tokensWritten: event!.originalWritten?.tokens != null
            ? PangeaMessageTokens(
                tokens: event!.originalWritten!.tokens!,
                detections: event!.originalWritten?.detections,
              )
            : null,
        choreo: originalSent?.choreo,
        messageTag: ModelKey.tokenFeedbackEdit,
      );
    }

    return response.userFriendlyMessage;
  }

  /// Runs [_submitFeedback] behind a loading dialog and, on success, pops
  /// this dialog with the resulting message.
  Future<void> _submit(String feedback, BuildContext context) async {
    final resp = await showFutureLoadingDialog(
      context: context,
      future: () => _submitFeedback(feedback),
    );
    // Guard against using the BuildContext across the async gap: the dialog
    // may have been disposed while the request was in flight.
    if (!context.mounted) return;
    if (!resp.isError) {
      Navigator.of(context).pop(resp.result!);
    }
  }

  /// Writes the server-corrected lemma info into the lemma info cache,
  /// keyed by the same request the original lookup used.
  Future<void> _updateLemmaInfo(
    PangeaToken token,
    LemmaInfoResponse response,
  ) => LemmaInfoRepo.set(
    token.vocabConstructID.lemmaInfoRequest(event?.event.content ?? {}),
    response,
  );

  /// Writes the server-corrected phonetic transcription into the PT v2
  /// cache, keyed by the original PT request from the feedback data.
  Future<void> _updatePhoneticTranscription(PTResponse response) async {
    final ptRequest = requestData.ptRequest;
    // No cached PT request means there is no cache entry to overwrite.
    if (ptRequest == null) return;
    await PTV2Repo.set(ptRequest, response);
  }

  @override
  Widget build(BuildContext context) {
    final selectedToken = requestData.tokens[requestData.selectedToken];
    return FeedbackDialog(
      title: L10n.of(context).tokenInfoFeedbackDialogTitle,
      onSubmit: (feedback) => _submit(feedback, context),
      extraContent: WordZoomWidget(
        token: selectedToken.text,
        construct: selectedToken.vocabConstructID,
        pos: selectedToken.pos,
        morph: selectedToken.morph.map((k, v) => MapEntry(k.name, v)),
        langCode: langCode,
        enableEmojiSelection: false,
      ),
    );
  }
}