5327 audio activities (#5596)

* feat: sentence based audio activities

WIP

* chore: fix up audio activities

- add continue button and show example message upon selecting correct answers
- token and lemma fixes
- fully fetch audio before displaying activity
- don't repeat messages in the same session

* chore: cleanup unnecessary widget

* merge conflict
This commit is contained in:
avashilling 2026-02-09 15:42:01 -05:00 committed by GitHub
parent 8088b6a605
commit 507fee84fe
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
13 changed files with 724 additions and 139 deletions

View file

@ -5072,6 +5072,7 @@
"autoIGCToolName": "Enable writing assistance",
"autoIGCToolDescription": "Automatically run Pangea Chat tools to correct sent messages to target language.",
"emptyAudioError": "Recording failed. Please check your audio permissions and try again.",
"selectAllWords": "Select all the words you hear in the audio",
"aboutMeHint": "About me",
"changeEmail": "Change email",
"withTheseAddressesDescription": "With these email addresses you can log in, recover your password, and manage subscriptions.",

View file

@ -4,14 +4,93 @@ import 'package:matrix/matrix.dart';
import 'package:fluffychat/pangea/analytics_misc/client_analytics_extension.dart';
import 'package:fluffychat/pangea/analytics_misc/construct_use_model.dart';
import 'package:fluffychat/pangea/analytics_practice/analytics_practice_session_model.dart';
import 'package:fluffychat/pangea/events/event_wrappers/pangea_message_event.dart';
import 'package:fluffychat/pangea/events/models/pangea_token_model.dart';
/// Internal result class that holds all computed data from building an
/// example message, so callers can project it into either a plain span
/// list ([toSpans]) or an [AudioExampleMessage] ([toAudioExampleMessage]).
class _ExampleMessageResult {
// Formatted spans for display; the target token span may be bolded.
final List<InlineSpan> displaySpans;
// The tokens covered by the (possibly trimmed) displayed text.
final List<PangeaToken> includedTokens;
// The raw (possibly trimmed) message text backing [displaySpans].
final String text;
// Index of the target token within [includedTokens].
// NOTE(review): stored but not forwarded by [toAudioExampleMessage] —
// confirm whether any consumer still reads it.
final int adjustedTargetIndex;
// Source event/room ids, needed for audio playback; may be null.
final String? eventId;
final String? roomId;
_ExampleMessageResult({
required this.displaySpans,
required this.includedTokens,
required this.text,
required this.adjustedTargetIndex,
this.eventId,
this.roomId,
});
// Projection used by the plain (non-audio) example-message APIs.
List<InlineSpan> toSpans() => displaySpans;
// Projection used by the audio-activity APIs; wraps the spans in an
// [ExampleMessageInfo] and carries the tokens plus event/room ids along.
AudioExampleMessage toAudioExampleMessage() => AudioExampleMessage(
tokens: includedTokens,
eventId: eventId,
roomId: roomId,
exampleMessage: ExampleMessageInfo(exampleMessage: displaySpans),
);
}
class ExampleMessageUtil {
/// Returns formatted example-message spans for [construct], or `null`
/// when no usable example message can be built from its capped uses.
///
/// [form], when given, restricts the search to uses of that surface form.
/// [noBold] suppresses bold styling of the target token.
static Future<List<InlineSpan>?> getExampleMessage(
  ConstructUses construct,
  Client client, {
  String? form,
  bool noBold = false,
}) async {
  final example = await _getExampleMessageResult(
    construct,
    client,
    form: form,
    noBold: noBold,
  );
  if (example == null) return null;
  return example.toSpans();
}
/// Returns an [AudioExampleMessage] (spans plus tokens and event/room ids)
/// for [construct], or `null` when no usable example message exists.
///
/// [form], when given, restricts the search to uses of that surface form.
/// [noBold] suppresses bold styling of the target token.
static Future<AudioExampleMessage?> getAudioExampleMessage(
  ConstructUses construct,
  Client client, {
  String? form,
  bool noBold = false,
}) async {
  final example = await _getExampleMessageResult(
    construct,
    client,
    form: form,
    noBold: noBold,
  );
  if (example == null) return null;
  return example.toAudioExampleMessage();
}
/// Collects up to [maxMessages] formatted example messages for
/// [construct], one span list per usable construct use.
///
/// Uses that have no retrievable event, or for which no example message
/// can be built, are skipped. [noBold] suppresses bold styling of the
/// target token.
static Future<List<List<InlineSpan>>> getExampleMessages(
  ConstructUses construct,
  Client client,
  int maxMessages, {
  bool noBold = false,
}) async {
  final collected = <List<InlineSpan>>[];
  for (final use in construct.cappedUses) {
    if (collected.length >= maxMessages) break;
    final event = await client.getEventByConstructUse(use);
    if (event == null) continue;
    final built = _buildExampleMessage(use.form, event, noBold: noBold);
    if (built == null) continue;
    collected.add(built.toSpans());
  }
  return collected;
}
static Future<_ExampleMessageResult?> _getExampleMessageResult(
ConstructUses construct,
Client client, {
String? form,
bool noBold = false,
}) async {
for (final use in construct.cappedUses) {
if (form != null && use.form != form) continue;
@ -19,36 +98,17 @@ class ExampleMessageUtil {
final event = await client.getEventByConstructUse(use);
if (event == null) continue;
final spans = _buildExampleMessage(use.form, event);
if (spans != null) return spans;
final result = _buildExampleMessage(use.form, event, noBold: noBold);
if (result != null) return result;
}
return null;
}
static Future<List<List<InlineSpan>>> getExampleMessages(
ConstructUses construct,
Client client,
int maxMessages,
) async {
final List<List<InlineSpan>> allSpans = [];
for (final use in construct.cappedUses) {
if (allSpans.length >= maxMessages) break;
final event = await client.getEventByConstructUse(use);
if (event == null) continue;
final spans = _buildExampleMessage(use.form, event);
if (spans != null) {
allSpans.add(spans);
}
}
return allSpans;
}
static List<InlineSpan>? _buildExampleMessage(
static _ExampleMessageResult? _buildExampleMessage(
String? form,
PangeaMessageEvent messageEvent,
) {
PangeaMessageEvent messageEvent, {
bool noBold = false,
}) {
String? text;
List<PangeaToken>? tokens;
int targetTokenIndex = -1;
@ -99,6 +159,7 @@ class ExampleMessageUtil {
// ---------- BEFORE ----------
int beforeStartOffset = 0;
bool trimmedBefore = false;
int firstIncludedTokenIndex = 0;
if (beforeAvailable > beforeBudget) {
final desiredStart = targetStart - beforeBudget;
@ -110,6 +171,7 @@ class ExampleMessageUtil {
if (tokenEnd > desiredStart) {
beforeStartOffset = token.text.offset;
firstIncludedTokenIndex = i;
trimmedBefore = true;
break;
}
@ -124,6 +186,7 @@ class ExampleMessageUtil {
// ---------- AFTER ----------
int afterEndOffset = totalChars;
bool trimmedAfter = false;
int lastIncludedTokenIndex = tokens.length - 1;
if (afterAvailable > afterBudget) {
final desiredEnd = targetEnd + afterBudget;
@ -132,6 +195,7 @@ class ExampleMessageUtil {
final token = tokens[i];
if (token.text.offset >= desiredEnd) {
afterEndOffset = token.text.offset;
lastIncludedTokenIndex = i - 1;
trimmedAfter = true;
break;
}
@ -144,15 +208,34 @@ class ExampleMessageUtil {
.toString()
.trimRight();
return [
final displaySpans = [
if (trimmedBefore) const TextSpan(text: ''),
TextSpan(text: before),
TextSpan(
text: targetToken.text.content,
style: const TextStyle(fontWeight: FontWeight.bold),
style: noBold ? null : const TextStyle(fontWeight: FontWeight.bold),
),
TextSpan(text: after),
if (trimmedAfter) const TextSpan(text: ''),
];
// Extract only the tokens that are included in the displayed text
final includedTokens =
tokens.sublist(firstIncludedTokenIndex, lastIncludedTokenIndex + 1);
// Adjust target token index relative to the included tokens
final adjustedTargetIndex = targetTokenIndex - firstIncludedTokenIndex;
return _ExampleMessageResult(
displaySpans: displaySpans,
includedTokens: includedTokens,
text: text.characters
.skip(beforeStartOffset)
.take(afterEndOffset - beforeStartOffset)
.toString(),
adjustedTargetIndex: adjustedTargetIndex,
eventId: messageEvent.eventId,
roomId: messageEvent.room.id,
);
}
}

View file

@ -1,10 +1,7 @@
import 'dart:async';
import 'dart:collection';
import 'package:flutter/material.dart';
import 'package:collection/collection.dart';
import 'package:fluffychat/l10n/l10n.dart';
import 'package:fluffychat/pangea/analytics_data/analytics_data_service.dart';
import 'package:fluffychat/pangea/analytics_data/analytics_updater_mixin.dart';
@ -19,15 +16,19 @@ import 'package:fluffychat/pangea/analytics_practice/analytics_practice_session_
import 'package:fluffychat/pangea/analytics_practice/analytics_practice_view.dart';
import 'package:fluffychat/pangea/common/utils/async_state.dart';
import 'package:fluffychat/pangea/constructs/construct_identifier.dart';
import 'package:fluffychat/pangea/events/event_wrappers/pangea_message_event.dart';
import 'package:fluffychat/pangea/lemmas/lemma_info_repo.dart';
import 'package:fluffychat/pangea/morphs/morph_features_enum.dart';
import 'package:fluffychat/pangea/practice_activities/activity_type_enum.dart';
import 'package:fluffychat/pangea/practice_activities/message_activity_request.dart';
import 'package:fluffychat/pangea/practice_activities/practice_activity_model.dart';
import 'package:fluffychat/pangea/practice_activities/practice_generation_repo.dart';
import 'package:fluffychat/pangea/text_to_speech/tts_controller.dart';
import 'package:fluffychat/pangea/toolbar/message_practice/message_audio_card.dart';
import 'package:fluffychat/pangea/toolbar/message_practice/practice_record_controller.dart';
import 'package:fluffychat/widgets/future_loading_dialog.dart';
import 'package:fluffychat/widgets/matrix.dart';
import 'package:flutter/material.dart';
class SelectedMorphChoice {
final MorphFeaturesEnum feature;
@ -102,11 +103,17 @@ class AnalyticsPracticeState extends State<AnalyticsPractice>
ValueNotifier<SelectedMorphChoice?>(null);
final ValueNotifier<bool> hintPressedNotifier = ValueNotifier<bool>(false);
final Set<String> _selectedCorrectAnswers = {};
// Track if we're showing the completion message for audio activities
final ValueNotifier<bool> showingAudioCompletion = ValueNotifier<bool>(false);
final ValueNotifier<int> hintsUsedNotifier = ValueNotifier<int>(0);
static const int maxHints = 5;
final Map<String, Map<String, String>> _choiceTexts = {};
final Map<String, Map<String, String?>> _choiceEmojis = {};
final Map<String, PangeaAudioFile> _audioFiles = {};
StreamSubscription<void>? _languageStreamSubscription;
@ -130,6 +137,7 @@ class AnalyticsPracticeState extends State<AnalyticsPractice>
enableChoicesNotifier.dispose();
selectedMorphChoice.dispose();
hintPressedNotifier.dispose();
showingAudioCompletion.dispose();
hintsUsedNotifier.dispose();
super.dispose();
}
@ -220,6 +228,7 @@ class AnalyticsPracticeState extends State<AnalyticsPractice>
hintsUsedNotifier.value = 0;
enableChoicesNotifier.value = true;
progressNotifier.value = 0.0;
showingAudioCompletion.value = false;
_queue.clear();
_choiceTexts.clear();
_choiceEmojis.clear();
@ -236,7 +245,11 @@ class AnalyticsPracticeState extends State<AnalyticsPractice>
void _playAudio() {
if (activityTarget.value == null) return;
if (widget.type != ConstructTypeEnum.vocab) return;
if (widget.type == ConstructTypeEnum.vocab &&
_currentActivity is VocabMeaningPracticeActivityModel) {
} else {
return;
}
TtsController.tryToSpeak(
activityTarget.value!.target.tokens.first.vocabConstructID.lemma,
langCode: MatrixState.pangeaController.userController.userL2!.langCode,
@ -323,6 +336,7 @@ class AnalyticsPracticeState extends State<AnalyticsPractice>
if (_continuing) return;
_continuing = true;
enableChoicesNotifier.value = true;
showingAudioCompletion.value = false;
try {
if (activityState.value
@ -334,6 +348,7 @@ class AnalyticsPracticeState extends State<AnalyticsPractice>
activityState.value = const AsyncState.loading();
selectedMorphChoice.value = null;
hintPressedNotifier.value = false;
_selectedCorrectAnswers.clear();
final nextActivityCompleter = _queue.removeFirst();
try {
@ -435,9 +450,57 @@ class AnalyticsPracticeState extends State<AnalyticsPractice>
await _fetchLemmaInfo(activityModel.storageKey, choices);
}
// Prefetch audio for audio activities before marking ready
if (activityModel is VocabAudioPracticeActivityModel) {
await _loadAudioForActivity(activityModel);
}
return activityModel;
}
/// Prefetches the TTS audio for an audio activity and caches it in
/// [_audioFiles] keyed by the activity's event id, so the activity can be
/// displayed only once its audio is ready.
///
/// Throws a localized error string when the activity is missing its
/// event/room ids, the room is unknown, or the event cannot be fetched.
/// NOTE(review): throwing a string follows the surrounding project
/// pattern, but loses the stack trace a typed exception would carry.
/// NOTE(review): `context` is read after several awaits — confirm the
/// widget is still mounted when this runs, or guard with `mounted`.
Future<void> _loadAudioForActivity(
VocabAudioPracticeActivityModel activity,
) async {
final eventId = activity.eventId;
final roomId = activity.roomId;
if (eventId == null || roomId == null) {
throw L10n.of(context).oopsSomethingWentWrong;
}
final client = MatrixState.pangeaController.matrixState.client;
final room = client.getRoomById(roomId);
if (room == null) {
throw L10n.of(context).oopsSomethingWentWrong;
}
final event = await room.getEventById(eventId);
if (event == null) {
throw L10n.of(context).oopsSomethingWentWrong;
}
final pangeaEvent = PangeaMessageEvent(
event: event,
timeline: await room.getTimeline(),
ownMessage: event.senderId == client.userID,
);
// Prefetch the audio file for the user's L2 voice before the activity
// is marked ready, so playback can start immediately.
final audioFile = await pangeaEvent.requestTextToSpeech(
activity.langCode,
MatrixState.pangeaController.userController.voice,
);
// Cache the audio file keyed by eventId; read back via [getAudioFile].
_audioFiles[eventId] = audioFile;
}
/// Returns the prefetched audio file cached for [eventId], or `null`
/// when no id is given or nothing has been cached for it.
PangeaAudioFile? getAudioFile(String? eventId) =>
    eventId == null ? null : _audioFiles[eventId];
Future<void> _fetchLemmaInfo(
String requestKey,
List<String> choiceIds,
@ -495,6 +558,22 @@ class AnalyticsPracticeState extends State<AnalyticsPractice>
hintPressedNotifier.value = true;
}
/// Handles the "continue" button on audio activities: hides the
/// completion message, marks the current activity complete, updates the
/// progress bar, and then either finishes the session or loads the next
/// activity.
Future<void> onAudioContinuePressed() async {
  showingAudioCompletion.value = false;
  // Mark this activity as completed and reflect it in the progress bar.
  _sessionLoader.value!.completeActivity();
  progressNotifier.value = _sessionLoader.value!.progress;
  // Both terminal conditions previously duplicated the same
  // `_completeSession()` branch; merged into a single check.
  if (_queue.isEmpty || _isComplete) {
    await _completeSession();
  } else {
    await _continueSession();
  }
}
Future<void> onSelectChoice(
String choiceContent,
) async {
@ -508,8 +587,17 @@ class AnalyticsPracticeState extends State<AnalyticsPractice>
tag: choiceContent,
);
}
final isCorrect = activity.multipleChoiceContent.isCorrect(choiceContent);
if (isCorrect) {
final isAudioActivity =
activity.activityType == ActivityTypeEnum.lemmaAudio;
if (isAudioActivity && isCorrect) {
_selectedCorrectAnswers.add(choiceContent);
}
if (isCorrect && !isAudioActivity) {
// Non-audio activities disable choices after first correct answer
enableChoicesNotifier.value = false;
}
@ -525,7 +613,24 @@ class AnalyticsPracticeState extends State<AnalyticsPractice>
await _analyticsService.updateService
.addAnalytics(choiceTargetId(choiceContent), [use]);
if (!activity.multipleChoiceContent.isCorrect(choiceContent)) return;
if (!isCorrect) return;
// For audio activities, check if all answers have been selected
if (isAudioActivity) {
final allAnswers = activity.multipleChoiceContent.answers;
final allSelected = allAnswers
.every((answer) => _selectedCorrectAnswers.contains(answer));
if (!allSelected) {
return;
}
// All answers selected, disable choices and show completion message
enableChoicesNotifier.value = false;
await Future.delayed(const Duration(milliseconds: 1000));
showingAudioCompletion.value = true;
return;
}
_playAudio();
@ -553,7 +658,7 @@ class AnalyticsPracticeState extends State<AnalyticsPractice>
final construct = target.targetTokenConstructID(token);
if (widget.type == ConstructTypeEnum.morph) {
return activityRequest.morphExampleInfo?.exampleMessage;
return activityRequest.exampleMessage?.exampleMessage;
}
return ExampleMessageUtil.getExampleMessage(
@ -562,6 +667,14 @@ class AnalyticsPracticeState extends State<AnalyticsPractice>
);
}
/// Returns the example-message spans attached to the current audio
/// activity, or `null` when the current activity is not an audio one.
List<InlineSpan>? getAudioExampleMessage() {
  final current = _currentActivity;
  if (current is! VocabAudioPracticeActivityModel) return null;
  return current.exampleMessage.exampleMessage;
}
Future<DerivedAnalyticsDataModel> get derivedAnalyticsData =>
_analyticsService.derivedData;

View file

@ -3,13 +3,14 @@ import 'package:flutter/painting.dart';
import 'package:fluffychat/pangea/analytics_misc/construct_use_type_enum.dart';
import 'package:fluffychat/pangea/analytics_misc/constructs_model.dart';
import 'package:fluffychat/pangea/analytics_practice/analytics_practice_constants.dart';
import 'package:fluffychat/pangea/events/models/pangea_token_model.dart';
import 'package:fluffychat/pangea/practice_activities/message_activity_request.dart';
import 'package:fluffychat/pangea/practice_activities/practice_target.dart';
class MorphExampleInfo {
class ExampleMessageInfo {
final List<InlineSpan> exampleMessage;
const MorphExampleInfo({
const ExampleMessageInfo({
required this.exampleMessage,
});
@ -30,7 +31,7 @@ class MorphExampleInfo {
};
}
factory MorphExampleInfo.fromJson(Map<String, dynamic> json) {
factory ExampleMessageInfo.fromJson(Map<String, dynamic> json) {
final segments = json['segments'] as List<dynamic>? ?? [];
final spans = <InlineSpan>[];
@ -46,25 +47,60 @@ class MorphExampleInfo {
);
}
return MorphExampleInfo(exampleMessage: spans);
return ExampleMessageInfo(exampleMessage: spans);
}
}
/// An extended example message that carries both the formatted display
/// spans and the underlying tokens used to generate audio practice
/// activities. [eventId]/[roomId] identify the source message and are
/// needed for audio playback.
class AudioExampleMessage {
// Tokens covered by the example message's displayed text.
final List<PangeaToken> tokens;
// Source event/room of the example message; may be null.
final String? eventId;
final String? roomId;
// The formatted spans shown to the user.
final ExampleMessageInfo exampleMessage;
const AudioExampleMessage({
required this.tokens,
this.eventId,
this.roomId,
required this.exampleMessage,
});
// Serialization is intentionally lossy: only the event/room ids are
// persisted. Tokens and spans are not round-tripped (see [fromJson]).
Map<String, dynamic> toJson() {
return {
'eventId': eventId,
'roomId': roomId,
};
}
// Restores only the ids; tokens and spans come back empty and must be
// rebuilt from the source event if needed.
factory AudioExampleMessage.fromJson(Map<String, dynamic> json) {
return AudioExampleMessage(
tokens: const [],
eventId: json['eventId'] as String?,
roomId: json['roomId'] as String?,
exampleMessage: const ExampleMessageInfo(exampleMessage: []),
);
}
}
class AnalyticsActivityTarget {
final PracticeTarget target;
final GrammarErrorRequestInfo? grammarErrorInfo;
final MorphExampleInfo? morphExampleInfo;
final ExampleMessageInfo? exampleMessage;
final AudioExampleMessage? audioExampleMessage;
AnalyticsActivityTarget({
required this.target,
this.grammarErrorInfo,
this.morphExampleInfo,
this.exampleMessage,
this.audioExampleMessage,
});
Map<String, dynamic> toJson() => {
'target': target.toJson(),
'grammarErrorInfo': grammarErrorInfo?.toJson(),
'morphExampleInfo': morphExampleInfo?.toJson(),
'exampleMessage': exampleMessage?.toJson(),
'audioExampleMessage': audioExampleMessage?.toJson(),
};
factory AnalyticsActivityTarget.fromJson(Map<String, dynamic> json) =>
@ -73,8 +109,11 @@ class AnalyticsActivityTarget {
grammarErrorInfo: json['grammarErrorInfo'] != null
? GrammarErrorRequestInfo.fromJson(json['grammarErrorInfo'])
: null,
morphExampleInfo: json['morphExampleInfo'] != null
? MorphExampleInfo.fromJson(json['morphExampleInfo'])
exampleMessage: json['exampleMessage'] != null
? ExampleMessageInfo.fromJson(json['exampleMessage'])
: null,
audioExampleMessage: json['audioExampleMessage'] != null
? AudioExampleMessage.fromJson(json['audioExampleMessage'])
: null,
);
}
@ -131,7 +170,8 @@ class AnalyticsPracticeSessionModel {
activityQualityFeedback: null,
target: target.target,
grammarErrorInfo: target.grammarErrorInfo,
morphExampleInfo: target.morphExampleInfo,
exampleMessage: target.exampleMessage,
audioExampleMessage: target.audioExampleMessage,
);
}).toList();
}

View file

@ -34,29 +34,44 @@ class AnalyticsPracticeSessionRepo {
throw UnsubscribedException();
}
final r = Random();
final activityTypes = ActivityTypeEnum.analyticsPracticeTypes(type);
final types = List.generate(
AnalyticsPracticeConstants.practiceGroupSize +
AnalyticsPracticeConstants.errorBufferSize,
(_) => activityTypes[r.nextInt(activityTypes.length)],
);
final List<AnalyticsActivityTarget> targets = [];
if (type == ConstructTypeEnum.vocab) {
final constructs = await _fetchVocab();
final targetCount = min(constructs.length, types.length);
targets.addAll([
for (var i = 0; i < targetCount; i++)
const totalNeeded = AnalyticsPracticeConstants.practiceGroupSize +
AnalyticsPracticeConstants.errorBufferSize;
final halfNeeded = (totalNeeded / 2).ceil();
// Fetch audio constructs (with example messages)
final audioMap = await _fetchAudio();
final audioCount = min(audioMap.length, halfNeeded);
// Fetch vocab constructs to fill the rest
final vocabNeeded = totalNeeded - audioCount;
final vocabConstructs = await _fetchVocab();
final vocabCount = min(vocabConstructs.length, vocabNeeded);
for (final entry in audioMap.entries.take(audioCount)) {
targets.add(
AnalyticsActivityTarget(
target: PracticeTarget(
tokens: [constructs[i].asToken],
activityType: types[i],
tokens: [entry.key.asToken],
activityType: ActivityTypeEnum.lemmaAudio,
),
audioExampleMessage: entry.value,
),
);
}
for (var i = 0; i < vocabCount; i++) {
targets.add(
AnalyticsActivityTarget(
target: PracticeTarget(
tokens: [vocabConstructs[i].asToken],
activityType: ActivityTypeEnum.lemmaMeaning,
),
),
]);
);
}
targets.shuffle();
} else {
final errorTargets = await _fetchErrors();
targets.addAll(errorTargets);
@ -77,7 +92,7 @@ class AnalyticsPracticeSessionRepo {
activityType: ActivityTypeEnum.grammarCategory,
morphFeature: entry.feature,
),
morphExampleInfo: MorphExampleInfo(
exampleMessage: ExampleMessageInfo(
exampleMessage: entry.exampleMessage,
),
),
@ -132,6 +147,62 @@ class AnalyticsPracticeSessionRepo {
return targets;
}
/// Builds a map from vocab construct ids to audio example messages for
/// the next practice session.
///
/// Constructs are sorted by last-used date *ascending* with never-used
/// (null) constructs first, so the least recently practiced lemmas are
/// preferred. Each lemma and each source event is used at most once, and
/// at most practiceGroupSize + errorBufferSize targets are collected.
static Future<Map<ConstructIdentifier, AudioExampleMessage>>
    _fetchAudio() async {
  final constructs = await MatrixState
      .pangeaController.matrixState.analyticsDataService
      .getAggregatedConstructs(ConstructTypeEnum.vocab)
      .then((map) => map.values.toList());

  // Sort by last used ascending, nulls first. (The previous comment said
  // "descending", but compareTo orders ascending — oldest/never-used
  // constructs are practiced first, which matches the null handling.)
  constructs.sort((a, b) {
    final dateA = a.lastUsed;
    final dateB = b.lastUsed;
    if (dateA == null && dateB == null) return 0;
    if (dateA == null) return -1;
    if (dateB == null) return 1;
    return dateA.compareTo(dateB);
  });

  // Loop-invariant cap, hoisted out of the loop.
  final maxTargets = AnalyticsPracticeConstants.practiceGroupSize +
      AnalyticsPracticeConstants.errorBufferSize;

  final Set<String> seenLemmas = {};
  final Set<String> seenEventIds = {};
  final targets = <ConstructIdentifier, AudioExampleMessage>{};
  for (final construct in constructs) {
    if (targets.length >= maxTargets) break;
    if (seenLemmas.contains(construct.lemma)) continue;

    // Try to build an audio example message (with token data) for this
    // lemma.
    final audioExampleMessage =
        await ExampleMessageUtil.getAudioExampleMessage(
      await MatrixState.pangeaController.matrixState.analyticsDataService
          .getConstructUse(construct.id),
      MatrixState.pangeaController.matrixState.client,
      noBold: true,
    );
    if (audioExampleMessage == null) continue;

    // Skip messages whose source event has already been used this
    // session, so the same audio is not repeated.
    final eventId = audioExampleMessage.eventId;
    if (eventId != null && seenEventIds.contains(eventId)) continue;

    seenLemmas.add(construct.lemma);
    if (eventId != null) seenEventIds.add(eventId);
    targets[construct.id] = audioExampleMessage;
  }
  return targets;
}
static Future<List<MorphPracticeTarget>> _fetchMorphs() async {
final constructs = await MatrixState
.pangeaController.matrixState.analyticsDataService

View file

@ -1,13 +1,11 @@
import 'package:flutter/material.dart';
import 'package:fluffychat/config/app_config.dart';
import 'package:fluffychat/config/themes.dart';
import 'package:fluffychat/l10n/l10n.dart';
import 'package:fluffychat/pages/chat/events/audio_player.dart';
import 'package:fluffychat/pangea/analytics_details_popup/morph_meaning_widget.dart';
import 'package:fluffychat/pangea/analytics_misc/construct_type_enum.dart';
import 'package:fluffychat/pangea/analytics_practice/analytics_practice_page.dart';
import 'package:fluffychat/pangea/analytics_practice/analytics_practice_session_model.dart';
import 'package:fluffychat/pangea/analytics_practice/choice_cards/audio_choice_card.dart';
import 'package:fluffychat/pangea/analytics_practice/choice_cards/game_choice_card.dart';
import 'package:fluffychat/pangea/analytics_practice/choice_cards/grammar_choice_card.dart';
import 'package:fluffychat/pangea/analytics_practice/choice_cards/meaning_choice_card.dart';
@ -25,6 +23,7 @@ import 'package:fluffychat/pangea/practice_activities/practice_activity_model.da
import 'package:fluffychat/utils/localized_exception_extension.dart';
import 'package:fluffychat/widgets/layouts/max_width_body.dart';
import 'package:fluffychat/widgets/matrix.dart';
import 'package:flutter/material.dart';
class AnalyticsPracticeView extends StatelessWidget {
final AnalyticsPracticeState controller;
@ -119,60 +118,79 @@ class _AnalyticsActivityView extends StatelessWidget {
: Theme.of(context).textTheme.titleMedium;
titleStyle = titleStyle?.copyWith(fontWeight: FontWeight.bold);
return ListView(
return Column(
children: [
//Hints counter bar for grammar activities only
if (controller.widget.type == ConstructTypeEnum.morph)
Padding(
padding: const EdgeInsets.only(bottom: 16.0),
child: _HintsCounterBar(controller: controller),
),
//per-activity instructions, add switch statement once there are more types
const InstructionsInlineTooltip(
instructionsEnum: InstructionsEnum.selectMeaning,
padding: EdgeInsets.symmetric(
vertical: 8.0,
),
),
SizedBox(
height: 75.0,
child: ValueListenableBuilder(
valueListenable: controller.activityTarget,
builder: (context, target, __) => target != null
? Column(
children: [
Text(
target.promptText(context),
textAlign: TextAlign.center,
style: titleStyle,
maxLines: 2,
overflow: TextOverflow.ellipsis,
),
if (controller.widget.type == ConstructTypeEnum.vocab)
PhoneticTranscriptionWidget(
text:
target.target.tokens.first.vocabConstructID.lemma,
textLanguage: MatrixState
.pangeaController.userController.userL2!,
style: const TextStyle(fontSize: 14.0),
Expanded(
child: ListView(
children: [
//Hints counter bar for grammar activities only
if (controller.widget.type == ConstructTypeEnum.morph)
Padding(
padding: const EdgeInsets.only(bottom: 16.0),
child: _HintsCounterBar(controller: controller),
),
//per-activity instructions, add switch statement once there are more types
const InstructionsInlineTooltip(
instructionsEnum: InstructionsEnum.selectMeaning,
padding: EdgeInsets.symmetric(
vertical: 8.0,
),
),
SizedBox(
height: 75.0,
child: ValueListenableBuilder(
valueListenable: controller.activityTarget,
builder: (context, target, __) {
if (target == null) return const SizedBox.shrink();
final isAudioActivity = target.target.activityType ==
ActivityTypeEnum.lemmaAudio;
final isVocabType =
controller.widget.type == ConstructTypeEnum.vocab;
return Column(
children: [
Text(
isAudioActivity && isVocabType
? L10n.of(context).selectAllWords
: target.promptText(context),
textAlign: TextAlign.center,
style: titleStyle,
maxLines: 2,
overflow: TextOverflow.ellipsis,
),
],
)
: const SizedBox.shrink(),
if (isVocabType && !isAudioActivity)
PhoneticTranscriptionWidget(
text: target
.target.tokens.first.vocabConstructID.lemma,
textLanguage: MatrixState
.pangeaController.userController.userL2!,
style: const TextStyle(fontSize: 14.0),
),
],
);
},
),
),
const SizedBox(height: 16.0),
Center(
child: _AnalyticsPracticeCenterContent(controller: controller),
),
const SizedBox(height: 16.0),
(controller.widget.type == ConstructTypeEnum.morph)
? Center(child: _HintSection(controller: controller))
: const SizedBox.shrink(),
const SizedBox(height: 16.0),
_ActivityChoicesWidget(controller),
const SizedBox(height: 16.0),
_WrongAnswerFeedback(controller: controller),
],
),
),
const SizedBox(height: 16.0),
Center(
child: _AnalyticsPracticeCenterContent(controller: controller),
Container(
alignment: Alignment.bottomCenter,
child: _AudioContinueButton(controller: controller),
),
const SizedBox(height: 16.0),
(controller.widget.type == ConstructTypeEnum.morph)
? Center(child: _HintSection(controller: controller))
: const SizedBox.shrink(),
const SizedBox(height: 16.0),
_ActivityChoicesWidget(controller),
const SizedBox(height: 16.0),
_WrongAnswerFeedback(controller: controller),
],
);
}
@ -222,6 +240,32 @@ class _AnalyticsPracticeCenterContent extends StatelessWidget {
controller.getExampleMessage(target!),
),
),
ActivityTypeEnum.lemmaAudio => ValueListenableBuilder(
valueListenable: controller.activityState,
builder: (context, state, __) => switch (state) {
AsyncLoaded(
value: final VocabAudioPracticeActivityModel activity
) =>
SizedBox(
height: 100.0,
child: Center(
child: AudioPlayerWidget(
null,
color: Theme.of(context).colorScheme.primary,
linkColor: Theme.of(context).colorScheme.secondary,
fontSize:
AppConfig.fontSizeFactor * AppConfig.messageFontSize,
eventId: '${activity.eventId}_practice',
roomId: activity.roomId!,
senderId: Matrix.of(context).client.userID!,
matrixFile: controller.getAudioFile(activity.eventId)!,
autoplay: true,
),
),
),
_ => const SizedBox(height: 100.0),
},
),
_ => SizedBox(
height: 100.0,
child: Center(
@ -235,6 +279,50 @@ class _AnalyticsPracticeCenterContent extends StatelessWidget {
}
}
/// Shows the example message that the just-completed audio activity was
/// drawn from, rendered as a rounded bubble.
class _AudioCompletionWidget extends StatelessWidget {
  final AnalyticsPracticeState controller;

  const _AudioCompletionWidget({
    super.key,
    required this.controller,
  });

  @override
  Widget build(BuildContext context) {
    final spans = controller.getAudioExampleMessage();
    // Keep the same vertical footprint when there is nothing to show, so
    // the surrounding layout does not jump.
    if (spans == null || spans.isEmpty) {
      return const SizedBox(height: 100.0);
    }

    final bubbleColor = Color.alphaBlend(
      Colors.white.withAlpha(180),
      ThemeData.dark().colorScheme.primary,
    );
    final textStyle = TextStyle(
      color: Theme.of(context).colorScheme.onPrimaryFixed,
      fontSize: AppConfig.fontSizeFactor * AppConfig.messageFontSize,
    );

    return Padding(
      padding: const EdgeInsets.all(16.0),
      child: Container(
        padding: const EdgeInsets.symmetric(horizontal: 12, vertical: 8),
        decoration: BoxDecoration(
          color: bubbleColor,
          borderRadius: BorderRadius.circular(16),
        ),
        child: RichText(
          text: TextSpan(style: textStyle, children: spans),
        ),
      ),
    );
  }
}
class _ExampleMessageWidget extends StatelessWidget {
final Future<List<InlineSpan>?> future;
@ -629,6 +717,63 @@ class _ActivityChoicesWidget extends StatelessWidget {
valueListenable: controller.enableChoicesNotifier,
builder: (context, enabled, __) {
final choices = controller.filteredChoices(value);
final isAudioActivity =
value.activityType == ActivityTypeEnum.lemmaAudio;
if (isAudioActivity) {
// For audio activities, use AnimatedSwitcher to fade between choices and example message
return ValueListenableBuilder(
valueListenable: controller.showingAudioCompletion,
builder: (context, showingCompletion, __) {
return AnimatedSwitcher(
duration: const Duration(milliseconds: 500),
layoutBuilder: (currentChild, previousChildren) {
return Stack(
alignment: Alignment.topCenter,
children: <Widget>[
...previousChildren,
if (currentChild != null) currentChild,
],
);
},
child: showingCompletion
? _AudioCompletionWidget(
key: const ValueKey('completion'),
controller: controller,
)
: Padding(
key: const ValueKey('choices'),
padding: const EdgeInsets.all(16.0),
child: Wrap(
alignment: WrapAlignment.center,
spacing: 8.0,
runSpacing: 8.0,
children: choices
.map(
(choice) => _ChoiceCard(
activity: value,
targetId: controller
.choiceTargetId(choice.choiceId),
choiceId: choice.choiceId,
onPressed: () =>
controller.onSelectChoice(
choice.choiceId,
),
cardHeight: 48.0,
choiceText: choice.choiceText,
choiceEmoji: choice.choiceEmoji,
enabled: enabled,
shrinkWrap: true,
),
)
.toList(),
),
),
);
},
);
}
return Column(
spacing: 8.0,
mainAxisAlignment: MainAxisAlignment.center,
@ -663,6 +808,56 @@ class _ActivityChoicesWidget extends StatelessWidget {
}
}
/// The continue button shown beneath audio activities; it is rendered
/// only for audio activities and enabled only while the completion
/// message is visible.
class _AudioContinueButton extends StatelessWidget {
  final AnalyticsPracticeState controller;

  const _AudioContinueButton({
    required this.controller,
  });

  @override
  Widget build(BuildContext context) {
    return ValueListenableBuilder(
      valueListenable: controller.activityState,
      builder: (context, state, __) {
        // Render nothing unless a multiple-choice audio activity is loaded.
        if (state is! AsyncLoaded<MultipleChoicePracticeActivityModel> ||
            state.value.activityType != ActivityTypeEnum.lemmaAudio) {
          return const SizedBox.shrink();
        }
        return ValueListenableBuilder(
          valueListenable: controller.showingAudioCompletion,
          builder: (context, showingCompletion, __) => Padding(
            padding: const EdgeInsets.all(16.0),
            child: ElevatedButton(
              // Disabled until the completion message is showing.
              onPressed: showingCompletion
                  ? controller.onAudioContinuePressed
                  : null,
              style: ElevatedButton.styleFrom(
                padding: const EdgeInsets.symmetric(
                  horizontal: 48.0,
                  vertical: 16.0,
                ),
              ),
              child: Text(
                L10n.of(context).continueText,
                style: const TextStyle(fontSize: 18.0),
              ),
            ),
          ),
        );
      },
    );
  }
}
class _ChoiceCard extends StatelessWidget {
final MultipleChoicePracticeActivityModel activity;
final String choiceId;
@ -673,6 +868,7 @@ class _ChoiceCard extends StatelessWidget {
final String choiceText;
final String? choiceEmoji;
final bool enabled;
final bool shrinkWrap;
const _ChoiceCard({
required this.activity,
@ -683,6 +879,7 @@ class _ChoiceCard extends StatelessWidget {
required this.choiceText,
required this.choiceEmoji,
this.enabled = true,
this.shrinkWrap = false,
});
@override
@ -708,16 +905,21 @@ class _ChoiceCard extends StatelessWidget {
);
case ActivityTypeEnum.lemmaAudio:
return AudioChoiceCard(
return GameChoiceCard(
key: ValueKey(
'${constructId.string}_${activityType.name}_audio_$choiceId',
),
text: choiceId,
shouldFlip: false,
targetId: targetId,
onPressed: onPressed,
isCorrect: isCorrect,
height: cardHeight,
isEnabled: enabled,
shrinkWrap: shrinkWrap,
child: Text(
choiceText,
textAlign: TextAlign.center,
),
);
case ActivityTypeEnum.grammarCategory:

View file

@ -14,6 +14,7 @@ class GameChoiceCard extends StatefulWidget {
final bool shouldFlip;
final String targetId;
final bool isEnabled;
final bool shrinkWrap;
const GameChoiceCard({
required this.child,
@ -24,6 +25,7 @@ class GameChoiceCard extends StatefulWidget {
this.height = 72.0,
this.shouldFlip = false,
this.isEnabled = true,
this.shrinkWrap = false,
super.key,
});
@ -90,7 +92,7 @@ class _GameChoiceCardState extends State<GameChoiceCard>
link: MatrixState.pAnyState.layerLinkAndKey(widget.targetId).link,
child: HoverBuilder(
builder: (context, hovered) => SizedBox(
width: double.infinity,
width: widget.shrinkWrap ? null : double.infinity,
height: widget.height,
child: GestureDetector(
onTap: _handleTap,
@ -109,6 +111,7 @@ class _GameChoiceCardState extends State<GameChoiceCard>
overlayColor: _revealed
? tintColor
: (hovered ? hoverColor : Colors.transparent),
shrinkWrap: widget.shrinkWrap,
child: Opacity(
opacity: showContent ? 1 : 0,
child: _revealed ? widget.altChild! : widget.child,
@ -123,6 +126,7 @@ class _GameChoiceCardState extends State<GameChoiceCard>
overlayColor: _clicked
? tintColor
: (hovered ? hoverColor : Colors.transparent),
shrinkWrap: widget.shrinkWrap,
child: widget.child,
),
),
@ -137,19 +141,24 @@ class _CardContainer extends StatelessWidget {
final Color baseColor;
final Color overlayColor;
final Widget child;
final bool shrinkWrap;
/// Creates the tinted card surface.
///
/// [shrinkWrap] defaults to false; when false the container uses the fixed
/// [height] with centered content (see [build]).
const _CardContainer({
required this.height,
required this.baseColor,
required this.overlayColor,
required this.child,
this.shrinkWrap = false,
});
@override
Widget build(BuildContext context) {
return Container(
height: height,
alignment: Alignment.center,
height: shrinkWrap ? null : height,
padding: shrinkWrap
? const EdgeInsets.symmetric(horizontal: 16.0, vertical: 12.0)
: null,
alignment: shrinkWrap ? null : Alignment.center,
decoration: BoxDecoration(
color: baseColor,
borderRadius: BorderRadius.circular(16),

View file

@ -59,8 +59,8 @@ class MorphCategoryActivityGenerator {
choices: choices.toSet(),
answers: {morphTag},
),
morphExampleInfo:
req.morphExampleInfo ?? const MorphExampleInfo(exampleMessage: []),
exampleMessageInfo:
req.exampleMessage ?? const ExampleMessageInfo(exampleMessage: []),
),
);
}

View file

@ -1,3 +1,4 @@
import 'package:fluffychat/pangea/analytics_practice/analytics_practice_session_model.dart';
import 'package:fluffychat/pangea/practice_activities/lemma_activity_generator.dart';
import 'package:fluffychat/pangea/practice_activities/message_activity_request.dart';
import 'package:fluffychat/pangea/practice_activities/multiple_choice_activity_model.dart';
@ -8,20 +9,61 @@ class VocabAudioActivityGenerator {
MessageActivityRequest req,
) async {
final token = req.target.tokens.first;
final choices =
await LemmaActivityGenerator.lemmaActivityDistractors(token);
final audioExample = req.audioExampleMessage;
final choicesList = choices.map((c) => c.lemma).toList();
choicesList.shuffle();
final Set<String> answers = {token.text.content.toLowerCase()};
final Set<String> wordsInMessage = {};
if (audioExample != null) {
for (final t in audioExample.tokens) {
wordsInMessage.add(t.text.content.toLowerCase());
}
// Extract up to 3 additional words as answers
final otherWords = audioExample.tokens
.where(
(t) =>
t.lemma.saveVocab &&
t.text.content.toLowerCase() !=
token.text.content.toLowerCase() &&
t.text.content.trim().isNotEmpty,
)
.take(3)
.map((t) => t.text.content.toLowerCase())
.toList();
answers.addAll(otherWords);
}
// Generate distractors, filtering out anything in the message or answers
final choices = await LemmaActivityGenerator.lemmaActivityDistractors(
token,
maxChoices: 20,
);
final choicesList = choices
.map((c) => c.lemma)
.where(
(lemma) =>
!answers.contains(lemma.toLowerCase()) &&
!wordsInMessage.contains(lemma.toLowerCase()),
)
.take(4)
.toList();
final allChoices = [...choicesList, ...answers];
allChoices.shuffle();
return MessageActivityResponse(
activity: VocabAudioPracticeActivityModel(
tokens: req.target.tokens,
langCode: req.userL2,
multipleChoiceContent: MultipleChoiceActivity(
choices: choicesList.toSet(),
answers: {token.lemma.text},
choices: allChoices.toSet(),
answers: answers,
),
roomId: audioExample?.roomId,
eventId: audioExample?.eventId,
exampleMessage: audioExample?.exampleMessage ??
const ExampleMessageInfo(exampleMessage: []),
),
);
}

View file

@ -246,7 +246,7 @@ enum ActivityTypeEnum {
static List<ActivityTypeEnum> get _vocabPracticeTypes => [
ActivityTypeEnum.lemmaMeaning,
// ActivityTypeEnum.lemmaAudio,
ActivityTypeEnum.lemmaAudio,
];
static List<ActivityTypeEnum> get _grammarPracticeTypes => [

View file

@ -33,8 +33,9 @@ class LemmaActivityGenerator {
}
static Future<Set<ConstructIdentifier>> lemmaActivityDistractors(
PangeaToken token,
) async {
PangeaToken token, {
int? maxChoices = 4,
}) async {
final constructs = await MatrixState
.pangeaController.matrixState.analyticsDataService
.getAggregatedConstructs(ConstructTypeEnum.vocab);
@ -54,13 +55,13 @@ class LemmaActivityGenerator {
// Skip the first 7 lemmas (to avoid very similar and conjugated forms of verbs) if we have enough lemmas
final int startIndex = sortedLemmas.length > 11 ? 7 : 0;
// Take up to 4 lemmas ensuring uniqueness by lemma text
// Take up to 4 (or maxChoices) lemmas ensuring uniqueness by lemma text
final List<ConstructIdentifier> uniqueByLemma = [];
for (int i = startIndex; i < sortedLemmas.length; i++) {
final cid = sortedLemmas[i];
if (!uniqueByLemma.any((c) => c.lemma == cid.lemma)) {
uniqueByLemma.add(cid);
if (uniqueByLemma.length == 4) break;
if (uniqueByLemma.length == maxChoices) break;
}
}

View file

@ -80,7 +80,8 @@ class MessageActivityRequest {
final PracticeTarget target;
final ActivityQualityFeedback? activityQualityFeedback;
final GrammarErrorRequestInfo? grammarErrorInfo;
final MorphExampleInfo? morphExampleInfo;
final ExampleMessageInfo? exampleMessage;
final AudioExampleMessage? audioExampleMessage;
MessageActivityRequest({
required this.userL1,
@ -88,7 +89,8 @@ class MessageActivityRequest {
required this.activityQualityFeedback,
required this.target,
this.grammarErrorInfo,
this.morphExampleInfo,
this.exampleMessage,
this.audioExampleMessage,
}) {
if (target.tokens.isEmpty) {
throw Exception('Target tokens must not be empty');

View file

@ -112,9 +112,9 @@ sealed class PracticeActivityModel {
tokens: tokens,
morphFeature: morph!,
multipleChoiceContent: multipleChoiceContent!,
morphExampleInfo: json['morph_example_info'] != null
? MorphExampleInfo.fromJson(json['morph_example_info'])
: const MorphExampleInfo(exampleMessage: []),
exampleMessageInfo: json['example_message_info'] != null
? ExampleMessageInfo.fromJson(json['example_message_info'])
: const ExampleMessageInfo(exampleMessage: []),
);
case ActivityTypeEnum.lemmaAudio:
assert(
@ -125,6 +125,11 @@ sealed class PracticeActivityModel {
langCode: langCode,
tokens: tokens,
multipleChoiceContent: multipleChoiceContent!,
roomId: json['room_id'] as String?,
eventId: json['event_id'] as String?,
exampleMessage: json['example_message'] != null
? ExampleMessageInfo.fromJson(json['example_message'])
: const ExampleMessageInfo(exampleMessage: []),
);
case ActivityTypeEnum.lemmaMeaning:
assert(
@ -306,13 +311,13 @@ sealed class MorphPracticeActivityModel
}
class MorphCategoryPracticeActivityModel extends MorphPracticeActivityModel {
final MorphExampleInfo morphExampleInfo;
final ExampleMessageInfo exampleMessageInfo;
MorphCategoryPracticeActivityModel({
required super.tokens,
required super.langCode,
required super.morphFeature,
required super.multipleChoiceContent,
required this.morphExampleInfo,
required this.exampleMessageInfo,
});
@override
@ -340,7 +345,7 @@ class MorphCategoryPracticeActivityModel extends MorphPracticeActivityModel {
@override
Map<String, dynamic> toJson() {
final json = super.toJson();
json['morph_example_info'] = morphExampleInfo.toJson();
json['example_message_info'] = exampleMessageInfo.toJson();
return json;
}
}
@ -356,11 +361,27 @@ class MorphMatchPracticeActivityModel extends MorphPracticeActivityModel {
/// A lemma-audio multiple-choice practice activity.
///
/// [roomId] and [eventId] identify the message the audio example came from,
/// when one exists; [exampleMessage] carries the example-message content
/// serialized under `example_message`.
class VocabAudioPracticeActivityModel
    extends MultipleChoicePracticeActivityModel {
  // Source room of the audio example message, if any.
  final String? roomId;
  // Source event of the audio example message, if any.
  final String? eventId;
  // Example-message content associated with this activity.
  final ExampleMessageInfo exampleMessage;

  VocabAudioPracticeActivityModel({
    required super.tokens,
    required super.langCode,
    required super.multipleChoiceContent,
    this.roomId,
    this.eventId,
    required this.exampleMessage,
  });

  /// Serializes the base activity fields plus the audio-specific
  /// room/event identifiers and example message.
  @override
  Map<String, dynamic> toJson() => super.toJson()
    ..['room_id'] = roomId
    ..['event_id'] = eventId
    ..['example_message'] = exampleMessage.toJson();
}
class VocabMeaningPracticeActivityModel