feat(sync/v3): Remove TL size config option in favor of using the sync filter
This commit is contained in:
parent
b20000fcf3
commit
3b8b9d4b5c
5 changed files with 33 additions and 24 deletions
|
|
@@ -1196,14 +1196,6 @@
|
|||
#
|
||||
#typing_client_timeout_max_s = 45
|
||||
|
||||
# The maximum number of events to send at once for non-limited legacy
|
||||
# syncs. Has no effect on sliding sync. This parameter also affects how
|
||||
# many messages from each room are sent to the client on initial syncs;
|
||||
# larger values will make initial syncs slower. The default of 10 is
|
||||
# reasonable for most use cases.
|
||||
#
|
||||
#incremental_sync_max_timeline_size = 10
|
||||
|
||||
# Set this to true for continuwuity to compress HTTP response bodies using
|
||||
# zstd. This option does nothing if continuwuity was not built with
|
||||
# `zstd_compression` feature. Please be aware that enabling HTTP
|
||||
|
|
|
|||
|
|
@@ -38,7 +38,7 @@ use super::{load_timeline, share_encrypted_room};
|
|||
use crate::client::{
|
||||
ignored_filter,
|
||||
sync::v3::{
|
||||
DeviceListUpdates, SyncContext, prepare_lazily_loaded_members,
|
||||
DEFAULT_TIMELINE_LIMIT, DeviceListUpdates, SyncContext, prepare_lazily_loaded_members,
|
||||
state::{build_state_incremental, build_state_initial},
|
||||
},
|
||||
};
|
||||
|
|
@@ -63,6 +63,7 @@ pub(super) async fn load_joined_room(
|
|||
last_sync_end_count,
|
||||
current_count,
|
||||
full_state,
|
||||
filter,
|
||||
..
|
||||
} = sync_context;
|
||||
let mut device_list_updates = DeviceListUpdates::new();
|
||||
|
|
@@ -92,6 +93,16 @@ pub(super) async fn load_joined_room(
|
|||
try_join(current_shortstatehash, last_sync_end_shortstatehash).await?;
|
||||
|
||||
// load recent timeline events.
|
||||
// if the filter specifies a limit, that will be used, otherwise
|
||||
// `DEFAULT_TIMELINE_LIMIT` will be used. `DEFAULT_TIMELINE_LIMIT` will also be
|
||||
// used if the limit is somehow greater than usize::MAX.
|
||||
|
||||
let timeline_limit = filter
|
||||
.room
|
||||
.timeline
|
||||
.limit
|
||||
.and_then(|limit| limit.try_into().ok())
|
||||
.unwrap_or(DEFAULT_TIMELINE_LIMIT);
|
||||
|
||||
let timeline = load_timeline(
|
||||
services,
|
||||
|
|
@@ -99,7 +110,7 @@ pub(super) async fn load_joined_room(
|
|||
room_id,
|
||||
last_sync_end_count.map(PduCount::Normal),
|
||||
Some(PduCount::Normal(current_count)),
|
||||
services.config.incremental_sync_max_timeline_size,
|
||||
timeline_limit,
|
||||
);
|
||||
|
||||
let receipt_events = services
|
||||
|
|
|
|||
|
|
@@ -21,7 +21,10 @@ use crate::client::{
|
|||
TimelinePdus, ignored_filter,
|
||||
sync::{
|
||||
load_timeline,
|
||||
v3::{SyncContext, prepare_lazily_loaded_members, state::build_state_initial},
|
||||
v3::{
|
||||
DEFAULT_TIMELINE_LIMIT, SyncContext, prepare_lazily_loaded_members,
|
||||
state::build_state_initial,
|
||||
},
|
||||
},
|
||||
};
|
||||
|
||||
|
|
@@ -148,19 +151,29 @@ pub(super) async fn load_left_room(
|
|||
.await?
|
||||
.saturating_sub(1)
|
||||
};
|
||||
|
||||
// end the timeline at the user's leave event
|
||||
let timeline_end_count = services
|
||||
.rooms
|
||||
.timeline
|
||||
.get_pdu_count(leave_pdu.event_id())
|
||||
.await?;
|
||||
|
||||
// limit the timeline using the same logic as for joined rooms
|
||||
let timeline_limit = filter
|
||||
.room
|
||||
.timeline
|
||||
.limit
|
||||
.and_then(|limit| limit.try_into().ok())
|
||||
.unwrap_or(DEFAULT_TIMELINE_LIMIT);
|
||||
|
||||
let timeline = load_timeline(
|
||||
services,
|
||||
syncing_user,
|
||||
room_id,
|
||||
Some(timeline_start_count),
|
||||
Some(timeline_end_count),
|
||||
services.config.incremental_sync_max_timeline_size,
|
||||
timeline_limit,
|
||||
)
|
||||
.await?;
|
||||
|
||||
|
|
|
|||
|
|
@@ -52,6 +52,11 @@ use crate::{
|
|||
},
|
||||
};
|
||||
|
||||
/// The default maximum number of events to return in the `timeline` key of
|
||||
/// joined and left rooms. If the number of events sent since the last sync
|
||||
/// exceeds this number, the `timeline` will be `limited`.
|
||||
const DEFAULT_TIMELINE_LIMIT: usize = 30;
|
||||
|
||||
/// A collection of updates to users' device lists, used for E2EE.
|
||||
struct DeviceListUpdates {
|
||||
changed: HashSet<OwnedUserId>,
|
||||
|
|
|
|||
|
|
@@ -1384,16 +1384,6 @@ pub struct Config {
|
|||
#[serde(default = "default_typing_client_timeout_max_s")]
|
||||
pub typing_client_timeout_max_s: u64,
|
||||
|
||||
/// The maximum number of events to send at once for non-limited legacy
|
||||
/// syncs. Has no effect on sliding sync. This parameter also affects how
|
||||
/// many messages from each room are sent to the client on initial syncs;
|
||||
/// larger values will make initial syncs slower. The default of 10 is
|
||||
/// reasonable for most use cases.
|
||||
///
|
||||
/// default: 10
|
||||
#[serde(default = "default_incremental_sync_max_timeline_size")]
|
||||
pub incremental_sync_max_timeline_size: usize,
|
||||
|
||||
/// Set this to true for continuwuity to compress HTTP response bodies using
|
||||
/// zstd. This option does nothing if continuwuity was not built with
|
||||
/// `zstd_compression` feature. Please be aware that enabling HTTP
|
||||
|
|
@@ -2459,8 +2449,6 @@ fn default_typing_client_timeout_min_s() -> u64 { 15 }
|
|||
|
||||
fn default_typing_client_timeout_max_s() -> u64 { 45 }
|
||||
|
||||
fn default_incremental_sync_max_timeline_size() -> usize { 10 }
|
||||
|
||||
fn default_rocksdb_recovery_mode() -> u8 { 1 }
|
||||
|
||||
fn default_rocksdb_log_level() -> String { "error".to_owned() }
|
||||
|
|
|
|||
Loading…
Add table
Reference in a new issue