refactor(spaces): extract hooks into service methods, minimize touchpoints
Some checks failed
Documentation / Build and Deploy Documentation (pull_request) Has been skipped
Checks / Prek / Pre-commit & Formatting (pull_request) Failing after 5s
Checks / Prek / Clippy and Cargo Tests (pull_request) Failing after 5s
Update flake hashes / update-flake-hashes (pull_request) Failing after 5s

Move all space roles logic out of append.rs, build.rs, and join.rs
into service methods (on_pdu_appended, validate_pl_change,
check_join_allowed). Existing files now have single-line call sites
instead of inline logic. Extract flush_caches helper to deduplicate
cache clearing.
This commit is contained in:
ember33 2026-03-19 18:32:05 +01:00
parent 07c38c3339
commit 5740f72b4d
4 changed files with 179 additions and 159 deletions

View file

@ -347,30 +347,11 @@ pub async fn join_room_by_id_helper(
}
}
let parent_spaces = if services.rooms.roles.is_enabled() {
services.rooms.roles.get_parent_spaces(room_id).await
} else {
Vec::new()
};
if !parent_spaces.is_empty() {
let mut qualifies_in_any = false;
for parent_space in &parent_spaces {
if services
.rooms
.roles
.user_qualifies_for_room(parent_space, room_id, sender_user)
.await
{
qualifies_in_any = true;
break;
}
}
if !qualifies_in_any {
return Err!(Request(Forbidden(
"You do not have the required Space roles to join this room"
)));
}
}
services
.rooms
.roles
.check_join_allowed(room_id, sender_user)
.await?;
if server_in_room {
join_room_by_id_helper_local(services, sender_user, room_id, reason, servers, state_lock)

View file

@ -9,7 +9,9 @@ use std::{
use async_trait::async_trait;
use conduwuit::{
Event, Result, Server, debug, debug_warn, implement, info, matrix::pdu::PduBuilder, warn,
Err, Event, Result, Server, debug, debug_warn, implement, info,
matrix::pdu::{PduBuilder, PduEvent},
warn,
};
use conduwuit_core::{
matrix::space_roles::{
@ -40,6 +42,15 @@ use tokio::sync::RwLock;
use crate::{Dep, globals, rooms};
/// Drop every in-memory space-roles cache at once.
///
/// Used both on cache-clear requests and when the cache exceeds the
/// configured capacity threshold. Each map is locked and cleared
/// independently, one after another, so no two write guards are ever
/// held at the same time (avoids lock-ordering hazards with readers).
#[implement(Service)]
async fn flush_caches(&self) {
	self.roles.write().await.clear();
	self.user_roles.write().await.clear();
	self.room_requirements.write().await.clear();
	self.room_to_space.write().await.clear();
	self.space_to_rooms.write().await.clear();
}
/// The state event type under which space role definitions are stored.
fn roles_event_type() -> StateEventType {
	SPACE_ROLES_EVENT_TYPE.to_owned().into()
}
@ -132,12 +143,7 @@ impl crate::Service for Service {
if !self.is_enabled() {
return;
}
self.roles.write().await.clear();
self.user_roles.write().await.clear();
self.room_requirements.write().await.clear();
self.room_to_space.write().await.clear();
self.space_to_rooms.write().await.clear();
self.flush_caches().await;
}
async fn worker(self: Arc<Self>) -> Result<()> {
@ -254,11 +260,7 @@ pub async fn populate_space(&self, space_id: &RoomId) {
>= usize::try_from(self.server.config.space_roles_cache_flush_threshold)
.unwrap_or(usize::MAX)
{
self.roles.write().await.clear();
self.user_roles.write().await.clear();
self.room_requirements.write().await.clear();
self.room_to_space.write().await.clear();
self.space_to_rooms.write().await.clear();
self.flush_caches().await;
debug_warn!("Space roles cache exceeded capacity, cleared");
}
@ -669,6 +671,154 @@ async fn invite_and_join_user(
Ok(())
}
/// Called from append_pdu after a PDU is persisted. Dispatches to the
/// appropriate handler based on event type.
#[implement(Service)]
pub fn on_pdu_appended(self: &Arc<Self>, room_id: &RoomId, pdu: &PduEvent) {
	// Only state events are relevant here; anything without a state key
	// is ignored outright.
	let Some(state_key) = pdu.state_key() else {
		return;
	};
	let event_type_str = pdu.event_type().to_string();
	match event_type_str.as_str() {
		// Any space-roles state event: hand the (room, type, key) triple
		// to the state-change handler.
		| SPACE_ROLES_EVENT_TYPE
		| SPACE_ROLE_MEMBER_EVENT_TYPE
		| SPACE_ROLE_ROOM_EVENT_TYPE
		| SPACE_CASCADING_EVENT_TYPE => {
			self.handle_state_event_change(
				room_id.to_owned(),
				event_type_str,
				state_key.to_owned(),
			);
		},
		// Not a space-roles event: check for the two builtin event kinds
		// we also care about. A PDU has exactly one kind, so these arms
		// are mutually exclusive.
		| _ => match pdu.kind() {
			// m.space.child — the state key is the child room's ID.
			| ruma::events::TimelineEventType::SpaceChild => {
				if let Ok(child_room_id) = RoomId::parse(&*state_key) {
					self.handle_space_child_change(
						room_id.to_owned(),
						child_room_id.to_owned(),
					);
				}
			},
			// m.room.member — only join transitions are of interest; the
			// state key is the joining user's ID.
			| ruma::events::TimelineEventType::RoomMember => {
				let Ok(content) = pdu.get_content::<RoomMemberEventContent>() else {
					return;
				};
				if content.membership == MembershipState::Join {
					if let Ok(user_id) = UserId::parse(&*state_key) {
						self.handle_space_member_join(
							room_id.to_owned(),
							user_id.to_owned(),
						);
					}
				}
			},
			| _ => {},
		},
	}
}
/// Called from build_and_append_pdu to validate PL changes don't conflict
/// with space-granted power levels.
///
/// Rejects the proposed `m.room.power_levels` content when it would omit
/// or alter the power level of any joined user whose level is managed by
/// a role in one of this room's parent spaces. The server user is exempt
/// so the service's own cascading PL updates are never rejected.
#[implement(Service)]
pub async fn validate_pl_change(
	&self,
	room_id: &RoomId,
	sender: &UserId,
	proposed: &RoomPowerLevelsEventContent,
) -> Result {
	// Automated space-role PL syncs are sent as the server user; they
	// must bypass enforcement or the cascade would reject itself.
	if sender == <OwnedUserId as AsRef<UserId>>::as_ref(&self.services.globals.server_user) {
		return Ok(());
	}
	// Rooms outside any space carry no space-managed power levels.
	let parent_spaces = self.get_parent_spaces(room_id).await;
	if parent_spaces.is_empty() {
		return Ok(());
	}
	// Snapshot (user -> assigned roles) and role definitions for each
	// parent space while holding both read guards, then let the guards
	// drop at the end of this block so the caches are NOT locked across
	// the awaits in the enforcement loop below.
	let space_data: Vec<(Vec<(OwnedUserId, HashSet<String>)>, BTreeMap<String, RoleDefinition>)> = {
		let user_roles_guard = self.user_roles.read().await;
		let roles_guard = self.roles.read().await;
		parent_spaces
			.iter()
			.filter_map(|ps| {
				// Parent spaces absent from either cache are skipped.
				let space_users = user_roles_guard.get(ps)?;
				let role_defs = roles_guard.get(ps)?;
				Some((
					space_users
						.iter()
						.map(|(u, r)| (u.clone(), r.clone()))
						.collect(),
					role_defs.clone(),
				))
			})
			.collect()
	};
	for (space_users, role_defs) in &space_data {
		for (user_id, assigned_roles) in space_users {
			// Only enforce for users actually joined to this room.
			if !self.services.state_cache.is_joined(user_id, room_id).await {
				continue;
			}
			// Effective space-granted PL: the highest power_level among
			// the user's assigned roles (roles without a PL are ignored;
			// None means no role grants this user a power level).
			let space_pl = assigned_roles
				.iter()
				.filter_map(|r| role_defs.get(r)?.power_level)
				.max();
			if let Some(space_pl) = space_pl {
				match proposed.users.get(user_id) {
					// User omitted from the proposed map: only acceptable
					// when the room default already equals the granted PL.
					| None if i64::from(proposed.users_default) != space_pl => {
						debug_warn!(
							user_id = %user_id,
							room_id = %room_id,
							space_pl,
							"Rejecting PL change: space-managed user omitted"
						);
						return Err!(Request(Forbidden(
							"Cannot omit a user whose power level is managed by Space roles"
						)));
					},
					// User present but with a PL differing from the one
					// granted by the space role: reject.
					| Some(pl) if i64::from(*pl) != space_pl => {
						debug_warn!(
							user_id = %user_id,
							room_id = %room_id,
							proposed_pl = i64::from(*pl),
							space_pl,
							"Rejecting PL change conflicting with space role"
						);
						return Err!(Request(Forbidden(
							"Cannot change power level that is set by Space roles"
						)));
					},
					// Present and matching (or default matches): allowed.
					| _ => {},
				}
			}
		}
	}
	Ok(())
}
/// Called from join_room_by_id_helper to check if a user has the required
/// Space roles to join a room.
#[implement(Service)]
pub async fn check_join_allowed(&self, room_id: &RoomId, user_id: &UserId) -> Result {
	// Feature disabled: every join is permitted.
	if !self.is_enabled() {
		return Ok(());
	}
	let parent_spaces = self.get_parent_spaces(room_id).await;
	// No parent spaces means no role requirements; otherwise qualifying
	// in any single parent space is sufficient.
	let mut allowed = parent_spaces.is_empty();
	for parent_space in &parent_spaces {
		if self
			.user_qualifies_for_room(parent_space, room_id, user_id)
			.await
		{
			allowed = true;
			break;
		}
	}
	if allowed {
		Ok(())
	} else {
		Err!(Request(Forbidden("You do not have the required Space roles to join this room")))
	}
}
#[implement(Service)]
async fn sync_power_levels_for_children(&self, space_id: &RoomId) {
let child_rooms = self.get_child_rooms(space_id).await;

View file

@ -9,10 +9,6 @@ use conduwuit_core::{
matrix::{
event::Event,
pdu::{PduCount, PduEvent, PduId, RawPduId},
space_roles::{
SPACE_CASCADING_EVENT_TYPE, SPACE_ROLE_MEMBER_EVENT_TYPE, SPACE_ROLE_ROOM_EVENT_TYPE,
SPACE_ROLES_EVENT_TYPE,
},
},
utils::{self, ReadyExt},
};
@ -363,47 +359,7 @@ where
| _ => {},
}
let roles: Arc<crate::rooms::roles::Service> = Arc::clone(&*self.services.roles);
if let Some(state_key) = pdu.state_key() {
let event_type_str = pdu.event_type().to_string();
match event_type_str.as_str() {
| SPACE_ROLES_EVENT_TYPE
| SPACE_ROLE_MEMBER_EVENT_TYPE
| SPACE_ROLE_ROOM_EVENT_TYPE
| SPACE_CASCADING_EVENT_TYPE => {
if matches!(
self.services.state_accessor.get_room_type(room_id).await,
Ok(ruma::room::RoomType::Space)
) {
roles.handle_state_event_change(
room_id.to_owned(),
event_type_str,
state_key.to_owned(),
);
}
},
| _ => {},
}
}
if let Some(state_key) = pdu.state_key() {
if *pdu.kind() == TimelineEventType::SpaceChild {
if let Ok(child_room_id) = ruma::RoomId::parse(state_key) {
roles.handle_space_child_change(room_id.to_owned(), child_room_id.to_owned());
}
}
}
if *pdu.kind() == TimelineEventType::RoomMember
&& let Some(state_key) = pdu.state_key()
&& let Ok(content) =
pdu.get_content::<ruma::events::room::member::RoomMemberEventContent>()
&& content.membership == ruma::events::room::member::MembershipState::Join
&& let Ok(user_id) = UserId::parse(state_key)
&& matches!(
self.services.state_accessor.get_room_type(room_id).await,
Ok(ruma::room::RoomType::Space)
) {
roles.handle_space_member_join(room_id.to_owned(), user_id.to_owned());
}
self.services.roles.on_pdu_appended(room_id, &pdu);
// CONCERN: If we receive events with a relation out-of-order, we never write
// their relation / thread. We need some kind of way to trigger when we receive

View file

@ -1,17 +1,14 @@
use std::{
collections::{BTreeMap, HashSet},
iter::once,
};
use std::{collections::HashSet, iter::once};
use conduwuit::{debug_warn, trace};
use conduwuit::trace;
use conduwuit_core::{
Err, Result, implement,
matrix::{event::Event, pdu::PduBuilder, space_roles::RoleDefinition},
matrix::{event::Event, pdu::PduBuilder},
utils::{IterStream, ReadyExt},
};
use futures::{FutureExt, StreamExt};
use ruma::{
OwnedEventId, OwnedServerName, OwnedUserId, RoomId, RoomVersionId, UserId,
OwnedEventId, OwnedServerName, RoomId, RoomVersionId, UserId,
events::{
TimelineEventType,
room::{
@ -100,78 +97,14 @@ pub async fn build_and_append_pdu(
)));
}
}
type SpaceEnforcementData =
(Vec<(OwnedUserId, HashSet<String>)>, BTreeMap<String, RoleDefinition>);
if *pdu.kind() == TimelineEventType::RoomPowerLevels
&& pdu.sender()
!= <OwnedUserId as AsRef<UserId>>::as_ref(&self.services.globals.server_user)
{
use ruma::events::room::power_levels::RoomPowerLevelsEventContent;
let parent_spaces = self.services.roles.get_parent_spaces(&room_id).await;
if !parent_spaces.is_empty()
&& let Ok(proposed) = pdu.get_content::<RoomPowerLevelsEventContent>()
if *pdu.kind() == TimelineEventType::RoomPowerLevels {
if let Ok(proposed) =
pdu.get_content::<ruma::events::room::power_levels::RoomPowerLevelsEventContent>()
{
let space_data: Vec<SpaceEnforcementData> = {
let user_roles_guard = self.services.roles.user_roles.read().await;
let roles_guard = self.services.roles.roles.read().await;
parent_spaces
.iter()
.filter_map(|ps| {
let space_users = user_roles_guard.get(ps)?;
let role_defs = roles_guard.get(ps)?;
Some((
space_users
.iter()
.map(|(u, r)| (u.clone(), r.clone()))
.collect(),
role_defs.clone(),
))
})
.collect()
};
for (space_users, role_defs) in &space_data {
for (user_id, assigned_roles) in space_users {
if !self.services.state_cache.is_joined(user_id, &room_id).await {
continue;
}
let space_pl = assigned_roles
.iter()
.filter_map(|r| role_defs.get(r)?.power_level)
.max();
if let Some(space_pl) = space_pl {
match proposed.users.get(user_id) {
| None if i64::from(proposed.users_default) != space_pl => {
debug_warn!(
user_id = %user_id,
room_id = %room_id,
space_pl,
"Rejecting PL change: space-managed user omitted"
);
return Err!(Request(Forbidden(
"Cannot omit a user whose power level is managed by Space \
roles"
)));
},
| Some(pl) if i64::from(*pl) != space_pl => {
debug_warn!(
user_id = %user_id,
room_id = %room_id,
proposed_pl = i64::from(*pl),
space_pl,
"Rejecting PL change conflicting with space role"
);
return Err!(Request(Forbidden(
"Cannot change power level that is set by Space roles"
)));
},
| _ => {},
}
}
}
}
self.services
.roles
.validate_pl_change(&room_id, pdu.sender(), &proposed)
.await?;
}
}