fix(spaces): address 10-agent review findings
Some checks failed
Documentation / Build and Deploy Documentation (pull_request) Has been skipped
Checks / Prek / Pre-commit & Formatting (pull_request) Failing after 4s
Checks / Prek / Clippy and Cargo Tests (pull_request) Failing after 5s
Update flake hashes / update-flake-hashes (pull_request) Failing after 5s

- Make cache fields private, add get_user_roles_in_space and
  get_room_requirements_in_space accessor methods for admin layer
- Add flush_space_from_cache and call it when cascading is disabled
  for a space (prevents stale enforcement data)
- Fix err!(Err("...")) -> err!("...") (redundant variant wrapper)
- Fix variable naming: sender -> server_user in ensure_default_roles
- Fix UFCS turbofish in validate_pl_change to simpler .as_str()
- Import Semaphore instead of inline tokio::sync::Semaphore path
- Add power_level bounds validation in add command (Matrix Int range)
- Add room-is-child-of-space validation in require command
- Handle SPACE_CASCADING_EVENT_TYPE in enforcement dispatch to flush
  cache when a space is disabled
This commit is contained in:
ember33 2026-03-19 19:31:27 +01:00
parent 2fbbf76692
commit 5cc90faa42
2 changed files with 105 additions and 50 deletions

View file

@ -73,9 +73,8 @@ macro_rules! custom_state_pdu {
($event_type:expr, $state_key:expr, $content:expr) => {
PduBuilder {
event_type: $event_type.to_owned().into(),
content: to_raw_value($content).map_err(|e| {
conduwuit::err!(Err("Failed to serialize state event content: {e}"))
})?,
content: to_raw_value($content)
.map_err(|e| conduwuit::err!("Failed to serialize state event content: {e}"))?,
state_key: Some($state_key.to_owned().into()),
..PduBuilder::default()
}
@ -211,6 +210,17 @@ async fn add(
power_level: Option<i64>,
) -> Result {
let space_id = resolve_space!(self, space);
if let Some(pl) = power_level {
if pl > i64::from(ruma::Int::MAX) || pl < i64::from(ruma::Int::MIN) {
return Err!(
"Power level must be between {} and {}.",
ruma::Int::MIN,
ruma::Int::MAX
);
}
}
let roles_event_type = roles_event_type();
let mut content: SpaceRolesEventContent = self
@ -421,6 +431,11 @@ async fn require(
) -> Result {
let space_id = resolve_space!(self, space);
let child_rooms = self.services.rooms.roles.get_child_rooms(&space_id).await;
if !child_rooms.contains(&room_id) {
return Err!("Room {room_id} is not a child of space {space_id}.");
}
let roles_event_type = roles_event_type();
let role_defs: SpaceRolesEventContent = self
.services
@ -495,22 +510,21 @@ async fn unrequire(
async fn user(&self, space: OwnedRoomOrAliasId, user_id: OwnedUserId) -> Result {
let space_id = resolve_space!(self, space);
let user_roles = self.services.rooms.roles.user_roles.read().await;
let roles = user_roles
.get(&space_id)
.and_then(|space_users| space_users.get(&user_id));
let roles = self
.services
.rooms
.roles
.get_user_roles_in_space(&space_id, &user_id)
.await;
match roles {
| Some(roles) if !roles.is_empty() => {
let roles_list: Vec<&String> = roles.iter().collect();
self.write_str(&format!(
"Roles for {user_id} in space {space_id}:\n```\n{}\n```",
roles_list
let list: String = roles
.iter()
.map(|r| format!("- {r}"))
.collect::<Vec<_>>()
.join("\n")
))
.join("\n");
self.write_str(&format!("Roles for {user_id} in space {space_id}:\n```\n{list}\n```"))
.await
},
| _ =>
@ -523,21 +537,22 @@ async fn user(&self, space: OwnedRoomOrAliasId, user_id: OwnedUserId) -> Result
async fn room(&self, space: OwnedRoomOrAliasId, room_id: OwnedRoomId) -> Result {
let space_id = resolve_space!(self, space);
let room_reqs = self.services.rooms.roles.room_requirements.read().await;
let requirements = room_reqs
.get(&space_id)
.and_then(|space_rooms| space_rooms.get(&room_id));
let reqs = self
.services
.rooms
.roles
.get_room_requirements_in_space(&space_id, &room_id)
.await;
match requirements {
match reqs {
| Some(reqs) if !reqs.is_empty() => {
let reqs_list: Vec<&String> = reqs.iter().collect();
self.write_str(&format!(
"Required roles for room {room_id} in space {space_id}:\n```\n{}\n```",
reqs_list
let list: String = reqs
.iter()
.map(|r| format!("- {r}"))
.collect::<Vec<_>>()
.join("\n")
.join("\n");
self.write_str(&format!(
"Required roles for room {room_id} in space {space_id}:\n```\n{list}\n```"
))
.await
},

View file

@ -38,10 +38,25 @@ use ruma::{
room::RoomType,
};
use serde_json::value::to_raw_value;
use tokio::sync::RwLock;
use tokio::sync::{RwLock, Semaphore};
use crate::{Dep, globals, rooms};
#[implement(Service)]
/// Evict every cached record scoped to `space_id`.
///
/// Called when cascading enforcement is disabled for a space so that no
/// stale role/requirement data lingers in the in-memory caches. Each lock
/// is taken briefly and released before the next — no lock is held across
/// another `.await` on a different lock's critical section.
pub async fn flush_space_from_cache(&self, space_id: &RoomId) {
	// Per-space maps: drop the space's entry outright.
	self.roles.write().await.remove(space_id);
	self.user_roles.write().await.remove(space_id);
	self.room_requirements.write().await.remove(space_id);

	// Reverse index is keyed by child room: strip this space from every
	// room's parent set, and discard rooms left with no parents at all.
	self.room_to_space.write().await.retain(|_, parents| {
		parents.remove(space_id);
		!parents.is_empty()
	});

	// Forward index: drop the space's child-room set.
	self.space_to_rooms.write().await.remove(space_id);
}
#[implement(Service)]
async fn flush_caches(&self) {
self.roles.write().await.clear();
@ -70,19 +85,12 @@ fn cascading_event_type() -> StateEventType {
pub struct Service {
services: Services,
server: Arc<Server>,
/// Space ID -> role name -> role definition
pub roles: RwLock<HashMap<OwnedRoomId, BTreeMap<String, RoleDefinition>>>,
/// Space ID -> user ID -> assigned role names
pub user_roles: RwLock<HashMap<OwnedRoomId, HashMap<OwnedUserId, HashSet<String>>>>,
/// Space ID -> child room ID -> required role names
pub room_requirements: RwLock<HashMap<OwnedRoomId, HashMap<OwnedRoomId, HashSet<String>>>>,
/// Child room ID -> parent space IDs (a room can be in multiple spaces)
pub room_to_space: RwLock<HashMap<OwnedRoomId, HashSet<OwnedRoomId>>>,
/// Space ID -> child room IDs (forward index for fast child lookups)
pub space_to_rooms: RwLock<HashMap<OwnedRoomId, HashSet<OwnedRoomId>>>,
/// Semaphore to limit concurrent enforcement tasks
pub enforcement_semaphore: tokio::sync::Semaphore,
/// Spaces that currently have an enforcement task in progress
roles: RwLock<HashMap<OwnedRoomId, BTreeMap<String, RoleDefinition>>>,
user_roles: RwLock<HashMap<OwnedRoomId, HashMap<OwnedUserId, HashSet<String>>>>,
room_requirements: RwLock<HashMap<OwnedRoomId, HashMap<OwnedRoomId, HashSet<String>>>>,
room_to_space: RwLock<HashMap<OwnedRoomId, HashSet<OwnedRoomId>>>,
space_to_rooms: RwLock<HashMap<OwnedRoomId, HashSet<OwnedRoomId>>>,
enforcement_semaphore: Semaphore,
pending_enforcement: RwLock<HashSet<OwnedRoomId>>,
}
@ -114,7 +122,7 @@ impl crate::Service for Service {
room_requirements: RwLock::new(HashMap::new()),
room_to_space: RwLock::new(HashMap::new()),
space_to_rooms: RwLock::new(HashMap::new()),
enforcement_semaphore: tokio::sync::Semaphore::new(4),
enforcement_semaphore: Semaphore::new(4),
pending_enforcement: RwLock::new(HashSet::new()),
}))
}
@ -205,7 +213,7 @@ pub async fn ensure_default_roles(&self, space_id: &RoomId) -> Result {
return Ok(());
}
let sender = self.services.globals.server_user.as_ref();
let server_user = self.services.globals.server_user.as_ref();
let state_lock = self.services.state.mutex.lock(space_id).await;
let roles_event_type = roles_event_type();
@ -233,16 +241,15 @@ pub async fn ensure_default_roles(&self, space_id: &RoomId) -> Result {
let pdu = PduBuilder {
event_type: ruma::events::TimelineEventType::from(SPACE_ROLES_EVENT_TYPE.to_owned()),
content: to_raw_value(&content).map_err(|e| {
conduwuit::err!(Err("Failed to serialize SpaceRolesEventContent: {e}"))
})?,
content: to_raw_value(&content)
.map_err(|e| conduwuit::err!("Failed to serialize SpaceRolesEventContent: {e}"))?,
state_key: Some(String::new().into()),
..PduBuilder::default()
};
self.services
.timeline
.build_and_append_pdu(pdu, sender, Some(space_id), &state_lock)
.build_and_append_pdu(pdu, server_user, Some(space_id), &state_lock)
.await?;
debug!(space_id = %space_id, event_type = SPACE_ROLES_EVENT_TYPE, "Sent default space roles event");
@ -494,6 +501,34 @@ pub async fn get_child_rooms(&self, space_id: &RoomId) -> Vec<OwnedRoomId> {
.unwrap_or_default()
}
#[implement(Service)]
/// Look up the role names assigned to `user_id` within `space_id`.
///
/// Returns a clone of the cached set so the read lock is released before
/// the caller inspects the data; `None` when either the space or the user
/// has no cached entry.
pub async fn get_user_roles_in_space(
	&self,
	space_id: &RoomId,
	user_id: &UserId,
) -> Option<HashSet<String>> {
	let cache = self.user_roles.read().await;
	cache
		.get(space_id)
		.and_then(|space_users| space_users.get(user_id).cloned())
}
#[implement(Service)]
/// Look up the role names required to join `room_id` as a child of
/// `space_id`.
///
/// Returns a clone of the cached set so the read lock is released before
/// the caller inspects the data; `None` when either the space or the room
/// has no cached entry.
pub async fn get_room_requirements_in_space(
	&self,
	space_id: &RoomId,
	room_id: &RoomId,
) -> Option<HashSet<String>> {
	let cache = self.room_requirements.read().await;
	cache
		.get(space_id)
		.and_then(|space_rooms| space_rooms.get(room_id).cloned())
}
#[implement(Service)]
pub async fn sync_power_levels(&self, space_id: &RoomId, room_id: &RoomId) -> Result {
if !self.is_enabled_for_space(space_id).await {
@ -723,7 +758,7 @@ pub async fn validate_pl_change(
sender: &UserId,
proposed: &RoomPowerLevelsEventContent,
) -> Result {
if sender == <OwnedUserId as AsRef<UserId>>::as_ref(&self.services.globals.server_user) {
if sender == self.services.globals.server_user.as_str() {
return Ok(());
}
@ -911,6 +946,11 @@ impl Service {
}
}
},
| SPACE_CASCADING_EVENT_TYPE => {
if !this.is_enabled_for_space(&space_id).await {
this.flush_space_from_cache(&space_id).await;
}
},
| _ => {},
}
}