diff --git a/changelog.d/+space-permission-cascading.feature.md b/changelog.d/+space-permission-cascading.feature.md new file mode 100644 index 00000000..4f25977d --- /dev/null +++ b/changelog.d/+space-permission-cascading.feature.md @@ -0,0 +1 @@ +Add Space permission cascading: power levels cascade from Spaces to child rooms, role-based room access with custom roles, continuous enforcement (auto-join/kick), and admin commands for role management. Server-wide default controlled by `space_permission_cascading` config flag (off by default), with per-Space overrides via `!admin space roles enable/disable <space>`. diff --git a/conduwuit-example.toml b/conduwuit-example.toml index 82b78fde..f0272532 100644 --- a/conduwuit-example.toml +++ b/conduwuit-example.toml @@ -470,6 +470,18 @@ # #suspend_on_register = false +# Server-wide default for space permission cascading (power levels and +# role-based access). Individual Spaces can override this via the +# `com.continuwuity.space.cascading` state event or the admin command +# `!admin space roles enable/disable <space>`. +# +#space_permission_cascading = false + +# Maximum number of spaces to cache role data for. When exceeded the +# cache is cleared and repopulated on demand. +# +#space_roles_cache_flush_threshold = 1000 + # Enabling this setting opens registration to anyone without restrictions. # This makes your server vulnerable to abuse # diff --git a/docs/plans/2026-03-17-space-permission-cascading-design.md b/docs/plans/2026-03-17-space-permission-cascading-design.md new file mode 100644 index 00000000..60d3e967 --- /dev/null +++ b/docs/plans/2026-03-17-space-permission-cascading-design.md @@ -0,0 +1,226 @@ +# Space Permission Cascading — Design Document + +**Date:** 2026-03-17 +**Status:** Approved + +## Overview + +Server-side feature that allows user rights in a Space to cascade down to its +direct child rooms. Includes power level cascading and role-based room access +control. 
Enabled via a server-wide configuration flag, disabled by default. + +## Requirements + +1. Power levels defined in a Space cascade to all direct child rooms (Space + always wins over per-room overrides). +2. Admins can define custom roles in a Space and assign them to users. +3. Child rooms can require one or more roles for access. +4. Enforcement is continuous — role revocation auto-kicks users from rooms they + no longer qualify for. +5. Users are auto-joined to all qualifying child rooms when they join a Space or + receive a new role. +6. Cascading applies to direct parent Space only; no nested cascade through + sub-spaces. +7. Feature is toggled by a single server-wide config flag + (`space_permission_cascading`), off by default. + +## Configuration + +```toml +# conduwuit-example.toml + +# Enable space permission cascading (power levels and role-based access). +# When enabled, power levels cascade from Spaces to child rooms and rooms +# can require roles for access. Applies to all Spaces on this server. +# Default: false +space_permission_cascading = false +``` + +## Custom State Events + +All events live in the Space room. + +### `m.space.roles` (state key: `""`) + +Defines the available roles for the Space. Two default roles (`admin` and `mod`) +are created automatically when a Space is first encountered with the feature +enabled. + +```json +{ + "roles": { + "admin": { + "description": "Space administrator", + "power_level": 100 + }, + "mod": { + "description": "Space moderator", + "power_level": 50 + }, + "nsfw": { + "description": "Access to NSFW content" + }, + "vip": { + "description": "VIP member" + } + } +} +``` + +- `description` (string, required): Human-readable description. +- `power_level` (integer, optional): If present, users with this role receive + this power level in all child rooms. When a user holds multiple roles with + power levels, the highest value wins. 
+ +### `m.space.role.member` (state key: user ID) + +Assigns roles to a user within the Space. + +```json +{ + "roles": ["nsfw", "vip"] +} +``` + +### `m.space.role.room` (state key: room ID) + +Declares which roles a child room requires. A user must hold **all** listed +roles to access the room. + +```json +{ + "required_roles": ["nsfw"] +} +``` + +## Enforcement Rules + +All enforcement is skipped when `space_permission_cascading = false`. + +### 1. Join gating + +When a user attempts to join a room that is a direct child of a Space: + +- Look up the room's `m.space.role.room` event in the parent Space. +- If the room has `required_roles`, check the user's `m.space.role.member`. +- Reject the join if the user is missing any required role. + +### 2. Power level override + +For every user in a child room of a Space: + +- Look up their roles via `m.space.role.member` in the parent Space. +- For each role that has a `power_level`, take the highest value. +- Override the user's power level in the child room's `m.room.power_levels`. +- Reject attempts to manually set per-room power levels that conflict with + Space-granted levels. + +### 3. Role revocation + +When an `m.space.role.member` event is updated and a role is removed: + +- Identify all child rooms that require the removed role. +- Auto-kick the user from rooms they no longer qualify for. +- Recalculate and update the user's power level in all child rooms. + +### 4. Room requirement change + +When an `m.space.role.room` event is updated with new requirements: + +- Check all current members of the room. +- Auto-kick members who do not hold all newly required roles. + +### 5. Auto-join on role grant + +When an `m.space.role.member` event is updated and a role is added: + +- Find all child rooms where the user now meets all required roles. +- Auto-join the user to qualifying rooms they are not already in. 
+ +This also applies when a user first joins the Space — they are auto-joined to +all child rooms they qualify for. Rooms with no role requirements auto-join all +Space members. + +### 6. New child room + +When a new `m.space.child` event is added to a Space: + +- Auto-join all qualifying Space members to the new child room. + +## Caching & Indexing + +The source of truth is always the state events. The server maintains an +in-memory index for fast enforcement lookups, following the same patterns as the +existing `roomid_spacehierarchy_cache`. + +### Index structures + +| Index | Source event | +|------------------------------|------------------------| +| Space → roles defined | `m.space.roles` | +| Space → user → roles | `m.space.role.member` | +| Space → room → required roles| `m.space.role.room` | +| Room → parent Space | `m.space.child` (reverse lookup) | + +The Space → child rooms mapping already exists. + +### Cache invalidation triggers + +| Event changed | Action | +|----------------------------|-----------------------------------------------------| +| `m.space.roles` | Refresh role definitions, revalidate all members | +| `m.space.role.member` | Refresh user's roles, trigger auto-join/kick | +| `m.space.role.room` | Refresh room requirements, trigger auto-join/kick | +| `m.space.child` added | Index new child, auto-join qualifying members | +| `m.space.child` removed | Remove from index (no auto-kick) | +| Server startup | Full rebuild from state events | + +## Admin Room Commands + +Roles are managed via the existing admin room interface, which sends the +appropriate state events under the hood and triggers enforcement. 
+ +``` +!admin space roles list +!admin space roles add [description] [power_level] +!admin space roles remove +!admin space roles assign +!admin space roles revoke +!admin space roles require +!admin space roles unrequire +!admin space roles user +!admin space roles room +``` + +## Architecture + +**Approach:** Hybrid — state events for definition, database cache for +enforcement. + +- State events are the source of truth and federate normally. +- The server maintains an in-memory cache/index for fast enforcement. +- Cache is invalidated on relevant state event changes and fully rebuilt on + startup. +- All enforcement hooks (join gating, PL override, auto-join, auto-kick) check + the feature flag first and no-op when disabled. +- Existing clients can manage roles via Developer Tools (custom state events). + The admin room commands provide a user-friendly interface. + +## Scope + +### In scope + +- Server-wide feature flag +- Custom state events for role definition, assignment, and room requirements +- Power level cascading (Space always wins) +- Continuous enforcement (auto-join, auto-kick) +- Admin room commands +- In-memory caching with invalidation +- Default `admin` (PL 100) and `mod` (PL 50) roles + +### Out of scope + +- Client-side UI for role management +- Nested cascade through sub-spaces +- Per-space opt-in/opt-out (it is server-wide) +- Federation-specific logic beyond normal state event replication diff --git a/docs/plans/2026-03-17-space-permission-cascading.md b/docs/plans/2026-03-17-space-permission-cascading.md new file mode 100644 index 00000000..75ae0a9e --- /dev/null +++ b/docs/plans/2026-03-17-space-permission-cascading.md @@ -0,0 +1,1206 @@ +# Space Permission Cascading Implementation Plan + +> **For Claude:** REQUIRED SUB-SKILL: Use superpowers:executing-plans to implement this plan task-by-task. + +**Goal:** Implement server-side Space permission cascading — power levels and role-based access flow from Spaces to their direct child rooms. 
+ +**Architecture:** Custom state events (`m.space.roles`, `m.space.role.member`, `m.space.role.room`) define roles in Space rooms. An in-memory cache indexes these for fast enforcement. The server intercepts joins, membership changes, and state event updates to enforce cascading. A server-wide config flag (`space_permission_cascading`) gates the entire feature. + +**Tech Stack:** Rust, ruma (Matrix types), conduwuit service layer, clap (admin commands), serde, LruCache/HashMap, tokio async + +**Design doc:** `docs/plans/2026-03-17-space-permission-cascading-design.md` + +--- + +### Task 1: Add Config Flag + +**Files:** +- Modify: `src/core/config/mod.rs` (add field to Config struct, near line 604) + +**Step 1: Add the config field** + +Add after the `suspend_on_register` field (around line 604) in the `Config` struct: + +```rust +/// Enable space permission cascading (power levels and role-based access). +/// When enabled, power levels cascade from Spaces to child rooms and rooms +/// can require roles for access. Applies to all Spaces on this server. +/// +/// default: false +#[serde(default)] +pub space_permission_cascading: bool, +``` + +**Step 2: Verify build** + +Run: `cargo check -p conduwuit-core 2>&1 | tail -20` +Expected: Compiles successfully. The `conduwuit-example.toml` is auto-generated from doc comments by the `#[config_example_generator]` macro. + +**Step 3: Commit** + +```bash +git add src/core/config/mod.rs +git commit -m "feat(spaces): add space_permission_cascading config flag" +``` + +--- + +### Task 2: Define Custom State Event Content Types + +**Files:** +- Create: `src/core/matrix/space_roles.rs` +- Modify: `src/core/matrix/mod.rs` (add module declaration) + +**Step 1: Create the event content types** + +Create `src/core/matrix/space_roles.rs` with serde types for the three custom state events: + +```rust +//! Custom state event content types for space permission cascading. +//! +//! 
These events live in Space rooms and define roles, user-role assignments, +//! and room-role requirements. + +use std::collections::BTreeMap; + +use serde::{Deserialize, Serialize}; + +/// Content for `m.space.roles` (state key: "") +/// +/// Defines available roles for a Space. +#[derive(Clone, Debug, Default, Deserialize, Serialize)] +pub struct SpaceRolesEventContent { + pub roles: BTreeMap<String, RoleDefinition>, +} + +/// A single role definition within a Space. +#[derive(Clone, Debug, Deserialize, Serialize)] +pub struct RoleDefinition { + pub description: String, + + /// If present, users with this role receive this power level in child + /// rooms. + #[serde(skip_serializing_if = "Option::is_none")] + pub power_level: Option<i64>, +} + +/// Content for `m.space.role.member` (state key: user ID) +/// +/// Assigns roles to a user within a Space. +#[derive(Clone, Debug, Default, Deserialize, Serialize)] +pub struct SpaceRoleMemberEventContent { + pub roles: Vec<String>, +} + +/// Content for `m.space.role.room` (state key: room ID) +/// +/// Declares which roles a child room requires for access. 
+#[derive(Clone, Debug, Default, Deserialize, Serialize)] +pub struct SpaceRoleRoomEventContent { + pub required_roles: Vec<String>, +} +``` + +**Step 2: Register the module** + +In `src/core/matrix/mod.rs`, add: + +```rust +pub mod space_roles; +``` + +**Step 3: Write tests for serde round-tripping** + +Add to the bottom of `src/core/matrix/space_roles.rs`: + +```rust +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn serialize_space_roles() { + let mut roles = BTreeMap::new(); + roles.insert( + "admin".to_owned(), + RoleDefinition { + description: "Space administrator".to_owned(), + power_level: Some(100), + }, + ); + roles.insert( + "nsfw".to_owned(), + RoleDefinition { + description: "NSFW access".to_owned(), + power_level: None, + }, + ); + let content = SpaceRolesEventContent { roles }; + let json = serde_json::to_string(&content).unwrap(); + let deserialized: SpaceRolesEventContent = serde_json::from_str(&json).unwrap(); + assert_eq!(deserialized.roles.len(), 2); + assert_eq!(deserialized.roles["admin"].power_level, Some(100)); + assert!(deserialized.roles["nsfw"].power_level.is_none()); + } + + #[test] + fn serialize_role_member() { + let content = SpaceRoleMemberEventContent { + roles: vec!["nsfw".to_owned(), "vip".to_owned()], + }; + let json = serde_json::to_string(&content).unwrap(); + let deserialized: SpaceRoleMemberEventContent = serde_json::from_str(&json).unwrap(); + assert_eq!(deserialized.roles, vec!["nsfw", "vip"]); + } + + #[test] + fn serialize_role_room() { + let content = SpaceRoleRoomEventContent { + required_roles: vec!["nsfw".to_owned()], + }; + let json = serde_json::to_string(&content).unwrap(); + let deserialized: SpaceRoleRoomEventContent = serde_json::from_str(&json).unwrap(); + assert_eq!(deserialized.required_roles, vec!["nsfw"]); + } + + #[test] + fn empty_roles_deserialize() { + let json = r#"{"roles":{}}"#; + let content: SpaceRolesEventContent = serde_json::from_str(json).unwrap(); + assert!(content.roles.is_empty()); + } +} 
+``` + +**Step 4: Run tests** + +Run: `cargo test -p conduwuit-core space_roles 2>&1 | tail -20` +Expected: All 4 tests pass. + +**Step 5: Commit** + +```bash +git add src/core/matrix/space_roles.rs src/core/matrix/mod.rs +git commit -m "feat(spaces): add custom state event types for space roles" +``` + +--- + +### Task 3: Create the Space Roles Service + +**Files:** +- Create: `src/service/rooms/roles/mod.rs` +- Modify: `src/service/rooms/mod.rs` (add module) +- Modify: `src/service/service.rs` or equivalent service registry (register new service) + +This is the core service that manages the in-memory cache and provides lookup methods. + +**Step 1: Create the service skeleton** + +Create `src/service/rooms/roles/mod.rs`: + +```rust +//! Space permission cascading service. +//! +//! Maintains an in-memory index of space roles, user-role assignments, and +//! room-role requirements. Source of truth is always the state events in the +//! Space room. + +use std::{ + collections::{BTreeMap, HashMap, HashSet}, + fmt::Write, + sync::Arc, +}; + +use async_trait::async_trait; +use conduwuit_core::{ + Result, implement, + matrix::space_roles::{ + SpaceRoleRoomEventContent, SpaceRoleMemberEventContent, SpaceRolesEventContent, + RoleDefinition, + }, +}; +use ruma::{OwnedRoomId, OwnedUserId, RoomId, UserId}; +use tokio::sync::RwLock; + +use crate::{Dep, rooms}; + +pub struct Service { + services: Services, + /// Space ID -> role definitions + pub roles: RwLock>>, + /// Space ID -> user ID -> assigned roles + pub user_roles: RwLock>>>, + /// Space ID -> child room ID -> required roles + pub room_requirements: RwLock>>>, + /// Child room ID -> parent Space ID (reverse lookup) + pub room_to_space: RwLock>, +} + +struct Services { + state_accessor: Dep, + state_cache: Dep, + state: Dep, + spaces: Dep, + timeline: Dep, + server: Arc, +} + +#[async_trait] +impl crate::Service for Service { + fn build(args: crate::Args<'_>) -> Result> { + Ok(Arc::new(Self { + services: Services { + 
state_accessor: args + .depend::("rooms::state_accessor"), + state_cache: args.depend::("rooms::state_cache"), + state: args.depend::("rooms::state"), + spaces: args.depend::("rooms::spaces"), + timeline: args.depend::("rooms::timeline"), + server: args.server.clone(), + }, + roles: RwLock::new(HashMap::new()), + user_roles: RwLock::new(HashMap::new()), + room_requirements: RwLock::new(HashMap::new()), + room_to_space: RwLock::new(HashMap::new()), + })) + } + + async fn memory_usage(&self, out: &mut (dyn Write + Send)) -> Result { + let roles = self.roles.read().await.len(); + let user_roles = self.user_roles.read().await.len(); + let room_requirements = self.room_requirements.read().await.len(); + let room_to_space = self.room_to_space.read().await.len(); + + writeln!(out, "space_roles_definitions: {roles}")?; + writeln!(out, "space_user_roles: {user_roles}")?; + writeln!(out, "space_room_requirements: {room_requirements}")?; + writeln!(out, "space_room_to_space_index: {room_to_space}")?; + Ok(()) + } + + async fn clear_cache(&self) { + self.roles.write().await.clear(); + self.user_roles.write().await.clear(); + self.room_requirements.write().await.clear(); + self.room_to_space.write().await.clear(); + } + + fn name(&self) -> &str { crate::service::make_name(std::module_path!()) } +} +``` + +**Step 2: Register the module in rooms** + +In `src/service/rooms/mod.rs`, add: + +```rust +pub mod roles; +``` + +**Step 3: Register the service in the service registry** + +Find where services are registered (likely in `src/service/services.rs` or similar) and add the `rooms::roles::Service` following the same pattern as other room services. This requires reading the exact registration pattern used. + +**Step 4: Verify build** + +Run: `cargo check -p conduwuit-service 2>&1 | tail -20` +Expected: Compiles successfully. 
+ +**Step 5: Commit** + +```bash +git add src/service/rooms/roles/ src/service/rooms/mod.rs +git commit -m "feat(spaces): add space roles service skeleton with cache structures" +``` + +--- + +### Task 4: Implement Cache Population and Lookup Methods + +**Files:** +- Modify: `src/service/rooms/roles/mod.rs` + +**Step 1: Add the `is_enabled()` check** + +```rust +#[implement(Service)] +pub fn is_enabled(&self) -> bool { + self.services.server.config.space_permission_cascading +} +``` + +**Step 2: Add cache population from state events** + +```rust +/// Rebuild the cache for a single Space by reading its state events. +#[implement(Service)] +pub async fn populate_space(&self, space_id: &RoomId) -> Result { + if !self.is_enabled() { + return Ok(()); + } + + // Load role definitions from m.space.roles + let roles_content: Option = self + .services + .state_accessor + .room_state_get_content(space_id, &StateEventType::from("m.space.roles"), "") + .await + .ok(); + + if let Some(content) = roles_content { + self.roles + .write() + .await + .insert(space_id.to_owned(), content.roles); + } + + // Load user role assignments from m.space.role.member state events + // Iterate all state events of type m.space.role.member + // Load room requirements from m.space.role.room state events + // Build room_to_space reverse index from m.space.child events + + Ok(()) +} +``` + +**Step 3: Add lookup methods** + +```rust +/// Get a user's effective power level from Space roles. +/// Returns None if user has no roles with power levels. 
+#[implement(Service)] +pub async fn get_user_power_level( + &self, + space_id: &RoomId, + user_id: &UserId, +) -> Option<i64> { + let roles_map = self.roles.read().await; + let user_roles_map = self.user_roles.read().await; + + let role_defs = roles_map.get(space_id)?; + let user_assigned = user_roles_map.get(space_id)?.get(user_id)?; + + user_assigned + .iter() + .filter_map(|role_name| role_defs.get(role_name)?.power_level) + .max() +} + +/// Check if a user has all required roles for a room. +#[implement(Service)] +pub async fn user_qualifies_for_room( + &self, + space_id: &RoomId, + room_id: &RoomId, + user_id: &UserId, +) -> bool { + let reqs = self.room_requirements.read().await; + let Some(space_reqs) = reqs.get(space_id) else { + return true; // no requirements tracked for this space + }; + let Some(required) = space_reqs.get(room_id) else { + return true; // room has no role requirements + }; + if required.is_empty() { + return true; + } + + let user_map = self.user_roles.read().await; + let Some(space_users) = user_map.get(space_id) else { + return false; + }; + let Some(user_assigned) = space_users.get(user_id) else { + return false; + }; + + required.iter().all(|r| user_assigned.contains(r)) +} + +/// Get the parent Space of a child room, if any. +#[implement(Service)] +pub async fn get_parent_space(&self, room_id: &RoomId) -> Option<OwnedRoomId> { + self.room_to_space.read().await.get(room_id).cloned() +} +``` + +**Step 4: Verify build** + +Run: `cargo check -p conduwuit-service 2>&1 | tail -20` +Expected: Compiles successfully. + +**Step 5: Commit** + +```bash +git add src/service/rooms/roles/mod.rs +git commit -m "feat(spaces): add cache population and lookup methods for space roles" +``` + +--- + +### Task 5: Implement Default Roles Initialization + +**Files:** +- Modify: `src/service/rooms/roles/mod.rs` + +**Step 1: Add default role creation** + +```rust +/// Ensure a Space has the default admin/mod roles. Sends an m.space.roles +/// state event if none exists. 
+#[implement(Service)] +pub async fn ensure_default_roles(&self, space_id: &RoomId) -> Result { + if !self.is_enabled() { + return Ok(()); + } + + // Check if m.space.roles already exists + let existing: Result = self + .services + .state_accessor + .room_state_get_content(space_id, &StateEventType::from("m.space.roles"), "") + .await; + + if existing.is_ok() { + return Ok(()); + } + + // Create default roles + let mut roles = BTreeMap::new(); + roles.insert( + "admin".to_owned(), + RoleDefinition { + description: "Space administrator".to_owned(), + power_level: Some(100), + }, + ); + roles.insert( + "mod".to_owned(), + RoleDefinition { + description: "Space moderator".to_owned(), + power_level: Some(50), + }, + ); + + let content = SpaceRolesEventContent { roles }; + + // Send the state event as the server user + // This requires finding or creating a suitable sender + // Use the server's service user or the space creator + + Ok(()) +} +``` + +**Step 2: Verify build** + +Run: `cargo check -p conduwuit-service 2>&1 | tail -20` +Expected: Compiles. 
+ +**Step 3: Commit** + +```bash +git add src/service/rooms/roles/mod.rs +git commit -m "feat(spaces): add default admin/mod role initialization" +``` + +--- + +### Task 6: Implement Join Gating + +**Files:** +- Modify: `src/api/client/membership/join.rs` (add role check before join) + +**Step 1: Add role-based join check** + +In `join_room_by_id_helper()` or equivalent join path, after existing authorization checks and before the actual join, add: + +```rust +// Space permission cascading: check if user has required roles +if services.rooms.roles.is_enabled() { + if let Some(parent_space) = services.rooms.roles.get_parent_space(&room_id).await { + if !services + .rooms + .roles + .user_qualifies_for_room(&parent_space, &room_id, sender_user) + .await + { + return Err!(Request(Forbidden( + "You do not have the required Space roles to join this room" + ))); + } + } +} +``` + +**Step 2: Verify build** + +Run: `cargo check -p conduwuit-api 2>&1 | tail -20` +Expected: Compiles. + +**Step 3: Commit** + +```bash +git add src/api/client/membership/join.rs +git commit -m "feat(spaces): add role-based join gating for space child rooms" +``` + +--- + +### Task 7: Implement Power Level Override + +**Files:** +- Modify: `src/service/rooms/roles/mod.rs` (add PL sync method) + +**Step 1: Add power level synchronization method** + +```rust +/// Synchronize power levels in a child room based on Space roles. +/// This overrides per-room power levels with Space-granted levels. 
+#[implement(Service)] +pub async fn sync_power_levels(&self, space_id: &RoomId, room_id: &RoomId) -> Result { + if !self.is_enabled() { + return Ok(()); + } + + let state_lock = self.services.state.mutex.lock(room_id).await; + + // Get current power levels for the room + let mut power_levels: RoomPowerLevelsEventContent = self + .services + .state_accessor + .room_state_get_content(room_id, &StateEventType::RoomPowerLevels, "") + .await + .unwrap_or_default(); + + let mut changed = false; + + // Get all members of the room + let members: Vec<_> = self + .services + .state_cache + .room_members(room_id) + .collect() + .await; + + for user_id in &members { + if let Some(pl) = self.get_user_power_level(space_id, user_id).await { + let current = power_levels + .users + .get(user_id) + .copied() + .unwrap_or(power_levels.users_default); + + if i64::from(current) != pl { + power_levels.users.insert(user_id.to_owned(), pl.into()); + changed = true; + } + } + } + + if changed { + // Send updated power levels as the server/space admin + // Use PduBuilder::state to create the event + // timeline.build_and_append_pdu(...) + } + + Ok(()) +} +``` + +**Step 2: Verify build** + +Run: `cargo check -p conduwuit-service 2>&1 | tail -20` +Expected: Compiles. + +**Step 3: Commit** + +```bash +git add src/service/rooms/roles/mod.rs +git commit -m "feat(spaces): add power level synchronization from space roles" +``` + +--- + +### Task 8: Implement Auto-Join and Auto-Kick + +**Files:** +- Modify: `src/service/rooms/roles/mod.rs` (add enforcement methods) + +**Step 1: Add auto-join method** + +```rust +/// Auto-join a user to all qualifying child rooms of a Space. 
+#[implement(Service)] +pub async fn auto_join_qualifying_rooms( + &self, + space_id: &RoomId, + user_id: &UserId, +) -> Result { + if !self.is_enabled() { + return Ok(()); + } + + // Get all child rooms from m.space.child events + let child_rooms: Vec = self + .services + .spaces + .get_space_child_events(space_id) + .filter_map(|pdu| { + RoomId::parse(pdu.state_key()?).ok().map(|r| r.to_owned()) + }) + .collect() + .await; + + for child_room_id in &child_rooms { + // Skip if already joined + if self + .services + .state_cache + .is_joined(user_id, child_room_id) + .await + { + continue; + } + + // Check if user qualifies + if self + .user_qualifies_for_room(space_id, child_room_id, user_id) + .await + { + // Perform the join via the membership service + // This needs to create a join membership event + } + } + + Ok(()) +} +``` + +**Step 2: Add auto-kick method** + +```rust +/// Remove a user from all child rooms they no longer qualify for. +#[implement(Service)] +pub async fn kick_unqualified_from_rooms( + &self, + space_id: &RoomId, + user_id: &UserId, +) -> Result { + if !self.is_enabled() { + return Ok(()); + } + + let child_rooms: Vec = self + .room_requirements + .read() + .await + .get(space_id) + .map(|reqs| reqs.keys().cloned().collect()) + .unwrap_or_default(); + + for child_room_id in &child_rooms { + if !self + .services + .state_cache + .is_joined(user_id, child_room_id) + .await + { + continue; + } + + if !self + .user_qualifies_for_room(space_id, child_room_id, user_id) + .await + { + // Kick the user by sending a leave membership event + // with reason "No longer has required Space roles" + } + } + + Ok(()) +} +``` + +**Step 3: Verify build** + +Run: `cargo check -p conduwuit-service 2>&1 | tail -20` +Expected: Compiles. 
+ +**Step 4: Commit** + +```bash +git add src/service/rooms/roles/mod.rs +git commit -m "feat(spaces): add auto-join and auto-kick enforcement methods" +``` + +--- + +### Task 9: Hook State Event Changes for Enforcement + +**Files:** +- Modify: `src/service/rooms/timeline/append.rs` (add hooks after PDU append) + +**Step 1: Add enforcement hook after event append** + +In the `append_pdu()` function, after the event is successfully appended, add a check for space role events: + +```rust +// Space permission cascading: react to role-related state events +if self.services.roles.is_enabled() { + if let Some(state_key) = &pdu.state_key { + match pdu.event_type() { + // m.space.roles changed -> revalidate all members + t if t == "m.space.roles" => { + self.services.roles.populate_space(&pdu.room_id).await?; + // Revalidate all members against all child rooms + } + // m.space.role.member changed -> auto-join/kick that user + t if t == "m.space.role.member" => { + if let Ok(user_id) = UserId::parse(state_key) { + self.services.roles.populate_space(&pdu.room_id).await?; + self.services + .roles + .auto_join_qualifying_rooms(&pdu.room_id, &user_id) + .await?; + self.services + .roles + .kick_unqualified_from_rooms(&pdu.room_id, &user_id) + .await?; + // Sync power levels in all child rooms for this user + } + } + // m.space.role.room changed -> auto-join/kick for that room + t if t == "m.space.role.room" => { + if let Ok(room_id) = RoomId::parse(state_key) { + self.services.roles.populate_space(&pdu.room_id).await?; + // Check all members of room_id against new requirements + } + } + // m.space.child added/removed -> update room_to_space index + t if t == StateEventType::SpaceChild.to_string() => { + self.services.roles.populate_space(&pdu.room_id).await?; + // If new child, auto-join qualifying members + } + // m.room.member join in a Space -> auto-join child rooms + t if t == StateEventType::RoomMember.to_string() => { + // Check if this room is a Space and user just 
joined + // If so, auto-join them to qualifying child rooms + } + _ => {} + } + } +} +``` + +**Step 2: Verify build** + +Run: `cargo check -p conduwuit-service 2>&1 | tail -20` +Expected: Compiles. + +**Step 3: Commit** + +```bash +git add src/service/rooms/timeline/append.rs +git commit -m "feat(spaces): hook state event changes for role enforcement" +``` + +--- + +### Task 10: Implement Cache Rebuild on Startup + +**Files:** +- Modify: `src/service/rooms/roles/mod.rs` (add `worker()` implementation) + +**Step 1: Add startup rebuild in the `worker()` method** + +In the `Service` trait impl, add: + +```rust +async fn worker(self: Arc) -> Result { + if !self.is_enabled() { + return Ok(()); + } + + // Find all spaces (rooms with type m.space) and populate cache + // Iterate all rooms, check room type, populate if space + // This can use rooms::metadata to list all rooms + + Ok(()) +} +``` + +**Step 2: Verify build** + +Run: `cargo check -p conduwuit-service 2>&1 | tail -20` +Expected: Compiles. 
+ +**Step 3: Commit** + +```bash +git add src/service/rooms/roles/mod.rs +git commit -m "feat(spaces): rebuild role cache from state events on startup" +``` + +--- + +### Task 11: Add Admin Commands — Module Structure + +**Files:** +- Create: `src/admin/space/mod.rs` +- Create: `src/admin/space/commands.rs` +- Modify: `src/admin/mod.rs` (add module declaration) +- Modify: `src/admin/admin.rs` (add to AdminCommand enum) + +**Step 1: Create the command enum** + +Create `src/admin/space/mod.rs`: + +```rust +mod commands; + +use clap::Subcommand; +use ruma::{OwnedRoomId, OwnedRoomOrAliasId, OwnedUserId}; + +use crate::admin_command_dispatch; + +#[admin_command_dispatch] +#[derive(Debug, Subcommand)] +pub enum SpaceCommand { + #[command(subcommand)] + /// Manage space roles and permissions + Roles(SpaceRolesCommand), +} + +#[admin_command_dispatch] +#[derive(Debug, Subcommand)] +pub enum SpaceRolesCommand { + /// List all roles defined in a space + List { + /// The space room ID or alias + space: OwnedRoomOrAliasId, + }, + + /// Add a new role to a space + Add { + /// The space room ID or alias + space: OwnedRoomOrAliasId, + + /// Role name + role_name: String, + + /// Human-readable description + #[arg(long)] + description: Option, + + /// Power level to grant in child rooms + #[arg(long)] + power_level: Option, + }, + + /// Remove a role from a space + Remove { + /// The space room ID or alias + space: OwnedRoomOrAliasId, + + /// Role name to remove + role_name: String, + }, + + /// Assign a role to a user + Assign { + /// The space room ID or alias + space: OwnedRoomOrAliasId, + + /// User to assign the role to + user_id: OwnedUserId, + + /// Role name to assign + role_name: String, + }, + + /// Revoke a role from a user + Revoke { + /// The space room ID or alias + space: OwnedRoomOrAliasId, + + /// User to revoke the role from + user_id: OwnedUserId, + + /// Role name to revoke + role_name: String, + }, + + /// Require a role for a room + Require { + /// The space 
room ID or alias + space: OwnedRoomOrAliasId, + + /// Child room ID + room_id: OwnedRoomId, + + /// Role name to require + role_name: String, + }, + + /// Remove a role requirement from a room + Unrequire { + /// The space room ID or alias + space: OwnedRoomOrAliasId, + + /// Child room ID + room_id: OwnedRoomId, + + /// Role name to remove from requirements + role_name: String, + }, + + /// Show a user's roles in a space + User { + /// The space room ID or alias + space: OwnedRoomOrAliasId, + + /// User to check + user_id: OwnedUserId, + }, + + /// Show a room's role requirements in a space + Room { + /// The space room ID or alias + space: OwnedRoomOrAliasId, + + /// Room to check + room_id: OwnedRoomId, + }, +} +``` + +**Step 2: Register in admin module** + +In `src/admin/mod.rs`, add: + +```rust +pub(crate) mod space; +``` + +In `src/admin/admin.rs`, add to imports: + +```rust +use crate::space::{self, SpaceCommand}; +``` + +Add to `AdminCommand` enum: + +```rust +#[command(subcommand)] +/// Commands for managing space permissions +Spaces(SpaceCommand), +``` + +Add to the `process()` match: + +```rust +| Spaces(command) => { + context.bail_restricted()?; + space::process(command, context).await +}, +``` + +**Step 3: Verify build** + +Run: `cargo check -p conduwuit-admin 2>&1 | tail -20` +Expected: Compiles (commands.rs can have stub implementations initially). + +**Step 4: Commit** + +```bash +git add src/admin/space/ src/admin/mod.rs src/admin/admin.rs +git commit -m "feat(spaces): add admin command structure for space role management" +``` + +--- + +### Task 12: Implement Admin Command Handlers + +**Files:** +- Modify: `src/admin/space/commands.rs` + +**Step 1: Implement the command handlers** + +Create `src/admin/space/commands.rs` with handlers for each command. Each handler should: + +1. Resolve the space room alias to ID +2. Read the current state event +3. Modify the content +4. Send the updated state event via `PduBuilder::state()` +5. 
Return a success message + +Example for the `list` command: + +```rust +use conduwuit::Result; +use conduwuit_core::matrix::space_roles::SpaceRolesEventContent; +use ruma::events::StateEventType; + +use crate::admin_command; + +#[admin_command] +pub(super) async fn list(&self, space: OwnedRoomOrAliasId) -> Result { + let space_id = self.services.rooms.alias.resolve(&space).await?; + + let content: SpaceRolesEventContent = self + .services + .rooms + .state_accessor + .room_state_get_content(&space_id, &StateEventType::from("m.space.roles"), "") + .await + .unwrap_or_default(); + + if content.roles.is_empty() { + return self.write_str("No roles defined in this space.").await; + } + + let mut output = String::from("Roles:\n"); + for (name, def) in &content.roles { + output.push_str(&format!( + "- **{}**: {} {}\n", + name, + def.description, + def.power_level + .map(|pl| format!("(PL {pl})")) + .unwrap_or_default() + )); + } + + self.write_str(&output).await +} +``` + +Implement similar handlers for: `add`, `remove`, `assign`, `revoke`, `require`, `unrequire`, `user`, `room`. + +Each mutation handler follows this pattern: +1. Read current state → modify → send updated state event via `PduBuilder::state()` +2. The state event hook (Task 9) will handle cache invalidation and enforcement + +**Step 2: Verify build** + +Run: `cargo check -p conduwuit-admin 2>&1 | tail -20` +Expected: Compiles. + +**Step 3: Commit** + +```bash +git add src/admin/space/commands.rs +git commit -m "feat(spaces): implement admin command handlers for space roles" +``` + +--- + +### Task 13: Implement Power Level Override Rejection + +**Files:** +- Modify: `src/service/rooms/timeline/append.rs` or `src/core/matrix/state_res/event_auth.rs` + +**Step 1: Add PL override rejection** + +When a `m.room.power_levels` event is submitted for a child room of a Space, check that it doesn't conflict with Space-granted power levels. 
If a user's PL is being set lower than their Space-granted level, reject the event. + +Add this check in the event append path or auth check path: + +```rust +// Reject power level changes that conflict with Space roles +if pdu.event_type() == StateEventType::RoomPowerLevels.to_string() { + if let Some(parent_space) = self.services.roles.get_parent_space(&pdu.room_id).await { + // Parse the proposed power levels + // For each user, check if proposed PL < Space-granted PL + // If so, reject + } +} +``` + +**Step 2: Verify build** + +Run: `cargo check -p conduwuit-service 2>&1 | tail -20` +Expected: Compiles. + +**Step 3: Commit** + +```bash +git add -A +git commit -m "feat(spaces): reject power level changes that conflict with space roles" +``` + +--- + +### Task 14: Integration Testing + +**Files:** +- Create: `src/service/rooms/roles/tests.rs` + +**Step 1: Write unit tests for the roles service** + +Test the lookup methods with pre-populated cache data: + +```rust +#[cfg(test)] +mod tests { + // Test user_qualifies_for_room with various role combinations + // Test get_user_power_level with multiple roles + // Test cache invalidation paths + // Test default role creation +} +``` + +**Step 2: Run tests** + +Run: `cargo test -p conduwuit-service roles 2>&1 | tail -20` +Expected: All tests pass. 
+ +**Step 3: Commit** + +```bash +git add src/service/rooms/roles/tests.rs +git commit -m "test(spaces): add unit tests for space roles service" +``` + +--- + +### Task 15: Documentation + +**Files:** +- Modify: `docs/plans/2026-03-17-space-permission-cascading-design.md` (mark as implemented) + +**Step 1: Update design doc status** + +Change `**Status:** Approved` to `**Status:** Implemented` + +**Step 2: Commit** + +```bash +git add docs/plans/2026-03-17-space-permission-cascading-design.md +git commit -m "docs: mark space permission cascading design as implemented" +``` + +--- + +## Task Dependency Graph + +``` +Task 1 (config flag) + └─> Task 2 (event types) + └─> Task 3 (service skeleton) + └─> Task 4 (cache + lookups) + ├─> Task 5 (default roles) + ├─> Task 6 (join gating) + ├─> Task 7 (PL override) + ├─> Task 8 (auto-join/kick) + │ └─> Task 9 (state event hooks) + │ └─> Task 10 (startup rebuild) + ├─> Task 13 (PL rejection) + └─> Task 11 (admin cmd structure) + └─> Task 12 (admin cmd handlers) +Task 14 (tests) - can run after Task 8 +Task 15 (docs) - final +``` + +Tasks 5-8, 11, and 13 can be worked on in parallel after Task 4 is complete. 
diff --git a/src/admin/admin.rs b/src/admin/admin.rs index 4db3393a..caa44f3c 100644 --- a/src/admin/admin.rs +++ b/src/admin/admin.rs @@ -11,6 +11,7 @@ use crate::{ query::{self, QueryCommand}, room::{self, RoomCommand}, server::{self, ServerCommand}, + space::{self, SpaceCommand}, token::{self, TokenCommand}, user::{self, UserCommand}, }; @@ -34,6 +35,10 @@ pub enum AdminCommand { /// Commands for managing rooms Rooms(RoomCommand), + #[command(subcommand)] + /// Commands for managing space permissions + Spaces(SpaceCommand), + #[command(subcommand)] /// Commands for managing federation Federation(FederationCommand), @@ -81,6 +86,10 @@ pub(super) async fn process(command: AdminCommand, context: &Context<'_>) -> Res token::process(command, context).await }, | Rooms(command) => room::process(command, context).await, + | Spaces(command) => { + context.bail_restricted()?; + space::process(command, context).await + }, | Federation(command) => federation::process(command, context).await, | Server(command) => server::process(command, context).await, | Debug(command) => debug::process(command, context).await, diff --git a/src/admin/mod.rs b/src/admin/mod.rs index b343fd2e..bd088fe6 100644 --- a/src/admin/mod.rs +++ b/src/admin/mod.rs @@ -17,6 +17,7 @@ pub(crate) mod media; pub(crate) mod query; pub(crate) mod room; pub(crate) mod server; +pub(crate) mod space; pub(crate) mod token; pub(crate) mod user; diff --git a/src/admin/space/mod.rs b/src/admin/space/mod.rs new file mode 100644 index 00000000..0b183601 --- /dev/null +++ b/src/admin/space/mod.rs @@ -0,0 +1,15 @@ +pub(super) mod roles; + +use clap::Subcommand; +use conduwuit::Result; + +use self::roles::SpaceRolesCommand; +use crate::admin_command_dispatch; + +#[admin_command_dispatch] +#[derive(Debug, Subcommand)] +pub enum SpaceCommand { + #[command(subcommand)] + /// Manage space roles and permissions + Roles(SpaceRolesCommand), +} diff --git a/src/admin/space/roles.rs b/src/admin/space/roles.rs new file mode 100644 
index 00000000..a1813a40 --- /dev/null +++ b/src/admin/space/roles.rs @@ -0,0 +1,632 @@ +use std::fmt::Write; + +use clap::Subcommand; +use conduwuit::{Err, Event, Result, matrix::pdu::PduBuilder}; +use conduwuit_core::matrix::space_roles::{ + RoleDefinition, SPACE_CASCADING_EVENT_TYPE, SPACE_ROLE_MEMBER_EVENT_TYPE, + SPACE_ROLE_ROOM_EVENT_TYPE, SPACE_ROLES_EVENT_TYPE, SpaceCascadingEventContent, + SpaceRoleMemberEventContent, SpaceRoleRoomEventContent, SpaceRolesEventContent, +}; +use futures::StreamExt; +use ruma::{OwnedRoomId, OwnedRoomOrAliasId, OwnedUserId, events::StateEventType}; +use serde_json::value::to_raw_value; + +use crate::{admin_command, admin_command_dispatch}; + +fn roles_event_type() -> StateEventType { + StateEventType::from(SPACE_ROLES_EVENT_TYPE.to_owned()) +} + +fn member_event_type() -> StateEventType { + StateEventType::from(SPACE_ROLE_MEMBER_EVENT_TYPE.to_owned()) +} + +fn room_event_type() -> StateEventType { + StateEventType::from(SPACE_ROLE_ROOM_EVENT_TYPE.to_owned()) +} + +fn cascading_event_type() -> StateEventType { + StateEventType::from(SPACE_CASCADING_EVENT_TYPE.to_owned()) +} + +macro_rules! resolve_room_as_space { + ($self:expr, $space:expr) => {{ + let space_id = $self.services.rooms.alias.resolve(&$space).await?; + if !matches!( + $self + .services + .rooms + .state_accessor + .get_room_type(&space_id) + .await, + Ok(ruma::room::RoomType::Space) + ) { + return Err!("The specified room is not a Space."); + } + space_id + }}; +} + +macro_rules! resolve_space { + ($self:expr, $space:expr) => {{ + let space_id = resolve_room_as_space!($self, $space); + if !$self + .services + .rooms + .roles + .is_enabled_for_space(&space_id) + .await + { + return $self + .write_str( + "Space permission cascading is disabled for this Space. Enable it \ + server-wide with `space_permission_cascading = true` in your config, or \ + per-Space with `!admin space roles enable `.", + ) + .await; + } + space_id + }}; +} + +macro_rules! 
custom_state_pdu { + ($event_type:expr, $state_key:expr, $content:expr) => { + PduBuilder { + event_type: $event_type.to_owned().into(), + content: to_raw_value($content) + .map_err(|e| conduwuit::err!("Failed to serialize state event content: {e}"))?, + state_key: Some($state_key.to_owned().into()), + ..PduBuilder::default() + } + }; +} + +/// Cascade-remove a role name from all state events of a given type. For each +/// event that contains the role, the `$field` is filtered and the updated +/// content is sent back as a new state event. +macro_rules! cascade_remove_role { + ( + $self:expr, + $shortstatehash:expr, + $event_type_fn:expr, + $event_type_const:expr, + $content_ty:ty, + $field:ident, + $role_name:expr, + $space_id:expr, + $state_lock:expr, + $server_user:expr + ) => {{ + let ev_type = $event_type_fn; + let entries: Vec<(_, ruma::OwnedEventId)> = $self + .services + .rooms + .state_accessor + .state_keys_with_ids($shortstatehash, &ev_type) + .collect() + .await; + + for (state_key, event_id) in entries { + if let Ok(pdu) = $self.services.rooms.timeline.get_pdu(&event_id).await { + if let Ok(mut content) = pdu.get_content::<$content_ty>() { + if content.$field.contains($role_name) { + content.$field.retain(|r| r != $role_name); + $self + .services + .rooms + .timeline + .build_and_append_pdu( + custom_state_pdu!($event_type_const, &state_key, &content), + $server_user, + Some(&$space_id), + &$state_lock, + ) + .await?; + } + } + } + } + }}; +} + +macro_rules! send_space_state { + ($self:expr, $space_id:expr, $event_type:expr, $state_key:expr, $content:expr) => {{ + let state_lock = $self.services.rooms.state.mutex.lock(&$space_id).await; + let server_user = &$self.services.globals.server_user; + $self + .services + .rooms + .timeline + .build_and_append_pdu( + custom_state_pdu!($event_type, $state_key, $content), + server_user, + Some(&$space_id), + &state_lock, + ) + .await? 
+ }}; +} + +#[admin_command_dispatch] +#[derive(Debug, Subcommand)] +pub enum SpaceRolesCommand { + /// List all roles defined in a space + List { + space: OwnedRoomOrAliasId, + }, + /// Add a new role to a space + Add { + space: OwnedRoomOrAliasId, + role_name: String, + #[arg(long)] + description: Option<String>, + #[arg(long)] + power_level: Option<i64>, + }, + /// Remove a role from a space + Remove { + space: OwnedRoomOrAliasId, + role_name: String, + }, + /// Assign a role to a user + Assign { + space: OwnedRoomOrAliasId, + user_id: OwnedUserId, + role_name: String, + }, + /// Revoke a role from a user + Revoke { + space: OwnedRoomOrAliasId, + user_id: OwnedUserId, + role_name: String, + }, + /// Require a role for a room + Require { + space: OwnedRoomOrAliasId, + room_id: OwnedRoomId, + role_name: String, + }, + /// Remove a role requirement from a room + Unrequire { + space: OwnedRoomOrAliasId, + room_id: OwnedRoomId, + role_name: String, + }, + /// Show a user's roles in a space + User { + space: OwnedRoomOrAliasId, + user_id: OwnedUserId, + }, + /// Show a room's role requirements in a space + Room { + space: OwnedRoomOrAliasId, + room_id: OwnedRoomId, + }, + /// Enable space permission cascading for a specific space (overrides + /// server config) + Enable { + space: OwnedRoomOrAliasId, + }, + /// Disable space permission cascading for a specific space (overrides + /// server config) + Disable { + space: OwnedRoomOrAliasId, + }, + /// Show whether cascading is enabled for a space and the source (server + /// default or per-space override) + Status { + space: OwnedRoomOrAliasId, + }, +} + +#[admin_command] +async fn list(&self, space: OwnedRoomOrAliasId) -> Result { + let space_id = resolve_space!(self, space); + let roles_event_type = roles_event_type(); + + let content: SpaceRolesEventContent = self + .services + .rooms + .state_accessor + .room_state_get_content(&space_id, &roles_event_type, "") + .await + .unwrap_or_default(); + + if content.roles.is_empty() { + 
return self.write_str("No roles defined in this space.").await; + } + + let mut msg = format!("Roles in {space_id}:\n```\n"); + for (name, def) in &content.roles { + let pl = def + .power_level + .map(|p| format!(" (power_level: {p})")) + .unwrap_or_default(); + let _ = writeln!(msg, "- {name}: {}{pl}", def.description); + } + msg.push_str("```"); + + self.write_str(&msg).await +} + +#[admin_command] +async fn add( + &self, + space: OwnedRoomOrAliasId, + role_name: String, + description: Option, + power_level: Option, +) -> Result { + let space_id = resolve_space!(self, space); + + if let Some(pl) = power_level { + if pl > i64::from(ruma::Int::MAX) || pl < i64::from(ruma::Int::MIN) { + return Err!( + "Power level must be between {} and {}.", + ruma::Int::MIN, + ruma::Int::MAX + ); + } + } + + let roles_event_type = roles_event_type(); + + let mut content: SpaceRolesEventContent = self + .services + .rooms + .state_accessor + .room_state_get_content(&space_id, &roles_event_type, "") + .await + .unwrap_or_default(); + + if content.roles.contains_key(&role_name) { + return Err!("Role '{role_name}' already exists in this space."); + } + + content.roles.insert(role_name.clone(), RoleDefinition { + description: description.unwrap_or_else(|| role_name.clone()), + power_level, + }); + + send_space_state!(self, space_id, SPACE_ROLES_EVENT_TYPE, "", &content); + + self.write_str(&format!("Added role '{role_name}' to space {space_id}.")) + .await +} + +#[admin_command] +async fn remove(&self, space: OwnedRoomOrAliasId, role_name: String) -> Result { + let space_id = resolve_space!(self, space); + let roles_event_type = roles_event_type(); + + let mut content: SpaceRolesEventContent = self + .services + .rooms + .state_accessor + .room_state_get_content(&space_id, &roles_event_type, "") + .await + .unwrap_or_default(); + + if content.roles.remove(&role_name).is_none() { + return Err!("Role '{role_name}' does not exist in this space."); + } + + send_space_state!(self, space_id, 
SPACE_ROLES_EVENT_TYPE, "", &content); + + // Cascade: remove the deleted role from all member and room events + let server_user = &self.services.globals.server_user; + if let Ok(shortstatehash) = self + .services + .rooms + .state + .get_room_shortstatehash(&space_id) + .await + { + let state_lock = self.services.rooms.state.mutex.lock(&space_id).await; + + cascade_remove_role!( + self, + shortstatehash, + member_event_type(), + SPACE_ROLE_MEMBER_EVENT_TYPE, + SpaceRoleMemberEventContent, + roles, + &role_name, + space_id, + state_lock, + server_user + ); + + cascade_remove_role!( + self, + shortstatehash, + room_event_type(), + SPACE_ROLE_ROOM_EVENT_TYPE, + SpaceRoleRoomEventContent, + required_roles, + &role_name, + space_id, + state_lock, + server_user + ); + } + + self.write_str(&format!("Removed role '{role_name}' from space {space_id}.")) + .await +} + +#[admin_command] +async fn assign( + &self, + space: OwnedRoomOrAliasId, + user_id: OwnedUserId, + role_name: String, +) -> Result { + let space_id = resolve_space!(self, space); + + let roles_event_type = roles_event_type(); + let role_defs: SpaceRolesEventContent = self + .services + .rooms + .state_accessor + .room_state_get_content(&space_id, &roles_event_type, "") + .await + .unwrap_or_default(); + + if !role_defs.roles.contains_key(&role_name) { + return Err!("Role '{role_name}' does not exist in this space."); + } + + let member_event_type = member_event_type(); + + let mut content: SpaceRoleMemberEventContent = self + .services + .rooms + .state_accessor + .room_state_get_content(&space_id, &member_event_type, user_id.as_str()) + .await + .unwrap_or_default(); + + if content.roles.contains(&role_name) { + return Err!("User {user_id} already has role '{role_name}' in this space."); + } + + content.roles.push(role_name.clone()); + + send_space_state!(self, space_id, SPACE_ROLE_MEMBER_EVENT_TYPE, user_id.as_str(), &content); + + self.write_str(&format!("Assigned role '{role_name}' to {user_id} in space 
{space_id}.")) + .await +} + +#[admin_command] +async fn revoke( + &self, + space: OwnedRoomOrAliasId, + user_id: OwnedUserId, + role_name: String, +) -> Result { + let space_id = resolve_space!(self, space); + let member_event_type = member_event_type(); + + let mut content: SpaceRoleMemberEventContent = self + .services + .rooms + .state_accessor + .room_state_get_content(&space_id, &member_event_type, user_id.as_str()) + .await + .unwrap_or_default(); + + let original_len = content.roles.len(); + content.roles.retain(|r| r != &role_name); + + if content.roles.len() == original_len { + return Err!("User {user_id} does not have role '{role_name}' in this space."); + } + + send_space_state!(self, space_id, SPACE_ROLE_MEMBER_EVENT_TYPE, user_id.as_str(), &content); + + self.write_str(&format!("Revoked role '{role_name}' from {user_id} in space {space_id}.")) + .await +} + +#[admin_command] +async fn require( + &self, + space: OwnedRoomOrAliasId, + room_id: OwnedRoomId, + role_name: String, +) -> Result { + let space_id = resolve_space!(self, space); + + let child_rooms = self.services.rooms.roles.get_child_rooms(&space_id).await; + if !child_rooms.contains(&room_id) { + return Err!("Room {room_id} is not a child of space {space_id}."); + } + + let roles_event_type = roles_event_type(); + let role_defs: SpaceRolesEventContent = self + .services + .rooms + .state_accessor + .room_state_get_content(&space_id, &roles_event_type, "") + .await + .unwrap_or_default(); + + if !role_defs.roles.contains_key(&role_name) { + return Err!("Role '{role_name}' does not exist in this space."); + } + + let room_event_type = room_event_type(); + + let mut content: SpaceRoleRoomEventContent = self + .services + .rooms + .state_accessor + .room_state_get_content(&space_id, &room_event_type, room_id.as_str()) + .await + .unwrap_or_default(); + + if content.required_roles.contains(&role_name) { + return Err!("Room {room_id} already requires role '{role_name}' in this space."); + } + + 
content.required_roles.push(role_name.clone()); + + send_space_state!(self, space_id, SPACE_ROLE_ROOM_EVENT_TYPE, room_id.as_str(), &content); + + self.write_str(&format!( + "Room {room_id} now requires role '{role_name}' in space {space_id}." + )) + .await +} + +#[admin_command] +async fn unrequire( + &self, + space: OwnedRoomOrAliasId, + room_id: OwnedRoomId, + role_name: String, +) -> Result { + let space_id = resolve_space!(self, space); + let room_event_type = room_event_type(); + + let mut content: SpaceRoleRoomEventContent = self + .services + .rooms + .state_accessor + .room_state_get_content(&space_id, &room_event_type, room_id.as_str()) + .await + .unwrap_or_default(); + + let original_len = content.required_roles.len(); + content.required_roles.retain(|r| r != &role_name); + + if content.required_roles.len() == original_len { + return Err!("Room {room_id} does not require role '{role_name}' in this space."); + } + + send_space_state!(self, space_id, SPACE_ROLE_ROOM_EVENT_TYPE, room_id.as_str(), &content); + + self.write_str(&format!( + "Removed role requirement '{role_name}' from room {room_id} in space {space_id}." 
+ )) + .await +} + +#[admin_command] +async fn user(&self, space: OwnedRoomOrAliasId, user_id: OwnedUserId) -> Result { + let space_id = resolve_space!(self, space); + + let roles = self + .services + .rooms + .roles + .get_user_roles_in_space(&space_id, &user_id) + .await; + + match roles { + | Some(roles) if !roles.is_empty() => { + let list: String = roles + .iter() + .map(|r| format!("- {r}")) + .collect::<Vec<_>>() + .join("\n"); + self.write_str(&format!("Roles for {user_id} in space {space_id}:\n```\n{list}\n```")) + .await + }, + | _ => + self.write_str(&format!("User {user_id} has no roles in space {space_id}.")) + .await, + } +} + +#[admin_command] +async fn room(&self, space: OwnedRoomOrAliasId, room_id: OwnedRoomId) -> Result { + let space_id = resolve_space!(self, space); + + let reqs = self + .services + .rooms + .roles + .get_room_requirements_in_space(&space_id, &room_id) + .await; + + match reqs { + | Some(reqs) if !reqs.is_empty() => { + let list: String = reqs + .iter() + .map(|r| format!("- {r}")) + .collect::<Vec<_>>() + .join("\n"); + self.write_str(&format!( + "Required roles for room {room_id} in space {space_id}:\n```\n{list}\n```" + )) + .await + }, + | _ => + self.write_str(&format!( + "Room {room_id} has no role requirements in space {space_id}." 
+ )) + .await, + } +} + +#[admin_command] +async fn enable(&self, space: OwnedRoomOrAliasId) -> Result { + let space_id = resolve_room_as_space!(self, space); + + self.services + .rooms + .roles + .ensure_default_roles(&space_id) + .await?; + + let content = SpaceCascadingEventContent { enabled: true }; + send_space_state!(self, space_id, SPACE_CASCADING_EVENT_TYPE, "", &content); + + self.write_str(&format!("Space permission cascading enabled for {space_id}.")) + .await +} + +#[admin_command] +async fn disable(&self, space: OwnedRoomOrAliasId) -> Result { + let space_id = resolve_room_as_space!(self, space); + + let content = SpaceCascadingEventContent { enabled: false }; + send_space_state!(self, space_id, SPACE_CASCADING_EVENT_TYPE, "", &content); + + self.write_str(&format!("Space permission cascading disabled for {space_id}.")) + .await +} + +#[admin_command] +async fn status(&self, space: OwnedRoomOrAliasId) -> Result { + let space_id = resolve_room_as_space!(self, space); + + let global_default = self.services.rooms.roles.is_enabled(); + let cascading_event_type = cascading_event_type(); + let per_space_override: Option<bool> = self + .services + .rooms + .state_accessor + .room_state_get_content::<SpaceCascadingEventContent>( + &space_id, + &cascading_event_type, + "", + ) + .await + .ok() + .map(|c| c.enabled); + + let effective = per_space_override.unwrap_or(global_default); + let source = match per_space_override { + | Some(v) => format!("per-Space override (enabled: {v})"), + | None => format!("server default (space_permission_cascading: {global_default})"), + }; + + self.write_str(&format!( + "Cascading status for {space_id}:\n- Effective: **{effective}**\n- Source: {source}" + )) + .await +} diff --git a/src/api/client/membership/join.rs b/src/api/client/membership/join.rs index cbb82506..a175f1b6 100644 --- a/src/api/client/membership/join.rs +++ b/src/api/client/membership/join.rs @@ -347,6 +347,12 @@ pub async fn join_room_by_id_helper( } } + services + .rooms + .roles 
.check_join_allowed(room_id, sender_user) + .await?; + if server_in_room { join_room_by_id_helper_local(services, sender_user, room_id, reason, servers, state_lock) .boxed() diff --git a/src/core/config/mod.rs b/src/core/config/mod.rs index a642f5b7..1239b805 100644 --- a/src/core/config/mod.rs +++ b/src/core/config/mod.rs @@ -603,6 +603,22 @@ pub struct Config { #[serde(default)] pub suspend_on_register: bool, + /// Server-wide default for space permission cascading (power levels and + /// role-based access). Individual Spaces can override this via the + /// `com.continuwuity.space.cascading` state event or the admin command + /// `!admin space roles enable/disable `. + /// + /// default: false + #[serde(default)] + pub space_permission_cascading: bool, + + /// Maximum number of spaces to cache role data for. When exceeded the + /// cache is cleared and repopulated on demand. + /// + /// default: 1000 + #[serde(default = "default_space_roles_cache_flush_threshold")] + pub space_roles_cache_flush_threshold: u32, + /// Enabling this setting opens registration to anyone without restrictions. 
/// This makes your server vulnerable to abuse #[serde(default)] @@ -2826,3 +2842,5 @@ fn default_ldap_search_filter() -> String { "(objectClass=*)".to_owned() } fn default_ldap_uid_attribute() -> String { String::from("uid") } fn default_ldap_name_attribute() -> String { String::from("givenName") } + +fn default_space_roles_cache_flush_threshold() -> u32 { 1000 } diff --git a/src/core/matrix/mod.rs b/src/core/matrix/mod.rs index b38d4c9a..08a88971 100644 --- a/src/core/matrix/mod.rs +++ b/src/core/matrix/mod.rs @@ -2,6 +2,7 @@ pub mod event; pub mod pdu; +pub mod space_roles; pub mod state_key; pub mod state_res; diff --git a/src/core/matrix/space_roles.rs b/src/core/matrix/space_roles.rs new file mode 100644 index 00000000..dc83c604 --- /dev/null +++ b/src/core/matrix/space_roles.rs @@ -0,0 +1,81 @@ +use std::collections::BTreeMap; + +use serde::{Deserialize, Serialize}; + +pub const SPACE_ROLES_EVENT_TYPE: &str = "com.continuwuity.space.roles"; +pub const SPACE_ROLE_MEMBER_EVENT_TYPE: &str = "com.continuwuity.space.role.member"; +pub const SPACE_ROLE_ROOM_EVENT_TYPE: &str = "com.continuwuity.space.role.room"; +pub const SPACE_CASCADING_EVENT_TYPE: &str = "com.continuwuity.space.cascading"; + +#[derive(Clone, Debug, Default, Deserialize, Serialize, PartialEq, Eq)] +pub struct SpaceRolesEventContent { + pub roles: BTreeMap<String, RoleDefinition>, +} + +#[derive(Clone, Debug, Deserialize, Serialize, PartialEq, Eq)] +pub struct RoleDefinition { + pub description: String, + #[serde(skip_serializing_if = "Option::is_none")] + pub power_level: Option<i64>, +} + +#[derive(Clone, Debug, Default, Deserialize, Serialize, PartialEq, Eq)] +pub struct SpaceRoleMemberEventContent { + pub roles: Vec<String>, +} + +#[derive(Clone, Debug, Default, Deserialize, Serialize, PartialEq, Eq)] +pub struct SpaceRoleRoomEventContent { + pub required_roles: Vec<String>, +} + +#[derive(Clone, Debug, Deserialize, Serialize, PartialEq, Eq)] +pub struct SpaceCascadingEventContent { + pub enabled: bool, +} + +#[cfg(test)] +mod tests { + 
use super::*; + + #[test] + fn space_roles_roundtrip() { + let mut roles = BTreeMap::new(); + roles.insert("admin".to_owned(), RoleDefinition { + description: "Space administrator".to_owned(), + power_level: Some(100), + }); + roles.insert("nsfw".to_owned(), RoleDefinition { + description: "NSFW access".to_owned(), + power_level: None, + }); + let content = SpaceRolesEventContent { roles }; + let json = serde_json::to_string(&content).unwrap(); + let deserialized: SpaceRolesEventContent = serde_json::from_str(&json).unwrap(); + assert_eq!(deserialized.roles["admin"].power_level, Some(100)); + assert!(deserialized.roles["nsfw"].power_level.is_none()); + } + + #[test] + fn power_level_omitted_in_serialization_when_none() { + let role = RoleDefinition { + description: "Test".to_owned(), + power_level: None, + }; + let json = serde_json::to_string(&role).unwrap(); + assert!(!json.contains("power_level")); + } + + #[test] + fn negative_power_level() { + let json = r#"{"description":"Restricted","power_level":-10}"#; + let role: RoleDefinition = serde_json::from_str(json).unwrap(); + assert_eq!(role.power_level, Some(-10)); + } + + #[test] + fn missing_description_fails() { + let json = r#"{"power_level":100}"#; + serde_json::from_str::(json).unwrap_err(); + } +} diff --git a/src/service/rooms/mod.rs b/src/service/rooms/mod.rs index 44a83582..bf4304f7 100644 --- a/src/service/rooms/mod.rs +++ b/src/service/rooms/mod.rs @@ -7,6 +7,7 @@ pub mod metadata; pub mod outlier; pub mod pdu_metadata; pub mod read_receipt; +pub mod roles; pub mod search; pub mod short; pub mod spaces; @@ -31,6 +32,7 @@ pub struct Service { pub outlier: Arc, pub pdu_metadata: Arc, pub read_receipt: Arc, + pub roles: Arc, pub search: Arc, pub short: Arc, pub spaces: Arc, diff --git a/src/service/rooms/roles/mod.rs b/src/service/rooms/roles/mod.rs new file mode 100644 index 00000000..dfb22566 --- /dev/null +++ b/src/service/rooms/roles/mod.rs @@ -0,0 +1,1257 @@ +#[cfg(test)] +mod tests; + +use std::{ 
+ collections::{BTreeMap, HashMap, HashSet}, + fmt::Write, + sync::Arc, +}; + +use async_trait::async_trait; +use conduwuit::{ + Err, Event, Result, Server, debug, debug_warn, implement, info, + matrix::pdu::{PduBuilder, PduEvent}, + warn, +}; +use conduwuit_core::matrix::space_roles::{ + RoleDefinition, SPACE_CASCADING_EVENT_TYPE, SPACE_ROLE_MEMBER_EVENT_TYPE, + SPACE_ROLE_ROOM_EVENT_TYPE, SPACE_ROLES_EVENT_TYPE, SpaceCascadingEventContent, + SpaceRoleMemberEventContent, SpaceRoleRoomEventContent, SpaceRolesEventContent, +}; +use futures::StreamExt; +use ruma::{ + Int, OwnedEventId, OwnedRoomId, OwnedUserId, RoomId, UserId, + events::{ + StateEventType, + room::{ + member::{MembershipState, RoomMemberEventContent}, + power_levels::RoomPowerLevelsEventContent, + }, + space::child::SpaceChildEventContent, + }, + room::RoomType, +}; +use serde_json::value::to_raw_value; +use tokio::sync::{RwLock, Semaphore}; + +use crate::{Dep, globals, rooms}; + +#[implement(Service)] +pub async fn flush_space_from_cache(&self, space_id: &RoomId) { + self.roles.write().await.remove(space_id); + self.user_roles.write().await.remove(space_id); + self.room_requirements.write().await.remove(space_id); + let mut room_to_space = self.room_to_space.write().await; + room_to_space.retain(|_, parents| { + parents.remove(space_id); + !parents.is_empty() + }); + drop(room_to_space); + self.space_to_rooms.write().await.remove(space_id); +} + +#[implement(Service)] +async fn flush_caches(&self) { + self.roles.write().await.clear(); + self.user_roles.write().await.clear(); + self.room_requirements.write().await.clear(); + self.room_to_space.write().await.clear(); + self.space_to_rooms.write().await.clear(); +} + +fn roles_event_type() -> StateEventType { + StateEventType::from(SPACE_ROLES_EVENT_TYPE.to_owned()) +} + +fn member_event_type() -> StateEventType { + StateEventType::from(SPACE_ROLE_MEMBER_EVENT_TYPE.to_owned()) +} + +fn room_event_type() -> StateEventType { + 
StateEventType::from(SPACE_ROLE_ROOM_EVENT_TYPE.to_owned()) +} + +fn cascading_event_type() -> StateEventType { + StateEventType::from(SPACE_CASCADING_EVENT_TYPE.to_owned()) +} + +pub struct Service { + services: Services, + server: Arc, + roles: RwLock>>, + user_roles: RwLock>>>, + room_requirements: RwLock>>>, + room_to_space: RwLock>>, + space_to_rooms: RwLock>>, + enforcement_semaphore: Semaphore, + pending_enforcement: RwLock>, +} + +struct Services { + globals: Dep, + metadata: Dep, + state_accessor: Dep, + state_cache: Dep, + state: Dep, + timeline: Dep, +} + +#[async_trait] +impl crate::Service for Service { + fn build(args: crate::Args<'_>) -> Result> { + Ok(Arc::new(Self { + services: Services { + globals: args.depend::("globals"), + metadata: args.depend::("rooms::metadata"), + state_accessor: args + .depend::("rooms::state_accessor"), + state_cache: args.depend::("rooms::state_cache"), + state: args.depend::("rooms::state"), + timeline: args.depend::("rooms::timeline"), + }, + server: args.server.clone(), + roles: RwLock::new(HashMap::new()), + user_roles: RwLock::new(HashMap::new()), + room_requirements: RwLock::new(HashMap::new()), + room_to_space: RwLock::new(HashMap::new()), + space_to_rooms: RwLock::new(HashMap::new()), + enforcement_semaphore: Semaphore::new(4), + pending_enforcement: RwLock::new(HashSet::new()), + })) + } + + async fn memory_usage(&self, out: &mut (dyn Write + Send)) -> Result { + if !self.is_enabled() { + return Ok(()); + } + + let roles = self.roles.read().await.len(); + let user_roles = self.user_roles.read().await.len(); + let room_requirements = self.room_requirements.read().await.len(); + let room_to_space = self.room_to_space.read().await.len(); + let space_to_rooms = self.space_to_rooms.read().await.len(); + + writeln!(out, "space_roles_definitions: {roles}")?; + writeln!(out, "space_user_roles: {user_roles}")?; + writeln!(out, "space_room_requirements: {room_requirements}")?; + writeln!(out, "space_room_to_space_index: 
{room_to_space}")?; + writeln!(out, "space_space_to_rooms_index: {space_to_rooms}")?; + + Ok(()) + } + + async fn clear_cache(&self) { + if !self.is_enabled() { + return; + } + self.flush_caches().await; + } + + async fn worker(self: Arc) -> Result<()> { + info!("Rebuilding space roles cache from all known rooms"); + + let mut space_count: usize = 0; + let room_ids: Vec = self + .services + .metadata + .iter_ids() + .map(ToOwned::to_owned) + .collect() + .await; + + for room_id in &room_ids { + match self.services.state_accessor.get_room_type(room_id).await { + | Ok(RoomType::Space) => { + // Check per-Space override — skip spaces where cascading is + // disabled + if !self.is_enabled_for_space(room_id).await { + continue; + } + debug!(room_id = %room_id, "Populating space roles cache"); + self.populate_space(room_id).await; + space_count = space_count.saturating_add(1); + }, + | _ => continue, + } + } + + info!(space_count, "Space roles cache rebuilt"); + Ok(()) + } + + fn name(&self) -> &str { crate::service::make_name(std::module_path!()) } +} + +#[implement(Service)] +pub fn is_enabled(&self) -> bool { self.server.config.space_permission_cascading } + +#[implement(Service)] +pub async fn is_enabled_for_space(&self, space_id: &RoomId) -> bool { + let cascading_event_type = cascading_event_type(); + if let Ok(content) = self + .services + .state_accessor + .room_state_get_content::(space_id, &cascading_event_type, "") + .await + { + return content.enabled; + } + + self.server.config.space_permission_cascading +} + +#[implement(Service)] +pub async fn ensure_default_roles(&self, space_id: &RoomId) -> Result { + let server_user = self.services.globals.server_user.as_ref(); + let state_lock = self.services.state.mutex.lock(space_id).await; + + let roles_event_type = roles_event_type(); + if self + .services + .state_accessor + .room_state_get_content::(space_id, &roles_event_type, "") + .await + .is_ok() + { + return Ok(()); + } + + let mut roles = BTreeMap::new(); 
+ roles.insert("admin".to_owned(), RoleDefinition { + description: "Space administrator".to_owned(), + power_level: Some(100), + }); + roles.insert("mod".to_owned(), RoleDefinition { + description: "Space moderator".to_owned(), + power_level: Some(50), + }); + + let content = SpaceRolesEventContent { roles }; + + let pdu = PduBuilder { + event_type: ruma::events::TimelineEventType::from(SPACE_ROLES_EVENT_TYPE.to_owned()), + content: to_raw_value(&content) + .map_err(|e| conduwuit::err!("Failed to serialize SpaceRolesEventContent: {e}"))?, + state_key: Some(String::new().into()), + ..PduBuilder::default() + }; + + self.services + .timeline + .build_and_append_pdu(pdu, server_user, Some(space_id), &state_lock) + .await?; + + debug!(space_id = %space_id, event_type = SPACE_ROLES_EVENT_TYPE, "Sent default space roles event"); + + Ok(()) +} + +/// Returns all `(state_key, pdu)` pairs for state events of the given type +/// within the room state identified by `shortstatehash`. +#[implement(Service)] +async fn load_state_pdus( + &self, + shortstatehash: u64, + event_type: &StateEventType, +) -> Vec<(conduwuit_core::matrix::StateKey, PduEvent)> { + let entries: Vec<(_, OwnedEventId)> = self + .services + .state_accessor + .state_keys_with_ids(shortstatehash, event_type) + .collect() + .await; + + let mut result = Vec::with_capacity(entries.len()); + for (state_key, event_id) in entries { + if let Ok(pdu) = self.services.timeline.get_pdu(&event_id).await { + result.push((state_key, pdu)); + } + } + result +} + +/// Loads all `SpaceRoleMember` state events and writes the resulting +/// user-to-roles mapping into the `user_roles` cache. 
+#[implement(Service)] +async fn load_user_roles(&self, space_id: &RoomId, shortstatehash: u64) { + let member_event_type = member_event_type(); + let mut user_roles_map: HashMap> = HashMap::new(); + + for (state_key, pdu) in self + .load_state_pdus(shortstatehash, &member_event_type) + .await + { + if let Ok(content) = pdu.get_content::() { + if let Ok(user_id) = UserId::parse(&*state_key) { + user_roles_map.insert(user_id.to_owned(), content.roles.into_iter().collect()); + } + } + } + + self.user_roles + .write() + .await + .insert(space_id.to_owned(), user_roles_map); +} + +/// Loads all `SpaceRoleRoom` state events and writes the resulting +/// room-to-required-roles mapping into the `room_requirements` cache. +#[implement(Service)] +async fn load_room_requirements(&self, space_id: &RoomId, shortstatehash: u64) { + let room_event_type = room_event_type(); + let mut room_reqs_map: HashMap> = HashMap::new(); + + for (state_key, pdu) in self.load_state_pdus(shortstatehash, &room_event_type).await { + if let Ok(content) = pdu.get_content::() { + if let Ok(room_id) = RoomId::parse(&*state_key) { + room_reqs_map + .insert(room_id.to_owned(), content.required_roles.into_iter().collect()); + } + } + } + + self.room_requirements + .write() + .await + .insert(space_id.to_owned(), room_reqs_map); +} + +/// Loads all `SpaceChild` state events and updates both the +/// `room_to_space` and `space_to_rooms` indexes. 
+#[implement(Service)] +async fn load_child_rooms_index(&self, space_id: &RoomId, shortstatehash: u64) { + let mut child_rooms: Vec = Vec::new(); + + for (state_key, pdu) in self + .load_state_pdus(shortstatehash, &StateEventType::SpaceChild) + .await + { + if let Ok(content) = pdu.get_content::() { + if content.via.is_empty() { + continue; + } + } else { + continue; + } + if let Ok(child_room_id) = RoomId::parse(&*state_key) { + child_rooms.push(child_room_id.to_owned()); + } + } + + { + let mut room_to_space = self.room_to_space.write().await; + room_to_space.retain(|_, parents| { + parents.remove(space_id); + !parents.is_empty() + }); + for child_room_id in &child_rooms { + room_to_space + .entry(child_room_id.clone()) + .or_default() + .insert(space_id.to_owned()); + } + } + + { + let mut space_to_rooms = self.space_to_rooms.write().await; + space_to_rooms.insert(space_id.to_owned(), child_rooms.into_iter().collect()) + }; +} + +#[implement(Service)] +pub async fn populate_space(&self, space_id: &RoomId) { + if !self.is_enabled_for_space(space_id).await { + return; + } + + if self.roles.read().await.len() + >= usize::try_from(self.server.config.space_roles_cache_flush_threshold) + .unwrap_or(usize::MAX) + { + self.flush_caches().await; + debug_warn!("Space roles cache exceeded capacity, cleared"); + } + + let roles_event_type = roles_event_type(); + if let Ok(content) = self + .services + .state_accessor + .room_state_get_content::(space_id, &roles_event_type, "") + .await + { + self.roles + .write() + .await + .insert(space_id.to_owned(), content.roles); + } + + let shortstatehash = match self.services.state.get_room_shortstatehash(space_id).await { + | Ok(hash) => hash, + | Err(e) => { + debug_warn!(space_id = %space_id, error = ?e, "Failed to get shortstatehash, cache may be stale"); + return; + }, + }; + + self.load_user_roles(space_id, shortstatehash).await; + self.load_room_requirements(space_id, shortstatehash).await; + 
self.load_child_rooms_index(space_id, shortstatehash).await; +} + +#[must_use] +pub fn compute_user_power_level( + role_defs: &BTreeMap, + assigned: &HashSet, +) -> Option { + assigned + .iter() + .filter_map(|role_name| role_defs.get(role_name)?.power_level) + .max() +} + +#[must_use] +pub fn roles_satisfy_requirements( + required: &HashSet, + assigned: &HashSet, +) -> bool { + required.iter().all(|r| assigned.contains(r)) +} + +#[implement(Service)] +pub async fn get_user_power_level(&self, space_id: &RoomId, user_id: &UserId) -> Option { + let role_defs = { self.roles.read().await.get(space_id).cloned()? }; + let user_assigned = { + self.user_roles + .read() + .await + .get(space_id)? + .get(user_id) + .cloned()? + }; + compute_user_power_level(&role_defs, &user_assigned) +} + +#[implement(Service)] +pub async fn user_qualifies_for_room( + &self, + space_id: &RoomId, + room_id: &RoomId, + user_id: &UserId, +) -> bool { + let required = { + let guard = self.room_requirements.read().await; + let Some(space_reqs) = guard.get(space_id) else { + return true; + }; + let Some(required) = space_reqs.get(room_id) else { + return true; + }; + if required.is_empty() { + return true; + } + required.clone() + }; + let user_assigned = { + let guard = self.user_roles.read().await; + let Some(space_users) = guard.get(space_id) else { + return false; + }; + let Some(assigned) = space_users.get(user_id) else { + return false; + }; + assigned.clone() + }; + roles_satisfy_requirements(&required, &user_assigned) +} + +#[implement(Service)] +pub async fn get_parent_spaces(&self, room_id: &RoomId) -> Vec { + let all_parents: Vec = self + .room_to_space + .read() + .await + .get(room_id) + .map(|set| set.iter().cloned().collect()) + .unwrap_or_default(); + + let mut enabled_parents = Vec::new(); + for parent in all_parents { + if self.is_enabled_for_space(&parent).await { + enabled_parents.push(parent); + } + } + enabled_parents +} + +#[implement(Service)] +pub async fn 
get_child_rooms(&self, space_id: &RoomId) -> Vec { + self.space_to_rooms + .read() + .await + .get(space_id) + .map(|set| set.iter().cloned().collect()) + .unwrap_or_default() +} + +#[implement(Service)] +pub async fn get_user_roles_in_space( + &self, + space_id: &RoomId, + user_id: &UserId, +) -> Option> { + self.user_roles + .read() + .await + .get(space_id)? + .get(user_id) + .cloned() +} + +#[implement(Service)] +pub async fn get_room_requirements_in_space( + &self, + space_id: &RoomId, + room_id: &RoomId, +) -> Option> { + self.room_requirements + .read() + .await + .get(space_id)? + .get(room_id) + .cloned() +} + +#[implement(Service)] +pub async fn sync_power_levels(&self, room_id: &RoomId) -> Result { + let server_user = self.services.globals.server_user.as_ref(); + if !self + .services + .state_cache + .is_joined(server_user, room_id) + .await + { + debug_warn!(room_id = %room_id, "Server user is not joined, skipping PL sync"); + return Ok(()); + } + + let mut power_levels_content: RoomPowerLevelsEventContent = self + .services + .state_accessor + .room_state_get_content(room_id, &StateEventType::RoomPowerLevels, "") + .await + .unwrap_or_default(); + + let members: Vec = self + .services + .state_cache + .room_members(room_id) + .map(ToOwned::to_owned) + .collect() + .await; + + let all_parents = self.get_parent_spaces(room_id).await; + + let mut changed = false; + for user_id in &members { + if user_id == server_user { + continue; + } + + if let Some(effective_pl) = self.compute_effective_pl(&all_parents, user_id).await { + let effective_pl_int = Int::new_saturating(effective_pl); + let current_pl = power_levels_content + .users + .get(user_id) + .copied() + .unwrap_or(power_levels_content.users_default); + + if current_pl != effective_pl_int { + power_levels_content + .users + .insert(user_id.clone(), effective_pl_int); + changed = true; + } + } + } + + if changed { + let state_lock = self.services.state.mutex.lock(room_id).await; + + self.services + 
.timeline + .build_and_append_pdu( + PduBuilder::state(String::new(), &power_levels_content), + server_user, + Some(room_id), + &state_lock, + ) + .await?; + } + + Ok(()) +} + +/// Computes the maximum effective power level for a user across all given +/// parent spaces. Returns `None` if the user has no power level defined in +/// any parent. +#[implement(Service)] +async fn compute_effective_pl( + &self, + parent_spaces: &[OwnedRoomId], + user_id: &UserId, +) -> Option { + let mut max_pl: Option = None; + for parent in parent_spaces { + if let Some(pl) = self.get_user_power_level(parent, user_id).await { + max_pl = Some(max_pl.map_or(pl, |current| current.max(pl))); + } + } + max_pl +} + +#[implement(Service)] +pub async fn auto_join_qualifying_rooms(&self, space_id: &RoomId, user_id: &UserId) -> Result { + if !self.is_enabled_for_space(space_id).await { + return Ok(()); + } + + let server_user = self.services.globals.server_user.as_ref(); + if user_id == server_user { + return Ok(()); + } + + let child_rooms = self.get_child_rooms(space_id).await; + + for child_room_id in &child_rooms { + if self + .services + .state_cache + .is_joined(user_id, child_room_id) + .await + { + continue; + } + + if !self + .user_qualifies_for_room(space_id, child_room_id, user_id) + .await + { + continue; + } + + if !self + .services + .state_cache + .is_joined(server_user, child_room_id) + .await + { + debug_warn!(room_id = %child_room_id, "Server user is not joined, skipping auto-join"); + continue; + } + + if let Err(e) = self + .invite_and_join_user(child_room_id, user_id, server_user) + .await + { + debug_warn!(user_id = %user_id, room_id = %child_room_id, error = ?e, "Failed to auto-join user"); + } + } + + Ok(()) +} + +#[implement(Service)] +async fn invite_and_join_user( + &self, + room_id: &RoomId, + user_id: &UserId, + server_user: &UserId, +) -> Result { + let state_lock = self.services.state.mutex.lock(room_id).await; + + self.services + .timeline + 
.build_and_append_pdu( + PduBuilder::state( + user_id.to_string(), + &RoomMemberEventContent::new(MembershipState::Invite), + ), + server_user, + Some(room_id), + &state_lock, + ) + .await?; + + self.services + .timeline + .build_and_append_pdu( + PduBuilder::state( + user_id.to_string(), + &RoomMemberEventContent::new(MembershipState::Join), + ), + user_id, + Some(room_id), + &state_lock, + ) + .await?; + + Ok(()) +} + +/// Called from append_pdu after a PDU is persisted. Dispatches to the +/// appropriate handler based on event type. +#[implement(Service)] +pub fn on_pdu_appended(self: &Arc, room_id: &RoomId, pdu: &PduEvent) { + if let Some(state_key) = pdu.state_key() { + let event_type_str = pdu.event_type().to_string(); + match event_type_str.as_str() { + | SPACE_ROLES_EVENT_TYPE + | SPACE_ROLE_MEMBER_EVENT_TYPE + | SPACE_ROLE_ROOM_EVENT_TYPE + | SPACE_CASCADING_EVENT_TYPE => { + self.handle_state_event_change( + room_id.to_owned(), + event_type_str, + state_key.to_owned(), + ); + }, + | _ => { + if *pdu.kind() == ruma::events::TimelineEventType::SpaceChild { + if let Ok(child_room_id) = RoomId::parse(&*state_key) { + self.handle_space_child_change( + room_id.to_owned(), + child_room_id.to_owned(), + ); + } + } + if *pdu.kind() == ruma::events::TimelineEventType::RoomMember { + if let Ok(content) = pdu.get_content::() { + if let Ok(user_id) = UserId::parse(&*state_key) { + match content.membership { + | MembershipState::Join => { + self.handle_space_member_join( + room_id.to_owned(), + user_id.to_owned(), + ); + }, + | MembershipState::Leave | MembershipState::Ban => { + self.handle_space_member_leave( + room_id.to_owned(), + user_id.to_owned(), + ); + }, + | _ => {}, + } + } + } + } + }, + } + } +} + +/// Called from build_and_append_pdu to validate PL changes don't conflict +/// with space-granted power levels. 
+#[implement(Service)] +pub async fn validate_pl_change( + &self, + room_id: &RoomId, + sender: &UserId, + proposed: &RoomPowerLevelsEventContent, +) -> Result { + if sender == self.services.globals.server_user.as_str() { + return Ok(()); + } + + let parent_spaces = self.get_parent_spaces(room_id).await; + if parent_spaces.is_empty() { + return Ok(()); + } + + let mut effective_pls: HashMap = HashMap::new(); + { + let roles_guard = self.roles.read().await; + let user_roles_guard = self.user_roles.read().await; + for ps in &parent_spaces { + let Some(space_users) = user_roles_guard.get(ps) else { + continue; + }; + let Some(role_defs) = roles_guard.get(ps) else { + continue; + }; + for (user_id, assigned_roles) in space_users { + let pl = assigned_roles + .iter() + .filter_map(|r| role_defs.get(r)?.power_level) + .max(); + if let Some(pl) = pl { + effective_pls + .entry(user_id.clone()) + .and_modify(|current| *current = (*current).max(pl)) + .or_insert(pl); + } + } + } + } + + for (user_id, effective_pl) in &effective_pls { + if !self.services.state_cache.is_joined(user_id, room_id).await { + continue; + } + match proposed.users.get(user_id) { + | None if i64::from(proposed.users_default) != *effective_pl => { + debug_warn!( + user_id = %user_id, + room_id = %room_id, + effective_pl, + "Rejecting PL change: space-managed user omitted" + ); + return Err!(Request(Forbidden( + "Cannot omit a user whose power level is managed by Space roles" + ))); + }, + | Some(pl) if i64::from(*pl) != *effective_pl => { + debug_warn!( + user_id = %user_id, + room_id = %room_id, + proposed_pl = i64::from(*pl), + effective_pl, + "Rejecting PL change conflicting with space role" + ); + return Err!(Request(Forbidden( + "Cannot change power level that is set by Space roles" + ))); + }, + | _ => {}, + } + } + + Ok(()) +} + +/// Called from join_room_by_id_helper to check if a user has the required +/// Space roles to join a room. 
+#[implement(Service)] +pub async fn check_join_allowed(&self, room_id: &RoomId, user_id: &UserId) -> Result { + let parent_spaces = self.get_parent_spaces(room_id).await; + if parent_spaces.is_empty() { + return Ok(()); + } + + for parent_space in &parent_spaces { + if self + .user_qualifies_for_room(parent_space, room_id, user_id) + .await + { + return Ok(()); + } + } + + Err!(Request(Forbidden("You do not have the required Space roles to join this room"))) +} + +#[implement(Service)] +async fn sync_power_levels_for_children(&self, space_id: &RoomId) { + let child_rooms = self.get_child_rooms(space_id).await; + for child_room_id in &child_rooms { + if let Err(e) = self.sync_power_levels(child_room_id).await { + debug_warn!(room_id = %child_room_id, error = ?e, "Failed to sync power levels"); + } + } +} + +/// Enforces a change to the space role definitions: syncs power levels to +/// all child rooms and kicks members who no longer qualify. +#[implement(Service)] +async fn enforce_roles_change(&self, space_id: &RoomId) { + self.sync_power_levels_for_children(space_id).await; + let space_members: Vec = self + .services + .state_cache + .room_members(space_id) + .map(ToOwned::to_owned) + .collect() + .await; + for member in &space_members { + if let Err(e) = Box::pin(self.kick_unqualified_from_rooms(space_id, member)).await { + debug_warn!(user_id = %member, error = ?e, "Role definition revalidation kick failed"); + } + } +} + +/// Enforces a change to a user's role membership: auto-joins qualifying +/// rooms, kicks from rooms they no longer qualify for, and syncs power +/// levels. 
+#[implement(Service)] +async fn enforce_member_change(&self, space_id: &RoomId, user_id: &UserId) { + if let Err(e) = self.auto_join_qualifying_rooms(space_id, user_id).await { + debug_warn!(user_id = %user_id, error = ?e, "Space role auto-join failed"); + } + if let Err(e) = Box::pin(self.kick_unqualified_from_rooms(space_id, user_id)).await { + debug_warn!(user_id = %user_id, error = ?e, "Space role auto-kick failed"); + } + self.sync_power_levels_for_children(space_id).await; +} + +/// Enforces a change to a room's role requirements: kicks all members of +/// the target room who no longer meet the updated requirements. +#[implement(Service)] +async fn enforce_room_change(&self, space_id: &RoomId, target_room: &RoomId) { + let members: Vec = self + .services + .state_cache + .room_members(target_room) + .map(ToOwned::to_owned) + .collect() + .await; + for member in &members { + if let Err(e) = Box::pin(self.kick_unqualified_from_rooms(space_id, member)).await { + debug_warn!(user_id = %member, error = ?e, "Space role requirement kick failed"); + } + } +} + +/// Enforces a toggle of the cascading feature: if cascading was disabled, +/// flushes the space from the cache. 
+#[implement(Service)] +async fn enforce_cascading_toggle(&self, space_id: &RoomId) { + if !self.is_enabled_for_space(space_id).await { + self.flush_space_from_cache(space_id).await; + } +} + +impl Service { + pub fn handle_state_event_change( + self: &Arc, + space_id: OwnedRoomId, + event_type: String, + state_key: String, + ) { + let this = Arc::clone(self); + self.server.runtime().spawn(async move { + if !this.server.running() { + return; + } + if event_type != SPACE_CASCADING_EVENT_TYPE + && !this.is_enabled_for_space(&space_id).await + { + return; + } + { + let mut pending = this.pending_enforcement.write().await; + if pending.contains(&space_id) { + return; + } + pending.insert(space_id.clone()) + }; + + async { + let Ok(_permit) = this.enforcement_semaphore.acquire().await else { + return; + }; + + this.populate_space(&space_id).await; + + match event_type.as_str() { + | SPACE_ROLES_EVENT_TYPE => { + this.enforce_roles_change(&space_id).await; + }, + | SPACE_ROLE_MEMBER_EVENT_TYPE => { + if let Ok(user_id) = UserId::parse(state_key.as_str()) { + this.enforce_member_change(&space_id, user_id).await; + } + }, + | SPACE_ROLE_ROOM_EVENT_TYPE => { + if let Ok(target_room) = RoomId::parse(state_key.as_str()) { + this.enforce_room_change(&space_id, target_room).await; + } + }, + | SPACE_CASCADING_EVENT_TYPE => { + this.enforce_cascading_toggle(&space_id).await; + }, + | _ => {}, + } + } + .await; + + this.pending_enforcement.write().await.remove(&space_id); + }); + } + + pub fn handle_space_child_change( + self: &Arc, + space_id: OwnedRoomId, + child_room_id: OwnedRoomId, + ) { + let this = Arc::clone(self); + self.server.runtime().spawn(async move { + if !this.server.running() { + return; + } + if !this.is_enabled_for_space(&space_id).await { + return; + } + let Ok(_permit) = this.enforcement_semaphore.acquire().await else { + return; + }; + + let child_event_type = StateEventType::SpaceChild; + let is_removal = match this + .services + .state_accessor + 
.room_state_get_content::( + &space_id, + &child_event_type, + child_room_id.as_str(), + ) + .await + { + | Ok(content) => content.via.is_empty(), + | Err(_) => true, // If we can't read it, treat as removal + }; + + if is_removal { + this.handle_child_removed(&space_id, &child_room_id).await; + } else { + this.handle_child_added(&space_id, &child_room_id).await; + } + }); + } + + pub fn handle_space_member_join( + self: &Arc, + space_id: OwnedRoomId, + user_id: OwnedUserId, + ) { + if user_id == self.services.globals.server_user { + return; + } + + let this = Arc::clone(self); + self.server.runtime().spawn(async move { + if !this.server.running() { + return; + } + if !this.is_enabled_for_space(&space_id).await { + return; + } + + let Ok(_permit) = this.enforcement_semaphore.acquire().await else { + return; + }; + + if let Err(e) = this.auto_join_qualifying_rooms(&space_id, &user_id).await { + debug_warn!(user_id = %user_id, error = ?e, "Auto-join on Space join failed"); + } + this.sync_power_levels_for_children(&space_id).await; + }); + } + + pub fn handle_space_member_leave( + self: &Arc, + space_id: OwnedRoomId, + user_id: OwnedUserId, + ) { + if user_id == self.services.globals.server_user { + return; + } + + let this = Arc::clone(self); + self.server.runtime().spawn(async move { + if !this.server.running() { + return; + } + if !this.is_enabled_for_space(&space_id).await { + return; + } + + let Ok(_permit) = this.enforcement_semaphore.acquire().await else { + return; + }; + + if let Err(e) = Box::pin(this.kick_unqualified_from_rooms(&space_id, &user_id)).await + { + debug_warn!(user_id = %user_id, error = ?e, "Kick on Space leave failed"); + } + }); + } +} + +#[implement(Service)] +async fn handle_child_removed(&self, space_id: &RoomId, child_room_id: &RoomId) { + let mut room_to_space = self.room_to_space.write().await; + if let Some(parents) = room_to_space.get_mut(child_room_id) { + parents.remove(space_id); + if parents.is_empty() { + 
room_to_space.remove(child_room_id); + } + } + let mut space_to_rooms = self.space_to_rooms.write().await; + if let Some(children) = space_to_rooms.get_mut(space_id) { + children.remove(child_room_id); + } +} + +#[implement(Service)] +async fn handle_child_added(&self, space_id: &RoomId, child_room_id: &RoomId) { + self.room_to_space + .write() + .await + .entry(child_room_id.to_owned()) + .or_default() + .insert(space_id.to_owned()); + + self.space_to_rooms + .write() + .await + .entry(space_id.to_owned()) + .or_default() + .insert(child_room_id.to_owned()); + + let server_user = self.services.globals.server_user.as_ref(); + if !self + .services + .state_cache + .is_joined(server_user, child_room_id) + .await + { + debug_warn!(room_id = %child_room_id, "Server user is not joined, skipping auto-join enforcement for new child"); + return; + } + + let space_members: Vec = self + .services + .state_cache + .room_members(space_id) + .map(ToOwned::to_owned) + .collect() + .await; + + for member in &space_members { + if self + .user_qualifies_for_room(space_id, child_room_id, member) + .await && !self + .services + .state_cache + .is_joined(member, child_room_id) + .await + { + if let Err(e) = self + .invite_and_join_user(child_room_id, member, server_user) + .await + { + debug_warn!(user_id = %member, room_id = %child_room_id, error = ?e, "Failed to auto-join user"); + } + } + } +} + +#[implement(Service)] +pub async fn kick_unqualified_from_rooms(&self, space_id: &RoomId, user_id: &UserId) -> Result { + if !self.is_enabled_for_space(space_id).await { + return Ok(()); + } + + let server_user = self.services.globals.server_user.as_ref(); + if user_id == server_user { + return Ok(()); + } + + let child_rooms: Vec = self + .room_requirements + .read() + .await + .get(space_id) + .map(|reqs| reqs.keys().cloned().collect()) + .unwrap_or_default(); + + for child_room_id in &child_rooms { + if !self + .services + .state_cache + .is_joined(server_user, child_room_id) + .await + 
{ + debug_warn!(room_id = %child_room_id, "Server user is not joined, skipping kick enforcement"); + continue; + } + if !self + .services + .state_cache + .is_joined(user_id, child_room_id) + .await + { + continue; + } + + if self + .user_qualifies_in_any_parent(child_room_id, user_id) + .await + { + continue; + } + + self.kick_user_from_room(child_room_id, user_id, server_user) + .await?; + } + + Ok(()) +} + +/// Checks whether the user qualifies for the given room through any of its +/// parent spaces. +#[implement(Service)] +async fn user_qualifies_in_any_parent(&self, room_id: &RoomId, user_id: &UserId) -> bool { + let all_parents = self.get_parent_spaces(room_id).await; + for parent in &all_parents { + if self.user_qualifies_for_room(parent, room_id, user_id).await { + return true; + } + } + false +} + +/// Sends a kick (membership=leave) PDU to remove a user from a room for +/// missing required Space roles. +#[implement(Service)] +async fn kick_user_from_room( + &self, + room_id: &RoomId, + user_id: &UserId, + server_user: &UserId, +) -> Result { + let Ok(member_content) = self + .services + .state_accessor + .get_member(room_id, user_id) + .await + else { + debug_warn!(user_id = %user_id, room_id = %room_id, "Could not get member event, skipping kick"); + return Ok(()); + }; + + let state_lock = self.services.state.mutex.lock(room_id).await; + + if let Err(e) = self + .services + .timeline + .build_and_append_pdu( + PduBuilder::state(user_id.to_string(), &RoomMemberEventContent { + membership: MembershipState::Leave, + reason: Some("No longer has required Space roles".into()), + is_direct: None, + join_authorized_via_users_server: None, + third_party_invite: None, + ..member_content + }), + server_user, + Some(room_id), + &state_lock, + ) + .await + { + warn!(user_id = %user_id, room_id = %room_id, error = ?e, "Failed to kick user for missing roles"); + } + + Ok(()) +} diff --git a/src/service/rooms/roles/tests.rs b/src/service/rooms/roles/tests.rs new file 
mode 100644 index 00000000..fedcfd79 --- /dev/null +++ b/src/service/rooms/roles/tests.rs @@ -0,0 +1,204 @@ +use std::collections::{BTreeMap, HashSet}; + +use conduwuit_core::matrix::space_roles::RoleDefinition; + +use super::{compute_user_power_level, roles_satisfy_requirements}; + +pub(super) fn make_roles(entries: &[(&str, Option)]) -> BTreeMap { + entries + .iter() + .map(|(name, pl)| { + ((*name).to_owned(), RoleDefinition { + description: format!("{name} role"), + power_level: *pl, + }) + }) + .collect() +} + +pub(super) fn make_set(items: &[&str]) -> HashSet { + items.iter().map(|s| (*s).to_owned()).collect() +} + +#[test] +fn power_level_single_role() { + let roles = make_roles(&[("admin", Some(100)), ("mod", Some(50))]); + assert_eq!(compute_user_power_level(&roles, &make_set(&["admin"])), Some(100)); +} + +#[test] +fn power_level_multiple_roles_takes_highest() { + let roles = make_roles(&[("admin", Some(100)), ("mod", Some(50)), ("helper", Some(25))]); + assert_eq!(compute_user_power_level(&roles, &make_set(&["mod", "helper"])), Some(50)); +} + +#[test] +fn power_level_no_power_roles() { + let roles = make_roles(&[("nsfw", None), ("vip", None)]); + assert_eq!(compute_user_power_level(&roles, &make_set(&["nsfw", "vip"])), None); +} + +#[test] +fn power_level_mixed_roles() { + let roles = make_roles(&[("mod", Some(50)), ("nsfw", None)]); + assert_eq!(compute_user_power_level(&roles, &make_set(&["mod", "nsfw"])), Some(50)); +} + +#[test] +fn power_level_no_roles_assigned() { + let roles = make_roles(&[("admin", Some(100))]); + assert_eq!(compute_user_power_level(&roles, &HashSet::new()), None); +} + +#[test] +fn power_level_unknown_role_ignored() { + let roles = make_roles(&[("admin", Some(100))]); + assert_eq!(compute_user_power_level(&roles, &make_set(&["nonexistent"])), None); +} + +#[test] +fn qualifies_with_all_required_roles() { + assert!(roles_satisfy_requirements( + &make_set(&["nsfw", "vip"]), + &make_set(&["nsfw", "vip", "extra"]), + )); +} + 
+#[test] +fn does_not_qualify_missing_one_role() { + assert!(!roles_satisfy_requirements(&make_set(&["nsfw", "vip"]), &make_set(&["nsfw"]),)); +} + +#[test] +fn qualifies_with_no_requirements() { + assert!(roles_satisfy_requirements(&HashSet::new(), &make_set(&["nsfw"]))); +} + +#[test] +fn does_not_qualify_with_no_roles() { + assert!(!roles_satisfy_requirements(&make_set(&["nsfw"]), &HashSet::new())); +} + +// Multi-space scenarios + +#[test] +fn multi_space_highest_pl_wins() { + let space_a_roles = make_roles(&[("mod", Some(50))]); + let space_b_roles = make_roles(&[("admin", Some(100))]); + + let user_roles_a = make_set(&["mod"]); + let user_roles_b = make_set(&["admin"]); + + let pl_a = compute_user_power_level(&space_a_roles, &user_roles_a); + let pl_b = compute_user_power_level(&space_b_roles, &user_roles_b); + + let effective = [pl_a, pl_b].into_iter().flatten().max(); + assert_eq!(effective, Some(100)); +} + +#[test] +fn multi_space_one_space_has_no_pl() { + let space_a_roles = make_roles(&[("nsfw", None)]); + let space_b_roles = make_roles(&[("mod", Some(50))]); + + let user_roles_a = make_set(&["nsfw"]); + let user_roles_b = make_set(&["mod"]); + + let pl_a = compute_user_power_level(&space_a_roles, &user_roles_a); + let pl_b = compute_user_power_level(&space_b_roles, &user_roles_b); + + let effective = [pl_a, pl_b].into_iter().flatten().max(); + assert_eq!(effective, Some(50)); +} + +#[test] +fn multi_space_neither_has_pl() { + let space_a_roles = make_roles(&[("nsfw", None)]); + let space_b_roles = make_roles(&[("vip", None)]); + + let user_roles_a = make_set(&["nsfw"]); + let user_roles_b = make_set(&["vip"]); + + let pl_a = compute_user_power_level(&space_a_roles, &user_roles_a); + let pl_b = compute_user_power_level(&space_b_roles, &user_roles_b); + + let effective = [pl_a, pl_b].into_iter().flatten().max(); + assert_eq!(effective, None); +} + +#[test] +fn multi_space_user_only_in_one_space() { + let space_a_roles = make_roles(&[("admin", 
Some(100))]); + let space_b_roles = make_roles(&[("mod", Some(50))]); + + let user_roles_a = make_set(&["admin"]); + let user_roles_b: HashSet = HashSet::new(); + + let pl_a = compute_user_power_level(&space_a_roles, &user_roles_a); + let pl_b = compute_user_power_level(&space_b_roles, &user_roles_b); + + let effective = [pl_a, pl_b].into_iter().flatten().max(); + assert_eq!(effective, Some(100)); +} + +#[test] +fn multi_space_qualifies_in_one_not_other() { + let space_a_reqs = make_set(&["staff"]); + let space_b_reqs = make_set(&["nsfw"]); + + let user_roles = make_set(&["nsfw"]); + + assert!(!roles_satisfy_requirements(&space_a_reqs, &user_roles)); + assert!(roles_satisfy_requirements(&space_b_reqs, &user_roles)); +} + +#[test] +fn multi_space_qualifies_after_role_revoke_via_other_space() { + let space_a_reqs = make_set(&["nsfw"]); + let space_b_reqs = make_set(&["vip"]); + + let user_roles_after_revoke = make_set(&["vip"]); + + assert!(!roles_satisfy_requirements(&space_a_reqs, &user_roles_after_revoke)); + assert!(roles_satisfy_requirements(&space_b_reqs, &user_roles_after_revoke)); +} + +#[test] +fn multi_space_room_has_reqs_in_one_space_only() { + let space_a_reqs = make_set(&["admin"]); + let space_b_reqs: HashSet = HashSet::new(); + + let user_roles = make_set(&["nsfw"]); + + assert!(!roles_satisfy_requirements(&space_a_reqs, &user_roles)); + assert!(roles_satisfy_requirements(&space_b_reqs, &user_roles)); +} + +#[test] +fn multi_space_no_qualification_anywhere() { + let space_a_reqs = make_set(&["staff"]); + let space_b_reqs = make_set(&["admin"]); + + let user_roles = make_set(&["nsfw"]); + + let qualifies_a = roles_satisfy_requirements(&space_a_reqs, &user_roles); + let qualifies_b = roles_satisfy_requirements(&space_b_reqs, &user_roles); + + assert!(!qualifies_a); + assert!(!qualifies_b); + assert!(!(qualifies_a || qualifies_b)); +} + +#[test] +fn multi_space_same_role_different_pl() { + let space_a_roles = make_roles(&[("mod", Some(50))]); + let 
space_b_roles = make_roles(&[("mod", Some(75))]); + + let user_roles = make_set(&["mod"]); + + let pl_a = compute_user_power_level(&space_a_roles, &user_roles); + let pl_b = compute_user_power_level(&space_b_roles, &user_roles); + + let effective = [pl_a, pl_b].into_iter().flatten().max(); + assert_eq!(effective, Some(75)); +} diff --git a/src/service/rooms/timeline/append.rs b/src/service/rooms/timeline/append.rs index 40139a98..6a8cc257 100644 --- a/src/service/rooms/timeline/append.rs +++ b/src/service/rooms/timeline/append.rs @@ -327,7 +327,7 @@ where } }, | TimelineEventType::SpaceChild => - if let Some(_state_key) = pdu.state_key() { + if pdu.state_key().is_some() { self.services .spaces .roomid_spacehierarchy_cache @@ -359,6 +359,8 @@ where | _ => {}, } + self.services.roles.on_pdu_appended(room_id, &pdu); + // CONCERN: If we receive events with a relation out-of-order, we never write // their relation / thread. We need some kind of way to trigger when we receive // this event, and potentially a way to rebuild the table entirely. 
diff --git a/src/service/rooms/timeline/build.rs b/src/service/rooms/timeline/build.rs
index 51162de9..6df89cdf 100644
--- a/src/service/rooms/timeline/build.rs
+++ b/src/service/rooms/timeline/build.rs
@@ -97,6 +97,18 @@ pub async fn build_and_append_pdu(
 			)));
 		}
 	}
+	// Validate proposed power-level changes against Space role cascading
+	// before the PDU is appended; rejection propagates via `?`.
+	if *pdu.kind() == TimelineEventType::RoomPowerLevels {
+		if let Ok(proposed) =
+			pdu.get_content::<RoomPowerLevelsEventContent>()
+		{
+			self.services
+				.roles
+				.validate_pl_change(&room_id, pdu.sender(), &proposed)
+				.await?;
+		}
+	}
+
 	if *pdu.kind() == TimelineEventType::RoomCreate {
 		trace!("Creating shortroomid for {room_id}");
 		self.services
diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs
index a35b502c..e97f703e 100644
--- a/src/service/rooms/timeline/mod.rs
+++ b/src/service/rooms/timeline/mod.rs
@@ -80,6 +80,7 @@ struct Services {
 	threads: Dep<rooms::threads::Service>,
 	search: Dep<rooms::search::Service>,
 	spaces: Dep<rooms::spaces::Service>,
+	roles: Dep<rooms::roles::Service>,
 	event_handler: Dep<rooms::event_handler::Service>,
 }
 
@@ -112,6 +113,7 @@ impl crate::Service for Service {
 			threads: args.depend::<rooms::threads::Service>("rooms::threads"),
 			search: args.depend::<rooms::search::Service>("rooms::search"),
 			spaces: args.depend::<rooms::spaces::Service>("rooms::spaces"),
+			roles: args.depend::<rooms::roles::Service>("rooms::roles"),
 			event_handler: args
 				.depend::<rooms::event_handler::Service>("rooms::event_handler"),
 		},
diff --git a/src/service/services.rs b/src/service/services.rs
index 60a7eeab..6356c6ea 100644
--- a/src/service/services.rs
+++ b/src/service/services.rs
@@ -94,6 +94,7 @@ impl Services {
 			outlier: build!(rooms::outlier::Service),
 			pdu_metadata: build!(rooms::pdu_metadata::Service),
 			read_receipt: build!(rooms::read_receipt::Service),
+			roles: build!(rooms::roles::Service),
 			search: build!(rooms::search::Service),
 			short: build!(rooms::short::Service),
 			spaces: build!(rooms::spaces::Service),