Compare commits
108 commits
nex/feat/p
...
main
| Author | SHA1 | Date | |
|---|---|---|---|
| 20b1529dc4 | |||
| bba5318ce8 | |||
| 1f91a74b27 | |||
| 5f901a560b | |||
| 59401e1786 | |||
| 95fa3b022a | |||
|
|
6b013bcf60 | ||
|
|
05a49ceb60 | ||
|
|
728c5828ba | ||
|
|
50c94d85a1 | ||
|
|
0cc188f62c | ||
|
|
6451671f66 | ||
|
|
ca21a885d5 | ||
|
|
4af4110f6d | ||
|
|
51b450c05c | ||
|
|
f9d1f71343 | ||
|
|
7901e4b996 | ||
|
|
7b6bf4b78e | ||
|
|
67d5619ccb | ||
|
|
bf001f96d6 | ||
|
|
ae2b87f03f | ||
|
|
957cd3502f | ||
|
|
a109542eb8 | ||
|
|
8c4844b00b | ||
|
|
eec7103910 | ||
|
|
43aa172829 | ||
|
|
9b4c483b6d | ||
|
|
b885e206ce | ||
|
|
07a935f625 | ||
|
|
d13801e976 | ||
|
|
5716c36b47 | ||
|
|
f11943b956 | ||
|
|
8b726a9c94 | ||
|
|
ffa3c53847 | ||
|
|
da8833fca4 | ||
|
|
267feb3c09 | ||
|
|
3d50af0943 | ||
|
|
9515019641 | ||
|
|
f0f53dfada | ||
| 3bfd10efab | |||
| 835d434d92 | |||
|
|
acef746d26 | ||
|
|
3356b60e97 | ||
|
|
c988c2b387 | ||
|
|
3121229707 | ||
|
|
ff85145ee8 | ||
|
|
f61d1a11e0 | ||
|
|
11ba8979ff | ||
|
|
f6956ccf12 | ||
|
|
977a5ac8c1 | ||
|
|
906c3df953 | ||
|
|
33e5fdc16f | ||
|
|
77ac17855a | ||
|
|
65ffcd2884 | ||
|
|
7ec88bdbfe | ||
|
|
da3fac8cb4 | ||
|
|
3366113939 | ||
|
|
9039784f41 | ||
|
|
7f165e5bbe | ||
|
|
c97111e3ca | ||
|
|
e8746760fa | ||
|
|
9dbd75e740 | ||
|
|
85b2fd91b9 | ||
|
|
6420c218a9 | ||
|
|
ec9402a328 | ||
|
|
d01f06a5c2 | ||
|
|
aee51b3b0d | ||
|
|
afcbccd9dd | ||
|
|
02448000f9 | ||
|
|
6af8918aa8 | ||
|
|
08f83cc438 | ||
|
|
a0468db121 | ||
|
|
4f23d566ed | ||
|
|
dac619b5f8 | ||
|
|
fdc9cc8074 | ||
|
|
40b1dabcca | ||
|
|
94c5af40cf | ||
|
|
36a3144757 | ||
|
|
220b61c589 | ||
|
|
38e93cde3e | ||
|
|
7e501cdb09 | ||
|
|
da182c162d | ||
|
|
9a3f7f4af7 | ||
|
|
5ce1f682f6 | ||
|
|
5feb08dff2 | ||
|
|
1e527c1075 | ||
|
|
c6943ae683 | ||
|
|
8932dacdc4 | ||
|
|
0be3d850ac | ||
|
|
57e7cf7057 | ||
|
|
1005585ccb | ||
|
|
1188566dbd | ||
|
|
0058212757 | ||
|
|
dbf8fd3320 | ||
|
|
ce295b079e | ||
|
|
5eb74bc1dd | ||
|
|
da561ab792 | ||
|
|
80c9bb4796 | ||
|
|
22a47d1e59 | ||
|
|
83883a002c | ||
|
|
8dd4b71e0e | ||
|
|
6fe3b1563c | ||
|
|
44d3825c8e | ||
|
|
d6c5484c3a | ||
|
|
1fd6056f3f | ||
|
|
525a0ae52b | ||
|
|
60210754d9 | ||
|
|
08dd787083 |
126 changed files with 7574 additions and 1338 deletions
|
|
@ -44,7 +44,7 @@ runs:
|
|||
|
||||
- name: Login to builtin registry
|
||||
if: ${{ env.BUILTIN_REGISTRY_ENABLED == 'true' }}
|
||||
uses: docker/login-action@v3
|
||||
uses: docker/login-action@v4
|
||||
with:
|
||||
registry: ${{ env.BUILTIN_REGISTRY }}
|
||||
username: ${{ inputs.registry_user }}
|
||||
|
|
@ -52,7 +52,7 @@ runs:
|
|||
|
||||
- name: Set up Docker Buildx
|
||||
if: ${{ env.BUILTIN_REGISTRY_ENABLED == 'true' }}
|
||||
uses: docker/setup-buildx-action@v3
|
||||
uses: docker/setup-buildx-action@v4
|
||||
with:
|
||||
# Use persistent BuildKit if BUILDKIT_ENDPOINT is set (e.g. tcp://buildkit:8125)
|
||||
driver: ${{ env.BUILDKIT_ENDPOINT != '' && 'remote' || 'docker-container' }}
|
||||
|
|
@ -61,7 +61,7 @@ runs:
|
|||
- name: Extract metadata (tags) for Docker
|
||||
if: ${{ env.BUILTIN_REGISTRY_ENABLED == 'true' }}
|
||||
id: meta
|
||||
uses: docker/metadata-action@v5
|
||||
uses: docker/metadata-action@v6
|
||||
with:
|
||||
flavor: |
|
||||
latest=auto
|
||||
|
|
|
|||
|
|
@ -67,7 +67,7 @@ runs:
|
|||
uses: ./.forgejo/actions/rust-toolchain
|
||||
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v3
|
||||
uses: docker/setup-buildx-action@v4
|
||||
with:
|
||||
# Use persistent BuildKit if BUILDKIT_ENDPOINT is set (e.g. tcp://buildkit:8125)
|
||||
driver: ${{ env.BUILDKIT_ENDPOINT != '' && 'remote' || 'docker-container' }}
|
||||
|
|
@ -79,7 +79,7 @@ runs:
|
|||
|
||||
- name: Login to builtin registry
|
||||
if: ${{ env.BUILTIN_REGISTRY_ENABLED == 'true' }}
|
||||
uses: docker/login-action@v3
|
||||
uses: docker/login-action@v4
|
||||
with:
|
||||
registry: ${{ env.BUILTIN_REGISTRY }}
|
||||
username: ${{ inputs.registry_user }}
|
||||
|
|
@ -87,7 +87,7 @@ runs:
|
|||
|
||||
- name: Extract metadata (labels, annotations) for Docker
|
||||
id: meta
|
||||
uses: docker/metadata-action@v5
|
||||
uses: docker/metadata-action@v6
|
||||
with:
|
||||
images: ${{ inputs.images }}
|
||||
# default labels & annotations: https://github.com/docker/metadata-action/blob/master/src/meta.ts#L509
|
||||
|
|
@ -152,7 +152,7 @@ runs:
|
|||
|
||||
- name: inject cache into docker
|
||||
if: ${{ env.BUILDKIT_ENDPOINT == '' }}
|
||||
uses: https://github.com/reproducible-containers/buildkit-cache-dance@v3.3.0
|
||||
uses: https://github.com/reproducible-containers/buildkit-cache-dance@v3.3.2
|
||||
with:
|
||||
cache-map: |
|
||||
{
|
||||
|
|
|
|||
|
|
@ -62,10 +62,6 @@ sync:
|
|||
target: registry.gitlab.com/continuwuity/continuwuity
|
||||
type: repository
|
||||
<<: *tags-main
|
||||
- source: *source
|
||||
target: git.nexy7574.co.uk/mirrored/continuwuity
|
||||
type: repository
|
||||
<<: *tags-releases
|
||||
- source: *source
|
||||
target: ghcr.io/continuwuity/continuwuity
|
||||
type: repository
|
||||
|
|
|
|||
|
|
@ -59,7 +59,7 @@ jobs:
|
|||
registry_password: ${{ secrets.BUILTIN_REGISTRY_PASSWORD || secrets.GITHUB_TOKEN }}
|
||||
- name: Build and push Docker image by digest
|
||||
id: build
|
||||
uses: docker/build-push-action@v6
|
||||
uses: docker/build-push-action@v7
|
||||
with:
|
||||
context: .
|
||||
file: "docker/Dockerfile"
|
||||
|
|
@ -146,7 +146,7 @@ jobs:
|
|||
registry_password: ${{ secrets.BUILTIN_REGISTRY_PASSWORD || secrets.GITHUB_TOKEN }}
|
||||
- name: Build and push max-perf Docker image by digest
|
||||
id: build
|
||||
uses: docker/build-push-action@v6
|
||||
uses: docker/build-push-action@v7
|
||||
with:
|
||||
context: .
|
||||
file: "docker/Dockerfile"
|
||||
|
|
|
|||
|
|
@ -43,7 +43,7 @@ jobs:
|
|||
name: Renovate
|
||||
runs-on: ubuntu-latest
|
||||
container:
|
||||
image: ghcr.io/renovatebot/renovate:42.70.2@sha256:3c2ac1b94fa92ef2fa4d1a0493f2c3ba564454720a32fdbcac2db2846ff1ee47
|
||||
image: ghcr.io/renovatebot/renovate:43.59.4@sha256:f951508dea1e7d71cbe6deca298ab0a05488e7631229304813f630cc06010892
|
||||
options: --tmpfs /tmp:exec
|
||||
steps:
|
||||
- name: Checkout
|
||||
|
|
|
|||
|
|
@ -23,7 +23,7 @@ jobs:
|
|||
persist-credentials: true
|
||||
token: ${{ secrets.FORGEJO_TOKEN }}
|
||||
|
||||
- uses: https://github.com/cachix/install-nix-action@4e002c8ec80594ecd40e759629461e26c8abed15 # v31.9.0
|
||||
- uses: https://github.com/cachix/install-nix-action@19effe9fe722874e6d46dd7182e4b8b7a43c4a99 # v31.10.0
|
||||
with:
|
||||
nix_path: nixpkgs=channel:nixos-unstable
|
||||
|
||||
|
|
|
|||
4
.github/FUNDING.yml
vendored
4
.github/FUNDING.yml
vendored
|
|
@ -1,4 +1,4 @@
|
|||
github: [JadedBlueEyes, nexy7574, gingershaped]
|
||||
custom:
|
||||
- https://ko-fi.com/nexy7574
|
||||
- https://ko-fi.com/JadedBlueEyes
|
||||
- https://timedout.uk/donate.html
|
||||
- https://jade.ellis.link/sponsors
|
||||
|
|
|
|||
|
|
@ -1,5 +1,6 @@
|
|||
default_install_hook_types:
|
||||
- pre-commit
|
||||
- pre-push
|
||||
- commit-msg
|
||||
default_stages:
|
||||
- pre-commit
|
||||
|
|
@ -23,7 +24,7 @@ repos:
|
|||
- id: check-added-large-files
|
||||
|
||||
- repo: https://github.com/crate-ci/typos
|
||||
rev: v1.43.5
|
||||
rev: v1.44.0
|
||||
hooks:
|
||||
- id: typos
|
||||
- id: typos
|
||||
|
|
@ -31,7 +32,7 @@ repos:
|
|||
stages: [commit-msg]
|
||||
|
||||
- repo: https://github.com/crate-ci/committed
|
||||
rev: v1.1.10
|
||||
rev: v1.1.11
|
||||
hooks:
|
||||
- id: committed
|
||||
|
||||
|
|
@ -45,3 +46,14 @@ repos:
|
|||
pass_filenames: false
|
||||
stages:
|
||||
- pre-commit
|
||||
|
||||
- repo: local
|
||||
hooks:
|
||||
- id: cargo-clippy
|
||||
name: cargo clippy
|
||||
entry: cargo clippy -- -D warnings
|
||||
language: system
|
||||
pass_filenames: false
|
||||
types: [rust]
|
||||
stages:
|
||||
- pre-push
|
||||
|
|
|
|||
|
|
@ -22,22 +22,18 @@ Continuwuity uses pre-commit hooks to enforce various coding standards and catch
|
|||
- Validating YAML, JSON, and TOML files
|
||||
- Checking for merge conflicts
|
||||
|
||||
You can run these checks locally by installing [prefligit](https://github.com/j178/prefligit):
|
||||
You can run these checks locally by installing [prek](https://github.com/j178/prek):
|
||||
|
||||
|
||||
```bash
|
||||
# Requires UV: https://docs.astral.sh/uv/getting-started/installation/
|
||||
# Mac/linux: curl -LsSf https://astral.sh/uv/install.sh | sh
|
||||
# Windows: powershell -ExecutionPolicy ByPass -c "irm https://astral.sh/uv/install.ps1 | iex"
|
||||
|
||||
# Install prefligit using cargo-binstall
|
||||
cargo binstall prefligit
|
||||
# Install prek using cargo-binstall
|
||||
cargo binstall prek
|
||||
|
||||
# Install git hooks to run checks automatically
|
||||
prefligit install
|
||||
prek install
|
||||
|
||||
# Run all checks
|
||||
prefligit --all-files
|
||||
prek --all-files
|
||||
```
|
||||
|
||||
Alternatively, you can use [pre-commit](https://pre-commit.com/):
|
||||
|
|
@ -54,7 +50,7 @@ pre-commit install
|
|||
pre-commit run --all-files
|
||||
```
|
||||
|
||||
These same checks are run in CI via the prefligit-checks workflow to ensure consistency. These must pass before the PR is merged.
|
||||
These same checks are run in CI via the prek-checks workflow to ensure consistency. These must pass before the PR is merged.
|
||||
|
||||
### Running tests locally
|
||||
|
||||
|
|
|
|||
839
Cargo.lock
generated
839
Cargo.lock
generated
File diff suppressed because it is too large
Load diff
11
Cargo.toml
11
Cargo.toml
|
|
@ -12,7 +12,7 @@ license = "Apache-2.0"
|
|||
# See also `rust-toolchain.toml`
|
||||
readme = "README.md"
|
||||
repository = "https://forgejo.ellis.link/continuwuation/continuwuity"
|
||||
version = "0.5.6"
|
||||
version = "0.5.7-alpha.1"
|
||||
|
||||
[workspace.metadata.crane]
|
||||
name = "conduwuit"
|
||||
|
|
@ -99,7 +99,7 @@ features = [
|
|||
[workspace.dependencies.axum-extra]
|
||||
version = "0.12.0"
|
||||
default-features = false
|
||||
features = ["typed-header", "tracing"]
|
||||
features = ["typed-header", "tracing", "cookie"]
|
||||
|
||||
[workspace.dependencies.axum-server]
|
||||
version = "0.7.2"
|
||||
|
|
@ -159,7 +159,7 @@ features = ["raw_value"]
|
|||
|
||||
# Used for appservice registration files
|
||||
[workspace.dependencies.serde-saphyr]
|
||||
version = "0.0.19"
|
||||
version = "0.0.21"
|
||||
|
||||
# Used to load forbidden room/user regex from config
|
||||
[workspace.dependencies.serde_regex]
|
||||
|
|
@ -344,7 +344,7 @@ version = "0.1.2"
|
|||
[workspace.dependencies.ruma]
|
||||
git = "https://forgejo.ellis.link/continuwuation/ruwuma"
|
||||
#branch = "conduwuit-changes"
|
||||
rev = "6c65b295d2c109ab31165c2db016097f3e74d02e"
|
||||
rev = "bb12ed288a31a23aa11b10ba0fad22b7f985eb88"
|
||||
features = [
|
||||
"compat",
|
||||
"rand",
|
||||
|
|
@ -969,3 +969,6 @@ needless_raw_string_hashes = "allow"
|
|||
|
||||
# TODO: Enable this lint & fix all instances
|
||||
collapsible_if = "allow"
|
||||
|
||||
# TODO: break these apart
|
||||
cognitive_complexity = "allow"
|
||||
|
|
|
|||
|
|
@ -6,10 +6,10 @@ set -euo pipefail
|
|||
COMPLEMENT_SRC="${COMPLEMENT_SRC:-$1}"
|
||||
|
||||
# A `.jsonl` file to write test logs to
|
||||
LOG_FILE="${2:-complement_test_logs.jsonl}"
|
||||
LOG_FILE="${2:-tests/test_results/complement/test_logs.jsonl}"
|
||||
|
||||
# A `.jsonl` file to write test results to
|
||||
RESULTS_FILE="${3:-complement_test_results.jsonl}"
|
||||
RESULTS_FILE="${3:-tests/test_results/complement/test_results.jsonl}"
|
||||
|
||||
# The base docker image to use for complement tests
|
||||
# You can build the default with `docker build -t continuwuity:complement -f ./docker/complement.Dockerfile .`
|
||||
|
|
|
|||
1
changelog.d/+6368729a.feature.md
Normal file
1
changelog.d/+6368729a.feature.md
Normal file
|
|
@ -0,0 +1 @@
|
|||
Added support for using an admin command to issue self-service password reset links.
|
||||
1
changelog.d/+6e57599d.bugfix.md
Normal file
1
changelog.d/+6e57599d.bugfix.md
Normal file
|
|
@ -0,0 +1 @@
|
|||
Stopped left rooms from being unconditionally sent on initial sync, hopefully fixing spurious appearances of left rooms in some clients (and making sync faster as a bonus). Contributed by @ginger
|
||||
1
changelog.d/+space-permission-cascading.feature.md
Normal file
1
changelog.d/+space-permission-cascading.feature.md
Normal file
|
|
@ -0,0 +1 @@
|
|||
Add Space permission cascading: power levels cascade from Spaces to child rooms, role-based room access with custom roles, continuous enforcement (auto-join/kick), and admin commands for role management. Server-wide default controlled by `space_permission_cascading` config flag (off by default), with per-Space overrides via `!admin space roles enable/disable <space>`.
|
||||
1
changelog.d/1265.bugfix
Normal file
1
changelog.d/1265.bugfix
Normal file
|
|
@ -0,0 +1 @@
|
|||
Fixed corrupted appservice registrations causing the server to enter a crash loop. Contributed by @nex.
|
||||
1
changelog.d/1371.feature.md
Normal file
1
changelog.d/1371.feature.md
Normal file
|
|
@ -0,0 +1 @@
|
|||
Re-added support for reading registration tokens from a file. Contributed by @ginger and @benbot.
|
||||
1
changelog.d/1448.bugfix
Normal file
1
changelog.d/1448.bugfix
Normal file
|
|
@ -0,0 +1 @@
|
|||
Prevent removing the admin room alias (`#admins`) to avoid accidentally breaking admin room functionality. Contributed by @0xnim
|
||||
|
|
@ -1 +0,0 @@
|
|||
Updated [MSC4284: Policy Servers](https://github.com/matrix-org/matrix-spec-proposals/pull/4284) implementation to support the newly stabilised proposal. Contributed by @nex.
|
||||
1
changelog.d/1527.feature.md
Normal file
1
changelog.d/1527.feature.md
Normal file
|
|
@ -0,0 +1 @@
|
|||
Add new config option to allow or disallow search engine indexing through a `<meta ../>` tag. Defaults to blocking indexing (`content="noindex"`). Contributed by @s1lv3r and @ginger.
|
||||
|
|
@ -11,7 +11,7 @@ allow_guest_registration = true
|
|||
allow_public_room_directory_over_federation = true
|
||||
allow_registration = true
|
||||
database_path = "/database"
|
||||
log = "trace,h2=debug,hyper=debug"
|
||||
log = "trace,h2=debug,hyper=debug,conduwuit_database=warn,conduwuit_service::manager=info,conduwuit_api::router=error,conduwuit_router=error,tower_http=error"
|
||||
port = [8008, 8448]
|
||||
trusted_servers = []
|
||||
only_query_trusted_key_servers = false
|
||||
|
|
@ -24,7 +24,7 @@ url_preview_domain_explicit_denylist = ["*"]
|
|||
media_compat_file_link = false
|
||||
media_startup_check = true
|
||||
prune_missing_media = true
|
||||
log_colors = true
|
||||
log_colors = false
|
||||
admin_room_notices = false
|
||||
allow_check_for_updates = false
|
||||
intentionally_unknown_config_option_for_testing = true
|
||||
|
|
@ -47,6 +47,7 @@ federation_idle_timeout = 300
|
|||
sender_timeout = 300
|
||||
sender_idle_timeout = 300
|
||||
sender_retry_backoff_limit = 300
|
||||
force_disable_first_run_mode = true
|
||||
|
||||
[global.tls]
|
||||
dual_protocol = true
|
||||
|
|
|
|||
|
|
@ -25,6 +25,10 @@
|
|||
#
|
||||
# Also see the `[global.well_known]` config section at the very bottom.
|
||||
#
|
||||
# If `client` is not set under `[global.well_known]`, the server name will
|
||||
# be used as the base domain for user-facing links (such as password
|
||||
# reset links) created by Continuwuity.
|
||||
#
|
||||
# Examples of delegation:
|
||||
# - https://continuwuity.org/.well-known/matrix/server
|
||||
# - https://continuwuity.org/.well-known/matrix/client
|
||||
|
|
@ -470,24 +474,43 @@
|
|||
#
|
||||
#suspend_on_register = false
|
||||
|
||||
# Server-wide default for space permission cascading (power levels and
|
||||
# role-based access). Individual Spaces can override this via the
|
||||
# `com.continuwuity.space.cascading` state event or the admin command
|
||||
# `!admin space roles enable/disable <space>`.
|
||||
#
|
||||
#space_permission_cascading = false
|
||||
|
||||
# Maximum number of spaces to cache role data for. When exceeded the
|
||||
# cache is cleared and repopulated on demand.
|
||||
#
|
||||
#space_roles_cache_flush_threshold = 1000
|
||||
|
||||
# Enabling this setting opens registration to anyone without restrictions.
|
||||
# This makes your server vulnerable to abuse
|
||||
#
|
||||
#yes_i_am_very_very_sure_i_want_an_open_registration_server_prone_to_abuse = false
|
||||
|
||||
# A static registration token that new users will have to provide when
|
||||
# creating an account. If unset and `allow_registration` is true,
|
||||
# you must set
|
||||
# `yes_i_am_very_very_sure_i_want_an_open_registration_server_prone_to_abuse`
|
||||
# to true to allow open registration without any conditions.
|
||||
#
|
||||
# If you do not want to set a static token, the `!admin token` commands
|
||||
# may also be used to manage registration tokens.
|
||||
# creating an account. This token does not supersede tokens from other
|
||||
# sources, such as the `!admin token` command or the
|
||||
# `registration_token_file` configuration option.
|
||||
#
|
||||
# example: "o&^uCtes4HPf0Vu@F20jQeeWE7"
|
||||
#
|
||||
#registration_token =
|
||||
|
||||
# A path to a file containing static registration tokens, one per line.
|
||||
# Tokens in this file do not supersede tokens from other sources, such as
|
||||
# the `!admin token` command or the `registration_token` configuration
|
||||
# option.
|
||||
#
|
||||
# The file will be read once, when Continuwuity starts. It is not
|
||||
# currently reread when the server configuration is reloaded. If the file
|
||||
# cannot be read, Continuwuity will fail to start.
|
||||
#
|
||||
#registration_token_file =
|
||||
|
||||
# The public site key for reCaptcha. If this is provided, reCaptcha
|
||||
# becomes required during registration. If both captcha *and*
|
||||
# registration token are enabled, both will be required during
|
||||
|
|
@ -1498,6 +1521,11 @@
|
|||
#
|
||||
#url_preview_user_agent = "continuwuity/<version> (bot; +https://continuwuity.org)"
|
||||
|
||||
# Determines whether audio and video files will be downloaded for URL
|
||||
# previews.
|
||||
#
|
||||
#url_preview_allow_audio_video = false
|
||||
|
||||
# List of forbidden room aliases and room IDs as strings of regex
|
||||
# patterns.
|
||||
#
|
||||
|
|
@ -1783,6 +1811,11 @@
|
|||
#
|
||||
#config_reload_signal = true
|
||||
|
||||
# Allow search engines and crawlers to index Continuwuity's built-in
|
||||
# webpages served under the `/_continuwuity/` prefix.
|
||||
#
|
||||
#allow_web_indexing = false
|
||||
|
||||
[global.tls]
|
||||
|
||||
# Path to a valid TLS certificate file.
|
||||
|
|
|
|||
|
|
@ -48,7 +48,7 @@ EOF
|
|||
|
||||
# Developer tool versions
|
||||
# renovate: datasource=github-releases depName=cargo-bins/cargo-binstall
|
||||
ENV BINSTALL_VERSION=1.17.5
|
||||
ENV BINSTALL_VERSION=1.17.7
|
||||
# renovate: datasource=github-releases depName=psastras/sbom-rs
|
||||
ENV CARGO_SBOM_VERSION=0.9.1
|
||||
# renovate: datasource=crate depName=lddtree
|
||||
|
|
|
|||
|
|
@ -18,7 +18,7 @@ RUN --mount=type=cache,target=/etc/apk/cache apk add \
|
|||
|
||||
# Developer tool versions
|
||||
# renovate: datasource=github-releases depName=cargo-bins/cargo-binstall
|
||||
ENV BINSTALL_VERSION=1.17.5
|
||||
ENV BINSTALL_VERSION=1.17.7
|
||||
# renovate: datasource=github-releases depName=psastras/sbom-rs
|
||||
ENV CARGO_SBOM_VERSION=0.9.1
|
||||
# renovate: datasource=crate depName=lddtree
|
||||
|
|
|
|||
|
|
@ -34,6 +34,11 @@
|
|||
"name": "troubleshooting",
|
||||
"label": "Troubleshooting"
|
||||
},
|
||||
{
|
||||
"type": "dir",
|
||||
"name": "advanced",
|
||||
"label": "Advanced"
|
||||
},
|
||||
"security",
|
||||
{
|
||||
"type": "dir-section-header",
|
||||
|
|
@ -64,6 +69,11 @@
|
|||
"label": "Configuration Reference",
|
||||
"name": "/reference/config"
|
||||
},
|
||||
{
|
||||
"type": "file",
|
||||
"label": "Environment Variables",
|
||||
"name": "/reference/environment-variables"
|
||||
},
|
||||
{
|
||||
"type": "dir",
|
||||
"label": "Admin Command Reference",
|
||||
|
|
|
|||
|
|
@ -2,7 +2,7 @@
|
|||
{
|
||||
"text": "Guide",
|
||||
"link": "/introduction",
|
||||
"activeMatch": "^/(introduction|configuration|deploying|calls|appservices|maintenance|troubleshooting)"
|
||||
"activeMatch": "^/(introduction|configuration|deploying|calls|appservices|maintenance|troubleshooting|advanced)"
|
||||
},
|
||||
{
|
||||
"text": "Development",
|
||||
|
|
|
|||
7
docs/advanced/_meta.json
Normal file
7
docs/advanced/_meta.json
Normal file
|
|
@ -0,0 +1,7 @@
|
|||
[
|
||||
{
|
||||
"type": "file",
|
||||
"name": "delegation",
|
||||
"label": "Delegation / split-domain"
|
||||
}
|
||||
]
|
||||
206
docs/advanced/delegation.mdx
Normal file
206
docs/advanced/delegation.mdx
Normal file
|
|
@ -0,0 +1,206 @@
|
|||
# Delegation/split-domain deployment
|
||||
|
||||
Matrix allows clients and servers to discover a homeserver's "true" destination via **`.well-known` delegation**. This is especially useful if you would like to:
|
||||
|
||||
- Serve Continuwuity on a subdomain while having only the base domain for your usernames
|
||||
- Use a port other than `:8448` for server-to-server connections
|
||||
|
||||
This guide will show you how to have `@user:example.com` usernames while serving Continuwuity on `https://matrix.example.com`. It assumes you are using port 443 for both client-to-server connections and server-to-server federation.
|
||||
|
||||
## Configuration
|
||||
|
||||
First, ensure you have set up A/AAAA records for `matrix.example.com` and `example.com` pointing to your IP.
|
||||
|
||||
Then, ensure that the `server_name` field matches your intended username suffix. If this is not the case, you **MUST** wipe the database directory and reinstall Continuwuity with your desired `server_name`.
|
||||
|
||||
Then, in the `[global.well_known]` section of your config file, add the following fields:
|
||||
|
||||
```toml
|
||||
[global.well_known]
|
||||
|
||||
client = "https://matrix.example.com"
|
||||
|
||||
# port number MUST be specified
|
||||
server = "matrix.example.com:443"
|
||||
|
||||
# (optional) customize your support contacts
|
||||
#support_page =
|
||||
#support_role = "m.role.admin"
|
||||
#support_email =
|
||||
#support_mxid = "@user:example.com"
|
||||
```
|
||||
|
||||
Alternatively if you are using Docker, you can set the `CONTINUWUITY_WELL_KNOWN` environment variable as below:
|
||||
|
||||
```yaml
|
||||
services:
|
||||
continuwuity:
|
||||
...
|
||||
environment:
|
||||
CONTINUWUITY_WELL_KNOWN: |
|
||||
{
|
||||
client=https://matrix.example.com,
|
||||
server=matrix.example.com:443
|
||||
}
|
||||
```
|
||||
|
||||
## Serving with a reverse proxy
|
||||
|
||||
After doing the steps above, Continuwuity will serve these 3 JSON files:
|
||||
|
||||
- `/.well-known/matrix/client`: for Client-Server discovery
|
||||
- `/.well-known/matrix/server`: for Server-Server (federation) discovery
|
||||
- `/.well-known/matrix/support`: admin contact details (strongly recommended to have)
|
||||
|
||||
To enable full discovery, you will need to reverse proxy these paths from the base domain back to Continuwuity.
|
||||
|
||||
<details>
|
||||
|
||||
<summary>For Caddy</summary>
|
||||
|
||||
```
|
||||
matrix.example.com:443 {
|
||||
reverse_proxy 127.0.0.1:8008
|
||||
}
|
||||
|
||||
example.com:443 {
|
||||
reverse_proxy /.well-known/matrix* 127.0.0.1:8008
|
||||
}
|
||||
```
|
||||
|
||||
</details>
|
||||
|
||||
<details>
|
||||
|
||||
<summary>For Traefik (via Docker labels)</summary>
|
||||
|
||||
```
|
||||
services:
|
||||
continuwuity:
|
||||
...
|
||||
labels:
|
||||
- "traefik.enable=true"
|
||||
- "traefik.http.routers.continuwuity.rule=(Host(`matrix.example.com`) || (Host(`example.com`) && PathPrefix(`/.well-known/matrix`)))"
|
||||
- "traefik.http.routers.continuwuity.service=continuwuity"
|
||||
- "traefik.http.services.continuwuity.loadbalancer.server.port=8008"
|
||||
```
|
||||
|
||||
</details>
|
||||
|
||||
Restart Continuwuity and your reverse proxy. Once that's done, visit these routes and check that the responses match the examples below:
|
||||
|
||||
<details open>
|
||||
|
||||
<summary>`https://example.com/.well-known/matrix/server`</summary>
|
||||
|
||||
```json
|
||||
{
|
||||
"m.server": "matrix.example.com:443"
|
||||
}
|
||||
```
|
||||
|
||||
</details>
|
||||
|
||||
<details open>
|
||||
|
||||
<summary>`https://example.com/.well-known/matrix/client`</summary>
|
||||
|
||||
```json
|
||||
{
|
||||
"m.homeserver": {
|
||||
"base_url": "https://matrix.example.com/"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
</details>
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### Cannot log in with web clients
|
||||
|
||||
Make sure there is an `Access-Control-Allow-Origin: *` header in your `/.well-known/matrix/client` path. While Continuwuity serves this header by default, it may be dropped by reverse proxies or other middlewares.
|
||||
|
||||
---
|
||||
|
||||
## Using SRV records (not recommended)
|
||||
|
||||
:::warning
|
||||
The following methods are **not recommended** due to increased complexity with little benefits. If you have already set up `.well-known` delegation as above, you can safely skip this part.
|
||||
:::
|
||||
|
||||
The following methods uses SRV DNS records and only work with federation traffic. They are only included for completeness.
|
||||
|
||||
<details>
|
||||
|
||||
<summary>Using only SRV records</summary>
|
||||
|
||||
If you can't set up `/.well-known/matrix/server` on :443 for some reason, you can set up a SRV record (via your DNS provider) as below:
|
||||
|
||||
- Service and name: `_matrix-fed._tcp.example.com.`
|
||||
- Priority: `10` (can be any number)
|
||||
- Weight: `10` (can be any number)
|
||||
- Port: `443`
|
||||
- Target: `matrix.example.com.`
|
||||
|
||||
On the target's IP at port 443, you must configure a valid route and cert for your server name, `example.com`. Therefore, this method only works to redirect traffic into the right IP/port combo, and can not delegate your federation to a different domain.
|
||||
|
||||
</details>
|
||||
|
||||
<details>
|
||||
|
||||
<summary>Using SRV records + .well-known</summary>
|
||||
|
||||
You can also set up `/.well-known/matrix/server` with a delegated domain but no ports:
|
||||
|
||||
```toml
|
||||
[global.well_known]
|
||||
server = "matrix.example.com"
|
||||
```
|
||||
|
||||
Then, set up a SRV record (via your DNS provider) to announce the port number as below:
|
||||
|
||||
- Service and name: `_matrix-fed._tcp.matrix.example.com.`
|
||||
- Priority: `10` (can be any number)
|
||||
- Weight: `10` (can be any number)
|
||||
- Port: `443`
|
||||
- Target: `matrix.example.com.`
|
||||
|
||||
On the target's IP at port 443, you'll need to provide a valid route and cert for `matrix.example.com`. It provides the same feature as pure `.well-known` delegation, albeit with more parts to handle.
|
||||
|
||||
</details>
|
||||
|
||||
<details>
|
||||
|
||||
<summary>Using SRV records as a fallback for .well-known delegation</summary>
|
||||
|
||||
Assume your delegation is as below:
|
||||
|
||||
```toml
|
||||
[global.well_known]
|
||||
server = "example.com:443"
|
||||
```
|
||||
|
||||
If your Continuwuity instance becomes temporarily unreachable, other servers will not be able to find your `/.well-known/matrix/server` file, and defaults to using `server_name:8448`. This incorrect cache can persist for a long time, and would hinder re-federation when your server eventually comes back online.
|
||||
|
||||
If you want other servers to default to using port :443 even when it is offline, you could set up a SRV record (via your DNS provider) as follows:
|
||||
|
||||
- Service and name: `_matrix-fed._tcp.example.com.`
|
||||
- Priority: `10` (can be any number)
|
||||
- Weight: `10` (can be any number)
|
||||
- Port: `443`
|
||||
- Target: `example.com.`
|
||||
|
||||
On the target's IP at port 443, you'll need to provide a valid route and cert for `example.com`.
|
||||
|
||||
</details>
|
||||
|
||||
---
|
||||
|
||||
## Related Documentation
|
||||
|
||||
See the following Matrix Specs for full details on client/server resolution mechanisms:
|
||||
|
||||
- [Server-to-Server resolution](https://spec.matrix.org/v1.17/server-server-api/#resolving-server-names) (see this for more information on SRV records)
|
||||
- [Client-to-Server resolution](https://spec.matrix.org/v1.17/client-server-api/#server-discovery)
|
||||
- [MSC1929: Homeserver Admin Contact and Support page](https://github.com/matrix-org/matrix-spec-proposals/pull/1929)
|
||||
|
|
@ -13,8 +13,9 @@ settings.
|
|||
|
||||
The config file to use can be specified on the commandline when running
|
||||
Continuwuity by specifying the `-c`, `--config` flag. Alternatively, you can use
|
||||
the environment variable `CONDUWUIT_CONFIG` to specify the config file to used.
|
||||
Conduit's environment variables are supported for backwards compatibility.
|
||||
the environment variable `CONTINUWUITY_CONFIG` to specify the config file to be
|
||||
used; see [the section on environment variables](#environment-variables) for
|
||||
more information.
|
||||
|
||||
## Option commandline flag
|
||||
|
||||
|
|
@ -52,13 +53,15 @@ This commandline argument can be paired with the `--option` flag.
|
|||
|
||||
All of the settings that are found in the config file can be specified by using
|
||||
environment variables. The environment variable names should be all caps and
|
||||
prefixed with `CONDUWUIT_`.
|
||||
prefixed with `CONTINUWUITY_`.
|
||||
|
||||
For example, if the setting you are changing is `max_request_size`, then the
|
||||
environment variable to set is `CONDUWUIT_MAX_REQUEST_SIZE`.
|
||||
environment variable to set is `CONTINUWUITY_MAX_REQUEST_SIZE`.
|
||||
|
||||
To modify config options not in the `[global]` context such as
|
||||
`[global.well_known]`, use the `__` suffix split: `CONDUWUIT_WELL_KNOWN__SERVER`
|
||||
`[global.well_known]`, use the `__` suffix split:
|
||||
`CONTINUWUITY_WELL_KNOWN__SERVER`
|
||||
|
||||
Conduit's environment variables are supported for backwards compatibility (e.g.
|
||||
Conduit and conduwuit's environment variables are also supported for backwards
|
||||
compatibility, via the `CONDUIT_` and `CONDUWUIT_` prefixes respectively (e.g.
|
||||
`CONDUIT_SERVER_NAME`).
|
||||
|
|
|
|||
|
|
@ -6,9 +6,9 @@ services:
|
|||
### then you are ready to go.
|
||||
image: forgejo.ellis.link/continuwuation/continuwuity:latest
|
||||
restart: unless-stopped
|
||||
command: /sbin/conduwuit
|
||||
volumes:
|
||||
- db:/var/lib/continuwuity
|
||||
- /etc/resolv.conf:/etc/resolv.conf:ro # Use the host's DNS resolver rather than Docker's.
|
||||
#- ./continuwuity.toml:/etc/continuwuity.toml
|
||||
networks:
|
||||
- proxy
|
||||
|
|
|
|||
|
|
@ -16,14 +16,14 @@ services:
|
|||
restart: unless-stopped
|
||||
labels:
|
||||
caddy: example.com
|
||||
caddy.0_respond: /.well-known/matrix/server {"m.server":"matrix.example.com:443"}
|
||||
caddy.1_respond: /.well-known/matrix/client {"m.server":{"base_url":"https://matrix.example.com"},"m.homeserver":{"base_url":"https://matrix.example.com"},"org.matrix.msc3575.proxy":{"url":"https://matrix.example.com"}}
|
||||
caddy.reverse_proxy: /.well-known/matrix/* homeserver:6167
|
||||
|
||||
homeserver:
|
||||
### If you already built the Continuwuity image with 'docker build' or want to use a registry image,
|
||||
### then you are ready to go.
|
||||
image: forgejo.ellis.link/continuwuation/continuwuity:latest
|
||||
restart: unless-stopped
|
||||
command: /sbin/conduwuit
|
||||
volumes:
|
||||
- db:/var/lib/continuwuity
|
||||
- /etc/resolv.conf:/etc/resolv.conf:ro # Use the host's DNS resolver rather than Docker's.
|
||||
|
|
@ -42,6 +42,10 @@ services:
|
|||
#CONTINUWUITY_LOG: warn,state_res=warn
|
||||
CONTINUWUITY_ADDRESS: 0.0.0.0
|
||||
#CONTINUWUITY_CONFIG: '/etc/continuwuity.toml' # Uncomment if you mapped config toml above
|
||||
|
||||
# Required for .well-known delegation - edit these according to your chosen domain
|
||||
CONTINUWUITY_WELL_KNOWN__CLIENT: https://matrix.example.com
|
||||
CONTINUWUITY_WELL_KNOWN__SERVER: matrix.example.com:443
|
||||
networks:
|
||||
- caddy
|
||||
labels:
|
||||
|
|
|
|||
|
|
@ -6,6 +6,7 @@ services:
|
|||
### then you are ready to go.
|
||||
image: forgejo.ellis.link/continuwuation/continuwuity:latest
|
||||
restart: unless-stopped
|
||||
command: /sbin/conduwuit
|
||||
volumes:
|
||||
- db:/var/lib/continuwuity
|
||||
- /etc/resolv.conf:/etc/resolv.conf:ro # Use the host's DNS resolver rather than Docker's.
|
||||
|
|
|
|||
|
|
@ -6,6 +6,7 @@ services:
|
|||
### then you are ready to go.
|
||||
image: forgejo.ellis.link/continuwuation/continuwuity:latest
|
||||
restart: unless-stopped
|
||||
command: /sbin/conduwuit
|
||||
ports:
|
||||
- 8448:6167
|
||||
volumes:
|
||||
|
|
|
|||
|
|
@ -2,28 +2,26 @@
|
|||
|
||||
## Docker
|
||||
|
||||
To run Continuwuity with Docker, you can either build the image yourself or pull it
|
||||
from a registry.
|
||||
To run Continuwuity with Docker, you can either build the image yourself or pull
|
||||
it from a registry.
|
||||
|
||||
### Use a registry
|
||||
|
||||
OCI images for Continuwuity are available in the registries listed below.
|
||||
Available OCI images:
|
||||
|
||||
| Registry | Image | Notes |
|
||||
| --------------- | --------------------------------------------------------------- | -----------------------|
|
||||
| Forgejo Registry| [forgejo.ellis.link/continuwuation/continuwuity:latest](https://forgejo.ellis.link/continuwuation/-/packages/container/continuwuity/latest) | Latest tagged image. |
|
||||
| Forgejo Registry| [forgejo.ellis.link/continuwuation/continuwuity:main](https://forgejo.ellis.link/continuwuation/-/packages/container/continuwuity/main) | Main branch image. |
|
||||
| Forgejo Registry| [forgejo.ellis.link/continuwuation/continuwuity:latest-maxperf](https://forgejo.ellis.link/continuwuation/-/packages/container/continuwuity/latest-maxperf) | [Performance optimised version.](./generic.mdx#performance-optimised-builds) |
|
||||
| Forgejo Registry| [forgejo.ellis.link/continuwuation/continuwuity:main-maxperf](https://forgejo.ellis.link/continuwuation/-/packages/container/continuwuity/main-maxperf) | [Performance optimised version.](./generic.mdx#performance-optimised-builds) |
|
||||
| Registry | Image | Notes |
|
||||
| ---------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------- |
|
||||
| Forgejo Registry | [forgejo.ellis.link/continuwuation/continuwuity:latest](https://forgejo.ellis.link/continuwuation/-/packages/container/continuwuity/latest) | Latest tagged image. |
|
||||
| Forgejo Registry | [forgejo.ellis.link/continuwuation/continuwuity:main](https://forgejo.ellis.link/continuwuation/-/packages/container/continuwuity/main) | Main branch image. |
|
||||
| Forgejo Registry | [forgejo.ellis.link/continuwuation/continuwuity:latest-maxperf](https://forgejo.ellis.link/continuwuation/-/packages/container/continuwuity/latest-maxperf) | [Performance optimised version.](./generic.mdx#performance-optimised-builds) |
|
||||
| Forgejo Registry | [forgejo.ellis.link/continuwuation/continuwuity:main-maxperf](https://forgejo.ellis.link/continuwuation/-/packages/container/continuwuity/main-maxperf) | [Performance optimised version.](./generic.mdx#performance-optimised-builds) |
|
||||
|
||||
Use
|
||||
**Example:**
|
||||
|
||||
```bash
|
||||
docker image pull $LINK
|
||||
docker image pull forgejo.ellis.link/continuwuation/continuwuity:main-maxperf
|
||||
```
|
||||
|
||||
to pull it to your machine.
|
||||
|
||||
#### Mirrors
|
||||
|
||||
Images are mirrored to multiple locations automatically, on a schedule:
|
||||
|
|
@ -33,39 +31,146 @@ Images are mirrored to multiple locations automatically, on a schedule:
|
|||
- `registry.gitlab.com/continuwuity/continuwuity`
|
||||
- `git.nexy7574.co.uk/mirrored/continuwuity` (releases only, no `main`)
|
||||
|
||||
### Run
|
||||
### Quick Run
|
||||
|
||||
When you have the image, you can simply run it with
|
||||
Get a working Continuwuity server with an admin user in four steps:
|
||||
|
||||
#### Prerequisites
|
||||
|
||||
Continuwuity requires HTTPS for Matrix federation. You'll need:
|
||||
|
||||
- A domain name pointing to your server
|
||||
- A reverse proxy with SSL/TLS certificates (Traefik, Caddy, nginx, etc.)
|
||||
|
||||
See [Docker Compose](#docker-compose) for complete examples.
|
||||
|
||||
#### Environment Variables
|
||||
|
||||
- `CONTINUWUITY_SERVER_NAME` - Your Matrix server's domain name
|
||||
- `CONTINUWUITY_DATABASE_PATH` - Where to store your database (must match the
|
||||
volume mount)
|
||||
- `CONTINUWUITY_ADDRESS` - Bind address (use `0.0.0.0` to listen on all
|
||||
interfaces)
|
||||
- `CONTINUWUITY_ALLOW_REGISTRATION` - Set to `false` to disable registration, or
|
||||
use with `CONTINUWUITY_REGISTRATION_TOKEN` to require a token (see
|
||||
[reference](../reference/environment-variables.mdx#registration--user-configuration)
|
||||
for details)
|
||||
|
||||
See the
|
||||
[Environment Variables Reference](../reference/environment-variables.mdx) for
|
||||
more configuration options.
|
||||
|
||||
#### 1. Pull the image
|
||||
|
||||
```bash
|
||||
docker run -d -p 8448:6167 \
|
||||
-v db:/var/lib/continuwuity/ \
|
||||
-e CONTINUWUITY_SERVER_NAME="your.server.name" \
|
||||
-e CONTINUWUITY_ALLOW_REGISTRATION=false \
|
||||
--name continuwuity $LINK
|
||||
docker pull forgejo.ellis.link/continuwuation/continuwuity:latest
|
||||
```
|
||||
|
||||
or you can use [Docker Compose](#docker-compose).
|
||||
#### 2. Start the server with initial admin user
|
||||
|
||||
The `-d` flag lets the container run in detached mode. You may supply an
|
||||
optional `continuwuity.toml` config file, the example config can be found
|
||||
[here](../reference/config.mdx). You can pass in different env vars to
|
||||
change config values on the fly. You can even configure Continuwuity completely by
|
||||
using env vars. For an overview of possible values, please take a look at the
|
||||
<a href="/examples/docker-compose.yml" target="_blank">`docker-compose.yml`</a> file.
|
||||
```bash
|
||||
docker run -d \
|
||||
-p 6167:6167 \
|
||||
-v continuwuity_db:/var/lib/continuwuity \
|
||||
-e CONTINUWUITY_SERVER_NAME="matrix.example.com" \
|
||||
-e CONTINUWUITY_DATABASE_PATH="/var/lib/continuwuity" \
|
||||
-e CONTINUWUITY_ADDRESS="0.0.0.0" \
|
||||
-e CONTINUWUITY_ALLOW_REGISTRATION="false" \
|
||||
--name continuwuity \
|
||||
forgejo.ellis.link/continuwuation/continuwuity:latest \
|
||||
/sbin/conduwuit --execute "users create-user admin"
|
||||
```
|
||||
|
||||
If you just want to test Continuwuity for a short time, you can use the `--rm`
|
||||
flag, which cleans up everything related to your container after you stop
|
||||
it.
|
||||
Replace `matrix.example.com` with your actual server name and `admin` with
|
||||
your preferred username.
|
||||
|
||||
#### 3. Get your admin password
|
||||
|
||||
```bash
|
||||
docker logs continuwuity 2>&1 | grep "Created user"
|
||||
```
|
||||
|
||||
You'll see output like:
|
||||
|
||||
```
|
||||
Created user with user_id: @admin:matrix.example.com and password: `[auto-generated-password]`
|
||||
```
|
||||
|
||||
#### 4. Configure your reverse proxy
|
||||
|
||||
Configure your reverse proxy to forward HTTPS traffic to Continuwuity. See
|
||||
[Docker Compose](#docker-compose) for examples.
|
||||
|
||||
Once configured, log in with any Matrix client using `@admin:matrix.example.com`
|
||||
and the generated password. You'll automatically be invited to the admin room
|
||||
where you can manage your server.
|
||||
|
||||
### Docker Compose
|
||||
|
||||
If the `docker run` command is not suitable for you or your setup, you can also use one
|
||||
of the provided `docker-compose` files.
|
||||
Docker Compose is the recommended deployment method. These examples include
|
||||
reverse proxy configurations for Matrix federation.
|
||||
|
||||
Depending on your proxy setup, you can use one of the following files:
|
||||
#### Matrix Federation Requirements
|
||||
|
||||
### For existing Traefik setup
|
||||
For Matrix federation to work, you need to serve `.well-known/matrix/client` and
|
||||
`.well-known/matrix/server` endpoints. You can achieve this either by:
|
||||
|
||||
1. **Using a well-known service** - The compose files below include an nginx
|
||||
container to serve these files
|
||||
2. **Using Continuwuity's built-in delegation** (easier for Traefik) - Configure
|
||||
delegation files in your config, then proxy `/.well-known/matrix/*` to
|
||||
Continuwuity
|
||||
|
||||
**Traefik example using built-in delegation:**
|
||||
|
||||
```yaml
|
||||
labels:
|
||||
traefik.http.routers.continuwuity.rule: >-
|
||||
(Host(`matrix.example.com`) ||
|
||||
(Host(`example.com`) && PathPrefix(`/.well-known/matrix`)))
|
||||
```
|
||||
|
||||
This routes your Matrix domain and well-known paths to Continuwuity.
|
||||
|
||||
#### Creating Your First Admin User
|
||||
|
||||
Add the `--execute` command to create an admin user on first startup. In your
|
||||
compose file, add under the `continuwuity` service:
|
||||
|
||||
```yaml
|
||||
services:
|
||||
continuwuity:
|
||||
image: forgejo.ellis.link/continuwuation/continuwuity:latest
|
||||
command: /sbin/conduwuit --execute "users create-user admin"
|
||||
# ... rest of configuration
|
||||
```
|
||||
|
||||
Then retrieve the auto-generated password:
|
||||
|
||||
```bash
|
||||
docker compose logs continuwuity | grep "Created user"
|
||||
```
|
||||
|
||||
#### Choose Your Reverse Proxy
|
||||
|
||||
Select the compose file that matches your setup:
|
||||
|
||||
:::note DNS Performance
|
||||
Docker's default DNS resolver can cause performance issues with Matrix
|
||||
federation. If you experience slow federation or DNS timeouts, you may need to
|
||||
use your host's DNS resolver instead. Add this volume mount to the
|
||||
`continuwuity` service:
|
||||
|
||||
```yaml
|
||||
volumes:
|
||||
- /etc/resolv.conf:/etc/resolv.conf:ro
|
||||
```
|
||||
|
||||
See [Troubleshooting - DNS Issues](../troubleshooting.mdx#potential-dns-issues-when-using-docker)
|
||||
for more details and alternative solutions.
|
||||
:::
|
||||
|
||||
##### For existing Traefik setup
|
||||
|
||||
<details>
|
||||
<summary>docker-compose.for-traefik.yml</summary>
|
||||
|
|
@ -76,7 +181,7 @@ Depending on your proxy setup, you can use one of the following files:
|
|||
|
||||
</details>
|
||||
|
||||
### With Traefik included
|
||||
##### With Traefik included
|
||||
|
||||
<details>
|
||||
<summary>docker-compose.with-traefik.yml</summary>
|
||||
|
|
@ -87,7 +192,7 @@ Depending on your proxy setup, you can use one of the following files:
|
|||
|
||||
</details>
|
||||
|
||||
### With Caddy Docker Proxy
|
||||
##### With Caddy Docker Proxy
|
||||
|
||||
<details>
|
||||
<summary>docker-compose.with-caddy.yml</summary>
|
||||
|
|
@ -98,9 +203,15 @@ Replace all `example.com` placeholders with your own domain.
|
|||
|
||||
```
|
||||
|
||||
If you don't already have a network for Caddy to monitor, create one first:
|
||||
|
||||
```bash
|
||||
docker network create caddy
|
||||
```
|
||||
|
||||
</details>
|
||||
|
||||
### For other reverse proxies
|
||||
##### For other reverse proxies
|
||||
|
||||
<details>
|
||||
<summary>docker-compose.yml</summary>
|
||||
|
|
@ -111,7 +222,7 @@ Replace all `example.com` placeholders with your own domain.
|
|||
|
||||
</details>
|
||||
|
||||
### Override file
|
||||
##### Override file for customisation
|
||||
|
||||
<details>
|
||||
<summary>docker-compose.override.yml</summary>
|
||||
|
|
@ -122,98 +233,24 @@ Replace all `example.com` placeholders with your own domain.
|
|||
|
||||
</details>
|
||||
|
||||
When picking the Traefik-related compose file, rename it to
|
||||
`docker-compose.yml`, and rename the override file to
|
||||
`docker-compose.override.yml`. Edit the latter with the values you want for your
|
||||
server.
|
||||
#### Starting Your Server
|
||||
|
||||
When picking the `caddy-docker-proxy` compose file, it's important to first
|
||||
create the `caddy` network before spinning up the containers:
|
||||
|
||||
```bash
|
||||
docker network create caddy
|
||||
```
|
||||
|
||||
After that, you can rename it to `docker-compose.yml` and spin up the
|
||||
containers!
|
||||
|
||||
Additional info about deploying Continuwuity can be found [here](generic.mdx).
|
||||
|
||||
### Build
|
||||
|
||||
Official Continuwuity images are built using **Docker Buildx** and the Dockerfile found at [`docker/Dockerfile`][dockerfile-path]. This approach uses common Docker tooling and enables efficient multi-platform builds.
|
||||
|
||||
The resulting images are widely compatible with Docker and other container runtimes like Podman or containerd.
|
||||
|
||||
The images *do not contain a shell*. They contain only the Continuwuity binary, required libraries, TLS certificates, and metadata.
|
||||
|
||||
<details>
|
||||
<summary>Click to view the Dockerfile</summary>
|
||||
|
||||
You can also <a href="https://forgejo.ellis.link/continuwuation/continuwuation/src/branch/main/docker/Dockerfile" target="_blank">view the Dockerfile on Forgejo</a>.
|
||||
|
||||
```dockerfile file="../../docker/Dockerfile"
|
||||
|
||||
```
|
||||
|
||||
</details>
|
||||
|
||||
To build an image locally using Docker Buildx, you can typically run a command like:
|
||||
|
||||
```bash
|
||||
# Build for the current platform and load into the local Docker daemon
|
||||
docker buildx build --load --tag continuwuity:latest -f docker/Dockerfile .
|
||||
|
||||
# Example: Build for specific platforms and push to a registry.
|
||||
# docker buildx build --platform linux/amd64,linux/arm64 --tag registry.io/org/continuwuity:latest -f docker/Dockerfile . --push
|
||||
|
||||
# Example: Build binary optimised for the current CPU (standard release profile)
|
||||
# docker buildx build --load \
|
||||
# --tag continuwuity:latest \
|
||||
# --build-arg TARGET_CPU=native \
|
||||
# -f docker/Dockerfile .
|
||||
|
||||
# Example: Build maxperf variant (release-max-perf profile with LTO)
|
||||
# Optimised for runtime performance and smaller binary size, but requires longer build time
|
||||
# docker buildx build --load \
|
||||
# --tag continuwuity:latest-maxperf \
|
||||
# --build-arg TARGET_CPU=native \
|
||||
# --build-arg RUST_PROFILE=release-max-perf \
|
||||
# -f docker/Dockerfile .
|
||||
```
|
||||
|
||||
Refer to the Docker Buildx documentation for more advanced build options.
|
||||
|
||||
[dockerfile-path]: https://forgejo.ellis.link/continuwuation/continuwuation/src/branch/main/docker/Dockerfile
|
||||
|
||||
### Run
|
||||
|
||||
If you have already built the image or want to use one from the registries, you
|
||||
can start the container and everything else in the compose file in detached
|
||||
mode with:
|
||||
1. Choose your compose file and rename it to `docker-compose.yml`
|
||||
2. If using the override file, rename it to `docker-compose.override.yml` and
|
||||
edit your values
|
||||
3. Start the server:
|
||||
|
||||
```bash
|
||||
docker compose up -d
|
||||
```
|
||||
|
||||
> **Note:** Don't forget to modify and adjust the compose file to your needs.
|
||||
See the [generic deployment guide](generic.mdx) for more deployment options.
|
||||
|
||||
### Use Traefik as Proxy
|
||||
### Building Custom Images
|
||||
|
||||
As a container user, you probably know about Traefik. It is an easy-to-use
|
||||
reverse proxy for making containerized apps and services available through the
|
||||
web. With the Traefik-related docker-compose files provided above, it is equally easy
|
||||
to deploy and use Continuwuity, with a small caveat. If you have already looked at
|
||||
the files, you should have seen the `well-known` service, which is the
|
||||
small caveat. Traefik is simply a proxy and load balancer and cannot
|
||||
serve any kind of content. For Continuwuity to federate, we need to either
|
||||
expose ports `443` and `8448` or serve two endpoints: `.well-known/matrix/client`
|
||||
and `.well-known/matrix/server`.
|
||||
|
||||
With the service `well-known`, we use a single `nginx` container that serves
|
||||
those two files.
|
||||
|
||||
Alternatively, you can use Continuwuity's built-in delegation file capability. Set up the delegation files in the configuration file, and then proxy paths under `/.well-known/matrix` to continuwuity. For example, the label ``traefik.http.routers.continuwuity.rule=(Host(`matrix.ellis.link`) || (Host(`ellis.link`) && PathPrefix(`/.well-known/matrix`)))`` does this for the domain `ellis.link`.
|
||||
For information on building your own Continuwuity Docker images, see the
|
||||
[Building Docker Images](../development/index.mdx#building-docker-images)
|
||||
section in the development documentation.
|
||||
|
||||
## Voice communication
|
||||
|
||||
|
|
|
|||
|
|
@ -1,7 +1,7 @@
|
|||
# Continuwuity for FreeBSD
|
||||
|
||||
Continuwuity currently does not provide FreeBSD builds or FreeBSD packaging. However, Continuwuity does build and work on FreeBSD using the system-provided RocksDB.
|
||||
Continuwuity doesn't provide official FreeBSD packages; however, a community-maintained set of packages is available on [Forgejo](https://forgejo.ellis.link/katie/continuwuity-bsd). Note that these are provided as standalone packages and are not part of a FreeBSD package repository (yet), so updates need to be downloaded and installed manually.
|
||||
|
||||
Contributions to get Continuwuity packaged for FreeBSD are welcome.
|
||||
Please see the installation instructions in that repository. Direct any questions to its issue tracker or to [@katie:kat5.dev](https://matrix.to/#/@katie:kat5.dev).
|
||||
|
||||
Please join our [Continuwuity BSD](https://matrix.to/#/%23bsd:continuwuity.org) community room.
|
||||
For general BSD support, please join our [Continuwuity BSD](https://matrix.to/#/%23bsd:continuwuity.org) community room.
|
||||
|
|
|
|||
|
|
@ -39,6 +39,7 @@ spec:
|
|||
- name: continuwuity
|
||||
# use a sha hash <3
|
||||
image: forgejo.ellis.link/continuwuation/continuwuity:latest
|
||||
command: ["/sbin/conduwuit"]
|
||||
imagePullPolicy: IfNotPresent
|
||||
ports:
|
||||
- name: http
|
||||
|
|
|
|||
|
|
@ -2,7 +2,8 @@
|
|||
|
||||
Information about developing the project. If you are only interested in using
|
||||
it, you can safely ignore this page. If you plan on contributing, see the
|
||||
[contributor's guide](./contributing.mdx) and [code style guide](./code_style.mdx).
|
||||
[contributor's guide](./contributing.mdx) and
|
||||
[code style guide](./code_style.mdx).
|
||||
|
||||
## Continuwuity project layout
|
||||
|
||||
|
|
@ -12,86 +13,98 @@ members are under `src/`. The workspace definition is at the top level / root
|
|||
`Cargo.toml`.
|
||||
|
||||
The crate names are generally self-explanatory:
|
||||
|
||||
- `admin` is the admin room
|
||||
- `api` is the HTTP API, Matrix C-S and S-S endpoints, etc
|
||||
- `core` is core Continuwuity functionality like config loading, error definitions,
|
||||
global utilities, logging infrastructure, etc
|
||||
- `database` is RocksDB methods, helpers, RocksDB config, and general database definitions,
|
||||
utilities, or functions
|
||||
- `macros` are Continuwuity Rust [macros][macros] like general helper macros, logging
|
||||
and error handling macros, and [syn][syn] and [procedural macros][proc-macro]
|
||||
used for admin room commands and others
|
||||
- `core` is core Continuwuity functionality like config loading, error
|
||||
definitions, global utilities, logging infrastructure, etc
|
||||
- `database` is RocksDB methods, helpers, RocksDB config, and general database
|
||||
definitions, utilities, or functions
|
||||
- `macros` are Continuwuity Rust [macros][macros] like general helper macros,
|
||||
logging and error handling macros, and [syn][syn] and [procedural
|
||||
macros][proc-macro] used for admin room commands and others
|
||||
- `main` is the "primary" sub-crate. This is where the `main()` function lives,
|
||||
tokio worker and async initialisation, Sentry initialisation, [clap][clap] init,
|
||||
and signal handling. If you are adding new [Rust features][features], they *must*
|
||||
go here.
|
||||
- `router` is the webserver and request handling bits, using axum, tower, tower-http,
|
||||
hyper, etc, and the [global server state][state] to access `services`.
|
||||
tokio worker and async initialisation, Sentry initialisation, [clap][clap]
|
||||
init, and signal handling. If you are adding new [Rust features][features],
|
||||
they _must_ go here.
|
||||
- `router` is the webserver and request handling bits, using axum, tower,
|
||||
tower-http, hyper, etc, and the [global server state][state] to access
|
||||
`services`.
|
||||
- `service` is the high-level database definitions and functions for data,
|
||||
outbound/sending code, and other business logic such as media fetching.
|
||||
outbound/sending code, and other business logic such as media fetching.
|
||||
|
||||
It is highly unlikely you will ever need to add a new workspace member, but
|
||||
if you truly find yourself needing to, we recommend reaching out to us in
|
||||
the Matrix room for discussions about it beforehand.
|
||||
It is highly unlikely you will ever need to add a new workspace member, but if
|
||||
you truly find yourself needing to, we recommend reaching out to us in the
|
||||
Matrix room for discussions about it beforehand.
|
||||
|
||||
The primary inspiration for this design was apart of hot reloadable development,
|
||||
to support "Continuwuity as a library" where specific parts can simply be swapped out.
|
||||
There is evidence Conduit wanted to go this route too as `axum` is technically an
|
||||
optional feature in Conduit, and can be compiled without the binary or axum library
|
||||
for handling inbound web requests; but it was never completed or worked.
|
||||
to support "Continuwuity as a library" where specific parts can simply be
|
||||
swapped out. There is evidence Conduit wanted to go this route too as `axum` is
|
||||
technically an optional feature in Conduit, and can be compiled without the
|
||||
binary or axum library for handling inbound web requests; but it was never
|
||||
completed or worked.
|
||||
|
||||
See the Rust documentation on [Workspaces][workspaces] for general questions
|
||||
and information on Cargo workspaces.
|
||||
See the Rust documentation on [Workspaces][workspaces] for general questions and
|
||||
information on Cargo workspaces.
|
||||
|
||||
## Adding compile-time [features][features]
|
||||
|
||||
If you'd like to add a compile-time feature, you must first define it in
|
||||
the `main` workspace crate located in `src/main/Cargo.toml`. The feature must
|
||||
enable a feature in the other workspace crate(s) you intend to use it in. Then
|
||||
the said workspace crate(s) must define the feature there in its `Cargo.toml`.
|
||||
If you'd like to add a compile-time feature, you must first define it in the
|
||||
`main` workspace crate located in `src/main/Cargo.toml`. The feature must enable
|
||||
a feature in the other workspace crate(s) you intend to use it in. Then the said
|
||||
workspace crate(s) must define the feature there in its `Cargo.toml`.
|
||||
|
||||
So, if this is adding a feature to the API such as `woof`, you define the feature
|
||||
in the `api` crate's `Cargo.toml` as `woof = []`. The feature definition in `main`'s
|
||||
`Cargo.toml` will be `woof = ["conduwuit-api/woof"]`.
|
||||
So, if this is adding a feature to the API such as `woof`, you define the
|
||||
feature in the `api` crate's `Cargo.toml` as `woof = []`. The feature definition
|
||||
in `main`'s `Cargo.toml` will be `woof = ["conduwuit-api/woof"]`.
|
||||
|
||||
The rationale for this is due to Rust / Cargo not supporting
|
||||
["workspace level features"][9], we must make a choice of; either scattering
|
||||
features all over the workspace crates, making it difficult for anyone to add
|
||||
or remove default features; or define all the features in one central workspace
|
||||
crate that propagate down/up to the other workspace crates. It is a Cargo pitfall,
|
||||
and we'd like to see better developer UX in Rust's Workspaces.
|
||||
The rationale for this is due to Rust / Cargo not supporting ["workspace level
|
||||
features"][9], we must make a choice of; either scattering features all over the
|
||||
workspace crates, making it difficult for anyone to add or remove default
|
||||
features; or define all the features in one central workspace crate that
|
||||
propagate down/up to the other workspace crates. It is a Cargo pitfall, and we'd
|
||||
like to see better developer UX in Rust's Workspaces.
|
||||
|
||||
Additionally, the definition of one single place makes "feature collection" in our
|
||||
Nix flake a million times easier instead of collecting and deduping them all from
|
||||
searching in all the workspace crates' `Cargo.toml`s. Though we wouldn't need to
|
||||
do this if Rust supported workspace-level features to begin with.
|
||||
Additionally, the definition of one single place makes "feature collection" in
|
||||
our Nix flake a million times easier instead of collecting and deduping them all
|
||||
from searching in all the workspace crates' `Cargo.toml`s. Though we wouldn't
|
||||
need to do this if Rust supported workspace-level features to begin with.
|
||||
|
||||
## List of forked dependencies
|
||||
|
||||
During Continuwuity (and prior projects) development, we have had to fork some dependencies to support our use-cases.
|
||||
These forks exist for various reasons including features that upstream projects won't accept,
|
||||
faster-paced development, Continuwuity-specific usecases, or lack of time to upstream changes.
|
||||
During Continuwuity (and prior projects) development, we have had to fork some
|
||||
dependencies to support our use-cases. These forks exist for various reasons
|
||||
including features that upstream projects won't accept, faster-paced
|
||||
development, Continuwuity-specific usecases, or lack of time to upstream
|
||||
changes.
|
||||
|
||||
All forked dependencies are maintained under the [continuwuation organization on Forgejo](https://forgejo.ellis.link/continuwuation):
|
||||
All forked dependencies are maintained under the
|
||||
[continuwuation organization on Forgejo](https://forgejo.ellis.link/continuwuation):
|
||||
|
||||
- [ruwuma][continuwuation-ruwuma] - Fork of [ruma/ruma][ruma] with various performance improvements, more features and better client/server interop
|
||||
- [rocksdb][continuwuation-rocksdb] - Fork of [facebook/rocksdb][rocksdb] via [`@zaidoon1`][8] with liburing build fixes and GCC debug build fixes
|
||||
- [jemallocator][continuwuation-jemallocator] - Fork of [tikv/jemallocator][jemallocator] fixing musl builds, suspicious code,
|
||||
and adding support for redzones in Valgrind
|
||||
- [rustyline-async][continuwuation-rustyline-async] - Fork of [zyansheep/rustyline-async][rustyline-async] with tab completion callback
|
||||
and `CTRL+\` signal quit event for Continuwuity console CLI
|
||||
- [rust-rocksdb][continuwuation-rust-rocksdb] - Fork of [rust-rocksdb/rust-rocksdb][rust-rocksdb] fixing musl build issues,
|
||||
removing unnecessary `gtest` include, and using our RocksDB and jemallocator forks
|
||||
- [tracing][continuwuation-tracing] - Fork of [tokio-rs/tracing][tracing] implementing `Clone` for `EnvFilter` to
|
||||
support dynamically changing tracing environments
|
||||
- [ruwuma][continuwuation-ruwuma] - Fork of [ruma/ruma][ruma] with various
|
||||
performance improvements, more features and better client/server interop
|
||||
- [rocksdb][continuwuation-rocksdb] - Fork of [facebook/rocksdb][rocksdb] via
|
||||
[`@zaidoon1`][8] with liburing build fixes and GCC debug build fixes
|
||||
- [jemallocator][continuwuation-jemallocator] - Fork of
|
||||
[tikv/jemallocator][jemallocator] fixing musl builds, suspicious code, and
|
||||
adding support for redzones in Valgrind
|
||||
- [rustyline-async][continuwuation-rustyline-async] - Fork of
|
||||
[zyansheep/rustyline-async][rustyline-async] with tab completion callback and
|
||||
`CTRL+\` signal quit event for Continuwuity console CLI
|
||||
- [rust-rocksdb][continuwuation-rust-rocksdb] - Fork of
|
||||
[rust-rocksdb/rust-rocksdb][rust-rocksdb] fixing musl build issues, removing
|
||||
unnecessary `gtest` include, and using our RocksDB and jemallocator forks
|
||||
- [tracing][continuwuation-tracing] - Fork of [tokio-rs/tracing][tracing]
|
||||
implementing `Clone` for `EnvFilter` to support dynamically changing tracing
|
||||
environments
|
||||
|
||||
## Debugging with `tokio-console`
|
||||
|
||||
[`tokio-console`][7] can be a useful tool for debugging and profiling. To make a
|
||||
`tokio-console`-enabled build of Continuwuity, enable the `tokio_console` feature,
|
||||
disable the default `release_max_log_level` feature, and set the `--cfg
|
||||
tokio_unstable` flag to enable experimental tokio APIs. A build might look like
|
||||
this:
|
||||
`tokio-console`-enabled build of Continuwuity, enable the `tokio_console`
|
||||
feature, disable the default `release_max_log_level` feature, and set the
|
||||
`--cfg tokio_unstable` flag to enable experimental tokio APIs. A build might
|
||||
look like this:
|
||||
|
||||
```bash
|
||||
RUSTFLAGS="--cfg tokio_unstable" cargo +nightly build \
|
||||
|
|
@ -100,34 +113,84 @@ RUSTFLAGS="--cfg tokio_unstable" cargo +nightly build \
|
|||
--features=systemd,element_hacks,gzip_compression,brotli_compression,zstd_compression,tokio_console
|
||||
```
|
||||
|
||||
You will also need to enable the `tokio_console` config option in Continuwuity when
|
||||
starting it. This was due to tokio-console causing gradual memory leak/usage
|
||||
if left enabled.
|
||||
You will also need to enable the `tokio_console` config option in Continuwuity
|
||||
when starting it. This was due to tokio-console causing gradual memory
|
||||
leak/usage if left enabled.
|
||||
|
||||
## Building Docker Images
|
||||
|
||||
To build a Docker image for Continuwuity, use the standard Docker build command:
|
||||
Official Continuwuity images are built using **Docker Buildx** and the
|
||||
Dockerfile found at [`docker/Dockerfile`][dockerfile-path].
|
||||
|
||||
The images are compatible with Docker and other container runtimes like Podman
|
||||
or containerd.
|
||||
|
||||
The images _do not contain a shell_. They contain only the Continuwuity binary,
|
||||
required libraries, TLS certificates, and metadata.
|
||||
|
||||
<details>
|
||||
<summary>Click to view the Dockerfile</summary>
|
||||
|
||||
You can also
|
||||
|
||||
<a
|
||||
href="<https://forgejo.ellis.link/continuwuation/continuwuation/src/branch/main/docker/Dockerfile>"
|
||||
target="_blank"
|
||||
>
|
||||
view the Dockerfile on Forgejo
|
||||
</a>
|
||||
.
|
||||
|
||||
```dockerfile file="../../docker/Dockerfile"
|
||||
|
||||
```bash
|
||||
docker build -f docker/Dockerfile .
|
||||
```
|
||||
|
||||
The image can be cross-compiled for different architectures.
|
||||
</details>
|
||||
|
||||
### Building Locally
|
||||
|
||||
To build an image locally using Docker Buildx:
|
||||
|
||||
```bash
|
||||
# Build for the current platform and load into the local Docker daemon
|
||||
docker buildx build --load --tag continuwuity:latest -f docker/Dockerfile .
|
||||
|
||||
# Example: Build for specific platforms and push to a registry
|
||||
# docker buildx build --platform linux/amd64,linux/arm64 --tag registry.io/org/continuwuity:latest -f docker/Dockerfile . --push
|
||||
|
||||
# Example: Build binary optimised for the current CPU (standard release profile)
|
||||
# docker buildx build --load \
|
||||
# --tag continuwuity:latest \
|
||||
# --build-arg TARGET_CPU=native \
|
||||
# -f docker/Dockerfile .
|
||||
|
||||
# Example: Build maxperf variant (release-max-perf profile with LTO)
|
||||
# docker buildx build --load \
|
||||
# --tag continuwuity:latest-maxperf \
|
||||
# --build-arg TARGET_CPU=native \
|
||||
# --build-arg RUST_PROFILE=release-max-perf \
|
||||
# -f docker/Dockerfile .
|
||||
```
|
||||
|
||||
Refer to the Docker Buildx documentation for more advanced build options.
|
||||
|
||||
[dockerfile-path]:
|
||||
https://forgejo.ellis.link/continuwuation/continuwuation/src/branch/main/docker/Dockerfile
|
||||
[continuwuation-ruwuma]: https://forgejo.ellis.link/continuwuation/ruwuma
|
||||
[continuwuation-rocksdb]: https://forgejo.ellis.link/continuwuation/rocksdb
|
||||
[continuwuation-jemallocator]: https://forgejo.ellis.link/continuwuation/jemallocator
|
||||
[continuwuation-rustyline-async]: https://forgejo.ellis.link/continuwuation/rustyline-async
|
||||
[continuwuation-rust-rocksdb]: https://forgejo.ellis.link/continuwuation/rust-rocksdb
|
||||
[continuwuation-jemallocator]:
|
||||
https://forgejo.ellis.link/continuwuation/jemallocator
|
||||
[continuwuation-rustyline-async]:
|
||||
https://forgejo.ellis.link/continuwuation/rustyline-async
|
||||
[continuwuation-rust-rocksdb]:
|
||||
https://forgejo.ellis.link/continuwuation/rust-rocksdb
|
||||
[continuwuation-tracing]: https://forgejo.ellis.link/continuwuation/tracing
|
||||
|
||||
[ruma]: https://github.com/ruma/ruma/
|
||||
[rocksdb]: https://github.com/facebook/rocksdb/
|
||||
[jemallocator]: https://github.com/tikv/jemallocator/
|
||||
[rustyline-async]: https://github.com/zyansheep/rustyline-async/
|
||||
[rust-rocksdb]: https://github.com/rust-rocksdb/rust-rocksdb/
|
||||
[tracing]: https://github.com/tokio-rs/tracing/
|
||||
|
||||
[7]: https://docs.rs/tokio-console/latest/tokio_console/
|
||||
[8]: https://github.com/zaidoon1/
|
||||
[9]: https://github.com/rust-lang/cargo/issues/12162
|
||||
|
|
|
|||
226
docs/plans/2026-03-17-space-permission-cascading-design.md
Normal file
226
docs/plans/2026-03-17-space-permission-cascading-design.md
Normal file
|
|
@ -0,0 +1,226 @@
|
|||
# Space Permission Cascading — Design Document
|
||||
|
||||
**Date:** 2026-03-17
|
||||
**Status:** Approved
|
||||
|
||||
## Overview
|
||||
|
||||
Server-side feature that allows user rights in a Space to cascade down to its
|
||||
direct child rooms. Includes power level cascading and role-based room access
|
||||
control. Enabled via a server-wide configuration flag, disabled by default.
|
||||
|
||||
## Requirements
|
||||
|
||||
1. Power levels defined in a Space cascade to all direct child rooms (Space
|
||||
always wins over per-room overrides).
|
||||
2. Admins can define custom roles in a Space and assign them to users.
|
||||
3. Child rooms can require one or more roles for access.
|
||||
4. Enforcement is continuous — role revocation auto-kicks users from rooms they
|
||||
no longer qualify for.
|
||||
5. Users are auto-joined to all qualifying child rooms when they join a Space or
|
||||
receive a new role.
|
||||
6. Cascading applies to direct parent Space only; no nested cascade through
|
||||
sub-spaces.
|
||||
7. Feature is toggled by a single server-wide config flag
|
||||
(`space_permission_cascading`), off by default.
|
||||
|
||||
## Configuration
|
||||
|
||||
```toml
|
||||
# conduwuit-example.toml
|
||||
|
||||
# Enable space permission cascading (power levels and role-based access).
|
||||
# When enabled, power levels cascade from Spaces to child rooms and rooms
|
||||
# can require roles for access. Applies to all Spaces on this server.
|
||||
# Default: false
|
||||
space_permission_cascading = false
|
||||
```
|
||||
|
||||
## Custom State Events
|
||||
|
||||
All events live in the Space room.
|
||||
|
||||
### `m.space.roles` (state key: `""`)
|
||||
|
||||
Defines the available roles for the Space. Two default roles (`admin` and `mod`)
|
||||
are created automatically when a Space is first encountered with the feature
|
||||
enabled.
|
||||
|
||||
```json
|
||||
{
|
||||
"roles": {
|
||||
"admin": {
|
||||
"description": "Space administrator",
|
||||
"power_level": 100
|
||||
},
|
||||
"mod": {
|
||||
"description": "Space moderator",
|
||||
"power_level": 50
|
||||
},
|
||||
"nsfw": {
|
||||
"description": "Access to NSFW content"
|
||||
},
|
||||
"vip": {
|
||||
"description": "VIP member"
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
- `description` (string, required): Human-readable description.
|
||||
- `power_level` (integer, optional): If present, users with this role receive
|
||||
this power level in all child rooms. When a user holds multiple roles with
|
||||
power levels, the highest value wins.
|
||||
|
||||
### `m.space.role.member` (state key: user ID)
|
||||
|
||||
Assigns roles to a user within the Space.
|
||||
|
||||
```json
|
||||
{
|
||||
"roles": ["nsfw", "vip"]
|
||||
}
|
||||
```
|
||||
|
||||
### `m.space.role.room` (state key: room ID)
|
||||
|
||||
Declares which roles a child room requires. A user must hold **all** listed
|
||||
roles to access the room.
|
||||
|
||||
```json
|
||||
{
|
||||
"required_roles": ["nsfw"]
|
||||
}
|
||||
```
|
||||
|
||||
## Enforcement Rules
|
||||
|
||||
All enforcement is skipped when `space_permission_cascading = false`.
|
||||
|
||||
### 1. Join gating
|
||||
|
||||
When a user attempts to join a room that is a direct child of a Space:
|
||||
|
||||
- Look up the room's `m.space.role.room` event in the parent Space.
|
||||
- If the room has `required_roles`, check the user's `m.space.role.member`.
|
||||
- Reject the join if the user is missing any required role.
|
||||
|
||||
### 2. Power level override
|
||||
|
||||
For every user in a child room of a Space:
|
||||
|
||||
- Look up their roles via `m.space.role.member` in the parent Space.
|
||||
- For each role that has a `power_level`, take the highest value.
|
||||
- Override the user's power level in the child room's `m.room.power_levels`.
|
||||
- Reject attempts to manually set per-room power levels that conflict with
|
||||
Space-granted levels.
|
||||
|
||||
### 3. Role revocation
|
||||
|
||||
When an `m.space.role.member` event is updated and a role is removed:
|
||||
|
||||
- Identify all child rooms that require the removed role.
|
||||
- Auto-kick the user from rooms they no longer qualify for.
|
||||
- Recalculate and update the user's power level in all child rooms.
|
||||
|
||||
### 4. Room requirement change
|
||||
|
||||
When an `m.space.role.room` event is updated with new requirements:
|
||||
|
||||
- Check all current members of the room.
|
||||
- Auto-kick members who do not hold all newly required roles.
|
||||
|
||||
### 5. Auto-join on role grant
|
||||
|
||||
When an `m.space.role.member` event is updated and a role is added:
|
||||
|
||||
- Find all child rooms where the user now meets all required roles.
|
||||
- Auto-join the user to qualifying rooms they are not already in.
|
||||
|
||||
This also applies when a user first joins the Space — they are auto-joined to
|
||||
all child rooms they qualify for. Rooms with no role requirements auto-join all
|
||||
Space members.
|
||||
|
||||
### 6. New child room
|
||||
|
||||
When a new `m.space.child` event is added to a Space:
|
||||
|
||||
- Auto-join all qualifying Space members to the new child room.
|
||||
|
||||
## Caching & Indexing
|
||||
|
||||
The source of truth is always the state events. The server maintains an
|
||||
in-memory index for fast enforcement lookups, following the same patterns as the
|
||||
existing `roomid_spacehierarchy_cache`.
|
||||
|
||||
### Index structures
|
||||
|
||||
| Index | Source event |
|
||||
|------------------------------|------------------------|
|
||||
| Space → roles defined | `m.space.roles` |
|
||||
| Space → user → roles | `m.space.role.member` |
|
||||
| Space → room → required roles| `m.space.role.room` |
|
||||
| Room → parent Space | `m.space.child` (reverse lookup) |
|
||||
|
||||
The Space → child rooms mapping already exists.
|
||||
|
||||
### Cache invalidation triggers
|
||||
|
||||
| Event changed | Action |
|
||||
|----------------------------|-----------------------------------------------------|
|
||||
| `m.space.roles` | Refresh role definitions, revalidate all members |
|
||||
| `m.space.role.member` | Refresh user's roles, trigger auto-join/kick |
|
||||
| `m.space.role.room` | Refresh room requirements, trigger auto-join/kick |
|
||||
| `m.space.child` added | Index new child, auto-join qualifying members |
|
||||
| `m.space.child` removed | Remove from index (no auto-kick) |
|
||||
| Server startup | Full rebuild from state events |
|
||||
|
||||
## Admin Room Commands
|
||||
|
||||
Roles are managed via the existing admin room interface, which sends the
|
||||
appropriate state events under the hood and triggers enforcement.
|
||||
|
||||
```
|
||||
!admin space roles list <space>
|
||||
!admin space roles add <space> <role_name> [description] [power_level]
|
||||
!admin space roles remove <space> <role_name>
|
||||
!admin space roles assign <space> <user_id> <role_name>
|
||||
!admin space roles revoke <space> <user_id> <role_name>
|
||||
!admin space roles require <space> <room_id> <role_name>
|
||||
!admin space roles unrequire <space> <room_id> <role_name>
|
||||
!admin space roles user <space> <user_id>
|
||||
!admin space roles room <space> <room_id>
|
||||
```
|
||||
|
||||
## Architecture
|
||||
|
||||
**Approach:** Hybrid — state events for definition, database cache for
|
||||
enforcement.
|
||||
|
||||
- State events are the source of truth and federate normally.
|
||||
- The server maintains an in-memory cache/index for fast enforcement.
|
||||
- Cache is invalidated on relevant state event changes and fully rebuilt on
|
||||
startup.
|
||||
- All enforcement hooks (join gating, PL override, auto-join, auto-kick) check
|
||||
the feature flag first and no-op when disabled.
|
||||
- Existing clients can manage roles via Developer Tools (custom state events).
|
||||
The admin room commands provide a user-friendly interface.
|
||||
|
||||
## Scope
|
||||
|
||||
### In scope
|
||||
|
||||
- Server-wide feature flag
|
||||
- Custom state events for role definition, assignment, and room requirements
|
||||
- Power level cascading (Space always wins)
|
||||
- Continuous enforcement (auto-join, auto-kick)
|
||||
- Admin room commands
|
||||
- In-memory caching with invalidation
|
||||
- Default `admin` (PL 100) and `mod` (PL 50) roles
|
||||
|
||||
### Out of scope
|
||||
|
||||
- Client-side UI for role management
|
||||
- Nested cascade through sub-spaces
|
||||
- Per-space opt-in/opt-out (it is server-wide)
|
||||
- Federation-specific logic beyond normal state event replication
|
||||
1206
docs/plans/2026-03-17-space-permission-cascading.md
Normal file
1206
docs/plans/2026-03-17-space-permission-cascading.md
Normal file
File diff suppressed because it is too large
Load diff
|
|
@ -6,10 +6,10 @@
|
|||
"message": "Welcome to Continuwuity! Important announcements about the project will appear here."
|
||||
},
|
||||
{
|
||||
"id": 9,
|
||||
"id": 10,
|
||||
"mention_room": false,
|
||||
"date": "2026-02-09",
|
||||
"message": "Yesterday we released [v0.5.4](https://forgejo.ellis.link/continuwuation/continuwuity/releases/tag/v0.5.4). Bugfixes, performance improvements and more moderation features! There's also a security fix, so please update as soon as possible. Don't forget to join [our announcements channel](https://matrix.to/#/!jIdNjSM5X-V5JVx2h2kAhUZIIQ08GyzPL55NFZAH1vM/%2489TY9CqRg4-ff1MGo3Ulc5r5X4pakfdzT-99RD8Docc?via=ellis.link&via=explodie.org&via=matrix.org) to get important information sooner <3 "
|
||||
"date": "2026-03-03",
|
||||
"message": "We've just released [v0.5.6](https://forgejo.ellis.link/continuwuation/continuwuity/releases/tag/v0.5.6), which contains a few security improvements - plus significant reliability and performance improvements. Please update as soon as possible. \n\nWe released [v0.5.5](https://forgejo.ellis.link/continuwuation/continuwuity/releases/tag/v0.5.5) two weeks ago, but it skipped your admin room straight to [our announcements channel](https://matrix.to/#/!jIdNjSM5X-V5JVx2h2kAhUZIIQ08GyzPL55NFZAH1vM?via=ellis.link&via=gingershaped.computer&via=matrix.org). Make sure you're there to get important information as soon as we announce it! [Our space](https://matrix.to/#/!8cR4g-i9ucof69E4JHNg9LbPVkGprHb3SzcrGBDDJgk?via=continuwuity.org&via=ellis.link&via=matrix.org) has also gained a bunch of new and interesting rooms - be there or be square."
|
||||
}
|
||||
]
|
||||
}
|
||||
|
|
|
|||
|
|
@ -1 +1 @@
|
|||
{"m.homeserver":{"base_url": "https://matrix.continuwuity.org"},"org.matrix.msc3575.proxy":{"url": "https://matrix.continuwuity.org"},"org.matrix.msc4143.rtc_foci":[{"type":"livekit","livekit_service_url":"https://livekit.ellis.link"}]}
|
||||
{"m.homeserver":{"base_url": "https://matrix.continuwuity.org"},"org.matrix.msc4143.rtc_foci":[{"type":"livekit","livekit_service_url":"https://livekit.ellis.link"}]}
|
||||
|
|
|
|||
|
|
@ -4,6 +4,11 @@
|
|||
"name": "config",
|
||||
"label": "Configuration"
|
||||
},
|
||||
{
|
||||
"type": "file",
|
||||
"name": "environment-variables",
|
||||
"label": "Environment Variables"
|
||||
},
|
||||
{
|
||||
"type": "file",
|
||||
"name": "admin",
|
||||
|
|
|
|||
|
|
@ -27,7 +27,7 @@ default.
|
|||
* Delete all remote and local media from 3 days ago, up until now:
|
||||
|
||||
`!admin media delete-past-remote-media -a 3d
|
||||
-yes-i-want-to-delete-local-media`
|
||||
--yes-i-want-to-delete-local-media`
|
||||
|
||||
## `!admin media delete-all-from-user`
|
||||
|
||||
|
|
|
|||
281
docs/reference/environment-variables.mdx
Normal file
281
docs/reference/environment-variables.mdx
Normal file
|
|
@ -0,0 +1,281 @@
|
|||
# Environment Variables
|
||||
|
||||
Continuwuity can be configured entirely through environment variables, making it
|
||||
ideal for containerised deployments and infrastructure-as-code scenarios.
|
||||
|
||||
This is a convenience reference and may not be exhaustive. The
|
||||
[Configuration Reference](./config.mdx) is the primary source for all
|
||||
configuration options.
|
||||
|
||||
## Prefix System
|
||||
|
||||
Continuwuity supports three environment variable prefixes for backwards
|
||||
compatibility:
|
||||
|
||||
- `CONTINUWUITY_*` (current, recommended)
|
||||
- `CONDUWUIT_*` (compatibility)
|
||||
- `CONDUIT_*` (legacy)
|
||||
|
||||
All three prefixes work identically. Use double underscores (`__`) to represent
|
||||
nested configuration sections from the TOML config.
|
||||
|
||||
**Examples:**
|
||||
|
||||
```bash
|
||||
# Simple top-level config
|
||||
CONTINUWUITY_SERVER_NAME="matrix.example.com"
|
||||
CONTINUWUITY_PORT="8008"
|
||||
|
||||
# Nested config sections use double underscores
|
||||
# This maps to [database] section in TOML
|
||||
CONTINUWUITY_DATABASE__PATH="/var/lib/continuwuity"
|
||||
|
||||
# This maps to [tls] section in TOML
|
||||
CONTINUWUITY_TLS__CERTS="/path/to/cert.pem"
|
||||
```
|
||||
|
||||
## Configuration File Override
|
||||
|
||||
You can specify a custom configuration file path:
|
||||
|
||||
- `CONTINUWUITY_CONFIG` - Path to continuwuity.toml (current)
|
||||
- `CONDUWUIT_CONFIG` - Path to config file (compatibility)
|
||||
- `CONDUIT_CONFIG` - Path to config file (legacy)
|
||||
|
||||
## Essential Variables
|
||||
|
||||
These are the minimum variables needed for a working deployment:
|
||||
|
||||
| Variable | Description | Default |
|
||||
| ---------------------------- | ---------------------------------- | ---------------------- |
|
||||
| `CONTINUWUITY_SERVER_NAME` | Your Matrix server's domain name | Required |
|
||||
| `CONTINUWUITY_DATABASE_PATH` | Path to RocksDB database directory | `/var/lib/conduwuit` |
|
||||
| `CONTINUWUITY_ADDRESS` | IP address to bind to | `["127.0.0.1", "::1"]` |
|
||||
| `CONTINUWUITY_PORT` | Port to listen on | `8008` |
|
||||
|
||||
## Network Configuration
|
||||
|
||||
| Variable | Description | Default |
|
||||
| -------------------------------- | ----------------------------------------------- | ---------------------- |
|
||||
| `CONTINUWUITY_ADDRESS` | Bind address (use `0.0.0.0` for all interfaces) | `["127.0.0.1", "::1"]` |
|
||||
| `CONTINUWUITY_PORT` | HTTP port | `8008` |
|
||||
| `CONTINUWUITY_UNIX_SOCKET_PATH` | UNIX socket path (alternative to TCP) | - |
|
||||
| `CONTINUWUITY_UNIX_SOCKET_PERMS` | Socket permissions (octal) | `660` |
|
||||
|
||||
## Database Configuration
|
||||
|
||||
| Variable | Description | Default |
|
||||
| ------------------------------------------ | --------------------------- | -------------------- |
|
||||
| `CONTINUWUITY_DATABASE_PATH` | RocksDB data directory | `/var/lib/conduwuit` |
|
||||
| `CONTINUWUITY_DATABASE_BACKUP_PATH` | Backup directory | - |
|
||||
| `CONTINUWUITY_DATABASE_BACKUPS_TO_KEEP` | Number of backups to retain | `1` |
|
||||
| `CONTINUWUITY_DB_CACHE_CAPACITY_MB` | Database read cache (MB) | - |
|
||||
| `CONTINUWUITY_DB_WRITE_BUFFER_CAPACITY_MB` | Write cache (MB) | - |
|
||||
|
||||
## Cache Configuration
|
||||
|
||||
| Variable | Description |
|
||||
| ---------------------------------------- | ------------------------ |
|
||||
| `CONTINUWUITY_CACHE_CAPACITY_MODIFIER` | LRU cache multiplier |
|
||||
| `CONTINUWUITY_PDU_CACHE_CAPACITY` | PDU cache entries |
|
||||
| `CONTINUWUITY_AUTH_CHAIN_CACHE_CAPACITY` | Auth chain cache entries |
|
||||
|
||||
## DNS Configuration
|
||||
|
||||
Configure DNS resolution behaviour for federation and external requests.
|
||||
|
||||
| Variable | Description | Default |
|
||||
| ------------------------------------ | ---------------------------- | -------- |
|
||||
| `CONTINUWUITY_DNS_CACHE_ENTRIES` | Max DNS cache entries | `32768` |
|
||||
| `CONTINUWUITY_DNS_MIN_TTL` | Minimum cache TTL (seconds) | `10800` |
|
||||
| `CONTINUWUITY_DNS_MIN_TTL_NXDOMAIN` | NXDOMAIN cache TTL (seconds) | `259200` |
|
||||
| `CONTINUWUITY_DNS_ATTEMPTS` | Retry attempts | - |
|
||||
| `CONTINUWUITY_DNS_TIMEOUT` | Query timeout (seconds) | - |
|
||||
| `CONTINUWUITY_DNS_TCP_FALLBACK` | Allow TCP fallback | - |
|
||||
| `CONTINUWUITY_QUERY_ALL_NAMESERVERS` | Query all nameservers | - |
|
||||
| `CONTINUWUITY_QUERY_OVER_TCP_ONLY` | TCP-only queries | - |
|
||||
|
||||
## Request Configuration
|
||||
|
||||
| Variable | Description |
|
||||
| ------------------------------------ | ----------------------------- |
|
||||
| `CONTINUWUITY_MAX_REQUEST_SIZE` | Max HTTP request size (bytes) |
|
||||
| `CONTINUWUITY_REQUEST_CONN_TIMEOUT` | Connection timeout (seconds) |
|
||||
| `CONTINUWUITY_REQUEST_TIMEOUT` | Overall request timeout |
|
||||
| `CONTINUWUITY_REQUEST_TOTAL_TIMEOUT` | Total timeout |
|
||||
| `CONTINUWUITY_REQUEST_IDLE_TIMEOUT` | Idle timeout |
|
||||
| `CONTINUWUITY_REQUEST_IDLE_PER_HOST` | Idle connections per host |
|
||||
|
||||
## Federation Configuration
|
||||
|
||||
Control how your server federates with other Matrix servers.
|
||||
|
||||
| Variable | Description | Default |
|
||||
| ---------------------------------------------- | ----------------------------- | ------- |
|
||||
| `CONTINUWUITY_ALLOW_FEDERATION` | Enable federation | `true` |
|
||||
| `CONTINUWUITY_FEDERATION_LOOPBACK` | Allow loopback federation | - |
|
||||
| `CONTINUWUITY_FEDERATION_CONN_TIMEOUT` | Connection timeout | - |
|
||||
| `CONTINUWUITY_FEDERATION_TIMEOUT` | Request timeout | - |
|
||||
| `CONTINUWUITY_FEDERATION_IDLE_TIMEOUT` | Idle timeout | - |
|
||||
| `CONTINUWUITY_FEDERATION_IDLE_PER_HOST` | Idle connections per host | - |
|
||||
| `CONTINUWUITY_TRUSTED_SERVERS` | JSON array of trusted servers | - |
|
||||
| `CONTINUWUITY_QUERY_TRUSTED_KEY_SERVERS_FIRST` | Query trusted first | - |
|
||||
| `CONTINUWUITY_ONLY_QUERY_TRUSTED_KEY_SERVERS` | Only query trusted | - |
|
||||
|
||||
**Example:**
|
||||
|
||||
```bash
|
||||
# Trust matrix.org for key verification
|
||||
CONTINUWUITY_TRUSTED_SERVERS='["matrix.org"]'
|
||||
```
|
||||
|
||||
## Registration & User Configuration
|
||||
|
||||
Control user registration and account creation behaviour.
|
||||
|
||||
| Variable | Description | Default |
|
||||
| ------------------------------------------ | --------------------- | ------- |
|
||||
| `CONTINUWUITY_ALLOW_REGISTRATION` | Enable registration | `true` |
|
||||
| `CONTINUWUITY_REGISTRATION_TOKEN` | Token requirement | - |
|
||||
| `CONTINUWUITY_SUSPEND_ON_REGISTER` | Suspend new accounts | - |
|
||||
| `CONTINUWUITY_NEW_USER_DISPLAYNAME_SUFFIX` | Display name suffix | 🏳️⚧️ |
|
||||
| `CONTINUWUITY_RECAPTCHA_SITE_KEY` | reCAPTCHA site key | - |
|
||||
| `CONTINUWUITY_RECAPTCHA_PRIVATE_SITE_KEY` | reCAPTCHA private key | - |
|
||||
|
||||
**Example:**
|
||||
|
||||
```bash
|
||||
# Disable open registration
|
||||
CONTINUWUITY_ALLOW_REGISTRATION="false"
|
||||
|
||||
# Require a registration token
|
||||
CONTINUWUITY_REGISTRATION_TOKEN="your_secret_token_here"
|
||||
```
|
||||
|
||||
## Feature Configuration
|
||||
|
||||
| Variable | Description | Default |
|
||||
| ---------------------------------------------------------- | -------------------------- | ------- |
|
||||
| `CONTINUWUITY_ALLOW_ENCRYPTION` | Enable E2EE | `true` |
|
||||
| `CONTINUWUITY_ALLOW_ROOM_CREATION` | Enable room creation | - |
|
||||
| `CONTINUWUITY_ALLOW_UNSTABLE_ROOM_VERSIONS` | Allow unstable versions | - |
|
||||
| `CONTINUWUITY_DEFAULT_ROOM_VERSION` | Default room version | `v11` |
|
||||
| `CONTINUWUITY_REQUIRE_AUTH_FOR_PROFILE_REQUESTS` | Auth for profiles | - |
|
||||
| `CONTINUWUITY_ALLOW_PUBLIC_ROOM_DIRECTORY_OVER_FEDERATION` | Federate directory | - |
|
||||
| `CONTINUWUITY_ALLOW_PUBLIC_ROOM_DIRECTORY_WITHOUT_AUTH` | Unauth directory | - |
|
||||
| `CONTINUWUITY_ALLOW_DEVICE_NAME_FEDERATION` | Device names in federation | - |
|
||||
|
||||
## TLS Configuration
|
||||
|
||||
Built-in TLS support is primarily for testing. **For production deployments,
|
||||
especially when federating on the internet, use a reverse proxy** (Traefik,
|
||||
Caddy, nginx) to handle TLS termination.
|
||||
|
||||
| Variable | Description |
|
||||
| --------------------------------- | ------------------------- |
|
||||
| `CONTINUWUITY_TLS__CERTS` | TLS certificate file path |
|
||||
| `CONTINUWUITY_TLS__KEY` | TLS private key path |
|
||||
| `CONTINUWUITY_TLS__DUAL_PROTOCOL` | Support TLS 1.2 + 1.3 |
|
||||
|
||||
**Example (testing only):**
|
||||
|
||||
```bash
|
||||
CONTINUWUITY_TLS__CERTS="/etc/letsencrypt/live/matrix.example.com/fullchain.pem"
|
||||
CONTINUWUITY_TLS__KEY="/etc/letsencrypt/live/matrix.example.com/privkey.pem"
|
||||
```
|
||||
|
||||
## Logging Configuration
|
||||
|
||||
Control log output format and verbosity.
|
||||
|
||||
| Variable | Description | Default |
|
||||
| ------------------------------ | ------------------ | ------- |
|
||||
| `CONTINUWUITY_LOG` | Log filter level | - |
|
||||
| `CONTINUWUITY_LOG_COLORS` | ANSI colours | `true` |
|
||||
| `CONTINUWUITY_LOG_SPAN_EVENTS` | Log span events | `none` |
|
||||
| `CONTINUWUITY_LOG_THREAD_IDS` | Include thread IDs | - |
|
||||
|
||||
**Examples:**
|
||||
|
||||
```bash
|
||||
# Set log level to info
|
||||
CONTINUWUITY_LOG="info"
|
||||
|
||||
# Enable debug logging for specific modules
|
||||
CONTINUWUITY_LOG="warn,continuwuity::api=debug"
|
||||
|
||||
# Disable colours for log aggregation
|
||||
CONTINUWUITY_LOG_COLORS="false"
|
||||
```
|
||||
|
||||
## Observability Configuration
|
||||
|
||||
| Variable | Description |
|
||||
| ---------------------------------------- | --------------------- |
|
||||
| `CONTINUWUITY_ALLOW_OTLP` | Enable OpenTelemetry |
|
||||
| `CONTINUWUITY_OTLP_FILTER` | OTLP filter level |
|
||||
| `CONTINUWUITY_OTLP_PROTOCOL` | Protocol (http/grpc) |
|
||||
| `CONTINUWUITY_TRACING_FLAME` | Enable flame graphs |
|
||||
| `CONTINUWUITY_TRACING_FLAME_FILTER` | Flame graph filter |
|
||||
| `CONTINUWUITY_TRACING_FLAME_OUTPUT_PATH` | Output directory |
|
||||
| `CONTINUWUITY_SENTRY` | Enable Sentry |
|
||||
| `CONTINUWUITY_SENTRY_ENDPOINT` | Sentry DSN |
|
||||
| `CONTINUWUITY_SENTRY_SEND_SERVER_NAME` | Include server name |
|
||||
| `CONTINUWUITY_SENTRY_TRACES_SAMPLE_RATE` | Sample rate (0.0-1.0) |
|
||||
|
||||
## Admin Configuration
|
||||
|
||||
Configure admin users and automated command execution.
|
||||
|
||||
| Variable | Description | Default |
|
||||
| ------------------------------------------ | -------------------------------- | ----------------- |
|
||||
| `CONTINUWUITY_ADMINS_LIST` | JSON array of admin user IDs | - |
|
||||
| `CONTINUWUITY_ADMINS_FROM_ROOM` | Derive admins from room | - |
|
||||
| `CONTINUWUITY_ADMIN_ESCAPE_COMMANDS` | Allow `\` prefix in public rooms | - |
|
||||
| `CONTINUWUITY_ADMIN_CONSOLE_AUTOMATIC` | Auto-activate console | - |
|
||||
| `CONTINUWUITY_ADMIN_EXECUTE` | JSON array of startup commands | - |
|
||||
| `CONTINUWUITY_ADMIN_EXECUTE_ERRORS_IGNORE` | Ignore command errors | - |
|
||||
| `CONTINUWUITY_ADMIN_SIGNAL_EXECUTE` | Commands on SIGUSR2 | - |
|
||||
| `CONTINUWUITY_ADMIN_ROOM_TAG` | Admin room tag | `m.server_notice` |
|
||||
|
||||
**Examples:**
|
||||
|
||||
```bash
|
||||
# Create admin user on startup
|
||||
CONTINUWUITY_ADMIN_EXECUTE='["users create-user admin", "users make-user-admin admin"]'
|
||||
|
||||
# Specify admin users directly
|
||||
CONTINUWUITY_ADMINS_LIST='["@alice:example.com", "@bob:example.com"]'
|
||||
```
|
||||
|
||||
## Media & URL Preview Configuration
|
||||
|
||||
| Variable | Description |
|
||||
| ---------------------------------------------------- | ------------------ |
|
||||
| `CONTINUWUITY_URL_PREVIEW_BOUND_INTERFACE` | Bind interface |
|
||||
| `CONTINUWUITY_URL_PREVIEW_DOMAIN_CONTAINS_ALLOWLIST` | Domain allowlist |
|
||||
| `CONTINUWUITY_URL_PREVIEW_DOMAIN_EXPLICIT_ALLOWLIST` | Explicit allowlist |
|
||||
| `CONTINUWUITY_URL_PREVIEW_DOMAIN_EXPLICIT_DENYLIST` | Explicit denylist |
|
||||
| `CONTINUWUITY_URL_PREVIEW_MAX_SPIDER_SIZE` | Max fetch size |
|
||||
| `CONTINUWUITY_URL_PREVIEW_TIMEOUT` | Fetch timeout |
|
||||
| `CONTINUWUITY_IP_RANGE_DENYLIST` | IP range denylist |
|
||||
|
||||
## Tokio Runtime Configuration
|
||||
|
||||
These can be set as environment variables or CLI arguments:
|
||||
|
||||
| Variable | Description |
|
||||
| ----------------------------------------- | -------------------------- |
|
||||
| `TOKIO_WORKER_THREADS` | Worker thread count |
|
||||
| `TOKIO_GLOBAL_QUEUE_INTERVAL` | Global queue interval |
|
||||
| `TOKIO_EVENT_INTERVAL` | Event interval |
|
||||
| `TOKIO_MAX_IO_EVENTS_PER_TICK` | Max I/O events per tick |
|
||||
| `CONTINUWUITY_RUNTIME_HISTOGRAM_INTERVAL` | Histogram bucket size (μs) |
|
||||
| `CONTINUWUITY_RUNTIME_HISTOGRAM_BUCKETS` | Bucket count |
|
||||
| `CONTINUWUITY_RUNTIME_WORKER_AFFINITY` | Enable worker affinity |
|
||||
|
||||
## See Also
|
||||
|
||||
- [Configuration Reference](./config.mdx) - Complete TOML configuration
|
||||
documentation
|
||||
- [Admin Commands](./admin/) - Admin command reference
|
||||
|
|
@ -6,7 +6,7 @@ misconfigurations to cause issues, particularly with networking and permissions.
|
|||
Please check that your issues are not due to problems with your Docker setup.
|
||||
:::
|
||||
|
||||
## Continuwuity and Matrix issues
|
||||
## Continuwuity issues
|
||||
|
||||
### Slow joins to rooms
|
||||
|
||||
|
|
@ -23,6 +23,16 @@ which is a longstanding bug with synchronizing room joins to clients. In this si
|
|||
the bug caused your homeserver to forget to tell your client. **To fix this, clear your client's cache.** Both Element and Cinny
|
||||
have a button to clear their cache in the "About" section of their settings.
|
||||
|
||||
### Configuration not working as expected
|
||||
|
||||
Sometimes you can make a mistake in your configuration that
|
||||
means things don't get passed to Continuwuity correctly.
|
||||
This is particularly easy to do with environment variables.
|
||||
To check what configuration Continuwuity actually sees, you can
|
||||
use the `!admin server show-config` command in your admin room.
|
||||
Beware that this prints out any secrets in your configuration,
|
||||
so you might want to delete the result afterwards!
|
||||
|
||||
### Lost access to admin room
|
||||
|
||||
You can reinvite yourself to the admin room through the following methods:
|
||||
|
|
@ -33,17 +43,7 @@ argument once to invite yourself to the admin room on startup
|
|||
- Or specify the `emergency_password` config option to allow you to temporarily
|
||||
log into the server account (`@conduit`) from a web client
|
||||
|
||||
## General potential issues
|
||||
|
||||
### Configuration not working as expected
|
||||
|
||||
Sometimes you can make a mistake in your configuration that
|
||||
means things don't get passed to Continuwuity correctly.
|
||||
This is particularly easy to do with environment variables.
|
||||
To check what configuration Continuwuity actually sees, you can
|
||||
use the `!admin server show-config` command in your admin room.
|
||||
Beware that this prints out any secrets in your configuration,
|
||||
so you might want to delete the result afterwards!
|
||||
## DNS issues
|
||||
|
||||
### Potential DNS issues when using Docker
|
||||
|
||||
|
|
|
|||
54
flake.lock
generated
54
flake.lock
generated
|
|
@ -3,11 +3,11 @@
|
|||
"advisory-db": {
|
||||
"flake": false,
|
||||
"locked": {
|
||||
"lastModified": 1766324728,
|
||||
"narHash": "sha256-9C+WyE5U3y5w4WQXxmb0ylRyMMsPyzxielWXSHrcDpE=",
|
||||
"lastModified": 1773786698,
|
||||
"narHash": "sha256-o/J7ZculgwSs1L4H4UFlFZENOXTJzq1X0n71x6oNNvY=",
|
||||
"owner": "rustsec",
|
||||
"repo": "advisory-db",
|
||||
"rev": "c88b88c62bda077be8aa621d4e89d8701e39cb5d",
|
||||
"rev": "99e9de91bb8b61f06ef234ff84e11f758ecd5384",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
|
|
@ -18,11 +18,11 @@
|
|||
},
|
||||
"crane": {
|
||||
"locked": {
|
||||
"lastModified": 1766194365,
|
||||
"narHash": "sha256-4AFsUZ0kl6MXSm4BaQgItD0VGlEKR3iq7gIaL7TjBvc=",
|
||||
"lastModified": 1773189535,
|
||||
"narHash": "sha256-E1G/Or6MWeP+L6mpQ0iTFLpzSzlpGrITfU2220Gq47g=",
|
||||
"owner": "ipetkov",
|
||||
"repo": "crane",
|
||||
"rev": "7d8ec2c71771937ab99790b45e6d9b93d15d9379",
|
||||
"rev": "6fa2fb4cf4a89ba49fc9dd5a3eb6cde99d388269",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
|
|
@ -39,11 +39,11 @@
|
|||
"rust-analyzer-src": "rust-analyzer-src"
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1766299592,
|
||||
"narHash": "sha256-7u+q5hexu2eAxL2VjhskHvaUKg+GexmelIR2ve9Nbb4=",
|
||||
"lastModified": 1773732206,
|
||||
"narHash": "sha256-HKibxaUXyWd4Hs+ZUnwo6XslvaFqFqJh66uL9tphU4Q=",
|
||||
"owner": "nix-community",
|
||||
"repo": "fenix",
|
||||
"rev": "381579dee168d5ced412e2990e9637ecc7cf1c5d",
|
||||
"rev": "0aa13c1b54063a8d8679b28a5cd357ba98f4a56b",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
|
|
@ -55,11 +55,11 @@
|
|||
"flake-compat": {
|
||||
"flake": false,
|
||||
"locked": {
|
||||
"lastModified": 1765121682,
|
||||
"narHash": "sha256-4VBOP18BFeiPkyhy9o4ssBNQEvfvv1kXkasAYd0+rrA=",
|
||||
"lastModified": 1767039857,
|
||||
"narHash": "sha256-vNpUSpF5Nuw8xvDLj2KCwwksIbjua2LZCqhV1LNRDns=",
|
||||
"owner": "edolstra",
|
||||
"repo": "flake-compat",
|
||||
"rev": "65f23138d8d09a92e30f1e5c87611b23ef451bf3",
|
||||
"rev": "5edf11c44bc78a0d334f6334cdaf7d60d732daab",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
|
|
@ -74,11 +74,11 @@
|
|||
"nixpkgs-lib": "nixpkgs-lib"
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1765835352,
|
||||
"narHash": "sha256-XswHlK/Qtjasvhd1nOa1e8MgZ8GS//jBoTqWtrS1Giw=",
|
||||
"lastModified": 1772408722,
|
||||
"narHash": "sha256-rHuJtdcOjK7rAHpHphUb1iCvgkU3GpfvicLMwwnfMT0=",
|
||||
"owner": "hercules-ci",
|
||||
"repo": "flake-parts",
|
||||
"rev": "a34fae9c08a15ad73f295041fec82323541400a9",
|
||||
"rev": "f20dc5d9b8027381c474144ecabc9034d6a839a3",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
|
|
@ -89,11 +89,11 @@
|
|||
},
|
||||
"nixpkgs": {
|
||||
"locked": {
|
||||
"lastModified": 1766070988,
|
||||
"narHash": "sha256-G/WVghka6c4bAzMhTwT2vjLccg/awmHkdKSd2JrycLc=",
|
||||
"lastModified": 1773734432,
|
||||
"narHash": "sha256-IF5ppUWh6gHGHYDbtVUyhwy/i7D261P7fWD1bPefOsw=",
|
||||
"owner": "NixOS",
|
||||
"repo": "nixpkgs",
|
||||
"rev": "c6245e83d836d0433170a16eb185cefe0572f8b8",
|
||||
"rev": "cda48547b432e8d3b18b4180ba07473762ec8558",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
|
|
@ -105,11 +105,11 @@
|
|||
},
|
||||
"nixpkgs-lib": {
|
||||
"locked": {
|
||||
"lastModified": 1765674936,
|
||||
"narHash": "sha256-k00uTP4JNfmejrCLJOwdObYC9jHRrr/5M/a/8L2EIdo=",
|
||||
"lastModified": 1772328832,
|
||||
"narHash": "sha256-e+/T/pmEkLP6BHhYjx6GmwP5ivonQQn0bJdH9YrRB+Q=",
|
||||
"owner": "nix-community",
|
||||
"repo": "nixpkgs.lib",
|
||||
"rev": "2075416fcb47225d9b68ac469a5c4801a9c4dd85",
|
||||
"rev": "c185c7a5e5dd8f9add5b2f8ebeff00888b070742",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
|
|
@ -132,11 +132,11 @@
|
|||
"rust-analyzer-src": {
|
||||
"flake": false,
|
||||
"locked": {
|
||||
"lastModified": 1766253897,
|
||||
"narHash": "sha256-ChK07B1aOlJ4QzWXpJo+y8IGAxp1V9yQ2YloJ+RgHRw=",
|
||||
"lastModified": 1773697963,
|
||||
"narHash": "sha256-xdKI77It9PM6eNrCcDZsnP4SKulZwk8VkDgBRVMnCb8=",
|
||||
"owner": "rust-lang",
|
||||
"repo": "rust-analyzer",
|
||||
"rev": "765b7bdb432b3740f2d564afccfae831d5a972e4",
|
||||
"rev": "2993637174252ff60a582fd1f55b9ab52c39db6d",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
|
|
@ -153,11 +153,11 @@
|
|||
]
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1766000401,
|
||||
"narHash": "sha256-+cqN4PJz9y0JQXfAK5J1drd0U05D5fcAGhzhfVrDlsI=",
|
||||
"lastModified": 1773297127,
|
||||
"narHash": "sha256-6E/yhXP7Oy/NbXtf1ktzmU8SdVqJQ09HC/48ebEGBpk=",
|
||||
"owner": "numtide",
|
||||
"repo": "treefmt-nix",
|
||||
"rev": "42d96e75aa56a3f70cab7e7dc4a32868db28e8fd",
|
||||
"rev": "71b125cd05fbfd78cab3e070b73544abe24c5016",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
|
|
|
|||
|
|
@ -12,7 +12,6 @@
|
|||
|
||||
rocksdbAllFeatures = self'.packages.rocksdb.override {
|
||||
enableJemalloc = true;
|
||||
enableLiburing = true;
|
||||
};
|
||||
|
||||
commonAttrs = (uwulib.build.commonAttrs { }) // {
|
||||
|
|
|
|||
|
|
@ -27,7 +27,6 @@
|
|||
commonAttrsArgs.profile = "release";
|
||||
rocksdb = self'.packages.rocksdb.override {
|
||||
enableJemalloc = true;
|
||||
enableLiburing = true;
|
||||
};
|
||||
features = {
|
||||
enabledFeatures = "all";
|
||||
|
|
|
|||
|
|
@ -7,7 +7,6 @@
|
|||
rust-jemalloc-sys-unprefixed,
|
||||
|
||||
enableJemalloc ? false,
|
||||
enableLiburing ? false,
|
||||
|
||||
fetchFromGitea,
|
||||
|
||||
|
|
@ -32,7 +31,7 @@ in
|
|||
|
||||
# for some reason enableLiburing in nixpkgs rocksdb is default true
|
||||
# which breaks Darwin entirely
|
||||
enableLiburing = enableLiburing && notDarwin;
|
||||
enableLiburing = notDarwin;
|
||||
}).overrideAttrs
|
||||
(old: {
|
||||
src = fetchFromGitea {
|
||||
|
|
@ -74,7 +73,7 @@ in
|
|||
"USE_RTTI"
|
||||
]);
|
||||
|
||||
enableLiburing = enableLiburing && notDarwin;
|
||||
enableLiburing = notDarwin;
|
||||
|
||||
# outputs has "tools" which we don't need or use
|
||||
outputs = [ "out" ];
|
||||
|
|
|
|||
|
|
@ -11,7 +11,6 @@
|
|||
uwulib = inputs.self.uwulib.init pkgs;
|
||||
rocksdbAllFeatures = self'.packages.rocksdb.override {
|
||||
enableJemalloc = true;
|
||||
enableLiburing = true;
|
||||
};
|
||||
in
|
||||
{
|
||||
|
|
|
|||
618
package-lock.json
generated
618
package-lock.json
generated
File diff suppressed because it is too large
Load diff
|
|
@ -18,6 +18,7 @@ Environment="CONTINUWUITY_DATABASE_PATH=%S/conduwuit"
|
|||
Environment="CONTINUWUITY_CONFIG_RELOAD_SIGNAL=true"
|
||||
|
||||
LoadCredential=conduwuit.toml:/etc/conduwuit/conduwuit.toml
|
||||
RefreshOnReload=yes
|
||||
|
||||
ExecStart=/usr/bin/conduwuit --config ${CREDENTIALS_DIRECTORY}/conduwuit.toml
|
||||
|
||||
|
|
|
|||
|
|
@ -1,6 +1,6 @@
|
|||
{
|
||||
"$schema": "https://docs.renovatebot.com/renovate-schema.json",
|
||||
"extends": ["config:recommended", "replacements:all"],
|
||||
"extends": ["config:recommended", "replacements:all", ":semanticCommitTypeAll(chore)"],
|
||||
"dependencyDashboard": true,
|
||||
"osvVulnerabilityAlerts": true,
|
||||
"lockFileMaintenance": {
|
||||
|
|
@ -36,10 +36,18 @@
|
|||
},
|
||||
"packageRules": [
|
||||
{
|
||||
"description": "Batch patch-level Rust dependency updates",
|
||||
"description": "Batch minor and patch Rust dependency updates",
|
||||
"matchManagers": ["cargo"],
|
||||
"matchUpdateTypes": ["minor", "patch"],
|
||||
"matchCurrentVersion": ">=1.0.0",
|
||||
"groupName": "rust-non-major"
|
||||
},
|
||||
{
|
||||
"description": "Batch patch-level zerover Rust dependency updates",
|
||||
"matchManagers": ["cargo"],
|
||||
"matchUpdateTypes": ["patch"],
|
||||
"groupName": "rust-patch-updates"
|
||||
"matchCurrentVersion": ">=0.1.0,<1.0.0",
|
||||
"groupName": "rust-zerover-patch-updates"
|
||||
},
|
||||
{
|
||||
"description": "Limit concurrent Cargo PRs",
|
||||
|
|
|
|||
|
|
@ -11,6 +11,7 @@ use crate::{
|
|||
query::{self, QueryCommand},
|
||||
room::{self, RoomCommand},
|
||||
server::{self, ServerCommand},
|
||||
space::{self, SpaceCommand},
|
||||
token::{self, TokenCommand},
|
||||
user::{self, UserCommand},
|
||||
};
|
||||
|
|
@ -34,6 +35,10 @@ pub enum AdminCommand {
|
|||
/// Commands for managing rooms
|
||||
Rooms(RoomCommand),
|
||||
|
||||
#[command(subcommand)]
|
||||
/// Commands for managing space permissions
|
||||
Spaces(SpaceCommand),
|
||||
|
||||
#[command(subcommand)]
|
||||
/// Commands for managing federation
|
||||
Federation(FederationCommand),
|
||||
|
|
@ -81,6 +86,10 @@ pub(super) async fn process(command: AdminCommand, context: &Context<'_>) -> Res
|
|||
token::process(command, context).await
|
||||
},
|
||||
| Rooms(command) => room::process(command, context).await,
|
||||
| Spaces(command) => {
|
||||
context.bail_restricted()?;
|
||||
space::process(command, context).await
|
||||
},
|
||||
| Federation(command) => federation::process(command, context).await,
|
||||
| Server(command) => server::process(command, context).await,
|
||||
| Debug(command) => debug::process(command, context).await,
|
||||
|
|
|
|||
|
|
@ -40,7 +40,7 @@ pub enum MediaCommand {
|
|||
/// * Delete all remote and local media from 3 days ago, up until now:
|
||||
///
|
||||
/// `!admin media delete-past-remote-media -a 3d
|
||||
///-yes-i-want-to-delete-local-media`
|
||||
///--yes-i-want-to-delete-local-media`
|
||||
#[command(verbatim_doc_comment)]
|
||||
DeletePastRemoteMedia {
|
||||
/// The relative time (e.g. 30s, 5m, 7d) from now within which to
|
||||
|
|
|
|||
|
|
@ -17,6 +17,7 @@ pub(crate) mod media;
|
|||
pub(crate) mod query;
|
||||
pub(crate) mod room;
|
||||
pub(crate) mod server;
|
||||
pub(crate) mod space;
|
||||
pub(crate) mod token;
|
||||
pub(crate) mod user;
|
||||
|
||||
|
|
|
|||
15
src/admin/space/mod.rs
Normal file
15
src/admin/space/mod.rs
Normal file
|
|
@ -0,0 +1,15 @@
|
|||
pub(super) mod roles;
|
||||
|
||||
use clap::Subcommand;
|
||||
use conduwuit::Result;
|
||||
|
||||
use self::roles::SpaceRolesCommand;
|
||||
use crate::admin_command_dispatch;
|
||||
|
||||
#[admin_command_dispatch]
|
||||
#[derive(Debug, Subcommand)]
|
||||
pub enum SpaceCommand {
|
||||
#[command(subcommand)]
|
||||
/// Manage space roles and permissions
|
||||
Roles(SpaceRolesCommand),
|
||||
}
|
||||
632
src/admin/space/roles.rs
Normal file
632
src/admin/space/roles.rs
Normal file
|
|
@ -0,0 +1,632 @@
|
|||
use std::fmt::Write;
|
||||
|
||||
use clap::Subcommand;
|
||||
use conduwuit::{Err, Event, Result, matrix::pdu::PduBuilder};
|
||||
use conduwuit_core::matrix::space_roles::{
|
||||
RoleDefinition, SPACE_CASCADING_EVENT_TYPE, SPACE_ROLE_MEMBER_EVENT_TYPE,
|
||||
SPACE_ROLE_ROOM_EVENT_TYPE, SPACE_ROLES_EVENT_TYPE, SpaceCascadingEventContent,
|
||||
SpaceRoleMemberEventContent, SpaceRoleRoomEventContent, SpaceRolesEventContent,
|
||||
};
|
||||
use futures::StreamExt;
|
||||
use ruma::{OwnedRoomId, OwnedRoomOrAliasId, OwnedUserId, events::StateEventType};
|
||||
use serde_json::value::to_raw_value;
|
||||
|
||||
use crate::{admin_command, admin_command_dispatch};
|
||||
|
||||
fn roles_event_type() -> StateEventType {
|
||||
StateEventType::from(SPACE_ROLES_EVENT_TYPE.to_owned())
|
||||
}
|
||||
|
||||
fn member_event_type() -> StateEventType {
|
||||
StateEventType::from(SPACE_ROLE_MEMBER_EVENT_TYPE.to_owned())
|
||||
}
|
||||
|
||||
fn room_event_type() -> StateEventType {
|
||||
StateEventType::from(SPACE_ROLE_ROOM_EVENT_TYPE.to_owned())
|
||||
}
|
||||
|
||||
fn cascading_event_type() -> StateEventType {
|
||||
StateEventType::from(SPACE_CASCADING_EVENT_TYPE.to_owned())
|
||||
}
|
||||
|
||||
macro_rules! resolve_room_as_space {
|
||||
($self:expr, $space:expr) => {{
|
||||
let space_id = $self.services.rooms.alias.resolve(&$space).await?;
|
||||
if !matches!(
|
||||
$self
|
||||
.services
|
||||
.rooms
|
||||
.state_accessor
|
||||
.get_room_type(&space_id)
|
||||
.await,
|
||||
Ok(ruma::room::RoomType::Space)
|
||||
) {
|
||||
return Err!("The specified room is not a Space.");
|
||||
}
|
||||
space_id
|
||||
}};
|
||||
}
|
||||
|
||||
macro_rules! resolve_space {
|
||||
($self:expr, $space:expr) => {{
|
||||
let space_id = resolve_room_as_space!($self, $space);
|
||||
if !$self
|
||||
.services
|
||||
.rooms
|
||||
.roles
|
||||
.is_enabled_for_space(&space_id)
|
||||
.await
|
||||
{
|
||||
return $self
|
||||
.write_str(
|
||||
"Space permission cascading is disabled for this Space. Enable it \
|
||||
server-wide with `space_permission_cascading = true` in your config, or \
|
||||
per-Space with `!admin space roles enable <space>`.",
|
||||
)
|
||||
.await;
|
||||
}
|
||||
space_id
|
||||
}};
|
||||
}
|
||||
|
||||
macro_rules! custom_state_pdu {
|
||||
($event_type:expr, $state_key:expr, $content:expr) => {
|
||||
PduBuilder {
|
||||
event_type: $event_type.to_owned().into(),
|
||||
content: to_raw_value($content)
|
||||
.map_err(|e| conduwuit::err!("Failed to serialize state event content: {e}"))?,
|
||||
state_key: Some($state_key.to_owned().into()),
|
||||
..PduBuilder::default()
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
/// Cascade-remove a role name from all state events of a given type. For each
|
||||
/// event that contains the role, the `$field` is filtered and the updated
|
||||
/// content is sent back as a new state event.
|
||||
macro_rules! cascade_remove_role {
|
||||
(
|
||||
$self:expr,
|
||||
$shortstatehash:expr,
|
||||
$event_type_fn:expr,
|
||||
$event_type_const:expr,
|
||||
$content_ty:ty,
|
||||
$field:ident,
|
||||
$role_name:expr,
|
||||
$space_id:expr,
|
||||
$state_lock:expr,
|
||||
$server_user:expr
|
||||
) => {{
|
||||
let ev_type = $event_type_fn;
|
||||
let entries: Vec<(_, ruma::OwnedEventId)> = $self
|
||||
.services
|
||||
.rooms
|
||||
.state_accessor
|
||||
.state_keys_with_ids($shortstatehash, &ev_type)
|
||||
.collect()
|
||||
.await;
|
||||
|
||||
for (state_key, event_id) in entries {
|
||||
if let Ok(pdu) = $self.services.rooms.timeline.get_pdu(&event_id).await {
|
||||
if let Ok(mut content) = pdu.get_content::<$content_ty>() {
|
||||
if content.$field.contains($role_name) {
|
||||
content.$field.retain(|r| r != $role_name);
|
||||
$self
|
||||
.services
|
||||
.rooms
|
||||
.timeline
|
||||
.build_and_append_pdu(
|
||||
custom_state_pdu!($event_type_const, &state_key, &content),
|
||||
$server_user,
|
||||
Some(&$space_id),
|
||||
&$state_lock,
|
||||
)
|
||||
.await?;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}};
|
||||
}
|
||||
|
||||
macro_rules! send_space_state {
|
||||
($self:expr, $space_id:expr, $event_type:expr, $state_key:expr, $content:expr) => {{
|
||||
let state_lock = $self.services.rooms.state.mutex.lock(&$space_id).await;
|
||||
let server_user = &$self.services.globals.server_user;
|
||||
$self
|
||||
.services
|
||||
.rooms
|
||||
.timeline
|
||||
.build_and_append_pdu(
|
||||
custom_state_pdu!($event_type, $state_key, $content),
|
||||
server_user,
|
||||
Some(&$space_id),
|
||||
&state_lock,
|
||||
)
|
||||
.await?
|
||||
}};
|
||||
}
|
||||
|
||||
#[admin_command_dispatch]
|
||||
#[derive(Debug, Subcommand)]
|
||||
pub enum SpaceRolesCommand {
|
||||
/// List all roles defined in a space
|
||||
List {
|
||||
space: OwnedRoomOrAliasId,
|
||||
},
|
||||
/// Add a new role to a space
|
||||
Add {
|
||||
space: OwnedRoomOrAliasId,
|
||||
role_name: String,
|
||||
#[arg(long)]
|
||||
description: Option<String>,
|
||||
#[arg(long)]
|
||||
power_level: Option<i64>,
|
||||
},
|
||||
/// Remove a role from a space
|
||||
Remove {
|
||||
space: OwnedRoomOrAliasId,
|
||||
role_name: String,
|
||||
},
|
||||
/// Assign a role to a user
|
||||
Assign {
|
||||
space: OwnedRoomOrAliasId,
|
||||
user_id: OwnedUserId,
|
||||
role_name: String,
|
||||
},
|
||||
/// Revoke a role from a user
|
||||
Revoke {
|
||||
space: OwnedRoomOrAliasId,
|
||||
user_id: OwnedUserId,
|
||||
role_name: String,
|
||||
},
|
||||
/// Require a role for a room
|
||||
Require {
|
||||
space: OwnedRoomOrAliasId,
|
||||
room_id: OwnedRoomId,
|
||||
role_name: String,
|
||||
},
|
||||
/// Remove a role requirement from a room
|
||||
Unrequire {
|
||||
space: OwnedRoomOrAliasId,
|
||||
room_id: OwnedRoomId,
|
||||
role_name: String,
|
||||
},
|
||||
/// Show a user's roles in a space
|
||||
User {
|
||||
space: OwnedRoomOrAliasId,
|
||||
user_id: OwnedUserId,
|
||||
},
|
||||
/// Show a room's role requirements in a space
|
||||
Room {
|
||||
space: OwnedRoomOrAliasId,
|
||||
room_id: OwnedRoomId,
|
||||
},
|
||||
/// Enable space permission cascading for a specific space (overrides
|
||||
/// server config)
|
||||
Enable {
|
||||
space: OwnedRoomOrAliasId,
|
||||
},
|
||||
/// Disable space permission cascading for a specific space (overrides
|
||||
/// server config)
|
||||
Disable {
|
||||
space: OwnedRoomOrAliasId,
|
||||
},
|
||||
/// Show whether cascading is enabled for a space and the source (server
|
||||
/// default or per-space override)
|
||||
Status {
|
||||
space: OwnedRoomOrAliasId,
|
||||
},
|
||||
}
|
||||
|
||||
#[admin_command]
|
||||
async fn list(&self, space: OwnedRoomOrAliasId) -> Result {
|
||||
let space_id = resolve_space!(self, space);
|
||||
let roles_event_type = roles_event_type();
|
||||
|
||||
let content: SpaceRolesEventContent = self
|
||||
.services
|
||||
.rooms
|
||||
.state_accessor
|
||||
.room_state_get_content(&space_id, &roles_event_type, "")
|
||||
.await
|
||||
.unwrap_or_default();
|
||||
|
||||
if content.roles.is_empty() {
|
||||
return self.write_str("No roles defined in this space.").await;
|
||||
}
|
||||
|
||||
let mut msg = format!("Roles in {space_id}:\n```\n");
|
||||
for (name, def) in &content.roles {
|
||||
let pl = def
|
||||
.power_level
|
||||
.map(|p| format!(" (power_level: {p})"))
|
||||
.unwrap_or_default();
|
||||
let _ = writeln!(msg, "- {name}: {}{pl}", def.description);
|
||||
}
|
||||
msg.push_str("```");
|
||||
|
||||
self.write_str(&msg).await
|
||||
}
|
||||
|
||||
#[admin_command]
|
||||
async fn add(
|
||||
&self,
|
||||
space: OwnedRoomOrAliasId,
|
||||
role_name: String,
|
||||
description: Option<String>,
|
||||
power_level: Option<i64>,
|
||||
) -> Result {
|
||||
let space_id = resolve_space!(self, space);
|
||||
|
||||
if let Some(pl) = power_level {
|
||||
if pl > i64::from(ruma::Int::MAX) || pl < i64::from(ruma::Int::MIN) {
|
||||
return Err!(
|
||||
"Power level must be between {} and {}.",
|
||||
ruma::Int::MIN,
|
||||
ruma::Int::MAX
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
let roles_event_type = roles_event_type();
|
||||
|
||||
let mut content: SpaceRolesEventContent = self
|
||||
.services
|
||||
.rooms
|
||||
.state_accessor
|
||||
.room_state_get_content(&space_id, &roles_event_type, "")
|
||||
.await
|
||||
.unwrap_or_default();
|
||||
|
||||
if content.roles.contains_key(&role_name) {
|
||||
return Err!("Role '{role_name}' already exists in this space.");
|
||||
}
|
||||
|
||||
content.roles.insert(role_name.clone(), RoleDefinition {
|
||||
description: description.unwrap_or_else(|| role_name.clone()),
|
||||
power_level,
|
||||
});
|
||||
|
||||
send_space_state!(self, space_id, SPACE_ROLES_EVENT_TYPE, "", &content);
|
||||
|
||||
self.write_str(&format!("Added role '{role_name}' to space {space_id}."))
|
||||
.await
|
||||
}
|
||||
|
||||
#[admin_command]
|
||||
async fn remove(&self, space: OwnedRoomOrAliasId, role_name: String) -> Result {
|
||||
let space_id = resolve_space!(self, space);
|
||||
let roles_event_type = roles_event_type();
|
||||
|
||||
let mut content: SpaceRolesEventContent = self
|
||||
.services
|
||||
.rooms
|
||||
.state_accessor
|
||||
.room_state_get_content(&space_id, &roles_event_type, "")
|
||||
.await
|
||||
.unwrap_or_default();
|
||||
|
||||
if content.roles.remove(&role_name).is_none() {
|
||||
return Err!("Role '{role_name}' does not exist in this space.");
|
||||
}
|
||||
|
||||
send_space_state!(self, space_id, SPACE_ROLES_EVENT_TYPE, "", &content);
|
||||
|
||||
// Cascade: remove the deleted role from all member and room events
|
||||
let server_user = &self.services.globals.server_user;
|
||||
if let Ok(shortstatehash) = self
|
||||
.services
|
||||
.rooms
|
||||
.state
|
||||
.get_room_shortstatehash(&space_id)
|
||||
.await
|
||||
{
|
||||
let state_lock = self.services.rooms.state.mutex.lock(&space_id).await;
|
||||
|
||||
cascade_remove_role!(
|
||||
self,
|
||||
shortstatehash,
|
||||
member_event_type(),
|
||||
SPACE_ROLE_MEMBER_EVENT_TYPE,
|
||||
SpaceRoleMemberEventContent,
|
||||
roles,
|
||||
&role_name,
|
||||
space_id,
|
||||
state_lock,
|
||||
server_user
|
||||
);
|
||||
|
||||
cascade_remove_role!(
|
||||
self,
|
||||
shortstatehash,
|
||||
room_event_type(),
|
||||
SPACE_ROLE_ROOM_EVENT_TYPE,
|
||||
SpaceRoleRoomEventContent,
|
||||
required_roles,
|
||||
&role_name,
|
||||
space_id,
|
||||
state_lock,
|
||||
server_user
|
||||
);
|
||||
}
|
||||
|
||||
self.write_str(&format!("Removed role '{role_name}' from space {space_id}."))
|
||||
.await
|
||||
}
|
||||
|
||||
#[admin_command]
|
||||
async fn assign(
|
||||
&self,
|
||||
space: OwnedRoomOrAliasId,
|
||||
user_id: OwnedUserId,
|
||||
role_name: String,
|
||||
) -> Result {
|
||||
let space_id = resolve_space!(self, space);
|
||||
|
||||
let roles_event_type = roles_event_type();
|
||||
let role_defs: SpaceRolesEventContent = self
|
||||
.services
|
||||
.rooms
|
||||
.state_accessor
|
||||
.room_state_get_content(&space_id, &roles_event_type, "")
|
||||
.await
|
||||
.unwrap_or_default();
|
||||
|
||||
if !role_defs.roles.contains_key(&role_name) {
|
||||
return Err!("Role '{role_name}' does not exist in this space.");
|
||||
}
|
||||
|
||||
let member_event_type = member_event_type();
|
||||
|
||||
let mut content: SpaceRoleMemberEventContent = self
|
||||
.services
|
||||
.rooms
|
||||
.state_accessor
|
||||
.room_state_get_content(&space_id, &member_event_type, user_id.as_str())
|
||||
.await
|
||||
.unwrap_or_default();
|
||||
|
||||
if content.roles.contains(&role_name) {
|
||||
return Err!("User {user_id} already has role '{role_name}' in this space.");
|
||||
}
|
||||
|
||||
content.roles.push(role_name.clone());
|
||||
|
||||
send_space_state!(self, space_id, SPACE_ROLE_MEMBER_EVENT_TYPE, user_id.as_str(), &content);
|
||||
|
||||
self.write_str(&format!("Assigned role '{role_name}' to {user_id} in space {space_id}."))
|
||||
.await
|
||||
}
|
||||
|
||||
#[admin_command]
|
||||
async fn revoke(
|
||||
&self,
|
||||
space: OwnedRoomOrAliasId,
|
||||
user_id: OwnedUserId,
|
||||
role_name: String,
|
||||
) -> Result {
|
||||
let space_id = resolve_space!(self, space);
|
||||
let member_event_type = member_event_type();
|
||||
|
||||
let mut content: SpaceRoleMemberEventContent = self
|
||||
.services
|
||||
.rooms
|
||||
.state_accessor
|
||||
.room_state_get_content(&space_id, &member_event_type, user_id.as_str())
|
||||
.await
|
||||
.unwrap_or_default();
|
||||
|
||||
let original_len = content.roles.len();
|
||||
content.roles.retain(|r| r != &role_name);
|
||||
|
||||
if content.roles.len() == original_len {
|
||||
return Err!("User {user_id} does not have role '{role_name}' in this space.");
|
||||
}
|
||||
|
||||
send_space_state!(self, space_id, SPACE_ROLE_MEMBER_EVENT_TYPE, user_id.as_str(), &content);
|
||||
|
||||
self.write_str(&format!("Revoked role '{role_name}' from {user_id} in space {space_id}."))
|
||||
.await
|
||||
}
|
||||
|
||||
#[admin_command]
|
||||
async fn require(
|
||||
&self,
|
||||
space: OwnedRoomOrAliasId,
|
||||
room_id: OwnedRoomId,
|
||||
role_name: String,
|
||||
) -> Result {
|
||||
let space_id = resolve_space!(self, space);
|
||||
|
||||
let child_rooms = self.services.rooms.roles.get_child_rooms(&space_id).await;
|
||||
if !child_rooms.contains(&room_id) {
|
||||
return Err!("Room {room_id} is not a child of space {space_id}.");
|
||||
}
|
||||
|
||||
let roles_event_type = roles_event_type();
|
||||
let role_defs: SpaceRolesEventContent = self
|
||||
.services
|
||||
.rooms
|
||||
.state_accessor
|
||||
.room_state_get_content(&space_id, &roles_event_type, "")
|
||||
.await
|
||||
.unwrap_or_default();
|
||||
|
||||
if !role_defs.roles.contains_key(&role_name) {
|
||||
return Err!("Role '{role_name}' does not exist in this space.");
|
||||
}
|
||||
|
||||
let room_event_type = room_event_type();
|
||||
|
||||
let mut content: SpaceRoleRoomEventContent = self
|
||||
.services
|
||||
.rooms
|
||||
.state_accessor
|
||||
.room_state_get_content(&space_id, &room_event_type, room_id.as_str())
|
||||
.await
|
||||
.unwrap_or_default();
|
||||
|
||||
if content.required_roles.contains(&role_name) {
|
||||
return Err!("Room {room_id} already requires role '{role_name}' in this space.");
|
||||
}
|
||||
|
||||
content.required_roles.push(role_name.clone());
|
||||
|
||||
send_space_state!(self, space_id, SPACE_ROLE_ROOM_EVENT_TYPE, room_id.as_str(), &content);
|
||||
|
||||
self.write_str(&format!(
|
||||
"Room {room_id} now requires role '{role_name}' in space {space_id}."
|
||||
))
|
||||
.await
|
||||
}
|
||||
|
||||
#[admin_command]
|
||||
async fn unrequire(
|
||||
&self,
|
||||
space: OwnedRoomOrAliasId,
|
||||
room_id: OwnedRoomId,
|
||||
role_name: String,
|
||||
) -> Result {
|
||||
let space_id = resolve_space!(self, space);
|
||||
let room_event_type = room_event_type();
|
||||
|
||||
let mut content: SpaceRoleRoomEventContent = self
|
||||
.services
|
||||
.rooms
|
||||
.state_accessor
|
||||
.room_state_get_content(&space_id, &room_event_type, room_id.as_str())
|
||||
.await
|
||||
.unwrap_or_default();
|
||||
|
||||
let original_len = content.required_roles.len();
|
||||
content.required_roles.retain(|r| r != &role_name);
|
||||
|
||||
if content.required_roles.len() == original_len {
|
||||
return Err!("Room {room_id} does not require role '{role_name}' in this space.");
|
||||
}
|
||||
|
||||
send_space_state!(self, space_id, SPACE_ROLE_ROOM_EVENT_TYPE, room_id.as_str(), &content);
|
||||
|
||||
self.write_str(&format!(
|
||||
"Removed role requirement '{role_name}' from room {room_id} in space {space_id}."
|
||||
))
|
||||
.await
|
||||
}
|
||||
|
||||
#[admin_command]
|
||||
async fn user(&self, space: OwnedRoomOrAliasId, user_id: OwnedUserId) -> Result {
|
||||
let space_id = resolve_space!(self, space);
|
||||
|
||||
let roles = self
|
||||
.services
|
||||
.rooms
|
||||
.roles
|
||||
.get_user_roles_in_space(&space_id, &user_id)
|
||||
.await;
|
||||
|
||||
match roles {
|
||||
| Some(roles) if !roles.is_empty() => {
|
||||
let list: String = roles
|
||||
.iter()
|
||||
.map(|r| format!("- {r}"))
|
||||
.collect::<Vec<_>>()
|
||||
.join("\n");
|
||||
self.write_str(&format!("Roles for {user_id} in space {space_id}:\n```\n{list}\n```"))
|
||||
.await
|
||||
},
|
||||
| _ =>
|
||||
self.write_str(&format!("User {user_id} has no roles in space {space_id}."))
|
||||
.await,
|
||||
}
|
||||
}
|
||||
|
||||
#[admin_command]
|
||||
async fn room(&self, space: OwnedRoomOrAliasId, room_id: OwnedRoomId) -> Result {
|
||||
let space_id = resolve_space!(self, space);
|
||||
|
||||
let reqs = self
|
||||
.services
|
||||
.rooms
|
||||
.roles
|
||||
.get_room_requirements_in_space(&space_id, &room_id)
|
||||
.await;
|
||||
|
||||
match reqs {
|
||||
| Some(reqs) if !reqs.is_empty() => {
|
||||
let list: String = reqs
|
||||
.iter()
|
||||
.map(|r| format!("- {r}"))
|
||||
.collect::<Vec<_>>()
|
||||
.join("\n");
|
||||
self.write_str(&format!(
|
||||
"Required roles for room {room_id} in space {space_id}:\n```\n{list}\n```"
|
||||
))
|
||||
.await
|
||||
},
|
||||
| _ =>
|
||||
self.write_str(&format!(
|
||||
"Room {room_id} has no role requirements in space {space_id}."
|
||||
))
|
||||
.await,
|
||||
}
|
||||
}
|
||||
|
||||
#[admin_command]
|
||||
async fn enable(&self, space: OwnedRoomOrAliasId) -> Result {
|
||||
let space_id = resolve_room_as_space!(self, space);
|
||||
|
||||
self.services
|
||||
.rooms
|
||||
.roles
|
||||
.ensure_default_roles(&space_id)
|
||||
.await?;
|
||||
|
||||
let content = SpaceCascadingEventContent { enabled: true };
|
||||
send_space_state!(self, space_id, SPACE_CASCADING_EVENT_TYPE, "", &content);
|
||||
|
||||
self.write_str(&format!("Space permission cascading enabled for {space_id}."))
|
||||
.await
|
||||
}
|
||||
|
||||
#[admin_command]
|
||||
async fn disable(&self, space: OwnedRoomOrAliasId) -> Result {
|
||||
let space_id = resolve_room_as_space!(self, space);
|
||||
|
||||
let content = SpaceCascadingEventContent { enabled: false };
|
||||
send_space_state!(self, space_id, SPACE_CASCADING_EVENT_TYPE, "", &content);
|
||||
|
||||
self.write_str(&format!("Space permission cascading disabled for {space_id}."))
|
||||
.await
|
||||
}
|
||||
|
||||
#[admin_command]
|
||||
async fn status(&self, space: OwnedRoomOrAliasId) -> Result {
|
||||
let space_id = resolve_room_as_space!(self, space);
|
||||
|
||||
let global_default = self.services.rooms.roles.is_enabled();
|
||||
let cascading_event_type = cascading_event_type();
|
||||
let per_space_override: Option<bool> = self
|
||||
.services
|
||||
.rooms
|
||||
.state_accessor
|
||||
.room_state_get_content::<SpaceCascadingEventContent>(
|
||||
&space_id,
|
||||
&cascading_event_type,
|
||||
"",
|
||||
)
|
||||
.await
|
||||
.ok()
|
||||
.map(|c| c.enabled);
|
||||
|
||||
let effective = per_space_override.unwrap_or(global_default);
|
||||
let source = match per_space_override {
|
||||
| Some(v) => format!("per-Space override (enabled: {v})"),
|
||||
| None => format!("server default (space_permission_cascading: {global_default})"),
|
||||
};
|
||||
|
||||
self.write_str(&format!(
|
||||
"Cascading status for {space_id}:\n- Effective: **{effective}**\n- Source: {source}"
|
||||
))
|
||||
.await
|
||||
}
|
||||
|
|
@ -296,6 +296,31 @@ pub(super) async fn reset_password(
|
|||
Ok(())
|
||||
}
|
||||
|
||||
#[admin_command]
|
||||
pub(super) async fn issue_password_reset_link(&self, username: String) -> Result {
|
||||
use conduwuit_service::password_reset::{PASSWORD_RESET_PATH, RESET_TOKEN_QUERY_PARAM};
|
||||
|
||||
self.bail_restricted()?;
|
||||
|
||||
let mut reset_url = self
|
||||
.services
|
||||
.config
|
||||
.get_client_domain()
|
||||
.join(PASSWORD_RESET_PATH)
|
||||
.unwrap();
|
||||
|
||||
let user_id = parse_local_user_id(self.services, &username)?;
|
||||
let token = self.services.password_reset.issue_token(user_id).await?;
|
||||
reset_url
|
||||
.query_pairs_mut()
|
||||
.append_pair(RESET_TOKEN_QUERY_PARAM, &token.token);
|
||||
|
||||
self.write_str(&format!("Password reset link issued for {username}: {reset_url}"))
|
||||
.await?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[admin_command]
|
||||
pub(super) async fn deactivate_all(&self, no_leave_rooms: bool, force: bool) -> Result {
|
||||
if self.body.len() < 2
|
||||
|
|
|
|||
|
|
@ -29,6 +29,12 @@ pub enum UserCommand {
|
|||
password: Option<String>,
|
||||
},
|
||||
|
||||
/// Issue a self-service password reset link for a user.
|
||||
IssuePasswordResetLink {
|
||||
/// Username of the user who may use the link
|
||||
username: String,
|
||||
},
|
||||
|
||||
/// Deactivate a user
|
||||
///
|
||||
/// User will be removed from all rooms by default.
|
||||
|
|
|
|||
|
|
@ -347,6 +347,12 @@ pub async fn join_room_by_id_helper(
|
|||
}
|
||||
}
|
||||
|
||||
services
|
||||
.rooms
|
||||
.roles
|
||||
.check_join_allowed(room_id, sender_user)
|
||||
.await?;
|
||||
|
||||
if server_in_room {
|
||||
join_room_by_id_helper_local(services, sender_user, room_id, reason, servers, state_lock)
|
||||
.boxed()
|
||||
|
|
|
|||
|
|
@ -270,7 +270,7 @@ async fn build_state_and_timeline(
|
|||
// joined since the last sync, that being the syncing user's join event. if
|
||||
// it's empty something is wrong.
|
||||
if joined_since_last_sync && timeline.pdus.is_empty() {
|
||||
warn!("timeline for newly joined room is empty");
|
||||
debug_warn!("timeline for newly joined room is empty");
|
||||
}
|
||||
|
||||
let (summary, device_list_updates) = try_join(
|
||||
|
|
|
|||
|
|
@ -1,5 +1,5 @@
|
|||
use conduwuit::{
|
||||
Event, PduCount, PduEvent, Result, at, debug_warn,
|
||||
Event, PduEvent, Result, at, debug_warn,
|
||||
pdu::EventHash,
|
||||
trace,
|
||||
utils::{self, IterStream, future::ReadyEqExt, stream::WidebandExt as _},
|
||||
|
|
@ -68,9 +68,13 @@ pub(super) async fn load_left_room(
|
|||
return Ok(None);
|
||||
}
|
||||
|
||||
// return early if this is an incremental sync, and we've already synced this
|
||||
// leave to the user, and `include_leave` isn't set on the filter.
|
||||
if !filter.room.include_leave && last_sync_end_count >= Some(left_count) {
|
||||
// return early if:
|
||||
// - this is an initial sync and the room filter doesn't include leaves, or
|
||||
// - this is an incremental sync, and we've already synced the leave, and the
|
||||
// room filter doesn't include leaves
|
||||
if last_sync_end_count.is_none_or(|last_sync_end_count| last_sync_end_count >= left_count)
|
||||
&& !filter.room.include_leave
|
||||
{
|
||||
return Ok(None);
|
||||
}
|
||||
|
||||
|
|
@ -195,27 +199,13 @@ async fn build_left_state_and_timeline(
|
|||
leave_shortstatehash: ShortStateHash,
|
||||
prev_membership_event: PduEvent,
|
||||
) -> Result<(TimelinePdus, Vec<PduEvent>)> {
|
||||
let SyncContext {
|
||||
syncing_user,
|
||||
last_sync_end_count,
|
||||
filter,
|
||||
..
|
||||
} = sync_context;
|
||||
let SyncContext { syncing_user, filter, .. } = sync_context;
|
||||
|
||||
let timeline_start_count = if let Some(last_sync_end_count) = last_sync_end_count {
|
||||
// for incremental syncs, start the timeline after `since`
|
||||
PduCount::Normal(last_sync_end_count)
|
||||
} else {
|
||||
// for initial syncs, start the timeline after the previous membership
|
||||
// event. we don't want to include the membership event itself
|
||||
// because clients get confused when they see a `join`
|
||||
// membership event in a `leave` room.
|
||||
services
|
||||
.rooms
|
||||
.timeline
|
||||
.get_pdu_count(&prev_membership_event.event_id)
|
||||
.await?
|
||||
};
|
||||
let timeline_start_count = services
|
||||
.rooms
|
||||
.timeline
|
||||
.get_pdu_count(&prev_membership_event.event_id)
|
||||
.await?;
|
||||
|
||||
// end the timeline at the user's leave event
|
||||
let timeline_end_count = services
|
||||
|
|
|
|||
|
|
@ -297,12 +297,18 @@ pub(crate) async fn build_sync_events(
|
|||
.rooms
|
||||
.state_cache
|
||||
.rooms_left(syncing_user)
|
||||
.broad_filter_map(|(room_id, leave_pdu)| {
|
||||
load_left_room(services, context, room_id.clone(), leave_pdu)
|
||||
.map_ok(move |left_room| (room_id, left_room))
|
||||
.ok()
|
||||
.broad_filter_map(|(room_id, leave_pdu)| async {
|
||||
let left_room = load_left_room(services, context, room_id.clone(), leave_pdu).await;
|
||||
|
||||
match left_room {
|
||||
| Ok(Some(left_room)) => Some((room_id, left_room)),
|
||||
| Ok(None) => None,
|
||||
| Err(err) => {
|
||||
warn!(?err, %room_id, "error loading joined room");
|
||||
None
|
||||
},
|
||||
}
|
||||
})
|
||||
.ready_filter_map(|(room_id, left_room)| left_room.map(|left_room| (room_id, left_room)))
|
||||
.collect();
|
||||
|
||||
let invited_rooms = services
|
||||
|
|
|
|||
|
|
@ -174,6 +174,7 @@ pub fn check(config: &Config) -> Result {
|
|||
if config.allow_registration
|
||||
&& config.yes_i_am_very_very_sure_i_want_an_open_registration_server_prone_to_abuse
|
||||
&& config.registration_token.is_none()
|
||||
&& config.registration_token_file.is_none()
|
||||
{
|
||||
warn!(
|
||||
"Open registration is enabled via setting \
|
||||
|
|
|
|||
|
|
@ -68,6 +68,10 @@ pub struct Config {
|
|||
///
|
||||
/// Also see the `[global.well_known]` config section at the very bottom.
|
||||
///
|
||||
/// If `client` is not set under `[global.well_known]`, the server name will
|
||||
/// be used as the base domain for user-facing links (such as password
|
||||
/// reset links) created by Continuwuity.
|
||||
///
|
||||
/// Examples of delegation:
|
||||
/// - https://continuwuity.org/.well-known/matrix/server
|
||||
/// - https://continuwuity.org/.well-known/matrix/client
|
||||
|
|
@ -603,25 +607,47 @@ pub struct Config {
|
|||
#[serde(default)]
|
||||
pub suspend_on_register: bool,
|
||||
|
||||
/// Server-wide default for space permission cascading (power levels and
|
||||
/// role-based access). Individual Spaces can override this via the
|
||||
/// `com.continuwuity.space.cascading` state event or the admin command
|
||||
/// `!admin space roles enable/disable <space>`.
|
||||
///
|
||||
/// default: false
|
||||
#[serde(default)]
|
||||
pub space_permission_cascading: bool,
|
||||
|
||||
/// Maximum number of spaces to cache role data for. When exceeded the
|
||||
/// cache is cleared and repopulated on demand.
|
||||
///
|
||||
/// default: 1000
|
||||
#[serde(default = "default_space_roles_cache_flush_threshold")]
|
||||
pub space_roles_cache_flush_threshold: u32,
|
||||
|
||||
/// Enabling this setting opens registration to anyone without restrictions.
|
||||
/// This makes your server vulnerable to abuse
|
||||
#[serde(default)]
|
||||
pub yes_i_am_very_very_sure_i_want_an_open_registration_server_prone_to_abuse: bool,
|
||||
|
||||
/// A static registration token that new users will have to provide when
|
||||
/// creating an account. If unset and `allow_registration` is true,
|
||||
/// you must set
|
||||
/// `yes_i_am_very_very_sure_i_want_an_open_registration_server_prone_to_abuse`
|
||||
/// to true to allow open registration without any conditions.
|
||||
///
|
||||
/// If you do not want to set a static token, the `!admin token` commands
|
||||
/// may also be used to manage registration tokens.
|
||||
/// creating an account. This token does not supersede tokens from other
|
||||
/// sources, such as the `!admin token` command or the
|
||||
/// `registration_token_file` configuration option.
|
||||
///
|
||||
/// example: "o&^uCtes4HPf0Vu@F20jQeeWE7"
|
||||
///
|
||||
/// display: sensitive
|
||||
pub registration_token: Option<String>,
|
||||
|
||||
/// A path to a file containing static registration tokens, one per line.
|
||||
/// Tokens in this file do not supersede tokens from other sources, such as
|
||||
/// the `!admin token` command or the `registration_token` configuration
|
||||
/// option.
|
||||
///
|
||||
/// The file will be read once, when Continuwuity starts. It is not
|
||||
/// currently reread when the server configuration is reloaded. If the file
|
||||
/// cannot be read, Continuwuity will fail to start.
|
||||
pub registration_token_file: Option<PathBuf>,
|
||||
|
||||
/// The public site key for reCaptcha. If this is provided, reCaptcha
|
||||
/// becomes required during registration. If both captcha *and*
|
||||
/// registration token are enabled, both will be required during
|
||||
|
|
@ -1729,6 +1755,11 @@ pub struct Config {
|
|||
/// default: "continuwuity/<version> (bot; +https://continuwuity.org)"
|
||||
pub url_preview_user_agent: Option<String>,
|
||||
|
||||
/// Determines whether audio and video files will be downloaded for URL
|
||||
/// previews.
|
||||
#[serde(default)]
|
||||
pub url_preview_allow_audio_video: bool,
|
||||
|
||||
/// List of forbidden room aliases and room IDs as strings of regex
|
||||
/// patterns.
|
||||
///
|
||||
|
|
@ -2068,6 +2099,23 @@ pub struct Config {
|
|||
pub allow_invalid_tls_certificates_yes_i_know_what_the_fuck_i_am_doing_with_this_and_i_know_this_is_insecure:
|
||||
bool,
|
||||
|
||||
/// Forcibly disables first-run mode.
|
||||
///
|
||||
/// This is intended to be used for Complement testing to allow the test
|
||||
/// suite to register users, because first-run mode interferes with open
|
||||
/// registration.
|
||||
///
|
||||
/// display: hidden
|
||||
#[serde(default)]
|
||||
pub force_disable_first_run_mode: bool,
|
||||
|
||||
/// Allow search engines and crawlers to index Continuwuity's built-in
|
||||
/// webpages served under the `/_continuwuity/` prefix.
|
||||
///
|
||||
/// default: false
|
||||
#[serde(default)]
|
||||
pub allow_web_indexing: bool,
|
||||
|
||||
/// display: nested
|
||||
#[serde(default)]
|
||||
pub ldap: LdapConfig,
|
||||
|
|
@ -2805,3 +2853,5 @@ fn default_ldap_search_filter() -> String { "(objectClass=*)".to_owned() }
|
|||
fn default_ldap_uid_attribute() -> String { String::from("uid") }
|
||||
|
||||
fn default_ldap_name_attribute() -> String { String::from("givenName") }
|
||||
|
||||
fn default_space_roles_cache_flush_threshold() -> u32 { 1000 }
|
||||
|
|
|
|||
|
|
@ -4,9 +4,7 @@ mod panic;
|
|||
mod response;
|
||||
mod serde;
|
||||
|
||||
use std::{any::Any, borrow::Cow, convert::Infallible, sync::PoisonError, time::Duration};
|
||||
|
||||
use ruma::api::client::error::{ErrorKind, RetryAfter::Delay};
|
||||
use std::{any::Any, borrow::Cow, convert::Infallible, sync::PoisonError};
|
||||
|
||||
pub use self::{err::visit, log::*};
|
||||
|
||||
|
|
@ -93,7 +91,7 @@ pub enum Error {
|
|||
#[error("Arithmetic operation failed: {0}")]
|
||||
Arithmetic(Cow<'static, str>),
|
||||
#[error("{0}: {1}")]
|
||||
BadRequest(ErrorKind, &'static str), //TODO: remove
|
||||
BadRequest(ruma::api::client::error::ErrorKind, &'static str), //TODO: remove
|
||||
#[error("{0}")]
|
||||
BadServerResponse(Cow<'static, str>),
|
||||
#[error(transparent)]
|
||||
|
|
@ -123,7 +121,7 @@ pub enum Error {
|
|||
#[error("from {0}: {1}")]
|
||||
Redaction(ruma::OwnedServerName, ruma::canonical_json::RedactionError),
|
||||
#[error("{0}: {1}")]
|
||||
Request(ErrorKind, Cow<'static, str>, http::StatusCode),
|
||||
Request(ruma::api::client::error::ErrorKind, Cow<'static, str>, http::StatusCode),
|
||||
#[error(transparent)]
|
||||
Ruma(#[from] ruma::api::client::error::Error),
|
||||
#[error(transparent)]
|
||||
|
|
@ -168,7 +166,7 @@ impl Error {
|
|||
|
||||
/// Returns the Matrix error code / error kind
|
||||
#[inline]
|
||||
pub fn kind(&self) -> ErrorKind {
|
||||
pub fn kind(&self) -> ruma::api::client::error::ErrorKind {
|
||||
use ruma::api::client::error::ErrorKind::{FeatureDisabled, Unknown};
|
||||
|
||||
match self {
|
||||
|
|
@ -193,6 +191,7 @@ impl Error {
|
|||
| Self::Reqwest(error) => error.status().unwrap_or(StatusCode::INTERNAL_SERVER_ERROR),
|
||||
| Self::Conflict(_) => StatusCode::CONFLICT,
|
||||
| Self::Io(error) => response::io_error_code(error.kind()),
|
||||
| Self::Uiaa(_) => StatusCode::UNAUTHORIZED,
|
||||
| _ => StatusCode::INTERNAL_SERVER_ERROR,
|
||||
}
|
||||
}
|
||||
|
|
@ -203,16 +202,6 @@ impl Error {
|
|||
/// Result where Ok(None) is instead Err(e) if e.is_not_found().
|
||||
#[inline]
|
||||
pub fn is_not_found(&self) -> bool { self.status_code() == http::StatusCode::NOT_FOUND }
|
||||
|
||||
pub fn retry_after(&self) -> Option<Duration> {
|
||||
match self {
|
||||
| Self::BadRequest(
|
||||
ErrorKind::LimitExceeded { retry_after: Some(Delay(retry_after)) },
|
||||
..,
|
||||
) => Some(*retry_after),
|
||||
| _ => None,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl std::fmt::Debug for Error {
|
||||
|
|
|
|||
|
|
@ -2,6 +2,7 @@
|
|||
|
||||
pub mod event;
|
||||
pub mod pdu;
|
||||
pub mod space_roles;
|
||||
pub mod state_key;
|
||||
pub mod state_res;
|
||||
|
||||
|
|
|
|||
81
src/core/matrix/space_roles.rs
Normal file
81
src/core/matrix/space_roles.rs
Normal file
|
|
@ -0,0 +1,81 @@
|
|||
use std::collections::BTreeMap;
|
||||
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
pub const SPACE_ROLES_EVENT_TYPE: &str = "com.continuwuity.space.roles";
|
||||
pub const SPACE_ROLE_MEMBER_EVENT_TYPE: &str = "com.continuwuity.space.role.member";
|
||||
pub const SPACE_ROLE_ROOM_EVENT_TYPE: &str = "com.continuwuity.space.role.room";
|
||||
pub const SPACE_CASCADING_EVENT_TYPE: &str = "com.continuwuity.space.cascading";
|
||||
|
||||
#[derive(Clone, Debug, Default, Deserialize, Serialize, PartialEq, Eq)]
|
||||
pub struct SpaceRolesEventContent {
|
||||
pub roles: BTreeMap<String, RoleDefinition>,
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, Deserialize, Serialize, PartialEq, Eq)]
|
||||
pub struct RoleDefinition {
|
||||
pub description: String,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub power_level: Option<i64>,
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, Default, Deserialize, Serialize, PartialEq, Eq)]
|
||||
pub struct SpaceRoleMemberEventContent {
|
||||
pub roles: Vec<String>,
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, Default, Deserialize, Serialize, PartialEq, Eq)]
|
||||
pub struct SpaceRoleRoomEventContent {
|
||||
pub required_roles: Vec<String>,
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, Deserialize, Serialize, PartialEq, Eq)]
|
||||
pub struct SpaceCascadingEventContent {
|
||||
pub enabled: bool,
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn space_roles_roundtrip() {
|
||||
let mut roles = BTreeMap::new();
|
||||
roles.insert("admin".to_owned(), RoleDefinition {
|
||||
description: "Space administrator".to_owned(),
|
||||
power_level: Some(100),
|
||||
});
|
||||
roles.insert("nsfw".to_owned(), RoleDefinition {
|
||||
description: "NSFW access".to_owned(),
|
||||
power_level: None,
|
||||
});
|
||||
let content = SpaceRolesEventContent { roles };
|
||||
let json = serde_json::to_string(&content).unwrap();
|
||||
let deserialized: SpaceRolesEventContent = serde_json::from_str(&json).unwrap();
|
||||
assert_eq!(deserialized.roles["admin"].power_level, Some(100));
|
||||
assert!(deserialized.roles["nsfw"].power_level.is_none());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn power_level_omitted_in_serialization_when_none() {
|
||||
let role = RoleDefinition {
|
||||
description: "Test".to_owned(),
|
||||
power_level: None,
|
||||
};
|
||||
let json = serde_json::to_string(&role).unwrap();
|
||||
assert!(!json.contains("power_level"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn negative_power_level() {
|
||||
let json = r#"{"description":"Restricted","power_level":-10}"#;
|
||||
let role: RoleDefinition = serde_json::from_str(json).unwrap();
|
||||
assert_eq!(role.power_level, Some(-10));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn missing_description_fails() {
|
||||
let json = r#"{"power_level":100}"#;
|
||||
serde_json::from_str::<RoleDefinition>(json).unwrap_err();
|
||||
}
|
||||
}
|
||||
|
|
@ -1224,6 +1224,7 @@ fn can_send_event(event: &impl Event, ple: Option<&impl Event>, user_level: Int)
|
|||
}
|
||||
|
||||
/// Confirm that the event sender has the required power levels.
|
||||
#[allow(clippy::cognitive_complexity)]
|
||||
fn check_power_levels(
|
||||
room_version: &RoomVersion,
|
||||
power_event: &impl Event,
|
||||
|
|
|
|||
|
|
@ -75,6 +75,7 @@ type Result<T, E = Error> = crate::Result<T, E>;
|
|||
/// event is part of the same room.
|
||||
//#[tracing::instrument(level = "debug", skip(state_sets, auth_chain_sets,
|
||||
//#[tracing::instrument(level event_fetch))]
|
||||
#[allow(clippy::cognitive_complexity)]
|
||||
pub async fn resolve<'a, Pdu, Sets, SetIter, Hasher, Fetch, FetchFut, Exists, ExistsFut>(
|
||||
room_version: &RoomVersionId,
|
||||
state_sets: Sets,
|
||||
|
|
|
|||
|
|
@ -112,6 +112,10 @@ pub(super) static MAPS: &[Descriptor] = &[
|
|||
name: "onetimekeyid_onetimekeys",
|
||||
..descriptor::RANDOM_SMALL
|
||||
},
|
||||
Descriptor {
|
||||
name: "passwordresettoken_info",
|
||||
..descriptor::RANDOM_SMALL
|
||||
},
|
||||
Descriptor {
|
||||
name: "pduid_pdu",
|
||||
cache_disp: CacheDisp::SharedWith("eventid_outlierpdu"),
|
||||
|
|
|
|||
|
|
@ -18,5 +18,5 @@ pub(crate) fn build(services: &Arc<Services>) -> (Router, Guard) {
|
|||
}
|
||||
|
||||
async fn not_found(_uri: Uri) -> impl IntoResponse {
|
||||
Error::Request(ErrorKind::Unrecognized, "Not Found".into(), StatusCode::NOT_FOUND)
|
||||
Error::Request(ErrorKind::Unrecognized, "not found :(".into(), StatusCode::NOT_FOUND)
|
||||
}
|
||||
|
|
|
|||
|
|
@ -121,9 +121,8 @@ webpage.workspace = true
|
|||
webpage.optional = true
|
||||
blurhash.workspace = true
|
||||
blurhash.optional = true
|
||||
recaptcha-verify = { version = "0.1.5", default-features = false }
|
||||
recaptcha-verify = { version = "0.2.0", default-features = false }
|
||||
yansi.workspace = true
|
||||
ed25519-dalek = "2.2.0"
|
||||
|
||||
[target.'cfg(all(unix, target_os = "linux"))'.dependencies]
|
||||
sd-notify.workspace = true
|
||||
|
|
|
|||
|
|
@ -272,7 +272,10 @@ impl Service {
|
|||
.get(id)
|
||||
.await
|
||||
.and_then(|ref bytes| serde_saphyr::from_slice(bytes).map_err(Into::into))
|
||||
.map_err(|e| err!(Database("Invalid appservice {id:?} registration: {e:?}")))
|
||||
.map_err(|e| {
|
||||
self.db.id_appserviceregistrations.remove(id);
|
||||
err!(Database("Invalid appservice {id:?} registration: {e:?}. Removed."))
|
||||
})
|
||||
}
|
||||
|
||||
pub fn read(&self) -> impl Future<Output = RwLockReadGuard<'_, Registrations>> + Send {
|
||||
|
|
|
|||
|
|
@ -6,6 +6,7 @@ use conduwuit::{
|
|||
config::{Config, check},
|
||||
error, implement,
|
||||
};
|
||||
use url::Url;
|
||||
|
||||
use crate::registration_tokens::{ValidToken, ValidTokenSource};
|
||||
|
||||
|
|
@ -19,9 +20,20 @@ impl Service {
|
|||
/// Get the registration token set in the config file, if it exists.
|
||||
#[must_use]
|
||||
pub fn get_config_file_token(&self) -> Option<ValidToken> {
|
||||
self.registration_token.clone().map(|token| ValidToken {
|
||||
token,
|
||||
source: ValidTokenSource::ConfigFile,
|
||||
self.registration_token
|
||||
.clone()
|
||||
.map(|token| ValidToken { token, source: ValidTokenSource::Config })
|
||||
}
|
||||
|
||||
/// Get the base domain to use for user-facing URLs.
|
||||
#[must_use]
|
||||
pub fn get_client_domain(&self) -> Url {
|
||||
self.well_known.client.clone().unwrap_or_else(|| {
|
||||
let host = self.server_name.host();
|
||||
format!("https://{host}")
|
||||
.as_str()
|
||||
.try_into()
|
||||
.expect("server name should be a valid host")
|
||||
})
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -67,15 +67,17 @@ impl crate::Service for Service {
|
|||
fn name(&self) -> &str { crate::service::make_name(std::module_path!()) }
|
||||
|
||||
async fn worker(self: Arc<Self>) -> Result {
|
||||
// first run mode will be enabled if there are no local users
|
||||
let is_first_run = self
|
||||
.services
|
||||
.users
|
||||
.list_local_users()
|
||||
.ready_filter(|user| *user != self.services.globals.server_user)
|
||||
.next()
|
||||
.await
|
||||
.is_none();
|
||||
// first run mode will be enabled if there are no local users, provided it's not
|
||||
// forcibly disabled for Complement tests
|
||||
let is_first_run = !self.services.config.force_disable_first_run_mode
|
||||
&& self
|
||||
.services
|
||||
.users
|
||||
.list_local_users()
|
||||
.ready_filter(|user| *user != self.services.globals.server_user)
|
||||
.next()
|
||||
.await
|
||||
.is_none();
|
||||
|
||||
self.first_run_marker
|
||||
.set(if is_first_run {
|
||||
|
|
|
|||
|
|
@ -142,6 +142,10 @@ impl Service {
|
|||
self.server.config.url_preview_check_root_domain
|
||||
}
|
||||
|
||||
pub fn url_preview_allow_audio_video(&self) -> bool {
|
||||
self.server.config.url_preview_allow_audio_video
|
||||
}
|
||||
|
||||
pub fn forbidden_alias_names(&self) -> &RegexSet { &self.server.config.forbidden_alias_names }
|
||||
|
||||
pub fn forbidden_usernames(&self) -> &RegexSet { &self.server.config.forbidden_usernames }
|
||||
|
|
|
|||
|
|
@ -207,6 +207,28 @@ impl Data {
|
|||
value.extend_from_slice(&data.image_width.unwrap_or(0).to_be_bytes());
|
||||
value.push(0xFF);
|
||||
value.extend_from_slice(&data.image_height.unwrap_or(0).to_be_bytes());
|
||||
value.push(0xFF);
|
||||
value.extend_from_slice(
|
||||
data.video
|
||||
.as_ref()
|
||||
.map(String::as_bytes)
|
||||
.unwrap_or_default(),
|
||||
);
|
||||
value.push(0xFF);
|
||||
value.extend_from_slice(&data.video_size.unwrap_or(0).to_be_bytes());
|
||||
value.push(0xFF);
|
||||
value.extend_from_slice(&data.video_width.unwrap_or(0).to_be_bytes());
|
||||
value.push(0xFF);
|
||||
value.extend_from_slice(&data.video_height.unwrap_or(0).to_be_bytes());
|
||||
value.push(0xFF);
|
||||
value.extend_from_slice(
|
||||
data.audio
|
||||
.as_ref()
|
||||
.map(String::as_bytes)
|
||||
.unwrap_or_default(),
|
||||
);
|
||||
value.push(0xFF);
|
||||
value.extend_from_slice(&data.audio_size.unwrap_or(0).to_be_bytes());
|
||||
|
||||
self.url_previews.insert(url.as_bytes(), &value);
|
||||
|
||||
|
|
@ -267,6 +289,48 @@ impl Data {
|
|||
| Some(0) => None,
|
||||
| x => x,
|
||||
};
|
||||
let video = match values
|
||||
.next()
|
||||
.and_then(|b| String::from_utf8(b.to_vec()).ok())
|
||||
{
|
||||
| Some(s) if s.is_empty() => None,
|
||||
| x => x,
|
||||
};
|
||||
let video_size = match values
|
||||
.next()
|
||||
.map(|b| usize::from_be_bytes(b.try_into().unwrap_or_default()))
|
||||
{
|
||||
| Some(0) => None,
|
||||
| x => x,
|
||||
};
|
||||
let video_width = match values
|
||||
.next()
|
||||
.map(|b| u32::from_be_bytes(b.try_into().unwrap_or_default()))
|
||||
{
|
||||
| Some(0) => None,
|
||||
| x => x,
|
||||
};
|
||||
let video_height = match values
|
||||
.next()
|
||||
.map(|b| u32::from_be_bytes(b.try_into().unwrap_or_default()))
|
||||
{
|
||||
| Some(0) => None,
|
||||
| x => x,
|
||||
};
|
||||
let audio = match values
|
||||
.next()
|
||||
.and_then(|b| String::from_utf8(b.to_vec()).ok())
|
||||
{
|
||||
| Some(s) if s.is_empty() => None,
|
||||
| x => x,
|
||||
};
|
||||
let audio_size = match values
|
||||
.next()
|
||||
.map(|b| usize::from_be_bytes(b.try_into().unwrap_or_default()))
|
||||
{
|
||||
| Some(0) => None,
|
||||
| x => x,
|
||||
};
|
||||
|
||||
Ok(UrlPreviewData {
|
||||
title,
|
||||
|
|
@ -275,6 +339,12 @@ impl Data {
|
|||
image_size,
|
||||
image_width,
|
||||
image_height,
|
||||
video,
|
||||
video_size,
|
||||
video_width,
|
||||
video_height,
|
||||
audio,
|
||||
audio_size,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -10,6 +10,8 @@ use std::time::SystemTime;
|
|||
use conduwuit::{Err, Result, debug, err, utils::response::LimitReadExt};
|
||||
use conduwuit_core::implement;
|
||||
use ipaddress::IPAddress;
|
||||
#[cfg(feature = "url_preview")]
|
||||
use ruma::OwnedMxcUri;
|
||||
use serde::Serialize;
|
||||
use url::Url;
|
||||
|
||||
|
|
@ -29,6 +31,18 @@ pub struct UrlPreviewData {
|
|||
pub image_width: Option<u32>,
|
||||
#[serde(skip_serializing_if = "Option::is_none", rename(serialize = "og:image:height"))]
|
||||
pub image_height: Option<u32>,
|
||||
#[serde(skip_serializing_if = "Option::is_none", rename(serialize = "og:video"))]
|
||||
pub video: Option<String>,
|
||||
#[serde(skip_serializing_if = "Option::is_none", rename(serialize = "matrix:video:size"))]
|
||||
pub video_size: Option<usize>,
|
||||
#[serde(skip_serializing_if = "Option::is_none", rename(serialize = "og:video:width"))]
|
||||
pub video_width: Option<u32>,
|
||||
#[serde(skip_serializing_if = "Option::is_none", rename(serialize = "og:video:height"))]
|
||||
pub video_height: Option<u32>,
|
||||
#[serde(skip_serializing_if = "Option::is_none", rename(serialize = "og:audio"))]
|
||||
pub audio: Option<String>,
|
||||
#[serde(skip_serializing_if = "Option::is_none", rename(serialize = "matrix:audio:size"))]
|
||||
pub audio_size: Option<usize>,
|
||||
}
|
||||
|
||||
#[implement(Service)]
|
||||
|
|
@ -96,7 +110,9 @@ async fn request_url_preview(&self, url: &Url) -> Result<UrlPreviewData> {
|
|||
|
||||
let data = match content_type {
|
||||
| html if html.starts_with("text/html") => self.download_html(url.as_str()).await?,
|
||||
| img if img.starts_with("image/") => self.download_image(url.as_str()).await?,
|
||||
| img if img.starts_with("image/") => self.download_image(url.as_str(), None).await?,
|
||||
| video if video.starts_with("video/") => self.download_video(url.as_str(), None).await?,
|
||||
| audio if audio.starts_with("audio/") => self.download_audio(url.as_str(), None).await?,
|
||||
| _ => return Err!(Request(Unknown("Unsupported Content-Type"))),
|
||||
};
|
||||
|
||||
|
|
@ -107,11 +123,17 @@ async fn request_url_preview(&self, url: &Url) -> Result<UrlPreviewData> {
|
|||
|
||||
#[cfg(feature = "url_preview")]
|
||||
#[implement(Service)]
|
||||
pub async fn download_image(&self, url: &str) -> Result<UrlPreviewData> {
|
||||
pub async fn download_image(
|
||||
&self,
|
||||
url: &str,
|
||||
preview_data: Option<UrlPreviewData>,
|
||||
) -> Result<UrlPreviewData> {
|
||||
use conduwuit::utils::random_string;
|
||||
use image::ImageReader;
|
||||
use ruma::Mxc;
|
||||
|
||||
let mut preview_data = preview_data.unwrap_or_default();
|
||||
|
||||
let image = self
|
||||
.services
|
||||
.client
|
||||
|
|
@ -128,6 +150,7 @@ pub async fn download_image(&self, url: &str) -> Result<UrlPreviewData> {
|
|||
.expect("u64 should fit in usize"),
|
||||
)
|
||||
.await?;
|
||||
|
||||
let mxc = Mxc {
|
||||
server_name: self.services.globals.server_name(),
|
||||
media_id: &random_string(super::MXC_LENGTH),
|
||||
|
|
@ -135,27 +158,125 @@ pub async fn download_image(&self, url: &str) -> Result<UrlPreviewData> {
|
|||
|
||||
self.create(&mxc, None, None, None, &image).await?;
|
||||
|
||||
let cursor = std::io::Cursor::new(&image);
|
||||
let (width, height) = match ImageReader::new(cursor).with_guessed_format() {
|
||||
| Err(_) => (None, None),
|
||||
| Ok(reader) => match reader.into_dimensions() {
|
||||
preview_data.image = Some(mxc.to_string());
|
||||
if preview_data.image_height.is_none() || preview_data.image_width.is_none() {
|
||||
let cursor = std::io::Cursor::new(&image);
|
||||
let (width, height) = match ImageReader::new(cursor).with_guessed_format() {
|
||||
| Err(_) => (None, None),
|
||||
| Ok((width, height)) => (Some(width), Some(height)),
|
||||
},
|
||||
| Ok(reader) => match reader.into_dimensions() {
|
||||
| Err(_) => (None, None),
|
||||
| Ok((width, height)) => (Some(width), Some(height)),
|
||||
},
|
||||
};
|
||||
|
||||
preview_data.image_width = width;
|
||||
preview_data.image_height = height;
|
||||
}
|
||||
|
||||
Ok(preview_data)
|
||||
}
|
||||
|
||||
#[cfg(feature = "url_preview")]
|
||||
#[implement(Service)]
|
||||
pub async fn download_video(
|
||||
&self,
|
||||
url: &str,
|
||||
preview_data: Option<UrlPreviewData>,
|
||||
) -> Result<UrlPreviewData> {
|
||||
let mut preview_data = preview_data.unwrap_or_default();
|
||||
|
||||
if self.services.globals.url_preview_allow_audio_video() {
|
||||
let (url, size) = self.download_media(url).await?;
|
||||
preview_data.video = Some(url.to_string());
|
||||
preview_data.video_size = Some(size);
|
||||
}
|
||||
|
||||
Ok(preview_data)
|
||||
}
|
||||
|
||||
#[cfg(feature = "url_preview")]
|
||||
#[implement(Service)]
|
||||
pub async fn download_audio(
|
||||
&self,
|
||||
url: &str,
|
||||
preview_data: Option<UrlPreviewData>,
|
||||
) -> Result<UrlPreviewData> {
|
||||
let mut preview_data = preview_data.unwrap_or_default();
|
||||
|
||||
if self.services.globals.url_preview_allow_audio_video() {
|
||||
let (url, size) = self.download_media(url).await?;
|
||||
preview_data.audio = Some(url.to_string());
|
||||
preview_data.audio_size = Some(size);
|
||||
}
|
||||
|
||||
Ok(preview_data)
|
||||
}
|
||||
|
||||
#[cfg(feature = "url_preview")]
|
||||
#[implement(Service)]
|
||||
pub async fn download_media(&self, url: &str) -> Result<(OwnedMxcUri, usize)> {
|
||||
use conduwuit::utils::random_string;
|
||||
use http::header::CONTENT_TYPE;
|
||||
use ruma::Mxc;
|
||||
|
||||
let response = self.services.client.url_preview.get(url).send().await?;
|
||||
let content_type = response.headers().get(CONTENT_TYPE).cloned();
|
||||
let media = response
|
||||
.limit_read(
|
||||
self.services
|
||||
.server
|
||||
.config
|
||||
.max_request_size
|
||||
.try_into()
|
||||
.expect("u64 should fit in usize"),
|
||||
)
|
||||
.await?;
|
||||
|
||||
let mxc = Mxc {
|
||||
server_name: self.services.globals.server_name(),
|
||||
media_id: &random_string(super::MXC_LENGTH),
|
||||
};
|
||||
|
||||
Ok(UrlPreviewData {
|
||||
image: Some(mxc.to_string()),
|
||||
image_size: Some(image.len()),
|
||||
image_width: width,
|
||||
image_height: height,
|
||||
..Default::default()
|
||||
})
|
||||
let content_type = content_type.and_then(|v| v.to_str().map(ToOwned::to_owned).ok());
|
||||
self.create(&mxc, None, None, content_type.as_deref(), &media)
|
||||
.await?;
|
||||
|
||||
Ok((OwnedMxcUri::from(mxc.to_string()), media.len()))
|
||||
}
|
||||
|
||||
#[cfg(not(feature = "url_preview"))]
|
||||
#[implement(Service)]
|
||||
pub async fn download_image(&self, _url: &str) -> Result<UrlPreviewData> {
|
||||
pub async fn download_image(
|
||||
&self,
|
||||
_url: &str,
|
||||
_preview_data: Option<UrlPreviewData>,
|
||||
) -> Result<UrlPreviewData> {
|
||||
Err!(FeatureDisabled("url_preview"))
|
||||
}
|
||||
|
||||
#[cfg(not(feature = "url_preview"))]
|
||||
#[implement(Service)]
|
||||
pub async fn download_video(
|
||||
&self,
|
||||
_url: &str,
|
||||
_preview_data: Option<UrlPreviewData>,
|
||||
) -> Result<UrlPreviewData> {
|
||||
Err!(FeatureDisabled("url_preview"))
|
||||
}
|
||||
|
||||
#[cfg(not(feature = "url_preview"))]
|
||||
#[implement(Service)]
|
||||
pub async fn download_audio(
|
||||
&self,
|
||||
_url: &str,
|
||||
_preview_data: Option<UrlPreviewData>,
|
||||
) -> Result<UrlPreviewData> {
|
||||
Err!(FeatureDisabled("url_preview"))
|
||||
}
|
||||
|
||||
#[cfg(not(feature = "url_preview"))]
|
||||
#[implement(Service)]
|
||||
pub async fn download_media(&self, _url: &str) -> Result<UrlPreviewData> {
|
||||
Err!(FeatureDisabled("url_preview"))
|
||||
}
|
||||
|
||||
|
|
@ -182,18 +303,29 @@ async fn download_html(&self, url: &str) -> Result<UrlPreviewData> {
|
|||
return Err!(Request(Unknown("Failed to parse HTML")));
|
||||
};
|
||||
|
||||
let mut data = match html.opengraph.images.first() {
|
||||
| None => UrlPreviewData::default(),
|
||||
| Some(obj) => self.download_image(&obj.url).await?,
|
||||
};
|
||||
let mut preview_data = UrlPreviewData::default();
|
||||
|
||||
if let Some(obj) = html.opengraph.images.first() {
|
||||
preview_data = self.download_image(&obj.url, Some(preview_data)).await?;
|
||||
}
|
||||
|
||||
if let Some(obj) = html.opengraph.videos.first() {
|
||||
preview_data = self.download_video(&obj.url, Some(preview_data)).await?;
|
||||
preview_data.video_width = obj.properties.get("width").and_then(|v| v.parse().ok());
|
||||
preview_data.video_height = obj.properties.get("height").and_then(|v| v.parse().ok());
|
||||
}
|
||||
|
||||
if let Some(obj) = html.opengraph.audios.first() {
|
||||
preview_data = self.download_audio(&obj.url, Some(preview_data)).await?;
|
||||
}
|
||||
|
||||
let props = html.opengraph.properties;
|
||||
|
||||
/* use OpenGraph title/description, but fall back to HTML if not available */
|
||||
data.title = props.get("title").cloned().or(html.title);
|
||||
data.description = props.get("description").cloned().or(html.description);
|
||||
preview_data.title = props.get("title").cloned().or(html.title);
|
||||
preview_data.description = props.get("description").cloned().or(html.description);
|
||||
|
||||
Ok(data)
|
||||
Ok(preview_data)
|
||||
}
|
||||
|
||||
#[cfg(not(feature = "url_preview"))]
|
||||
|
|
|
|||
|
|
@ -1,8 +1,9 @@
|
|||
use std::{cmp, collections::HashMap, future::ready};
|
||||
|
||||
use conduwuit::{
|
||||
Err, Event, Pdu, Result, debug, debug_info, debug_warn, error, info,
|
||||
Err, Event, Pdu, Result, debug, debug_info, debug_warn, err, error, info,
|
||||
result::NotFound,
|
||||
trace,
|
||||
utils::{
|
||||
IterStream, ReadyExt,
|
||||
stream::{TryExpect, TryIgnore},
|
||||
|
|
@ -57,6 +58,7 @@ pub(crate) async fn migrations(services: &Services) -> Result<()> {
|
|||
}
|
||||
|
||||
async fn fresh(services: &Services) -> Result<()> {
|
||||
info!("Creating new fresh database");
|
||||
let db = &services.db;
|
||||
|
||||
services.globals.db.bump_database_version(DATABASE_VERSION);
|
||||
|
|
@ -66,11 +68,18 @@ async fn fresh(services: &Services) -> Result<()> {
|
|||
db["global"].insert(b"retroactively_fix_bad_data_from_roomuserid_joined", []);
|
||||
db["global"].insert(b"fix_referencedevents_missing_sep", []);
|
||||
db["global"].insert(b"fix_readreceiptid_readreceipt_duplicates", []);
|
||||
db["global"].insert(b"fix_corrupt_msc4133_fields", []);
|
||||
db["global"].insert(b"populate_userroomid_leftstate_table", []);
|
||||
db["global"].insert(b"fix_local_invite_state", []);
|
||||
|
||||
// Create the admin room and server user on first run
|
||||
crate::admin::create_admin_room(services).boxed().await?;
|
||||
info!("Creating admin room and server user");
|
||||
crate::admin::create_admin_room(services)
|
||||
.boxed()
|
||||
.await
|
||||
.inspect_err(|e| error!("Failed to create admin room during db init: {e}"))?;
|
||||
|
||||
warn!("Created new RocksDB database with version {DATABASE_VERSION}");
|
||||
info!("Created new database with version {DATABASE_VERSION}");
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
|
@ -88,19 +97,33 @@ async fn migrate(services: &Services) -> Result<()> {
|
|||
}
|
||||
|
||||
if services.globals.db.database_version().await < 12 {
|
||||
db_lt_12(services).await?;
|
||||
db_lt_12(services)
|
||||
.await
|
||||
.map_err(|e| err!("Failed to run v12 migrations: {e}"))?;
|
||||
}
|
||||
|
||||
// This migration can be reused as-is anytime the server-default rules are
|
||||
// updated.
|
||||
if services.globals.db.database_version().await < 13 {
|
||||
db_lt_13(services).await?;
|
||||
db_lt_13(services)
|
||||
.await
|
||||
.map_err(|e| err!("Failed to run v13 migrations: {e}"))?;
|
||||
}
|
||||
|
||||
if db["global"].get(b"feat_sha256_media").await.is_not_found() {
|
||||
media::migrations::migrate_sha256_media(services).await?;
|
||||
media::migrations::migrate_sha256_media(services)
|
||||
.await
|
||||
.map_err(|e| err!("Failed to run SHA256 media migration: {e}"))?;
|
||||
} else if config.media_startup_check {
|
||||
media::migrations::checkup_sha256_media(services).await?;
|
||||
info!("Starting media startup integrity check.");
|
||||
let now = std::time::Instant::now();
|
||||
media::migrations::checkup_sha256_media(services)
|
||||
.await
|
||||
.map_err(|e| err!("Failed to verify media integrity: {e}"))?;
|
||||
info!(
|
||||
"Finished media startup integrity check in {} seconds.",
|
||||
now.elapsed().as_secs_f32()
|
||||
);
|
||||
}
|
||||
|
||||
if db["global"]
|
||||
|
|
@ -108,7 +131,12 @@ async fn migrate(services: &Services) -> Result<()> {
|
|||
.await
|
||||
.is_not_found()
|
||||
{
|
||||
fix_bad_double_separator_in_state_cache(services).await?;
|
||||
info!("Running migration 'fix_bad_double_separator_in_state_cache'");
|
||||
fix_bad_double_separator_in_state_cache(services)
|
||||
.await
|
||||
.map_err(|e| {
|
||||
err!("Failed to run 'fix_bad_double_separator_in_state_cache' migration: {e}")
|
||||
})?;
|
||||
}
|
||||
|
||||
if db["global"]
|
||||
|
|
@ -116,7 +144,15 @@ async fn migrate(services: &Services) -> Result<()> {
|
|||
.await
|
||||
.is_not_found()
|
||||
{
|
||||
retroactively_fix_bad_data_from_roomuserid_joined(services).await?;
|
||||
info!("Running migration 'retroactively_fix_bad_data_from_roomuserid_joined'");
|
||||
retroactively_fix_bad_data_from_roomuserid_joined(services)
|
||||
.await
|
||||
.map_err(|e| {
|
||||
err!(
|
||||
"Failed to run 'retroactively_fix_bad_data_from_roomuserid_joined' \
|
||||
migration: {e}"
|
||||
)
|
||||
})?;
|
||||
}
|
||||
|
||||
if db["global"]
|
||||
|
|
@ -125,7 +161,12 @@ async fn migrate(services: &Services) -> Result<()> {
|
|||
.is_not_found()
|
||||
|| services.globals.db.database_version().await < 17
|
||||
{
|
||||
fix_referencedevents_missing_sep(services).await?;
|
||||
info!("Running migration 'fix_referencedevents_missing_sep'");
|
||||
fix_referencedevents_missing_sep(services)
|
||||
.await
|
||||
.map_err(|e| {
|
||||
err!("Failed to run 'fix_referencedevents_missing_sep' migration': {e}")
|
||||
})?;
|
||||
}
|
||||
|
||||
if db["global"]
|
||||
|
|
@ -134,7 +175,12 @@ async fn migrate(services: &Services) -> Result<()> {
|
|||
.is_not_found()
|
||||
|| services.globals.db.database_version().await < 17
|
||||
{
|
||||
fix_readreceiptid_readreceipt_duplicates(services).await?;
|
||||
info!("Running migration 'fix_readreceiptid_readreceipt_duplicates'");
|
||||
fix_readreceiptid_readreceipt_duplicates(services)
|
||||
.await
|
||||
.map_err(|e| {
|
||||
err!("Failed to run 'fix_readreceiptid_readreceipt_duplicates' migration': {e}")
|
||||
})?;
|
||||
}
|
||||
|
||||
if services.globals.db.database_version().await < 17 {
|
||||
|
|
@ -147,7 +193,10 @@ async fn migrate(services: &Services) -> Result<()> {
|
|||
.await
|
||||
.is_not_found()
|
||||
{
|
||||
fix_corrupt_msc4133_fields(services).await?;
|
||||
info!("Running migration 'fix_corrupt_msc4133_fields'");
|
||||
fix_corrupt_msc4133_fields(services)
|
||||
.await
|
||||
.map_err(|e| err!("Failed to run 'fix_corrupt_msc4133_fields' migration': {e}"))?;
|
||||
}
|
||||
|
||||
if services.globals.db.database_version().await < 18 {
|
||||
|
|
@ -160,7 +209,12 @@ async fn migrate(services: &Services) -> Result<()> {
|
|||
.await
|
||||
.is_not_found()
|
||||
{
|
||||
populate_userroomid_leftstate_table(services).await?;
|
||||
info!("Running migration 'populate_userroomid_leftstate_table'");
|
||||
populate_userroomid_leftstate_table(services)
|
||||
.await
|
||||
.map_err(|e| {
|
||||
err!("Failed to run 'populate_userroomid_leftstate_table' migration': {e}")
|
||||
})?;
|
||||
}
|
||||
|
||||
if db["global"]
|
||||
|
|
@ -168,14 +222,17 @@ async fn migrate(services: &Services) -> Result<()> {
|
|||
.await
|
||||
.is_not_found()
|
||||
{
|
||||
fix_local_invite_state(services).await?;
|
||||
info!("Running migration 'fix_local_invite_state'");
|
||||
fix_local_invite_state(services)
|
||||
.await
|
||||
.map_err(|e| err!("Failed to run 'fix_local_invite_state' migration': {e}"))?;
|
||||
}
|
||||
|
||||
assert_eq!(
|
||||
services.globals.db.database_version().await,
|
||||
DATABASE_VERSION,
|
||||
"Failed asserting local database version {} is equal to known latest conduwuit database \
|
||||
version {}",
|
||||
"Failed asserting local database version {} is equal to known latest continuwuity \
|
||||
database version {}",
|
||||
services.globals.db.database_version().await,
|
||||
DATABASE_VERSION,
|
||||
);
|
||||
|
|
@ -370,7 +427,7 @@ async fn db_lt_13(services: &Services) -> Result<()> {
|
|||
}
|
||||
|
||||
async fn fix_bad_double_separator_in_state_cache(services: &Services) -> Result<()> {
|
||||
warn!("Fixing bad double separator in state_cache roomuserid_joined");
|
||||
info!("Fixing bad double separator in state_cache roomuserid_joined");
|
||||
|
||||
let db = &services.db;
|
||||
let roomuserid_joined = &db["roomuserid_joined"];
|
||||
|
|
@ -414,7 +471,7 @@ async fn fix_bad_double_separator_in_state_cache(services: &Services) -> Result<
|
|||
}
|
||||
|
||||
async fn retroactively_fix_bad_data_from_roomuserid_joined(services: &Services) -> Result<()> {
|
||||
warn!("Retroactively fixing bad data from broken roomuserid_joined");
|
||||
info!("Retroactively fixing bad data from broken roomuserid_joined");
|
||||
|
||||
let db = &services.db;
|
||||
let _cork = db.cork_and_sync();
|
||||
|
|
@ -504,7 +561,7 @@ async fn retroactively_fix_bad_data_from_roomuserid_joined(services: &Services)
|
|||
}
|
||||
|
||||
async fn fix_referencedevents_missing_sep(services: &Services) -> Result {
|
||||
warn!("Fixing missing record separator between room_id and event_id in referencedevents");
|
||||
info!("Fixing missing record separator between room_id and event_id in referencedevents");
|
||||
|
||||
let db = &services.db;
|
||||
let cork = db.cork_and_sync();
|
||||
|
|
@ -552,7 +609,7 @@ async fn fix_readreceiptid_readreceipt_duplicates(services: &Services) -> Result
|
|||
type ArrayId = ArrayString<MAX_BYTES>;
|
||||
type Key<'a> = (&'a RoomId, u64, &'a UserId);
|
||||
|
||||
warn!("Fixing undeleted entries in readreceiptid_readreceipt...");
|
||||
info!("Fixing undeleted entries in readreceiptid_readreceipt...");
|
||||
|
||||
let db = &services.db;
|
||||
let cork = db.cork_and_sync();
|
||||
|
|
@ -606,7 +663,7 @@ async fn fix_corrupt_msc4133_fields(services: &Services) -> Result {
|
|||
use serde_json::{Value, from_slice};
|
||||
type KeyVal<'a> = ((OwnedUserId, String), &'a [u8]);
|
||||
|
||||
warn!("Fixing corrupted `us.cloke.msc4175.tz` fields...");
|
||||
info!("Fixing corrupted `us.cloke.msc4175.tz` fields...");
|
||||
|
||||
let db = &services.db;
|
||||
let cork = db.cork_and_sync();
|
||||
|
|
@ -746,7 +803,18 @@ async fn fix_local_invite_state(services: &Services) -> Result {
|
|||
let fixed = userroomid_invitestate.stream()
|
||||
// if they're a local user on this homeserver
|
||||
.try_filter(|((user_id, _), _): &KeyVal<'_>| ready(services.globals.user_is_local(user_id)))
|
||||
.and_then(async |((user_id, room_id), stripped_state): KeyVal<'_>| Ok::<_, conduwuit::Error>((user_id.to_owned(), room_id.to_owned(), stripped_state.deserialize()?)))
|
||||
.and_then(async |((user_id, room_id), stripped_state): KeyVal<'_>| Ok::<_,
|
||||
conduwuit::Error>((user_id.to_owned(), room_id.to_owned(), stripped_state.deserialize
|
||||
().unwrap_or_else(|e| {
|
||||
trace!("Failed to deserialize: {:?}", stripped_state.json());
|
||||
warn!(
|
||||
%user_id,
|
||||
%room_id,
|
||||
"Failed to deserialize stripped state for invite, removing from db: {e}"
|
||||
);
|
||||
userroomid_invitestate.del((user_id, room_id));
|
||||
vec![]
|
||||
}))))
|
||||
.try_fold(0_usize, async |mut fixed, (user_id, room_id, stripped_state)| {
|
||||
// and their invite state is None
|
||||
if stripped_state.is_empty()
|
||||
|
|
|
|||
|
|
@ -23,6 +23,7 @@ pub mod globals;
|
|||
pub mod key_backups;
|
||||
pub mod media;
|
||||
pub mod moderation;
|
||||
pub mod password_reset;
|
||||
pub mod presence;
|
||||
pub mod pusher;
|
||||
pub mod registration_tokens;
|
||||
|
|
|
|||
68
src/service/password_reset/data.rs
Normal file
68
src/service/password_reset/data.rs
Normal file
|
|
@ -0,0 +1,68 @@
|
|||
use std::{
|
||||
sync::Arc,
|
||||
time::{Duration, SystemTime},
|
||||
};
|
||||
|
||||
use conduwuit::utils::{ReadyExt, stream::TryExpect};
|
||||
use database::{Database, Deserialized, Json, Map};
|
||||
use ruma::{OwnedUserId, UserId};
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
pub(super) struct Data {
|
||||
passwordresettoken_info: Arc<Map>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize)]
|
||||
pub struct ResetTokenInfo {
|
||||
pub user: OwnedUserId,
|
||||
pub issued_at: SystemTime,
|
||||
}
|
||||
|
||||
impl ResetTokenInfo {
|
||||
// one hour
|
||||
const MAX_TOKEN_AGE: Duration = Duration::from_secs(60 * 60);
|
||||
|
||||
pub fn is_valid(&self) -> bool {
|
||||
let now = SystemTime::now();
|
||||
|
||||
now.duration_since(self.issued_at)
|
||||
.is_ok_and(|duration| duration < Self::MAX_TOKEN_AGE)
|
||||
}
|
||||
}
|
||||
|
||||
impl Data {
|
||||
pub(super) fn new(db: &Arc<Database>) -> Self {
|
||||
Self {
|
||||
passwordresettoken_info: db["passwordresettoken_info"].clone(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Associate a reset token with its info in the database.
|
||||
pub(super) fn save_token(&self, token: &str, info: &ResetTokenInfo) {
|
||||
self.passwordresettoken_info.raw_put(token, Json(info));
|
||||
}
|
||||
|
||||
/// Lookup the info for a reset token.
|
||||
pub(super) async fn lookup_token_info(&self, token: &str) -> Option<ResetTokenInfo> {
|
||||
self.passwordresettoken_info
|
||||
.get(token)
|
||||
.await
|
||||
.deserialized()
|
||||
.ok()
|
||||
}
|
||||
|
||||
/// Find a user's existing reset token, if any.
|
||||
pub(super) async fn find_token_for_user(
|
||||
&self,
|
||||
user: &UserId,
|
||||
) -> Option<(String, ResetTokenInfo)> {
|
||||
self.passwordresettoken_info
|
||||
.stream::<'_, String, ResetTokenInfo>()
|
||||
.expect_ok()
|
||||
.ready_find(|(_, info)| info.user == user)
|
||||
.await
|
||||
}
|
||||
|
||||
/// Remove a reset token.
|
||||
pub(super) fn remove_token(&self, token: &str) { self.passwordresettoken_info.remove(token); }
|
||||
}
|
||||
120
src/service/password_reset/mod.rs
Normal file
120
src/service/password_reset/mod.rs
Normal file
|
|
@ -0,0 +1,120 @@
|
|||
mod data;
|
||||
|
||||
use std::{sync::Arc, time::SystemTime};
|
||||
|
||||
use conduwuit::{Err, Result, utils};
|
||||
use data::{Data, ResetTokenInfo};
|
||||
use ruma::OwnedUserId;
|
||||
|
||||
use crate::{Dep, globals, users};
|
||||
|
||||
pub const PASSWORD_RESET_PATH: &str = "/_continuwuity/account/reset_password";
|
||||
pub const RESET_TOKEN_QUERY_PARAM: &str = "token";
|
||||
const RESET_TOKEN_LENGTH: usize = 32;
|
||||
|
||||
pub struct Service {
|
||||
db: Data,
|
||||
services: Services,
|
||||
}
|
||||
|
||||
struct Services {
|
||||
users: Dep<users::Service>,
|
||||
globals: Dep<globals::Service>,
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct ValidResetToken {
|
||||
pub token: String,
|
||||
pub info: ResetTokenInfo,
|
||||
}
|
||||
|
||||
impl crate::Service for Service {
|
||||
fn build(args: crate::Args<'_>) -> Result<Arc<Self>> {
|
||||
Ok(Arc::new(Self {
|
||||
db: Data::new(args.db),
|
||||
services: Services {
|
||||
users: args.depend::<users::Service>("users"),
|
||||
globals: args.depend::<globals::Service>("globals"),
|
||||
},
|
||||
}))
|
||||
}
|
||||
|
||||
fn name(&self) -> &str { crate::service::make_name(std::module_path!()) }
|
||||
}
|
||||
|
||||
impl Service {
|
||||
/// Generate a random string suitable to be used as a password reset token.
|
||||
#[must_use]
|
||||
pub fn generate_token_string() -> String { utils::random_string(RESET_TOKEN_LENGTH) }
|
||||
|
||||
/// Issue a password reset token for `user`, who must be a local user with
|
||||
/// the `password` origin.
|
||||
pub async fn issue_token(&self, user_id: OwnedUserId) -> Result<ValidResetToken> {
|
||||
if !self.services.globals.user_is_local(&user_id) {
|
||||
return Err!("Cannot issue a password reset token for remote user {user_id}");
|
||||
}
|
||||
|
||||
if user_id == self.services.globals.server_user {
|
||||
return Err!("Cannot issue a password reset token for the server user");
|
||||
}
|
||||
|
||||
if self
|
||||
.services
|
||||
.users
|
||||
.origin(&user_id)
|
||||
.await
|
||||
.unwrap_or_else(|_| "password".to_owned())
|
||||
!= "password"
|
||||
{
|
||||
return Err!("Cannot issue a password reset token for non-internal user {user_id}");
|
||||
}
|
||||
|
||||
if self.services.users.is_deactivated(&user_id).await? {
|
||||
return Err!("Cannot issue a password reset token for deactivated user {user_id}");
|
||||
}
|
||||
|
||||
if let Some((existing_token, _)) = self.db.find_token_for_user(&user_id).await {
|
||||
self.db.remove_token(&existing_token);
|
||||
}
|
||||
|
||||
let token = Self::generate_token_string();
|
||||
let info = ResetTokenInfo {
|
||||
user: user_id,
|
||||
issued_at: SystemTime::now(),
|
||||
};
|
||||
|
||||
self.db.save_token(&token, &info);
|
||||
|
||||
Ok(ValidResetToken { token, info })
|
||||
}
|
||||
|
||||
/// Check if `token` represents a valid, non-expired password reset token.
|
||||
pub async fn check_token(&self, token: &str) -> Option<ValidResetToken> {
|
||||
self.db.lookup_token_info(token).await.and_then(|info| {
|
||||
if info.is_valid() {
|
||||
Some(ValidResetToken { token: token.to_owned(), info })
|
||||
} else {
|
||||
self.db.remove_token(token);
|
||||
None
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
/// Consume the supplied valid token, using it to change its user's password
|
||||
/// to `new_password`.
|
||||
pub async fn consume_token(
|
||||
&self,
|
||||
ValidResetToken { token, info }: ValidResetToken,
|
||||
new_password: &str,
|
||||
) -> Result<()> {
|
||||
if info.is_valid() {
|
||||
self.db.remove_token(&token);
|
||||
self.services
|
||||
.users
|
||||
.set_password(&info.user, Some(new_password))
|
||||
.await?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
|
@ -2,7 +2,7 @@ mod data;
|
|||
|
||||
use std::{future::ready, pin::Pin, sync::Arc};
|
||||
|
||||
use conduwuit::{Err, Result, utils};
|
||||
use conduwuit::{Err, Result, err, utils};
|
||||
use data::Data;
|
||||
pub use data::{DatabaseTokenInfo, TokenExpires};
|
||||
use futures::{
|
||||
|
|
@ -18,6 +18,9 @@ const RANDOM_TOKEN_LENGTH: usize = 16;
|
|||
pub struct Service {
|
||||
db: Data,
|
||||
services: Services,
|
||||
/// The registration tokens which were read from the registration token
|
||||
/// file, if one is configured.
|
||||
registration_tokens_from_file: Vec<String>,
|
||||
}
|
||||
|
||||
struct Services {
|
||||
|
|
@ -45,34 +48,54 @@ impl PartialEq<str> for ValidToken {
|
|||
/// The source of a valid database token.
|
||||
#[derive(Debug)]
|
||||
pub enum ValidTokenSource {
|
||||
/// The static token set in the homeserver's config file, which is
|
||||
/// always valid.
|
||||
ConfigFile,
|
||||
/// The static token set in the homeserver's config file.
|
||||
Config,
|
||||
/// A database token which has been checked to be valid.
|
||||
Database(DatabaseTokenInfo),
|
||||
/// The single-use token which may be used to create the homeserver's first
|
||||
/// account.
|
||||
FirstAccount,
|
||||
/// A registration token read from the registration token file set in the
|
||||
/// config.
|
||||
TokenFile,
|
||||
}
|
||||
|
||||
impl std::fmt::Display for ValidTokenSource {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
match self {
|
||||
| Self::ConfigFile => write!(f, "Token defined in config."),
|
||||
| Self::Config => write!(f, "Static token set in the server configuration."),
|
||||
| Self::Database(info) => info.fmt(f),
|
||||
| Self::FirstAccount => write!(f, "Initial setup token."),
|
||||
| Self::TokenFile => write!(f, "Static token set in the registration token file."),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl crate::Service for Service {
|
||||
fn build(args: crate::Args<'_>) -> Result<Arc<Self>> {
|
||||
let registration_tokens_from_file = args.server.config.registration_token_file
|
||||
.clone()
|
||||
// If the token file option was set, read the path it points to
|
||||
.map(std::fs::read_to_string)
|
||||
.transpose()
|
||||
.map_err(|err| err!("Failed to read registration token file: {err}"))
|
||||
.map(|tokens| {
|
||||
if let Some(tokens) = tokens {
|
||||
// If the token file option was set, return the file's lines as tokens
|
||||
tokens.lines().map(ToOwned::to_owned).collect()
|
||||
} else {
|
||||
// Otherwise, if the option wasn't set, return no tokens
|
||||
vec![]
|
||||
}
|
||||
})?;
|
||||
|
||||
Ok(Arc::new(Self {
|
||||
db: Data::new(args.db),
|
||||
services: Services {
|
||||
config: args.depend::<config::Service>("config"),
|
||||
firstrun: args.depend::<firstrun::Service>("firstrun"),
|
||||
},
|
||||
registration_tokens_from_file,
|
||||
}))
|
||||
}
|
||||
|
||||
|
|
@ -97,12 +120,23 @@ impl Service {
|
|||
(token, info)
|
||||
}
|
||||
|
||||
/// Get all the "special" registration tokens that aren't defined in the
|
||||
/// Get all the static registration tokens that aren't defined in the
|
||||
/// database.
|
||||
fn iterate_static_tokens(&self) -> impl Iterator<Item = ValidToken> {
|
||||
// This does not include the first-account token, because it's special:
|
||||
// no other registration tokens are valid when it is set.
|
||||
self.services.config.get_config_file_token().into_iter()
|
||||
// This does not include the first-account token, because it has special
|
||||
// behavior: no other registration tokens are valid when it is set.
|
||||
self.services
|
||||
.config
|
||||
.get_config_file_token()
|
||||
.into_iter()
|
||||
.chain(
|
||||
self.registration_tokens_from_file
|
||||
.iter()
|
||||
.map(|token_string| ValidToken {
|
||||
token: token_string.clone(),
|
||||
source: ValidTokenSource::TokenFile,
|
||||
}),
|
||||
)
|
||||
}
|
||||
|
||||
/// Validate a registration token.
|
||||
|
|
@ -158,7 +192,7 @@ impl Service {
|
|||
/// revoked.
|
||||
pub fn revoke_token(&self, ValidToken { token, source }: ValidToken) -> Result {
|
||||
match source {
|
||||
| ValidTokenSource::ConfigFile => {
|
||||
| ValidTokenSource::Config => {
|
||||
Err!(
|
||||
"The token set in the config file cannot be revoked. Edit the config file \
|
||||
to change it."
|
||||
|
|
@ -171,6 +205,12 @@ impl Service {
|
|||
| ValidTokenSource::FirstAccount => {
|
||||
Err!("The initial setup token cannot be revoked.")
|
||||
},
|
||||
| ValidTokenSource::TokenFile => {
|
||||
Err!(
|
||||
"Tokens set in the registration token file cannot be revoked. Edit the \
|
||||
registration token file and restart Continuwuity to change them."
|
||||
)
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -94,6 +94,12 @@ impl Service {
|
|||
|
||||
#[tracing::instrument(skip(self))]
|
||||
pub async fn remove_alias(&self, alias: &RoomAliasId, user_id: &UserId) -> Result<()> {
|
||||
if alias == self.services.globals.admin_alias
|
||||
&& user_id != self.services.globals.server_user
|
||||
{
|
||||
return Err!(Request(Forbidden("Only the server user can remove this alias")));
|
||||
}
|
||||
|
||||
if !self.user_can_remove_alias(alias, user_id).await? {
|
||||
return Err!(Request(Forbidden("User is not permitted to remove this alias.")));
|
||||
}
|
||||
|
|
|
|||
|
|
@ -134,7 +134,7 @@ pub async fn handle_incoming_pdu<'a>(
|
|||
);
|
||||
return Err!(Request(TooLarge("PDU is too large")));
|
||||
}
|
||||
trace!("processing incoming pdu from {origin} for room {room_id} with event id {event_id}");
|
||||
trace!("processing incoming PDU from {origin} for room {room_id} with event id {event_id}");
|
||||
|
||||
// 1.1 Check we even know about the room
|
||||
let meta_exists = self.services.metadata.exists(room_id).map(Ok);
|
||||
|
|
@ -164,7 +164,7 @@ pub async fn handle_incoming_pdu<'a>(
|
|||
sender_acl_check.map(|o| o.unwrap_or(Ok(()))),
|
||||
)
|
||||
.await
|
||||
.inspect_err(|e| debug_error!("failed to handle incoming PDU: {e}"))?;
|
||||
.inspect_err(|e| debug_error!(%origin, "failed to handle incoming PDU {event_id}: {e}"))?;
|
||||
|
||||
if is_disabled {
|
||||
return Err!(Request(Forbidden("Federation of this room is disabled by this server.")));
|
||||
|
|
@ -195,6 +195,7 @@ pub async fn handle_incoming_pdu<'a>(
|
|||
}
|
||||
info!(
|
||||
%origin,
|
||||
%room_id,
|
||||
"Dropping inbound PDU for room we aren't participating in"
|
||||
);
|
||||
return Err!(Request(NotFound("This server is not participating in that room.")));
|
||||
|
|
|
|||
|
|
@ -6,63 +6,18 @@
|
|||
use std::{collections::BTreeMap, time::Duration};
|
||||
|
||||
use conduwuit::{
|
||||
Err, Error, Event, PduEvent, Result, debug, debug_error, debug_info, error, implement, info,
|
||||
trace, warn,
|
||||
Err, Event, PduEvent, Result, debug, debug_error, debug_info, debug_warn, implement, trace,
|
||||
warn,
|
||||
};
|
||||
use ed25519_dalek::{Signature, Verifier, VerifyingKey};
|
||||
use ruma::{
|
||||
CanonicalJsonObject, CanonicalJsonValue, KeyId, RoomId, ServerName,
|
||||
api::federation::room::policy_sign::unstable::Request as PolicySignRequest,
|
||||
CanonicalJsonObject, CanonicalJsonValue, KeyId, RoomId, ServerName, SigningKeyId,
|
||||
api::federation::room::{
|
||||
policy_check::unstable::Request as PolicyCheckRequest,
|
||||
policy_sign::unstable::Request as PolicySignRequest,
|
||||
},
|
||||
events::{StateEventType, room::policy::RoomPolicyEventContent},
|
||||
serde::{Base64, base64::UrlSafe},
|
||||
signatures::canonical_json,
|
||||
};
|
||||
use serde_json::value::RawValue;
|
||||
use tokio::time::sleep;
|
||||
|
||||
pub(super) fn verify_policy_signature(
|
||||
via: &ServerName,
|
||||
ps_key: &Base64<UrlSafe, Vec<u8>>,
|
||||
pdu_json: &CanonicalJsonObject,
|
||||
) -> bool {
|
||||
let signature = pdu_json
|
||||
.get("signatures")
|
||||
.and_then(|sigs| sigs.as_object())
|
||||
.and_then(|sigs_map| sigs_map.get(via.as_str()))
|
||||
.and_then(|sigs_for_server| sigs_for_server.as_object())
|
||||
.and_then(|sigs_for_server_map| sigs_for_server_map.get("ed25519:policy_server"))
|
||||
.and_then(|sig| sig.as_str())
|
||||
.and_then(|sig_str| Base64::<UrlSafe, Vec<u8>>::parse(sig_str).ok())
|
||||
.and_then(|sig_b64| {
|
||||
Signature::from_slice(sig_b64.as_bytes())
|
||||
.map(Some)
|
||||
.unwrap_or(None)
|
||||
});
|
||||
let vk = match VerifyingKey::try_from(ps_key.as_bytes()) {
|
||||
| Ok(vk) => vk,
|
||||
| Err(e) => {
|
||||
debug!(
|
||||
error=%e,
|
||||
"Failed to parse policy server public key; cannot verify signature"
|
||||
);
|
||||
return false;
|
||||
},
|
||||
};
|
||||
let cj = match canonical_json(pdu_json.clone()) {
|
||||
| Ok(cj) => cj,
|
||||
| Err(e) => {
|
||||
debug!(
|
||||
error=%e,
|
||||
"Failed to convert event JSON to canonical form; cannot verify policy server signature"
|
||||
);
|
||||
return false;
|
||||
},
|
||||
};
|
||||
match signature {
|
||||
| Some(ref sig) => vk.verify(cj.as_bytes(), sig).is_ok(),
|
||||
| None => false,
|
||||
}
|
||||
}
|
||||
|
||||
/// Asks a remote policy server if the event is allowed.
|
||||
///
|
||||
|
|
@ -76,24 +31,29 @@ pub(super) fn verify_policy_signature(
|
|||
/// contacted for whatever reason, Err(e) is returned, which generally is a
|
||||
/// fail-open operation.
|
||||
#[implement(super::Service)]
|
||||
#[tracing::instrument(skip(self, pdu, pdu_json), level = "info")]
|
||||
pub async fn policy_server_allows_event(
|
||||
#[tracing::instrument(skip(self, pdu, pdu_json, room_id), level = "info")]
|
||||
pub async fn ask_policy_server(
|
||||
&self,
|
||||
pdu: &PduEvent,
|
||||
pdu_json: &mut CanonicalJsonObject,
|
||||
room_id: &RoomId,
|
||||
incoming: bool,
|
||||
) -> Result<()> {
|
||||
) -> Result<bool> {
|
||||
if !self.services.server.config.enable_msc4284_policy_servers {
|
||||
trace!("policy server checking is disabled");
|
||||
return Ok(true); // don't ever contact policy servers
|
||||
}
|
||||
|
||||
if *pdu.event_type() == StateEventType::RoomPolicy.into() {
|
||||
debug!(
|
||||
room_id = %room_id,
|
||||
event_type = ?pdu.event_type(),
|
||||
"Skipping spam check for policy server meta-event"
|
||||
);
|
||||
return Ok(());
|
||||
return Ok(true);
|
||||
}
|
||||
|
||||
let Ok(ps) = self
|
||||
let Ok(policyserver) = self
|
||||
.services
|
||||
.state_accessor
|
||||
.room_state_get_content(room_id, &StateEventType::RoomPolicy, "")
|
||||
|
|
@ -105,144 +65,128 @@ pub async fn policy_server_allows_event(
|
|||
})
|
||||
.map(|c: RoomPolicyEventContent| c)
|
||||
else {
|
||||
debug!("room has no policy server configured, skipping spam check");
|
||||
return Ok(());
|
||||
debug!("room has no policy server configured");
|
||||
return Ok(true);
|
||||
};
|
||||
|
||||
let ps_key = match ps.effective_key() {
|
||||
| Ok(key) => key,
|
||||
| Err(e) => {
|
||||
debug!(
|
||||
error=%e,
|
||||
"room has a policy server configured, but no valid public keys; skipping spam check"
|
||||
);
|
||||
return Ok(());
|
||||
},
|
||||
};
|
||||
|
||||
let Some(via) = ps
|
||||
.via
|
||||
.as_ref()
|
||||
.and_then(|via| ServerName::parse(via).map(Some).unwrap_or(None))
|
||||
else {
|
||||
trace!("No via configured for room policy server, skipping spam check");
|
||||
return Ok(());
|
||||
};
|
||||
|
||||
if via.is_empty() {
|
||||
trace!("Policy server is empty for room {room_id}, skipping spam check");
|
||||
return Ok(());
|
||||
if self.services.server.config.policy_server_check_own_events
|
||||
&& !incoming
|
||||
&& policyserver.public_key.is_none()
|
||||
{
|
||||
// don't contact policy servers for locally generated events, but only when the
|
||||
// policy server does not require signatures
|
||||
trace!("won't contact policy server for locally generated event");
|
||||
return Ok(true);
|
||||
}
|
||||
|
||||
let via = match policyserver.via {
|
||||
| Some(ref via) => ServerName::parse(via)?,
|
||||
| None => {
|
||||
trace!("No policy server configured for room {room_id}");
|
||||
return Ok(true);
|
||||
},
|
||||
};
|
||||
if via.is_empty() {
|
||||
trace!("Policy server is empty for room {room_id}, skipping spam check");
|
||||
return Ok(true);
|
||||
}
|
||||
if !self.services.state_cache.server_in_room(via, room_id).await {
|
||||
debug!(
|
||||
via = %via,
|
||||
"Policy server is not in the room, skipping spam check"
|
||||
);
|
||||
return Ok(());
|
||||
return Ok(true);
|
||||
}
|
||||
|
||||
if incoming {
|
||||
// Verify the signature instead of calling a check
|
||||
if verify_policy_signature(via, &ps_key, pdu_json) {
|
||||
debug!(
|
||||
via = %via,
|
||||
"Event is incoming and has a valid policy server signature"
|
||||
);
|
||||
return Ok(());
|
||||
}
|
||||
debug_info!(
|
||||
via = %via,
|
||||
"Event is incoming but does not have a valid policy server signature; asking policy \
|
||||
server to sign it now"
|
||||
);
|
||||
}
|
||||
|
||||
let outgoing = self
|
||||
.services
|
||||
.sending
|
||||
.convert_to_outgoing_federation_event(pdu_json.clone())
|
||||
.await;
|
||||
|
||||
info!(
|
||||
if policyserver.public_key.is_some() {
|
||||
if !incoming {
|
||||
debug_info!(
|
||||
via = %via,
|
||||
outgoing = ?pdu_json,
|
||||
"Getting policy server signature on event"
|
||||
);
|
||||
return self
|
||||
.fetch_policy_server_signature(pdu, pdu_json, via, outgoing, room_id)
|
||||
.await;
|
||||
}
|
||||
// for incoming events, is it signed by <via> with the key
|
||||
// "ed25519:policy_server"?
|
||||
if let Some(CanonicalJsonValue::Object(sigs)) = pdu_json.get("signatures") {
|
||||
if let Some(CanonicalJsonValue::Object(server_sigs)) = sigs.get(via.as_str()) {
|
||||
let wanted_key_id: &KeyId<ruma::SigningKeyAlgorithm, ruma::Base64PublicKey> =
|
||||
SigningKeyId::parse("ed25519:policy_server")?;
|
||||
if let Some(CanonicalJsonValue::String(_sig_value)) =
|
||||
server_sigs.get(wanted_key_id.as_str())
|
||||
{
|
||||
// TODO: verify signature
|
||||
}
|
||||
}
|
||||
}
|
||||
debug!(
|
||||
"Event is not local and has no policy server signature, performing legacy spam check"
|
||||
);
|
||||
}
|
||||
debug_info!(
|
||||
via = %via,
|
||||
"Asking policy server to sign event"
|
||||
"Checking event for spam with policy server via legacy check"
|
||||
);
|
||||
self.fetch_policy_server_signature(pdu, pdu_json, via, outgoing, room_id, ps_key, 0)
|
||||
.await
|
||||
}
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
#[implement(super::Service)]
|
||||
async fn handle_policy_server_error(
|
||||
&self,
|
||||
error: Error,
|
||||
pdu: &PduEvent,
|
||||
pdu_json: &mut CanonicalJsonObject,
|
||||
via: &ServerName,
|
||||
outgoing: Box<RawValue>,
|
||||
room_id: &RoomId,
|
||||
policy_server_key: Base64<UrlSafe, Vec<u8>>,
|
||||
retries: u8,
|
||||
timeout: Duration,
|
||||
) -> Result<()> {
|
||||
if let Some(retry_after) = error.retry_after() {
|
||||
if retries >= 3 {
|
||||
let response = tokio::time::timeout(
|
||||
Duration::from_secs(self.services.server.config.policy_server_request_timeout),
|
||||
self.services
|
||||
.sending
|
||||
.send_federation_request(via, PolicyCheckRequest {
|
||||
event_id: pdu.event_id().to_owned(),
|
||||
pdu: Some(outgoing),
|
||||
}),
|
||||
)
|
||||
.await;
|
||||
let response = match response {
|
||||
| Ok(Ok(response)) => {
|
||||
debug!("Response from policy server: {:?}", response);
|
||||
response
|
||||
},
|
||||
| Ok(Err(e)) => {
|
||||
warn!(
|
||||
via = %via,
|
||||
event_id = %pdu.event_id(),
|
||||
room_id = %room_id,
|
||||
retries,
|
||||
"Policy server rate-limited us too many times; giving up"
|
||||
"Failed to contact policy server: {e}"
|
||||
);
|
||||
return Err(error); // Error should be passed to c2s
|
||||
}
|
||||
let saturated = retry_after.min(timeout);
|
||||
// ^ don't wait more than 60 seconds
|
||||
// Network or policy server errors are treated as non-fatal: event is allowed by
|
||||
// default.
|
||||
return Err(e);
|
||||
},
|
||||
| Err(elapsed) => {
|
||||
warn!(
|
||||
%via,
|
||||
event_id = %pdu.event_id(),
|
||||
%room_id,
|
||||
%elapsed,
|
||||
"Policy server request timed out after 10 seconds"
|
||||
);
|
||||
return Err!("Request to policy server timed out");
|
||||
},
|
||||
};
|
||||
trace!("Recommendation from policy server was {}", response.recommendation);
|
||||
if response.recommendation == "spam" {
|
||||
warn!(
|
||||
via = %via,
|
||||
event_id = %pdu.event_id(),
|
||||
room_id = %room_id,
|
||||
retry_after = %saturated.as_secs(),
|
||||
retries,
|
||||
"Policy server rate-limited us; retrying after {retry_after:?}"
|
||||
);
|
||||
// TODO: select between this sleep and shutdown signal
|
||||
sleep(saturated).await;
|
||||
return Box::pin(self.fetch_policy_server_signature(
|
||||
pdu,
|
||||
pdu_json,
|
||||
via,
|
||||
outgoing,
|
||||
room_id,
|
||||
policy_server_key,
|
||||
retries.saturating_add(1),
|
||||
))
|
||||
.await;
|
||||
}
|
||||
if error.status_code().is_client_error() {
|
||||
warn!(
|
||||
via = %via,
|
||||
event_id = %pdu.event_id(),
|
||||
room_id = %room_id,
|
||||
error = ?error,
|
||||
"Policy server marked the event as spam"
|
||||
);
|
||||
} else {
|
||||
info!(
|
||||
via = %via,
|
||||
event_id = %pdu.event_id(),
|
||||
room_id = %room_id,
|
||||
error = ?error,
|
||||
"Failed to contact policy server"
|
||||
"Event was marked as spam by policy server",
|
||||
);
|
||||
return Ok(false);
|
||||
}
|
||||
|
||||
Err(error)
|
||||
Ok(true)
|
||||
}
|
||||
|
||||
/// Asks a remote policy server for a signature on this event.
|
||||
/// If the policy server signs this event, the original data is mutated.
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
#[implement(super::Service)]
|
||||
#[tracing::instrument(skip_all, fields(event_id=%pdu.event_id(), via=%via), level = "info")]
|
||||
pub async fn fetch_policy_server_signature(
|
||||
|
|
@ -252,16 +196,13 @@ pub async fn fetch_policy_server_signature(
|
|||
via: &ServerName,
|
||||
outgoing: Box<RawValue>,
|
||||
room_id: &RoomId,
|
||||
policy_server_key: Base64<UrlSafe, Vec<u8>>,
|
||||
retries: u8,
|
||||
) -> Result<()> {
|
||||
let timeout = Duration::from_secs(self.services.server.config.policy_server_request_timeout);
|
||||
) -> Result<bool> {
|
||||
debug!("Requesting policy server signature");
|
||||
let response = tokio::time::timeout(
|
||||
timeout,
|
||||
Duration::from_secs(self.services.server.config.policy_server_request_timeout),
|
||||
self.services
|
||||
.sending
|
||||
.send_federation_request(via, PolicySignRequest { pdu: outgoing.clone() }),
|
||||
.send_federation_request(via, PolicySignRequest { pdu: outgoing }),
|
||||
)
|
||||
.await;
|
||||
|
||||
|
|
@ -271,19 +212,15 @@ pub async fn fetch_policy_server_signature(
|
|||
response
|
||||
},
|
||||
| Ok(Err(e)) => {
|
||||
return self
|
||||
.handle_policy_server_error(
|
||||
e,
|
||||
pdu,
|
||||
pdu_json,
|
||||
via,
|
||||
outgoing,
|
||||
room_id,
|
||||
policy_server_key,
|
||||
retries,
|
||||
timeout,
|
||||
)
|
||||
.await;
|
||||
warn!(
|
||||
via = %via,
|
||||
event_id = %pdu.event_id(),
|
||||
room_id = %room_id,
|
||||
"Failed to contact policy server: {e}"
|
||||
);
|
||||
// Network or policy server errors are treated as non-fatal: event is allowed by
|
||||
// default.
|
||||
return Err(e);
|
||||
},
|
||||
| Err(elapsed) => {
|
||||
warn!(
|
||||
|
|
@ -291,34 +228,34 @@ pub async fn fetch_policy_server_signature(
|
|||
event_id = %pdu.event_id(),
|
||||
%room_id,
|
||||
%elapsed,
|
||||
"Policy server signature request timed out"
|
||||
"Policy server request timed out after 10 seconds"
|
||||
);
|
||||
return Err!(Request(Forbidden("Policy server did not respond in time")));
|
||||
return Err!("Request to policy server timed out");
|
||||
},
|
||||
};
|
||||
|
||||
if !response.signatures.contains_key(via) {
|
||||
error!(
|
||||
if response.signatures.is_none() {
|
||||
debug!("Policy server refused to sign event");
|
||||
return Ok(false);
|
||||
}
|
||||
let sigs: ruma::Signatures<ruma::OwnedServerName, ruma::ServerSigningKeyVersion> =
|
||||
response.signatures.unwrap();
|
||||
if !sigs.contains_key(via) {
|
||||
debug_warn!(
|
||||
"Policy server returned signatures, but did not include the expected server name \
|
||||
'{}': {:?}",
|
||||
via, response.signatures
|
||||
via,
|
||||
sigs
|
||||
);
|
||||
return Err!(BadServerResponse(
|
||||
"Policy server did not include expected server name in signatures"
|
||||
));
|
||||
return Ok(false);
|
||||
}
|
||||
let keypairs = response.signatures.get(via).unwrap();
|
||||
// TODO: need to be able to verify other algorithms
|
||||
let keypairs = sigs.get(via).unwrap();
|
||||
let wanted_key_id = KeyId::parse("ed25519:policy_server")?;
|
||||
if !keypairs.contains_key(wanted_key_id) {
|
||||
error!(
|
||||
signatures = ?response.signatures,
|
||||
debug_warn!(
|
||||
"Policy server returned signature, but did not use the key ID \
|
||||
'ed25519:policy_server'."
|
||||
);
|
||||
return Err!(BadServerResponse(
|
||||
"Policy server signed the event, but did not use the expected key ID"
|
||||
));
|
||||
return Ok(false);
|
||||
}
|
||||
let signatures_entry = pdu_json
|
||||
.entry("signatures".to_owned())
|
||||
|
|
@ -336,12 +273,12 @@ pub async fn fetch_policy_server_signature(
|
|||
);
|
||||
},
|
||||
| Some(_) => {
|
||||
// This should never happen
|
||||
unreachable!(
|
||||
debug_warn!(
|
||||
"Existing `signatures[{}]` field is not an object; cannot insert policy \
|
||||
signature",
|
||||
via
|
||||
);
|
||||
return Ok(false);
|
||||
},
|
||||
| None => {
|
||||
let mut inner = BTreeMap::new();
|
||||
|
|
@ -356,12 +293,11 @@ pub async fn fetch_policy_server_signature(
|
|||
signatures_map.insert(via.as_str().to_owned(), CanonicalJsonValue::Object(inner));
|
||||
},
|
||||
}
|
||||
// TODO: verify signature value was made with the policy_server_key
|
||||
// rather than the expected key.
|
||||
} else {
|
||||
unreachable!(
|
||||
debug_warn!(
|
||||
"Existing `signatures` field is not an object; cannot insert policy signature"
|
||||
);
|
||||
return Ok(false);
|
||||
}
|
||||
Ok(())
|
||||
Ok(true)
|
||||
}
|
||||
|
|
|
|||
|
|
@ -256,7 +256,7 @@ where
|
|||
if incoming_pdu.state_key.is_none() {
|
||||
debug!(event_id = %incoming_pdu.event_id, "Checking policy server for event");
|
||||
match self
|
||||
.policy_server_allows_event(
|
||||
.ask_policy_server(
|
||||
&incoming_pdu,
|
||||
&mut incoming_pdu.to_canonical_object(),
|
||||
room_id,
|
||||
|
|
@ -264,10 +264,9 @@ where
|
|||
)
|
||||
.await
|
||||
{
|
||||
| Err(e) => {
|
||||
| Ok(false) => {
|
||||
warn!(
|
||||
event_id = %incoming_pdu.event_id,
|
||||
error = %e,
|
||||
"Event has been marked as spam by policy server"
|
||||
);
|
||||
soft_fail = true;
|
||||
|
|
|
|||
|
|
@ -7,6 +7,7 @@ pub mod metadata;
|
|||
pub mod outlier;
|
||||
pub mod pdu_metadata;
|
||||
pub mod read_receipt;
|
||||
pub mod roles;
|
||||
pub mod search;
|
||||
pub mod short;
|
||||
pub mod spaces;
|
||||
|
|
@ -31,6 +32,7 @@ pub struct Service {
|
|||
pub outlier: Arc<outlier::Service>,
|
||||
pub pdu_metadata: Arc<pdu_metadata::Service>,
|
||||
pub read_receipt: Arc<read_receipt::Service>,
|
||||
pub roles: Arc<roles::Service>,
|
||||
pub search: Arc<search::Service>,
|
||||
pub short: Arc<short::Service>,
|
||||
pub spaces: Arc<spaces::Service>,
|
||||
|
|
|
|||
1257
src/service/rooms/roles/mod.rs
Normal file
1257
src/service/rooms/roles/mod.rs
Normal file
File diff suppressed because it is too large
Load diff
204
src/service/rooms/roles/tests.rs
Normal file
204
src/service/rooms/roles/tests.rs
Normal file
|
|
@ -0,0 +1,204 @@
|
|||
use std::collections::{BTreeMap, HashSet};
|
||||
|
||||
use conduwuit_core::matrix::space_roles::RoleDefinition;
|
||||
|
||||
use super::{compute_user_power_level, roles_satisfy_requirements};
|
||||
|
||||
pub(super) fn make_roles(entries: &[(&str, Option<i64>)]) -> BTreeMap<String, RoleDefinition> {
|
||||
entries
|
||||
.iter()
|
||||
.map(|(name, pl)| {
|
||||
((*name).to_owned(), RoleDefinition {
|
||||
description: format!("{name} role"),
|
||||
power_level: *pl,
|
||||
})
|
||||
})
|
||||
.collect()
|
||||
}
|
||||
|
||||
pub(super) fn make_set(items: &[&str]) -> HashSet<String> {
|
||||
items.iter().map(|s| (*s).to_owned()).collect()
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn power_level_single_role() {
|
||||
let roles = make_roles(&[("admin", Some(100)), ("mod", Some(50))]);
|
||||
assert_eq!(compute_user_power_level(&roles, &make_set(&["admin"])), Some(100));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn power_level_multiple_roles_takes_highest() {
|
||||
let roles = make_roles(&[("admin", Some(100)), ("mod", Some(50)), ("helper", Some(25))]);
|
||||
assert_eq!(compute_user_power_level(&roles, &make_set(&["mod", "helper"])), Some(50));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn power_level_no_power_roles() {
|
||||
let roles = make_roles(&[("nsfw", None), ("vip", None)]);
|
||||
assert_eq!(compute_user_power_level(&roles, &make_set(&["nsfw", "vip"])), None);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn power_level_mixed_roles() {
|
||||
let roles = make_roles(&[("mod", Some(50)), ("nsfw", None)]);
|
||||
assert_eq!(compute_user_power_level(&roles, &make_set(&["mod", "nsfw"])), Some(50));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn power_level_no_roles_assigned() {
|
||||
let roles = make_roles(&[("admin", Some(100))]);
|
||||
assert_eq!(compute_user_power_level(&roles, &HashSet::new()), None);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn power_level_unknown_role_ignored() {
|
||||
let roles = make_roles(&[("admin", Some(100))]);
|
||||
assert_eq!(compute_user_power_level(&roles, &make_set(&["nonexistent"])), None);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn qualifies_with_all_required_roles() {
|
||||
assert!(roles_satisfy_requirements(
|
||||
&make_set(&["nsfw", "vip"]),
|
||||
&make_set(&["nsfw", "vip", "extra"]),
|
||||
));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn does_not_qualify_missing_one_role() {
|
||||
assert!(!roles_satisfy_requirements(&make_set(&["nsfw", "vip"]), &make_set(&["nsfw"]),));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn qualifies_with_no_requirements() {
|
||||
assert!(roles_satisfy_requirements(&HashSet::new(), &make_set(&["nsfw"])));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn does_not_qualify_with_no_roles() {
|
||||
assert!(!roles_satisfy_requirements(&make_set(&["nsfw"]), &HashSet::new()));
|
||||
}
|
||||
|
||||
// Multi-space scenarios
|
||||
|
||||
#[test]
|
||||
fn multi_space_highest_pl_wins() {
|
||||
let space_a_roles = make_roles(&[("mod", Some(50))]);
|
||||
let space_b_roles = make_roles(&[("admin", Some(100))]);
|
||||
|
||||
let user_roles_a = make_set(&["mod"]);
|
||||
let user_roles_b = make_set(&["admin"]);
|
||||
|
||||
let pl_a = compute_user_power_level(&space_a_roles, &user_roles_a);
|
||||
let pl_b = compute_user_power_level(&space_b_roles, &user_roles_b);
|
||||
|
||||
let effective = [pl_a, pl_b].into_iter().flatten().max();
|
||||
assert_eq!(effective, Some(100));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn multi_space_one_space_has_no_pl() {
|
||||
let space_a_roles = make_roles(&[("nsfw", None)]);
|
||||
let space_b_roles = make_roles(&[("mod", Some(50))]);
|
||||
|
||||
let user_roles_a = make_set(&["nsfw"]);
|
||||
let user_roles_b = make_set(&["mod"]);
|
||||
|
||||
let pl_a = compute_user_power_level(&space_a_roles, &user_roles_a);
|
||||
let pl_b = compute_user_power_level(&space_b_roles, &user_roles_b);
|
||||
|
||||
let effective = [pl_a, pl_b].into_iter().flatten().max();
|
||||
assert_eq!(effective, Some(50));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn multi_space_neither_has_pl() {
|
||||
let space_a_roles = make_roles(&[("nsfw", None)]);
|
||||
let space_b_roles = make_roles(&[("vip", None)]);
|
||||
|
||||
let user_roles_a = make_set(&["nsfw"]);
|
||||
let user_roles_b = make_set(&["vip"]);
|
||||
|
||||
let pl_a = compute_user_power_level(&space_a_roles, &user_roles_a);
|
||||
let pl_b = compute_user_power_level(&space_b_roles, &user_roles_b);
|
||||
|
||||
let effective = [pl_a, pl_b].into_iter().flatten().max();
|
||||
assert_eq!(effective, None);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn multi_space_user_only_in_one_space() {
|
||||
let space_a_roles = make_roles(&[("admin", Some(100))]);
|
||||
let space_b_roles = make_roles(&[("mod", Some(50))]);
|
||||
|
||||
let user_roles_a = make_set(&["admin"]);
|
||||
let user_roles_b: HashSet<String> = HashSet::new();
|
||||
|
||||
let pl_a = compute_user_power_level(&space_a_roles, &user_roles_a);
|
||||
let pl_b = compute_user_power_level(&space_b_roles, &user_roles_b);
|
||||
|
||||
let effective = [pl_a, pl_b].into_iter().flatten().max();
|
||||
assert_eq!(effective, Some(100));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn multi_space_qualifies_in_one_not_other() {
|
||||
let space_a_reqs = make_set(&["staff"]);
|
||||
let space_b_reqs = make_set(&["nsfw"]);
|
||||
|
||||
let user_roles = make_set(&["nsfw"]);
|
||||
|
||||
assert!(!roles_satisfy_requirements(&space_a_reqs, &user_roles));
|
||||
assert!(roles_satisfy_requirements(&space_b_reqs, &user_roles));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn multi_space_qualifies_after_role_revoke_via_other_space() {
|
||||
let space_a_reqs = make_set(&["nsfw"]);
|
||||
let space_b_reqs = make_set(&["vip"]);
|
||||
|
||||
let user_roles_after_revoke = make_set(&["vip"]);
|
||||
|
||||
assert!(!roles_satisfy_requirements(&space_a_reqs, &user_roles_after_revoke));
|
||||
assert!(roles_satisfy_requirements(&space_b_reqs, &user_roles_after_revoke));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn multi_space_room_has_reqs_in_one_space_only() {
|
||||
let space_a_reqs = make_set(&["admin"]);
|
||||
let space_b_reqs: HashSet<String> = HashSet::new();
|
||||
|
||||
let user_roles = make_set(&["nsfw"]);
|
||||
|
||||
assert!(!roles_satisfy_requirements(&space_a_reqs, &user_roles));
|
||||
assert!(roles_satisfy_requirements(&space_b_reqs, &user_roles));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn multi_space_no_qualification_anywhere() {
|
||||
let space_a_reqs = make_set(&["staff"]);
|
||||
let space_b_reqs = make_set(&["admin"]);
|
||||
|
||||
let user_roles = make_set(&["nsfw"]);
|
||||
|
||||
let qualifies_a = roles_satisfy_requirements(&space_a_reqs, &user_roles);
|
||||
let qualifies_b = roles_satisfy_requirements(&space_b_reqs, &user_roles);
|
||||
|
||||
assert!(!qualifies_a);
|
||||
assert!(!qualifies_b);
|
||||
assert!(!(qualifies_a || qualifies_b));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn multi_space_same_role_different_pl() {
|
||||
let space_a_roles = make_roles(&[("mod", Some(50))]);
|
||||
let space_b_roles = make_roles(&[("mod", Some(75))]);
|
||||
|
||||
let user_roles = make_set(&["mod"]);
|
||||
|
||||
let pl_a = compute_user_power_level(&space_a_roles, &user_roles);
|
||||
let pl_b = compute_user_power_level(&space_b_roles, &user_roles);
|
||||
|
||||
let effective = [pl_a, pl_b].into_iter().flatten().max();
|
||||
assert_eq!(effective, Some(75));
|
||||
}
|
||||
|
|
@ -327,7 +327,7 @@ where
|
|||
}
|
||||
},
|
||||
| TimelineEventType::SpaceChild =>
|
||||
if let Some(_state_key) = pdu.state_key() {
|
||||
if pdu.state_key().is_some() {
|
||||
self.services
|
||||
.spaces
|
||||
.roomid_spacehierarchy_cache
|
||||
|
|
@ -359,6 +359,8 @@ where
|
|||
| _ => {},
|
||||
}
|
||||
|
||||
self.services.roles.on_pdu_appended(room_id, &pdu);
|
||||
|
||||
// CONCERN: If we receive events with a relation out-of-order, we never write
|
||||
// their relation / thread. We need some kind of way to trigger when we receive
|
||||
// this event, and potentially a way to rebuild the table entirely.
|
||||
|
|
|
|||
|
|
@ -97,6 +97,17 @@ pub async fn build_and_append_pdu(
|
|||
)));
|
||||
}
|
||||
}
|
||||
if *pdu.kind() == TimelineEventType::RoomPowerLevels {
|
||||
if let Ok(proposed) =
|
||||
pdu.get_content::<ruma::events::room::power_levels::RoomPowerLevelsEventContent>()
|
||||
{
|
||||
self.services
|
||||
.roles
|
||||
.validate_pl_change(&room_id, pdu.sender(), &proposed)
|
||||
.await?;
|
||||
}
|
||||
}
|
||||
|
||||
if *pdu.kind() == TimelineEventType::RoomCreate {
|
||||
trace!("Creating shortroomid for {room_id}");
|
||||
self.services
|
||||
|
|
|
|||
|
|
@ -9,6 +9,7 @@ use conduwuit_core::{
|
|||
state_res::{self, RoomVersion},
|
||||
},
|
||||
utils::{self, IterStream, ReadyExt, stream::TryIgnore},
|
||||
warn,
|
||||
};
|
||||
use futures::{StreamExt, TryStreamExt, future, future::ready};
|
||||
use ruma::{
|
||||
|
|
@ -297,15 +298,23 @@ pub async fn create_hash_and_sign_event(
|
|||
"Checking event in room {} with policy server",
|
||||
pdu.room_id.as_ref().map_or("None", |id| id.as_str())
|
||||
);
|
||||
self.services
|
||||
match self
|
||||
.services
|
||||
.event_handler
|
||||
.policy_server_allows_event(
|
||||
&pdu,
|
||||
&mut pdu_json,
|
||||
pdu.room_id().expect("has room ID"),
|
||||
false,
|
||||
)
|
||||
.await?;
|
||||
.ask_policy_server(&pdu, &mut pdu_json, pdu.room_id().expect("has room ID"), false)
|
||||
.await
|
||||
{
|
||||
| Ok(true) => {},
|
||||
| Ok(false) => {
|
||||
return Err!(Request(Forbidden(debug_warn!(
|
||||
"Policy server marked this event as spam"
|
||||
))));
|
||||
},
|
||||
| Err(e) => {
|
||||
// fail open
|
||||
warn!("Failed to check event with policy server: {e}");
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// Generate short event id
|
||||
|
|
|
|||
|
|
@ -80,6 +80,7 @@ struct Services {
|
|||
threads: Dep<rooms::threads::Service>,
|
||||
search: Dep<rooms::search::Service>,
|
||||
spaces: Dep<rooms::spaces::Service>,
|
||||
roles: Dep<rooms::roles::Service>,
|
||||
event_handler: Dep<rooms::event_handler::Service>,
|
||||
}
|
||||
|
||||
|
|
@ -112,6 +113,7 @@ impl crate::Service for Service {
|
|||
threads: args.depend::<rooms::threads::Service>("rooms::threads"),
|
||||
search: args.depend::<rooms::search::Service>("rooms::search"),
|
||||
spaces: args.depend::<rooms::spaces::Service>("rooms::spaces"),
|
||||
roles: args.depend::<rooms::roles::Service>("rooms::roles"),
|
||||
event_handler: args
|
||||
.depend::<rooms::event_handler::Service>("rooms::event_handler"),
|
||||
},
|
||||
|
|
|
|||
|
|
@ -228,7 +228,7 @@ async fn acquire_notary_result(&self, missing: &mut Batch, server_keys: ServerSi
|
|||
self.add_signing_keys(server_keys.clone()).await;
|
||||
|
||||
if let Some(key_ids) = missing.get_mut(server) {
|
||||
key_ids.retain(|key_id| key_exists(&server_keys, key_id));
|
||||
key_ids.retain(|key_id| !key_exists(&server_keys, key_id));
|
||||
if key_ids.is_empty() {
|
||||
missing.remove(server);
|
||||
}
|
||||
|
|
|
|||
|
|
@ -1,7 +1,7 @@
|
|||
use std::{any::Any, collections::BTreeMap, sync::Arc};
|
||||
|
||||
use conduwuit::{
|
||||
Result, Server, SyncRwLock, debug, debug_info, info, trace, utils::stream::IterStream,
|
||||
Result, Server, SyncRwLock, debug, debug_info, error, info, trace, utils::stream::IterStream,
|
||||
};
|
||||
use database::Database;
|
||||
use futures::{Stream, StreamExt, TryStreamExt};
|
||||
|
|
@ -11,8 +11,8 @@ use crate::{
|
|||
account_data, admin, announcements, antispam, appservice, client, config, emergency,
|
||||
federation, firstrun, globals, key_backups,
|
||||
manager::Manager,
|
||||
media, moderation, presence, pusher, registration_tokens, resolver, rooms, sending,
|
||||
server_keys,
|
||||
media, moderation, password_reset, presence, pusher, registration_tokens, resolver, rooms,
|
||||
sending, server_keys,
|
||||
service::{self, Args, Map, Service},
|
||||
sync, transactions, uiaa, users,
|
||||
};
|
||||
|
|
@ -27,6 +27,7 @@ pub struct Services {
|
|||
pub globals: Arc<globals::Service>,
|
||||
pub key_backups: Arc<key_backups::Service>,
|
||||
pub media: Arc<media::Service>,
|
||||
pub password_reset: Arc<password_reset::Service>,
|
||||
pub presence: Arc<presence::Service>,
|
||||
pub pusher: Arc<pusher::Service>,
|
||||
pub registration_tokens: Arc<registration_tokens::Service>,
|
||||
|
|
@ -81,6 +82,7 @@ impl Services {
|
|||
globals: build!(globals::Service),
|
||||
key_backups: build!(key_backups::Service),
|
||||
media: build!(media::Service),
|
||||
password_reset: build!(password_reset::Service),
|
||||
presence: build!(presence::Service),
|
||||
pusher: build!(pusher::Service),
|
||||
registration_tokens: build!(registration_tokens::Service),
|
||||
|
|
@ -94,6 +96,7 @@ impl Services {
|
|||
outlier: build!(rooms::outlier::Service),
|
||||
pdu_metadata: build!(rooms::pdu_metadata::Service),
|
||||
read_receipt: build!(rooms::read_receipt::Service),
|
||||
roles: build!(rooms::roles::Service),
|
||||
search: build!(rooms::search::Service),
|
||||
short: build!(rooms::short::Service),
|
||||
spaces: build!(rooms::spaces::Service),
|
||||
|
|
@ -125,10 +128,12 @@ impl Services {
|
|||
}
|
||||
|
||||
pub async fn start(self: &Arc<Self>) -> Result<Arc<Self>> {
|
||||
debug_info!("Starting services...");
|
||||
info!("Starting services...");
|
||||
|
||||
self.admin.set_services(Some(Arc::clone(self)).as_ref());
|
||||
super::migrations::migrations(self).await?;
|
||||
super::migrations::migrations(self)
|
||||
.await
|
||||
.inspect_err(|e| error!("Migrations failed: {e}"))?;
|
||||
self.manager
|
||||
.lock()
|
||||
.await
|
||||
|
|
@ -147,7 +152,7 @@ impl Services {
|
|||
.await;
|
||||
}
|
||||
|
||||
debug_info!("Services startup complete.");
|
||||
info!("Services startup complete.");
|
||||
|
||||
Ok(Arc::clone(self))
|
||||
}
|
||||
|
|
|
|||
|
|
@ -181,20 +181,11 @@ pub async fn try_auth(
|
|||
uiaainfo.completed.push(AuthType::Password);
|
||||
},
|
||||
| AuthData::ReCaptcha(r) => {
|
||||
if self.services.config.recaptcha_private_site_key.is_none() {
|
||||
let Some(ref private_site_key) = self.services.config.recaptcha_private_site_key
|
||||
else {
|
||||
return Err!(Request(Forbidden("ReCaptcha is not configured.")));
|
||||
}
|
||||
match recaptcha_verify::verify(
|
||||
self.services
|
||||
.config
|
||||
.recaptcha_private_site_key
|
||||
.as_ref()
|
||||
.unwrap(),
|
||||
r.response.as_str(),
|
||||
None,
|
||||
)
|
||||
.await
|
||||
{
|
||||
};
|
||||
match recaptcha_verify::verify_v3(private_site_key, r.response.as_str(), None).await {
|
||||
| Ok(()) => {
|
||||
uiaainfo.completed.push(AuthType::ReCaptcha);
|
||||
},
|
||||
|
|
|
|||
|
|
@ -20,12 +20,25 @@ crate-type = [
|
|||
[dependencies]
|
||||
conduwuit-build-metadata.workspace = true
|
||||
conduwuit-service.workspace = true
|
||||
conduwuit-core.workspace = true
|
||||
async-trait.workspace = true
|
||||
askama.workspace = true
|
||||
axum.workspace = true
|
||||
axum-extra.workspace = true
|
||||
base64.workspace = true
|
||||
futures.workspace = true
|
||||
tracing.workspace = true
|
||||
rand.workspace = true
|
||||
ruma.workspace = true
|
||||
thiserror.workspace = true
|
||||
tower-http.workspace = true
|
||||
serde.workspace = true
|
||||
memory-serve = "2.1.0"
|
||||
validator = { version = "0.20.0", features = ["derive"] }
|
||||
tower-sec-fetch = { version = "0.1.2", features = ["tracing"] }
|
||||
|
||||
[build-dependencies]
|
||||
memory-serve = "2.1.0"
|
||||
|
||||
[lints]
|
||||
workspace = true
|
||||
|
|
|
|||
Some files were not shown because too many files have changed in this diff Show more
Loading…
Add table
Reference in a new issue