Compare commits
286 commits
| Author | SHA1 | Date | |
|---|---|---|---|
| 20b1529dc4 | |||
| bba5318ce8 | |||
| 1f91a74b27 | |||
| 5f901a560b | |||
| 59401e1786 | |||
| 95fa3b022a | |||
|
|
6b013bcf60 | ||
|
|
05a49ceb60 | ||
|
|
728c5828ba | ||
|
|
50c94d85a1 | ||
|
|
0cc188f62c | ||
|
|
6451671f66 | ||
|
|
ca21a885d5 | ||
|
|
4af4110f6d | ||
|
|
51b450c05c | ||
|
|
f9d1f71343 | ||
|
|
7901e4b996 | ||
|
|
7b6bf4b78e | ||
|
|
67d5619ccb | ||
|
|
bf001f96d6 | ||
|
|
ae2b87f03f | ||
|
|
957cd3502f | ||
|
|
a109542eb8 | ||
|
|
8c4844b00b | ||
|
|
eec7103910 | ||
|
|
43aa172829 | ||
|
|
9b4c483b6d | ||
|
|
b885e206ce | ||
|
|
07a935f625 | ||
|
|
d13801e976 | ||
|
|
5716c36b47 | ||
|
|
f11943b956 | ||
|
|
8b726a9c94 | ||
|
|
ffa3c53847 | ||
|
|
da8833fca4 | ||
|
|
267feb3c09 | ||
|
|
3d50af0943 | ||
|
|
9515019641 | ||
|
|
f0f53dfada | ||
| 3bfd10efab | |||
| 835d434d92 | |||
|
|
acef746d26 | ||
|
|
3356b60e97 | ||
|
|
c988c2b387 | ||
|
|
3121229707 | ||
|
|
ff85145ee8 | ||
|
|
f61d1a11e0 | ||
|
|
11ba8979ff | ||
|
|
f6956ccf12 | ||
|
|
977a5ac8c1 | ||
|
|
906c3df953 | ||
|
|
33e5fdc16f | ||
|
|
77ac17855a | ||
|
|
65ffcd2884 | ||
|
|
7ec88bdbfe | ||
|
|
da3fac8cb4 | ||
|
|
3366113939 | ||
|
|
9039784f41 | ||
|
|
7f165e5bbe | ||
|
|
c97111e3ca | ||
|
|
e8746760fa | ||
|
|
9dbd75e740 | ||
|
|
85b2fd91b9 | ||
|
|
6420c218a9 | ||
|
|
ec9402a328 | ||
|
|
d01f06a5c2 | ||
|
|
aee51b3b0d | ||
|
|
afcbccd9dd | ||
|
|
02448000f9 | ||
|
|
6af8918aa8 | ||
|
|
08f83cc438 | ||
|
|
a0468db121 | ||
|
|
4f23d566ed | ||
|
|
dac619b5f8 | ||
|
|
fdc9cc8074 | ||
|
|
40b1dabcca | ||
|
|
94c5af40cf | ||
|
|
36a3144757 | ||
|
|
220b61c589 | ||
|
|
38e93cde3e | ||
|
|
7e501cdb09 | ||
|
|
da182c162d | ||
|
|
9a3f7f4af7 | ||
|
|
5ce1f682f6 | ||
|
|
5feb08dff2 | ||
|
|
1e527c1075 | ||
|
|
c6943ae683 | ||
|
|
8932dacdc4 | ||
|
|
0be3d850ac | ||
|
|
57e7cf7057 | ||
|
|
1005585ccb | ||
|
|
1188566dbd | ||
|
|
0058212757 | ||
|
|
dbf8fd3320 | ||
|
|
ce295b079e | ||
|
|
5eb74bc1dd | ||
|
|
da561ab792 | ||
|
|
80c9bb4796 | ||
|
|
22a47d1e59 | ||
|
|
83883a002c | ||
|
|
8dd4b71e0e | ||
|
|
6fe3b1563c | ||
|
|
44d3825c8e | ||
|
|
d6c5484c3a | ||
|
|
1fd6056f3f | ||
|
|
525a0ae52b | ||
|
|
60210754d9 | ||
|
|
08dd787083 | ||
|
|
2c7233812b | ||
|
|
d725e98220 | ||
|
|
0226ca1e83 | ||
|
|
1695b6d19e | ||
|
|
c40cc3b236 | ||
|
|
754959e80d | ||
|
|
37888fb670 | ||
|
|
7207398a9e | ||
|
|
1a7bda209b | ||
|
|
7e1950b3d2 | ||
|
|
b507898c62 | ||
|
|
f4af67575e | ||
|
|
6adb99397e | ||
|
|
8ce83a8a14 | ||
|
|
052c4dfa21 | ||
|
|
a43dee1728 | ||
|
|
763d9b3de8 | ||
|
|
1e6d95583c | ||
|
|
8a254a33cc | ||
|
|
c97dd54766 | ||
|
|
8ddb7c70c0 | ||
|
|
cb9786466b | ||
|
|
18d2662b01 | ||
|
|
558262dd1f | ||
|
|
d311b87579 | ||
|
|
8702f55cf5 | ||
|
|
d4481b07ac | ||
|
|
92351df925 | ||
|
|
47e2733ea1 | ||
|
|
6637e4c6a7 | ||
|
|
35e441452f | ||
|
|
66bbb655bf | ||
|
|
81b202ce51 | ||
|
|
4657844d46 | ||
|
|
9016cd11a6 | ||
|
|
dd70094719 | ||
|
|
fcd49b7ab3 | ||
|
|
470c9b52dd | ||
|
|
0d8cafc329 | ||
|
|
2f9956ddca | ||
|
|
21a97cdd0b | ||
|
|
e986cd4536 | ||
|
|
526d862296 | ||
|
|
fbeb5bf186 | ||
|
|
a336f2df44 | ||
|
|
19b78ec73e | ||
|
|
27ff2d9363 | ||
|
|
50fa8c3abf | ||
|
|
18c4be869f | ||
|
|
fc00b96d8b | ||
|
|
fa4156d8a6 | ||
|
|
23638cd714 | ||
|
|
9f1a483e76 | ||
|
|
688ef727e5 | ||
|
|
3de026160e | ||
|
|
9fe761513d | ||
|
|
abf1e1195a | ||
|
|
d9537e9b55 | ||
|
|
0d1de70d8f | ||
|
|
4aa03a71eb | ||
|
|
f847918575 | ||
|
|
7569a0545b | ||
|
|
b6c5991e1f | ||
|
|
efd879fcd8 | ||
|
|
92a848f74d | ||
|
|
776b5865ba | ||
|
|
722bacbe89 | ||
|
|
46907e3dce | ||
|
|
31e2195e56 | ||
|
|
7ecac93ddc | ||
|
|
6a0b103722 | ||
|
|
23d77b614f | ||
|
|
e01aa44b16 | ||
|
|
a08739c246 | ||
|
|
c14864b881 | ||
|
|
1773e72e68 | ||
|
|
0f94d55689 | ||
|
|
abfb6377c2 | ||
|
|
91d64f5b24 | ||
|
|
9a3f3f6e78 | ||
|
|
b3e31a4aad | ||
|
|
8cda431cc6 | ||
|
|
02b9a3f713 | ||
|
|
d40893730c | ||
|
|
28fae58cf6 | ||
|
|
f458f6ab76 | ||
|
|
fdf9cea533 | ||
|
|
ecb1b73c84 | ||
|
|
e03082480a | ||
|
|
f9e7f019ad | ||
|
|
12069e7c86 | ||
|
|
77928a62b4 | ||
|
|
c73cb5c1bf | ||
|
|
a140eacb04 | ||
|
|
40536b13da | ||
|
|
cacd8681d1 | ||
|
|
b095518e6f | ||
|
|
a91add4aca | ||
|
|
7fec48423a | ||
|
|
2f6b7c7a40 | ||
|
|
48ab6adec1 | ||
|
|
592244d5aa | ||
|
|
091893f8bc | ||
|
|
6eba6a838e | ||
|
|
1a11c784f5 | ||
|
|
55ccfdb973 | ||
|
|
a9a39e6d5e | ||
|
|
38bf1ccbcc | ||
|
|
b7a8cbdb42 | ||
|
|
4e1dac32a5 | ||
|
|
7b21c3fd9f | ||
|
|
f566ca1b93 | ||
|
|
debe411e23 | ||
|
|
dc0d6a9220 | ||
|
|
2efdb6fb0d | ||
|
|
576348a445 | ||
|
|
f322b6dca0 | ||
|
|
a1ed77a99c | ||
|
|
01b5dffeee | ||
|
|
ea3c00da43 | ||
|
|
047eba0442 | ||
|
|
11a088be5d | ||
|
|
dc6bd4e541 | ||
|
|
2bf9207cc4 | ||
|
|
b2a87e2fb9 | ||
|
|
7d0686f33c | ||
|
|
082c44f355 | ||
|
|
117c581948 | ||
|
|
cb846a3ad1 | ||
|
|
81b984b2cc | ||
|
|
e2961390ee | ||
|
|
cb75e836e0 | ||
|
|
cb7a988b1b | ||
|
|
aa5400bcef | ||
|
|
ff4dddd673 | ||
|
|
c22b17fb29 | ||
|
|
3da7fa24db | ||
|
|
d15ac1d3c1 | ||
|
|
a9ebdf58e2 | ||
|
|
f1ab27d344 | ||
|
|
8bc6e6ccca | ||
|
|
60a3abe752 | ||
|
|
e3b874d336 | ||
|
|
f3f82831b4 | ||
|
|
26aac1408e | ||
|
|
be8f62396a | ||
|
|
40996a6602 | ||
|
|
9cae531f90 | ||
|
|
56eea935b6 | ||
|
|
fcb646f8c4 | ||
|
|
57b21c1b32 | ||
|
|
8d66500c99 | ||
|
|
abacf1dc20 | ||
|
|
134e5cadaf | ||
|
|
8ec0f0d830 | ||
|
|
0453544036 | ||
|
|
89ad809270 | ||
|
|
ecd3a4eb41 | ||
|
|
5506997ca0 | ||
|
|
abc0683d59 | ||
|
|
dd60beb9fb | ||
|
|
d9520f9382 | ||
|
|
40bb5366bb | ||
|
|
f82bd77073 | ||
|
|
7d84ba5ff2 | ||
|
|
69a8937584 | ||
|
|
b2ec13d342 | ||
|
|
4e55e1ea90 | ||
|
|
f5f3108d5f | ||
|
|
d1e1ee6156 | ||
|
|
ae16a45515 | ||
|
|
077bda23a6 | ||
|
|
a2bf0c1223 | ||
|
|
b9b1ff87f2 | ||
|
|
3c0146d437 | ||
|
|
7485d4aa91 | ||
|
|
39bdb4c5a2 | ||
|
|
55fb3b8848 |
227 changed files with 11321 additions and 2848 deletions
|
|
@ -1,9 +1,9 @@
|
|||
# Local build and dev artifacts
|
||||
target/
|
||||
!target/debug/conduwuit
|
||||
|
||||
# Docker files
|
||||
Dockerfile*
|
||||
docker/
|
||||
|
||||
# IDE files
|
||||
.vscode
|
||||
|
|
|
|||
|
|
@ -44,7 +44,7 @@ runs:
|
|||
|
||||
- name: Login to builtin registry
|
||||
if: ${{ env.BUILTIN_REGISTRY_ENABLED == 'true' }}
|
||||
uses: docker/login-action@v3
|
||||
uses: docker/login-action@v4
|
||||
with:
|
||||
registry: ${{ env.BUILTIN_REGISTRY }}
|
||||
username: ${{ inputs.registry_user }}
|
||||
|
|
@ -52,7 +52,7 @@ runs:
|
|||
|
||||
- name: Set up Docker Buildx
|
||||
if: ${{ env.BUILTIN_REGISTRY_ENABLED == 'true' }}
|
||||
uses: docker/setup-buildx-action@v3
|
||||
uses: docker/setup-buildx-action@v4
|
||||
with:
|
||||
# Use persistent BuildKit if BUILDKIT_ENDPOINT is set (e.g. tcp://buildkit:8125)
|
||||
driver: ${{ env.BUILDKIT_ENDPOINT != '' && 'remote' || 'docker-container' }}
|
||||
|
|
@ -61,7 +61,7 @@ runs:
|
|||
- name: Extract metadata (tags) for Docker
|
||||
if: ${{ env.BUILTIN_REGISTRY_ENABLED == 'true' }}
|
||||
id: meta
|
||||
uses: docker/metadata-action@v5
|
||||
uses: docker/metadata-action@v6
|
||||
with:
|
||||
flavor: |
|
||||
latest=auto
|
||||
|
|
|
|||
|
|
@ -67,7 +67,7 @@ runs:
|
|||
uses: ./.forgejo/actions/rust-toolchain
|
||||
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v3
|
||||
uses: docker/setup-buildx-action@v4
|
||||
with:
|
||||
# Use persistent BuildKit if BUILDKIT_ENDPOINT is set (e.g. tcp://buildkit:8125)
|
||||
driver: ${{ env.BUILDKIT_ENDPOINT != '' && 'remote' || 'docker-container' }}
|
||||
|
|
@ -79,7 +79,7 @@ runs:
|
|||
|
||||
- name: Login to builtin registry
|
||||
if: ${{ env.BUILTIN_REGISTRY_ENABLED == 'true' }}
|
||||
uses: docker/login-action@v3
|
||||
uses: docker/login-action@v4
|
||||
with:
|
||||
registry: ${{ env.BUILTIN_REGISTRY }}
|
||||
username: ${{ inputs.registry_user }}
|
||||
|
|
@ -87,7 +87,7 @@ runs:
|
|||
|
||||
- name: Extract metadata (labels, annotations) for Docker
|
||||
id: meta
|
||||
uses: docker/metadata-action@v5
|
||||
uses: docker/metadata-action@v6
|
||||
with:
|
||||
images: ${{ inputs.images }}
|
||||
# default labels & annotations: https://github.com/docker/metadata-action/blob/master/src/meta.ts#L509
|
||||
|
|
@ -152,7 +152,7 @@ runs:
|
|||
|
||||
- name: inject cache into docker
|
||||
if: ${{ env.BUILDKIT_ENDPOINT == '' }}
|
||||
uses: https://github.com/reproducible-containers/buildkit-cache-dance@v3.3.0
|
||||
uses: https://github.com/reproducible-containers/buildkit-cache-dance@v3.3.2
|
||||
with:
|
||||
cache-map: |
|
||||
{
|
||||
|
|
|
|||
|
|
@ -62,10 +62,6 @@ sync:
|
|||
target: registry.gitlab.com/continuwuity/continuwuity
|
||||
type: repository
|
||||
<<: *tags-main
|
||||
- source: *source
|
||||
target: git.nexy7574.co.uk/mirrored/continuwuity
|
||||
type: repository
|
||||
<<: *tags-releases
|
||||
- source: *source
|
||||
target: ghcr.io/continuwuity/continuwuity
|
||||
type: repository
|
||||
|
|
|
|||
|
|
@ -17,7 +17,7 @@ jobs:
|
|||
runs-on: ubuntu-latest
|
||||
strategy:
|
||||
matrix:
|
||||
container: ["ubuntu-latest", "ubuntu-previous", "debian-latest", "debian-oldstable"]
|
||||
container: [ "ubuntu-latest", "ubuntu-previous", "debian-latest", "debian-oldstable" ]
|
||||
container:
|
||||
image: "ghcr.io/tcpipuk/act-runner:${{ matrix.container }}"
|
||||
|
||||
|
|
@ -30,6 +30,28 @@ jobs:
|
|||
echo "version=$VERSION" >> $GITHUB_OUTPUT
|
||||
echo "distribution=$DISTRIBUTION" >> $GITHUB_OUTPUT
|
||||
echo "Debian distribution: $DISTRIBUTION ($VERSION)"
|
||||
#- name: Work around llvm-project#153385
|
||||
# id: llvm-workaround
|
||||
# run: |
|
||||
# if [ -f /usr/share/apt/default-sequoia.config ]; then
|
||||
# echo "Applying workaround for llvm-project#153385"
|
||||
# mkdir -p /etc/crypto-policies/back-ends/
|
||||
# cp /usr/share/apt/default-sequoia.config /etc/crypto-policies/back-ends/apt-sequoia.config
|
||||
# sed -i 's/\(sha1\.second_preimage_resistance = \)2026-02-01/\12026-06-01/' /etc/crypto-policies/back-ends/apt-sequoia.config
|
||||
# else
|
||||
# echo "No workaround needed for llvm-project#153385"
|
||||
# fi
|
||||
- name: Pick compatible clang version
|
||||
id: clang-version
|
||||
run: |
|
||||
# both latest need to use clang-23, but oldstable and previous can just use clang
|
||||
if [[ "${{ matrix.container }}" == "ubuntu-latest" ]]; then
|
||||
echo "Using clang-23 package for ${{ matrix.container }}"
|
||||
echo "version=clang-23" >> $GITHUB_OUTPUT
|
||||
else
|
||||
echo "Using default clang package for ${{ matrix.container }}"
|
||||
echo "version=clang" >> $GITHUB_OUTPUT
|
||||
fi
|
||||
|
||||
- name: Checkout repository with full history
|
||||
uses: actions/checkout@v6
|
||||
|
|
@ -105,7 +127,7 @@ jobs:
|
|||
run: |
|
||||
apt-get update -y
|
||||
# Build dependencies for rocksdb
|
||||
apt-get install -y clang liburing-dev
|
||||
apt-get install -y liburing-dev ${{ steps.clang-version.outputs.version }}
|
||||
|
||||
- name: Run cargo-deb
|
||||
id: cargo-deb
|
||||
|
|
|
|||
|
|
@ -59,7 +59,7 @@ jobs:
|
|||
registry_password: ${{ secrets.BUILTIN_REGISTRY_PASSWORD || secrets.GITHUB_TOKEN }}
|
||||
- name: Build and push Docker image by digest
|
||||
id: build
|
||||
uses: docker/build-push-action@v6
|
||||
uses: docker/build-push-action@v7
|
||||
with:
|
||||
context: .
|
||||
file: "docker/Dockerfile"
|
||||
|
|
@ -146,7 +146,7 @@ jobs:
|
|||
registry_password: ${{ secrets.BUILTIN_REGISTRY_PASSWORD || secrets.GITHUB_TOKEN }}
|
||||
- name: Build and push max-perf Docker image by digest
|
||||
id: build
|
||||
uses: docker/build-push-action@v6
|
||||
uses: docker/build-push-action@v7
|
||||
with:
|
||||
context: .
|
||||
file: "docker/Dockerfile"
|
||||
|
|
|
|||
|
|
@ -43,7 +43,7 @@ jobs:
|
|||
name: Renovate
|
||||
runs-on: ubuntu-latest
|
||||
container:
|
||||
image: ghcr.io/renovatebot/renovate:42.70.2@sha256:3c2ac1b94fa92ef2fa4d1a0493f2c3ba564454720a32fdbcac2db2846ff1ee47
|
||||
image: ghcr.io/renovatebot/renovate:43.59.4@sha256:f951508dea1e7d71cbe6deca298ab0a05488e7631229304813f630cc06010892
|
||||
options: --tmpfs /tmp:exec
|
||||
steps:
|
||||
- name: Checkout
|
||||
|
|
|
|||
|
|
@ -23,7 +23,7 @@ jobs:
|
|||
persist-credentials: true
|
||||
token: ${{ secrets.FORGEJO_TOKEN }}
|
||||
|
||||
- uses: https://github.com/cachix/install-nix-action@4e002c8ec80594ecd40e759629461e26c8abed15 # v31.9.0
|
||||
- uses: https://github.com/cachix/install-nix-action@19effe9fe722874e6d46dd7182e4b8b7a43c4a99 # v31.10.0
|
||||
with:
|
||||
nix_path: nixpkgs=channel:nixos-unstable
|
||||
|
||||
|
|
|
|||
4
.github/FUNDING.yml
vendored
4
.github/FUNDING.yml
vendored
|
|
@ -1,4 +1,4 @@
|
|||
github: [JadedBlueEyes, nexy7574, gingershaped]
|
||||
custom:
|
||||
- https://ko-fi.com/nexy7574
|
||||
- https://ko-fi.com/JadedBlueEyes
|
||||
- https://timedout.uk/donate.html
|
||||
- https://jade.ellis.link/sponsors
|
||||
|
|
|
|||
|
|
@ -1,5 +1,6 @@
|
|||
default_install_hook_types:
|
||||
- pre-commit
|
||||
- pre-push
|
||||
- commit-msg
|
||||
default_stages:
|
||||
- pre-commit
|
||||
|
|
@ -23,7 +24,7 @@ repos:
|
|||
- id: check-added-large-files
|
||||
|
||||
- repo: https://github.com/crate-ci/typos
|
||||
rev: v1.43.2
|
||||
rev: v1.44.0
|
||||
hooks:
|
||||
- id: typos
|
||||
- id: typos
|
||||
|
|
@ -31,7 +32,7 @@ repos:
|
|||
stages: [commit-msg]
|
||||
|
||||
- repo: https://github.com/crate-ci/committed
|
||||
rev: v1.1.10
|
||||
rev: v1.1.11
|
||||
hooks:
|
||||
- id: committed
|
||||
|
||||
|
|
@ -45,3 +46,14 @@ repos:
|
|||
pass_filenames: false
|
||||
stages:
|
||||
- pre-commit
|
||||
|
||||
- repo: local
|
||||
hooks:
|
||||
- id: cargo-clippy
|
||||
name: cargo clippy
|
||||
entry: cargo clippy -- -D warnings
|
||||
language: system
|
||||
pass_filenames: false
|
||||
types: [rust]
|
||||
stages:
|
||||
- pre-push
|
||||
|
|
|
|||
|
|
@ -24,3 +24,5 @@ extend-ignore-re = [
|
|||
"continuwity" = "continuwuity"
|
||||
"execuse" = "execuse"
|
||||
"oltp" = "OTLP"
|
||||
|
||||
rememvering = "remembering"
|
||||
|
|
|
|||
115
CHANGELOG.md
115
CHANGELOG.md
|
|
@ -1,25 +1,94 @@
|
|||
# Continuwuity 0.5.6 (2026-03-03)
|
||||
|
||||
## Security
|
||||
|
||||
- Admin escape commands received over federation will never be executed, as this is never valid in a genuine situation. Contributed by @Jade.
|
||||
- Fixed data amplification vulnerability (CWE-409) that affected configurations with server-side compression enabled (non-default). Contributed by @nex.
|
||||
|
||||
## Features
|
||||
|
||||
- Outgoing presence is now disabled by default, and the config option documentation has been adjusted to more accurately represent the weight of presence, typing indicators, and read receipts. Contributed by @nex. ([#1399](https://forgejo.ellis.link/continuwuation/continuwuity/pulls/1399))
|
||||
- Improved the concurrency handling of federation transactions, vastly improving performance and reliability by more accurately handling inbound transactions and reducing the amount of repeated wasted work. Contributed by @nex and @Jade. ([#1428](https://forgejo.ellis.link/continuwuation/continuwuity/pulls/1428))
|
||||
- Added [MSC3202](https://github.com/matrix-org/matrix-spec-proposals/pull/3202) Device masquerading (not all of MSC3202). This should fix issues with enabling [MSC4190](https://github.com/matrix-org/matrix-spec-proposals/pull/4190) for some Mautrix bridges. Contributed by @Jade ([#1435](https://forgejo.ellis.link/continuwuation/continuwuity/pulls/1435))
|
||||
- Added [MSC3814](https://github.com/matrix-org/matrix-spec-proposals/pull/3814) Dehydrated Devices - you can now decrypt messages sent while all devices were logged out. ([#1436](https://forgejo.ellis.link/continuwuation/continuwuity/pulls/1436))
|
||||
- Implement [MSC4143](https://github.com/matrix-org/matrix-spec-proposals/pull/4143) MatrixRTC transport discovery endpoint. Move RTC foci configuration from `[global.well_known]` to a new `[global.matrix_rtc]` section with a `foci` field. Contributed by @0xnim ([#1442](https://forgejo.ellis.link/continuwuation/continuwuity/pulls/1442))
|
||||
- Updated `list-backups` admin command to output one backup per line. ([#1394](https://forgejo.ellis.link/continuwuation/continuwuity/pulls/1394))
|
||||
- Improved URL preview fetching with a more compatible user agent for sites like YouTube Music. Added `!admin media delete-url-preview <url>` command to clear cached URL previews that were stuck and broken. ([#1434](https://forgejo.ellis.link/continuwuation/continuwuity/pulls/1434))
|
||||
|
||||
## Bugfixes
|
||||
|
||||
- Removed non-compliant nor functional room alias lookups over federation. Contributed by @nex ([#1393](https://forgejo.ellis.link/continuwuation/continuwuity/pulls/1393))
|
||||
- Removed ability to set rocksdb as read only. Doing so would cause unintentional and buggy behaviour. Contributed by @Terryiscool160. ([#1418](https://forgejo.ellis.link/continuwuation/continuwuity/pulls/1418))
|
||||
- Fixed a startup crash in the sender service if we can't detect the number of CPU cores, even if the `sender_workers` config option is set correctly. Contributed by @katie. ([#1421](https://forgejo.ellis.link/continuwuation/continuwuity/pulls/1421))
|
||||
- Removed the `allow_public_room_directory_without_auth` config option. Contributed by @0xnim. ([#1441](https://forgejo.ellis.link/continuwuation/continuwuity/pulls/1441))
|
||||
- Fixed sliding sync v5 list ranges always starting from 0, causing extra rooms to be unnecessarily processed and returned. Contributed by @0xnim ([#1445](https://forgejo.ellis.link/continuwuation/continuwuity/pulls/1445))
|
||||
- Fixed a bug that (repairably) caused a room split between continuwuity and non-continuwuity servers when the room had both `m.room.policy` and `org.matrix.msc4284.policy` in its room state. Contributed by @nex ([#1481](https://forgejo.ellis.link/continuwuation/continuwuity/pulls/1481))
|
||||
- Fixed `!admin media delete --mxc <url>` responding with an error message when the media was deleted successfully. Contributed by @lynxize
|
||||
- Fixed spurious 404 media errors in the logs. Contributed by @benbot.
|
||||
- Fixed spurious warn about needed backfill via federation for non-federated rooms. Contributed by @kraem.
|
||||
|
||||
# Continuwuity v0.5.5 (2026-02-15)
|
||||
|
||||
## Features
|
||||
|
||||
- Added unstable support for [MSC4406:
|
||||
`M_SENDER_IGNORED`](https://github.com/matrix-org/matrix-spec-proposals/pull/4406).
|
||||
Contributed by @nex ([#1308](https://forgejo.ellis.link/continuwuation/continuwuity/pulls/1308))
|
||||
- Introduce a resolver command to allow flushing a server from the cache or to flush the complete cache. Contributed by
|
||||
@Omar007 ([#1349](https://forgejo.ellis.link/continuwuation/continuwuity/pulls/1349))
|
||||
- Improved the handling of restricted join rules and improved the performance of local-first joins. Contributed by
|
||||
@nex. ([#1368](https://forgejo.ellis.link/continuwuation/continuwuity/pulls/1368))
|
||||
- You can now set a custom User Agent for URL previews; the default one has been modified to be less likely to be
|
||||
rejected. Contributed by @trashpanda ([#1372](https://forgejo.ellis.link/continuwuation/continuwuity/pulls/1372))
|
||||
- Improved the first-time setup experience for new homeserver administrators:
|
||||
- Account registration is disabled on the first run, except for with a new special registration token that is logged
|
||||
to the console.
|
||||
- Other helpful information is logged to the console as well, including a giant warning if open registration is
|
||||
enabled.
|
||||
- The default index page now says to check the console for setup instructions if no accounts have been created.
|
||||
- Once the first admin account is created, an improved welcome message is sent to the admin room.
|
||||
|
||||
Contributed by @ginger.
|
||||
|
||||
## Bugfixes
|
||||
|
||||
- Fixed invites sent to other users in the same homeserver not being properly sent down sync. Users with missing or
|
||||
broken invites should clear their client caches after updating to make them appear. ([#1249](https://forgejo.ellis.link/continuwuation/continuwuity/pulls/1249))
|
||||
- LDAP-enabled servers will no longer have all admins demoted when LDAP-controlled admins are not configured.
|
||||
Contributed by @Jade ([#1307](https://forgejo.ellis.link/continuwuation/continuwuity/pulls/1307))
|
||||
- Fixed sliding sync not resolving wildcard state key requests, enabling Video/Audio calls in Element X. ([#1370](https://forgejo.ellis.link/continuwuation/continuwuity/pulls/1370))
|
||||
|
||||
## Misc
|
||||
|
||||
- #1344
|
||||
|
||||
# Continuwuity v0.5.4 (2026-02-08)
|
||||
|
||||
## Features
|
||||
|
||||
- The announcement checker will now announce errors it encounters in the first run to the admin room, plus a few other
|
||||
misc improvements. Contributed by @Jade ([#1288](https://forgejo.ellis.link/continuwuation/continuwuity/pulls/1288))
|
||||
- Drastically improved the performance and reliability of account deactivations. Contributed by @nex ([#1314](https://forgejo.ellis.link/continuwuation/continuwuity/pulls/1314))
|
||||
- Drastically improved the performance and reliability of account deactivations. Contributed by
|
||||
@nex ([#1314](https://forgejo.ellis.link/continuwuation/continuwuity/pulls/1314))
|
||||
- Refuse to process requests for and events in rooms that we no longer have any local users in (reduces state resets
|
||||
and improves performance). Contributed by @nex ([#1316](https://forgejo.ellis.link/continuwuation/continuwuity/pulls/1316))
|
||||
and improves performance). Contributed by
|
||||
@nex ([#1316](https://forgejo.ellis.link/continuwuation/continuwuity/pulls/1316))
|
||||
- Added server-specific admin API routes to ban and unban rooms, for use with moderation bots. Contributed by @nex
|
||||
([#1301](https://forgejo.ellis.link/continuwuation/continuwuity/pulls/1301))
|
||||
|
||||
## Bugfixes
|
||||
|
||||
- Fix the generated configuration containing uncommented optional sections. Contributed by @Jade ([#1290](https://forgejo.ellis.link/continuwuation/continuwuity/pulls/1290))
|
||||
- Fixed specification non-compliance when handling remote media errors. Contributed by @nex ([#1298](https://forgejo.ellis.link/continuwuation/continuwuity/pulls/1298))
|
||||
- Fix the generated configuration containing uncommented optional sections. Contributed by
|
||||
@Jade ([#1290](https://forgejo.ellis.link/continuwuation/continuwuity/pulls/1290))
|
||||
- Fixed specification non-compliance when handling remote media errors. Contributed by
|
||||
@nex ([#1298](https://forgejo.ellis.link/continuwuation/continuwuity/pulls/1298))
|
||||
- UIAA requests which check for out-of-band success (sent by matrix-js-sdk) will no longer create unhelpful errors in
|
||||
the logs. Contributed by @ginger ([#1305](https://forgejo.ellis.link/continuwuation/continuwuity/pulls/1305))
|
||||
- Use exists instead of contains to save writing to a buffer in `src/service/users/mod.rs`: `is_login_disabled`.
|
||||
Contributed
|
||||
by @aprilgrimoire. ([#1340](https://forgejo.ellis.link/continuwuation/continuwuity/pulls/1340))
|
||||
- Fixed backtraces being swallowed during panics. Contributed by @jade ([#1337](https://forgejo.ellis.link/continuwuation/continuwuity/pulls/1337))
|
||||
- Fixed backtraces being swallowed during panics. Contributed by
|
||||
@jade ([#1337](https://forgejo.ellis.link/continuwuation/continuwuity/pulls/1337))
|
||||
- Fixed a potential vulnerability that could allow an evil remote server to return malicious events during the room join
|
||||
and knock process. Contributed by @nex, reported by violet & [mat](https://matdoes.dev).
|
||||
- Fixed a race condition that could result in outlier PDUs being incorrectly marked as visible to a remote server.
|
||||
|
|
@ -28,25 +97,30 @@
|
|||
|
||||
## Docs
|
||||
|
||||
- Fixed Fedora install instructions. Contributed by @julian45 ([#1342](https://forgejo.ellis.link/continuwuation/continuwuity/pulls/1342))
|
||||
- Fixed Fedora install instructions. Contributed by
|
||||
@julian45 ([#1342](https://forgejo.ellis.link/continuwuation/continuwuity/pulls/1342))
|
||||
|
||||
# Continuwuity 0.5.3 (2026-01-12)
|
||||
|
||||
## Features
|
||||
|
||||
- Improve the display of nested configuration with the `!admin server show-config` command. Contributed by @Jade ([#1279](https://forgejo.ellis.link/continuwuation/continuwuity/pulls/1279))
|
||||
- Improve the display of nested configuration with the `!admin server show-config` command. Contributed by
|
||||
@Jade ([#1279](https://forgejo.ellis.link/continuwuation/continuwuity/pulls/1279))
|
||||
|
||||
## Bugfixes
|
||||
|
||||
- Fixed `M_BAD_JSON` error when sending invites to other servers or when providing joins. Contributed by @nex ([#1286](https://forgejo.ellis.link/continuwuation/continuwuity/pulls/1286))
|
||||
- Fixed `M_BAD_JSON` error when sending invites to other servers or when providing joins. Contributed by
|
||||
@nex ([#1286](https://forgejo.ellis.link/continuwuation/continuwuity/pulls/1286))
|
||||
|
||||
## Docs
|
||||
|
||||
- Improve admin command documentation generation. Contributed by @ginger ([#1280](https://forgejo.ellis.link/continuwuation/continuwuity/pulls/1280))
|
||||
- Improve admin command documentation generation. Contributed by
|
||||
@ginger ([#1280](https://forgejo.ellis.link/continuwuation/continuwuity/pulls/1280))
|
||||
|
||||
## Misc
|
||||
|
||||
- Improve timeout-related code for federation and URL previews. Contributed by @Jade ([#1278](https://forgejo.ellis.link/continuwuation/continuwuity/pulls/1278))
|
||||
- Improve timeout-related code for federation and URL previews. Contributed by
|
||||
@Jade ([#1278](https://forgejo.ellis.link/continuwuation/continuwuity/pulls/1278))
|
||||
|
||||
# Continuwuity 0.5.2 (2026-01-09)
|
||||
|
||||
|
|
@ -57,11 +131,14 @@
|
|||
after a certain amount of time has passed. Additionally, the `registration_token_file` configuration option is
|
||||
superseded by this feature and **has been removed**. Use the new `!admin token` command family to manage registration
|
||||
tokens. Contributed by @ginger (#783).
|
||||
- Implemented a configuration defined admin list independent of the admin room. Contributed by @Terryiscool160. ([#1253](https://forgejo.ellis.link/continuwuation/continuwuity/pulls/1253))
|
||||
- Implemented a configuration defined admin list independent of the admin room. Contributed by
|
||||
@Terryiscool160. ([#1253](https://forgejo.ellis.link/continuwuation/continuwuity/pulls/1253))
|
||||
- Added support for invite and join anti-spam via Draupnir and Meowlnir, similar to that of synapse-http-antispam.
|
||||
Contributed by @nex. ([#1263](https://forgejo.ellis.link/continuwuation/continuwuity/pulls/1263))
|
||||
- Implemented account locking functionality, to complement user suspension. Contributed by @nex. ([#1266](https://forgejo.ellis.link/continuwuation/continuwuity/pulls/1266))
|
||||
- Added admin command to forcefully log out all of a user's existing sessions. Contributed by @nex. ([#1271](https://forgejo.ellis.link/continuwuation/continuwuity/pulls/1271))
|
||||
- Implemented account locking functionality, to complement user suspension. Contributed by
|
||||
@nex. ([#1266](https://forgejo.ellis.link/continuwuation/continuwuity/pulls/1266))
|
||||
- Added admin command to forcefully log out all of a user's existing sessions. Contributed by
|
||||
@nex. ([#1271](https://forgejo.ellis.link/continuwuation/continuwuity/pulls/1271))
|
||||
- Implemented toggling the ability for an account to log in without mutating any of its data. Contributed by @nex. (
|
||||
[#1272](https://forgejo.ellis.link/continuwuation/continuwuity/pulls/1272))
|
||||
- Add support for custom room create event timestamps, to allow generating custom prefixes in hashed room IDs.
|
||||
|
|
@ -71,7 +148,8 @@
|
|||
|
||||
## Bugfixes
|
||||
|
||||
- Fixed unreliable room summary fetching and improved error messages. Contributed by @nex. ([#1257](https://forgejo.ellis.link/continuwuation/continuwuity/pulls/1257))
|
||||
- Fixed unreliable room summary fetching and improved error messages. Contributed by
|
||||
@nex. ([#1257](https://forgejo.ellis.link/continuwuation/continuwuity/pulls/1257))
|
||||
- Client requested timeout parameter is now applied to e2ee key lookups and claims. Related federation requests are now
|
||||
also concurrent. Contributed by @nex. ([#1261](https://forgejo.ellis.link/continuwuation/continuwuity/pulls/1261))
|
||||
- Fixed the whoami endpoint returning HTTP 404 instead of HTTP 403, which confused some appservices. Contributed by
|
||||
|
|
@ -90,9 +168,12 @@
|
|||
|
||||
## Features
|
||||
|
||||
- Enabled the OTLP exporter in default builds, and allow configuring the exporter protocol. (@Jade). ([#1251](https://forgejo.ellis.link/continuwuation/continuwuity/pulls/1251))
|
||||
- Enabled the OTLP exporter in default builds, and allow configuring the exporter protocol. (
|
||||
@Jade). ([#1251](https://forgejo.ellis.link/continuwuation/continuwuity/pulls/1251))
|
||||
|
||||
## Bug Fixes
|
||||
|
||||
- Don't allow admin room upgrades, as this can break the admin room (@timedout) ([#1245](https://forgejo.ellis.link/continuwuation/continuwuity/pulls/1245))
|
||||
- Fix invalid creators in power levels during upgrade to v12 (@timedout) ([#1245](https://forgejo.ellis.link/continuwuation/continuwuity/pulls/1245))
|
||||
- Don't allow admin room upgrades, as this can break the admin room (
|
||||
@timedout) ([#1245](https://forgejo.ellis.link/continuwuation/continuwuity/pulls/1245))
|
||||
- Fix invalid creators in power levels during upgrade to v12 (
|
||||
@timedout) ([#1245](https://forgejo.ellis.link/continuwuation/continuwuity/pulls/1245))
|
||||
|
|
|
|||
|
|
@ -22,22 +22,18 @@ Continuwuity uses pre-commit hooks to enforce various coding standards and catch
|
|||
- Validating YAML, JSON, and TOML files
|
||||
- Checking for merge conflicts
|
||||
|
||||
You can run these checks locally by installing [prefligit](https://github.com/j178/prefligit):
|
||||
You can run these checks locally by installing [prek](https://github.com/j178/prek):
|
||||
|
||||
|
||||
```bash
|
||||
# Requires UV: https://docs.astral.sh/uv/getting-started/installation/
|
||||
# Mac/linux: curl -LsSf https://astral.sh/uv/install.sh | sh
|
||||
# Windows: powershell -ExecutionPolicy ByPass -c "irm https://astral.sh/uv/install.ps1 | iex"
|
||||
|
||||
# Install prefligit using cargo-binstall
|
||||
cargo binstall prefligit
|
||||
# Install prek using cargo-binstall
|
||||
cargo binstall prek
|
||||
|
||||
# Install git hooks to run checks automatically
|
||||
prefligit install
|
||||
prek install
|
||||
|
||||
# Run all checks
|
||||
prefligit --all-files
|
||||
prek --all-files
|
||||
```
|
||||
|
||||
Alternatively, you can use [pre-commit](https://pre-commit.com/):
|
||||
|
|
@ -54,7 +50,7 @@ pre-commit install
|
|||
pre-commit run --all-files
|
||||
```
|
||||
|
||||
These same checks are run in CI via the prefligit-checks workflow to ensure consistency. These must pass before the PR is merged.
|
||||
These same checks are run in CI via the prek-checks workflow to ensure consistency. These must pass before the PR is merged.
|
||||
|
||||
### Running tests locally
|
||||
|
||||
|
|
@ -85,24 +81,31 @@ If your changes are done to fix Matrix tests, please note that in your pull requ
|
|||
|
||||
### Writing documentation
|
||||
|
||||
Continuwuity's website uses [`mdbook`][mdbook] and is deployed via CI using Cloudflare Pages
|
||||
Continuwuity's website uses [`rspress`][rspress] and is deployed via CI using Cloudflare Pages
|
||||
in the [`documentation.yml`][documentation.yml] workflow file. All documentation is in the `docs/`
|
||||
directory at the top level.
|
||||
|
||||
To build the documentation locally:
|
||||
To load the documentation locally:
|
||||
|
||||
1. Install NodeJS and npm from their [official website][nodejs-download] or via your package manager of choice
|
||||
|
||||
2. From the project's root directory, install the relevant npm modules
|
||||
|
||||
1. Install mdbook if you don't have it already:
|
||||
```bash
|
||||
cargo install mdbook # or cargo binstall, or another method
|
||||
npm ci
|
||||
```
|
||||
|
||||
2. Build the documentation:
|
||||
3. Make changes to the document pages as you see fit
|
||||
|
||||
4. Generate a live preview of the documentation
|
||||
|
||||
```bash
|
||||
mdbook build
|
||||
npm run docs:dev
|
||||
```
|
||||
|
||||
The output of the mdbook generation is in `public/`. You can open the HTML files directly in your browser without needing a web server.
|
||||
A webserver for the docs will be spun up for you (e.g. at `http://localhost:3000`). Any changes you make to the documentation will be live-reloaded on the webpage.
|
||||
|
||||
Alternatively, you can build the documentation using `npm run docs:build` - the output of this will be in the `/doc_build` directory. Once you're happy with your documentation updates, you can commit the changes.
|
||||
|
||||
### Commit Messages
|
||||
|
||||
|
|
@ -169,5 +172,6 @@ continuwuity Matrix rooms for Code of Conduct violations.
|
|||
[continuwuity-matrix]: https://matrix.to/#/#continuwuity:continuwuity.org?via=continuwuity.org&via=ellis.link&via=explodie.org&via=matrix.org
|
||||
[complement]: https://github.com/matrix-org/complement/
|
||||
[sytest]: https://github.com/matrix-org/sytest/
|
||||
[mdbook]: https://rust-lang.github.io/mdBook/
|
||||
[nodejs-download]: https://nodejs.org/en/download
|
||||
[rspress]: https://rspress.rs/
|
||||
[documentation.yml]: https://forgejo.ellis.link/continuwuation/continuwuity/src/branch/main/.forgejo/workflows/documentation.yml
|
||||
|
|
|
|||
1425
Cargo.lock
generated
1425
Cargo.lock
generated
File diff suppressed because it is too large
Load diff
46
Cargo.toml
46
Cargo.toml
|
|
@ -12,7 +12,7 @@ license = "Apache-2.0"
|
|||
# See also `rust-toolchain.toml`
|
||||
readme = "README.md"
|
||||
repository = "https://forgejo.ellis.link/continuwuation/continuwuity"
|
||||
version = "0.5.4"
|
||||
version = "0.5.7-alpha.1"
|
||||
|
||||
[workspace.metadata.crane]
|
||||
name = "conduwuit"
|
||||
|
|
@ -68,7 +68,7 @@ default-features = false
|
|||
version = "0.1.3"
|
||||
|
||||
[workspace.dependencies.rand]
|
||||
version = "0.8.5"
|
||||
version = "0.10.0"
|
||||
|
||||
# Used for the http request / response body type for Ruma endpoints used with reqwest
|
||||
[workspace.dependencies.bytes]
|
||||
|
|
@ -84,7 +84,7 @@ version = "1.3.1"
|
|||
version = "1.11.1"
|
||||
|
||||
[workspace.dependencies.axum]
|
||||
version = "0.7.9"
|
||||
version = "0.8.8"
|
||||
default-features = false
|
||||
features = [
|
||||
"form",
|
||||
|
|
@ -97,9 +97,9 @@ features = [
|
|||
]
|
||||
|
||||
[workspace.dependencies.axum-extra]
|
||||
version = "0.9.6"
|
||||
version = "0.12.0"
|
||||
default-features = false
|
||||
features = ["typed-header", "tracing"]
|
||||
features = ["typed-header", "tracing", "cookie"]
|
||||
|
||||
[workspace.dependencies.axum-server]
|
||||
version = "0.7.2"
|
||||
|
|
@ -110,7 +110,7 @@ default-features = false
|
|||
version = "0.7"
|
||||
|
||||
[workspace.dependencies.axum-client-ip]
|
||||
version = "0.6.1"
|
||||
version = "0.7"
|
||||
|
||||
[workspace.dependencies.tower]
|
||||
version = "0.5.2"
|
||||
|
|
@ -118,7 +118,7 @@ default-features = false
|
|||
features = ["util"]
|
||||
|
||||
[workspace.dependencies.tower-http]
|
||||
version = "0.6.2"
|
||||
version = "0.6.8"
|
||||
default-features = false
|
||||
features = [
|
||||
"add-extension",
|
||||
|
|
@ -144,6 +144,7 @@ features = [
|
|||
"socks",
|
||||
"hickory-dns",
|
||||
"http2",
|
||||
"stream",
|
||||
]
|
||||
|
||||
[workspace.dependencies.serde]
|
||||
|
|
@ -158,7 +159,7 @@ features = ["raw_value"]
|
|||
|
||||
# Used for appservice registration files
|
||||
[workspace.dependencies.serde-saphyr]
|
||||
version = "0.0.17"
|
||||
version = "0.0.21"
|
||||
|
||||
# Used to load forbidden room/user regex from config
|
||||
[workspace.dependencies.serde_regex]
|
||||
|
|
@ -253,7 +254,7 @@ features = [
|
|||
version = "0.4.0"
|
||||
|
||||
[workspace.dependencies.libloading]
|
||||
version = "0.8.6"
|
||||
version = "0.9.0"
|
||||
|
||||
# Validating urls in config, was already a transitive dependency
|
||||
[workspace.dependencies.url]
|
||||
|
|
@ -298,7 +299,7 @@ default-features = false
|
|||
features = ["env", "toml"]
|
||||
|
||||
[workspace.dependencies.hickory-resolver]
|
||||
version = "0.25.1"
|
||||
version = "0.25.2"
|
||||
default-features = false
|
||||
features = [
|
||||
"serde",
|
||||
|
|
@ -342,7 +343,8 @@ version = "0.1.2"
|
|||
# Used for matrix spec type definitions and helpers
|
||||
[workspace.dependencies.ruma]
|
||||
git = "https://forgejo.ellis.link/continuwuation/ruwuma"
|
||||
rev = "458d52bdc7f9a07c497be94a1420ebd3d87d7b2b"
|
||||
#branch = "conduwuit-changes"
|
||||
rev = "bb12ed288a31a23aa11b10ba0fad22b7f985eb88"
|
||||
features = [
|
||||
"compat",
|
||||
"rand",
|
||||
|
|
@ -362,6 +364,7 @@ features = [
|
|||
"unstable-msc2870",
|
||||
"unstable-msc3026",
|
||||
"unstable-msc3061",
|
||||
"unstable-msc3814",
|
||||
"unstable-msc3245",
|
||||
"unstable-msc3266",
|
||||
"unstable-msc3381", # polls
|
||||
|
|
@ -378,7 +381,9 @@ features = [
|
|||
"unstable-msc4210", # remove legacy mentions
|
||||
"unstable-extensible-events",
|
||||
"unstable-pdu",
|
||||
"unstable-msc4155"
|
||||
"unstable-msc4155",
|
||||
"unstable-msc4143", # livekit well_known response
|
||||
"unstable-msc4284"
|
||||
]
|
||||
|
||||
[workspace.dependencies.rust-rocksdb]
|
||||
|
|
@ -423,7 +428,7 @@ features = ["http", "grpc-tonic", "trace", "logs", "metrics"]
|
|||
|
||||
# optional sentry metrics for crash/panic reporting
|
||||
[workspace.dependencies.sentry]
|
||||
version = "0.45.0"
|
||||
version = "0.46.0"
|
||||
default-features = false
|
||||
features = [
|
||||
"backtrace",
|
||||
|
|
@ -439,9 +444,9 @@ features = [
|
|||
]
|
||||
|
||||
[workspace.dependencies.sentry-tracing]
|
||||
version = "0.45.0"
|
||||
version = "0.46.0"
|
||||
[workspace.dependencies.sentry-tower]
|
||||
version = "0.45.0"
|
||||
version = "0.46.0"
|
||||
|
||||
# jemalloc usage
|
||||
[workspace.dependencies.tikv-jemalloc-sys]
|
||||
|
|
@ -470,7 +475,7 @@ features = ["use_std"]
|
|||
version = "0.5"
|
||||
|
||||
[workspace.dependencies.nix]
|
||||
version = "0.30.1"
|
||||
version = "0.31.0"
|
||||
default-features = false
|
||||
features = ["resource"]
|
||||
|
||||
|
|
@ -548,6 +553,12 @@ features = ["sync", "tls-rustls", "rustls-provider"]
|
|||
[workspace.dependencies.resolv-conf]
|
||||
version = "0.7.5"
|
||||
|
||||
[workspace.dependencies.yansi]
|
||||
version = "1.0.1"
|
||||
|
||||
[workspace.dependencies.askama]
|
||||
version = "0.15.0"
|
||||
|
||||
#
|
||||
# Patches
|
||||
#
|
||||
|
|
@ -958,3 +969,6 @@ needless_raw_string_hashes = "allow"
|
|||
|
||||
# TODO: Enable this lint & fix all instances
|
||||
collapsible_if = "allow"
|
||||
|
||||
# TODO: break these apart
|
||||
cognitive_complexity = "allow"
|
||||
|
|
|
|||
11
README.md
11
README.md
|
|
@ -57,10 +57,15 @@ Continuwuity aims to:
|
|||
|
||||
### Can I try it out?
|
||||
|
||||
Check out the [documentation](https://continuwuity.org) for installation instructions, or join one of these vetted public homeservers running Continuwuity to get a feel for things!
|
||||
Check out the [documentation](https://continuwuity.org) for installation instructions.
|
||||
|
||||
- https://continuwuity.rocks -- A public demo server operated by the Continuwuity Team.
|
||||
- https://federated.nexus -- Federated Nexus is a community resource hosting multiple FOSS (especially federated) services, including Matrix and Forgejo.
|
||||
If you want to try it out as a user, we have some partnered homeservers you can use:
|
||||
* You can head over to [https://federated.nexus](https://federated.nexus/) in your browser.
|
||||
* Hit the `Apply to Join` button. Once your request has been accepted, you will receive an email with your username and password.
|
||||
* Head over to [https://app.federated.nexus](https://app.federated.nexus/) and you can sign in there, or use any other matrix chat client you wish elsewhere.
|
||||
* Your username for matrix will be in the form of `@username:federated.nexus`, however you can simply use the `username` part to log in. Your password is your password.
|
||||
|
||||
* There's also [https://continuwuity.rocks/](https://continuwuity.rocks/). You can register a new account using Cinny via [this convenient link](https://app.cinny.in/register/continuwuity.rocks), or you can use Element or another matrix client *that supports registration*.
|
||||
|
||||
### What are we working on?
|
||||
|
||||
|
|
|
|||
|
|
@ -6,10 +6,10 @@ set -euo pipefail
|
|||
COMPLEMENT_SRC="${COMPLEMENT_SRC:-$1}"
|
||||
|
||||
# A `.jsonl` file to write test logs to
|
||||
LOG_FILE="${2:-complement_test_logs.jsonl}"
|
||||
LOG_FILE="${2:-tests/test_results/complement/test_logs.jsonl}"
|
||||
|
||||
# A `.jsonl` file to write test results to
|
||||
RESULTS_FILE="${3:-complement_test_results.jsonl}"
|
||||
RESULTS_FILE="${3:-tests/test_results/complement/test_results.jsonl}"
|
||||
|
||||
# The base docker image to use for complement tests
|
||||
# You can build the default with `docker build -t continuwuity:complement -f ./docker/complement.Dockerfile .`
|
||||
|
|
|
|||
1
changelog.d/+6368729a.feature.md
Normal file
1
changelog.d/+6368729a.feature.md
Normal file
|
|
@ -0,0 +1 @@
|
|||
Added support for using an admin command to issue self-service password reset links.
|
||||
1
changelog.d/+6e57599d.bugfix.md
Normal file
1
changelog.d/+6e57599d.bugfix.md
Normal file
|
|
@ -0,0 +1 @@
|
|||
Stopped left rooms from being unconditionally sent on initial sync, hopefully fixing spurious appearances of left rooms in some clients (and making sync faster as a bonus). Contributed by @ginger
|
||||
1
changelog.d/+space-permission-cascading.feature.md
Normal file
1
changelog.d/+space-permission-cascading.feature.md
Normal file
|
|
@ -0,0 +1 @@
|
|||
Add Space permission cascading: power levels cascade from Spaces to child rooms, role-based room access with custom roles, continuous enforcement (auto-join/kick), and admin commands for role management. Server-wide default controlled by `space_permission_cascading` config flag (off by default), with per-Space overrides via `!admin space roles enable/disable <space>`.
|
||||
1
changelog.d/1265.bugfix
Normal file
1
changelog.d/1265.bugfix
Normal file
|
|
@ -0,0 +1 @@
|
|||
Fixed corrupted appservice registrations causing the server to enter a crash loop. Contributed by @nex.
|
||||
1
changelog.d/1371.feature.md
Normal file
1
changelog.d/1371.feature.md
Normal file
|
|
@ -0,0 +1 @@
|
|||
Re-added support for reading registration tokens from a file. Contributed by @ginger and @benbot.
|
||||
1
changelog.d/1448.bugfix
Normal file
1
changelog.d/1448.bugfix
Normal file
|
|
@ -0,0 +1 @@
|
|||
Prevent removing the admin room alias (`#admins`) to avoid accidentally breaking admin room functionality. Contributed by @0xnim
|
||||
1
changelog.d/1527.feature.md
Normal file
1
changelog.d/1527.feature.md
Normal file
|
|
@ -0,0 +1 @@
|
|||
Add new config option to allow or disallow search engine indexing through a `<meta ../>` tag. Defaults to blocking indexing (`content="noindex"`). Contributed by @s1lv3r and @ginger.
|
||||
18
clippy.toml
18
clippy.toml
|
|
@ -15,6 +15,18 @@ disallowed-macros = [
|
|||
{ path = "log::trace", reason = "use conduwuit_core::trace" },
|
||||
]
|
||||
|
||||
disallowed-methods = [
|
||||
{ path = "tokio::spawn", reason = "use and pass conduuwit_core::server::Server::runtime() to spawn from" },
|
||||
]
|
||||
[[disallowed-methods]]
|
||||
path = "tokio::spawn"
|
||||
reason = "use and pass conduwuit_core::server::Server::runtime() to spawn from"
|
||||
|
||||
[[disallowed-methods]]
|
||||
path = "reqwest::Response::bytes"
|
||||
reason = "bytes is unsafe, use limit_read via the conduwuit_core::utils::LimitReadExt trait instead"
|
||||
|
||||
[[disallowed-methods]]
|
||||
path = "reqwest::Response::text"
|
||||
reason = "text is unsafe, use limit_read_text via the conduwuit_core::utils::LimitReadExt trait instead"
|
||||
|
||||
[[disallowed-methods]]
|
||||
path = "reqwest::Response::json"
|
||||
reason = "json is unsafe, use limit_read_text via the conduwuit_core::utils::LimitReadExt trait instead"
|
||||
|
|
|
|||
|
|
@ -9,10 +9,9 @@ address = "0.0.0.0"
|
|||
allow_device_name_federation = true
|
||||
allow_guest_registration = true
|
||||
allow_public_room_directory_over_federation = true
|
||||
allow_public_room_directory_without_auth = true
|
||||
allow_registration = true
|
||||
database_path = "/database"
|
||||
log = "trace,h2=debug,hyper=debug"
|
||||
log = "trace,h2=debug,hyper=debug,conduwuit_database=warn,conduwuit_service::manager=info,conduwuit_api::router=error,conduwuit_router=error,tower_http=error"
|
||||
port = [8008, 8448]
|
||||
trusted_servers = []
|
||||
only_query_trusted_key_servers = false
|
||||
|
|
@ -25,7 +24,7 @@ url_preview_domain_explicit_denylist = ["*"]
|
|||
media_compat_file_link = false
|
||||
media_startup_check = true
|
||||
prune_missing_media = true
|
||||
log_colors = true
|
||||
log_colors = false
|
||||
admin_room_notices = false
|
||||
allow_check_for_updates = false
|
||||
intentionally_unknown_config_option_for_testing = true
|
||||
|
|
@ -48,6 +47,7 @@ federation_idle_timeout = 300
|
|||
sender_timeout = 300
|
||||
sender_idle_timeout = 300
|
||||
sender_retry_backoff_limit = 300
|
||||
force_disable_first_run_mode = true
|
||||
|
||||
[global.tls]
|
||||
dual_protocol = true
|
||||
|
|
|
|||
|
|
@ -25,6 +25,10 @@
|
|||
#
|
||||
# Also see the `[global.well_known]` config section at the very bottom.
|
||||
#
|
||||
# If `client` is not set under `[global.well_known]`, the server name will
|
||||
# be used as the base domain for user-facing links (such as password
|
||||
# reset links) created by Continuwuity.
|
||||
#
|
||||
# Examples of delegation:
|
||||
# - https://continuwuity.org/.well-known/matrix/server
|
||||
# - https://continuwuity.org/.well-known/matrix/client
|
||||
|
|
@ -290,6 +294,25 @@
|
|||
#
|
||||
#max_fetch_prev_events = 192
|
||||
|
||||
# How many incoming federation transactions the server is willing to be
|
||||
# processing at any given time before it becomes overloaded and starts
|
||||
# rejecting further transactions until some slots become available.
|
||||
#
|
||||
# Setting this value too low or too high may result in unstable
|
||||
# federation, and setting it too high may cause runaway resource usage.
|
||||
#
|
||||
#max_concurrent_inbound_transactions = 150
|
||||
|
||||
# Maximum age (in seconds) for cached federation transaction responses.
|
||||
# Entries older than this will be removed during cleanup.
|
||||
#
|
||||
#transaction_id_cache_max_age_secs = 7200 (2 hours)
|
||||
|
||||
# Maximum number of cached federation transaction responses.
|
||||
# When the cache exceeds this limit, older entries will be removed.
|
||||
#
|
||||
#transaction_id_cache_max_entries = 8192
|
||||
|
||||
# Default/base connection timeout (seconds). This is used only by URL
|
||||
# previews and update/news endpoint checks.
|
||||
#
|
||||
|
|
@ -433,7 +456,7 @@
|
|||
# If you would like registration only via token reg, please configure
|
||||
# `registration_token`.
|
||||
#
|
||||
#allow_registration = false
|
||||
#allow_registration = true
|
||||
|
||||
# If registration is enabled, and this setting is true, new users
|
||||
# registered after the first admin user will be automatically suspended
|
||||
|
|
@ -451,24 +474,43 @@
|
|||
#
|
||||
#suspend_on_register = false
|
||||
|
||||
# Server-wide default for space permission cascading (power levels and
|
||||
# role-based access). Individual Spaces can override this via the
|
||||
# `com.continuwuity.space.cascading` state event or the admin command
|
||||
# `!admin space roles enable/disable <space>`.
|
||||
#
|
||||
#space_permission_cascading = false
|
||||
|
||||
# Maximum number of spaces to cache role data for. When exceeded the
|
||||
# cache is cleared and repopulated on demand.
|
||||
#
|
||||
#space_roles_cache_flush_threshold = 1000
|
||||
|
||||
# Enabling this setting opens registration to anyone without restrictions.
|
||||
# This makes your server vulnerable to abuse
|
||||
#
|
||||
#yes_i_am_very_very_sure_i_want_an_open_registration_server_prone_to_abuse = false
|
||||
|
||||
# A static registration token that new users will have to provide when
|
||||
# creating an account. If unset and `allow_registration` is true,
|
||||
# you must set
|
||||
# `yes_i_am_very_very_sure_i_want_an_open_registration_server_prone_to_abuse`
|
||||
# to true to allow open registration without any conditions.
|
||||
#
|
||||
# If you do not want to set a static token, the `!admin token` commands
|
||||
# may also be used to manage registration tokens.
|
||||
# creating an account. This token does not supersede tokens from other
|
||||
# sources, such as the `!admin token` command or the
|
||||
# `registration_token_file` configuration option.
|
||||
#
|
||||
# example: "o&^uCtes4HPf0Vu@F20jQeeWE7"
|
||||
#
|
||||
#registration_token =
|
||||
|
||||
# A path to a file containing static registration tokens, one per line.
|
||||
# Tokens in this file do not supersede tokens from other sources, such as
|
||||
# the `!admin token` command or the `registration_token` configuration
|
||||
# option.
|
||||
#
|
||||
# The file will be read once, when Continuwuity starts. It is not
|
||||
# currently reread when the server configuration is reloaded. If the file
|
||||
# cannot be read, Continuwuity will fail to start.
|
||||
#
|
||||
#registration_token_file =
|
||||
|
||||
# The public site key for reCaptcha. If this is provided, reCaptcha
|
||||
# becomes required during registration. If both captcha *and*
|
||||
# registration token are enabled, both will be required during
|
||||
|
|
@ -527,12 +569,6 @@
|
|||
#
|
||||
#allow_public_room_directory_over_federation = false
|
||||
|
||||
# Set this to true to allow your server's public room directory to be
|
||||
# queried without client authentication (access token) through the Client
|
||||
# APIs. Set this to false to protect against /publicRooms spiders.
|
||||
#
|
||||
#allow_public_room_directory_without_auth = false
|
||||
|
||||
# Allow guests/unauthenticated users to access TURN credentials.
|
||||
#
|
||||
# This is the equivalent of Synapse's `turn_allow_guests` config option.
|
||||
|
|
@ -1056,14 +1092,6 @@
|
|||
#
|
||||
#rocksdb_repair = false
|
||||
|
||||
# This item is undocumented. Please contribute documentation for it.
|
||||
#
|
||||
#rocksdb_read_only = false
|
||||
|
||||
# This item is undocumented. Please contribute documentation for it.
|
||||
#
|
||||
#rocksdb_secondary = false
|
||||
|
||||
# Enables idle CPU priority for compaction thread. This is not enabled by
|
||||
# default to prevent compaction from falling too far behind on busy
|
||||
# systems.
|
||||
|
|
@ -1120,27 +1148,34 @@
|
|||
|
||||
# Allow local (your server only) presence updates/requests.
|
||||
#
|
||||
# Note that presence on continuwuity is very fast unlike Synapse's. If
|
||||
# using outgoing presence, this MUST be enabled.
|
||||
# Local presence must be enabled for outgoing presence to function.
|
||||
#
|
||||
# Note that local presence is not as heavy on the CPU as federated
|
||||
# presence, but will still become more expensive the more local users you
|
||||
# have.
|
||||
#
|
||||
#allow_local_presence = true
|
||||
|
||||
# Allow incoming federated presence updates/requests.
|
||||
# Allow incoming federated presence updates.
|
||||
#
|
||||
# This option receives presence updates from other servers, but does not
|
||||
# send any unless `allow_outgoing_presence` is true. Note that presence on
|
||||
# continuwuity is very fast unlike Synapse's.
|
||||
# This option enables processing inbound presence updates from other
|
||||
# servers. Without it, remote users will appear as if they are always
|
||||
# offline to your local users. This does not affect typing indicators or
|
||||
# read receipts.
|
||||
#
|
||||
#allow_incoming_presence = true
|
||||
|
||||
# Allow outgoing presence updates/requests.
|
||||
#
|
||||
# This option sends presence updates to other servers, but does not
|
||||
# receive any unless `allow_incoming_presence` is true. Note that presence
|
||||
# on continuwuity is very fast unlike Synapse's. If using outgoing
|
||||
# presence, you MUST enable `allow_local_presence` as well.
|
||||
# This option sends presence updates to other servers, and requires that
|
||||
# `allow_local_presence` is also enabled.
|
||||
#
|
||||
#allow_outgoing_presence = true
|
||||
# Note that outgoing presence is very heavy on the CPU and network, and
|
||||
# will typically cause extreme strain and slowdowns for no real benefit.
|
||||
# There are only a few clients that even implement presence, so you
|
||||
# probably don't want to enable this.
|
||||
#
|
||||
#allow_outgoing_presence = false
|
||||
|
||||
# How many seconds without presence updates before you become idle.
|
||||
# Defaults to 5 minutes.
|
||||
|
|
@ -1174,6 +1209,10 @@
|
|||
|
||||
# Allow sending read receipts to remote servers.
|
||||
#
|
||||
# Note that sending read receipts to remote servers in large rooms with
|
||||
# lots of other homeservers may cause additional strain on the CPU and
|
||||
# network.
|
||||
#
|
||||
#allow_outgoing_read_receipts = true
|
||||
|
||||
# Allow local typing updates.
|
||||
|
|
@ -1185,6 +1224,10 @@
|
|||
|
||||
# Allow outgoing typing updates to federation.
|
||||
#
|
||||
# Note that sending typing indicators to remote servers in large rooms
|
||||
# with lots of other homeservers may cause additional strain on the CPU
|
||||
# and network.
|
||||
#
|
||||
#allow_outgoing_typing = true
|
||||
|
||||
# Allow incoming typing updates from federation.
|
||||
|
|
@ -1318,7 +1361,7 @@
|
|||
# sender user's server name, inbound federation X-Matrix origin, and
|
||||
# outbound federation handler.
|
||||
#
|
||||
# You can set this to ["*"] to block all servers by default, and then
|
||||
# You can set this to [".*"] to block all servers by default, and then
|
||||
# use `allowed_remote_server_names` to allow only specific servers.
|
||||
#
|
||||
# example: ["badserver\\.tld$", "badphrase", "19dollarfortnitecards"]
|
||||
|
|
@ -1474,6 +1517,15 @@
|
|||
#
|
||||
#url_preview_check_root_domain = false
|
||||
|
||||
# User agent that is used specifically when fetching url previews.
|
||||
#
|
||||
#url_preview_user_agent = "continuwuity/<version> (bot; +https://continuwuity.org)"
|
||||
|
||||
# Determines whether audio and video files will be downloaded for URL
|
||||
# previews.
|
||||
#
|
||||
#url_preview_allow_audio_video = false
|
||||
|
||||
# List of forbidden room aliases and room IDs as strings of regex
|
||||
# patterns.
|
||||
#
|
||||
|
|
@ -1759,6 +1811,11 @@
|
|||
#
|
||||
#config_reload_signal = true
|
||||
|
||||
# Allow search engines and crawlers to index Continuwuity's built-in
|
||||
# webpages served under the `/_continuwuity/` prefix.
|
||||
#
|
||||
#allow_web_indexing = false
|
||||
|
||||
[global.tls]
|
||||
|
||||
# Path to a valid TLS certificate file.
|
||||
|
|
@ -1820,6 +1877,16 @@
|
|||
#
|
||||
#support_mxid =
|
||||
|
||||
# **DEPRECATED**: Use `[global.matrix_rtc].foci` instead.
|
||||
#
|
||||
# A list of MatrixRTC foci URLs which will be served as part of the
|
||||
# MSC4143 client endpoint at /.well-known/matrix/client.
|
||||
#
|
||||
# This option is deprecated and will be removed in a future release.
|
||||
# Please migrate to the new `[global.matrix_rtc]` config section.
|
||||
#
|
||||
#rtc_focus_server_urls = []
|
||||
|
||||
[global.blurhashing]
|
||||
|
||||
# blurhashing x component, 4 is recommended by https://blurha.sh/
|
||||
|
|
@ -1838,6 +1905,23 @@
|
|||
#
|
||||
#blurhash_max_raw_size = 33554432
|
||||
|
||||
[global.matrix_rtc]
|
||||
|
||||
# A list of MatrixRTC foci (transports) which will be served via the
|
||||
# MSC4143 RTC transports endpoint at
|
||||
# `/_matrix/client/v1/rtc/transports`. If you're setting up livekit,
|
||||
# you'd want something like:
|
||||
# ```toml
|
||||
# [global.matrix_rtc]
|
||||
# foci = [
|
||||
# { type = "livekit", livekit_service_url = "https://livekit.example.com" },
|
||||
# ]
|
||||
# ```
|
||||
#
|
||||
# To disable, set this to an empty list (`[]`).
|
||||
#
|
||||
#foci = []
|
||||
|
||||
[global.ldap]
|
||||
|
||||
# Whether to enable LDAP login.
|
||||
|
|
|
|||
|
|
@ -48,11 +48,11 @@ EOF
|
|||
|
||||
# Developer tool versions
|
||||
# renovate: datasource=github-releases depName=cargo-bins/cargo-binstall
|
||||
ENV BINSTALL_VERSION=1.17.4
|
||||
ENV BINSTALL_VERSION=1.17.7
|
||||
# renovate: datasource=github-releases depName=psastras/sbom-rs
|
||||
ENV CARGO_SBOM_VERSION=0.9.1
|
||||
# renovate: datasource=crate depName=lddtree
|
||||
ENV LDDTREE_VERSION=0.4.0
|
||||
ENV LDDTREE_VERSION=0.5.0
|
||||
# renovate: datasource=crate depName=timelord-cli
|
||||
ENV TIMELORD_VERSION=3.0.1
|
||||
|
||||
|
|
@ -162,6 +162,7 @@ ENV CONDUWUIT_VERSION_EXTRA=$CONDUWUIT_VERSION_EXTRA
|
|||
ENV CONTINUWUITY_VERSION_EXTRA=$CONTINUWUITY_VERSION_EXTRA
|
||||
|
||||
ARG RUST_PROFILE=release
|
||||
ARG CARGO_FEATURES="default,http3"
|
||||
|
||||
# Build the binary
|
||||
RUN --mount=type=cache,target=/usr/local/cargo/registry \
|
||||
|
|
@ -171,18 +172,32 @@ RUN --mount=type=cache,target=/usr/local/cargo/registry \
|
|||
set -o allexport
|
||||
set -o xtrace
|
||||
. /etc/environment
|
||||
|
||||
# Check if http3 feature is enabled and set appropriate RUSTFLAGS
|
||||
if echo "${CARGO_FEATURES}" | grep -q "http3"; then
|
||||
export RUSTFLAGS="${RUSTFLAGS} --cfg reqwest_unstable"
|
||||
else
|
||||
export RUSTFLAGS="${RUSTFLAGS}"
|
||||
fi
|
||||
|
||||
RUST_PROFILE_DIR="${RUST_PROFILE}"
|
||||
if [[ "${RUST_PROFILE}" == "dev" ]]; then
|
||||
RUST_PROFILE_DIR="debug"
|
||||
fi
|
||||
|
||||
TARGET_DIR=($(cargo metadata --no-deps --format-version 1 | \
|
||||
jq -r ".target_directory"))
|
||||
mkdir /out/sbin
|
||||
PACKAGE=conduwuit
|
||||
xx-cargo build --locked --profile ${RUST_PROFILE} \
|
||||
--no-default-features --features ${CARGO_FEATURES} \
|
||||
-p $PACKAGE;
|
||||
BINARIES=($(cargo metadata --no-deps --format-version 1 | \
|
||||
jq -r ".packages[] | select(.name == \"$PACKAGE\") | .targets[] | select( .kind | map(. == \"bin\") | any ) | .name"))
|
||||
for BINARY in "${BINARIES[@]}"; do
|
||||
echo $BINARY
|
||||
xx-verify $TARGET_DIR/$(xx-cargo --print-target-triple)/${RUST_PROFILE}/$BINARY
|
||||
cp $TARGET_DIR/$(xx-cargo --print-target-triple)/${RUST_PROFILE}/$BINARY /out/sbin/$BINARY
|
||||
xx-verify $TARGET_DIR/$(xx-cargo --print-target-triple)/${RUST_PROFILE_DIR}/$BINARY
|
||||
cp $TARGET_DIR/$(xx-cargo --print-target-triple)/${RUST_PROFILE_DIR}/$BINARY /out/sbin/$BINARY
|
||||
done
|
||||
EOF
|
||||
|
||||
|
|
|
|||
|
|
@ -2,9 +2,9 @@ FROM ubuntu:latest
|
|||
EXPOSE 8008
|
||||
EXPOSE 8448
|
||||
RUN apt-get update && apt-get install -y ca-certificates liburing2 && rm -rf /var/lib/apt/lists/*
|
||||
RUN mkdir -p /etc/continuwuity /var/lib/continuwuity
|
||||
COPY docker/complement-entrypoint.sh /usr/local/bin/complement-entrypoint.sh
|
||||
COPY docker/complement.config.toml /etc/continuwuity/config.toml
|
||||
RUN mkdir -p /etc/continuwuity /var/lib/continuwuity /usr/local/bin/
|
||||
COPY complement/complement-entrypoint.sh /usr/local/bin/complement-entrypoint.sh
|
||||
COPY complement/complement.config.toml /etc/continuwuity/config.toml
|
||||
COPY target/debug/conduwuit /usr/local/bin/conduwuit
|
||||
RUN chmod +x /usr/local/bin/conduwuit /usr/local/bin/complement-entrypoint.sh
|
||||
#HEALTHCHECK --interval=30s --timeout=5s CMD curl --fail http://localhost:8008/_continuwuity/server_version || exit 1
|
||||
|
|
|
|||
|
|
@ -18,11 +18,11 @@ RUN --mount=type=cache,target=/etc/apk/cache apk add \
|
|||
|
||||
# Developer tool versions
|
||||
# renovate: datasource=github-releases depName=cargo-bins/cargo-binstall
|
||||
ENV BINSTALL_VERSION=1.17.4
|
||||
ENV BINSTALL_VERSION=1.17.7
|
||||
# renovate: datasource=github-releases depName=psastras/sbom-rs
|
||||
ENV CARGO_SBOM_VERSION=0.9.1
|
||||
# renovate: datasource=crate depName=lddtree
|
||||
ENV LDDTREE_VERSION=0.4.0
|
||||
ENV LDDTREE_VERSION=0.5.0
|
||||
|
||||
# Install unpackaged tools
|
||||
RUN <<EOF
|
||||
|
|
|
|||
|
|
@ -15,9 +15,9 @@
|
|||
"label": "Deploying"
|
||||
},
|
||||
{
|
||||
"type": "file",
|
||||
"name": "turn",
|
||||
"label": "TURN"
|
||||
"type": "dir",
|
||||
"name": "calls",
|
||||
"label": "Calls"
|
||||
},
|
||||
{
|
||||
"type": "file",
|
||||
|
|
@ -34,6 +34,11 @@
|
|||
"name": "troubleshooting",
|
||||
"label": "Troubleshooting"
|
||||
},
|
||||
{
|
||||
"type": "dir",
|
||||
"name": "advanced",
|
||||
"label": "Advanced"
|
||||
},
|
||||
"security",
|
||||
{
|
||||
"type": "dir-section-header",
|
||||
|
|
@ -64,6 +69,11 @@
|
|||
"label": "Configuration Reference",
|
||||
"name": "/reference/config"
|
||||
},
|
||||
{
|
||||
"type": "file",
|
||||
"label": "Environment Variables",
|
||||
"name": "/reference/environment-variables"
|
||||
},
|
||||
{
|
||||
"type": "dir",
|
||||
"label": "Admin Command Reference",
|
||||
|
|
|
|||
|
|
@ -2,7 +2,7 @@
|
|||
{
|
||||
"text": "Guide",
|
||||
"link": "/introduction",
|
||||
"activeMatch": "^/(introduction|configuration|deploying|turn|appservices|maintenance|troubleshooting)"
|
||||
"activeMatch": "^/(introduction|configuration|deploying|calls|appservices|maintenance|troubleshooting|advanced)"
|
||||
},
|
||||
{
|
||||
"text": "Development",
|
||||
|
|
|
|||
7
docs/advanced/_meta.json
Normal file
7
docs/advanced/_meta.json
Normal file
|
|
@ -0,0 +1,7 @@
|
|||
[
|
||||
{
|
||||
"type": "file",
|
||||
"name": "delegation",
|
||||
"label": "Delegation / split-domain"
|
||||
}
|
||||
]
|
||||
206
docs/advanced/delegation.mdx
Normal file
206
docs/advanced/delegation.mdx
Normal file
|
|
@ -0,0 +1,206 @@
|
|||
# Delegation/split-domain deployment
|
||||
|
||||
Matrix allows clients and servers to discover a homeserver's "true" destination via **`.well-known` delegation**. This is especially useful if you would like to:
|
||||
|
||||
- Serve Continuwuity on a subdomain while having only the base domain for your usernames
|
||||
- Use a port other than `:8448` for server-to-server connections
|
||||
|
||||
This guide will show you how to have `@user:example.com` usernames while serving Continuwuity on `https://matrix.example.com`. It assumes you are using port 443 for both client-to-server connections and server-to-server federation.
|
||||
|
||||
## Configuration
|
||||
|
||||
First, ensure you have set up A/AAAA records for `matrix.example.com` and `example.com` pointing to your IP.
|
||||
|
||||
Then, ensure that the `server_name` field matches your intended username suffix. If this is not the case, you **MUST** wipe the database directory and reinstall Continuwuity with your desired `server_name`.
|
||||
|
||||
Then, in the `[global.well_known]` section of your config file, add the following fields:
|
||||
|
||||
```toml
|
||||
[global.well_known]
|
||||
|
||||
client = "https://matrix.example.com"
|
||||
|
||||
# port number MUST be specified
|
||||
server = "matrix.example.com:443"
|
||||
|
||||
# (optional) customize your support contacts
|
||||
#support_page =
|
||||
#support_role = "m.role.admin"
|
||||
#support_email =
|
||||
#support_mxid = "@user:example.com"
|
||||
```
|
||||
|
||||
Alternatively if you are using Docker, you can set the `CONTINUWUITY_WELL_KNOWN` environment variable as below:
|
||||
|
||||
```yaml
|
||||
services:
|
||||
continuwuity:
|
||||
...
|
||||
environment:
|
||||
CONTINUWUITY_WELL_KNOWN: |
|
||||
{
|
||||
client=https://matrix.example.com,
|
||||
server=matrix.example.com:443
|
||||
}
|
||||
```
|
||||
|
||||
## Serving with a reverse proxy
|
||||
|
||||
After doing the steps above, Continuwuity will serve these 3 JSON files:
|
||||
|
||||
- `/.well-known/matrix/client`: for Client-Server discovery
|
||||
- `/.well-known/matrix/server`: for Server-Server (federation) discovery
|
||||
- `/.well-known/matrix/support`: admin contact details (strongly recommended to have)
|
||||
|
||||
To enable full discovery, you will need to reverse proxy these paths from the base domain back to Continuwuity.
|
||||
|
||||
<details>
|
||||
|
||||
<summary>For Caddy</summary>
|
||||
|
||||
```
|
||||
matrix.example.com:443 {
|
||||
reverse_proxy 127.0.0.1:8008
|
||||
}
|
||||
|
||||
example.com:443 {
|
||||
reverse_proxy /.well-known/matrix* 127.0.0.1:8008
|
||||
}
|
||||
```
|
||||
|
||||
</details>
|
||||
|
||||
<details>
|
||||
|
||||
<summary>For Traefik (via Docker labels)</summary>
|
||||
|
||||
```
|
||||
services:
|
||||
continuwuity:
|
||||
...
|
||||
labels:
|
||||
- "traefik.enable=true"
|
||||
- "traefik.http.routers.continuwuity.rule=(Host(`matrix.example.com`) || (Host(`example.com`) && PathPrefix(`/.well-known/matrix`)))"
|
||||
- "traefik.http.routers.continuwuity.service=continuwuity"
|
||||
- "traefik.http.services.continuwuity.loadbalancer.server.port=8008"
|
||||
```
|
||||
|
||||
</details>
|
||||
|
||||
Restart Continuwuity and your reverse proxy. Once that's done, visit these routes and check that the responses match the examples below:
|
||||
|
||||
<details open>
|
||||
|
||||
<summary>`https://example.com/.well-known/matrix/server`</summary>
|
||||
|
||||
```json
|
||||
{
|
||||
"m.server": "matrix.example.com:443"
|
||||
}
|
||||
```
|
||||
|
||||
</details>
|
||||
|
||||
<details open>
|
||||
|
||||
<summary>`https://example.com/.well-known/matrix/client`</summary>
|
||||
|
||||
```json
|
||||
{
|
||||
"m.homeserver": {
|
||||
"base_url": "https://matrix.example.com/"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
</details>
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### Cannot log in with web clients
|
||||
|
||||
Make sure there is an `Access-Control-Allow-Origin: *` header in your `/.well-known/matrix/client` path. While Continuwuity serves this header by default, it may be dropped by reverse proxies or other middlewares.
|
||||
|
||||
---
|
||||
|
||||
## Using SRV records (not recommended)
|
||||
|
||||
:::warning
|
||||
The following methods are **not recommended** due to increased complexity with little benefit. If you have already set up `.well-known` delegation as above, you can safely skip this part.
|
||||
:::
|
||||
|
||||
The following methods use SRV DNS records and only work with federation traffic. They are only included for completeness.
|
||||
|
||||
<details>
|
||||
|
||||
<summary>Using only SRV records</summary>
|
||||
|
||||
If you can't set up `/.well-known/matrix/server` on :443 for some reason, you can set up a SRV record (via your DNS provider) as below:
|
||||
|
||||
- Service and name: `_matrix-fed._tcp.example.com.`
|
||||
- Priority: `10` (can be any number)
|
||||
- Weight: `10` (can be any number)
|
||||
- Port: `443`
|
||||
- Target: `matrix.example.com.`
|
||||
|
||||
On the target's IP at port 443, you must configure a valid route and cert for your server name, `example.com`. Therefore, this method only works to redirect traffic into the right IP/port combo, and can not delegate your federation to a different domain.
|
||||
|
||||
</details>
|
||||
|
||||
<details>
|
||||
|
||||
<summary>Using SRV records + .well-known</summary>
|
||||
|
||||
You can also set up `/.well-known/matrix/server` with a delegated domain but no ports:
|
||||
|
||||
```toml
|
||||
[global.well_known]
|
||||
server = "matrix.example.com"
|
||||
```
|
||||
|
||||
Then, set up a SRV record (via your DNS provider) to announce the port number as below:
|
||||
|
||||
- Service and name: `_matrix-fed._tcp.matrix.example.com.`
|
||||
- Priority: `10` (can be any number)
|
||||
- Weight: `10` (can be any number)
|
||||
- Port: `443`
|
||||
- Target: `matrix.example.com.`
|
||||
|
||||
On the target's IP at port 443, you'll need to provide a valid route and cert for `matrix.example.com`. It provides the same feature as pure `.well-known` delegation, albeit with more parts to handle.
|
||||
|
||||
</details>
|
||||
|
||||
<details>
|
||||
|
||||
<summary>Using SRV records as a fallback for .well-known delegation</summary>
|
||||
|
||||
Assume your delegation is as below:
|
||||
|
||||
```toml
|
||||
[global.well_known]
|
||||
server = "example.com:443"
|
||||
```
|
||||
|
||||
If your Continuwuity instance becomes temporarily unreachable, other servers will not be able to find your `/.well-known/matrix/server` file, and will default to using `server_name:8448`. This incorrect cache can persist for a long time, and would hinder re-federation when your server eventually comes back online.
|
||||
|
||||
If you want other servers to default to using port :443 even when your server is offline, you could set up a SRV record (via your DNS provider) as follows:
|
||||
|
||||
- Service and name: `_matrix-fed._tcp.example.com.`
|
||||
- Priority: `10` (can be any number)
|
||||
- Weight: `10` (can be any number)
|
||||
- Port: `443`
|
||||
- Target: `example.com.`
|
||||
|
||||
On the target's IP at port 443, you'll need to provide a valid route and cert for `example.com`.
|
||||
|
||||
</details>
|
||||
|
||||
---
|
||||
|
||||
## Related Documentation
|
||||
|
||||
See the following Matrix Specs for full details on client/server resolution mechanisms:
|
||||
|
||||
- [Server-to-Server resolution](https://spec.matrix.org/v1.17/server-server-api/#resolving-server-names) (see this for more information on SRV records)
|
||||
- [Client-to-Server resolution](https://spec.matrix.org/v1.17/client-server-api/#server-discovery)
|
||||
- [MSC1929: Homeserver Admin Contact and Support page](https://github.com/matrix-org/matrix-spec-proposals/pull/1929)
|
||||
13
docs/calls.mdx
Normal file
13
docs/calls.mdx
Normal file
|
|
@ -0,0 +1,13 @@
|
|||
# Calls
|
||||
|
||||
Matrix supports two types of calls:
|
||||
|
||||
- Element Call powered by [MatrixRTC](https://half-shot.github.io/msc-crafter/#msc/4143) and [LiveKit](https://github.com/livekit/livekit)
|
||||
- Legacy calls, sometimes using Jitsi
|
||||
|
||||
Both types of calls are supported by different sets of clients, but most clients are moving towards MatrixRTC / Element Call.
|
||||
|
||||
For either one to work correctly, you have to do some additional setup.
|
||||
|
||||
- For legacy calls to work, you need to set up a TURN/STUN server. [Read the TURN guide for tips on how to set up coturn](./calls/turn.mdx)
|
||||
- For MatrixRTC / Element Call to work, you have to set up the LiveKit backend (foci). LiveKit also uses TURN/STUN to increase reliability, so you might want to configure your TURN server first. [Read the LiveKit guide](./calls/livekit.mdx)
|
||||
12
docs/calls/_meta.json
Normal file
12
docs/calls/_meta.json
Normal file
|
|
@ -0,0 +1,12 @@
|
|||
[
|
||||
{
|
||||
"type": "file",
|
||||
"name": "turn",
|
||||
"label": "TURN"
|
||||
},
|
||||
{
|
||||
"type": "file",
|
||||
"name": "livekit",
|
||||
"label": "MatrixRTC / LiveKit"
|
||||
}
|
||||
]
|
||||
240
docs/calls/livekit.mdx
Normal file
240
docs/calls/livekit.mdx
Normal file
|
|
@ -0,0 +1,240 @@
|
|||
# Matrix RTC/Element Call Setup
|
||||
|
||||
:::info
|
||||
This guide assumes that you are using docker compose for deployment. LiveKit only provides Docker images.
|
||||
:::
|
||||
|
||||
## Instructions
|
||||
|
||||
### 1. Domain
|
||||
|
||||
LiveKit should live on its own domain or subdomain. In this guide we use `livekit.example.com` - this should be replaced with a domain you control.
|
||||
|
||||
Make sure the DNS record for the (sub)domain you plan to use is pointed to your server.
|
||||
|
||||
### 2. Services
|
||||
|
||||
Using LiveKit with Matrix requires two services - Livekit itself, and a service (`lk-jwt-service`) that grants Matrix users permission to connect to it.
|
||||
|
||||
You must generate a key and secret to allow the Matrix service to authenticate with LiveKit. `LK_MATRIX_KEY` should be around 20 random characters, and `LK_MATRIX_SECRET` should be around 64. Remember to replace these with the actual values!
|
||||
|
||||
:::tip Generating the secrets
|
||||
LiveKit provides a utility to generate secure random keys
|
||||
```bash
|
||||
docker run --rm livekit/livekit-server:latest generate-keys
|
||||
```
|
||||
:::
|
||||
|
||||
```yaml
|
||||
services:
|
||||
lk-jwt-service:
|
||||
image: ghcr.io/element-hq/lk-jwt-service:latest
|
||||
container_name: lk-jwt-service
|
||||
environment:
|
||||
- LIVEKIT_JWT_BIND=:8081
|
||||
- LIVEKIT_URL=wss://livekit.example.com
|
||||
- LIVEKIT_KEY=LK_MATRIX_KEY
|
||||
- LIVEKIT_SECRET=LK_MATRIX_SECRET
|
||||
- LIVEKIT_FULL_ACCESS_HOMESERVERS=example.com
|
||||
restart: unless-stopped
|
||||
ports:
|
||||
- "8081:8081"
|
||||
|
||||
livekit:
|
||||
image: livekit/livekit-server:latest
|
||||
container_name: livekit
|
||||
command: --config /etc/livekit.yaml
|
||||
restart: unless-stopped
|
||||
volumes:
|
||||
- ./livekit.yaml:/etc/livekit.yaml:ro
|
||||
network_mode: "host" # /!\ LiveKit binds to all addresses by default.
|
||||
# Make sure port 7880 is blocked by your firewall to prevent access bypassing your reverse proxy
|
||||
# Alternatively, uncomment the lines below and comment `network_mode: "host"` above to specify port mappings.
|
||||
# ports:
|
||||
# - "127.0.0.1:7880:7880/tcp"
|
||||
# - "7881:7881/tcp"
|
||||
# - "50100-50200:50100-50200/udp"
|
||||
```
|
||||
|
||||
Next, we need to configure LiveKit. In the same directory, create `livekit.yaml` with the following content - remembering to replace `LK_MATRIX_KEY` and `LK_MATRIX_SECRET` with the values you generated:
|
||||
|
||||
```yaml
|
||||
port: 7880
|
||||
bind_addresses:
|
||||
- ""
|
||||
rtc:
|
||||
tcp_port: 7881
|
||||
port_range_start: 50100
|
||||
port_range_end: 50200
|
||||
use_external_ip: true
|
||||
enable_loopback_candidate: false
|
||||
keys:
|
||||
LK_MATRIX_KEY: LK_MATRIX_SECRET
|
||||
```
|
||||
|
||||
#### Firewall hints
|
||||
|
||||
You will need to allow ports `7881/tcp` and `50100:50200/udp` through your firewall. If you use UFW, the commands are: `ufw allow 7881/tcp` and `ufw allow 50100:50200/udp`.
|
||||
|
||||
### 3. Telling clients where to find LiveKit
|
||||
|
||||
To tell clients where to find LiveKit, you need to add the address of your `lk-jwt-service` to the `[global.matrix_rtc]` config section using the `foci` option.
|
||||
|
||||
The variable should be a list of servers serving as MatrixRTC endpoints. Clients discover these via the `/_matrix/client/v1/rtc/transports` endpoint (MSC4143).
|
||||
|
||||
```toml
|
||||
[global.matrix_rtc]
|
||||
foci = [
|
||||
{ type = "livekit", livekit_service_url = "https://livekit.example.com" },
|
||||
]
|
||||
```
|
||||
|
||||
Remember to replace the URL with the address you are deploying your instance of lk-jwt-service to.
|
||||
|
||||
### 4. Configure your Reverse Proxy
|
||||
|
||||
Reverse proxies can be configured in many different ways - so we can't provide a step by step for this.
|
||||
|
||||
By default, all routes should be forwarded to Livekit with the exception of the following path prefixes, which should be forwarded to the JWT/Authentication service:
|
||||
|
||||
- `/sfu/get`
|
||||
- `/healthz`
|
||||
- `/get_token`
|
||||
|
||||
<details>
|
||||
<summary>Example caddy config</summary>
|
||||
```
|
||||
matrix-rtc.example.com {
|
||||
|
||||
# for lk-jwt-service
|
||||
@lk-jwt-service path /sfu/get* /healthz* /get_token*
|
||||
route @lk-jwt-service {
|
||||
reverse_proxy 127.0.0.1:8081
|
||||
}
|
||||
|
||||
# for livekit
|
||||
reverse_proxy 127.0.0.1:7880
|
||||
}
|
||||
```
|
||||
</details>
|
||||
|
||||
<details>
|
||||
<summary>Example nginx config</summary>
|
||||
```
|
||||
server {
|
||||
server_name matrix-rtc.example.com;
|
||||
|
||||
# for lk-jwt-service
|
||||
location ~ ^/(sfu/get|healthz|get_token) {
|
||||
proxy_pass http://127.0.0.1:8081$request_uri;
|
||||
proxy_set_header X-Forwarded-For $remote_addr;
|
||||
proxy_set_header X-Real-IP $remote_addr;
|
||||
proxy_set_header Host $http_host;
|
||||
proxy_buffering off;
|
||||
}
|
||||
|
||||
# for livekit
|
||||
location / {
|
||||
proxy_pass http://127.0.0.1:7880$request_uri;
|
||||
proxy_set_header X-Forwarded-For $remote_addr;
|
||||
proxy_set_header X-Real-IP $remote_addr;
|
||||
proxy_set_header Host $http_host;
|
||||
proxy_buffering off;
|
||||
|
||||
# websocket
|
||||
proxy_http_version 1.1;
|
||||
proxy_set_header Upgrade $http_upgrade;
|
||||
proxy_set_header Connection $connection_upgrade;
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
Note that for websockets to work, you need to have this somewhere outside your server block:
|
||||
```
|
||||
map $http_upgrade $connection_upgrade {
|
||||
default upgrade;
|
||||
'' close;
|
||||
}
|
||||
```
|
||||
</details>
|
||||
|
||||
<details>
|
||||
<summary>Example traefik router</summary>
|
||||
```
|
||||
# on LiveKit itself
|
||||
traefik.http.routers.livekit.rule=Host(`livekit.example.com`)
|
||||
# on the JWT service
|
||||
traefik.http.routers.livekit-jwt.rule=Host(`livekit.example.com`) && (PathPrefix(`/sfu/get`) || PathPrefix(`/healthz`) || PathPrefix(`/get_token`))
|
||||
```
|
||||
</details>
|
||||
|
||||
|
||||
### 5. Start Everything
|
||||
|
||||
Start up the services using your usual method - for example `docker compose up -d`.
|
||||
|
||||
## Additional Configuration
|
||||
|
||||
### TURN Integration
|
||||
|
||||
If you've already set up coturn, there may be a port clash between the two services. To fix this, make sure coturn's `min-port` and `max-port` are set so its range doesn't overlap with LiveKit's:
|
||||
|
||||
```ini
|
||||
min-port=50201
|
||||
max-port=65535
|
||||
```
|
||||
|
||||
To improve LiveKit's reliability, you can configure it to use your coturn server.
|
||||
|
||||
Generate a long random secret for LiveKit, and add it to your coturn config under the `static-auth-secret` option. You can add as many secrets as you want - so set a different one for each thing using your TURN server.
|
||||
|
||||
Then configure livekit, making sure to replace `COTURN_SECRET`:
|
||||
|
||||
```yaml
|
||||
# livekit.yaml
|
||||
rtc:
|
||||
turn_servers:
|
||||
- host: coturn.ellis.link
|
||||
port: 3478
|
||||
protocol: tcp
|
||||
secret: "COTURN_SECRET"
|
||||
- host: coturn.ellis.link
|
||||
port: 5349
|
||||
protocol: tls # Only if you've set up TLS in your coturn
|
||||
secret: "COTURN_SECRET"
|
||||
- host: coturn.ellis.link
|
||||
port: 3478
|
||||
protocol: udp
|
||||
secret: "COTURN_SECRET"
|
||||
```
|
||||
|
||||
## LiveKit's built-in TURN server
|
||||
|
||||
LiveKit includes a built-in TURN server which can be used in place of an external option. This TURN server will only work with LiveKit, so you can't use it for legacy Matrix calling - or anything else.
|
||||
|
||||
If you don't want to set up a separate TURN server, you can enable this with the following changes:
|
||||
|
||||
```yaml
|
||||
### add this to livekit.yaml ###
|
||||
turn:
|
||||
enabled: true
|
||||
udp_port: 3478
|
||||
relay_range_start: 50300
|
||||
relay_range_end: 50400
|
||||
domain: matrix-rtc.example.com
|
||||
```
|
||||
|
||||
```yaml
|
||||
### Add these to docker-compose ###
|
||||
- "3478:3478/udp"
|
||||
- "50300-50400:50300-50400/udp"
|
||||
```
|
||||
|
||||
### Related Documentation
|
||||
|
||||
- [LiveKit GitHub](https://github.com/livekit/livekit)
|
||||
- [LiveKit Connection Tester](https://livekit.io/connection-test) - use with the token returned by `/sfu/get` or `/get_token`
|
||||
- [MatrixRTC proposal](https://half-shot.github.io/msc-crafter/#msc/4143)
|
||||
- [Synapse documentation](https://github.com/element-hq/element-call/blob/livekit/docs/self-hosting.md)
|
||||
- [Community guide](https://tomfos.tr/matrix/livekit/)
|
||||
- [Community guide](https://blog.kimiblock.top/2024/12/24/hosting-element-call/)
|
||||
214
docs/calls/turn.mdx
Normal file
214
docs/calls/turn.mdx
Normal file
|
|
@ -0,0 +1,214 @@
|
|||
# Setting up TURN/STUN
|
||||
|
||||
[TURN](https://en.wikipedia.org/wiki/Traversal_Using_Relays_around_NAT) and [STUN](https://en.wikipedia.org/wiki/STUN) are used as a component in many calling systems. Matrix uses them directly for legacy calls and indirectly for MatrixRTC via Livekit.
|
||||
|
||||
Continuwuity recommends using [Coturn](https://github.com/coturn/coturn) as your TURN/STUN server, which is available as a Docker image or a distro package.
|
||||
|
||||
## Installing Coturn
|
||||
|
||||
### Configuration
|
||||
|
||||
Create a configuration file called `coturn.conf` containing:
|
||||
|
||||
```ini
|
||||
use-auth-secret
|
||||
static-auth-secret=<a secret key>
|
||||
realm=<your server domain>
|
||||
```
|
||||
|
||||
:::tip Generating a secure secret
|
||||
A common way to generate a suitable alphanumeric secret key is by using:
|
||||
```bash
|
||||
pwgen -s 64 1
|
||||
```
|
||||
:::
|
||||
|
||||
#### Port Configuration
|
||||
|
||||
By default, coturn uses the following ports:
|
||||
- `3478` (UDP/TCP): Standard TURN/STUN port
|
||||
- `5349` (UDP/TCP): TURN/STUN over TLS
|
||||
- `49152-65535` (UDP): Media relay ports
|
||||
|
||||
If you're also running LiveKit, you'll need to avoid port conflicts. Configure non-overlapping port ranges:
|
||||
|
||||
```ini
|
||||
# In coturn.conf
|
||||
min-port=50201
|
||||
max-port=65535
|
||||
```
|
||||
|
||||
This leaves ports `50100-50200` available for LiveKit's default configuration.
|
||||
|
||||
### Running with Docker
|
||||
|
||||
Run the [Coturn](https://hub.docker.com/r/coturn/coturn) image using:
|
||||
|
||||
```bash
|
||||
docker run -d --network=host \
|
||||
-v $(pwd)/coturn.conf:/etc/coturn/turnserver.conf \
|
||||
coturn/coturn
|
||||
```
|
||||
|
||||
### Running with Docker Compose
|
||||
|
||||
Create a `docker-compose.yml` file and run `docker compose up -d`:
|
||||
|
||||
```yaml
|
||||
version: '3'
|
||||
services:
|
||||
turn:
|
||||
container_name: coturn-server
|
||||
image: docker.io/coturn/coturn
|
||||
restart: unless-stopped
|
||||
network_mode: "host"
|
||||
volumes:
|
||||
- ./coturn.conf:/etc/coturn/turnserver.conf
|
||||
```
|
||||
|
||||
:::info Why host networking?
|
||||
Coturn uses host networking mode because it needs to bind to multiple ports and work with various network protocols. Using host networking is better for performance, and reduces configuration complexity. To understand alternative configuration options, visit [Coturn's Docker documentation](https://github.com/coturn/coturn/blob/master/docker/coturn/README.md).
|
||||
:::
|
||||
|
||||
### Security Recommendations
|
||||
|
||||
For security best practices, see Synapse's [Coturn documentation](https://element-hq.github.io/synapse/latest/turn-howto.html), which includes important firewall and access control recommendations.
|
||||
|
||||
## Configuring Continuwuity
|
||||
|
||||
Once your TURN server is running, configure Continuwuity to provide credentials to clients. Add the following to your Continuwuity configuration file:
|
||||
|
||||
### Shared Secret Authentication (Recommended)
|
||||
|
||||
This is the most secure method and generates time-limited credentials automatically:
|
||||
|
||||
```toml
|
||||
# TURN URIs that clients should connect to
|
||||
turn_uris = [
|
||||
"turn:coturn.example.com?transport=udp",
|
||||
"turn:coturn.example.com?transport=tcp",
|
||||
"turns:coturn.example.com?transport=udp",
|
||||
"turns:coturn.example.com?transport=tcp"
|
||||
]
|
||||
|
||||
# Shared secret for generating credentials (must match coturn's static-auth-secret)
|
||||
turn_secret = "<your coturn static-auth-secret>"
|
||||
|
||||
# Optional: Read secret from a file instead (takes priority over turn_secret)
|
||||
# turn_secret_file = "/etc/continuwuity/.turn_secret"
|
||||
|
||||
# TTL for generated credentials in seconds (default: 86400 = 24 hours)
|
||||
turn_ttl = 86400
|
||||
```
|
||||
|
||||
:::tip Using TLS
|
||||
The `turns:` URI prefix instructs clients to connect to TURN over TLS, which is highly recommended for security. Make sure you've configured TLS in your coturn server first.
|
||||
:::
|
||||
|
||||
### Static Credentials (Alternative)
|
||||
|
||||
If you prefer static username/password credentials instead of shared secrets:
|
||||
|
||||
```toml
|
||||
turn_uris = [
|
||||
"turn:coturn.example.com?transport=udp",
|
||||
"turn:coturn.example.com?transport=tcp"
|
||||
]
|
||||
|
||||
turn_username = "your_username"
|
||||
turn_password = "your_password"
|
||||
```
|
||||
|
||||
:::warning
|
||||
Static credentials are less secure than shared secrets because they don't expire and must be configured in coturn separately. It is strongly advised you use shared secret authentication.
|
||||
:::
|
||||
|
||||
### Guest Access
|
||||
|
||||
By default, TURN credentials require client authentication. To allow unauthenticated access:
|
||||
|
||||
```toml
|
||||
turn_allow_guests = true
|
||||
```
|
||||
|
||||
:::caution
|
||||
This is not recommended as it allows unauthenticated users to access your TURN server, potentially enabling abuse by bots. All major Matrix clients that support legacy calls *also* support authenticated TURN access.
|
||||
:::
|
||||
|
||||
### Important Notes
|
||||
|
||||
- Replace `coturn.example.com` with your actual TURN server domain (the `realm` from coturn.conf)
|
||||
- The `turn_secret` must match the `static-auth-secret` in your coturn configuration
|
||||
- Restart or reload Continuwuity after making configuration changes
|
||||
|
||||
## Testing Your TURN Server
|
||||
|
||||
### Testing Credentials
|
||||
|
||||
Verify that Continuwuity is correctly serving TURN credentials to clients:
|
||||
|
||||
```bash
|
||||
curl "https://matrix.example.com/_matrix/client/r0/voip/turnServer" \
|
||||
-H "Authorization: Bearer <your_client_token>" | jq
|
||||
```
|
||||
|
||||
You should receive a response like this:
|
||||
|
||||
```json
|
||||
{
|
||||
"username": "1752792167:@jade:example.com",
|
||||
"password": "KjlDlawdPbU9mvP4bhdV/2c/h65=",
|
||||
"uris": [
|
||||
"turns:coturn.example.com?transport=udp",
|
||||
"turns:coturn.example.com?transport=tcp",
|
||||
"turn:coturn.example.com?transport=udp",
|
||||
"turn:coturn.example.com?transport=tcp"
|
||||
],
|
||||
"ttl": 86400
|
||||
}
|
||||
```
|
||||
|
||||
:::note MSC4166 Compliance
|
||||
If no TURN URIs are configured (`turn_uris` is empty), Continuwuity will return a 404 Not Found response, as specified in MSC4166.
|
||||
:::
|
||||
|
||||
### Testing Connectivity
|
||||
|
||||
Use [Trickle ICE](https://webrtc.github.io/samples/src/content/peerconnection/trickle-ice/) to verify that the TURN credentials actually work:
|
||||
|
||||
1. Copy the credentials from the response above
|
||||
2. Paste them into the Trickle ICE testing tool
|
||||
3. Click "Gather candidates"
|
||||
4. Look for successful `relay` candidates in the results
|
||||
|
||||
If you see relay candidates, your TURN server is working correctly!
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### Clients can't connect to TURN server
|
||||
|
||||
- Verify firewall rules allow the necessary ports (3478, 5349, and your media port range)
|
||||
- Check that DNS resolves correctly for your TURN domain
|
||||
- Ensure your `turn_secret` matches coturn's `static-auth-secret`
|
||||
- Test with Trickle ICE to isolate the issue
|
||||
|
||||
### Port conflicts with LiveKit
|
||||
|
||||
- Make sure coturn's `min-port` starts above LiveKit's `port_range_end` (default: 50200)
|
||||
- Or adjust LiveKit's port range to avoid coturn's default range
|
||||
|
||||
### 404 when calling turnServer endpoint
|
||||
|
||||
- Verify that `turn_uris` is not empty in your Continuwuity config
|
||||
- This behavior is correct per MSC4166 if no TURN URIs are configured
|
||||
|
||||
### Credentials expire too quickly
|
||||
|
||||
- Adjust the `turn_ttl` value in your Continuwuity configuration
|
||||
- Default is 86400 seconds (24 hours)
|
||||
|
||||
### Related Documentation
|
||||
|
||||
- [MatrixRTC/LiveKit Setup](./livekit.mdx) - Configure group calling with LiveKit
|
||||
- [Coturn GitHub](https://github.com/coturn/coturn) - Official coturn repository
|
||||
- [Synapse TURN Guide](https://element-hq.github.io/synapse/latest/turn-howto.html) - Additional security recommendations
|
||||
|
|
@ -13,8 +13,9 @@ settings.
|
|||
|
||||
The config file to use can be specified on the commandline when running
|
||||
Continuwuity by specifying the `-c`, `--config` flag. Alternatively, you can use
|
||||
the environment variable `CONDUWUIT_CONFIG` to specify the config file to be used.
|
||||
Conduit's environment variables are supported for backwards compatibility.
|
||||
the environment variable `CONTINUWUITY_CONFIG` to specify the config file to be
|
||||
used; see [the section on environment variables](#environment-variables) for
|
||||
more information.
|
||||
|
||||
## Option commandline flag
|
||||
|
||||
|
|
@ -52,13 +53,15 @@ This commandline argument can be paired with the `--option` flag.
|
|||
|
||||
All of the settings that are found in the config file can be specified by using
|
||||
environment variables. The environment variable names should be all caps and
|
||||
prefixed with `CONDUWUIT_`.
|
||||
prefixed with `CONTINUWUITY_`.
|
||||
|
||||
For example, if the setting you are changing is `max_request_size`, then the
|
||||
environment variable to set is `CONDUWUIT_MAX_REQUEST_SIZE`.
|
||||
environment variable to set is `CONTINUWUITY_MAX_REQUEST_SIZE`.
|
||||
|
||||
To modify config options not in the `[global]` context such as
|
||||
`[global.well_known]`, use the `__` suffix split: `CONDUWUIT_WELL_KNOWN__SERVER`
|
||||
`[global.well_known]`, use the `__` suffix split:
|
||||
`CONTINUWUITY_WELL_KNOWN__SERVER`
|
||||
|
||||
Conduit's environment variables are supported for backwards compatibility (e.g.
|
||||
Conduit and conduwuit's environment variables are also supported for backwards
|
||||
compatibility, via the `CONDUIT_` and `CONDUWUIT_` prefixes respectively (e.g.
|
||||
`CONDUIT_SERVER_NAME`).
|
||||
|
|
|
|||
|
|
@ -6,9 +6,9 @@ services:
|
|||
### then you are ready to go.
|
||||
image: forgejo.ellis.link/continuwuation/continuwuity:latest
|
||||
restart: unless-stopped
|
||||
command: /sbin/conduwuit
|
||||
volumes:
|
||||
- db:/var/lib/continuwuity
|
||||
- /etc/resolv.conf:/etc/resolv.conf:ro # Use the host's DNS resolver rather than Docker's.
|
||||
#- ./continuwuity.toml:/etc/continuwuity.toml
|
||||
networks:
|
||||
- proxy
|
||||
|
|
|
|||
|
|
@ -16,14 +16,14 @@ services:
|
|||
restart: unless-stopped
|
||||
labels:
|
||||
caddy: example.com
|
||||
caddy.0_respond: /.well-known/matrix/server {"m.server":"matrix.example.com:443"}
|
||||
caddy.1_respond: /.well-known/matrix/client {"m.server":{"base_url":"https://matrix.example.com"},"m.homeserver":{"base_url":"https://matrix.example.com"},"org.matrix.msc3575.proxy":{"url":"https://matrix.example.com"}}
|
||||
caddy.reverse_proxy: /.well-known/matrix/* homeserver:6167
|
||||
|
||||
homeserver:
|
||||
### If you already built the Continuwuity image with 'docker build' or want to use a registry image,
|
||||
### then you are ready to go.
|
||||
image: forgejo.ellis.link/continuwuation/continuwuity:latest
|
||||
restart: unless-stopped
|
||||
command: /sbin/conduwuit
|
||||
volumes:
|
||||
- db:/var/lib/continuwuity
|
||||
- /etc/resolv.conf:/etc/resolv.conf:ro # Use the host's DNS resolver rather than Docker's.
|
||||
|
|
@ -42,6 +42,10 @@ services:
|
|||
#CONTINUWUITY_LOG: warn,state_res=warn
|
||||
CONTINUWUITY_ADDRESS: 0.0.0.0
|
||||
#CONTINUWUITY_CONFIG: '/etc/continuwuity.toml' # Uncomment if you mapped config toml above
|
||||
|
||||
# Required for .well-known delegation - edit these according to your chosen domain
|
||||
CONTINUWUITY_WELL_KNOWN__CLIENT: https://matrix.example.com
|
||||
CONTINUWUITY_WELL_KNOWN__SERVER: matrix.example.com:443
|
||||
networks:
|
||||
- caddy
|
||||
labels:
|
||||
|
|
|
|||
|
|
@ -6,6 +6,7 @@ services:
|
|||
### then you are ready to go.
|
||||
image: forgejo.ellis.link/continuwuation/continuwuity:latest
|
||||
restart: unless-stopped
|
||||
command: /sbin/conduwuit
|
||||
volumes:
|
||||
- db:/var/lib/continuwuity
|
||||
- /etc/resolv.conf:/etc/resolv.conf:ro # Use the host's DNS resolver rather than Docker's.
|
||||
|
|
|
|||
|
|
@ -6,6 +6,7 @@ services:
|
|||
### then you are ready to go.
|
||||
image: forgejo.ellis.link/continuwuation/continuwuity:latest
|
||||
restart: unless-stopped
|
||||
command: /sbin/conduwuit
|
||||
ports:
|
||||
- 8448:6167
|
||||
volumes:
|
||||
|
|
|
|||
|
|
@ -2,28 +2,26 @@
|
|||
|
||||
## Docker
|
||||
|
||||
To run Continuwuity with Docker, you can either build the image yourself or pull it
|
||||
from a registry.
|
||||
To run Continuwuity with Docker, you can either build the image yourself or pull
|
||||
it from a registry.
|
||||
|
||||
### Use a registry
|
||||
|
||||
OCI images for Continuwuity are available in the registries listed below.
|
||||
Available OCI images:
|
||||
|
||||
| Registry | Image | Notes |
|
||||
| --------------- | --------------------------------------------------------------- | -----------------------|
|
||||
| Forgejo Registry| [forgejo.ellis.link/continuwuation/continuwuity:latest](https://forgejo.ellis.link/continuwuation/-/packages/container/continuwuity/latest) | Latest tagged image. |
|
||||
| Forgejo Registry| [forgejo.ellis.link/continuwuation/continuwuity:main](https://forgejo.ellis.link/continuwuation/-/packages/container/continuwuity/main) | Main branch image. |
|
||||
| Forgejo Registry| [forgejo.ellis.link/continuwuation/continuwuity:latest-maxperf](https://forgejo.ellis.link/continuwuation/-/packages/container/continuwuity/latest-maxperf) | [Performance optimised version.](./generic.mdx#performance-optimised-builds) |
|
||||
| Forgejo Registry| [forgejo.ellis.link/continuwuation/continuwuity:main-maxperf](https://forgejo.ellis.link/continuwuation/-/packages/container/continuwuity/main-maxperf) | [Performance optimised version.](./generic.mdx#performance-optimised-builds) |
|
||||
| Registry | Image | Notes |
|
||||
| ---------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------- |
|
||||
| Forgejo Registry | [forgejo.ellis.link/continuwuation/continuwuity:latest](https://forgejo.ellis.link/continuwuation/-/packages/container/continuwuity/latest) | Latest tagged image. |
|
||||
| Forgejo Registry | [forgejo.ellis.link/continuwuation/continuwuity:main](https://forgejo.ellis.link/continuwuation/-/packages/container/continuwuity/main) | Main branch image. |
|
||||
| Forgejo Registry | [forgejo.ellis.link/continuwuation/continuwuity:latest-maxperf](https://forgejo.ellis.link/continuwuation/-/packages/container/continuwuity/latest-maxperf) | [Performance optimised version.](./generic.mdx#performance-optimised-builds) |
|
||||
| Forgejo Registry | [forgejo.ellis.link/continuwuation/continuwuity:main-maxperf](https://forgejo.ellis.link/continuwuation/-/packages/container/continuwuity/main-maxperf) | [Performance optimised version.](./generic.mdx#performance-optimised-builds) |
|
||||
|
||||
Use
|
||||
**Example:**
|
||||
|
||||
```bash
|
||||
docker image pull $LINK
|
||||
docker image pull forgejo.ellis.link/continuwuation/continuwuity:main-maxperf
|
||||
```
|
||||
|
||||
to pull it to your machine.
|
||||
|
||||
#### Mirrors
|
||||
|
||||
Images are mirrored to multiple locations automatically, on a schedule:
|
||||
|
|
@ -33,39 +31,146 @@ Images are mirrored to multiple locations automatically, on a schedule:
|
|||
- `registry.gitlab.com/continuwuity/continuwuity`
|
||||
- `git.nexy7574.co.uk/mirrored/continuwuity` (releases only, no `main`)
|
||||
|
||||
### Run
|
||||
### Quick Run
|
||||
|
||||
When you have the image, you can simply run it with
|
||||
Get a working Continuwuity server with an admin user in four steps:
|
||||
|
||||
#### Prerequisites
|
||||
|
||||
Continuwuity requires HTTPS for Matrix federation. You'll need:
|
||||
|
||||
- A domain name pointing to your server
|
||||
- A reverse proxy with SSL/TLS certificates (Traefik, Caddy, nginx, etc.)
|
||||
|
||||
See [Docker Compose](#docker-compose) for complete examples.
|
||||
|
||||
#### Environment Variables
|
||||
|
||||
- `CONTINUWUITY_SERVER_NAME` - Your Matrix server's domain name
|
||||
- `CONTINUWUITY_DATABASE_PATH` - Where to store your database (must match the
|
||||
volume mount)
|
||||
- `CONTINUWUITY_ADDRESS` - Bind address (use `0.0.0.0` to listen on all
|
||||
interfaces)
|
||||
- `CONTINUWUITY_ALLOW_REGISTRATION` - Set to `false` to disable registration, or
|
||||
use with `CONTINUWUITY_REGISTRATION_TOKEN` to require a token (see
|
||||
[reference](../reference/environment-variables.mdx#registration--user-configuration)
|
||||
for details)
|
||||
|
||||
See the
|
||||
[Environment Variables Reference](../reference/environment-variables.mdx) for
|
||||
more configuration options.
|
||||
|
||||
#### 1. Pull the image
|
||||
|
||||
```bash
|
||||
docker run -d -p 8448:6167 \
|
||||
-v db:/var/lib/continuwuity/ \
|
||||
-e CONTINUWUITY_SERVER_NAME="your.server.name" \
|
||||
-e CONTINUWUITY_ALLOW_REGISTRATION=false \
|
||||
--name continuwuity $LINK
|
||||
docker pull forgejo.ellis.link/continuwuation/continuwuity:latest
|
||||
```
|
||||
|
||||
or you can use [Docker Compose](#docker-compose).
|
||||
#### 2. Start the server with initial admin user
|
||||
|
||||
The `-d` flag lets the container run in detached mode. You may supply an
|
||||
optional `continuwuity.toml` config file, the example config can be found
|
||||
[here](../reference/config.mdx). You can pass in different env vars to
|
||||
change config values on the fly. You can even configure Continuwuity completely by
|
||||
using env vars. For an overview of possible values, please take a look at the
|
||||
<a href="/examples/docker-compose.yml" target="_blank">`docker-compose.yml`</a> file.
|
||||
```bash
|
||||
docker run -d \
|
||||
-p 6167:6167 \
|
||||
-v continuwuity_db:/var/lib/continuwuity \
|
||||
-e CONTINUWUITY_SERVER_NAME="matrix.example.com" \
|
||||
-e CONTINUWUITY_DATABASE_PATH="/var/lib/continuwuity" \
|
||||
-e CONTINUWUITY_ADDRESS="0.0.0.0" \
|
||||
-e CONTINUWUITY_ALLOW_REGISTRATION="false" \
|
||||
--name continuwuity \
|
||||
forgejo.ellis.link/continuwuation/continuwuity:latest \
|
||||
/sbin/conduwuit --execute "users create-user admin"
|
||||
```
|
||||
|
||||
If you just want to test Continuwuity for a short time, you can use the `--rm`
|
||||
flag, which cleans up everything related to your container after you stop
|
||||
it.
|
||||
Replace `matrix.example.com` with your actual server name and `admin` with
|
||||
your preferred username.
|
||||
|
||||
#### 3. Get your admin password
|
||||
|
||||
```bash
|
||||
docker logs continuwuity 2>&1 | grep "Created user"
|
||||
```
|
||||
|
||||
You'll see output like:
|
||||
|
||||
```
|
||||
Created user with user_id: @admin:matrix.example.com and password: `[auto-generated-password]`
|
||||
```
|
||||
|
||||
#### 4. Configure your reverse proxy
|
||||
|
||||
Configure your reverse proxy to forward HTTPS traffic to Continuwuity. See
|
||||
[Docker Compose](#docker-compose) for examples.
|
||||
|
||||
Once configured, log in with any Matrix client using `@admin:matrix.example.com`
|
||||
and the generated password. You'll automatically be invited to the admin room
|
||||
where you can manage your server.
|
||||
|
||||
### Docker Compose
|
||||
|
||||
If the `docker run` command is not suitable for you or your setup, you can also use one
|
||||
of the provided `docker-compose` files.
|
||||
Docker Compose is the recommended deployment method. These examples include
|
||||
reverse proxy configurations for Matrix federation.
|
||||
|
||||
Depending on your proxy setup, you can use one of the following files:
|
||||
#### Matrix Federation Requirements
|
||||
|
||||
### For existing Traefik setup
|
||||
For Matrix federation to work, you need to serve `.well-known/matrix/client` and
|
||||
`.well-known/matrix/server` endpoints. You can achieve this either by:
|
||||
|
||||
1. **Using a well-known service** - The compose files below include an nginx
|
||||
container to serve these files
|
||||
2. **Using Continuwuity's built-in delegation** (easier for Traefik) - Configure
|
||||
delegation files in your config, then proxy `/.well-known/matrix/*` to
|
||||
Continuwuity
|
||||
|
||||
**Traefik example using built-in delegation:**
|
||||
|
||||
```yaml
|
||||
labels:
|
||||
traefik.http.routers.continuwuity.rule: >-
|
||||
(Host(`matrix.example.com`) ||
|
||||
(Host(`example.com`) && PathPrefix(`/.well-known/matrix`)))
|
||||
```
|
||||
|
||||
This routes your Matrix domain and well-known paths to Continuwuity.
|
||||
|
||||
#### Creating Your First Admin User
|
||||
|
||||
Add the `--execute` command to create an admin user on first startup. In your
|
||||
compose file, add under the `continuwuity` service:
|
||||
|
||||
```yaml
|
||||
services:
|
||||
continuwuity:
|
||||
image: forgejo.ellis.link/continuwuation/continuwuity:latest
|
||||
command: /sbin/conduwuit --execute "users create-user admin"
|
||||
# ... rest of configuration
|
||||
```
|
||||
|
||||
Then retrieve the auto-generated password:
|
||||
|
||||
```bash
|
||||
docker compose logs continuwuity | grep "Created user"
|
||||
```
|
||||
|
||||
#### Choose Your Reverse Proxy
|
||||
|
||||
Select the compose file that matches your setup:
|
||||
|
||||
:::note DNS Performance
|
||||
Docker's default DNS resolver can cause performance issues with Matrix
|
||||
federation. If you experience slow federation or DNS timeouts, you may need to
|
||||
use your host's DNS resolver instead. Add this volume mount to the
|
||||
`continuwuity` service:
|
||||
|
||||
```yaml
|
||||
volumes:
|
||||
- /etc/resolv.conf:/etc/resolv.conf:ro
|
||||
```
|
||||
|
||||
See [Troubleshooting - DNS Issues](../troubleshooting.mdx#potential-dns-issues-when-using-docker)
|
||||
for more details and alternative solutions.
|
||||
:::
|
||||
|
||||
##### For existing Traefik setup
|
||||
|
||||
<details>
|
||||
<summary>docker-compose.for-traefik.yml</summary>
|
||||
|
|
@ -76,7 +181,7 @@ Depending on your proxy setup, you can use one of the following files:
|
|||
|
||||
</details>
|
||||
|
||||
### With Traefik included
|
||||
##### With Traefik included
|
||||
|
||||
<details>
|
||||
<summary>docker-compose.with-traefik.yml</summary>
|
||||
|
|
@ -87,7 +192,7 @@ Depending on your proxy setup, you can use one of the following files:
|
|||
|
||||
</details>
|
||||
|
||||
### With Caddy Docker Proxy
|
||||
##### With Caddy Docker Proxy
|
||||
|
||||
<details>
|
||||
<summary>docker-compose.with-caddy.yml</summary>
|
||||
|
|
@ -98,9 +203,15 @@ Replace all `example.com` placeholders with your own domain.
|
|||
|
||||
```
|
||||
|
||||
If you don't already have a network for Caddy to monitor, create one first:
|
||||
|
||||
```bash
|
||||
docker network create caddy
|
||||
```
|
||||
|
||||
</details>
|
||||
|
||||
### For other reverse proxies
|
||||
##### For other reverse proxies
|
||||
|
||||
<details>
|
||||
<summary>docker-compose.yml</summary>
|
||||
|
|
@ -111,7 +222,7 @@ Replace all `example.com` placeholders with your own domain.
|
|||
|
||||
</details>
|
||||
|
||||
### Override file
|
||||
##### Override file for customisation
|
||||
|
||||
<details>
|
||||
<summary>docker-compose.override.yml</summary>
|
||||
|
|
@ -122,99 +233,25 @@ Replace all `example.com` placeholders with your own domain.
|
|||
|
||||
</details>
|
||||
|
||||
When picking the Traefik-related compose file, rename it to
|
||||
`docker-compose.yml`, and rename the override file to
|
||||
`docker-compose.override.yml`. Edit the latter with the values you want for your
|
||||
server.
|
||||
#### Starting Your Server
|
||||
|
||||
When picking the `caddy-docker-proxy` compose file, it's important to first
|
||||
create the `caddy` network before spinning up the containers:
|
||||
|
||||
```bash
|
||||
docker network create caddy
|
||||
```
|
||||
|
||||
After that, you can rename it to `docker-compose.yml` and spin up the
|
||||
containers!
|
||||
|
||||
Additional info about deploying Continuwuity can be found [here](generic.mdx).
|
||||
|
||||
### Build
|
||||
|
||||
Official Continuwuity images are built using **Docker Buildx** and the Dockerfile found at [`docker/Dockerfile`][dockerfile-path]. This approach uses common Docker tooling and enables efficient multi-platform builds.
|
||||
|
||||
The resulting images are widely compatible with Docker and other container runtimes like Podman or containerd.
|
||||
|
||||
The images *do not contain a shell*. They contain only the Continuwuity binary, required libraries, TLS certificates, and metadata.
|
||||
|
||||
<details>
|
||||
<summary>Click to view the Dockerfile</summary>
|
||||
|
||||
You can also <a href="https://forgejo.ellis.link/continuwuation/continuwuation/src/branch/main/docker/Dockerfile" target="_blank">view the Dockerfile on Forgejo</a>.
|
||||
|
||||
```dockerfile file="../../docker/Dockerfile"
|
||||
|
||||
```
|
||||
|
||||
</details>
|
||||
|
||||
To build an image locally using Docker Buildx, you can typically run a command like:
|
||||
|
||||
```bash
|
||||
# Build for the current platform and load into the local Docker daemon
|
||||
docker buildx build --load --tag continuwuity:latest -f docker/Dockerfile .
|
||||
|
||||
# Example: Build for specific platforms and push to a registry.
|
||||
# docker buildx build --platform linux/amd64,linux/arm64 --tag registry.io/org/continuwuity:latest -f docker/Dockerfile . --push
|
||||
|
||||
# Example: Build binary optimised for the current CPU (standard release profile)
|
||||
# docker buildx build --load \
|
||||
# --tag continuwuity:latest \
|
||||
# --build-arg TARGET_CPU=native \
|
||||
# -f docker/Dockerfile .
|
||||
|
||||
# Example: Build maxperf variant (release-max-perf profile with LTO)
|
||||
# Optimised for runtime performance and smaller binary size, but requires longer build time
|
||||
# docker buildx build --load \
|
||||
# --tag continuwuity:latest-maxperf \
|
||||
# --build-arg TARGET_CPU=native \
|
||||
# --build-arg RUST_PROFILE=release-max-perf \
|
||||
# -f docker/Dockerfile .
|
||||
```
|
||||
|
||||
Refer to the Docker Buildx documentation for more advanced build options.
|
||||
|
||||
[dockerfile-path]: https://forgejo.ellis.link/continuwuation/continuwuation/src/branch/main/docker/Dockerfile
|
||||
|
||||
### Run
|
||||
|
||||
If you have already built the image or want to use one from the registries, you
|
||||
can start the container and everything else in the compose file in detached
|
||||
mode with:
|
||||
1. Choose your compose file and rename it to `docker-compose.yml`
|
||||
2. If using the override file, rename it to `docker-compose.override.yml` and
|
||||
edit your values
|
||||
3. Start the server:
|
||||
|
||||
```bash
|
||||
docker compose up -d
|
||||
```
|
||||
|
||||
> **Note:** Don't forget to modify and adjust the compose file to your needs.
|
||||
See the [generic deployment guide](generic.mdx) for more deployment options.
|
||||
|
||||
### Use Traefik as Proxy
|
||||
### Building Custom Images
|
||||
|
||||
As a container user, you probably know about Traefik. It is an easy-to-use
|
||||
reverse proxy for making containerized apps and services available through the
|
||||
web. With the Traefik-related docker-compose files provided above, it is equally easy
|
||||
to deploy and use Continuwuity, with a small caveat. If you have already looked at
|
||||
the files, you should have seen the `well-known` service, which is the
|
||||
small caveat. Traefik is simply a proxy and load balancer and cannot
|
||||
serve any kind of content. For Continuwuity to federate, we need to either
|
||||
expose ports `443` and `8448` or serve two endpoints: `.well-known/matrix/client`
|
||||
and `.well-known/matrix/server`.
|
||||
|
||||
With the service `well-known`, we use a single `nginx` container that serves
|
||||
those two files.
|
||||
|
||||
Alternatively, you can use Continuwuity's built-in delegation file capability. Set up the delegation files in the configuration file, and then proxy paths under `/.well-known/matrix` to continuwuity. For example, the label ``traefik.http.routers.continuwuity.rule=(Host(`matrix.ellis.link`) || (Host(`ellis.link`) && PathPrefix(`/.well-known/matrix`)))`` does this for the domain `ellis.link`.
|
||||
For information on building your own Continuwuity Docker images, see the
|
||||
[Building Docker Images](../development/index.mdx#building-docker-images)
|
||||
section in the development documentation.
|
||||
|
||||
## Voice communication
|
||||
|
||||
See the [TURN](../turn.md) page.
|
||||
See the [Calls](../calls.mdx) page.
|
||||
|
|
|
|||
|
|
@ -1,5 +1,7 @@
|
|||
# Continuwuity for FreeBSD
|
||||
|
||||
Continuwuity currently does not provide FreeBSD builds or FreeBSD packaging. However, Continuwuity does build and work on FreeBSD using the system-provided RocksDB.
|
||||
Continuwuity doesn't provide official FreeBSD packages; however, a community-maintained set of packages is available on [Forgejo](https://forgejo.ellis.link/katie/continuwuity-bsd). Note that these are provided as standalone packages and are not part of a FreeBSD package repository (yet), so updates need to be downloaded and installed manually.
|
||||
|
||||
Contributions to get Continuwuity packaged for FreeBSD are welcome.
|
||||
Please see the installation instructions in that repository. Direct any questions to its issue tracker or to [@katie:kat5.dev](https://matrix.to/#/@katie:kat5.dev).
|
||||
|
||||
For general BSD support, please join our [Continuwuity BSD](https://matrix.to/#/%23bsd:continuwuity.org) community room.
|
||||
|
|
|
|||
|
|
@ -56,6 +56,8 @@ If wanting to build using standard Rust toolchains, make sure you install:
|
|||
|
||||
You can build Continuwuity using `cargo build --release`.
|
||||
|
||||
Continuwuity supports various optional features that can be enabled during compilation. Please see the Cargo.toml file for a comprehensive list, or ask in our rooms.
|
||||
|
||||
### Building with Nix
|
||||
|
||||
If you prefer, you can use Nix (or [Lix](https://lix.systems)) to build Continuwuity. This provides improved reproducibility and makes it easy to set up a build environment and generate output. This approach also allows for easy cross-compilation.
|
||||
|
|
@ -269,7 +271,7 @@ curl https://your.server.name:8448/_matrix/federation/v1/version
|
|||
```
|
||||
|
||||
- To check if your server can communicate with other homeservers, use the
|
||||
[Matrix Federation Tester](https://federationtester.matrix.org/). If you can
|
||||
[Matrix Federation Tester](https://federationtester.mtrnord.blog/). If you can
|
||||
register but cannot join federated rooms, check your configuration and verify
|
||||
that port 8448 is open and forwarded correctly.
|
||||
|
||||
|
|
@ -277,7 +279,7 @@ that port 8448 is open and forwarded correctly.
|
|||
|
||||
## Audio/Video calls
|
||||
|
||||
For Audio/Video call functionality see the [TURN Guide](../turn.md).
|
||||
For Audio/Video call functionality see the [Calls](../calls.mdx) page.
|
||||
|
||||
## Appservices
|
||||
|
||||
|
|
|
|||
|
|
@ -1,7 +1,110 @@
|
|||
# Continuwuity for Kubernetes
|
||||
|
||||
Continuwuity doesn't support horizontal scalability or distributed loading
|
||||
natively. However, [a community-maintained Helm Chart is available here to run
|
||||
natively. However, a deployment in Kubernetes is very similar to the docker
|
||||
setup. This is because Continuwuity can be fully configured using environment
|
||||
variables. A sample StatefulSet is shared below. The only thing missing is
|
||||
a PVC definition (named `continuwuity-data`) for the volume mounted to
|
||||
the StatefulSet, an Ingress resource to point your webserver to the
|
||||
Continuwuity Pods, and a Service resource (targeting `app.kubernetes.io/name: continuwuity`)
|
||||
to glue the Ingress and Pod together.
|
||||
|
||||
Carefully go through the `env` section and add, change, and remove any env vars you like using the [Configuration reference](https://continuwuity.org/reference/config.html)
|
||||
|
||||
```yaml
|
||||
apiVersion: apps/v1
|
||||
kind: StatefulSet
|
||||
metadata:
|
||||
name: continuwuity
|
||||
namespace: matrix
|
||||
labels:
|
||||
app.kubernetes.io/name: continuwuity
|
||||
spec:
|
||||
replicas: 1
|
||||
serviceName: continuwuity
|
||||
podManagementPolicy: Parallel
|
||||
selector:
|
||||
matchLabels:
|
||||
app.kubernetes.io/name: continuwuity
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app.kubernetes.io/name: continuwuity
|
||||
spec:
|
||||
securityContext:
|
||||
sysctls:
|
||||
- name: net.ipv4.ip_unprivileged_port_start
|
||||
value: "0"
|
||||
containers:
|
||||
- name: continuwuity
|
||||
# use a sha hash <3
|
||||
image: forgejo.ellis.link/continuwuation/continuwuity:latest
|
||||
command: ["/sbin/conduwuit"]
|
||||
imagePullPolicy: IfNotPresent
|
||||
ports:
|
||||
- name: http
|
||||
containerPort: 80
|
||||
volumeMounts:
|
||||
- mountPath: /data
|
||||
name: data
|
||||
subPath: data
|
||||
securityContext:
|
||||
capabilities:
|
||||
add:
|
||||
- NET_BIND_SERVICE
|
||||
env:
|
||||
- name: TOKIO_WORKER_THREADS
|
||||
value: "2"
|
||||
- name: CONTINUWUITY_SERVER_NAME
|
||||
value: "example.com"
|
||||
- name: CONTINUWUITY_DATABASE_PATH
|
||||
value: "/data/db"
|
||||
- name: CONTINUWUITY_DATABASE_BACKEND
|
||||
value: "rocksdb"
|
||||
- name: CONTINUWUITY_PORT
|
||||
value: "80"
|
||||
- name: CONTINUWUITY_MAX_REQUEST_SIZE
|
||||
value: "20000000"
|
||||
- name: CONTINUWUITY_ALLOW_FEDERATION
|
||||
value: "true"
|
||||
- name: CONTINUWUITY_TRUSTED_SERVERS
|
||||
value: '["matrix.org"]'
|
||||
- name: CONTINUWUITY_ADDRESS
|
||||
value: "0.0.0.0"
|
||||
- name: CONTINUWUITY_ROCKSDB_PARALLELISM_THREADS
|
||||
value: "1"
|
||||
- name: CONTINUWUITY_WELL_KNOWN__SERVER
|
||||
value: "matrix.example.com:443"
|
||||
- name: CONTINUWUITY_WELL_KNOWN__CLIENT
|
||||
value: "https://matrix.example.com"
|
||||
- name: CONTINUWUITY_ALLOW_REGISTRATION
|
||||
value: "false"
|
||||
- name: RUST_LOG
|
||||
value: info
|
||||
readinessProbe:
|
||||
httpGet:
|
||||
path: /_matrix/federation/v1/version
|
||||
port: http
|
||||
periodSeconds: 4
|
||||
failureThreshold: 5
|
||||
resources:
|
||||
# Continuwuity might use quite some RAM :3
|
||||
requests:
|
||||
cpu: "2"
|
||||
memory: "512Mi"
|
||||
limits:
|
||||
cpu: "4"
|
||||
memory: "2048Mi"
|
||||
volumes:
|
||||
- name: data
|
||||
persistentVolumeClaim:
|
||||
claimName: continuwuity-data
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
Apart from manually configuring the containers,
|
||||
[a community-maintained Helm Chart is available here to run
|
||||
conduwuit on Kubernetes](https://gitlab.cronce.io/charts/conduwuit)
|
||||
|
||||
This should be compatible with Continuwuity, but you will need to change the image reference.
|
||||
|
|
|
|||
|
|
@ -2,7 +2,8 @@
|
|||
|
||||
Information about developing the project. If you are only interested in using
|
||||
it, you can safely ignore this page. If you plan on contributing, see the
|
||||
[contributor's guide](./contributing.mdx) and [code style guide](./code_style.mdx).
|
||||
[contributor's guide](./contributing.mdx) and
|
||||
[code style guide](./code_style.mdx).
|
||||
|
||||
## Continuwuity project layout
|
||||
|
||||
|
|
@ -12,86 +13,98 @@ members are under `src/`. The workspace definition is at the top level / root
|
|||
`Cargo.toml`.
|
||||
|
||||
The crate names are generally self-explanatory:
|
||||
|
||||
- `admin` is the admin room
|
||||
- `api` is the HTTP API, Matrix C-S and S-S endpoints, etc
|
||||
- `core` is core Continuwuity functionality like config loading, error definitions,
|
||||
global utilities, logging infrastructure, etc
|
||||
- `database` is RocksDB methods, helpers, RocksDB config, and general database definitions,
|
||||
utilities, or functions
|
||||
- `macros` are Continuwuity Rust [macros][macros] like general helper macros, logging
|
||||
and error handling macros, and [syn][syn] and [procedural macros][proc-macro]
|
||||
used for admin room commands and others
|
||||
- `core` is core Continuwuity functionality like config loading, error
|
||||
definitions, global utilities, logging infrastructure, etc
|
||||
- `database` is RocksDB methods, helpers, RocksDB config, and general database
|
||||
definitions, utilities, or functions
|
||||
- `macros` are Continuwuity Rust [macros][macros] like general helper macros,
|
||||
logging and error handling macros, and [syn][syn] and [procedural
|
||||
macros][proc-macro] used for admin room commands and others
|
||||
- `main` is the "primary" sub-crate. This is where the `main()` function lives,
|
||||
tokio worker and async initialisation, Sentry initialisation, [clap][clap] init,
|
||||
and signal handling. If you are adding new [Rust features][features], they *must*
|
||||
go here.
|
||||
- `router` is the webserver and request handling bits, using axum, tower, tower-http,
|
||||
hyper, etc, and the [global server state][state] to access `services`.
|
||||
tokio worker and async initialisation, Sentry initialisation, [clap][clap]
|
||||
init, and signal handling. If you are adding new [Rust features][features],
|
||||
they _must_ go here.
|
||||
- `router` is the webserver and request handling bits, using axum, tower,
|
||||
tower-http, hyper, etc, and the [global server state][state] to access
|
||||
`services`.
|
||||
- `service` is the high-level database definitions and functions for data,
|
||||
outbound/sending code, and other business logic such as media fetching.
|
||||
outbound/sending code, and other business logic such as media fetching.
|
||||
|
||||
It is highly unlikely you will ever need to add a new workspace member, but
|
||||
if you truly find yourself needing to, we recommend reaching out to us in
|
||||
the Matrix room for discussions about it beforehand.
|
||||
It is highly unlikely you will ever need to add a new workspace member, but if
|
||||
you truly find yourself needing to, we recommend reaching out to us in the
|
||||
Matrix room for discussions about it beforehand.
|
||||
|
||||
The primary inspiration for this design was part of hot reloadable development,
|
||||
to support "Continuwuity as a library" where specific parts can simply be swapped out.
|
||||
There is evidence Conduit wanted to go this route too as `axum` is technically an
|
||||
optional feature in Conduit, and can be compiled without the binary or axum library
|
||||
for handling inbound web requests; but it was never completed or worked.
|
||||
to support "Continuwuity as a library" where specific parts can simply be
|
||||
swapped out. There is evidence Conduit wanted to go this route too as `axum` is
|
||||
technically an optional feature in Conduit, and can be compiled without the
|
||||
binary or axum library for handling inbound web requests; but it was never
|
||||
completed or worked.
|
||||
|
||||
See the Rust documentation on [Workspaces][workspaces] for general questions
|
||||
and information on Cargo workspaces.
|
||||
See the Rust documentation on [Workspaces][workspaces] for general questions and
|
||||
information on Cargo workspaces.
|
||||
|
||||
## Adding compile-time [features][features]
|
||||
|
||||
If you'd like to add a compile-time feature, you must first define it in
|
||||
the `main` workspace crate located in `src/main/Cargo.toml`. The feature must
|
||||
enable a feature in the other workspace crate(s) you intend to use it in. Then
|
||||
the said workspace crate(s) must define the feature there in its `Cargo.toml`.
|
||||
If you'd like to add a compile-time feature, you must first define it in the
|
||||
`main` workspace crate located in `src/main/Cargo.toml`. The feature must enable
|
||||
a feature in the other workspace crate(s) you intend to use it in. Then the said
|
||||
workspace crate(s) must define the feature there in its `Cargo.toml`.
|
||||
|
||||
So, if this is adding a feature to the API such as `woof`, you define the feature
|
||||
in the `api` crate's `Cargo.toml` as `woof = []`. The feature definition in `main`'s
|
||||
`Cargo.toml` will be `woof = ["conduwuit-api/woof"]`.
|
||||
So, if this is adding a feature to the API such as `woof`, you define the
|
||||
feature in the `api` crate's `Cargo.toml` as `woof = []`. The feature definition
|
||||
in `main`'s `Cargo.toml` will be `woof = ["conduwuit-api/woof"]`.
|
||||
|
||||
The rationale for this is due to Rust / Cargo not supporting
|
||||
["workspace level features"][9], we must make a choice of; either scattering
|
||||
features all over the workspace crates, making it difficult for anyone to add
|
||||
or remove default features; or define all the features in one central workspace
|
||||
crate that propagate down/up to the other workspace crates. It is a Cargo pitfall,
|
||||
and we'd like to see better developer UX in Rust's Workspaces.
|
||||
The rationale for this is due to Rust / Cargo not supporting ["workspace level
|
||||
features"][9], we must make a choice of; either scattering features all over the
|
||||
workspace crates, making it difficult for anyone to add or remove default
|
||||
features; or define all the features in one central workspace crate that
|
||||
propagate down/up to the other workspace crates. It is a Cargo pitfall, and we'd
|
||||
like to see better developer UX in Rust's Workspaces.
|
||||
|
||||
Additionally, the definition of one single place makes "feature collection" in our
|
||||
Nix flake a million times easier instead of collecting and deduping them all from
|
||||
searching in all the workspace crates' `Cargo.toml`s. Though we wouldn't need to
|
||||
do this if Rust supported workspace-level features to begin with.
|
||||
Additionally, the definition of one single place makes "feature collection" in
|
||||
our Nix flake a million times easier instead of collecting and deduping them all
|
||||
from searching in all the workspace crates' `Cargo.toml`s. Though we wouldn't
|
||||
need to do this if Rust supported workspace-level features to begin with.
|
||||
|
||||
## List of forked dependencies
|
||||
|
||||
During Continuwuity (and prior projects) development, we have had to fork some dependencies to support our use-cases.
|
||||
These forks exist for various reasons including features that upstream projects won't accept,
|
||||
faster-paced development, Continuwuity-specific usecases, or lack of time to upstream changes.
|
||||
During Continuwuity (and prior projects) development, we have had to fork some
|
||||
dependencies to support our use-cases. These forks exist for various reasons
|
||||
including features that upstream projects won't accept, faster-paced
|
||||
development, Continuwuity-specific use cases, or lack of time to upstream
|
||||
changes.
|
||||
|
||||
All forked dependencies are maintained under the [continuwuation organization on Forgejo](https://forgejo.ellis.link/continuwuation):
|
||||
All forked dependencies are maintained under the
|
||||
[continuwuation organization on Forgejo](https://forgejo.ellis.link/continuwuation):
|
||||
|
||||
- [ruwuma][continuwuation-ruwuma] - Fork of [ruma/ruma][ruma] with various performance improvements, more features and better client/server interop
|
||||
- [rocksdb][continuwuation-rocksdb] - Fork of [facebook/rocksdb][rocksdb] via [`@zaidoon1`][8] with liburing build fixes and GCC debug build fixes
|
||||
- [jemallocator][continuwuation-jemallocator] - Fork of [tikv/jemallocator][jemallocator] fixing musl builds, suspicious code,
|
||||
and adding support for redzones in Valgrind
|
||||
- [rustyline-async][continuwuation-rustyline-async] - Fork of [zyansheep/rustyline-async][rustyline-async] with tab completion callback
|
||||
and `CTRL+\` signal quit event for Continuwuity console CLI
|
||||
- [rust-rocksdb][continuwuation-rust-rocksdb] - Fork of [rust-rocksdb/rust-rocksdb][rust-rocksdb] fixing musl build issues,
|
||||
removing unnecessary `gtest` include, and using our RocksDB and jemallocator forks
|
||||
- [tracing][continuwuation-tracing] - Fork of [tokio-rs/tracing][tracing] implementing `Clone` for `EnvFilter` to
|
||||
support dynamically changing tracing environments
|
||||
- [ruwuma][continuwuation-ruwuma] - Fork of [ruma/ruma][ruma] with various
|
||||
performance improvements, more features and better client/server interop
|
||||
- [rocksdb][continuwuation-rocksdb] - Fork of [facebook/rocksdb][rocksdb] via
|
||||
[`@zaidoon1`][8] with liburing build fixes and GCC debug build fixes
|
||||
- [jemallocator][continuwuation-jemallocator] - Fork of
|
||||
[tikv/jemallocator][jemallocator] fixing musl builds, suspicious code, and
|
||||
adding support for redzones in Valgrind
|
||||
- [rustyline-async][continuwuation-rustyline-async] - Fork of
|
||||
[zyansheep/rustyline-async][rustyline-async] with tab completion callback and
|
||||
`CTRL+\` signal quit event for Continuwuity console CLI
|
||||
- [rust-rocksdb][continuwuation-rust-rocksdb] - Fork of
|
||||
[rust-rocksdb/rust-rocksdb][rust-rocksdb] fixing musl build issues, removing
|
||||
unnecessary `gtest` include, and using our RocksDB and jemallocator forks
|
||||
- [tracing][continuwuation-tracing] - Fork of [tokio-rs/tracing][tracing]
|
||||
implementing `Clone` for `EnvFilter` to support dynamically changing tracing
|
||||
environments
|
||||
|
||||
## Debugging with `tokio-console`
|
||||
|
||||
[`tokio-console`][7] can be a useful tool for debugging and profiling. To make a
|
||||
`tokio-console`-enabled build of Continuwuity, enable the `tokio_console` feature,
|
||||
disable the default `release_max_log_level` feature, and set the `--cfg
|
||||
tokio_unstable` flag to enable experimental tokio APIs. A build might look like
|
||||
this:
|
||||
`tokio-console`-enabled build of Continuwuity, enable the `tokio_console`
|
||||
feature, disable the default `release_max_log_level` feature, and set the
|
||||
`--cfg tokio_unstable` flag to enable experimental tokio APIs. A build might
|
||||
look like this:
|
||||
|
||||
```bash
|
||||
RUSTFLAGS="--cfg tokio_unstable" cargo +nightly build \
|
||||
|
|
@ -100,34 +113,84 @@ RUSTFLAGS="--cfg tokio_unstable" cargo +nightly build \
|
|||
--features=systemd,element_hacks,gzip_compression,brotli_compression,zstd_compression,tokio_console
|
||||
```
|
||||
|
||||
You will also need to enable the `tokio_console` config option in Continuwuity when
|
||||
starting it. This was due to tokio-console causing gradual memory leak/usage
|
||||
if left enabled.
|
||||
You will also need to enable the `tokio_console` config option in Continuwuity
|
||||
when starting it. This is because tokio-console causes a gradual memory
|
||||
leak/usage if left enabled.
|
||||
|
||||
## Building Docker Images
|
||||
|
||||
To build a Docker image for Continuwuity, use the standard Docker build command:
|
||||
Official Continuwuity images are built using **Docker Buildx** and the
|
||||
Dockerfile found at [`docker/Dockerfile`][dockerfile-path].
|
||||
|
||||
The images are compatible with Docker and other container runtimes like Podman
|
||||
or containerd.
|
||||
|
||||
The images _do not contain a shell_. They contain only the Continuwuity binary,
|
||||
required libraries, TLS certificates, and metadata.
|
||||
|
||||
<details>
|
||||
<summary>Click to view the Dockerfile</summary>
|
||||
|
||||
You can also
|
||||
|
||||
<a
|
||||
href="https://forgejo.ellis.link/continuwuation/continuwuation/src/branch/main/docker/Dockerfile"
|
||||
target="_blank"
|
||||
>
|
||||
view the Dockerfile on Forgejo
|
||||
</a>
|
||||
.
|
||||
|
||||
```dockerfile file="../../docker/Dockerfile"
|
||||
|
||||
```bash
|
||||
docker build -f docker/Dockerfile .
|
||||
```
|
||||
|
||||
The image can be cross-compiled for different architectures.
|
||||
</details>
|
||||
|
||||
### Building Locally
|
||||
|
||||
To build an image locally using Docker Buildx:
|
||||
|
||||
```bash
|
||||
# Build for the current platform and load into the local Docker daemon
|
||||
docker buildx build --load --tag continuwuity:latest -f docker/Dockerfile .
|
||||
|
||||
# Example: Build for specific platforms and push to a registry
|
||||
# docker buildx build --platform linux/amd64,linux/arm64 --tag registry.io/org/continuwuity:latest -f docker/Dockerfile . --push
|
||||
|
||||
# Example: Build binary optimised for the current CPU (standard release profile)
|
||||
# docker buildx build --load \
|
||||
# --tag continuwuity:latest \
|
||||
# --build-arg TARGET_CPU=native \
|
||||
# -f docker/Dockerfile .
|
||||
|
||||
# Example: Build maxperf variant (release-max-perf profile with LTO)
|
||||
# docker buildx build --load \
|
||||
# --tag continuwuity:latest-maxperf \
|
||||
# --build-arg TARGET_CPU=native \
|
||||
# --build-arg RUST_PROFILE=release-max-perf \
|
||||
# -f docker/Dockerfile .
|
||||
```
|
||||
|
||||
Refer to the Docker Buildx documentation for more advanced build options.
|
||||
|
||||
[dockerfile-path]:
|
||||
https://forgejo.ellis.link/continuwuation/continuwuation/src/branch/main/docker/Dockerfile
|
||||
[continuwuation-ruwuma]: https://forgejo.ellis.link/continuwuation/ruwuma
|
||||
[continuwuation-rocksdb]: https://forgejo.ellis.link/continuwuation/rocksdb
|
||||
[continuwuation-jemallocator]: https://forgejo.ellis.link/continuwuation/jemallocator
|
||||
[continuwuation-rustyline-async]: https://forgejo.ellis.link/continuwuation/rustyline-async
|
||||
[continuwuation-rust-rocksdb]: https://forgejo.ellis.link/continuwuation/rust-rocksdb
|
||||
[continuwuation-jemallocator]:
|
||||
https://forgejo.ellis.link/continuwuation/jemallocator
|
||||
[continuwuation-rustyline-async]:
|
||||
https://forgejo.ellis.link/continuwuation/rustyline-async
|
||||
[continuwuation-rust-rocksdb]:
|
||||
https://forgejo.ellis.link/continuwuation/rust-rocksdb
|
||||
[continuwuation-tracing]: https://forgejo.ellis.link/continuwuation/tracing
|
||||
|
||||
[ruma]: https://github.com/ruma/ruma/
|
||||
[rocksdb]: https://github.com/facebook/rocksdb/
|
||||
[jemallocator]: https://github.com/tikv/jemallocator/
|
||||
[rustyline-async]: https://github.com/zyansheep/rustyline-async/
|
||||
[rust-rocksdb]: https://github.com/rust-rocksdb/rust-rocksdb/
|
||||
[tracing]: https://github.com/tokio-rs/tracing/
|
||||
|
||||
[7]: https://docs.rs/tokio-console/latest/tokio_console/
|
||||
[8]: https://github.com/zaidoon1/
|
||||
[9]: https://github.com/rust-lang/cargo/issues/12162
|
||||
|
|
|
|||
|
|
@ -19,6 +19,16 @@ hero:
|
|||
src: /assets/logo.svg
|
||||
alt: continuwuity logo
|
||||
|
||||
beforeFeatures:
|
||||
- title: Matrix for Discord users
|
||||
details: New to Matrix? Learn how Matrix compares to Discord
|
||||
link: https://joinmatrix.org/guide/matrix-vs-discord/
|
||||
buttonText: Find Out the Difference
|
||||
- title: How Matrix Works
|
||||
details: Learn how Matrix works under the hood, and what that means
|
||||
link: https://matrix.org/docs/matrix-concepts/elements-of-matrix/
|
||||
buttonText: Read the Guide
|
||||
|
||||
features:
|
||||
- title: 🚀 High Performance
|
||||
details: Built with Rust for exceptional speed and efficiency. Designed to run smoothly even on modest hardware.
|
||||
|
|
|
|||
|
|
@ -51,7 +51,13 @@ continuwuity aims to:
|
|||
|
||||
Check out the [documentation](https://continuwuity.org) for installation instructions.
|
||||
|
||||
There are currently no open registration continuwuity instances available.
|
||||
If you want to try it out as a user, we have some partnered homeservers you can use:
|
||||
* You can head over to [https://federated.nexus](https://federated.nexus/) in your browser.
|
||||
* Hit the `Apply to Join` button. Once your request has been accepted, you will receive an email with your username and password.
|
||||
* Head over to [https://app.federated.nexus](https://app.federated.nexus/) and you can sign in there, or use any other Matrix chat client of your choice.
|
||||
* Your username for Matrix will be in the form `@username:federated.nexus`; however, you can simply use the `username` part to log in, together with the password from the email.
|
||||
|
||||
* There's also [https://continuwuity.rocks/](https://continuwuity.rocks/). You can register a new account using Cinny via [this convenient link](https://app.cinny.in/register/continuwuity.rocks), or you can use Element or another Matrix client *that supports registration*.
|
||||
|
||||
## What are we working on?
|
||||
|
||||
|
|
|
|||
226
docs/plans/2026-03-17-space-permission-cascading-design.md
Normal file
226
docs/plans/2026-03-17-space-permission-cascading-design.md
Normal file
|
|
@ -0,0 +1,226 @@
|
|||
# Space Permission Cascading — Design Document
|
||||
|
||||
**Date:** 2026-03-17
|
||||
**Status:** Approved
|
||||
|
||||
## Overview
|
||||
|
||||
Server-side feature that allows user rights in a Space to cascade down to its
|
||||
direct child rooms. Includes power level cascading and role-based room access
|
||||
control. Enabled via a server-wide configuration flag, disabled by default.
|
||||
|
||||
## Requirements
|
||||
|
||||
1. Power levels defined in a Space cascade to all direct child rooms (Space
|
||||
always wins over per-room overrides).
|
||||
2. Admins can define custom roles in a Space and assign them to users.
|
||||
3. Child rooms can require one or more roles for access.
|
||||
4. Enforcement is continuous — role revocation auto-kicks users from rooms they
|
||||
no longer qualify for.
|
||||
5. Users are auto-joined to all qualifying child rooms when they join a Space or
|
||||
receive a new role.
|
||||
6. Cascading applies to direct parent Space only; no nested cascade through
|
||||
sub-spaces.
|
||||
7. Feature is toggled by a single server-wide config flag
|
||||
(`space_permission_cascading`), off by default.
|
||||
|
||||
## Configuration
|
||||
|
||||
```toml
|
||||
# conduwuit-example.toml
|
||||
|
||||
# Enable space permission cascading (power levels and role-based access).
|
||||
# When enabled, power levels cascade from Spaces to child rooms and rooms
|
||||
# can require roles for access. Applies to all Spaces on this server.
|
||||
# Default: false
|
||||
space_permission_cascading = false
|
||||
```
|
||||
|
||||
## Custom State Events
|
||||
|
||||
All events live in the Space room.
|
||||
|
||||
### `m.space.roles` (state key: `""`)
|
||||
|
||||
Defines the available roles for the Space. Two default roles (`admin` and `mod`)
|
||||
are created automatically when a Space is first encountered with the feature
|
||||
enabled.
|
||||
|
||||
```json
|
||||
{
|
||||
"roles": {
|
||||
"admin": {
|
||||
"description": "Space administrator",
|
||||
"power_level": 100
|
||||
},
|
||||
"mod": {
|
||||
"description": "Space moderator",
|
||||
"power_level": 50
|
||||
},
|
||||
"nsfw": {
|
||||
"description": "Access to NSFW content"
|
||||
},
|
||||
"vip": {
|
||||
"description": "VIP member"
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
- `description` (string, required): Human-readable description.
|
||||
- `power_level` (integer, optional): If present, users with this role receive
|
||||
this power level in all child rooms. When a user holds multiple roles with
|
||||
power levels, the highest value wins.
|
||||
|
||||
### `m.space.role.member` (state key: user ID)
|
||||
|
||||
Assigns roles to a user within the Space.
|
||||
|
||||
```json
|
||||
{
|
||||
"roles": ["nsfw", "vip"]
|
||||
}
|
||||
```
|
||||
|
||||
### `m.space.role.room` (state key: room ID)
|
||||
|
||||
Declares which roles a child room requires. A user must hold **all** listed
|
||||
roles to access the room.
|
||||
|
||||
```json
|
||||
{
|
||||
"required_roles": ["nsfw"]
|
||||
}
|
||||
```
|
||||
|
||||
## Enforcement Rules
|
||||
|
||||
All enforcement is skipped when `space_permission_cascading = false`.
|
||||
|
||||
### 1. Join gating
|
||||
|
||||
When a user attempts to join a room that is a direct child of a Space:
|
||||
|
||||
- Look up the room's `m.space.role.room` event in the parent Space.
|
||||
- If the room has `required_roles`, check the user's `m.space.role.member`.
|
||||
- Reject the join if the user is missing any required role.
|
||||
|
||||
### 2. Power level override
|
||||
|
||||
For every user in a child room of a Space:
|
||||
|
||||
- Look up their roles via `m.space.role.member` in the parent Space.
|
||||
- For each role that has a `power_level`, take the highest value.
|
||||
- Override the user's power level in the child room's `m.room.power_levels`.
|
||||
- Reject attempts to manually set per-room power levels that conflict with
|
||||
Space-granted levels.
|
||||
|
||||
### 3. Role revocation
|
||||
|
||||
When an `m.space.role.member` event is updated and a role is removed:
|
||||
|
||||
- Identify all child rooms that require the removed role.
|
||||
- Auto-kick the user from rooms they no longer qualify for.
|
||||
- Recalculate and update the user's power level in all child rooms.
|
||||
|
||||
### 4. Room requirement change
|
||||
|
||||
When an `m.space.role.room` event is updated with new requirements:
|
||||
|
||||
- Check all current members of the room.
|
||||
- Auto-kick members who do not hold all newly required roles.
|
||||
|
||||
### 5. Auto-join on role grant
|
||||
|
||||
When an `m.space.role.member` event is updated and a role is added:
|
||||
|
||||
- Find all child rooms where the user now meets all required roles.
|
||||
- Auto-join the user to qualifying rooms they are not already in.
|
||||
|
||||
This also applies when a user first joins the Space — they are auto-joined to
|
||||
all child rooms they qualify for. Rooms with no role requirements auto-join all
|
||||
Space members.
|
||||
|
||||
### 6. New child room
|
||||
|
||||
When a new `m.space.child` event is added to a Space:
|
||||
|
||||
- Auto-join all qualifying Space members to the new child room.
|
||||
|
||||
## Caching & Indexing
|
||||
|
||||
The source of truth is always the state events. The server maintains an
|
||||
in-memory index for fast enforcement lookups, following the same patterns as the
|
||||
existing `roomid_spacehierarchy_cache`.
|
||||
|
||||
### Index structures
|
||||
|
||||
| Index | Source event |
|
||||
|------------------------------|------------------------|
|
||||
| Space → roles defined | `m.space.roles` |
|
||||
| Space → user → roles | `m.space.role.member` |
|
||||
| Space → room → required roles| `m.space.role.room` |
|
||||
| Room → parent Space | `m.space.child` (reverse lookup) |
|
||||
|
||||
The Space → child rooms mapping already exists.
|
||||
|
||||
### Cache invalidation triggers
|
||||
|
||||
| Event changed | Action |
|
||||
|----------------------------|-----------------------------------------------------|
|
||||
| `m.space.roles` | Refresh role definitions, revalidate all members |
|
||||
| `m.space.role.member` | Refresh user's roles, trigger auto-join/kick |
|
||||
| `m.space.role.room` | Refresh room requirements, trigger auto-join/kick |
|
||||
| `m.space.child` added | Index new child, auto-join qualifying members |
|
||||
| `m.space.child` removed | Remove from index (no auto-kick) |
|
||||
| Server startup | Full rebuild from state events |
|
||||
|
||||
## Admin Room Commands
|
||||
|
||||
Roles are managed via the existing admin room interface, which sends the
|
||||
appropriate state events under the hood and triggers enforcement.
|
||||
|
||||
```
|
||||
!admin space roles list <space>
|
||||
!admin space roles add <space> <role_name> [description] [power_level]
|
||||
!admin space roles remove <space> <role_name>
|
||||
!admin space roles assign <space> <user_id> <role_name>
|
||||
!admin space roles revoke <space> <user_id> <role_name>
|
||||
!admin space roles require <space> <room_id> <role_name>
|
||||
!admin space roles unrequire <space> <room_id> <role_name>
|
||||
!admin space roles user <space> <user_id>
|
||||
!admin space roles room <space> <room_id>
|
||||
```
|
||||
|
||||
## Architecture
|
||||
|
||||
**Approach:** Hybrid — state events for definition, database cache for
|
||||
enforcement.
|
||||
|
||||
- State events are the source of truth and federate normally.
|
||||
- The server maintains an in-memory cache/index for fast enforcement.
|
||||
- Cache is invalidated on relevant state event changes and fully rebuilt on
|
||||
startup.
|
||||
- All enforcement hooks (join gating, PL override, auto-join, auto-kick) check
|
||||
the feature flag first and no-op when disabled.
|
||||
- Existing clients can manage roles via Developer Tools (custom state events).
|
||||
The admin room commands provide a user-friendly interface.
|
||||
|
||||
## Scope
|
||||
|
||||
### In scope
|
||||
|
||||
- Server-wide feature flag
|
||||
- Custom state events for role definition, assignment, and room requirements
|
||||
- Power level cascading (Space always wins)
|
||||
- Continuous enforcement (auto-join, auto-kick)
|
||||
- Admin room commands
|
||||
- In-memory caching with invalidation
|
||||
- Default `admin` (PL 100) and `mod` (PL 50) roles
|
||||
|
||||
### Out of scope
|
||||
|
||||
- Client-side UI for role management
|
||||
- Nested cascade through sub-spaces
|
||||
- Per-space opt-in/opt-out (it is server-wide)
|
||||
- Federation-specific logic beyond normal state event replication
|
||||
1206
docs/plans/2026-03-17-space-permission-cascading.md
Normal file
1206
docs/plans/2026-03-17-space-permission-cascading.md
Normal file
File diff suppressed because it is too large
Load diff
|
|
@ -6,10 +6,10 @@
|
|||
"message": "Welcome to Continuwuity! Important announcements about the project will appear here."
|
||||
},
|
||||
{
|
||||
"id": 8,
|
||||
"id": 10,
|
||||
"mention_room": false,
|
||||
"date": "2026-01-12",
|
||||
"message": "Hey everyone!\n\nJust letting you know we've released [v0.5.3](https://forgejo.ellis.link/continuwuation/continuwuity/releases/tag/v0.5.3) - this one is a bit of a hotfix for an issue with inviting and allowing others to join rooms.\n\nIf you appreciate the round-the-clock work we've been doing to keep your servers secure over this holiday period, we'd really appreciate your support - you can sponsor individuals on our team using the 'sponsor' button at the top of [our GitHub repository](https://github.com/continuwuity/continuwuity). If you can't do that, even a star helps - spreading the word and advocating for our project helps keep it going.\n\nHave a lovely rest of your year \\\n[Jade \\(she/her\\)](https://matrix.to/#/%40jade%3Aellis.link) \n🩵"
|
||||
"date": "2026-03-03",
|
||||
"message": "We've just released [v0.5.6](https://forgejo.ellis.link/continuwuation/continuwuity/releases/tag/v0.5.6), which contains a few security improvements - plus significant reliability and performance improvements. Please update as soon as possible. \n\nWe released [v0.5.5](https://forgejo.ellis.link/continuwuation/continuwuity/releases/tag/v0.5.5) two weeks ago, but it skipped your admin room straight to [our announcements channel](https://matrix.to/#/!jIdNjSM5X-V5JVx2h2kAhUZIIQ08GyzPL55NFZAH1vM?via=ellis.link&via=gingershaped.computer&via=matrix.org). Make sure you're there to get important information as soon as we announce it! [Our space](https://matrix.to/#/!8cR4g-i9ucof69E4JHNg9LbPVkGprHb3SzcrGBDDJgk?via=continuwuity.org&via=ellis.link&via=matrix.org) has also gained a bunch of new and interesting rooms - be there or be square."
|
||||
}
|
||||
]
|
||||
}
|
||||
|
|
|
|||
|
|
@ -1 +1 @@
|
|||
{"m.homeserver":{"base_url": "https://matrix.continuwuity.org"},"org.matrix.msc3575.proxy":{"url": "https://matrix.continuwuity.org"},"org.matrix.msc4143.rtc_foci":[{"type":"livekit","livekit_service_url":"https://livekit.ellis.link"}]}
|
||||
{"m.homeserver":{"base_url": "https://matrix.continuwuity.org"},"org.matrix.msc4143.rtc_foci":[{"type":"livekit","livekit_service_url":"https://livekit.ellis.link"}]}
|
||||
|
|
|
|||
|
|
@ -4,6 +4,11 @@
|
|||
"name": "config",
|
||||
"label": "Configuration"
|
||||
},
|
||||
{
|
||||
"type": "file",
|
||||
"name": "environment-variables",
|
||||
"label": "Environment Variables"
|
||||
},
|
||||
{
|
||||
"type": "file",
|
||||
"name": "admin",
|
||||
|
|
|
|||
|
|
@ -27,7 +27,7 @@ default.
|
|||
* Delete all remote and local media from 3 days ago, up until now:
|
||||
|
||||
`!admin media delete-past-remote-media -a 3d
|
||||
-yes-i-want-to-delete-local-media`
|
||||
--yes-i-want-to-delete-local-media`
|
||||
|
||||
## `!admin media delete-all-from-user`
|
||||
|
||||
|
|
@ -36,3 +36,7 @@ Deletes all the local media from a local user on our server. This will always ig
|
|||
## `!admin media delete-all-from-server`
|
||||
|
||||
Deletes all remote media from the specified remote server. This will always ignore errors by default
|
||||
|
||||
## `!admin media delete-url-preview`
|
||||
|
||||
Deletes a cached URL preview, forcing it to be re-fetched. Use --all to purge all cached URL previews
|
||||
|
|
|
|||
|
|
@ -112,6 +112,19 @@ Query the destinations cache
|
|||
|
||||
Query the overrides cache
|
||||
|
||||
### `!admin query resolver flush-cache`
|
||||
|
||||
Flush a given server from the resolver caches or flush them completely
|
||||
|
||||
* Examples:
|
||||
* Flush a specific server:
|
||||
|
||||
`!admin query resolver flush-cache matrix.example.com`
|
||||
|
||||
* Flush all resolver caches completely:
|
||||
|
||||
`!admin query resolver flush-cache --all`
|
||||
|
||||
## `!admin query pusher`
|
||||
|
||||
pusher service
|
||||
|
|
|
|||
281
docs/reference/environment-variables.mdx
Normal file
281
docs/reference/environment-variables.mdx
Normal file
|
|
@ -0,0 +1,281 @@
|
|||
# Environment Variables
|
||||
|
||||
Continuwuity can be configured entirely through environment variables, making it
|
||||
ideal for containerised deployments and infrastructure-as-code scenarios.
|
||||
|
||||
This is a convenience reference and may not be exhaustive. The
|
||||
[Configuration Reference](./config.mdx) is the primary source for all
|
||||
configuration options.
|
||||
|
||||
## Prefix System
|
||||
|
||||
Continuwuity supports three environment variable prefixes for backwards
|
||||
compatibility:
|
||||
|
||||
- `CONTINUWUITY_*` (current, recommended)
|
||||
- `CONDUWUIT_*` (compatibility)
|
||||
- `CONDUIT_*` (legacy)
|
||||
|
||||
All three prefixes work identically. Use double underscores (`__`) to represent
|
||||
nested configuration sections from the TOML config.
|
||||
|
||||
**Examples:**
|
||||
|
||||
```bash
|
||||
# Simple top-level config
|
||||
CONTINUWUITY_SERVER_NAME="matrix.example.com"
|
||||
CONTINUWUITY_PORT="8008"
|
||||
|
||||
# Nested config sections use double underscores
|
||||
# This maps to [database] section in TOML
|
||||
CONTINUWUITY_DATABASE__PATH="/var/lib/continuwuity"
|
||||
|
||||
# This maps to [tls] section in TOML
|
||||
CONTINUWUITY_TLS__CERTS="/path/to/cert.pem"
|
||||
```
|
||||
|
||||
## Configuration File Override
|
||||
|
||||
You can specify a custom configuration file path:
|
||||
|
||||
- `CONTINUWUITY_CONFIG` - Path to continuwuity.toml (current)
|
||||
- `CONDUWUIT_CONFIG` - Path to config file (compatibility)
|
||||
- `CONDUIT_CONFIG` - Path to config file (legacy)
|
||||
|
||||
## Essential Variables
|
||||
|
||||
These are the minimum variables needed for a working deployment:
|
||||
|
||||
| Variable | Description | Default |
|
||||
| ---------------------------- | ---------------------------------- | ---------------------- |
|
||||
| `CONTINUWUITY_SERVER_NAME` | Your Matrix server's domain name | Required |
|
||||
| `CONTINUWUITY_DATABASE_PATH` | Path to RocksDB database directory | `/var/lib/conduwuit` |
|
||||
| `CONTINUWUITY_ADDRESS` | IP address to bind to | `["127.0.0.1", "::1"]` |
|
||||
| `CONTINUWUITY_PORT` | Port to listen on | `8008` |
|
||||
|
||||
## Network Configuration
|
||||
|
||||
| Variable | Description | Default |
|
||||
| -------------------------------- | ----------------------------------------------- | ---------------------- |
|
||||
| `CONTINUWUITY_ADDRESS` | Bind address (use `0.0.0.0` for all interfaces) | `["127.0.0.1", "::1"]` |
|
||||
| `CONTINUWUITY_PORT` | HTTP port | `8008` |
|
||||
| `CONTINUWUITY_UNIX_SOCKET_PATH` | UNIX socket path (alternative to TCP) | - |
|
||||
| `CONTINUWUITY_UNIX_SOCKET_PERMS` | Socket permissions (octal) | `660` |
|
||||
|
||||
## Database Configuration
|
||||
|
||||
| Variable | Description | Default |
|
||||
| ------------------------------------------ | --------------------------- | -------------------- |
|
||||
| `CONTINUWUITY_DATABASE_PATH` | RocksDB data directory | `/var/lib/conduwuit` |
|
||||
| `CONTINUWUITY_DATABASE_BACKUP_PATH` | Backup directory | - |
|
||||
| `CONTINUWUITY_DATABASE_BACKUPS_TO_KEEP` | Number of backups to retain | `1` |
|
||||
| `CONTINUWUITY_DB_CACHE_CAPACITY_MB` | Database read cache (MB) | - |
|
||||
| `CONTINUWUITY_DB_WRITE_BUFFER_CAPACITY_MB` | Write cache (MB) | - |
|
||||
|
||||
## Cache Configuration
|
||||
|
||||
| Variable | Description |
|
||||
| ---------------------------------------- | ------------------------ |
|
||||
| `CONTINUWUITY_CACHE_CAPACITY_MODIFIER` | LRU cache multiplier |
|
||||
| `CONTINUWUITY_PDU_CACHE_CAPACITY` | PDU cache entries |
|
||||
| `CONTINUWUITY_AUTH_CHAIN_CACHE_CAPACITY` | Auth chain cache entries |
|
||||
|
||||
## DNS Configuration
|
||||
|
||||
Configure DNS resolution behaviour for federation and external requests.
|
||||
|
||||
| Variable | Description | Default |
|
||||
| ------------------------------------ | ---------------------------- | -------- |
|
||||
| `CONTINUWUITY_DNS_CACHE_ENTRIES` | Max DNS cache entries | `32768` |
|
||||
| `CONTINUWUITY_DNS_MIN_TTL` | Minimum cache TTL (seconds) | `10800` |
|
||||
| `CONTINUWUITY_DNS_MIN_TTL_NXDOMAIN` | NXDOMAIN cache TTL (seconds) | `259200` |
|
||||
| `CONTINUWUITY_DNS_ATTEMPTS` | Retry attempts | - |
|
||||
| `CONTINUWUITY_DNS_TIMEOUT` | Query timeout (seconds) | - |
|
||||
| `CONTINUWUITY_DNS_TCP_FALLBACK` | Allow TCP fallback | - |
|
||||
| `CONTINUWUITY_QUERY_ALL_NAMESERVERS` | Query all nameservers | - |
|
||||
| `CONTINUWUITY_QUERY_OVER_TCP_ONLY` | TCP-only queries | - |
|
||||
|
||||
## Request Configuration
|
||||
|
||||
| Variable | Description |
|
||||
| ------------------------------------ | ----------------------------- |
|
||||
| `CONTINUWUITY_MAX_REQUEST_SIZE` | Max HTTP request size (bytes) |
|
||||
| `CONTINUWUITY_REQUEST_CONN_TIMEOUT` | Connection timeout (seconds) |
|
||||
| `CONTINUWUITY_REQUEST_TIMEOUT` | Overall request timeout |
|
||||
| `CONTINUWUITY_REQUEST_TOTAL_TIMEOUT` | Total timeout |
|
||||
| `CONTINUWUITY_REQUEST_IDLE_TIMEOUT` | Idle timeout |
|
||||
| `CONTINUWUITY_REQUEST_IDLE_PER_HOST` | Idle connections per host |
|
||||
|
||||
## Federation Configuration
|
||||
|
||||
Control how your server federates with other Matrix servers.
|
||||
|
||||
| Variable | Description | Default |
|
||||
| ---------------------------------------------- | ----------------------------- | ------- |
|
||||
| `CONTINUWUITY_ALLOW_FEDERATION` | Enable federation | `true` |
|
||||
| `CONTINUWUITY_FEDERATION_LOOPBACK` | Allow loopback federation | - |
|
||||
| `CONTINUWUITY_FEDERATION_CONN_TIMEOUT` | Connection timeout | - |
|
||||
| `CONTINUWUITY_FEDERATION_TIMEOUT` | Request timeout | - |
|
||||
| `CONTINUWUITY_FEDERATION_IDLE_TIMEOUT` | Idle timeout | - |
|
||||
| `CONTINUWUITY_FEDERATION_IDLE_PER_HOST` | Idle connections per host | - |
|
||||
| `CONTINUWUITY_TRUSTED_SERVERS` | JSON array of trusted servers | - |
|
||||
| `CONTINUWUITY_QUERY_TRUSTED_KEY_SERVERS_FIRST` | Query trusted first | - |
|
||||
| `CONTINUWUITY_ONLY_QUERY_TRUSTED_KEY_SERVERS` | Only query trusted | - |
|
||||
|
||||
**Example:**
|
||||
|
||||
```bash
|
||||
# Trust matrix.org for key verification
|
||||
CONTINUWUITY_TRUSTED_SERVERS='["matrix.org"]'
|
||||
```
|
||||
|
||||
## Registration & User Configuration
|
||||
|
||||
Control user registration and account creation behaviour.
|
||||
|
||||
| Variable | Description | Default |
|
||||
| ------------------------------------------ | --------------------- | ------- |
|
||||
| `CONTINUWUITY_ALLOW_REGISTRATION` | Enable registration | `true` |
|
||||
| `CONTINUWUITY_REGISTRATION_TOKEN` | Token requirement | - |
|
||||
| `CONTINUWUITY_SUSPEND_ON_REGISTER` | Suspend new accounts | - |
|
||||
| `CONTINUWUITY_NEW_USER_DISPLAYNAME_SUFFIX` | Display name suffix | 🏳️⚧️ |
|
||||
| `CONTINUWUITY_RECAPTCHA_SITE_KEY` | reCAPTCHA site key | - |
|
||||
| `CONTINUWUITY_RECAPTCHA_PRIVATE_SITE_KEY` | reCAPTCHA private key | - |
|
||||
|
||||
**Example:**
|
||||
|
||||
```bash
|
||||
# Disable open registration
|
||||
CONTINUWUITY_ALLOW_REGISTRATION="false"
|
||||
|
||||
# Require a registration token
|
||||
CONTINUWUITY_REGISTRATION_TOKEN="your_secret_token_here"
|
||||
```
|
||||
|
||||
## Feature Configuration
|
||||
|
||||
| Variable | Description | Default |
|
||||
| ---------------------------------------------------------- | -------------------------- | ------- |
|
||||
| `CONTINUWUITY_ALLOW_ENCRYPTION` | Enable E2EE | `true` |
|
||||
| `CONTINUWUITY_ALLOW_ROOM_CREATION` | Enable room creation | - |
|
||||
| `CONTINUWUITY_ALLOW_UNSTABLE_ROOM_VERSIONS` | Allow unstable versions | - |
|
||||
| `CONTINUWUITY_DEFAULT_ROOM_VERSION` | Default room version | `v11` |
|
||||
| `CONTINUWUITY_REQUIRE_AUTH_FOR_PROFILE_REQUESTS` | Auth for profiles | - |
|
||||
| `CONTINUWUITY_ALLOW_PUBLIC_ROOM_DIRECTORY_OVER_FEDERATION` | Federate directory | - |
|
||||
| `CONTINUWUITY_ALLOW_PUBLIC_ROOM_DIRECTORY_WITHOUT_AUTH` | Unauth directory | - |
|
||||
| `CONTINUWUITY_ALLOW_DEVICE_NAME_FEDERATION` | Device names in federation | - |
|
||||
|
||||
## TLS Configuration
|
||||
|
||||
Built-in TLS support is primarily for testing. **For production deployments,
|
||||
especially when federating on the internet, use a reverse proxy** (Traefik,
|
||||
Caddy, nginx) to handle TLS termination.
|
||||
|
||||
| Variable | Description |
|
||||
| --------------------------------- | ------------------------- |
|
||||
| `CONTINUWUITY_TLS__CERTS` | TLS certificate file path |
|
||||
| `CONTINUWUITY_TLS__KEY` | TLS private key path |
|
||||
| `CONTINUWUITY_TLS__DUAL_PROTOCOL` | Support TLS 1.2 + 1.3 |
|
||||
|
||||
**Example (testing only):**
|
||||
|
||||
```bash
|
||||
CONTINUWUITY_TLS__CERTS="/etc/letsencrypt/live/matrix.example.com/fullchain.pem"
|
||||
CONTINUWUITY_TLS__KEY="/etc/letsencrypt/live/matrix.example.com/privkey.pem"
|
||||
```
|
||||
|
||||
## Logging Configuration
|
||||
|
||||
Control log output format and verbosity.
|
||||
|
||||
| Variable | Description | Default |
|
||||
| ------------------------------ | ------------------ | ------- |
|
||||
| `CONTINUWUITY_LOG` | Log filter level | - |
|
||||
| `CONTINUWUITY_LOG_COLORS` | ANSI colours | `true` |
|
||||
| `CONTINUWUITY_LOG_SPAN_EVENTS` | Log span events | `none` |
|
||||
| `CONTINUWUITY_LOG_THREAD_IDS` | Include thread IDs | - |
|
||||
|
||||
**Examples:**
|
||||
|
||||
```bash
|
||||
# Set log level to info
|
||||
CONTINUWUITY_LOG="info"
|
||||
|
||||
# Enable debug logging for specific modules
|
||||
CONTINUWUITY_LOG="warn,continuwuity::api=debug"
|
||||
|
||||
# Disable colours for log aggregation
|
||||
CONTINUWUITY_LOG_COLORS="false"
|
||||
```
|
||||
|
||||
## Observability Configuration
|
||||
|
||||
| Variable | Description |
|
||||
| ---------------------------------------- | --------------------- |
|
||||
| `CONTINUWUITY_ALLOW_OTLP` | Enable OpenTelemetry |
|
||||
| `CONTINUWUITY_OTLP_FILTER` | OTLP filter level |
|
||||
| `CONTINUWUITY_OTLP_PROTOCOL` | Protocol (http/grpc) |
|
||||
| `CONTINUWUITY_TRACING_FLAME` | Enable flame graphs |
|
||||
| `CONTINUWUITY_TRACING_FLAME_FILTER` | Flame graph filter |
|
||||
| `CONTINUWUITY_TRACING_FLAME_OUTPUT_PATH` | Output directory |
|
||||
| `CONTINUWUITY_SENTRY` | Enable Sentry |
|
||||
| `CONTINUWUITY_SENTRY_ENDPOINT` | Sentry DSN |
|
||||
| `CONTINUWUITY_SENTRY_SEND_SERVER_NAME` | Include server name |
|
||||
| `CONTINUWUITY_SENTRY_TRACES_SAMPLE_RATE` | Sample rate (0.0-1.0) |
|
||||
|
||||
## Admin Configuration
|
||||
|
||||
Configure admin users and automated command execution.
|
||||
|
||||
| Variable | Description | Default |
|
||||
| ------------------------------------------ | -------------------------------- | ----------------- |
|
||||
| `CONTINUWUITY_ADMINS_LIST` | JSON array of admin user IDs | - |
|
||||
| `CONTINUWUITY_ADMINS_FROM_ROOM` | Derive admins from room | - |
|
||||
| `CONTINUWUITY_ADMIN_ESCAPE_COMMANDS` | Allow `\` prefix in public rooms | - |
|
||||
| `CONTINUWUITY_ADMIN_CONSOLE_AUTOMATIC` | Auto-activate console | - |
|
||||
| `CONTINUWUITY_ADMIN_EXECUTE` | JSON array of startup commands | - |
|
||||
| `CONTINUWUITY_ADMIN_EXECUTE_ERRORS_IGNORE` | Ignore command errors | - |
|
||||
| `CONTINUWUITY_ADMIN_SIGNAL_EXECUTE` | Commands on SIGUSR2 | - |
|
||||
| `CONTINUWUITY_ADMIN_ROOM_TAG` | Admin room tag | `m.server_notice` |
|
||||
|
||||
**Examples:**
|
||||
|
||||
```bash
|
||||
# Create admin user on startup
|
||||
CONTINUWUITY_ADMIN_EXECUTE='["users create-user admin", "users make-user-admin admin"]'
|
||||
|
||||
# Specify admin users directly
|
||||
CONTINUWUITY_ADMINS_LIST='["@alice:example.com", "@bob:example.com"]'
|
||||
```
|
||||
|
||||
## Media & URL Preview Configuration
|
||||
|
||||
| Variable | Description |
|
||||
| ---------------------------------------------------- | ------------------ |
|
||||
| `CONTINUWUITY_URL_PREVIEW_BOUND_INTERFACE` | Bind interface |
|
||||
| `CONTINUWUITY_URL_PREVIEW_DOMAIN_CONTAINS_ALLOWLIST` | Domain allowlist |
|
||||
| `CONTINUWUITY_URL_PREVIEW_DOMAIN_EXPLICIT_ALLOWLIST` | Explicit allowlist |
|
||||
| `CONTINUWUITY_URL_PREVIEW_DOMAIN_EXPLICIT_DENYLIST` | Explicit denylist |
|
||||
| `CONTINUWUITY_URL_PREVIEW_MAX_SPIDER_SIZE` | Max fetch size |
|
||||
| `CONTINUWUITY_URL_PREVIEW_TIMEOUT` | Fetch timeout |
|
||||
| `CONTINUWUITY_IP_RANGE_DENYLIST` | IP range denylist |
|
||||
|
||||
## Tokio Runtime Configuration
|
||||
|
||||
These can be set as environment variables or CLI arguments:
|
||||
|
||||
| Variable | Description |
|
||||
| ----------------------------------------- | -------------------------- |
|
||||
| `TOKIO_WORKER_THREADS` | Worker thread count |
|
||||
| `TOKIO_GLOBAL_QUEUE_INTERVAL` | Global queue interval |
|
||||
| `TOKIO_EVENT_INTERVAL` | Event interval |
|
||||
| `TOKIO_MAX_IO_EVENTS_PER_TICK` | Max I/O events per tick |
|
||||
| `CONTINUWUITY_RUNTIME_HISTOGRAM_INTERVAL` | Histogram bucket size (μs) |
|
||||
| `CONTINUWUITY_RUNTIME_HISTOGRAM_BUCKETS` | Bucket count |
|
||||
| `CONTINUWUITY_RUNTIME_WORKER_AFFINITY` | Enable worker affinity |
|
||||
|
||||
## See Also
|
||||
|
||||
- [Configuration Reference](./config.mdx) - Complete TOML configuration
|
||||
documentation
|
||||
- [Admin Commands](./admin/) - Admin command reference
|
||||
|
|
@ -1,12 +1,37 @@
|
|||
# Troubleshooting Continuwuity
|
||||
|
||||
> **Docker users ⚠️**
|
||||
>
|
||||
> Docker can be difficult to use and debug. It's common for Docker
|
||||
> misconfigurations to cause issues, particularly with networking and permissions.
|
||||
> Please check that your issues are not due to problems with your Docker setup.
|
||||
:::warning{title="Docker users:"}
|
||||
Docker can be difficult to use and debug. It's common for Docker
|
||||
misconfigurations to cause issues, particularly with networking and permissions.
|
||||
Please check that your issues are not due to problems with your Docker setup.
|
||||
:::
|
||||
|
||||
## Continuwuity and Matrix issues
|
||||
## Continuwuity issues
|
||||
|
||||
### Slow joins to rooms
|
||||
|
||||
Some slowness is to be expected if you're the first person on your homserver to join a room (which will
|
||||
always be the case for single-user homeservers). In this situation, your homeserver has to verify the signatures of
|
||||
all of the state events sent by other servers before your join. To make this process as fast as possible, make sure you have
|
||||
multiple fast, trusted servers listed in `trusted_servers` in your configuration, and ensure
|
||||
`query_trusted_key_servers_first_on_join` is set to true (the default).
|
||||
If you need suggestions for trusted servers, ask in the Continuwuity main room.
|
||||
|
||||
However, _very_ slow joins, especially to rooms with only a few users in them or rooms created by another user
|
||||
on your homeserver, may be caused by [issue !779](https://forgejo.ellis.link/continuwuation/continuwuity/issues/779),
|
||||
which is a longstanding bug with synchronizing room joins to clients. In this situation, you did succeed in joining the room, but
|
||||
the bug caused your homeserver to forget to tell your client. **To fix this, clear your client's cache.** Both Element and Cinny
|
||||
have a button to clear their cache in the "About" section of their settings.
|
||||
|
||||
### Configuration not working as expected
|
||||
|
||||
Sometimes you can make a mistake in your configuration that
|
||||
means things don't get passed to Continuwuity correctly.
|
||||
This is particularly easy to do with environment variables.
|
||||
To check what configuration Continuwuity actually sees, you can
|
||||
use the `!admin server show-config` command in your admin room.
|
||||
Beware that this prints out any secrets in your configuration,
|
||||
so you might want to delete the result afterwards!
|
||||
|
||||
### Lost access to admin room
|
||||
|
||||
|
|
@ -18,7 +43,7 @@ argument once to invite yourslf to the admin room on startup
|
|||
- Or specify the `emergency_password` config option to allow you to temporarily
|
||||
log into the server account (`@conduit`) from a web client
|
||||
|
||||
## General potential issues
|
||||
## DNS issues
|
||||
|
||||
### Potential DNS issues when using Docker
|
||||
|
||||
|
|
|
|||
|
|
@ -1,94 +0,0 @@
|
|||
# Setting up TURN/STURN
|
||||
|
||||
In order to make or receive calls, a TURN server is required. Continuwuity suggests
|
||||
using [Coturn](https://github.com/coturn/coturn) for this purpose, which is also
|
||||
available as a Docker image.
|
||||
|
||||
### Configuration
|
||||
|
||||
Create a configuration file called `coturn.conf` containing:
|
||||
|
||||
```
|
||||
use-auth-secret
|
||||
static-auth-secret=<a secret key>
|
||||
realm=<your server domain>
|
||||
```
|
||||
|
||||
A common way to generate a suitable alphanumeric secret key is by using `pwgen
|
||||
-s 64 1`.
|
||||
|
||||
These same values need to be set in Continuwuity. See the [example
|
||||
config](./reference/config.mdx) in the TURN section for configuring these and
|
||||
restart Continuwuity after.
|
||||
|
||||
`turn_secret` or a path to `turn_secret_file` must have a value of your
|
||||
coturn `static-auth-secret`, or use `turn_username` and `turn_password`
|
||||
if using legacy username:password TURN authentication (not preferred).
|
||||
|
||||
`turn_uris` must be the list of TURN URIs you would like to send to the client.
|
||||
Typically you will just replace the example domain `example.turn.uri` with the
|
||||
`realm` you set from the example config.
|
||||
|
||||
If you are using TURN over TLS, you can replace `turn:` with `turns:` in the
|
||||
`turn_uris` config option to instruct clients to attempt to connect to
|
||||
TURN over TLS. This is highly recommended.
|
||||
|
||||
If you need unauthenticated access to the TURN URIs, or some clients may be
|
||||
having trouble, you can enable `turn_guest_access` in Continuwuity which disables
|
||||
authentication for the TURN URI endpoint `/_matrix/client/v3/voip/turnServer`
|
||||
|
||||
### Run
|
||||
|
||||
Run the [Coturn](https://hub.docker.com/r/coturn/coturn) image using
|
||||
|
||||
```bash
|
||||
docker run -d --network=host -v
|
||||
$(pwd)/coturn.conf:/etc/coturn/turnserver.conf coturn/coturn
|
||||
```
|
||||
|
||||
or docker-compose. For the latter, paste the following section into a file
|
||||
called `docker-compose.yml` and run `docker compose up -d` in the same
|
||||
directory.
|
||||
|
||||
```yml
|
||||
version: 3
|
||||
services:
|
||||
turn:
|
||||
container_name: coturn-server
|
||||
image: docker.io/coturn/coturn
|
||||
restart: unless-stopped
|
||||
network_mode: "host"
|
||||
volumes:
|
||||
- ./coturn.conf:/etc/coturn/turnserver.conf
|
||||
```
|
||||
|
||||
To understand why the host networking mode is used and explore alternative
|
||||
configuration options, please visit [Coturn's Docker
|
||||
documentation](https://github.com/coturn/coturn/blob/master/docker/coturn/README.md).
|
||||
|
||||
For security recommendations see Synapse's [Coturn
|
||||
documentation](https://element-hq.github.io/synapse/latest/turn-howto.html).
|
||||
|
||||
### Testing
|
||||
|
||||
To make sure turn credentials are being correctly served to clients, you can manually make a HTTP request to the turnServer endpoint.
|
||||
|
||||
`curl "https://<matrix.example.com>/_matrix/client/r0/voip/turnServer" -H 'Authorization: Bearer <your_client_token>' | jq`
|
||||
|
||||
You should get a response like this:
|
||||
|
||||
```json
|
||||
{
|
||||
"username": "1752792167:@jade:example.com",
|
||||
"password": "KjlDlawdPbU9mvP4bhdV/2c/h65=",
|
||||
"uris": [
|
||||
"turns:coturn.example.com?transport=udp",
|
||||
"turns:coturn.example.com?transport=tcp",
|
||||
"turn:coturn.example.com?transport=udp",
|
||||
"turn:coturn.example.com?transport=tcp"
|
||||
],
|
||||
"ttl": 86400
|
||||
}
|
||||
```
|
||||
|
||||
You can test these credentials work using [Trickle ICE](https://webrtc.github.io/samples/src/content/peerconnection/trickle-ice/)
|
||||
54
flake.lock
generated
54
flake.lock
generated
|
|
@ -3,11 +3,11 @@
|
|||
"advisory-db": {
|
||||
"flake": false,
|
||||
"locked": {
|
||||
"lastModified": 1766324728,
|
||||
"narHash": "sha256-9C+WyE5U3y5w4WQXxmb0ylRyMMsPyzxielWXSHrcDpE=",
|
||||
"lastModified": 1773786698,
|
||||
"narHash": "sha256-o/J7ZculgwSs1L4H4UFlFZENOXTJzq1X0n71x6oNNvY=",
|
||||
"owner": "rustsec",
|
||||
"repo": "advisory-db",
|
||||
"rev": "c88b88c62bda077be8aa621d4e89d8701e39cb5d",
|
||||
"rev": "99e9de91bb8b61f06ef234ff84e11f758ecd5384",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
|
|
@ -18,11 +18,11 @@
|
|||
},
|
||||
"crane": {
|
||||
"locked": {
|
||||
"lastModified": 1766194365,
|
||||
"narHash": "sha256-4AFsUZ0kl6MXSm4BaQgItD0VGlEKR3iq7gIaL7TjBvc=",
|
||||
"lastModified": 1773189535,
|
||||
"narHash": "sha256-E1G/Or6MWeP+L6mpQ0iTFLpzSzlpGrITfU2220Gq47g=",
|
||||
"owner": "ipetkov",
|
||||
"repo": "crane",
|
||||
"rev": "7d8ec2c71771937ab99790b45e6d9b93d15d9379",
|
||||
"rev": "6fa2fb4cf4a89ba49fc9dd5a3eb6cde99d388269",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
|
|
@ -39,11 +39,11 @@
|
|||
"rust-analyzer-src": "rust-analyzer-src"
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1766299592,
|
||||
"narHash": "sha256-7u+q5hexu2eAxL2VjhskHvaUKg+GexmelIR2ve9Nbb4=",
|
||||
"lastModified": 1773732206,
|
||||
"narHash": "sha256-HKibxaUXyWd4Hs+ZUnwo6XslvaFqFqJh66uL9tphU4Q=",
|
||||
"owner": "nix-community",
|
||||
"repo": "fenix",
|
||||
"rev": "381579dee168d5ced412e2990e9637ecc7cf1c5d",
|
||||
"rev": "0aa13c1b54063a8d8679b28a5cd357ba98f4a56b",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
|
|
@ -55,11 +55,11 @@
|
|||
"flake-compat": {
|
||||
"flake": false,
|
||||
"locked": {
|
||||
"lastModified": 1765121682,
|
||||
"narHash": "sha256-4VBOP18BFeiPkyhy9o4ssBNQEvfvv1kXkasAYd0+rrA=",
|
||||
"lastModified": 1767039857,
|
||||
"narHash": "sha256-vNpUSpF5Nuw8xvDLj2KCwwksIbjua2LZCqhV1LNRDns=",
|
||||
"owner": "edolstra",
|
||||
"repo": "flake-compat",
|
||||
"rev": "65f23138d8d09a92e30f1e5c87611b23ef451bf3",
|
||||
"rev": "5edf11c44bc78a0d334f6334cdaf7d60d732daab",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
|
|
@ -74,11 +74,11 @@
|
|||
"nixpkgs-lib": "nixpkgs-lib"
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1765835352,
|
||||
"narHash": "sha256-XswHlK/Qtjasvhd1nOa1e8MgZ8GS//jBoTqWtrS1Giw=",
|
||||
"lastModified": 1772408722,
|
||||
"narHash": "sha256-rHuJtdcOjK7rAHpHphUb1iCvgkU3GpfvicLMwwnfMT0=",
|
||||
"owner": "hercules-ci",
|
||||
"repo": "flake-parts",
|
||||
"rev": "a34fae9c08a15ad73f295041fec82323541400a9",
|
||||
"rev": "f20dc5d9b8027381c474144ecabc9034d6a839a3",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
|
|
@ -89,11 +89,11 @@
|
|||
},
|
||||
"nixpkgs": {
|
||||
"locked": {
|
||||
"lastModified": 1766070988,
|
||||
"narHash": "sha256-G/WVghka6c4bAzMhTwT2vjLccg/awmHkdKSd2JrycLc=",
|
||||
"lastModified": 1773734432,
|
||||
"narHash": "sha256-IF5ppUWh6gHGHYDbtVUyhwy/i7D261P7fWD1bPefOsw=",
|
||||
"owner": "NixOS",
|
||||
"repo": "nixpkgs",
|
||||
"rev": "c6245e83d836d0433170a16eb185cefe0572f8b8",
|
||||
"rev": "cda48547b432e8d3b18b4180ba07473762ec8558",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
|
|
@ -105,11 +105,11 @@
|
|||
},
|
||||
"nixpkgs-lib": {
|
||||
"locked": {
|
||||
"lastModified": 1765674936,
|
||||
"narHash": "sha256-k00uTP4JNfmejrCLJOwdObYC9jHRrr/5M/a/8L2EIdo=",
|
||||
"lastModified": 1772328832,
|
||||
"narHash": "sha256-e+/T/pmEkLP6BHhYjx6GmwP5ivonQQn0bJdH9YrRB+Q=",
|
||||
"owner": "nix-community",
|
||||
"repo": "nixpkgs.lib",
|
||||
"rev": "2075416fcb47225d9b68ac469a5c4801a9c4dd85",
|
||||
"rev": "c185c7a5e5dd8f9add5b2f8ebeff00888b070742",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
|
|
@ -132,11 +132,11 @@
|
|||
"rust-analyzer-src": {
|
||||
"flake": false,
|
||||
"locked": {
|
||||
"lastModified": 1766253897,
|
||||
"narHash": "sha256-ChK07B1aOlJ4QzWXpJo+y8IGAxp1V9yQ2YloJ+RgHRw=",
|
||||
"lastModified": 1773697963,
|
||||
"narHash": "sha256-xdKI77It9PM6eNrCcDZsnP4SKulZwk8VkDgBRVMnCb8=",
|
||||
"owner": "rust-lang",
|
||||
"repo": "rust-analyzer",
|
||||
"rev": "765b7bdb432b3740f2d564afccfae831d5a972e4",
|
||||
"rev": "2993637174252ff60a582fd1f55b9ab52c39db6d",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
|
|
@ -153,11 +153,11 @@
|
|||
]
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1766000401,
|
||||
"narHash": "sha256-+cqN4PJz9y0JQXfAK5J1drd0U05D5fcAGhzhfVrDlsI=",
|
||||
"lastModified": 1773297127,
|
||||
"narHash": "sha256-6E/yhXP7Oy/NbXtf1ktzmU8SdVqJQ09HC/48ebEGBpk=",
|
||||
"owner": "numtide",
|
||||
"repo": "treefmt-nix",
|
||||
"rev": "42d96e75aa56a3f70cab7e7dc4a32868db28e8fd",
|
||||
"rev": "71b125cd05fbfd78cab3e070b73544abe24c5016",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
|
|
|
|||
|
|
@ -12,7 +12,6 @@
|
|||
|
||||
rocksdbAllFeatures = self'.packages.rocksdb.override {
|
||||
enableJemalloc = true;
|
||||
enableLiburing = true;
|
||||
};
|
||||
|
||||
commonAttrs = (uwulib.build.commonAttrs { }) // {
|
||||
|
|
|
|||
|
|
@ -27,7 +27,6 @@
|
|||
commonAttrsArgs.profile = "release";
|
||||
rocksdb = self'.packages.rocksdb.override {
|
||||
enableJemalloc = true;
|
||||
enableLiburing = true;
|
||||
};
|
||||
features = {
|
||||
enabledFeatures = "all";
|
||||
|
|
|
|||
|
|
@ -7,7 +7,6 @@
|
|||
rust-jemalloc-sys-unprefixed,
|
||||
|
||||
enableJemalloc ? false,
|
||||
enableLiburing ? false,
|
||||
|
||||
fetchFromGitea,
|
||||
|
||||
|
|
@ -32,7 +31,7 @@ in
|
|||
|
||||
# for some reason enableLiburing in nixpkgs rocksdb is default true
|
||||
# which breaks Darwin entirely
|
||||
enableLiburing = enableLiburing && notDarwin;
|
||||
enableLiburing = notDarwin;
|
||||
}).overrideAttrs
|
||||
(old: {
|
||||
src = fetchFromGitea {
|
||||
|
|
@ -74,7 +73,7 @@ in
|
|||
"USE_RTTI"
|
||||
]);
|
||||
|
||||
enableLiburing = enableLiburing && notDarwin;
|
||||
enableLiburing = notDarwin;
|
||||
|
||||
# outputs has "tools" which we don't need or use
|
||||
outputs = [ "out" ];
|
||||
|
|
|
|||
|
|
@ -20,7 +20,7 @@ rec {
|
|||
# we need to keep the `web` directory which would be filtered out by the regular source filtering function
|
||||
#
|
||||
# https://crane.dev/API.html#cranelibcleancargosource
|
||||
isWebTemplate = path: _type: builtins.match ".*src/web.*" path != null;
|
||||
isWebTemplate = path: _type: builtins.match ".*(src/(web|service)|docs).*" path != null;
|
||||
isRust = craneLib.filterCargoSources;
|
||||
isNix = path: _type: builtins.match ".+/nix.*" path != null;
|
||||
webOrRustNotNix = p: t: !(isNix p t) && (isWebTemplate p t || isRust p t);
|
||||
|
|
@ -77,7 +77,12 @@ rec {
|
|||
craneLib.buildDepsOnly (
|
||||
(commonAttrs commonAttrsArgs)
|
||||
// {
|
||||
env = uwuenv.buildDepsOnlyEnv // (makeRocksDBEnv { inherit rocksdb; });
|
||||
env = uwuenv.buildDepsOnlyEnv
|
||||
// (makeRocksDBEnv { inherit rocksdb; })
|
||||
// {
|
||||
# required since we started using unstable reqwest apparently ... otherwise the all-features build will fail
|
||||
RUSTFLAGS = "--cfg reqwest_unstable";
|
||||
};
|
||||
inherit (features) cargoExtraArgs;
|
||||
}
|
||||
|
||||
|
|
@ -102,7 +107,13 @@ rec {
|
|||
'';
|
||||
cargoArtifacts = deps;
|
||||
doCheck = true;
|
||||
env = uwuenv.buildPackageEnv // rocksdbEnv;
|
||||
env =
|
||||
uwuenv.buildPackageEnv
|
||||
// rocksdbEnv
|
||||
// {
|
||||
# required since we started using unstable reqwest apparently ... otherwise the all-features build will fail
|
||||
RUSTFLAGS = "--cfg reqwest_unstable";
|
||||
};
|
||||
passthru.env = uwuenv.buildPackageEnv // rocksdbEnv;
|
||||
meta.mainProgram = crateInfo.pname;
|
||||
inherit (features) cargoExtraArgs;
|
||||
|
|
|
|||
|
|
@ -11,7 +11,6 @@
|
|||
uwulib = inputs.self.uwulib.init pkgs;
|
||||
rocksdbAllFeatures = self'.packages.rocksdb.override {
|
||||
enableJemalloc = true;
|
||||
enableLiburing = true;
|
||||
};
|
||||
in
|
||||
{
|
||||
|
|
|
|||
629
package-lock.json
generated
629
package-lock.json
generated
File diff suppressed because it is too large
Load diff
|
|
@ -18,6 +18,7 @@ Environment="CONTINUWUITY_DATABASE_PATH=%S/conduwuit"
|
|||
Environment="CONTINUWUITY_CONFIG_RELOAD_SIGNAL=true"
|
||||
|
||||
LoadCredential=conduwuit.toml:/etc/conduwuit/conduwuit.toml
|
||||
RefreshOnReload=yes
|
||||
|
||||
ExecStart=/usr/bin/conduwuit --config ${CREDENTIALS_DIRECTORY}/conduwuit.toml
|
||||
|
||||
|
|
|
|||
|
|
@ -1,6 +1,7 @@
|
|||
{
|
||||
"$schema": "https://docs.renovatebot.com/renovate-schema.json",
|
||||
"extends": ["config:recommended", "replacements:all"],
|
||||
"extends": ["config:recommended", "replacements:all", ":semanticCommitTypeAll(chore)"],
|
||||
"dependencyDashboard": true,
|
||||
"osvVulnerabilityAlerts": true,
|
||||
"lockFileMaintenance": {
|
||||
"enabled": true,
|
||||
|
|
@ -35,10 +36,18 @@
|
|||
},
|
||||
"packageRules": [
|
||||
{
|
||||
"description": "Batch patch-level Rust dependency updates",
|
||||
"description": "Batch minor and patch Rust dependency updates",
|
||||
"matchManagers": ["cargo"],
|
||||
"matchUpdateTypes": ["minor", "patch"],
|
||||
"matchCurrentVersion": ">=1.0.0",
|
||||
"groupName": "rust-non-major"
|
||||
},
|
||||
{
|
||||
"description": "Batch patch-level zerover Rust dependency updates",
|
||||
"matchManagers": ["cargo"],
|
||||
"matchUpdateTypes": ["patch"],
|
||||
"groupName": "rust-patch-updates"
|
||||
"matchCurrentVersion": ">=0.1.0,<1.0.0",
|
||||
"groupName": "rust-zerover-patch-updates"
|
||||
},
|
||||
{
|
||||
"description": "Limit concurrent Cargo PRs",
|
||||
|
|
@ -57,12 +66,25 @@
|
|||
"matchUpdateTypes": ["minor", "patch"],
|
||||
"groupName": "github-actions-non-major"
|
||||
},
|
||||
{
|
||||
"description": "Batch patch-level Node.js dependency updates",
|
||||
"matchManagers": ["npm"],
|
||||
"matchUpdateTypes": ["patch"],
|
||||
"groupName": "node-patch-updates"
|
||||
},
|
||||
{
|
||||
"description": "Pin forgejo artifact actions to prevent breaking changes",
|
||||
"matchManagers": ["github-actions"],
|
||||
"matchPackageNames": ["forgejo/upload-artifact", "forgejo/download-artifact"],
|
||||
"enabled": false
|
||||
},
|
||||
{
|
||||
"description": "Auto-merge crate-ci/typos minor updates",
|
||||
"matchPackageNames": ["crate-ci/typos"],
|
||||
"matchUpdateTypes": ["minor", "patch"],
|
||||
"automerge": true,
|
||||
"automergeStrategy": "fast-forward"
|
||||
},
|
||||
{
|
||||
"description": "Auto-merge renovatebot docker image updates",
|
||||
"matchDatasources": ["docker"],
|
||||
|
|
|
|||
|
|
@ -56,6 +56,9 @@ export default defineConfig({
|
|||
}, {
|
||||
from: '/community$',
|
||||
to: '/community/guidelines'
|
||||
}, {
|
||||
from: "^/turn",
|
||||
to: "/calls/turn",
|
||||
}
|
||||
]
|
||||
})],
|
||||
|
|
|
|||
|
|
@ -11,6 +11,7 @@ use crate::{
|
|||
query::{self, QueryCommand},
|
||||
room::{self, RoomCommand},
|
||||
server::{self, ServerCommand},
|
||||
space::{self, SpaceCommand},
|
||||
token::{self, TokenCommand},
|
||||
user::{self, UserCommand},
|
||||
};
|
||||
|
|
@ -34,6 +35,10 @@ pub enum AdminCommand {
|
|||
/// Commands for managing rooms
|
||||
Rooms(RoomCommand),
|
||||
|
||||
#[command(subcommand)]
|
||||
/// Commands for managing space permissions
|
||||
Spaces(SpaceCommand),
|
||||
|
||||
#[command(subcommand)]
|
||||
/// Commands for managing federation
|
||||
Federation(FederationCommand),
|
||||
|
|
@ -81,6 +86,10 @@ pub(super) async fn process(command: AdminCommand, context: &Context<'_>) -> Res
|
|||
token::process(command, context).await
|
||||
},
|
||||
| Rooms(command) => room::process(command, context).await,
|
||||
| Spaces(command) => {
|
||||
context.bail_restricted()?;
|
||||
space::process(command, context).await
|
||||
},
|
||||
| Federation(command) => federation::process(command, context).await,
|
||||
| Server(command) => server::process(command, context).await,
|
||||
| Debug(command) => debug::process(command, context).await,
|
||||
|
|
|
|||
|
|
@ -1,6 +1,6 @@
|
|||
use std::fmt::Write;
|
||||
|
||||
use conduwuit::{Err, Result};
|
||||
use conduwuit::{Err, Result, utils::response::LimitReadExt};
|
||||
use futures::StreamExt;
|
||||
use ruma::{OwnedRoomId, OwnedServerName, OwnedUserId};
|
||||
|
||||
|
|
@ -30,12 +30,15 @@ pub(super) async fn incoming_federation(&self) -> Result {
|
|||
.federation_handletime
|
||||
.read();
|
||||
|
||||
let mut msg = format!("Handling {} incoming pdus:\n", map.len());
|
||||
let mut msg = format!(
|
||||
"Handling {} incoming PDUs across {} active transactions:\n",
|
||||
map.len(),
|
||||
self.services.transactions.txn_active_handle_count()
|
||||
);
|
||||
for (r, (e, i)) in map.iter() {
|
||||
let elapsed = i.elapsed();
|
||||
writeln!(msg, "{} {}: {}m{}s", r, e, elapsed.as_secs() / 60, elapsed.as_secs() % 60)?;
|
||||
}
|
||||
|
||||
msg
|
||||
};
|
||||
|
||||
|
|
@ -52,7 +55,15 @@ pub(super) async fn fetch_support_well_known(&self, server_name: OwnedServerName
|
|||
.send()
|
||||
.await?;
|
||||
|
||||
let text = response.text().await?;
|
||||
let text = response
|
||||
.limit_read_text(
|
||||
self.services
|
||||
.config
|
||||
.max_request_size
|
||||
.try_into()
|
||||
.expect("u64 fits into usize"),
|
||||
)
|
||||
.await?;
|
||||
|
||||
if text.is_empty() {
|
||||
return Err!("Response text/body is empty.");
|
||||
|
|
|
|||
|
|
@ -29,7 +29,9 @@ pub(super) async fn delete(
|
|||
.delete(&mxc.as_str().try_into()?)
|
||||
.await?;
|
||||
|
||||
return Err!("Deleted the MXC from our database and on our filesystem.",);
|
||||
return self
|
||||
.write_str("Deleted the MXC from our database and on our filesystem.")
|
||||
.await;
|
||||
}
|
||||
|
||||
if let Some(event_id) = event_id {
|
||||
|
|
@ -388,3 +390,19 @@ pub(super) async fn get_remote_thumbnail(
|
|||
self.write_str(&format!("```\n{result:#?}\nreceived {len} bytes for file content.\n```"))
|
||||
.await
|
||||
}
|
||||
|
||||
#[admin_command]
|
||||
pub(super) async fn delete_url_preview(&self, url: Option<String>, all: bool) -> Result {
|
||||
if all {
|
||||
self.services.media.clear_url_previews().await;
|
||||
|
||||
return self.write_str("Deleted all cached URL previews.").await;
|
||||
}
|
||||
|
||||
let url = url.expect("clap enforces url is required unless --all");
|
||||
|
||||
self.services.media.remove_url_preview(&url).await?;
|
||||
|
||||
self.write_str(&format!("Deleted cached URL preview for: {url}"))
|
||||
.await
|
||||
}
|
||||
|
|
|
|||
|
|
@ -40,7 +40,7 @@ pub enum MediaCommand {
|
|||
/// * Delete all remote and local media from 3 days ago, up until now:
|
||||
///
|
||||
/// `!admin media delete-past-remote-media -a 3d
|
||||
///-yes-i-want-to-delete-local-media`
|
||||
///--yes-i-want-to-delete-local-media`
|
||||
#[command(verbatim_doc_comment)]
|
||||
DeletePastRemoteMedia {
|
||||
/// The relative time (e.g. 30s, 5m, 7d) from now within which to
|
||||
|
|
@ -108,4 +108,16 @@ pub enum MediaCommand {
|
|||
#[arg(long, default_value("800"))]
|
||||
height: u32,
|
||||
},
|
||||
|
||||
/// Deletes a cached URL preview, forcing it to be re-fetched.
|
||||
/// Use --all to purge all cached URL previews.
|
||||
DeleteUrlPreview {
|
||||
/// The URL to clear from the saved preview data
|
||||
#[arg(required_unless_present = "all")]
|
||||
url: Option<String>,
|
||||
|
||||
/// Purge all cached URL previews
|
||||
#[arg(long, conflicts_with = "url")]
|
||||
all: bool,
|
||||
},
|
||||
}
|
||||
|
|
|
|||
|
|
@ -17,6 +17,7 @@ pub(crate) mod media;
|
|||
pub(crate) mod query;
|
||||
pub(crate) mod room;
|
||||
pub(crate) mod server;
|
||||
pub(crate) mod space;
|
||||
pub(crate) mod token;
|
||||
pub(crate) mod user;
|
||||
|
||||
|
|
|
|||
|
|
@ -209,7 +209,7 @@ pub(super) async fn compact(
|
|||
let parallelism = parallelism.unwrap_or(1);
|
||||
let results = maps
|
||||
.into_iter()
|
||||
.try_stream()
|
||||
.try_stream::<conduwuit::Error>()
|
||||
.paralleln_and_then(runtime, parallelism, move |map| {
|
||||
map.compact_blocking(options.clone())?;
|
||||
Ok(map.name().to_owned())
|
||||
|
|
|
|||
|
|
@ -1,5 +1,5 @@
|
|||
use clap::Subcommand;
|
||||
use conduwuit::{Result, utils::time};
|
||||
use conduwuit::{Err, Result, utils::time};
|
||||
use futures::StreamExt;
|
||||
use ruma::OwnedServerName;
|
||||
|
||||
|
|
@ -7,6 +7,7 @@ use crate::{admin_command, admin_command_dispatch};
|
|||
|
||||
#[admin_command_dispatch]
|
||||
#[derive(Debug, Subcommand)]
|
||||
#[allow(clippy::enum_variant_names)]
|
||||
/// Resolver service and caches
|
||||
pub enum ResolverCommand {
|
||||
/// Query the destinations cache
|
||||
|
|
@ -18,6 +19,24 @@ pub enum ResolverCommand {
|
|||
OverridesCache {
|
||||
name: Option<String>,
|
||||
},
|
||||
|
||||
/// Flush a given server from the resolver caches or flush them completely
|
||||
///
|
||||
/// * Examples:
|
||||
/// * Flush a specific server:
|
||||
///
|
||||
/// `!admin query resolver flush-cache matrix.example.com`
|
||||
///
|
||||
/// * Flush all resolver caches completely:
|
||||
///
|
||||
/// `!admin query resolver flush-cache --all`
|
||||
#[command(verbatim_doc_comment)]
|
||||
FlushCache {
|
||||
name: Option<OwnedServerName>,
|
||||
|
||||
#[arg(short, long)]
|
||||
all: bool,
|
||||
},
|
||||
}
|
||||
|
||||
#[admin_command]
|
||||
|
|
@ -69,3 +88,18 @@ async fn overrides_cache(&self, server_name: Option<String>) -> Result {
|
|||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[admin_command]
|
||||
async fn flush_cache(&self, name: Option<OwnedServerName>, all: bool) -> Result {
|
||||
if all {
|
||||
self.services.resolver.cache.clear().await;
|
||||
writeln!(self, "Resolver caches cleared!").await
|
||||
} else if let Some(name) = name {
|
||||
self.services.resolver.cache.del_destination(&name);
|
||||
self.services.resolver.cache.del_override(&name);
|
||||
self.write_str(&format!("Cleared {name} from resolver caches!"))
|
||||
.await
|
||||
} else {
|
||||
Err!("Missing name. Supply a name or use --all to flush the whole cache.")
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -4,12 +4,14 @@ use ruma::OwnedRoomId;
|
|||
|
||||
use crate::{PAGE_SIZE, admin_command, get_room_info};
|
||||
|
||||
#[allow(clippy::fn_params_excessive_bools)]
|
||||
#[admin_command]
|
||||
pub(super) async fn list_rooms(
|
||||
&self,
|
||||
page: Option<usize>,
|
||||
exclude_disabled: bool,
|
||||
exclude_banned: bool,
|
||||
include_empty: bool,
|
||||
no_details: bool,
|
||||
) -> Result {
|
||||
// TODO: i know there's a way to do this with clap, but i can't seem to find it
|
||||
|
|
@ -28,6 +30,20 @@ pub(super) async fn list_rooms(
|
|||
.then_some(room_id)
|
||||
})
|
||||
.then(|room_id| get_room_info(self.services, room_id))
|
||||
.then(|(room_id, total_members, name)| async move {
|
||||
let local_members: Vec<_> = self
|
||||
.services
|
||||
.rooms
|
||||
.state_cache
|
||||
.active_local_users_in_room(&room_id)
|
||||
.collect()
|
||||
.await;
|
||||
let local_members = local_members.len();
|
||||
(room_id, total_members, local_members, name)
|
||||
})
|
||||
.filter_map(|(room_id, total_members, local_members, name)| async move {
|
||||
(include_empty || local_members > 0).then_some((room_id, total_members, name))
|
||||
})
|
||||
.collect::<Vec<_>>()
|
||||
.await;
|
||||
|
||||
|
|
|
|||
|
|
@ -30,6 +30,10 @@ pub enum RoomCommand {
|
|||
#[arg(long)]
|
||||
exclude_banned: bool,
|
||||
|
||||
/// Includes disconnected/empty rooms (rooms with zero members)
|
||||
#[arg(long)]
|
||||
include_empty: bool,
|
||||
|
||||
#[arg(long)]
|
||||
/// Whether to only output room IDs without supplementary room
|
||||
/// information
|
||||
|
|
|
|||
|
|
@ -89,13 +89,7 @@ async fn ban_room(&self, room: OwnedRoomOrAliasId) -> Result {
|
|||
locally, if not using get_alias_helper to fetch room ID remotely"
|
||||
);
|
||||
|
||||
match self
|
||||
.services
|
||||
.rooms
|
||||
.alias
|
||||
.resolve_alias(room_alias, None)
|
||||
.await
|
||||
{
|
||||
match self.services.rooms.alias.resolve_alias(room_alias).await {
|
||||
| Ok((room_id, servers)) => {
|
||||
debug!(
|
||||
%room_id,
|
||||
|
|
@ -235,7 +229,7 @@ async fn ban_list_of_rooms(&self) -> Result {
|
|||
.services
|
||||
.rooms
|
||||
.alias
|
||||
.resolve_alias(room_alias, None)
|
||||
.resolve_alias(room_alias)
|
||||
.await
|
||||
{
|
||||
| Ok((room_id, servers)) => {
|
||||
|
|
@ -388,13 +382,7 @@ async fn unban_room(&self, room: OwnedRoomOrAliasId) -> Result {
|
|||
room ID over federation"
|
||||
);
|
||||
|
||||
match self
|
||||
.services
|
||||
.rooms
|
||||
.alias
|
||||
.resolve_alias(room_alias, None)
|
||||
.await
|
||||
{
|
||||
match self.services.rooms.alias.resolve_alias(room_alias).await {
|
||||
| Ok((room_id, servers)) => {
|
||||
debug!(
|
||||
%room_id,
|
||||
|
|
|
|||
|
|
@ -86,7 +86,7 @@ pub(super) async fn list_backups(&self) -> Result {
|
|||
.db
|
||||
.backup_list()?
|
||||
.try_stream()
|
||||
.try_for_each(|result| write!(self, "{result}"))
|
||||
.try_for_each(|result| writeln!(self, "{result}"))
|
||||
.await
|
||||
}
|
||||
|
||||
|
|
|
|||
15
src/admin/space/mod.rs
Normal file
15
src/admin/space/mod.rs
Normal file
|
|
@ -0,0 +1,15 @@
|
|||
pub(super) mod roles;
|
||||
|
||||
use clap::Subcommand;
|
||||
use conduwuit::Result;
|
||||
|
||||
use self::roles::SpaceRolesCommand;
|
||||
use crate::admin_command_dispatch;
|
||||
|
||||
#[admin_command_dispatch]
|
||||
#[derive(Debug, Subcommand)]
|
||||
pub enum SpaceCommand {
|
||||
#[command(subcommand)]
|
||||
/// Manage space roles and permissions
|
||||
Roles(SpaceRolesCommand),
|
||||
}
|
||||
632
src/admin/space/roles.rs
Normal file
632
src/admin/space/roles.rs
Normal file
|
|
@ -0,0 +1,632 @@
|
|||
use std::fmt::Write;
|
||||
|
||||
use clap::Subcommand;
|
||||
use conduwuit::{Err, Event, Result, matrix::pdu::PduBuilder};
|
||||
use conduwuit_core::matrix::space_roles::{
|
||||
RoleDefinition, SPACE_CASCADING_EVENT_TYPE, SPACE_ROLE_MEMBER_EVENT_TYPE,
|
||||
SPACE_ROLE_ROOM_EVENT_TYPE, SPACE_ROLES_EVENT_TYPE, SpaceCascadingEventContent,
|
||||
SpaceRoleMemberEventContent, SpaceRoleRoomEventContent, SpaceRolesEventContent,
|
||||
};
|
||||
use futures::StreamExt;
|
||||
use ruma::{OwnedRoomId, OwnedRoomOrAliasId, OwnedUserId, events::StateEventType};
|
||||
use serde_json::value::to_raw_value;
|
||||
|
||||
use crate::{admin_command, admin_command_dispatch};
|
||||
|
||||
fn roles_event_type() -> StateEventType {
|
||||
StateEventType::from(SPACE_ROLES_EVENT_TYPE.to_owned())
|
||||
}
|
||||
|
||||
fn member_event_type() -> StateEventType {
|
||||
StateEventType::from(SPACE_ROLE_MEMBER_EVENT_TYPE.to_owned())
|
||||
}
|
||||
|
||||
fn room_event_type() -> StateEventType {
|
||||
StateEventType::from(SPACE_ROLE_ROOM_EVENT_TYPE.to_owned())
|
||||
}
|
||||
|
||||
fn cascading_event_type() -> StateEventType {
|
||||
StateEventType::from(SPACE_CASCADING_EVENT_TYPE.to_owned())
|
||||
}
|
||||
|
||||
macro_rules! resolve_room_as_space {
|
||||
($self:expr, $space:expr) => {{
|
||||
let space_id = $self.services.rooms.alias.resolve(&$space).await?;
|
||||
if !matches!(
|
||||
$self
|
||||
.services
|
||||
.rooms
|
||||
.state_accessor
|
||||
.get_room_type(&space_id)
|
||||
.await,
|
||||
Ok(ruma::room::RoomType::Space)
|
||||
) {
|
||||
return Err!("The specified room is not a Space.");
|
||||
}
|
||||
space_id
|
||||
}};
|
||||
}
|
||||
|
||||
macro_rules! resolve_space {
|
||||
($self:expr, $space:expr) => {{
|
||||
let space_id = resolve_room_as_space!($self, $space);
|
||||
if !$self
|
||||
.services
|
||||
.rooms
|
||||
.roles
|
||||
.is_enabled_for_space(&space_id)
|
||||
.await
|
||||
{
|
||||
return $self
|
||||
.write_str(
|
||||
"Space permission cascading is disabled for this Space. Enable it \
|
||||
server-wide with `space_permission_cascading = true` in your config, or \
|
||||
per-Space with `!admin space roles enable <space>`.",
|
||||
)
|
||||
.await;
|
||||
}
|
||||
space_id
|
||||
}};
|
||||
}
|
||||
|
||||
macro_rules! custom_state_pdu {
|
||||
($event_type:expr, $state_key:expr, $content:expr) => {
|
||||
PduBuilder {
|
||||
event_type: $event_type.to_owned().into(),
|
||||
content: to_raw_value($content)
|
||||
.map_err(|e| conduwuit::err!("Failed to serialize state event content: {e}"))?,
|
||||
state_key: Some($state_key.to_owned().into()),
|
||||
..PduBuilder::default()
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
/// Cascade-remove a role name from all state events of a given type. For each
|
||||
/// event that contains the role, the `$field` is filtered and the updated
|
||||
/// content is sent back as a new state event.
|
||||
macro_rules! cascade_remove_role {
|
||||
(
|
||||
$self:expr,
|
||||
$shortstatehash:expr,
|
||||
$event_type_fn:expr,
|
||||
$event_type_const:expr,
|
||||
$content_ty:ty,
|
||||
$field:ident,
|
||||
$role_name:expr,
|
||||
$space_id:expr,
|
||||
$state_lock:expr,
|
||||
$server_user:expr
|
||||
) => {{
|
||||
let ev_type = $event_type_fn;
|
||||
let entries: Vec<(_, ruma::OwnedEventId)> = $self
|
||||
.services
|
||||
.rooms
|
||||
.state_accessor
|
||||
.state_keys_with_ids($shortstatehash, &ev_type)
|
||||
.collect()
|
||||
.await;
|
||||
|
||||
for (state_key, event_id) in entries {
|
||||
if let Ok(pdu) = $self.services.rooms.timeline.get_pdu(&event_id).await {
|
||||
if let Ok(mut content) = pdu.get_content::<$content_ty>() {
|
||||
if content.$field.contains($role_name) {
|
||||
content.$field.retain(|r| r != $role_name);
|
||||
$self
|
||||
.services
|
||||
.rooms
|
||||
.timeline
|
||||
.build_and_append_pdu(
|
||||
custom_state_pdu!($event_type_const, &state_key, &content),
|
||||
$server_user,
|
||||
Some(&$space_id),
|
||||
&$state_lock,
|
||||
)
|
||||
.await?;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}};
|
||||
}
|
||||
|
||||
macro_rules! send_space_state {
|
||||
($self:expr, $space_id:expr, $event_type:expr, $state_key:expr, $content:expr) => {{
|
||||
let state_lock = $self.services.rooms.state.mutex.lock(&$space_id).await;
|
||||
let server_user = &$self.services.globals.server_user;
|
||||
$self
|
||||
.services
|
||||
.rooms
|
||||
.timeline
|
||||
.build_and_append_pdu(
|
||||
custom_state_pdu!($event_type, $state_key, $content),
|
||||
server_user,
|
||||
Some(&$space_id),
|
||||
&state_lock,
|
||||
)
|
||||
.await?
|
||||
}};
|
||||
}
|
||||
|
||||
#[admin_command_dispatch]
|
||||
#[derive(Debug, Subcommand)]
|
||||
pub enum SpaceRolesCommand {
|
||||
/// List all roles defined in a space
|
||||
List {
|
||||
space: OwnedRoomOrAliasId,
|
||||
},
|
||||
/// Add a new role to a space
|
||||
Add {
|
||||
space: OwnedRoomOrAliasId,
|
||||
role_name: String,
|
||||
#[arg(long)]
|
||||
description: Option<String>,
|
||||
#[arg(long)]
|
||||
power_level: Option<i64>,
|
||||
},
|
||||
/// Remove a role from a space
|
||||
Remove {
|
||||
space: OwnedRoomOrAliasId,
|
||||
role_name: String,
|
||||
},
|
||||
/// Assign a role to a user
|
||||
Assign {
|
||||
space: OwnedRoomOrAliasId,
|
||||
user_id: OwnedUserId,
|
||||
role_name: String,
|
||||
},
|
||||
/// Revoke a role from a user
|
||||
Revoke {
|
||||
space: OwnedRoomOrAliasId,
|
||||
user_id: OwnedUserId,
|
||||
role_name: String,
|
||||
},
|
||||
/// Require a role for a room
|
||||
Require {
|
||||
space: OwnedRoomOrAliasId,
|
||||
room_id: OwnedRoomId,
|
||||
role_name: String,
|
||||
},
|
||||
/// Remove a role requirement from a room
|
||||
Unrequire {
|
||||
space: OwnedRoomOrAliasId,
|
||||
room_id: OwnedRoomId,
|
||||
role_name: String,
|
||||
},
|
||||
/// Show a user's roles in a space
|
||||
User {
|
||||
space: OwnedRoomOrAliasId,
|
||||
user_id: OwnedUserId,
|
||||
},
|
||||
/// Show a room's role requirements in a space
|
||||
Room {
|
||||
space: OwnedRoomOrAliasId,
|
||||
room_id: OwnedRoomId,
|
||||
},
|
||||
/// Enable space permission cascading for a specific space (overrides
|
||||
/// server config)
|
||||
Enable {
|
||||
space: OwnedRoomOrAliasId,
|
||||
},
|
||||
/// Disable space permission cascading for a specific space (overrides
|
||||
/// server config)
|
||||
Disable {
|
||||
space: OwnedRoomOrAliasId,
|
||||
},
|
||||
/// Show whether cascading is enabled for a space and the source (server
|
||||
/// default or per-space override)
|
||||
Status {
|
||||
space: OwnedRoomOrAliasId,
|
||||
},
|
||||
}
|
||||
|
||||
#[admin_command]
|
||||
async fn list(&self, space: OwnedRoomOrAliasId) -> Result {
|
||||
let space_id = resolve_space!(self, space);
|
||||
let roles_event_type = roles_event_type();
|
||||
|
||||
let content: SpaceRolesEventContent = self
|
||||
.services
|
||||
.rooms
|
||||
.state_accessor
|
||||
.room_state_get_content(&space_id, &roles_event_type, "")
|
||||
.await
|
||||
.unwrap_or_default();
|
||||
|
||||
if content.roles.is_empty() {
|
||||
return self.write_str("No roles defined in this space.").await;
|
||||
}
|
||||
|
||||
let mut msg = format!("Roles in {space_id}:\n```\n");
|
||||
for (name, def) in &content.roles {
|
||||
let pl = def
|
||||
.power_level
|
||||
.map(|p| format!(" (power_level: {p})"))
|
||||
.unwrap_or_default();
|
||||
let _ = writeln!(msg, "- {name}: {}{pl}", def.description);
|
||||
}
|
||||
msg.push_str("```");
|
||||
|
||||
self.write_str(&msg).await
|
||||
}
|
||||
|
||||
#[admin_command]
|
||||
async fn add(
|
||||
&self,
|
||||
space: OwnedRoomOrAliasId,
|
||||
role_name: String,
|
||||
description: Option<String>,
|
||||
power_level: Option<i64>,
|
||||
) -> Result {
|
||||
let space_id = resolve_space!(self, space);
|
||||
|
||||
if let Some(pl) = power_level {
|
||||
if pl > i64::from(ruma::Int::MAX) || pl < i64::from(ruma::Int::MIN) {
|
||||
return Err!(
|
||||
"Power level must be between {} and {}.",
|
||||
ruma::Int::MIN,
|
||||
ruma::Int::MAX
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
let roles_event_type = roles_event_type();
|
||||
|
||||
let mut content: SpaceRolesEventContent = self
|
||||
.services
|
||||
.rooms
|
||||
.state_accessor
|
||||
.room_state_get_content(&space_id, &roles_event_type, "")
|
||||
.await
|
||||
.unwrap_or_default();
|
||||
|
||||
if content.roles.contains_key(&role_name) {
|
||||
return Err!("Role '{role_name}' already exists in this space.");
|
||||
}
|
||||
|
||||
content.roles.insert(role_name.clone(), RoleDefinition {
|
||||
description: description.unwrap_or_else(|| role_name.clone()),
|
||||
power_level,
|
||||
});
|
||||
|
||||
send_space_state!(self, space_id, SPACE_ROLES_EVENT_TYPE, "", &content);
|
||||
|
||||
self.write_str(&format!("Added role '{role_name}' to space {space_id}."))
|
||||
.await
|
||||
}
|
||||
|
||||
#[admin_command]
|
||||
async fn remove(&self, space: OwnedRoomOrAliasId, role_name: String) -> Result {
|
||||
let space_id = resolve_space!(self, space);
|
||||
let roles_event_type = roles_event_type();
|
||||
|
||||
let mut content: SpaceRolesEventContent = self
|
||||
.services
|
||||
.rooms
|
||||
.state_accessor
|
||||
.room_state_get_content(&space_id, &roles_event_type, "")
|
||||
.await
|
||||
.unwrap_or_default();
|
||||
|
||||
if content.roles.remove(&role_name).is_none() {
|
||||
return Err!("Role '{role_name}' does not exist in this space.");
|
||||
}
|
||||
|
||||
send_space_state!(self, space_id, SPACE_ROLES_EVENT_TYPE, "", &content);
|
||||
|
||||
// Cascade: remove the deleted role from all member and room events
|
||||
let server_user = &self.services.globals.server_user;
|
||||
if let Ok(shortstatehash) = self
|
||||
.services
|
||||
.rooms
|
||||
.state
|
||||
.get_room_shortstatehash(&space_id)
|
||||
.await
|
||||
{
|
||||
let state_lock = self.services.rooms.state.mutex.lock(&space_id).await;
|
||||
|
||||
cascade_remove_role!(
|
||||
self,
|
||||
shortstatehash,
|
||||
member_event_type(),
|
||||
SPACE_ROLE_MEMBER_EVENT_TYPE,
|
||||
SpaceRoleMemberEventContent,
|
||||
roles,
|
||||
&role_name,
|
||||
space_id,
|
||||
state_lock,
|
||||
server_user
|
||||
);
|
||||
|
||||
cascade_remove_role!(
|
||||
self,
|
||||
shortstatehash,
|
||||
room_event_type(),
|
||||
SPACE_ROLE_ROOM_EVENT_TYPE,
|
||||
SpaceRoleRoomEventContent,
|
||||
required_roles,
|
||||
&role_name,
|
||||
space_id,
|
||||
state_lock,
|
||||
server_user
|
||||
);
|
||||
}
|
||||
|
||||
self.write_str(&format!("Removed role '{role_name}' from space {space_id}."))
|
||||
.await
|
||||
}
|
||||
|
||||
#[admin_command]
|
||||
async fn assign(
|
||||
&self,
|
||||
space: OwnedRoomOrAliasId,
|
||||
user_id: OwnedUserId,
|
||||
role_name: String,
|
||||
) -> Result {
|
||||
let space_id = resolve_space!(self, space);
|
||||
|
||||
let roles_event_type = roles_event_type();
|
||||
let role_defs: SpaceRolesEventContent = self
|
||||
.services
|
||||
.rooms
|
||||
.state_accessor
|
||||
.room_state_get_content(&space_id, &roles_event_type, "")
|
||||
.await
|
||||
.unwrap_or_default();
|
||||
|
||||
if !role_defs.roles.contains_key(&role_name) {
|
||||
return Err!("Role '{role_name}' does not exist in this space.");
|
||||
}
|
||||
|
||||
let member_event_type = member_event_type();
|
||||
|
||||
let mut content: SpaceRoleMemberEventContent = self
|
||||
.services
|
||||
.rooms
|
||||
.state_accessor
|
||||
.room_state_get_content(&space_id, &member_event_type, user_id.as_str())
|
||||
.await
|
||||
.unwrap_or_default();
|
||||
|
||||
if content.roles.contains(&role_name) {
|
||||
return Err!("User {user_id} already has role '{role_name}' in this space.");
|
||||
}
|
||||
|
||||
content.roles.push(role_name.clone());
|
||||
|
||||
send_space_state!(self, space_id, SPACE_ROLE_MEMBER_EVENT_TYPE, user_id.as_str(), &content);
|
||||
|
||||
self.write_str(&format!("Assigned role '{role_name}' to {user_id} in space {space_id}."))
|
||||
.await
|
||||
}
|
||||
|
||||
#[admin_command]
|
||||
async fn revoke(
|
||||
&self,
|
||||
space: OwnedRoomOrAliasId,
|
||||
user_id: OwnedUserId,
|
||||
role_name: String,
|
||||
) -> Result {
|
||||
let space_id = resolve_space!(self, space);
|
||||
let member_event_type = member_event_type();
|
||||
|
||||
let mut content: SpaceRoleMemberEventContent = self
|
||||
.services
|
||||
.rooms
|
||||
.state_accessor
|
||||
.room_state_get_content(&space_id, &member_event_type, user_id.as_str())
|
||||
.await
|
||||
.unwrap_or_default();
|
||||
|
||||
let original_len = content.roles.len();
|
||||
content.roles.retain(|r| r != &role_name);
|
||||
|
||||
if content.roles.len() == original_len {
|
||||
return Err!("User {user_id} does not have role '{role_name}' in this space.");
|
||||
}
|
||||
|
||||
send_space_state!(self, space_id, SPACE_ROLE_MEMBER_EVENT_TYPE, user_id.as_str(), &content);
|
||||
|
||||
self.write_str(&format!("Revoked role '{role_name}' from {user_id} in space {space_id}."))
|
||||
.await
|
||||
}
|
||||
|
||||
#[admin_command]
|
||||
async fn require(
|
||||
&self,
|
||||
space: OwnedRoomOrAliasId,
|
||||
room_id: OwnedRoomId,
|
||||
role_name: String,
|
||||
) -> Result {
|
||||
let space_id = resolve_space!(self, space);
|
||||
|
||||
let child_rooms = self.services.rooms.roles.get_child_rooms(&space_id).await;
|
||||
if !child_rooms.contains(&room_id) {
|
||||
return Err!("Room {room_id} is not a child of space {space_id}.");
|
||||
}
|
||||
|
||||
let roles_event_type = roles_event_type();
|
||||
let role_defs: SpaceRolesEventContent = self
|
||||
.services
|
||||
.rooms
|
||||
.state_accessor
|
||||
.room_state_get_content(&space_id, &roles_event_type, "")
|
||||
.await
|
||||
.unwrap_or_default();
|
||||
|
||||
if !role_defs.roles.contains_key(&role_name) {
|
||||
return Err!("Role '{role_name}' does not exist in this space.");
|
||||
}
|
||||
|
||||
let room_event_type = room_event_type();
|
||||
|
||||
let mut content: SpaceRoleRoomEventContent = self
|
||||
.services
|
||||
.rooms
|
||||
.state_accessor
|
||||
.room_state_get_content(&space_id, &room_event_type, room_id.as_str())
|
||||
.await
|
||||
.unwrap_or_default();
|
||||
|
||||
if content.required_roles.contains(&role_name) {
|
||||
return Err!("Room {room_id} already requires role '{role_name}' in this space.");
|
||||
}
|
||||
|
||||
content.required_roles.push(role_name.clone());
|
||||
|
||||
send_space_state!(self, space_id, SPACE_ROLE_ROOM_EVENT_TYPE, room_id.as_str(), &content);
|
||||
|
||||
self.write_str(&format!(
|
||||
"Room {room_id} now requires role '{role_name}' in space {space_id}."
|
||||
))
|
||||
.await
|
||||
}
|
||||
|
||||
#[admin_command]
|
||||
async fn unrequire(
|
||||
&self,
|
||||
space: OwnedRoomOrAliasId,
|
||||
room_id: OwnedRoomId,
|
||||
role_name: String,
|
||||
) -> Result {
|
||||
let space_id = resolve_space!(self, space);
|
||||
let room_event_type = room_event_type();
|
||||
|
||||
let mut content: SpaceRoleRoomEventContent = self
|
||||
.services
|
||||
.rooms
|
||||
.state_accessor
|
||||
.room_state_get_content(&space_id, &room_event_type, room_id.as_str())
|
||||
.await
|
||||
.unwrap_or_default();
|
||||
|
||||
let original_len = content.required_roles.len();
|
||||
content.required_roles.retain(|r| r != &role_name);
|
||||
|
||||
if content.required_roles.len() == original_len {
|
||||
return Err!("Room {room_id} does not require role '{role_name}' in this space.");
|
||||
}
|
||||
|
||||
send_space_state!(self, space_id, SPACE_ROLE_ROOM_EVENT_TYPE, room_id.as_str(), &content);
|
||||
|
||||
self.write_str(&format!(
|
||||
"Removed role requirement '{role_name}' from room {room_id} in space {space_id}."
|
||||
))
|
||||
.await
|
||||
}
|
||||
|
||||
#[admin_command]
|
||||
async fn user(&self, space: OwnedRoomOrAliasId, user_id: OwnedUserId) -> Result {
|
||||
let space_id = resolve_space!(self, space);
|
||||
|
||||
let roles = self
|
||||
.services
|
||||
.rooms
|
||||
.roles
|
||||
.get_user_roles_in_space(&space_id, &user_id)
|
||||
.await;
|
||||
|
||||
match roles {
|
||||
| Some(roles) if !roles.is_empty() => {
|
||||
let list: String = roles
|
||||
.iter()
|
||||
.map(|r| format!("- {r}"))
|
||||
.collect::<Vec<_>>()
|
||||
.join("\n");
|
||||
self.write_str(&format!("Roles for {user_id} in space {space_id}:\n```\n{list}\n```"))
|
||||
.await
|
||||
},
|
||||
| _ =>
|
||||
self.write_str(&format!("User {user_id} has no roles in space {space_id}."))
|
||||
.await,
|
||||
}
|
||||
}
|
||||
|
||||
#[admin_command]
|
||||
async fn room(&self, space: OwnedRoomOrAliasId, room_id: OwnedRoomId) -> Result {
|
||||
let space_id = resolve_space!(self, space);
|
||||
|
||||
let reqs = self
|
||||
.services
|
||||
.rooms
|
||||
.roles
|
||||
.get_room_requirements_in_space(&space_id, &room_id)
|
||||
.await;
|
||||
|
||||
match reqs {
|
||||
| Some(reqs) if !reqs.is_empty() => {
|
||||
let list: String = reqs
|
||||
.iter()
|
||||
.map(|r| format!("- {r}"))
|
||||
.collect::<Vec<_>>()
|
||||
.join("\n");
|
||||
self.write_str(&format!(
|
||||
"Required roles for room {room_id} in space {space_id}:\n```\n{list}\n```"
|
||||
))
|
||||
.await
|
||||
},
|
||||
| _ =>
|
||||
self.write_str(&format!(
|
||||
"Room {room_id} has no role requirements in space {space_id}."
|
||||
))
|
||||
.await,
|
||||
}
|
||||
}
|
||||
|
||||
#[admin_command]
|
||||
async fn enable(&self, space: OwnedRoomOrAliasId) -> Result {
|
||||
let space_id = resolve_room_as_space!(self, space);
|
||||
|
||||
self.services
|
||||
.rooms
|
||||
.roles
|
||||
.ensure_default_roles(&space_id)
|
||||
.await?;
|
||||
|
||||
let content = SpaceCascadingEventContent { enabled: true };
|
||||
send_space_state!(self, space_id, SPACE_CASCADING_EVENT_TYPE, "", &content);
|
||||
|
||||
self.write_str(&format!("Space permission cascading enabled for {space_id}."))
|
||||
.await
|
||||
}
|
||||
|
||||
#[admin_command]
|
||||
async fn disable(&self, space: OwnedRoomOrAliasId) -> Result {
|
||||
let space_id = resolve_room_as_space!(self, space);
|
||||
|
||||
let content = SpaceCascadingEventContent { enabled: false };
|
||||
send_space_state!(self, space_id, SPACE_CASCADING_EVENT_TYPE, "", &content);
|
||||
|
||||
self.write_str(&format!("Space permission cascading disabled for {space_id}."))
|
||||
.await
|
||||
}
|
||||
|
||||
#[admin_command]
|
||||
async fn status(&self, space: OwnedRoomOrAliasId) -> Result {
|
||||
let space_id = resolve_room_as_space!(self, space);
|
||||
|
||||
let global_default = self.services.rooms.roles.is_enabled();
|
||||
let cascading_event_type = cascading_event_type();
|
||||
let per_space_override: Option<bool> = self
|
||||
.services
|
||||
.rooms
|
||||
.state_accessor
|
||||
.room_state_get_content::<SpaceCascadingEventContent>(
|
||||
&space_id,
|
||||
&cascading_event_type,
|
||||
"",
|
||||
)
|
||||
.await
|
||||
.ok()
|
||||
.map(|c| c.enabled);
|
||||
|
||||
let effective = per_space_override.unwrap_or(global_default);
|
||||
let source = match per_space_override {
|
||||
| Some(v) => format!("per-Space override (enabled: {v})"),
|
||||
| None => format!("server default (space_permission_cascading: {global_default})"),
|
||||
};
|
||||
|
||||
self.write_str(&format!(
|
||||
"Cascading status for {space_id}:\n- Effective: **{effective}**\n- Source: {source}"
|
||||
))
|
||||
.await
|
||||
}
|
||||
|
|
@ -5,7 +5,7 @@ use std::{
|
|||
|
||||
use api::client::{full_user_deactivate, join_room_by_id_helper, leave_room, remote_leave_room};
|
||||
use conduwuit::{
|
||||
Err, Result, debug, debug_warn, error, info, is_equal_to,
|
||||
Err, Result, debug_warn, error, info,
|
||||
matrix::{Event, pdu::PduBuilder},
|
||||
utils::{self, ReadyExt},
|
||||
warn,
|
||||
|
|
@ -140,7 +140,6 @@ pub(super) async fn create_user(&self, username: String, password: Option<String
|
|||
self.services.globals.server_name().to_owned(),
|
||||
room_server_name.to_owned(),
|
||||
],
|
||||
None,
|
||||
&None,
|
||||
)
|
||||
.await
|
||||
|
|
@ -168,27 +167,8 @@ pub(super) async fn create_user(&self, username: String, password: Option<String
|
|||
|
||||
// we dont add a device since we're not the user, just the creator
|
||||
|
||||
// if this account creation is from the CLI / --execute, invite the first user
|
||||
// to admin room
|
||||
if let Ok(admin_room) = self.services.admin.get_admin_room().await {
|
||||
if self
|
||||
.services
|
||||
.rooms
|
||||
.state_cache
|
||||
.room_joined_count(&admin_room)
|
||||
.await
|
||||
.is_ok_and(is_equal_to!(1))
|
||||
{
|
||||
self.services
|
||||
.admin
|
||||
.make_user_admin(&user_id)
|
||||
.boxed()
|
||||
.await?;
|
||||
warn!("Granting {user_id} admin privileges as the first user");
|
||||
}
|
||||
} else {
|
||||
debug!("create_user admin command called without an admin room being available");
|
||||
}
|
||||
// Make the first user to register an administrator and disable first-run mode.
|
||||
self.services.firstrun.empower_first_user(&user_id).await?;
|
||||
|
||||
self.write_str(&format!("Created user with user_id: {user_id} and password: `{password}`"))
|
||||
.await
|
||||
|
|
@ -316,6 +296,31 @@ pub(super) async fn reset_password(
|
|||
Ok(())
|
||||
}
|
||||
|
||||
#[admin_command]
|
||||
pub(super) async fn issue_password_reset_link(&self, username: String) -> Result {
|
||||
use conduwuit_service::password_reset::{PASSWORD_RESET_PATH, RESET_TOKEN_QUERY_PARAM};
|
||||
|
||||
self.bail_restricted()?;
|
||||
|
||||
let mut reset_url = self
|
||||
.services
|
||||
.config
|
||||
.get_client_domain()
|
||||
.join(PASSWORD_RESET_PATH)
|
||||
.unwrap();
|
||||
|
||||
let user_id = parse_local_user_id(self.services, &username)?;
|
||||
let token = self.services.password_reset.issue_token(user_id).await?;
|
||||
reset_url
|
||||
.query_pairs_mut()
|
||||
.append_pair(RESET_TOKEN_QUERY_PARAM, &token.token);
|
||||
|
||||
self.write_str(&format!("Password reset link issued for {username}: {reset_url}"))
|
||||
.await?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[admin_command]
|
||||
pub(super) async fn deactivate_all(&self, no_leave_rooms: bool, force: bool) -> Result {
|
||||
if self.body.len() < 2
|
||||
|
|
@ -549,7 +554,6 @@ pub(super) async fn force_join_list_of_local_users(
|
|||
&room_id,
|
||||
Some(String::from(BULK_JOIN_REASON)),
|
||||
&servers,
|
||||
None,
|
||||
&None,
|
||||
)
|
||||
.await
|
||||
|
|
@ -635,7 +639,6 @@ pub(super) async fn force_join_all_local_users(
|
|||
&room_id,
|
||||
Some(String::from(BULK_JOIN_REASON)),
|
||||
&servers,
|
||||
None,
|
||||
&None,
|
||||
)
|
||||
.await
|
||||
|
|
@ -675,8 +678,7 @@ pub(super) async fn force_join_room(
|
|||
self.services.globals.user_is_local(&user_id),
|
||||
"Parsed user_id must be a local user"
|
||||
);
|
||||
join_room_by_id_helper(self.services, &user_id, &room_id, None, &servers, None, &None)
|
||||
.await?;
|
||||
join_room_by_id_helper(self.services, &user_id, &room_id, None, &servers, &None).await?;
|
||||
|
||||
self.write_str(&format!("{user_id} has been joined to {room_id}.",))
|
||||
.await
|
||||
|
|
|
|||
|
|
@ -29,6 +29,12 @@ pub enum UserCommand {
|
|||
password: Option<String>,
|
||||
},
|
||||
|
||||
/// Issue a self-service password reset link for a user.
|
||||
IssuePasswordResetLink {
|
||||
/// Username of the user who may use the link
|
||||
username: String,
|
||||
},
|
||||
|
||||
/// Deactivate a user
|
||||
///
|
||||
/// User will be removed from all rooms by default.
|
||||
|
|
|
|||
|
|
@ -28,6 +28,10 @@ gzip_compression = [
|
|||
"conduwuit-service/gzip_compression",
|
||||
"reqwest/gzip",
|
||||
]
|
||||
http3 = [
|
||||
"conduwuit-core/http3",
|
||||
"conduwuit-service/http3",
|
||||
]
|
||||
io_uring = [
|
||||
"conduwuit-service/io_uring",
|
||||
]
|
||||
|
|
|
|||
|
|
@ -3,7 +3,7 @@ use std::fmt::Write;
|
|||
use axum::extract::State;
|
||||
use axum_client_ip::InsecureClientIp;
|
||||
use conduwuit::{
|
||||
Err, Error, Event, Result, debug_info, err, error, info, is_equal_to,
|
||||
Err, Error, Event, Result, debug_info, err, error, info,
|
||||
matrix::pdu::PduBuilder,
|
||||
utils::{self, ReadyExt, stream::BroadbandExt},
|
||||
warn,
|
||||
|
|
@ -148,7 +148,12 @@ pub(crate) async fn register_route(
|
|||
let is_guest = body.kind == RegistrationKind::Guest;
|
||||
let emergency_mode_enabled = services.config.emergency_password.is_some();
|
||||
|
||||
if !services.config.allow_registration && body.appservice_info.is_none() {
|
||||
// Allow registration if it's enabled in the config file or if this is the first
|
||||
// run (so the first user account can be created)
|
||||
let allow_registration =
|
||||
services.config.allow_registration || services.firstrun.is_first_run();
|
||||
|
||||
if !allow_registration && body.appservice_info.is_none() {
|
||||
match (body.username.as_ref(), body.initial_device_display_name.as_ref()) {
|
||||
| (Some(username), Some(device_display_name)) => {
|
||||
info!(
|
||||
|
|
@ -185,17 +190,10 @@ pub(crate) async fn register_route(
|
|||
)));
|
||||
}
|
||||
|
||||
if is_guest
|
||||
&& (!services.config.allow_guest_registration
|
||||
|| (services.config.allow_registration
|
||||
&& services
|
||||
.registration_tokens
|
||||
.get_config_file_token()
|
||||
.is_some()))
|
||||
{
|
||||
if is_guest && !services.config.allow_guest_registration {
|
||||
info!(
|
||||
"Guest registration disabled / registration enabled with token configured, \
|
||||
rejecting guest registration attempt, initial device name: \"{}\"",
|
||||
"Guest registration disabled, rejecting guest registration attempt, initial device \
|
||||
name: \"{}\"",
|
||||
body.initial_device_display_name.as_deref().unwrap_or("")
|
||||
);
|
||||
return Err!(Request(GuestAccessForbidden("Guest registration is disabled.")));
|
||||
|
|
@ -254,6 +252,13 @@ pub(crate) async fn register_route(
|
|||
}
|
||||
}
|
||||
|
||||
// Don't allow registration with user IDs that aren't local
|
||||
if !services.globals.user_is_local(&user_id) {
|
||||
return Err!(Request(InvalidUsername(
|
||||
"Username {body_username} is not local to this server"
|
||||
)));
|
||||
}
|
||||
|
||||
user_id
|
||||
},
|
||||
| Err(e) => {
|
||||
|
|
@ -309,54 +314,63 @@ pub(crate) async fn register_route(
|
|||
let skip_auth = body.appservice_info.is_some() || is_guest;
|
||||
|
||||
// Populate required UIAA flows
|
||||
if services
|
||||
.registration_tokens
|
||||
.iterate_tokens()
|
||||
.next()
|
||||
.await
|
||||
.is_some()
|
||||
{
|
||||
// Registration token required
|
||||
|
||||
if services.firstrun.is_first_run() {
|
||||
// Registration token forced while in first-run mode
|
||||
uiaainfo.flows.push(AuthFlow {
|
||||
stages: vec![AuthType::RegistrationToken],
|
||||
});
|
||||
}
|
||||
if services.config.recaptcha_private_site_key.is_some() {
|
||||
if let Some(pubkey) = &services.config.recaptcha_site_key {
|
||||
// ReCaptcha required
|
||||
uiaainfo
|
||||
.flows
|
||||
.push(AuthFlow { stages: vec![AuthType::ReCaptcha] });
|
||||
uiaainfo.params = serde_json::value::to_raw_value(&serde_json::json!({
|
||||
"m.login.recaptcha": {
|
||||
"public_key": pubkey,
|
||||
},
|
||||
}))
|
||||
.expect("Failed to serialize recaptcha params");
|
||||
}
|
||||
}
|
||||
|
||||
if uiaainfo.flows.is_empty() && !skip_auth {
|
||||
// Registration isn't _disabled_, but there's no captcha configured and no
|
||||
// registration tokens currently set. Bail out by default unless open
|
||||
// registration was explicitly enabled.
|
||||
if !services
|
||||
.config
|
||||
.yes_i_am_very_very_sure_i_want_an_open_registration_server_prone_to_abuse
|
||||
} else {
|
||||
if services
|
||||
.registration_tokens
|
||||
.iterate_tokens()
|
||||
.next()
|
||||
.await
|
||||
.is_some()
|
||||
{
|
||||
return Err!(Request(Forbidden(
|
||||
"This server is not accepting registrations at this time."
|
||||
)));
|
||||
// Registration token required
|
||||
uiaainfo.flows.push(AuthFlow {
|
||||
stages: vec![AuthType::RegistrationToken],
|
||||
});
|
||||
}
|
||||
|
||||
// We have open registration enabled (😧), provide a dummy stage
|
||||
uiaainfo = UiaaInfo {
|
||||
flows: vec![AuthFlow { stages: vec![AuthType::Dummy] }],
|
||||
completed: Vec::new(),
|
||||
params: Box::default(),
|
||||
session: None,
|
||||
auth_error: None,
|
||||
};
|
||||
if services.config.recaptcha_private_site_key.is_some() {
|
||||
if let Some(pubkey) = &services.config.recaptcha_site_key {
|
||||
// ReCaptcha required
|
||||
uiaainfo
|
||||
.flows
|
||||
.push(AuthFlow { stages: vec![AuthType::ReCaptcha] });
|
||||
uiaainfo.params = serde_json::value::to_raw_value(&serde_json::json!({
|
||||
"m.login.recaptcha": {
|
||||
"public_key": pubkey,
|
||||
},
|
||||
}))
|
||||
.expect("Failed to serialize recaptcha params");
|
||||
}
|
||||
}
|
||||
|
||||
if uiaainfo.flows.is_empty() && !skip_auth {
|
||||
// Registration isn't _disabled_, but there's no captcha configured and no
|
||||
// registration tokens currently set. Bail out by default unless open
|
||||
// registration was explicitly enabled.
|
||||
if !services
|
||||
.config
|
||||
.yes_i_am_very_very_sure_i_want_an_open_registration_server_prone_to_abuse
|
||||
{
|
||||
return Err!(Request(Forbidden(
|
||||
"This server is not accepting registrations at this time."
|
||||
)));
|
||||
}
|
||||
|
||||
// We have open registration enabled (😧), provide a dummy stage
|
||||
uiaainfo = UiaaInfo {
|
||||
flows: vec![AuthFlow { stages: vec![AuthType::Dummy] }],
|
||||
completed: Vec::new(),
|
||||
params: Box::default(),
|
||||
session: None,
|
||||
auth_error: None,
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
if !skip_auth {
|
||||
|
|
@ -514,39 +528,29 @@ pub(crate) async fn register_route(
|
|||
}
|
||||
}
|
||||
|
||||
// If this is the first real user, grant them admin privileges except for guest
|
||||
// users
|
||||
// Note: the server user is generated first
|
||||
if !is_guest {
|
||||
if let Ok(admin_room) = services.admin.get_admin_room().await {
|
||||
if services
|
||||
.rooms
|
||||
.state_cache
|
||||
.room_joined_count(&admin_room)
|
||||
.await
|
||||
.is_ok_and(is_equal_to!(1))
|
||||
{
|
||||
services.admin.make_user_admin(&user_id).boxed().await?;
|
||||
warn!("Granting {user_id} admin privileges as the first user");
|
||||
} else if services.config.suspend_on_register {
|
||||
// This is not an admin, suspend them.
|
||||
// Note that we can still do auto joins for suspended users
|
||||
// Make the first user to register an administrator and disable first-run mode.
|
||||
let was_first_user = services.firstrun.empower_first_user(&user_id).await?;
|
||||
|
||||
// If the registering user was not the first and we're suspending users on
|
||||
// register, suspend them.
|
||||
if !was_first_user && services.config.suspend_on_register {
|
||||
// Note that we can still do auto joins for suspended users
|
||||
services
|
||||
.users
|
||||
.suspend_account(&user_id, &services.globals.server_user)
|
||||
.await;
|
||||
// And send an @room notice to the admin room, to prompt admins to review the
|
||||
// new user and ideally unsuspend them if deemed appropriate.
|
||||
if services.server.config.admin_room_notices {
|
||||
services
|
||||
.users
|
||||
.suspend_account(&user_id, &services.globals.server_user)
|
||||
.await;
|
||||
// And send an @room notice to the admin room, to prompt admins to review the
|
||||
// new user and ideally unsuspend them if deemed appropriate.
|
||||
if services.server.config.admin_room_notices {
|
||||
services
|
||||
.admin
|
||||
.send_loud_message(RoomMessageEventContent::text_plain(format!(
|
||||
"User {user_id} has been suspended as they are not the first user \
|
||||
on this server. Please review and unsuspend them if appropriate."
|
||||
)))
|
||||
.await
|
||||
.ok();
|
||||
}
|
||||
.admin
|
||||
.send_loud_message(RoomMessageEventContent::text_plain(format!(
|
||||
"User {user_id} has been suspended as they are not the first user on \
|
||||
this server. Please review and unsuspend them if appropriate."
|
||||
)))
|
||||
.await
|
||||
.ok();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -583,7 +587,6 @@ pub(crate) async fn register_route(
|
|||
&room_id,
|
||||
Some("Automatically joining this room upon registration".to_owned()),
|
||||
&[services.globals.server_name().to_owned(), room_server_name.to_owned()],
|
||||
None,
|
||||
&body.appservice_info,
|
||||
)
|
||||
.boxed()
|
||||
|
|
|
|||
|
|
@ -9,7 +9,7 @@ use ruma::{
|
|||
},
|
||||
events::{
|
||||
AnyGlobalAccountDataEventContent, AnyRoomAccountDataEventContent,
|
||||
GlobalAccountDataEventType, RoomAccountDataEventType,
|
||||
RoomAccountDataEventType,
|
||||
},
|
||||
serde::Raw,
|
||||
};
|
||||
|
|
@ -126,12 +126,6 @@ async fn set_account_data(
|
|||
)));
|
||||
}
|
||||
|
||||
if event_type_s == GlobalAccountDataEventType::PushRules.to_cow_str() {
|
||||
return Err!(Request(BadJson(
|
||||
"This endpoint cannot be used for setting/configuring push rules."
|
||||
)));
|
||||
}
|
||||
|
||||
let data: serde_json::Value = serde_json::from_str(data.get())
|
||||
.map_err(|e| err!(Request(BadJson(warn!("Invalid JSON provided: {e}")))))?;
|
||||
|
||||
|
|
|
|||
|
|
@ -1,12 +1,6 @@
|
|||
use axum::extract::State;
|
||||
use conduwuit::{Err, Result, debug};
|
||||
use conduwuit_service::Services;
|
||||
use futures::StreamExt;
|
||||
use rand::seq::SliceRandom;
|
||||
use ruma::{
|
||||
OwnedServerName, RoomAliasId, RoomId,
|
||||
api::client::alias::{create_alias, delete_alias, get_alias},
|
||||
};
|
||||
use conduwuit::{Err, Result};
|
||||
use ruma::api::client::alias::{create_alias, delete_alias, get_alias};
|
||||
|
||||
use crate::Ruma;
|
||||
|
||||
|
|
@ -96,65 +90,9 @@ pub(crate) async fn get_alias_route(
|
|||
) -> Result<get_alias::v3::Response> {
|
||||
let room_alias = body.body.room_alias;
|
||||
|
||||
let Ok((room_id, servers)) = services.rooms.alias.resolve_alias(&room_alias, None).await
|
||||
else {
|
||||
let Ok((room_id, servers)) = services.rooms.alias.resolve_alias(&room_alias).await else {
|
||||
return Err!(Request(NotFound("Room with alias not found.")));
|
||||
};
|
||||
|
||||
let servers = room_available_servers(&services, &room_id, &room_alias, servers).await;
|
||||
debug!(%room_alias, %room_id, "available servers: {servers:?}");
|
||||
|
||||
Ok(get_alias::v3::Response::new(room_id, servers))
|
||||
}
|
||||
|
||||
async fn room_available_servers(
|
||||
services: &Services,
|
||||
room_id: &RoomId,
|
||||
room_alias: &RoomAliasId,
|
||||
pre_servers: Vec<OwnedServerName>,
|
||||
) -> Vec<OwnedServerName> {
|
||||
// find active servers in room state cache to suggest
|
||||
let mut servers: Vec<OwnedServerName> = services
|
||||
.rooms
|
||||
.state_cache
|
||||
.room_servers(room_id)
|
||||
.map(ToOwned::to_owned)
|
||||
.collect()
|
||||
.await;
|
||||
|
||||
// push any servers we want in the list already (e.g. responded remote alias
|
||||
// servers, room alias server itself)
|
||||
servers.extend(pre_servers);
|
||||
|
||||
servers.sort_unstable();
|
||||
servers.dedup();
|
||||
|
||||
// shuffle list of servers randomly after sort and dedupe
|
||||
servers.shuffle(&mut rand::thread_rng());
|
||||
|
||||
// insert our server as the very first choice if in list, else check if we can
|
||||
// prefer the room alias server first
|
||||
match servers
|
||||
.iter()
|
||||
.position(|server_name| services.globals.server_is_ours(server_name))
|
||||
{
|
||||
| Some(server_index) => {
|
||||
servers.swap_remove(server_index);
|
||||
servers.insert(0, services.globals.server_name().to_owned());
|
||||
},
|
||||
| _ => {
|
||||
match servers
|
||||
.iter()
|
||||
.position(|server| server == room_alias.server_name())
|
||||
{
|
||||
| Some(alias_server_index) => {
|
||||
servers.swap_remove(alias_server_index);
|
||||
servers.insert(0, room_alias.server_name().into());
|
||||
},
|
||||
| _ => {},
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
servers
|
||||
}
|
||||
|
|
|
|||
|
|
@ -16,7 +16,10 @@ use ruma::{OwnedEventId, UserId, api::client::context::get_context, events::Stat
|
|||
|
||||
use crate::{
|
||||
Ruma,
|
||||
client::message::{event_filter, ignored_filter, lazy_loading_witness, visibility_filter},
|
||||
client::{
|
||||
is_ignored_pdu,
|
||||
message::{event_filter, ignored_filter, lazy_loading_witness, visibility_filter},
|
||||
},
|
||||
};
|
||||
|
||||
const LIMIT_MAX: usize = 100;
|
||||
|
|
@ -78,6 +81,9 @@ pub(crate) async fn get_context_route(
|
|||
return Err!(Request(NotFound("Event not found.")));
|
||||
}
|
||||
|
||||
// Return M_SENDER_IGNORED if the sender of base_event is ignored (MSC4406)
|
||||
is_ignored_pdu(&services, &base_pdu, sender_user).await?;
|
||||
|
||||
let base_count = base_id.pdu_count();
|
||||
|
||||
let base_event = ignored_filter(&services, (base_count, base_pdu), sender_user);
|
||||
|
|
|
|||
121
src/api/client/dehydrated_device.rs
Normal file
121
src/api/client/dehydrated_device.rs
Normal file
|
|
@ -0,0 +1,121 @@
|
|||
use axum::extract::State;
|
||||
use axum_client_ip::InsecureClientIp;
|
||||
use conduwuit::{Err, Result, at};
|
||||
use futures::StreamExt;
|
||||
use ruma::api::client::dehydrated_device::{
|
||||
delete_dehydrated_device::unstable as delete_dehydrated_device,
|
||||
get_dehydrated_device::unstable as get_dehydrated_device, get_events::unstable as get_events,
|
||||
put_dehydrated_device::unstable as put_dehydrated_device,
|
||||
};
|
||||
|
||||
use crate::Ruma;
|
||||
|
||||
const MAX_BATCH_EVENTS: usize = 50;
|
||||
|
||||
/// # `PUT /_matrix/client/../dehydrated_device`
|
||||
///
|
||||
/// Creates or overwrites the user's dehydrated device.
|
||||
#[tracing::instrument(skip_all, fields(%client))]
|
||||
pub(crate) async fn put_dehydrated_device_route(
|
||||
State(services): State<crate::State>,
|
||||
InsecureClientIp(client): InsecureClientIp,
|
||||
body: Ruma<put_dehydrated_device::Request>,
|
||||
) -> Result<put_dehydrated_device::Response> {
|
||||
let sender_user = body
|
||||
.sender_user
|
||||
.as_deref()
|
||||
.expect("AccessToken authentication required");
|
||||
|
||||
let device_id = body.body.device_id.clone();
|
||||
|
||||
services
|
||||
.users
|
||||
.set_dehydrated_device(sender_user, body.body)
|
||||
.await?;
|
||||
|
||||
Ok(put_dehydrated_device::Response { device_id })
|
||||
}
|
||||
|
||||
/// # `DELETE /_matrix/client/../dehydrated_device`
|
||||
///
|
||||
/// Deletes the user's dehydrated device without replacement.
|
||||
#[tracing::instrument(skip_all, fields(%client))]
|
||||
pub(crate) async fn delete_dehydrated_device_route(
|
||||
State(services): State<crate::State>,
|
||||
InsecureClientIp(client): InsecureClientIp,
|
||||
body: Ruma<delete_dehydrated_device::Request>,
|
||||
) -> Result<delete_dehydrated_device::Response> {
|
||||
let sender_user = body.sender_user();
|
||||
|
||||
let device_id = services.users.get_dehydrated_device_id(sender_user).await?;
|
||||
|
||||
services.users.remove_device(sender_user, &device_id).await;
|
||||
|
||||
Ok(delete_dehydrated_device::Response { device_id })
|
||||
}
|
||||
|
||||
/// # `GET /_matrix/client/../dehydrated_device`
|
||||
///
|
||||
/// Gets the user's dehydrated device
|
||||
#[tracing::instrument(skip_all, fields(%client))]
|
||||
pub(crate) async fn get_dehydrated_device_route(
|
||||
State(services): State<crate::State>,
|
||||
InsecureClientIp(client): InsecureClientIp,
|
||||
body: Ruma<get_dehydrated_device::Request>,
|
||||
) -> Result<get_dehydrated_device::Response> {
|
||||
let sender_user = body.sender_user();
|
||||
|
||||
let device = services.users.get_dehydrated_device(sender_user).await?;
|
||||
|
||||
Ok(get_dehydrated_device::Response {
|
||||
device_id: device.device_id,
|
||||
device_data: device.device_data,
|
||||
})
|
||||
}
|
||||
|
||||
/// # `GET /_matrix/client/../dehydrated_device/{device_id}/events`
|
||||
///
|
||||
/// Paginates the events of the dehydrated device.
|
||||
#[tracing::instrument(skip_all, fields(%client))]
|
||||
pub(crate) async fn get_dehydrated_events_route(
|
||||
State(services): State<crate::State>,
|
||||
InsecureClientIp(client): InsecureClientIp,
|
||||
body: Ruma<get_events::Request>,
|
||||
) -> Result<get_events::Response> {
|
||||
let sender_user = body.sender_user();
|
||||
|
||||
let device_id = &body.body.device_id;
|
||||
let existing_id = services.users.get_dehydrated_device_id(sender_user).await;
|
||||
|
||||
if existing_id.as_ref().is_err()
|
||||
|| existing_id
|
||||
.as_ref()
|
||||
.is_ok_and(|existing_id| existing_id != device_id)
|
||||
{
|
||||
return Err!(Request(Forbidden("Not the dehydrated device_id.")));
|
||||
}
|
||||
|
||||
let since: Option<u64> = body
|
||||
.body
|
||||
.next_batch
|
||||
.as_deref()
|
||||
.map(str::parse)
|
||||
.transpose()?;
|
||||
|
||||
let mut next_batch: Option<u64> = None;
|
||||
let events = services
|
||||
.users
|
||||
.get_to_device_events(sender_user, device_id, since, None)
|
||||
.take(MAX_BATCH_EVENTS)
|
||||
.inspect(|&(count, _)| {
|
||||
next_batch.replace(count);
|
||||
})
|
||||
.map(at!(1))
|
||||
.collect()
|
||||
.await;
|
||||
|
||||
Ok(get_events::Response {
|
||||
events,
|
||||
next_batch: next_batch.as_ref().map(ToString::to_string),
|
||||
})
|
||||
}
|
||||
|
|
@ -6,6 +6,7 @@ use conduwuit::{
|
|||
Err, Result, err,
|
||||
utils::{self, content_disposition::make_content_disposition, math::ruma_from_usize},
|
||||
};
|
||||
use conduwuit_core::error;
|
||||
use conduwuit_service::{
|
||||
Services,
|
||||
media::{CACHE_CONTROL_IMMUTABLE, CORP_CROSS_ORIGIN, Dim, FileMeta, MXC_LENGTH},
|
||||
|
|
@ -144,12 +145,22 @@ pub(crate) async fn get_content_route(
|
|||
server_name: &body.server_name,
|
||||
media_id: &body.media_id,
|
||||
};
|
||||
|
||||
let FileMeta {
|
||||
content,
|
||||
content_type,
|
||||
content_disposition,
|
||||
} = fetch_file(&services, &mxc, user, body.timeout_ms, None).await?;
|
||||
} = match fetch_file(&services, &mxc, user, body.timeout_ms, None).await {
|
||||
| Ok(meta) => meta,
|
||||
| Err(conduwuit::Error::Io(e)) => match e.kind() {
|
||||
| std::io::ErrorKind::NotFound => return Err!(Request(NotFound("Media not found."))),
|
||||
| std::io::ErrorKind::PermissionDenied => {
|
||||
error!("Permission denied when trying to read file: {e:?}");
|
||||
return Err!(Request(Unknown("Unknown error when fetching file.")));
|
||||
},
|
||||
| _ => return Err!(Request(Unknown("Unknown error when fetching file."))),
|
||||
},
|
||||
| Err(_) => return Err!(Request(Unknown("Unknown error when fetching file."))),
|
||||
};
|
||||
|
||||
Ok(get_content::v1::Response {
|
||||
file: content.expect("entire file contents"),
|
||||
|
|
@ -185,7 +196,18 @@ pub(crate) async fn get_content_as_filename_route(
|
|||
content,
|
||||
content_type,
|
||||
content_disposition,
|
||||
} = fetch_file(&services, &mxc, user, body.timeout_ms, Some(&body.filename)).await?;
|
||||
} = match fetch_file(&services, &mxc, user, body.timeout_ms, None).await {
|
||||
| Ok(meta) => meta,
|
||||
| Err(conduwuit::Error::Io(e)) => match e.kind() {
|
||||
| std::io::ErrorKind::NotFound => return Err!(Request(NotFound("Media not found."))),
|
||||
| std::io::ErrorKind::PermissionDenied => {
|
||||
error!("Permission denied when trying to read file: {e:?}");
|
||||
return Err!(Request(Unknown("Unknown error when fetching file.")));
|
||||
},
|
||||
| _ => return Err!(Request(Unknown("Unknown error when fetching file."))),
|
||||
},
|
||||
| Err(_) => return Err!(Request(Unknown("Unknown error when fetching file."))),
|
||||
};
|
||||
|
||||
Ok(get_content_as_filename::v1::Response {
|
||||
file: content.expect("entire file contents"),
|
||||
|
|
|
|||
|
|
@ -3,7 +3,7 @@ use std::{borrow::Borrow, collections::HashMap, iter::once, sync::Arc};
|
|||
use axum::extract::State;
|
||||
use axum_client_ip::InsecureClientIp;
|
||||
use conduwuit::{
|
||||
Err, Result, debug, debug_info, debug_warn, err, error, info,
|
||||
Err, Result, debug, debug_info, debug_warn, err, error, info, is_true,
|
||||
matrix::{
|
||||
StateKey,
|
||||
event::{gen_event_id, gen_event_id_canonical_json},
|
||||
|
|
@ -26,7 +26,7 @@ use ruma::{
|
|||
api::{
|
||||
client::{
|
||||
error::ErrorKind,
|
||||
membership::{ThirdPartySigned, join_room_by_id, join_room_by_id_or_alias},
|
||||
membership::{join_room_by_id, join_room_by_id_or_alias},
|
||||
},
|
||||
federation::{self},
|
||||
},
|
||||
|
|
@ -34,7 +34,7 @@ use ruma::{
|
|||
events::{
|
||||
StateEventType,
|
||||
room::{
|
||||
join_rules::{AllowRule, JoinRule},
|
||||
join_rules::JoinRule,
|
||||
member::{MembershipState, RoomMemberEventContent},
|
||||
},
|
||||
},
|
||||
|
|
@ -48,9 +48,13 @@ use service::{
|
|||
timeline::pdu_fits,
|
||||
},
|
||||
};
|
||||
use tokio::join;
|
||||
|
||||
use super::{banned_room_check, validate_remote_member_event_stub};
|
||||
use crate::Ruma;
|
||||
use crate::{
|
||||
Ruma,
|
||||
server::{select_authorising_user, user_can_perform_restricted_join},
|
||||
};
|
||||
|
||||
/// # `POST /_matrix/client/r0/rooms/{roomId}/join`
|
||||
///
|
||||
|
|
@ -116,7 +120,6 @@ pub(crate) async fn join_room_by_id_route(
|
|||
&body.room_id,
|
||||
body.reason.clone(),
|
||||
&servers,
|
||||
body.third_party_signed.as_ref(),
|
||||
&body.appservice_info,
|
||||
)
|
||||
.boxed()
|
||||
|
|
@ -195,11 +198,7 @@ pub(crate) async fn join_room_by_id_or_alias_route(
|
|||
(servers, room_id)
|
||||
},
|
||||
| Err(room_alias) => {
|
||||
let (room_id, mut servers) = services
|
||||
.rooms
|
||||
.alias
|
||||
.resolve_alias(&room_alias, Some(body.via.clone()))
|
||||
.await?;
|
||||
let (room_id, mut servers) = services.rooms.alias.resolve_alias(&room_alias).await?;
|
||||
|
||||
banned_room_check(
|
||||
&services,
|
||||
|
|
@ -248,7 +247,6 @@ pub(crate) async fn join_room_by_id_or_alias_route(
|
|||
&room_id,
|
||||
body.reason.clone(),
|
||||
&servers,
|
||||
body.third_party_signed.as_ref(),
|
||||
appservice_info,
|
||||
)
|
||||
.boxed()
|
||||
|
|
@ -263,7 +261,6 @@ pub async fn join_room_by_id_helper(
|
|||
room_id: &RoomId,
|
||||
reason: Option<String>,
|
||||
servers: &[OwnedServerName],
|
||||
third_party_signed: Option<&ThirdPartySigned>,
|
||||
appservice_info: &Option<RegistrationInfo>,
|
||||
) -> Result<join_room_by_id::v3::Response> {
|
||||
let state_lock = services.rooms.state.mutex.lock(room_id).await;
|
||||
|
|
@ -350,18 +347,16 @@ pub async fn join_room_by_id_helper(
|
|||
}
|
||||
}
|
||||
|
||||
if server_in_room {
|
||||
join_room_by_id_helper_local(
|
||||
services,
|
||||
sender_user,
|
||||
room_id,
|
||||
reason,
|
||||
servers,
|
||||
third_party_signed,
|
||||
state_lock,
|
||||
)
|
||||
.boxed()
|
||||
services
|
||||
.rooms
|
||||
.roles
|
||||
.check_join_allowed(room_id, sender_user)
|
||||
.await?;
|
||||
|
||||
if server_in_room {
|
||||
join_room_by_id_helper_local(services, sender_user, room_id, reason, servers, state_lock)
|
||||
.boxed()
|
||||
.await?;
|
||||
} else {
|
||||
// Ask a remote server if we are not participating in this room
|
||||
join_room_by_id_helper_remote(
|
||||
|
|
@ -370,7 +365,6 @@ pub async fn join_room_by_id_helper(
|
|||
room_id,
|
||||
reason,
|
||||
servers,
|
||||
third_party_signed,
|
||||
state_lock,
|
||||
)
|
||||
.boxed()
|
||||
|
|
@ -386,7 +380,6 @@ async fn join_room_by_id_helper_remote(
|
|||
room_id: &RoomId,
|
||||
reason: Option<String>,
|
||||
servers: &[OwnedServerName],
|
||||
_third_party_signed: Option<&ThirdPartySigned>,
|
||||
state_lock: RoomMutexGuard,
|
||||
) -> Result {
|
||||
info!("Joining {room_id} over federation.");
|
||||
|
|
@ -396,11 +389,10 @@ async fn join_room_by_id_helper_remote(
|
|||
|
||||
info!("make_join finished");
|
||||
|
||||
let Some(room_version_id) = make_join_response.room_version else {
|
||||
return Err!(BadServerResponse("Remote room version is not supported by conduwuit"));
|
||||
};
|
||||
let room_version_id = make_join_response.room_version.unwrap_or(RoomVersionId::V1);
|
||||
|
||||
if !services.server.supported_room_version(&room_version_id) {
|
||||
// How did we get here?
|
||||
return Err!(BadServerResponse(
|
||||
"Remote room version {room_version_id} is not supported by conduwuit"
|
||||
));
|
||||
|
|
@ -429,10 +421,6 @@ async fn join_room_by_id_helper_remote(
|
|||
}
|
||||
};
|
||||
|
||||
join_event_stub.insert(
|
||||
"origin".to_owned(),
|
||||
CanonicalJsonValue::String(services.globals.server_name().as_str().to_owned()),
|
||||
);
|
||||
join_event_stub.insert(
|
||||
"origin_server_ts".to_owned(),
|
||||
CanonicalJsonValue::Integer(
|
||||
|
|
@ -744,87 +732,45 @@ async fn join_room_by_id_helper_local(
|
|||
room_id: &RoomId,
|
||||
reason: Option<String>,
|
||||
servers: &[OwnedServerName],
|
||||
_third_party_signed: Option<&ThirdPartySigned>,
|
||||
state_lock: RoomMutexGuard,
|
||||
) -> Result {
|
||||
debug_info!("We can join locally");
|
||||
let join_rules = services.rooms.state_accessor.get_join_rules(room_id).await;
|
||||
info!("Joining room locally");
|
||||
|
||||
let mut restricted_join_authorized = None;
|
||||
match join_rules {
|
||||
| JoinRule::Restricted(restricted) | JoinRule::KnockRestricted(restricted) => {
|
||||
for restriction in restricted.allow {
|
||||
match restriction {
|
||||
| AllowRule::RoomMembership(membership) => {
|
||||
if services
|
||||
.rooms
|
||||
.state_cache
|
||||
.is_joined(sender_user, &membership.room_id)
|
||||
.await
|
||||
{
|
||||
restricted_join_authorized = Some(true);
|
||||
break;
|
||||
}
|
||||
},
|
||||
| AllowRule::UnstableSpamChecker => {
|
||||
match services
|
||||
.antispam
|
||||
.meowlnir_accept_make_join(room_id.to_owned(), sender_user.to_owned())
|
||||
.await
|
||||
{
|
||||
| Ok(()) => {
|
||||
restricted_join_authorized = Some(true);
|
||||
break;
|
||||
},
|
||||
| Err(_) =>
|
||||
return Err!(Request(Forbidden(
|
||||
"Antispam rejected join request."
|
||||
))),
|
||||
}
|
||||
},
|
||||
| _ => {},
|
||||
}
|
||||
let (room_version, join_rules, is_invited) = join!(
|
||||
services.rooms.state.get_room_version(room_id),
|
||||
services.rooms.state_accessor.get_join_rules(room_id),
|
||||
services.rooms.state_cache.is_invited(sender_user, room_id)
|
||||
);
|
||||
|
||||
let room_version = room_version?;
|
||||
let mut auth_user: Option<OwnedUserId> = None;
|
||||
if !is_invited && matches!(join_rules, JoinRule::Restricted(_) | JoinRule::KnockRestricted(_))
|
||||
{
|
||||
use RoomVersionId::*;
|
||||
if !matches!(room_version, V1 | V2 | V3 | V4 | V5 | V6 | V7) {
|
||||
// This is a restricted room, check if we can complete the join requirements
|
||||
// locally.
|
||||
let needs_auth_user =
|
||||
user_can_perform_restricted_join(services, sender_user, room_id, &room_version)
|
||||
.await;
|
||||
if needs_auth_user.is_ok_and(is_true!()) {
|
||||
// If there was an error or the value is false, we'll try joining over
|
||||
// federation. Since it's Ok(true), we can authorise this locally.
|
||||
// If we can't select a local user, this will remain None, the join will fail,
|
||||
// and we'll fall back to federation.
|
||||
auth_user = select_authorising_user(services, room_id, sender_user, &state_lock)
|
||||
.await
|
||||
.ok();
|
||||
}
|
||||
},
|
||||
| _ => {},
|
||||
}
|
||||
let join_authorized_via_users_server = if restricted_join_authorized.is_none() {
|
||||
None
|
||||
} else {
|
||||
match restricted_join_authorized.unwrap() {
|
||||
| true => services
|
||||
.rooms
|
||||
.state_cache
|
||||
.local_users_in_room(room_id)
|
||||
.filter(|user| {
|
||||
trace!("Checking if {user} can invite {sender_user} to {room_id}");
|
||||
services.rooms.state_accessor.user_can_invite(
|
||||
room_id,
|
||||
user,
|
||||
sender_user,
|
||||
&state_lock,
|
||||
)
|
||||
})
|
||||
.boxed()
|
||||
.next()
|
||||
.await
|
||||
.map(ToOwned::to_owned),
|
||||
| false => {
|
||||
warn!(
|
||||
"Join authorization failed for restricted join in room {room_id} for user \
|
||||
{sender_user}"
|
||||
);
|
||||
return Err!(Request(Forbidden("You are not authorized to join this room.")));
|
||||
},
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
let content = RoomMemberEventContent {
|
||||
displayname: services.users.displayname(sender_user).await.ok(),
|
||||
avatar_url: services.users.avatar_url(sender_user).await.ok(),
|
||||
blurhash: services.users.blurhash(sender_user).await.ok(),
|
||||
reason: reason.clone(),
|
||||
join_authorized_via_users_server,
|
||||
join_authorized_via_users_server: auth_user,
|
||||
..RoomMemberEventContent::new(MembershipState::Join)
|
||||
};
|
||||
|
||||
|
|
@ -840,6 +786,7 @@ async fn join_room_by_id_helper_local(
|
|||
)
|
||||
.await
|
||||
else {
|
||||
info!("Joined room locally");
|
||||
return Ok(());
|
||||
};
|
||||
|
||||
|
|
@ -847,138 +794,13 @@ async fn join_room_by_id_helper_local(
|
|||
return Err(error);
|
||||
}
|
||||
|
||||
warn!(
|
||||
info!(
|
||||
?error,
|
||||
servers = %servers.len(),
|
||||
"Could not join restricted room locally, attempting remote join",
|
||||
remote_servers = %servers.len(),
|
||||
"Could not join room locally, attempting remote join",
|
||||
);
|
||||
let Ok((make_join_response, remote_server)) =
|
||||
make_join_request(services, sender_user, room_id, servers).await
|
||||
else {
|
||||
return Err(error);
|
||||
};
|
||||
|
||||
let Some(room_version_id) = make_join_response.room_version else {
|
||||
return Err!(BadServerResponse("Remote room version is not supported by conduwuit"));
|
||||
};
|
||||
|
||||
if !services.server.supported_room_version(&room_version_id) {
|
||||
return Err!(BadServerResponse(
|
||||
"Remote room version {room_version_id} is not supported by conduwuit"
|
||||
));
|
||||
}
|
||||
|
||||
let mut join_event_stub: CanonicalJsonObject =
|
||||
serde_json::from_str(make_join_response.event.get()).map_err(|e| {
|
||||
err!(BadServerResponse("Invalid make_join event json received from server: {e:?}"))
|
||||
})?;
|
||||
|
||||
validate_remote_member_event_stub(
|
||||
&MembershipState::Join,
|
||||
sender_user,
|
||||
room_id,
|
||||
&join_event_stub,
|
||||
)?;
|
||||
|
||||
let join_authorized_via_users_server = join_event_stub
|
||||
.get("content")
|
||||
.map(|s| {
|
||||
s.as_object()?
|
||||
.get("join_authorised_via_users_server")?
|
||||
.as_str()
|
||||
})
|
||||
.and_then(|s| OwnedUserId::try_from(s.unwrap_or_default()).ok());
|
||||
|
||||
join_event_stub.insert(
|
||||
"origin".to_owned(),
|
||||
CanonicalJsonValue::String(services.globals.server_name().as_str().to_owned()),
|
||||
);
|
||||
join_event_stub.insert(
|
||||
"origin_server_ts".to_owned(),
|
||||
CanonicalJsonValue::Integer(
|
||||
utils::millis_since_unix_epoch()
|
||||
.try_into()
|
||||
.expect("Timestamp is valid js_int value"),
|
||||
),
|
||||
);
|
||||
join_event_stub.insert(
|
||||
"content".to_owned(),
|
||||
to_canonical_value(RoomMemberEventContent {
|
||||
displayname: services.users.displayname(sender_user).await.ok(),
|
||||
avatar_url: services.users.avatar_url(sender_user).await.ok(),
|
||||
blurhash: services.users.blurhash(sender_user).await.ok(),
|
||||
reason,
|
||||
join_authorized_via_users_server,
|
||||
..RoomMemberEventContent::new(MembershipState::Join)
|
||||
})
|
||||
.expect("event is valid, we just created it"),
|
||||
);
|
||||
|
||||
// We keep the "event_id" in the pdu only in v1 or
|
||||
// v2 rooms
|
||||
match room_version_id {
|
||||
| RoomVersionId::V1 | RoomVersionId::V2 => {},
|
||||
| _ => {
|
||||
join_event_stub.remove("event_id");
|
||||
},
|
||||
}
|
||||
|
||||
// In order to create a compatible ref hash (EventID) the `hashes` field needs
|
||||
// to be present
|
||||
services
|
||||
.server_keys
|
||||
.hash_and_sign_event(&mut join_event_stub, &room_version_id)?;
|
||||
|
||||
// Generate event id
|
||||
let event_id = gen_event_id(&join_event_stub, &room_version_id)?;
|
||||
|
||||
// Add event_id back
|
||||
join_event_stub
|
||||
.insert("event_id".to_owned(), CanonicalJsonValue::String(event_id.clone().into()));
|
||||
|
||||
// It has enough fields to be called a proper event now
|
||||
let join_event = join_event_stub;
|
||||
|
||||
let send_join_response = services
|
||||
.sending
|
||||
.send_synapse_request(
|
||||
&remote_server,
|
||||
federation::membership::create_join_event::v2::Request {
|
||||
room_id: room_id.to_owned(),
|
||||
event_id: event_id.clone(),
|
||||
omit_members: false,
|
||||
pdu: services
|
||||
.sending
|
||||
.convert_to_outgoing_federation_event(join_event.clone())
|
||||
.await,
|
||||
},
|
||||
)
|
||||
.await?;
|
||||
|
||||
if let Some(signed_raw) = send_join_response.room_state.event {
|
||||
let (signed_event_id, signed_value) =
|
||||
gen_event_id_canonical_json(&signed_raw, &room_version_id).map_err(|e| {
|
||||
err!(Request(BadJson(warn!("Could not convert event to canonical JSON: {e}"))))
|
||||
})?;
|
||||
|
||||
if signed_event_id != event_id {
|
||||
return Err!(Request(BadJson(
|
||||
warn!(%signed_event_id, %event_id, "Server {remote_server} sent event with wrong event ID")
|
||||
)));
|
||||
}
|
||||
|
||||
drop(state_lock);
|
||||
services
|
||||
.rooms
|
||||
.event_handler
|
||||
.handle_incoming_pdu(&remote_server, room_id, &signed_event_id, signed_value, true)
|
||||
.boxed()
|
||||
.await?;
|
||||
} else {
|
||||
return Err(error);
|
||||
}
|
||||
|
||||
Ok(())
|
||||
join_room_by_id_helper_remote(services, sender_user, room_id, reason, servers, state_lock)
|
||||
.await
|
||||
}
|
||||
|
||||
async fn make_join_request(
|
||||
|
|
@ -987,17 +809,16 @@ async fn make_join_request(
|
|||
room_id: &RoomId,
|
||||
servers: &[OwnedServerName],
|
||||
) -> Result<(federation::membership::prepare_join_event::v1::Response, OwnedServerName)> {
|
||||
let mut make_join_response_and_server =
|
||||
Err!(BadServerResponse("No server available to assist in joining."));
|
||||
|
||||
let mut make_join_counter: usize = 0;
|
||||
let mut incompatible_room_version_count: usize = 0;
|
||||
let mut make_join_counter: usize = 1;
|
||||
|
||||
for remote_server in servers {
|
||||
if services.globals.server_is_ours(remote_server) {
|
||||
continue;
|
||||
}
|
||||
info!("Asking {remote_server} for make_join ({make_join_counter})");
|
||||
info!(
|
||||
"Asking {remote_server} for make_join (attempt {make_join_counter}/{})",
|
||||
servers.len()
|
||||
);
|
||||
let make_join_response = services
|
||||
.sending
|
||||
.send_federation_request(
|
||||
|
|
@ -1025,47 +846,44 @@ async fn make_join_request(
|
|||
warn!("make_join response from {remote_server} failed validation: {e}");
|
||||
continue;
|
||||
}
|
||||
make_join_response_and_server = Ok((response, remote_server.clone()));
|
||||
break;
|
||||
return Ok((response, remote_server.clone()));
|
||||
},
|
||||
| Err(e) => {
|
||||
info!("make_join request to {remote_server} failed: {e}");
|
||||
if matches!(
|
||||
e.kind(),
|
||||
ErrorKind::IncompatibleRoomVersion { .. } | ErrorKind::UnsupportedRoomVersion
|
||||
) {
|
||||
incompatible_room_version_count =
|
||||
incompatible_room_version_count.saturating_add(1);
|
||||
}
|
||||
|
||||
if incompatible_room_version_count > 15 {
|
||||
| Err(e) => match e.kind() {
|
||||
| ErrorKind::UnableToAuthorizeJoin => {
|
||||
info!(
|
||||
"15 servers have responded with M_INCOMPATIBLE_ROOM_VERSION or \
|
||||
M_UNSUPPORTED_ROOM_VERSION, assuming that conduwuit does not support \
|
||||
the room version {room_id}: {e}"
|
||||
"{remote_server} was unable to verify the joining user satisfied \
|
||||
restricted join requirements: {e}. Will continue trying."
|
||||
);
|
||||
make_join_response_and_server =
|
||||
Err!(BadServerResponse("Room version is not supported by Conduwuit"));
|
||||
return make_join_response_and_server;
|
||||
}
|
||||
|
||||
if make_join_counter > 40 {
|
||||
},
|
||||
| ErrorKind::UnableToGrantJoin => {
|
||||
info!(
|
||||
"{remote_server} believes the joining user satisfies restricted join \
|
||||
rules, but is unable to authorise a join for us. Will continue trying."
|
||||
);
|
||||
},
|
||||
| ErrorKind::IncompatibleRoomVersion { room_version } => {
|
||||
warn!(
|
||||
"40 servers failed to provide valid make_join response, assuming no \
|
||||
server can assist in joining."
|
||||
"{remote_server} reports the room we are trying to join is \
|
||||
v{room_version}, which we do not support."
|
||||
);
|
||||
make_join_response_and_server =
|
||||
Err!(BadServerResponse("No server available to assist in joining."));
|
||||
|
||||
return make_join_response_and_server;
|
||||
}
|
||||
return Err(e);
|
||||
},
|
||||
| ErrorKind::Forbidden { .. } => {
|
||||
warn!("{remote_server} refuses to let us join: {e}.");
|
||||
return Err(e);
|
||||
},
|
||||
| ErrorKind::NotFound => {
|
||||
info!(
|
||||
"{remote_server} does not know about {room_id}: {e}. Will continue \
|
||||
trying."
|
||||
);
|
||||
},
|
||||
| _ => {
|
||||
info!("{remote_server} failed to make_join: {e}. Will continue trying.");
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
if make_join_response_and_server.is_ok() {
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
make_join_response_and_server
|
||||
info!("All {} servers were unable to assist in joining {room_id} :(", servers.len());
|
||||
Err!(BadServerResponse("No server available to assist in joining."))
|
||||
}
|
||||
|
|
|
|||
|
|
@ -102,11 +102,7 @@ pub(crate) async fn knock_room_route(
|
|||
(servers, room_id)
|
||||
},
|
||||
| Err(room_alias) => {
|
||||
let (room_id, mut servers) = services
|
||||
.rooms
|
||||
.alias
|
||||
.resolve_alias(&room_alias, Some(body.via.clone()))
|
||||
.await?;
|
||||
let (room_id, mut servers) = services.rooms.alias.resolve_alias(&room_alias).await?;
|
||||
|
||||
banned_room_check(
|
||||
&services,
|
||||
|
|
@ -253,7 +249,6 @@ async fn knock_room_by_id_helper(
|
|||
room_id,
|
||||
reason.clone(),
|
||||
servers,
|
||||
None,
|
||||
&None,
|
||||
)
|
||||
.await
|
||||
|
|
|
|||
|
|
@ -1,7 +1,7 @@
|
|||
use axum::extract::State;
|
||||
use axum_client_ip::InsecureClientIp;
|
||||
use conduwuit::{
|
||||
Err, Result, at, debug_warn,
|
||||
Err, Error, Result, at, debug_warn,
|
||||
matrix::{
|
||||
event::{Event, Matches},
|
||||
pdu::PduCount,
|
||||
|
|
@ -26,7 +26,7 @@ use ruma::{
|
|||
DeviceId, RoomId, UserId,
|
||||
api::{
|
||||
Direction,
|
||||
client::{filter::RoomEventFilter, message::get_message_events},
|
||||
client::{error::ErrorKind, filter::RoomEventFilter, message::get_message_events},
|
||||
},
|
||||
events::{
|
||||
AnyStateEvent, StateEventType,
|
||||
|
|
@ -279,23 +279,30 @@ pub(crate) async fn ignored_filter(
|
|||
|
||||
is_ignored_pdu(services, pdu, user_id)
|
||||
.await
|
||||
.unwrap_or(true)
|
||||
.eq(&false)
|
||||
.then_some(item)
|
||||
}
|
||||
|
||||
/// Determine whether a PDU should be ignored for a given recipient user.
|
||||
/// Returns True if this PDU should be ignored, returns False otherwise.
|
||||
///
|
||||
/// The error SenderIgnored is returned if the sender or the sender's server is
|
||||
/// ignored by the relevant user. If the error cannot be returned to the user,
|
||||
/// it should equate to a true value (i.e. ignored).
|
||||
#[inline]
|
||||
pub(crate) async fn is_ignored_pdu<Pdu>(
|
||||
services: &Services,
|
||||
event: &Pdu,
|
||||
recipient_user: &UserId,
|
||||
) -> bool
|
||||
) -> Result<bool>
|
||||
where
|
||||
Pdu: Event + Send + Sync,
|
||||
{
|
||||
// exclude Synapse's dummy events from bloating up response bodies. clients
|
||||
// don't need to see this.
|
||||
if event.kind().to_cow_str() == "org.matrix.dummy_event" {
|
||||
return true;
|
||||
return Ok(true);
|
||||
}
|
||||
|
||||
let sender_user = event.sender();
|
||||
|
|
@ -310,21 +317,27 @@ where
|
|||
|
||||
if !type_ignored {
|
||||
// We cannot safely ignore this type
|
||||
return false;
|
||||
return Ok(false);
|
||||
}
|
||||
|
||||
if server_ignored {
|
||||
// the sender's server is ignored, so ignore this event
|
||||
return true;
|
||||
return Err(Error::BadRequest(
|
||||
ErrorKind::SenderIgnored { sender: None },
|
||||
"The sender's server is ignored by this server.",
|
||||
));
|
||||
}
|
||||
|
||||
if user_ignored && !services.config.send_messages_from_ignored_users_to_client {
|
||||
// the recipient of this PDU has the sender ignored, and we're not
|
||||
// configured to send ignored messages to clients
|
||||
return true;
|
||||
return Err(Error::BadRequest(
|
||||
ErrorKind::SenderIgnored { sender: Some(event.sender().to_owned()) },
|
||||
"You have ignored this sender.",
|
||||
));
|
||||
}
|
||||
|
||||
false
|
||||
Ok(false)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
|
|
|
|||
|
|
@ -6,6 +6,7 @@ pub(super) mod appservice;
|
|||
pub(super) mod backup;
|
||||
pub(super) mod capabilities;
|
||||
pub(super) mod context;
|
||||
pub(super) mod dehydrated_device;
|
||||
pub(super) mod device;
|
||||
pub(super) mod directory;
|
||||
pub(super) mod filter;
|
||||
|
|
@ -49,6 +50,7 @@ pub(super) use appservice::*;
|
|||
pub(super) use backup::*;
|
||||
pub(super) use capabilities::*;
|
||||
pub(super) use context::*;
|
||||
pub(super) use dehydrated_device::*;
|
||||
pub(super) use device::*;
|
||||
pub(super) use directory::*;
|
||||
pub(super) use filter::*;
|
||||
|
|
|
|||
|
|
@ -1,6 +1,6 @@
|
|||
use axum::extract::State;
|
||||
use conduwuit::{
|
||||
Err, Result, at, debug_warn,
|
||||
Err, Result, at, debug_warn, err,
|
||||
matrix::{Event, event::RelationTypeEqual, pdu::PduCount},
|
||||
utils::{IterStream, ReadyExt, result::FlatOk, stream::WidebandExt},
|
||||
};
|
||||
|
|
@ -18,7 +18,7 @@ use ruma::{
|
|||
events::{TimelineEventType, relation::RelationType},
|
||||
};
|
||||
|
||||
use crate::Ruma;
|
||||
use crate::{Ruma, client::is_ignored_pdu};
|
||||
|
||||
/// # `GET /_matrix/client/r0/rooms/{roomId}/relations/{eventId}/{relType}/{eventType}`
|
||||
pub(crate) async fn get_relating_events_with_rel_type_and_event_type_route(
|
||||
|
|
@ -118,6 +118,14 @@ async fn paginate_relations_with_filter(
|
|||
debug_warn!(req_evt = %target, %room_id, "Event relations requested by {sender_user} but is not allowed to see it, returning 404");
|
||||
return Err!(Request(NotFound("Event not found.")));
|
||||
}
|
||||
let target_pdu = services
|
||||
.rooms
|
||||
.timeline
|
||||
.get_pdu(target)
|
||||
.await
|
||||
.map_err(|_| err!(Request(NotFound("Event not found."))))?;
|
||||
// Return M_SENDER_IGNORED if the sender of base_event is ignored (MSC4406)
|
||||
is_ignored_pdu(services, &target_pdu, sender_user).await?;
|
||||
|
||||
let start: PduCount = from
|
||||
.map(str::parse)
|
||||
|
|
@ -159,6 +167,7 @@ async fn paginate_relations_with_filter(
|
|||
.ready_take_while(|(count, _)| Some(*count) != to)
|
||||
.take(limit)
|
||||
.wide_filter_map(|item| visibility_filter(services, sender_user, item))
|
||||
.wide_filter_map(|item| ignored_filter(services, item, sender_user))
|
||||
.then(async |mut pdu| {
|
||||
if let Err(e) = services
|
||||
.rooms
|
||||
|
|
@ -214,3 +223,17 @@ async fn visibility_filter<Pdu: Event + Send + Sync>(
|
|||
.await
|
||||
.then_some(item)
|
||||
}
|
||||
|
||||
async fn ignored_filter<Pdu: Event + Send + Sync>(
|
||||
services: &Services,
|
||||
item: (PduCount, Pdu),
|
||||
sender_user: &UserId,
|
||||
) -> Option<(PduCount, Pdu)> {
|
||||
let (_, pdu) = &item;
|
||||
|
||||
if is_ignored_pdu(services, pdu, sender_user).await.ok()? {
|
||||
None
|
||||
} else {
|
||||
Some(item)
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -4,7 +4,6 @@ use axum::extract::State;
|
|||
use axum_client_ip::InsecureClientIp;
|
||||
use conduwuit::{Err, Event, Result, debug_info, info, matrix::pdu::PduEvent, utils::ReadyExt};
|
||||
use conduwuit_service::Services;
|
||||
use rand::Rng;
|
||||
use ruma::{
|
||||
EventId, OwnedEventId, OwnedRoomId, OwnedUserId, RoomId, UserId,
|
||||
api::client::{
|
||||
|
|
@ -244,7 +243,7 @@ fn build_report(report: Report) -> RoomMessageEventContent {
|
|||
/// random delay sending a response per spec suggestion regarding
|
||||
/// enumerating for potential events existing in our server.
|
||||
async fn delay_response() {
|
||||
let time_to_wait = rand::thread_rng().gen_range(2..5);
|
||||
let time_to_wait = rand::random_range(2..5);
|
||||
debug_info!(
|
||||
"Got successful /report request, waiting {time_to_wait} seconds before sending \
|
||||
successful response."
|
||||
|
|
|
|||
|
|
@ -29,7 +29,7 @@ pub(crate) async fn get_room_event_route(
|
|||
|
||||
let (mut event, visible) = try_join(event, visible).await?;
|
||||
|
||||
if !visible || is_ignored_pdu(services, &event, body.sender_user()).await {
|
||||
if !visible || is_ignored_pdu(services, &event, body.sender_user()).await? {
|
||||
return Err!(Request(Forbidden("You don't have permission to view this event.")));
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -50,8 +50,8 @@ pub(crate) async fn send_message_event_route(
|
|||
|
||||
// Check if this is a new transaction id
|
||||
if let Ok(response) = services
|
||||
.transaction_ids
|
||||
.existing_txnid(sender_user, sender_device, &body.txn_id)
|
||||
.transactions
|
||||
.get_client_txn(sender_user, sender_device, &body.txn_id)
|
||||
.await
|
||||
{
|
||||
// The client might have sent a txnid of the /sendToDevice endpoint
|
||||
|
|
@ -92,7 +92,7 @@ pub(crate) async fn send_message_event_route(
|
|||
)
|
||||
.await?;
|
||||
|
||||
services.transaction_ids.add_txnid(
|
||||
services.transactions.add_client_txnid(
|
||||
sender_user,
|
||||
sender_device,
|
||||
&body.txn_id,
|
||||
|
|
|
|||
Some files were not shown because too many files have changed in this diff Show more
Loading…
Add table
Reference in a new issue