Compare commits
202 commits
jade/http3
...
main
| Author | SHA1 | Date | |
|---|---|---|---|
| 20b1529dc4 | |||
| bba5318ce8 | |||
| 1f91a74b27 | |||
| 5f901a560b | |||
| 59401e1786 | |||
| 95fa3b022a | |||
|
|
6b013bcf60 | ||
|
|
05a49ceb60 | ||
|
|
728c5828ba | ||
|
|
50c94d85a1 | ||
|
|
0cc188f62c | ||
|
|
6451671f66 | ||
|
|
ca21a885d5 | ||
|
|
4af4110f6d | ||
|
|
51b450c05c | ||
|
|
f9d1f71343 | ||
|
|
7901e4b996 | ||
|
|
7b6bf4b78e | ||
|
|
67d5619ccb | ||
|
|
bf001f96d6 | ||
|
|
ae2b87f03f | ||
|
|
957cd3502f | ||
|
|
a109542eb8 | ||
|
|
8c4844b00b | ||
|
|
eec7103910 | ||
|
|
43aa172829 | ||
|
|
9b4c483b6d | ||
|
|
b885e206ce | ||
|
|
07a935f625 | ||
|
|
d13801e976 | ||
|
|
5716c36b47 | ||
|
|
f11943b956 | ||
|
|
8b726a9c94 | ||
|
|
ffa3c53847 | ||
|
|
da8833fca4 | ||
|
|
267feb3c09 | ||
|
|
3d50af0943 | ||
|
|
9515019641 | ||
|
|
f0f53dfada | ||
| 3bfd10efab | |||
| 835d434d92 | |||
|
|
acef746d26 | ||
|
|
3356b60e97 | ||
|
|
c988c2b387 | ||
|
|
3121229707 | ||
|
|
ff85145ee8 | ||
|
|
f61d1a11e0 | ||
|
|
11ba8979ff | ||
|
|
f6956ccf12 | ||
|
|
977a5ac8c1 | ||
|
|
906c3df953 | ||
|
|
33e5fdc16f | ||
|
|
77ac17855a | ||
|
|
65ffcd2884 | ||
|
|
7ec88bdbfe | ||
|
|
da3fac8cb4 | ||
|
|
3366113939 | ||
|
|
9039784f41 | ||
|
|
7f165e5bbe | ||
|
|
c97111e3ca | ||
|
|
e8746760fa | ||
|
|
9dbd75e740 | ||
|
|
85b2fd91b9 | ||
|
|
6420c218a9 | ||
|
|
ec9402a328 | ||
|
|
d01f06a5c2 | ||
|
|
aee51b3b0d | ||
|
|
afcbccd9dd | ||
|
|
02448000f9 | ||
|
|
6af8918aa8 | ||
|
|
08f83cc438 | ||
|
|
a0468db121 | ||
|
|
4f23d566ed | ||
|
|
dac619b5f8 | ||
|
|
fdc9cc8074 | ||
|
|
40b1dabcca | ||
|
|
94c5af40cf | ||
|
|
36a3144757 | ||
|
|
220b61c589 | ||
|
|
38e93cde3e | ||
|
|
7e501cdb09 | ||
|
|
da182c162d | ||
|
|
9a3f7f4af7 | ||
|
|
5ce1f682f6 | ||
|
|
5feb08dff2 | ||
|
|
1e527c1075 | ||
|
|
c6943ae683 | ||
|
|
8932dacdc4 | ||
|
|
0be3d850ac | ||
|
|
57e7cf7057 | ||
|
|
1005585ccb | ||
|
|
1188566dbd | ||
|
|
0058212757 | ||
|
|
dbf8fd3320 | ||
|
|
ce295b079e | ||
|
|
5eb74bc1dd | ||
|
|
da561ab792 | ||
|
|
80c9bb4796 | ||
|
|
22a47d1e59 | ||
|
|
83883a002c | ||
|
|
8dd4b71e0e | ||
|
|
6fe3b1563c | ||
|
|
44d3825c8e | ||
|
|
d6c5484c3a | ||
|
|
1fd6056f3f | ||
|
|
525a0ae52b | ||
|
|
60210754d9 | ||
|
|
08dd787083 | ||
|
|
2c7233812b | ||
|
|
d725e98220 | ||
|
|
0226ca1e83 | ||
|
|
1695b6d19e | ||
|
|
c40cc3b236 | ||
|
|
754959e80d | ||
|
|
37888fb670 | ||
|
|
7207398a9e | ||
|
|
1a7bda209b | ||
|
|
7e1950b3d2 | ||
|
|
b507898c62 | ||
|
|
f4af67575e | ||
|
|
6adb99397e | ||
|
|
8ce83a8a14 | ||
|
|
052c4dfa21 | ||
|
|
a43dee1728 | ||
|
|
763d9b3de8 | ||
|
|
1e6d95583c | ||
|
|
8a254a33cc | ||
|
|
c97dd54766 | ||
|
|
8ddb7c70c0 | ||
|
|
cb9786466b | ||
|
|
18d2662b01 | ||
|
|
558262dd1f | ||
|
|
d311b87579 | ||
|
|
8702f55cf5 | ||
|
|
d4481b07ac | ||
|
|
92351df925 | ||
|
|
47e2733ea1 | ||
|
|
6637e4c6a7 | ||
|
|
35e441452f | ||
|
|
66bbb655bf | ||
|
|
81b202ce51 | ||
|
|
4657844d46 | ||
|
|
9016cd11a6 | ||
|
|
dd70094719 | ||
|
|
fcd49b7ab3 | ||
|
|
470c9b52dd | ||
|
|
0d8cafc329 | ||
|
|
2f9956ddca | ||
|
|
21a97cdd0b | ||
|
|
e986cd4536 | ||
|
|
526d862296 | ||
|
|
fbeb5bf186 | ||
|
|
a336f2df44 | ||
|
|
19b78ec73e | ||
|
|
27ff2d9363 | ||
|
|
50fa8c3abf | ||
|
|
18c4be869f | ||
|
|
fc00b96d8b | ||
|
|
fa4156d8a6 | ||
|
|
23638cd714 | ||
|
|
9f1a483e76 | ||
|
|
688ef727e5 | ||
|
|
3de026160e | ||
|
|
9fe761513d | ||
|
|
abf1e1195a | ||
|
|
d9537e9b55 | ||
|
|
0d1de70d8f | ||
|
|
4aa03a71eb | ||
|
|
f847918575 | ||
|
|
7569a0545b | ||
|
|
b6c5991e1f | ||
|
|
efd879fcd8 | ||
|
|
92a848f74d | ||
|
|
776b5865ba | ||
|
|
722bacbe89 | ||
|
|
46907e3dce | ||
|
|
31e2195e56 | ||
|
|
7ecac93ddc | ||
|
|
6a0b103722 | ||
|
|
23d77b614f | ||
|
|
e01aa44b16 | ||
|
|
a08739c246 | ||
|
|
c14864b881 | ||
|
|
1773e72e68 | ||
|
|
0f94d55689 | ||
|
|
abfb6377c2 | ||
|
|
91d64f5b24 | ||
|
|
9a3f3f6e78 | ||
|
|
b3e31a4aad | ||
|
|
8cda431cc6 | ||
|
|
02b9a3f713 | ||
|
|
d40893730c | ||
|
|
28fae58cf6 | ||
|
|
f458f6ab76 | ||
|
|
fdf9cea533 | ||
|
|
ecb1b73c84 | ||
|
|
e03082480a | ||
|
|
f9e7f019ad | ||
|
|
12069e7c86 | ||
|
|
77928a62b4 | ||
|
|
c73cb5c1bf | ||
|
|
a140eacb04 |
189 changed files with 9467 additions and 1980 deletions
|
|
@ -44,7 +44,7 @@ runs:
|
||||||
|
|
||||||
- name: Login to builtin registry
|
- name: Login to builtin registry
|
||||||
if: ${{ env.BUILTIN_REGISTRY_ENABLED == 'true' }}
|
if: ${{ env.BUILTIN_REGISTRY_ENABLED == 'true' }}
|
||||||
uses: docker/login-action@v3
|
uses: docker/login-action@v4
|
||||||
with:
|
with:
|
||||||
registry: ${{ env.BUILTIN_REGISTRY }}
|
registry: ${{ env.BUILTIN_REGISTRY }}
|
||||||
username: ${{ inputs.registry_user }}
|
username: ${{ inputs.registry_user }}
|
||||||
|
|
@ -52,7 +52,7 @@ runs:
|
||||||
|
|
||||||
- name: Set up Docker Buildx
|
- name: Set up Docker Buildx
|
||||||
if: ${{ env.BUILTIN_REGISTRY_ENABLED == 'true' }}
|
if: ${{ env.BUILTIN_REGISTRY_ENABLED == 'true' }}
|
||||||
uses: docker/setup-buildx-action@v3
|
uses: docker/setup-buildx-action@v4
|
||||||
with:
|
with:
|
||||||
# Use persistent BuildKit if BUILDKIT_ENDPOINT is set (e.g. tcp://buildkit:8125)
|
# Use persistent BuildKit if BUILDKIT_ENDPOINT is set (e.g. tcp://buildkit:8125)
|
||||||
driver: ${{ env.BUILDKIT_ENDPOINT != '' && 'remote' || 'docker-container' }}
|
driver: ${{ env.BUILDKIT_ENDPOINT != '' && 'remote' || 'docker-container' }}
|
||||||
|
|
@ -61,7 +61,7 @@ runs:
|
||||||
- name: Extract metadata (tags) for Docker
|
- name: Extract metadata (tags) for Docker
|
||||||
if: ${{ env.BUILTIN_REGISTRY_ENABLED == 'true' }}
|
if: ${{ env.BUILTIN_REGISTRY_ENABLED == 'true' }}
|
||||||
id: meta
|
id: meta
|
||||||
uses: docker/metadata-action@v5
|
uses: docker/metadata-action@v6
|
||||||
with:
|
with:
|
||||||
flavor: |
|
flavor: |
|
||||||
latest=auto
|
latest=auto
|
||||||
|
|
|
||||||
|
|
@ -67,7 +67,7 @@ runs:
|
||||||
uses: ./.forgejo/actions/rust-toolchain
|
uses: ./.forgejo/actions/rust-toolchain
|
||||||
|
|
||||||
- name: Set up Docker Buildx
|
- name: Set up Docker Buildx
|
||||||
uses: docker/setup-buildx-action@v3
|
uses: docker/setup-buildx-action@v4
|
||||||
with:
|
with:
|
||||||
# Use persistent BuildKit if BUILDKIT_ENDPOINT is set (e.g. tcp://buildkit:8125)
|
# Use persistent BuildKit if BUILDKIT_ENDPOINT is set (e.g. tcp://buildkit:8125)
|
||||||
driver: ${{ env.BUILDKIT_ENDPOINT != '' && 'remote' || 'docker-container' }}
|
driver: ${{ env.BUILDKIT_ENDPOINT != '' && 'remote' || 'docker-container' }}
|
||||||
|
|
@ -79,7 +79,7 @@ runs:
|
||||||
|
|
||||||
- name: Login to builtin registry
|
- name: Login to builtin registry
|
||||||
if: ${{ env.BUILTIN_REGISTRY_ENABLED == 'true' }}
|
if: ${{ env.BUILTIN_REGISTRY_ENABLED == 'true' }}
|
||||||
uses: docker/login-action@v3
|
uses: docker/login-action@v4
|
||||||
with:
|
with:
|
||||||
registry: ${{ env.BUILTIN_REGISTRY }}
|
registry: ${{ env.BUILTIN_REGISTRY }}
|
||||||
username: ${{ inputs.registry_user }}
|
username: ${{ inputs.registry_user }}
|
||||||
|
|
@ -87,7 +87,7 @@ runs:
|
||||||
|
|
||||||
- name: Extract metadata (labels, annotations) for Docker
|
- name: Extract metadata (labels, annotations) for Docker
|
||||||
id: meta
|
id: meta
|
||||||
uses: docker/metadata-action@v5
|
uses: docker/metadata-action@v6
|
||||||
with:
|
with:
|
||||||
images: ${{ inputs.images }}
|
images: ${{ inputs.images }}
|
||||||
# default labels & annotations: https://github.com/docker/metadata-action/blob/master/src/meta.ts#L509
|
# default labels & annotations: https://github.com/docker/metadata-action/blob/master/src/meta.ts#L509
|
||||||
|
|
@ -152,7 +152,7 @@ runs:
|
||||||
|
|
||||||
- name: inject cache into docker
|
- name: inject cache into docker
|
||||||
if: ${{ env.BUILDKIT_ENDPOINT == '' }}
|
if: ${{ env.BUILDKIT_ENDPOINT == '' }}
|
||||||
uses: https://github.com/reproducible-containers/buildkit-cache-dance@v3.3.0
|
uses: https://github.com/reproducible-containers/buildkit-cache-dance@v3.3.2
|
||||||
with:
|
with:
|
||||||
cache-map: |
|
cache-map: |
|
||||||
{
|
{
|
||||||
|
|
|
||||||
|
|
@ -62,10 +62,6 @@ sync:
|
||||||
target: registry.gitlab.com/continuwuity/continuwuity
|
target: registry.gitlab.com/continuwuity/continuwuity
|
||||||
type: repository
|
type: repository
|
||||||
<<: *tags-main
|
<<: *tags-main
|
||||||
- source: *source
|
|
||||||
target: git.nexy7574.co.uk/mirrored/continuwuity
|
|
||||||
type: repository
|
|
||||||
<<: *tags-releases
|
|
||||||
- source: *source
|
- source: *source
|
||||||
target: ghcr.io/continuwuity/continuwuity
|
target: ghcr.io/continuwuity/continuwuity
|
||||||
type: repository
|
type: repository
|
||||||
|
|
|
||||||
|
|
@ -30,22 +30,22 @@ jobs:
|
||||||
echo "version=$VERSION" >> $GITHUB_OUTPUT
|
echo "version=$VERSION" >> $GITHUB_OUTPUT
|
||||||
echo "distribution=$DISTRIBUTION" >> $GITHUB_OUTPUT
|
echo "distribution=$DISTRIBUTION" >> $GITHUB_OUTPUT
|
||||||
echo "Debian distribution: $DISTRIBUTION ($VERSION)"
|
echo "Debian distribution: $DISTRIBUTION ($VERSION)"
|
||||||
- name: Work around llvm-project#153385
|
#- name: Work around llvm-project#153385
|
||||||
id: llvm-workaround
|
# id: llvm-workaround
|
||||||
run: |
|
# run: |
|
||||||
if [ -f /usr/share/apt/default-sequoia.config ]; then
|
# if [ -f /usr/share/apt/default-sequoia.config ]; then
|
||||||
echo "Applying workaround for llvm-project#153385"
|
# echo "Applying workaround for llvm-project#153385"
|
||||||
mkdir -p /etc/crypto-policies/back-ends/
|
# mkdir -p /etc/crypto-policies/back-ends/
|
||||||
cp /usr/share/apt/default-sequoia.config /etc/crypto-policies/back-ends/apt-sequoia.config
|
# cp /usr/share/apt/default-sequoia.config /etc/crypto-policies/back-ends/apt-sequoia.config
|
||||||
sed -i 's/\(sha1\.second_preimage_resistance = \)2026-02-01/\12026-06-01/' /etc/crypto-policies/back-ends/apt-sequoia.config
|
# sed -i 's/\(sha1\.second_preimage_resistance = \)2026-02-01/\12026-06-01/' /etc/crypto-policies/back-ends/apt-sequoia.config
|
||||||
else
|
# else
|
||||||
echo "No workaround needed for llvm-project#153385"
|
# echo "No workaround needed for llvm-project#153385"
|
||||||
fi
|
# fi
|
||||||
- name: Pick compatible clang version
|
- name: Pick compatible clang version
|
||||||
id: clang-version
|
id: clang-version
|
||||||
run: |
|
run: |
|
||||||
# both latest need to use clang-23, but oldstable and previous can just use clang
|
# both latest need to use clang-23, but oldstable and previous can just use clang
|
||||||
if [[ "${{ matrix.container }}" == "ubuntu-latest" || "${{ matrix.container }}" == "debian-latest" ]]; then
|
if [[ "${{ matrix.container }}" == "ubuntu-latest" ]]; then
|
||||||
echo "Using clang-23 package for ${{ matrix.container }}"
|
echo "Using clang-23 package for ${{ matrix.container }}"
|
||||||
echo "version=clang-23" >> $GITHUB_OUTPUT
|
echo "version=clang-23" >> $GITHUB_OUTPUT
|
||||||
else
|
else
|
||||||
|
|
|
||||||
|
|
@ -59,7 +59,7 @@ jobs:
|
||||||
registry_password: ${{ secrets.BUILTIN_REGISTRY_PASSWORD || secrets.GITHUB_TOKEN }}
|
registry_password: ${{ secrets.BUILTIN_REGISTRY_PASSWORD || secrets.GITHUB_TOKEN }}
|
||||||
- name: Build and push Docker image by digest
|
- name: Build and push Docker image by digest
|
||||||
id: build
|
id: build
|
||||||
uses: docker/build-push-action@v6
|
uses: docker/build-push-action@v7
|
||||||
with:
|
with:
|
||||||
context: .
|
context: .
|
||||||
file: "docker/Dockerfile"
|
file: "docker/Dockerfile"
|
||||||
|
|
@ -146,7 +146,7 @@ jobs:
|
||||||
registry_password: ${{ secrets.BUILTIN_REGISTRY_PASSWORD || secrets.GITHUB_TOKEN }}
|
registry_password: ${{ secrets.BUILTIN_REGISTRY_PASSWORD || secrets.GITHUB_TOKEN }}
|
||||||
- name: Build and push max-perf Docker image by digest
|
- name: Build and push max-perf Docker image by digest
|
||||||
id: build
|
id: build
|
||||||
uses: docker/build-push-action@v6
|
uses: docker/build-push-action@v7
|
||||||
with:
|
with:
|
||||||
context: .
|
context: .
|
||||||
file: "docker/Dockerfile"
|
file: "docker/Dockerfile"
|
||||||
|
|
|
||||||
|
|
@ -43,7 +43,7 @@ jobs:
|
||||||
name: Renovate
|
name: Renovate
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
container:
|
container:
|
||||||
image: ghcr.io/renovatebot/renovate:42.70.2@sha256:3c2ac1b94fa92ef2fa4d1a0493f2c3ba564454720a32fdbcac2db2846ff1ee47
|
image: ghcr.io/renovatebot/renovate:43.59.4@sha256:f951508dea1e7d71cbe6deca298ab0a05488e7631229304813f630cc06010892
|
||||||
options: --tmpfs /tmp:exec
|
options: --tmpfs /tmp:exec
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout
|
- name: Checkout
|
||||||
|
|
|
||||||
|
|
@ -23,7 +23,7 @@ jobs:
|
||||||
persist-credentials: true
|
persist-credentials: true
|
||||||
token: ${{ secrets.FORGEJO_TOKEN }}
|
token: ${{ secrets.FORGEJO_TOKEN }}
|
||||||
|
|
||||||
- uses: https://github.com/cachix/install-nix-action@4e002c8ec80594ecd40e759629461e26c8abed15 # v31.9.0
|
- uses: https://github.com/cachix/install-nix-action@19effe9fe722874e6d46dd7182e4b8b7a43c4a99 # v31.10.0
|
||||||
with:
|
with:
|
||||||
nix_path: nixpkgs=channel:nixos-unstable
|
nix_path: nixpkgs=channel:nixos-unstable
|
||||||
|
|
||||||
|
|
|
||||||
4
.github/FUNDING.yml
vendored
4
.github/FUNDING.yml
vendored
|
|
@ -1,4 +1,4 @@
|
||||||
github: [JadedBlueEyes, nexy7574, gingershaped]
|
github: [JadedBlueEyes, nexy7574, gingershaped]
|
||||||
custom:
|
custom:
|
||||||
- https://ko-fi.com/nexy7574
|
- https://timedout.uk/donate.html
|
||||||
- https://ko-fi.com/JadedBlueEyes
|
- https://jade.ellis.link/sponsors
|
||||||
|
|
|
||||||
|
|
@ -1,5 +1,6 @@
|
||||||
default_install_hook_types:
|
default_install_hook_types:
|
||||||
- pre-commit
|
- pre-commit
|
||||||
|
- pre-push
|
||||||
- commit-msg
|
- commit-msg
|
||||||
default_stages:
|
default_stages:
|
||||||
- pre-commit
|
- pre-commit
|
||||||
|
|
@ -23,7 +24,7 @@ repos:
|
||||||
- id: check-added-large-files
|
- id: check-added-large-files
|
||||||
|
|
||||||
- repo: https://github.com/crate-ci/typos
|
- repo: https://github.com/crate-ci/typos
|
||||||
rev: v1.43.4
|
rev: v1.44.0
|
||||||
hooks:
|
hooks:
|
||||||
- id: typos
|
- id: typos
|
||||||
- id: typos
|
- id: typos
|
||||||
|
|
@ -31,7 +32,7 @@ repos:
|
||||||
stages: [commit-msg]
|
stages: [commit-msg]
|
||||||
|
|
||||||
- repo: https://github.com/crate-ci/committed
|
- repo: https://github.com/crate-ci/committed
|
||||||
rev: v1.1.10
|
rev: v1.1.11
|
||||||
hooks:
|
hooks:
|
||||||
- id: committed
|
- id: committed
|
||||||
|
|
||||||
|
|
@ -45,3 +46,14 @@ repos:
|
||||||
pass_filenames: false
|
pass_filenames: false
|
||||||
stages:
|
stages:
|
||||||
- pre-commit
|
- pre-commit
|
||||||
|
|
||||||
|
- repo: local
|
||||||
|
hooks:
|
||||||
|
- id: cargo-clippy
|
||||||
|
name: cargo clippy
|
||||||
|
entry: cargo clippy -- -D warnings
|
||||||
|
language: system
|
||||||
|
pass_filenames: false
|
||||||
|
types: [rust]
|
||||||
|
stages:
|
||||||
|
- pre-push
|
||||||
|
|
|
||||||
29
CHANGELOG.md
29
CHANGELOG.md
|
|
@ -1,3 +1,32 @@
|
||||||
|
# Continuwuity 0.5.6 (2026-03-03)
|
||||||
|
|
||||||
|
## Security
|
||||||
|
|
||||||
|
- Admin escape commands received over federation will never be executed, as this is never valid in a genuine situation. Contributed by @Jade.
|
||||||
|
- Fixed data amplification vulnerability (CWE-409) that affected configurations with server-side compression enabled (non-default). Contributed by @nex.
|
||||||
|
|
||||||
|
## Features
|
||||||
|
|
||||||
|
- Outgoing presence is now disabled by default, and the config option documentation has been adjusted to more accurately represent the weight of presence, typing indicators, and read receipts. Contributed by @nex. ([#1399](https://forgejo.ellis.link/continuwuation/continuwuity/pulls/1399))
|
||||||
|
- Improved the concurrency handling of federation transactions, vastly improving performance and reliability by more accurately handling inbound transactions and reducing the amount of repeated wasted work. Contributed by @nex and @Jade. ([#1428](https://forgejo.ellis.link/continuwuation/continuwuity/pulls/1428))
|
||||||
|
- Added [MSC3202](https://github.com/matrix-org/matrix-spec-proposals/pull/3202) Device masquerading (not all of MSC3202). This should fix issues with enabling [MSC4190](https://github.com/matrix-org/matrix-spec-proposals/pull/4190) for some Mautrix bridges. Contributed by @Jade ([#1435](https://forgejo.ellis.link/continuwuation/continuwuity/pulls/1435))
|
||||||
|
- Added [MSC3814](https://github.com/matrix-org/matrix-spec-proposals/pull/3814) Dehydrated Devices - you can now decrypt messages sent while all devices were logged out. ([#1436](https://forgejo.ellis.link/continuwuation/continuwuity/pulls/1436))
|
||||||
|
- Implement [MSC4143](https://github.com/matrix-org/matrix-spec-proposals/pull/4143) MatrixRTC transport discovery endpoint. Move RTC foci configuration from `[global.well_known]` to a new `[global.matrix_rtc]` section with a `foci` field. Contributed by @0xnim ([#1442](https://forgejo.ellis.link/continuwuation/continuwuity/pulls/1442))
|
||||||
|
- Updated `list-backups` admin command to output one backup per line. ([#1394](https://forgejo.ellis.link/continuwuation/continuwuity/pulls/1394))
|
||||||
|
- Improved URL preview fetching with a more compatible user agent for sites like YouTube Music. Added `!admin media delete-url-preview <url>` command to clear cached URL previews that were stuck and broken. ([#1434](https://forgejo.ellis.link/continuwuation/continuwuity/pulls/1434))
|
||||||
|
|
||||||
|
## Bugfixes
|
||||||
|
|
||||||
|
- Removed non-compliant nor functional room alias lookups over federation. Contributed by @nex ([#1393](https://forgejo.ellis.link/continuwuation/continuwuity/pulls/1393))
|
||||||
|
- Removed ability to set rocksdb as read only. Doing so would cause unintentional and buggy behaviour. Contributed by @Terryiscool160. ([#1418](https://forgejo.ellis.link/continuwuation/continuwuity/pulls/1418))
|
||||||
|
- Fixed a startup crash in the sender service if we can't detect the number of CPU cores, even if the `sender_workers` config option is set correctly. Contributed by @katie. ([#1421](https://forgejo.ellis.link/continuwuation/continuwuity/pulls/1421))
|
||||||
|
- Removed the `allow_public_room_directory_without_auth` config option. Contributed by @0xnim. ([#1441](https://forgejo.ellis.link/continuwuation/continuwuity/pulls/1441))
|
||||||
|
- Fixed sliding sync v5 list ranges always starting from 0, causing extra rooms to be unnecessarily processed and returned. Contributed by @0xnim ([#1445](https://forgejo.ellis.link/continuwuation/continuwuity/pulls/1445))
|
||||||
|
- Fixed a bug that (repairably) caused a room split between continuwuity and non-continuwuity servers when the room had both `m.room.policy` and `org.matrix.msc4284.policy` in its room state. Contributed by @nex ([#1481](https://forgejo.ellis.link/continuwuation/continuwuity/pulls/1481))
|
||||||
|
- Fixed `!admin media delete --mxc <url>` responding with an error message when the media was deleted successfully. Contributed by @lynxize
|
||||||
|
- Fixed spurious 404 media errors in the logs. Contributed by @benbot.
|
||||||
|
- Fixed spurious warn about needed backfill via federation for non-federated rooms. Contributed by @kraem.
|
||||||
|
|
||||||
# Continuwuity v0.5.5 (2026-02-15)
|
# Continuwuity v0.5.5 (2026-02-15)
|
||||||
|
|
||||||
## Features
|
## Features
|
||||||
|
|
|
||||||
|
|
@ -22,22 +22,18 @@ Continuwuity uses pre-commit hooks to enforce various coding standards and catch
|
||||||
- Validating YAML, JSON, and TOML files
|
- Validating YAML, JSON, and TOML files
|
||||||
- Checking for merge conflicts
|
- Checking for merge conflicts
|
||||||
|
|
||||||
You can run these checks locally by installing [prefligit](https://github.com/j178/prefligit):
|
You can run these checks locally by installing [prek](https://github.com/j178/prek):
|
||||||
|
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
# Requires UV: https://docs.astral.sh/uv/getting-started/installation/
|
# Install prek using cargo-binstall
|
||||||
# Mac/linux: curl -LsSf https://astral.sh/uv/install.sh | sh
|
cargo binstall prek
|
||||||
# Windows: powershell -ExecutionPolicy ByPass -c "irm https://astral.sh/uv/install.ps1 | iex"
|
|
||||||
|
|
||||||
# Install prefligit using cargo-binstall
|
|
||||||
cargo binstall prefligit
|
|
||||||
|
|
||||||
# Install git hooks to run checks automatically
|
# Install git hooks to run checks automatically
|
||||||
prefligit install
|
prek install
|
||||||
|
|
||||||
# Run all checks
|
# Run all checks
|
||||||
prefligit --all-files
|
prek --all-files
|
||||||
```
|
```
|
||||||
|
|
||||||
Alternatively, you can use [pre-commit](https://pre-commit.com/):
|
Alternatively, you can use [pre-commit](https://pre-commit.com/):
|
||||||
|
|
@ -54,7 +50,7 @@ pre-commit install
|
||||||
pre-commit run --all-files
|
pre-commit run --all-files
|
||||||
```
|
```
|
||||||
|
|
||||||
These same checks are run in CI via the prefligit-checks workflow to ensure consistency. These must pass before the PR is merged.
|
These same checks are run in CI via the prek-checks workflow to ensure consistency. These must pass before the PR is merged.
|
||||||
|
|
||||||
### Running tests locally
|
### Running tests locally
|
||||||
|
|
||||||
|
|
@ -85,24 +81,31 @@ If your changes are done to fix Matrix tests, please note that in your pull requ
|
||||||
|
|
||||||
### Writing documentation
|
### Writing documentation
|
||||||
|
|
||||||
Continuwuity's website uses [`mdbook`][mdbook] and is deployed via CI using Cloudflare Pages
|
Continuwuity's website uses [`rspress`][rspress] and is deployed via CI using Cloudflare Pages
|
||||||
in the [`documentation.yml`][documentation.yml] workflow file. All documentation is in the `docs/`
|
in the [`documentation.yml`][documentation.yml] workflow file. All documentation is in the `docs/`
|
||||||
directory at the top level.
|
directory at the top level.
|
||||||
|
|
||||||
To build the documentation locally:
|
To load the documentation locally:
|
||||||
|
|
||||||
|
1. Install NodeJS and npm from their [official website][nodejs-download] or via your package manager of choice
|
||||||
|
|
||||||
|
2. From the project's root directory, install the relevant npm modules
|
||||||
|
|
||||||
1. Install mdbook if you don't have it already:
|
|
||||||
```bash
|
```bash
|
||||||
cargo install mdbook # or cargo binstall, or another method
|
npm ci
|
||||||
```
|
```
|
||||||
|
|
||||||
2. Build the documentation:
|
3. Make changes to the document pages as you see fit
|
||||||
|
|
||||||
|
4. Generate a live preview of the documentation
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
mdbook build
|
npm run docs:dev
|
||||||
```
|
```
|
||||||
|
|
||||||
The output of the mdbook generation is in `public/`. You can open the HTML files directly in your browser without needing a web server.
|
A webserver for the docs will be spun up for you (e.g. at `http://localhost:3000`). Any changes you make to the documentation will be live-reloaded on the webpage.
|
||||||
|
|
||||||
|
Alternatively, you can build the documentation using `npm run docs:build` - the output of this will be in the `/doc_build` directory. Once you're happy with your documentation updates, you can commit the changes.
|
||||||
|
|
||||||
### Commit Messages
|
### Commit Messages
|
||||||
|
|
||||||
|
|
@ -169,5 +172,6 @@ continuwuity Matrix rooms for Code of Conduct violations.
|
||||||
[continuwuity-matrix]: https://matrix.to/#/#continuwuity:continuwuity.org?via=continuwuity.org&via=ellis.link&via=explodie.org&via=matrix.org
|
[continuwuity-matrix]: https://matrix.to/#/#continuwuity:continuwuity.org?via=continuwuity.org&via=ellis.link&via=explodie.org&via=matrix.org
|
||||||
[complement]: https://github.com/matrix-org/complement/
|
[complement]: https://github.com/matrix-org/complement/
|
||||||
[sytest]: https://github.com/matrix-org/sytest/
|
[sytest]: https://github.com/matrix-org/sytest/
|
||||||
[mdbook]: https://rust-lang.github.io/mdBook/
|
[nodejs-download]: https://nodejs.org/en/download
|
||||||
|
[rspress]: https://rspress.rs/
|
||||||
[documentation.yml]: https://forgejo.ellis.link/continuwuation/continuwuity/src/branch/main/.forgejo/workflows/documentation.yml
|
[documentation.yml]: https://forgejo.ellis.link/continuwuation/continuwuity/src/branch/main/.forgejo/workflows/documentation.yml
|
||||||
|
|
|
||||||
1248
Cargo.lock
generated
1248
Cargo.lock
generated
File diff suppressed because it is too large
Load diff
39
Cargo.toml
39
Cargo.toml
|
|
@ -12,7 +12,7 @@ license = "Apache-2.0"
|
||||||
# See also `rust-toolchain.toml`
|
# See also `rust-toolchain.toml`
|
||||||
readme = "README.md"
|
readme = "README.md"
|
||||||
repository = "https://forgejo.ellis.link/continuwuation/continuwuity"
|
repository = "https://forgejo.ellis.link/continuwuation/continuwuity"
|
||||||
version = "0.5.5"
|
version = "0.5.7-alpha.1"
|
||||||
|
|
||||||
[workspace.metadata.crane]
|
[workspace.metadata.crane]
|
||||||
name = "conduwuit"
|
name = "conduwuit"
|
||||||
|
|
@ -68,7 +68,7 @@ default-features = false
|
||||||
version = "0.1.3"
|
version = "0.1.3"
|
||||||
|
|
||||||
[workspace.dependencies.rand]
|
[workspace.dependencies.rand]
|
||||||
version = "0.8.5"
|
version = "0.10.0"
|
||||||
|
|
||||||
# Used for the http request / response body type for Ruma endpoints used with reqwest
|
# Used for the http request / response body type for Ruma endpoints used with reqwest
|
||||||
[workspace.dependencies.bytes]
|
[workspace.dependencies.bytes]
|
||||||
|
|
@ -84,7 +84,7 @@ version = "1.3.1"
|
||||||
version = "1.11.1"
|
version = "1.11.1"
|
||||||
|
|
||||||
[workspace.dependencies.axum]
|
[workspace.dependencies.axum]
|
||||||
version = "0.7.9"
|
version = "0.8.8"
|
||||||
default-features = false
|
default-features = false
|
||||||
features = [
|
features = [
|
||||||
"form",
|
"form",
|
||||||
|
|
@ -97,9 +97,9 @@ features = [
|
||||||
]
|
]
|
||||||
|
|
||||||
[workspace.dependencies.axum-extra]
|
[workspace.dependencies.axum-extra]
|
||||||
version = "0.9.6"
|
version = "0.12.0"
|
||||||
default-features = false
|
default-features = false
|
||||||
features = ["typed-header", "tracing"]
|
features = ["typed-header", "tracing", "cookie"]
|
||||||
|
|
||||||
[workspace.dependencies.axum-server]
|
[workspace.dependencies.axum-server]
|
||||||
version = "0.7.2"
|
version = "0.7.2"
|
||||||
|
|
@ -110,7 +110,7 @@ default-features = false
|
||||||
version = "0.7"
|
version = "0.7"
|
||||||
|
|
||||||
[workspace.dependencies.axum-client-ip]
|
[workspace.dependencies.axum-client-ip]
|
||||||
version = "0.6.1"
|
version = "0.7"
|
||||||
|
|
||||||
[workspace.dependencies.tower]
|
[workspace.dependencies.tower]
|
||||||
version = "0.5.2"
|
version = "0.5.2"
|
||||||
|
|
@ -118,7 +118,7 @@ default-features = false
|
||||||
features = ["util"]
|
features = ["util"]
|
||||||
|
|
||||||
[workspace.dependencies.tower-http]
|
[workspace.dependencies.tower-http]
|
||||||
version = "0.6.2"
|
version = "0.6.8"
|
||||||
default-features = false
|
default-features = false
|
||||||
features = [
|
features = [
|
||||||
"add-extension",
|
"add-extension",
|
||||||
|
|
@ -144,6 +144,7 @@ features = [
|
||||||
"socks",
|
"socks",
|
||||||
"hickory-dns",
|
"hickory-dns",
|
||||||
"http2",
|
"http2",
|
||||||
|
"stream",
|
||||||
]
|
]
|
||||||
|
|
||||||
[workspace.dependencies.serde]
|
[workspace.dependencies.serde]
|
||||||
|
|
@ -158,7 +159,7 @@ features = ["raw_value"]
|
||||||
|
|
||||||
# Used for appservice registration files
|
# Used for appservice registration files
|
||||||
[workspace.dependencies.serde-saphyr]
|
[workspace.dependencies.serde-saphyr]
|
||||||
version = "0.0.19"
|
version = "0.0.21"
|
||||||
|
|
||||||
# Used to load forbidden room/user regex from config
|
# Used to load forbidden room/user regex from config
|
||||||
[workspace.dependencies.serde_regex]
|
[workspace.dependencies.serde_regex]
|
||||||
|
|
@ -253,7 +254,7 @@ features = [
|
||||||
version = "0.4.0"
|
version = "0.4.0"
|
||||||
|
|
||||||
[workspace.dependencies.libloading]
|
[workspace.dependencies.libloading]
|
||||||
version = "0.8.6"
|
version = "0.9.0"
|
||||||
|
|
||||||
# Validating urls in config, was already a transitive dependency
|
# Validating urls in config, was already a transitive dependency
|
||||||
[workspace.dependencies.url]
|
[workspace.dependencies.url]
|
||||||
|
|
@ -298,7 +299,7 @@ default-features = false
|
||||||
features = ["env", "toml"]
|
features = ["env", "toml"]
|
||||||
|
|
||||||
[workspace.dependencies.hickory-resolver]
|
[workspace.dependencies.hickory-resolver]
|
||||||
version = "0.25.1"
|
version = "0.25.2"
|
||||||
default-features = false
|
default-features = false
|
||||||
features = [
|
features = [
|
||||||
"serde",
|
"serde",
|
||||||
|
|
@ -342,7 +343,8 @@ version = "0.1.2"
|
||||||
# Used for matrix spec type definitions and helpers
|
# Used for matrix spec type definitions and helpers
|
||||||
[workspace.dependencies.ruma]
|
[workspace.dependencies.ruma]
|
||||||
git = "https://forgejo.ellis.link/continuwuation/ruwuma"
|
git = "https://forgejo.ellis.link/continuwuation/ruwuma"
|
||||||
rev = "b496b7f38d517149361a882e75d3fd4faf210441"
|
#branch = "conduwuit-changes"
|
||||||
|
rev = "bb12ed288a31a23aa11b10ba0fad22b7f985eb88"
|
||||||
features = [
|
features = [
|
||||||
"compat",
|
"compat",
|
||||||
"rand",
|
"rand",
|
||||||
|
|
@ -362,6 +364,7 @@ features = [
|
||||||
"unstable-msc2870",
|
"unstable-msc2870",
|
||||||
"unstable-msc3026",
|
"unstable-msc3026",
|
||||||
"unstable-msc3061",
|
"unstable-msc3061",
|
||||||
|
"unstable-msc3814",
|
||||||
"unstable-msc3245",
|
"unstable-msc3245",
|
||||||
"unstable-msc3266",
|
"unstable-msc3266",
|
||||||
"unstable-msc3381", # polls
|
"unstable-msc3381", # polls
|
||||||
|
|
@ -380,6 +383,7 @@ features = [
|
||||||
"unstable-pdu",
|
"unstable-pdu",
|
||||||
"unstable-msc4155",
|
"unstable-msc4155",
|
||||||
"unstable-msc4143", # livekit well_known response
|
"unstable-msc4143", # livekit well_known response
|
||||||
|
"unstable-msc4284"
|
||||||
]
|
]
|
||||||
|
|
||||||
[workspace.dependencies.rust-rocksdb]
|
[workspace.dependencies.rust-rocksdb]
|
||||||
|
|
@ -424,7 +428,7 @@ features = ["http", "grpc-tonic", "trace", "logs", "metrics"]
|
||||||
|
|
||||||
# optional sentry metrics for crash/panic reporting
|
# optional sentry metrics for crash/panic reporting
|
||||||
[workspace.dependencies.sentry]
|
[workspace.dependencies.sentry]
|
||||||
version = "0.45.0"
|
version = "0.46.0"
|
||||||
default-features = false
|
default-features = false
|
||||||
features = [
|
features = [
|
||||||
"backtrace",
|
"backtrace",
|
||||||
|
|
@ -440,9 +444,9 @@ features = [
|
||||||
]
|
]
|
||||||
|
|
||||||
[workspace.dependencies.sentry-tracing]
|
[workspace.dependencies.sentry-tracing]
|
||||||
version = "0.45.0"
|
version = "0.46.0"
|
||||||
[workspace.dependencies.sentry-tower]
|
[workspace.dependencies.sentry-tower]
|
||||||
version = "0.45.0"
|
version = "0.46.0"
|
||||||
|
|
||||||
# jemalloc usage
|
# jemalloc usage
|
||||||
[workspace.dependencies.tikv-jemalloc-sys]
|
[workspace.dependencies.tikv-jemalloc-sys]
|
||||||
|
|
@ -471,7 +475,7 @@ features = ["use_std"]
|
||||||
version = "0.5"
|
version = "0.5"
|
||||||
|
|
||||||
[workspace.dependencies.nix]
|
[workspace.dependencies.nix]
|
||||||
version = "0.30.1"
|
version = "0.31.0"
|
||||||
default-features = false
|
default-features = false
|
||||||
features = ["resource"]
|
features = ["resource"]
|
||||||
|
|
||||||
|
|
@ -553,7 +557,7 @@ version = "0.7.5"
|
||||||
version = "1.0.1"
|
version = "1.0.1"
|
||||||
|
|
||||||
[workspace.dependencies.askama]
|
[workspace.dependencies.askama]
|
||||||
version = "0.14.0"
|
version = "0.15.0"
|
||||||
|
|
||||||
#
|
#
|
||||||
# Patches
|
# Patches
|
||||||
|
|
@ -965,3 +969,6 @@ needless_raw_string_hashes = "allow"
|
||||||
|
|
||||||
# TODO: Enable this lint & fix all instances
|
# TODO: Enable this lint & fix all instances
|
||||||
collapsible_if = "allow"
|
collapsible_if = "allow"
|
||||||
|
|
||||||
|
# TODO: break these apart
|
||||||
|
cognitive_complexity = "allow"
|
||||||
|
|
|
||||||
11
README.md
11
README.md
|
|
@ -57,10 +57,15 @@ Continuwuity aims to:
|
||||||
|
|
||||||
### Can I try it out?
|
### Can I try it out?
|
||||||
|
|
||||||
Check out the [documentation](https://continuwuity.org) for installation instructions, or join one of these vetted public homeservers running Continuwuity to get a feel for things!
|
Check out the [documentation](https://continuwuity.org) for installation instructions.
|
||||||
|
|
||||||
- https://continuwuity.rocks -- A public demo server operated by the Continuwuity Team.
|
If you want to try it out as a user, we have some partnered homeservers you can use:
|
||||||
- https://federated.nexus -- Federated Nexus is a community resource hosting multiple FOSS (especially federated) services, including Matrix and Forgejo.
|
* You can head over to [https://federated.nexus](https://federated.nexus/) in your browser.
|
||||||
|
* Hit the `Apply to Join` button. Once your request has been accepted, you will receive an email with your username and password.
|
||||||
|
* Head over to [https://app.federated.nexus](https://app.federated.nexus/) and you can sign in there, or use any other matrix chat client you wish elsewhere.
|
||||||
|
* Your username for matrix will be in the form of `@username:federated.nexus`, however you can simply use the `username` part to log in. Your password is your password.
|
||||||
|
|
||||||
|
* There's also [https://continuwuity.rocks/](https://continuwuity.rocks/). You can register a new account using Cinny via [this convenient link](https://app.cinny.in/register/continuwuity.rocks), or you can use Element or another matrix client *that supports registration*.
|
||||||
|
|
||||||
### What are we working on?
|
### What are we working on?
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -6,10 +6,10 @@ set -euo pipefail
|
||||||
COMPLEMENT_SRC="${COMPLEMENT_SRC:-$1}"
|
COMPLEMENT_SRC="${COMPLEMENT_SRC:-$1}"
|
||||||
|
|
||||||
# A `.jsonl` file to write test logs to
|
# A `.jsonl` file to write test logs to
|
||||||
LOG_FILE="${2:-complement_test_logs.jsonl}"
|
LOG_FILE="${2:-tests/test_results/complement/test_logs.jsonl}"
|
||||||
|
|
||||||
# A `.jsonl` file to write test results to
|
# A `.jsonl` file to write test results to
|
||||||
RESULTS_FILE="${3:-complement_test_results.jsonl}"
|
RESULTS_FILE="${3:-tests/test_results/complement/test_results.jsonl}"
|
||||||
|
|
||||||
# The base docker image to use for complement tests
|
# The base docker image to use for complement tests
|
||||||
# You can build the default with `docker build -t continuwuity:complement -f ./docker/complement.Dockerfile .`
|
# You can build the default with `docker build -t continuwuity:complement -f ./docker/complement.Dockerfile .`
|
||||||
|
|
|
||||||
1
changelog.d/+6368729a.feature.md
Normal file
1
changelog.d/+6368729a.feature.md
Normal file
|
|
@ -0,0 +1 @@
|
||||||
|
Added support for using an admin command to issue self-service password reset links.
|
||||||
1
changelog.d/+6e57599d.bugfix.md
Normal file
1
changelog.d/+6e57599d.bugfix.md
Normal file
|
|
@ -0,0 +1 @@
|
||||||
|
Stopped left rooms from being unconditionally sent on initial sync, hopefully fixing spurious appearances of left rooms in some clients (and making sync faster as a bonus). Contributed by @ginger
|
||||||
1
changelog.d/+space-permission-cascading.feature.md
Normal file
1
changelog.d/+space-permission-cascading.feature.md
Normal file
|
|
@ -0,0 +1 @@
|
||||||
|
Add Space permission cascading: power levels cascade from Spaces to child rooms, role-based room access with custom roles, continuous enforcement (auto-join/kick), and admin commands for role management. Server-wide default controlled by `space_permission_cascading` config flag (off by default), with per-Space overrides via `!admin space roles enable/disable <space>`.
|
||||||
1
changelog.d/1265.bugfix
Normal file
1
changelog.d/1265.bugfix
Normal file
|
|
@ -0,0 +1 @@
|
||||||
|
Fixed corrupted appservice registrations causing the server to enter a crash loop. Contributed by @nex.
|
||||||
1
changelog.d/1371.feature.md
Normal file
1
changelog.d/1371.feature.md
Normal file
|
|
@ -0,0 +1 @@
|
||||||
|
Re-added support for reading registration tokens from a file. Contributed by @ginger and @benbot.
|
||||||
1
changelog.d/1448.bugfix
Normal file
1
changelog.d/1448.bugfix
Normal file
|
|
@ -0,0 +1 @@
|
||||||
|
Prevent removing the admin room alias (`#admins`) to avoid accidentally breaking admin room functionality. Contributed by @0xnim
|
||||||
1
changelog.d/1527.feature.md
Normal file
1
changelog.d/1527.feature.md
Normal file
|
|
@ -0,0 +1 @@
|
||||||
|
Add new config option to allow or disallow search engine indexing through a `<meta ../>` tag. Defaults to blocking indexing (`content="noindex"`). Contributed by @s1lv3r and @ginger.
|
||||||
18
clippy.toml
18
clippy.toml
|
|
@ -15,6 +15,18 @@ disallowed-macros = [
|
||||||
{ path = "log::trace", reason = "use conduwuit_core::trace" },
|
{ path = "log::trace", reason = "use conduwuit_core::trace" },
|
||||||
]
|
]
|
||||||
|
|
||||||
disallowed-methods = [
|
[[disallowed-methods]]
|
||||||
{ path = "tokio::spawn", reason = "use and pass conduuwit_core::server::Server::runtime() to spawn from" },
|
path = "tokio::spawn"
|
||||||
]
|
reason = "use and pass conduwuit_core::server::Server::runtime() to spawn from"
|
||||||
|
|
||||||
|
[[disallowed-methods]]
|
||||||
|
path = "reqwest::Response::bytes"
|
||||||
|
reason = "bytes is unsafe, use limit_read via the conduwuit_core::utils::LimitReadExt trait instead"
|
||||||
|
|
||||||
|
[[disallowed-methods]]
|
||||||
|
path = "reqwest::Response::text"
|
||||||
|
reason = "text is unsafe, use limit_read_text via the conduwuit_core::utils::LimitReadExt trait instead"
|
||||||
|
|
||||||
|
[[disallowed-methods]]
|
||||||
|
path = "reqwest::Response::json"
|
||||||
|
reason = "json is unsafe, use limit_read_text via the conduwuit_core::utils::LimitReadExt trait instead"
|
||||||
|
|
|
||||||
|
|
@ -9,10 +9,9 @@ address = "0.0.0.0"
|
||||||
allow_device_name_federation = true
|
allow_device_name_federation = true
|
||||||
allow_guest_registration = true
|
allow_guest_registration = true
|
||||||
allow_public_room_directory_over_federation = true
|
allow_public_room_directory_over_federation = true
|
||||||
allow_public_room_directory_without_auth = true
|
|
||||||
allow_registration = true
|
allow_registration = true
|
||||||
database_path = "/database"
|
database_path = "/database"
|
||||||
log = "trace,h2=debug,hyper=debug"
|
log = "trace,h2=debug,hyper=debug,conduwuit_database=warn,conduwuit_service::manager=info,conduwuit_api::router=error,conduwuit_router=error,tower_http=error"
|
||||||
port = [8008, 8448]
|
port = [8008, 8448]
|
||||||
trusted_servers = []
|
trusted_servers = []
|
||||||
only_query_trusted_key_servers = false
|
only_query_trusted_key_servers = false
|
||||||
|
|
@ -25,7 +24,7 @@ url_preview_domain_explicit_denylist = ["*"]
|
||||||
media_compat_file_link = false
|
media_compat_file_link = false
|
||||||
media_startup_check = true
|
media_startup_check = true
|
||||||
prune_missing_media = true
|
prune_missing_media = true
|
||||||
log_colors = true
|
log_colors = false
|
||||||
admin_room_notices = false
|
admin_room_notices = false
|
||||||
allow_check_for_updates = false
|
allow_check_for_updates = false
|
||||||
intentionally_unknown_config_option_for_testing = true
|
intentionally_unknown_config_option_for_testing = true
|
||||||
|
|
@ -48,6 +47,7 @@ federation_idle_timeout = 300
|
||||||
sender_timeout = 300
|
sender_timeout = 300
|
||||||
sender_idle_timeout = 300
|
sender_idle_timeout = 300
|
||||||
sender_retry_backoff_limit = 300
|
sender_retry_backoff_limit = 300
|
||||||
|
force_disable_first_run_mode = true
|
||||||
|
|
||||||
[global.tls]
|
[global.tls]
|
||||||
dual_protocol = true
|
dual_protocol = true
|
||||||
|
|
|
||||||
|
|
@ -25,6 +25,10 @@
|
||||||
#
|
#
|
||||||
# Also see the `[global.well_known]` config section at the very bottom.
|
# Also see the `[global.well_known]` config section at the very bottom.
|
||||||
#
|
#
|
||||||
|
# If `client` is not set under `[global.well_known]`, the server name will
|
||||||
|
# be used as the base domain for user-facing links (such as password
|
||||||
|
# reset links) created by Continuwuity.
|
||||||
|
#
|
||||||
# Examples of delegation:
|
# Examples of delegation:
|
||||||
# - https://continuwuity.org/.well-known/matrix/server
|
# - https://continuwuity.org/.well-known/matrix/server
|
||||||
# - https://continuwuity.org/.well-known/matrix/client
|
# - https://continuwuity.org/.well-known/matrix/client
|
||||||
|
|
@ -290,6 +294,25 @@
|
||||||
#
|
#
|
||||||
#max_fetch_prev_events = 192
|
#max_fetch_prev_events = 192
|
||||||
|
|
||||||
|
# How many incoming federation transactions the server is willing to be
|
||||||
|
# processing at any given time before it becomes overloaded and starts
|
||||||
|
# rejecting further transactions until some slots become available.
|
||||||
|
#
|
||||||
|
# Setting this value too low or too high may result in unstable
|
||||||
|
# federation, and setting it too high may cause runaway resource usage.
|
||||||
|
#
|
||||||
|
#max_concurrent_inbound_transactions = 150
|
||||||
|
|
||||||
|
# Maximum age (in seconds) for cached federation transaction responses.
|
||||||
|
# Entries older than this will be removed during cleanup.
|
||||||
|
#
|
||||||
|
#transaction_id_cache_max_age_secs = 7200 (2 hours)
|
||||||
|
|
||||||
|
# Maximum number of cached federation transaction responses.
|
||||||
|
# When the cache exceeds this limit, older entries will be removed.
|
||||||
|
#
|
||||||
|
#transaction_id_cache_max_entries = 8192
|
||||||
|
|
||||||
# Default/base connection timeout (seconds). This is used only by URL
|
# Default/base connection timeout (seconds). This is used only by URL
|
||||||
# previews and update/news endpoint checks.
|
# previews and update/news endpoint checks.
|
||||||
#
|
#
|
||||||
|
|
@ -451,24 +474,43 @@
|
||||||
#
|
#
|
||||||
#suspend_on_register = false
|
#suspend_on_register = false
|
||||||
|
|
||||||
|
# Server-wide default for space permission cascading (power levels and
|
||||||
|
# role-based access). Individual Spaces can override this via the
|
||||||
|
# `com.continuwuity.space.cascading` state event or the admin command
|
||||||
|
# `!admin space roles enable/disable <space>`.
|
||||||
|
#
|
||||||
|
#space_permission_cascading = false
|
||||||
|
|
||||||
|
# Maximum number of spaces to cache role data for. When exceeded the
|
||||||
|
# cache is cleared and repopulated on demand.
|
||||||
|
#
|
||||||
|
#space_roles_cache_flush_threshold = 1000
|
||||||
|
|
||||||
# Enabling this setting opens registration to anyone without restrictions.
|
# Enabling this setting opens registration to anyone without restrictions.
|
||||||
# This makes your server vulnerable to abuse
|
# This makes your server vulnerable to abuse
|
||||||
#
|
#
|
||||||
#yes_i_am_very_very_sure_i_want_an_open_registration_server_prone_to_abuse = false
|
#yes_i_am_very_very_sure_i_want_an_open_registration_server_prone_to_abuse = false
|
||||||
|
|
||||||
# A static registration token that new users will have to provide when
|
# A static registration token that new users will have to provide when
|
||||||
# creating an account. If unset and `allow_registration` is true,
|
# creating an account. This token does not supersede tokens from other
|
||||||
# you must set
|
# sources, such as the `!admin token` command or the
|
||||||
# `yes_i_am_very_very_sure_i_want_an_open_registration_server_prone_to_abuse`
|
# `registration_token_file` configuration option.
|
||||||
# to true to allow open registration without any conditions.
|
|
||||||
#
|
|
||||||
# If you do not want to set a static token, the `!admin token` commands
|
|
||||||
# may also be used to manage registration tokens.
|
|
||||||
#
|
#
|
||||||
# example: "o&^uCtes4HPf0Vu@F20jQeeWE7"
|
# example: "o&^uCtes4HPf0Vu@F20jQeeWE7"
|
||||||
#
|
#
|
||||||
#registration_token =
|
#registration_token =
|
||||||
|
|
||||||
|
# A path to a file containing static registration tokens, one per line.
|
||||||
|
# Tokens in this file do not supersede tokens from other sources, such as
|
||||||
|
# the `!admin token` command or the `registration_token` configuration
|
||||||
|
# option.
|
||||||
|
#
|
||||||
|
# The file will be read once, when Continuwuity starts. It is not
|
||||||
|
# currently reread when the server configuration is reloaded. If the file
|
||||||
|
# cannot be read, Continuwuity will fail to start.
|
||||||
|
#
|
||||||
|
#registration_token_file =
|
||||||
|
|
||||||
# The public site key for reCaptcha. If this is provided, reCaptcha
|
# The public site key for reCaptcha. If this is provided, reCaptcha
|
||||||
# becomes required during registration. If both captcha *and*
|
# becomes required during registration. If both captcha *and*
|
||||||
# registration token are enabled, both will be required during
|
# registration token are enabled, both will be required during
|
||||||
|
|
@ -527,12 +569,6 @@
|
||||||
#
|
#
|
||||||
#allow_public_room_directory_over_federation = false
|
#allow_public_room_directory_over_federation = false
|
||||||
|
|
||||||
# Set this to true to allow your server's public room directory to be
|
|
||||||
# queried without client authentication (access token) through the Client
|
|
||||||
# APIs. Set this to false to protect against /publicRooms spiders.
|
|
||||||
#
|
|
||||||
#allow_public_room_directory_without_auth = false
|
|
||||||
|
|
||||||
# Allow guests/unauthenticated users to access TURN credentials.
|
# Allow guests/unauthenticated users to access TURN credentials.
|
||||||
#
|
#
|
||||||
# This is the equivalent of Synapse's `turn_allow_guests` config option.
|
# This is the equivalent of Synapse's `turn_allow_guests` config option.
|
||||||
|
|
@ -1056,14 +1092,6 @@
|
||||||
#
|
#
|
||||||
#rocksdb_repair = false
|
#rocksdb_repair = false
|
||||||
|
|
||||||
# This item is undocumented. Please contribute documentation for it.
|
|
||||||
#
|
|
||||||
#rocksdb_read_only = false
|
|
||||||
|
|
||||||
# This item is undocumented. Please contribute documentation for it.
|
|
||||||
#
|
|
||||||
#rocksdb_secondary = false
|
|
||||||
|
|
||||||
# Enables idle CPU priority for compaction thread. This is not enabled by
|
# Enables idle CPU priority for compaction thread. This is not enabled by
|
||||||
# default to prevent compaction from falling too far behind on busy
|
# default to prevent compaction from falling too far behind on busy
|
||||||
# systems.
|
# systems.
|
||||||
|
|
@ -1120,27 +1148,34 @@
|
||||||
|
|
||||||
# Allow local (your server only) presence updates/requests.
|
# Allow local (your server only) presence updates/requests.
|
||||||
#
|
#
|
||||||
# Note that presence on continuwuity is very fast unlike Synapse's. If
|
# Local presence must be enabled for outgoing presence to function.
|
||||||
# using outgoing presence, this MUST be enabled.
|
#
|
||||||
|
# Note that local presence is not as heavy on the CPU as federated
|
||||||
|
# presence, but will still become more expensive the more local users you
|
||||||
|
# have.
|
||||||
#
|
#
|
||||||
#allow_local_presence = true
|
#allow_local_presence = true
|
||||||
|
|
||||||
# Allow incoming federated presence updates/requests.
|
# Allow incoming federated presence updates.
|
||||||
#
|
#
|
||||||
# This option receives presence updates from other servers, but does not
|
# This option enables processing inbound presence updates from other
|
||||||
# send any unless `allow_outgoing_presence` is true. Note that presence on
|
# servers. Without it, remote users will appear as if they are always
|
||||||
# continuwuity is very fast unlike Synapse's.
|
# offline to your local users. This does not affect typing indicators or
|
||||||
|
# read receipts.
|
||||||
#
|
#
|
||||||
#allow_incoming_presence = true
|
#allow_incoming_presence = true
|
||||||
|
|
||||||
# Allow outgoing presence updates/requests.
|
# Allow outgoing presence updates/requests.
|
||||||
#
|
#
|
||||||
# This option sends presence updates to other servers, but does not
|
# This option sends presence updates to other servers, and requires that
|
||||||
# receive any unless `allow_incoming_presence` is true. Note that presence
|
# `allow_local_presence` is also enabled.
|
||||||
# on continuwuity is very fast unlike Synapse's. If using outgoing
|
|
||||||
# presence, you MUST enable `allow_local_presence` as well.
|
|
||||||
#
|
#
|
||||||
#allow_outgoing_presence = true
|
# Note that outgoing presence is very heavy on the CPU and network, and
|
||||||
|
# will typically cause extreme strain and slowdowns for no real benefit.
|
||||||
|
# There are only a few clients that even implement presence, so you
|
||||||
|
# probably don't want to enable this.
|
||||||
|
#
|
||||||
|
#allow_outgoing_presence = false
|
||||||
|
|
||||||
# How many seconds without presence updates before you become idle.
|
# How many seconds without presence updates before you become idle.
|
||||||
# Defaults to 5 minutes.
|
# Defaults to 5 minutes.
|
||||||
|
|
@ -1174,6 +1209,10 @@
|
||||||
|
|
||||||
# Allow sending read receipts to remote servers.
|
# Allow sending read receipts to remote servers.
|
||||||
#
|
#
|
||||||
|
# Note that sending read receipts to remote servers in large rooms with
|
||||||
|
# lots of other homeservers may cause additional strain on the CPU and
|
||||||
|
# network.
|
||||||
|
#
|
||||||
#allow_outgoing_read_receipts = true
|
#allow_outgoing_read_receipts = true
|
||||||
|
|
||||||
# Allow local typing updates.
|
# Allow local typing updates.
|
||||||
|
|
@ -1185,6 +1224,10 @@
|
||||||
|
|
||||||
# Allow outgoing typing updates to federation.
|
# Allow outgoing typing updates to federation.
|
||||||
#
|
#
|
||||||
|
# Note that sending typing indicators to remote servers in large rooms
|
||||||
|
# with lots of other homeservers may cause additional strain on the CPU
|
||||||
|
# and network.
|
||||||
|
#
|
||||||
#allow_outgoing_typing = true
|
#allow_outgoing_typing = true
|
||||||
|
|
||||||
# Allow incoming typing updates from federation.
|
# Allow incoming typing updates from federation.
|
||||||
|
|
@ -1318,7 +1361,7 @@
|
||||||
# sender user's server name, inbound federation X-Matrix origin, and
|
# sender user's server name, inbound federation X-Matrix origin, and
|
||||||
# outbound federation handler.
|
# outbound federation handler.
|
||||||
#
|
#
|
||||||
# You can set this to ["*"] to block all servers by default, and then
|
# You can set this to [".*"] to block all servers by default, and then
|
||||||
# use `allowed_remote_server_names` to allow only specific servers.
|
# use `allowed_remote_server_names` to allow only specific servers.
|
||||||
#
|
#
|
||||||
# example: ["badserver\\.tld$", "badphrase", "19dollarfortnitecards"]
|
# example: ["badserver\\.tld$", "badphrase", "19dollarfortnitecards"]
|
||||||
|
|
@ -1478,6 +1521,11 @@
|
||||||
#
|
#
|
||||||
#url_preview_user_agent = "continuwuity/<version> (bot; +https://continuwuity.org)"
|
#url_preview_user_agent = "continuwuity/<version> (bot; +https://continuwuity.org)"
|
||||||
|
|
||||||
|
# Determines whether audio and video files will be downloaded for URL
|
||||||
|
# previews.
|
||||||
|
#
|
||||||
|
#url_preview_allow_audio_video = false
|
||||||
|
|
||||||
# List of forbidden room aliases and room IDs as strings of regex
|
# List of forbidden room aliases and room IDs as strings of regex
|
||||||
# patterns.
|
# patterns.
|
||||||
#
|
#
|
||||||
|
|
@ -1763,6 +1811,11 @@
|
||||||
#
|
#
|
||||||
#config_reload_signal = true
|
#config_reload_signal = true
|
||||||
|
|
||||||
|
# Allow search engines and crawlers to index Continuwuity's built-in
|
||||||
|
# webpages served under the `/_continuwuity/` prefix.
|
||||||
|
#
|
||||||
|
#allow_web_indexing = false
|
||||||
|
|
||||||
[global.tls]
|
[global.tls]
|
||||||
|
|
||||||
# Path to a valid TLS certificate file.
|
# Path to a valid TLS certificate file.
|
||||||
|
|
@ -1824,14 +1877,13 @@
|
||||||
#
|
#
|
||||||
#support_mxid =
|
#support_mxid =
|
||||||
|
|
||||||
# A list of MatrixRTC foci URLs which will be served as part of the
|
# **DEPRECATED**: Use `[global.matrix_rtc].foci` instead.
|
||||||
# MSC4143 client endpoint at /.well-known/matrix/client. If you're
|
|
||||||
# setting up livekit, you'd want something like:
|
|
||||||
# rtc_focus_server_urls = [
|
|
||||||
# { type = "livekit", livekit_service_url = "https://livekit.example.com" },
|
|
||||||
# ]
|
|
||||||
#
|
#
|
||||||
# To disable, set this to be an empty vector (`[]`).
|
# A list of MatrixRTC foci URLs which will be served as part of the
|
||||||
|
# MSC4143 client endpoint at /.well-known/matrix/client.
|
||||||
|
#
|
||||||
|
# This option is deprecated and will be removed in a future release.
|
||||||
|
# Please migrate to the new `[global.matrix_rtc]` config section.
|
||||||
#
|
#
|
||||||
#rtc_focus_server_urls = []
|
#rtc_focus_server_urls = []
|
||||||
|
|
||||||
|
|
@ -1853,6 +1905,23 @@
|
||||||
#
|
#
|
||||||
#blurhash_max_raw_size = 33554432
|
#blurhash_max_raw_size = 33554432
|
||||||
|
|
||||||
|
[global.matrix_rtc]
|
||||||
|
|
||||||
|
# A list of MatrixRTC foci (transports) which will be served via the
|
||||||
|
# MSC4143 RTC transports endpoint at
|
||||||
|
# `/_matrix/client/v1/rtc/transports`. If you're setting up livekit,
|
||||||
|
# you'd want something like:
|
||||||
|
# ```toml
|
||||||
|
# [global.matrix_rtc]
|
||||||
|
# foci = [
|
||||||
|
# { type = "livekit", livekit_service_url = "https://livekit.example.com" },
|
||||||
|
# ]
|
||||||
|
# ```
|
||||||
|
#
|
||||||
|
# To disable, set this to an empty list (`[]`).
|
||||||
|
#
|
||||||
|
#foci = []
|
||||||
|
|
||||||
[global.ldap]
|
[global.ldap]
|
||||||
|
|
||||||
# Whether to enable LDAP login.
|
# Whether to enable LDAP login.
|
||||||
|
|
|
||||||
|
|
@ -48,11 +48,11 @@ EOF
|
||||||
|
|
||||||
# Developer tool versions
|
# Developer tool versions
|
||||||
# renovate: datasource=github-releases depName=cargo-bins/cargo-binstall
|
# renovate: datasource=github-releases depName=cargo-bins/cargo-binstall
|
||||||
ENV BINSTALL_VERSION=1.17.5
|
ENV BINSTALL_VERSION=1.17.7
|
||||||
# renovate: datasource=github-releases depName=psastras/sbom-rs
|
# renovate: datasource=github-releases depName=psastras/sbom-rs
|
||||||
ENV CARGO_SBOM_VERSION=0.9.1
|
ENV CARGO_SBOM_VERSION=0.9.1
|
||||||
# renovate: datasource=crate depName=lddtree
|
# renovate: datasource=crate depName=lddtree
|
||||||
ENV LDDTREE_VERSION=0.4.0
|
ENV LDDTREE_VERSION=0.5.0
|
||||||
# renovate: datasource=crate depName=timelord-cli
|
# renovate: datasource=crate depName=timelord-cli
|
||||||
ENV TIMELORD_VERSION=3.0.1
|
ENV TIMELORD_VERSION=3.0.1
|
||||||
|
|
||||||
|
|
@ -180,6 +180,11 @@ RUN --mount=type=cache,target=/usr/local/cargo/registry \
|
||||||
export RUSTFLAGS="${RUSTFLAGS}"
|
export RUSTFLAGS="${RUSTFLAGS}"
|
||||||
fi
|
fi
|
||||||
|
|
||||||
|
RUST_PROFILE_DIR="${RUST_PROFILE}"
|
||||||
|
if [[ "${RUST_PROFILE}" == "dev" ]]; then
|
||||||
|
RUST_PROFILE_DIR="debug"
|
||||||
|
fi
|
||||||
|
|
||||||
TARGET_DIR=($(cargo metadata --no-deps --format-version 1 | \
|
TARGET_DIR=($(cargo metadata --no-deps --format-version 1 | \
|
||||||
jq -r ".target_directory"))
|
jq -r ".target_directory"))
|
||||||
mkdir /out/sbin
|
mkdir /out/sbin
|
||||||
|
|
@ -191,8 +196,8 @@ RUN --mount=type=cache,target=/usr/local/cargo/registry \
|
||||||
jq -r ".packages[] | select(.name == \"$PACKAGE\") | .targets[] | select( .kind | map(. == \"bin\") | any ) | .name"))
|
jq -r ".packages[] | select(.name == \"$PACKAGE\") | .targets[] | select( .kind | map(. == \"bin\") | any ) | .name"))
|
||||||
for BINARY in "${BINARIES[@]}"; do
|
for BINARY in "${BINARIES[@]}"; do
|
||||||
echo $BINARY
|
echo $BINARY
|
||||||
xx-verify $TARGET_DIR/$(xx-cargo --print-target-triple)/${RUST_PROFILE}/$BINARY
|
xx-verify $TARGET_DIR/$(xx-cargo --print-target-triple)/${RUST_PROFILE_DIR}/$BINARY
|
||||||
cp $TARGET_DIR/$(xx-cargo --print-target-triple)/${RUST_PROFILE}/$BINARY /out/sbin/$BINARY
|
cp $TARGET_DIR/$(xx-cargo --print-target-triple)/${RUST_PROFILE_DIR}/$BINARY /out/sbin/$BINARY
|
||||||
done
|
done
|
||||||
EOF
|
EOF
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -18,11 +18,11 @@ RUN --mount=type=cache,target=/etc/apk/cache apk add \
|
||||||
|
|
||||||
# Developer tool versions
|
# Developer tool versions
|
||||||
# renovate: datasource=github-releases depName=cargo-bins/cargo-binstall
|
# renovate: datasource=github-releases depName=cargo-bins/cargo-binstall
|
||||||
ENV BINSTALL_VERSION=1.17.5
|
ENV BINSTALL_VERSION=1.17.7
|
||||||
# renovate: datasource=github-releases depName=psastras/sbom-rs
|
# renovate: datasource=github-releases depName=psastras/sbom-rs
|
||||||
ENV CARGO_SBOM_VERSION=0.9.1
|
ENV CARGO_SBOM_VERSION=0.9.1
|
||||||
# renovate: datasource=crate depName=lddtree
|
# renovate: datasource=crate depName=lddtree
|
||||||
ENV LDDTREE_VERSION=0.4.0
|
ENV LDDTREE_VERSION=0.5.0
|
||||||
|
|
||||||
# Install unpackaged tools
|
# Install unpackaged tools
|
||||||
RUN <<EOF
|
RUN <<EOF
|
||||||
|
|
|
||||||
|
|
@ -34,6 +34,11 @@
|
||||||
"name": "troubleshooting",
|
"name": "troubleshooting",
|
||||||
"label": "Troubleshooting"
|
"label": "Troubleshooting"
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
"type": "dir",
|
||||||
|
"name": "advanced",
|
||||||
|
"label": "Advanced"
|
||||||
|
},
|
||||||
"security",
|
"security",
|
||||||
{
|
{
|
||||||
"type": "dir-section-header",
|
"type": "dir-section-header",
|
||||||
|
|
@ -64,6 +69,11 @@
|
||||||
"label": "Configuration Reference",
|
"label": "Configuration Reference",
|
||||||
"name": "/reference/config"
|
"name": "/reference/config"
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
"type": "file",
|
||||||
|
"label": "Environment Variables",
|
||||||
|
"name": "/reference/environment-variables"
|
||||||
|
},
|
||||||
{
|
{
|
||||||
"type": "dir",
|
"type": "dir",
|
||||||
"label": "Admin Command Reference",
|
"label": "Admin Command Reference",
|
||||||
|
|
|
||||||
|
|
@ -2,7 +2,7 @@
|
||||||
{
|
{
|
||||||
"text": "Guide",
|
"text": "Guide",
|
||||||
"link": "/introduction",
|
"link": "/introduction",
|
||||||
"activeMatch": "^/(introduction|configuration|deploying|calls|appservices|maintenance|troubleshooting)"
|
"activeMatch": "^/(introduction|configuration|deploying|calls|appservices|maintenance|troubleshooting|advanced)"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"text": "Development",
|
"text": "Development",
|
||||||
|
|
|
||||||
7
docs/advanced/_meta.json
Normal file
7
docs/advanced/_meta.json
Normal file
|
|
@ -0,0 +1,7 @@
|
||||||
|
[
|
||||||
|
{
|
||||||
|
"type": "file",
|
||||||
|
"name": "delegation",
|
||||||
|
"label": "Delegation / split-domain"
|
||||||
|
}
|
||||||
|
]
|
||||||
206
docs/advanced/delegation.mdx
Normal file
206
docs/advanced/delegation.mdx
Normal file
|
|
@ -0,0 +1,206 @@
|
||||||
|
# Delegation/split-domain deployment
|
||||||
|
|
||||||
|
Matrix allows clients and servers to discover a homeserver's "true" destination via **`.well-known` delegation**. This is especially useful if you would like to:
|
||||||
|
|
||||||
|
- Serve Continuwuity on a subdomain while having only the base domain for your usernames
|
||||||
|
- Use a port other than `:8448` for server-to-server connections
|
||||||
|
|
||||||
|
This guide will show you how to have `@user:example.com` usernames while serving Continuwuity on `https://matrix.example.com`. It assumes you are using port 443 for both client-to-server connections and server-to-server federation.
|
||||||
|
|
||||||
|
## Configuration
|
||||||
|
|
||||||
|
First, ensure you have set up A/AAAA records for `matrix.example.com` and `example.com` pointing to your IP.
|
||||||
|
|
||||||
|
Then, ensure that the `server_name` field matches your intended username suffix. If this is not the case, you **MUST** wipe the database directory and reinstall Continuwuity with your desired `server_name`.
|
||||||
|
|
||||||
|
Then, in the `[global.well_known]` section of your config file, add the following fields:
|
||||||
|
|
||||||
|
```toml
|
||||||
|
[global.well_known]
|
||||||
|
|
||||||
|
client = "https://matrix.example.com"
|
||||||
|
|
||||||
|
# port number MUST be specified
|
||||||
|
server = "matrix.example.com:443"
|
||||||
|
|
||||||
|
# (optional) customize your support contacts
|
||||||
|
#support_page =
|
||||||
|
#support_role = "m.role.admin"
|
||||||
|
#support_email =
|
||||||
|
#support_mxid = "@user:example.com"
|
||||||
|
```
|
||||||
|
|
||||||
|
Alternatively if you are using Docker, you can set the `CONTINUWUITY_WELL_KNOWN` environment variable as below:
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
services:
|
||||||
|
continuwuity:
|
||||||
|
...
|
||||||
|
environment:
|
||||||
|
CONTINUWUITY_WELL_KNOWN: |
|
||||||
|
{
|
||||||
|
client=https://matrix.example.com,
|
||||||
|
server=matrix.example.com:443
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
## Serving with a reverse proxy
|
||||||
|
|
||||||
|
After doing the steps above, Continuwuity will serve these 3 JSON files:
|
||||||
|
|
||||||
|
- `/.well-known/matrix/client`: for Client-Server discovery
|
||||||
|
- `/.well-known/matrix/server`: for Server-Server (federation) discovery
|
||||||
|
- `/.well-known/matrix/support`: admin contact details (strongly recommended to have)
|
||||||
|
|
||||||
|
To enable full discovery, you will need to reverse proxy these paths from the base domain back to Continuwuity.
|
||||||
|
|
||||||
|
<details>
|
||||||
|
|
||||||
|
<summary>For Caddy</summary>
|
||||||
|
|
||||||
|
```
|
||||||
|
matrix.example.com:443 {
|
||||||
|
reverse_proxy 127.0.0.1:8008
|
||||||
|
}
|
||||||
|
|
||||||
|
example.com:443 {
|
||||||
|
reverse_proxy /.well-known/matrix* 127.0.0.1:8008
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
</details>
|
||||||
|
|
||||||
|
<details>
|
||||||
|
|
||||||
|
<summary>For Traefik (via Docker labels)</summary>
|
||||||
|
|
||||||
|
```
|
||||||
|
services:
|
||||||
|
continuwuity:
|
||||||
|
...
|
||||||
|
labels:
|
||||||
|
- "traefik.enable=true"
|
||||||
|
- "traefik.http.routers.continuwuity.rule=(Host(`matrix.example.com`) || (Host(`example.com`) && PathPrefix(`/.well-known/matrix`)))"
|
||||||
|
- "traefik.http.routers.continuwuity.service=continuwuity"
|
||||||
|
- "traefik.http.services.continuwuity.loadbalancer.server.port=8008"
|
||||||
|
```
|
||||||
|
|
||||||
|
</details>
|
||||||
|
|
||||||
|
Restart Continuwuity and your reverse proxy. Once that's done, visit these routes and check that the responses match the examples below:
|
||||||
|
|
||||||
|
<details open>
|
||||||
|
|
||||||
|
<summary>`https://example.com/.well-known/matrix/server`</summary>
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"m.server": "matrix.example.com:443"
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
</details>
|
||||||
|
|
||||||
|
<details open>
|
||||||
|
|
||||||
|
<summary>`https://example.com/.well-known/matrix/client`</summary>
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"m.homeserver": {
|
||||||
|
"base_url": "https://matrix.example.com/"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
</details>
|
||||||
|
|
||||||
|
## Troubleshooting
|
||||||
|
|
||||||
|
### Cannot log in with web clients
|
||||||
|
|
||||||
|
Make sure there is an `Access-Control-Allow-Origin: *` header in your `/.well-known/matrix/client` path. While Continuwuity serves this header by default, it may be dropped by reverse proxies or other middlewares.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Using SRV records (not recommended)
|
||||||
|
|
||||||
|
:::warning
|
||||||
|
The following methods are **not recommended** due to increased complexity with little benefit. If you have already set up `.well-known` delegation as above, you can safely skip this part.
|
||||||
|
:::
|
||||||
|
|
||||||
|
The following methods use SRV DNS records and only work for federation traffic. They are only included for completeness.
|
||||||
|
|
||||||
|
<details>
|
||||||
|
|
||||||
|
<summary>Using only SRV records</summary>
|
||||||
|
|
||||||
|
If you can't set up `/.well-known/matrix/server` on :443 for some reason, you can set up an SRV record (via your DNS provider) as below:
|
||||||
|
|
||||||
|
- Service and name: `_matrix-fed._tcp.example.com.`
|
||||||
|
- Priority: `10` (can be any number)
|
||||||
|
- Weight: `10` (can be any number)
|
||||||
|
- Port: `443`
|
||||||
|
- Target: `matrix.example.com.`
|
||||||
|
|
||||||
|
On the target's IP at port 443, you must configure a valid route and cert for your server name, `example.com`. Therefore, this method only works to redirect traffic into the right IP/port combo, and cannot delegate your federation to a different domain.
|
||||||
|
|
||||||
|
</details>
|
||||||
|
|
||||||
|
<details>
|
||||||
|
|
||||||
|
<summary>Using SRV records + .well-known</summary>
|
||||||
|
|
||||||
|
You can also set up `/.well-known/matrix/server` with a delegated domain but no ports:
|
||||||
|
|
||||||
|
```toml
|
||||||
|
[global.well_known]
|
||||||
|
server = "matrix.example.com"
|
||||||
|
```
|
||||||
|
|
||||||
|
Then, set up an SRV record (via your DNS provider) to announce the port number as below:
|
||||||
|
|
||||||
|
- Service and name: `_matrix-fed._tcp.matrix.example.com.`
|
||||||
|
- Priority: `10` (can be any number)
|
||||||
|
- Weight: `10` (can be any number)
|
||||||
|
- Port: `443`
|
||||||
|
- Target: `matrix.example.com.`
|
||||||
|
|
||||||
|
On the target's IP at port 443, you'll need to provide a valid route and cert for `matrix.example.com`. It provides the same feature as pure `.well-known` delegation, albeit with more parts to handle.
|
||||||
|
|
||||||
|
</details>
|
||||||
|
|
||||||
|
<details>
|
||||||
|
|
||||||
|
<summary>Using SRV records as a fallback for .well-known delegation</summary>
|
||||||
|
|
||||||
|
Assume your delegation is as below:
|
||||||
|
|
||||||
|
```toml
|
||||||
|
[global.well_known]
|
||||||
|
server = "example.com:443"
|
||||||
|
```
|
||||||
|
|
||||||
|
If your Continuwuity instance becomes temporarily unreachable, other servers will not be able to find your `/.well-known/matrix/server` file, and will default to using `server_name:8448`. This incorrect cached value can persist for a long time, and would hinder re-federation when your server eventually comes back online.
|
||||||
|
|
||||||
|
If you want other servers to default to using port :443 even when your server is offline, you could set up an SRV record (via your DNS provider) as follows:
|
||||||
|
|
||||||
|
- Service and name: `_matrix-fed._tcp.example.com.`
|
||||||
|
- Priority: `10` (can be any number)
|
||||||
|
- Weight: `10` (can be any number)
|
||||||
|
- Port: `443`
|
||||||
|
- Target: `example.com.`
|
||||||
|
|
||||||
|
On the target's IP at port 443, you'll need to provide a valid route and cert for `example.com`.
|
||||||
|
|
||||||
|
</details>
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Related Documentation
|
||||||
|
|
||||||
|
See the following Matrix Specs for full details on client/server resolution mechanisms:
|
||||||
|
|
||||||
|
- [Server-to-Server resolution](https://spec.matrix.org/v1.17/server-server-api/#resolving-server-names) (see this for more information on SRV records)
|
||||||
|
- [Client-to-Server resolution](https://spec.matrix.org/v1.17/client-server-api/#server-discovery)
|
||||||
|
- [MSC1929: Homeserver Admin Contact and Support page](https://github.com/matrix-org/matrix-spec-proposals/pull/1929)
|
||||||
|
|
@ -78,47 +78,19 @@ You will need to allow ports `7881/tcp` and `50100:50200/udp` through your firew
|
||||||
|
|
||||||
### 3. Telling clients where to find LiveKit
|
### 3. Telling clients where to find LiveKit
|
||||||
|
|
||||||
To tell clients where to find LiveKit, you need to add the address of your `lk-jwt-service` to your client .well-known file. To do so, in the config section `global.well-known`, add (or modify) the option `rtc_focus_server_urls`.
|
To tell clients where to find LiveKit, you need to add the address of your `lk-jwt-service` to the `[global.matrix_rtc]` config section using the `foci` option.
|
||||||
|
|
||||||
The variable should be a list of servers serving as MatrixRTC endpoints to serve in the well-known file to the client.
|
The variable should be a list of servers serving as MatrixRTC endpoints. Clients discover these via the `/_matrix/client/v1/rtc/transports` endpoint (MSC4143).
|
||||||
|
|
||||||
```toml
|
```toml
|
||||||
rtc_focus_server_urls = [
|
[global.matrix_rtc]
|
||||||
|
foci = [
|
||||||
{ type = "livekit", livekit_service_url = "https://livekit.example.com" },
|
{ type = "livekit", livekit_service_url = "https://livekit.example.com" },
|
||||||
]
|
]
|
||||||
```
|
```
|
||||||
|
|
||||||
Remember to replace the URL with the address you are deploying your instance of lk-jwt-service to.
|
Remember to replace the URL with the address you are deploying your instance of lk-jwt-service to.
|
||||||
|
|
||||||
#### Serving .well-known manually
|
|
||||||
|
|
||||||
If you don't let Continuwuity serve your `.well-known` files, you need to add the following lines to your `.well-known/matrix/client` file, remembering to replace the URL with your own `lk-jwt-service` deployment:
|
|
||||||
|
|
||||||
```json
|
|
||||||
"org.matrix.msc4143.rtc_foci": [
|
|
||||||
{
|
|
||||||
"type": "livekit",
|
|
||||||
"livekit_service_url": "https://livekit.example.com"
|
|
||||||
}
|
|
||||||
]
|
|
||||||
```
|
|
||||||
|
|
||||||
The final file should look something like this:
|
|
||||||
|
|
||||||
```json
|
|
||||||
{
|
|
||||||
"m.homeserver": {
|
|
||||||
"base_url":"https://matrix.example.com"
|
|
||||||
},
|
|
||||||
"org.matrix.msc4143.rtc_foci": [
|
|
||||||
{
|
|
||||||
"type": "livekit",
|
|
||||||
"livekit_service_url": "https://livekit.example.com"
|
|
||||||
}
|
|
||||||
]
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
### 4. Configure your Reverse Proxy
|
### 4. Configure your Reverse Proxy
|
||||||
|
|
||||||
Reverse proxies can be configured in many different ways - so we can't provide a step by step for this.
|
Reverse proxies can be configured in many different ways - so we can't provide a step by step for this.
|
||||||
|
|
@ -137,7 +109,7 @@ By default, all routes should be forwarded to Livekit with the exception of the
|
||||||
# for lk-jwt-service
|
# for lk-jwt-service
|
||||||
@lk-jwt-service path /sfu/get* /healthz* /get_token*
|
@lk-jwt-service path /sfu/get* /healthz* /get_token*
|
||||||
route @lk-jwt-service {
|
route @lk-jwt-service {
|
||||||
reverse_proxy 127.0.0.1:8080
|
reverse_proxy 127.0.0.1:8081
|
||||||
}
|
}
|
||||||
|
|
||||||
# for livekit
|
# for livekit
|
||||||
|
|
@ -146,6 +118,46 @@ By default, all routes should be forwarded to Livekit with the exception of the
|
||||||
```
|
```
|
||||||
</details>
|
</details>
|
||||||
|
|
||||||
|
<details>
|
||||||
|
<summary>Example nginx config</summary>
|
||||||
|
```
|
||||||
|
server {
|
||||||
|
server_name matrix-rtc.example.com;
|
||||||
|
|
||||||
|
# for lk-jwt-service
|
||||||
|
location ~ ^/(sfu/get|healthz|get_token) {
|
||||||
|
proxy_pass http://127.0.0.1:8081$request_uri;
|
||||||
|
proxy_set_header X-Forwarded-For $remote_addr;
|
||||||
|
proxy_set_header X-Real-IP $remote_addr;
|
||||||
|
proxy_set_header Host $http_host;
|
||||||
|
proxy_buffering off;
|
||||||
|
}
|
||||||
|
|
||||||
|
# for livekit
|
||||||
|
location / {
|
||||||
|
proxy_pass http://127.0.0.1:7880$request_uri;
|
||||||
|
proxy_set_header X-Forwarded-For $remote_addr;
|
||||||
|
proxy_set_header X-Real-IP $remote_addr;
|
||||||
|
proxy_set_header Host $http_host;
|
||||||
|
proxy_buffering off;
|
||||||
|
|
||||||
|
# websocket
|
||||||
|
proxy_http_version 1.1;
|
||||||
|
proxy_set_header Upgrade $http_upgrade;
|
||||||
|
proxy_set_header Connection $connection_upgrade;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
Note that for websockets to work, you need to have this somewhere outside your server block:
|
||||||
|
```
|
||||||
|
map $http_upgrade $connection_upgrade {
|
||||||
|
default upgrade;
|
||||||
|
'' close;
|
||||||
|
}
|
||||||
|
```
|
||||||
|
</details>
|
||||||
|
|
||||||
<details>
|
<details>
|
||||||
<summary>Example traefik router</summary>
|
<summary>Example traefik router</summary>
|
||||||
```
|
```
|
||||||
|
|
@ -226,4 +238,3 @@ turn:
|
||||||
- [Synapse documentation](https://github.com/element-hq/element-call/blob/livekit/docs/self-hosting.md)
|
- [Synapse documentation](https://github.com/element-hq/element-call/blob/livekit/docs/self-hosting.md)
|
||||||
- [Community guide](https://tomfos.tr/matrix/livekit/)
|
- [Community guide](https://tomfos.tr/matrix/livekit/)
|
||||||
- [Community guide](https://blog.kimiblock.top/2024/12/24/hosting-element-call/)
|
- [Community guide](https://blog.kimiblock.top/2024/12/24/hosting-element-call/)
|
||||||
-
|
|
||||||
|
|
|
||||||
|
|
@ -13,8 +13,9 @@ settings.
|
||||||
|
|
||||||
The config file to use can be specified on the commandline when running
|
The config file to use can be specified on the commandline when running
|
||||||
Continuwuity by specifying the `-c`, `--config` flag. Alternatively, you can use
|
Continuwuity by specifying the `-c`, `--config` flag. Alternatively, you can use
|
||||||
the environment variable `CONDUWUIT_CONFIG` to specify the config file to used.
|
the environment variable `CONTINUWUITY_CONFIG` to specify the config file to be
|
||||||
Conduit's environment variables are supported for backwards compatibility.
|
used; see [the section on environment variables](#environment-variables) for
|
||||||
|
more information.
|
||||||
|
|
||||||
## Option commandline flag
|
## Option commandline flag
|
||||||
|
|
||||||
|
|
@ -52,13 +53,15 @@ This commandline argument can be paired with the `--option` flag.
|
||||||
|
|
||||||
All of the settings that are found in the config file can be specified by using
|
All of the settings that are found in the config file can be specified by using
|
||||||
environment variables. The environment variable names should be all caps and
|
environment variables. The environment variable names should be all caps and
|
||||||
prefixed with `CONDUWUIT_`.
|
prefixed with `CONTINUWUITY_`.
|
||||||
|
|
||||||
For example, if the setting you are changing is `max_request_size`, then the
|
For example, if the setting you are changing is `max_request_size`, then the
|
||||||
environment variable to set is `CONDUWUIT_MAX_REQUEST_SIZE`.
|
environment variable to set is `CONTINUWUITY_MAX_REQUEST_SIZE`.
|
||||||
|
|
||||||
To modify config options not in the `[global]` context such as
|
To modify config options not in the `[global]` context such as
|
||||||
`[global.well_known]`, use the `__` suffix split: `CONDUWUIT_WELL_KNOWN__SERVER`
|
`[global.well_known]`, use the `__` suffix split:
|
||||||
|
`CONTINUWUITY_WELL_KNOWN__SERVER`
|
||||||
|
|
||||||
Conduit's environment variables are supported for backwards compatibility (e.g.
|
Conduit and conduwuit's environment variables are also supported for backwards
|
||||||
|
compatibility, via the `CONDUIT_` and `CONDUWUIT_` prefixes respectively (e.g.
|
||||||
`CONDUIT_SERVER_NAME`).
|
`CONDUIT_SERVER_NAME`).
|
||||||
|
|
|
||||||
|
|
@ -6,9 +6,9 @@ services:
|
||||||
### then you are ready to go.
|
### then you are ready to go.
|
||||||
image: forgejo.ellis.link/continuwuation/continuwuity:latest
|
image: forgejo.ellis.link/continuwuation/continuwuity:latest
|
||||||
restart: unless-stopped
|
restart: unless-stopped
|
||||||
|
command: /sbin/conduwuit
|
||||||
volumes:
|
volumes:
|
||||||
- db:/var/lib/continuwuity
|
- db:/var/lib/continuwuity
|
||||||
- /etc/resolv.conf:/etc/resolv.conf:ro # Use the host's DNS resolver rather than Docker's.
|
|
||||||
#- ./continuwuity.toml:/etc/continuwuity.toml
|
#- ./continuwuity.toml:/etc/continuwuity.toml
|
||||||
networks:
|
networks:
|
||||||
- proxy
|
- proxy
|
||||||
|
|
|
||||||
|
|
@ -16,14 +16,14 @@ services:
|
||||||
restart: unless-stopped
|
restart: unless-stopped
|
||||||
labels:
|
labels:
|
||||||
caddy: example.com
|
caddy: example.com
|
||||||
caddy.0_respond: /.well-known/matrix/server {"m.server":"matrix.example.com:443"}
|
caddy.reverse_proxy: /.well-known/matrix/* homeserver:6167
|
||||||
caddy.1_respond: /.well-known/matrix/client {"m.server":{"base_url":"https://matrix.example.com"},"m.homeserver":{"base_url":"https://matrix.example.com"},"org.matrix.msc3575.proxy":{"url":"https://matrix.example.com"}}
|
|
||||||
|
|
||||||
homeserver:
|
homeserver:
|
||||||
### If you already built the Continuwuity image with 'docker build' or want to use a registry image,
|
### If you already built the Continuwuity image with 'docker build' or want to use a registry image,
|
||||||
### then you are ready to go.
|
### then you are ready to go.
|
||||||
image: forgejo.ellis.link/continuwuation/continuwuity:latest
|
image: forgejo.ellis.link/continuwuation/continuwuity:latest
|
||||||
restart: unless-stopped
|
restart: unless-stopped
|
||||||
|
command: /sbin/conduwuit
|
||||||
volumes:
|
volumes:
|
||||||
- db:/var/lib/continuwuity
|
- db:/var/lib/continuwuity
|
||||||
- /etc/resolv.conf:/etc/resolv.conf:ro # Use the host's DNS resolver rather than Docker's.
|
- /etc/resolv.conf:/etc/resolv.conf:ro # Use the host's DNS resolver rather than Docker's.
|
||||||
|
|
@ -42,6 +42,10 @@ services:
|
||||||
#CONTINUWUITY_LOG: warn,state_res=warn
|
#CONTINUWUITY_LOG: warn,state_res=warn
|
||||||
CONTINUWUITY_ADDRESS: 0.0.0.0
|
CONTINUWUITY_ADDRESS: 0.0.0.0
|
||||||
#CONTINUWUITY_CONFIG: '/etc/continuwuity.toml' # Uncomment if you mapped config toml above
|
#CONTINUWUITY_CONFIG: '/etc/continuwuity.toml' # Uncomment if you mapped config toml above
|
||||||
|
|
||||||
|
# Required for .well-known delegation - edit these according to your chosen domain
|
||||||
|
CONTINUWUITY_WELL_KNOWN__CLIENT: https://matrix.example.com
|
||||||
|
CONTINUWUITY_WELL_KNOWN__SERVER: matrix.example.com:443
|
||||||
networks:
|
networks:
|
||||||
- caddy
|
- caddy
|
||||||
labels:
|
labels:
|
||||||
|
|
|
||||||
|
|
@ -6,6 +6,7 @@ services:
|
||||||
### then you are ready to go.
|
### then you are ready to go.
|
||||||
image: forgejo.ellis.link/continuwuation/continuwuity:latest
|
image: forgejo.ellis.link/continuwuation/continuwuity:latest
|
||||||
restart: unless-stopped
|
restart: unless-stopped
|
||||||
|
command: /sbin/conduwuit
|
||||||
volumes:
|
volumes:
|
||||||
- db:/var/lib/continuwuity
|
- db:/var/lib/continuwuity
|
||||||
- /etc/resolv.conf:/etc/resolv.conf:ro # Use the host's DNS resolver rather than Docker's.
|
- /etc/resolv.conf:/etc/resolv.conf:ro # Use the host's DNS resolver rather than Docker's.
|
||||||
|
|
|
||||||
|
|
@ -6,6 +6,7 @@ services:
|
||||||
### then you are ready to go.
|
### then you are ready to go.
|
||||||
image: forgejo.ellis.link/continuwuation/continuwuity:latest
|
image: forgejo.ellis.link/continuwuation/continuwuity:latest
|
||||||
restart: unless-stopped
|
restart: unless-stopped
|
||||||
|
command: /sbin/conduwuit
|
||||||
ports:
|
ports:
|
||||||
- 8448:6167
|
- 8448:6167
|
||||||
volumes:
|
volumes:
|
||||||
|
|
|
||||||
|
|
@ -2,28 +2,26 @@
|
||||||
|
|
||||||
## Docker
|
## Docker
|
||||||
|
|
||||||
To run Continuwuity with Docker, you can either build the image yourself or pull it
|
To run Continuwuity with Docker, you can either build the image yourself or pull
|
||||||
from a registry.
|
it from a registry.
|
||||||
|
|
||||||
### Use a registry
|
### Use a registry
|
||||||
|
|
||||||
OCI images for Continuwuity are available in the registries listed below.
|
Available OCI images:
|
||||||
|
|
||||||
| Registry | Image | Notes |
|
| Registry | Image | Notes |
|
||||||
| --------------- | --------------------------------------------------------------- | -----------------------|
|
| ---------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------- |
|
||||||
| Forgejo Registry| [forgejo.ellis.link/continuwuation/continuwuity:latest](https://forgejo.ellis.link/continuwuation/-/packages/container/continuwuity/latest) | Latest tagged image. |
|
| Forgejo Registry | [forgejo.ellis.link/continuwuation/continuwuity:latest](https://forgejo.ellis.link/continuwuation/-/packages/container/continuwuity/latest) | Latest tagged image. |
|
||||||
| Forgejo Registry| [forgejo.ellis.link/continuwuation/continuwuity:main](https://forgejo.ellis.link/continuwuation/-/packages/container/continuwuity/main) | Main branch image. |
|
| Forgejo Registry | [forgejo.ellis.link/continuwuation/continuwuity:main](https://forgejo.ellis.link/continuwuation/-/packages/container/continuwuity/main) | Main branch image. |
|
||||||
| Forgejo Registry| [forgejo.ellis.link/continuwuation/continuwuity:latest-maxperf](https://forgejo.ellis.link/continuwuation/-/packages/container/continuwuity/latest-maxperf) | [Performance optimised version.](./generic.mdx#performance-optimised-builds) |
|
| Forgejo Registry | [forgejo.ellis.link/continuwuation/continuwuity:latest-maxperf](https://forgejo.ellis.link/continuwuation/-/packages/container/continuwuity/latest-maxperf) | [Performance optimised version.](./generic.mdx#performance-optimised-builds) |
|
||||||
| Forgejo Registry| [forgejo.ellis.link/continuwuation/continuwuity:main-maxperf](https://forgejo.ellis.link/continuwuation/-/packages/container/continuwuity/main-maxperf) | [Performance optimised version.](./generic.mdx#performance-optimised-builds) |
|
| Forgejo Registry | [forgejo.ellis.link/continuwuation/continuwuity:main-maxperf](https://forgejo.ellis.link/continuwuation/-/packages/container/continuwuity/main-maxperf) | [Performance optimised version.](./generic.mdx#performance-optimised-builds) |
|
||||||
|
|
||||||
Use
|
**Example:**
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
docker image pull $LINK
|
docker image pull forgejo.ellis.link/continuwuation/continuwuity:main-maxperf
|
||||||
```
|
```
|
||||||
|
|
||||||
to pull it to your machine.
|
|
||||||
|
|
||||||
#### Mirrors
|
#### Mirrors
|
||||||
|
|
||||||
Images are mirrored to multiple locations automatically, on a schedule:
|
Images are mirrored to multiple locations automatically, on a schedule:
|
||||||
|
|
@ -33,39 +31,146 @@ Images are mirrored to multiple locations automatically, on a schedule:
|
||||||
- `registry.gitlab.com/continuwuity/continuwuity`
|
- `registry.gitlab.com/continuwuity/continuwuity`
|
||||||
- `git.nexy7574.co.uk/mirrored/continuwuity` (releases only, no `main`)
|
- `git.nexy7574.co.uk/mirrored/continuwuity` (releases only, no `main`)
|
||||||
|
|
||||||
### Run
|
### Quick Run
|
||||||
|
|
||||||
When you have the image, you can simply run it with
|
Get a working Continuwuity server with an admin user in four steps:
|
||||||
|
|
||||||
|
#### Prerequisites
|
||||||
|
|
||||||
|
Continuwuity requires HTTPS for Matrix federation. You'll need:
|
||||||
|
|
||||||
|
- A domain name pointing to your server
|
||||||
|
- A reverse proxy with SSL/TLS certificates (Traefik, Caddy, nginx, etc.)
|
||||||
|
|
||||||
|
See [Docker Compose](#docker-compose) for complete examples.
|
||||||
|
|
||||||
|
#### Environment Variables
|
||||||
|
|
||||||
|
- `CONTINUWUITY_SERVER_NAME` - Your Matrix server's domain name
|
||||||
|
- `CONTINUWUITY_DATABASE_PATH` - Where to store your database (must match the
|
||||||
|
volume mount)
|
||||||
|
- `CONTINUWUITY_ADDRESS` - Bind address (use `0.0.0.0` to listen on all
|
||||||
|
interfaces)
|
||||||
|
- `CONTINUWUITY_ALLOW_REGISTRATION` - Set to `false` to disable registration, or
|
||||||
|
use with `CONTINUWUITY_REGISTRATION_TOKEN` to require a token (see
|
||||||
|
[reference](../reference/environment-variables.mdx#registration--user-configuration)
|
||||||
|
for details)
|
||||||
|
|
||||||
|
See the
|
||||||
|
[Environment Variables Reference](../reference/environment-variables.mdx) for
|
||||||
|
more configuration options.
|
||||||
|
|
||||||
|
#### 1. Pull the image
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
docker run -d -p 8448:6167 \
|
docker pull forgejo.ellis.link/continuwuation/continuwuity:latest
|
||||||
-v db:/var/lib/continuwuity/ \
|
|
||||||
-e CONTINUWUITY_SERVER_NAME="your.server.name" \
|
|
||||||
-e CONTINUWUITY_ALLOW_REGISTRATION=false \
|
|
||||||
--name continuwuity $LINK
|
|
||||||
```
|
```
|
||||||
|
|
||||||
or you can use [Docker Compose](#docker-compose).
|
#### 2. Start the server with initial admin user
|
||||||
|
|
||||||
The `-d` flag lets the container run in detached mode. You may supply an
|
```bash
|
||||||
optional `continuwuity.toml` config file, the example config can be found
|
docker run -d \
|
||||||
[here](../reference/config.mdx). You can pass in different env vars to
|
-p 6167:6167 \
|
||||||
change config values on the fly. You can even configure Continuwuity completely by
|
-v continuwuity_db:/var/lib/continuwuity \
|
||||||
using env vars. For an overview of possible values, please take a look at the
|
-e CONTINUWUITY_SERVER_NAME="matrix.example.com" \
|
||||||
<a href="/examples/docker-compose.yml" target="_blank">`docker-compose.yml`</a> file.
|
-e CONTINUWUITY_DATABASE_PATH="/var/lib/continuwuity" \
|
||||||
|
-e CONTINUWUITY_ADDRESS="0.0.0.0" \
|
||||||
|
-e CONTINUWUITY_ALLOW_REGISTRATION="false" \
|
||||||
|
--name continuwuity \
|
||||||
|
forgejo.ellis.link/continuwuation/continuwuity:latest \
|
||||||
|
/sbin/conduwuit --execute "users create-user admin"
|
||||||
|
```
|
||||||
|
|
||||||
If you just want to test Continuwuity for a short time, you can use the `--rm`
|
Replace `matrix.example.com` with your actual server name and `admin` with
|
||||||
flag, which cleans up everything related to your container after you stop
|
your preferred username.
|
||||||
it.
|
|
||||||
|
#### 3. Get your admin password
|
||||||
|
|
||||||
|
```bash
|
||||||
|
docker logs continuwuity 2>&1 | grep "Created user"
|
||||||
|
```
|
||||||
|
|
||||||
|
You'll see output like:
|
||||||
|
|
||||||
|
```
|
||||||
|
Created user with user_id: @admin:matrix.example.com and password: `[auto-generated-password]`
|
||||||
|
```
|
||||||
|
|
||||||
|
#### 4. Configure your reverse proxy
|
||||||
|
|
||||||
|
Configure your reverse proxy to forward HTTPS traffic to Continuwuity. See
|
||||||
|
[Docker Compose](#docker-compose) for examples.
|
||||||
|
|
||||||
|
Once configured, log in with any Matrix client using `@admin:matrix.example.com`
|
||||||
|
and the generated password. You'll automatically be invited to the admin room
|
||||||
|
where you can manage your server.
|
||||||
|
|
||||||
### Docker Compose
|
### Docker Compose
|
||||||
|
|
||||||
If the `docker run` command is not suitable for you or your setup, you can also use one
|
Docker Compose is the recommended deployment method. These examples include
|
||||||
of the provided `docker-compose` files.
|
reverse proxy configurations for Matrix federation.
|
||||||
|
|
||||||
Depending on your proxy setup, you can use one of the following files:
|
#### Matrix Federation Requirements
|
||||||
|
|
||||||
### For existing Traefik setup
|
For Matrix federation to work, you need to serve `.well-known/matrix/client` and
|
||||||
|
`.well-known/matrix/server` endpoints. You can achieve this either by:
|
||||||
|
|
||||||
|
1. **Using a well-known service** - The compose files below include an nginx
|
||||||
|
container to serve these files
|
||||||
|
2. **Using Continuwuity's built-in delegation** (easier for Traefik) - Configure
|
||||||
|
delegation files in your config, then proxy `/.well-known/matrix/*` to
|
||||||
|
Continuwuity
|
||||||
|
|
||||||
|
**Traefik example using built-in delegation:**
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
labels:
|
||||||
|
traefik.http.routers.continuwuity.rule: >-
|
||||||
|
(Host(`matrix.example.com`) ||
|
||||||
|
(Host(`example.com`) && PathPrefix(`/.well-known/matrix`)))
|
||||||
|
```
|
||||||
|
|
||||||
|
This routes your Matrix domain and well-known paths to Continuwuity.
|
||||||
|
|
||||||
|
#### Creating Your First Admin User
|
||||||
|
|
||||||
|
Add the `--execute` command to create an admin user on first startup. In your
|
||||||
|
compose file, add under the `continuwuity` service:
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
services:
|
||||||
|
continuwuity:
|
||||||
|
image: forgejo.ellis.link/continuwuation/continuwuity:latest
|
||||||
|
command: /sbin/conduwuit --execute "users create-user admin"
|
||||||
|
# ... rest of configuration
|
||||||
|
```
|
||||||
|
|
||||||
|
Then retrieve the auto-generated password:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
docker compose logs continuwuity | grep "Created user"
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Choose Your Reverse Proxy
|
||||||
|
|
||||||
|
Select the compose file that matches your setup:
|
||||||
|
|
||||||
|
:::note DNS Performance
|
||||||
|
Docker's default DNS resolver can cause performance issues with Matrix
|
||||||
|
federation. If you experience slow federation or DNS timeouts, you may need to
|
||||||
|
use your host's DNS resolver instead. Add this volume mount to the
|
||||||
|
`continuwuity` service:
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
volumes:
|
||||||
|
- /etc/resolv.conf:/etc/resolv.conf:ro
|
||||||
|
```
|
||||||
|
|
||||||
|
See [Troubleshooting - DNS Issues](../troubleshooting.mdx#potential-dns-issues-when-using-docker)
|
||||||
|
for more details and alternative solutions.
|
||||||
|
:::
|
||||||
|
|
||||||
|
##### For existing Traefik setup
|
||||||
|
|
||||||
<details>
|
<details>
|
||||||
<summary>docker-compose.for-traefik.yml</summary>
|
<summary>docker-compose.for-traefik.yml</summary>
|
||||||
|
|
@ -76,7 +181,7 @@ Depending on your proxy setup, you can use one of the following files:
|
||||||
|
|
||||||
</details>
|
</details>
|
||||||
|
|
||||||
### With Traefik included
|
##### With Traefik included
|
||||||
|
|
||||||
<details>
|
<details>
|
||||||
<summary>docker-compose.with-traefik.yml</summary>
|
<summary>docker-compose.with-traefik.yml</summary>
|
||||||
|
|
@ -87,7 +192,7 @@ Depending on your proxy setup, you can use one of the following files:
|
||||||
|
|
||||||
</details>
|
</details>
|
||||||
|
|
||||||
### With Caddy Docker Proxy
|
##### With Caddy Docker Proxy
|
||||||
|
|
||||||
<details>
|
<details>
|
||||||
<summary>docker-compose.with-caddy.yml</summary>
|
<summary>docker-compose.with-caddy.yml</summary>
|
||||||
|
|
@ -98,9 +203,15 @@ Replace all `example.com` placeholders with your own domain.
|
||||||
|
|
||||||
```
|
```
|
||||||
|
|
||||||
|
If you don't already have a network for Caddy to monitor, create one first:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
docker network create caddy
|
||||||
|
```
|
||||||
|
|
||||||
</details>
|
</details>
|
||||||
|
|
||||||
### For other reverse proxies
|
##### For other reverse proxies
|
||||||
|
|
||||||
<details>
|
<details>
|
||||||
<summary>docker-compose.yml</summary>
|
<summary>docker-compose.yml</summary>
|
||||||
|
|
@ -111,7 +222,7 @@ Replace all `example.com` placeholders with your own domain.
|
||||||
|
|
||||||
</details>
|
</details>
|
||||||
|
|
||||||
### Override file
|
##### Override file for customisation
|
||||||
|
|
||||||
<details>
|
<details>
|
||||||
<summary>docker-compose.override.yml</summary>
|
<summary>docker-compose.override.yml</summary>
|
||||||
|
|
@ -122,98 +233,24 @@ Replace all `example.com` placeholders with your own domain.
|
||||||
|
|
||||||
</details>
|
</details>
|
||||||
|
|
||||||
When picking the Traefik-related compose file, rename it to
|
#### Starting Your Server
|
||||||
`docker-compose.yml`, and rename the override file to
|
|
||||||
`docker-compose.override.yml`. Edit the latter with the values you want for your
|
|
||||||
server.
|
|
||||||
|
|
||||||
When picking the `caddy-docker-proxy` compose file, it's important to first
|
1. Choose your compose file and rename it to `docker-compose.yml`
|
||||||
create the `caddy` network before spinning up the containers:
|
2. If using the override file, rename it to `docker-compose.override.yml` and
|
||||||
|
edit your values
|
||||||
```bash
|
3. Start the server:
|
||||||
docker network create caddy
|
|
||||||
```
|
|
||||||
|
|
||||||
After that, you can rename it to `docker-compose.yml` and spin up the
|
|
||||||
containers!
|
|
||||||
|
|
||||||
Additional info about deploying Continuwuity can be found [here](generic.mdx).
|
|
||||||
|
|
||||||
### Build
|
|
||||||
|
|
||||||
Official Continuwuity images are built using **Docker Buildx** and the Dockerfile found at [`docker/Dockerfile`][dockerfile-path]. This approach uses common Docker tooling and enables efficient multi-platform builds.
|
|
||||||
|
|
||||||
The resulting images are widely compatible with Docker and other container runtimes like Podman or containerd.
|
|
||||||
|
|
||||||
The images *do not contain a shell*. They contain only the Continuwuity binary, required libraries, TLS certificates, and metadata.
|
|
||||||
|
|
||||||
<details>
|
|
||||||
<summary>Click to view the Dockerfile</summary>
|
|
||||||
|
|
||||||
You can also <a href="https://forgejo.ellis.link/continuwuation/continuwuation/src/branch/main/docker/Dockerfile" target="_blank">view the Dockerfile on Forgejo</a>.
|
|
||||||
|
|
||||||
```dockerfile file="../../docker/Dockerfile"
|
|
||||||
|
|
||||||
```
|
|
||||||
|
|
||||||
</details>
|
|
||||||
|
|
||||||
To build an image locally using Docker Buildx, you can typically run a command like:
|
|
||||||
|
|
||||||
```bash
|
|
||||||
# Build for the current platform and load into the local Docker daemon
|
|
||||||
docker buildx build --load --tag continuwuity:latest -f docker/Dockerfile .
|
|
||||||
|
|
||||||
# Example: Build for specific platforms and push to a registry.
|
|
||||||
# docker buildx build --platform linux/amd64,linux/arm64 --tag registry.io/org/continuwuity:latest -f docker/Dockerfile . --push
|
|
||||||
|
|
||||||
# Example: Build binary optimised for the current CPU (standard release profile)
|
|
||||||
# docker buildx build --load \
|
|
||||||
# --tag continuwuity:latest \
|
|
||||||
# --build-arg TARGET_CPU=native \
|
|
||||||
# -f docker/Dockerfile .
|
|
||||||
|
|
||||||
# Example: Build maxperf variant (release-max-perf profile with LTO)
|
|
||||||
# Optimised for runtime performance and smaller binary size, but requires longer build time
|
|
||||||
# docker buildx build --load \
|
|
||||||
# --tag continuwuity:latest-maxperf \
|
|
||||||
# --build-arg TARGET_CPU=native \
|
|
||||||
# --build-arg RUST_PROFILE=release-max-perf \
|
|
||||||
# -f docker/Dockerfile .
|
|
||||||
```
|
|
||||||
|
|
||||||
Refer to the Docker Buildx documentation for more advanced build options.
|
|
||||||
|
|
||||||
[dockerfile-path]: https://forgejo.ellis.link/continuwuation/continuwuation/src/branch/main/docker/Dockerfile
|
|
||||||
|
|
||||||
### Run
|
|
||||||
|
|
||||||
If you have already built the image or want to use one from the registries, you
|
|
||||||
can start the container and everything else in the compose file in detached
|
|
||||||
mode with:
|
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
docker compose up -d
|
docker compose up -d
|
||||||
```
|
```
|
||||||
|
|
||||||
> **Note:** Don't forget to modify and adjust the compose file to your needs.
|
See the [generic deployment guide](generic.mdx) for more deployment options.
|
||||||
|
|
||||||
### Use Traefik as Proxy
|
### Building Custom Images
|
||||||
|
|
||||||
As a container user, you probably know about Traefik. It is an easy-to-use
|
For information on building your own Continuwuity Docker images, see the
|
||||||
reverse proxy for making containerized apps and services available through the
|
[Building Docker Images](../development/index.mdx#building-docker-images)
|
||||||
web. With the Traefik-related docker-compose files provided above, it is equally easy
|
section in the development documentation.
|
||||||
to deploy and use Continuwuity, with a small caveat. If you have already looked at
|
|
||||||
the files, you should have seen the `well-known` service, which is the
|
|
||||||
small caveat. Traefik is simply a proxy and load balancer and cannot
|
|
||||||
serve any kind of content. For Continuwuity to federate, we need to either
|
|
||||||
expose ports `443` and `8448` or serve two endpoints: `.well-known/matrix/client`
|
|
||||||
and `.well-known/matrix/server`.
|
|
||||||
|
|
||||||
With the service `well-known`, we use a single `nginx` container that serves
|
|
||||||
those two files.
|
|
||||||
|
|
||||||
Alternatively, you can use Continuwuity's built-in delegation file capability. Set up the delegation files in the configuration file, and then proxy paths under `/.well-known/matrix` to continuwuity. For example, the label ``traefik.http.routers.continuwuity.rule=(Host(`matrix.ellis.link`) || (Host(`ellis.link`) && PathPrefix(`/.well-known/matrix`)))`` does this for the domain `ellis.link`.
|
|
||||||
|
|
||||||
## Voice communication
|
## Voice communication
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -1,5 +1,7 @@
|
||||||
# Continuwuity for FreeBSD
|
# Continuwuity for FreeBSD
|
||||||
|
|
||||||
Continuwuity currently does not provide FreeBSD builds or FreeBSD packaging. However, Continuwuity does build and work on FreeBSD using the system-provided RocksDB.
|
Continuwuity doesn't provide official FreeBSD packages; however, a community-maintained set of packages is available on [Forgejo](https://forgejo.ellis.link/katie/continuwuity-bsd). Note that these are provided as standalone packages and are not part of a FreeBSD package repository (yet), so updates need to be downloaded and installed manually.
|
||||||
|
|
||||||
Contributions to get Continuwuity packaged for FreeBSD are welcome.
|
Please see the installation instructions in that repository. Direct any questions to its issue tracker or to [@katie:kat5.dev](https://matrix.to/#/@katie:kat5.dev).
|
||||||
|
|
||||||
|
For general BSD support, please join our [Continuwuity BSD](https://matrix.to/#/%23bsd:continuwuity.org) community room.
|
||||||
|
|
|
||||||
|
|
@ -1,7 +1,110 @@
|
||||||
# Continuwuity for Kubernetes
|
# Continuwuity for Kubernetes
|
||||||
|
|
||||||
Continuwuity doesn't support horizontal scalability or distributed loading
|
Continuwuity doesn't support horizontal scalability or distributed loading
|
||||||
natively. However, [a community-maintained Helm Chart is available here to run
|
natively. However, a deployment in Kubernetes is very similar to the Docker
|
||||||
|
setup. This is because Continuwuity can be fully configured using environment
|
||||||
|
variables. A sample StatefulSet is shared below. The only thing missing is
|
||||||
|
a PVC definition (named `continuwuity-data`) for the volume mounted to
|
||||||
|
the StatefulSet, an Ingress resource to point your webserver to the
|
||||||
|
Continuwuity Pods, and a Service resource (targeting `app.kubernetes.io/name: continuwuity`)
|
||||||
|
to glue the Ingress and Pod together.
|
||||||
|
|
||||||
|
Carefully go through the `env` section and add, change, and remove any env vars you like using the [Configuration reference](https://continuwuity.org/reference/config.html)
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
apiVersion: apps/v1
|
||||||
|
kind: StatefulSet
|
||||||
|
metadata:
|
||||||
|
name: continuwuity
|
||||||
|
namespace: matrix
|
||||||
|
labels:
|
||||||
|
app.kubernetes.io/name: continuwuity
|
||||||
|
spec:
|
||||||
|
replicas: 1
|
||||||
|
serviceName: continuwuity
|
||||||
|
podManagementPolicy: Parallel
|
||||||
|
selector:
|
||||||
|
matchLabels:
|
||||||
|
app.kubernetes.io/name: continuwuity
|
||||||
|
template:
|
||||||
|
metadata:
|
||||||
|
labels:
|
||||||
|
app.kubernetes.io/name: continuwuity
|
||||||
|
spec:
|
||||||
|
securityContext:
|
||||||
|
sysctls:
|
||||||
|
- name: net.ipv4.ip_unprivileged_port_start
|
||||||
|
value: "0"
|
||||||
|
containers:
|
||||||
|
- name: continuwuity
|
||||||
|
# use a sha hash <3
|
||||||
|
image: forgejo.ellis.link/continuwuation/continuwuity:latest
|
||||||
|
command: ["/sbin/conduwuit"]
|
||||||
|
imagePullPolicy: IfNotPresent
|
||||||
|
ports:
|
||||||
|
- name: http
|
||||||
|
containerPort: 80
|
||||||
|
volumeMounts:
|
||||||
|
- mountPath: /data
|
||||||
|
name: data
|
||||||
|
subPath: data
|
||||||
|
securityContext:
|
||||||
|
capabilities:
|
||||||
|
add:
|
||||||
|
- NET_BIND_SERVICE
|
||||||
|
env:
|
||||||
|
- name: TOKIO_WORKER_THREADS
|
||||||
|
value: "2"
|
||||||
|
- name: CONTINUWUITY_SERVER_NAME
|
||||||
|
value: "example.com"
|
||||||
|
- name: CONTINUWUITY_DATABASE_PATH
|
||||||
|
value: "/data/db"
|
||||||
|
- name: CONTINUWUITY_DATABASE_BACKEND
|
||||||
|
value: "rocksdb"
|
||||||
|
- name: CONTINUWUITY_PORT
|
||||||
|
value: "80"
|
||||||
|
- name: CONTINUWUITY_MAX_REQUEST_SIZE
|
||||||
|
value: "20000000"
|
||||||
|
- name: CONTINUWUITY_ALLOW_FEDERATION
|
||||||
|
value: "true"
|
||||||
|
- name: CONTINUWUITY_TRUSTED_SERVERS
|
||||||
|
value: '["matrix.org"]'
|
||||||
|
- name: CONTINUWUITY_ADDRESS
|
||||||
|
value: "0.0.0.0"
|
||||||
|
- name: CONTINUWUITY_ROCKSDB_PARALLELISM_THREADS
|
||||||
|
value: "1"
|
||||||
|
- name: CONTINUWUITY_WELL_KNOWN__SERVER
|
||||||
|
value: "matrix.example.com:443"
|
||||||
|
- name: CONTINUWUITY_WELL_KNOWN__CLIENT
|
||||||
|
value: "https://matrix.example.com"
|
||||||
|
- name: CONTINUWUITY_ALLOW_REGISTRATION
|
||||||
|
value: "false"
|
||||||
|
- name: RUST_LOG
|
||||||
|
value: info
|
||||||
|
readinessProbe:
|
||||||
|
httpGet:
|
||||||
|
path: /_matrix/federation/v1/version
|
||||||
|
port: http
|
||||||
|
periodSeconds: 4
|
||||||
|
failureThreshold: 5
|
||||||
|
resources:
|
||||||
|
# Continuwuity might use quite some RAM :3
|
||||||
|
requests:
|
||||||
|
cpu: "2"
|
||||||
|
memory: "512Mi"
|
||||||
|
limits:
|
||||||
|
cpu: "4"
|
||||||
|
memory: "2048Mi"
|
||||||
|
volumes:
|
||||||
|
- name: data
|
||||||
|
persistentVolumeClaim:
|
||||||
|
claimName: continuwuity-data
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
Apart from manually configuring the containers,
|
||||||
|
[a community-maintained Helm Chart is available here to run
|
||||||
conduwuit on Kubernetes](https://gitlab.cronce.io/charts/conduwuit)
|
conduwuit on Kubernetes](https://gitlab.cronce.io/charts/conduwuit)
|
||||||
|
|
||||||
This should be compatible with Continuwuity, but you will need to change the image reference.
|
This should be compatible with Continuwuity, but you will need to change the image reference.
|
||||||
|
|
|
||||||
|
|
@ -2,7 +2,8 @@
|
||||||
|
|
||||||
Information about developing the project. If you are only interested in using
|
Information about developing the project. If you are only interested in using
|
||||||
it, you can safely ignore this page. If you plan on contributing, see the
|
it, you can safely ignore this page. If you plan on contributing, see the
|
||||||
[contributor's guide](./contributing.mdx) and [code style guide](./code_style.mdx).
|
[contributor's guide](./contributing.mdx) and
|
||||||
|
[code style guide](./code_style.mdx).
|
||||||
|
|
||||||
## Continuwuity project layout
|
## Continuwuity project layout
|
||||||
|
|
||||||
|
|
@ -12,86 +13,98 @@ members are under `src/`. The workspace definition is at the top level / root
|
||||||
`Cargo.toml`.
|
`Cargo.toml`.
|
||||||
|
|
||||||
The crate names are generally self-explanatory:
|
The crate names are generally self-explanatory:
|
||||||
|
|
||||||
- `admin` is the admin room
|
- `admin` is the admin room
|
||||||
- `api` is the HTTP API, Matrix C-S and S-S endpoints, etc
|
- `api` is the HTTP API, Matrix C-S and S-S endpoints, etc
|
||||||
- `core` is core Continuwuity functionality like config loading, error definitions,
|
- `core` is core Continuwuity functionality like config loading, error
|
||||||
global utilities, logging infrastructure, etc
|
definitions, global utilities, logging infrastructure, etc
|
||||||
- `database` is RocksDB methods, helpers, RocksDB config, and general database definitions,
|
- `database` is RocksDB methods, helpers, RocksDB config, and general database
|
||||||
utilities, or functions
|
definitions, utilities, or functions
|
||||||
- `macros` are Continuwuity Rust [macros][macros] like general helper macros, logging
|
- `macros` are Continuwuity Rust [macros][macros] like general helper macros,
|
||||||
and error handling macros, and [syn][syn] and [procedural macros][proc-macro]
|
logging and error handling macros, and [syn][syn] and [procedural
|
||||||
used for admin room commands and others
|
macros][proc-macro] used for admin room commands and others
|
||||||
- `main` is the "primary" sub-crate. This is where the `main()` function lives,
|
- `main` is the "primary" sub-crate. This is where the `main()` function lives,
|
||||||
tokio worker and async initialisation, Sentry initialisation, [clap][clap] init,
|
tokio worker and async initialisation, Sentry initialisation, [clap][clap]
|
||||||
and signal handling. If you are adding new [Rust features][features], they *must*
|
init, and signal handling. If you are adding new [Rust features][features],
|
||||||
go here.
|
they _must_ go here.
|
||||||
- `router` is the webserver and request handling bits, using axum, tower, tower-http,
|
- `router` is the webserver and request handling bits, using axum, tower,
|
||||||
hyper, etc, and the [global server state][state] to access `services`.
|
tower-http, hyper, etc, and the [global server state][state] to access
|
||||||
|
`services`.
|
||||||
- `service` is the high-level database definitions and functions for data,
|
- `service` is the high-level database definitions and functions for data,
|
||||||
outbound/sending code, and other business logic such as media fetching.
|
outbound/sending code, and other business logic such as media fetching.
|
||||||
|
|
||||||
It is highly unlikely you will ever need to add a new workspace member, but
|
It is highly unlikely you will ever need to add a new workspace member, but if
|
||||||
if you truly find yourself needing to, we recommend reaching out to us in
|
you truly find yourself needing to, we recommend reaching out to us in the
|
||||||
the Matrix room for discussions about it beforehand.
|
Matrix room for discussions about it beforehand.
|
||||||
|
|
||||||
The primary inspiration for this design was a part of hot reloadable development,
|
The primary inspiration for this design was a part of hot reloadable development,
|
||||||
to support "Continuwuity as a library" where specific parts can simply be swapped out.
|
to support "Continuwuity as a library" where specific parts can simply be
|
||||||
There is evidence Conduit wanted to go this route too as `axum` is technically an
|
swapped out. There is evidence Conduit wanted to go this route too as `axum` is
|
||||||
optional feature in Conduit, and can be compiled without the binary or axum library
|
technically an optional feature in Conduit, and can be compiled without the
|
||||||
for handling inbound web requests; but it was never completed or worked.
|
binary or axum library for handling inbound web requests; but it was never
|
||||||
|
completed or worked.
|
||||||
|
|
||||||
See the Rust documentation on [Workspaces][workspaces] for general questions
|
See the Rust documentation on [Workspaces][workspaces] for general questions and
|
||||||
and information on Cargo workspaces.
|
information on Cargo workspaces.
|
||||||
|
|
||||||
## Adding compile-time [features][features]
|
## Adding compile-time [features][features]
|
||||||
|
|
||||||
If you'd like to add a compile-time feature, you must first define it in
|
If you'd like to add a compile-time feature, you must first define it in the
|
||||||
the `main` workspace crate located in `src/main/Cargo.toml`. The feature must
|
`main` workspace crate located in `src/main/Cargo.toml`. The feature must enable
|
||||||
enable a feature in the other workspace crate(s) you intend to use it in. Then
|
a feature in the other workspace crate(s) you intend to use it in. Then the said
|
||||||
the said workspace crate(s) must define the feature there in its `Cargo.toml`.
|
workspace crate(s) must define the feature there in its `Cargo.toml`.
|
||||||
|
|
||||||
So, if this is adding a feature to the API such as `woof`, you define the feature
|
So, if this is adding a feature to the API such as `woof`, you define the
|
||||||
in the `api` crate's `Cargo.toml` as `woof = []`. The feature definition in `main`'s
|
feature in the `api` crate's `Cargo.toml` as `woof = []`. The feature definition
|
||||||
`Cargo.toml` will be `woof = ["conduwuit-api/woof"]`.
|
in `main`'s `Cargo.toml` will be `woof = ["conduwuit-api/woof"]`.
|
||||||
|
|
||||||
The rationale for this is due to Rust / Cargo not supporting
|
The rationale for this is due to Rust / Cargo not supporting ["workspace level
|
||||||
["workspace level features"][9], we must make a choice of; either scattering
|
features"][9], we must make a choice of; either scattering features all over the
|
||||||
features all over the workspace crates, making it difficult for anyone to add
|
workspace crates, making it difficult for anyone to add or remove default
|
||||||
or remove default features; or define all the features in one central workspace
|
features; or define all the features in one central workspace crate that
|
||||||
crate that propagate down/up to the other workspace crates. It is a Cargo pitfall,
|
propagate down/up to the other workspace crates. It is a Cargo pitfall, and we'd
|
||||||
and we'd like to see better developer UX in Rust's Workspaces.
|
like to see better developer UX in Rust's Workspaces.
|
||||||
|
|
||||||
Additionally, the definition of one single place makes "feature collection" in our
|
Additionally, the definition of one single place makes "feature collection" in
|
||||||
Nix flake a million times easier instead of collecting and deduping them all from
|
our Nix flake a million times easier instead of collecting and deduping them all
|
||||||
searching in all the workspace crates' `Cargo.toml`s. Though we wouldn't need to
|
from searching in all the workspace crates' `Cargo.toml`s. Though we wouldn't
|
||||||
do this if Rust supported workspace-level features to begin with.
|
need to do this if Rust supported workspace-level features to begin with.
|
||||||
|
|
||||||
## List of forked dependencies
|
## List of forked dependencies
|
||||||
|
|
||||||
During Continuwuity (and prior projects) development, we have had to fork some dependencies to support our use-cases.
|
During Continuwuity (and prior projects) development, we have had to fork some
|
||||||
These forks exist for various reasons including features that upstream projects won't accept,
|
dependencies to support our use-cases. These forks exist for various reasons
|
||||||
faster-paced development, Continuwuity-specific usecases, or lack of time to upstream changes.
|
including features that upstream projects won't accept, faster-paced
|
||||||
|
development, Continuwuity-specific usecases, or lack of time to upstream
|
||||||
|
changes.
|
||||||
|
|
||||||
All forked dependencies are maintained under the [continuwuation organization on Forgejo](https://forgejo.ellis.link/continuwuation):
|
All forked dependencies are maintained under the
|
||||||
|
[continuwuation organization on Forgejo](https://forgejo.ellis.link/continuwuation):
|
||||||
|
|
||||||
- [ruwuma][continuwuation-ruwuma] - Fork of [ruma/ruma][ruma] with various performance improvements, more features and better client/server interop
|
- [ruwuma][continuwuation-ruwuma] - Fork of [ruma/ruma][ruma] with various
|
||||||
- [rocksdb][continuwuation-rocksdb] - Fork of [facebook/rocksdb][rocksdb] via [`@zaidoon1`][8] with liburing build fixes and GCC debug build fixes
|
performance improvements, more features and better client/server interop
|
||||||
- [jemallocator][continuwuation-jemallocator] - Fork of [tikv/jemallocator][jemallocator] fixing musl builds, suspicious code,
|
- [rocksdb][continuwuation-rocksdb] - Fork of [facebook/rocksdb][rocksdb] via
|
||||||
and adding support for redzones in Valgrind
|
[`@zaidoon1`][8] with liburing build fixes and GCC debug build fixes
|
||||||
- [rustyline-async][continuwuation-rustyline-async] - Fork of [zyansheep/rustyline-async][rustyline-async] with tab completion callback
|
- [jemallocator][continuwuation-jemallocator] - Fork of
|
||||||
and `CTRL+\` signal quit event for Continuwuity console CLI
|
[tikv/jemallocator][jemallocator] fixing musl builds, suspicious code, and
|
||||||
- [rust-rocksdb][continuwuation-rust-rocksdb] - Fork of [rust-rocksdb/rust-rocksdb][rust-rocksdb] fixing musl build issues,
|
adding support for redzones in Valgrind
|
||||||
removing unnecessary `gtest` include, and using our RocksDB and jemallocator forks
|
- [rustyline-async][continuwuation-rustyline-async] - Fork of
|
||||||
- [tracing][continuwuation-tracing] - Fork of [tokio-rs/tracing][tracing] implementing `Clone` for `EnvFilter` to
|
[zyansheep/rustyline-async][rustyline-async] with tab completion callback and
|
||||||
support dynamically changing tracing environments
|
`CTRL+\` signal quit event for Continuwuity console CLI
|
||||||
|
- [rust-rocksdb][continuwuation-rust-rocksdb] - Fork of
|
||||||
|
[rust-rocksdb/rust-rocksdb][rust-rocksdb] fixing musl build issues, removing
|
||||||
|
unnecessary `gtest` include, and using our RocksDB and jemallocator forks
|
||||||
|
- [tracing][continuwuation-tracing] - Fork of [tokio-rs/tracing][tracing]
|
||||||
|
implementing `Clone` for `EnvFilter` to support dynamically changing tracing
|
||||||
|
environments
|
||||||
|
|
||||||
## Debugging with `tokio-console`
|
## Debugging with `tokio-console`
|
||||||
|
|
||||||
[`tokio-console`][7] can be a useful tool for debugging and profiling. To make a
|
[`tokio-console`][7] can be a useful tool for debugging and profiling. To make a
|
||||||
`tokio-console`-enabled build of Continuwuity, enable the `tokio_console` feature,
|
`tokio-console`-enabled build of Continuwuity, enable the `tokio_console`
|
||||||
disable the default `release_max_log_level` feature, and set the `--cfg
|
feature, disable the default `release_max_log_level` feature, and set the
|
||||||
tokio_unstable` flag to enable experimental tokio APIs. A build might look like
|
`--cfg tokio_unstable` flag to enable experimental tokio APIs. A build might
|
||||||
this:
|
look like this:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
RUSTFLAGS="--cfg tokio_unstable" cargo +nightly build \
|
RUSTFLAGS="--cfg tokio_unstable" cargo +nightly build \
|
||||||
|
|
@ -100,34 +113,84 @@ RUSTFLAGS="--cfg tokio_unstable" cargo +nightly build \
|
||||||
--features=systemd,element_hacks,gzip_compression,brotli_compression,zstd_compression,tokio_console
|
--features=systemd,element_hacks,gzip_compression,brotli_compression,zstd_compression,tokio_console
|
||||||
```
|
```
|
||||||
|
|
||||||
You will also need to enable the `tokio_console` config option in Continuwuity when
|
You will also need to enable the `tokio_console` config option in Continuwuity
|
||||||
starting it. This was due to tokio-console causing a gradual memory leak/usage
|
when starting it. This was due to tokio-console causing a gradual memory
|
||||||
if left enabled.
|
leak/usage if left enabled.
|
||||||
|
|
||||||
## Building Docker Images
|
## Building Docker Images
|
||||||
|
|
||||||
To build a Docker image for Continuwuity, use the standard Docker build command:
|
Official Continuwuity images are built using **Docker Buildx** and the
|
||||||
|
Dockerfile found at [`docker/Dockerfile`][dockerfile-path].
|
||||||
|
|
||||||
|
The images are compatible with Docker and other container runtimes like Podman
|
||||||
|
or containerd.
|
||||||
|
|
||||||
|
The images _do not contain a shell_. They contain only the Continuwuity binary,
|
||||||
|
required libraries, TLS certificates, and metadata.
|
||||||
|
|
||||||
|
<details>
|
||||||
|
<summary>Click to view the Dockerfile</summary>
|
||||||
|
|
||||||
|
You can also
|
||||||
|
|
||||||
|
<a
|
||||||
|
href="https://forgejo.ellis.link/continuwuation/continuwuation/src/branch/main/docker/Dockerfile"
|
||||||
|
target="_blank"
|
||||||
|
>
|
||||||
|
view the Dockerfile on Forgejo
|
||||||
|
</a>
|
||||||
|
.
|
||||||
|
|
||||||
|
```dockerfile file="../../docker/Dockerfile"
|
||||||
|
|
||||||
```bash
|
|
||||||
docker build -f docker/Dockerfile .
|
|
||||||
```
|
```
|
||||||
|
|
||||||
The image can be cross-compiled for different architectures.
|
</details>
|
||||||
|
|
||||||
|
### Building Locally
|
||||||
|
|
||||||
|
To build an image locally using Docker Buildx:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Build for the current platform and load into the local Docker daemon
|
||||||
|
docker buildx build --load --tag continuwuity:latest -f docker/Dockerfile .
|
||||||
|
|
||||||
|
# Example: Build for specific platforms and push to a registry
|
||||||
|
# docker buildx build --platform linux/amd64,linux/arm64 --tag registry.io/org/continuwuity:latest -f docker/Dockerfile . --push
|
||||||
|
|
||||||
|
# Example: Build binary optimised for the current CPU (standard release profile)
|
||||||
|
# docker buildx build --load \
|
||||||
|
# --tag continuwuity:latest \
|
||||||
|
# --build-arg TARGET_CPU=native \
|
||||||
|
# -f docker/Dockerfile .
|
||||||
|
|
||||||
|
# Example: Build maxperf variant (release-max-perf profile with LTO)
|
||||||
|
# docker buildx build --load \
|
||||||
|
# --tag continuwuity:latest-maxperf \
|
||||||
|
# --build-arg TARGET_CPU=native \
|
||||||
|
# --build-arg RUST_PROFILE=release-max-perf \
|
||||||
|
# -f docker/Dockerfile .
|
||||||
|
```
|
||||||
|
|
||||||
|
Refer to the Docker Buildx documentation for more advanced build options.
|
||||||
|
|
||||||
|
[dockerfile-path]:
|
||||||
|
https://forgejo.ellis.link/continuwuation/continuwuation/src/branch/main/docker/Dockerfile
|
||||||
[continuwuation-ruwuma]: https://forgejo.ellis.link/continuwuation/ruwuma
|
[continuwuation-ruwuma]: https://forgejo.ellis.link/continuwuation/ruwuma
|
||||||
[continuwuation-rocksdb]: https://forgejo.ellis.link/continuwuation/rocksdb
|
[continuwuation-rocksdb]: https://forgejo.ellis.link/continuwuation/rocksdb
|
||||||
[continuwuation-jemallocator]: https://forgejo.ellis.link/continuwuation/jemallocator
|
[continuwuation-jemallocator]:
|
||||||
[continuwuation-rustyline-async]: https://forgejo.ellis.link/continuwuation/rustyline-async
|
https://forgejo.ellis.link/continuwuation/jemallocator
|
||||||
[continuwuation-rust-rocksdb]: https://forgejo.ellis.link/continuwuation/rust-rocksdb
|
[continuwuation-rustyline-async]:
|
||||||
|
https://forgejo.ellis.link/continuwuation/rustyline-async
|
||||||
|
[continuwuation-rust-rocksdb]:
|
||||||
|
https://forgejo.ellis.link/continuwuation/rust-rocksdb
|
||||||
[continuwuation-tracing]: https://forgejo.ellis.link/continuwuation/tracing
|
[continuwuation-tracing]: https://forgejo.ellis.link/continuwuation/tracing
|
||||||
|
|
||||||
[ruma]: https://github.com/ruma/ruma/
|
[ruma]: https://github.com/ruma/ruma/
|
||||||
[rocksdb]: https://github.com/facebook/rocksdb/
|
[rocksdb]: https://github.com/facebook/rocksdb/
|
||||||
[jemallocator]: https://github.com/tikv/jemallocator/
|
[jemallocator]: https://github.com/tikv/jemallocator/
|
||||||
[rustyline-async]: https://github.com/zyansheep/rustyline-async/
|
[rustyline-async]: https://github.com/zyansheep/rustyline-async/
|
||||||
[rust-rocksdb]: https://github.com/rust-rocksdb/rust-rocksdb/
|
[rust-rocksdb]: https://github.com/rust-rocksdb/rust-rocksdb/
|
||||||
[tracing]: https://github.com/tokio-rs/tracing/
|
[tracing]: https://github.com/tokio-rs/tracing/
|
||||||
|
|
||||||
[7]: https://docs.rs/tokio-console/latest/tokio_console/
|
[7]: https://docs.rs/tokio-console/latest/tokio_console/
|
||||||
[8]: https://github.com/zaidoon1/
|
[8]: https://github.com/zaidoon1/
|
||||||
[9]: https://github.com/rust-lang/cargo/issues/12162
|
[9]: https://github.com/rust-lang/cargo/issues/12162
|
||||||
|
|
|
||||||
|
|
@ -51,7 +51,13 @@ continuwuity aims to:
|
||||||
|
|
||||||
Check out the [documentation](https://continuwuity.org) for installation instructions.
|
Check out the [documentation](https://continuwuity.org) for installation instructions.
|
||||||
|
|
||||||
There are currently no open registration continuwuity instances available.
|
If you want to try it out as a user, we have some partnered homeservers you can use:
|
||||||
|
* You can head over to [https://federated.nexus](https://federated.nexus/) in your browser.
|
||||||
|
* Hit the `Apply to Join` button. Once your request has been accepted, you will receive an email with your username and password.
|
||||||
|
* Head over to [https://app.federated.nexus](https://app.federated.nexus/) and you can sign in there, or use any other matrix chat client you wish elsewhere.
|
||||||
|
* Your username for Matrix will be in the form of `@username:federated.nexus`; however, you can simply use the `username` part to log in. Your password is your password.
|
||||||
|
|
||||||
|
* There's also [https://continuwuity.rocks/](https://continuwuity.rocks/). You can register a new account using Cinny via [this convenient link](https://app.cinny.in/register/continuwuity.rocks), or you can use Element or another matrix client *that supports registration*.
|
||||||
|
|
||||||
## What are we working on?
|
## What are we working on?
|
||||||
|
|
||||||
|
|
|
||||||
226
docs/plans/2026-03-17-space-permission-cascading-design.md
Normal file
226
docs/plans/2026-03-17-space-permission-cascading-design.md
Normal file
|
|
@ -0,0 +1,226 @@
|
||||||
|
# Space Permission Cascading — Design Document
|
||||||
|
|
||||||
|
**Date:** 2026-03-17
|
||||||
|
**Status:** Approved
|
||||||
|
|
||||||
|
## Overview
|
||||||
|
|
||||||
|
Server-side feature that allows user rights in a Space to cascade down to its
|
||||||
|
direct child rooms. Includes power level cascading and role-based room access
|
||||||
|
control. Enabled via a server-wide configuration flag, disabled by default.
|
||||||
|
|
||||||
|
## Requirements
|
||||||
|
|
||||||
|
1. Power levels defined in a Space cascade to all direct child rooms (Space
|
||||||
|
always wins over per-room overrides).
|
||||||
|
2. Admins can define custom roles in a Space and assign them to users.
|
||||||
|
3. Child rooms can require one or more roles for access.
|
||||||
|
4. Enforcement is continuous — role revocation auto-kicks users from rooms they
|
||||||
|
no longer qualify for.
|
||||||
|
5. Users are auto-joined to all qualifying child rooms when they join a Space or
|
||||||
|
receive a new role.
|
||||||
|
6. Cascading applies to direct parent Space only; no nested cascade through
|
||||||
|
sub-spaces.
|
||||||
|
7. Feature is toggled by a single server-wide config flag
|
||||||
|
(`space_permission_cascading`), off by default.
|
||||||
|
|
||||||
|
## Configuration
|
||||||
|
|
||||||
|
```toml
|
||||||
|
# conduwuit-example.toml
|
||||||
|
|
||||||
|
# Enable space permission cascading (power levels and role-based access).
|
||||||
|
# When enabled, power levels cascade from Spaces to child rooms and rooms
|
||||||
|
# can require roles for access. Applies to all Spaces on this server.
|
||||||
|
# Default: false
|
||||||
|
space_permission_cascading = false
|
||||||
|
```
|
||||||
|
|
||||||
|
## Custom State Events
|
||||||
|
|
||||||
|
All events live in the Space room.
|
||||||
|
|
||||||
|
### `m.space.roles` (state key: `""`)
|
||||||
|
|
||||||
|
Defines the available roles for the Space. Two default roles (`admin` and `mod`)
|
||||||
|
are created automatically when a Space is first encountered with the feature
|
||||||
|
enabled.
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"roles": {
|
||||||
|
"admin": {
|
||||||
|
"description": "Space administrator",
|
||||||
|
"power_level": 100
|
||||||
|
},
|
||||||
|
"mod": {
|
||||||
|
"description": "Space moderator",
|
||||||
|
"power_level": 50
|
||||||
|
},
|
||||||
|
"nsfw": {
|
||||||
|
"description": "Access to NSFW content"
|
||||||
|
},
|
||||||
|
"vip": {
|
||||||
|
"description": "VIP member"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
- `description` (string, required): Human-readable description.
|
||||||
|
- `power_level` (integer, optional): If present, users with this role receive
|
||||||
|
this power level in all child rooms. When a user holds multiple roles with
|
||||||
|
power levels, the highest value wins.
|
||||||
|
|
||||||
|
### `m.space.role.member` (state key: user ID)
|
||||||
|
|
||||||
|
Assigns roles to a user within the Space.
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"roles": ["nsfw", "vip"]
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### `m.space.role.room` (state key: room ID)
|
||||||
|
|
||||||
|
Declares which roles a child room requires. A user must hold **all** listed
|
||||||
|
roles to access the room.
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"required_roles": ["nsfw"]
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
## Enforcement Rules
|
||||||
|
|
||||||
|
All enforcement is skipped when `space_permission_cascading = false`.
|
||||||
|
|
||||||
|
### 1. Join gating
|
||||||
|
|
||||||
|
When a user attempts to join a room that is a direct child of a Space:
|
||||||
|
|
||||||
|
- Look up the room's `m.space.role.room` event in the parent Space.
|
||||||
|
- If the room has `required_roles`, check the user's `m.space.role.member`.
|
||||||
|
- Reject the join if the user is missing any required role.
|
||||||
|
|
||||||
|
### 2. Power level override
|
||||||
|
|
||||||
|
For every user in a child room of a Space:
|
||||||
|
|
||||||
|
- Look up their roles via `m.space.role.member` in the parent Space.
|
||||||
|
- For each role that has a `power_level`, take the highest value.
|
||||||
|
- Override the user's power level in the child room's `m.room.power_levels`.
|
||||||
|
- Reject attempts to manually set per-room power levels that conflict with
|
||||||
|
Space-granted levels.
|
||||||
|
|
||||||
|
### 3. Role revocation
|
||||||
|
|
||||||
|
When an `m.space.role.member` event is updated and a role is removed:
|
||||||
|
|
||||||
|
- Identify all child rooms that require the removed role.
|
||||||
|
- Auto-kick the user from rooms they no longer qualify for.
|
||||||
|
- Recalculate and update the user's power level in all child rooms.
|
||||||
|
|
||||||
|
### 4. Room requirement change
|
||||||
|
|
||||||
|
When an `m.space.role.room` event is updated with new requirements:
|
||||||
|
|
||||||
|
- Check all current members of the room.
|
||||||
|
- Auto-kick members who do not hold all newly required roles.
|
||||||
|
|
||||||
|
### 5. Auto-join on role grant
|
||||||
|
|
||||||
|
When an `m.space.role.member` event is updated and a role is added:
|
||||||
|
|
||||||
|
- Find all child rooms where the user now meets all required roles.
|
||||||
|
- Auto-join the user to qualifying rooms they are not already in.
|
||||||
|
|
||||||
|
This also applies when a user first joins the Space — they are auto-joined to
|
||||||
|
all child rooms they qualify for. Rooms with no role requirements auto-join all
|
||||||
|
Space members.
|
||||||
|
|
||||||
|
### 6. New child room
|
||||||
|
|
||||||
|
When a new `m.space.child` event is added to a Space:
|
||||||
|
|
||||||
|
- Auto-join all qualifying Space members to the new child room.
|
||||||
|
|
||||||
|
## Caching & Indexing
|
||||||
|
|
||||||
|
The source of truth is always the state events. The server maintains an
|
||||||
|
in-memory index for fast enforcement lookups, following the same patterns as the
|
||||||
|
existing `roomid_spacehierarchy_cache`.
|
||||||
|
|
||||||
|
### Index structures
|
||||||
|
|
||||||
|
| Index | Source event |
|
||||||
|
|------------------------------|------------------------|
|
||||||
|
| Space → roles defined | `m.space.roles` |
|
||||||
|
| Space → user → roles | `m.space.role.member` |
|
||||||
|
| Space → room → required roles| `m.space.role.room` |
|
||||||
|
| Room → parent Space | `m.space.child` (reverse lookup) |
|
||||||
|
|
||||||
|
The Space → child rooms mapping already exists.
|
||||||
|
|
||||||
|
### Cache invalidation triggers
|
||||||
|
|
||||||
|
| Event changed | Action |
|
||||||
|
|----------------------------|-----------------------------------------------------|
|
||||||
|
| `m.space.roles` | Refresh role definitions, revalidate all members |
|
||||||
|
| `m.space.role.member` | Refresh user's roles, trigger auto-join/kick |
|
||||||
|
| `m.space.role.room` | Refresh room requirements, trigger auto-join/kick |
|
||||||
|
| `m.space.child` added | Index new child, auto-join qualifying members |
|
||||||
|
| `m.space.child` removed | Remove from index (no auto-kick) |
|
||||||
|
| Server startup | Full rebuild from state events |
|
||||||
|
|
||||||
|
## Admin Room Commands
|
||||||
|
|
||||||
|
Roles are managed via the existing admin room interface, which sends the
|
||||||
|
appropriate state events under the hood and triggers enforcement.
|
||||||
|
|
||||||
|
```
|
||||||
|
!admin space roles list <space>
|
||||||
|
!admin space roles add <space> <role_name> [description] [power_level]
|
||||||
|
!admin space roles remove <space> <role_name>
|
||||||
|
!admin space roles assign <space> <user_id> <role_name>
|
||||||
|
!admin space roles revoke <space> <user_id> <role_name>
|
||||||
|
!admin space roles require <space> <room_id> <role_name>
|
||||||
|
!admin space roles unrequire <space> <room_id> <role_name>
|
||||||
|
!admin space roles user <space> <user_id>
|
||||||
|
!admin space roles room <space> <room_id>
|
||||||
|
```
|
||||||
|
|
||||||
|
## Architecture
|
||||||
|
|
||||||
|
**Approach:** Hybrid — state events for definition, database cache for
|
||||||
|
enforcement.
|
||||||
|
|
||||||
|
- State events are the source of truth and federate normally.
|
||||||
|
- The server maintains an in-memory cache/index for fast enforcement.
|
||||||
|
- Cache is invalidated on relevant state event changes and fully rebuilt on
|
||||||
|
startup.
|
||||||
|
- All enforcement hooks (join gating, PL override, auto-join, auto-kick) check
|
||||||
|
the feature flag first and no-op when disabled.
|
||||||
|
- Existing clients can manage roles via Developer Tools (custom state events).
|
||||||
|
The admin room commands provide a user-friendly interface.
|
||||||
|
|
||||||
|
## Scope
|
||||||
|
|
||||||
|
### In scope
|
||||||
|
|
||||||
|
- Server-wide feature flag
|
||||||
|
- Custom state events for role definition, assignment, and room requirements
|
||||||
|
- Power level cascading (Space always wins)
|
||||||
|
- Continuous enforcement (auto-join, auto-kick)
|
||||||
|
- Admin room commands
|
||||||
|
- In-memory caching with invalidation
|
||||||
|
- Default `admin` (PL 100) and `mod` (PL 50) roles
|
||||||
|
|
||||||
|
### Out of scope
|
||||||
|
|
||||||
|
- Client-side UI for role management
|
||||||
|
- Nested cascade through sub-spaces
|
||||||
|
- Per-space opt-in/opt-out (it is server-wide)
|
||||||
|
- Federation-specific logic beyond normal state event replication
|
||||||
1206
docs/plans/2026-03-17-space-permission-cascading.md
Normal file
1206
docs/plans/2026-03-17-space-permission-cascading.md
Normal file
File diff suppressed because it is too large
Load diff
|
|
@ -6,10 +6,10 @@
|
||||||
"message": "Welcome to Continuwuity! Important announcements about the project will appear here."
|
"message": "Welcome to Continuwuity! Important announcements about the project will appear here."
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"id": 9,
|
"id": 10,
|
||||||
"mention_room": false,
|
"mention_room": false,
|
||||||
"date": "2026-02-09",
|
"date": "2026-03-03",
|
||||||
"message": "Yesterday we released [v0.5.4](https://forgejo.ellis.link/continuwuation/continuwuity/releases/tag/v0.5.4). Bugfixes, performance improvements and more moderation features! There's also a security fix, so please update as soon as possible. Don't forget to join [our announcements channel](https://matrix.to/#/!jIdNjSM5X-V5JVx2h2kAhUZIIQ08GyzPL55NFZAH1vM/%2489TY9CqRg4-ff1MGo3Ulc5r5X4pakfdzT-99RD8Docc?via=ellis.link&via=explodie.org&via=matrix.org) to get important information sooner <3 "
|
"message": "We've just released [v0.5.6](https://forgejo.ellis.link/continuwuation/continuwuity/releases/tag/v0.5.6), which contains a few security improvements - plus significant reliability and performance improvements. Please update as soon as possible. \n\nWe released [v0.5.5](https://forgejo.ellis.link/continuwuation/continuwuity/releases/tag/v0.5.5) two weeks ago, but it skipped your admin room straight to [our announcements channel](https://matrix.to/#/!jIdNjSM5X-V5JVx2h2kAhUZIIQ08GyzPL55NFZAH1vM?via=ellis.link&via=gingershaped.computer&via=matrix.org). Make sure you're there to get important information as soon as we announce it! [Our space](https://matrix.to/#/!8cR4g-i9ucof69E4JHNg9LbPVkGprHb3SzcrGBDDJgk?via=continuwuity.org&via=ellis.link&via=matrix.org) has also gained a bunch of new and interesting rooms - be there or be square."
|
||||||
}
|
}
|
||||||
]
|
]
|
||||||
}
|
}
|
||||||
|
|
|
||||||
|
|
@ -1 +1 @@
|
||||||
{"m.homeserver":{"base_url": "https://matrix.continuwuity.org"},"org.matrix.msc3575.proxy":{"url": "https://matrix.continuwuity.org"},"org.matrix.msc4143.rtc_foci":[{"type":"livekit","livekit_service_url":"https://livekit.ellis.link"}]}
|
{"m.homeserver":{"base_url": "https://matrix.continuwuity.org"},"org.matrix.msc4143.rtc_foci":[{"type":"livekit","livekit_service_url":"https://livekit.ellis.link"}]}
|
||||||
|
|
|
||||||
|
|
@ -4,6 +4,11 @@
|
||||||
"name": "config",
|
"name": "config",
|
||||||
"label": "Configuration"
|
"label": "Configuration"
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
"type": "file",
|
||||||
|
"name": "environment-variables",
|
||||||
|
"label": "Environment Variables"
|
||||||
|
},
|
||||||
{
|
{
|
||||||
"type": "file",
|
"type": "file",
|
||||||
"name": "admin",
|
"name": "admin",
|
||||||
|
|
|
||||||
|
|
@ -27,7 +27,7 @@ default.
|
||||||
* Delete all remote and local media from 3 days ago, up until now:
|
* Delete all remote and local media from 3 days ago, up until now:
|
||||||
|
|
||||||
`!admin media delete-past-remote-media -a 3d
|
`!admin media delete-past-remote-media -a 3d
|
||||||
-yes-i-want-to-delete-local-media`
|
--yes-i-want-to-delete-local-media`
|
||||||
|
|
||||||
## `!admin media delete-all-from-user`
|
## `!admin media delete-all-from-user`
|
||||||
|
|
||||||
|
|
@ -36,3 +36,7 @@ Deletes all the local media from a local user on our server. This will always ig
|
||||||
## `!admin media delete-all-from-server`
|
## `!admin media delete-all-from-server`
|
||||||
|
|
||||||
Deletes all remote media from the specified remote server. This will always ignore errors by default
|
Deletes all remote media from the specified remote server. This will always ignore errors by default
|
||||||
|
|
||||||
|
## `!admin media delete-url-preview`
|
||||||
|
|
||||||
|
Deletes a cached URL preview, forcing it to be re-fetched. Use --all to purge all cached URL previews
|
||||||
|
|
|
||||||
281
docs/reference/environment-variables.mdx
Normal file
281
docs/reference/environment-variables.mdx
Normal file
|
|
@ -0,0 +1,281 @@
|
||||||
|
# Environment Variables
|
||||||
|
|
||||||
|
Continuwuity can be configured entirely through environment variables, making it
|
||||||
|
ideal for containerised deployments and infrastructure-as-code scenarios.
|
||||||
|
|
||||||
|
This is a convenience reference and may not be exhaustive. The
|
||||||
|
[Configuration Reference](./config.mdx) is the primary source for all
|
||||||
|
configuration options.
|
||||||
|
|
||||||
|
## Prefix System
|
||||||
|
|
||||||
|
Continuwuity supports three environment variable prefixes for backwards
|
||||||
|
compatibility:
|
||||||
|
|
||||||
|
- `CONTINUWUITY_*` (current, recommended)
|
||||||
|
- `CONDUWUIT_*` (compatibility)
|
||||||
|
- `CONDUIT_*` (legacy)
|
||||||
|
|
||||||
|
All three prefixes work identically. Use double underscores (`__`) to represent
|
||||||
|
nested configuration sections from the TOML config.
|
||||||
|
|
||||||
|
**Examples:**
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Simple top-level config
|
||||||
|
CONTINUWUITY_SERVER_NAME="matrix.example.com"
|
||||||
|
CONTINUWUITY_PORT="8008"
|
||||||
|
|
||||||
|
# Nested config sections use double underscores
|
||||||
|
# This maps to [database] section in TOML
|
||||||
|
CONTINUWUITY_DATABASE__PATH="/var/lib/continuwuity"
|
||||||
|
|
||||||
|
# This maps to [tls] section in TOML
|
||||||
|
CONTINUWUITY_TLS__CERTS="/path/to/cert.pem"
|
||||||
|
```
|
||||||
|
|
||||||
|
## Configuration File Override
|
||||||
|
|
||||||
|
You can specify a custom configuration file path:
|
||||||
|
|
||||||
|
- `CONTINUWUITY_CONFIG` - Path to continuwuity.toml (current)
|
||||||
|
- `CONDUWUIT_CONFIG` - Path to config file (compatibility)
|
||||||
|
- `CONDUIT_CONFIG` - Path to config file (legacy)
|
||||||
|
|
||||||
|
## Essential Variables
|
||||||
|
|
||||||
|
These are the minimum variables needed for a working deployment:
|
||||||
|
|
||||||
|
| Variable | Description | Default |
|
||||||
|
| ---------------------------- | ---------------------------------- | ---------------------- |
|
||||||
|
| `CONTINUWUITY_SERVER_NAME` | Your Matrix server's domain name | Required |
|
||||||
|
| `CONTINUWUITY_DATABASE_PATH` | Path to RocksDB database directory | `/var/lib/conduwuit` |
|
||||||
|
| `CONTINUWUITY_ADDRESS` | IP address to bind to | `["127.0.0.1", "::1"]` |
|
||||||
|
| `CONTINUWUITY_PORT` | Port to listen on | `8008` |
|
||||||
|
|
||||||
|
## Network Configuration
|
||||||
|
|
||||||
|
| Variable | Description | Default |
|
||||||
|
| -------------------------------- | ----------------------------------------------- | ---------------------- |
|
||||||
|
| `CONTINUWUITY_ADDRESS` | Bind address (use `0.0.0.0` for all interfaces) | `["127.0.0.1", "::1"]` |
|
||||||
|
| `CONTINUWUITY_PORT` | HTTP port | `8008` |
|
||||||
|
| `CONTINUWUITY_UNIX_SOCKET_PATH` | UNIX socket path (alternative to TCP) | - |
|
||||||
|
| `CONTINUWUITY_UNIX_SOCKET_PERMS` | Socket permissions (octal) | `660` |
|
||||||
|
|
||||||
|
## Database Configuration
|
||||||
|
|
||||||
|
| Variable | Description | Default |
|
||||||
|
| ------------------------------------------ | --------------------------- | -------------------- |
|
||||||
|
| `CONTINUWUITY_DATABASE_PATH` | RocksDB data directory | `/var/lib/conduwuit` |
|
||||||
|
| `CONTINUWUITY_DATABASE_BACKUP_PATH` | Backup directory | - |
|
||||||
|
| `CONTINUWUITY_DATABASE_BACKUPS_TO_KEEP` | Number of backups to retain | `1` |
|
||||||
|
| `CONTINUWUITY_DB_CACHE_CAPACITY_MB` | Database read cache (MB) | - |
|
||||||
|
| `CONTINUWUITY_DB_WRITE_BUFFER_CAPACITY_MB` | Write cache (MB) | - |
|
||||||
|
|
||||||
|
## Cache Configuration
|
||||||
|
|
||||||
|
| Variable | Description |
|
||||||
|
| ---------------------------------------- | ------------------------ |
|
||||||
|
| `CONTINUWUITY_CACHE_CAPACITY_MODIFIER` | LRU cache multiplier |
|
||||||
|
| `CONTINUWUITY_PDU_CACHE_CAPACITY` | PDU cache entries |
|
||||||
|
| `CONTINUWUITY_AUTH_CHAIN_CACHE_CAPACITY` | Auth chain cache entries |
|
||||||
|
|
||||||
|
## DNS Configuration
|
||||||
|
|
||||||
|
Configure DNS resolution behaviour for federation and external requests.
|
||||||
|
|
||||||
|
| Variable | Description | Default |
|
||||||
|
| ------------------------------------ | ---------------------------- | -------- |
|
||||||
|
| `CONTINUWUITY_DNS_CACHE_ENTRIES` | Max DNS cache entries | `32768` |
|
||||||
|
| `CONTINUWUITY_DNS_MIN_TTL` | Minimum cache TTL (seconds) | `10800` |
|
||||||
|
| `CONTINUWUITY_DNS_MIN_TTL_NXDOMAIN` | NXDOMAIN cache TTL (seconds) | `259200` |
|
||||||
|
| `CONTINUWUITY_DNS_ATTEMPTS` | Retry attempts | - |
|
||||||
|
| `CONTINUWUITY_DNS_TIMEOUT` | Query timeout (seconds) | - |
|
||||||
|
| `CONTINUWUITY_DNS_TCP_FALLBACK` | Allow TCP fallback | - |
|
||||||
|
| `CONTINUWUITY_QUERY_ALL_NAMESERVERS` | Query all nameservers | - |
|
||||||
|
| `CONTINUWUITY_QUERY_OVER_TCP_ONLY` | TCP-only queries | - |
|
||||||
|
|
||||||
|
## Request Configuration
|
||||||
|
|
||||||
|
| Variable | Description |
|
||||||
|
| ------------------------------------ | ----------------------------- |
|
||||||
|
| `CONTINUWUITY_MAX_REQUEST_SIZE` | Max HTTP request size (bytes) |
|
||||||
|
| `CONTINUWUITY_REQUEST_CONN_TIMEOUT` | Connection timeout (seconds) |
|
||||||
|
| `CONTINUWUITY_REQUEST_TIMEOUT` | Overall request timeout |
|
||||||
|
| `CONTINUWUITY_REQUEST_TOTAL_TIMEOUT` | Total timeout |
|
||||||
|
| `CONTINUWUITY_REQUEST_IDLE_TIMEOUT` | Idle timeout |
|
||||||
|
| `CONTINUWUITY_REQUEST_IDLE_PER_HOST` | Idle connections per host |
|
||||||
|
|
||||||
|
## Federation Configuration
|
||||||
|
|
||||||
|
Control how your server federates with other Matrix servers.
|
||||||
|
|
||||||
|
| Variable | Description | Default |
|
||||||
|
| ---------------------------------------------- | ----------------------------- | ------- |
|
||||||
|
| `CONTINUWUITY_ALLOW_FEDERATION` | Enable federation | `true` |
|
||||||
|
| `CONTINUWUITY_FEDERATION_LOOPBACK` | Allow loopback federation | - |
|
||||||
|
| `CONTINUWUITY_FEDERATION_CONN_TIMEOUT` | Connection timeout | - |
|
||||||
|
| `CONTINUWUITY_FEDERATION_TIMEOUT` | Request timeout | - |
|
||||||
|
| `CONTINUWUITY_FEDERATION_IDLE_TIMEOUT` | Idle timeout | - |
|
||||||
|
| `CONTINUWUITY_FEDERATION_IDLE_PER_HOST` | Idle connections per host | - |
|
||||||
|
| `CONTINUWUITY_TRUSTED_SERVERS` | JSON array of trusted servers | - |
|
||||||
|
| `CONTINUWUITY_QUERY_TRUSTED_KEY_SERVERS_FIRST` | Query trusted first | - |
|
||||||
|
| `CONTINUWUITY_ONLY_QUERY_TRUSTED_KEY_SERVERS` | Only query trusted | - |
|
||||||
|
|
||||||
|
**Example:**
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Trust matrix.org for key verification
|
||||||
|
CONTINUWUITY_TRUSTED_SERVERS='["matrix.org"]'
|
||||||
|
```
|
||||||
|
|
||||||
|
## Registration & User Configuration
|
||||||
|
|
||||||
|
Control user registration and account creation behaviour.
|
||||||
|
|
||||||
|
| Variable | Description | Default |
|
||||||
|
| ------------------------------------------ | --------------------- | ------- |
|
||||||
|
| `CONTINUWUITY_ALLOW_REGISTRATION` | Enable registration | `true` |
|
||||||
|
| `CONTINUWUITY_REGISTRATION_TOKEN` | Token requirement | - |
|
||||||
|
| `CONTINUWUITY_SUSPEND_ON_REGISTER` | Suspend new accounts | - |
|
||||||
|
| `CONTINUWUITY_NEW_USER_DISPLAYNAME_SUFFIX` | Display name suffix | 🏳️⚧️ |
|
||||||
|
| `CONTINUWUITY_RECAPTCHA_SITE_KEY` | reCAPTCHA site key | - |
|
||||||
|
| `CONTINUWUITY_RECAPTCHA_PRIVATE_SITE_KEY` | reCAPTCHA private key | - |
|
||||||
|
|
||||||
|
**Example:**
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Disable open registration
|
||||||
|
CONTINUWUITY_ALLOW_REGISTRATION="false"
|
||||||
|
|
||||||
|
# Require a registration token
|
||||||
|
CONTINUWUITY_REGISTRATION_TOKEN="your_secret_token_here"
|
||||||
|
```
|
||||||
|
|
||||||
|
## Feature Configuration
|
||||||
|
|
||||||
|
| Variable | Description | Default |
|
||||||
|
| ---------------------------------------------------------- | -------------------------- | ------- |
|
||||||
|
| `CONTINUWUITY_ALLOW_ENCRYPTION` | Enable E2EE | `true` |
|
||||||
|
| `CONTINUWUITY_ALLOW_ROOM_CREATION` | Enable room creation | - |
|
||||||
|
| `CONTINUWUITY_ALLOW_UNSTABLE_ROOM_VERSIONS` | Allow unstable versions | - |
|
||||||
|
| `CONTINUWUITY_DEFAULT_ROOM_VERSION` | Default room version | `v11` |
|
||||||
|
| `CONTINUWUITY_REQUIRE_AUTH_FOR_PROFILE_REQUESTS` | Auth for profiles | - |
|
||||||
|
| `CONTINUWUITY_ALLOW_PUBLIC_ROOM_DIRECTORY_OVER_FEDERATION` | Federate directory | - |
|
||||||
|
| `CONTINUWUITY_ALLOW_PUBLIC_ROOM_DIRECTORY_WITHOUT_AUTH` | Unauth directory | - |
|
||||||
|
| `CONTINUWUITY_ALLOW_DEVICE_NAME_FEDERATION` | Device names in federation | - |
|
||||||
|
|
||||||
|
## TLS Configuration
|
||||||
|
|
||||||
|
Built-in TLS support is primarily for testing. **For production deployments,
|
||||||
|
especially when federating on the internet, use a reverse proxy** (Traefik,
|
||||||
|
Caddy, nginx) to handle TLS termination.
|
||||||
|
|
||||||
|
| Variable | Description |
|
||||||
|
| --------------------------------- | ------------------------- |
|
||||||
|
| `CONTINUWUITY_TLS__CERTS` | TLS certificate file path |
|
||||||
|
| `CONTINUWUITY_TLS__KEY` | TLS private key path |
|
||||||
|
| `CONTINUWUITY_TLS__DUAL_PROTOCOL` | Support TLS 1.2 + 1.3 |
|
||||||
|
|
||||||
|
**Example (testing only):**
|
||||||
|
|
||||||
|
```bash
|
||||||
|
CONTINUWUITY_TLS__CERTS="/etc/letsencrypt/live/matrix.example.com/fullchain.pem"
|
||||||
|
CONTINUWUITY_TLS__KEY="/etc/letsencrypt/live/matrix.example.com/privkey.pem"
|
||||||
|
```
|
||||||
|
|
||||||
|
## Logging Configuration
|
||||||
|
|
||||||
|
Control log output format and verbosity.
|
||||||
|
|
||||||
|
| Variable | Description | Default |
|
||||||
|
| ------------------------------ | ------------------ | ------- |
|
||||||
|
| `CONTINUWUITY_LOG` | Log filter level | - |
|
||||||
|
| `CONTINUWUITY_LOG_COLORS` | ANSI colours | `true` |
|
||||||
|
| `CONTINUWUITY_LOG_SPAN_EVENTS` | Log span events | `none` |
|
||||||
|
| `CONTINUWUITY_LOG_THREAD_IDS` | Include thread IDs | - |
|
||||||
|
|
||||||
|
**Examples:**
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Set log level to info
|
||||||
|
CONTINUWUITY_LOG="info"
|
||||||
|
|
||||||
|
# Enable debug logging for specific modules
|
||||||
|
CONTINUWUITY_LOG="warn,continuwuity::api=debug"
|
||||||
|
|
||||||
|
# Disable colours for log aggregation
|
||||||
|
CONTINUWUITY_LOG_COLORS="false"
|
||||||
|
```
|
||||||
|
|
||||||
|
## Observability Configuration
|
||||||
|
|
||||||
|
| Variable | Description |
|
||||||
|
| ---------------------------------------- | --------------------- |
|
||||||
|
| `CONTINUWUITY_ALLOW_OTLP` | Enable OpenTelemetry |
|
||||||
|
| `CONTINUWUITY_OTLP_FILTER` | OTLP filter level |
|
||||||
|
| `CONTINUWUITY_OTLP_PROTOCOL` | Protocol (http/grpc) |
|
||||||
|
| `CONTINUWUITY_TRACING_FLAME` | Enable flame graphs |
|
||||||
|
| `CONTINUWUITY_TRACING_FLAME_FILTER` | Flame graph filter |
|
||||||
|
| `CONTINUWUITY_TRACING_FLAME_OUTPUT_PATH` | Output directory |
|
||||||
|
| `CONTINUWUITY_SENTRY` | Enable Sentry |
|
||||||
|
| `CONTINUWUITY_SENTRY_ENDPOINT` | Sentry DSN |
|
||||||
|
| `CONTINUWUITY_SENTRY_SEND_SERVER_NAME` | Include server name |
|
||||||
|
| `CONTINUWUITY_SENTRY_TRACES_SAMPLE_RATE` | Sample rate (0.0-1.0) |
|
||||||
|
|
||||||
|
## Admin Configuration
|
||||||
|
|
||||||
|
Configure admin users and automated command execution.
|
||||||
|
|
||||||
|
| Variable | Description | Default |
|
||||||
|
| ------------------------------------------ | -------------------------------- | ----------------- |
|
||||||
|
| `CONTINUWUITY_ADMINS_LIST` | JSON array of admin user IDs | - |
|
||||||
|
| `CONTINUWUITY_ADMINS_FROM_ROOM` | Derive admins from room | - |
|
||||||
|
| `CONTINUWUITY_ADMIN_ESCAPE_COMMANDS` | Allow `\` prefix in public rooms | - |
|
||||||
|
| `CONTINUWUITY_ADMIN_CONSOLE_AUTOMATIC` | Auto-activate console | - |
|
||||||
|
| `CONTINUWUITY_ADMIN_EXECUTE` | JSON array of startup commands | - |
|
||||||
|
| `CONTINUWUITY_ADMIN_EXECUTE_ERRORS_IGNORE` | Ignore command errors | - |
|
||||||
|
| `CONTINUWUITY_ADMIN_SIGNAL_EXECUTE` | Commands on SIGUSR2 | - |
|
||||||
|
| `CONTINUWUITY_ADMIN_ROOM_TAG` | Admin room tag | `m.server_notice` |
|
||||||
|
|
||||||
|
**Examples:**
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Create admin user on startup
|
||||||
|
CONTINUWUITY_ADMIN_EXECUTE='["users create-user admin", "users make-user-admin admin"]'
|
||||||
|
|
||||||
|
# Specify admin users directly
|
||||||
|
CONTINUWUITY_ADMINS_LIST='["@alice:example.com", "@bob:example.com"]'
|
||||||
|
```
|
||||||
|
|
||||||
|
## Media & URL Preview Configuration
|
||||||
|
|
||||||
|
| Variable | Description |
|
||||||
|
| ---------------------------------------------------- | ------------------ |
|
||||||
|
| `CONTINUWUITY_URL_PREVIEW_BOUND_INTERFACE` | Bind interface |
|
||||||
|
| `CONTINUWUITY_URL_PREVIEW_DOMAIN_CONTAINS_ALLOWLIST` | Domain allowlist |
|
||||||
|
| `CONTINUWUITY_URL_PREVIEW_DOMAIN_EXPLICIT_ALLOWLIST` | Explicit allowlist |
|
||||||
|
| `CONTINUWUITY_URL_PREVIEW_DOMAIN_EXPLICIT_DENYLIST` | Explicit denylist |
|
||||||
|
| `CONTINUWUITY_URL_PREVIEW_MAX_SPIDER_SIZE` | Max fetch size |
|
||||||
|
| `CONTINUWUITY_URL_PREVIEW_TIMEOUT` | Fetch timeout |
|
||||||
|
| `CONTINUWUITY_IP_RANGE_DENYLIST` | IP range denylist |
|
||||||
|
|
||||||
|
## Tokio Runtime Configuration
|
||||||
|
|
||||||
|
These can be set as environment variables or CLI arguments:
|
||||||
|
|
||||||
|
| Variable | Description |
|
||||||
|
| ----------------------------------------- | -------------------------- |
|
||||||
|
| `TOKIO_WORKER_THREADS` | Worker thread count |
|
||||||
|
| `TOKIO_GLOBAL_QUEUE_INTERVAL` | Global queue interval |
|
||||||
|
| `TOKIO_EVENT_INTERVAL` | Event interval |
|
||||||
|
| `TOKIO_MAX_IO_EVENTS_PER_TICK` | Max I/O events per tick |
|
||||||
|
| `CONTINUWUITY_RUNTIME_HISTOGRAM_INTERVAL` | Histogram bucket size (μs) |
|
||||||
|
| `CONTINUWUITY_RUNTIME_HISTOGRAM_BUCKETS` | Bucket count |
|
||||||
|
| `CONTINUWUITY_RUNTIME_WORKER_AFFINITY` | Enable worker affinity |
|
||||||
|
|
||||||
|
## See Also
|
||||||
|
|
||||||
|
- [Configuration Reference](./config.mdx) - Complete TOML configuration
|
||||||
|
documentation
|
||||||
|
- [Admin Commands](./admin/) - Admin command reference
|
||||||
|
|
@ -1,12 +1,37 @@
|
||||||
# Troubleshooting Continuwuity
|
# Troubleshooting Continuwuity
|
||||||
|
|
||||||
> **Docker users ⚠️**
|
:::warning{title="Docker users:"}
|
||||||
>
|
Docker can be difficult to use and debug. It's common for Docker
|
||||||
> Docker can be difficult to use and debug. It's common for Docker
|
misconfigurations to cause issues, particularly with networking and permissions.
|
||||||
> misconfigurations to cause issues, particularly with networking and permissions.
|
Please check that your issues are not due to problems with your Docker setup.
|
||||||
> Please check that your issues are not due to problems with your Docker setup.
|
:::
|
||||||
|
|
||||||
## Continuwuity and Matrix issues
|
## Continuwuity issues
|
||||||
|
|
||||||
|
### Slow joins to rooms
|
||||||
|
|
||||||
|
Some slowness is to be expected if you're the first person on your homserver to join a room (which will
|
||||||
|
always be the case for single-user homeservers). In this situation, your homeserver has to verify the signatures of
|
||||||
|
all of the state events sent by other servers before your join. To make this process as fast as possible, make sure you have
|
||||||
|
multiple fast, trusted servers listed in `trusted_servers` in your configuration, and ensure
|
||||||
|
`query_trusted_key_servers_first_on_join` is set to true (the default).
|
||||||
|
If you need suggestions for trusted servers, ask in the Continuwuity main room.
|
||||||
|
|
||||||
|
However, _very_ slow joins, especially to rooms with only a few users in them or rooms created by another user
|
||||||
|
on your homeserver, may be caused by [issue !779](https://forgejo.ellis.link/continuwuation/continuwuity/issues/779),
|
||||||
|
which is a longstanding bug with synchronizing room joins to clients. In this situation, you did succeed in joining the room, but
|
||||||
|
the bug caused your homeserver to forget to tell your client. **To fix this, clear your client's cache.** Both Element and Cinny
|
||||||
|
have a button to clear their cache in the "About" section of their settings.
|
||||||
|
|
||||||
|
### Configuration not working as expected
|
||||||
|
|
||||||
|
Sometimes you can make a mistake in your configuration that
|
||||||
|
means things don't get passed to Continuwuity correctly.
|
||||||
|
This is particularly easy to do with environment variables.
|
||||||
|
To check what configuration Continuwuity actually sees, you can
|
||||||
|
use the `!admin server show-config` command in your admin room.
|
||||||
|
Beware that this prints out any secrets in your configuration,
|
||||||
|
so you might want to delete the result afterwards!
|
||||||
|
|
||||||
### Lost access to admin room
|
### Lost access to admin room
|
||||||
|
|
||||||
|
|
@ -18,17 +43,7 @@ argument once to invite yourslf to the admin room on startup
|
||||||
- Or specify the `emergency_password` config option to allow you to temporarily
|
- Or specify the `emergency_password` config option to allow you to temporarily
|
||||||
log into the server account (`@conduit`) from a web client
|
log into the server account (`@conduit`) from a web client
|
||||||
|
|
||||||
## General potential issues
|
## DNS issues
|
||||||
|
|
||||||
### Configuration not working as expected
|
|
||||||
|
|
||||||
Sometimes you can make a mistake in your configuration that
|
|
||||||
means things don't get passed to Continuwuity correctly.
|
|
||||||
This is particularly easy to do with environment variables.
|
|
||||||
To check what configuration Continuwuity actually sees, you can
|
|
||||||
use the `!admin server show-config` command in your admin room.
|
|
||||||
Beware that this prints out any secrets in your configuration,
|
|
||||||
so you might want to delete the result afterwards!
|
|
||||||
|
|
||||||
### Potential DNS issues when using Docker
|
### Potential DNS issues when using Docker
|
||||||
|
|
||||||
|
|
|
||||||
54
flake.lock
generated
54
flake.lock
generated
|
|
@ -3,11 +3,11 @@
|
||||||
"advisory-db": {
|
"advisory-db": {
|
||||||
"flake": false,
|
"flake": false,
|
||||||
"locked": {
|
"locked": {
|
||||||
"lastModified": 1766324728,
|
"lastModified": 1773786698,
|
||||||
"narHash": "sha256-9C+WyE5U3y5w4WQXxmb0ylRyMMsPyzxielWXSHrcDpE=",
|
"narHash": "sha256-o/J7ZculgwSs1L4H4UFlFZENOXTJzq1X0n71x6oNNvY=",
|
||||||
"owner": "rustsec",
|
"owner": "rustsec",
|
||||||
"repo": "advisory-db",
|
"repo": "advisory-db",
|
||||||
"rev": "c88b88c62bda077be8aa621d4e89d8701e39cb5d",
|
"rev": "99e9de91bb8b61f06ef234ff84e11f758ecd5384",
|
||||||
"type": "github"
|
"type": "github"
|
||||||
},
|
},
|
||||||
"original": {
|
"original": {
|
||||||
|
|
@ -18,11 +18,11 @@
|
||||||
},
|
},
|
||||||
"crane": {
|
"crane": {
|
||||||
"locked": {
|
"locked": {
|
||||||
"lastModified": 1766194365,
|
"lastModified": 1773189535,
|
||||||
"narHash": "sha256-4AFsUZ0kl6MXSm4BaQgItD0VGlEKR3iq7gIaL7TjBvc=",
|
"narHash": "sha256-E1G/Or6MWeP+L6mpQ0iTFLpzSzlpGrITfU2220Gq47g=",
|
||||||
"owner": "ipetkov",
|
"owner": "ipetkov",
|
||||||
"repo": "crane",
|
"repo": "crane",
|
||||||
"rev": "7d8ec2c71771937ab99790b45e6d9b93d15d9379",
|
"rev": "6fa2fb4cf4a89ba49fc9dd5a3eb6cde99d388269",
|
||||||
"type": "github"
|
"type": "github"
|
||||||
},
|
},
|
||||||
"original": {
|
"original": {
|
||||||
|
|
@ -39,11 +39,11 @@
|
||||||
"rust-analyzer-src": "rust-analyzer-src"
|
"rust-analyzer-src": "rust-analyzer-src"
|
||||||
},
|
},
|
||||||
"locked": {
|
"locked": {
|
||||||
"lastModified": 1766299592,
|
"lastModified": 1773732206,
|
||||||
"narHash": "sha256-7u+q5hexu2eAxL2VjhskHvaUKg+GexmelIR2ve9Nbb4=",
|
"narHash": "sha256-HKibxaUXyWd4Hs+ZUnwo6XslvaFqFqJh66uL9tphU4Q=",
|
||||||
"owner": "nix-community",
|
"owner": "nix-community",
|
||||||
"repo": "fenix",
|
"repo": "fenix",
|
||||||
"rev": "381579dee168d5ced412e2990e9637ecc7cf1c5d",
|
"rev": "0aa13c1b54063a8d8679b28a5cd357ba98f4a56b",
|
||||||
"type": "github"
|
"type": "github"
|
||||||
},
|
},
|
||||||
"original": {
|
"original": {
|
||||||
|
|
@ -55,11 +55,11 @@
|
||||||
"flake-compat": {
|
"flake-compat": {
|
||||||
"flake": false,
|
"flake": false,
|
||||||
"locked": {
|
"locked": {
|
||||||
"lastModified": 1765121682,
|
"lastModified": 1767039857,
|
||||||
"narHash": "sha256-4VBOP18BFeiPkyhy9o4ssBNQEvfvv1kXkasAYd0+rrA=",
|
"narHash": "sha256-vNpUSpF5Nuw8xvDLj2KCwwksIbjua2LZCqhV1LNRDns=",
|
||||||
"owner": "edolstra",
|
"owner": "edolstra",
|
||||||
"repo": "flake-compat",
|
"repo": "flake-compat",
|
||||||
"rev": "65f23138d8d09a92e30f1e5c87611b23ef451bf3",
|
"rev": "5edf11c44bc78a0d334f6334cdaf7d60d732daab",
|
||||||
"type": "github"
|
"type": "github"
|
||||||
},
|
},
|
||||||
"original": {
|
"original": {
|
||||||
|
|
@ -74,11 +74,11 @@
|
||||||
"nixpkgs-lib": "nixpkgs-lib"
|
"nixpkgs-lib": "nixpkgs-lib"
|
||||||
},
|
},
|
||||||
"locked": {
|
"locked": {
|
||||||
"lastModified": 1765835352,
|
"lastModified": 1772408722,
|
||||||
"narHash": "sha256-XswHlK/Qtjasvhd1nOa1e8MgZ8GS//jBoTqWtrS1Giw=",
|
"narHash": "sha256-rHuJtdcOjK7rAHpHphUb1iCvgkU3GpfvicLMwwnfMT0=",
|
||||||
"owner": "hercules-ci",
|
"owner": "hercules-ci",
|
||||||
"repo": "flake-parts",
|
"repo": "flake-parts",
|
||||||
"rev": "a34fae9c08a15ad73f295041fec82323541400a9",
|
"rev": "f20dc5d9b8027381c474144ecabc9034d6a839a3",
|
||||||
"type": "github"
|
"type": "github"
|
||||||
},
|
},
|
||||||
"original": {
|
"original": {
|
||||||
|
|
@ -89,11 +89,11 @@
|
||||||
},
|
},
|
||||||
"nixpkgs": {
|
"nixpkgs": {
|
||||||
"locked": {
|
"locked": {
|
||||||
"lastModified": 1766070988,
|
"lastModified": 1773734432,
|
||||||
"narHash": "sha256-G/WVghka6c4bAzMhTwT2vjLccg/awmHkdKSd2JrycLc=",
|
"narHash": "sha256-IF5ppUWh6gHGHYDbtVUyhwy/i7D261P7fWD1bPefOsw=",
|
||||||
"owner": "NixOS",
|
"owner": "NixOS",
|
||||||
"repo": "nixpkgs",
|
"repo": "nixpkgs",
|
||||||
"rev": "c6245e83d836d0433170a16eb185cefe0572f8b8",
|
"rev": "cda48547b432e8d3b18b4180ba07473762ec8558",
|
||||||
"type": "github"
|
"type": "github"
|
||||||
},
|
},
|
||||||
"original": {
|
"original": {
|
||||||
|
|
@ -105,11 +105,11 @@
|
||||||
},
|
},
|
||||||
"nixpkgs-lib": {
|
"nixpkgs-lib": {
|
||||||
"locked": {
|
"locked": {
|
||||||
"lastModified": 1765674936,
|
"lastModified": 1772328832,
|
||||||
"narHash": "sha256-k00uTP4JNfmejrCLJOwdObYC9jHRrr/5M/a/8L2EIdo=",
|
"narHash": "sha256-e+/T/pmEkLP6BHhYjx6GmwP5ivonQQn0bJdH9YrRB+Q=",
|
||||||
"owner": "nix-community",
|
"owner": "nix-community",
|
||||||
"repo": "nixpkgs.lib",
|
"repo": "nixpkgs.lib",
|
||||||
"rev": "2075416fcb47225d9b68ac469a5c4801a9c4dd85",
|
"rev": "c185c7a5e5dd8f9add5b2f8ebeff00888b070742",
|
||||||
"type": "github"
|
"type": "github"
|
||||||
},
|
},
|
||||||
"original": {
|
"original": {
|
||||||
|
|
@ -132,11 +132,11 @@
|
||||||
"rust-analyzer-src": {
|
"rust-analyzer-src": {
|
||||||
"flake": false,
|
"flake": false,
|
||||||
"locked": {
|
"locked": {
|
||||||
"lastModified": 1766253897,
|
"lastModified": 1773697963,
|
||||||
"narHash": "sha256-ChK07B1aOlJ4QzWXpJo+y8IGAxp1V9yQ2YloJ+RgHRw=",
|
"narHash": "sha256-xdKI77It9PM6eNrCcDZsnP4SKulZwk8VkDgBRVMnCb8=",
|
||||||
"owner": "rust-lang",
|
"owner": "rust-lang",
|
||||||
"repo": "rust-analyzer",
|
"repo": "rust-analyzer",
|
||||||
"rev": "765b7bdb432b3740f2d564afccfae831d5a972e4",
|
"rev": "2993637174252ff60a582fd1f55b9ab52c39db6d",
|
||||||
"type": "github"
|
"type": "github"
|
||||||
},
|
},
|
||||||
"original": {
|
"original": {
|
||||||
|
|
@ -153,11 +153,11 @@
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
"locked": {
|
"locked": {
|
||||||
"lastModified": 1766000401,
|
"lastModified": 1773297127,
|
||||||
"narHash": "sha256-+cqN4PJz9y0JQXfAK5J1drd0U05D5fcAGhzhfVrDlsI=",
|
"narHash": "sha256-6E/yhXP7Oy/NbXtf1ktzmU8SdVqJQ09HC/48ebEGBpk=",
|
||||||
"owner": "numtide",
|
"owner": "numtide",
|
||||||
"repo": "treefmt-nix",
|
"repo": "treefmt-nix",
|
||||||
"rev": "42d96e75aa56a3f70cab7e7dc4a32868db28e8fd",
|
"rev": "71b125cd05fbfd78cab3e070b73544abe24c5016",
|
||||||
"type": "github"
|
"type": "github"
|
||||||
},
|
},
|
||||||
"original": {
|
"original": {
|
||||||
|
|
|
||||||
|
|
@ -12,7 +12,6 @@
|
||||||
|
|
||||||
rocksdbAllFeatures = self'.packages.rocksdb.override {
|
rocksdbAllFeatures = self'.packages.rocksdb.override {
|
||||||
enableJemalloc = true;
|
enableJemalloc = true;
|
||||||
enableLiburing = true;
|
|
||||||
};
|
};
|
||||||
|
|
||||||
commonAttrs = (uwulib.build.commonAttrs { }) // {
|
commonAttrs = (uwulib.build.commonAttrs { }) // {
|
||||||
|
|
|
||||||
|
|
@ -27,7 +27,6 @@
|
||||||
commonAttrsArgs.profile = "release";
|
commonAttrsArgs.profile = "release";
|
||||||
rocksdb = self'.packages.rocksdb.override {
|
rocksdb = self'.packages.rocksdb.override {
|
||||||
enableJemalloc = true;
|
enableJemalloc = true;
|
||||||
enableLiburing = true;
|
|
||||||
};
|
};
|
||||||
features = {
|
features = {
|
||||||
enabledFeatures = "all";
|
enabledFeatures = "all";
|
||||||
|
|
|
||||||
|
|
@ -7,7 +7,6 @@
|
||||||
rust-jemalloc-sys-unprefixed,
|
rust-jemalloc-sys-unprefixed,
|
||||||
|
|
||||||
enableJemalloc ? false,
|
enableJemalloc ? false,
|
||||||
enableLiburing ? false,
|
|
||||||
|
|
||||||
fetchFromGitea,
|
fetchFromGitea,
|
||||||
|
|
||||||
|
|
@ -32,7 +31,7 @@ in
|
||||||
|
|
||||||
# for some reason enableLiburing in nixpkgs rocksdb is default true
|
# for some reason enableLiburing in nixpkgs rocksdb is default true
|
||||||
# which breaks Darwin entirely
|
# which breaks Darwin entirely
|
||||||
enableLiburing = enableLiburing && notDarwin;
|
enableLiburing = notDarwin;
|
||||||
}).overrideAttrs
|
}).overrideAttrs
|
||||||
(old: {
|
(old: {
|
||||||
src = fetchFromGitea {
|
src = fetchFromGitea {
|
||||||
|
|
@ -74,7 +73,7 @@ in
|
||||||
"USE_RTTI"
|
"USE_RTTI"
|
||||||
]);
|
]);
|
||||||
|
|
||||||
enableLiburing = enableLiburing && notDarwin;
|
enableLiburing = notDarwin;
|
||||||
|
|
||||||
# outputs has "tools" which we don't need or use
|
# outputs has "tools" which we don't need or use
|
||||||
outputs = [ "out" ];
|
outputs = [ "out" ];
|
||||||
|
|
|
||||||
|
|
@ -77,7 +77,12 @@ rec {
|
||||||
craneLib.buildDepsOnly (
|
craneLib.buildDepsOnly (
|
||||||
(commonAttrs commonAttrsArgs)
|
(commonAttrs commonAttrsArgs)
|
||||||
// {
|
// {
|
||||||
env = uwuenv.buildDepsOnlyEnv // (makeRocksDBEnv { inherit rocksdb; });
|
env = uwuenv.buildDepsOnlyEnv
|
||||||
|
// (makeRocksDBEnv { inherit rocksdb; })
|
||||||
|
// {
|
||||||
|
# required since we started using unstable reqwest apparently ... otherwise the all-features build will fail
|
||||||
|
RUSTFLAGS = "--cfg reqwest_unstable";
|
||||||
|
};
|
||||||
inherit (features) cargoExtraArgs;
|
inherit (features) cargoExtraArgs;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -102,7 +107,13 @@ rec {
|
||||||
'';
|
'';
|
||||||
cargoArtifacts = deps;
|
cargoArtifacts = deps;
|
||||||
doCheck = true;
|
doCheck = true;
|
||||||
env = uwuenv.buildPackageEnv // rocksdbEnv;
|
env =
|
||||||
|
uwuenv.buildPackageEnv
|
||||||
|
// rocksdbEnv
|
||||||
|
// {
|
||||||
|
# required since we started using unstable reqwest apparently ... otherwise the all-features build will fail
|
||||||
|
RUSTFLAGS = "--cfg reqwest_unstable";
|
||||||
|
};
|
||||||
passthru.env = uwuenv.buildPackageEnv // rocksdbEnv;
|
passthru.env = uwuenv.buildPackageEnv // rocksdbEnv;
|
||||||
meta.mainProgram = crateInfo.pname;
|
meta.mainProgram = crateInfo.pname;
|
||||||
inherit (features) cargoExtraArgs;
|
inherit (features) cargoExtraArgs;
|
||||||
|
|
|
||||||
|
|
@ -11,7 +11,6 @@
|
||||||
uwulib = inputs.self.uwulib.init pkgs;
|
uwulib = inputs.self.uwulib.init pkgs;
|
||||||
rocksdbAllFeatures = self'.packages.rocksdb.override {
|
rocksdbAllFeatures = self'.packages.rocksdb.override {
|
||||||
enableJemalloc = true;
|
enableJemalloc = true;
|
||||||
enableLiburing = true;
|
|
||||||
};
|
};
|
||||||
in
|
in
|
||||||
{
|
{
|
||||||
|
|
|
||||||
618
package-lock.json
generated
618
package-lock.json
generated
File diff suppressed because it is too large
Load diff
|
|
@ -18,6 +18,7 @@ Environment="CONTINUWUITY_DATABASE_PATH=%S/conduwuit"
|
||||||
Environment="CONTINUWUITY_CONFIG_RELOAD_SIGNAL=true"
|
Environment="CONTINUWUITY_CONFIG_RELOAD_SIGNAL=true"
|
||||||
|
|
||||||
LoadCredential=conduwuit.toml:/etc/conduwuit/conduwuit.toml
|
LoadCredential=conduwuit.toml:/etc/conduwuit/conduwuit.toml
|
||||||
|
RefreshOnReload=yes
|
||||||
|
|
||||||
ExecStart=/usr/bin/conduwuit --config ${CREDENTIALS_DIRECTORY}/conduwuit.toml
|
ExecStart=/usr/bin/conduwuit --config ${CREDENTIALS_DIRECTORY}/conduwuit.toml
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -1,6 +1,7 @@
|
||||||
{
|
{
|
||||||
"$schema": "https://docs.renovatebot.com/renovate-schema.json",
|
"$schema": "https://docs.renovatebot.com/renovate-schema.json",
|
||||||
"extends": ["config:recommended", "replacements:all"],
|
"extends": ["config:recommended", "replacements:all", ":semanticCommitTypeAll(chore)"],
|
||||||
|
"dependencyDashboard": true,
|
||||||
"osvVulnerabilityAlerts": true,
|
"osvVulnerabilityAlerts": true,
|
||||||
"lockFileMaintenance": {
|
"lockFileMaintenance": {
|
||||||
"enabled": true,
|
"enabled": true,
|
||||||
|
|
@ -35,10 +36,18 @@
|
||||||
},
|
},
|
||||||
"packageRules": [
|
"packageRules": [
|
||||||
{
|
{
|
||||||
"description": "Batch patch-level Rust dependency updates",
|
"description": "Batch minor and patch Rust dependency updates",
|
||||||
|
"matchManagers": ["cargo"],
|
||||||
|
"matchUpdateTypes": ["minor", "patch"],
|
||||||
|
"matchCurrentVersion": ">=1.0.0",
|
||||||
|
"groupName": "rust-non-major"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"description": "Batch patch-level zerover Rust dependency updates",
|
||||||
"matchManagers": ["cargo"],
|
"matchManagers": ["cargo"],
|
||||||
"matchUpdateTypes": ["patch"],
|
"matchUpdateTypes": ["patch"],
|
||||||
"groupName": "rust-patch-updates"
|
"matchCurrentVersion": ">=0.1.0,<1.0.0",
|
||||||
|
"groupName": "rust-zerover-patch-updates"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"description": "Limit concurrent Cargo PRs",
|
"description": "Limit concurrent Cargo PRs",
|
||||||
|
|
@ -57,12 +66,25 @@
|
||||||
"matchUpdateTypes": ["minor", "patch"],
|
"matchUpdateTypes": ["minor", "patch"],
|
||||||
"groupName": "github-actions-non-major"
|
"groupName": "github-actions-non-major"
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
"description": "Batch patch-level Node.js dependency updates",
|
||||||
|
"matchManagers": ["npm"],
|
||||||
|
"matchUpdateTypes": ["patch"],
|
||||||
|
"groupName": "node-patch-updates"
|
||||||
|
},
|
||||||
{
|
{
|
||||||
"description": "Pin forgejo artifact actions to prevent breaking changes",
|
"description": "Pin forgejo artifact actions to prevent breaking changes",
|
||||||
"matchManagers": ["github-actions"],
|
"matchManagers": ["github-actions"],
|
||||||
"matchPackageNames": ["forgejo/upload-artifact", "forgejo/download-artifact"],
|
"matchPackageNames": ["forgejo/upload-artifact", "forgejo/download-artifact"],
|
||||||
"enabled": false
|
"enabled": false
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
"description": "Auto-merge crate-ci/typos minor updates",
|
||||||
|
"matchPackageNames": ["crate-ci/typos"],
|
||||||
|
"matchUpdateTypes": ["minor", "patch"],
|
||||||
|
"automerge": true,
|
||||||
|
"automergeStrategy": "fast-forward"
|
||||||
|
},
|
||||||
{
|
{
|
||||||
"description": "Auto-merge renovatebot docker image updates",
|
"description": "Auto-merge renovatebot docker image updates",
|
||||||
"matchDatasources": ["docker"],
|
"matchDatasources": ["docker"],
|
||||||
|
|
|
||||||
|
|
@ -11,6 +11,7 @@ use crate::{
|
||||||
query::{self, QueryCommand},
|
query::{self, QueryCommand},
|
||||||
room::{self, RoomCommand},
|
room::{self, RoomCommand},
|
||||||
server::{self, ServerCommand},
|
server::{self, ServerCommand},
|
||||||
|
space::{self, SpaceCommand},
|
||||||
token::{self, TokenCommand},
|
token::{self, TokenCommand},
|
||||||
user::{self, UserCommand},
|
user::{self, UserCommand},
|
||||||
};
|
};
|
||||||
|
|
@ -34,6 +35,10 @@ pub enum AdminCommand {
|
||||||
/// Commands for managing rooms
|
/// Commands for managing rooms
|
||||||
Rooms(RoomCommand),
|
Rooms(RoomCommand),
|
||||||
|
|
||||||
|
#[command(subcommand)]
|
||||||
|
/// Commands for managing space permissions
|
||||||
|
Spaces(SpaceCommand),
|
||||||
|
|
||||||
#[command(subcommand)]
|
#[command(subcommand)]
|
||||||
/// Commands for managing federation
|
/// Commands for managing federation
|
||||||
Federation(FederationCommand),
|
Federation(FederationCommand),
|
||||||
|
|
@ -81,6 +86,10 @@ pub(super) async fn process(command: AdminCommand, context: &Context<'_>) -> Res
|
||||||
token::process(command, context).await
|
token::process(command, context).await
|
||||||
},
|
},
|
||||||
| Rooms(command) => room::process(command, context).await,
|
| Rooms(command) => room::process(command, context).await,
|
||||||
|
| Spaces(command) => {
|
||||||
|
context.bail_restricted()?;
|
||||||
|
space::process(command, context).await
|
||||||
|
},
|
||||||
| Federation(command) => federation::process(command, context).await,
|
| Federation(command) => federation::process(command, context).await,
|
||||||
| Server(command) => server::process(command, context).await,
|
| Server(command) => server::process(command, context).await,
|
||||||
| Debug(command) => debug::process(command, context).await,
|
| Debug(command) => debug::process(command, context).await,
|
||||||
|
|
|
||||||
|
|
@ -1,6 +1,6 @@
|
||||||
use std::fmt::Write;
|
use std::fmt::Write;
|
||||||
|
|
||||||
use conduwuit::{Err, Result};
|
use conduwuit::{Err, Result, utils::response::LimitReadExt};
|
||||||
use futures::StreamExt;
|
use futures::StreamExt;
|
||||||
use ruma::{OwnedRoomId, OwnedServerName, OwnedUserId};
|
use ruma::{OwnedRoomId, OwnedServerName, OwnedUserId};
|
||||||
|
|
||||||
|
|
@ -30,12 +30,15 @@ pub(super) async fn incoming_federation(&self) -> Result {
|
||||||
.federation_handletime
|
.federation_handletime
|
||||||
.read();
|
.read();
|
||||||
|
|
||||||
let mut msg = format!("Handling {} incoming pdus:\n", map.len());
|
let mut msg = format!(
|
||||||
|
"Handling {} incoming PDUs across {} active transactions:\n",
|
||||||
|
map.len(),
|
||||||
|
self.services.transactions.txn_active_handle_count()
|
||||||
|
);
|
||||||
for (r, (e, i)) in map.iter() {
|
for (r, (e, i)) in map.iter() {
|
||||||
let elapsed = i.elapsed();
|
let elapsed = i.elapsed();
|
||||||
writeln!(msg, "{} {}: {}m{}s", r, e, elapsed.as_secs() / 60, elapsed.as_secs() % 60)?;
|
writeln!(msg, "{} {}: {}m{}s", r, e, elapsed.as_secs() / 60, elapsed.as_secs() % 60)?;
|
||||||
}
|
}
|
||||||
|
|
||||||
msg
|
msg
|
||||||
};
|
};
|
||||||
|
|
||||||
|
|
@ -52,7 +55,15 @@ pub(super) async fn fetch_support_well_known(&self, server_name: OwnedServerName
|
||||||
.send()
|
.send()
|
||||||
.await?;
|
.await?;
|
||||||
|
|
||||||
let text = response.text().await?;
|
let text = response
|
||||||
|
.limit_read_text(
|
||||||
|
self.services
|
||||||
|
.config
|
||||||
|
.max_request_size
|
||||||
|
.try_into()
|
||||||
|
.expect("u64 fits into usize"),
|
||||||
|
)
|
||||||
|
.await?;
|
||||||
|
|
||||||
if text.is_empty() {
|
if text.is_empty() {
|
||||||
return Err!("Response text/body is empty.");
|
return Err!("Response text/body is empty.");
|
||||||
|
|
|
||||||
|
|
@ -29,7 +29,9 @@ pub(super) async fn delete(
|
||||||
.delete(&mxc.as_str().try_into()?)
|
.delete(&mxc.as_str().try_into()?)
|
||||||
.await?;
|
.await?;
|
||||||
|
|
||||||
return Err!("Deleted the MXC from our database and on our filesystem.",);
|
return self
|
||||||
|
.write_str("Deleted the MXC from our database and on our filesystem.")
|
||||||
|
.await;
|
||||||
}
|
}
|
||||||
|
|
||||||
if let Some(event_id) = event_id {
|
if let Some(event_id) = event_id {
|
||||||
|
|
@ -388,3 +390,19 @@ pub(super) async fn get_remote_thumbnail(
|
||||||
self.write_str(&format!("```\n{result:#?}\nreceived {len} bytes for file content.\n```"))
|
self.write_str(&format!("```\n{result:#?}\nreceived {len} bytes for file content.\n```"))
|
||||||
.await
|
.await
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[admin_command]
|
||||||
|
pub(super) async fn delete_url_preview(&self, url: Option<String>, all: bool) -> Result {
|
||||||
|
if all {
|
||||||
|
self.services.media.clear_url_previews().await;
|
||||||
|
|
||||||
|
return self.write_str("Deleted all cached URL previews.").await;
|
||||||
|
}
|
||||||
|
|
||||||
|
let url = url.expect("clap enforces url is required unless --all");
|
||||||
|
|
||||||
|
self.services.media.remove_url_preview(&url).await?;
|
||||||
|
|
||||||
|
self.write_str(&format!("Deleted cached URL preview for: {url}"))
|
||||||
|
.await
|
||||||
|
}
|
||||||
|
|
|
||||||
|
|
@ -40,7 +40,7 @@ pub enum MediaCommand {
|
||||||
/// * Delete all remote and local media from 3 days ago, up until now:
|
/// * Delete all remote and local media from 3 days ago, up until now:
|
||||||
///
|
///
|
||||||
/// `!admin media delete-past-remote-media -a 3d
|
/// `!admin media delete-past-remote-media -a 3d
|
||||||
///-yes-i-want-to-delete-local-media`
|
///--yes-i-want-to-delete-local-media`
|
||||||
#[command(verbatim_doc_comment)]
|
#[command(verbatim_doc_comment)]
|
||||||
DeletePastRemoteMedia {
|
DeletePastRemoteMedia {
|
||||||
/// The relative time (e.g. 30s, 5m, 7d) from now within which to
|
/// The relative time (e.g. 30s, 5m, 7d) from now within which to
|
||||||
|
|
@ -108,4 +108,16 @@ pub enum MediaCommand {
|
||||||
#[arg(long, default_value("800"))]
|
#[arg(long, default_value("800"))]
|
||||||
height: u32,
|
height: u32,
|
||||||
},
|
},
|
||||||
|
|
||||||
|
/// Deletes a cached URL preview, forcing it to be re-fetched.
|
||||||
|
/// Use --all to purge all cached URL previews.
|
||||||
|
DeleteUrlPreview {
|
||||||
|
/// The URL to clear from the saved preview data
|
||||||
|
#[arg(required_unless_present = "all")]
|
||||||
|
url: Option<String>,
|
||||||
|
|
||||||
|
/// Purge all cached URL previews
|
||||||
|
#[arg(long, conflicts_with = "url")]
|
||||||
|
all: bool,
|
||||||
|
},
|
||||||
}
|
}
|
||||||
|
|
|
||||||
|
|
@ -17,6 +17,7 @@ pub(crate) mod media;
|
||||||
pub(crate) mod query;
|
pub(crate) mod query;
|
||||||
pub(crate) mod room;
|
pub(crate) mod room;
|
||||||
pub(crate) mod server;
|
pub(crate) mod server;
|
||||||
|
pub(crate) mod space;
|
||||||
pub(crate) mod token;
|
pub(crate) mod token;
|
||||||
pub(crate) mod user;
|
pub(crate) mod user;
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -209,7 +209,7 @@ pub(super) async fn compact(
|
||||||
let parallelism = parallelism.unwrap_or(1);
|
let parallelism = parallelism.unwrap_or(1);
|
||||||
let results = maps
|
let results = maps
|
||||||
.into_iter()
|
.into_iter()
|
||||||
.try_stream()
|
.try_stream::<conduwuit::Error>()
|
||||||
.paralleln_and_then(runtime, parallelism, move |map| {
|
.paralleln_and_then(runtime, parallelism, move |map| {
|
||||||
map.compact_blocking(options.clone())?;
|
map.compact_blocking(options.clone())?;
|
||||||
Ok(map.name().to_owned())
|
Ok(map.name().to_owned())
|
||||||
|
|
|
||||||
|
|
@ -20,7 +20,17 @@ pub enum ResolverCommand {
|
||||||
name: Option<String>,
|
name: Option<String>,
|
||||||
},
|
},
|
||||||
|
|
||||||
/// Flush a specific server from the resolver caches or everything
|
/// Flush a given server from the resolver caches or flush them completely
|
||||||
|
///
|
||||||
|
/// * Examples:
|
||||||
|
/// * Flush a specific server:
|
||||||
|
///
|
||||||
|
/// `!admin query resolver flush-cache matrix.example.com`
|
||||||
|
///
|
||||||
|
/// * Flush all resolver caches completely:
|
||||||
|
///
|
||||||
|
/// `!admin query resolver flush-cache --all`
|
||||||
|
#[command(verbatim_doc_comment)]
|
||||||
FlushCache {
|
FlushCache {
|
||||||
name: Option<OwnedServerName>,
|
name: Option<OwnedServerName>,
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -89,13 +89,7 @@ async fn ban_room(&self, room: OwnedRoomOrAliasId) -> Result {
|
||||||
locally, if not using get_alias_helper to fetch room ID remotely"
|
locally, if not using get_alias_helper to fetch room ID remotely"
|
||||||
);
|
);
|
||||||
|
|
||||||
match self
|
match self.services.rooms.alias.resolve_alias(room_alias).await {
|
||||||
.services
|
|
||||||
.rooms
|
|
||||||
.alias
|
|
||||||
.resolve_alias(room_alias, None)
|
|
||||||
.await
|
|
||||||
{
|
|
||||||
| Ok((room_id, servers)) => {
|
| Ok((room_id, servers)) => {
|
||||||
debug!(
|
debug!(
|
||||||
%room_id,
|
%room_id,
|
||||||
|
|
@ -235,7 +229,7 @@ async fn ban_list_of_rooms(&self) -> Result {
|
||||||
.services
|
.services
|
||||||
.rooms
|
.rooms
|
||||||
.alias
|
.alias
|
||||||
.resolve_alias(room_alias, None)
|
.resolve_alias(room_alias)
|
||||||
.await
|
.await
|
||||||
{
|
{
|
||||||
| Ok((room_id, servers)) => {
|
| Ok((room_id, servers)) => {
|
||||||
|
|
@ -388,13 +382,7 @@ async fn unban_room(&self, room: OwnedRoomOrAliasId) -> Result {
|
||||||
room ID over federation"
|
room ID over federation"
|
||||||
);
|
);
|
||||||
|
|
||||||
match self
|
match self.services.rooms.alias.resolve_alias(room_alias).await {
|
||||||
.services
|
|
||||||
.rooms
|
|
||||||
.alias
|
|
||||||
.resolve_alias(room_alias, None)
|
|
||||||
.await
|
|
||||||
{
|
|
||||||
| Ok((room_id, servers)) => {
|
| Ok((room_id, servers)) => {
|
||||||
debug!(
|
debug!(
|
||||||
%room_id,
|
%room_id,
|
||||||
|
|
|
||||||
|
|
@ -86,7 +86,7 @@ pub(super) async fn list_backups(&self) -> Result {
|
||||||
.db
|
.db
|
||||||
.backup_list()?
|
.backup_list()?
|
||||||
.try_stream()
|
.try_stream()
|
||||||
.try_for_each(|result| write!(self, "{result}"))
|
.try_for_each(|result| writeln!(self, "{result}"))
|
||||||
.await
|
.await
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
||||||
15
src/admin/space/mod.rs
Normal file
15
src/admin/space/mod.rs
Normal file
|
|
@ -0,0 +1,15 @@
|
||||||
|
pub(super) mod roles;
|
||||||
|
|
||||||
|
use clap::Subcommand;
|
||||||
|
use conduwuit::Result;
|
||||||
|
|
||||||
|
use self::roles::SpaceRolesCommand;
|
||||||
|
use crate::admin_command_dispatch;
|
||||||
|
|
||||||
|
#[admin_command_dispatch]
|
||||||
|
#[derive(Debug, Subcommand)]
|
||||||
|
pub enum SpaceCommand {
|
||||||
|
#[command(subcommand)]
|
||||||
|
/// Manage space roles and permissions
|
||||||
|
Roles(SpaceRolesCommand),
|
||||||
|
}
|
||||||
632
src/admin/space/roles.rs
Normal file
632
src/admin/space/roles.rs
Normal file
|
|
@ -0,0 +1,632 @@
|
||||||
|
use std::fmt::Write;
|
||||||
|
|
||||||
|
use clap::Subcommand;
|
||||||
|
use conduwuit::{Err, Event, Result, matrix::pdu::PduBuilder};
|
||||||
|
use conduwuit_core::matrix::space_roles::{
|
||||||
|
RoleDefinition, SPACE_CASCADING_EVENT_TYPE, SPACE_ROLE_MEMBER_EVENT_TYPE,
|
||||||
|
SPACE_ROLE_ROOM_EVENT_TYPE, SPACE_ROLES_EVENT_TYPE, SpaceCascadingEventContent,
|
||||||
|
SpaceRoleMemberEventContent, SpaceRoleRoomEventContent, SpaceRolesEventContent,
|
||||||
|
};
|
||||||
|
use futures::StreamExt;
|
||||||
|
use ruma::{OwnedRoomId, OwnedRoomOrAliasId, OwnedUserId, events::StateEventType};
|
||||||
|
use serde_json::value::to_raw_value;
|
||||||
|
|
||||||
|
use crate::{admin_command, admin_command_dispatch};
|
||||||
|
|
||||||
|
fn roles_event_type() -> StateEventType {
|
||||||
|
StateEventType::from(SPACE_ROLES_EVENT_TYPE.to_owned())
|
||||||
|
}
|
||||||
|
|
||||||
|
fn member_event_type() -> StateEventType {
|
||||||
|
StateEventType::from(SPACE_ROLE_MEMBER_EVENT_TYPE.to_owned())
|
||||||
|
}
|
||||||
|
|
||||||
|
fn room_event_type() -> StateEventType {
|
||||||
|
StateEventType::from(SPACE_ROLE_ROOM_EVENT_TYPE.to_owned())
|
||||||
|
}
|
||||||
|
|
||||||
|
fn cascading_event_type() -> StateEventType {
|
||||||
|
StateEventType::from(SPACE_CASCADING_EVENT_TYPE.to_owned())
|
||||||
|
}
|
||||||
|
|
||||||
|
macro_rules! resolve_room_as_space {
|
||||||
|
($self:expr, $space:expr) => {{
|
||||||
|
let space_id = $self.services.rooms.alias.resolve(&$space).await?;
|
||||||
|
if !matches!(
|
||||||
|
$self
|
||||||
|
.services
|
||||||
|
.rooms
|
||||||
|
.state_accessor
|
||||||
|
.get_room_type(&space_id)
|
||||||
|
.await,
|
||||||
|
Ok(ruma::room::RoomType::Space)
|
||||||
|
) {
|
||||||
|
return Err!("The specified room is not a Space.");
|
||||||
|
}
|
||||||
|
space_id
|
||||||
|
}};
|
||||||
|
}
|
||||||
|
|
||||||
|
macro_rules! resolve_space {
|
||||||
|
($self:expr, $space:expr) => {{
|
||||||
|
let space_id = resolve_room_as_space!($self, $space);
|
||||||
|
if !$self
|
||||||
|
.services
|
||||||
|
.rooms
|
||||||
|
.roles
|
||||||
|
.is_enabled_for_space(&space_id)
|
||||||
|
.await
|
||||||
|
{
|
||||||
|
return $self
|
||||||
|
.write_str(
|
||||||
|
"Space permission cascading is disabled for this Space. Enable it \
|
||||||
|
server-wide with `space_permission_cascading = true` in your config, or \
|
||||||
|
per-Space with `!admin space roles enable <space>`.",
|
||||||
|
)
|
||||||
|
.await;
|
||||||
|
}
|
||||||
|
space_id
|
||||||
|
}};
|
||||||
|
}
|
||||||
|
|
||||||
|
macro_rules! custom_state_pdu {
|
||||||
|
($event_type:expr, $state_key:expr, $content:expr) => {
|
||||||
|
PduBuilder {
|
||||||
|
event_type: $event_type.to_owned().into(),
|
||||||
|
content: to_raw_value($content)
|
||||||
|
.map_err(|e| conduwuit::err!("Failed to serialize state event content: {e}"))?,
|
||||||
|
state_key: Some($state_key.to_owned().into()),
|
||||||
|
..PduBuilder::default()
|
||||||
|
}
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Cascade-remove a role name from all state events of a given type. For each
|
||||||
|
/// event that contains the role, the `$field` is filtered and the updated
|
||||||
|
/// content is sent back as a new state event.
|
||||||
|
macro_rules! cascade_remove_role {
|
||||||
|
(
|
||||||
|
$self:expr,
|
||||||
|
$shortstatehash:expr,
|
||||||
|
$event_type_fn:expr,
|
||||||
|
$event_type_const:expr,
|
||||||
|
$content_ty:ty,
|
||||||
|
$field:ident,
|
||||||
|
$role_name:expr,
|
||||||
|
$space_id:expr,
|
||||||
|
$state_lock:expr,
|
||||||
|
$server_user:expr
|
||||||
|
) => {{
|
||||||
|
let ev_type = $event_type_fn;
|
||||||
|
let entries: Vec<(_, ruma::OwnedEventId)> = $self
|
||||||
|
.services
|
||||||
|
.rooms
|
||||||
|
.state_accessor
|
||||||
|
.state_keys_with_ids($shortstatehash, &ev_type)
|
||||||
|
.collect()
|
||||||
|
.await;
|
||||||
|
|
||||||
|
for (state_key, event_id) in entries {
|
||||||
|
if let Ok(pdu) = $self.services.rooms.timeline.get_pdu(&event_id).await {
|
||||||
|
if let Ok(mut content) = pdu.get_content::<$content_ty>() {
|
||||||
|
if content.$field.contains($role_name) {
|
||||||
|
content.$field.retain(|r| r != $role_name);
|
||||||
|
$self
|
||||||
|
.services
|
||||||
|
.rooms
|
||||||
|
.timeline
|
||||||
|
.build_and_append_pdu(
|
||||||
|
custom_state_pdu!($event_type_const, &state_key, &content),
|
||||||
|
$server_user,
|
||||||
|
Some(&$space_id),
|
||||||
|
&$state_lock,
|
||||||
|
)
|
||||||
|
.await?;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}};
|
||||||
|
}
|
||||||
|
|
||||||
|
macro_rules! send_space_state {
|
||||||
|
($self:expr, $space_id:expr, $event_type:expr, $state_key:expr, $content:expr) => {{
|
||||||
|
let state_lock = $self.services.rooms.state.mutex.lock(&$space_id).await;
|
||||||
|
let server_user = &$self.services.globals.server_user;
|
||||||
|
$self
|
||||||
|
.services
|
||||||
|
.rooms
|
||||||
|
.timeline
|
||||||
|
.build_and_append_pdu(
|
||||||
|
custom_state_pdu!($event_type, $state_key, $content),
|
||||||
|
server_user,
|
||||||
|
Some(&$space_id),
|
||||||
|
&state_lock,
|
||||||
|
)
|
||||||
|
.await?
|
||||||
|
}};
|
||||||
|
}
|
||||||
|
|
||||||
|
#[admin_command_dispatch]
|
||||||
|
#[derive(Debug, Subcommand)]
|
||||||
|
pub enum SpaceRolesCommand {
|
||||||
|
/// List all roles defined in a space
|
||||||
|
List {
|
||||||
|
space: OwnedRoomOrAliasId,
|
||||||
|
},
|
||||||
|
/// Add a new role to a space
|
||||||
|
Add {
|
||||||
|
space: OwnedRoomOrAliasId,
|
||||||
|
role_name: String,
|
||||||
|
#[arg(long)]
|
||||||
|
description: Option<String>,
|
||||||
|
#[arg(long)]
|
||||||
|
power_level: Option<i64>,
|
||||||
|
},
|
||||||
|
/// Remove a role from a space
|
||||||
|
Remove {
|
||||||
|
space: OwnedRoomOrAliasId,
|
||||||
|
role_name: String,
|
||||||
|
},
|
||||||
|
/// Assign a role to a user
|
||||||
|
Assign {
|
||||||
|
space: OwnedRoomOrAliasId,
|
||||||
|
user_id: OwnedUserId,
|
||||||
|
role_name: String,
|
||||||
|
},
|
||||||
|
/// Revoke a role from a user
|
||||||
|
Revoke {
|
||||||
|
space: OwnedRoomOrAliasId,
|
||||||
|
user_id: OwnedUserId,
|
||||||
|
role_name: String,
|
||||||
|
},
|
||||||
|
/// Require a role for a room
|
||||||
|
Require {
|
||||||
|
space: OwnedRoomOrAliasId,
|
||||||
|
room_id: OwnedRoomId,
|
||||||
|
role_name: String,
|
||||||
|
},
|
||||||
|
/// Remove a role requirement from a room
|
||||||
|
Unrequire {
|
||||||
|
space: OwnedRoomOrAliasId,
|
||||||
|
room_id: OwnedRoomId,
|
||||||
|
role_name: String,
|
||||||
|
},
|
||||||
|
/// Show a user's roles in a space
|
||||||
|
User {
|
||||||
|
space: OwnedRoomOrAliasId,
|
||||||
|
user_id: OwnedUserId,
|
||||||
|
},
|
||||||
|
/// Show a room's role requirements in a space
|
||||||
|
Room {
|
||||||
|
space: OwnedRoomOrAliasId,
|
||||||
|
room_id: OwnedRoomId,
|
||||||
|
},
|
||||||
|
/// Enable space permission cascading for a specific space (overrides
|
||||||
|
/// server config)
|
||||||
|
Enable {
|
||||||
|
space: OwnedRoomOrAliasId,
|
||||||
|
},
|
||||||
|
/// Disable space permission cascading for a specific space (overrides
|
||||||
|
/// server config)
|
||||||
|
Disable {
|
||||||
|
space: OwnedRoomOrAliasId,
|
||||||
|
},
|
||||||
|
/// Show whether cascading is enabled for a space and the source (server
|
||||||
|
/// default or per-space override)
|
||||||
|
Status {
|
||||||
|
space: OwnedRoomOrAliasId,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
#[admin_command]
|
||||||
|
async fn list(&self, space: OwnedRoomOrAliasId) -> Result {
|
||||||
|
let space_id = resolve_space!(self, space);
|
||||||
|
let roles_event_type = roles_event_type();
|
||||||
|
|
||||||
|
let content: SpaceRolesEventContent = self
|
||||||
|
.services
|
||||||
|
.rooms
|
||||||
|
.state_accessor
|
||||||
|
.room_state_get_content(&space_id, &roles_event_type, "")
|
||||||
|
.await
|
||||||
|
.unwrap_or_default();
|
||||||
|
|
||||||
|
if content.roles.is_empty() {
|
||||||
|
return self.write_str("No roles defined in this space.").await;
|
||||||
|
}
|
||||||
|
|
||||||
|
let mut msg = format!("Roles in {space_id}:\n```\n");
|
||||||
|
for (name, def) in &content.roles {
|
||||||
|
let pl = def
|
||||||
|
.power_level
|
||||||
|
.map(|p| format!(" (power_level: {p})"))
|
||||||
|
.unwrap_or_default();
|
||||||
|
let _ = writeln!(msg, "- {name}: {}{pl}", def.description);
|
||||||
|
}
|
||||||
|
msg.push_str("```");
|
||||||
|
|
||||||
|
self.write_str(&msg).await
|
||||||
|
}
|
||||||
|
|
||||||
|
#[admin_command]
|
||||||
|
async fn add(
|
||||||
|
&self,
|
||||||
|
space: OwnedRoomOrAliasId,
|
||||||
|
role_name: String,
|
||||||
|
description: Option<String>,
|
||||||
|
power_level: Option<i64>,
|
||||||
|
) -> Result {
|
||||||
|
let space_id = resolve_space!(self, space);
|
||||||
|
|
||||||
|
if let Some(pl) = power_level {
|
||||||
|
if pl > i64::from(ruma::Int::MAX) || pl < i64::from(ruma::Int::MIN) {
|
||||||
|
return Err!(
|
||||||
|
"Power level must be between {} and {}.",
|
||||||
|
ruma::Int::MIN,
|
||||||
|
ruma::Int::MAX
|
||||||
|
);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
let roles_event_type = roles_event_type();
|
||||||
|
|
||||||
|
let mut content: SpaceRolesEventContent = self
|
||||||
|
.services
|
||||||
|
.rooms
|
||||||
|
.state_accessor
|
||||||
|
.room_state_get_content(&space_id, &roles_event_type, "")
|
||||||
|
.await
|
||||||
|
.unwrap_or_default();
|
||||||
|
|
||||||
|
if content.roles.contains_key(&role_name) {
|
||||||
|
return Err!("Role '{role_name}' already exists in this space.");
|
||||||
|
}
|
||||||
|
|
||||||
|
content.roles.insert(role_name.clone(), RoleDefinition {
|
||||||
|
description: description.unwrap_or_else(|| role_name.clone()),
|
||||||
|
power_level,
|
||||||
|
});
|
||||||
|
|
||||||
|
send_space_state!(self, space_id, SPACE_ROLES_EVENT_TYPE, "", &content);
|
||||||
|
|
||||||
|
self.write_str(&format!("Added role '{role_name}' to space {space_id}."))
|
||||||
|
.await
|
||||||
|
}
|
||||||
|
|
||||||
|
#[admin_command]
|
||||||
|
async fn remove(&self, space: OwnedRoomOrAliasId, role_name: String) -> Result {
|
||||||
|
let space_id = resolve_space!(self, space);
|
||||||
|
let roles_event_type = roles_event_type();
|
||||||
|
|
||||||
|
let mut content: SpaceRolesEventContent = self
|
||||||
|
.services
|
||||||
|
.rooms
|
||||||
|
.state_accessor
|
||||||
|
.room_state_get_content(&space_id, &roles_event_type, "")
|
||||||
|
.await
|
||||||
|
.unwrap_or_default();
|
||||||
|
|
||||||
|
if content.roles.remove(&role_name).is_none() {
|
||||||
|
return Err!("Role '{role_name}' does not exist in this space.");
|
||||||
|
}
|
||||||
|
|
||||||
|
send_space_state!(self, space_id, SPACE_ROLES_EVENT_TYPE, "", &content);
|
||||||
|
|
||||||
|
// Cascade: remove the deleted role from all member and room events
|
||||||
|
let server_user = &self.services.globals.server_user;
|
||||||
|
if let Ok(shortstatehash) = self
|
||||||
|
.services
|
||||||
|
.rooms
|
||||||
|
.state
|
||||||
|
.get_room_shortstatehash(&space_id)
|
||||||
|
.await
|
||||||
|
{
|
||||||
|
let state_lock = self.services.rooms.state.mutex.lock(&space_id).await;
|
||||||
|
|
||||||
|
cascade_remove_role!(
|
||||||
|
self,
|
||||||
|
shortstatehash,
|
||||||
|
member_event_type(),
|
||||||
|
SPACE_ROLE_MEMBER_EVENT_TYPE,
|
||||||
|
SpaceRoleMemberEventContent,
|
||||||
|
roles,
|
||||||
|
&role_name,
|
||||||
|
space_id,
|
||||||
|
state_lock,
|
||||||
|
server_user
|
||||||
|
);
|
||||||
|
|
||||||
|
cascade_remove_role!(
|
||||||
|
self,
|
||||||
|
shortstatehash,
|
||||||
|
room_event_type(),
|
||||||
|
SPACE_ROLE_ROOM_EVENT_TYPE,
|
||||||
|
SpaceRoleRoomEventContent,
|
||||||
|
required_roles,
|
||||||
|
&role_name,
|
||||||
|
space_id,
|
||||||
|
state_lock,
|
||||||
|
server_user
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
self.write_str(&format!("Removed role '{role_name}' from space {space_id}."))
|
||||||
|
.await
|
||||||
|
}
|
||||||
|
|
||||||
|
#[admin_command]
|
||||||
|
async fn assign(
|
||||||
|
&self,
|
||||||
|
space: OwnedRoomOrAliasId,
|
||||||
|
user_id: OwnedUserId,
|
||||||
|
role_name: String,
|
||||||
|
) -> Result {
|
||||||
|
let space_id = resolve_space!(self, space);
|
||||||
|
|
||||||
|
let roles_event_type = roles_event_type();
|
||||||
|
let role_defs: SpaceRolesEventContent = self
|
||||||
|
.services
|
||||||
|
.rooms
|
||||||
|
.state_accessor
|
||||||
|
.room_state_get_content(&space_id, &roles_event_type, "")
|
||||||
|
.await
|
||||||
|
.unwrap_or_default();
|
||||||
|
|
||||||
|
if !role_defs.roles.contains_key(&role_name) {
|
||||||
|
return Err!("Role '{role_name}' does not exist in this space.");
|
||||||
|
}
|
||||||
|
|
||||||
|
let member_event_type = member_event_type();
|
||||||
|
|
||||||
|
let mut content: SpaceRoleMemberEventContent = self
|
||||||
|
.services
|
||||||
|
.rooms
|
||||||
|
.state_accessor
|
||||||
|
.room_state_get_content(&space_id, &member_event_type, user_id.as_str())
|
||||||
|
.await
|
||||||
|
.unwrap_or_default();
|
||||||
|
|
||||||
|
if content.roles.contains(&role_name) {
|
||||||
|
return Err!("User {user_id} already has role '{role_name}' in this space.");
|
||||||
|
}
|
||||||
|
|
||||||
|
content.roles.push(role_name.clone());
|
||||||
|
|
||||||
|
send_space_state!(self, space_id, SPACE_ROLE_MEMBER_EVENT_TYPE, user_id.as_str(), &content);
|
||||||
|
|
||||||
|
self.write_str(&format!("Assigned role '{role_name}' to {user_id} in space {space_id}."))
|
||||||
|
.await
|
||||||
|
}
|
||||||
|
|
||||||
|
#[admin_command]
|
||||||
|
async fn revoke(
|
||||||
|
&self,
|
||||||
|
space: OwnedRoomOrAliasId,
|
||||||
|
user_id: OwnedUserId,
|
||||||
|
role_name: String,
|
||||||
|
) -> Result {
|
||||||
|
let space_id = resolve_space!(self, space);
|
||||||
|
let member_event_type = member_event_type();
|
||||||
|
|
||||||
|
let mut content: SpaceRoleMemberEventContent = self
|
||||||
|
.services
|
||||||
|
.rooms
|
||||||
|
.state_accessor
|
||||||
|
.room_state_get_content(&space_id, &member_event_type, user_id.as_str())
|
||||||
|
.await
|
||||||
|
.unwrap_or_default();
|
||||||
|
|
||||||
|
let original_len = content.roles.len();
|
||||||
|
content.roles.retain(|r| r != &role_name);
|
||||||
|
|
||||||
|
if content.roles.len() == original_len {
|
||||||
|
return Err!("User {user_id} does not have role '{role_name}' in this space.");
|
||||||
|
}
|
||||||
|
|
||||||
|
send_space_state!(self, space_id, SPACE_ROLE_MEMBER_EVENT_TYPE, user_id.as_str(), &content);
|
||||||
|
|
||||||
|
self.write_str(&format!("Revoked role '{role_name}' from {user_id} in space {space_id}."))
|
||||||
|
.await
|
||||||
|
}
|
||||||
|
|
||||||
|
#[admin_command]
|
||||||
|
async fn require(
|
||||||
|
&self,
|
||||||
|
space: OwnedRoomOrAliasId,
|
||||||
|
room_id: OwnedRoomId,
|
||||||
|
role_name: String,
|
||||||
|
) -> Result {
|
||||||
|
let space_id = resolve_space!(self, space);
|
||||||
|
|
||||||
|
let child_rooms = self.services.rooms.roles.get_child_rooms(&space_id).await;
|
||||||
|
if !child_rooms.contains(&room_id) {
|
||||||
|
return Err!("Room {room_id} is not a child of space {space_id}.");
|
||||||
|
}
|
||||||
|
|
||||||
|
let roles_event_type = roles_event_type();
|
||||||
|
let role_defs: SpaceRolesEventContent = self
|
||||||
|
.services
|
||||||
|
.rooms
|
||||||
|
.state_accessor
|
||||||
|
.room_state_get_content(&space_id, &roles_event_type, "")
|
||||||
|
.await
|
||||||
|
.unwrap_or_default();
|
||||||
|
|
||||||
|
if !role_defs.roles.contains_key(&role_name) {
|
||||||
|
return Err!("Role '{role_name}' does not exist in this space.");
|
||||||
|
}
|
||||||
|
|
||||||
|
let room_event_type = room_event_type();
|
||||||
|
|
||||||
|
let mut content: SpaceRoleRoomEventContent = self
|
||||||
|
.services
|
||||||
|
.rooms
|
||||||
|
.state_accessor
|
||||||
|
.room_state_get_content(&space_id, &room_event_type, room_id.as_str())
|
||||||
|
.await
|
||||||
|
.unwrap_or_default();
|
||||||
|
|
||||||
|
if content.required_roles.contains(&role_name) {
|
||||||
|
return Err!("Room {room_id} already requires role '{role_name}' in this space.");
|
||||||
|
}
|
||||||
|
|
||||||
|
content.required_roles.push(role_name.clone());
|
||||||
|
|
||||||
|
send_space_state!(self, space_id, SPACE_ROLE_ROOM_EVENT_TYPE, room_id.as_str(), &content);
|
||||||
|
|
||||||
|
self.write_str(&format!(
|
||||||
|
"Room {room_id} now requires role '{role_name}' in space {space_id}."
|
||||||
|
))
|
||||||
|
.await
|
||||||
|
}
|
||||||
|
|
||||||
|
#[admin_command]
|
||||||
|
async fn unrequire(
|
||||||
|
&self,
|
||||||
|
space: OwnedRoomOrAliasId,
|
||||||
|
room_id: OwnedRoomId,
|
||||||
|
role_name: String,
|
||||||
|
) -> Result {
|
||||||
|
let space_id = resolve_space!(self, space);
|
||||||
|
let room_event_type = room_event_type();
|
||||||
|
|
||||||
|
let mut content: SpaceRoleRoomEventContent = self
|
||||||
|
.services
|
||||||
|
.rooms
|
||||||
|
.state_accessor
|
||||||
|
.room_state_get_content(&space_id, &room_event_type, room_id.as_str())
|
||||||
|
.await
|
||||||
|
.unwrap_or_default();
|
||||||
|
|
||||||
|
let original_len = content.required_roles.len();
|
||||||
|
content.required_roles.retain(|r| r != &role_name);
|
||||||
|
|
||||||
|
if content.required_roles.len() == original_len {
|
||||||
|
return Err!("Room {room_id} does not require role '{role_name}' in this space.");
|
||||||
|
}
|
||||||
|
|
||||||
|
send_space_state!(self, space_id, SPACE_ROLE_ROOM_EVENT_TYPE, room_id.as_str(), &content);
|
||||||
|
|
||||||
|
self.write_str(&format!(
|
||||||
|
"Removed role requirement '{role_name}' from room {room_id} in space {space_id}."
|
||||||
|
))
|
||||||
|
.await
|
||||||
|
}
|
||||||
|
|
||||||
|
#[admin_command]
|
||||||
|
async fn user(&self, space: OwnedRoomOrAliasId, user_id: OwnedUserId) -> Result {
|
||||||
|
let space_id = resolve_space!(self, space);
|
||||||
|
|
||||||
|
let roles = self
|
||||||
|
.services
|
||||||
|
.rooms
|
||||||
|
.roles
|
||||||
|
.get_user_roles_in_space(&space_id, &user_id)
|
||||||
|
.await;
|
||||||
|
|
||||||
|
match roles {
|
||||||
|
| Some(roles) if !roles.is_empty() => {
|
||||||
|
let list: String = roles
|
||||||
|
.iter()
|
||||||
|
.map(|r| format!("- {r}"))
|
||||||
|
.collect::<Vec<_>>()
|
||||||
|
.join("\n");
|
||||||
|
self.write_str(&format!("Roles for {user_id} in space {space_id}:\n```\n{list}\n```"))
|
||||||
|
.await
|
||||||
|
},
|
||||||
|
| _ =>
|
||||||
|
self.write_str(&format!("User {user_id} has no roles in space {space_id}."))
|
||||||
|
.await,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[admin_command]
|
||||||
|
async fn room(&self, space: OwnedRoomOrAliasId, room_id: OwnedRoomId) -> Result {
|
||||||
|
let space_id = resolve_space!(self, space);
|
||||||
|
|
||||||
|
let reqs = self
|
||||||
|
.services
|
||||||
|
.rooms
|
||||||
|
.roles
|
||||||
|
.get_room_requirements_in_space(&space_id, &room_id)
|
||||||
|
.await;
|
||||||
|
|
||||||
|
match reqs {
|
||||||
|
| Some(reqs) if !reqs.is_empty() => {
|
||||||
|
let list: String = reqs
|
||||||
|
.iter()
|
||||||
|
.map(|r| format!("- {r}"))
|
||||||
|
.collect::<Vec<_>>()
|
||||||
|
.join("\n");
|
||||||
|
self.write_str(&format!(
|
||||||
|
"Required roles for room {room_id} in space {space_id}:\n```\n{list}\n```"
|
||||||
|
))
|
||||||
|
.await
|
||||||
|
},
|
||||||
|
| _ =>
|
||||||
|
self.write_str(&format!(
|
||||||
|
"Room {room_id} has no role requirements in space {space_id}."
|
||||||
|
))
|
||||||
|
.await,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[admin_command]
|
||||||
|
async fn enable(&self, space: OwnedRoomOrAliasId) -> Result {
|
||||||
|
let space_id = resolve_room_as_space!(self, space);
|
||||||
|
|
||||||
|
self.services
|
||||||
|
.rooms
|
||||||
|
.roles
|
||||||
|
.ensure_default_roles(&space_id)
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
let content = SpaceCascadingEventContent { enabled: true };
|
||||||
|
send_space_state!(self, space_id, SPACE_CASCADING_EVENT_TYPE, "", &content);
|
||||||
|
|
||||||
|
self.write_str(&format!("Space permission cascading enabled for {space_id}."))
|
||||||
|
.await
|
||||||
|
}
|
||||||
|
|
||||||
|
#[admin_command]
|
||||||
|
async fn disable(&self, space: OwnedRoomOrAliasId) -> Result {
|
||||||
|
let space_id = resolve_room_as_space!(self, space);
|
||||||
|
|
||||||
|
let content = SpaceCascadingEventContent { enabled: false };
|
||||||
|
send_space_state!(self, space_id, SPACE_CASCADING_EVENT_TYPE, "", &content);
|
||||||
|
|
||||||
|
self.write_str(&format!("Space permission cascading disabled for {space_id}."))
|
||||||
|
.await
|
||||||
|
}
|
||||||
|
|
||||||
|
#[admin_command]
|
||||||
|
async fn status(&self, space: OwnedRoomOrAliasId) -> Result {
|
||||||
|
let space_id = resolve_room_as_space!(self, space);
|
||||||
|
|
||||||
|
let global_default = self.services.rooms.roles.is_enabled();
|
||||||
|
let cascading_event_type = cascading_event_type();
|
||||||
|
let per_space_override: Option<bool> = self
|
||||||
|
.services
|
||||||
|
.rooms
|
||||||
|
.state_accessor
|
||||||
|
.room_state_get_content::<SpaceCascadingEventContent>(
|
||||||
|
&space_id,
|
||||||
|
&cascading_event_type,
|
||||||
|
"",
|
||||||
|
)
|
||||||
|
.await
|
||||||
|
.ok()
|
||||||
|
.map(|c| c.enabled);
|
||||||
|
|
||||||
|
let effective = per_space_override.unwrap_or(global_default);
|
||||||
|
let source = match per_space_override {
|
||||||
|
| Some(v) => format!("per-Space override (enabled: {v})"),
|
||||||
|
| None => format!("server default (space_permission_cascading: {global_default})"),
|
||||||
|
};
|
||||||
|
|
||||||
|
self.write_str(&format!(
|
||||||
|
"Cascading status for {space_id}:\n- Effective: **{effective}**\n- Source: {source}"
|
||||||
|
))
|
||||||
|
.await
|
||||||
|
}
|
||||||
|
|
@ -296,6 +296,31 @@ pub(super) async fn reset_password(
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[admin_command]
|
||||||
|
pub(super) async fn issue_password_reset_link(&self, username: String) -> Result {
|
||||||
|
use conduwuit_service::password_reset::{PASSWORD_RESET_PATH, RESET_TOKEN_QUERY_PARAM};
|
||||||
|
|
||||||
|
self.bail_restricted()?;
|
||||||
|
|
||||||
|
let mut reset_url = self
|
||||||
|
.services
|
||||||
|
.config
|
||||||
|
.get_client_domain()
|
||||||
|
.join(PASSWORD_RESET_PATH)
|
||||||
|
.unwrap();
|
||||||
|
|
||||||
|
let user_id = parse_local_user_id(self.services, &username)?;
|
||||||
|
let token = self.services.password_reset.issue_token(user_id).await?;
|
||||||
|
reset_url
|
||||||
|
.query_pairs_mut()
|
||||||
|
.append_pair(RESET_TOKEN_QUERY_PARAM, &token.token);
|
||||||
|
|
||||||
|
self.write_str(&format!("Password reset link issued for {username}: {reset_url}"))
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
#[admin_command]
|
#[admin_command]
|
||||||
pub(super) async fn deactivate_all(&self, no_leave_rooms: bool, force: bool) -> Result {
|
pub(super) async fn deactivate_all(&self, no_leave_rooms: bool, force: bool) -> Result {
|
||||||
if self.body.len() < 2
|
if self.body.len() < 2
|
||||||
|
|
|
||||||
|
|
@ -29,6 +29,12 @@ pub enum UserCommand {
|
||||||
password: Option<String>,
|
password: Option<String>,
|
||||||
},
|
},
|
||||||
|
|
||||||
|
/// Issue a self-service password reset link for a user.
|
||||||
|
IssuePasswordResetLink {
|
||||||
|
/// Username of the user who may use the link
|
||||||
|
username: String,
|
||||||
|
},
|
||||||
|
|
||||||
/// Deactivate a user
|
/// Deactivate a user
|
||||||
///
|
///
|
||||||
/// User will be removed from all rooms by default.
|
/// User will be removed from all rooms by default.
|
||||||
|
|
|
||||||
|
|
@ -252,6 +252,13 @@ pub(crate) async fn register_route(
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Don't allow registration with user IDs that aren't local
|
||||||
|
if !services.globals.user_is_local(&user_id) {
|
||||||
|
return Err!(Request(InvalidUsername(
|
||||||
|
"Username {body_username} is not local to this server"
|
||||||
|
)));
|
||||||
|
}
|
||||||
|
|
||||||
user_id
|
user_id
|
||||||
},
|
},
|
||||||
| Err(e) => {
|
| Err(e) => {
|
||||||
|
|
|
||||||
|
|
@ -9,7 +9,7 @@ use ruma::{
|
||||||
},
|
},
|
||||||
events::{
|
events::{
|
||||||
AnyGlobalAccountDataEventContent, AnyRoomAccountDataEventContent,
|
AnyGlobalAccountDataEventContent, AnyRoomAccountDataEventContent,
|
||||||
GlobalAccountDataEventType, RoomAccountDataEventType,
|
RoomAccountDataEventType,
|
||||||
},
|
},
|
||||||
serde::Raw,
|
serde::Raw,
|
||||||
};
|
};
|
||||||
|
|
@ -126,12 +126,6 @@ async fn set_account_data(
|
||||||
)));
|
)));
|
||||||
}
|
}
|
||||||
|
|
||||||
if event_type_s == GlobalAccountDataEventType::PushRules.to_cow_str() {
|
|
||||||
return Err!(Request(BadJson(
|
|
||||||
"This endpoint cannot be used for setting/configuring push rules."
|
|
||||||
)));
|
|
||||||
}
|
|
||||||
|
|
||||||
let data: serde_json::Value = serde_json::from_str(data.get())
|
let data: serde_json::Value = serde_json::from_str(data.get())
|
||||||
.map_err(|e| err!(Request(BadJson(warn!("Invalid JSON provided: {e}")))))?;
|
.map_err(|e| err!(Request(BadJson(warn!("Invalid JSON provided: {e}")))))?;
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -1,12 +1,6 @@
|
||||||
use axum::extract::State;
|
use axum::extract::State;
|
||||||
use conduwuit::{Err, Result, debug};
|
use conduwuit::{Err, Result};
|
||||||
use conduwuit_service::Services;
|
use ruma::api::client::alias::{create_alias, delete_alias, get_alias};
|
||||||
use futures::StreamExt;
|
|
||||||
use rand::seq::SliceRandom;
|
|
||||||
use ruma::{
|
|
||||||
OwnedServerName, RoomAliasId, RoomId,
|
|
||||||
api::client::alias::{create_alias, delete_alias, get_alias},
|
|
||||||
};
|
|
||||||
|
|
||||||
use crate::Ruma;
|
use crate::Ruma;
|
||||||
|
|
||||||
|
|
@ -96,65 +90,9 @@ pub(crate) async fn get_alias_route(
|
||||||
) -> Result<get_alias::v3::Response> {
|
) -> Result<get_alias::v3::Response> {
|
||||||
let room_alias = body.body.room_alias;
|
let room_alias = body.body.room_alias;
|
||||||
|
|
||||||
let Ok((room_id, servers)) = services.rooms.alias.resolve_alias(&room_alias, None).await
|
let Ok((room_id, servers)) = services.rooms.alias.resolve_alias(&room_alias).await else {
|
||||||
else {
|
|
||||||
return Err!(Request(NotFound("Room with alias not found.")));
|
return Err!(Request(NotFound("Room with alias not found.")));
|
||||||
};
|
};
|
||||||
|
|
||||||
let servers = room_available_servers(&services, &room_id, &room_alias, servers).await;
|
|
||||||
debug!(%room_alias, %room_id, "available servers: {servers:?}");
|
|
||||||
|
|
||||||
Ok(get_alias::v3::Response::new(room_id, servers))
|
Ok(get_alias::v3::Response::new(room_id, servers))
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn room_available_servers(
|
|
||||||
services: &Services,
|
|
||||||
room_id: &RoomId,
|
|
||||||
room_alias: &RoomAliasId,
|
|
||||||
pre_servers: Vec<OwnedServerName>,
|
|
||||||
) -> Vec<OwnedServerName> {
|
|
||||||
// find active servers in room state cache to suggest
|
|
||||||
let mut servers: Vec<OwnedServerName> = services
|
|
||||||
.rooms
|
|
||||||
.state_cache
|
|
||||||
.room_servers(room_id)
|
|
||||||
.map(ToOwned::to_owned)
|
|
||||||
.collect()
|
|
||||||
.await;
|
|
||||||
|
|
||||||
// push any servers we want in the list already (e.g. responded remote alias
|
|
||||||
// servers, room alias server itself)
|
|
||||||
servers.extend(pre_servers);
|
|
||||||
|
|
||||||
servers.sort_unstable();
|
|
||||||
servers.dedup();
|
|
||||||
|
|
||||||
// shuffle list of servers randomly after sort and dedupe
|
|
||||||
servers.shuffle(&mut rand::thread_rng());
|
|
||||||
|
|
||||||
// insert our server as the very first choice if in list, else check if we can
|
|
||||||
// prefer the room alias server first
|
|
||||||
match servers
|
|
||||||
.iter()
|
|
||||||
.position(|server_name| services.globals.server_is_ours(server_name))
|
|
||||||
{
|
|
||||||
| Some(server_index) => {
|
|
||||||
servers.swap_remove(server_index);
|
|
||||||
servers.insert(0, services.globals.server_name().to_owned());
|
|
||||||
},
|
|
||||||
| _ => {
|
|
||||||
match servers
|
|
||||||
.iter()
|
|
||||||
.position(|server| server == room_alias.server_name())
|
|
||||||
{
|
|
||||||
| Some(alias_server_index) => {
|
|
||||||
servers.swap_remove(alias_server_index);
|
|
||||||
servers.insert(0, room_alias.server_name().into());
|
|
||||||
},
|
|
||||||
| _ => {},
|
|
||||||
}
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
servers
|
|
||||||
}
|
|
||||||
|
|
|
||||||
121
src/api/client/dehydrated_device.rs
Normal file
121
src/api/client/dehydrated_device.rs
Normal file
|
|
@ -0,0 +1,121 @@
|
||||||
|
use axum::extract::State;
|
||||||
|
use axum_client_ip::InsecureClientIp;
|
||||||
|
use conduwuit::{Err, Result, at};
|
||||||
|
use futures::StreamExt;
|
||||||
|
use ruma::api::client::dehydrated_device::{
|
||||||
|
delete_dehydrated_device::unstable as delete_dehydrated_device,
|
||||||
|
get_dehydrated_device::unstable as get_dehydrated_device, get_events::unstable as get_events,
|
||||||
|
put_dehydrated_device::unstable as put_dehydrated_device,
|
||||||
|
};
|
||||||
|
|
||||||
|
use crate::Ruma;
|
||||||
|
|
||||||
|
const MAX_BATCH_EVENTS: usize = 50;
|
||||||
|
|
||||||
|
/// # `PUT /_matrix/client/../dehydrated_device`
|
||||||
|
///
|
||||||
|
/// Creates or overwrites the user's dehydrated device.
|
||||||
|
#[tracing::instrument(skip_all, fields(%client))]
|
||||||
|
pub(crate) async fn put_dehydrated_device_route(
|
||||||
|
State(services): State<crate::State>,
|
||||||
|
InsecureClientIp(client): InsecureClientIp,
|
||||||
|
body: Ruma<put_dehydrated_device::Request>,
|
||||||
|
) -> Result<put_dehydrated_device::Response> {
|
||||||
|
let sender_user = body
|
||||||
|
.sender_user
|
||||||
|
.as_deref()
|
||||||
|
.expect("AccessToken authentication required");
|
||||||
|
|
||||||
|
let device_id = body.body.device_id.clone();
|
||||||
|
|
||||||
|
services
|
||||||
|
.users
|
||||||
|
.set_dehydrated_device(sender_user, body.body)
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
Ok(put_dehydrated_device::Response { device_id })
|
||||||
|
}
|
||||||
|
|
||||||
|
/// # `DELETE /_matrix/client/../dehydrated_device`
|
||||||
|
///
|
||||||
|
/// Deletes the user's dehydrated device without replacement.
|
||||||
|
#[tracing::instrument(skip_all, fields(%client))]
|
||||||
|
pub(crate) async fn delete_dehydrated_device_route(
|
||||||
|
State(services): State<crate::State>,
|
||||||
|
InsecureClientIp(client): InsecureClientIp,
|
||||||
|
body: Ruma<delete_dehydrated_device::Request>,
|
||||||
|
) -> Result<delete_dehydrated_device::Response> {
|
||||||
|
let sender_user = body.sender_user();
|
||||||
|
|
||||||
|
let device_id = services.users.get_dehydrated_device_id(sender_user).await?;
|
||||||
|
|
||||||
|
services.users.remove_device(sender_user, &device_id).await;
|
||||||
|
|
||||||
|
Ok(delete_dehydrated_device::Response { device_id })
|
||||||
|
}
|
||||||
|
|
||||||
|
/// # `GET /_matrix/client/../dehydrated_device`
|
||||||
|
///
|
||||||
|
/// Gets the user's dehydrated device
|
||||||
|
#[tracing::instrument(skip_all, fields(%client))]
|
||||||
|
pub(crate) async fn get_dehydrated_device_route(
|
||||||
|
State(services): State<crate::State>,
|
||||||
|
InsecureClientIp(client): InsecureClientIp,
|
||||||
|
body: Ruma<get_dehydrated_device::Request>,
|
||||||
|
) -> Result<get_dehydrated_device::Response> {
|
||||||
|
let sender_user = body.sender_user();
|
||||||
|
|
||||||
|
let device = services.users.get_dehydrated_device(sender_user).await?;
|
||||||
|
|
||||||
|
Ok(get_dehydrated_device::Response {
|
||||||
|
device_id: device.device_id,
|
||||||
|
device_data: device.device_data,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
/// # `GET /_matrix/client/../dehydrated_device/{device_id}/events`
|
||||||
|
///
|
||||||
|
/// Paginates the events of the dehydrated device.
|
||||||
|
#[tracing::instrument(skip_all, fields(%client))]
|
||||||
|
pub(crate) async fn get_dehydrated_events_route(
|
||||||
|
State(services): State<crate::State>,
|
||||||
|
InsecureClientIp(client): InsecureClientIp,
|
||||||
|
body: Ruma<get_events::Request>,
|
||||||
|
) -> Result<get_events::Response> {
|
||||||
|
let sender_user = body.sender_user();
|
||||||
|
|
||||||
|
let device_id = &body.body.device_id;
|
||||||
|
let existing_id = services.users.get_dehydrated_device_id(sender_user).await;
|
||||||
|
|
||||||
|
if existing_id.as_ref().is_err()
|
||||||
|
|| existing_id
|
||||||
|
.as_ref()
|
||||||
|
.is_ok_and(|existing_id| existing_id != device_id)
|
||||||
|
{
|
||||||
|
return Err!(Request(Forbidden("Not the dehydrated device_id.")));
|
||||||
|
}
|
||||||
|
|
||||||
|
let since: Option<u64> = body
|
||||||
|
.body
|
||||||
|
.next_batch
|
||||||
|
.as_deref()
|
||||||
|
.map(str::parse)
|
||||||
|
.transpose()?;
|
||||||
|
|
||||||
|
let mut next_batch: Option<u64> = None;
|
||||||
|
let events = services
|
||||||
|
.users
|
||||||
|
.get_to_device_events(sender_user, device_id, since, None)
|
||||||
|
.take(MAX_BATCH_EVENTS)
|
||||||
|
.inspect(|&(count, _)| {
|
||||||
|
next_batch.replace(count);
|
||||||
|
})
|
||||||
|
.map(at!(1))
|
||||||
|
.collect()
|
||||||
|
.await;
|
||||||
|
|
||||||
|
Ok(get_events::Response {
|
||||||
|
events,
|
||||||
|
next_batch: next_batch.as_ref().map(ToString::to_string),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
@ -6,6 +6,7 @@ use conduwuit::{
|
||||||
Err, Result, err,
|
Err, Result, err,
|
||||||
utils::{self, content_disposition::make_content_disposition, math::ruma_from_usize},
|
utils::{self, content_disposition::make_content_disposition, math::ruma_from_usize},
|
||||||
};
|
};
|
||||||
|
use conduwuit_core::error;
|
||||||
use conduwuit_service::{
|
use conduwuit_service::{
|
||||||
Services,
|
Services,
|
||||||
media::{CACHE_CONTROL_IMMUTABLE, CORP_CROSS_ORIGIN, Dim, FileMeta, MXC_LENGTH},
|
media::{CACHE_CONTROL_IMMUTABLE, CORP_CROSS_ORIGIN, Dim, FileMeta, MXC_LENGTH},
|
||||||
|
|
@ -144,12 +145,22 @@ pub(crate) async fn get_content_route(
|
||||||
server_name: &body.server_name,
|
server_name: &body.server_name,
|
||||||
media_id: &body.media_id,
|
media_id: &body.media_id,
|
||||||
};
|
};
|
||||||
|
|
||||||
let FileMeta {
|
let FileMeta {
|
||||||
content,
|
content,
|
||||||
content_type,
|
content_type,
|
||||||
content_disposition,
|
content_disposition,
|
||||||
} = fetch_file(&services, &mxc, user, body.timeout_ms, None).await?;
|
} = match fetch_file(&services, &mxc, user, body.timeout_ms, None).await {
|
||||||
|
| Ok(meta) => meta,
|
||||||
|
| Err(conduwuit::Error::Io(e)) => match e.kind() {
|
||||||
|
| std::io::ErrorKind::NotFound => return Err!(Request(NotFound("Media not found."))),
|
||||||
|
| std::io::ErrorKind::PermissionDenied => {
|
||||||
|
error!("Permission denied when trying to read file: {e:?}");
|
||||||
|
return Err!(Request(Unknown("Unknown error when fetching file.")));
|
||||||
|
},
|
||||||
|
| _ => return Err!(Request(Unknown("Unknown error when fetching file."))),
|
||||||
|
},
|
||||||
|
| Err(_) => return Err!(Request(Unknown("Unknown error when fetching file."))),
|
||||||
|
};
|
||||||
|
|
||||||
Ok(get_content::v1::Response {
|
Ok(get_content::v1::Response {
|
||||||
file: content.expect("entire file contents"),
|
file: content.expect("entire file contents"),
|
||||||
|
|
@ -185,7 +196,18 @@ pub(crate) async fn get_content_as_filename_route(
|
||||||
content,
|
content,
|
||||||
content_type,
|
content_type,
|
||||||
content_disposition,
|
content_disposition,
|
||||||
} = fetch_file(&services, &mxc, user, body.timeout_ms, Some(&body.filename)).await?;
|
} = match fetch_file(&services, &mxc, user, body.timeout_ms, None).await {
|
||||||
|
| Ok(meta) => meta,
|
||||||
|
| Err(conduwuit::Error::Io(e)) => match e.kind() {
|
||||||
|
| std::io::ErrorKind::NotFound => return Err!(Request(NotFound("Media not found."))),
|
||||||
|
| std::io::ErrorKind::PermissionDenied => {
|
||||||
|
error!("Permission denied when trying to read file: {e:?}");
|
||||||
|
return Err!(Request(Unknown("Unknown error when fetching file.")));
|
||||||
|
},
|
||||||
|
| _ => return Err!(Request(Unknown("Unknown error when fetching file."))),
|
||||||
|
},
|
||||||
|
| Err(_) => return Err!(Request(Unknown("Unknown error when fetching file."))),
|
||||||
|
};
|
||||||
|
|
||||||
Ok(get_content_as_filename::v1::Response {
|
Ok(get_content_as_filename::v1::Response {
|
||||||
file: content.expect("entire file contents"),
|
file: content.expect("entire file contents"),
|
||||||
|
|
|
||||||
|
|
@ -198,11 +198,7 @@ pub(crate) async fn join_room_by_id_or_alias_route(
|
||||||
(servers, room_id)
|
(servers, room_id)
|
||||||
},
|
},
|
||||||
| Err(room_alias) => {
|
| Err(room_alias) => {
|
||||||
let (room_id, mut servers) = services
|
let (room_id, mut servers) = services.rooms.alias.resolve_alias(&room_alias).await?;
|
||||||
.rooms
|
|
||||||
.alias
|
|
||||||
.resolve_alias(&room_alias, Some(body.via.clone()))
|
|
||||||
.await?;
|
|
||||||
|
|
||||||
banned_room_check(
|
banned_room_check(
|
||||||
&services,
|
&services,
|
||||||
|
|
@ -351,6 +347,12 @@ pub async fn join_room_by_id_helper(
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
services
|
||||||
|
.rooms
|
||||||
|
.roles
|
||||||
|
.check_join_allowed(room_id, sender_user)
|
||||||
|
.await?;
|
||||||
|
|
||||||
if server_in_room {
|
if server_in_room {
|
||||||
join_room_by_id_helper_local(services, sender_user, room_id, reason, servers, state_lock)
|
join_room_by_id_helper_local(services, sender_user, room_id, reason, servers, state_lock)
|
||||||
.boxed()
|
.boxed()
|
||||||
|
|
|
||||||
|
|
@ -102,11 +102,7 @@ pub(crate) async fn knock_room_route(
|
||||||
(servers, room_id)
|
(servers, room_id)
|
||||||
},
|
},
|
||||||
| Err(room_alias) => {
|
| Err(room_alias) => {
|
||||||
let (room_id, mut servers) = services
|
let (room_id, mut servers) = services.rooms.alias.resolve_alias(&room_alias).await?;
|
||||||
.rooms
|
|
||||||
.alias
|
|
||||||
.resolve_alias(&room_alias, Some(body.via.clone()))
|
|
||||||
.await?;
|
|
||||||
|
|
||||||
banned_room_check(
|
banned_room_check(
|
||||||
&services,
|
&services,
|
||||||
|
|
|
||||||
|
|
@ -6,6 +6,7 @@ pub(super) mod appservice;
|
||||||
pub(super) mod backup;
|
pub(super) mod backup;
|
||||||
pub(super) mod capabilities;
|
pub(super) mod capabilities;
|
||||||
pub(super) mod context;
|
pub(super) mod context;
|
||||||
|
pub(super) mod dehydrated_device;
|
||||||
pub(super) mod device;
|
pub(super) mod device;
|
||||||
pub(super) mod directory;
|
pub(super) mod directory;
|
||||||
pub(super) mod filter;
|
pub(super) mod filter;
|
||||||
|
|
@ -49,6 +50,7 @@ pub(super) use appservice::*;
|
||||||
pub(super) use backup::*;
|
pub(super) use backup::*;
|
||||||
pub(super) use capabilities::*;
|
pub(super) use capabilities::*;
|
||||||
pub(super) use context::*;
|
pub(super) use context::*;
|
||||||
|
pub(super) use dehydrated_device::*;
|
||||||
pub(super) use device::*;
|
pub(super) use device::*;
|
||||||
pub(super) use directory::*;
|
pub(super) use directory::*;
|
||||||
pub(super) use filter::*;
|
pub(super) use filter::*;
|
||||||
|
|
|
||||||
|
|
@ -4,7 +4,6 @@ use axum::extract::State;
|
||||||
use axum_client_ip::InsecureClientIp;
|
use axum_client_ip::InsecureClientIp;
|
||||||
use conduwuit::{Err, Event, Result, debug_info, info, matrix::pdu::PduEvent, utils::ReadyExt};
|
use conduwuit::{Err, Event, Result, debug_info, info, matrix::pdu::PduEvent, utils::ReadyExt};
|
||||||
use conduwuit_service::Services;
|
use conduwuit_service::Services;
|
||||||
use rand::Rng;
|
|
||||||
use ruma::{
|
use ruma::{
|
||||||
EventId, OwnedEventId, OwnedRoomId, OwnedUserId, RoomId, UserId,
|
EventId, OwnedEventId, OwnedRoomId, OwnedUserId, RoomId, UserId,
|
||||||
api::client::{
|
api::client::{
|
||||||
|
|
@ -244,7 +243,7 @@ fn build_report(report: Report) -> RoomMessageEventContent {
|
||||||
/// random delay sending a response per spec suggestion regarding
|
/// random delay sending a response per spec suggestion regarding
|
||||||
/// enumerating for potential events existing in our server.
|
/// enumerating for potential events existing in our server.
|
||||||
async fn delay_response() {
|
async fn delay_response() {
|
||||||
let time_to_wait = rand::thread_rng().gen_range(2..5);
|
let time_to_wait = rand::random_range(2..5);
|
||||||
debug_info!(
|
debug_info!(
|
||||||
"Got successful /report request, waiting {time_to_wait} seconds before sending \
|
"Got successful /report request, waiting {time_to_wait} seconds before sending \
|
||||||
successful response."
|
successful response."
|
||||||
|
|
|
||||||
|
|
@ -50,8 +50,8 @@ pub(crate) async fn send_message_event_route(
|
||||||
|
|
||||||
// Check if this is a new transaction id
|
// Check if this is a new transaction id
|
||||||
if let Ok(response) = services
|
if let Ok(response) = services
|
||||||
.transaction_ids
|
.transactions
|
||||||
.existing_txnid(sender_user, sender_device, &body.txn_id)
|
.get_client_txn(sender_user, sender_device, &body.txn_id)
|
||||||
.await
|
.await
|
||||||
{
|
{
|
||||||
// The client might have sent a txnid of the /sendToDevice endpoint
|
// The client might have sent a txnid of the /sendToDevice endpoint
|
||||||
|
|
@ -92,7 +92,7 @@ pub(crate) async fn send_message_event_route(
|
||||||
)
|
)
|
||||||
.await?;
|
.await?;
|
||||||
|
|
||||||
services.transaction_ids.add_txnid(
|
services.transactions.add_client_txnid(
|
||||||
sender_user,
|
sender_user,
|
||||||
sender_device,
|
sender_device,
|
||||||
&body.txn_id,
|
&body.txn_id,
|
||||||
|
|
|
||||||
|
|
@ -342,10 +342,10 @@ async fn allowed_to_send_state_event(
|
||||||
}
|
}
|
||||||
|
|
||||||
for alias in aliases {
|
for alias in aliases {
|
||||||
let (alias_room_id, _servers) = services
|
let (alias_room_id, _) = services
|
||||||
.rooms
|
.rooms
|
||||||
.alias
|
.alias
|
||||||
.resolve_alias(&alias, None)
|
.resolve_alias(&alias)
|
||||||
.await
|
.await
|
||||||
.map_err(|e| {
|
.map_err(|e| {
|
||||||
err!(Request(Unknown("Failed resolving alias \"{alias}\": {e}")))
|
err!(Request(Unknown("Failed resolving alias \"{alias}\": {e}")))
|
||||||
|
|
|
||||||
|
|
@ -270,7 +270,7 @@ async fn build_state_and_timeline(
|
||||||
// joined since the last sync, that being the syncing user's join event. if
|
// joined since the last sync, that being the syncing user's join event. if
|
||||||
// it's empty something is wrong.
|
// it's empty something is wrong.
|
||||||
if joined_since_last_sync && timeline.pdus.is_empty() {
|
if joined_since_last_sync && timeline.pdus.is_empty() {
|
||||||
warn!("timeline for newly joined room is empty");
|
debug_warn!("timeline for newly joined room is empty");
|
||||||
}
|
}
|
||||||
|
|
||||||
let (summary, device_list_updates) = try_join(
|
let (summary, device_list_updates) = try_join(
|
||||||
|
|
|
||||||
|
|
@ -1,5 +1,5 @@
|
||||||
use conduwuit::{
|
use conduwuit::{
|
||||||
Event, PduCount, PduEvent, Result, at, debug_warn,
|
Event, PduEvent, Result, at, debug_warn,
|
||||||
pdu::EventHash,
|
pdu::EventHash,
|
||||||
trace,
|
trace,
|
||||||
utils::{self, IterStream, future::ReadyEqExt, stream::WidebandExt as _},
|
utils::{self, IterStream, future::ReadyEqExt, stream::WidebandExt as _},
|
||||||
|
|
@ -68,9 +68,13 @@ pub(super) async fn load_left_room(
|
||||||
return Ok(None);
|
return Ok(None);
|
||||||
}
|
}
|
||||||
|
|
||||||
// return early if this is an incremental sync, and we've already synced this
|
// return early if:
|
||||||
// leave to the user, and `include_leave` isn't set on the filter.
|
// - this is an initial sync and the room filter doesn't include leaves, or
|
||||||
if !filter.room.include_leave && last_sync_end_count >= Some(left_count) {
|
// - this is an incremental sync, and we've already synced the leave, and the
|
||||||
|
// room filter doesn't include leaves
|
||||||
|
if last_sync_end_count.is_none_or(|last_sync_end_count| last_sync_end_count >= left_count)
|
||||||
|
&& !filter.room.include_leave
|
||||||
|
{
|
||||||
return Ok(None);
|
return Ok(None);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -195,27 +199,13 @@ async fn build_left_state_and_timeline(
|
||||||
leave_shortstatehash: ShortStateHash,
|
leave_shortstatehash: ShortStateHash,
|
||||||
prev_membership_event: PduEvent,
|
prev_membership_event: PduEvent,
|
||||||
) -> Result<(TimelinePdus, Vec<PduEvent>)> {
|
) -> Result<(TimelinePdus, Vec<PduEvent>)> {
|
||||||
let SyncContext {
|
let SyncContext { syncing_user, filter, .. } = sync_context;
|
||||||
syncing_user,
|
|
||||||
last_sync_end_count,
|
|
||||||
filter,
|
|
||||||
..
|
|
||||||
} = sync_context;
|
|
||||||
|
|
||||||
let timeline_start_count = if let Some(last_sync_end_count) = last_sync_end_count {
|
let timeline_start_count = services
|
||||||
// for incremental syncs, start the timeline after `since`
|
.rooms
|
||||||
PduCount::Normal(last_sync_end_count)
|
.timeline
|
||||||
} else {
|
.get_pdu_count(&prev_membership_event.event_id)
|
||||||
// for initial syncs, start the timeline after the previous membership
|
.await?;
|
||||||
// event. we don't want to include the membership event itself
|
|
||||||
// because clients get confused when they see a `join`
|
|
||||||
// membership event in a `leave` room.
|
|
||||||
services
|
|
||||||
.rooms
|
|
||||||
.timeline
|
|
||||||
.get_pdu_count(&prev_membership_event.event_id)
|
|
||||||
.await?
|
|
||||||
};
|
|
||||||
|
|
||||||
// end the timeline at the user's leave event
|
// end the timeline at the user's leave event
|
||||||
let timeline_end_count = services
|
let timeline_end_count = services
|
||||||
|
|
|
||||||
|
|
@ -11,7 +11,7 @@ use std::{
|
||||||
use axum::extract::State;
|
use axum::extract::State;
|
||||||
use axum_client_ip::InsecureClientIp;
|
use axum_client_ip::InsecureClientIp;
|
||||||
use conduwuit::{
|
use conduwuit::{
|
||||||
Result, extract_variant,
|
Result, at, extract_variant,
|
||||||
utils::{
|
utils::{
|
||||||
ReadyExt, TryFutureExtExt,
|
ReadyExt, TryFutureExtExt,
|
||||||
stream::{BroadbandExt, Tools, WidebandExt},
|
stream::{BroadbandExt, Tools, WidebandExt},
|
||||||
|
|
@ -297,12 +297,18 @@ pub(crate) async fn build_sync_events(
|
||||||
.rooms
|
.rooms
|
||||||
.state_cache
|
.state_cache
|
||||||
.rooms_left(syncing_user)
|
.rooms_left(syncing_user)
|
||||||
.broad_filter_map(|(room_id, leave_pdu)| {
|
.broad_filter_map(|(room_id, leave_pdu)| async {
|
||||||
load_left_room(services, context, room_id.clone(), leave_pdu)
|
let left_room = load_left_room(services, context, room_id.clone(), leave_pdu).await;
|
||||||
.map_ok(move |left_room| (room_id, left_room))
|
|
||||||
.ok()
|
match left_room {
|
||||||
|
| Ok(Some(left_room)) => Some((room_id, left_room)),
|
||||||
|
| Ok(None) => None,
|
||||||
|
| Err(err) => {
|
||||||
|
warn!(?err, %room_id, "error loading joined room");
|
||||||
|
None
|
||||||
|
},
|
||||||
|
}
|
||||||
})
|
})
|
||||||
.ready_filter_map(|(room_id, left_room)| left_room.map(|left_room| (room_id, left_room)))
|
|
||||||
.collect();
|
.collect();
|
||||||
|
|
||||||
let invited_rooms = services
|
let invited_rooms = services
|
||||||
|
|
@ -385,6 +391,7 @@ pub(crate) async fn build_sync_events(
|
||||||
last_sync_end_count,
|
last_sync_end_count,
|
||||||
Some(current_count),
|
Some(current_count),
|
||||||
)
|
)
|
||||||
|
.map(at!(1))
|
||||||
.collect::<Vec<_>>();
|
.collect::<Vec<_>>();
|
||||||
|
|
||||||
let device_one_time_keys_count = services
|
let device_one_time_keys_count = services
|
||||||
|
|
|
||||||
|
|
@ -336,7 +336,9 @@ where
|
||||||
let ranges = list.ranges.clone();
|
let ranges = list.ranges.clone();
|
||||||
|
|
||||||
for mut range in ranges {
|
for mut range in ranges {
|
||||||
range.0 = uint!(0);
|
range.0 = range
|
||||||
|
.0
|
||||||
|
.min(UInt::try_from(active_rooms.len()).unwrap_or(UInt::MAX));
|
||||||
range.1 = range.1.checked_add(uint!(1)).unwrap_or(range.1);
|
range.1 = range.1.checked_add(uint!(1)).unwrap_or(range.1);
|
||||||
range.1 = range
|
range.1 = range
|
||||||
.1
|
.1
|
||||||
|
|
@ -1027,6 +1029,7 @@ async fn collect_to_device(
|
||||||
events: services
|
events: services
|
||||||
.users
|
.users
|
||||||
.get_to_device_events(sender_user, sender_device, None, Some(next_batch))
|
.get_to_device_events(sender_user, sender_device, None, Some(next_batch))
|
||||||
|
.map(at!(1))
|
||||||
.collect()
|
.collect()
|
||||||
.await,
|
.await,
|
||||||
})
|
})
|
||||||
|
|
|
||||||
|
|
@ -26,8 +26,8 @@ pub(crate) async fn send_event_to_device_route(
|
||||||
|
|
||||||
// Check if this is a new transaction id
|
// Check if this is a new transaction id
|
||||||
if services
|
if services
|
||||||
.transaction_ids
|
.transactions
|
||||||
.existing_txnid(sender_user, sender_device, &body.txn_id)
|
.get_client_txn(sender_user, sender_device, &body.txn_id)
|
||||||
.await
|
.await
|
||||||
.is_ok()
|
.is_ok()
|
||||||
{
|
{
|
||||||
|
|
@ -104,8 +104,8 @@ pub(crate) async fn send_event_to_device_route(
|
||||||
|
|
||||||
// Save transaction id with empty data
|
// Save transaction id with empty data
|
||||||
services
|
services
|
||||||
.transaction_ids
|
.transactions
|
||||||
.add_txnid(sender_user, sender_device, &body.txn_id, &[]);
|
.add_client_txnid(sender_user, sender_device, &body.txn_id, &[]);
|
||||||
|
|
||||||
Ok(send_event_to_device::v3::Response {})
|
Ok(send_event_to_device::v3::Response {})
|
||||||
}
|
}
|
||||||
|
|
|
||||||
|
|
@ -50,6 +50,7 @@ pub(crate) async fn get_supported_versions_route(
|
||||||
("org.matrix.msc2836".to_owned(), true), /* threading/threads (https://github.com/matrix-org/matrix-spec-proposals/pull/2836) */
|
("org.matrix.msc2836".to_owned(), true), /* threading/threads (https://github.com/matrix-org/matrix-spec-proposals/pull/2836) */
|
||||||
("org.matrix.msc2946".to_owned(), true), /* spaces/hierarchy summaries (https://github.com/matrix-org/matrix-spec-proposals/pull/2946) */
|
("org.matrix.msc2946".to_owned(), true), /* spaces/hierarchy summaries (https://github.com/matrix-org/matrix-spec-proposals/pull/2946) */
|
||||||
("org.matrix.msc3026.busy_presence".to_owned(), true), /* busy presence status (https://github.com/matrix-org/matrix-spec-proposals/pull/3026) */
|
("org.matrix.msc3026.busy_presence".to_owned(), true), /* busy presence status (https://github.com/matrix-org/matrix-spec-proposals/pull/3026) */
|
||||||
|
("org.matrix.msc3814".to_owned(), true), /* dehydrated devices */
|
||||||
("org.matrix.msc3827".to_owned(), true), /* filtering of /publicRooms by room type (https://github.com/matrix-org/matrix-spec-proposals/pull/3827) */
|
("org.matrix.msc3827".to_owned(), true), /* filtering of /publicRooms by room type (https://github.com/matrix-org/matrix-spec-proposals/pull/3827) */
|
||||||
("org.matrix.msc3952_intentional_mentions".to_owned(), true), /* intentional mentions (https://github.com/matrix-org/matrix-spec-proposals/pull/3952) */
|
("org.matrix.msc3952_intentional_mentions".to_owned(), true), /* intentional mentions (https://github.com/matrix-org/matrix-spec-proposals/pull/3952) */
|
||||||
("org.matrix.msc3916.stable".to_owned(), true), /* authenticated media (https://github.com/matrix-org/matrix-spec-proposals/pull/3916) */
|
("org.matrix.msc3916.stable".to_owned(), true), /* authenticated media (https://github.com/matrix-org/matrix-spec-proposals/pull/3916) */
|
||||||
|
|
|
||||||
|
|
@ -27,10 +27,32 @@ pub(crate) async fn well_known_client(
|
||||||
identity_server: None,
|
identity_server: None,
|
||||||
sliding_sync_proxy: Some(SlidingSyncProxyInfo { url: client_url }),
|
sliding_sync_proxy: Some(SlidingSyncProxyInfo { url: client_url }),
|
||||||
tile_server: None,
|
tile_server: None,
|
||||||
rtc_foci: services.config.well_known.rtc_focus_server_urls.clone(),
|
rtc_foci: services
|
||||||
|
.config
|
||||||
|
.matrix_rtc
|
||||||
|
.effective_foci(&services.config.well_known.rtc_focus_server_urls)
|
||||||
|
.to_vec(),
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// # `GET /_matrix/client/v1/rtc/transports`
|
||||||
|
/// # `GET /_matrix/client/unstable/org.matrix.msc4143/rtc/transports`
|
||||||
|
///
|
||||||
|
/// Returns the list of MatrixRTC foci (transports) configured for this
|
||||||
|
/// homeserver, implementing MSC4143.
|
||||||
|
pub(crate) async fn get_rtc_transports(
|
||||||
|
State(services): State<crate::State>,
|
||||||
|
_body: Ruma<ruma::api::client::discovery::get_rtc_transports::Request>,
|
||||||
|
) -> Result<ruma::api::client::discovery::get_rtc_transports::Response> {
|
||||||
|
Ok(ruma::api::client::discovery::get_rtc_transports::Response::new(
|
||||||
|
services
|
||||||
|
.config
|
||||||
|
.matrix_rtc
|
||||||
|
.effective_foci(&services.config.well_known.rtc_focus_server_urls)
|
||||||
|
.to_vec(),
|
||||||
|
))
|
||||||
|
}
|
||||||
|
|
||||||
/// # `GET /.well-known/matrix/support`
|
/// # `GET /.well-known/matrix/support`
|
||||||
///
|
///
|
||||||
/// Server support contact and support page of a homeserver's domain.
|
/// Server support contact and support page of a homeserver's domain.
|
||||||
|
|
|
||||||
|
|
@ -122,23 +122,23 @@ pub fn build(router: Router<State>, server: &Server) -> Router<State> {
|
||||||
// Ruma doesn't have support for multiple paths for a single endpoint yet, and these routes
|
// Ruma doesn't have support for multiple paths for a single endpoint yet, and these routes
|
||||||
// share one Ruma request / response type pair with {get,send}_state_event_for_key_route
|
// share one Ruma request / response type pair with {get,send}_state_event_for_key_route
|
||||||
.route(
|
.route(
|
||||||
"/_matrix/client/r0/rooms/:room_id/state/:event_type",
|
"/_matrix/client/r0/rooms/{room_id}/state/{event_type}",
|
||||||
get(client::get_state_events_for_empty_key_route)
|
get(client::get_state_events_for_empty_key_route)
|
||||||
.put(client::send_state_event_for_empty_key_route),
|
.put(client::send_state_event_for_empty_key_route),
|
||||||
)
|
)
|
||||||
.route(
|
.route(
|
||||||
"/_matrix/client/v3/rooms/:room_id/state/:event_type",
|
"/_matrix/client/v3/rooms/{room_id}/state/{event_type}",
|
||||||
get(client::get_state_events_for_empty_key_route)
|
get(client::get_state_events_for_empty_key_route)
|
||||||
.put(client::send_state_event_for_empty_key_route),
|
.put(client::send_state_event_for_empty_key_route),
|
||||||
)
|
)
|
||||||
// These two endpoints allow trailing slashes
|
// These two endpoints allow trailing slashes
|
||||||
.route(
|
.route(
|
||||||
"/_matrix/client/r0/rooms/:room_id/state/:event_type/",
|
"/_matrix/client/r0/rooms/{room_id}/state/{event_type}/",
|
||||||
get(client::get_state_events_for_empty_key_route)
|
get(client::get_state_events_for_empty_key_route)
|
||||||
.put(client::send_state_event_for_empty_key_route),
|
.put(client::send_state_event_for_empty_key_route),
|
||||||
)
|
)
|
||||||
.route(
|
.route(
|
||||||
"/_matrix/client/v3/rooms/:room_id/state/:event_type/",
|
"/_matrix/client/v3/rooms/{room_id}/state/{event_type}/",
|
||||||
get(client::get_state_events_for_empty_key_route)
|
get(client::get_state_events_for_empty_key_route)
|
||||||
.put(client::send_state_event_for_empty_key_route),
|
.put(client::send_state_event_for_empty_key_route),
|
||||||
)
|
)
|
||||||
|
|
@ -160,6 +160,10 @@ pub fn build(router: Router<State>, server: &Server) -> Router<State> {
|
||||||
.ruma_route(&client::update_device_route)
|
.ruma_route(&client::update_device_route)
|
||||||
.ruma_route(&client::delete_device_route)
|
.ruma_route(&client::delete_device_route)
|
||||||
.ruma_route(&client::delete_devices_route)
|
.ruma_route(&client::delete_devices_route)
|
||||||
|
.ruma_route(&client::put_dehydrated_device_route)
|
||||||
|
.ruma_route(&client::delete_dehydrated_device_route)
|
||||||
|
.ruma_route(&client::get_dehydrated_device_route)
|
||||||
|
.ruma_route(&client::get_dehydrated_events_route)
|
||||||
.ruma_route(&client::get_tags_route)
|
.ruma_route(&client::get_tags_route)
|
||||||
.ruma_route(&client::update_tag_route)
|
.ruma_route(&client::update_tag_route)
|
||||||
.ruma_route(&client::delete_tag_route)
|
.ruma_route(&client::delete_tag_route)
|
||||||
|
|
@ -177,13 +181,14 @@ pub fn build(router: Router<State>, server: &Server) -> Router<State> {
|
||||||
.ruma_route(&client::get_mutual_rooms_route)
|
.ruma_route(&client::get_mutual_rooms_route)
|
||||||
.ruma_route(&client::get_room_summary)
|
.ruma_route(&client::get_room_summary)
|
||||||
.route(
|
.route(
|
||||||
"/_matrix/client/unstable/im.nheko.summary/rooms/:room_id_or_alias/summary",
|
"/_matrix/client/unstable/im.nheko.summary/rooms/{room_id_or_alias}/summary",
|
||||||
get(client::get_room_summary_legacy)
|
get(client::get_room_summary_legacy)
|
||||||
)
|
)
|
||||||
.ruma_route(&client::get_suspended_status)
|
.ruma_route(&client::get_suspended_status)
|
||||||
.ruma_route(&client::put_suspended_status)
|
.ruma_route(&client::put_suspended_status)
|
||||||
.ruma_route(&client::well_known_support)
|
.ruma_route(&client::well_known_support)
|
||||||
.ruma_route(&client::well_known_client)
|
.ruma_route(&client::well_known_client)
|
||||||
|
.ruma_route(&client::get_rtc_transports)
|
||||||
.route("/_conduwuit/server_version", get(client::conduwuit_server_version))
|
.route("/_conduwuit/server_version", get(client::conduwuit_server_version))
|
||||||
.route("/_continuwuity/server_version", get(client::conduwuit_server_version))
|
.route("/_continuwuity/server_version", get(client::conduwuit_server_version))
|
||||||
.ruma_route(&client::room_initial_sync_route)
|
.ruma_route(&client::room_initial_sync_route)
|
||||||
|
|
@ -196,7 +201,7 @@ pub fn build(router: Router<State>, server: &Server) -> Router<State> {
|
||||||
.ruma_route(&server::get_server_version_route)
|
.ruma_route(&server::get_server_version_route)
|
||||||
.route("/_matrix/key/v2/server", get(server::get_server_keys_route))
|
.route("/_matrix/key/v2/server", get(server::get_server_keys_route))
|
||||||
.route(
|
.route(
|
||||||
"/_matrix/key/v2/server/:key_id",
|
"/_matrix/key/v2/server/{key_id}",
|
||||||
get(server::get_server_keys_deprecated_route),
|
get(server::get_server_keys_deprecated_route),
|
||||||
)
|
)
|
||||||
.ruma_route(&server::get_public_rooms_route)
|
.ruma_route(&server::get_public_rooms_route)
|
||||||
|
|
@ -232,9 +237,9 @@ pub fn build(router: Router<State>, server: &Server) -> Router<State> {
|
||||||
.route("/_continuwuity/local_user_count", get(client::conduwuit_local_user_count));
|
.route("/_continuwuity/local_user_count", get(client::conduwuit_local_user_count));
|
||||||
} else {
|
} else {
|
||||||
router = router
|
router = router
|
||||||
.route("/_matrix/federation/*path", any(federation_disabled))
|
.route("/_matrix/federation/{*path}", any(federation_disabled))
|
||||||
.route("/.well-known/matrix/server", any(federation_disabled))
|
.route("/.well-known/matrix/server", any(federation_disabled))
|
||||||
.route("/_matrix/key/*path", any(federation_disabled))
|
.route("/_matrix/key/{*path}", any(federation_disabled))
|
||||||
.route("/_conduwuit/local_user_count", any(federation_disabled))
|
.route("/_conduwuit/local_user_count", any(federation_disabled))
|
||||||
.route("/_continuwuity/local_user_count", any(federation_disabled));
|
.route("/_continuwuity/local_user_count", any(federation_disabled));
|
||||||
}
|
}
|
||||||
|
|
@ -253,27 +258,27 @@ pub fn build(router: Router<State>, server: &Server) -> Router<State> {
|
||||||
get(client::get_media_preview_legacy_legacy_route),
|
get(client::get_media_preview_legacy_legacy_route),
|
||||||
)
|
)
|
||||||
.route(
|
.route(
|
||||||
"/_matrix/media/v1/download/:server_name/:media_id",
|
"/_matrix/media/v1/download/{server_name}/{media_id}",
|
||||||
get(client::get_content_legacy_legacy_route),
|
get(client::get_content_legacy_legacy_route),
|
||||||
)
|
)
|
||||||
.route(
|
.route(
|
||||||
"/_matrix/media/v1/download/:server_name/:media_id/:file_name",
|
"/_matrix/media/v1/download/{server_name}/{media_id}/{file_name}",
|
||||||
get(client::get_content_as_filename_legacy_legacy_route),
|
get(client::get_content_as_filename_legacy_legacy_route),
|
||||||
)
|
)
|
||||||
.route(
|
.route(
|
||||||
"/_matrix/media/v1/thumbnail/:server_name/:media_id",
|
"/_matrix/media/v1/thumbnail/{server_name}/{media_id}",
|
||||||
get(client::get_content_thumbnail_legacy_legacy_route),
|
get(client::get_content_thumbnail_legacy_legacy_route),
|
||||||
);
|
);
|
||||||
} else {
|
} else {
|
||||||
router = router
|
router = router
|
||||||
.route("/_matrix/media/v1/*path", any(legacy_media_disabled))
|
.route("/_matrix/media/v1/{*path}", any(legacy_media_disabled))
|
||||||
.route("/_matrix/media/v3/config", any(legacy_media_disabled))
|
.route("/_matrix/media/v3/config", any(legacy_media_disabled))
|
||||||
.route("/_matrix/media/v3/download/*path", any(legacy_media_disabled))
|
.route("/_matrix/media/v3/download/{*path}", any(legacy_media_disabled))
|
||||||
.route("/_matrix/media/v3/thumbnail/*path", any(legacy_media_disabled))
|
.route("/_matrix/media/v3/thumbnail/{*path}", any(legacy_media_disabled))
|
||||||
.route("/_matrix/media/v3/preview_url", any(redirect_legacy_preview))
|
.route("/_matrix/media/v3/preview_url", any(redirect_legacy_preview))
|
||||||
.route("/_matrix/media/r0/config", any(legacy_media_disabled))
|
.route("/_matrix/media/r0/config", any(legacy_media_disabled))
|
||||||
.route("/_matrix/media/r0/download/*path", any(legacy_media_disabled))
|
.route("/_matrix/media/r0/download/{*path}", any(legacy_media_disabled))
|
||||||
.route("/_matrix/media/r0/thumbnail/*path", any(legacy_media_disabled))
|
.route("/_matrix/media/r0/thumbnail/{*path}", any(legacy_media_disabled))
|
||||||
.route("/_matrix/media/r0/preview_url", any(redirect_legacy_preview));
|
.route("/_matrix/media/r0/preview_url", any(redirect_legacy_preview));
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -1,6 +1,5 @@
|
||||||
use std::{mem, ops::Deref};
|
use std::{mem, ops::Deref};
|
||||||
|
|
||||||
use async_trait::async_trait;
|
|
||||||
use axum::{body::Body, extract::FromRequest};
|
use axum::{body::Body, extract::FromRequest};
|
||||||
use bytes::{BufMut, Bytes, BytesMut};
|
use bytes::{BufMut, Bytes, BytesMut};
|
||||||
use conduwuit::{Error, Result, debug, debug_warn, err, trace, utils::string::EMPTY};
|
use conduwuit::{Error, Result, debug, debug_warn, err, trace, utils::string::EMPTY};
|
||||||
|
|
@ -79,7 +78,6 @@ where
|
||||||
fn deref(&self) -> &Self::Target { &self.body }
|
fn deref(&self) -> &Self::Target { &self.body }
|
||||||
}
|
}
|
||||||
|
|
||||||
#[async_trait]
|
|
||||||
impl<T> FromRequest<State, Body> for Args<T>
|
impl<T> FromRequest<State, Body> for Args<T>
|
||||||
where
|
where
|
||||||
T: IncomingRequest + Send + Sync + 'static,
|
T: IncomingRequest + Send + Sync + 'static,
|
||||||
|
|
|
||||||
|
|
@ -14,7 +14,8 @@ use futures::{
|
||||||
pin_mut,
|
pin_mut,
|
||||||
};
|
};
|
||||||
use ruma::{
|
use ruma::{
|
||||||
CanonicalJsonObject, CanonicalJsonValue, OwnedDeviceId, OwnedServerName, OwnedUserId, UserId,
|
CanonicalJsonObject, CanonicalJsonValue, DeviceId, OwnedDeviceId, OwnedServerName,
|
||||||
|
OwnedUserId, UserId,
|
||||||
api::{
|
api::{
|
||||||
AuthScheme, IncomingRequest, Metadata,
|
AuthScheme, IncomingRequest, Metadata,
|
||||||
client::{
|
client::{
|
||||||
|
|
@ -54,7 +55,8 @@ pub(super) async fn auth(
|
||||||
json_body: Option<&CanonicalJsonValue>,
|
json_body: Option<&CanonicalJsonValue>,
|
||||||
metadata: &Metadata,
|
metadata: &Metadata,
|
||||||
) -> Result<Auth> {
|
) -> Result<Auth> {
|
||||||
let bearer: Option<TypedHeader<Authorization<Bearer>>> = request.parts.extract().await?;
|
let bearer: Option<TypedHeader<Authorization<Bearer>>> =
|
||||||
|
request.parts.extract().await.unwrap_or(None);
|
||||||
let token = match &bearer {
|
let token = match &bearer {
|
||||||
| Some(TypedHeader(Authorization(bearer))) => Some(bearer.token()),
|
| Some(TypedHeader(Authorization(bearer))) => Some(bearer.token()),
|
||||||
| None => request.query.access_token.as_deref(),
|
| None => request.query.access_token.as_deref(),
|
||||||
|
|
@ -65,23 +67,17 @@ pub(super) async fn auth(
|
||||||
if metadata.authentication == AuthScheme::None {
|
if metadata.authentication == AuthScheme::None {
|
||||||
match metadata {
|
match metadata {
|
||||||
| &get_public_rooms::v3::Request::METADATA => {
|
| &get_public_rooms::v3::Request::METADATA => {
|
||||||
if !services
|
match token {
|
||||||
.server
|
| Token::Appservice(_) | Token::User(_) => {
|
||||||
.config
|
// we should have validated the token above
|
||||||
.allow_public_room_directory_without_auth
|
// already
|
||||||
{
|
},
|
||||||
match token {
|
| Token::None | Token::Invalid => {
|
||||||
| Token::Appservice(_) | Token::User(_) => {
|
return Err(Error::BadRequest(
|
||||||
// we should have validated the token above
|
ErrorKind::MissingToken,
|
||||||
// already
|
"Missing or invalid access token.",
|
||||||
},
|
));
|
||||||
| Token::None | Token::Invalid => {
|
},
|
||||||
return Err(Error::BadRequest(
|
|
||||||
ErrorKind::MissingToken,
|
|
||||||
"Missing or invalid access token.",
|
|
||||||
));
|
|
||||||
},
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
| &get_profile::v3::Request::METADATA
|
| &get_profile::v3::Request::METADATA
|
||||||
|
|
@ -233,10 +229,33 @@ async fn auth_appservice(
|
||||||
return Err!(Request(Exclusive("User is not in namespace.")));
|
return Err!(Request(Exclusive("User is not in namespace.")));
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// MSC3202/MSC4190: Handle device_id masquerading for appservices.
|
||||||
|
// The device_id can be provided via `device_id` or
|
||||||
|
// `org.matrix.msc3202.device_id` query parameter.
|
||||||
|
let sender_device = if let Some(ref device_id_str) = request.query.device_id {
|
||||||
|
let device_id: &DeviceId = device_id_str.as_str().into();
|
||||||
|
|
||||||
|
// Verify the device exists for this user
|
||||||
|
if services
|
||||||
|
.users
|
||||||
|
.get_device_metadata(&user_id, device_id)
|
||||||
|
.await
|
||||||
|
.is_err()
|
||||||
|
{
|
||||||
|
return Err!(Request(Forbidden(
|
||||||
|
"Device does not exist for user or appservice cannot masquerade as this device."
|
||||||
|
)));
|
||||||
|
}
|
||||||
|
|
||||||
|
Some(device_id.to_owned())
|
||||||
|
} else {
|
||||||
|
None
|
||||||
|
};
|
||||||
|
|
||||||
Ok(Auth {
|
Ok(Auth {
|
||||||
origin: None,
|
origin: None,
|
||||||
sender_user: Some(user_id),
|
sender_user: Some(user_id),
|
||||||
sender_device: None,
|
sender_device,
|
||||||
appservice_info: Some(*info),
|
appservice_info: Some(*info),
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
|
||||||
|
|
@ -11,6 +11,10 @@ use service::Services;
|
||||||
pub(super) struct QueryParams {
|
pub(super) struct QueryParams {
|
||||||
pub(super) access_token: Option<String>,
|
pub(super) access_token: Option<String>,
|
||||||
pub(super) user_id: Option<String>,
|
pub(super) user_id: Option<String>,
|
||||||
|
/// Device ID for appservice device masquerading (MSC3202/MSC4190).
|
||||||
|
/// Can be provided as `device_id` or `org.matrix.msc3202.device_id`.
|
||||||
|
#[serde(alias = "org.matrix.msc3202.device_id")]
|
||||||
|
pub(super) device_id: Option<String>,
|
||||||
}
|
}
|
||||||
|
|
||||||
pub(super) struct Request {
|
pub(super) struct Request {
|
||||||
|
|
|
||||||
|
|
@ -40,7 +40,7 @@ pub(crate) async fn get_room_information_route(
|
||||||
servers.sort_unstable();
|
servers.sort_unstable();
|
||||||
servers.dedup();
|
servers.dedup();
|
||||||
|
|
||||||
servers.shuffle(&mut rand::thread_rng());
|
servers.shuffle(&mut rand::rng());
|
||||||
|
|
||||||
// insert our server as the very first choice if in list
|
// insert our server as the very first choice if in list
|
||||||
if let Some(server_index) = servers
|
if let Some(server_index) = servers
|
||||||
|
|
|
||||||
|
|
@ -1,27 +1,33 @@
|
||||||
use std::{collections::BTreeMap, net::IpAddr, time::Instant};
|
use std::{
|
||||||
|
collections::{BTreeMap, HashMap, HashSet},
|
||||||
|
net::IpAddr,
|
||||||
|
time::{Duration, Instant},
|
||||||
|
};
|
||||||
|
|
||||||
use axum::extract::State;
|
use axum::extract::State;
|
||||||
use axum_client_ip::InsecureClientIp;
|
use axum_client_ip::InsecureClientIp;
|
||||||
use conduwuit::{
|
use conduwuit::{
|
||||||
Err, Error, Result, debug, debug_warn, err, error,
|
Err, Error, Result, debug, debug_warn, err, error,
|
||||||
result::LogErr,
|
result::LogErr,
|
||||||
|
state_res::lexicographical_topological_sort,
|
||||||
trace,
|
trace,
|
||||||
utils::{
|
utils::{
|
||||||
IterStream, ReadyExt, millis_since_unix_epoch,
|
IterStream, ReadyExt, millis_since_unix_epoch,
|
||||||
stream::{BroadbandExt, TryBroadbandExt, automatic_width},
|
stream::{BroadbandExt, TryBroadbandExt, automatic_width},
|
||||||
},
|
},
|
||||||
warn,
|
|
||||||
};
|
};
|
||||||
use conduwuit_service::{
|
use conduwuit_service::{
|
||||||
Services,
|
Services,
|
||||||
sending::{EDU_LIMIT, PDU_LIMIT},
|
sending::{EDU_LIMIT, PDU_LIMIT},
|
||||||
};
|
};
|
||||||
use futures::{FutureExt, Stream, StreamExt, TryFutureExt, TryStreamExt};
|
use futures::{FutureExt, Stream, StreamExt, TryFutureExt, TryStreamExt};
|
||||||
|
use http::StatusCode;
|
||||||
use itertools::Itertools;
|
use itertools::Itertools;
|
||||||
use ruma::{
|
use ruma::{
|
||||||
CanonicalJsonObject, OwnedEventId, OwnedRoomId, OwnedUserId, RoomId, ServerName, UserId,
|
CanonicalJsonObject, MilliSecondsSinceUnixEpoch, OwnedEventId, OwnedRoomId, OwnedUserId,
|
||||||
|
RoomId, ServerName, UserId,
|
||||||
api::{
|
api::{
|
||||||
client::error::ErrorKind,
|
client::error::{ErrorKind, ErrorKind::LimitExceeded},
|
||||||
federation::transactions::{
|
federation::transactions::{
|
||||||
edu::{
|
edu::{
|
||||||
DeviceListUpdateContent, DirectDeviceContent, Edu, PresenceContent,
|
DeviceListUpdateContent, DirectDeviceContent, Edu, PresenceContent,
|
||||||
|
|
@ -32,9 +38,16 @@ use ruma::{
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
events::receipt::{ReceiptEvent, ReceiptEventContent, ReceiptType},
|
events::receipt::{ReceiptEvent, ReceiptEventContent, ReceiptType},
|
||||||
|
int,
|
||||||
serde::Raw,
|
serde::Raw,
|
||||||
to_device::DeviceIdOrAllDevices,
|
to_device::DeviceIdOrAllDevices,
|
||||||
|
uint,
|
||||||
};
|
};
|
||||||
|
use service::transactions::{
|
||||||
|
FederationTxnState, TransactionError, TxnKey, WrappedTransactionResponse,
|
||||||
|
};
|
||||||
|
use tokio::sync::watch::{Receiver, Sender};
|
||||||
|
use tracing::instrument;
|
||||||
|
|
||||||
use crate::Ruma;
|
use crate::Ruma;
|
||||||
|
|
||||||
|
|
@ -44,15 +57,6 @@ type Pdu = (OwnedRoomId, OwnedEventId, CanonicalJsonObject);
|
||||||
/// # `PUT /_matrix/federation/v1/send/{txnId}`
|
/// # `PUT /_matrix/federation/v1/send/{txnId}`
|
||||||
///
|
///
|
||||||
/// Push EDUs and PDUs to this server.
|
/// Push EDUs and PDUs to this server.
|
||||||
#[tracing::instrument(
|
|
||||||
name = "txn",
|
|
||||||
level = "debug",
|
|
||||||
skip_all,
|
|
||||||
fields(
|
|
||||||
%client,
|
|
||||||
origin = body.origin().as_str()
|
|
||||||
),
|
|
||||||
)]
|
|
||||||
pub(crate) async fn send_transaction_message_route(
|
pub(crate) async fn send_transaction_message_route(
|
||||||
State(services): State<crate::State>,
|
State(services): State<crate::State>,
|
||||||
InsecureClientIp(client): InsecureClientIp,
|
InsecureClientIp(client): InsecureClientIp,
|
||||||
|
|
@ -76,16 +80,73 @@ pub(crate) async fn send_transaction_message_route(
|
||||||
)));
|
)));
|
||||||
}
|
}
|
||||||
|
|
||||||
let txn_start_time = Instant::now();
|
let txn_key = (body.origin().to_owned(), body.transaction_id.clone());
|
||||||
trace!(
|
|
||||||
pdus = body.pdus.len(),
|
|
||||||
edus = body.edus.len(),
|
|
||||||
elapsed = ?txn_start_time.elapsed(),
|
|
||||||
id = %body.transaction_id,
|
|
||||||
origin = %body.origin(),
|
|
||||||
"Starting txn",
|
|
||||||
);
|
|
||||||
|
|
||||||
|
// Atomically check cache, join active, or start new transaction
|
||||||
|
match services
|
||||||
|
.transactions
|
||||||
|
.get_or_start_federation_txn(txn_key.clone())?
|
||||||
|
{
|
||||||
|
| FederationTxnState::Cached(response) => {
|
||||||
|
// Already responded
|
||||||
|
Ok(response)
|
||||||
|
},
|
||||||
|
| FederationTxnState::Active(receiver) => {
|
||||||
|
// Another thread is processing
|
||||||
|
wait_for_result(receiver).await
|
||||||
|
},
|
||||||
|
| FederationTxnState::Started { receiver, sender } => {
|
||||||
|
// We're the first, spawn the processing task
|
||||||
|
services
|
||||||
|
.server
|
||||||
|
.runtime()
|
||||||
|
.spawn(process_inbound_transaction(services, body, client, txn_key, sender));
|
||||||
|
// and wait for it
|
||||||
|
wait_for_result(receiver).await
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn wait_for_result(
|
||||||
|
mut recv: Receiver<WrappedTransactionResponse>,
|
||||||
|
) -> Result<send_transaction_message::v1::Response> {
|
||||||
|
if tokio::time::timeout(Duration::from_secs(50), recv.changed())
|
||||||
|
.await
|
||||||
|
.is_err()
|
||||||
|
{
|
||||||
|
// Took too long, return 429 to encourage the sender to try again
|
||||||
|
return Err(Error::BadRequest(
|
||||||
|
LimitExceeded { retry_after: None },
|
||||||
|
"Transaction is being still being processed. Please try again later.",
|
||||||
|
));
|
||||||
|
}
|
||||||
|
let value = recv.borrow_and_update();
|
||||||
|
match value.clone() {
|
||||||
|
| Some(Ok(response)) => Ok(response),
|
||||||
|
| Some(Err(err)) => Err(transaction_error_to_response(&err)),
|
||||||
|
| None => Err(Error::Request(
|
||||||
|
ErrorKind::Unknown,
|
||||||
|
"Transaction processing failed unexpectedly".into(),
|
||||||
|
StatusCode::INTERNAL_SERVER_ERROR,
|
||||||
|
)),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[instrument(
|
||||||
|
skip_all,
|
||||||
|
fields(
|
||||||
|
id = ?body.transaction_id.as_str(),
|
||||||
|
origin = ?body.origin()
|
||||||
|
)
|
||||||
|
)]
|
||||||
|
async fn process_inbound_transaction(
|
||||||
|
services: crate::State,
|
||||||
|
body: Ruma<send_transaction_message::v1::Request>,
|
||||||
|
client: IpAddr,
|
||||||
|
txn_key: TxnKey,
|
||||||
|
sender: Sender<WrappedTransactionResponse>,
|
||||||
|
) {
|
||||||
|
let txn_start_time = Instant::now();
|
||||||
let pdus = body
|
let pdus = body
|
||||||
.pdus
|
.pdus
|
||||||
.iter()
|
.iter()
|
||||||
|
|
@ -102,40 +163,79 @@ pub(crate) async fn send_transaction_message_route(
|
||||||
.filter_map(Result::ok)
|
.filter_map(Result::ok)
|
||||||
.stream();
|
.stream();
|
||||||
|
|
||||||
let results = handle(&services, &client, body.origin(), txn_start_time, pdus, edus).await?;
|
debug!(pdus = body.pdus.len(), edus = body.edus.len(), "Processing transaction",);
|
||||||
|
let results = match handle(&services, &client, body.origin(), pdus, edus).await {
|
||||||
|
| Ok(results) => results,
|
||||||
|
| Err(err) => {
|
||||||
|
fail_federation_txn(services, &txn_key, &sender, err);
|
||||||
|
return;
|
||||||
|
},
|
||||||
|
};
|
||||||
|
|
||||||
|
for (id, result) in &results {
|
||||||
|
if let Err(e) = result {
|
||||||
|
if matches!(e, Error::BadRequest(ErrorKind::NotFound, _)) {
|
||||||
|
debug_warn!("Incoming PDU failed {id}: {e:?}");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
debug!(
|
debug!(
|
||||||
pdus = body.pdus.len(),
|
pdus = body.pdus.len(),
|
||||||
edus = body.edus.len(),
|
edus = body.edus.len(),
|
||||||
elapsed = ?txn_start_time.elapsed(),
|
elapsed = ?txn_start_time.elapsed(),
|
||||||
id = %body.transaction_id,
|
"Finished processing transaction"
|
||||||
origin = %body.origin(),
|
|
||||||
"Finished txn",
|
|
||||||
);
|
);
|
||||||
for (id, result) in &results {
|
|
||||||
if let Err(e) = result {
|
|
||||||
if matches!(e, Error::BadRequest(ErrorKind::NotFound, _)) {
|
|
||||||
warn!("Incoming PDU failed {id}: {e:?}");
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(send_transaction_message::v1::Response {
|
let response = send_transaction_message::v1::Response {
|
||||||
pdus: results
|
pdus: results
|
||||||
.into_iter()
|
.into_iter()
|
||||||
.map(|(e, r)| (e, r.map_err(error::sanitized_message)))
|
.map(|(e, r)| (e, r.map_err(error::sanitized_message)))
|
||||||
.collect(),
|
.collect(),
|
||||||
})
|
};
|
||||||
|
|
||||||
|
services
|
||||||
|
.transactions
|
||||||
|
.finish_federation_txn(txn_key, sender, response);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Handles a failed federation transaction by sending the error through
|
||||||
|
/// the channel and cleaning up the transaction state. This allows waiters to
|
||||||
|
/// receive an appropriate error response.
|
||||||
|
fn fail_federation_txn(
|
||||||
|
services: crate::State,
|
||||||
|
txn_key: &TxnKey,
|
||||||
|
sender: &Sender<WrappedTransactionResponse>,
|
||||||
|
err: TransactionError,
|
||||||
|
) {
|
||||||
|
debug!("Transaction failed: {err}");
|
||||||
|
|
||||||
|
// Remove from active state so the transaction can be retried
|
||||||
|
services.transactions.remove_federation_txn(txn_key);
|
||||||
|
|
||||||
|
// Send the error to any waiters
|
||||||
|
if let Err(e) = sender.send(Some(Err(err))) {
|
||||||
|
debug_warn!("Failed to send transaction error to receivers: {e}");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Converts a TransactionError into an appropriate HTTP error response.
|
||||||
|
fn transaction_error_to_response(err: &TransactionError) -> Error {
|
||||||
|
match err {
|
||||||
|
| TransactionError::ShuttingDown => Error::Request(
|
||||||
|
ErrorKind::Unknown,
|
||||||
|
"Server is shutting down, please retry later".into(),
|
||||||
|
StatusCode::SERVICE_UNAVAILABLE,
|
||||||
|
),
|
||||||
|
}
|
||||||
|
}
|
||||||
async fn handle(
|
async fn handle(
|
||||||
services: &Services,
|
services: &Services,
|
||||||
client: &IpAddr,
|
client: &IpAddr,
|
||||||
origin: &ServerName,
|
origin: &ServerName,
|
||||||
started: Instant,
|
|
||||||
pdus: impl Stream<Item = Pdu> + Send,
|
pdus: impl Stream<Item = Pdu> + Send,
|
||||||
edus: impl Stream<Item = Edu> + Send,
|
edus: impl Stream<Item = Edu> + Send,
|
||||||
) -> Result<ResolvedMap> {
|
) -> std::result::Result<ResolvedMap, TransactionError> {
|
||||||
// group pdus by room
|
// group pdus by room
|
||||||
let pdus = pdus
|
let pdus = pdus
|
||||||
.collect()
|
.collect()
|
||||||
|
|
@ -152,7 +252,7 @@ async fn handle(
|
||||||
.into_iter()
|
.into_iter()
|
||||||
.try_stream()
|
.try_stream()
|
||||||
.broad_and_then(|(room_id, pdus): (_, Vec<_>)| {
|
.broad_and_then(|(room_id, pdus): (_, Vec<_>)| {
|
||||||
handle_room(services, client, origin, started, room_id, pdus.into_iter())
|
handle_room(services, client, origin, room_id, pdus.into_iter())
|
||||||
.map_ok(Vec::into_iter)
|
.map_ok(Vec::into_iter)
|
||||||
.map_ok(IterStream::try_stream)
|
.map_ok(IterStream::try_stream)
|
||||||
})
|
})
|
||||||
|
|
@ -169,14 +269,51 @@ async fn handle(
|
||||||
Ok(results)
|
Ok(results)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Attempts to build a localised directed acyclic graph out of the given PDUs,
|
||||||
|
/// returning them in a topologically sorted order.
|
||||||
|
///
|
||||||
|
/// This is used to attempt to process PDUs in an order that respects their
|
||||||
|
/// dependencies, however it is ultimately the sender's responsibility to send
|
||||||
|
/// them in a processable order, so this is just a best effort attempt. It does
|
||||||
|
/// not account for power levels or other tie breaks.
|
||||||
|
async fn build_local_dag(
|
||||||
|
pdu_map: &HashMap<OwnedEventId, CanonicalJsonObject>,
|
||||||
|
) -> Result<Vec<OwnedEventId>> {
|
||||||
|
debug_assert!(pdu_map.len() >= 2, "needless call to build_local_dag with less than 2 PDUs");
|
||||||
|
let mut dag: HashMap<OwnedEventId, HashSet<OwnedEventId>> = HashMap::new();
|
||||||
|
|
||||||
|
for (event_id, value) in pdu_map {
|
||||||
|
let prev_events = value
|
||||||
|
.get("prev_events")
|
||||||
|
.expect("pdu must have prev_events")
|
||||||
|
.as_array()
|
||||||
|
.expect("prev_events must be an array")
|
||||||
|
.iter()
|
||||||
|
.map(|v| {
|
||||||
|
OwnedEventId::parse(v.as_str().expect("prev_events values must be strings"))
|
||||||
|
.expect("prev_events must be valid event IDs")
|
||||||
|
})
|
||||||
|
.collect::<HashSet<OwnedEventId>>();
|
||||||
|
|
||||||
|
dag.insert(event_id.clone(), prev_events);
|
||||||
|
}
|
||||||
|
lexicographical_topological_sort(&dag, &|_| async {
|
||||||
|
// Note: we don't bother fetching power levels because that would massively slow
|
||||||
|
// this function down. This is a best-effort attempt to order events correctly
|
||||||
|
// for processing, however ultimately that should be the sender's job.
|
||||||
|
Ok((int!(0), MilliSecondsSinceUnixEpoch(uint!(0))))
|
||||||
|
})
|
||||||
|
.await
|
||||||
|
.map_err(|e| err!("failed to resolve local graph: {e}"))
|
||||||
|
}
|
||||||
|
|
||||||
async fn handle_room(
|
async fn handle_room(
|
||||||
services: &Services,
|
services: &Services,
|
||||||
_client: &IpAddr,
|
_client: &IpAddr,
|
||||||
origin: &ServerName,
|
origin: &ServerName,
|
||||||
txn_start_time: Instant,
|
|
||||||
room_id: OwnedRoomId,
|
room_id: OwnedRoomId,
|
||||||
pdus: impl Iterator<Item = Pdu> + Send,
|
pdus: impl Iterator<Item = Pdu> + Send,
|
||||||
) -> Result<Vec<(OwnedEventId, Result)>> {
|
) -> std::result::Result<Vec<(OwnedEventId, Result)>, TransactionError> {
|
||||||
let _room_lock = services
|
let _room_lock = services
|
||||||
.rooms
|
.rooms
|
||||||
.event_handler
|
.event_handler
|
||||||
|
|
@ -185,27 +322,40 @@ async fn handle_room(
|
||||||
.await;
|
.await;
|
||||||
|
|
||||||
let room_id = &room_id;
|
let room_id = &room_id;
|
||||||
pdus.try_stream()
|
let pdu_map: HashMap<OwnedEventId, CanonicalJsonObject> = pdus
|
||||||
.and_then(|(_, event_id, value)| async move {
|
.into_iter()
|
||||||
services.server.check_running()?;
|
.map(|(_, event_id, value)| (event_id, value))
|
||||||
let pdu_start_time = Instant::now();
|
.collect();
|
||||||
let result = services
|
// Try to sort PDUs by their dependencies, but fall back to arbitrary order on
|
||||||
.rooms
|
// failure (e.g., cycles). This is best-effort; proper ordering is the sender's
|
||||||
.event_handler
|
// responsibility.
|
||||||
.handle_incoming_pdu(origin, room_id, &event_id, value, true)
|
let sorted_event_ids = if pdu_map.len() >= 2 {
|
||||||
.await
|
build_local_dag(&pdu_map).await.unwrap_or_else(|e| {
|
||||||
.map(|_| ());
|
debug_warn!("Failed to build local DAG for room {room_id}: {e}");
|
||||||
|
pdu_map.keys().cloned().collect()
|
||||||
debug!(
|
|
||||||
pdu_elapsed = ?pdu_start_time.elapsed(),
|
|
||||||
txn_elapsed = ?txn_start_time.elapsed(),
|
|
||||||
"Finished PDU {event_id}",
|
|
||||||
);
|
|
||||||
|
|
||||||
Ok((event_id, result))
|
|
||||||
})
|
})
|
||||||
.try_collect()
|
} else {
|
||||||
.await
|
pdu_map.keys().cloned().collect()
|
||||||
|
};
|
||||||
|
let mut results = Vec::with_capacity(sorted_event_ids.len());
|
||||||
|
for event_id in sorted_event_ids {
|
||||||
|
let value = pdu_map
|
||||||
|
.get(&event_id)
|
||||||
|
.expect("sorted event IDs must be from the original map")
|
||||||
|
.clone();
|
||||||
|
services
|
||||||
|
.server
|
||||||
|
.check_running()
|
||||||
|
.map_err(|_| TransactionError::ShuttingDown)?;
|
||||||
|
let result = services
|
||||||
|
.rooms
|
||||||
|
.event_handler
|
||||||
|
.handle_incoming_pdu(origin, room_id, &event_id, value, true)
|
||||||
|
.await
|
||||||
|
.map(|_| ());
|
||||||
|
results.push((event_id, result));
|
||||||
|
}
|
||||||
|
Ok(results)
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn handle_edu(services: &Services, client: &IpAddr, origin: &ServerName, edu: Edu) {
|
async fn handle_edu(services: &Services, client: &IpAddr, origin: &ServerName, edu: Edu) {
|
||||||
|
|
@ -478,8 +628,8 @@ async fn handle_edu_direct_to_device(
|
||||||
|
|
||||||
// Check if this is a new transaction id
|
// Check if this is a new transaction id
|
||||||
if services
|
if services
|
||||||
.transaction_ids
|
.transactions
|
||||||
.existing_txnid(sender, None, message_id)
|
.get_client_txn(sender, None, message_id)
|
||||||
.await
|
.await
|
||||||
.is_ok()
|
.is_ok()
|
||||||
{
|
{
|
||||||
|
|
@ -498,8 +648,8 @@ async fn handle_edu_direct_to_device(
|
||||||
|
|
||||||
// Save transaction id with empty data
|
// Save transaction id with empty data
|
||||||
services
|
services
|
||||||
.transaction_ids
|
.transactions
|
||||||
.add_txnid(sender, None, message_id, &[]);
|
.add_client_txnid(sender, None, message_id, &[]);
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn handle_edu_direct_to_device_user<Event: Send + Sync>(
|
async fn handle_edu_direct_to_device_user<Event: Send + Sync>(
|
||||||
|
|
|
||||||
|
|
@ -86,6 +86,7 @@ libloading.optional = true
|
||||||
log.workspace = true
|
log.workspace = true
|
||||||
num-traits.workspace = true
|
num-traits.workspace = true
|
||||||
rand.workspace = true
|
rand.workspace = true
|
||||||
|
rand_core = { version = "0.6.4", features = ["getrandom"] }
|
||||||
regex.workspace = true
|
regex.workspace = true
|
||||||
reqwest.workspace = true
|
reqwest.workspace = true
|
||||||
ring.workspace = true
|
ring.workspace = true
|
||||||
|
|
|
||||||
|
|
@ -174,6 +174,7 @@ pub fn check(config: &Config) -> Result {
|
||||||
if config.allow_registration
|
if config.allow_registration
|
||||||
&& config.yes_i_am_very_very_sure_i_want_an_open_registration_server_prone_to_abuse
|
&& config.yes_i_am_very_very_sure_i_want_an_open_registration_server_prone_to_abuse
|
||||||
&& config.registration_token.is_none()
|
&& config.registration_token.is_none()
|
||||||
|
&& config.registration_token_file.is_none()
|
||||||
{
|
{
|
||||||
warn!(
|
warn!(
|
||||||
"Open registration is enabled via setting \
|
"Open registration is enabled via setting \
|
||||||
|
|
|
||||||
|
|
@ -68,6 +68,10 @@ pub struct Config {
|
||||||
///
|
///
|
||||||
/// Also see the `[global.well_known]` config section at the very bottom.
|
/// Also see the `[global.well_known]` config section at the very bottom.
|
||||||
///
|
///
|
||||||
|
/// If `client` is not set under `[global.well_known]`, the server name will
|
||||||
|
/// be used as the base domain for user-facing links (such as password
|
||||||
|
/// reset links) created by Continuwuity.
|
||||||
|
///
|
||||||
/// Examples of delegation:
|
/// Examples of delegation:
|
||||||
/// - https://continuwuity.org/.well-known/matrix/server
|
/// - https://continuwuity.org/.well-known/matrix/server
|
||||||
/// - https://continuwuity.org/.well-known/matrix/client
|
/// - https://continuwuity.org/.well-known/matrix/client
|
||||||
|
|
@ -368,6 +372,31 @@ pub struct Config {
|
||||||
#[serde(default = "default_max_fetch_prev_events")]
|
#[serde(default = "default_max_fetch_prev_events")]
|
||||||
pub max_fetch_prev_events: u16,
|
pub max_fetch_prev_events: u16,
|
||||||
|
|
||||||
|
/// How many incoming federation transactions the server is willing to be
|
||||||
|
/// processing at any given time before it becomes overloaded and starts
|
||||||
|
/// rejecting further transactions until some slots become available.
|
||||||
|
///
|
||||||
|
/// Setting this value too low or too high may result in unstable
|
||||||
|
/// federation, and setting it too high may cause runaway resource usage.
|
||||||
|
///
|
||||||
|
/// default: 150
|
||||||
|
#[serde(default = "default_max_concurrent_inbound_transactions")]
|
||||||
|
pub max_concurrent_inbound_transactions: usize,
|
||||||
|
|
||||||
|
/// Maximum age (in seconds) for cached federation transaction responses.
|
||||||
|
/// Entries older than this will be removed during cleanup.
|
||||||
|
///
|
||||||
|
/// default: 7200 (2 hours)
|
||||||
|
#[serde(default = "default_transaction_id_cache_max_age_secs")]
|
||||||
|
pub transaction_id_cache_max_age_secs: u64,
|
||||||
|
|
||||||
|
/// Maximum number of cached federation transaction responses.
|
||||||
|
/// When the cache exceeds this limit, older entries will be removed.
|
||||||
|
///
|
||||||
|
/// default: 8192
|
||||||
|
#[serde(default = "default_transaction_id_cache_max_entries")]
|
||||||
|
pub transaction_id_cache_max_entries: usize,
|
||||||
|
|
||||||
/// Default/base connection timeout (seconds). This is used only by URL
|
/// Default/base connection timeout (seconds). This is used only by URL
|
||||||
/// previews and update/news endpoint checks.
|
/// previews and update/news endpoint checks.
|
||||||
///
|
///
|
||||||
|
|
@ -578,25 +607,47 @@ pub struct Config {
|
||||||
#[serde(default)]
|
#[serde(default)]
|
||||||
pub suspend_on_register: bool,
|
pub suspend_on_register: bool,
|
||||||
|
|
||||||
|
/// Server-wide default for space permission cascading (power levels and
|
||||||
|
/// role-based access). Individual Spaces can override this via the
|
||||||
|
/// `com.continuwuity.space.cascading` state event or the admin command
|
||||||
|
/// `!admin space roles enable/disable <space>`.
|
||||||
|
///
|
||||||
|
/// default: false
|
||||||
|
#[serde(default)]
|
||||||
|
pub space_permission_cascading: bool,
|
||||||
|
|
||||||
|
/// Maximum number of spaces to cache role data for. When exceeded the
|
||||||
|
/// cache is cleared and repopulated on demand.
|
||||||
|
///
|
||||||
|
/// default: 1000
|
||||||
|
#[serde(default = "default_space_roles_cache_flush_threshold")]
|
||||||
|
pub space_roles_cache_flush_threshold: u32,
|
||||||
|
|
||||||
/// Enabling this setting opens registration to anyone without restrictions.
|
/// Enabling this setting opens registration to anyone without restrictions.
|
||||||
/// This makes your server vulnerable to abuse
|
/// This makes your server vulnerable to abuse
|
||||||
#[serde(default)]
|
#[serde(default)]
|
||||||
pub yes_i_am_very_very_sure_i_want_an_open_registration_server_prone_to_abuse: bool,
|
pub yes_i_am_very_very_sure_i_want_an_open_registration_server_prone_to_abuse: bool,
|
||||||
|
|
||||||
/// A static registration token that new users will have to provide when
|
/// A static registration token that new users will have to provide when
|
||||||
/// creating an account. If unset and `allow_registration` is true,
|
/// creating an account. This token does not supersede tokens from other
|
||||||
/// you must set
|
/// sources, such as the `!admin token` command or the
|
||||||
/// `yes_i_am_very_very_sure_i_want_an_open_registration_server_prone_to_abuse`
|
/// `registration_token_file` configuration option.
|
||||||
/// to true to allow open registration without any conditions.
|
|
||||||
///
|
|
||||||
/// If you do not want to set a static token, the `!admin token` commands
|
|
||||||
/// may also be used to manage registration tokens.
|
|
||||||
///
|
///
|
||||||
/// example: "o&^uCtes4HPf0Vu@F20jQeeWE7"
|
/// example: "o&^uCtes4HPf0Vu@F20jQeeWE7"
|
||||||
///
|
///
|
||||||
/// display: sensitive
|
/// display: sensitive
|
||||||
pub registration_token: Option<String>,
|
pub registration_token: Option<String>,
|
||||||
|
|
||||||
|
/// A path to a file containing static registration tokens, one per line.
|
||||||
|
/// Tokens in this file do not supersede tokens from other sources, such as
|
||||||
|
/// the `!admin token` command or the `registration_token` configuration
|
||||||
|
/// option.
|
||||||
|
///
|
||||||
|
/// The file will be read once, when Continuwuity starts. It is not
|
||||||
|
/// currently reread when the server configuration is reloaded. If the file
|
||||||
|
/// cannot be read, Continuwuity will fail to start.
|
||||||
|
pub registration_token_file: Option<PathBuf>,
|
||||||
|
|
||||||
/// The public site key for reCaptcha. If this is provided, reCaptcha
|
/// The public site key for reCaptcha. If this is provided, reCaptcha
|
||||||
/// becomes required during registration. If both captcha *and*
|
/// becomes required during registration. If both captcha *and*
|
||||||
/// registration token are enabled, both will be required during
|
/// registration token are enabled, both will be required during
|
||||||
|
|
@ -653,12 +704,6 @@ pub struct Config {
|
||||||
#[serde(default)]
|
#[serde(default)]
|
||||||
pub allow_public_room_directory_over_federation: bool,
|
pub allow_public_room_directory_over_federation: bool,
|
||||||
|
|
||||||
/// Set this to true to allow your server's public room directory to be
|
|
||||||
/// queried without client authentication (access token) through the Client
|
|
||||||
/// APIs. Set this to false to protect against /publicRooms spiders.
|
|
||||||
#[serde(default)]
|
|
||||||
pub allow_public_room_directory_without_auth: bool,
|
|
||||||
|
|
||||||
/// Allow guests/unauthenticated users to access TURN credentials.
|
/// Allow guests/unauthenticated users to access TURN credentials.
|
||||||
///
|
///
|
||||||
/// This is the equivalent of Synapse's `turn_allow_guests` config option.
|
/// This is the equivalent of Synapse's `turn_allow_guests` config option.
|
||||||
|
|
@ -1244,12 +1289,6 @@ pub struct Config {
|
||||||
#[serde(default)]
|
#[serde(default)]
|
||||||
pub rocksdb_repair: bool,
|
pub rocksdb_repair: bool,
|
||||||
|
|
||||||
#[serde(default)]
|
|
||||||
pub rocksdb_read_only: bool,
|
|
||||||
|
|
||||||
#[serde(default)]
|
|
||||||
pub rocksdb_secondary: bool,
|
|
||||||
|
|
||||||
/// Enables idle CPU priority for compaction thread. This is not enabled by
|
/// Enables idle CPU priority for compaction thread. This is not enabled by
|
||||||
/// default to prevent compaction from falling too far behind on busy
|
/// default to prevent compaction from falling too far behind on busy
|
||||||
/// systems.
|
/// systems.
|
||||||
|
|
@ -1309,26 +1348,33 @@ pub struct Config {
|
||||||
|
|
||||||
/// Allow local (your server only) presence updates/requests.
|
/// Allow local (your server only) presence updates/requests.
|
||||||
///
|
///
|
||||||
/// Note that presence on continuwuity is very fast unlike Synapse's. If
|
/// Local presence must be enabled for outgoing presence to function.
|
||||||
/// using outgoing presence, this MUST be enabled.
|
///
|
||||||
|
/// Note that local presence is not as heavy on the CPU as federated
|
||||||
|
/// presence, but will still become more expensive the more local users you
|
||||||
|
/// have.
|
||||||
#[serde(default = "true_fn")]
|
#[serde(default = "true_fn")]
|
||||||
pub allow_local_presence: bool,
|
pub allow_local_presence: bool,
|
||||||
|
|
||||||
/// Allow incoming federated presence updates/requests.
|
/// Allow incoming federated presence updates.
|
||||||
///
|
///
|
||||||
/// This option receives presence updates from other servers, but does not
|
/// This option enables processing inbound presence updates from other
|
||||||
/// send any unless `allow_outgoing_presence` is true. Note that presence on
|
/// servers. Without it, remote users will appear as if they are always
|
||||||
/// continuwuity is very fast unlike Synapse's.
|
/// offline to your local users. This does not affect typing indicators or
|
||||||
|
/// read receipts.
|
||||||
#[serde(default = "true_fn")]
|
#[serde(default = "true_fn")]
|
||||||
pub allow_incoming_presence: bool,
|
pub allow_incoming_presence: bool,
|
||||||
|
|
||||||
/// Allow outgoing presence updates/requests.
|
/// Allow outgoing presence updates/requests.
|
||||||
///
|
///
|
||||||
/// This option sends presence updates to other servers, but does not
|
/// This option sends presence updates to other servers, and requires that
|
||||||
/// receive any unless `allow_incoming_presence` is true. Note that presence
|
/// `allow_local_presence` is also enabled.
|
||||||
/// on continuwuity is very fast unlike Synapse's. If using outgoing
|
///
|
||||||
/// presence, you MUST enable `allow_local_presence` as well.
|
/// Note that outgoing presence is very heavy on the CPU and network, and
|
||||||
#[serde(default = "true_fn")]
|
/// will typically cause extreme strain and slowdowns for no real benefit.
|
||||||
|
/// There are only a few clients that even implement presence, so you
|
||||||
|
/// probably don't want to enable this.
|
||||||
|
#[serde(default)]
|
||||||
pub allow_outgoing_presence: bool,
|
pub allow_outgoing_presence: bool,
|
||||||
|
|
||||||
/// How many seconds without presence updates before you become idle.
|
/// How many seconds without presence updates before you become idle.
|
||||||
|
|
@ -1366,6 +1412,10 @@ pub struct Config {
|
||||||
pub allow_incoming_read_receipts: bool,
|
pub allow_incoming_read_receipts: bool,
|
||||||
|
|
||||||
/// Allow sending read receipts to remote servers.
|
/// Allow sending read receipts to remote servers.
|
||||||
|
///
|
||||||
|
/// Note that sending read receipts to remote servers in large rooms with
|
||||||
|
/// lots of other homeservers may cause additional strain on the CPU and
|
||||||
|
/// network.
|
||||||
#[serde(default = "true_fn")]
|
#[serde(default = "true_fn")]
|
||||||
pub allow_outgoing_read_receipts: bool,
|
pub allow_outgoing_read_receipts: bool,
|
||||||
|
|
||||||
|
|
@ -1377,6 +1427,10 @@ pub struct Config {
|
||||||
pub allow_local_typing: bool,
|
pub allow_local_typing: bool,
|
||||||
|
|
||||||
/// Allow outgoing typing updates to federation.
|
/// Allow outgoing typing updates to federation.
|
||||||
|
///
|
||||||
|
/// Note that sending typing indicators to remote servers in large rooms
|
||||||
|
/// with lots of other homeservers may cause additional strain on the CPU
|
||||||
|
/// and network.
|
||||||
#[serde(default = "true_fn")]
|
#[serde(default = "true_fn")]
|
||||||
pub allow_outgoing_typing: bool,
|
pub allow_outgoing_typing: bool,
|
||||||
|
|
||||||
|
|
@ -1516,7 +1570,7 @@ pub struct Config {
|
||||||
/// sender user's server name, inbound federation X-Matrix origin, and
|
/// sender user's server name, inbound federation X-Matrix origin, and
|
||||||
/// outbound federation handler.
|
/// outbound federation handler.
|
||||||
///
|
///
|
||||||
/// You can set this to ["*"] to block all servers by default, and then
|
/// You can set this to [".*"] to block all servers by default, and then
|
||||||
/// use `allowed_remote_server_names` to allow only specific servers.
|
/// use `allowed_remote_server_names` to allow only specific servers.
|
||||||
///
|
///
|
||||||
/// example: ["badserver\\.tld$", "badphrase", "19dollarfortnitecards"]
|
/// example: ["badserver\\.tld$", "badphrase", "19dollarfortnitecards"]
|
||||||
|
|
@ -1701,6 +1755,11 @@ pub struct Config {
|
||||||
/// default: "continuwuity/<version> (bot; +https://continuwuity.org)"
|
/// default: "continuwuity/<version> (bot; +https://continuwuity.org)"
|
||||||
pub url_preview_user_agent: Option<String>,
|
pub url_preview_user_agent: Option<String>,
|
||||||
|
|
||||||
|
/// Determines whether audio and video files will be downloaded for URL
|
||||||
|
/// previews.
|
||||||
|
#[serde(default)]
|
||||||
|
pub url_preview_allow_audio_video: bool,
|
||||||
|
|
||||||
/// List of forbidden room aliases and room IDs as strings of regex
|
/// List of forbidden room aliases and room IDs as strings of regex
|
||||||
/// patterns.
|
/// patterns.
|
||||||
///
|
///
|
||||||
|
|
@ -2040,6 +2099,23 @@ pub struct Config {
|
||||||
pub allow_invalid_tls_certificates_yes_i_know_what_the_fuck_i_am_doing_with_this_and_i_know_this_is_insecure:
|
pub allow_invalid_tls_certificates_yes_i_know_what_the_fuck_i_am_doing_with_this_and_i_know_this_is_insecure:
|
||||||
bool,
|
bool,
|
||||||
|
|
||||||
|
/// Forcibly disables first-run mode.
|
||||||
|
///
|
||||||
|
/// This is intended to be used for Complement testing to allow the test
|
||||||
|
/// suite to register users, because first-run mode interferes with open
|
||||||
|
/// registration.
|
||||||
|
///
|
||||||
|
/// display: hidden
|
||||||
|
#[serde(default)]
|
||||||
|
pub force_disable_first_run_mode: bool,
|
||||||
|
|
||||||
|
/// Allow search engines and crawlers to index Continuwuity's built-in
|
||||||
|
/// webpages served under the `/_continuwuity/` prefix.
|
||||||
|
///
|
||||||
|
/// default: false
|
||||||
|
#[serde(default)]
|
||||||
|
pub allow_web_indexing: bool,
|
||||||
|
|
||||||
/// display: nested
|
/// display: nested
|
||||||
#[serde(default)]
|
#[serde(default)]
|
||||||
pub ldap: LdapConfig,
|
pub ldap: LdapConfig,
|
||||||
|
|
@ -2052,6 +2128,12 @@ pub struct Config {
|
||||||
/// display: nested
|
/// display: nested
|
||||||
#[serde(default)]
|
#[serde(default)]
|
||||||
pub blurhashing: BlurhashConfig,
|
pub blurhashing: BlurhashConfig,
|
||||||
|
|
||||||
|
/// Configuration for MatrixRTC (MSC4143) transport discovery.
|
||||||
|
/// display: nested
|
||||||
|
#[serde(default)]
|
||||||
|
pub matrix_rtc: MatrixRtcConfig,
|
||||||
|
|
||||||
#[serde(flatten)]
|
#[serde(flatten)]
|
||||||
#[allow(clippy::zero_sized_map_values)]
|
#[allow(clippy::zero_sized_map_values)]
|
||||||
// this is a catchall, the map shouldn't be zero at runtime
|
// this is a catchall, the map shouldn't be zero at runtime
|
||||||
|
|
@ -2117,17 +2199,16 @@ pub struct WellKnownConfig {
|
||||||
/// listed.
|
/// listed.
|
||||||
pub support_mxid: Option<OwnedUserId>,
|
pub support_mxid: Option<OwnedUserId>,
|
||||||
|
|
||||||
/// A list of MatrixRTC foci URLs which will be served as part of the
|
/// **DEPRECATED**: Use `[global.matrix_rtc].foci` instead.
|
||||||
/// MSC4143 client endpoint at /.well-known/matrix/client. If you're
|
|
||||||
/// setting up livekit, you'd want something like:
|
|
||||||
/// rtc_focus_server_urls = [
|
|
||||||
/// { type = "livekit", livekit_service_url = "https://livekit.example.com" },
|
|
||||||
/// ]
|
|
||||||
///
|
///
|
||||||
/// To disable, set this to be an empty vector (`[]`).
|
/// A list of MatrixRTC foci URLs which will be served as part of the
|
||||||
|
/// MSC4143 client endpoint at /.well-known/matrix/client.
|
||||||
|
///
|
||||||
|
/// This option is deprecated and will be removed in a future release.
|
||||||
|
/// Please migrate to the new `[global.matrix_rtc]` config section.
|
||||||
///
|
///
|
||||||
/// default: []
|
/// default: []
|
||||||
#[serde(default = "default_rtc_focus_urls")]
|
#[serde(default)]
|
||||||
pub rtc_focus_server_urls: Vec<RtcFocusInfo>,
|
pub rtc_focus_server_urls: Vec<RtcFocusInfo>,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -2156,6 +2237,43 @@ pub struct BlurhashConfig {
|
||||||
pub blurhash_max_raw_size: u64,
|
pub blurhash_max_raw_size: u64,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[derive(Clone, Debug, Deserialize, Default)]
|
||||||
|
#[config_example_generator(filename = "conduwuit-example.toml", section = "global.matrix_rtc")]
|
||||||
|
pub struct MatrixRtcConfig {
|
||||||
|
/// A list of MatrixRTC foci (transports) which will be served via the
|
||||||
|
/// MSC4143 RTC transports endpoint at
|
||||||
|
/// `/_matrix/client/v1/rtc/transports`. If you're setting up livekit,
|
||||||
|
/// you'd want something like:
|
||||||
|
/// ```toml
|
||||||
|
/// [global.matrix_rtc]
|
||||||
|
/// foci = [
|
||||||
|
/// { type = "livekit", livekit_service_url = "https://livekit.example.com" },
|
||||||
|
/// ]
|
||||||
|
/// ```
|
||||||
|
///
|
||||||
|
/// To disable, set this to an empty list (`[]`).
|
||||||
|
///
|
||||||
|
/// default: []
|
||||||
|
#[serde(default)]
|
||||||
|
pub foci: Vec<RtcFocusInfo>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl MatrixRtcConfig {
|
||||||
|
/// Returns the effective foci, falling back to the deprecated
|
||||||
|
/// `rtc_focus_server_urls` if the new config is empty.
|
||||||
|
#[must_use]
|
||||||
|
pub fn effective_foci<'a>(
|
||||||
|
&'a self,
|
||||||
|
deprecated_foci: &'a [RtcFocusInfo],
|
||||||
|
) -> &'a [RtcFocusInfo] {
|
||||||
|
if !self.foci.is_empty() {
|
||||||
|
&self.foci
|
||||||
|
} else {
|
||||||
|
deprecated_foci
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
#[derive(Clone, Debug, Default, Deserialize)]
|
#[derive(Clone, Debug, Default, Deserialize)]
|
||||||
#[config_example_generator(filename = "conduwuit-example.toml", section = "global.ldap")]
|
#[config_example_generator(filename = "conduwuit-example.toml", section = "global.ldap")]
|
||||||
pub struct LdapConfig {
|
pub struct LdapConfig {
|
||||||
|
|
@ -2349,6 +2467,7 @@ const DEPRECATED_KEYS: &[&str] = &[
|
||||||
"well_known_support_email",
|
"well_known_support_email",
|
||||||
"well_known_support_mxid",
|
"well_known_support_mxid",
|
||||||
"registration_token_file",
|
"registration_token_file",
|
||||||
|
"well_known.rtc_focus_server_urls",
|
||||||
];
|
];
|
||||||
|
|
||||||
impl Config {
|
impl Config {
|
||||||
|
|
@ -2531,6 +2650,12 @@ fn default_pusher_idle_timeout() -> u64 { 15 }
|
||||||
|
|
||||||
fn default_max_fetch_prev_events() -> u16 { 192_u16 }
|
fn default_max_fetch_prev_events() -> u16 { 192_u16 }
|
||||||
|
|
||||||
|
fn default_max_concurrent_inbound_transactions() -> usize { 150 }
|
||||||
|
|
||||||
|
fn default_transaction_id_cache_max_age_secs() -> u64 { 60 * 60 * 2 }
|
||||||
|
|
||||||
|
fn default_transaction_id_cache_max_entries() -> usize { 8192 }
|
||||||
|
|
||||||
fn default_tracing_flame_filter() -> String {
|
fn default_tracing_flame_filter() -> String {
|
||||||
cfg!(debug_assertions)
|
cfg!(debug_assertions)
|
||||||
.then_some("trace,h2=off")
|
.then_some("trace,h2=off")
|
||||||
|
|
@ -2626,9 +2751,6 @@ fn default_rocksdb_stats_level() -> u8 { 1 }
|
||||||
#[inline]
|
#[inline]
|
||||||
pub fn default_default_room_version() -> RoomVersionId { RoomVersionId::V11 }
|
pub fn default_default_room_version() -> RoomVersionId { RoomVersionId::V11 }
|
||||||
|
|
||||||
#[must_use]
|
|
||||||
pub fn default_rtc_focus_urls() -> Vec<RtcFocusInfo> { vec![] }
|
|
||||||
|
|
||||||
fn default_ip_range_denylist() -> Vec<String> {
|
fn default_ip_range_denylist() -> Vec<String> {
|
||||||
vec![
|
vec![
|
||||||
"127.0.0.0/8".to_owned(),
|
"127.0.0.0/8".to_owned(),
|
||||||
|
|
@ -2731,3 +2853,5 @@ fn default_ldap_search_filter() -> String { "(objectClass=*)".to_owned() }
|
||||||
fn default_ldap_uid_attribute() -> String { String::from("uid") }
|
fn default_ldap_uid_attribute() -> String { String::from("uid") }
|
||||||
|
|
||||||
fn default_ldap_name_attribute() -> String { String::from("givenName") }
|
fn default_ldap_name_attribute() -> String { String::from("givenName") }
|
||||||
|
|
||||||
|
fn default_space_roles_cache_flush_threshold() -> u32 { 1000 }
|
||||||
|
|
|
||||||
|
|
@ -191,6 +191,7 @@ impl Error {
|
||||||
| Self::Reqwest(error) => error.status().unwrap_or(StatusCode::INTERNAL_SERVER_ERROR),
|
| Self::Reqwest(error) => error.status().unwrap_or(StatusCode::INTERNAL_SERVER_ERROR),
|
||||||
| Self::Conflict(_) => StatusCode::CONFLICT,
|
| Self::Conflict(_) => StatusCode::CONFLICT,
|
||||||
| Self::Io(error) => response::io_error_code(error.kind()),
|
| Self::Io(error) => response::io_error_code(error.kind()),
|
||||||
|
| Self::Uiaa(_) => StatusCode::UNAUTHORIZED,
|
||||||
| _ => StatusCode::INTERNAL_SERVER_ERROR,
|
| _ => StatusCode::INTERNAL_SERVER_ERROR,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
||||||
Some files were not shown because too many files have changed in this diff Show more
Loading…
Add table
Reference in a new issue