Compare commits

..

1 commit

Author SHA1 Message Date
Renovate Bot
7b5a292bc2 fix(deps): update rust crate rand_core to 0.10.0 2026-03-06 05:06:35 +00:00
97 changed files with 863 additions and 6743 deletions

View file

@ -44,7 +44,7 @@ runs:
- name: Login to builtin registry - name: Login to builtin registry
if: ${{ env.BUILTIN_REGISTRY_ENABLED == 'true' }} if: ${{ env.BUILTIN_REGISTRY_ENABLED == 'true' }}
uses: docker/login-action@v4 uses: docker/login-action@v3
with: with:
registry: ${{ env.BUILTIN_REGISTRY }} registry: ${{ env.BUILTIN_REGISTRY }}
username: ${{ inputs.registry_user }} username: ${{ inputs.registry_user }}
@ -52,7 +52,7 @@ runs:
- name: Set up Docker Buildx - name: Set up Docker Buildx
if: ${{ env.BUILTIN_REGISTRY_ENABLED == 'true' }} if: ${{ env.BUILTIN_REGISTRY_ENABLED == 'true' }}
uses: docker/setup-buildx-action@v4 uses: docker/setup-buildx-action@v3
with: with:
# Use persistent BuildKit if BUILDKIT_ENDPOINT is set (e.g. tcp://buildkit:8125) # Use persistent BuildKit if BUILDKIT_ENDPOINT is set (e.g. tcp://buildkit:8125)
driver: ${{ env.BUILDKIT_ENDPOINT != '' && 'remote' || 'docker-container' }} driver: ${{ env.BUILDKIT_ENDPOINT != '' && 'remote' || 'docker-container' }}
@ -61,7 +61,7 @@ runs:
- name: Extract metadata (tags) for Docker - name: Extract metadata (tags) for Docker
if: ${{ env.BUILTIN_REGISTRY_ENABLED == 'true' }} if: ${{ env.BUILTIN_REGISTRY_ENABLED == 'true' }}
id: meta id: meta
uses: docker/metadata-action@v6 uses: docker/metadata-action@v5
with: with:
flavor: | flavor: |
latest=auto latest=auto

View file

@ -67,7 +67,7 @@ runs:
uses: ./.forgejo/actions/rust-toolchain uses: ./.forgejo/actions/rust-toolchain
- name: Set up Docker Buildx - name: Set up Docker Buildx
uses: docker/setup-buildx-action@v4 uses: docker/setup-buildx-action@v3
with: with:
# Use persistent BuildKit if BUILDKIT_ENDPOINT is set (e.g. tcp://buildkit:8125) # Use persistent BuildKit if BUILDKIT_ENDPOINT is set (e.g. tcp://buildkit:8125)
driver: ${{ env.BUILDKIT_ENDPOINT != '' && 'remote' || 'docker-container' }} driver: ${{ env.BUILDKIT_ENDPOINT != '' && 'remote' || 'docker-container' }}
@ -79,7 +79,7 @@ runs:
- name: Login to builtin registry - name: Login to builtin registry
if: ${{ env.BUILTIN_REGISTRY_ENABLED == 'true' }} if: ${{ env.BUILTIN_REGISTRY_ENABLED == 'true' }}
uses: docker/login-action@v4 uses: docker/login-action@v3
with: with:
registry: ${{ env.BUILTIN_REGISTRY }} registry: ${{ env.BUILTIN_REGISTRY }}
username: ${{ inputs.registry_user }} username: ${{ inputs.registry_user }}
@ -87,7 +87,7 @@ runs:
- name: Extract metadata (labels, annotations) for Docker - name: Extract metadata (labels, annotations) for Docker
id: meta id: meta
uses: docker/metadata-action@v6 uses: docker/metadata-action@v5
with: with:
images: ${{ inputs.images }} images: ${{ inputs.images }}
# default labels & annotations: https://github.com/docker/metadata-action/blob/master/src/meta.ts#L509 # default labels & annotations: https://github.com/docker/metadata-action/blob/master/src/meta.ts#L509
@ -152,7 +152,7 @@ runs:
- name: inject cache into docker - name: inject cache into docker
if: ${{ env.BUILDKIT_ENDPOINT == '' }} if: ${{ env.BUILDKIT_ENDPOINT == '' }}
uses: https://github.com/reproducible-containers/buildkit-cache-dance@v3.3.2 uses: https://github.com/reproducible-containers/buildkit-cache-dance@v3.3.0
with: with:
cache-map: | cache-map: |
{ {

View file

@ -62,6 +62,10 @@ sync:
target: registry.gitlab.com/continuwuity/continuwuity target: registry.gitlab.com/continuwuity/continuwuity
type: repository type: repository
<<: *tags-main <<: *tags-main
- source: *source
target: git.nexy7574.co.uk/mirrored/continuwuity
type: repository
<<: *tags-releases
- source: *source - source: *source
target: ghcr.io/continuwuity/continuwuity target: ghcr.io/continuwuity/continuwuity
type: repository type: repository

View file

@ -59,7 +59,7 @@ jobs:
registry_password: ${{ secrets.BUILTIN_REGISTRY_PASSWORD || secrets.GITHUB_TOKEN }} registry_password: ${{ secrets.BUILTIN_REGISTRY_PASSWORD || secrets.GITHUB_TOKEN }}
- name: Build and push Docker image by digest - name: Build and push Docker image by digest
id: build id: build
uses: docker/build-push-action@v7 uses: docker/build-push-action@v6
with: with:
context: . context: .
file: "docker/Dockerfile" file: "docker/Dockerfile"
@ -146,7 +146,7 @@ jobs:
registry_password: ${{ secrets.BUILTIN_REGISTRY_PASSWORD || secrets.GITHUB_TOKEN }} registry_password: ${{ secrets.BUILTIN_REGISTRY_PASSWORD || secrets.GITHUB_TOKEN }}
- name: Build and push max-perf Docker image by digest - name: Build and push max-perf Docker image by digest
id: build id: build
uses: docker/build-push-action@v7 uses: docker/build-push-action@v6
with: with:
context: . context: .
file: "docker/Dockerfile" file: "docker/Dockerfile"

View file

@ -43,7 +43,7 @@ jobs:
name: Renovate name: Renovate
runs-on: ubuntu-latest runs-on: ubuntu-latest
container: container:
image: ghcr.io/renovatebot/renovate:43.59.4@sha256:f951508dea1e7d71cbe6deca298ab0a05488e7631229304813f630cc06010892 image: ghcr.io/renovatebot/renovate:42.70.2@sha256:3c2ac1b94fa92ef2fa4d1a0493f2c3ba564454720a32fdbcac2db2846ff1ee47
options: --tmpfs /tmp:exec options: --tmpfs /tmp:exec
steps: steps:
- name: Checkout - name: Checkout

View file

@ -23,7 +23,7 @@ jobs:
persist-credentials: true persist-credentials: true
token: ${{ secrets.FORGEJO_TOKEN }} token: ${{ secrets.FORGEJO_TOKEN }}
- uses: https://github.com/cachix/install-nix-action@19effe9fe722874e6d46dd7182e4b8b7a43c4a99 # v31.10.0 - uses: https://github.com/cachix/install-nix-action@4e002c8ec80594ecd40e759629461e26c8abed15 # v31.9.0
with: with:
nix_path: nixpkgs=channel:nixos-unstable nix_path: nixpkgs=channel:nixos-unstable

4
.github/FUNDING.yml vendored
View file

@ -1,4 +1,4 @@
github: [JadedBlueEyes, nexy7574, gingershaped] github: [JadedBlueEyes, nexy7574, gingershaped]
custom: custom:
- https://timedout.uk/donate.html - https://ko-fi.com/nexy7574
- https://jade.ellis.link/sponsors - https://ko-fi.com/JadedBlueEyes

808
Cargo.lock generated

File diff suppressed because it is too large Load diff

View file

@ -12,7 +12,7 @@ license = "Apache-2.0"
# See also `rust-toolchain.toml` # See also `rust-toolchain.toml`
readme = "README.md" readme = "README.md"
repository = "https://forgejo.ellis.link/continuwuation/continuwuity" repository = "https://forgejo.ellis.link/continuwuation/continuwuity"
version = "0.5.7-alpha.1" version = "0.5.6"
[workspace.metadata.crane] [workspace.metadata.crane]
name = "conduwuit" name = "conduwuit"
@ -99,7 +99,7 @@ features = [
[workspace.dependencies.axum-extra] [workspace.dependencies.axum-extra]
version = "0.12.0" version = "0.12.0"
default-features = false default-features = false
features = ["typed-header", "tracing", "cookie"] features = ["typed-header", "tracing"]
[workspace.dependencies.axum-server] [workspace.dependencies.axum-server]
version = "0.7.2" version = "0.7.2"
@ -159,7 +159,7 @@ features = ["raw_value"]
# Used for appservice registration files # Used for appservice registration files
[workspace.dependencies.serde-saphyr] [workspace.dependencies.serde-saphyr]
version = "0.0.21" version = "0.0.19"
# Used to load forbidden room/user regex from config # Used to load forbidden room/user regex from config
[workspace.dependencies.serde_regex] [workspace.dependencies.serde_regex]
@ -969,6 +969,3 @@ needless_raw_string_hashes = "allow"
# TODO: Enable this lint & fix all instances # TODO: Enable this lint & fix all instances
collapsible_if = "allow" collapsible_if = "allow"
# TODO: break these apart
cognitive_complexity = "allow"

View file

@ -1 +0,0 @@
Added support for using an admin command to issue self-service password reset links.

View file

@ -1 +0,0 @@
Add Space permission cascading: power levels cascade from Spaces to child rooms, role-based room access with custom roles, continuous enforcement (auto-join/kick), and admin commands for role management. Server-wide default controlled by `space_permission_cascading` config flag (off by default), with per-Space overrides via `!admin space roles enable/disable <space>`.

View file

@ -1 +0,0 @@
Fixed corrupted appservice registrations causing the server to enter a crash loop. Contributed by @nex.

View file

@ -1 +0,0 @@
Add new config option to allow or disallow search engine indexing through a `<meta ../>` tag. Defaults to blocking indexing (`content="noindex"`). Contributed by @s1lv3r and @ginger.

View file

@ -25,10 +25,6 @@
# #
# Also see the `[global.well_known]` config section at the very bottom. # Also see the `[global.well_known]` config section at the very bottom.
# #
# If `client` is not set under `[global.well_known]`, the server name will
# be used as the base domain for user-facing links (such as password
# reset links) created by Continuwuity.
#
# Examples of delegation: # Examples of delegation:
# - https://continuwuity.org/.well-known/matrix/server # - https://continuwuity.org/.well-known/matrix/server
# - https://continuwuity.org/.well-known/matrix/client # - https://continuwuity.org/.well-known/matrix/client
@ -474,18 +470,6 @@
# #
#suspend_on_register = false #suspend_on_register = false
# Server-wide default for space permission cascading (power levels and
# role-based access). Individual Spaces can override this via the
# `com.continuwuity.space.cascading` state event or the admin command
# `!admin space roles enable/disable <space>`.
#
#space_permission_cascading = false
# Maximum number of spaces to cache role data for. When exceeded the
# cache is cleared and repopulated on demand.
#
#space_roles_cache_flush_threshold = 1000
# Enabling this setting opens registration to anyone without restrictions. # Enabling this setting opens registration to anyone without restrictions.
# This makes your server vulnerable to abuse # This makes your server vulnerable to abuse
# #
@ -1521,11 +1505,6 @@
# #
#url_preview_user_agent = "continuwuity/<version> (bot; +https://continuwuity.org)" #url_preview_user_agent = "continuwuity/<version> (bot; +https://continuwuity.org)"
# Determines whether audio and video files will be downloaded for URL
# previews.
#
#url_preview_allow_audio_video = false
# List of forbidden room aliases and room IDs as strings of regex # List of forbidden room aliases and room IDs as strings of regex
# patterns. # patterns.
# #
@ -1811,11 +1790,6 @@
# #
#config_reload_signal = true #config_reload_signal = true
# Allow search engines and crawlers to index Continuwuity's built-in
# webpages served under the `/_continuwuity/` prefix.
#
#allow_web_indexing = false
[global.tls] [global.tls]
# Path to a valid TLS certificate file. # Path to a valid TLS certificate file.

View file

@ -48,7 +48,7 @@ EOF
# Developer tool versions # Developer tool versions
# renovate: datasource=github-releases depName=cargo-bins/cargo-binstall # renovate: datasource=github-releases depName=cargo-bins/cargo-binstall
ENV BINSTALL_VERSION=1.17.7 ENV BINSTALL_VERSION=1.17.6
# renovate: datasource=github-releases depName=psastras/sbom-rs # renovate: datasource=github-releases depName=psastras/sbom-rs
ENV CARGO_SBOM_VERSION=0.9.1 ENV CARGO_SBOM_VERSION=0.9.1
# renovate: datasource=crate depName=lddtree # renovate: datasource=crate depName=lddtree

View file

@ -18,7 +18,7 @@ RUN --mount=type=cache,target=/etc/apk/cache apk add \
# Developer tool versions # Developer tool versions
# renovate: datasource=github-releases depName=cargo-bins/cargo-binstall # renovate: datasource=github-releases depName=cargo-bins/cargo-binstall
ENV BINSTALL_VERSION=1.17.7 ENV BINSTALL_VERSION=1.17.6
# renovate: datasource=github-releases depName=psastras/sbom-rs # renovate: datasource=github-releases depName=psastras/sbom-rs
ENV CARGO_SBOM_VERSION=0.9.1 ENV CARGO_SBOM_VERSION=0.9.1
# renovate: datasource=crate depName=lddtree # renovate: datasource=crate depName=lddtree

View file

@ -69,11 +69,6 @@
"label": "Configuration Reference", "label": "Configuration Reference",
"name": "/reference/config" "name": "/reference/config"
}, },
{
"type": "file",
"label": "Environment Variables",
"name": "/reference/environment-variables"
},
{ {
"type": "dir", "type": "dir",
"label": "Admin Command Reference", "label": "Admin Command Reference",

View file

@ -109,6 +109,9 @@ Restart Continuwuity and your reverse proxy. Once that's done, visit these route
{ {
"m.homeserver": { "m.homeserver": {
"base_url": "https://matrix.example.com/" "base_url": "https://matrix.example.com/"
},
"org.matrix.msc3575.proxy": {
"url": "https://matrix.example.com/"
} }
} }
``` ```

View file

@ -6,9 +6,9 @@ services:
### then you are ready to go. ### then you are ready to go.
image: forgejo.ellis.link/continuwuation/continuwuity:latest image: forgejo.ellis.link/continuwuation/continuwuity:latest
restart: unless-stopped restart: unless-stopped
command: /sbin/conduwuit
volumes: volumes:
- db:/var/lib/continuwuity - db:/var/lib/continuwuity
- /etc/resolv.conf:/etc/resolv.conf:ro # Use the host's DNS resolver rather than Docker's.
#- ./continuwuity.toml:/etc/continuwuity.toml #- ./continuwuity.toml:/etc/continuwuity.toml
networks: networks:
- proxy - proxy

View file

@ -23,7 +23,6 @@ services:
### then you are ready to go. ### then you are ready to go.
image: forgejo.ellis.link/continuwuation/continuwuity:latest image: forgejo.ellis.link/continuwuation/continuwuity:latest
restart: unless-stopped restart: unless-stopped
command: /sbin/conduwuit
volumes: volumes:
- db:/var/lib/continuwuity - db:/var/lib/continuwuity
- /etc/resolv.conf:/etc/resolv.conf:ro # Use the host's DNS resolver rather than Docker's. - /etc/resolv.conf:/etc/resolv.conf:ro # Use the host's DNS resolver rather than Docker's.

View file

@ -6,7 +6,6 @@ services:
### then you are ready to go. ### then you are ready to go.
image: forgejo.ellis.link/continuwuation/continuwuity:latest image: forgejo.ellis.link/continuwuation/continuwuity:latest
restart: unless-stopped restart: unless-stopped
command: /sbin/conduwuit
volumes: volumes:
- db:/var/lib/continuwuity - db:/var/lib/continuwuity
- /etc/resolv.conf:/etc/resolv.conf:ro # Use the host's DNS resolver rather than Docker's. - /etc/resolv.conf:/etc/resolv.conf:ro # Use the host's DNS resolver rather than Docker's.

View file

@ -6,7 +6,6 @@ services:
### then you are ready to go. ### then you are ready to go.
image: forgejo.ellis.link/continuwuation/continuwuity:latest image: forgejo.ellis.link/continuwuation/continuwuity:latest
restart: unless-stopped restart: unless-stopped
command: /sbin/conduwuit
ports: ports:
- 8448:6167 - 8448:6167
volumes: volumes:

View file

@ -2,26 +2,28 @@
## Docker ## Docker
To run Continuwuity with Docker, you can either build the image yourself or pull To run Continuwuity with Docker, you can either build the image yourself or pull it
it from a registry. from a registry.
### Use a registry ### Use a registry
Available OCI images: OCI images for Continuwuity are available in the registries listed below.
| Registry | Image | Notes | | Registry | Image | Notes |
| ---------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | | --------------- | --------------------------------------------------------------- | -----------------------|
| Forgejo Registry | [forgejo.ellis.link/continuwuation/continuwuity:latest](https://forgejo.ellis.link/continuwuation/-/packages/container/continuwuity/latest) | Latest tagged image. | | Forgejo Registry| [forgejo.ellis.link/continuwuation/continuwuity:latest](https://forgejo.ellis.link/continuwuation/-/packages/container/continuwuity/latest) | Latest tagged image. |
| Forgejo Registry | [forgejo.ellis.link/continuwuation/continuwuity:main](https://forgejo.ellis.link/continuwuation/-/packages/container/continuwuity/main) | Main branch image. | | Forgejo Registry| [forgejo.ellis.link/continuwuation/continuwuity:main](https://forgejo.ellis.link/continuwuation/-/packages/container/continuwuity/main) | Main branch image. |
| Forgejo Registry | [forgejo.ellis.link/continuwuation/continuwuity:latest-maxperf](https://forgejo.ellis.link/continuwuation/-/packages/container/continuwuity/latest-maxperf) | [Performance optimised version.](./generic.mdx#performance-optimised-builds) | | Forgejo Registry| [forgejo.ellis.link/continuwuation/continuwuity:latest-maxperf](https://forgejo.ellis.link/continuwuation/-/packages/container/continuwuity/latest-maxperf) | [Performance optimised version.](./generic.mdx#performance-optimised-builds) |
| Forgejo Registry | [forgejo.ellis.link/continuwuation/continuwuity:main-maxperf](https://forgejo.ellis.link/continuwuation/-/packages/container/continuwuity/main-maxperf) | [Performance optimised version.](./generic.mdx#performance-optimised-builds) | | Forgejo Registry| [forgejo.ellis.link/continuwuation/continuwuity:main-maxperf](https://forgejo.ellis.link/continuwuation/-/packages/container/continuwuity/main-maxperf) | [Performance optimised version.](./generic.mdx#performance-optimised-builds) |
**Example:** Use
```bash ```bash
docker image pull forgejo.ellis.link/continuwuation/continuwuity:main-maxperf docker image pull $LINK
``` ```
to pull it to your machine.
#### Mirrors #### Mirrors
Images are mirrored to multiple locations automatically, on a schedule: Images are mirrored to multiple locations automatically, on a schedule:
@ -31,146 +33,39 @@ Images are mirrored to multiple locations automatically, on a schedule:
- `registry.gitlab.com/continuwuity/continuwuity` - `registry.gitlab.com/continuwuity/continuwuity`
- `git.nexy7574.co.uk/mirrored/continuwuity` (releases only, no `main`) - `git.nexy7574.co.uk/mirrored/continuwuity` (releases only, no `main`)
### Quick Run ### Run
Get a working Continuwuity server with an admin user in four steps: When you have the image, you can simply run it with
#### Prerequisites
Continuwuity requires HTTPS for Matrix federation. You'll need:
- A domain name pointing to your server
- A reverse proxy with SSL/TLS certificates (Traefik, Caddy, nginx, etc.)
See [Docker Compose](#docker-compose) for complete examples.
#### Environment Variables
- `CONTINUWUITY_SERVER_NAME` - Your Matrix server's domain name
- `CONTINUWUITY_DATABASE_PATH` - Where to store your database (must match the
volume mount)
- `CONTINUWUITY_ADDRESS` - Bind address (use `0.0.0.0` to listen on all
interfaces)
- `CONTINUWUITY_ALLOW_REGISTRATION` - Set to `false` to disable registration, or
use with `CONTINUWUITY_REGISTRATION_TOKEN` to require a token (see
[reference](../reference/environment-variables.mdx#registration--user-configuration)
for details)
See the
[Environment Variables Reference](../reference/environment-variables.mdx) for
more configuration options.
#### 1. Pull the image
```bash ```bash
docker pull forgejo.ellis.link/continuwuation/continuwuity:latest docker run -d -p 8448:6167 \
-v db:/var/lib/continuwuity/ \
-e CONTINUWUITY_SERVER_NAME="your.server.name" \
-e CONTINUWUITY_ALLOW_REGISTRATION=false \
--name continuwuity $LINK
``` ```
#### 2. Start the server with initial admin user or you can use [Docker Compose](#docker-compose).
```bash The `-d` flag lets the container run in detached mode. You may supply an
docker run -d \ optional `continuwuity.toml` config file, the example config can be found
-p 6167:6167 \ [here](../reference/config.mdx). You can pass in different env vars to
-v continuwuity_db:/var/lib/continuwuity \ change config values on the fly. You can even configure Continuwuity completely by
-e CONTINUWUITY_SERVER_NAME="matrix.example.com" \ using env vars. For an overview of possible values, please take a look at the
-e CONTINUWUITY_DATABASE_PATH="/var/lib/continuwuity" \ <a href="/examples/docker-compose.yml" target="_blank">`docker-compose.yml`</a> file.
-e CONTINUWUITY_ADDRESS="0.0.0.0" \
-e CONTINUWUITY_ALLOW_REGISTRATION="false" \
--name continuwuity \
forgejo.ellis.link/continuwuation/continuwuity:latest \
/sbin/conduwuit --execute "users create-user admin"
```
Replace `matrix.example.com` with your actual server name and `admin` with If you just want to test Continuwuity for a short time, you can use the `--rm`
your preferred username. flag, which cleans up everything related to your container after you stop
it.
#### 3. Get your admin password
```bash
docker logs continuwuity 2>&1 | grep "Created user"
```
You'll see output like:
```
Created user with user_id: @admin:matrix.example.com and password: `[auto-generated-password]`
```
#### 4. Configure your reverse proxy
Configure your reverse proxy to forward HTTPS traffic to Continuwuity. See
[Docker Compose](#docker-compose) for examples.
Once configured, log in with any Matrix client using `@admin:matrix.example.com`
and the generated password. You'll automatically be invited to the admin room
where you can manage your server.
### Docker Compose ### Docker Compose
Docker Compose is the recommended deployment method. These examples include If the `docker run` command is not suitable for you or your setup, you can also use one
reverse proxy configurations for Matrix federation. of the provided `docker-compose` files.
#### Matrix Federation Requirements Depending on your proxy setup, you can use one of the following files:
For Matrix federation to work, you need to serve `.well-known/matrix/client` and ### For existing Traefik setup
`.well-known/matrix/server` endpoints. You can achieve this either by:
1. **Using a well-known service** - The compose files below include an nginx
container to serve these files
2. **Using Continuwuity's built-in delegation** (easier for Traefik) - Configure
delegation files in your config, then proxy `/.well-known/matrix/*` to
Continuwuity
**Traefik example using built-in delegation:**
```yaml
labels:
traefik.http.routers.continuwuity.rule: >-
(Host(`matrix.example.com`) ||
(Host(`example.com`) && PathPrefix(`/.well-known/matrix`)))
```
This routes your Matrix domain and well-known paths to Continuwuity.
#### Creating Your First Admin User
Add the `--execute` command to create an admin user on first startup. In your
compose file, add under the `continuwuity` service:
```yaml
services:
continuwuity:
image: forgejo.ellis.link/continuwuation/continuwuity:latest
command: /sbin/conduwuit --execute "users create-user admin"
# ... rest of configuration
```
Then retrieve the auto-generated password:
```bash
docker compose logs continuwuity | grep "Created user"
```
#### Choose Your Reverse Proxy
Select the compose file that matches your setup:
:::note DNS Performance
Docker's default DNS resolver can cause performance issues with Matrix
federation. If you experience slow federation or DNS timeouts, you may need to
use your host's DNS resolver instead. Add this volume mount to the
`continuwuity` service:
```yaml
volumes:
- /etc/resolv.conf:/etc/resolv.conf:ro
```
See [Troubleshooting - DNS Issues](../troubleshooting.mdx#potential-dns-issues-when-using-docker)
for more details and alternative solutions.
:::
##### For existing Traefik setup
<details> <details>
<summary>docker-compose.for-traefik.yml</summary> <summary>docker-compose.for-traefik.yml</summary>
@ -181,7 +76,7 @@ for more details and alternative solutions.
</details> </details>
##### With Traefik included ### With Traefik included
<details> <details>
<summary>docker-compose.with-traefik.yml</summary> <summary>docker-compose.with-traefik.yml</summary>
@ -192,7 +87,7 @@ for more details and alternative solutions.
</details> </details>
##### With Caddy Docker Proxy ### With Caddy Docker Proxy
<details> <details>
<summary>docker-compose.with-caddy.yml</summary> <summary>docker-compose.with-caddy.yml</summary>
@ -203,15 +98,9 @@ Replace all `example.com` placeholders with your own domain.
``` ```
If you don't already have a network for Caddy to monitor, create one first:
```bash
docker network create caddy
```
</details> </details>
##### For other reverse proxies ### For other reverse proxies
<details> <details>
<summary>docker-compose.yml</summary> <summary>docker-compose.yml</summary>
@ -222,7 +111,7 @@ docker network create caddy
</details> </details>
##### Override file for customisation ### Override file
<details> <details>
<summary>docker-compose.override.yml</summary> <summary>docker-compose.override.yml</summary>
@ -233,24 +122,98 @@ docker network create caddy
</details> </details>
#### Starting Your Server When picking the Traefik-related compose file, rename it to
`docker-compose.yml`, and rename the override file to
`docker-compose.override.yml`. Edit the latter with the values you want for your
server.
1. Choose your compose file and rename it to `docker-compose.yml` When picking the `caddy-docker-proxy` compose file, it's important to first
2. If using the override file, rename it to `docker-compose.override.yml` and create the `caddy` network before spinning up the containers:
edit your values
3. Start the server: ```bash
docker network create caddy
```
After that, you can rename it to `docker-compose.yml` and spin up the
containers!
Additional info about deploying Continuwuity can be found [here](generic.mdx).
### Build
Official Continuwuity images are built using **Docker Buildx** and the Dockerfile found at [`docker/Dockerfile`][dockerfile-path]. This approach uses common Docker tooling and enables efficient multi-platform builds.
The resulting images are widely compatible with Docker and other container runtimes like Podman or containerd.
The images *do not contain a shell*. They contain only the Continuwuity binary, required libraries, TLS certificates, and metadata.
<details>
<summary>Click to view the Dockerfile</summary>
You can also <a href="https://forgejo.ellis.link/continuwuation/continuwuation/src/branch/main/docker/Dockerfile" target="_blank">view the Dockerfile on Forgejo</a>.
```dockerfile file="../../docker/Dockerfile"
```
</details>
To build an image locally using Docker Buildx, you can typically run a command like:
```bash
# Build for the current platform and load into the local Docker daemon
docker buildx build --load --tag continuwuity:latest -f docker/Dockerfile .
# Example: Build for specific platforms and push to a registry.
# docker buildx build --platform linux/amd64,linux/arm64 --tag registry.io/org/continuwuity:latest -f docker/Dockerfile . --push
# Example: Build binary optimised for the current CPU (standard release profile)
# docker buildx build --load \
# --tag continuwuity:latest \
# --build-arg TARGET_CPU=native \
# -f docker/Dockerfile .
# Example: Build maxperf variant (release-max-perf profile with LTO)
# Optimised for runtime performance and smaller binary size, but requires longer build time
# docker buildx build --load \
# --tag continuwuity:latest-maxperf \
# --build-arg TARGET_CPU=native \
# --build-arg RUST_PROFILE=release-max-perf \
# -f docker/Dockerfile .
```
Refer to the Docker Buildx documentation for more advanced build options.
[dockerfile-path]: https://forgejo.ellis.link/continuwuation/continuwuation/src/branch/main/docker/Dockerfile
### Run
If you have already built the image or want to use one from the registries, you
can start the container and everything else in the compose file in detached
mode with:
```bash ```bash
docker compose up -d docker compose up -d
``` ```
See the [generic deployment guide](generic.mdx) for more deployment options. > **Note:** Don't forget to modify and adjust the compose file to your needs.
### Building Custom Images ### Use Traefik as Proxy
For information on building your own Continuwuity Docker images, see the As a container user, you probably know about Traefik. It is an easy-to-use
[Building Docker Images](../development/index.mdx#building-docker-images) reverse proxy for making containerized apps and services available through the
section in the development documentation. web. With the Traefik-related docker-compose files provided above, it is equally easy
to deploy and use Continuwuity, with a small caveat. If you have already looked at
the files, you should have seen the `well-known` service, which is the
small caveat. Traefik is simply a proxy and load balancer and cannot
serve any kind of content. For Continuwuity to federate, we need to either
expose ports `443` and `8448` or serve two endpoints: `.well-known/matrix/client`
and `.well-known/matrix/server`.
With the service `well-known`, we use a single `nginx` container that serves
those two files.
Alternatively, you can use Continuwuity's built-in delegation file capability. Set up the delegation files in the configuration file, and then proxy paths under `/.well-known/matrix` to continuwuity. For example, the label ``traefik.http.routers.continuwuity.rule=(Host(`matrix.ellis.link`) || (Host(`ellis.link`) && PathPrefix(`/.well-known/matrix`)))`` does this for the domain `ellis.link`.
## Voice communication ## Voice communication

View file

@ -1,7 +1,7 @@
# Continuwuity for FreeBSD # Continuwuity for FreeBSD
Continuwuity doesn't provide official FreeBSD packages; however, a community-maintained set of packages is available on [Forgejo](https://forgejo.ellis.link/katie/continuwuity-bsd). Note that these are provided as standalone packages and are not part of a FreeBSD package repository (yet), so updates need to be downloaded and installed manually. Continuwuity currently does not provide FreeBSD builds or FreeBSD packaging. However, Continuwuity does build and work on FreeBSD using the system-provided RocksDB.
Please see the installation instructions in that repository. Direct any questions to its issue tracker or to [@katie:kat5.dev](https://matrix.to/#/@katie:kat5.dev). Contributions to get Continuwuity packaged for FreeBSD are welcome.
For general BSD support, please join our [Continuwuity BSD](https://matrix.to/#/%23bsd:continuwuity.org) community room. Please join our [Continuwuity BSD](https://matrix.to/#/%23bsd:continuwuity.org) community room.

View file

@ -39,7 +39,6 @@ spec:
- name: continuwuity - name: continuwuity
# use a sha hash <3 # use a sha hash <3
image: forgejo.ellis.link/continuwuation/continuwuity:latest image: forgejo.ellis.link/continuwuation/continuwuity:latest
command: ["/sbin/conduwuit"]
imagePullPolicy: IfNotPresent imagePullPolicy: IfNotPresent
ports: ports:
- name: http - name: http

View file

@ -2,8 +2,7 @@
Information about developing the project. If you are only interested in using Information about developing the project. If you are only interested in using
it, you can safely ignore this page. If you plan on contributing, see the it, you can safely ignore this page. If you plan on contributing, see the
[contributor's guide](./contributing.mdx) and [contributor's guide](./contributing.mdx) and [code style guide](./code_style.mdx).
[code style guide](./code_style.mdx).
## Continuwuity project layout ## Continuwuity project layout
@ -13,98 +12,86 @@ members are under `src/`. The workspace definition is at the top level / root
`Cargo.toml`. `Cargo.toml`.
The crate names are generally self-explanatory: The crate names are generally self-explanatory:
- `admin` is the admin room - `admin` is the admin room
- `api` is the HTTP API, Matrix C-S and S-S endpoints, etc - `api` is the HTTP API, Matrix C-S and S-S endpoints, etc
- `core` is core Continuwuity functionality like config loading, error - `core` is core Continuwuity functionality like config loading, error definitions,
definitions, global utilities, logging infrastructure, etc global utilities, logging infrastructure, etc
- `database` is RocksDB methods, helpers, RocksDB config, and general database - `database` is RocksDB methods, helpers, RocksDB config, and general database definitions,
definitions, utilities, or functions utilities, or functions
- `macros` are Continuwuity Rust [macros][macros] like general helper macros, - `macros` are Continuwuity Rust [macros][macros] like general helper macros, logging
logging and error handling macros, and [syn][syn] and [procedural and error handling macros, and [syn][syn] and [procedural macros][proc-macro]
macros][proc-macro] used for admin room commands and others used for admin room commands and others
- `main` is the "primary" sub-crate. This is where the `main()` function lives, - `main` is the "primary" sub-crate. This is where the `main()` function lives,
tokio worker and async initialisation, Sentry initialisation, [clap][clap] tokio worker and async initialisation, Sentry initialisation, [clap][clap] init,
init, and signal handling. If you are adding new [Rust features][features], and signal handling. If you are adding new [Rust features][features], they *must*
they _must_ go here. go here.
- `router` is the webserver and request handling bits, using axum, tower, - `router` is the webserver and request handling bits, using axum, tower, tower-http,
tower-http, hyper, etc, and the [global server state][state] to access hyper, etc, and the [global server state][state] to access `services`.
`services`.
- `service` is the high-level database definitions and functions for data, - `service` is the high-level database definitions and functions for data,
outbound/sending code, and other business logic such as media fetching. outbound/sending code, and other business logic such as media fetching.
It is highly unlikely you will ever need to add a new workspace member, but if It is highly unlikely you will ever need to add a new workspace member, but
you truly find yourself needing to, we recommend reaching out to us in the if you truly find yourself needing to, we recommend reaching out to us in
Matrix room for discussions about it beforehand. the Matrix room for discussions about it beforehand.
The primary inspiration for this design was a part of hot reloadable development, The primary inspiration for this design was a part of hot reloadable development,
to support "Continuwuity as a library" where specific parts can simply be to support "Continuwuity as a library" where specific parts can simply be swapped out.
swapped out. There is evidence Conduit wanted to go this route too as `axum` is There is evidence Conduit wanted to go this route too as `axum` is technically an
technically an optional feature in Conduit, and can be compiled without the optional feature in Conduit, and can be compiled without the binary or axum library
binary or axum library for handling inbound web requests; but it was never for handling inbound web requests; but it was never completed or worked.
completed or worked.
See the Rust documentation on [Workspaces][workspaces] for general questions and See the Rust documentation on [Workspaces][workspaces] for general questions
information on Cargo workspaces. and information on Cargo workspaces.
## Adding compile-time [features][features] ## Adding compile-time [features][features]
If you'd like to add a compile-time feature, you must first define it in the If you'd like to add a compile-time feature, you must first define it in
`main` workspace crate located in `src/main/Cargo.toml`. The feature must enable the `main` workspace crate located in `src/main/Cargo.toml`. The feature must
a feature in the other workspace crate(s) you intend to use it in. Then the said enable a feature in the other workspace crate(s) you intend to use it in. Then
workspace crate(s) must define the feature there in its `Cargo.toml`. the said workspace crate(s) must define the feature there in its `Cargo.toml`.
So, if this is adding a feature to the API such as `woof`, you define the So, if this is adding a feature to the API such as `woof`, you define the feature
feature in the `api` crate's `Cargo.toml` as `woof = []`. The feature definition in the `api` crate's `Cargo.toml` as `woof = []`. The feature definition in `main`'s
in `main`'s `Cargo.toml` will be `woof = ["conduwuit-api/woof"]`. `Cargo.toml` will be `woof = ["conduwuit-api/woof"]`.
The rationale for this is due to Rust / Cargo not supporting ["workspace level The rationale for this is due to Rust / Cargo not supporting
features"][9], we must make a choice of; either scattering features all over the ["workspace level features"][9], we must make a choice of; either scattering
workspace crates, making it difficult for anyone to add or remove default features all over the workspace crates, making it difficult for anyone to add
features; or define all the features in one central workspace crate that or remove default features; or define all the features in one central workspace
propagate down/up to the other workspace crates. It is a Cargo pitfall, and we'd crate that propagate down/up to the other workspace crates. It is a Cargo pitfall,
like to see better developer UX in Rust's Workspaces. and we'd like to see better developer UX in Rust's Workspaces.
Additionally, the definition of one single place makes "feature collection" in Additionally, the definition of one single place makes "feature collection" in our
our Nix flake a million times easier instead of collecting and deduping them all Nix flake a million times easier instead of collecting and deduping them all from
from searching in all the workspace crates' `Cargo.toml`s. Though we wouldn't searching in all the workspace crates' `Cargo.toml`s. Though we wouldn't need to
need to do this if Rust supported workspace-level features to begin with. do this if Rust supported workspace-level features to begin with.
## List of forked dependencies ## List of forked dependencies
During Continuwuity (and prior projects) development, we have had to fork some During Continuwuity (and prior projects) development, we have had to fork some dependencies to support our use-cases.
dependencies to support our use-cases. These forks exist for various reasons These forks exist for various reasons including features that upstream projects won't accept,
including features that upstream projects won't accept, faster-paced faster-paced development, Continuwuity-specific usecases, or lack of time to upstream changes.
development, Continuwuity-specific usecases, or lack of time to upstream
changes.
All forked dependencies are maintained under the All forked dependencies are maintained under the [continuwuation organization on Forgejo](https://forgejo.ellis.link/continuwuation):
[continuwuation organization on Forgejo](https://forgejo.ellis.link/continuwuation):
- [ruwuma][continuwuation-ruwuma] - Fork of [ruma/ruma][ruma] with various - [ruwuma][continuwuation-ruwuma] - Fork of [ruma/ruma][ruma] with various performance improvements, more features and better client/server interop
performance improvements, more features and better client/server interop - [rocksdb][continuwuation-rocksdb] - Fork of [facebook/rocksdb][rocksdb] via [`@zaidoon1`][8] with liburing build fixes and GCC debug build fixes
- [rocksdb][continuwuation-rocksdb] - Fork of [facebook/rocksdb][rocksdb] via - [jemallocator][continuwuation-jemallocator] - Fork of [tikv/jemallocator][jemallocator] fixing musl builds, suspicious code,
[`@zaidoon1`][8] with liburing build fixes and GCC debug build fixes and adding support for redzones in Valgrind
- [jemallocator][continuwuation-jemallocator] - Fork of - [rustyline-async][continuwuation-rustyline-async] - Fork of [zyansheep/rustyline-async][rustyline-async] with tab completion callback
[tikv/jemallocator][jemallocator] fixing musl builds, suspicious code, and and `CTRL+\` signal quit event for Continuwuity console CLI
adding support for redzones in Valgrind - [rust-rocksdb][continuwuation-rust-rocksdb] - Fork of [rust-rocksdb/rust-rocksdb][rust-rocksdb] fixing musl build issues,
- [rustyline-async][continuwuation-rustyline-async] - Fork of removing unnecessary `gtest` include, and using our RocksDB and jemallocator forks
[zyansheep/rustyline-async][rustyline-async] with tab completion callback and - [tracing][continuwuation-tracing] - Fork of [tokio-rs/tracing][tracing] implementing `Clone` for `EnvFilter` to
`CTRL+\` signal quit event for Continuwuity console CLI support dynamically changing tracing environments
- [rust-rocksdb][continuwuation-rust-rocksdb] - Fork of
[rust-rocksdb/rust-rocksdb][rust-rocksdb] fixing musl build issues, removing
unnecessary `gtest` include, and using our RocksDB and jemallocator forks
- [tracing][continuwuation-tracing] - Fork of [tokio-rs/tracing][tracing]
implementing `Clone` for `EnvFilter` to support dynamically changing tracing
environments
## Debugging with `tokio-console` ## Debugging with `tokio-console`
[`tokio-console`][7] can be a useful tool for debugging and profiling. To make a [`tokio-console`][7] can be a useful tool for debugging and profiling. To make a
`tokio-console`-enabled build of Continuwuity, enable the `tokio_console` `tokio-console`-enabled build of Continuwuity, enable the `tokio_console` feature,
feature, disable the default `release_max_log_level` feature, and set the disable the default `release_max_log_level` feature, and set the `--cfg
`--cfg tokio_unstable` flag to enable experimental tokio APIs. A build might tokio_unstable` flag to enable experimental tokio APIs. A build might look like
look like this: this:
```bash ```bash
RUSTFLAGS="--cfg tokio_unstable" cargo +nightly build \ RUSTFLAGS="--cfg tokio_unstable" cargo +nightly build \
@ -113,84 +100,34 @@ RUSTFLAGS="--cfg tokio_unstable" cargo +nightly build \
--features=systemd,element_hacks,gzip_compression,brotli_compression,zstd_compression,tokio_console --features=systemd,element_hacks,gzip_compression,brotli_compression,zstd_compression,tokio_console
``` ```
You will also need to enable the `tokio_console` config option in Continuwuity You will also need to enable the `tokio_console` config option in Continuwuity when
when starting it. This was due to tokio-console causing gradual memory starting it. This was due to tokio-console causing gradual memory leak/usage
leak/usage if left enabled. if left enabled.
## Building Docker Images ## Building Docker Images
Official Continuwuity images are built using **Docker Buildx** and the To build a Docker image for Continuwuity, use the standard Docker build command:
Dockerfile found at [`docker/Dockerfile`][dockerfile-path].
The images are compatible with Docker and other container runtimes like Podman
or containerd.
The images _do not contain a shell_. They contain only the Continuwuity binary,
required libraries, TLS certificates, and metadata.
<details>
<summary>Click to view the Dockerfile</summary>
You can also
<a
href="<https://forgejo.ellis.link/continuwuation/continuwuation/src/branch/main/docker/Dockerfile>"
target="_blank"
>
view the Dockerfile on Forgejo
</a>
.
```dockerfile file="../../docker/Dockerfile"
```
</details>
### Building Locally
To build an image locally using Docker Buildx:
```bash ```bash
# Build for the current platform and load into the local Docker daemon docker build -f docker/Dockerfile .
docker buildx build --load --tag continuwuity:latest -f docker/Dockerfile .
# Example: Build for specific platforms and push to a registry
# docker buildx build --platform linux/amd64,linux/arm64 --tag registry.io/org/continuwuity:latest -f docker/Dockerfile . --push
# Example: Build binary optimised for the current CPU (standard release profile)
# docker buildx build --load \
# --tag continuwuity:latest \
# --build-arg TARGET_CPU=native \
# -f docker/Dockerfile .
# Example: Build maxperf variant (release-max-perf profile with LTO)
# docker buildx build --load \
# --tag continuwuity:latest-maxperf \
# --build-arg TARGET_CPU=native \
# --build-arg RUST_PROFILE=release-max-perf \
# -f docker/Dockerfile .
``` ```
Refer to the Docker Buildx documentation for more advanced build options. The image can be cross-compiled for different architectures.
[dockerfile-path]:
https://forgejo.ellis.link/continuwuation/continuwuation/src/branch/main/docker/Dockerfile
[continuwuation-ruwuma]: https://forgejo.ellis.link/continuwuation/ruwuma [continuwuation-ruwuma]: https://forgejo.ellis.link/continuwuation/ruwuma
[continuwuation-rocksdb]: https://forgejo.ellis.link/continuwuation/rocksdb [continuwuation-rocksdb]: https://forgejo.ellis.link/continuwuation/rocksdb
[continuwuation-jemallocator]: [continuwuation-jemallocator]: https://forgejo.ellis.link/continuwuation/jemallocator
https://forgejo.ellis.link/continuwuation/jemallocator [continuwuation-rustyline-async]: https://forgejo.ellis.link/continuwuation/rustyline-async
[continuwuation-rustyline-async]: [continuwuation-rust-rocksdb]: https://forgejo.ellis.link/continuwuation/rust-rocksdb
https://forgejo.ellis.link/continuwuation/rustyline-async
[continuwuation-rust-rocksdb]:
https://forgejo.ellis.link/continuwuation/rust-rocksdb
[continuwuation-tracing]: https://forgejo.ellis.link/continuwuation/tracing [continuwuation-tracing]: https://forgejo.ellis.link/continuwuation/tracing
[ruma]: https://github.com/ruma/ruma/ [ruma]: https://github.com/ruma/ruma/
[rocksdb]: https://github.com/facebook/rocksdb/ [rocksdb]: https://github.com/facebook/rocksdb/
[jemallocator]: https://github.com/tikv/jemallocator/ [jemallocator]: https://github.com/tikv/jemallocator/
[rustyline-async]: https://github.com/zyansheep/rustyline-async/ [rustyline-async]: https://github.com/zyansheep/rustyline-async/
[rust-rocksdb]: https://github.com/rust-rocksdb/rust-rocksdb/ [rust-rocksdb]: https://github.com/rust-rocksdb/rust-rocksdb/
[tracing]: https://github.com/tokio-rs/tracing/ [tracing]: https://github.com/tokio-rs/tracing/
[7]: https://docs.rs/tokio-console/latest/tokio_console/ [7]: https://docs.rs/tokio-console/latest/tokio_console/
[8]: https://github.com/zaidoon1/ [8]: https://github.com/zaidoon1/
[9]: https://github.com/rust-lang/cargo/issues/12162 [9]: https://github.com/rust-lang/cargo/issues/12162

View file

@ -1,226 +0,0 @@
# Space Permission Cascading — Design Document
**Date:** 2026-03-17
**Status:** Approved
## Overview
Server-side feature that allows user rights in a Space to cascade down to its
direct child rooms. Includes power level cascading and role-based room access
control. Enabled via a server-wide configuration flag, disabled by default.
## Requirements
1. Power levels defined in a Space cascade to all direct child rooms (Space
always wins over per-room overrides).
2. Admins can define custom roles in a Space and assign them to users.
3. Child rooms can require one or more roles for access.
4. Enforcement is continuous — role revocation auto-kicks users from rooms they
no longer qualify for.
5. Users are auto-joined to all qualifying child rooms when they join a Space or
receive a new role.
6. Cascading applies to direct parent Space only; no nested cascade through
sub-spaces.
7. Feature is toggled by a single server-wide config flag
(`space_permission_cascading`), off by default.
## Configuration
```toml
# conduwuit-example.toml
# Enable space permission cascading (power levels and role-based access).
# When enabled, power levels cascade from Spaces to child rooms and rooms
# can require roles for access. Applies to all Spaces on this server.
# Default: false
space_permission_cascading = false
```
## Custom State Events
All events live in the Space room.
### `m.space.roles` (state key: `""`)
Defines the available roles for the Space. Two default roles (`admin` and `mod`)
are created automatically when a Space is first encountered with the feature
enabled.
```json
{
"roles": {
"admin": {
"description": "Space administrator",
"power_level": 100
},
"mod": {
"description": "Space moderator",
"power_level": 50
},
"nsfw": {
"description": "Access to NSFW content"
},
"vip": {
"description": "VIP member"
}
}
}
```
- `description` (string, required): Human-readable description.
- `power_level` (integer, optional): If present, users with this role receive
this power level in all child rooms. When a user holds multiple roles with
power levels, the highest value wins.
### `m.space.role.member` (state key: user ID)
Assigns roles to a user within the Space.
```json
{
"roles": ["nsfw", "vip"]
}
```
### `m.space.role.room` (state key: room ID)
Declares which roles a child room requires. A user must hold **all** listed
roles to access the room.
```json
{
"required_roles": ["nsfw"]
}
```
## Enforcement Rules
All enforcement is skipped when `space_permission_cascading = false`.
### 1. Join gating
When a user attempts to join a room that is a direct child of a Space:
- Look up the room's `m.space.role.room` event in the parent Space.
- If the room has `required_roles`, check the user's `m.space.role.member`.
- Reject the join if the user is missing any required role.
### 2. Power level override
For every user in a child room of a Space:
- Look up their roles via `m.space.role.member` in the parent Space.
- For each role that has a `power_level`, take the highest value.
- Override the user's power level in the child room's `m.room.power_levels`.
- Reject attempts to manually set per-room power levels that conflict with
Space-granted levels.
### 3. Role revocation
When an `m.space.role.member` event is updated and a role is removed:
- Identify all child rooms that require the removed role.
- Auto-kick the user from rooms they no longer qualify for.
- Recalculate and update the user's power level in all child rooms.
### 4. Room requirement change
When an `m.space.role.room` event is updated with new requirements:
- Check all current members of the room.
- Auto-kick members who do not hold all newly required roles.
### 5. Auto-join on role grant
When an `m.space.role.member` event is updated and a role is added:
- Find all child rooms where the user now meets all required roles.
- Auto-join the user to qualifying rooms they are not already in.
This also applies when a user first joins the Space — they are auto-joined to
all child rooms they qualify for. Rooms with no role requirements auto-join all
Space members.
### 6. New child room
When a new `m.space.child` event is added to a Space:
- Auto-join all qualifying Space members to the new child room.
## Caching & Indexing
The source of truth is always the state events. The server maintains an
in-memory index for fast enforcement lookups, following the same patterns as the
existing `roomid_spacehierarchy_cache`.
### Index structures
| Index | Source event |
|------------------------------|------------------------|
| Space → roles defined | `m.space.roles` |
| Space → user → roles | `m.space.role.member` |
| Space → room → required roles| `m.space.role.room` |
| Room → parent Space | `m.space.child` (reverse lookup) |
The Space → child rooms mapping already exists.
### Cache invalidation triggers
| Event changed | Action |
|----------------------------|-----------------------------------------------------|
| `m.space.roles` | Refresh role definitions, revalidate all members |
| `m.space.role.member` | Refresh user's roles, trigger auto-join/kick |
| `m.space.role.room` | Refresh room requirements, trigger auto-join/kick |
| `m.space.child` added | Index new child, auto-join qualifying members |
| `m.space.child` removed | Remove from index (no auto-kick) |
| Server startup | Full rebuild from state events |
## Admin Room Commands
Roles are managed via the existing admin room interface, which sends the
appropriate state events under the hood and triggers enforcement.
```
!admin space roles list <space>
!admin space roles add <space> <role_name> [description] [power_level]
!admin space roles remove <space> <role_name>
!admin space roles assign <space> <user_id> <role_name>
!admin space roles revoke <space> <user_id> <role_name>
!admin space roles require <space> <room_id> <role_name>
!admin space roles unrequire <space> <room_id> <role_name>
!admin space roles user <space> <user_id>
!admin space roles room <space> <room_id>
```
## Architecture
**Approach:** Hybrid — state events for definition, database cache for
enforcement.
- State events are the source of truth and federate normally.
- The server maintains an in-memory cache/index for fast enforcement.
- Cache is invalidated on relevant state event changes and fully rebuilt on
startup.
- All enforcement hooks (join gating, PL override, auto-join, auto-kick) check
the feature flag first and no-op when disabled.
- Existing clients can manage roles via Developer Tools (custom state events).
The admin room commands provide a user-friendly interface.
## Scope
### In scope
- Server-wide feature flag
- Custom state events for role definition, assignment, and room requirements
- Power level cascading (Space always wins)
- Continuous enforcement (auto-join, auto-kick)
- Admin room commands
- In-memory caching with invalidation
- Default `admin` (PL 100) and `mod` (PL 50) roles
### Out of scope
- Client-side UI for role management
- Nested cascade through sub-spaces
- Per-space opt-in/opt-out (it is server-wide)
- Federation-specific logic beyond normal state event replication

File diff suppressed because it is too large Load diff

View file

@ -1 +1 @@
{"m.homeserver":{"base_url": "https://matrix.continuwuity.org"},"org.matrix.msc4143.rtc_foci":[{"type":"livekit","livekit_service_url":"https://livekit.ellis.link"}]} {"m.homeserver":{"base_url": "https://matrix.continuwuity.org"},"org.matrix.msc3575.proxy":{"url": "https://matrix.continuwuity.org"},"org.matrix.msc4143.rtc_foci":[{"type":"livekit","livekit_service_url":"https://livekit.ellis.link"}]}

View file

@ -4,11 +4,6 @@
"name": "config", "name": "config",
"label": "Configuration" "label": "Configuration"
}, },
{
"type": "file",
"name": "environment-variables",
"label": "Environment Variables"
},
{ {
"type": "file", "type": "file",
"name": "admin", "name": "admin",

View file

@ -1,281 +0,0 @@
# Environment Variables
Continuwuity can be configured entirely through environment variables, making it
ideal for containerised deployments and infrastructure-as-code scenarios.
This is a convenience reference and may not be exhaustive. The
[Configuration Reference](./config.mdx) is the primary source for all
configuration options.
## Prefix System
Continuwuity supports three environment variable prefixes for backwards
compatibility:
- `CONTINUWUITY_*` (current, recommended)
- `CONDUWUIT_*` (compatibility)
- `CONDUIT_*` (legacy)
All three prefixes work identically. Use double underscores (`__`) to represent
nested configuration sections from the TOML config.
**Examples:**
```bash
# Simple top-level config
CONTINUWUITY_SERVER_NAME="matrix.example.com"
CONTINUWUITY_PORT="8008"
# Nested config sections use double underscores
# This maps to [database] section in TOML
CONTINUWUITY_DATABASE__PATH="/var/lib/continuwuity"
# This maps to [tls] section in TOML
CONTINUWUITY_TLS__CERTS="/path/to/cert.pem"
```
## Configuration File Override
You can specify a custom configuration file path:
- `CONTINUWUITY_CONFIG` - Path to continuwuity.toml (current)
- `CONDUWUIT_CONFIG` - Path to config file (compatibility)
- `CONDUIT_CONFIG` - Path to config file (legacy)
## Essential Variables
These are the minimum variables needed for a working deployment:
| Variable | Description | Default |
| ---------------------------- | ---------------------------------- | ---------------------- |
| `CONTINUWUITY_SERVER_NAME` | Your Matrix server's domain name | Required |
| `CONTINUWUITY_DATABASE_PATH` | Path to RocksDB database directory | `/var/lib/conduwuit` |
| `CONTINUWUITY_ADDRESS` | IP address to bind to | `["127.0.0.1", "::1"]` |
| `CONTINUWUITY_PORT` | Port to listen on | `8008` |
## Network Configuration
| Variable | Description | Default |
| -------------------------------- | ----------------------------------------------- | ---------------------- |
| `CONTINUWUITY_ADDRESS` | Bind address (use `0.0.0.0` for all interfaces) | `["127.0.0.1", "::1"]` |
| `CONTINUWUITY_PORT` | HTTP port | `8008` |
| `CONTINUWUITY_UNIX_SOCKET_PATH` | UNIX socket path (alternative to TCP) | - |
| `CONTINUWUITY_UNIX_SOCKET_PERMS` | Socket permissions (octal) | `660` |
## Database Configuration
| Variable | Description | Default |
| ------------------------------------------ | --------------------------- | -------------------- |
| `CONTINUWUITY_DATABASE_PATH` | RocksDB data directory | `/var/lib/conduwuit` |
| `CONTINUWUITY_DATABASE_BACKUP_PATH` | Backup directory | - |
| `CONTINUWUITY_DATABASE_BACKUPS_TO_KEEP` | Number of backups to retain | `1` |
| `CONTINUWUITY_DB_CACHE_CAPACITY_MB` | Database read cache (MB) | - |
| `CONTINUWUITY_DB_WRITE_BUFFER_CAPACITY_MB` | Write cache (MB) | - |
## Cache Configuration
| Variable | Description |
| ---------------------------------------- | ------------------------ |
| `CONTINUWUITY_CACHE_CAPACITY_MODIFIER` | LRU cache multiplier |
| `CONTINUWUITY_PDU_CACHE_CAPACITY` | PDU cache entries |
| `CONTINUWUITY_AUTH_CHAIN_CACHE_CAPACITY` | Auth chain cache entries |
## DNS Configuration
Configure DNS resolution behaviour for federation and external requests.
| Variable | Description | Default |
| ------------------------------------ | ---------------------------- | -------- |
| `CONTINUWUITY_DNS_CACHE_ENTRIES` | Max DNS cache entries | `32768` |
| `CONTINUWUITY_DNS_MIN_TTL` | Minimum cache TTL (seconds) | `10800` |
| `CONTINUWUITY_DNS_MIN_TTL_NXDOMAIN` | NXDOMAIN cache TTL (seconds) | `259200` |
| `CONTINUWUITY_DNS_ATTEMPTS` | Retry attempts | - |
| `CONTINUWUITY_DNS_TIMEOUT` | Query timeout (seconds) | - |
| `CONTINUWUITY_DNS_TCP_FALLBACK` | Allow TCP fallback | - |
| `CONTINUWUITY_QUERY_ALL_NAMESERVERS` | Query all nameservers | - |
| `CONTINUWUITY_QUERY_OVER_TCP_ONLY` | TCP-only queries | - |
## Request Configuration
| Variable | Description |
| ------------------------------------ | ----------------------------- |
| `CONTINUWUITY_MAX_REQUEST_SIZE` | Max HTTP request size (bytes) |
| `CONTINUWUITY_REQUEST_CONN_TIMEOUT` | Connection timeout (seconds) |
| `CONTINUWUITY_REQUEST_TIMEOUT` | Overall request timeout |
| `CONTINUWUITY_REQUEST_TOTAL_TIMEOUT` | Total timeout |
| `CONTINUWUITY_REQUEST_IDLE_TIMEOUT` | Idle timeout |
| `CONTINUWUITY_REQUEST_IDLE_PER_HOST` | Idle connections per host |
## Federation Configuration
Control how your server federates with other Matrix servers.
| Variable | Description | Default |
| ---------------------------------------------- | ----------------------------- | ------- |
| `CONTINUWUITY_ALLOW_FEDERATION` | Enable federation | `true` |
| `CONTINUWUITY_FEDERATION_LOOPBACK` | Allow loopback federation | - |
| `CONTINUWUITY_FEDERATION_CONN_TIMEOUT` | Connection timeout | - |
| `CONTINUWUITY_FEDERATION_TIMEOUT` | Request timeout | - |
| `CONTINUWUITY_FEDERATION_IDLE_TIMEOUT` | Idle timeout | - |
| `CONTINUWUITY_FEDERATION_IDLE_PER_HOST` | Idle connections per host | - |
| `CONTINUWUITY_TRUSTED_SERVERS` | JSON array of trusted servers | - |
| `CONTINUWUITY_QUERY_TRUSTED_KEY_SERVERS_FIRST` | Query trusted first | - |
| `CONTINUWUITY_ONLY_QUERY_TRUSTED_KEY_SERVERS` | Only query trusted | - |
**Example:**
```bash
# Trust matrix.org for key verification
CONTINUWUITY_TRUSTED_SERVERS='["matrix.org"]'
```
## Registration & User Configuration
Control user registration and account creation behaviour.
| Variable | Description | Default |
| ------------------------------------------ | --------------------- | ------- |
| `CONTINUWUITY_ALLOW_REGISTRATION` | Enable registration | `true` |
| `CONTINUWUITY_REGISTRATION_TOKEN` | Token requirement | - |
| `CONTINUWUITY_SUSPEND_ON_REGISTER` | Suspend new accounts | - |
| `CONTINUWUITY_NEW_USER_DISPLAYNAME_SUFFIX` | Display name suffix | 🏳️‍⚧️ |
| `CONTINUWUITY_RECAPTCHA_SITE_KEY` | reCAPTCHA site key | - |
| `CONTINUWUITY_RECAPTCHA_PRIVATE_SITE_KEY` | reCAPTCHA private key | - |
**Example:**
```bash
# Disable open registration
CONTINUWUITY_ALLOW_REGISTRATION="false"
# Require a registration token
CONTINUWUITY_REGISTRATION_TOKEN="your_secret_token_here"
```
## Feature Configuration
| Variable | Description | Default |
| ---------------------------------------------------------- | -------------------------- | ------- |
| `CONTINUWUITY_ALLOW_ENCRYPTION` | Enable E2EE | `true` |
| `CONTINUWUITY_ALLOW_ROOM_CREATION` | Enable room creation | - |
| `CONTINUWUITY_ALLOW_UNSTABLE_ROOM_VERSIONS` | Allow unstable versions | - |
| `CONTINUWUITY_DEFAULT_ROOM_VERSION` | Default room version | `v11` |
| `CONTINUWUITY_REQUIRE_AUTH_FOR_PROFILE_REQUESTS` | Auth for profiles | - |
| `CONTINUWUITY_ALLOW_PUBLIC_ROOM_DIRECTORY_OVER_FEDERATION` | Federate directory | - |
| `CONTINUWUITY_ALLOW_PUBLIC_ROOM_DIRECTORY_WITHOUT_AUTH` | Unauth directory | - |
| `CONTINUWUITY_ALLOW_DEVICE_NAME_FEDERATION` | Device names in federation | - |
## TLS Configuration
Built-in TLS support is primarily for testing. **For production deployments,
especially when federating on the internet, use a reverse proxy** (Traefik,
Caddy, nginx) to handle TLS termination.
| Variable | Description |
| --------------------------------- | ------------------------- |
| `CONTINUWUITY_TLS__CERTS` | TLS certificate file path |
| `CONTINUWUITY_TLS__KEY` | TLS private key path |
| `CONTINUWUITY_TLS__DUAL_PROTOCOL` | Support TLS 1.2 + 1.3 |
**Example (testing only):**
```bash
CONTINUWUITY_TLS__CERTS="/etc/letsencrypt/live/matrix.example.com/fullchain.pem"
CONTINUWUITY_TLS__KEY="/etc/letsencrypt/live/matrix.example.com/privkey.pem"
```
## Logging Configuration
Control log output format and verbosity.
| Variable | Description | Default |
| ------------------------------ | ------------------ | ------- |
| `CONTINUWUITY_LOG` | Log filter level | - |
| `CONTINUWUITY_LOG_COLORS` | ANSI colours | `true` |
| `CONTINUWUITY_LOG_SPAN_EVENTS` | Log span events | `none` |
| `CONTINUWUITY_LOG_THREAD_IDS` | Include thread IDs | - |
**Examples:**
```bash
# Set log level to info
CONTINUWUITY_LOG="info"
# Enable debug logging for specific modules
CONTINUWUITY_LOG="warn,continuwuity::api=debug"
# Disable colours for log aggregation
CONTINUWUITY_LOG_COLORS="false"
```
## Observability Configuration
| Variable | Description |
| ---------------------------------------- | --------------------- |
| `CONTINUWUITY_ALLOW_OTLP` | Enable OpenTelemetry |
| `CONTINUWUITY_OTLP_FILTER` | OTLP filter level |
| `CONTINUWUITY_OTLP_PROTOCOL` | Protocol (http/grpc) |
| `CONTINUWUITY_TRACING_FLAME` | Enable flame graphs |
| `CONTINUWUITY_TRACING_FLAME_FILTER` | Flame graph filter |
| `CONTINUWUITY_TRACING_FLAME_OUTPUT_PATH` | Output directory |
| `CONTINUWUITY_SENTRY` | Enable Sentry |
| `CONTINUWUITY_SENTRY_ENDPOINT` | Sentry DSN |
| `CONTINUWUITY_SENTRY_SEND_SERVER_NAME` | Include server name |
| `CONTINUWUITY_SENTRY_TRACES_SAMPLE_RATE` | Sample rate (0.0-1.0) |
## Admin Configuration
Configure admin users and automated command execution.
| Variable | Description | Default |
| ------------------------------------------ | -------------------------------- | ----------------- |
| `CONTINUWUITY_ADMINS_LIST` | JSON array of admin user IDs | - |
| `CONTINUWUITY_ADMINS_FROM_ROOM` | Derive admins from room | - |
| `CONTINUWUITY_ADMIN_ESCAPE_COMMANDS` | Allow `\` prefix in public rooms | - |
| `CONTINUWUITY_ADMIN_CONSOLE_AUTOMATIC` | Auto-activate console | - |
| `CONTINUWUITY_ADMIN_EXECUTE` | JSON array of startup commands | - |
| `CONTINUWUITY_ADMIN_EXECUTE_ERRORS_IGNORE` | Ignore command errors | - |
| `CONTINUWUITY_ADMIN_SIGNAL_EXECUTE` | Commands on SIGUSR2 | - |
| `CONTINUWUITY_ADMIN_ROOM_TAG` | Admin room tag | `m.server_notice` |
**Examples:**
```bash
# Create admin user on startup
CONTINUWUITY_ADMIN_EXECUTE='["users create-user admin", "users make-user-admin admin"]'
# Specify admin users directly
CONTINUWUITY_ADMINS_LIST='["@alice:example.com", "@bob:example.com"]'
```
## Media & URL Preview Configuration
| Variable | Description |
| ---------------------------------------------------- | ------------------ |
| `CONTINUWUITY_URL_PREVIEW_BOUND_INTERFACE` | Bind interface |
| `CONTINUWUITY_URL_PREVIEW_DOMAIN_CONTAINS_ALLOWLIST` | Domain allowlist |
| `CONTINUWUITY_URL_PREVIEW_DOMAIN_EXPLICIT_ALLOWLIST` | Explicit allowlist |
| `CONTINUWUITY_URL_PREVIEW_DOMAIN_EXPLICIT_DENYLIST` | Explicit denylist |
| `CONTINUWUITY_URL_PREVIEW_MAX_SPIDER_SIZE` | Max fetch size |
| `CONTINUWUITY_URL_PREVIEW_TIMEOUT` | Fetch timeout |
| `CONTINUWUITY_IP_RANGE_DENYLIST` | IP range denylist |
## Tokio Runtime Configuration
These can be set as environment variables or CLI arguments:
| Variable | Description |
| ----------------------------------------- | -------------------------- |
| `TOKIO_WORKER_THREADS` | Worker thread count |
| `TOKIO_GLOBAL_QUEUE_INTERVAL` | Global queue interval |
| `TOKIO_EVENT_INTERVAL` | Event interval |
| `TOKIO_MAX_IO_EVENTS_PER_TICK` | Max I/O events per tick |
| `CONTINUWUITY_RUNTIME_HISTOGRAM_INTERVAL` | Histogram bucket size (μs) |
| `CONTINUWUITY_RUNTIME_HISTOGRAM_BUCKETS` | Bucket count |
| `CONTINUWUITY_RUNTIME_WORKER_AFFINITY` | Enable worker affinity |
## See Also
- [Configuration Reference](./config.mdx) - Complete TOML configuration
documentation
- [Admin Commands](./admin/) - Admin command reference

View file

@ -6,7 +6,7 @@ misconfigurations to cause issues, particularly with networking and permissions.
Please check that your issues are not due to problems with your Docker setup. Please check that your issues are not due to problems with your Docker setup.
::: :::
## Continuwuity issues ## Continuwuity and Matrix issues
### Slow joins to rooms ### Slow joins to rooms
@ -23,16 +23,6 @@ which is a longstanding bug with synchronizing room joins to clients. In this si
the bug caused your homeserver to forget to tell your client. **To fix this, clear your client's cache.** Both Element and Cinny the bug caused your homeserver to forget to tell your client. **To fix this, clear your client's cache.** Both Element and Cinny
have a button to clear their cache in the "About" section of their settings. have a button to clear their cache in the "About" section of their settings.
### Configuration not working as expected
Sometimes you can make a mistake in your configuration that
means things don't get passed to Continuwuity correctly.
This is particularly easy to do with environment variables.
To check what configuration Continuwuity actually sees, you can
use the `!admin server show-config` command in your admin room.
Beware that this prints out any secrets in your configuration,
so you might want to delete the result afterwards!
### Lost access to admin room ### Lost access to admin room
You can reinvite yourself to the admin room through the following methods: You can reinvite yourself to the admin room through the following methods:
@ -43,7 +33,17 @@ argument once to invite yourself to the admin room on startup
- Or specify the `emergency_password` config option to allow you to temporarily - Or specify the `emergency_password` config option to allow you to temporarily
log into the server account (`@conduit`) from a web client log into the server account (`@conduit`) from a web client
## DNS issues ## General potential issues
### Configuration not working as expected
Sometimes you can make a mistake in your configuration that
means things don't get passed to Continuwuity correctly.
This is particularly easy to do with environment variables.
To check what configuration Continuwuity actually sees, you can
use the `!admin server show-config` command in your admin room.
Beware that this prints out any secrets in your configuration,
so you might want to delete the result afterwards!
### Potential DNS issues when using Docker ### Potential DNS issues when using Docker

54
flake.lock generated
View file

@ -3,11 +3,11 @@
"advisory-db": { "advisory-db": {
"flake": false, "flake": false,
"locked": { "locked": {
"lastModified": 1773786698, "lastModified": 1766324728,
"narHash": "sha256-o/J7ZculgwSs1L4H4UFlFZENOXTJzq1X0n71x6oNNvY=", "narHash": "sha256-9C+WyE5U3y5w4WQXxmb0ylRyMMsPyzxielWXSHrcDpE=",
"owner": "rustsec", "owner": "rustsec",
"repo": "advisory-db", "repo": "advisory-db",
"rev": "99e9de91bb8b61f06ef234ff84e11f758ecd5384", "rev": "c88b88c62bda077be8aa621d4e89d8701e39cb5d",
"type": "github" "type": "github"
}, },
"original": { "original": {
@ -18,11 +18,11 @@
}, },
"crane": { "crane": {
"locked": { "locked": {
"lastModified": 1773189535, "lastModified": 1766194365,
"narHash": "sha256-E1G/Or6MWeP+L6mpQ0iTFLpzSzlpGrITfU2220Gq47g=", "narHash": "sha256-4AFsUZ0kl6MXSm4BaQgItD0VGlEKR3iq7gIaL7TjBvc=",
"owner": "ipetkov", "owner": "ipetkov",
"repo": "crane", "repo": "crane",
"rev": "6fa2fb4cf4a89ba49fc9dd5a3eb6cde99d388269", "rev": "7d8ec2c71771937ab99790b45e6d9b93d15d9379",
"type": "github" "type": "github"
}, },
"original": { "original": {
@ -39,11 +39,11 @@
"rust-analyzer-src": "rust-analyzer-src" "rust-analyzer-src": "rust-analyzer-src"
}, },
"locked": { "locked": {
"lastModified": 1773732206, "lastModified": 1766299592,
"narHash": "sha256-HKibxaUXyWd4Hs+ZUnwo6XslvaFqFqJh66uL9tphU4Q=", "narHash": "sha256-7u+q5hexu2eAxL2VjhskHvaUKg+GexmelIR2ve9Nbb4=",
"owner": "nix-community", "owner": "nix-community",
"repo": "fenix", "repo": "fenix",
"rev": "0aa13c1b54063a8d8679b28a5cd357ba98f4a56b", "rev": "381579dee168d5ced412e2990e9637ecc7cf1c5d",
"type": "github" "type": "github"
}, },
"original": { "original": {
@ -55,11 +55,11 @@
"flake-compat": { "flake-compat": {
"flake": false, "flake": false,
"locked": { "locked": {
"lastModified": 1767039857, "lastModified": 1765121682,
"narHash": "sha256-vNpUSpF5Nuw8xvDLj2KCwwksIbjua2LZCqhV1LNRDns=", "narHash": "sha256-4VBOP18BFeiPkyhy9o4ssBNQEvfvv1kXkasAYd0+rrA=",
"owner": "edolstra", "owner": "edolstra",
"repo": "flake-compat", "repo": "flake-compat",
"rev": "5edf11c44bc78a0d334f6334cdaf7d60d732daab", "rev": "65f23138d8d09a92e30f1e5c87611b23ef451bf3",
"type": "github" "type": "github"
}, },
"original": { "original": {
@ -74,11 +74,11 @@
"nixpkgs-lib": "nixpkgs-lib" "nixpkgs-lib": "nixpkgs-lib"
}, },
"locked": { "locked": {
"lastModified": 1772408722, "lastModified": 1765835352,
"narHash": "sha256-rHuJtdcOjK7rAHpHphUb1iCvgkU3GpfvicLMwwnfMT0=", "narHash": "sha256-XswHlK/Qtjasvhd1nOa1e8MgZ8GS//jBoTqWtrS1Giw=",
"owner": "hercules-ci", "owner": "hercules-ci",
"repo": "flake-parts", "repo": "flake-parts",
"rev": "f20dc5d9b8027381c474144ecabc9034d6a839a3", "rev": "a34fae9c08a15ad73f295041fec82323541400a9",
"type": "github" "type": "github"
}, },
"original": { "original": {
@ -89,11 +89,11 @@
}, },
"nixpkgs": { "nixpkgs": {
"locked": { "locked": {
"lastModified": 1773734432, "lastModified": 1766070988,
"narHash": "sha256-IF5ppUWh6gHGHYDbtVUyhwy/i7D261P7fWD1bPefOsw=", "narHash": "sha256-G/WVghka6c4bAzMhTwT2vjLccg/awmHkdKSd2JrycLc=",
"owner": "NixOS", "owner": "NixOS",
"repo": "nixpkgs", "repo": "nixpkgs",
"rev": "cda48547b432e8d3b18b4180ba07473762ec8558", "rev": "c6245e83d836d0433170a16eb185cefe0572f8b8",
"type": "github" "type": "github"
}, },
"original": { "original": {
@ -105,11 +105,11 @@
}, },
"nixpkgs-lib": { "nixpkgs-lib": {
"locked": { "locked": {
"lastModified": 1772328832, "lastModified": 1765674936,
"narHash": "sha256-e+/T/pmEkLP6BHhYjx6GmwP5ivonQQn0bJdH9YrRB+Q=", "narHash": "sha256-k00uTP4JNfmejrCLJOwdObYC9jHRrr/5M/a/8L2EIdo=",
"owner": "nix-community", "owner": "nix-community",
"repo": "nixpkgs.lib", "repo": "nixpkgs.lib",
"rev": "c185c7a5e5dd8f9add5b2f8ebeff00888b070742", "rev": "2075416fcb47225d9b68ac469a5c4801a9c4dd85",
"type": "github" "type": "github"
}, },
"original": { "original": {
@ -132,11 +132,11 @@
"rust-analyzer-src": { "rust-analyzer-src": {
"flake": false, "flake": false,
"locked": { "locked": {
"lastModified": 1773697963, "lastModified": 1766253897,
"narHash": "sha256-xdKI77It9PM6eNrCcDZsnP4SKulZwk8VkDgBRVMnCb8=", "narHash": "sha256-ChK07B1aOlJ4QzWXpJo+y8IGAxp1V9yQ2YloJ+RgHRw=",
"owner": "rust-lang", "owner": "rust-lang",
"repo": "rust-analyzer", "repo": "rust-analyzer",
"rev": "2993637174252ff60a582fd1f55b9ab52c39db6d", "rev": "765b7bdb432b3740f2d564afccfae831d5a972e4",
"type": "github" "type": "github"
}, },
"original": { "original": {
@ -153,11 +153,11 @@
] ]
}, },
"locked": { "locked": {
"lastModified": 1773297127, "lastModified": 1766000401,
"narHash": "sha256-6E/yhXP7Oy/NbXtf1ktzmU8SdVqJQ09HC/48ebEGBpk=", "narHash": "sha256-+cqN4PJz9y0JQXfAK5J1drd0U05D5fcAGhzhfVrDlsI=",
"owner": "numtide", "owner": "numtide",
"repo": "treefmt-nix", "repo": "treefmt-nix",
"rev": "71b125cd05fbfd78cab3e070b73544abe24c5016", "rev": "42d96e75aa56a3f70cab7e7dc4a32868db28e8fd",
"type": "github" "type": "github"
}, },
"original": { "original": {

405
package-lock.json generated
View file

@ -16,21 +16,21 @@
} }
}, },
"node_modules/@emnapi/core": { "node_modules/@emnapi/core": {
"version": "1.9.0", "version": "1.8.1",
"resolved": "https://registry.npmjs.org/@emnapi/core/-/core-1.9.0.tgz", "resolved": "https://registry.npmjs.org/@emnapi/core/-/core-1.8.1.tgz",
"integrity": "sha512-0DQ98G9ZQZOxfUcQn1waV2yS8aWdZ6kJMbYCJB3oUBecjWYO1fqJ+a1DRfPF3O5JEkwqwP1A9QEN/9mYm2Yd0w==", "integrity": "sha512-AvT9QFpxK0Zd8J0jopedNm+w/2fIzvtPKPjqyw9jwvBaReTTqPBk9Hixaz7KbjimP+QNz605/XnjFcDAL2pqBg==",
"dev": true, "dev": true,
"license": "MIT", "license": "MIT",
"optional": true, "optional": true,
"dependencies": { "dependencies": {
"@emnapi/wasi-threads": "1.2.0", "@emnapi/wasi-threads": "1.1.0",
"tslib": "^2.4.0" "tslib": "^2.4.0"
} }
}, },
"node_modules/@emnapi/runtime": { "node_modules/@emnapi/runtime": {
"version": "1.9.0", "version": "1.8.1",
"resolved": "https://registry.npmjs.org/@emnapi/runtime/-/runtime-1.9.0.tgz", "resolved": "https://registry.npmjs.org/@emnapi/runtime/-/runtime-1.8.1.tgz",
"integrity": "sha512-QN75eB0IH2ywSpRpNddCRfQIhmJYBCJ1x5Lb3IscKAL8bMnVAKnRg8dCoXbHzVLLH7P38N2Z3mtulB7W0J0FKw==", "integrity": "sha512-mehfKSMWjjNol8659Z8KxEMrdSJDDot5SXMq00dM8BN4o+CLNXQ0xH2V7EchNHV4RmbZLmmPdEaXZc5H2FXmDg==",
"dev": true, "dev": true,
"license": "MIT", "license": "MIT",
"optional": true, "optional": true,
@ -39,9 +39,9 @@
} }
}, },
"node_modules/@emnapi/wasi-threads": { "node_modules/@emnapi/wasi-threads": {
"version": "1.2.0", "version": "1.1.0",
"resolved": "https://registry.npmjs.org/@emnapi/wasi-threads/-/wasi-threads-1.2.0.tgz", "resolved": "https://registry.npmjs.org/@emnapi/wasi-threads/-/wasi-threads-1.1.0.tgz",
"integrity": "sha512-N10dEJNSsUx41Z6pZsXU8FjPjpBEplgH24sfkmITrBED1/U2Esum9F3lfLrMjKHHjmi557zQn7kR9R+XWXu5Rg==", "integrity": "sha512-WI0DdZ8xFSbgMjR1sFsKABJ/C5OnRrjT06JXbZKexJGrDuPTzZdDYfFlsgcCXCyf+suG5QU2e/y1Wo2V/OapLQ==",
"dev": true, "dev": true,
"license": "MIT", "license": "MIT",
"optional": true, "optional": true,
@ -144,22 +144,17 @@
} }
}, },
"node_modules/@rsbuild/plugin-react": { "node_modules/@rsbuild/plugin-react": {
"version": "1.4.6", "version": "1.4.5",
"resolved": "https://registry.npmjs.org/@rsbuild/plugin-react/-/plugin-react-1.4.6.tgz", "resolved": "https://registry.npmjs.org/@rsbuild/plugin-react/-/plugin-react-1.4.5.tgz",
"integrity": "sha512-LAT6xHlEyZKA0VjF/ph5d50iyG+WSmBx+7g98HNZUwb94VeeTMZFB8qVptTkbIRMss3BNKOXmHOu71Lhsh9oEw==", "integrity": "sha512-eS2sXCedgGA/7bLu8yVtn48eE/GyPbXx4Q7OcutB01IQ1D2y8WSMBys4nwfrecy19utvw4NPn4gYDy52316+vg==",
"dev": true, "dev": true,
"license": "MIT", "license": "MIT",
"dependencies": { "dependencies": {
"@rspack/plugin-react-refresh": "^1.6.1", "@rspack/plugin-react-refresh": "^1.6.0",
"react-refresh": "^0.18.0" "react-refresh": "^0.18.0"
}, },
"peerDependencies": { "peerDependencies": {
"@rsbuild/core": "^1.0.0 || ^2.0.0-0" "@rsbuild/core": "^1.0.0 || ^2.0.0-0"
},
"peerDependenciesMeta": {
"@rsbuild/core": {
"optional": true
}
} }
}, },
"node_modules/@rspack/binding": { "node_modules/@rspack/binding": {
@ -347,9 +342,9 @@
} }
}, },
"node_modules/@rspack/plugin-react-refresh": { "node_modules/@rspack/plugin-react-refresh": {
"version": "1.6.1", "version": "1.6.0",
"resolved": "https://registry.npmjs.org/@rspack/plugin-react-refresh/-/plugin-react-refresh-1.6.1.tgz", "resolved": "https://registry.npmjs.org/@rspack/plugin-react-refresh/-/plugin-react-refresh-1.6.0.tgz",
"integrity": "sha512-eqqW5645VG3CzGzFgNg5HqNdHVXY+567PGjtDhhrM8t67caxmsSzRmT5qfoEIfBcGgFkH9vEg7kzXwmCYQdQDw==", "integrity": "sha512-OO53gkrte/Ty4iRXxxM6lkwPGxsSsupFKdrPFnjwFIYrPvFLjeolAl5cTx+FzO5hYygJiGnw7iEKTmD+ptxqDA==",
"dev": true, "dev": true,
"license": "MIT", "license": "MIT",
"dependencies": { "dependencies": {
@ -367,9 +362,9 @@
} }
}, },
"node_modules/@rspress/core": { "node_modules/@rspress/core": {
"version": "2.0.5", "version": "2.0.4",
"resolved": "https://registry.npmjs.org/@rspress/core/-/core-2.0.5.tgz", "resolved": "https://registry.npmjs.org/@rspress/core/-/core-2.0.4.tgz",
"integrity": "sha512-2ezGmANmIrWmhsUrvlRb9Df4xsun1BDgEertDc890aQqtKcNrbu+TBRsOoO+E/N6ioavun7JGGe1wWjvxubCHw==", "integrity": "sha512-OdeGMY75OFzyRZvXuBEMre3q8Y4/OjYJa4vVBDp4Z2E65LSt8+hYkzzkarEl6sFWqbp8c1o9qfSUf4xMctmKvw==",
"dev": true, "dev": true,
"license": "MIT", "license": "MIT",
"dependencies": { "dependencies": {
@ -377,7 +372,7 @@
"@mdx-js/react": "^3.1.1", "@mdx-js/react": "^3.1.1",
"@rsbuild/core": "2.0.0-beta.6", "@rsbuild/core": "2.0.0-beta.6",
"@rsbuild/plugin-react": "~1.4.5", "@rsbuild/plugin-react": "~1.4.5",
"@rspress/shared": "2.0.5", "@rspress/shared": "2.0.4",
"@shikijs/rehype": "^4.0.1", "@shikijs/rehype": "^4.0.1",
"@types/unist": "^3.0.3", "@types/unist": "^3.0.3",
"@unhead/react": "^2.1.9", "@unhead/react": "^2.1.9",
@ -404,8 +399,6 @@
"react-router-dom": "^7.13.1", "react-router-dom": "^7.13.1",
"rehype-external-links": "^3.0.0", "rehype-external-links": "^3.0.0",
"rehype-raw": "^7.0.0", "rehype-raw": "^7.0.0",
"remark-cjk-friendly": "^2.0.1",
"remark-cjk-friendly-gfm-strikethrough": "^2.0.1",
"remark-gfm": "^4.0.1", "remark-gfm": "^4.0.1",
"remark-mdx": "^3.1.1", "remark-mdx": "^3.1.1",
"remark-parse": "^11.0.0", "remark-parse": "^11.0.0",
@ -427,35 +420,35 @@
} }
}, },
"node_modules/@rspress/plugin-client-redirects": { "node_modules/@rspress/plugin-client-redirects": {
"version": "2.0.5", "version": "2.0.4",
"resolved": "https://registry.npmjs.org/@rspress/plugin-client-redirects/-/plugin-client-redirects-2.0.5.tgz", "resolved": "https://registry.npmjs.org/@rspress/plugin-client-redirects/-/plugin-client-redirects-2.0.4.tgz",
"integrity": "sha512-sxwWzwHPefSPWUyV6/AA/hBlQUeNFntL8dBQi/vZCQiZHM6ShvKbqa3s5Xu2yI7DeFKHH3jb0VGbjufu8M3Ypw==", "integrity": "sha512-cm7VNfisVCHe+YHNjd9YrWt6/WtJ5I/oNRyjt+tqCeOcC1IJSX2LhNXpNN5h9az3wxYn37kVctBUjzqkj2FQ+A==",
"dev": true, "dev": true,
"license": "MIT", "license": "MIT",
"engines": { "engines": {
"node": "^20.19.0 || >=22.12.0" "node": "^20.19.0 || >=22.12.0"
}, },
"peerDependencies": { "peerDependencies": {
"@rspress/core": "^2.0.5" "@rspress/core": "^2.0.4"
} }
}, },
"node_modules/@rspress/plugin-sitemap": { "node_modules/@rspress/plugin-sitemap": {
"version": "2.0.5", "version": "2.0.4",
"resolved": "https://registry.npmjs.org/@rspress/plugin-sitemap/-/plugin-sitemap-2.0.5.tgz", "resolved": "https://registry.npmjs.org/@rspress/plugin-sitemap/-/plugin-sitemap-2.0.4.tgz",
"integrity": "sha512-wBxKL8sNd3bkKFxlFtB1xJ7jCtSRDL6pfVvWsmTIbTNDPCtefd1nmiMBIDMLOR8EflwuStIz3bMQXdWpbC7ahA==", "integrity": "sha512-TKaj3/8+P1fP3sD5NOaWVMXvRvJFQmuJQlUBxhRM0oiUHhzNNkVy/2YXkjYJuXuMhFPLnOWCjrYjTG3xcZE7Wg==",
"dev": true, "dev": true,
"license": "MIT", "license": "MIT",
"engines": { "engines": {
"node": "^20.19.0 || >=22.12.0" "node": "^20.19.0 || >=22.12.0"
}, },
"peerDependencies": { "peerDependencies": {
"@rspress/core": "^2.0.5" "@rspress/core": "^2.0.4"
} }
}, },
"node_modules/@rspress/shared": { "node_modules/@rspress/shared": {
"version": "2.0.5", "version": "2.0.4",
"resolved": "https://registry.npmjs.org/@rspress/shared/-/shared-2.0.5.tgz", "resolved": "https://registry.npmjs.org/@rspress/shared/-/shared-2.0.4.tgz",
"integrity": "sha512-Wdhh+VjU8zJWoVLhv9KJTRAZQ4X2V/Z81Lo2D0hQsa0Kj5F3EaxlMt5/dhX7DoflqNuZPZk/e7CSUB+gO/Umlg==", "integrity": "sha512-os2nzsPgHKVFXjDoW7N53rmhLChCw/y2O2TGilT4w2A4HNJa2oJwRk0UryXbxxWD5C85HErTjovs2uBdhdOTtA==",
"dev": true, "dev": true,
"license": "MIT", "license": "MIT",
"dependencies": { "dependencies": {
@ -467,14 +460,14 @@
} }
}, },
"node_modules/@shikijs/core": { "node_modules/@shikijs/core": {
"version": "4.0.2", "version": "4.0.1",
"resolved": "https://registry.npmjs.org/@shikijs/core/-/core-4.0.2.tgz", "resolved": "https://registry.npmjs.org/@shikijs/core/-/core-4.0.1.tgz",
"integrity": "sha512-hxT0YF4ExEqB8G/qFdtJvpmHXBYJ2lWW7qTHDarVkIudPFE6iCIrqdgWxGn5s+ppkGXI0aEGlibI0PAyzP3zlw==", "integrity": "sha512-vWvqi9JNgz1dRL9Nvog5wtx7RuNkf7MEPl2mU/cyUUxJeH1CAr3t+81h8zO8zs7DK6cKLMoU9TvukWIDjP4Lzg==",
"dev": true, "dev": true,
"license": "MIT", "license": "MIT",
"dependencies": { "dependencies": {
"@shikijs/primitive": "4.0.2", "@shikijs/primitive": "4.0.1",
"@shikijs/types": "4.0.2", "@shikijs/types": "4.0.1",
"@shikijs/vscode-textmate": "^10.0.2", "@shikijs/vscode-textmate": "^10.0.2",
"@types/hast": "^3.0.4", "@types/hast": "^3.0.4",
"hast-util-to-html": "^9.0.5" "hast-util-to-html": "^9.0.5"
@ -484,13 +477,13 @@
} }
}, },
"node_modules/@shikijs/engine-javascript": { "node_modules/@shikijs/engine-javascript": {
"version": "4.0.2", "version": "4.0.1",
"resolved": "https://registry.npmjs.org/@shikijs/engine-javascript/-/engine-javascript-4.0.2.tgz", "resolved": "https://registry.npmjs.org/@shikijs/engine-javascript/-/engine-javascript-4.0.1.tgz",
"integrity": "sha512-7PW0Nm49DcoUIQEXlJhNNBHyoGMjalRETTCcjMqEaMoJRLljy1Bi/EGV3/qLBgLKQejdspiiYuHGQW6dX94Nag==", "integrity": "sha512-DJK9NiwtGYqMuKCRO4Ip0FKNDQpmaiS+K5bFjJ7DWFn4zHueDWgaUG8kAofkrnXF6zPPYYQY7J5FYVW9MbZyBg==",
"dev": true, "dev": true,
"license": "MIT", "license": "MIT",
"dependencies": { "dependencies": {
"@shikijs/types": "4.0.2", "@shikijs/types": "4.0.1",
"@shikijs/vscode-textmate": "^10.0.2", "@shikijs/vscode-textmate": "^10.0.2",
"oniguruma-to-es": "^4.3.4" "oniguruma-to-es": "^4.3.4"
}, },
@ -499,13 +492,13 @@
} }
}, },
"node_modules/@shikijs/engine-oniguruma": { "node_modules/@shikijs/engine-oniguruma": {
"version": "4.0.2", "version": "4.0.1",
"resolved": "https://registry.npmjs.org/@shikijs/engine-oniguruma/-/engine-oniguruma-4.0.2.tgz", "resolved": "https://registry.npmjs.org/@shikijs/engine-oniguruma/-/engine-oniguruma-4.0.1.tgz",
"integrity": "sha512-UpCB9Y2sUKlS9z8juFSKz7ZtysmeXCgnRF0dlhXBkmQnek7lAToPte8DkxmEYGNTMii72zU/lyXiCB6StuZeJg==", "integrity": "sha512-oCWdCTDch3J8Kc0OZJ98KuUPC02O1VqIE3W/e2uvrHqTxYRR21RGEJMtchrgrxhsoJJCzmIciKsqG+q/yD+Cxg==",
"dev": true, "dev": true,
"license": "MIT", "license": "MIT",
"dependencies": { "dependencies": {
"@shikijs/types": "4.0.2", "@shikijs/types": "4.0.1",
"@shikijs/vscode-textmate": "^10.0.2" "@shikijs/vscode-textmate": "^10.0.2"
}, },
"engines": { "engines": {
@ -513,26 +506,26 @@
} }
}, },
"node_modules/@shikijs/langs": { "node_modules/@shikijs/langs": {
"version": "4.0.2", "version": "4.0.1",
"resolved": "https://registry.npmjs.org/@shikijs/langs/-/langs-4.0.2.tgz", "resolved": "https://registry.npmjs.org/@shikijs/langs/-/langs-4.0.1.tgz",
"integrity": "sha512-KaXby5dvoeuZzN0rYQiPMjFoUrz4hgwIE+D6Du9owcHcl6/g16/yT5BQxSW5cGt2MZBz6Hl0YuRqf12omRfUUg==", "integrity": "sha512-v/mluaybWdnGJR4GqAR6zh8qAZohW9k+cGYT28Y7M8+jLbC0l4yG085O1A+WkseHTn+awd+P3UBymb2+MXFc8w==",
"dev": true, "dev": true,
"license": "MIT", "license": "MIT",
"dependencies": { "dependencies": {
"@shikijs/types": "4.0.2" "@shikijs/types": "4.0.1"
}, },
"engines": { "engines": {
"node": ">=20" "node": ">=20"
} }
}, },
"node_modules/@shikijs/primitive": { "node_modules/@shikijs/primitive": {
"version": "4.0.2", "version": "4.0.1",
"resolved": "https://registry.npmjs.org/@shikijs/primitive/-/primitive-4.0.2.tgz", "resolved": "https://registry.npmjs.org/@shikijs/primitive/-/primitive-4.0.1.tgz",
"integrity": "sha512-M6UMPrSa3fN5ayeJwFVl9qWofl273wtK1VG8ySDZ1mQBfhCpdd8nEx7nPZ/tk7k+TYcpqBZzj/AnwxT9lO+HJw==", "integrity": "sha512-ns0hHZc5eWZuvuIEJz2pTx3Qecz0aRVYumVQJ8JgWY2tq/dH8WxdcVM49Fc2NsHEILNIT6vfdW9MF26RANWiTA==",
"dev": true, "dev": true,
"license": "MIT", "license": "MIT",
"dependencies": { "dependencies": {
"@shikijs/types": "4.0.2", "@shikijs/types": "4.0.1",
"@shikijs/vscode-textmate": "^10.0.2", "@shikijs/vscode-textmate": "^10.0.2",
"@types/hast": "^3.0.4" "@types/hast": "^3.0.4"
}, },
@ -541,16 +534,16 @@
} }
}, },
"node_modules/@shikijs/rehype": { "node_modules/@shikijs/rehype": {
"version": "4.0.2", "version": "4.0.1",
"resolved": "https://registry.npmjs.org/@shikijs/rehype/-/rehype-4.0.2.tgz", "resolved": "https://registry.npmjs.org/@shikijs/rehype/-/rehype-4.0.1.tgz",
"integrity": "sha512-cmPlKLD8JeojasNFoY64162ScpEdEdQUMuVodPCrv1nx1z3bjmGwoKWDruQWa/ejSznImlaeB0Ty6Q3zPaVQAA==", "integrity": "sha512-bx7bYA0/p/pgeEICaPO0jT6TXrXHmr9tGRUDhOMy1cAUN2YA0iANfXX7seBnImy8DGu/rxm1ij9/ZofYrAaUjQ==",
"dev": true, "dev": true,
"license": "MIT", "license": "MIT",
"dependencies": { "dependencies": {
"@shikijs/types": "4.0.2", "@shikijs/types": "4.0.1",
"@types/hast": "^3.0.4", "@types/hast": "^3.0.4",
"hast-util-to-string": "^3.0.1", "hast-util-to-string": "^3.0.1",
"shiki": "4.0.2", "shiki": "4.0.1",
"unified": "^11.0.5", "unified": "^11.0.5",
"unist-util-visit": "^5.1.0" "unist-util-visit": "^5.1.0"
}, },
@ -559,22 +552,22 @@
} }
}, },
"node_modules/@shikijs/themes": { "node_modules/@shikijs/themes": {
"version": "4.0.2", "version": "4.0.1",
"resolved": "https://registry.npmjs.org/@shikijs/themes/-/themes-4.0.2.tgz", "resolved": "https://registry.npmjs.org/@shikijs/themes/-/themes-4.0.1.tgz",
"integrity": "sha512-mjCafwt8lJJaVSsQvNVrJumbnnj1RI8jbUKrPKgE6E3OvQKxnuRoBaYC51H4IGHePsGN/QtALglWBU7DoKDFnA==", "integrity": "sha512-FW41C/D6j/yKQkzVdjrRPiJCtgeDaYRJFEyCKFCINuRJRj9WcmubhP4KQHPZ4+9eT87jruSrYPyoblNRyDFzvA==",
"dev": true, "dev": true,
"license": "MIT", "license": "MIT",
"dependencies": { "dependencies": {
"@shikijs/types": "4.0.2" "@shikijs/types": "4.0.1"
}, },
"engines": { "engines": {
"node": ">=20" "node": ">=20"
} }
}, },
"node_modules/@shikijs/types": { "node_modules/@shikijs/types": {
"version": "4.0.2", "version": "4.0.1",
"resolved": "https://registry.npmjs.org/@shikijs/types/-/types-4.0.2.tgz", "resolved": "https://registry.npmjs.org/@shikijs/types/-/types-4.0.1.tgz",
"integrity": "sha512-qzbeRooUTPnLE+sHD/Z8DStmaDgnbbc/pMrU203950aRqjX/6AFHeDYT+j00y2lPdz0ywJKx7o/7qnqTivtlXg==", "integrity": "sha512-EaygPEn57+jJ76mw+nTLvIpJMAcMPokFbrF8lufsZP7Ukk+ToJYEcswN1G0e49nUZAq7aCQtoeW219A8HK1ZOw==",
"dev": true, "dev": true,
"license": "MIT", "license": "MIT",
"dependencies": { "dependencies": {
@ -675,9 +668,9 @@
"license": "MIT" "license": "MIT"
}, },
"node_modules/@types/react": { "node_modules/@types/react": {
"version": "19.2.14", "version": "19.2.6",
"resolved": "https://registry.npmjs.org/@types/react/-/react-19.2.14.tgz", "resolved": "https://registry.npmjs.org/@types/react/-/react-19.2.6.tgz",
"integrity": "sha512-ilcTH/UniCkMdtexkoCN0bI7pMcJDvmQFPvuPvmEaYA/NSfFTAgdUSLAoVjaRJm7+6PvcM+q1zYOwS4wTYMF9w==", "integrity": "sha512-p/jUvulfgU7oKtj6Xpk8cA2Y1xKTtICGpJYeJXz2YVO2UcvjQgeRMLDGfDeqeRW2Ta+0QNFwcc8X3GH8SxZz6w==",
"dev": true, "dev": true,
"license": "MIT", "license": "MIT",
"peer": true, "peer": true,
@ -700,13 +693,13 @@
"license": "ISC" "license": "ISC"
}, },
"node_modules/@unhead/react": { "node_modules/@unhead/react": {
"version": "2.1.12", "version": "2.1.10",
"resolved": "https://registry.npmjs.org/@unhead/react/-/react-2.1.12.tgz", "resolved": "https://registry.npmjs.org/@unhead/react/-/react-2.1.10.tgz",
"integrity": "sha512-1xXFrxyw29f+kScXfEb0GxjlgtnHxoYau0qpW9k8sgWhQUNnE5gNaH3u+rNhd5IqhyvbdDRJpQ25zoz0HIyGaw==", "integrity": "sha512-z9IzzkaCI1GyiBwVRMt4dGc2mOvsj9drbAdXGMy6DWpu9FwTR37ZTmAi7UeCVyIkpVdIaNalz7vkbvGG8afFng==",
"dev": true, "dev": true,
"license": "MIT", "license": "MIT",
"dependencies": { "dependencies": {
"unhead": "2.1.12" "unhead": "2.1.10"
}, },
"funding": { "funding": {
"url": "https://github.com/sponsors/harlan-zw" "url": "https://github.com/sponsors/harlan-zw"
@ -716,9 +709,9 @@
} }
}, },
"node_modules/acorn": { "node_modules/acorn": {
"version": "8.16.0", "version": "8.15.0",
"resolved": "https://registry.npmjs.org/acorn/-/acorn-8.16.0.tgz", "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.15.0.tgz",
"integrity": "sha512-UVJyE9MttOsBQIDKw1skb9nAwQuR5wuGD3+82K6JgJlm/Y+KI92oNsMNGZCYdDsVtRHSak0pcV5Dno5+4jh9sw==", "integrity": "sha512-NZyJarBfL7nWwIq+FDL6Zp/yHEhePMNnnJ0y3qfieCrmNvYct8uvtiV41UvlSe6apAfk0fY1FbWx+NwfmpvtTg==",
"dev": true, "dev": true,
"license": "MIT", "license": "MIT",
"bin": { "bin": {
@ -996,9 +989,9 @@
} }
}, },
"node_modules/decode-named-character-reference": { "node_modules/decode-named-character-reference": {
"version": "1.3.0", "version": "1.2.0",
"resolved": "https://registry.npmjs.org/decode-named-character-reference/-/decode-named-character-reference-1.3.0.tgz", "resolved": "https://registry.npmjs.org/decode-named-character-reference/-/decode-named-character-reference-1.2.0.tgz",
"integrity": "sha512-GtpQYB283KrPp6nRw50q3U9/VfOutZOe103qlN7BPP6Ad27xYnOIWv4lPzo8HCAL+mMZofJ9KEy30fq6MfaK6Q==", "integrity": "sha512-c6fcElNV6ShtZXmsgNgFFV5tVX2PaV4g+MOAkb8eXHvn6sryJBrZa9r0zV6+dtTyoCKxtDy5tyQ5ZwQuidtd+Q==",
"dev": true, "dev": true,
"license": "MIT", "license": "MIT",
"dependencies": { "dependencies": {
@ -1033,19 +1026,6 @@
"url": "https://github.com/sponsors/wooorm" "url": "https://github.com/sponsors/wooorm"
} }
}, },
"node_modules/entities": {
"version": "6.0.1",
"resolved": "https://registry.npmjs.org/entities/-/entities-6.0.1.tgz",
"integrity": "sha512-aN97NXWF6AWBTahfVOIrB/NShkzi5H7F9r1s9mD3cDj4Ko5f2qhhVoYMibXF7GlLveb/D2ioWay8lxI97Ven3g==",
"dev": true,
"license": "BSD-2-Clause",
"engines": {
"node": ">=0.12"
},
"funding": {
"url": "https://github.com/fb55/entities?sponsor=1"
}
},
"node_modules/error-stack-parser": { "node_modules/error-stack-parser": {
"version": "2.1.4", "version": "2.1.4",
"resolved": "https://registry.npmjs.org/error-stack-parser/-/error-stack-parser-2.1.4.tgz", "resolved": "https://registry.npmjs.org/error-stack-parser/-/error-stack-parser-2.1.4.tgz",
@ -1292,19 +1272,6 @@
"node": "^8.16.0 || ^10.6.0 || >=11.0.0" "node": "^8.16.0 || ^10.6.0 || >=11.0.0"
} }
}, },
"node_modules/get-east-asian-width": {
"version": "1.5.0",
"resolved": "https://registry.npmjs.org/get-east-asian-width/-/get-east-asian-width-1.5.0.tgz",
"integrity": "sha512-CQ+bEO+Tva/qlmw24dCejulK5pMzVnUOFOijVogd3KQs07HnRIgp8TGipvCCRT06xeYEbpbgwaCxglFyiuIcmA==",
"dev": true,
"license": "MIT",
"engines": {
"node": ">=18"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/github-slugger": { "node_modules/github-slugger": {
"version": "2.0.0", "version": "2.0.0",
"resolved": "https://registry.npmjs.org/github-slugger/-/github-slugger-2.0.0.tgz", "resolved": "https://registry.npmjs.org/github-slugger/-/github-slugger-2.0.0.tgz",
@ -1512,16 +1479,16 @@
} }
}, },
"node_modules/hast-util-to-parse5": { "node_modules/hast-util-to-parse5": {
"version": "8.0.1", "version": "8.0.0",
"resolved": "https://registry.npmjs.org/hast-util-to-parse5/-/hast-util-to-parse5-8.0.1.tgz", "resolved": "https://registry.npmjs.org/hast-util-to-parse5/-/hast-util-to-parse5-8.0.0.tgz",
"integrity": "sha512-MlWT6Pjt4CG9lFCjiz4BH7l9wmrMkfkJYCxFwKQic8+RTZgWPuWxwAfjJElsXkex7DJjfSJsQIt931ilUgmwdA==", "integrity": "sha512-3KKrV5ZVI8if87DVSi1vDeByYrkGzg4mEfeu4alwgmmIeARiBLKCZS2uw5Gb6nU9x9Yufyj3iudm6i7nl52PFw==",
"dev": true, "dev": true,
"license": "MIT", "license": "MIT",
"dependencies": { "dependencies": {
"@types/hast": "^3.0.0", "@types/hast": "^3.0.0",
"comma-separated-tokens": "^2.0.0", "comma-separated-tokens": "^2.0.0",
"devlop": "^1.0.0", "devlop": "^1.0.0",
"property-information": "^7.0.0", "property-information": "^6.0.0",
"space-separated-tokens": "^2.0.0", "space-separated-tokens": "^2.0.0",
"web-namespaces": "^2.0.0", "web-namespaces": "^2.0.0",
"zwitch": "^2.0.0" "zwitch": "^2.0.0"
@ -1531,6 +1498,17 @@
"url": "https://opencollective.com/unified" "url": "https://opencollective.com/unified"
} }
}, },
"node_modules/hast-util-to-parse5/node_modules/property-information": {
"version": "6.5.0",
"resolved": "https://registry.npmjs.org/property-information/-/property-information-6.5.0.tgz",
"integrity": "sha512-PgTgs/BlvHxOu8QuEN7wi5A0OmXaBcHpmCSTehcs6Uuu9IkDIEo13Hy7n898RHfrQ49vKCoGeWZSaAK01nwVig==",
"dev": true,
"license": "MIT",
"funding": {
"type": "github",
"url": "https://github.com/sponsors/wooorm"
}
},
"node_modules/hast-util-to-string": { "node_modules/hast-util-to-string": {
"version": "3.0.1", "version": "3.0.1",
"resolved": "https://registry.npmjs.org/hast-util-to-string/-/hast-util-to-string-3.0.1.tgz", "resolved": "https://registry.npmjs.org/hast-util-to-string/-/hast-util-to-string-3.0.1.tgz",
@ -1578,9 +1556,9 @@
} }
}, },
"node_modules/hookable": { "node_modules/hookable": {
"version": "6.1.0", "version": "6.0.1",
"resolved": "https://registry.npmjs.org/hookable/-/hookable-6.1.0.tgz", "resolved": "https://registry.npmjs.org/hookable/-/hookable-6.0.1.tgz",
"integrity": "sha512-ZoKZSJgu8voGK2geJS+6YtYjvIzu9AOM/KZXsBxr83uhLL++e9pEv/dlgwgy3dvHg06kTz6JOh1hk3C8Ceiymw==", "integrity": "sha512-uKGyY8BuzN/a5gvzvA+3FVWo0+wUjgtfSdnmjtrOVwQCZPHpHDH2WRO3VZSOeluYrHoDCiXFffZXs8Dj1ULWtw==",
"dev": true, "dev": true,
"license": "MIT" "license": "MIT"
}, },
@ -1613,9 +1591,9 @@
} }
}, },
"node_modules/inline-style-parser": { "node_modules/inline-style-parser": {
"version": "0.2.7", "version": "0.2.6",
"resolved": "https://registry.npmjs.org/inline-style-parser/-/inline-style-parser-0.2.7.tgz", "resolved": "https://registry.npmjs.org/inline-style-parser/-/inline-style-parser-0.2.6.tgz",
"integrity": "sha512-Nb2ctOyNR8DqQoR0OwRG95uNWIC0C1lCgf5Naz5H6Ji72KZ8OcFZLz2P5sNgwlyoJ8Yif11oMuYs5pBQa86csA==", "integrity": "sha512-gtGXVaBdl5mAes3rPcMedEBm12ibjt1kDMFfheul1wUAOVEJW60voNdMVzVkfLN06O7ZaD/rxhfKgtlgtTbMjg==",
"dev": true, "dev": true,
"license": "MIT" "license": "MIT"
}, },
@ -1833,9 +1811,9 @@
} }
}, },
"node_modules/mdast-util-from-markdown": { "node_modules/mdast-util-from-markdown": {
"version": "2.0.3", "version": "2.0.2",
"resolved": "https://registry.npmjs.org/mdast-util-from-markdown/-/mdast-util-from-markdown-2.0.3.tgz", "resolved": "https://registry.npmjs.org/mdast-util-from-markdown/-/mdast-util-from-markdown-2.0.2.tgz",
"integrity": "sha512-W4mAWTvSlKvf8L6J+VN9yLSqQ9AOAAvHuoDAmPkz4dHf553m5gVj2ejadHJhoJmcmxEnOv6Pa8XJhpxE93kb8Q==", "integrity": "sha512-uZhTV/8NBuw0WHkPTrCqDOl0zVe1BIng5ZtHoDk49ME1qqcjYmmLmOf0gELgcRMxN4w2iuIeVso5/6QymSrgmA==",
"dev": true, "dev": true,
"license": "MIT", "license": "MIT",
"dependencies": { "dependencies": {
@ -2196,80 +2174,6 @@
"micromark-util-types": "^2.0.0" "micromark-util-types": "^2.0.0"
} }
}, },
"node_modules/micromark-extension-cjk-friendly": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/micromark-extension-cjk-friendly/-/micromark-extension-cjk-friendly-2.0.1.tgz",
"integrity": "sha512-OkzoYVTL1ChbvQ8Cc1ayTIz7paFQz8iS9oIYmewncweUSwmWR+hkJF9spJ1lxB90XldJl26A1F4IkPOKS3bDXw==",
"dev": true,
"license": "MIT",
"dependencies": {
"devlop": "^1.1.0",
"micromark-extension-cjk-friendly-util": "3.0.1",
"micromark-util-chunked": "^2.0.1",
"micromark-util-resolve-all": "^2.0.1",
"micromark-util-symbol": "^2.0.1"
},
"engines": {
"node": ">=18"
},
"peerDependencies": {
"micromark": "^4.0.0",
"micromark-util-types": "^2.0.0"
},
"peerDependenciesMeta": {
"micromark-util-types": {
"optional": true
}
}
},
"node_modules/micromark-extension-cjk-friendly-gfm-strikethrough": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/micromark-extension-cjk-friendly-gfm-strikethrough/-/micromark-extension-cjk-friendly-gfm-strikethrough-2.0.1.tgz",
"integrity": "sha512-wVC0zwjJNqQeX+bb07YTPu/CvSAyCTafyYb7sMhX1r62/Lw5M/df3JyYaANyp8g15c1ypJRFSsookTqA1IDsUg==",
"dev": true,
"license": "MIT",
"dependencies": {
"devlop": "^1.1.0",
"get-east-asian-width": "^1.4.0",
"micromark-extension-cjk-friendly-util": "3.0.1",
"micromark-util-character": "^2.1.1",
"micromark-util-chunked": "^2.0.1",
"micromark-util-resolve-all": "^2.0.1",
"micromark-util-symbol": "^2.0.1"
},
"engines": {
"node": ">=18"
},
"peerDependencies": {
"micromark": "^4.0.0",
"micromark-util-types": "^2.0.0"
},
"peerDependenciesMeta": {
"micromark-util-types": {
"optional": true
}
}
},
"node_modules/micromark-extension-cjk-friendly-util": {
"version": "3.0.1",
"resolved": "https://registry.npmjs.org/micromark-extension-cjk-friendly-util/-/micromark-extension-cjk-friendly-util-3.0.1.tgz",
"integrity": "sha512-GcbXqTTHOsiZHyF753oIddP/J2eH8j9zpyQPhkof6B2JNxfEJabnQqxbCgzJNuNes0Y2jTNJ3LiYPSXr6eJA8w==",
"dev": true,
"license": "MIT",
"dependencies": {
"get-east-asian-width": "^1.4.0",
"micromark-util-character": "^2.1.1",
"micromark-util-symbol": "^2.0.1"
},
"engines": {
"node": ">=18"
},
"peerDependenciesMeta": {
"micromark-util-types": {
"optional": true
}
}
},
"node_modules/micromark-extension-gfm": { "node_modules/micromark-extension-gfm": {
"version": "3.0.0", "version": "3.0.0",
"resolved": "https://registry.npmjs.org/micromark-extension-gfm/-/micromark-extension-gfm-3.0.0.tgz", "resolved": "https://registry.npmjs.org/micromark-extension-gfm/-/micromark-extension-gfm-3.0.0.tgz",
@ -2983,14 +2887,14 @@
"license": "MIT" "license": "MIT"
}, },
"node_modules/oniguruma-to-es": { "node_modules/oniguruma-to-es": {
"version": "4.3.5", "version": "4.3.4",
"resolved": "https://registry.npmjs.org/oniguruma-to-es/-/oniguruma-to-es-4.3.5.tgz", "resolved": "https://registry.npmjs.org/oniguruma-to-es/-/oniguruma-to-es-4.3.4.tgz",
"integrity": "sha512-Zjygswjpsewa0NLTsiizVuMQZbp0MDyM6lIt66OxsF21npUDlzpHi1Mgb/qhQdkb+dWFTzJmFbEWdvZgRho8eQ==", "integrity": "sha512-3VhUGN3w2eYxnTzHn+ikMI+fp/96KoRSVK9/kMTcFqj1NRDh2IhQCKvYxDnWePKRXY/AqH+Fuiyb7VHSzBjHfA==",
"dev": true, "dev": true,
"license": "MIT", "license": "MIT",
"dependencies": { "dependencies": {
"oniguruma-parser": "^0.12.1", "oniguruma-parser": "^0.12.1",
"regex": "^6.1.0", "regex": "^6.0.1",
"regex-recursion": "^6.0.2" "regex-recursion": "^6.0.2"
} }
}, },
@ -3034,6 +2938,19 @@
"url": "https://github.com/inikulin/parse5?sponsor=1" "url": "https://github.com/inikulin/parse5?sponsor=1"
} }
}, },
"node_modules/parse5/node_modules/entities": {
"version": "6.0.1",
"resolved": "https://registry.npmjs.org/entities/-/entities-6.0.1.tgz",
"integrity": "sha512-aN97NXWF6AWBTahfVOIrB/NShkzi5H7F9r1s9mD3cDj4Ko5f2qhhVoYMibXF7GlLveb/D2ioWay8lxI97Ven3g==",
"dev": true,
"license": "BSD-2-Clause",
"engines": {
"node": ">=0.12"
},
"funding": {
"url": "https://github.com/fb55/entities?sponsor=1"
}
},
"node_modules/picocolors": { "node_modules/picocolors": {
"version": "1.1.1", "version": "1.1.1",
"resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.1.1.tgz", "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.1.1.tgz",
@ -3336,50 +3253,6 @@
"url": "https://opencollective.com/unified" "url": "https://opencollective.com/unified"
} }
}, },
"node_modules/remark-cjk-friendly": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/remark-cjk-friendly/-/remark-cjk-friendly-2.0.1.tgz",
"integrity": "sha512-6WwkoQyZf/4j5k53zdFYrR8Ca+UVn992jXdLUSBDZR4eBpFhKyVxmA4gUHra/5fesjGIxrDhHesNr/sVoiiysA==",
"dev": true,
"license": "MIT",
"dependencies": {
"micromark-extension-cjk-friendly": "2.0.1"
},
"engines": {
"node": ">=18"
},
"peerDependencies": {
"@types/mdast": "^4.0.0",
"unified": "^11.0.0"
},
"peerDependenciesMeta": {
"@types/mdast": {
"optional": true
}
}
},
"node_modules/remark-cjk-friendly-gfm-strikethrough": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/remark-cjk-friendly-gfm-strikethrough/-/remark-cjk-friendly-gfm-strikethrough-2.0.1.tgz",
"integrity": "sha512-pWKj25O2eLXIL1aBupayl1fKhco+Brw8qWUWJPVB9EBzbQNd7nGLj0nLmJpggWsGLR5j5y40PIdjxby9IEYTuA==",
"dev": true,
"license": "MIT",
"dependencies": {
"micromark-extension-cjk-friendly-gfm-strikethrough": "2.0.1"
},
"engines": {
"node": ">=18"
},
"peerDependencies": {
"@types/mdast": "^4.0.0",
"unified": "^11.0.0"
},
"peerDependenciesMeta": {
"@types/mdast": {
"optional": true
}
}
},
"node_modules/remark-gfm": { "node_modules/remark-gfm": {
"version": "4.0.1", "version": "4.0.1",
"resolved": "https://registry.npmjs.org/remark-gfm/-/remark-gfm-4.0.1.tgz", "resolved": "https://registry.npmjs.org/remark-gfm/-/remark-gfm-4.0.1.tgz",
@ -3504,18 +3377,18 @@
"license": "MIT" "license": "MIT"
}, },
"node_modules/shiki": { "node_modules/shiki": {
"version": "4.0.2", "version": "4.0.1",
"resolved": "https://registry.npmjs.org/shiki/-/shiki-4.0.2.tgz", "resolved": "https://registry.npmjs.org/shiki/-/shiki-4.0.1.tgz",
"integrity": "sha512-eAVKTMedR5ckPo4xne/PjYQYrU3qx78gtJZ+sHlXEg5IHhhoQhMfZVzetTYuaJS0L2Ef3AcCRzCHV8T0WI6nIQ==", "integrity": "sha512-EkAEhDTN5WhpoQFXFw79OHIrSAfHhlImeCdSyg4u4XvrpxKEmdo/9x/HWSowujAnUrFsGOwWiE58a6GVentMnQ==",
"dev": true, "dev": true,
"license": "MIT", "license": "MIT",
"dependencies": { "dependencies": {
"@shikijs/core": "4.0.2", "@shikijs/core": "4.0.1",
"@shikijs/engine-javascript": "4.0.2", "@shikijs/engine-javascript": "4.0.1",
"@shikijs/engine-oniguruma": "4.0.2", "@shikijs/engine-oniguruma": "4.0.1",
"@shikijs/langs": "4.0.2", "@shikijs/langs": "4.0.1",
"@shikijs/themes": "4.0.2", "@shikijs/themes": "4.0.1",
"@shikijs/types": "4.0.2", "@shikijs/types": "4.0.1",
"@shikijs/vscode-textmate": "^10.0.2", "@shikijs/vscode-textmate": "^10.0.2",
"@types/hast": "^3.0.4" "@types/hast": "^3.0.4"
}, },
@ -3584,23 +3457,23 @@
} }
}, },
"node_modules/style-to-js": { "node_modules/style-to-js": {
"version": "1.1.21", "version": "1.1.19",
"resolved": "https://registry.npmjs.org/style-to-js/-/style-to-js-1.1.21.tgz", "resolved": "https://registry.npmjs.org/style-to-js/-/style-to-js-1.1.19.tgz",
"integrity": "sha512-RjQetxJrrUJLQPHbLku6U/ocGtzyjbJMP9lCNK7Ag0CNh690nSH8woqWH9u16nMjYBAok+i7JO1NP2pOy8IsPQ==", "integrity": "sha512-Ev+SgeqiNGT1ufsXyVC5RrJRXdrkRJ1Gol9Qw7Pb72YCKJXrBvP0ckZhBeVSrw2m06DJpei2528uIpjMb4TsoQ==",
"dev": true, "dev": true,
"license": "MIT", "license": "MIT",
"dependencies": { "dependencies": {
"style-to-object": "1.0.14" "style-to-object": "1.0.12"
} }
}, },
"node_modules/style-to-object": { "node_modules/style-to-object": {
"version": "1.0.14", "version": "1.0.12",
"resolved": "https://registry.npmjs.org/style-to-object/-/style-to-object-1.0.14.tgz", "resolved": "https://registry.npmjs.org/style-to-object/-/style-to-object-1.0.12.tgz",
"integrity": "sha512-LIN7rULI0jBscWQYaSswptyderlarFkjQ+t79nzty8tcIAceVomEVlLzH5VP4Cmsv6MtKhs7qaAiwlcp+Mgaxw==", "integrity": "sha512-ddJqYnoT4t97QvN2C95bCgt+m7AAgXjVnkk/jxAfmp7EAB8nnqqZYEbMd3em7/vEomDb2LAQKAy1RFfv41mdNw==",
"dev": true, "dev": true,
"license": "MIT", "license": "MIT",
"dependencies": { "dependencies": {
"inline-style-parser": "0.2.7" "inline-style-parser": "0.2.6"
} }
}, },
"node_modules/tinyglobby": { "node_modules/tinyglobby": {
@ -3725,9 +3598,9 @@
} }
}, },
"node_modules/unhead": { "node_modules/unhead": {
"version": "2.1.12", "version": "2.1.10",
"resolved": "https://registry.npmjs.org/unhead/-/unhead-2.1.12.tgz", "resolved": "https://registry.npmjs.org/unhead/-/unhead-2.1.10.tgz",
"integrity": "sha512-iTHdWD9ztTunOErtfUFk6Wr11BxvzumcYJ0CzaSCBUOEtg+DUZ9+gnE99i8QkLFT2q1rZD48BYYGXpOZVDLYkA==", "integrity": "sha512-We8l9uNF8zz6U8lfQaVG70+R/QBfQx1oPIgXin4BtZnK2IQpz6yazQ0qjMNVBDw2ADgF2ea58BtvSK+XX5AS7g==",
"dev": true, "dev": true,
"license": "MIT", "license": "MIT",
"dependencies": { "dependencies": {

View file

@ -18,7 +18,6 @@ Environment="CONTINUWUITY_DATABASE_PATH=%S/conduwuit"
Environment="CONTINUWUITY_CONFIG_RELOAD_SIGNAL=true" Environment="CONTINUWUITY_CONFIG_RELOAD_SIGNAL=true"
LoadCredential=conduwuit.toml:/etc/conduwuit/conduwuit.toml LoadCredential=conduwuit.toml:/etc/conduwuit/conduwuit.toml
RefreshOnReload=yes
ExecStart=/usr/bin/conduwuit --config ${CREDENTIALS_DIRECTORY}/conduwuit.toml ExecStart=/usr/bin/conduwuit --config ${CREDENTIALS_DIRECTORY}/conduwuit.toml

View file

@ -1,6 +1,6 @@
{ {
"$schema": "https://docs.renovatebot.com/renovate-schema.json", "$schema": "https://docs.renovatebot.com/renovate-schema.json",
"extends": ["config:recommended", "replacements:all", ":semanticCommitTypeAll(chore)"], "extends": ["config:recommended", "replacements:all"],
"dependencyDashboard": true, "dependencyDashboard": true,
"osvVulnerabilityAlerts": true, "osvVulnerabilityAlerts": true,
"lockFileMaintenance": { "lockFileMaintenance": {
@ -36,18 +36,10 @@
}, },
"packageRules": [ "packageRules": [
{ {
"description": "Batch minor and patch Rust dependency updates", "description": "Batch patch-level Rust dependency updates",
"matchManagers": ["cargo"],
"matchUpdateTypes": ["minor", "patch"],
"matchCurrentVersion": ">=1.0.0",
"groupName": "rust-non-major"
},
{
"description": "Batch patch-level zerover Rust dependency updates",
"matchManagers": ["cargo"], "matchManagers": ["cargo"],
"matchUpdateTypes": ["patch"], "matchUpdateTypes": ["patch"],
"matchCurrentVersion": ">=0.1.0,<1.0.0", "groupName": "rust-patch-updates"
"groupName": "rust-zerover-patch-updates"
}, },
{ {
"description": "Limit concurrent Cargo PRs", "description": "Limit concurrent Cargo PRs",

View file

@ -11,7 +11,6 @@ use crate::{
query::{self, QueryCommand}, query::{self, QueryCommand},
room::{self, RoomCommand}, room::{self, RoomCommand},
server::{self, ServerCommand}, server::{self, ServerCommand},
space::{self, SpaceCommand},
token::{self, TokenCommand}, token::{self, TokenCommand},
user::{self, UserCommand}, user::{self, UserCommand},
}; };
@ -35,10 +34,6 @@ pub enum AdminCommand {
/// Commands for managing rooms /// Commands for managing rooms
Rooms(RoomCommand), Rooms(RoomCommand),
#[command(subcommand)]
/// Commands for managing space permissions
Spaces(SpaceCommand),
#[command(subcommand)] #[command(subcommand)]
/// Commands for managing federation /// Commands for managing federation
Federation(FederationCommand), Federation(FederationCommand),
@ -86,10 +81,6 @@ pub(super) async fn process(command: AdminCommand, context: &Context<'_>) -> Res
token::process(command, context).await token::process(command, context).await
}, },
| Rooms(command) => room::process(command, context).await, | Rooms(command) => room::process(command, context).await,
| Spaces(command) => {
context.bail_restricted()?;
space::process(command, context).await
},
| Federation(command) => federation::process(command, context).await, | Federation(command) => federation::process(command, context).await,
| Server(command) => server::process(command, context).await, | Server(command) => server::process(command, context).await,
| Debug(command) => debug::process(command, context).await, | Debug(command) => debug::process(command, context).await,

View file

@ -17,7 +17,6 @@ pub(crate) mod media;
pub(crate) mod query; pub(crate) mod query;
pub(crate) mod room; pub(crate) mod room;
pub(crate) mod server; pub(crate) mod server;
pub(crate) mod space;
pub(crate) mod token; pub(crate) mod token;
pub(crate) mod user; pub(crate) mod user;

View file

@ -1,15 +0,0 @@
pub(super) mod roles;
use clap::Subcommand;
use conduwuit::Result;
use self::roles::SpaceRolesCommand;
use crate::admin_command_dispatch;
#[admin_command_dispatch]
#[derive(Debug, Subcommand)]
pub enum SpaceCommand {
#[command(subcommand)]
/// Manage space roles and permissions
Roles(SpaceRolesCommand),
}

View file

@ -1,632 +0,0 @@
use std::fmt::Write;
use clap::Subcommand;
use conduwuit::{Err, Event, Result, matrix::pdu::PduBuilder};
use conduwuit_core::matrix::space_roles::{
RoleDefinition, SPACE_CASCADING_EVENT_TYPE, SPACE_ROLE_MEMBER_EVENT_TYPE,
SPACE_ROLE_ROOM_EVENT_TYPE, SPACE_ROLES_EVENT_TYPE, SpaceCascadingEventContent,
SpaceRoleMemberEventContent, SpaceRoleRoomEventContent, SpaceRolesEventContent,
};
use futures::StreamExt;
use ruma::{OwnedRoomId, OwnedRoomOrAliasId, OwnedUserId, events::StateEventType};
use serde_json::value::to_raw_value;
use crate::{admin_command, admin_command_dispatch};
fn roles_event_type() -> StateEventType {
StateEventType::from(SPACE_ROLES_EVENT_TYPE.to_owned())
}
fn member_event_type() -> StateEventType {
StateEventType::from(SPACE_ROLE_MEMBER_EVENT_TYPE.to_owned())
}
fn room_event_type() -> StateEventType {
StateEventType::from(SPACE_ROLE_ROOM_EVENT_TYPE.to_owned())
}
fn cascading_event_type() -> StateEventType {
StateEventType::from(SPACE_CASCADING_EVENT_TYPE.to_owned())
}
macro_rules! resolve_room_as_space {
($self:expr, $space:expr) => {{
let space_id = $self.services.rooms.alias.resolve(&$space).await?;
if !matches!(
$self
.services
.rooms
.state_accessor
.get_room_type(&space_id)
.await,
Ok(ruma::room::RoomType::Space)
) {
return Err!("The specified room is not a Space.");
}
space_id
}};
}
macro_rules! resolve_space {
($self:expr, $space:expr) => {{
let space_id = resolve_room_as_space!($self, $space);
if !$self
.services
.rooms
.roles
.is_enabled_for_space(&space_id)
.await
{
return $self
.write_str(
"Space permission cascading is disabled for this Space. Enable it \
server-wide with `space_permission_cascading = true` in your config, or \
per-Space with `!admin space roles enable <space>`.",
)
.await;
}
space_id
}};
}
macro_rules! custom_state_pdu {
($event_type:expr, $state_key:expr, $content:expr) => {
PduBuilder {
event_type: $event_type.to_owned().into(),
content: to_raw_value($content)
.map_err(|e| conduwuit::err!("Failed to serialize state event content: {e}"))?,
state_key: Some($state_key.to_owned().into()),
..PduBuilder::default()
}
};
}
/// Cascade-remove a role name from all state events of a given type. For each
/// event that contains the role, the `$field` is filtered and the updated
/// content is sent back as a new state event.
macro_rules! cascade_remove_role {
(
$self:expr,
$shortstatehash:expr,
$event_type_fn:expr,
$event_type_const:expr,
$content_ty:ty,
$field:ident,
$role_name:expr,
$space_id:expr,
$state_lock:expr,
$server_user:expr
) => {{
let ev_type = $event_type_fn;
let entries: Vec<(_, ruma::OwnedEventId)> = $self
.services
.rooms
.state_accessor
.state_keys_with_ids($shortstatehash, &ev_type)
.collect()
.await;
for (state_key, event_id) in entries {
if let Ok(pdu) = $self.services.rooms.timeline.get_pdu(&event_id).await {
if let Ok(mut content) = pdu.get_content::<$content_ty>() {
if content.$field.contains($role_name) {
content.$field.retain(|r| r != $role_name);
$self
.services
.rooms
.timeline
.build_and_append_pdu(
custom_state_pdu!($event_type_const, &state_key, &content),
$server_user,
Some(&$space_id),
&$state_lock,
)
.await?;
}
}
}
}
}};
}
macro_rules! send_space_state {
($self:expr, $space_id:expr, $event_type:expr, $state_key:expr, $content:expr) => {{
let state_lock = $self.services.rooms.state.mutex.lock(&$space_id).await;
let server_user = &$self.services.globals.server_user;
$self
.services
.rooms
.timeline
.build_and_append_pdu(
custom_state_pdu!($event_type, $state_key, $content),
server_user,
Some(&$space_id),
&state_lock,
)
.await?
}};
}
#[admin_command_dispatch]
#[derive(Debug, Subcommand)]
pub enum SpaceRolesCommand {
/// List all roles defined in a space
List {
space: OwnedRoomOrAliasId,
},
/// Add a new role to a space
Add {
space: OwnedRoomOrAliasId,
role_name: String,
#[arg(long)]
description: Option<String>,
#[arg(long)]
power_level: Option<i64>,
},
/// Remove a role from a space
Remove {
space: OwnedRoomOrAliasId,
role_name: String,
},
/// Assign a role to a user
Assign {
space: OwnedRoomOrAliasId,
user_id: OwnedUserId,
role_name: String,
},
/// Revoke a role from a user
Revoke {
space: OwnedRoomOrAliasId,
user_id: OwnedUserId,
role_name: String,
},
/// Require a role for a room
Require {
space: OwnedRoomOrAliasId,
room_id: OwnedRoomId,
role_name: String,
},
/// Remove a role requirement from a room
Unrequire {
space: OwnedRoomOrAliasId,
room_id: OwnedRoomId,
role_name: String,
},
/// Show a user's roles in a space
User {
space: OwnedRoomOrAliasId,
user_id: OwnedUserId,
},
/// Show a room's role requirements in a space
Room {
space: OwnedRoomOrAliasId,
room_id: OwnedRoomId,
},
/// Enable space permission cascading for a specific space (overrides
/// server config)
Enable {
space: OwnedRoomOrAliasId,
},
/// Disable space permission cascading for a specific space (overrides
/// server config)
Disable {
space: OwnedRoomOrAliasId,
},
/// Show whether cascading is enabled for a space and the source (server
/// default or per-space override)
Status {
space: OwnedRoomOrAliasId,
},
}
#[admin_command]
async fn list(&self, space: OwnedRoomOrAliasId) -> Result {
let space_id = resolve_space!(self, space);
let roles_event_type = roles_event_type();
let content: SpaceRolesEventContent = self
.services
.rooms
.state_accessor
.room_state_get_content(&space_id, &roles_event_type, "")
.await
.unwrap_or_default();
if content.roles.is_empty() {
return self.write_str("No roles defined in this space.").await;
}
let mut msg = format!("Roles in {space_id}:\n```\n");
for (name, def) in &content.roles {
let pl = def
.power_level
.map(|p| format!(" (power_level: {p})"))
.unwrap_or_default();
let _ = writeln!(msg, "- {name}: {}{pl}", def.description);
}
msg.push_str("```");
self.write_str(&msg).await
}
#[admin_command]
async fn add(
&self,
space: OwnedRoomOrAliasId,
role_name: String,
description: Option<String>,
power_level: Option<i64>,
) -> Result {
let space_id = resolve_space!(self, space);
if let Some(pl) = power_level {
if pl > i64::from(ruma::Int::MAX) || pl < i64::from(ruma::Int::MIN) {
return Err!(
"Power level must be between {} and {}.",
ruma::Int::MIN,
ruma::Int::MAX
);
}
}
let roles_event_type = roles_event_type();
let mut content: SpaceRolesEventContent = self
.services
.rooms
.state_accessor
.room_state_get_content(&space_id, &roles_event_type, "")
.await
.unwrap_or_default();
if content.roles.contains_key(&role_name) {
return Err!("Role '{role_name}' already exists in this space.");
}
content.roles.insert(role_name.clone(), RoleDefinition {
description: description.unwrap_or_else(|| role_name.clone()),
power_level,
});
send_space_state!(self, space_id, SPACE_ROLES_EVENT_TYPE, "", &content);
self.write_str(&format!("Added role '{role_name}' to space {space_id}."))
.await
}
#[admin_command]
async fn remove(&self, space: OwnedRoomOrAliasId, role_name: String) -> Result {
let space_id = resolve_space!(self, space);
let roles_event_type = roles_event_type();
let mut content: SpaceRolesEventContent = self
.services
.rooms
.state_accessor
.room_state_get_content(&space_id, &roles_event_type, "")
.await
.unwrap_or_default();
if content.roles.remove(&role_name).is_none() {
return Err!("Role '{role_name}' does not exist in this space.");
}
send_space_state!(self, space_id, SPACE_ROLES_EVENT_TYPE, "", &content);
// Cascade: remove the deleted role from all member and room events
let server_user = &self.services.globals.server_user;
if let Ok(shortstatehash) = self
.services
.rooms
.state
.get_room_shortstatehash(&space_id)
.await
{
let state_lock = self.services.rooms.state.mutex.lock(&space_id).await;
cascade_remove_role!(
self,
shortstatehash,
member_event_type(),
SPACE_ROLE_MEMBER_EVENT_TYPE,
SpaceRoleMemberEventContent,
roles,
&role_name,
space_id,
state_lock,
server_user
);
cascade_remove_role!(
self,
shortstatehash,
room_event_type(),
SPACE_ROLE_ROOM_EVENT_TYPE,
SpaceRoleRoomEventContent,
required_roles,
&role_name,
space_id,
state_lock,
server_user
);
}
self.write_str(&format!("Removed role '{role_name}' from space {space_id}."))
.await
}
#[admin_command]
async fn assign(
&self,
space: OwnedRoomOrAliasId,
user_id: OwnedUserId,
role_name: String,
) -> Result {
let space_id = resolve_space!(self, space);
let roles_event_type = roles_event_type();
let role_defs: SpaceRolesEventContent = self
.services
.rooms
.state_accessor
.room_state_get_content(&space_id, &roles_event_type, "")
.await
.unwrap_or_default();
if !role_defs.roles.contains_key(&role_name) {
return Err!("Role '{role_name}' does not exist in this space.");
}
let member_event_type = member_event_type();
let mut content: SpaceRoleMemberEventContent = self
.services
.rooms
.state_accessor
.room_state_get_content(&space_id, &member_event_type, user_id.as_str())
.await
.unwrap_or_default();
if content.roles.contains(&role_name) {
return Err!("User {user_id} already has role '{role_name}' in this space.");
}
content.roles.push(role_name.clone());
send_space_state!(self, space_id, SPACE_ROLE_MEMBER_EVENT_TYPE, user_id.as_str(), &content);
self.write_str(&format!("Assigned role '{role_name}' to {user_id} in space {space_id}."))
.await
}
#[admin_command]
async fn revoke(
&self,
space: OwnedRoomOrAliasId,
user_id: OwnedUserId,
role_name: String,
) -> Result {
let space_id = resolve_space!(self, space);
let member_event_type = member_event_type();
let mut content: SpaceRoleMemberEventContent = self
.services
.rooms
.state_accessor
.room_state_get_content(&space_id, &member_event_type, user_id.as_str())
.await
.unwrap_or_default();
let original_len = content.roles.len();
content.roles.retain(|r| r != &role_name);
if content.roles.len() == original_len {
return Err!("User {user_id} does not have role '{role_name}' in this space.");
}
send_space_state!(self, space_id, SPACE_ROLE_MEMBER_EVENT_TYPE, user_id.as_str(), &content);
self.write_str(&format!("Revoked role '{role_name}' from {user_id} in space {space_id}."))
.await
}
#[admin_command]
async fn require(
&self,
space: OwnedRoomOrAliasId,
room_id: OwnedRoomId,
role_name: String,
) -> Result {
let space_id = resolve_space!(self, space);
let child_rooms = self.services.rooms.roles.get_child_rooms(&space_id).await;
if !child_rooms.contains(&room_id) {
return Err!("Room {room_id} is not a child of space {space_id}.");
}
let roles_event_type = roles_event_type();
let role_defs: SpaceRolesEventContent = self
.services
.rooms
.state_accessor
.room_state_get_content(&space_id, &roles_event_type, "")
.await
.unwrap_or_default();
if !role_defs.roles.contains_key(&role_name) {
return Err!("Role '{role_name}' does not exist in this space.");
}
let room_event_type = room_event_type();
let mut content: SpaceRoleRoomEventContent = self
.services
.rooms
.state_accessor
.room_state_get_content(&space_id, &room_event_type, room_id.as_str())
.await
.unwrap_or_default();
if content.required_roles.contains(&role_name) {
return Err!("Room {room_id} already requires role '{role_name}' in this space.");
}
content.required_roles.push(role_name.clone());
send_space_state!(self, space_id, SPACE_ROLE_ROOM_EVENT_TYPE, room_id.as_str(), &content);
self.write_str(&format!(
"Room {room_id} now requires role '{role_name}' in space {space_id}."
))
.await
}
#[admin_command]
async fn unrequire(
&self,
space: OwnedRoomOrAliasId,
room_id: OwnedRoomId,
role_name: String,
) -> Result {
let space_id = resolve_space!(self, space);
let room_event_type = room_event_type();
let mut content: SpaceRoleRoomEventContent = self
.services
.rooms
.state_accessor
.room_state_get_content(&space_id, &room_event_type, room_id.as_str())
.await
.unwrap_or_default();
let original_len = content.required_roles.len();
content.required_roles.retain(|r| r != &role_name);
if content.required_roles.len() == original_len {
return Err!("Room {room_id} does not require role '{role_name}' in this space.");
}
send_space_state!(self, space_id, SPACE_ROLE_ROOM_EVENT_TYPE, room_id.as_str(), &content);
self.write_str(&format!(
"Removed role requirement '{role_name}' from room {room_id} in space {space_id}."
))
.await
}
#[admin_command]
async fn user(&self, space: OwnedRoomOrAliasId, user_id: OwnedUserId) -> Result {
let space_id = resolve_space!(self, space);
let roles = self
.services
.rooms
.roles
.get_user_roles_in_space(&space_id, &user_id)
.await;
match roles {
| Some(roles) if !roles.is_empty() => {
let list: String = roles
.iter()
.map(|r| format!("- {r}"))
.collect::<Vec<_>>()
.join("\n");
self.write_str(&format!("Roles for {user_id} in space {space_id}:\n```\n{list}\n```"))
.await
},
| _ =>
self.write_str(&format!("User {user_id} has no roles in space {space_id}."))
.await,
}
}
#[admin_command]
async fn room(&self, space: OwnedRoomOrAliasId, room_id: OwnedRoomId) -> Result {
let space_id = resolve_space!(self, space);
let reqs = self
.services
.rooms
.roles
.get_room_requirements_in_space(&space_id, &room_id)
.await;
match reqs {
| Some(reqs) if !reqs.is_empty() => {
let list: String = reqs
.iter()
.map(|r| format!("- {r}"))
.collect::<Vec<_>>()
.join("\n");
self.write_str(&format!(
"Required roles for room {room_id} in space {space_id}:\n```\n{list}\n```"
))
.await
},
| _ =>
self.write_str(&format!(
"Room {room_id} has no role requirements in space {space_id}."
))
.await,
}
}
#[admin_command]
async fn enable(&self, space: OwnedRoomOrAliasId) -> Result {
let space_id = resolve_room_as_space!(self, space);
self.services
.rooms
.roles
.ensure_default_roles(&space_id)
.await?;
let content = SpaceCascadingEventContent { enabled: true };
send_space_state!(self, space_id, SPACE_CASCADING_EVENT_TYPE, "", &content);
self.write_str(&format!("Space permission cascading enabled for {space_id}."))
.await
}
#[admin_command]
async fn disable(&self, space: OwnedRoomOrAliasId) -> Result {
let space_id = resolve_room_as_space!(self, space);
let content = SpaceCascadingEventContent { enabled: false };
send_space_state!(self, space_id, SPACE_CASCADING_EVENT_TYPE, "", &content);
self.write_str(&format!("Space permission cascading disabled for {space_id}."))
.await
}
#[admin_command]
async fn status(&self, space: OwnedRoomOrAliasId) -> Result {
let space_id = resolve_room_as_space!(self, space);
let global_default = self.services.rooms.roles.is_enabled();
let cascading_event_type = cascading_event_type();
let per_space_override: Option<bool> = self
.services
.rooms
.state_accessor
.room_state_get_content::<SpaceCascadingEventContent>(
&space_id,
&cascading_event_type,
"",
)
.await
.ok()
.map(|c| c.enabled);
let effective = per_space_override.unwrap_or(global_default);
let source = match per_space_override {
| Some(v) => format!("per-Space override (enabled: {v})"),
| None => format!("server default (space_permission_cascading: {global_default})"),
};
self.write_str(&format!(
"Cascading status for {space_id}:\n- Effective: **{effective}**\n- Source: {source}"
))
.await
}

View file

@ -296,31 +296,6 @@ pub(super) async fn reset_password(
Ok(()) Ok(())
} }
#[admin_command]
pub(super) async fn issue_password_reset_link(&self, username: String) -> Result {
use conduwuit_service::password_reset::{PASSWORD_RESET_PATH, RESET_TOKEN_QUERY_PARAM};
self.bail_restricted()?;
let mut reset_url = self
.services
.config
.get_client_domain()
.join(PASSWORD_RESET_PATH)
.unwrap();
let user_id = parse_local_user_id(self.services, &username)?;
let token = self.services.password_reset.issue_token(user_id).await?;
reset_url
.query_pairs_mut()
.append_pair(RESET_TOKEN_QUERY_PARAM, &token.token);
self.write_str(&format!("Password reset link issued for {username}: {reset_url}"))
.await?;
Ok(())
}
#[admin_command] #[admin_command]
pub(super) async fn deactivate_all(&self, no_leave_rooms: bool, force: bool) -> Result { pub(super) async fn deactivate_all(&self, no_leave_rooms: bool, force: bool) -> Result {
if self.body.len() < 2 if self.body.len() < 2

View file

@ -29,12 +29,6 @@ pub enum UserCommand {
password: Option<String>, password: Option<String>,
}, },
/// Issue a self-service password reset link for a user.
IssuePasswordResetLink {
/// Username of the user who may use the link
username: String,
},
/// Deactivate a user /// Deactivate a user
/// ///
/// User will be removed from all rooms by default. /// User will be removed from all rooms by default.

View file

@ -347,12 +347,6 @@ pub async fn join_room_by_id_helper(
} }
} }
services
.rooms
.roles
.check_join_allowed(room_id, sender_user)
.await?;
if server_in_room { if server_in_room {
join_room_by_id_helper_local(services, sender_user, room_id, reason, servers, state_lock) join_room_by_id_helper_local(services, sender_user, room_id, reason, servers, state_lock)
.boxed() .boxed()

View file

@ -270,7 +270,7 @@ async fn build_state_and_timeline(
// joined since the last sync, that being the syncing user's join event. if // joined since the last sync, that being the syncing user's join event. if
// it's empty something is wrong. // it's empty something is wrong.
if joined_since_last_sync && timeline.pdus.is_empty() { if joined_since_last_sync && timeline.pdus.is_empty() {
debug_warn!("timeline for newly joined room is empty"); warn!("timeline for newly joined room is empty");
} }
let (summary, device_list_updates) = try_join( let (summary, device_list_updates) = try_join(

View file

@ -86,7 +86,7 @@ libloading.optional = true
log.workspace = true log.workspace = true
num-traits.workspace = true num-traits.workspace = true
rand.workspace = true rand.workspace = true
rand_core = { version = "0.6.4", features = ["getrandom"] } rand_core = { version = "0.10.0", features = ["getrandom"] }
regex.workspace = true regex.workspace = true
reqwest.workspace = true reqwest.workspace = true
ring.workspace = true ring.workspace = true

View file

@ -68,10 +68,6 @@ pub struct Config {
/// ///
/// Also see the `[global.well_known]` config section at the very bottom. /// Also see the `[global.well_known]` config section at the very bottom.
/// ///
/// If `client` is not set under `[global.well_known]`, the server name will
/// be used as the base domain for user-facing links (such as password
/// reset links) created by Continuwuity.
///
/// Examples of delegation: /// Examples of delegation:
/// - https://continuwuity.org/.well-known/matrix/server /// - https://continuwuity.org/.well-known/matrix/server
/// - https://continuwuity.org/.well-known/matrix/client /// - https://continuwuity.org/.well-known/matrix/client
@ -607,22 +603,6 @@ pub struct Config {
#[serde(default)] #[serde(default)]
pub suspend_on_register: bool, pub suspend_on_register: bool,
/// Server-wide default for space permission cascading (power levels and
/// role-based access). Individual Spaces can override this via the
/// `com.continuwuity.space.cascading` state event or the admin command
/// `!admin space roles enable/disable <space>`.
///
/// default: false
#[serde(default)]
pub space_permission_cascading: bool,
/// Maximum number of spaces to cache role data for. When exceeded the
/// cache is cleared and repopulated on demand.
///
/// default: 1000
#[serde(default = "default_space_roles_cache_flush_threshold")]
pub space_roles_cache_flush_threshold: u32,
/// Enabling this setting opens registration to anyone without restrictions. /// Enabling this setting opens registration to anyone without restrictions.
/// This makes your server vulnerable to abuse /// This makes your server vulnerable to abuse
#[serde(default)] #[serde(default)]
@ -1755,11 +1735,6 @@ pub struct Config {
/// default: "continuwuity/<version> (bot; +https://continuwuity.org)" /// default: "continuwuity/<version> (bot; +https://continuwuity.org)"
pub url_preview_user_agent: Option<String>, pub url_preview_user_agent: Option<String>,
/// Determines whether audio and video files will be downloaded for URL
/// previews.
#[serde(default)]
pub url_preview_allow_audio_video: bool,
/// List of forbidden room aliases and room IDs as strings of regex /// List of forbidden room aliases and room IDs as strings of regex
/// patterns. /// patterns.
/// ///
@ -2109,13 +2084,6 @@ pub struct Config {
#[serde(default)] #[serde(default)]
pub force_disable_first_run_mode: bool, pub force_disable_first_run_mode: bool,
/// Allow search engines and crawlers to index Continuwuity's built-in
/// webpages served under the `/_continuwuity/` prefix.
///
/// default: false
#[serde(default)]
pub allow_web_indexing: bool,
/// display: nested /// display: nested
#[serde(default)] #[serde(default)]
pub ldap: LdapConfig, pub ldap: LdapConfig,
@ -2853,5 +2821,3 @@ fn default_ldap_search_filter() -> String { "(objectClass=*)".to_owned() }
fn default_ldap_uid_attribute() -> String { String::from("uid") } fn default_ldap_uid_attribute() -> String { String::from("uid") }
fn default_ldap_name_attribute() -> String { String::from("givenName") } fn default_ldap_name_attribute() -> String { String::from("givenName") }
fn default_space_roles_cache_flush_threshold() -> u32 { 1000 }

View file

@ -2,7 +2,6 @@
pub mod event; pub mod event;
pub mod pdu; pub mod pdu;
pub mod space_roles;
pub mod state_key; pub mod state_key;
pub mod state_res; pub mod state_res;

View file

@ -1,81 +0,0 @@
use std::collections::BTreeMap;
use serde::{Deserialize, Serialize};
pub const SPACE_ROLES_EVENT_TYPE: &str = "com.continuwuity.space.roles";
pub const SPACE_ROLE_MEMBER_EVENT_TYPE: &str = "com.continuwuity.space.role.member";
pub const SPACE_ROLE_ROOM_EVENT_TYPE: &str = "com.continuwuity.space.role.room";
pub const SPACE_CASCADING_EVENT_TYPE: &str = "com.continuwuity.space.cascading";
#[derive(Clone, Debug, Default, Deserialize, Serialize, PartialEq, Eq)]
pub struct SpaceRolesEventContent {
pub roles: BTreeMap<String, RoleDefinition>,
}
#[derive(Clone, Debug, Deserialize, Serialize, PartialEq, Eq)]
pub struct RoleDefinition {
pub description: String,
#[serde(skip_serializing_if = "Option::is_none")]
pub power_level: Option<i64>,
}
#[derive(Clone, Debug, Default, Deserialize, Serialize, PartialEq, Eq)]
pub struct SpaceRoleMemberEventContent {
pub roles: Vec<String>,
}
#[derive(Clone, Debug, Default, Deserialize, Serialize, PartialEq, Eq)]
pub struct SpaceRoleRoomEventContent {
pub required_roles: Vec<String>,
}
#[derive(Clone, Debug, Deserialize, Serialize, PartialEq, Eq)]
pub struct SpaceCascadingEventContent {
pub enabled: bool,
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn space_roles_roundtrip() {
let mut roles = BTreeMap::new();
roles.insert("admin".to_owned(), RoleDefinition {
description: "Space administrator".to_owned(),
power_level: Some(100),
});
roles.insert("nsfw".to_owned(), RoleDefinition {
description: "NSFW access".to_owned(),
power_level: None,
});
let content = SpaceRolesEventContent { roles };
let json = serde_json::to_string(&content).unwrap();
let deserialized: SpaceRolesEventContent = serde_json::from_str(&json).unwrap();
assert_eq!(deserialized.roles["admin"].power_level, Some(100));
assert!(deserialized.roles["nsfw"].power_level.is_none());
}
#[test]
fn power_level_omitted_in_serialization_when_none() {
let role = RoleDefinition {
description: "Test".to_owned(),
power_level: None,
};
let json = serde_json::to_string(&role).unwrap();
assert!(!json.contains("power_level"));
}
#[test]
fn negative_power_level() {
let json = r#"{"description":"Restricted","power_level":-10}"#;
let role: RoleDefinition = serde_json::from_str(json).unwrap();
assert_eq!(role.power_level, Some(-10));
}
#[test]
fn missing_description_fails() {
let json = r#"{"power_level":100}"#;
serde_json::from_str::<RoleDefinition>(json).unwrap_err();
}
}

View file

@ -1224,7 +1224,6 @@ fn can_send_event(event: &impl Event, ple: Option<&impl Event>, user_level: Int)
} }
/// Confirm that the event sender has the required power levels. /// Confirm that the event sender has the required power levels.
#[allow(clippy::cognitive_complexity)]
fn check_power_levels( fn check_power_levels(
room_version: &RoomVersion, room_version: &RoomVersion,
power_event: &impl Event, power_event: &impl Event,

View file

@ -75,7 +75,6 @@ type Result<T, E = Error> = crate::Result<T, E>;
/// event is part of the same room. /// event is part of the same room.
//#[tracing::instrument(level = "debug", skip(state_sets, auth_chain_sets, //#[tracing::instrument(level = "debug", skip(state_sets, auth_chain_sets,
//#[tracing::instrument(level event_fetch))] //#[tracing::instrument(level event_fetch))]
#[allow(clippy::cognitive_complexity)]
pub async fn resolve<'a, Pdu, Sets, SetIter, Hasher, Fetch, FetchFut, Exists, ExistsFut>( pub async fn resolve<'a, Pdu, Sets, SetIter, Hasher, Fetch, FetchFut, Exists, ExistsFut>(
room_version: &RoomVersionId, room_version: &RoomVersionId,
state_sets: Sets, state_sets: Sets,

View file

@ -112,10 +112,6 @@ pub(super) static MAPS: &[Descriptor] = &[
name: "onetimekeyid_onetimekeys", name: "onetimekeyid_onetimekeys",
..descriptor::RANDOM_SMALL ..descriptor::RANDOM_SMALL
}, },
Descriptor {
name: "passwordresettoken_info",
..descriptor::RANDOM_SMALL
},
Descriptor { Descriptor {
name: "pduid_pdu", name: "pduid_pdu",
cache_disp: CacheDisp::SharedWith("eventid_outlierpdu"), cache_disp: CacheDisp::SharedWith("eventid_outlierpdu"),

View file

@ -18,5 +18,5 @@ pub(crate) fn build(services: &Arc<Services>) -> (Router, Guard) {
} }
async fn not_found(_uri: Uri) -> impl IntoResponse { async fn not_found(_uri: Uri) -> impl IntoResponse {
Error::Request(ErrorKind::Unrecognized, "not found :(".into(), StatusCode::NOT_FOUND) Error::Request(ErrorKind::Unrecognized, "Not Found".into(), StatusCode::NOT_FOUND)
} }

View file

@ -121,7 +121,7 @@ webpage.workspace = true
webpage.optional = true webpage.optional = true
blurhash.workspace = true blurhash.workspace = true
blurhash.optional = true blurhash.optional = true
recaptcha-verify = { version = "0.2.0", default-features = false } recaptcha-verify = { version = "0.1.5", default-features = false }
yansi.workspace = true yansi.workspace = true
[target.'cfg(all(unix, target_os = "linux"))'.dependencies] [target.'cfg(all(unix, target_os = "linux"))'.dependencies]

View file

@ -272,10 +272,7 @@ impl Service {
.get(id) .get(id)
.await .await
.and_then(|ref bytes| serde_saphyr::from_slice(bytes).map_err(Into::into)) .and_then(|ref bytes| serde_saphyr::from_slice(bytes).map_err(Into::into))
.map_err(|e| { .map_err(|e| err!(Database("Invalid appservice {id:?} registration: {e:?}")))
self.db.id_appserviceregistrations.remove(id);
err!(Database("Invalid appservice {id:?} registration: {e:?}. Removed."))
})
} }
pub fn read(&self) -> impl Future<Output = RwLockReadGuard<'_, Registrations>> + Send { pub fn read(&self) -> impl Future<Output = RwLockReadGuard<'_, Registrations>> + Send {

View file

@ -6,7 +6,6 @@ use conduwuit::{
config::{Config, check}, config::{Config, check},
error, implement, error, implement,
}; };
use url::Url;
use crate::registration_tokens::{ValidToken, ValidTokenSource}; use crate::registration_tokens::{ValidToken, ValidTokenSource};
@ -24,18 +23,6 @@ impl Service {
.clone() .clone()
.map(|token| ValidToken { token, source: ValidTokenSource::Config }) .map(|token| ValidToken { token, source: ValidTokenSource::Config })
} }
/// Get the base domain to use for user-facing URLs.
#[must_use]
pub fn get_client_domain(&self) -> Url {
self.well_known.client.clone().unwrap_or_else(|| {
let host = self.server_name.host();
format!("https://{host}")
.as_str()
.try_into()
.expect("server name should be a valid host")
})
}
} }
#[async_trait] #[async_trait]

View file

@ -142,10 +142,6 @@ impl Service {
self.server.config.url_preview_check_root_domain self.server.config.url_preview_check_root_domain
} }
pub fn url_preview_allow_audio_video(&self) -> bool {
self.server.config.url_preview_allow_audio_video
}
pub fn forbidden_alias_names(&self) -> &RegexSet { &self.server.config.forbidden_alias_names } pub fn forbidden_alias_names(&self) -> &RegexSet { &self.server.config.forbidden_alias_names }
pub fn forbidden_usernames(&self) -> &RegexSet { &self.server.config.forbidden_usernames } pub fn forbidden_usernames(&self) -> &RegexSet { &self.server.config.forbidden_usernames }

View file

@ -207,28 +207,6 @@ impl Data {
value.extend_from_slice(&data.image_width.unwrap_or(0).to_be_bytes()); value.extend_from_slice(&data.image_width.unwrap_or(0).to_be_bytes());
value.push(0xFF); value.push(0xFF);
value.extend_from_slice(&data.image_height.unwrap_or(0).to_be_bytes()); value.extend_from_slice(&data.image_height.unwrap_or(0).to_be_bytes());
value.push(0xFF);
value.extend_from_slice(
data.video
.as_ref()
.map(String::as_bytes)
.unwrap_or_default(),
);
value.push(0xFF);
value.extend_from_slice(&data.video_size.unwrap_or(0).to_be_bytes());
value.push(0xFF);
value.extend_from_slice(&data.video_width.unwrap_or(0).to_be_bytes());
value.push(0xFF);
value.extend_from_slice(&data.video_height.unwrap_or(0).to_be_bytes());
value.push(0xFF);
value.extend_from_slice(
data.audio
.as_ref()
.map(String::as_bytes)
.unwrap_or_default(),
);
value.push(0xFF);
value.extend_from_slice(&data.audio_size.unwrap_or(0).to_be_bytes());
self.url_previews.insert(url.as_bytes(), &value); self.url_previews.insert(url.as_bytes(), &value);
@ -289,48 +267,6 @@ impl Data {
| Some(0) => None, | Some(0) => None,
| x => x, | x => x,
}; };
let video = match values
.next()
.and_then(|b| String::from_utf8(b.to_vec()).ok())
{
| Some(s) if s.is_empty() => None,
| x => x,
};
let video_size = match values
.next()
.map(|b| usize::from_be_bytes(b.try_into().unwrap_or_default()))
{
| Some(0) => None,
| x => x,
};
let video_width = match values
.next()
.map(|b| u32::from_be_bytes(b.try_into().unwrap_or_default()))
{
| Some(0) => None,
| x => x,
};
let video_height = match values
.next()
.map(|b| u32::from_be_bytes(b.try_into().unwrap_or_default()))
{
| Some(0) => None,
| x => x,
};
let audio = match values
.next()
.and_then(|b| String::from_utf8(b.to_vec()).ok())
{
| Some(s) if s.is_empty() => None,
| x => x,
};
let audio_size = match values
.next()
.map(|b| usize::from_be_bytes(b.try_into().unwrap_or_default()))
{
| Some(0) => None,
| x => x,
};
Ok(UrlPreviewData { Ok(UrlPreviewData {
title, title,
@ -339,12 +275,6 @@ impl Data {
image_size, image_size,
image_width, image_width,
image_height, image_height,
video,
video_size,
video_width,
video_height,
audio,
audio_size,
}) })
} }
} }

View file

@ -10,8 +10,6 @@ use std::time::SystemTime;
use conduwuit::{Err, Result, debug, err, utils::response::LimitReadExt}; use conduwuit::{Err, Result, debug, err, utils::response::LimitReadExt};
use conduwuit_core::implement; use conduwuit_core::implement;
use ipaddress::IPAddress; use ipaddress::IPAddress;
#[cfg(feature = "url_preview")]
use ruma::OwnedMxcUri;
use serde::Serialize; use serde::Serialize;
use url::Url; use url::Url;
@ -31,18 +29,6 @@ pub struct UrlPreviewData {
pub image_width: Option<u32>, pub image_width: Option<u32>,
#[serde(skip_serializing_if = "Option::is_none", rename(serialize = "og:image:height"))] #[serde(skip_serializing_if = "Option::is_none", rename(serialize = "og:image:height"))]
pub image_height: Option<u32>, pub image_height: Option<u32>,
#[serde(skip_serializing_if = "Option::is_none", rename(serialize = "og:video"))]
pub video: Option<String>,
#[serde(skip_serializing_if = "Option::is_none", rename(serialize = "matrix:video:size"))]
pub video_size: Option<usize>,
#[serde(skip_serializing_if = "Option::is_none", rename(serialize = "og:video:width"))]
pub video_width: Option<u32>,
#[serde(skip_serializing_if = "Option::is_none", rename(serialize = "og:video:height"))]
pub video_height: Option<u32>,
#[serde(skip_serializing_if = "Option::is_none", rename(serialize = "og:audio"))]
pub audio: Option<String>,
#[serde(skip_serializing_if = "Option::is_none", rename(serialize = "matrix:audio:size"))]
pub audio_size: Option<usize>,
} }
#[implement(Service)] #[implement(Service)]
@ -110,9 +96,7 @@ async fn request_url_preview(&self, url: &Url) -> Result<UrlPreviewData> {
let data = match content_type { let data = match content_type {
| html if html.starts_with("text/html") => self.download_html(url.as_str()).await?, | html if html.starts_with("text/html") => self.download_html(url.as_str()).await?,
| img if img.starts_with("image/") => self.download_image(url.as_str(), None).await?, | img if img.starts_with("image/") => self.download_image(url.as_str()).await?,
| video if video.starts_with("video/") => self.download_video(url.as_str(), None).await?,
| audio if audio.starts_with("audio/") => self.download_audio(url.as_str(), None).await?,
| _ => return Err!(Request(Unknown("Unsupported Content-Type"))), | _ => return Err!(Request(Unknown("Unsupported Content-Type"))),
}; };
@ -123,17 +107,11 @@ async fn request_url_preview(&self, url: &Url) -> Result<UrlPreviewData> {
#[cfg(feature = "url_preview")] #[cfg(feature = "url_preview")]
#[implement(Service)] #[implement(Service)]
pub async fn download_image( pub async fn download_image(&self, url: &str) -> Result<UrlPreviewData> {
&self,
url: &str,
preview_data: Option<UrlPreviewData>,
) -> Result<UrlPreviewData> {
use conduwuit::utils::random_string; use conduwuit::utils::random_string;
use image::ImageReader; use image::ImageReader;
use ruma::Mxc; use ruma::Mxc;
let mut preview_data = preview_data.unwrap_or_default();
let image = self let image = self
.services .services
.client .client
@ -150,7 +128,6 @@ pub async fn download_image(
.expect("u64 should fit in usize"), .expect("u64 should fit in usize"),
) )
.await?; .await?;
let mxc = Mxc { let mxc = Mxc {
server_name: self.services.globals.server_name(), server_name: self.services.globals.server_name(),
media_id: &random_string(super::MXC_LENGTH), media_id: &random_string(super::MXC_LENGTH),
@ -158,125 +135,27 @@ pub async fn download_image(
self.create(&mxc, None, None, None, &image).await?; self.create(&mxc, None, None, None, &image).await?;
preview_data.image = Some(mxc.to_string()); let cursor = std::io::Cursor::new(&image);
if preview_data.image_height.is_none() || preview_data.image_width.is_none() { let (width, height) = match ImageReader::new(cursor).with_guessed_format() {
let cursor = std::io::Cursor::new(&image); | Err(_) => (None, None),
let (width, height) = match ImageReader::new(cursor).with_guessed_format() { | Ok(reader) => match reader.into_dimensions() {
| Err(_) => (None, None), | Err(_) => (None, None),
| Ok(reader) => match reader.into_dimensions() { | Ok((width, height)) => (Some(width), Some(height)),
| Err(_) => (None, None), },
| Ok((width, height)) => (Some(width), Some(height)),
},
};
preview_data.image_width = width;
preview_data.image_height = height;
}
Ok(preview_data)
}
#[cfg(feature = "url_preview")]
#[implement(Service)]
pub async fn download_video(
&self,
url: &str,
preview_data: Option<UrlPreviewData>,
) -> Result<UrlPreviewData> {
let mut preview_data = preview_data.unwrap_or_default();
if self.services.globals.url_preview_allow_audio_video() {
let (url, size) = self.download_media(url).await?;
preview_data.video = Some(url.to_string());
preview_data.video_size = Some(size);
}
Ok(preview_data)
}
#[cfg(feature = "url_preview")]
#[implement(Service)]
pub async fn download_audio(
&self,
url: &str,
preview_data: Option<UrlPreviewData>,
) -> Result<UrlPreviewData> {
let mut preview_data = preview_data.unwrap_or_default();
if self.services.globals.url_preview_allow_audio_video() {
let (url, size) = self.download_media(url).await?;
preview_data.audio = Some(url.to_string());
preview_data.audio_size = Some(size);
}
Ok(preview_data)
}
#[cfg(feature = "url_preview")]
#[implement(Service)]
pub async fn download_media(&self, url: &str) -> Result<(OwnedMxcUri, usize)> {
use conduwuit::utils::random_string;
use http::header::CONTENT_TYPE;
use ruma::Mxc;
let response = self.services.client.url_preview.get(url).send().await?;
let content_type = response.headers().get(CONTENT_TYPE).cloned();
let media = response
.limit_read(
self.services
.server
.config
.max_request_size
.try_into()
.expect("u64 should fit in usize"),
)
.await?;
let mxc = Mxc {
server_name: self.services.globals.server_name(),
media_id: &random_string(super::MXC_LENGTH),
}; };
let content_type = content_type.and_then(|v| v.to_str().map(ToOwned::to_owned).ok()); Ok(UrlPreviewData {
self.create(&mxc, None, None, content_type.as_deref(), &media) image: Some(mxc.to_string()),
.await?; image_size: Some(image.len()),
image_width: width,
Ok((OwnedMxcUri::from(mxc.to_string()), media.len())) image_height: height,
..Default::default()
})
} }
#[cfg(not(feature = "url_preview"))] #[cfg(not(feature = "url_preview"))]
#[implement(Service)] #[implement(Service)]
pub async fn download_image( pub async fn download_image(&self, _url: &str) -> Result<UrlPreviewData> {
&self,
_url: &str,
_preview_data: Option<UrlPreviewData>,
) -> Result<UrlPreviewData> {
Err!(FeatureDisabled("url_preview"))
}
#[cfg(not(feature = "url_preview"))]
#[implement(Service)]
pub async fn download_video(
&self,
_url: &str,
_preview_data: Option<UrlPreviewData>,
) -> Result<UrlPreviewData> {
Err!(FeatureDisabled("url_preview"))
}
#[cfg(not(feature = "url_preview"))]
#[implement(Service)]
pub async fn download_audio(
&self,
_url: &str,
_preview_data: Option<UrlPreviewData>,
) -> Result<UrlPreviewData> {
Err!(FeatureDisabled("url_preview"))
}
#[cfg(not(feature = "url_preview"))]
#[implement(Service)]
pub async fn download_media(&self, _url: &str) -> Result<UrlPreviewData> {
Err!(FeatureDisabled("url_preview")) Err!(FeatureDisabled("url_preview"))
} }
@ -303,29 +182,18 @@ async fn download_html(&self, url: &str) -> Result<UrlPreviewData> {
return Err!(Request(Unknown("Failed to parse HTML"))); return Err!(Request(Unknown("Failed to parse HTML")));
}; };
let mut preview_data = UrlPreviewData::default(); let mut data = match html.opengraph.images.first() {
| None => UrlPreviewData::default(),
if let Some(obj) = html.opengraph.images.first() { | Some(obj) => self.download_image(&obj.url).await?,
preview_data = self.download_image(&obj.url, Some(preview_data)).await?; };
}
if let Some(obj) = html.opengraph.videos.first() {
preview_data = self.download_video(&obj.url, Some(preview_data)).await?;
preview_data.video_width = obj.properties.get("width").and_then(|v| v.parse().ok());
preview_data.video_height = obj.properties.get("height").and_then(|v| v.parse().ok());
}
if let Some(obj) = html.opengraph.audios.first() {
preview_data = self.download_audio(&obj.url, Some(preview_data)).await?;
}
let props = html.opengraph.properties; let props = html.opengraph.properties;
/* use OpenGraph title/description, but fall back to HTML if not available */ /* use OpenGraph title/description, but fall back to HTML if not available */
preview_data.title = props.get("title").cloned().or(html.title); data.title = props.get("title").cloned().or(html.title);
preview_data.description = props.get("description").cloned().or(html.description); data.description = props.get("description").cloned().or(html.description);
Ok(preview_data) Ok(data)
} }
#[cfg(not(feature = "url_preview"))] #[cfg(not(feature = "url_preview"))]

View file

@ -1,9 +1,8 @@
use std::{cmp, collections::HashMap, future::ready}; use std::{cmp, collections::HashMap, future::ready};
use conduwuit::{ use conduwuit::{
Err, Event, Pdu, Result, debug, debug_info, debug_warn, err, error, info, Err, Event, Pdu, Result, debug, debug_info, debug_warn, error, info,
result::NotFound, result::NotFound,
trace,
utils::{ utils::{
IterStream, ReadyExt, IterStream, ReadyExt,
stream::{TryExpect, TryIgnore}, stream::{TryExpect, TryIgnore},
@ -58,7 +57,6 @@ pub(crate) async fn migrations(services: &Services) -> Result<()> {
} }
async fn fresh(services: &Services) -> Result<()> { async fn fresh(services: &Services) -> Result<()> {
info!("Creating new fresh database");
let db = &services.db; let db = &services.db;
services.globals.db.bump_database_version(DATABASE_VERSION); services.globals.db.bump_database_version(DATABASE_VERSION);
@ -68,18 +66,11 @@ async fn fresh(services: &Services) -> Result<()> {
db["global"].insert(b"retroactively_fix_bad_data_from_roomuserid_joined", []); db["global"].insert(b"retroactively_fix_bad_data_from_roomuserid_joined", []);
db["global"].insert(b"fix_referencedevents_missing_sep", []); db["global"].insert(b"fix_referencedevents_missing_sep", []);
db["global"].insert(b"fix_readreceiptid_readreceipt_duplicates", []); db["global"].insert(b"fix_readreceiptid_readreceipt_duplicates", []);
db["global"].insert(b"fix_corrupt_msc4133_fields", []);
db["global"].insert(b"populate_userroomid_leftstate_table", []);
db["global"].insert(b"fix_local_invite_state", []);
// Create the admin room and server user on first run // Create the admin room and server user on first run
info!("Creating admin room and server user"); crate::admin::create_admin_room(services).boxed().await?;
crate::admin::create_admin_room(services)
.boxed()
.await
.inspect_err(|e| error!("Failed to create admin room during db init: {e}"))?;
info!("Created new database with version {DATABASE_VERSION}"); warn!("Created new RocksDB database with version {DATABASE_VERSION}");
Ok(()) Ok(())
} }
@ -97,33 +88,19 @@ async fn migrate(services: &Services) -> Result<()> {
} }
if services.globals.db.database_version().await < 12 { if services.globals.db.database_version().await < 12 {
db_lt_12(services) db_lt_12(services).await?;
.await
.map_err(|e| err!("Failed to run v12 migrations: {e}"))?;
} }
// This migration can be reused as-is anytime the server-default rules are // This migration can be reused as-is anytime the server-default rules are
// updated. // updated.
if services.globals.db.database_version().await < 13 { if services.globals.db.database_version().await < 13 {
db_lt_13(services) db_lt_13(services).await?;
.await
.map_err(|e| err!("Failed to run v13 migrations: {e}"))?;
} }
if db["global"].get(b"feat_sha256_media").await.is_not_found() { if db["global"].get(b"feat_sha256_media").await.is_not_found() {
media::migrations::migrate_sha256_media(services) media::migrations::migrate_sha256_media(services).await?;
.await
.map_err(|e| err!("Failed to run SHA256 media migration: {e}"))?;
} else if config.media_startup_check { } else if config.media_startup_check {
info!("Starting media startup integrity check."); media::migrations::checkup_sha256_media(services).await?;
let now = std::time::Instant::now();
media::migrations::checkup_sha256_media(services)
.await
.map_err(|e| err!("Failed to verify media integrity: {e}"))?;
info!(
"Finished media startup integrity check in {} seconds.",
now.elapsed().as_secs_f32()
);
} }
if db["global"] if db["global"]
@ -131,12 +108,7 @@ async fn migrate(services: &Services) -> Result<()> {
.await .await
.is_not_found() .is_not_found()
{ {
info!("Running migration 'fix_bad_double_separator_in_state_cache'"); fix_bad_double_separator_in_state_cache(services).await?;
fix_bad_double_separator_in_state_cache(services)
.await
.map_err(|e| {
err!("Failed to run 'fix_bad_double_separator_in_state_cache' migration: {e}")
})?;
} }
if db["global"] if db["global"]
@ -144,15 +116,7 @@ async fn migrate(services: &Services) -> Result<()> {
.await .await
.is_not_found() .is_not_found()
{ {
info!("Running migration 'retroactively_fix_bad_data_from_roomuserid_joined'"); retroactively_fix_bad_data_from_roomuserid_joined(services).await?;
retroactively_fix_bad_data_from_roomuserid_joined(services)
.await
.map_err(|e| {
err!(
"Failed to run 'retroactively_fix_bad_data_from_roomuserid_joined' \
migration: {e}"
)
})?;
} }
if db["global"] if db["global"]
@ -161,12 +125,7 @@ async fn migrate(services: &Services) -> Result<()> {
.is_not_found() .is_not_found()
|| services.globals.db.database_version().await < 17 || services.globals.db.database_version().await < 17
{ {
info!("Running migration 'fix_referencedevents_missing_sep'"); fix_referencedevents_missing_sep(services).await?;
fix_referencedevents_missing_sep(services)
.await
.map_err(|e| {
err!("Failed to run 'fix_referencedevents_missing_sep' migration': {e}")
})?;
} }
if db["global"] if db["global"]
@ -175,12 +134,7 @@ async fn migrate(services: &Services) -> Result<()> {
.is_not_found() .is_not_found()
|| services.globals.db.database_version().await < 17 || services.globals.db.database_version().await < 17
{ {
info!("Running migration 'fix_readreceiptid_readreceipt_duplicates'"); fix_readreceiptid_readreceipt_duplicates(services).await?;
fix_readreceiptid_readreceipt_duplicates(services)
.await
.map_err(|e| {
err!("Failed to run 'fix_readreceiptid_readreceipt_duplicates' migration': {e}")
})?;
} }
if services.globals.db.database_version().await < 17 { if services.globals.db.database_version().await < 17 {
@ -193,10 +147,7 @@ async fn migrate(services: &Services) -> Result<()> {
.await .await
.is_not_found() .is_not_found()
{ {
info!("Running migration 'fix_corrupt_msc4133_fields'"); fix_corrupt_msc4133_fields(services).await?;
fix_corrupt_msc4133_fields(services)
.await
.map_err(|e| err!("Failed to run 'fix_corrupt_msc4133_fields' migration': {e}"))?;
} }
if services.globals.db.database_version().await < 18 { if services.globals.db.database_version().await < 18 {
@ -209,12 +160,7 @@ async fn migrate(services: &Services) -> Result<()> {
.await .await
.is_not_found() .is_not_found()
{ {
info!("Running migration 'populate_userroomid_leftstate_table'"); populate_userroomid_leftstate_table(services).await?;
populate_userroomid_leftstate_table(services)
.await
.map_err(|e| {
err!("Failed to run 'populate_userroomid_leftstate_table' migration': {e}")
})?;
} }
if db["global"] if db["global"]
@ -222,17 +168,14 @@ async fn migrate(services: &Services) -> Result<()> {
.await .await
.is_not_found() .is_not_found()
{ {
info!("Running migration 'fix_local_invite_state'"); fix_local_invite_state(services).await?;
fix_local_invite_state(services)
.await
.map_err(|e| err!("Failed to run 'fix_local_invite_state' migration': {e}"))?;
} }
assert_eq!( assert_eq!(
services.globals.db.database_version().await, services.globals.db.database_version().await,
DATABASE_VERSION, DATABASE_VERSION,
"Failed asserting local database version {} is equal to known latest continuwuity \ "Failed asserting local database version {} is equal to known latest conduwuit database \
database version {}", version {}",
services.globals.db.database_version().await, services.globals.db.database_version().await,
DATABASE_VERSION, DATABASE_VERSION,
); );
@ -427,7 +370,7 @@ async fn db_lt_13(services: &Services) -> Result<()> {
} }
async fn fix_bad_double_separator_in_state_cache(services: &Services) -> Result<()> { async fn fix_bad_double_separator_in_state_cache(services: &Services) -> Result<()> {
info!("Fixing bad double separator in state_cache roomuserid_joined"); warn!("Fixing bad double separator in state_cache roomuserid_joined");
let db = &services.db; let db = &services.db;
let roomuserid_joined = &db["roomuserid_joined"]; let roomuserid_joined = &db["roomuserid_joined"];
@ -471,7 +414,7 @@ async fn fix_bad_double_separator_in_state_cache(services: &Services) -> Result<
} }
async fn retroactively_fix_bad_data_from_roomuserid_joined(services: &Services) -> Result<()> { async fn retroactively_fix_bad_data_from_roomuserid_joined(services: &Services) -> Result<()> {
info!("Retroactively fixing bad data from broken roomuserid_joined"); warn!("Retroactively fixing bad data from broken roomuserid_joined");
let db = &services.db; let db = &services.db;
let _cork = db.cork_and_sync(); let _cork = db.cork_and_sync();
@ -561,7 +504,7 @@ async fn retroactively_fix_bad_data_from_roomuserid_joined(services: &Services)
} }
async fn fix_referencedevents_missing_sep(services: &Services) -> Result { async fn fix_referencedevents_missing_sep(services: &Services) -> Result {
info!("Fixing missing record separator between room_id and event_id in referencedevents"); warn!("Fixing missing record separator between room_id and event_id in referencedevents");
let db = &services.db; let db = &services.db;
let cork = db.cork_and_sync(); let cork = db.cork_and_sync();
@ -609,7 +552,7 @@ async fn fix_readreceiptid_readreceipt_duplicates(services: &Services) -> Result
type ArrayId = ArrayString<MAX_BYTES>; type ArrayId = ArrayString<MAX_BYTES>;
type Key<'a> = (&'a RoomId, u64, &'a UserId); type Key<'a> = (&'a RoomId, u64, &'a UserId);
info!("Fixing undeleted entries in readreceiptid_readreceipt..."); warn!("Fixing undeleted entries in readreceiptid_readreceipt...");
let db = &services.db; let db = &services.db;
let cork = db.cork_and_sync(); let cork = db.cork_and_sync();
@ -663,7 +606,7 @@ async fn fix_corrupt_msc4133_fields(services: &Services) -> Result {
use serde_json::{Value, from_slice}; use serde_json::{Value, from_slice};
type KeyVal<'a> = ((OwnedUserId, String), &'a [u8]); type KeyVal<'a> = ((OwnedUserId, String), &'a [u8]);
info!("Fixing corrupted `us.cloke.msc4175.tz` fields..."); warn!("Fixing corrupted `us.cloke.msc4175.tz` fields...");
let db = &services.db; let db = &services.db;
let cork = db.cork_and_sync(); let cork = db.cork_and_sync();
@ -803,18 +746,7 @@ async fn fix_local_invite_state(services: &Services) -> Result {
let fixed = userroomid_invitestate.stream() let fixed = userroomid_invitestate.stream()
// if they're a local user on this homeserver // if they're a local user on this homeserver
.try_filter(|((user_id, _), _): &KeyVal<'_>| ready(services.globals.user_is_local(user_id))) .try_filter(|((user_id, _), _): &KeyVal<'_>| ready(services.globals.user_is_local(user_id)))
.and_then(async |((user_id, room_id), stripped_state): KeyVal<'_>| Ok::<_, .and_then(async |((user_id, room_id), stripped_state): KeyVal<'_>| Ok::<_, conduwuit::Error>((user_id.to_owned(), room_id.to_owned(), stripped_state.deserialize()?)))
conduwuit::Error>((user_id.to_owned(), room_id.to_owned(), stripped_state.deserialize
().unwrap_or_else(|e| {
trace!("Failed to deserialize: {:?}", stripped_state.json());
warn!(
%user_id,
%room_id,
"Failed to deserialize stripped state for invite, removing from db: {e}"
);
userroomid_invitestate.del((user_id, room_id));
vec![]
}))))
.try_fold(0_usize, async |mut fixed, (user_id, room_id, stripped_state)| { .try_fold(0_usize, async |mut fixed, (user_id, room_id, stripped_state)| {
// and their invite state is None // and their invite state is None
if stripped_state.is_empty() if stripped_state.is_empty()

View file

@ -23,7 +23,6 @@ pub mod globals;
pub mod key_backups; pub mod key_backups;
pub mod media; pub mod media;
pub mod moderation; pub mod moderation;
pub mod password_reset;
pub mod presence; pub mod presence;
pub mod pusher; pub mod pusher;
pub mod registration_tokens; pub mod registration_tokens;

View file

@ -1,68 +0,0 @@
use std::{
sync::Arc,
time::{Duration, SystemTime},
};
use conduwuit::utils::{ReadyExt, stream::TryExpect};
use database::{Database, Deserialized, Json, Map};
use ruma::{OwnedUserId, UserId};
use serde::{Deserialize, Serialize};
pub(super) struct Data {
passwordresettoken_info: Arc<Map>,
}
#[derive(Debug, Serialize, Deserialize)]
pub struct ResetTokenInfo {
pub user: OwnedUserId,
pub issued_at: SystemTime,
}
impl ResetTokenInfo {
// one hour
const MAX_TOKEN_AGE: Duration = Duration::from_secs(60 * 60);
pub fn is_valid(&self) -> bool {
let now = SystemTime::now();
now.duration_since(self.issued_at)
.is_ok_and(|duration| duration < Self::MAX_TOKEN_AGE)
}
}
impl Data {
pub(super) fn new(db: &Arc<Database>) -> Self {
Self {
passwordresettoken_info: db["passwordresettoken_info"].clone(),
}
}
/// Associate a reset token with its info in the database.
pub(super) fn save_token(&self, token: &str, info: &ResetTokenInfo) {
self.passwordresettoken_info.raw_put(token, Json(info));
}
/// Lookup the info for a reset token.
pub(super) async fn lookup_token_info(&self, token: &str) -> Option<ResetTokenInfo> {
self.passwordresettoken_info
.get(token)
.await
.deserialized()
.ok()
}
/// Find a user's existing reset token, if any.
pub(super) async fn find_token_for_user(
&self,
user: &UserId,
) -> Option<(String, ResetTokenInfo)> {
self.passwordresettoken_info
.stream::<'_, String, ResetTokenInfo>()
.expect_ok()
.ready_find(|(_, info)| info.user == user)
.await
}
/// Remove a reset token.
pub(super) fn remove_token(&self, token: &str) { self.passwordresettoken_info.remove(token); }
}

View file

@ -1,120 +0,0 @@
mod data;
use std::{sync::Arc, time::SystemTime};
use conduwuit::{Err, Result, utils};
use data::{Data, ResetTokenInfo};
use ruma::OwnedUserId;
use crate::{Dep, globals, users};
pub const PASSWORD_RESET_PATH: &str = "/_continuwuity/account/reset_password";
pub const RESET_TOKEN_QUERY_PARAM: &str = "token";
const RESET_TOKEN_LENGTH: usize = 32;
pub struct Service {
db: Data,
services: Services,
}
struct Services {
users: Dep<users::Service>,
globals: Dep<globals::Service>,
}
#[derive(Debug)]
pub struct ValidResetToken {
pub token: String,
pub info: ResetTokenInfo,
}
impl crate::Service for Service {
fn build(args: crate::Args<'_>) -> Result<Arc<Self>> {
Ok(Arc::new(Self {
db: Data::new(args.db),
services: Services {
users: args.depend::<users::Service>("users"),
globals: args.depend::<globals::Service>("globals"),
},
}))
}
fn name(&self) -> &str { crate::service::make_name(std::module_path!()) }
}
impl Service {
	/// Generate a random string suitable to be used as a password reset token.
	#[must_use]
	pub fn generate_token_string() -> String { utils::random_string(RESET_TOKEN_LENGTH) }

	/// Issue a password reset token for `user_id`.
	///
	/// The user must be local, must not be the server user, must have the
	/// `password` origin, and must not be deactivated. Any previously issued
	/// token for the user is revoked first, so at most one token is
	/// outstanding per user.
	pub async fn issue_token(&self, user_id: OwnedUserId) -> Result<ValidResetToken> {
		if !self.services.globals.user_is_local(&user_id) {
			return Err!("Cannot issue a password reset token for remote user {user_id}");
		}
		if user_id == self.services.globals.server_user {
			return Err!("Cannot issue a password reset token for the server user");
		}

		// Accounts with a non-"password" origin are managed elsewhere and
		// cannot be reset here; a missing origin counts as "password".
		let origin = self
			.services
			.users
			.origin(&user_id)
			.await
			.unwrap_or_else(|_| "password".to_owned());
		if origin != "password" {
			return Err!("Cannot issue a password reset token for non-internal user {user_id}");
		}
		if self.services.users.is_deactivated(&user_id).await? {
			return Err!("Cannot issue a password reset token for deactivated user {user_id}");
		}

		// Revoke any token previously issued for this user.
		if let Some((stale_token, _)) = self.db.find_token_for_user(&user_id).await {
			self.db.remove_token(&stale_token);
		}

		let token = Self::generate_token_string();
		let info = ResetTokenInfo {
			user: user_id,
			issued_at: SystemTime::now(),
		};
		self.db.save_token(&token, &info);

		Ok(ValidResetToken { token, info })
	}

	/// Check if `token` represents a valid, non-expired password reset token.
	///
	/// An expired token is deleted from storage as a side effect of being
	/// looked up.
	pub async fn check_token(&self, token: &str) -> Option<ValidResetToken> {
		let info = self.db.lookup_token_info(token).await?;
		if !info.is_valid() {
			self.db.remove_token(token);
			return None;
		}

		Some(ValidResetToken { token: token.to_owned(), info })
	}

	/// Consume the supplied valid token, using it to change its user's
	/// password to `new_password`.
	///
	/// A token that expired between validation and consumption is silently
	/// ignored. The token is removed before the password change is applied.
	pub async fn consume_token(
		&self,
		ValidResetToken { token, info }: ValidResetToken,
		new_password: &str,
	) -> Result<()> {
		// Re-check validity: the token may have expired since check_token.
		if !info.is_valid() {
			return Ok(());
		}

		self.db.remove_token(&token);
		self.services
			.users
			.set_password(&info.user, Some(new_password))
			.await?;

		Ok(())
	}
}

View file

@ -7,7 +7,6 @@ pub mod metadata;
pub mod outlier; pub mod outlier;
pub mod pdu_metadata; pub mod pdu_metadata;
pub mod read_receipt; pub mod read_receipt;
pub mod roles;
pub mod search; pub mod search;
pub mod short; pub mod short;
pub mod spaces; pub mod spaces;
@ -32,7 +31,6 @@ pub struct Service {
pub outlier: Arc<outlier::Service>, pub outlier: Arc<outlier::Service>,
pub pdu_metadata: Arc<pdu_metadata::Service>, pub pdu_metadata: Arc<pdu_metadata::Service>,
pub read_receipt: Arc<read_receipt::Service>, pub read_receipt: Arc<read_receipt::Service>,
pub roles: Arc<roles::Service>,
pub search: Arc<search::Service>, pub search: Arc<search::Service>,
pub short: Arc<short::Service>, pub short: Arc<short::Service>,
pub spaces: Arc<spaces::Service>, pub spaces: Arc<spaces::Service>,

File diff suppressed because it is too large Load diff

View file

@ -1,204 +0,0 @@
use std::collections::{BTreeMap, HashSet};
use conduwuit_core::matrix::space_roles::RoleDefinition;
use super::{compute_user_power_level, roles_satisfy_requirements};
/// Build a role map from `(name, power_level)` pairs; each role gets a
/// synthetic "{name} role" description.
pub(super) fn make_roles(entries: &[(&str, Option<i64>)]) -> BTreeMap<String, RoleDefinition> {
	let mut roles = BTreeMap::new();
	for &(name, power_level) in entries {
		let definition = RoleDefinition {
			description: format!("{name} role"),
			power_level,
		};
		roles.insert(name.to_owned(), definition);
	}
	roles
}
/// Collect string slices into an owned `HashSet<String>`.
pub(super) fn make_set(items: &[&str]) -> HashSet<String> {
	let mut set = HashSet::with_capacity(items.len());
	for &item in items {
		set.insert(item.to_owned());
	}
	set
}
// -- compute_user_power_level -----------------------------------------------

// A single assigned role yields that role's power level.
#[test]
fn power_level_single_role() {
	let roles = make_roles(&[("admin", Some(100)), ("mod", Some(50))]);
	assert_eq!(compute_user_power_level(&roles, &make_set(&["admin"])), Some(100));
}

// With several assigned roles, the highest power level wins.
#[test]
fn power_level_multiple_roles_takes_highest() {
	let roles = make_roles(&[("admin", Some(100)), ("mod", Some(50)), ("helper", Some(25))]);
	assert_eq!(compute_user_power_level(&roles, &make_set(&["mod", "helper"])), Some(50));
}

// Roles with no power level contribute none.
#[test]
fn power_level_no_power_roles() {
	let roles = make_roles(&[("nsfw", None), ("vip", None)]);
	assert_eq!(compute_user_power_level(&roles, &make_set(&["nsfw", "vip"])), None);
}

// Mixing powered and unpowered roles yields the powered role's level.
#[test]
fn power_level_mixed_roles() {
	let roles = make_roles(&[("mod", Some(50)), ("nsfw", None)]);
	assert_eq!(compute_user_power_level(&roles, &make_set(&["mod", "nsfw"])), Some(50));
}

// A user with no assigned roles has no power level.
#[test]
fn power_level_no_roles_assigned() {
	let roles = make_roles(&[("admin", Some(100))]);
	assert_eq!(compute_user_power_level(&roles, &HashSet::new()), None);
}

// Assigned roles that are not defined in the space are ignored.
#[test]
fn power_level_unknown_role_ignored() {
	let roles = make_roles(&[("admin", Some(100))]);
	assert_eq!(compute_user_power_level(&roles, &make_set(&["nonexistent"])), None);
}
// -- roles_satisfy_requirements ---------------------------------------------

// Holding all required roles (plus extras) qualifies.
#[test]
fn qualifies_with_all_required_roles() {
	assert!(roles_satisfy_requirements(
		&make_set(&["nsfw", "vip"]),
		&make_set(&["nsfw", "vip", "extra"]),
	));
}

// Missing any one required role disqualifies.
#[test]
fn does_not_qualify_missing_one_role() {
	assert!(!roles_satisfy_requirements(&make_set(&["nsfw", "vip"]), &make_set(&["nsfw"]),));
}

// An empty requirement set is always satisfied.
#[test]
fn qualifies_with_no_requirements() {
	assert!(roles_satisfy_requirements(&HashSet::new(), &make_set(&["nsfw"])));
}

// Non-empty requirements are never satisfied by an empty role set.
#[test]
fn does_not_qualify_with_no_roles() {
	assert!(!roles_satisfy_requirements(&make_set(&["nsfw"]), &HashSet::new()));
}
// Multi-space scenarios

// Effective PL across spaces is the max of the per-space power levels.
#[test]
fn multi_space_highest_pl_wins() {
	let space_a_roles = make_roles(&[("mod", Some(50))]);
	let space_b_roles = make_roles(&[("admin", Some(100))]);
	let user_roles_a = make_set(&["mod"]);
	let user_roles_b = make_set(&["admin"]);
	let pl_a = compute_user_power_level(&space_a_roles, &user_roles_a);
	let pl_b = compute_user_power_level(&space_b_roles, &user_roles_b);
	let effective = [pl_a, pl_b].into_iter().flatten().max();
	assert_eq!(effective, Some(100));
}

// A space contributing no PL does not mask another space's PL.
#[test]
fn multi_space_one_space_has_no_pl() {
	let space_a_roles = make_roles(&[("nsfw", None)]);
	let space_b_roles = make_roles(&[("mod", Some(50))]);
	let user_roles_a = make_set(&["nsfw"]);
	let user_roles_b = make_set(&["mod"]);
	let pl_a = compute_user_power_level(&space_a_roles, &user_roles_a);
	let pl_b = compute_user_power_level(&space_b_roles, &user_roles_b);
	let effective = [pl_a, pl_b].into_iter().flatten().max();
	assert_eq!(effective, Some(50));
}

// If no space contributes a PL, there is no effective PL at all.
#[test]
fn multi_space_neither_has_pl() {
	let space_a_roles = make_roles(&[("nsfw", None)]);
	let space_b_roles = make_roles(&[("vip", None)]);
	let user_roles_a = make_set(&["nsfw"]);
	let user_roles_b = make_set(&["vip"]);
	let pl_a = compute_user_power_level(&space_a_roles, &user_roles_a);
	let pl_b = compute_user_power_level(&space_b_roles, &user_roles_b);
	let effective = [pl_a, pl_b].into_iter().flatten().max();
	assert_eq!(effective, None);
}

// Roles held in only one space still yield that space's PL.
#[test]
fn multi_space_user_only_in_one_space() {
	let space_a_roles = make_roles(&[("admin", Some(100))]);
	let space_b_roles = make_roles(&[("mod", Some(50))]);
	let user_roles_a = make_set(&["admin"]);
	let user_roles_b: HashSet<String> = HashSet::new();
	let pl_a = compute_user_power_level(&space_a_roles, &user_roles_a);
	let pl_b = compute_user_power_level(&space_b_roles, &user_roles_b);
	let effective = [pl_a, pl_b].into_iter().flatten().max();
	assert_eq!(effective, Some(100));
}

// Requirements are evaluated per space, independently.
#[test]
fn multi_space_qualifies_in_one_not_other() {
	let space_a_reqs = make_set(&["staff"]);
	let space_b_reqs = make_set(&["nsfw"]);
	let user_roles = make_set(&["nsfw"]);
	assert!(!roles_satisfy_requirements(&space_a_reqs, &user_roles));
	assert!(roles_satisfy_requirements(&space_b_reqs, &user_roles));
}

// After a role revoke, only spaces requiring the remaining roles qualify.
#[test]
fn multi_space_qualifies_after_role_revoke_via_other_space() {
	let space_a_reqs = make_set(&["nsfw"]);
	let space_b_reqs = make_set(&["vip"]);
	let user_roles_after_revoke = make_set(&["vip"]);
	assert!(!roles_satisfy_requirements(&space_a_reqs, &user_roles_after_revoke));
	assert!(roles_satisfy_requirements(&space_b_reqs, &user_roles_after_revoke));
}

// A room with requirements in one space and none in the other: only the
// unrestricted space qualifies.
#[test]
fn multi_space_room_has_reqs_in_one_space_only() {
	let space_a_reqs = make_set(&["admin"]);
	let space_b_reqs: HashSet<String> = HashSet::new();
	let user_roles = make_set(&["nsfw"]);
	assert!(!roles_satisfy_requirements(&space_a_reqs, &user_roles));
	assert!(roles_satisfy_requirements(&space_b_reqs, &user_roles));
}

// The user's roles satisfy no space's requirements.
#[test]
fn multi_space_no_qualification_anywhere() {
	let space_a_reqs = make_set(&["staff"]);
	let space_b_reqs = make_set(&["admin"]);
	let user_roles = make_set(&["nsfw"]);
	let qualifies_a = roles_satisfy_requirements(&space_a_reqs, &user_roles);
	let qualifies_b = roles_satisfy_requirements(&space_b_reqs, &user_roles);
	assert!(!qualifies_a);
	assert!(!qualifies_b);
	assert!(!(qualifies_a || qualifies_b));
}

// The same role name with different PLs per space: the higher PL wins.
#[test]
fn multi_space_same_role_different_pl() {
	let space_a_roles = make_roles(&[("mod", Some(50))]);
	let space_b_roles = make_roles(&[("mod", Some(75))]);
	let user_roles = make_set(&["mod"]);
	let pl_a = compute_user_power_level(&space_a_roles, &user_roles);
	let pl_b = compute_user_power_level(&space_b_roles, &user_roles);
	let effective = [pl_a, pl_b].into_iter().flatten().max();
	assert_eq!(effective, Some(75));
}

View file

@ -327,7 +327,7 @@ where
} }
}, },
| TimelineEventType::SpaceChild => | TimelineEventType::SpaceChild =>
if pdu.state_key().is_some() { if let Some(_state_key) = pdu.state_key() {
self.services self.services
.spaces .spaces
.roomid_spacehierarchy_cache .roomid_spacehierarchy_cache
@ -359,8 +359,6 @@ where
| _ => {}, | _ => {},
} }
self.services.roles.on_pdu_appended(room_id, &pdu);
// CONCERN: If we receive events with a relation out-of-order, we never write // CONCERN: If we receive events with a relation out-of-order, we never write
// their relation / thread. We need some kind of way to trigger when we receive // their relation / thread. We need some kind of way to trigger when we receive
// this event, and potentially a way to rebuild the table entirely. // this event, and potentially a way to rebuild the table entirely.

View file

@ -97,17 +97,6 @@ pub async fn build_and_append_pdu(
))); )));
} }
} }
if *pdu.kind() == TimelineEventType::RoomPowerLevels {
if let Ok(proposed) =
pdu.get_content::<ruma::events::room::power_levels::RoomPowerLevelsEventContent>()
{
self.services
.roles
.validate_pl_change(&room_id, pdu.sender(), &proposed)
.await?;
}
}
if *pdu.kind() == TimelineEventType::RoomCreate { if *pdu.kind() == TimelineEventType::RoomCreate {
trace!("Creating shortroomid for {room_id}"); trace!("Creating shortroomid for {room_id}");
self.services self.services

View file

@ -80,7 +80,6 @@ struct Services {
threads: Dep<rooms::threads::Service>, threads: Dep<rooms::threads::Service>,
search: Dep<rooms::search::Service>, search: Dep<rooms::search::Service>,
spaces: Dep<rooms::spaces::Service>, spaces: Dep<rooms::spaces::Service>,
roles: Dep<rooms::roles::Service>,
event_handler: Dep<rooms::event_handler::Service>, event_handler: Dep<rooms::event_handler::Service>,
} }
@ -113,7 +112,6 @@ impl crate::Service for Service {
threads: args.depend::<rooms::threads::Service>("rooms::threads"), threads: args.depend::<rooms::threads::Service>("rooms::threads"),
search: args.depend::<rooms::search::Service>("rooms::search"), search: args.depend::<rooms::search::Service>("rooms::search"),
spaces: args.depend::<rooms::spaces::Service>("rooms::spaces"), spaces: args.depend::<rooms::spaces::Service>("rooms::spaces"),
roles: args.depend::<rooms::roles::Service>("rooms::roles"),
event_handler: args event_handler: args
.depend::<rooms::event_handler::Service>("rooms::event_handler"), .depend::<rooms::event_handler::Service>("rooms::event_handler"),
}, },

View file

@ -228,7 +228,7 @@ async fn acquire_notary_result(&self, missing: &mut Batch, server_keys: ServerSi
self.add_signing_keys(server_keys.clone()).await; self.add_signing_keys(server_keys.clone()).await;
if let Some(key_ids) = missing.get_mut(server) { if let Some(key_ids) = missing.get_mut(server) {
key_ids.retain(|key_id| !key_exists(&server_keys, key_id)); key_ids.retain(|key_id| key_exists(&server_keys, key_id));
if key_ids.is_empty() { if key_ids.is_empty() {
missing.remove(server); missing.remove(server);
} }

View file

@ -1,7 +1,7 @@
use std::{any::Any, collections::BTreeMap, sync::Arc}; use std::{any::Any, collections::BTreeMap, sync::Arc};
use conduwuit::{ use conduwuit::{
Result, Server, SyncRwLock, debug, debug_info, error, info, trace, utils::stream::IterStream, Result, Server, SyncRwLock, debug, debug_info, info, trace, utils::stream::IterStream,
}; };
use database::Database; use database::Database;
use futures::{Stream, StreamExt, TryStreamExt}; use futures::{Stream, StreamExt, TryStreamExt};
@ -11,8 +11,8 @@ use crate::{
account_data, admin, announcements, antispam, appservice, client, config, emergency, account_data, admin, announcements, antispam, appservice, client, config, emergency,
federation, firstrun, globals, key_backups, federation, firstrun, globals, key_backups,
manager::Manager, manager::Manager,
media, moderation, password_reset, presence, pusher, registration_tokens, resolver, rooms, media, moderation, presence, pusher, registration_tokens, resolver, rooms, sending,
sending, server_keys, server_keys,
service::{self, Args, Map, Service}, service::{self, Args, Map, Service},
sync, transactions, uiaa, users, sync, transactions, uiaa, users,
}; };
@ -27,7 +27,6 @@ pub struct Services {
pub globals: Arc<globals::Service>, pub globals: Arc<globals::Service>,
pub key_backups: Arc<key_backups::Service>, pub key_backups: Arc<key_backups::Service>,
pub media: Arc<media::Service>, pub media: Arc<media::Service>,
pub password_reset: Arc<password_reset::Service>,
pub presence: Arc<presence::Service>, pub presence: Arc<presence::Service>,
pub pusher: Arc<pusher::Service>, pub pusher: Arc<pusher::Service>,
pub registration_tokens: Arc<registration_tokens::Service>, pub registration_tokens: Arc<registration_tokens::Service>,
@ -82,7 +81,6 @@ impl Services {
globals: build!(globals::Service), globals: build!(globals::Service),
key_backups: build!(key_backups::Service), key_backups: build!(key_backups::Service),
media: build!(media::Service), media: build!(media::Service),
password_reset: build!(password_reset::Service),
presence: build!(presence::Service), presence: build!(presence::Service),
pusher: build!(pusher::Service), pusher: build!(pusher::Service),
registration_tokens: build!(registration_tokens::Service), registration_tokens: build!(registration_tokens::Service),
@ -96,7 +94,6 @@ impl Services {
outlier: build!(rooms::outlier::Service), outlier: build!(rooms::outlier::Service),
pdu_metadata: build!(rooms::pdu_metadata::Service), pdu_metadata: build!(rooms::pdu_metadata::Service),
read_receipt: build!(rooms::read_receipt::Service), read_receipt: build!(rooms::read_receipt::Service),
roles: build!(rooms::roles::Service),
search: build!(rooms::search::Service), search: build!(rooms::search::Service),
short: build!(rooms::short::Service), short: build!(rooms::short::Service),
spaces: build!(rooms::spaces::Service), spaces: build!(rooms::spaces::Service),
@ -128,12 +125,10 @@ impl Services {
} }
pub async fn start(self: &Arc<Self>) -> Result<Arc<Self>> { pub async fn start(self: &Arc<Self>) -> Result<Arc<Self>> {
info!("Starting services..."); debug_info!("Starting services...");
self.admin.set_services(Some(Arc::clone(self)).as_ref()); self.admin.set_services(Some(Arc::clone(self)).as_ref());
super::migrations::migrations(self) super::migrations::migrations(self).await?;
.await
.inspect_err(|e| error!("Migrations failed: {e}"))?;
self.manager self.manager
.lock() .lock()
.await .await
@ -152,7 +147,7 @@ impl Services {
.await; .await;
} }
info!("Services startup complete."); debug_info!("Services startup complete.");
Ok(Arc::clone(self)) Ok(Arc::clone(self))
} }

View file

@ -181,11 +181,20 @@ pub async fn try_auth(
uiaainfo.completed.push(AuthType::Password); uiaainfo.completed.push(AuthType::Password);
}, },
| AuthData::ReCaptcha(r) => { | AuthData::ReCaptcha(r) => {
let Some(ref private_site_key) = self.services.config.recaptcha_private_site_key if self.services.config.recaptcha_private_site_key.is_none() {
else {
return Err!(Request(Forbidden("ReCaptcha is not configured."))); return Err!(Request(Forbidden("ReCaptcha is not configured.")));
}; }
match recaptcha_verify::verify_v3(private_site_key, r.response.as_str(), None).await { match recaptcha_verify::verify(
self.services
.config
.recaptcha_private_site_key
.as_ref()
.unwrap(),
r.response.as_str(),
None,
)
.await
{
| Ok(()) => { | Ok(()) => {
uiaainfo.completed.push(AuthType::ReCaptcha); uiaainfo.completed.push(AuthType::ReCaptcha);
}, },

View file

@ -20,25 +20,12 @@ crate-type = [
[dependencies] [dependencies]
conduwuit-build-metadata.workspace = true conduwuit-build-metadata.workspace = true
conduwuit-service.workspace = true conduwuit-service.workspace = true
conduwuit-core.workspace = true
async-trait.workspace = true
askama.workspace = true askama.workspace = true
axum.workspace = true axum.workspace = true
axum-extra.workspace = true
base64.workspace = true
futures.workspace = true futures.workspace = true
tracing.workspace = true tracing.workspace = true
rand.workspace = true rand.workspace = true
ruma.workspace = true
thiserror.workspace = true thiserror.workspace = true
tower-http.workspace = true
serde.workspace = true
memory-serve = "2.1.0"
validator = { version = "0.20.0", features = ["derive"] }
tower-sec-fetch = { version = "0.1.2", features = ["tracing"] }
[build-dependencies]
memory-serve = "2.1.0"
[lints] [lints]
workspace = true workspace = true

View file

@ -1,2 +0,0 @@
[general]
dirs = ["pages/templates"]

View file

@ -1 +0,0 @@
fn main() { memory_serve::load_directory("./pages/resources"); }

94
src/web/css/index.css Normal file
View file

@ -0,0 +1,94 @@
/* Design tokens. Light theme by default; dark overrides live in the
   nested prefers-color-scheme media query. */
:root {
  color-scheme: light;
  --font-stack: sans-serif;
  --background-color: #fff;
  --text-color: #000;
  /* Page and panel backgrounds (oklch: lightness chroma hue). */
  --bg: oklch(0.76 0.0854 317.27);
  --panel-bg: oklch(0.91 0.042 317.27);
  /* Lightness for the gradient wordmark; raised in dark mode. */
  --name-lightness: 0.45;

  @media (prefers-color-scheme: dark) {
    color-scheme: dark;
    --text-color: #fff;
    --bg: oklch(0.15 0.042 317.27);
    --panel-bg: oklch(0.24 0.03 317.27);
    --name-lightness: 0.8;
  }

  /* Brand gradient endpoint colors. */
  --c1: oklch(0.44 0.177 353.06);
  --c2: oklch(0.59 0.158 150.88);

  --normal-font-size: 1rem;
  --small-font-size: 0.8rem;
}

/* Center the page content in the viewport. */
body {
  color: var(--text-color);
  font-family: var(--font-stack);
  margin: 0;
  padding: 0;
  display: grid;
  place-items: center;
  min-height: 100vh;
}

html {
  background-color: var(--bg);
  /* Diagonal wash derived from --bg via relative color syntax. */
  background-image: linear-gradient(
    70deg,
    oklch(from var(--bg) l + 0.2 c h),
    oklch(from var(--bg) l - 0.2 c h)
  );
  font-size: 16px;
}

.panel {
  /* Fluid width, clamped, never wider than the viewport minus margins. */
  width: min(clamp(24rem, 12rem + 40vw, 48rem), calc(100vw - 3rem));
  border-radius: 15px;
  background-color: var(--panel-bg);
  padding-inline: 1.5rem;
  padding-block: 1rem;
  box-shadow: 0 0.25em 0.375em hsla(0, 0%, 0%, 0.1);
}

/* Very narrow screens: let the panel span nearly edge-to-edge. */
@media (max-width: 24rem) {
  .panel {
    padding-inline: 0.25rem;
    width: calc(100vw - 0.5rem);
    border-radius: 0;
    margin-block-start: 0.2rem;
  }

  main {
    height: 100%;
  }
}

footer {
  padding-inline: 0.25rem;
  height: max(fit-content, 2rem);
}

/* Gradient-filled wordmark, clipped to the text. */
.project-name {
  text-decoration: none;
  background: linear-gradient(
    130deg,
    oklch(from var(--c1) var(--name-lightness) c h),
    oklch(from var(--c2) var(--name-lightness) c h)
  );
  background-clip: text;
  color: transparent;
  filter: brightness(1.2);
}

b {
  color: oklch(from var(--c2) var(--name-lightness) c h);
}

.logo {
  width: 100%;
  height: 64px;
}

View file

@ -1,113 +1,86 @@
use std::any::Any;
use askama::Template; use askama::Template;
use axum::{ use axum::{
Router, Router,
extract::rejection::{FormRejection, QueryRejection}, extract::State,
http::{HeaderValue, StatusCode, header}, http::{StatusCode, header},
response::{Html, IntoResponse, Response}, response::{Html, IntoResponse, Response},
routing::get,
}; };
use conduwuit_build_metadata::{GIT_REMOTE_COMMIT_URL, GIT_REMOTE_WEB_URL, version_tag};
use conduwuit_service::state; use conduwuit_service::state;
use tower_http::{catch_panic::CatchPanicLayer, set_header::SetResponseHeaderLayer};
use tower_sec_fetch::SecFetchLayer;
use crate::pages::TemplateContext; pub fn build() -> Router<state::State> {
Router::<state::State>::new()
.route("/", get(index_handler))
.route("/_continuwuity/logo.svg", get(logo_handler))
}
mod pages; async fn index_handler(
State(services): State<state::State>,
) -> Result<impl IntoResponse, WebError> {
#[derive(Debug, Template)]
#[template(path = "index.html.j2")]
struct Index<'a> {
nonce: &'a str,
server_name: &'a str,
first_run: bool,
}
let nonce = rand::random::<u64>().to_string();
type State = state::State; let template = Index {
nonce: &nonce,
server_name: services.config.server_name.as_str(),
first_run: services.firstrun.is_first_run(),
};
Ok((
[(
header::CONTENT_SECURITY_POLICY,
format!("default-src 'nonce-{nonce}'; img-src 'self';"),
)],
Html(template.render()?),
))
}
const CATASTROPHIC_FAILURE: &str = "cat-astrophic failure! we couldn't even render the error template. \ async fn logo_handler() -> impl IntoResponse {
please contact the team @ https://continuwuity.org"; (
[(header::CONTENT_TYPE, "image/svg+xml")],
include_str!("templates/logo.svg").to_owned(),
)
}
#[derive(Debug, thiserror::Error)] #[derive(Debug, thiserror::Error)]
enum WebError { enum WebError {
#[error("Failed to validate form body: {0}")]
ValidationError(#[from] validator::ValidationErrors),
#[error("{0}")]
QueryRejection(#[from] QueryRejection),
#[error("{0}")]
FormRejection(#[from] FormRejection),
#[error("{0}")]
BadRequest(String),
#[error("This page does not exist.")]
NotFound,
#[error("Failed to render template: {0}")] #[error("Failed to render template: {0}")]
Render(#[from] askama::Error), Render(#[from] askama::Error),
#[error("{0}")]
InternalError(#[from] conduwuit_core::Error),
#[error("Request handler panicked! {0}")]
Panic(String),
} }
impl IntoResponse for WebError { impl IntoResponse for WebError {
fn into_response(self) -> Response { fn into_response(self) -> Response {
#[derive(Debug, Template)] #[derive(Debug, Template)]
#[template(path = "error.html.j2")] #[template(path = "error.html.j2")]
struct Error { struct Error<'a> {
error: WebError, nonce: &'a str,
status: StatusCode, err: WebError,
context: TemplateContext,
} }
let nonce = rand::random::<u64>().to_string();
let status = match &self { let status = match &self {
| Self::ValidationError(_) | Self::Render(_) => StatusCode::INTERNAL_SERVER_ERROR,
| Self::BadRequest(_)
| Self::QueryRejection(_)
| Self::FormRejection(_) => StatusCode::BAD_REQUEST,
| Self::NotFound => StatusCode::NOT_FOUND,
| _ => StatusCode::INTERNAL_SERVER_ERROR,
}; };
let tmpl = Error { nonce: &nonce, err: self };
let template = Error { if let Ok(body) = tmpl.render() {
error: self, (
status, status,
context: TemplateContext { [(
// Statically set false to prevent error pages from being indexed. header::CONTENT_SECURITY_POLICY,
allow_indexing: false, format!("default-src 'none' 'nonce-{nonce}';"),
}, )],
}; Html(body),
)
if let Ok(body) = template.render() { .into_response()
(status, Html(body)).into_response()
} else { } else {
(status, CATASTROPHIC_FAILURE).into_response() (status, "Something went wrong").into_response()
} }
} }
} }
pub fn build() -> Router<state::State> {
#[allow(clippy::wildcard_imports)]
use pages::*;
Router::new()
.merge(index::build())
.nest(
"/_continuwuity/",
Router::new()
.merge(resources::build())
.merge(password_reset::build())
.merge(debug::build())
.fallback(async || WebError::NotFound),
)
.layer(CatchPanicLayer::custom(|panic: Box<dyn Any + Send + 'static>| {
let details = if let Some(s) = panic.downcast_ref::<String>() {
s.clone()
} else if let Some(s) = panic.downcast_ref::<&str>() {
(*s).to_owned()
} else {
"(opaque panic payload)".to_owned()
};
WebError::Panic(details).into_response()
}))
.layer(SetResponseHeaderLayer::if_not_present(
header::CONTENT_SECURITY_POLICY,
HeaderValue::from_static("default-src 'self'; img-src 'self' data:;"),
))
.layer(SecFetchLayer::new(|policy| {
policy.allow_safe_methods().reject_missing_metadata();
}))
}

View file

@ -1,98 +0,0 @@
use askama::{Template, filters::HtmlSafe};
use validator::ValidationErrors;
/// A reusable form component with field validation.
///
/// Rendered from `_components/form.html.j2`; `validation_errors`, when
/// present, are displayed alongside the offending inputs.
// NOTE: removed the leftover `print = "code"` debug option from the
// template attribute — it dumps askama's generated code to stdout at
// every compilation and has no runtime effect.
#[derive(Debug, Template)]
#[template(path = "_components/form.html.j2")]
pub(crate) struct Form<'a> {
	/// Inputs rendered in declaration order.
	pub inputs: Vec<FormInput<'a>>,
	/// Validation failures from a prior submission, if any.
	pub validation_errors: Option<ValidationErrors>,
	/// Label for the submit button.
	pub submit_label: &'a str,
}
impl HtmlSafe for Form<'_> {}
/// An input element in a form component.
#[derive(Debug, Clone, Copy)]
pub(crate) struct FormInput<'a> {
	/// The field name of the input.
	pub id: &'static str,
	/// The `type` property of the input.
	pub input_type: &'a str,
	/// The contents of the input's label.
	pub label: &'a str,
	/// Whether the input is required. Defaults to `true`.
	pub required: bool,
	/// The autocomplete mode for the input. Defaults to `on`.
	pub autocomplete: &'a str,
	// This is a hack to make the form! macro's support for client-only fields
	// work properly. Client-only fields are specified in the macro without a type and aren't
	// included in the POST body or as a field in the generated struct.
	// To keep the field from being included in the POST body, its `name` property needs not to
	// be set in the template. Because of limitations of macro_rules!'s repetition feature, this
	// field needs to exist to allow the template to check if the field is client-only.
	#[doc(hidden)]
	pub type_name: Option<&'static str>,
}

/// Defaults for an input: required text field with empty id/label and
/// empty autocomplete, not client-only.
impl Default for FormInput<'_> {
	fn default() -> Self {
		Self {
			id: "",
			input_type: "text",
			label: "",
			required: true,
			autocomplete: "",
			type_name: None,
		}
	}
}
/// Generate a deserializable struct which may be turned into a [`Form`]
/// for inclusion in another template.
///
/// Each `name: Type where { props }` entry becomes both a struct field
/// (deserialized from the POST body) and a [`FormInput`] in the built
/// form; omitting the type makes the field client-only (rendered but not
/// part of the generated struct).
#[macro_export]
macro_rules! form {
	(
		$(#[$struct_meta:meta])*
		struct $struct_name:ident {
			$(
				$(#[$field_meta:meta])*
				$name:ident$(: $type:ty)? where { $($prop:ident: $value:expr),* }
			),*
			submit: $submit_label:expr
		}
	) => {
		#[derive(Debug, serde::Deserialize, validator::Validate)]
		$(#[$struct_meta])*
		struct $struct_name {
			$(
				$(#[$field_meta])*
				$(pub $name: $type,)?
			)*
		}
		impl $struct_name {
			/// Generate a [`Form`] which matches the shape of this struct.
			#[allow(clippy::needless_update)]
			fn build(validation_errors: Option<validator::ValidationErrors>) -> $crate::pages::components::form::Form<'static> {
				$crate::pages::components::form::Form {
					inputs: vec![
						$(
							$crate::pages::components::form::FormInput {
								id: stringify!($name),
								$(type_name: Some(stringify!($type)),)?
								$($prop: $value),*,
								..Default::default()
							},
						)*
					],
					validation_errors,
					submit_label: $submit_label,
				}
			}
		}
	};
}

View file

@ -1,71 +0,0 @@
use askama::{Template, filters::HtmlSafe};
use base64::Engine;
use conduwuit_core::result::FlatOk;
use conduwuit_service::Services;
use ruma::UserId;
pub(super) mod form;
/// What to show inside an avatar: a single-character initial fallback, or
/// an image source string.
#[derive(Debug)]
pub(super) enum AvatarType<'a> {
	Initial(char),
	Image(&'a str),
}

/// Avatar component rendered from `_components/avatar.html.j2`.
#[derive(Debug, Template)]
#[template(path = "_components/avatar.html.j2")]
pub(super) struct Avatar<'a> {
	pub(super) avatar_type: AvatarType<'a>,
}
impl HtmlSafe for Avatar<'_> {}

/// User card component showing id, display name, and avatar.
#[derive(Debug, Template)]
#[template(path = "_components/user_card.html.j2")]
pub(super) struct UserCard<'a> {
	pub user_id: &'a UserId,
	pub display_name: Option<String>,
	pub avatar_src: Option<String>,
}
impl HtmlSafe for UserCard<'_> {}

impl<'a> UserCard<'a> {
	/// Build a card for a local user, fetching their display name and
	/// inlining their avatar (if any) as a base64 `data:` URI.
	pub(super) async fn for_local_user(services: &Services, user_id: &'a UserId) -> Self {
		let display_name = services.users.displayname(user_id).await.ok();
		let avatar_src = async {
			let avatar_url = services.users.avatar_url(user_id).await.ok()?;
			let avatar_mxc = avatar_url.parts().ok()?;
			let file = services.media.get(&avatar_mxc).await.flat_ok()?;
			Some(format!(
				"data:{};base64,{}",
				file.content_type
					.unwrap_or_else(|| "application/octet-stream".to_owned()),
				file.content
					.map(|content| base64::prelude::BASE64_STANDARD.encode(content))
					.unwrap_or_default(),
			))
		}
		.await;
		Self { user_id, display_name, avatar_src }
	}

	/// Pick the avatar: image if present, else the first character of the
	/// display name, else the first character of the localpart.
	// NOTE(review): the final unwrap assumes a non-empty localpart — TODO
	// confirm that is guaranteed upstream.
	fn avatar(&'a self) -> Avatar<'a> {
		let avatar_type = if let Some(ref avatar_src) = self.avatar_src {
			AvatarType::Image(avatar_src)
		} else if let Some(initial) = self
			.display_name
			.as_ref()
			.and_then(|display_name| display_name.chars().next())
		{
			AvatarType::Initial(initial)
		} else {
			AvatarType::Initial(self.user_id.localpart().chars().next().unwrap())
		};
		Avatar { avatar_type }
	}
}

View file

@ -1,17 +0,0 @@
use std::convert::Infallible;
use axum::{Router, routing::get};
use conduwuit_core::Error;
use crate::WebError;
/// Debug routes that deliberately fail, for exercising the panic handler
/// and the error page.
pub(crate) fn build() -> Router<crate::State> {
	Router::new()
		.route("/_debug/panic", get(async || -> Infallible { panic!("Guru meditation error") }))
		.route(
			"/_debug/error",
			get(async || -> WebError {
				Error::Err(std::borrow::Cow::Borrowed("Guru meditation error")).into()
			}),
		)
}

View file

@ -1,28 +0,0 @@
use askama::Template;
use axum::{Router, extract::State, response::IntoResponse, routing::get};
use crate::{WebError, template};
/// Routes for the landing page, served at both `/` and `/_continuwuity/`.
pub(crate) fn build() -> Router<crate::State> {
	Router::new()
		.route("/", get(index_handler))
		.route("/_continuwuity/", get(index_handler))
}
/// Render the landing page with the server name and first-run status.
async fn index_handler(
	State(services): State<crate::State>,
) -> Result<impl IntoResponse, WebError> {
	template! {
		struct Index<'a> use "index.html.j2" {
			server_name: &'a str,
			first_run: bool
		}
	}

	Ok(Index::new(
		&services,
		services.globals.server_name().as_str(),
		services.firstrun.is_first_run(),
	)
	.into_response())
}

View file

@ -1,53 +0,0 @@
mod components;
pub(super) mod debug;
pub(super) mod index;
pub(super) mod password_reset;
pub(super) mod resources;
/// Data shared by every page template (embedded as the `context` field).
#[derive(Debug)]
pub(crate) struct TemplateContext {
	/// Whether search engines may index the page; sourced from the
	/// `allow_web_indexing` config option.
	pub allow_indexing: bool,
}

impl From<&crate::State> for TemplateContext {
	fn from(state: &crate::State) -> Self {
		Self {
			allow_indexing: state.config.allow_web_indexing,
		}
	}
}

/// Declare an askama page-template struct that always carries a
/// [`TemplateContext`], plus a `new` constructor and an `IntoResponse`
/// impl that renders it (converting render failures into `WebError`).
#[macro_export]
macro_rules! template {
	(
		struct $name:ident $(<$lifetime:lifetime>)? use $path:literal {
			$($field_name:ident: $field_type:ty),*
		}
	) => {
		#[derive(Debug, askama::Template)]
		#[template(path = $path)]
		struct $name$(<$lifetime>)? {
			context: $crate::pages::TemplateContext,
			$($field_name: $field_type,)*
		}
		impl$(<$lifetime>)? $name$(<$lifetime>)? {
			fn new(state: &$crate::State, $($field_name: $field_type,)*) -> Self {
				Self {
					context: state.into(),
					$($field_name,)*
				}
			}
		}
		#[allow(single_use_lifetimes)]
		impl$(<$lifetime>)? axum::response::IntoResponse for $name$(<$lifetime>)? {
			fn into_response(self) -> axum::response::Response {
				match self.render() {
					Ok(rendered) => axum::response::Html(rendered).into_response(),
					Err(err) => $crate::WebError::from(err).into_response()
				}
			}
		}
	};
}

View file

@ -1,120 +0,0 @@
use askama::Template;
use axum::{
Router,
extract::{
Query, State,
rejection::{FormRejection, QueryRejection},
},
http::StatusCode,
response::{IntoResponse, Response},
routing::get,
};
use serde::Deserialize;
use validator::Validate;
use crate::{
WebError, form,
pages::components::{UserCard, form::Form},
template,
};
/// Error shown when the reset token is missing, unknown, or expired.
const INVALID_TOKEN_ERROR: &str = "Invalid reset token. Your reset link may have expired.";

/// Query parameters of the reset page: the opaque reset token.
#[derive(Deserialize)]
struct PasswordResetQuery {
	token: String,
}

template! {
	struct PasswordReset<'a> use "password_reset.html.j2" {
		user_card: UserCard<'a>,
		body: PasswordResetBody
	}
}

/// Page body: either the reset form (possibly carrying validation errors)
/// or a success message.
#[derive(Debug)]
enum PasswordResetBody {
	Form(Form<'static>),
	Success,
}

form! {
	struct PasswordResetForm {
		#[validate(length(min = 1, message = "Password cannot be empty"))]
		new_password: String where {
			input_type: "password",
			label: "New password",
			autocomplete: "new-password"
		},
		#[validate(must_match(other = "new_password", message = "Passwords must match"))]
		confirm_new_password: String where {
			input_type: "password",
			label: "Confirm new password",
			autocomplete: "new-password"
		}
		submit: "Reset Password"
	}
}

/// Routes for the reset page: GET shows the form, POST applies it.
pub(crate) fn build() -> Router<crate::State> {
	Router::new()
		.route("/account/reset_password", get(get_password_reset).post(post_password_reset))
}

/// Render the reset page with `reset_form` for a valid token, or return a
/// bad-request error when the token does not check out.
async fn password_reset_form(
	services: crate::State,
	query: PasswordResetQuery,
	reset_form: Form<'static>,
) -> Result<impl IntoResponse, WebError> {
	let Some(token) = services.password_reset.check_token(&query.token).await else {
		return Err(WebError::BadRequest(INVALID_TOKEN_ERROR.to_owned()));
	};
	let user_card = UserCard::for_local_user(&services, &token.info.user).await;
	Ok(PasswordReset::new(&services, user_card, PasswordResetBody::Form(reset_form))
		.into_response())
}

/// GET handler: validate the token and show an empty reset form.
async fn get_password_reset(
	State(services): State<crate::State>,
	query: Result<Query<PasswordResetQuery>, QueryRejection>,
) -> Result<impl IntoResponse, WebError> {
	let Query(query) = query?;
	password_reset_form(services, query, PasswordResetForm::build(None)).await
}

/// POST handler: validate the submitted form, re-check the token (it may
/// have expired since the form was rendered), then consume it to set the
/// new password. On validation failure, re-render the form with the
/// errors under a 400 status.
async fn post_password_reset(
	State(services): State<crate::State>,
	query: Result<Query<PasswordResetQuery>, QueryRejection>,
	form: Result<axum::Form<PasswordResetForm>, FormRejection>,
) -> Result<Response, WebError> {
	let Query(query) = query?;
	let axum::Form(form) = form?;
	match form.validate() {
		| Ok(()) => {
			let Some(token) = services.password_reset.check_token(&query.token).await else {
				return Err(WebError::BadRequest(INVALID_TOKEN_ERROR.to_owned()));
			};
			let user_id = token.info.user.clone();
			services
				.password_reset
				.consume_token(token, &form.new_password)
				.await?;
			let user_card = UserCard::for_local_user(&services, &user_id).await;
			Ok(PasswordReset::new(&services, user_card, PasswordResetBody::Success)
				.into_response())
		},
		| Err(err) => Ok((
			StatusCode::BAD_REQUEST,
			password_reset_form(services, query, PasswordResetForm::build(Some(err))).await,
		)
			.into_response()),
	}
}

View file

@ -1,9 +0,0 @@
use axum::Router;
/// Build the router serving the bundled static assets under
/// `/resources/` (stylesheets, logo, …).
pub(crate) fn build() -> Router<crate::State> {
	Router::new().nest(
		"/resources/",
		// memory_serve::load!() embeds the asset directory into the binary
		// at compile time; no index file is served for the bare directory.
		#[allow(unused_qualifications)]
		memory_serve::load!().index_file(None).into_router(),
	)
}

View file

@ -1,185 +0,0 @@
/* Base stylesheet shared by all web pages: theme tokens, page shell,
   typography, and form controls. Uses native CSS nesting and relative
   color syntax (oklch(from …)). */

:root {
  color-scheme: light;
  --font-stack: sans-serif;
  --background-color: #fff;
  --text-color: #000;
  --secondary: #666;
  /* Page and panel surface colors. */
  --bg: oklch(0.76 0.0854 317.27);
  --panel-bg: oklch(0.91 0.042 317.27);
  /* Accent colors; other rules derive tints from these via oklch(from …). */
  --c1: oklch(0.44 0.177 353.06);
  --c2: oklch(0.59 0.158 150.88);
  --name-lightness: 0.45;
  --background-lightness: 0.9;
  /* Three layered radial washes of the accents over the base background. */
  --background-gradient:
    radial-gradient(42.12% 56.13% at 100% 0%, oklch(from var(--c2) var(--background-lightness) c h) 0%, #fff0 100%),
    radial-gradient(42.01% 79.63% at 52.86% 0%, #d9ff5333 0%, #fff0 100%),
    radial-gradient(79.67% 58.09% at 0% 0%, oklch(from var(--c1) var(--background-lightness) c h) 0%, #fff0 100%);
  --normal-font-size: 1rem;
  --small-font-size: 0.8rem;
  --border-radius-sm: 5px;
  --border-radius-lg: 15px;

  /* Dark-scheme overrides for the tokens above. */
  @media (prefers-color-scheme: dark) {
    color-scheme: dark;
    --text-color: #fff;
    --secondary: #888;
    --bg: oklch(0.15 0.042 317.27);
    --panel-bg: oklch(0.24 0.03 317.27);
    --name-lightness: 0.8;
    --background-lightness: 0.2;
    --background-gradient:
      radial-gradient(
        42.12% 56.13% at 100% 0%,
        oklch(from var(--c2) var(--background-lightness) c h) 0%,
        #12121200 100%
      ),
      radial-gradient(55.81% 87.78% at 48.37% 0%, #000 0%, #12121200 89.55%),
      radial-gradient(
        122.65% 88.24% at 0% 0%,
        oklch(from var(--c1) var(--background-lightness) c h) 0%,
        #12121200 100%
      );
  }
}

* {
  box-sizing: border-box;
}

/* Center the main content both ways within the viewport. */
body {
  display: grid;
  margin: 0;
  padding: 0;
  place-items: center;
  min-height: 100vh;
  color: var(--text-color);
  font-family: var(--font-stack);
  line-height: 1.5;
}

html {
  background-color: var(--bg);
  background-image: var(--background-gradient);
  font-size: var(--normal-font-size);
}

footer {
  padding-inline: 0.25rem;
  /* NOTE(review): `fit-content` is not a valid <length> operand of max()
     in most browsers — confirm this declaration resolves as intended. */
  height: max(fit-content, 2rem);

  .logo {
    width: 100%;
    height: 64px;
  }
}

p {
  margin: 1rem 0;
}

/* Emphasis doubles as the accent-colored highlight for key phrases. */
em {
  color: oklch(from var(--c2) var(--name-lightness) c h);
  font-weight: bold;
  font-style: normal;
}

small {
  color: var(--secondary);
}

/* Per-field validation error text shown under form labels. */
small.error {
  display: block;
  color: red;
  font-size: small;
  font-style: italic;
  margin-bottom: 0.5rem;
}

/* Main content card; width scales with the viewport between fixed bounds. */
.panel {
  --preferred-width: 12rem + 40dvw;
  --maximum-width: 48rem;
  width: min(clamp(24rem, var(--preferred-width), var(--maximum-width)), calc(100dvw - 3rem));
  border-radius: var(--border-radius-lg);
  background-color: var(--panel-bg);
  padding-inline: 1.5rem;
  padding-block: 1rem;
  box-shadow: 0 0.25em 0.375em hsla(0, 0%, 0%, 0.1);

  /* Narrow variant for single-column forms. */
  &.narrow {
    --preferred-width: 12rem + 20dvw;
    --maximum-width: 36rem;

    input, button {
      width: 100%;
    }
  }
}

label {
  display: block;
}

input, button {
  display: inline-block;
  padding: 0.5em;
  margin-bottom: 0.5em;
  font-size: inherit;
  font-family: inherit;
  /* NOTE(review): white text also applies to inputs in the light scheme
     (transparent background) — verify readability is intended there. */
  color: white;
  background-color: transparent;
  border: none;
  border-radius: var(--border-radius-sm);
}

input {
  border: 2px solid var(--secondary);

  /* Swap the border for an accent outline while focused. */
  &:focus-visible {
    outline: 2px solid var(--c1);
    border-color: transparent;
  }
}

button {
  background-color: var(--c1);
  transition: opacity .2s;

  &:enabled:hover {
    opacity: 0.8;
    cursor: pointer;
  }
}

h1 {
  margin-top: 0;
  margin-bottom: 0.67em;
}

/* Small phones: let the panel bleed edge-to-edge. */
@media (max-width: 425px) {
  main {
    padding-block-start: 2rem;
    width: 100%;
  }

  .panel {
    border-radius: 0;
    width: 100%;
  }
}

/* Below desktop widths, form controls always span the full row. */
@media (max-width: 799px) {
  input, button {
    width: 100%;
  }
}

View file

@ -1,44 +0,0 @@
/* Shared components: the circular avatar and the user-card row. */

.avatar {
  --avatar-size: 56px;
  display: inline-block;
  aspect-ratio: 1 / 1;
  inline-size: var(--avatar-size);
  border-radius: 50%;
  text-align: center;
  text-transform: uppercase;
  /* Scale the initial and its line box with the avatar diameter. */
  font-size: calc(var(--avatar-size) * 0.5);
  font-weight: 700;
  line-height: calc(var(--avatar-size) - 2px);
  /* Lighter tint of the accent, drawn on the accent itself. */
  color: oklch(from var(--c1) calc(l + 0.2) c h);
  background-color: var(--c1);
}

.user-card {
  display: flex;
  flex-direction: row;
  align-items: center;
  gap: 16px;
  /* Slightly darker than the surrounding panel. */
  background-color: oklch(from var(--panel-bg) calc(l - 0.05) c h);
  border-radius: var(--border-radius-lg);
  padding: 16px;

  .info {
    flex: 1 1;

    p {
      margin: 0;

      &.display-name {
        font-weight: 700;
      }

      /* The second line (the user id) is de-emphasised. */
      &:nth-of-type(2) {
        color: var(--secondary);
      }
    }
  }
}

View file

@ -1,13 +0,0 @@
/* Error-page styles: the decorative ascii-art cat and its heading row. */

.k10y {
  font-family: monospace;
  font-size: x-small;
  font-weight: 700;
  /* Nudge the art so it overlaps the heading beside it. */
  transform: translate(1rem, 1.6rem);
  color: var(--secondary);
  user-select: none;
}

h1 {
  display: flex;
  align-items: center;
}

View file

@ -1,11 +0,0 @@
/* Gradient-filled project wordmark link. */
.project-name {
  text-decoration: none;
  /* Paint a c1→c2 gradient through the glyphs via background-clip: text
     with a transparent text color. */
  background: linear-gradient(
    130deg,
    oklch(from var(--c1) var(--name-lightness) c h),
    oklch(from var(--c2) var(--name-lightness) c h)
  );
  background-clip: text;
  color: transparent;
  filter: brightness(1.2);
}

View file

@ -1,43 +0,0 @@
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<!-- Created with Inkscape (http://www.inkscape.org/) -->
<svg
width="447.99823"
height="447.99823"
viewBox="0 0 447.99823 447.99823"
version="1.1"
id="svg1"
xml:space="preserve"
xmlns="http://www.w3.org/2000/svg"
xmlns:svg="http://www.w3.org/2000/svg"><defs
id="defs1" /><g
id="layer1"
transform="translate(-32.000893,-32.000893)"><circle
style="fill:#9b4bd4;fill-opacity:1;stroke:#000000;stroke-width:10;stroke-dasharray:none;stroke-opacity:1"
id="path1"
cy="256"
cx="256"
r="176" /><path
style="fill:#de6cd3;fill-opacity:1;stroke:#000000;stroke-width:10;stroke-linejoin:round;stroke-dasharray:none;stroke-opacity:1"
d="m 41,174 69,36 C 135,126 175,102 226,94 l -12,31 62,-44 -69,-44 15,30 C 128,69 84,109 41,172 Z"
id="path7" /><path
style="fill:#de6cd3;fill-opacity:1;stroke:#000000;stroke-width:10;stroke-linejoin:round;stroke-dasharray:none;stroke-opacity:1"
d="m 338,41 -36,69 c 84,25 108,65 116,116 l -31,-12 44,62 44,-69 -30,15 C 443,128 403,84 340,41 Z"
id="path6" /><path
style="fill:#de6cd3;fill-opacity:1;stroke:#000000;stroke-width:10;stroke-linejoin:round;stroke-dasharray:none;stroke-opacity:1"
d="m 471,338 -69,-36 c -25,84 -65,108 -116,116 l 12,-31 -62,44 69,44 -15,-30 c 94,-2 138,-42 181,-105 z"
id="path8" /><path
style="fill:#de6cd3;fill-opacity:1;stroke:#000000;stroke-width:10;stroke-linejoin:round;stroke-dasharray:none;stroke-opacity:1"
d="m 174,471 36,-69 C 126,377 102,337 94,286 l 31,12 -44,-62 -44,69 30,-15 c 2,94 42,138 105,181 z"
id="path9" /><g
id="g15"
transform="translate(-5.4157688e-4)"><path
style="fill:none;stroke:#000000;stroke-width:10;stroke-linecap:round;stroke-linejoin:round;stroke-dasharray:none;stroke-opacity:1;paint-order:normal"
d="m 155.45977,224.65379 c -7.25909,13.49567 -7.25909,26.09161 -6.35171,39.58729 0.90737,11.69626 12.7034,24.29222 24.49943,26.09164 21.77727,3.59884 28.12898,-20.69338 28.12898,-20.69338 0,0 4.53693,-15.29508 5.4443,-40.48699"
id="path11" /><path
style="fill:none;stroke:#000000;stroke-width:10;stroke-linecap:round;stroke-linejoin:round;stroke-dasharray:none;stroke-opacity:1;paint-order:normal"
d="m 218.96706,278.05399 c 3.00446,17.12023 7.52704,24.88918 19.22704,28.48918 9,2.7 22.5,-4.5 22.5,-16.2 0.9,21.6 17.1,17.1 19.8,17.1 11.7,-1.8 18.9,-14.4 16.2,-30.6"
id="path12" /><path
style="fill:none;stroke:#000000;stroke-width:10;stroke-linecap:round;stroke-linejoin:round;stroke-dasharray:none;stroke-opacity:1;paint-order:normal"
d="m 305.6941,230.94317 c 1.8,27 6.3,40.5 6.3,40.5 8.1,27 28.8,19.8 28.8,19.8 18.9,-7.2 22.5,-24.3 22.5,-30.6 0,-25.2 -6.3,-35.1 -6.3,-35.1"
id="path13" /></g></g></svg>

Before

Width:  |  Height:  |  Size: 2.8 KiB

View file

@ -1,6 +0,0 @@
{#- Avatar fragment: renders either an uppercase initial in a colored
    circle or an <img> when an avatar image source is available. -#}
{% match avatar_type %}
{% when AvatarType::Initial with (initial) %}
<span class="avatar" role="img">{{ initial }}</span>
{% when AvatarType::Image with (src) %}
<img class="avatar" src="{{ src }}">
{% endmatch %}

View file

@ -1,30 +0,0 @@
{#- Generic form renderer: one <p> per declared input with its label and
    any per-field validation errors, followed by a submit button. -#}
<form method="post">
{#- A missing error set renders the same as an empty one. -#}
{% let validation_errors = validation_errors.clone().unwrap_or_default() %}
{% let field_errors = validation_errors.field_errors() %}
{% for input in inputs %}
<p>
<label for="{{ input.id }}">{{ input.label }}</label>
{#- field_errors is keyed by Cow<str>, so wrap the id for the lookup. -#}
{% let name = std::borrow::Cow::from(*input.id) %}
{% if let Some(errors) = field_errors.get(name) %}
{% for error in errors %}
<small class="error">
{% if let Some(message) = error.message %}
{{ message }}
{% else %}
{#- Fallback when the validator supplied no human-readable message. -#}
Mysterious validation error <code>{{ error.code }}</code>!
{% endif %}
</small>
{% endfor %}
{% endif %}
{#- Only inputs with a type_name get a name attribute and are submitted;
    presumably the others are display-only — verify against the form DSL. -#}
<input
type="{{ input.input_type }}"
id="{{ input.id }}"
autocomplete="{{ input.autocomplete }}"
{% if input.type_name.is_some() %}name="{{ input.id }}"{% endif %}
{% if input.required %}required{% endif %}
>
</p>
{% endfor %}
<button type="submit">{{ submit_label }}</button>
</form>

View file

@ -1,9 +0,0 @@
{#- User card: avatar beside the optional display name and the user id. -#}
<div class="user-card">
{{ avatar() }}
<div class="info">
{% if let Some(display_name) = display_name %}
<p class="display-name">{{ display_name }}</p>
{% endif %}
<p class="user_id">{{ user_id }}</p>
</div>
</div>

View file

@ -1,41 +0,0 @@
{% extends "_layout.html.j2" %}
{%- block head -%}
<link rel="stylesheet" href="/_continuwuity/resources/error.css">
{%- endblock -%}
{%- block title -%}
🐈 Request Error
{%- endblock -%}
{%- block content -%}
{#- Decorative cat ascii art; aria-hidden keeps it out of screen readers.
    Whitespace below is significant (rendered inside <pre>). -#}
<pre class="k10y" aria-hidden>
        
      |  _  _|
     ` ミ_x
     /      |
    /  ヽ   ノ
    │  | | |
 / ̄|   | | |
 | ( ̄ヽ__ヽ_)__)
 \二つ
</pre>
<div class="panel">
<h1>
{#- Map the response status to a short heading; anything that is neither
    404 nor 500 is reported as a bad request. -#}
{% if status == StatusCode::NOT_FOUND %}
Not found
{% else if status == StatusCode::INTERNAL_SERVER_ERROR %}
Internal server error
{% else %}
Bad request
{% endif %}
</h1>
{% if status == StatusCode::INTERNAL_SERVER_ERROR %}
<p>Please <a href="https://forgejo.ellis.link/continuwuation/continuwuity/issues/new">submit a bug report</a> 🥺</p>
{% endif %}
<pre><code>{{ error }}</code></pre>
</div>
{%- endblock -%}

View file

@ -1,18 +0,0 @@
{% extends "_layout.html.j2" %}
{%- block title -%}
Reset Password
{%- endblock -%}
{%- block content -%}
<div class="panel narrow">
<h1>Reset Password</h1>
{{ user_card }}
{% match body %}
{% when PasswordResetBody::Form(reset_form) %}
{{ reset_form }}
{% when PasswordResetBody::Success %}
<p>Your password has been reset successfully.</p>
{% endmatch %}
</div>
{%- endblock -%}

View file

@ -5,24 +5,23 @@
<meta charset="UTF-8" /> <meta charset="UTF-8" />
<title>{% block title %}Continuwuity{% endblock %}</title> <title>{% block title %}Continuwuity{% endblock %}</title>
<meta name="viewport" content="width=device-width, initial-scale=1" /> <meta name="viewport" content="width=device-width, initial-scale=1" />
{%- if !context.allow_indexing %}
<meta name="robots" content="noindex" />
{%- endif %}
<link rel="icon" href="/_continuwuity/resources/logo.svg"> <link rel="icon" href="/_continuwuity/logo.svg">
<link rel="stylesheet" href="/_continuwuity/resources/common.css"> <style type="text/css" nonce="{{ nonce }}">
<link rel="stylesheet" href="/_continuwuity/resources/components.css"> /*<![CDATA[*/
{% block head %}{% endblock %} {{ include_str !("css/index.css") | safe }}
/*]]>*/
</style>
</head> </head>
<body> <body>
<main>{%~ block content %}{% endblock ~%}</main> <main>{%~ block content %}{% endblock ~%}</main>
{%~ block footer ~%} {%~ block footer ~%}
<footer> <footer>
<img class="logo" src="/_continuwuity/resources/logo.svg"> <img class="logo" src="/_continuwuity/logo.svg">
<p>Powered by <a href="https://continuwuity.org">Continuwuity</a> {{ env!("CARGO_PKG_VERSION") }} <p>Powered by <a href="https://continuwuity.org">Continuwuity</a> {{ env!("CARGO_PKG_VERSION") }}
{%~ if let Some(version_info) = conduwuit_build_metadata::version_tag() ~%} {%~ if let Some(version_info) = self::version_tag() ~%}
{%~ if let Some(url) = conduwuit_build_metadata::GIT_REMOTE_COMMIT_URL.or(conduwuit_build_metadata::GIT_REMOTE_WEB_URL) ~%} {%~ if let Some(url) = GIT_REMOTE_COMMIT_URL.or(GIT_REMOTE_WEB_URL) ~%}
(<a href="{{ url }}">{{ version_info }}</a>) (<a href="{{ url }}">{{ version_info }}</a>)
{%~ else ~%} {%~ else ~%}
({{ version_info }}) ({{ version_info }})

View file

@ -0,0 +1,20 @@
{% extends "_layout.html.j2" %}
{%- block title -%}
Server Error
{%- endblock -%}
{%- block content -%}
<h1>
{%- match err -%}
{% else -%} 500: Internal Server Error
{%- endmatch -%}
</h1>
{%- match err -%}
{% when WebError::Render(err) -%}
<pre>{{ err }}</pre>
{% else -%} <p>An error occurred</p>
{%- endmatch -%}
{%- endblock -%}

View file

@ -1,9 +1,4 @@
{% extends "_layout.html.j2" %} {% extends "_layout.html.j2" %}
{%- block head -%}
<link rel="stylesheet" href="/_continuwuity/resources/index.css">
{%- endblock -%}
{%- block content -%} {%- block content -%}
<div class="panel"> <div class="panel">
<h1> <h1>
@ -11,7 +6,7 @@
</h1> </h1>
<p>Continuwuity is successfully installed and working.</p> <p>Continuwuity is successfully installed and working.</p>
{%- if first_run %} {%- if first_run %}
<p>To get started, <em>check the server logs</em> for instructions on how to create the first account.</p> <p>To get started, <b>check the server logs</b> for instructions on how to create the first account.</p>
<p>For support, take a look at the <a href="https://continuwuity.org/introduction">documentation</a> or join the <a href="https://matrix.to/#/#continuwuity:continuwuity.org?via=continuwuity.org&via=ellis.link&via=explodie.org&via=matrix.org">Continuwuity Matrix room</a>.</p> <p>For support, take a look at the <a href="https://continuwuity.org/introduction">documentation</a> or join the <a href="https://matrix.to/#/#continuwuity:continuwuity.org?via=continuwuity.org&via=ellis.link&via=explodie.org&via=matrix.org">Continuwuity Matrix room</a>.</p>
{%- else %} {%- else %}
<p>To get started, <a href="https://matrix.org/ecosystem/clients">choose a client</a> and connect to <code>{{ server_name }}</code>.</p> <p>To get started, <a href="https://matrix.org/ecosystem/clients">choose a client</a> and connect to <code>{{ server_name }}</code>.</p>

1
src/web/templates/logo.svg Symbolic link
View file

@ -0,0 +1 @@
../../../docs/public/assets/logo.svg