fix: Apply client-requested timeout to federated key queries

Also parallelised federation calls in related functions
This commit is contained in:
timedout 2026-01-03 16:05:05 +00:00
parent 6c61b3ec5b
commit bc426e1bfc
No known key found for this signature in database
GPG key ID: 0FA334385D0B689F
7 changed files with 81 additions and 33 deletions

2
changelog.d/1261.bugfix Normal file
View file

@ -0,0 +1,2 @@
The client-requested timeout parameter is now applied to E2EE key lookups and claims. Related federation requests are
now also made concurrently. Contributed by @nex.

View file

@ -465,7 +465,7 @@ pub(super) async fn force_join_list_of_local_users(
if server_admins.is_empty() {
return Err!("There are no admins set for this server.");
};
}
let (room_id, servers) = self
.services
@ -580,7 +580,7 @@ pub(super) async fn force_join_all_local_users(
if server_admins.is_empty() {
return Err!("There are no admins set for this server.");
};
}
let (room_id, servers) = self
.services

View file

@ -1,7 +1,16 @@
use std::collections::{BTreeMap, HashMap, HashSet};
use std::{
collections::{BTreeMap, HashMap, HashSet},
ops::Add,
time::Duration,
};
use axum::extract::State;
use conduwuit::{Err, Error, Result, debug, debug_warn, err, result::NotFound, utils};
use conduwuit::{
Err, Error, Result, debug, debug_warn, err,
result::NotFound,
utils,
utils::{IterStream, stream::WidebandExt},
};
use conduwuit_service::{Services, users::parse_master_key};
use futures::{StreamExt, stream::FuturesUnordered};
use ruma::{
@ -134,6 +143,7 @@ pub(crate) async fn get_keys_route(
&body.device_keys,
|u| u == sender_user,
true, // Always allow local users to see device names of other local users
body.timeout.unwrap_or(Duration::from_secs(10)),
)
.await
}
@ -145,7 +155,12 @@ pub(crate) async fn claim_keys_route(
State(services): State<crate::State>,
body: Ruma<claim_keys::v3::Request>,
) -> Result<claim_keys::v3::Response> {
claim_keys_helper(&services, &body.one_time_keys).await
claim_keys_helper(
&services,
&body.one_time_keys,
body.timeout.unwrap_or(Duration::from_secs(10)),
)
.await
}
/// # `POST /_matrix/client/r0/keys/device_signing/upload`
@ -421,10 +436,12 @@ pub(crate) async fn get_keys_helper<F>(
device_keys_input: &BTreeMap<OwnedUserId, Vec<OwnedDeviceId>>,
allowed_signatures: F,
include_display_names: bool,
timeout: Duration,
) -> Result<get_keys::v3::Response>
where
F: Fn(&UserId) -> bool + Send + Sync,
{
let deadline = tokio::time::Instant::now().add(timeout);
let mut master_keys = BTreeMap::new();
let mut self_signing_keys = BTreeMap::new();
let mut user_signing_keys = BTreeMap::new();
@ -512,9 +529,10 @@ where
let mut failures = BTreeMap::new();
let mut futures: FuturesUnordered<_> = get_over_federation
let mut futures = get_over_federation
.into_iter()
.map(|(server, vec)| async move {
.stream()
.wide_filter_map(|(server, vec)| async move {
let mut device_keys_input_fed = BTreeMap::new();
for (user_id, keys) in vec {
device_keys_input_fed.insert(user_id.to_owned(), keys.clone());
@ -522,17 +540,23 @@ where
let request =
federation::keys::get_keys::v1::Request { device_keys: device_keys_input_fed };
let response = tokio::time::timeout_at(
deadline,
services.sending.send_federation_request(server, request),
)
.await
// Need to flatten the Result<Result<V, E>, E> into Result<V, E>
.map_err(|_| err!(Request(Unknown("Timeout when getting keys over federation."))))
.and_then(|res| res);
let response = services
.sending
.send_federation_request(server, request)
.await;
(server, response)
Some((server, response))
})
.collect();
.then(async |v| v)
.collect::<FuturesUnordered<_>>()
.await
.into_iter();
while let Some((server, response)) = futures.next().await {
while let Some((server, response)) = futures.next() {
match response {
| Ok(response) => {
for (user, master_key) in response.master_keys {
@ -564,8 +588,9 @@ where
self_signing_keys.extend(response.self_signing_keys);
device_keys.extend(response.device_keys);
},
| _ => {
failures.insert(server.to_string(), json!({}));
| Err(e) => {
// Extra branch is needed because Elapsed != Error (types)
failures.insert(server.to_string(), json!({ "error": e.to_string() }));
},
}
}
@ -608,7 +633,9 @@ fn add_unsigned_device_display_name(
pub(crate) async fn claim_keys_helper(
services: &Services,
one_time_keys_input: &BTreeMap<OwnedUserId, BTreeMap<OwnedDeviceId, OneTimeKeyAlgorithm>>,
timeout: Duration,
) -> Result<claim_keys::v3::Response> {
let deadline = tokio::time::Instant::now().add(timeout);
let mut one_time_keys = BTreeMap::new();
let mut get_over_federation = BTreeMap::new();
@ -638,26 +665,34 @@ pub(crate) async fn claim_keys_helper(
let mut failures = BTreeMap::new();
let mut futures: FuturesUnordered<_> = get_over_federation
let mut futures = get_over_federation
.into_iter()
.map(|(server, vec)| async move {
.stream()
.wide_filter_map(|(server, vec)| async move {
let mut one_time_keys_input_fed = BTreeMap::new();
for (user_id, keys) in vec {
one_time_keys_input_fed.insert(user_id.clone(), keys.clone());
}
(
server,
services
.sending
.send_federation_request(server, federation::keys::claim_keys::v1::Request {
let response = tokio::time::timeout_at(
deadline,
services.sending.send_federation_request(
server,
federation::keys::claim_keys::v1::Request {
one_time_keys: one_time_keys_input_fed,
})
.await,
},
),
)
.await
.map_err(|_| err!(Request(Unknown("Timeout when claiming keys over federation."))))
.and_then(|res| res);
Some((server, response))
})
.collect();
.then(async |v| v)
.collect::<FuturesUnordered<_>>()
.await
.into_iter();
while let Some((server, response)) = futures.next().await {
while let Some((server, response)) = futures.next() {
match response {
| Ok(keys) => {
one_time_keys.extend(keys.one_time_keys);

View file

@ -72,7 +72,7 @@ pub(crate) async fn well_known_support(
if contacts.is_empty() {
let admin_users = services.admin.get_admins().await;
for user_id in admin_users.iter() {
for user_id in &admin_users {
if *user_id == services.globals.server_user {
continue;
}

View file

@ -1,3 +1,5 @@
use std::time::Duration;
use axum::extract::State;
use conduwuit::{Error, Result};
use futures::{FutureExt, StreamExt, TryFutureExt};
@ -96,6 +98,7 @@ pub(crate) async fn get_keys_route(
&body.device_keys,
|u| Some(u.server_name()) == body.origin.as_deref(),
services.globals.allow_device_name_federation(),
Duration::from_secs(0),
)
.await?;
@ -124,7 +127,8 @@ pub(crate) async fn claim_keys_route(
));
}
let result = claim_keys_helper(&services, &body.one_time_keys).await?;
let result =
claim_keys_helper(&services, &body.one_time_keys, Duration::from_secs(0)).await?;
Ok(claim_keys::v1::Response { one_time_keys: result.one_time_keys })
}

View file

@ -188,7 +188,7 @@ pub async fn revoke_admin(&self, user_id: &UserId) -> Result {
warn!(
"Revoking the admin status of {user_id} will not work correctly as they are within \
the admins_list config."
)
);
}
let Ok(room_id) = self.get_admin_room().await else {

View file

@ -406,13 +406,20 @@ impl Service {
/// Checks whether a given user is an admin of this server
pub async fn user_is_admin(&self, user_id: &UserId) -> bool {
if self.services.server.config.admins_list.contains(&user_id.to_owned()) {
if self
.services
.server
.config
.admins_list
.contains(&user_id.to_owned())
{
return true;
}
if self.services.server.config.admins_from_room {
if let Ok(admin_room) = self.get_admin_room().await {
return self.services
return self
.services
.state_cache
.is_joined(user_id, &admin_room)
.await;