Compare commits

..

No commits in common. "next" and "master" have entirely different histories.
next ... master

68 changed files with 1842 additions and 2335 deletions

4
.envrc
View file

@ -1,5 +1 @@
#!/usr/bin/env bash
use flake use flake
PATH_add bin

3
.gitignore vendored
View file

@ -68,6 +68,3 @@ cached_target
# Direnv cache # Direnv cache
/.direnv /.direnv
# Gitlab CI cache
/.gitlab-ci.d

View file

@ -1,180 +1,244 @@
stages: stages:
- ci - build
- artifacts - build docker image
- publish - test
- upload artifacts
variables: variables:
# Makes some things print in color # Make GitLab CI go fast:
TERM: ansi GIT_SUBMODULE_STRATEGY: recursive
FF_USE_FASTZIP: 1
CACHE_COMPRESSION_LEVEL: fastest
before_script: # --------------------------------------------------------------------- #
# Enable nix-command and flakes # Create and publish docker image #
- if command -v nix > /dev/null; then echo "experimental-features = nix-command flakes" >> /etc/nix/nix.conf; fi # --------------------------------------------------------------------- #
# Add our own binary cache .docker-shared-settings:
- if command -v nix > /dev/null; then echo "extra-substituters = https://nix.computer.surgery/conduit" >> /etc/nix/nix.conf; fi stage: "build docker image"
- if command -v nix > /dev/null; then echo "extra-trusted-public-keys = conduit:ZGAf6P6LhNvnoJJ3Me3PRg7tlLSrPxcQ2RiE5LIppjo=" >> /etc/nix/nix.conf; fi needs: []
tags: [ "docker" ]
# Add crane binary cache variables:
- if command -v nix > /dev/null; then echo "extra-substituters = https://crane.cachix.org" >> /etc/nix/nix.conf; fi # Docker in Docker:
- if command -v nix > /dev/null; then echo "extra-trusted-public-keys = crane.cachix.org-1:8Scfpmn9w+hGdXH/Q9tTLiYAE/2dnJYRJP7kl80GuRk=" >> /etc/nix/nix.conf; fi DOCKER_BUILDKIT: 1
image:
# Add nix-community binary cache name: docker.io/docker
- if command -v nix > /dev/null; then echo "extra-substituters = https://nix-community.cachix.org" >> /etc/nix/nix.conf; fi
- if command -v nix > /dev/null; then echo "extra-trusted-public-keys = nix-community.cachix.org-1:mB9FSh9qf2dCimDSUo8Zy7bkq5CX+/rkCWyvRCYg3Fs=" >> /etc/nix/nix.conf; fi
# Install direnv and nix-direnv
- if command -v nix > /dev/null; then nix-env -iA nixpkgs.direnv nixpkgs.nix-direnv; fi
# Allow .envrc
- if command -v nix > /dev/null; then direnv allow; fi
# Set CARGO_HOME to a cacheable path
- export CARGO_HOME="$(git rev-parse --show-toplevel)/.gitlab-ci.d/cargo"
ci:
stage: ci
image: nixos/nix:2.19.2
script:
- direnv exec . engage
cache:
key: nix
paths:
- target
- .gitlab-ci.d
static:x86_64-unknown-linux-musl:
stage: artifacts
image: nixos/nix:2.19.2
script:
# Push artifacts and build requirements to binary cache
- ./bin/nix-build-and-cache .#static-x86_64-unknown-linux-musl
# Make the output less difficult to find
- cp result/bin/conduit conduit
artifacts:
paths:
- conduit
static:aarch64-unknown-linux-musl:
stage: artifacts
image: nixos/nix:2.19.2
script:
# Push artifacts and build requirements to binary cache
- ./bin/nix-build-and-cache .#static-aarch64-unknown-linux-musl
# Make the output less difficult to find
- cp result/bin/conduit conduit
artifacts:
paths:
- conduit
# Note that although we have an `oci-image-x86_64-unknown-linux-musl` output,
# we don't build it because it would be largely redundant to this one since it's
# all containerized anyway.
oci-image:x86_64-unknown-linux-gnu:
stage: artifacts
image: nixos/nix:2.19.2
script:
# Push artifacts and build requirements to binary cache
#
# Since the OCI image package is based on the binary package, this has the
# fun side effect of uploading the normal binary too. Conduit users who are
# deploying with Nix can leverage this fact by adding our binary cache to
# their systems.
- ./bin/nix-build-and-cache .#oci-image
# Make the output less difficult to find
- cp result oci-image-amd64.tar.gz
artifacts:
paths:
- oci-image-amd64.tar.gz
oci-image:aarch64-unknown-linux-musl:
stage: artifacts
needs:
# Wait for the static binary job to finish before starting so we don't have
# to build that twice for no reason
- static:aarch64-unknown-linux-musl
image: nixos/nix:2.19.2
script:
# Push artifacts and build requirements to binary cache
- ./bin/nix-build-and-cache .#oci-image-aarch64-unknown-linux-musl
# Make the output less difficult to find
- cp result oci-image-arm64v8.tar.gz
artifacts:
paths:
- oci-image-arm64v8.tar.gz
debian:x86_64-unknown-linux-gnu:
stage: artifacts
# See also `rust-toolchain.toml`
image: rust:1.75.0
script:
- apt-get update && apt-get install -y --no-install-recommends libclang-dev
- cargo install cargo-deb
- cargo deb
# Make the output less difficult to find
- mv target/debian/*.deb conduit.deb
artifacts:
paths:
- conduit.deb
cache:
key: debian
paths:
- target
- .gitlab-ci.d
.push-oci-image:
stage: publish
image: docker:25.0.0
services: services:
- docker:25.0.0-dind - name: docker.io/docker:dind
variables: alias: docker
IMAGE_SUFFIX_AMD64: amd64
IMAGE_SUFFIX_ARM64V8: arm64v8
script: script:
- docker load -i oci-image-amd64.tar.gz - apk add openssh-client
- IMAGE_ID_AMD64=$(docker images -q conduit:next) - eval $(ssh-agent -s)
- docker load -i oci-image-arm64v8.tar.gz - mkdir -p ~/.ssh && chmod 700 ~/.ssh
- IMAGE_ID_ARM64V8=$(docker images -q conduit:next) - printf "Host *\n\tStrictHostKeyChecking no\n\n" >> ~/.ssh/config
# Tag and push the architecture specific images - sh .gitlab/setup-buildx-remote-builders.sh
- docker tag $IMAGE_ID_AMD64 $IMAGE_NAME:$CI_COMMIT_SHA-$IMAGE_SUFFIX_AMD64 # Authorize against this project's own image registry:
- docker tag $IMAGE_ID_ARM64V8 $IMAGE_NAME:$CI_COMMIT_SHA-$IMAGE_SUFFIX_ARM64V8 - docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY
- docker push $IMAGE_NAME:$CI_COMMIT_SHA-$IMAGE_SUFFIX_AMD64 # Build multiplatform image and push to temporary tag:
- docker push $IMAGE_NAME:$CI_COMMIT_SHA-$IMAGE_SUFFIX_ARM64V8 - >
# Tag the multi-arch image docker buildx build
- docker manifest create $IMAGE_NAME:$CI_COMMIT_SHA --amend $IMAGE_NAME:$CI_COMMIT_SHA-$IMAGE_SUFFIX_AMD64 --amend $IMAGE_NAME:$CI_COMMIT_SHA-$IMAGE_SUFFIX_ARM64V8 --platform "linux/arm/v7,linux/arm64,linux/amd64"
- docker manifest push $IMAGE_NAME:$CI_COMMIT_SHA --pull
# Tag and push the git ref --tag "$CI_REGISTRY_IMAGE/temporary-ci-images:$CI_JOB_ID"
- docker manifest create $IMAGE_NAME:$CI_COMMIT_REF_NAME --amend $IMAGE_NAME:$CI_COMMIT_SHA-$IMAGE_SUFFIX_AMD64 --amend $IMAGE_NAME:$CI_COMMIT_SHA-$IMAGE_SUFFIX_ARM64V8 --push
- docker manifest push $IMAGE_NAME:$CI_COMMIT_REF_NAME --provenance=false
# Tag git tags as 'latest' --file "Dockerfile" .
- | # Build multiplatform image to deb stage and extract their .deb files:
if [[ -n "$CI_COMMIT_TAG" ]]; then - >
docker manifest create $IMAGE_NAME:latest --amend $IMAGE_NAME:$CI_COMMIT_SHA-$IMAGE_SUFFIX_AMD64 --amend $IMAGE_NAME:$CI_COMMIT_SHA-$IMAGE_SUFFIX_ARM64V8 docker buildx build
docker manifest push $IMAGE_NAME:latest --platform "linux/arm/v7,linux/arm64,linux/amd64"
fi --target "packager-result"
dependencies: --output="type=local,dest=/tmp/build-output"
- oci-image:x86_64-unknown-linux-gnu --provenance=false
- oci-image:aarch64-unknown-linux-musl --file "Dockerfile" .
only: # Build multiplatform image to binary stage and extract their binaries:
- next - >
- master docker buildx build
- tags --platform "linux/arm/v7,linux/arm64,linux/amd64"
--target "builder-result"
--output="type=local,dest=/tmp/build-output"
--provenance=false
--file "Dockerfile" .
# Copy to GitLab container registry:
- >
docker buildx imagetools create
--tag "$CI_REGISTRY_IMAGE/$TAG"
--tag "$CI_REGISTRY_IMAGE/$TAG-bullseye"
--tag "$CI_REGISTRY_IMAGE/$TAG-commit-$CI_COMMIT_SHORT_SHA"
"$CI_REGISTRY_IMAGE/temporary-ci-images:$CI_JOB_ID"
# if DockerHub credentials exist, also copy to dockerhub:
- if [ -n "${DOCKER_HUB}" ]; then docker login -u "$DOCKER_HUB_USER" -p "$DOCKER_HUB_PASSWORD" "$DOCKER_HUB"; fi
- >
if [ -n "${DOCKER_HUB}" ]; then
docker buildx imagetools create
--tag "$DOCKER_HUB_IMAGE/$TAG"
--tag "$DOCKER_HUB_IMAGE/$TAG-bullseye"
--tag "$DOCKER_HUB_IMAGE/$TAG-commit-$CI_COMMIT_SHORT_SHA"
"$CI_REGISTRY_IMAGE/temporary-ci-images:$CI_JOB_ID"
; fi
- mv /tmp/build-output ./
artifacts:
paths:
- "./build-output/"
oci-image:push-gitlab: docker:next:
extends: .push-oci-image extends: .docker-shared-settings
rules:
- if: '$BUILD_SERVER_SSH_PRIVATE_KEY && $CI_COMMIT_BRANCH == "next"'
variables: variables:
IMAGE_NAME: $CI_REGISTRY_IMAGE/matrix-conduit TAG: "matrix-conduit:next"
before_script:
- docker login -u $CI_REGISTRY_USER -p $CI_REGISTRY_PASSWORD $CI_REGISTRY
oci-image:push-dockerhub: docker:master:
extends: .push-oci-image extends: .docker-shared-settings
rules:
- if: '$BUILD_SERVER_SSH_PRIVATE_KEY && $CI_COMMIT_BRANCH == "master"'
variables: variables:
IMAGE_NAME: matrixconduit/matrix-conduit TAG: "matrix-conduit:latest"
docker:tags:
extends: .docker-shared-settings
rules:
- if: "$BUILD_SERVER_SSH_PRIVATE_KEY && $CI_COMMIT_TAG"
variables:
TAG: "matrix-conduit:$CI_COMMIT_TAG"
docker build debugging:
extends: .docker-shared-settings
rules:
- if: "$CI_MERGE_REQUEST_TITLE =~ /.*[Dd]ocker.*/"
variables:
TAG: "matrix-conduit-docker-tests:latest"
# --------------------------------------------------------------------- #
# Run tests #
# --------------------------------------------------------------------- #
cargo check:
stage: test
image: docker.io/rust:1.70.0-bullseye
needs: []
interruptible: true
before_script: before_script:
- docker login -u $DOCKER_HUB_USER -p $DOCKER_HUB_PASSWORD - "rustup show && rustc --version && cargo --version" # Print version info for debugging
- apt-get update && apt-get -y --no-install-recommends install libclang-dev # dependency for rocksdb
script:
- cargo check
.test-shared-settings:
stage: "test"
needs: []
image: "registry.gitlab.com/jfowl/conduit-containers/rust-with-tools:latest"
tags: ["docker"]
variables:
CARGO_INCREMENTAL: "false" # https://matklad.github.io/2021/09/04/fast-rust-builds.html#ci-workflow
interruptible: true
test:cargo:
extends: .test-shared-settings
before_script:
- apt-get update && apt-get -y --no-install-recommends install libclang-dev # dependency for rocksdb
script:
- rustc --version && cargo --version # Print version info for debugging
- "cargo test --color always --workspace --verbose --locked --no-fail-fast"
test:clippy:
extends: .test-shared-settings
allow_failure: true
before_script:
- rustup component add clippy
- apt-get update && apt-get -y --no-install-recommends install libclang-dev # dependency for rocksdb
script:
- rustc --version && cargo --version # Print version info for debugging
- "cargo clippy --color always --verbose --message-format=json | gitlab-report -p clippy > $CI_PROJECT_DIR/gl-code-quality-report.json"
artifacts:
when: always
reports:
codequality: gl-code-quality-report.json
test:format:
extends: .test-shared-settings
before_script:
- rustup component add rustfmt
script:
- cargo fmt --all -- --check
test:audit:
extends: .test-shared-settings
allow_failure: true
script:
- cargo audit --color always || true
- cargo audit --stale --json | gitlab-report -p audit > gl-sast-report.json
artifacts:
when: always
reports:
sast: gl-sast-report.json
test:dockerlint:
stage: "test"
needs: []
image: "ghcr.io/hadolint/hadolint@sha256:6c4b7c23f96339489dd35f21a711996d7ce63047467a9a562287748a03ad5242" # 2.8.0-alpine
interruptible: true
script:
- hadolint --version
# First pass: Print for CI log:
- >
hadolint
--no-fail --verbose
./Dockerfile
# Then output the results into a json for GitLab to pretty-print this in the MR:
- >
hadolint
--format gitlab_codeclimate
--failure-threshold error
./Dockerfile > dockerlint.json
artifacts:
when: always
reports:
codequality: dockerlint.json
paths:
- dockerlint.json
rules:
- if: '$CI_COMMIT_REF_NAME != "master"'
changes:
- docker/*Dockerfile
- Dockerfile
- .gitlab-ci.yml
- if: '$CI_COMMIT_REF_NAME == "master"'
- if: '$CI_COMMIT_REF_NAME == "next"'
# --------------------------------------------------------------------- #
# Store binaries as package so they have download urls #
# --------------------------------------------------------------------- #
# DISABLED FOR NOW, NEEDS TO BE FIXED AT A LATER TIME:
#publish:package:
# stage: "upload artifacts"
# needs:
# - "docker:tags"
# rules:
# - if: "$CI_COMMIT_TAG"
# image: curlimages/curl:latest
# tags: ["docker"]
# variables:
# GIT_STRATEGY: "none" # Don't need a clean copy of the code, we just operate on artifacts
# script:
# - 'BASE_URL="${CI_API_V4_URL}/projects/${CI_PROJECT_ID}/packages/generic/conduit-${CI_COMMIT_REF_SLUG}/build-${CI_PIPELINE_ID}"'
# - 'curl --header "JOB-TOKEN: $CI_JOB_TOKEN" --upload-file build-output/linux_amd64/conduit "${BASE_URL}/conduit-x86_64-unknown-linux-gnu"'
# - 'curl --header "JOB-TOKEN: $CI_JOB_TOKEN" --upload-file build-output/linux_arm_v7/conduit "${BASE_URL}/conduit-armv7-unknown-linux-gnu"'
# - 'curl --header "JOB-TOKEN: $CI_JOB_TOKEN" --upload-file build-output/linux_arm64/conduit "${BASE_URL}/conduit-aarch64-unknown-linux-gnu"'
# - 'curl --header "JOB-TOKEN: $CI_JOB_TOKEN" --upload-file build-output/linux_amd64/conduit.deb "${BASE_URL}/conduit-x86_64-unknown-linux-gnu.deb"'
# - 'curl --header "JOB-TOKEN: $CI_JOB_TOKEN" --upload-file build-output/linux_arm_v7/conduit.deb "${BASE_URL}/conduit-armv7-unknown-linux-gnu.deb"'
# - 'curl --header "JOB-TOKEN: $CI_JOB_TOKEN" --upload-file build-output/linux_arm64/conduit.deb "${BASE_URL}/conduit-aarch64-unknown-linux-gnu.deb"'
# Avoid duplicate pipelines
# See: https://docs.gitlab.com/ee/ci/yaml/workflow.html#switch-between-branch-pipelines-and-merge-request-pipelines
workflow:
rules:
- if: '$CI_PIPELINE_SOURCE == "merge_request_event"'
- if: "$CI_COMMIT_BRANCH && $CI_OPEN_MERGE_REQUESTS"
when: never
- if: "$CI_COMMIT_BRANCH"
- if: "$CI_COMMIT_TAG"

1659
Cargo.lock generated

File diff suppressed because it is too large Load diff

View file

@ -1,14 +1,3 @@
# Keep alphabetically sorted
[workspace.lints.rust]
explicit_outlives_requirements = "warn"
unused_qualifications = "warn"
# Keep alphabetically sorted
[workspace.lints.clippy]
cloned_instead_of_copied = "warn"
dbg_macro = "warn"
str_to_string = "warn"
[package] [package]
name = "conduit" name = "conduit"
description = "A Matrix homeserver written in Rust" description = "A Matrix homeserver written in Rust"
@ -17,17 +6,17 @@ authors = ["timokoesters <timo@koesters.xyz>"]
homepage = "https://conduit.rs" homepage = "https://conduit.rs"
repository = "https://gitlab.com/famedly/conduit" repository = "https://gitlab.com/famedly/conduit"
readme = "README.md" readme = "README.md"
version = "0.7.0-alpha" version = "0.6.0"
edition = "2021" edition = "2021"
# See also `rust-toolchain.toml` # When changing this, make sure to update the `flake.lock` file by running
rust-version = "1.75.0" # `nix flake update`. If you don't have Nix installed or otherwise don't know
# how to do this, ping `@charles:computer.surgery` or `@dusk:gaze.systems` in
# the matrix room.
rust-version = "1.70.0"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[lints]
workspace = true
[dependencies] [dependencies]
# Web framework # Web framework
axum = { version = "0.6.18", default-features = false, features = ["form", "headers", "http1", "http2", "json", "matched-path"], optional = true } axum = { version = "0.6.18", default-features = false, features = ["form", "headers", "http1", "http2", "json", "matched-path"], optional = true }
@ -37,7 +26,7 @@ tower-http = { version = "0.4.1", features = ["add-extension", "cors", "sensitiv
# Used for matrix spec type definitions and helpers # Used for matrix spec type definitions and helpers
#ruma = { version = "0.4.0", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } #ruma = { version = "0.4.0", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] }
ruma = { git = "https://github.com/ruma/ruma", rev = "1a1c61ee1e8f0936e956a3b69c931ce12ee28475", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-msc2448", "unstable-msc3575", "unstable-exhaustive-types", "ring-compat", "unstable-unspecified" ] } ruma = { git = "https://github.com/ruma/ruma", rev = "3bd58e3c899457c2d55c45268dcb8a65ae682d54", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-msc2448", "unstable-msc3575", "unstable-exhaustive-types", "ring-compat", "unstable-unspecified" ] }
#ruma = { git = "https://github.com/timokoesters/ruma", rev = "4ec9c69bb7e09391add2382b3ebac97b6e8f4c64", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-msc2448", "unstable-msc3575", "unstable-exhaustive-types", "ring-compat", "unstable-unspecified" ] } #ruma = { git = "https://github.com/timokoesters/ruma", rev = "4ec9c69bb7e09391add2382b3ebac97b6e8f4c64", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-msc2448", "unstable-msc3575", "unstable-exhaustive-types", "ring-compat", "unstable-unspecified" ] }
#ruma = { path = "../ruma/crates/ruma", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-msc2448", "unstable-msc3575", "unstable-exhaustive-types", "ring-compat", "unstable-unspecified" ] } #ruma = { path = "../ruma/crates/ruma", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-msc2448", "unstable-msc3575", "unstable-exhaustive-types", "ring-compat", "unstable-unspecified" ] }
@ -64,8 +53,7 @@ rand = "0.8.5"
# Used to hash passwords # Used to hash passwords
rust-argon2 = "1.0.0" rust-argon2 = "1.0.0"
# Used to send requests # Used to send requests
hyper = "0.14.26" reqwest = { default-features = false, features = ["rustls-tls-native-roots", "socks"], git = "https://github.com/timokoesters/reqwest", rev = "57b7cf4feb921573dfafad7d34b9ac6e44ead0bd" }
reqwest = { version = "0.11.18", default-features = false, features = ["rustls-tls-native-roots", "socks"] }
# Used for conduit::Error type # Used for conduit::Error type
thiserror = "1.0.40" thiserror = "1.0.40"
# Used to generate thumbnails for images # Used to generate thumbnails for images
@ -73,13 +61,13 @@ image = { version = "0.24.6", default-features = false, features = ["jpeg", "png
# Used to encode server public key # Used to encode server public key
base64 = "0.21.2" base64 = "0.21.2"
# Used when hashing the state # Used when hashing the state
ring = "0.17.7" ring = "0.16.20"
# Used when querying the SRV record of other servers # Used when querying the SRV record of other servers
trust-dns-resolver = "0.22.0" trust-dns-resolver = "0.22.0"
# Used to find matching events for appservices # Used to find matching events for appservices
regex = "1.8.1" regex = "1.8.1"
# jwt jsonwebtokens # jwt jsonwebtokens
jsonwebtoken = "9.2.0" jsonwebtoken = "8.3.0"
# Performance measurements # Performance measurements
tracing = { version = "0.1.37", features = [] } tracing = { version = "0.1.37", features = [] }
tracing-subscriber = { version = "0.3.17", features = ["env-filter"] } tracing-subscriber = { version = "0.3.17", features = ["env-filter"] }
@ -90,10 +78,10 @@ tracing-opentelemetry = "0.18.0"
lru-cache = "0.1.2" lru-cache = "0.1.2"
rusqlite = { version = "0.29.0", optional = true, features = ["bundled"] } rusqlite = { version = "0.29.0", optional = true, features = ["bundled"] }
parking_lot = { version = "0.12.1", optional = true } parking_lot = { version = "0.12.1", optional = true }
# crossbeam = { version = "0.8.2", optional = true } crossbeam = { version = "0.8.2", optional = true }
num_cpus = "1.15.0" num_cpus = "1.15.0"
threadpool = "1.8.1" threadpool = "1.8.1"
# heed = { git = "https://github.com/timokoesters/heed.git", rev = "f6f825da7fb2c758867e05ad973ef800a6fe1d5d", optional = true } heed = { git = "https://github.com/timokoesters/heed.git", rev = "f6f825da7fb2c758867e05ad973ef800a6fe1d5d", optional = true }
# Used for ruma wrapper # Used for ruma wrapper
serde_html_form = "0.2.0" serde_html_form = "0.2.0"
@ -124,7 +112,7 @@ default = ["conduit_bin", "backend_sqlite", "backend_rocksdb", "systemd"]
#backend_sled = ["sled"] #backend_sled = ["sled"]
backend_persy = ["persy", "parking_lot"] backend_persy = ["persy", "parking_lot"]
backend_sqlite = ["sqlite"] backend_sqlite = ["sqlite"]
#backend_heed = ["heed", "crossbeam"] backend_heed = ["heed", "crossbeam"]
backend_rocksdb = ["rocksdb"] backend_rocksdb = ["rocksdb"]
jemalloc = ["tikv-jemalloc-ctl", "tikv-jemallocator"] jemalloc = ["tikv-jemalloc-ctl", "tikv-jemallocator"]
sqlite = ["rusqlite", "parking_lot", "tokio/signal"] sqlite = ["rusqlite", "parking_lot", "tokio/signal"]

View file

@ -12,13 +12,11 @@ only offer Linux binaries.
You may simply download the binary that fits your machine. Run `uname -m` to see what you need. Now copy the appropriate url: You may simply download the binary that fits your machine. Run `uname -m` to see what you need. Now copy the appropriate url:
**Stable versions:** | CPU Architecture | Download stable version | Download development version |
| ------------------------------------------- | --------------------------------------------------------------- | ----------------------------------------------------------- |
| CPU Architecture | Download stable version | | x84_64 / amd64 (Most servers and computers) | [Binary][x84_64-glibc-master] / [.deb][x84_64-glibc-master-deb] | [Binary][x84_64-glibc-next] / [.deb][x84_64-glibc-next-deb] |
| ------------------------------------------- | --------------------------------------------------------------- | | armv7 (e.g. Raspberry Pi by default) | [Binary][armv7-glibc-master] / [.deb][armv7-glibc-master-deb] | [Binary][armv7-glibc-next] / [.deb][armv7-glibc-next-deb] |
| x84_64 / amd64 (Most servers and computers) | [Binary][x84_64-glibc-master] / [.deb][x84_64-glibc-master-deb] | | armv8 / aarch64 | [Binary][armv8-glibc-master] / [.deb][armv8-glibc-master-deb] | [Binary][armv8-glibc-next] / [.deb][armv8-glibc-next-deb] |
| armv7 (e.g. Raspberry Pi by default) | [Binary][armv7-glibc-master] / [.deb][armv7-glibc-master-deb] |
| armv8 / aarch64 | [Binary][armv8-glibc-master] / [.deb][armv8-glibc-master-deb] |
These builds were created on and linked against the glibc version shipped with Debian bullseye. These builds were created on and linked against the glibc version shipped with Debian bullseye.
If you use a system with an older glibc version (e.g. RHEL8), you might need to compile Conduit yourself. If you use a system with an older glibc version (e.g. RHEL8), you might need to compile Conduit yourself.
@ -26,19 +24,15 @@ If you use a system with an older glibc version (e.g. RHEL8), you might need to
[x84_64-glibc-master]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/build-output/linux_amd64/conduit?job=docker:master [x84_64-glibc-master]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/build-output/linux_amd64/conduit?job=docker:master
[armv7-glibc-master]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/build-output/linux_arm_v7/conduit?job=docker:master [armv7-glibc-master]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/build-output/linux_arm_v7/conduit?job=docker:master
[armv8-glibc-master]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/build-output/linux_arm64/conduit?job=docker:master [armv8-glibc-master]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/build-output/linux_arm64/conduit?job=docker:master
[x84_64-glibc-next]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/next/raw/build-output/linux_amd64/conduit?job=docker:next
[armv7-glibc-next]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/next/raw/build-output/linux_arm_v7/conduit?job=docker:next
[armv8-glibc-next]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/next/raw/build-output/linux_arm64/conduit?job=docker:next
[x84_64-glibc-master-deb]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/build-output/linux_amd64/conduit.deb?job=docker:master [x84_64-glibc-master-deb]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/build-output/linux_amd64/conduit.deb?job=docker:master
[armv7-glibc-master-deb]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/build-output/linux_arm_v7/conduit.deb?job=docker:master [armv7-glibc-master-deb]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/build-output/linux_arm_v7/conduit.deb?job=docker:master
[armv8-glibc-master-deb]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/build-output/linux_arm64/conduit.deb?job=docker:master [armv8-glibc-master-deb]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/build-output/linux_arm64/conduit.deb?job=docker:master
[x84_64-glibc-next-deb]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/next/raw/build-output/linux_amd64/conduit.deb?job=docker:next
**Latest versions:** [armv7-glibc-next-deb]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/next/raw/build-output/linux_arm_v7/conduit.deb?job=docker:next
[armv8-glibc-next-deb]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/next/raw/build-output/linux_arm64/conduit.deb?job=docker:next
| Target | Type | Download |
|-|-|-|
| `x86_64-unknown-linux-gnu` | Dynamically linked Debian package | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/next/raw/conduit.deb?job=debian:x86_64-unknown-linux-gnu) |
| `x86_64-unknown-linux-musl` | Statically linked binary | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/next/raw/conduit?job=static:x86_64-unknown-linux-musl) |
| `aarch64-unknown-linux-musl` | Statically linked binary | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/next/raw/conduit?job=static:aarch64-unknown-linux-musl) |
| `x86_64-unknown-linux-musl` | OCI image | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/next/raw/oci-image-amd64.tar.gz?job=oci-image:x86_64-unknown-linux-musl) |
| `aarch64-unknown-linux-musl` | OCI image | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/next/raw/oci-image-arm64v8.tar.gz?job=oci-image:aarch64-unknown-linux-musl) |
```bash ```bash
$ sudo wget -O /usr/local/bin/matrix-conduit <url> $ sudo wget -O /usr/local/bin/matrix-conduit <url>
@ -279,7 +273,7 @@ server {
client_max_body_size 20M; client_max_body_size 20M;
location /_matrix/ { location /_matrix/ {
proxy_pass http://127.0.0.1:6167; proxy_pass http://127.0.0.1:6167$request_uri;
proxy_set_header Host $http_host; proxy_set_header Host $http_host;
proxy_buffering off; proxy_buffering off;
proxy_read_timeout 5m; proxy_read_timeout 5m;

132
Dockerfile Normal file
View file

@ -0,0 +1,132 @@
# syntax=docker/dockerfile:1
FROM docker.io/rust:1.70-bullseye AS base
FROM base AS builder
WORKDIR /usr/src/conduit
# Install required packages to build Conduit and it's dependencies
RUN apt-get update && \
apt-get -y --no-install-recommends install libclang-dev=1:11.0-51+nmu5
# == Build dependencies without our own code separately for caching ==
#
# Need a fake main.rs since Cargo refuses to build anything otherwise.
#
# See https://github.com/rust-lang/cargo/issues/2644 for a Cargo feature
# request that would allow just dependencies to be compiled, presumably
# regardless of whether source files are available.
RUN mkdir src && touch src/lib.rs && echo 'fn main() {}' > src/main.rs
COPY Cargo.toml Cargo.lock ./
RUN cargo build --release && rm -r src
# Copy over actual Conduit sources
COPY src src
# main.rs and lib.rs need their timestamp updated for this to work correctly since
# otherwise the build with the fake main.rs from above is newer than the
# source files (COPY preserves timestamps).
#
# Builds conduit and places the binary at /usr/src/conduit/target/release/conduit
RUN touch src/main.rs && touch src/lib.rs && cargo build --release
# ONLY USEFUL FOR CI: target stage to extract build artifacts
FROM scratch AS builder-result
COPY --from=builder /usr/src/conduit/target/release/conduit /conduit
# ---------------------------------------------------------------------------------------------------------------
# Build cargo-deb, a tool to package up rust binaries into .deb packages for Debian/Ubuntu based systems:
# ---------------------------------------------------------------------------------------------------------------
FROM base AS build-cargo-deb
RUN apt-get update && \
apt-get install -y --no-install-recommends \
dpkg \
dpkg-dev \
liblzma-dev
RUN cargo install cargo-deb
# => binary is in /usr/local/cargo/bin/cargo-deb
# ---------------------------------------------------------------------------------------------------------------
# Package conduit build-result into a .deb package:
# ---------------------------------------------------------------------------------------------------------------
FROM builder AS packager
WORKDIR /usr/src/conduit
COPY ./LICENSE ./LICENSE
COPY ./README.md ./README.md
COPY debian ./debian
COPY --from=build-cargo-deb /usr/local/cargo/bin/cargo-deb /usr/local/cargo/bin/cargo-deb
# --no-build makes cargo-deb reuse already compiled project
RUN cargo deb --no-build
# => Package is in /usr/src/conduit/target/debian/<project_name>_<version>_<arch>.deb
# ONLY USEFUL FOR CI: target stage to extract build artifacts
FROM scratch AS packager-result
COPY --from=packager /usr/src/conduit/target/debian/*.deb /conduit.deb
# ---------------------------------------------------------------------------------------------------------------
# Stuff below this line actually ends up in the resulting docker image
# ---------------------------------------------------------------------------------------------------------------
FROM docker.io/debian:bullseye-slim AS runner
# Standard port on which Conduit launches.
# You still need to map the port when using the docker command or docker-compose.
EXPOSE 6167
ARG DEFAULT_DB_PATH=/var/lib/matrix-conduit
ENV CONDUIT_PORT=6167 \
CONDUIT_ADDRESS="0.0.0.0" \
CONDUIT_DATABASE_PATH=${DEFAULT_DB_PATH} \
CONDUIT_CONFIG=''
# └─> Set no config file to do all configuration with env vars
# Conduit needs:
# dpkg: to install conduit.deb
# ca-certificates: for https
# iproute2 & wget: for the healthcheck script
RUN apt-get update && apt-get -y --no-install-recommends install \
dpkg \
ca-certificates \
iproute2 \
wget \
&& rm -rf /var/lib/apt/lists/*
# Test if Conduit is still alive, uses the same endpoint as Element
COPY ./docker/healthcheck.sh /srv/conduit/healthcheck.sh
HEALTHCHECK --start-period=5s --interval=5s CMD ./healthcheck.sh
# Install conduit.deb:
COPY --from=packager /usr/src/conduit/target/debian/*.deb /srv/conduit/
RUN dpkg -i /srv/conduit/*.deb
# Improve security: Don't run stuff as root, that does not need to run as root
# Most distros also use 1000:1000 for the first real user, so this should resolve volume mounting problems.
ARG USER_ID=1000
ARG GROUP_ID=1000
RUN set -x ; \
groupadd -r -g ${GROUP_ID} conduit ; \
useradd -l -r -M -d /srv/conduit -o -u ${USER_ID} -g conduit conduit && exit 0 ; exit 1
# Create database directory, change ownership of Conduit files to conduit user and group and make the healthcheck executable:
RUN chown -cR conduit:conduit /srv/conduit && \
chmod +x /srv/conduit/healthcheck.sh && \
mkdir -p ${DEFAULT_DB_PATH} && \
chown -cR conduit:conduit ${DEFAULT_DB_PATH}
# Change user to conduit, no root permissions afterwards:
USER conduit
# Set container home directory
WORKDIR /srv/conduit
# Run Conduit and print backtraces on panics
ENV RUST_BACKTRACE=1
ENTRYPOINT [ "/usr/sbin/matrix-conduit" ]

View file

@ -1,37 +0,0 @@
#!/usr/bin/env bash

# Build a Complement-ready Conduit OCI image, run the Complement test suite
# against it, and post-process the `go test -json` output into a stable,
# diff-friendly results file.
#
# Usage: complement <complement-src-dir> <log-file.jsonl> <results-file.jsonl>

set -euo pipefail

# Fail early with a readable message instead of a cryptic `set -u` error.
if [ "$#" -ne 3 ]; then
    echo "usage: $0 <complement-src> <log-file> <results-file>" >&2
    exit 1
fi

# Path to Complement's source code
COMPLEMENT_SRC="$1"

# A `.jsonl` file to write test logs to
LOG_FILE="$2"

# A `.jsonl` file to write test results to
RESULTS_FILE="$3"

OCI_IMAGE="complement-conduit:dev"

# Build from the repository root so the Dockerfile's COPY paths resolve.
env \
    -C "$(git rev-parse --show-toplevel)" \
    docker build \
        --tag "$OCI_IMAGE" \
        --file complement/Dockerfile \
        .

# It's okay (likely, even) that `go test` exits nonzero
set +o pipefail
env \
    -C "$COMPLEMENT_SRC" \
    COMPLEMENT_BASE_IMAGE="$OCI_IMAGE" \
    go test -json ./tests | tee "$LOG_FILE"
set -o pipefail

# Post-process the results into an easy-to-compare format. jq reads the log
# file directly (no `cat` pipeline needed), and LC_ALL=C pins the sort order
# so results files are byte-identical across machines and locales.
jq -c '
    select(
        (.Action == "pass" or .Action == "fail" or .Action == "skip")
        and .Test != null
    ) | {Action: .Action, Test: .Test}
' "$LOG_FILE" | LC_ALL=C sort > "$RESULTS_FILE"

View file

@ -1,31 +0,0 @@
#!/usr/bin/env bash

# Build the given Nix installable and, when $ATTIC_TOKEN is set, push the
# result (plus Attic itself, so CI can run it from cache) to the project's
# Attic binary cache.
#
# Usage: nix-build-and-cache <installable> [extra `nix build` args...]

set -euo pipefail

# The first argument must be the desired installable
INSTALLABLE="$1"

# Build the installable and forward any other arguments too
nix build "$@"

# `${ATTIC_TOKEN+x}` expands to `x` only when the variable is set. The
# expansion must be quoted: unquoted, an unset variable collapses the test
# to the malformed `[ ! -z ]`, which evaluates to true and breaks the check.
if [ -n "${ATTIC_TOKEN+x}" ]; then
    nix run --inputs-from . attic -- login \
        conduit \
        https://nix.computer.surgery/conduit \
        "$ATTIC_TOKEN"

    push_args=(
        # Attic and its build dependencies
        "$(nix path-info --inputs-from . attic)"
        "$(nix path-info --inputs-from . attic --derivation)"

        # The target installable and its build dependencies
        "$(nix path-info "$INSTALLABLE" --derivation)"
        "$(nix path-info "$INSTALLABLE")"
    )

    nix run --inputs-from . attic -- push conduit "${push_args[@]}"
else
    echo "\$ATTIC_TOKEN is unset, skipping uploading to the binary cache"
fi

View file

@ -1,30 +1,26 @@
FROM rust:1.75.0 # For use in our CI only. This requires a build artifact created by a previous run pipline stage to be placed in cached_target/release/conduit
FROM registry.gitlab.com/jfowl/conduit-containers/rust-with-tools:commit-16a08e9b as builder
#FROM rust:latest as builder
WORKDIR /workdir WORKDIR /workdir
RUN apt-get update && apt-get install -y --no-install-recommends \ ARG RUSTC_WRAPPER
libclang-dev ARG AWS_ACCESS_KEY_ID
ARG AWS_SECRET_ACCESS_KEY
ARG SCCACHE_BUCKET
ARG SCCACHE_ENDPOINT
ARG SCCACHE_S3_USE_SSL
COPY Cargo.toml Cargo.toml COPY . .
COPY Cargo.lock Cargo.lock RUN mkdir -p target/release
COPY src src RUN test -e cached_target/release/conduit && cp cached_target/release/conduit target/release/conduit || cargo build --release
RUN cargo build --release \
&& mv target/release/conduit conduit \ ## Actual image
&& rm -rf target FROM debian:bullseye
WORKDIR /workdir
# Install caddy # Install caddy
RUN apt-get update \ RUN apt-get update && apt-get install -y debian-keyring debian-archive-keyring apt-transport-https curl && curl -1sLf 'https://dl.cloudsmith.io/public/caddy/testing/gpg.key' | gpg --dearmor -o /usr/share/keyrings/caddy-testing-archive-keyring.gpg && curl -1sLf 'https://dl.cloudsmith.io/public/caddy/testing/debian.deb.txt' | tee /etc/apt/sources.list.d/caddy-testing.list && apt-get update && apt-get install -y caddy
&& apt-get install -y \
debian-keyring \
debian-archive-keyring \
apt-transport-https \
curl \
&& curl -1sLf 'https://dl.cloudsmith.io/public/caddy/testing/gpg.key' \
| gpg --dearmor -o /usr/share/keyrings/caddy-testing-archive-keyring.gpg \
&& curl -1sLf 'https://dl.cloudsmith.io/public/caddy/testing/debian.deb.txt' \
| tee /etc/apt/sources.list.d/caddy-testing.list \
&& apt-get update \
&& apt-get install -y caddy
COPY conduit-example.toml conduit.toml COPY conduit-example.toml conduit.toml
COPY complement/caddy.json caddy.json COPY complement/caddy.json caddy.json
@ -33,9 +29,16 @@ ENV SERVER_NAME=localhost
ENV CONDUIT_CONFIG=/workdir/conduit.toml ENV CONDUIT_CONFIG=/workdir/conduit.toml
RUN sed -i "s/port = 6167/port = 8008/g" conduit.toml RUN sed -i "s/port = 6167/port = 8008/g" conduit.toml
RUN echo "allow_federation = true" >> conduit.toml
RUN echo "allow_check_for_updates = true" >> conduit.toml
RUN echo "allow_encryption = true" >> conduit.toml
RUN echo "allow_registration = true" >> conduit.toml
RUN echo "log = \"warn,_=off,sled=off\"" >> conduit.toml RUN echo "log = \"warn,_=off,sled=off\"" >> conduit.toml
RUN sed -i "s/address = \"127.0.0.1\"/address = \"0.0.0.0\"/g" conduit.toml RUN sed -i "s/address = \"127.0.0.1\"/address = \"0.0.0.0\"/g" conduit.toml
COPY --from=builder /workdir/target/release/conduit /workdir/conduit
RUN chmod +x /workdir/conduit
EXPOSE 8008 8448 EXPOSE 8008 8448
CMD uname -a && \ CMD uname -a && \

View file

@ -1,11 +1,13 @@
# Complement # Running Conduit on Complement
## What's that? This assumes that you're familiar with complement, if not, please readme
[their readme](https://github.com/matrix-org/complement#running).
Have a look at [its repository](https://github.com/matrix-org/complement). Complement works with "base images", this directory (and Dockerfile) helps build the conduit complement-ready docker
image.
## How do I use it with Conduit? To build, `cd` to the base directory of the workspace, and run this:
The script at [`../bin/complement`](../bin/complement) has automation for this. `docker build -t complement-conduit:dev -f complement/Dockerfile .`
It takes a few command line arguments, you can read the script to find out what
those are. Then use `complement-conduit:dev` as a base image for running complement tests.

View file

@ -1,10 +0,0 @@
# Compatibility shim: lets non-flake commands (plain `nix-build`, `nix-env`)
# evaluate this flake by routing through the flake-compat library.
(import
(
# Pin flake-compat to the exact revision recorded in flake.lock so this shim
# stays in sync with the flake's own locked input.
let lock = builtins.fromJSON (builtins.readFile ./flake.lock); in
fetchTarball {
url = lock.nodes.flake-compat.locked.url or "https://github.com/edolstra/flake-compat/archive/${lock.nodes.flake-compat.locked.rev}.tar.gz";
sha256 = lock.nodes.flake-compat.locked.narHash;
}
)
{ src = ./.; }
# Expose the flake's default package as the result of this file.
).defaultNix

View file

@ -76,7 +76,7 @@ to pass `-e CONDUIT_CONFIG=""` into your container. For an overview of possible
If you just want to test Conduit for a short time, you can use the `--rm` flag, which will clean up everything related to your container after you stop it. If you just want to test Conduit for a short time, you can use the `--rm` flag, which will clean up everything related to your container after you stop it.
### Docker-compose ## Docker-compose
If the `docker run` command is not for you or your setup, you can also use one of the provided `docker-compose` files. If the `docker run` command is not for you or your setup, you can also use one of the provided `docker-compose` files.
@ -161,58 +161,3 @@ So...step by step:
6. Run `docker-compose up -d` 6. Run `docker-compose up -d`
7. Connect to your homeserver with your preferred client and create a user. You should do this immediately after starting Conduit, because the first created user is the admin. 7. Connect to your homeserver with your preferred client and create a user. You should do this immediately after starting Conduit, because the first created user is the admin.
## Voice communication
In order to make or receive calls, a TURN server is required. Conduit suggests using [Coturn](https://github.com/coturn/coturn) for this purpose, which is also available as a Docker image. Before proceeding with the software installation, it is essential to have the necessary configurations in place.
### Configuration
Create a configuration file called `coturn.conf` containing:
```conf
use-auth-secret
static-auth-secret=<a secret key>
realm=<your server domain>
```
A common way to generate a suitable alphanumeric secret key is by using `pwgen -s 64 1`.
These same values need to be set in conduit. You can either modify conduit.toml to include these lines:
```
turn_uris = ["turn:<your server domain>?transport=udp", "turn:<your server domain>?transport=tcp"]
turn_secret = "<secret key from coturn configuration>"
```
or append the following to the docker environment variables depending on which configuration method you used earlier:
```yml
CONDUIT_TURN_URIS: '["turn:<your server domain>?transport=udp", "turn:<your server domain>?transport=tcp"]'
CONDUIT_TURN_SECRET: "<secret key from coturn configuration>"
```
Restart Conduit to apply these changes.
### Run
Run the [Coturn](https://hub.docker.com/r/coturn/coturn) image using
```bash
docker run -d --network=host -v "$(pwd)/coturn.conf:/etc/coturn/turnserver.conf" coturn/coturn
```
or docker-compose. For the latter, paste the following section into a file called `docker-compose.yml`
and run `docker-compose up -d` in the same directory.
```yml
version: "3"
services:
turn:
container_name: coturn-server
image: docker.io/coturn/coturn
restart: unless-stopped
network_mode: "host"
volumes:
- ./coturn.conf:/etc/coturn/turnserver.conf
```
To understand why the host networking mode is used and explore alternative configuration options, please visit the following link: https://github.com/coturn/coturn/blob/master/docker/coturn/README.md.
For security recommendations see Synapse's [Coturn documentation](https://github.com/matrix-org/synapse/blob/develop/docs/setup/turn/coturn.md#configuration).

View file

@ -1,64 +0,0 @@
# Task definitions for the `engage` task runner used by CI.
# Tasks run under bash with strict error handling (-euo pipefail).
interpreter = ["bash", "-euo", "pipefail", "-c"]

# --- "versions" group: record toolchain versions in the CI log ---

[[task]]
name = "engage"
group = "versions"
script = "engage --version"

[[task]]
name = "rustc"
group = "versions"
script = "rustc --version"

[[task]]
name = "cargo"
group = "versions"
script = "cargo --version"

[[task]]
name = "cargo-fmt"
group = "versions"
script = "cargo fmt --version"

[[task]]
name = "rustdoc"
group = "versions"
script = "rustdoc --version"

[[task]]
name = "cargo-clippy"
group = "versions"
script = "cargo clippy -- --version"

# --- "lints" group: formatting, docs, and clippy; warnings are errors ---

[[task]]
name = "cargo-fmt"
group = "lints"
script = "cargo fmt --check -- --color=always"

[[task]]
name = "cargo-doc"
group = "lints"
script = """
RUSTDOCFLAGS="-D warnings" cargo doc \
--workspace \
--no-deps \
--document-private-items \
--color always
"""

[[task]]
name = "cargo-clippy"
group = "lints"
script = "cargo clippy --workspace --all-targets --color=always -- -D warnings"

# --- "tests" group: the workspace test suite ---

[[task]]
name = "cargo"
group = "tests"
script = """
cargo test \
--workspace \
--all-targets \
--color=always \
-- \
--color=always
"""

View file

@ -1,41 +1,22 @@
{ {
"nodes": { "nodes": {
"attic": {
"inputs": {
"crane": "crane",
"flake-compat": "flake-compat",
"flake-utils": "flake-utils",
"nixpkgs": "nixpkgs",
"nixpkgs-stable": "nixpkgs-stable"
},
"locked": {
"lastModified": 1705617092,
"narHash": "sha256-n9PK4O4X4S1JkwpkMuYm1wHZYJzRqif8g3RuVIPD+rY=",
"owner": "zhaofengli",
"repo": "attic",
"rev": "fbe252a5c21febbe920c025560cbd63b20e24f3b",
"type": "github"
},
"original": {
"owner": "zhaofengli",
"ref": "main",
"repo": "attic",
"type": "github"
}
},
"crane": { "crane": {
"inputs": { "inputs": {
"flake-compat": "flake-compat",
"flake-utils": [
"flake-utils"
],
"nixpkgs": [ "nixpkgs": [
"attic",
"nixpkgs" "nixpkgs"
] ],
"rust-overlay": "rust-overlay"
}, },
"locked": { "locked": {
"lastModified": 1702918879, "lastModified": 1688772518,
"narHash": "sha256-tWJqzajIvYcaRWxn+cLUB9L9Pv4dQ3Bfit/YjU5ze3g=", "narHash": "sha256-ol7gZxwvgLnxNSZwFTDJJ49xVY5teaSvF7lzlo3YQfM=",
"owner": "ipetkov", "owner": "ipetkov",
"repo": "crane", "repo": "crane",
"rev": "7195c00c272fdd92fc74e7d5a0a2844b9fadb2fb", "rev": "8b08e96c9af8c6e3a2b69af5a7fa168750fcf88e",
"type": "github" "type": "github"
}, },
"original": { "original": {
@ -44,27 +25,6 @@
"type": "github" "type": "github"
} }
}, },
"crane_2": {
"inputs": {
"nixpkgs": [
"nixpkgs"
]
},
"locked": {
"lastModified": 1706473964,
"narHash": "sha256-Fq6xleee/TsX6NbtoRuI96bBuDHMU57PrcK9z1QEKbk=",
"owner": "ipetkov",
"repo": "crane",
"rev": "c798790eabec3e3da48190ae3698ac227aab770c",
"type": "github"
},
"original": {
"owner": "ipetkov",
"ref": "master",
"repo": "crane",
"type": "github"
}
},
"fenix": { "fenix": {
"inputs": { "inputs": {
"nixpkgs": [ "nixpkgs": [
@ -73,11 +33,11 @@
"rust-analyzer-src": "rust-analyzer-src" "rust-analyzer-src": "rust-analyzer-src"
}, },
"locked": { "locked": {
"lastModified": 1705559032, "lastModified": 1689488573,
"narHash": "sha256-Cb+Jd1+Gz4Wi+8elPnUIHnqQmE1qjDRZ+PsJaPaAffY=", "narHash": "sha256-diVASflKCCryTYv0djvMnP2444mFsIG0ge5pa7ahauQ=",
"owner": "nix-community", "owner": "nix-community",
"repo": "fenix", "repo": "fenix",
"rev": "e132ea0eb0c799a2109a91688e499d7bf4962801", "rev": "39096fe3f379036ff4a5fa198950b8e79defe939",
"type": "github" "type": "github"
}, },
"original": { "original": {
@ -102,47 +62,16 @@
"type": "github" "type": "github"
} }
}, },
"flake-compat_2": {
"flake": false,
"locked": {
"lastModified": 1696426674,
"narHash": "sha256-kvjfFW7WAETZlt09AgDn1MrtKzP7t90Vf7vypd3OL1U=",
"owner": "edolstra",
"repo": "flake-compat",
"rev": "0f9255e01c2351cc7d116c072cb317785dd33b33",
"type": "github"
},
"original": {
"owner": "edolstra",
"repo": "flake-compat",
"type": "github"
}
},
"flake-utils": { "flake-utils": {
"locked": {
"lastModified": 1667395993,
"narHash": "sha256-nuEHfE/LcWyuSWnS8t12N1wc105Qtau+/OdUAjtQ0rA=",
"owner": "numtide",
"repo": "flake-utils",
"rev": "5aed5285a952e0b949eb3ba02c12fa4fcfef535f",
"type": "github"
},
"original": {
"owner": "numtide",
"repo": "flake-utils",
"type": "github"
}
},
"flake-utils_2": {
"inputs": { "inputs": {
"systems": "systems" "systems": "systems"
}, },
"locked": { "locked": {
"lastModified": 1705309234, "lastModified": 1689068808,
"narHash": "sha256-uNRRNRKmJyCRC/8y1RqBkqWBLM034y4qN7EprSdmgyA=", "narHash": "sha256-6ixXo3wt24N/melDWjq70UuHQLxGV8jZvooRanIHXw0=",
"owner": "numtide", "owner": "numtide",
"repo": "flake-utils", "repo": "flake-utils",
"rev": "1ef2e671c3b0c19053962c07dbda38332dcebf26", "rev": "919d646de7be200f3bf08cb76ae1f09402b6f9b4",
"type": "github" "type": "github"
}, },
"original": { "original": {
@ -151,60 +80,13 @@
"type": "github" "type": "github"
} }
}, },
"nix-filter": {
"locked": {
"lastModified": 1705332318,
"narHash": "sha256-kcw1yFeJe9N4PjQji9ZeX47jg0p9A0DuU4djKvg1a7I=",
"owner": "numtide",
"repo": "nix-filter",
"rev": "3449dc925982ad46246cfc36469baf66e1b64f17",
"type": "github"
},
"original": {
"owner": "numtide",
"repo": "nix-filter",
"type": "github"
}
},
"nixpkgs": { "nixpkgs": {
"locked": { "locked": {
"lastModified": 1702539185, "lastModified": 1689444953,
"narHash": "sha256-KnIRG5NMdLIpEkZTnN5zovNYc0hhXjAgv6pfd5Z4c7U=", "narHash": "sha256-0o56bfb2LC38wrinPdCGLDScd77LVcr7CrH1zK7qvDg=",
"owner": "NixOS", "owner": "NixOS",
"repo": "nixpkgs", "repo": "nixpkgs",
"rev": "aa9d4729cbc99dabacb50e3994dcefb3ea0f7447", "rev": "8acef304efe70152463a6399f73e636bcc363813",
"type": "github"
},
"original": {
"owner": "NixOS",
"ref": "nixpkgs-unstable",
"repo": "nixpkgs",
"type": "github"
}
},
"nixpkgs-stable": {
"locked": {
"lastModified": 1702780907,
"narHash": "sha256-blbrBBXjjZt6OKTcYX1jpe9SRof2P9ZYWPzq22tzXAA=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "1e2e384c5b7c50dbf8e9c441a9e58d85f408b01f",
"type": "github"
},
"original": {
"owner": "NixOS",
"ref": "nixos-23.11",
"repo": "nixpkgs",
"type": "github"
}
},
"nixpkgs_2": {
"locked": {
"lastModified": 1705496572,
"narHash": "sha256-rPIe9G5EBLXdBdn9ilGc0nq082lzQd0xGGe092R/5QE=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "842d9d80cfd4560648c785f8a4e6f3b096790e19",
"type": "github" "type": "github"
}, },
"original": { "original": {
@ -216,23 +98,20 @@
}, },
"root": { "root": {
"inputs": { "inputs": {
"attic": "attic", "crane": "crane",
"crane": "crane_2",
"fenix": "fenix", "fenix": "fenix",
"flake-compat": "flake-compat_2", "flake-utils": "flake-utils",
"flake-utils": "flake-utils_2", "nixpkgs": "nixpkgs"
"nix-filter": "nix-filter",
"nixpkgs": "nixpkgs_2"
} }
}, },
"rust-analyzer-src": { "rust-analyzer-src": {
"flake": false, "flake": false,
"locked": { "locked": {
"lastModified": 1705523001, "lastModified": 1689441253,
"narHash": "sha256-TWq5vJ6m+9HGSDMsQAmz1TMegMi79R3TTyKjnPWsQp8=", "narHash": "sha256-4MSDZaFI4DOfsLIZYPMBl0snzWhX1/OqR/QHir382CY=",
"owner": "rust-lang", "owner": "rust-lang",
"repo": "rust-analyzer", "repo": "rust-analyzer",
"rev": "9d9b34354d2f13e33568c9c55b226dd014a146a0", "rev": "996e054f1eb1dbfc8455ecabff0f6ff22ba7f7c8",
"type": "github" "type": "github"
}, },
"original": { "original": {
@ -242,6 +121,31 @@
"type": "github" "type": "github"
} }
}, },
"rust-overlay": {
"inputs": {
"flake-utils": [
"crane",
"flake-utils"
],
"nixpkgs": [
"crane",
"nixpkgs"
]
},
"locked": {
"lastModified": 1688351637,
"narHash": "sha256-CLTufJ29VxNOIZ8UTg0lepsn3X03AmopmaLTTeHDCL4=",
"owner": "oxalica",
"repo": "rust-overlay",
"rev": "f9b92316727af9e6c7fee4a761242f7f46880329",
"type": "github"
},
"original": {
"owner": "oxalica",
"repo": "rust-overlay",
"type": "github"
}
},
"systems": { "systems": {
"locked": { "locked": {
"lastModified": 1681028828, "lastModified": 1681028828,

264
flake.nix
View file

@ -2,258 +2,92 @@
inputs = { inputs = {
nixpkgs.url = "github:NixOS/nixpkgs?ref=nixos-unstable"; nixpkgs.url = "github:NixOS/nixpkgs?ref=nixos-unstable";
flake-utils.url = "github:numtide/flake-utils"; flake-utils.url = "github:numtide/flake-utils";
nix-filter.url = "github:numtide/nix-filter";
flake-compat = {
url = "github:edolstra/flake-compat";
flake = false;
};
fenix = { fenix = {
url = "github:nix-community/fenix"; url = "github:nix-community/fenix";
inputs.nixpkgs.follows = "nixpkgs"; inputs.nixpkgs.follows = "nixpkgs";
}; };
crane = { crane = {
url = "github:ipetkov/crane?ref=master"; url = "github:ipetkov/crane";
inputs.nixpkgs.follows = "nixpkgs"; inputs.nixpkgs.follows = "nixpkgs";
inputs.flake-utils.follows = "flake-utils";
}; };
attic.url = "github:zhaofengli/attic?ref=main";
}; };
outputs = outputs =
{ self { self
, nixpkgs , nixpkgs
, flake-utils , flake-utils
, nix-filter
, fenix , fenix
, crane , crane
, ...
}: flake-utils.lib.eachDefaultSystem (system: }: flake-utils.lib.eachDefaultSystem (system:
let let
pkgsHost = nixpkgs.legacyPackages.${system}; pkgs = nixpkgs.legacyPackages.${system};
# Use mold on Linux
stdenv = if pkgs.stdenv.isLinux then
pkgs.stdenvAdapters.useMoldLinker pkgs.stdenv
else
pkgs.stdenv;
# Nix-accessible `Cargo.toml` # Nix-accessible `Cargo.toml`
cargoToml = builtins.fromTOML (builtins.readFile ./Cargo.toml); cargoToml = builtins.fromTOML (builtins.readFile ./Cargo.toml);
# The Rust toolchain to use # The Rust toolchain to use
toolchain = fenix.packages.${system}.fromToolchainFile { toolchain = fenix.packages.${system}.toolchainOf {
file = ./rust-toolchain.toml; # Use the Rust version defined in `Cargo.toml`
channel = cargoToml.package.rust-version;
# See also `rust-toolchain.toml` # THE rust-version HASH
sha256 = "sha256-SXRtAuO4IqNOQq+nLbrsDFbVk+3aVA8NNpSZsKlVH/8="; sha256 = "sha256-gdYqng0y9iHYzYPAdkC/ka3DRny3La/S5G8ASj0Ayyc=";
}; };
builder = pkgs: # The system's RocksDB
((crane.mkLib pkgs).overrideToolchain toolchain).buildPackage;
nativeBuildInputs = pkgs: [
# bindgen needs the build platform's libclang. Apparently due to
# "splicing weirdness", pkgs.rustPlatform.bindgenHook on its own doesn't
# quite do the right thing here.
pkgs.buildPackages.rustPlatform.bindgenHook
];
env = pkgs: {
ROCKSDB_INCLUDE_DIR = "${pkgs.rocksdb}/include"; ROCKSDB_INCLUDE_DIR = "${pkgs.rocksdb}/include";
ROCKSDB_LIB_DIR = "${pkgs.rocksdb}/lib"; ROCKSDB_LIB_DIR = "${pkgs.rocksdb}/lib";
}
// pkgs.lib.optionalAttrs pkgs.stdenv.hostPlatform.isStatic {
ROCKSDB_STATIC = "";
}
// {
CARGO_BUILD_RUSTFLAGS = let inherit (pkgs) lib stdenv; in
lib.concatStringsSep " " ([]
++ lib.optionals
# This disables PIE for static builds, which isn't great in terms
# of security. Unfortunately, my hand is forced because nixpkgs'
# `libstdc++.a` is built without `-fPIE`, which precludes us from
# leaving PIE enabled.
stdenv.hostPlatform.isStatic
["-C" "relocation-model=static"]
++ lib.optionals
(stdenv.buildPlatform.config != stdenv.hostPlatform.config)
["-l" "c"]
++ lib.optionals
# This check has to match the one [here][0]. We only need to set
# these flags when using a different linker. Don't ask me why,
# though, because I don't know. All I know is it breaks otherwise.
#
# [0]: https://github.com/NixOS/nixpkgs/blob/612f97239e2cc474c13c9dafa0df378058c5ad8d/pkgs/build-support/rust/lib/default.nix#L36-L39
(
# Nixpkgs doesn't check for x86_64 here but we do, because I
# observed a failure building statically for x86_64 without
# including it here. Linkers are weird.
(stdenv.hostPlatform.isAarch64 || stdenv.hostPlatform.isx86_64)
&& stdenv.hostPlatform.isStatic
&& !stdenv.isDarwin
&& !stdenv.cc.bintools.isLLVM
)
[
"-l"
"stdc++"
"-L"
"${stdenv.cc.cc.lib}/${stdenv.hostPlatform.config}/lib"
]
);
}
# What follows is stolen from [here][0]. Its purpose is to properly # Shared between the package and the devShell
# configure compilers and linkers for various stages of the build, and nativeBuildInputs = (with pkgs.rustPlatform; [
# even covers the case of build scripts that need native code compiled and bindgenHook
# run on the build platform (I think). ]);
#
# [0]: https://github.com/NixOS/nixpkgs/blob/612f97239e2cc474c13c9dafa0df378058c5ad8d/pkgs/build-support/rust/lib/default.nix#L64-L78
// (
let
inherit (pkgs.rust.lib) envVars;
in
pkgs.lib.optionalAttrs
(pkgs.stdenv.targetPlatform.rust.rustcTarget
!= pkgs.stdenv.hostPlatform.rust.rustcTarget)
(
let
inherit (pkgs.stdenv.targetPlatform.rust) cargoEnvVarTarget;
in
{
"CC_${cargoEnvVarTarget}" = envVars.ccForTarget;
"CXX_${cargoEnvVarTarget}" = envVars.cxxForTarget;
"CARGO_TARGET_${cargoEnvVarTarget}_LINKER" =
envVars.linkerForTarget;
}
)
// (
let
inherit (pkgs.stdenv.hostPlatform.rust) cargoEnvVarTarget rustcTarget;
in
{
"CC_${cargoEnvVarTarget}" = envVars.ccForHost;
"CXX_${cargoEnvVarTarget}" = envVars.cxxForHost;
"CARGO_TARGET_${cargoEnvVarTarget}_LINKER" = envVars.linkerForHost;
CARGO_BUILD_TARGET = rustcTarget;
}
)
// (
let
inherit (pkgs.stdenv.buildPlatform.rust) cargoEnvVarTarget;
in
{
"CC_${cargoEnvVarTarget}" = envVars.ccForBuild;
"CXX_${cargoEnvVarTarget}" = envVars.cxxForBuild;
"CARGO_TARGET_${cargoEnvVarTarget}_LINKER" = envVars.linkerForBuild;
HOST_CC = "${pkgs.buildPackages.stdenv.cc}/bin/cc";
HOST_CXX = "${pkgs.buildPackages.stdenv.cc}/bin/c++";
}
));
package = pkgs: builder pkgs { builder =
src = nix-filter { ((crane.mkLib pkgs).overrideToolchain toolchain.toolchain).buildPackage;
root = ./.; in
include = [ {
"src" packages.default = builder {
"Cargo.toml" src = ./.;
"Cargo.lock"
]; inherit
stdenv
nativeBuildInputs
ROCKSDB_INCLUDE_DIR
ROCKSDB_LIB_DIR;
}; };
# This is redundant with CI devShells.default = (pkgs.mkShell.override { inherit stdenv; }) {
doCheck = false;
env = env pkgs;
nativeBuildInputs = nativeBuildInputs pkgs;
meta.mainProgram = cargoToml.package.name;
};
mkOciImage = pkgs: package:
pkgs.dockerTools.buildImage {
name = package.pname;
tag = "next";
copyToRoot = [
pkgs.dockerTools.caCertificates
];
config = {
# Use the `tini` init system so that signals (e.g. ctrl+c/SIGINT)
# are handled as expected
Entrypoint = [
"${pkgs.lib.getExe' pkgs.tini "tini"}"
"--"
];
Cmd = [
"${pkgs.lib.getExe package}"
];
};
};
in
{
packages = {
default = package pkgsHost;
oci-image = mkOciImage pkgsHost self.packages.${system}.default;
}
//
builtins.listToAttrs
(builtins.concatLists
(builtins.map
(crossSystem:
let
binaryName = "static-${crossSystem}";
pkgsCrossStatic =
(import nixpkgs {
inherit system;
crossSystem = {
config = crossSystem;
};
}).pkgsStatic;
in
[
# An output for a statically-linked binary
{
name = binaryName;
value = package pkgsCrossStatic;
}
# An output for an OCI image based on that binary
{
name = "oci-image-${crossSystem}";
value = mkOciImage
pkgsCrossStatic
self.packages.${system}.${binaryName};
}
]
)
[
"x86_64-unknown-linux-musl"
"aarch64-unknown-linux-musl"
]
)
);
devShells.default = pkgsHost.mkShell {
env = env pkgsHost // {
# Rust Analyzer needs to be able to find the path to default crate # Rust Analyzer needs to be able to find the path to default crate
# sources, and it can read this environment variable to do so. The # sources, and it can read this environment variable to do so
# `rust-src` component is required in order for this to work. RUST_SRC_PATH = "${toolchain.rust-src}/lib/rustlib/src/rust/library";
RUST_SRC_PATH = "${toolchain}/lib/rustlib/src/rust/library";
}; inherit
ROCKSDB_INCLUDE_DIR
ROCKSDB_LIB_DIR;
# Development tools # Development tools
nativeBuildInputs = nativeBuildInputs pkgsHost ++ [ nativeBuildInputs = nativeBuildInputs ++ (with toolchain; [
# Always use nightly rustfmt because most of its options are unstable cargo
# clippy
# This needs to come before `toolchain` in this list, otherwise rust-src
# `$PATH` will have stable rustfmt instead. rustc
fenix.packages.${system}.latest.rustfmt rustfmt
toolchain
] ++ (with pkgsHost; [
engage
# Needed for Complement
go
olm
# Needed for our script for Complement
jq
]); ]);
}; };
checks = {
packagesDefault = self.packages.${system}.default;
devShellsDefault = self.devShells.${system}.default;
};
}); });
} }

View file

@ -1,22 +0,0 @@
# This is the authoritative configuration of this project's Rust toolchain.
#
# Other files that need upkeep when this changes:
#
# * `.gitlab-ci.yml`
# * `Cargo.toml`
# * `flake.nix`
#
# Search in those files for `rust-toolchain.toml` to find the relevant places.
# If you're having trouble making the relevant changes, bug a maintainer.

[toolchain]
channel = "1.75.0"
components = [
# For rust-analyzer
"rust-src",
]
# Cross-compilation targets: the native glibc target plus the musl targets
# used for the statically-linked release binaries.
targets = [
"x86_64-unknown-linux-gnu",
"x86_64-unknown-linux-musl",
"aarch64-unknown-linux-musl",
]

View file

@ -17,11 +17,7 @@ use ruma::{
DeviceKeyAlgorithm, OwnedDeviceId, OwnedUserId, UserId, DeviceKeyAlgorithm, OwnedDeviceId, OwnedUserId, UserId,
}; };
use serde_json::json; use serde_json::json;
use std::{ use std::collections::{BTreeMap, HashMap, HashSet};
collections::{hash_map, BTreeMap, HashMap, HashSet},
time::{Duration, Instant},
};
use tracing::debug;
/// # `POST /_matrix/client/r0/keys/upload` /// # `POST /_matrix/client/r0/keys/upload`
/// ///
@ -339,68 +335,31 @@ pub(crate) async fn get_keys_helper<F: Fn(&UserId) -> bool>(
let mut failures = BTreeMap::new(); let mut failures = BTreeMap::new();
let back_off = |id| match services()
.globals
.bad_query_ratelimiter
.write()
.unwrap()
.entry(id)
{
hash_map::Entry::Vacant(e) => {
e.insert((Instant::now(), 1));
}
hash_map::Entry::Occupied(mut e) => *e.get_mut() = (Instant::now(), e.get().1 + 1),
};
let mut futures: FuturesUnordered<_> = get_over_federation let mut futures: FuturesUnordered<_> = get_over_federation
.into_iter() .into_iter()
.map(|(server, vec)| async move { .map(|(server, vec)| async move {
if let Some((time, tries)) = services()
.globals
.bad_query_ratelimiter
.read()
.unwrap()
.get(server)
{
// Exponential backoff
let mut min_elapsed_duration = Duration::from_secs(30) * (*tries) * (*tries);
if min_elapsed_duration > Duration::from_secs(60 * 60 * 24) {
min_elapsed_duration = Duration::from_secs(60 * 60 * 24);
}
if time.elapsed() < min_elapsed_duration {
debug!("Backing off query from {:?}", server);
return (
server,
Err(Error::BadServerResponse("bad query, still backing off")),
);
}
}
let mut device_keys_input_fed = BTreeMap::new(); let mut device_keys_input_fed = BTreeMap::new();
for (user_id, keys) in vec { for (user_id, keys) in vec {
device_keys_input_fed.insert(user_id.to_owned(), keys.clone()); device_keys_input_fed.insert(user_id.to_owned(), keys.clone());
} }
( (
server, server,
tokio::time::timeout( services()
Duration::from_secs(25), .sending
services().sending.send_federation_request( .send_federation_request(
server, server,
federation::keys::get_keys::v1::Request { federation::keys::get_keys::v1::Request {
device_keys: device_keys_input_fed, device_keys: device_keys_input_fed,
}, },
),
) )
.await .await,
.map_err(|_e| Error::BadServerResponse("Query took too long")),
) )
}) })
.collect(); .collect();
while let Some((server, response)) = futures.next().await { while let Some((server, response)) = futures.next().await {
match response { match response {
Ok(Ok(response)) => { Ok(response) => {
for (user, masterkey) in response.master_keys { for (user, masterkey) in response.master_keys {
let (master_key_id, mut master_key) = let (master_key_id, mut master_key) =
services().users.parse_master_key(&user, &masterkey)?; services().users.parse_master_key(&user, &masterkey)?;
@ -427,8 +386,7 @@ pub(crate) async fn get_keys_helper<F: Fn(&UserId) -> bool>(
self_signing_keys.extend(response.self_signing_keys); self_signing_keys.extend(response.self_signing_keys);
device_keys.extend(response.device_keys); device_keys.extend(response.device_keys);
} }
_ => { Err(_e) => {
back_off(server.to_owned());
failures.insert(server.to_string(), json!({})); failures.insert(server.to_string(), json!({}));
} }
} }

View file

@ -51,7 +51,7 @@ pub async fn create_content_route(
.await?; .await?;
Ok(create_content::v3::Response { Ok(create_content::v3::Response {
content_uri: mxc.into(), content_uri: mxc.try_into().expect("Invalid mxc:// URI"),
blurhash: None, blurhash: None,
}) })
} }

View file

@ -64,12 +64,7 @@ pub async fn join_room_by_id_route(
.map(|user| user.server_name().to_owned()), .map(|user| user.server_name().to_owned()),
); );
servers.push( servers.push(body.room_id.server_name().to_owned());
body.room_id
.server_name()
.expect("Room IDs should always have a server name")
.into(),
);
join_room_by_id_helper( join_room_by_id_helper(
body.sender_user.as_deref(), body.sender_user.as_deref(),
@ -110,12 +105,7 @@ pub async fn join_room_by_id_or_alias_route(
.map(|user| user.server_name().to_owned()), .map(|user| user.server_name().to_owned()),
); );
servers.push( servers.push(room_id.server_name().to_owned());
room_id
.server_name()
.expect("Room IDs should always have a server name")
.into(),
);
(servers, room_id) (servers, room_id)
} }
@ -410,7 +400,7 @@ pub async fn get_member_events_route(
if !services() if !services()
.rooms .rooms
.state_accessor .state_accessor
.user_can_see_state_events(sender_user, &body.room_id)? .user_can_see_state_events(&sender_user, &body.room_id)?
{ {
return Err(Error::BadRequest( return Err(Error::BadRequest(
ErrorKind::Forbidden, ErrorKind::Forbidden,
@ -445,7 +435,7 @@ pub async fn joined_members_route(
if !services() if !services()
.rooms .rooms
.state_accessor .state_accessor
.user_can_see_state_events(sender_user, &body.room_id)? .user_can_see_state_events(&sender_user, &body.room_id)?
{ {
return Err(Error::BadRequest( return Err(Error::BadRequest(
ErrorKind::Forbidden, ErrorKind::Forbidden,
@ -629,7 +619,7 @@ async fn join_room_by_id_helper(
)); ));
} }
match signed_value["signatures"] if let Ok(signature) = signed_value["signatures"]
.as_object() .as_object()
.ok_or(Error::BadRequest( .ok_or(Error::BadRequest(
ErrorKind::InvalidParam, ErrorKind::InvalidParam,
@ -640,22 +630,20 @@ async fn join_room_by_id_helper(
ErrorKind::InvalidParam, ErrorKind::InvalidParam,
"Server did not send its signature", "Server did not send its signature",
)) ))
}) { })
Ok(signature) => { {
join_event join_event
.get_mut("signatures") .get_mut("signatures")
.expect("we created a valid pdu") .expect("we created a valid pdu")
.as_object_mut() .as_object_mut()
.expect("we created a valid pdu") .expect("we created a valid pdu")
.insert(remote_server.to_string(), signature.clone()); .insert(remote_server.to_string(), signature.clone());
} } else {
Err(e) => {
warn!( warn!(
"Server {remote_server} sent invalid signature in sendjoin signatures for event {signed_value:?}: {e:?}", "Server {remote_server} sent invalid signature in sendjoin signatures for event {signed_value:?}",
); );
} }
} }
}
services().rooms.short.get_or_create_shortroomid(room_id)?; services().rooms.short.get_or_create_shortroomid(room_id)?;
@ -722,7 +710,7 @@ async fn join_room_by_id_helper(
} }
info!("Running send_join auth check"); info!("Running send_join auth check");
let authenticated = state_res::event_auth::auth_check( if !state_res::event_auth::auth_check(
&state_res::RoomVersion::new(&room_version_id).expect("room version is supported"), &state_res::RoomVersion::new(&room_version_id).expect("room version is supported"),
&parsed_join_pdu, &parsed_join_pdu,
None::<PduEvent>, // TODO: third party invite None::<PduEvent>, // TODO: third party invite
@ -745,9 +733,7 @@ async fn join_room_by_id_helper(
.map_err(|e| { .map_err(|e| {
warn!("Auth check failed: {e}"); warn!("Auth check failed: {e}");
Error::BadRequest(ErrorKind::InvalidParam, "Auth check failed") Error::BadRequest(ErrorKind::InvalidParam, "Auth check failed")
})?; })? {
if !authenticated {
return Err(Error::BadRequest( return Err(Error::BadRequest(
ErrorKind::InvalidParam, ErrorKind::InvalidParam,
"Auth check failed", "Auth check failed",
@ -1376,7 +1362,7 @@ pub async fn leave_all_rooms(user_id: &UserId) -> Result<()> {
pub async fn leave_room(user_id: &UserId, room_id: &RoomId, reason: Option<String>) -> Result<()> { pub async fn leave_room(user_id: &UserId, room_id: &RoomId, reason: Option<String>) -> Result<()> {
// Ask a remote server if we don't have this room // Ask a remote server if we don't have this room
if !services().rooms.metadata.exists(room_id)? if !services().rooms.metadata.exists(room_id)?
&& room_id.server_name() != Some(services().globals.server_name()) && room_id.server_name() != services().globals.server_name()
{ {
if let Err(e) = remote_leave_room(user_id, room_id).await { if let Err(e) = remote_leave_room(user_id, room_id).await {
warn!("Failed to leave room {} remotely: {}", user_id, e); warn!("Failed to leave room {} remotely: {}", user_id, e);

View file

@ -124,7 +124,7 @@ pub async fn get_message_events_route(
let to = body let to = body
.to .to
.as_ref() .as_ref()
.and_then(|t| PduCount::try_from_string(t).ok()); .and_then(|t| PduCount::try_from_string(&t).ok());
services().rooms.lazy_loading.lazy_load_confirm_delivery( services().rooms.lazy_loading.lazy_load_confirm_delivery(
sender_user, sender_user,

View file

@ -1,8 +1,5 @@
use crate::{services, utils, Error, Result, Ruma}; use crate::{services, utils, Result, Ruma};
use ruma::api::client::{ use ruma::api::client::presence::{get_presence, set_presence};
error::ErrorKind,
presence::{get_presence, set_presence},
};
use std::time::Duration; use std::time::Duration;
/// # `PUT /_matrix/client/r0/presence/{userId}/status` /// # `PUT /_matrix/client/r0/presence/{userId}/status`
@ -82,9 +79,6 @@ pub async fn get_presence_route(
presence: presence.content.presence, presence: presence.content.presence,
}) })
} else { } else {
Err(Error::BadRequest( todo!();
ErrorKind::NotFound,
"Presence state for this user was not found",
))
} }
} }

View file

@ -34,7 +34,6 @@ pub async fn redact_event_route(
PduBuilder { PduBuilder {
event_type: TimelineEventType::RoomRedaction, event_type: TimelineEventType::RoomRedaction,
content: to_raw_value(&RoomRedactionEventContent { content: to_raw_value(&RoomRedactionEventContent {
redacts: Some(body.event_id.clone()),
reason: body.reason.clone(), reason: body.reason.clone(),
}) })
.expect("event is valid, we just created it"), .expect("event is valid, we just created it"),

View file

@ -23,7 +23,7 @@ pub async fn get_relating_events_with_rel_type_and_event_type_route(
let to = body let to = body
.to .to
.as_ref() .as_ref()
.and_then(|t| PduCount::try_from_string(t).ok()); .and_then(|t| PduCount::try_from_string(&t).ok());
// Use limit or else 10, with maximum 100 // Use limit or else 10, with maximum 100
let limit = body let limit = body
@ -73,7 +73,7 @@ pub async fn get_relating_events_with_rel_type_route(
let to = body let to = body
.to .to
.as_ref() .as_ref()
.and_then(|t| PduCount::try_from_string(t).ok()); .and_then(|t| PduCount::try_from_string(&t).ok());
// Use limit or else 10, with maximum 100 // Use limit or else 10, with maximum 100
let limit = body let limit = body
@ -121,7 +121,7 @@ pub async fn get_relating_events_route(
let to = body let to = body
.to .to
.as_ref() .as_ref()
.and_then(|t| PduCount::try_from_string(t).ok()); .and_then(|t| PduCount::try_from_string(&t).ok());
// Use limit or else 10, with maximum 100 // Use limit or else 10, with maximum 100
let limit = body let limit = body

View file

@ -142,9 +142,8 @@ pub async fn create_room_route(
content content
} }
None => { None => {
// TODO: Add correct value for v11
let mut content = serde_json::from_str::<CanonicalJsonObject>( let mut content = serde_json::from_str::<CanonicalJsonObject>(
to_raw_value(&RoomCreateEventContent::new_v1(sender_user.clone())) to_raw_value(&RoomCreateEventContent::new(sender_user.clone()))
.map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Invalid creation content"))? .map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Invalid creation content"))?
.get(), .get(),
) )
@ -366,7 +365,7 @@ pub async fn create_room_route(
services().rooms.timeline.build_and_append_pdu( services().rooms.timeline.build_and_append_pdu(
PduBuilder { PduBuilder {
event_type: TimelineEventType::RoomName, event_type: TimelineEventType::RoomName,
content: to_raw_value(&RoomNameEventContent::new(name.clone())) content: to_raw_value(&RoomNameEventContent::new(Some(name.clone())))
.expect("event is valid, we just created it"), .expect("event is valid, we just created it"),
unsigned: None, unsigned: None,
state_key: Some("".to_owned()), state_key: Some("".to_owned()),

View file

@ -42,31 +42,24 @@ pub async fn get_login_types_route(
/// Note: You can use [`GET /_matrix/client/r0/login`](fn.get_supported_versions_route.html) to see /// Note: You can use [`GET /_matrix/client/r0/login`](fn.get_supported_versions_route.html) to see
/// supported login types. /// supported login types.
pub async fn login_route(body: Ruma<login::v3::Request>) -> Result<login::v3::Response> { pub async fn login_route(body: Ruma<login::v3::Request>) -> Result<login::v3::Response> {
// To allow deprecated login methods
#![allow(deprecated)]
// Validate login method // Validate login method
// TODO: Other login methods // TODO: Other login methods
let user_id = match &body.login_info { let user_id = match &body.login_info {
login::v3::LoginInfo::Password(login::v3::Password { login::v3::LoginInfo::Password(login::v3::Password {
identifier, identifier,
password, password,
user,
address: _,
medium: _,
}) => { }) => {
let user_id = if let Some(UserIdentifier::UserIdOrLocalpart(user_id)) = identifier { let username = if let UserIdentifier::UserIdOrLocalpart(user_id) = identifier {
UserId::parse_with_server_name( user_id.to_lowercase()
user_id.to_lowercase(),
services().globals.server_name(),
)
} else if let Some(user) = user {
UserId::parse(user)
} else { } else {
warn!("Bad login type: {:?}", &body.login_info); warn!("Bad login type: {:?}", &body.login_info);
return Err(Error::BadRequest(ErrorKind::Forbidden, "Bad login type.")); return Err(Error::BadRequest(ErrorKind::Forbidden, "Bad login type."));
} };
.map_err(|_| Error::BadRequest(ErrorKind::InvalidUsername, "Username is invalid."))?; let user_id =
UserId::parse_with_server_name(username, services().globals.server_name())
.map_err(|_| {
Error::BadRequest(ErrorKind::InvalidUsername, "Username is invalid.")
})?;
let hash = services() let hash = services()
.users .users
.password_hash(&user_id)? .password_hash(&user_id)?
@ -112,28 +105,24 @@ pub async fn login_route(body: Ruma<login::v3::Request>) -> Result<login::v3::Re
)); ));
} }
} }
login::v3::LoginInfo::ApplicationService(login::v3::ApplicationService { login::v3::LoginInfo::ApplicationService(login::v3::ApplicationService { identifier }) => {
identifier,
user,
}) => {
if !body.from_appservice { if !body.from_appservice {
return Err(Error::BadRequest( return Err(Error::BadRequest(
ErrorKind::Forbidden, ErrorKind::Forbidden,
"Forbidden login type.", "Forbidden login type.",
)); ));
}; };
if let Some(UserIdentifier::UserIdOrLocalpart(user_id)) = identifier { let username = if let UserIdentifier::UserIdOrLocalpart(user_id) = identifier {
UserId::parse_with_server_name( user_id.to_lowercase()
user_id.to_lowercase(),
services().globals.server_name(),
)
} else if let Some(user) = user {
UserId::parse(user)
} else { } else {
warn!("Bad login type: {:?}", &body.login_info);
return Err(Error::BadRequest(ErrorKind::Forbidden, "Bad login type.")); return Err(Error::BadRequest(ErrorKind::Forbidden, "Bad login type."));
} };
.map_err(|_| Error::BadRequest(ErrorKind::InvalidUsername, "Username is invalid."))? let user_id =
UserId::parse_with_server_name(username, services().globals.server_name())
.map_err(|_| {
Error::BadRequest(ErrorKind::InvalidUsername, "Username is invalid.")
})?;
user_id
} }
_ => { _ => {
warn!("Unsupported or unknown login type: {:?}", &body.login_info); warn!("Unsupported or unknown login type: {:?}", &body.login_info);
@ -174,8 +163,6 @@ pub async fn login_route(body: Ruma<login::v3::Request>) -> Result<login::v3::Re
info!("{} logged in", user_id); info!("{} logged in", user_id);
// Homeservers are still required to send the `home_server` field
#[allow(deprecated)]
Ok(login::v3::Response { Ok(login::v3::Response {
user_id, user_id,
access_token: token, access_token: token,

View file

@ -85,7 +85,7 @@ pub async fn get_state_events_route(
if !services() if !services()
.rooms .rooms
.state_accessor .state_accessor
.user_can_see_state_events(sender_user, &body.room_id)? .user_can_see_state_events(&sender_user, &body.room_id)?
{ {
return Err(Error::BadRequest( return Err(Error::BadRequest(
ErrorKind::Forbidden, ErrorKind::Forbidden,
@ -118,7 +118,7 @@ pub async fn get_state_events_for_key_route(
if !services() if !services()
.rooms .rooms
.state_accessor .state_accessor
.user_can_see_state_events(sender_user, &body.room_id)? .user_can_see_state_events(&sender_user, &body.room_id)?
{ {
return Err(Error::BadRequest( return Err(Error::BadRequest(
ErrorKind::Forbidden, ErrorKind::Forbidden,
@ -157,7 +157,7 @@ pub async fn get_state_events_for_empty_key_route(
if !services() if !services()
.rooms .rooms
.state_accessor .state_accessor
.user_can_see_state_events(sender_user, &body.room_id)? .user_can_see_state_events(&sender_user, &body.room_id)?
{ {
return Err(Error::BadRequest( return Err(Error::BadRequest(
ErrorKind::Forbidden, ErrorKind::Forbidden,

View file

@ -20,7 +20,7 @@ use ruma::{
StateEventType, TimelineEventType, StateEventType, TimelineEventType,
}, },
serde::Raw, serde::Raw,
uint, DeviceId, JsOption, OwnedDeviceId, OwnedUserId, RoomId, UInt, UserId, uint, DeviceId, OwnedDeviceId, OwnedUserId, RoomId, UInt, UserId,
}; };
use std::{ use std::{
collections::{hash_map::Entry, BTreeMap, BTreeSet, HashMap, HashSet}, collections::{hash_map::Entry, BTreeMap, BTreeSet, HashMap, HashSet},
@ -554,7 +554,6 @@ async fn sync_helper(
} }
} }
#[allow(clippy::too_many_arguments)]
async fn load_joined_room( async fn load_joined_room(
sender_user: &UserId, sender_user: &UserId,
sender_device: &DeviceId, sender_device: &DeviceId,
@ -591,7 +590,7 @@ async fn load_joined_room(
|| services() || services()
.rooms .rooms
.user .user
.last_notification_read(sender_user, room_id)? .last_notification_read(&sender_user, &room_id)?
> since; > since;
let mut timeline_users = HashSet::new(); let mut timeline_users = HashSet::new();
@ -600,16 +599,16 @@ async fn load_joined_room(
} }
services().rooms.lazy_loading.lazy_load_confirm_delivery( services().rooms.lazy_loading.lazy_load_confirm_delivery(
sender_user, &sender_user,
sender_device, &sender_device,
room_id, &room_id,
sincecount, sincecount,
)?; )?;
// Database queries: // Database queries:
let current_shortstatehash = let current_shortstatehash =
if let Some(s) = services().rooms.state.get_room_shortstatehash(room_id)? { if let Some(s) = services().rooms.state.get_room_shortstatehash(&room_id)? {
s s
} else { } else {
error!("Room {} has no state", room_id); error!("Room {} has no state", room_id);
@ -619,7 +618,7 @@ async fn load_joined_room(
let since_shortstatehash = services() let since_shortstatehash = services()
.rooms .rooms
.user .user
.get_token_shortstatehash(room_id, since)?; .get_token_shortstatehash(&room_id, since)?;
let (heroes, joined_member_count, invited_member_count, joined_since_last_sync, state_events) = let (heroes, joined_member_count, invited_member_count, joined_since_last_sync, state_events) =
if timeline_pdus.is_empty() && since_shortstatehash == Some(current_shortstatehash) { if timeline_pdus.is_empty() && since_shortstatehash == Some(current_shortstatehash) {
@ -631,12 +630,12 @@ async fn load_joined_room(
let joined_member_count = services() let joined_member_count = services()
.rooms .rooms
.state_cache .state_cache
.room_joined_count(room_id)? .room_joined_count(&room_id)?
.unwrap_or(0); .unwrap_or(0);
let invited_member_count = services() let invited_member_count = services()
.rooms .rooms
.state_cache .state_cache
.room_invited_count(room_id)? .room_invited_count(&room_id)?
.unwrap_or(0); .unwrap_or(0);
// Recalculate heroes (first 5 members) // Recalculate heroes (first 5 members)
@ -649,7 +648,7 @@ async fn load_joined_room(
for hero in services() for hero in services()
.rooms .rooms
.timeline .timeline
.all_pdus(sender_user, room_id)? .all_pdus(&sender_user, &room_id)?
.filter_map(|pdu| pdu.ok()) // Ignore all broken pdus .filter_map(|pdu| pdu.ok()) // Ignore all broken pdus
.filter(|(_, pdu)| pdu.kind == TimelineEventType::RoomMember) .filter(|(_, pdu)| pdu.kind == TimelineEventType::RoomMember)
.map(|(_, pdu)| { .map(|(_, pdu)| {
@ -670,11 +669,11 @@ async fn load_joined_room(
) && (services() ) && (services()
.rooms .rooms
.state_cache .state_cache
.is_joined(&user_id, room_id)? .is_joined(&user_id, &room_id)?
|| services() || services()
.rooms .rooms
.state_cache .state_cache
.is_invited(&user_id, room_id)?) .is_invited(&user_id, &room_id)?)
{ {
Ok::<_, Error>(Some(state_key.clone())) Ok::<_, Error>(Some(state_key.clone()))
} else { } else {
@ -790,17 +789,17 @@ async fn load_joined_room(
// Reset lazy loading because this is an initial sync // Reset lazy loading because this is an initial sync
services().rooms.lazy_loading.lazy_load_reset( services().rooms.lazy_loading.lazy_load_reset(
sender_user, &sender_user,
sender_device, &sender_device,
room_id, &room_id,
)?; )?;
// The state_events above should contain all timeline_users, let's mark them as lazy // The state_events above should contain all timeline_users, let's mark them as lazy
// loaded. // loaded.
services().rooms.lazy_loading.lazy_load_mark_sent( services().rooms.lazy_loading.lazy_load_mark_sent(
sender_user, &sender_user,
sender_device, &sender_device,
room_id, &room_id,
lazy_loaded, lazy_loaded,
next_batchcount, next_batchcount,
); );
@ -867,14 +866,14 @@ async fn load_joined_room(
} }
if !services().rooms.lazy_loading.lazy_load_was_sent_before( if !services().rooms.lazy_loading.lazy_load_was_sent_before(
sender_user, &sender_user,
sender_device, &sender_device,
room_id, &room_id,
&event.sender, &event.sender,
)? || lazy_load_send_redundant )? || lazy_load_send_redundant
{ {
if let Some(member_event) = services().rooms.state_accessor.room_state_get( if let Some(member_event) = services().rooms.state_accessor.room_state_get(
room_id, &room_id,
&StateEventType::RoomMember, &StateEventType::RoomMember,
event.sender.as_str(), event.sender.as_str(),
)? { )? {
@ -885,9 +884,9 @@ async fn load_joined_room(
} }
services().rooms.lazy_loading.lazy_load_mark_sent( services().rooms.lazy_loading.lazy_load_mark_sent(
sender_user, &sender_user,
sender_device, &sender_device,
room_id, &room_id,
lazy_loaded, lazy_loaded,
next_batchcount, next_batchcount,
); );
@ -935,7 +934,7 @@ async fn load_joined_room(
match new_membership { match new_membership {
MembershipState::Join => { MembershipState::Join => {
// A new user joined an encrypted room // A new user joined an encrypted room
if !share_encrypted_room(sender_user, &user_id, room_id)? { if !share_encrypted_room(&sender_user, &user_id, &room_id)? {
device_list_updates.insert(user_id); device_list_updates.insert(user_id);
} }
} }
@ -955,15 +954,15 @@ async fn load_joined_room(
services() services()
.rooms .rooms
.state_cache .state_cache
.room_members(room_id) .room_members(&room_id)
.flatten() .flatten()
.filter(|user_id| { .filter(|user_id| {
// Don't send key updates from the sender to the sender // Don't send key updates from the sender to the sender
sender_user != user_id &sender_user != user_id
}) })
.filter(|user_id| { .filter(|user_id| {
// Only send keys if the sender doesn't share an encrypted room with the target already // Only send keys if the sender doesn't share an encrypted room with the target already
!share_encrypted_room(sender_user, user_id, room_id) !share_encrypted_room(&sender_user, user_id, &room_id)
.unwrap_or(false) .unwrap_or(false)
}), }),
); );
@ -998,7 +997,7 @@ async fn load_joined_room(
services() services()
.rooms .rooms
.user .user
.notification_count(sender_user, room_id)? .notification_count(&sender_user, &room_id)?
.try_into() .try_into()
.expect("notification count can't go that high"), .expect("notification count can't go that high"),
) )
@ -1011,7 +1010,7 @@ async fn load_joined_room(
services() services()
.rooms .rooms
.user .user
.highlight_count(sender_user, room_id)? .highlight_count(&sender_user, &room_id)?
.try_into() .try_into()
.expect("highlight count can't go that high"), .expect("highlight count can't go that high"),
) )
@ -1040,15 +1039,15 @@ async fn load_joined_room(
.rooms .rooms
.edus .edus
.read_receipt .read_receipt
.readreceipts_since(room_id, since) .readreceipts_since(&room_id, since)
.filter_map(|r| r.ok()) // Filter out buggy events .filter_map(|r| r.ok()) // Filter out buggy events
.map(|(_, _, v)| v) .map(|(_, _, v)| v)
.collect(); .collect();
if services().rooms.edus.typing.last_typing_update(room_id)? > since { if services().rooms.edus.typing.last_typing_update(&room_id)? > since {
edus.push( edus.push(
serde_json::from_str( serde_json::from_str(
&serde_json::to_string(&services().rooms.edus.typing.typings_all(room_id)?) &serde_json::to_string(&services().rooms.edus.typing.typings_all(&room_id)?)
.expect("event is valid, we just created it"), .expect("event is valid, we just created it"),
) )
.expect("event is valid, we just created it"), .expect("event is valid, we just created it"),
@ -1057,7 +1056,7 @@ async fn load_joined_room(
// Save the state after this sync so we can send the correct state diff next sync // Save the state after this sync so we can send the correct state diff next sync
services().rooms.user.associate_token_shortstatehash( services().rooms.user.associate_token_shortstatehash(
room_id, &room_id,
next_batch, next_batch,
current_shortstatehash, current_shortstatehash,
)?; )?;
@ -1066,7 +1065,7 @@ async fn load_joined_room(
account_data: RoomAccountData { account_data: RoomAccountData {
events: services() events: services()
.account_data .account_data
.changes_since(Some(room_id), sender_user, since)? .changes_since(Some(&room_id), &sender_user, since)?
.into_iter() .into_iter()
.filter_map(|(_, v)| { .filter_map(|(_, v)| {
serde_json::from_str(v.json().get()) serde_json::from_str(v.json().get())
@ -1103,7 +1102,7 @@ async fn load_joined_room(
fn load_timeline( fn load_timeline(
sender_user: &UserId, sender_user: &UserId,
room_id: &RoomId, room_id: &RoomId,
roomsincecount: PduCount, sincecount: PduCount,
limit: u64, limit: u64,
) -> Result<(Vec<(PduCount, PduEvent)>, bool), Error> { ) -> Result<(Vec<(PduCount, PduEvent)>, bool), Error> {
let timeline_pdus; let timeline_pdus;
@ -1111,13 +1110,13 @@ fn load_timeline(
if services() if services()
.rooms .rooms
.timeline .timeline
.last_timeline_count(sender_user, room_id)? .last_timeline_count(&sender_user, &room_id)?
> roomsincecount > sincecount
{ {
let mut non_timeline_pdus = services() let mut non_timeline_pdus = services()
.rooms .rooms
.timeline .timeline
.pdus_until(sender_user, room_id, PduCount::max())? .pdus_until(&sender_user, &room_id, PduCount::max())?
.filter_map(|r| { .filter_map(|r| {
// Filter out buggy events // Filter out buggy events
if r.is_err() { if r.is_err() {
@ -1125,7 +1124,7 @@ fn load_timeline(
} }
r.ok() r.ok()
}) })
.take_while(|(pducount, _)| pducount > &roomsincecount); .take_while(|(pducount, _)| pducount > &sincecount);
// Take the last events for the timeline // Take the last events for the timeline
timeline_pdus = non_timeline_pdus timeline_pdus = non_timeline_pdus
@ -1179,15 +1178,16 @@ pub async fn sync_events_v4_route(
// Setup watchers, so if there's no response, we can wait for them // Setup watchers, so if there's no response, we can wait for them
let watcher = services().globals.watch(&sender_user, &sender_device); let watcher = services().globals.watch(&sender_user, &sender_device);
let next_batch = services().globals.next_count()?; let next_batch = services().globals.current_count()?;
let globalsince = body let since = body
.pos .pos
.as_ref() .as_ref()
.and_then(|string| string.parse().ok()) .and_then(|string| string.parse().ok())
.unwrap_or(0); .unwrap_or(0);
let sincecount = PduCount::Normal(since);
if globalsince == 0 { if since == 0 {
if let Some(conn_id) = &body.conn_id { if let Some(conn_id) = &body.conn_id {
services().users.forget_sync_request_connection( services().users.forget_sync_request_connection(
sender_user.clone(), sender_user.clone(),
@ -1214,7 +1214,7 @@ pub async fn sync_events_v4_route(
if body.extensions.to_device.enabled.unwrap_or(false) { if body.extensions.to_device.enabled.unwrap_or(false) {
services() services()
.users .users
.remove_to_device_events(&sender_user, &sender_device, globalsince)?; .remove_to_device_events(&sender_user, &sender_device, since)?;
} }
let mut left_encrypted_users = HashSet::new(); // Users that have left any encrypted rooms the sender was in let mut left_encrypted_users = HashSet::new(); // Users that have left any encrypted rooms the sender was in
@ -1226,13 +1226,13 @@ pub async fn sync_events_v4_route(
device_list_changes.extend( device_list_changes.extend(
services() services()
.users .users
.keys_changed(sender_user.as_ref(), globalsince, None) .keys_changed(sender_user.as_ref(), since, None)
.filter_map(|r| r.ok()), .filter_map(|r| r.ok()),
); );
for room_id in &all_joined_rooms { for room_id in &all_joined_rooms {
let current_shortstatehash = let current_shortstatehash =
if let Some(s) = services().rooms.state.get_room_shortstatehash(room_id)? { if let Some(s) = services().rooms.state.get_room_shortstatehash(&room_id)? {
s s
} else { } else {
error!("Room {} has no state", room_id); error!("Room {} has no state", room_id);
@ -1242,7 +1242,7 @@ pub async fn sync_events_v4_route(
let since_shortstatehash = services() let since_shortstatehash = services()
.rooms .rooms
.user .user
.get_token_shortstatehash(room_id, globalsince)?; .get_token_shortstatehash(&room_id, since)?;
let since_sender_member: Option<RoomMemberEventContent> = since_shortstatehash let since_sender_member: Option<RoomMemberEventContent> = since_shortstatehash
.and_then(|shortstatehash| { .and_then(|shortstatehash| {
@ -1331,7 +1331,7 @@ pub async fn sync_events_v4_route(
if !share_encrypted_room( if !share_encrypted_room(
&sender_user, &sender_user,
&user_id, &user_id,
room_id, &room_id,
)? { )? {
device_list_changes.insert(user_id); device_list_changes.insert(user_id);
} }
@ -1352,7 +1352,7 @@ pub async fn sync_events_v4_route(
services() services()
.rooms .rooms
.state_cache .state_cache
.room_members(room_id) .room_members(&room_id)
.flatten() .flatten()
.filter(|user_id| { .filter(|user_id| {
// Don't send key updates from the sender to the sender // Don't send key updates from the sender to the sender
@ -1360,7 +1360,7 @@ pub async fn sync_events_v4_route(
}) })
.filter(|user_id| { .filter(|user_id| {
// Only send keys if the sender doesn't share an encrypted room with the target already // Only send keys if the sender doesn't share an encrypted room with the target already
!share_encrypted_room(&sender_user, user_id, room_id) !share_encrypted_room(&sender_user, user_id, &room_id)
.unwrap_or(false) .unwrap_or(false)
}), }),
); );
@ -1371,7 +1371,7 @@ pub async fn sync_events_v4_route(
device_list_changes.extend( device_list_changes.extend(
services() services()
.users .users
.keys_changed(room_id.as_ref(), globalsince, None) .keys_changed(room_id.as_ref(), since, None)
.filter_map(|r| r.ok()), .filter_map(|r| r.ok()),
); );
} }
@ -1408,7 +1408,7 @@ pub async fn sync_events_v4_route(
continue; continue;
} }
let mut new_known_rooms = BTreeSet::new(); let mut new_known_rooms = BTreeMap::new();
lists.insert( lists.insert(
list_id.clone(), list_id.clone(),
@ -1424,12 +1424,12 @@ pub async fn sync_events_v4_route(
let room_ids = all_joined_rooms let room_ids = all_joined_rooms
[(u64::from(r.0) as usize)..=(u64::from(r.1) as usize)] [(u64::from(r.0) as usize)..=(u64::from(r.1) as usize)]
.to_vec(); .to_vec();
new_known_rooms.extend(room_ids.iter().cloned()); new_known_rooms.extend(room_ids.iter().cloned().map(|r| (r, true)));
for room_id in &room_ids { for room_id in &room_ids {
let todo_room = todo_rooms.entry(room_id.clone()).or_insert(( let todo_room = todo_rooms.entry(room_id.clone()).or_insert((
BTreeSet::new(), BTreeSet::new(),
0, 0,
u64::MAX, true,
)); ));
let limit = list let limit = list
.room_details .room_details
@ -1440,18 +1440,14 @@ pub async fn sync_events_v4_route(
.0 .0
.extend(list.room_details.required_state.iter().cloned()); .extend(list.room_details.required_state.iter().cloned());
todo_room.1 = todo_room.1.max(limit); todo_room.1 = todo_room.1.max(limit);
// 0 means unknown because it got out of date if known_rooms.get(&list_id).and_then(|k| k.get(room_id)) != Some(&true)
todo_room.2 = todo_room.2.min( {
known_rooms todo_room.2 = false;
.get(&list_id) }
.and_then(|k| k.get(room_id))
.copied()
.unwrap_or(0),
);
} }
sync_events::v4::SyncOp { sync_events::v4::SyncOp {
op: SlidingOp::Sync, op: SlidingOp::Sync,
range: Some(r), range: Some(r.clone()),
index: None, index: None,
room_ids, room_ids,
room_id: None, room_id: None,
@ -1469,31 +1465,26 @@ pub async fn sync_events_v4_route(
conn_id.clone(), conn_id.clone(),
list_id, list_id,
new_known_rooms, new_known_rooms,
globalsince,
); );
} }
} }
let mut known_subscription_rooms = BTreeSet::new(); let mut known_subscription_rooms = BTreeMap::new();
for (room_id, room) in &body.room_subscriptions { for (room_id, room) in &body.room_subscriptions {
if !services().rooms.metadata.exists(room_id)? {
continue;
}
let todo_room = todo_rooms let todo_room = todo_rooms
.entry(room_id.clone()) .entry(room_id.clone())
.or_insert((BTreeSet::new(), 0, u64::MAX)); .or_insert((BTreeSet::new(), 0, true));
let limit = room.timeline_limit.map_or(10, u64::from).min(100); let limit = room.timeline_limit.map_or(10, u64::from).min(100);
todo_room.0.extend(room.required_state.iter().cloned()); todo_room.0.extend(room.required_state.iter().cloned());
todo_room.1 = todo_room.1.max(limit); todo_room.1 = todo_room.1.max(limit);
// 0 means unknown because it got out of date if known_rooms
todo_room.2 = todo_room.2.min(
known_rooms
.get("subscriptions") .get("subscriptions")
.and_then(|k| k.get(room_id)) .and_then(|k| k.get(room_id))
.copied() != Some(&true)
.unwrap_or(0), {
); todo_room.2 = false;
known_subscription_rooms.insert(room_id.clone()); }
known_subscription_rooms.insert(room_id.clone(), true);
} }
for r in body.unsubscribe_rooms { for r in body.unsubscribe_rooms {
@ -1508,7 +1499,6 @@ pub async fn sync_events_v4_route(
conn_id.clone(), conn_id.clone(),
"subscriptions".to_owned(), "subscriptions".to_owned(),
known_subscription_rooms, known_subscription_rooms,
globalsince,
); );
} }
@ -1522,13 +1512,12 @@ pub async fn sync_events_v4_route(
} }
let mut rooms = BTreeMap::new(); let mut rooms = BTreeMap::new();
for (room_id, (required_state_request, timeline_limit, roomsince)) in &todo_rooms { for (room_id, (required_state_request, timeline_limit, known)) in &todo_rooms {
let roomsincecount = PduCount::Normal(*roomsince); // TODO: per-room sync tokens
let (timeline_pdus, limited) = let (timeline_pdus, limited) =
load_timeline(&sender_user, room_id, roomsincecount, *timeline_limit)?; load_timeline(&sender_user, &room_id, sincecount, *timeline_limit)?;
if roomsince != &0 && timeline_pdus.is_empty() { if *known && timeline_pdus.is_empty() {
continue; continue;
} }
@ -1544,8 +1533,8 @@ pub async fn sync_events_v4_route(
})) }))
})? })?
.or_else(|| { .or_else(|| {
if roomsince != &0 { if since != 0 {
Some(roomsince.to_string()) Some(since.to_string())
} else { } else {
None None
} }
@ -1558,31 +1547,30 @@ pub async fn sync_events_v4_route(
let required_state = required_state_request let required_state = required_state_request
.iter() .iter()
.flat_map(|state| { .map(|state| {
services() services()
.rooms .rooms
.state_accessor .state_accessor
.room_state_get(room_id, &state.0, &state.1) .room_state_get(&room_id, &state.0, &state.1)
.ok()
.flatten()
.map(|state| state.to_sync_state_event())
}) })
.filter_map(|r| r.ok())
.filter_map(|o| o)
.map(|state| state.to_sync_state_event())
.collect(); .collect();
// Heroes // Heroes
let heroes = services() let heroes = services()
.rooms .rooms
.state_cache .state_cache
.room_members(room_id) .room_members(&room_id)
.filter_map(|r| r.ok()) .filter_map(|r| r.ok())
.filter(|member| member != &sender_user) .filter(|member| member != &sender_user)
.flat_map(|member| { .map(|member| {
Ok::<_, Error>(
services() services()
.rooms .rooms
.state_accessor .state_accessor
.get_member(room_id, &member) .get_member(&room_id, &member)?
.ok()
.flatten()
.map(|memberevent| { .map(|memberevent| {
( (
memberevent memberevent
@ -1590,26 +1578,32 @@ pub async fn sync_events_v4_route(
.unwrap_or_else(|| member.to_string()), .unwrap_or_else(|| member.to_string()),
memberevent.avatar_url, memberevent.avatar_url,
) )
}),
)
}) })
}) .filter_map(|r| r.ok())
.filter_map(|o| o)
.take(5) .take(5)
.collect::<Vec<_>>(); .collect::<Vec<_>>();
let name = match &heroes[..] { let name = if heroes.len() > 1 {
[] => None, let last = heroes[0].0.clone();
[only] => Some(only.0.clone()), Some(
[firsts @ .., last] => Some( heroes[1..]
firsts
.iter() .iter()
.map(|h| h.0.clone()) .map(|h| h.0.clone())
.collect::<Vec<_>>() .collect::<Vec<_>>()
.join(", ") .join(", ")
+ " and " + " and "
+ &last.0, + &last,
), )
} else if heroes.len() == 1 {
Some(heroes[0].0.clone())
} else {
None
}; };
let avatar = if let [only] = &heroes[..] { let avatar = if heroes.len() == 1 {
only.1.clone() heroes[0].1.clone()
} else { } else {
None None
}; };
@ -1617,17 +1611,17 @@ pub async fn sync_events_v4_route(
rooms.insert( rooms.insert(
room_id.clone(), room_id.clone(),
sync_events::v4::SlidingSyncRoom { sync_events::v4::SlidingSyncRoom {
name: services().rooms.state_accessor.get_name(room_id)?.or(name), name: services()
avatar: if let Some(avatar) = avatar { .rooms
JsOption::Some(avatar) .state_accessor
} else { .get_name(&room_id)?
match services().rooms.state_accessor.get_avatar(room_id)? { .or_else(|| name),
JsOption::Some(avatar) => JsOption::from_option(avatar.url), avatar: services()
JsOption::Null => JsOption::Null, .rooms
JsOption::Undefined => JsOption::Undefined, .state_accessor
} .get_avatar(&room_id)?
}, .map_or(avatar, |a| a.url),
initial: Some(roomsince == &0), initial: Some(!known),
is_dm: None, is_dm: None,
invite_state: None, invite_state: None,
unread_notifications: UnreadNotificationsCount { unread_notifications: UnreadNotificationsCount {
@ -1635,7 +1629,7 @@ pub async fn sync_events_v4_route(
services() services()
.rooms .rooms
.user .user
.highlight_count(&sender_user, room_id)? .highlight_count(&sender_user, &room_id)?
.try_into() .try_into()
.expect("notification count can't go that high"), .expect("notification count can't go that high"),
), ),
@ -1643,7 +1637,7 @@ pub async fn sync_events_v4_route(
services() services()
.rooms .rooms
.user .user
.notification_count(&sender_user, room_id)? .notification_count(&sender_user, &room_id)?
.try_into() .try_into()
.expect("notification count can't go that high"), .expect("notification count can't go that high"),
), ),
@ -1656,7 +1650,7 @@ pub async fn sync_events_v4_route(
(services() (services()
.rooms .rooms
.state_cache .state_cache
.room_joined_count(room_id)? .room_joined_count(&room_id)?
.unwrap_or(0) as u32) .unwrap_or(0) as u32)
.into(), .into(),
), ),
@ -1664,12 +1658,11 @@ pub async fn sync_events_v4_route(
(services() (services()
.rooms .rooms
.state_cache .state_cache
.room_invited_count(room_id)? .room_invited_count(&room_id)?
.unwrap_or(0) as u32) .unwrap_or(0) as u32)
.into(), .into(),
), ),
num_live: None, // Count events in timeline greater than global sync counter num_live: None, // Count events in timeline greater than global sync counter
timestamp: None,
}, },
); );
} }
@ -1688,7 +1681,7 @@ pub async fn sync_events_v4_route(
} }
Ok(sync_events::v4::Response { Ok(sync_events::v4::Response {
initial: globalsince == 0, initial: since == 0,
txn_id: body.txn_id.clone(), txn_id: body.txn_id.clone(),
pos: next_batch.to_string(), pos: next_batch.to_string(),
lists, lists,
@ -1719,7 +1712,7 @@ pub async fn sync_events_v4_route(
global: if body.extensions.account_data.enabled.unwrap_or(false) { global: if body.extensions.account_data.enabled.unwrap_or(false) {
services() services()
.account_data .account_data
.changes_since(None, &sender_user, globalsince)? .changes_since(None, &sender_user, since)?
.into_iter() .into_iter()
.filter_map(|(_, v)| { .filter_map(|(_, v)| {
serde_json::from_str(v.json().get()) serde_json::from_str(v.json().get())

View file

@ -26,7 +26,6 @@ pub async fn get_supported_versions_route(
"v1.2".to_owned(), "v1.2".to_owned(),
"v1.3".to_owned(), "v1.3".to_owned(),
"v1.4".to_owned(), "v1.4".to_owned(),
"v1.5".to_owned(),
], ],
unstable_features: BTreeMap::from_iter([("org.matrix.e2e_cross_signing".to_owned(), true)]), unstable_features: BTreeMap::from_iter([("org.matrix.e2e_cross_signing".to_owned(), true)]),
}; };

View file

@ -341,7 +341,7 @@ fn add_port_to_hostname(destination_str: &str) -> FedDest {
} }
/// Returns: actual_destination, host header /// Returns: actual_destination, host header
/// Implemented according to the specification at <https://matrix.org/docs/spec/server_server/r0.1.4#resolving-server-names> /// Implemented according to the specification at https://matrix.org/docs/spec/server_server/r0.1.4#resolving-server-names
/// Numbers in comments below refer to bullet points in linked section of specification /// Numbers in comments below refer to bullet points in linked section of specification
async fn find_actual_destination(destination: &'_ ServerName) -> (FedDest, FedDest) { async fn find_actual_destination(destination: &'_ ServerName) -> (FedDest, FedDest) {
debug!("Finding actual destination for {destination}"); debug!("Finding actual destination for {destination}");
@ -666,7 +666,7 @@ pub fn parse_incoming_pdu(
let room_version_id = services().rooms.state.get_room_version(&room_id)?; let room_version_id = services().rooms.state.get_room_version(&room_id)?;
let (event_id, value) = match gen_event_id_canonical_json(pdu, &room_version_id) { let (event_id, value) = match gen_event_id_canonical_json(&pdu, &room_version_id) {
Ok(t) => t, Ok(t) => t,
Err(_) => { Err(_) => {
// Event could not be converted to canonical json // Event could not be converted to canonical json
@ -707,24 +707,7 @@ pub async fn send_transaction_message_route(
// let mut auth_cache = EventMap::new(); // let mut auth_cache = EventMap::new();
for pdu in &body.pdus { for pdu in &body.pdus {
let value: CanonicalJsonObject = serde_json::from_str(pdu.get()).map_err(|e| { let r = parse_incoming_pdu(&pdu);
warn!("Error parsing incoming event {:?}: {:?}", pdu, e);
Error::BadServerResponse("Invalid PDU in server response")
})?;
let room_id: OwnedRoomId = value
.get("room_id")
.and_then(|id| RoomId::parse(id.as_str()?).ok())
.ok_or(Error::BadRequest(
ErrorKind::InvalidParam,
"Invalid room id in pdu",
))?;
if services().rooms.state.get_room_version(&room_id).is_err() {
debug!("Server is not in room {room_id}");
continue;
}
let r = parse_incoming_pdu(pdu);
let (event_id, value, room_id) = match r { let (event_id, value, room_id) = match r {
Ok(t) => t, Ok(t) => t,
Err(e) => { Err(e) => {
@ -735,6 +718,11 @@ pub async fn send_transaction_message_route(
}; };
// We do not add the event_id field to the pdu here because of signature and hashes checks // We do not add the event_id field to the pdu here because of signature and hashes checks
services()
.rooms
.event_handler
.acl_check(sender_servername, &room_id)?;
let mutex = Arc::clone( let mutex = Arc::clone(
services() services()
.globals .globals
@ -992,7 +980,7 @@ pub async fn get_event_route(
if !services().rooms.state_accessor.server_can_see_event( if !services().rooms.state_accessor.server_can_see_event(
sender_servername, sender_servername,
room_id, &room_id,
&body.event_id, &body.event_id,
)? { )? {
return Err(Error::BadRequest( return Err(Error::BadRequest(
@ -1058,7 +1046,7 @@ pub async fn get_backfill_route(
let all_events = services() let all_events = services()
.rooms .rooms
.timeline .timeline
.pdus_until(user_id!("@doesntmatter:conduit.rs"), &body.room_id, until)? .pdus_until(&user_id!("@doesntmatter:conduit.rs"), &body.room_id, until)?
.take(limit.try_into().unwrap()); .take(limit.try_into().unwrap());
let events = all_events let events = all_events
@ -1075,7 +1063,7 @@ pub async fn get_backfill_route(
}) })
.map(|(_, pdu)| services().rooms.timeline.get_pdu_json(&pdu.event_id)) .map(|(_, pdu)| services().rooms.timeline.get_pdu_json(&pdu.event_id))
.filter_map(|r| r.ok().flatten()) .filter_map(|r| r.ok().flatten())
.map(PduEvent::convert_to_outgoing_federation_event) .map(|pdu| PduEvent::convert_to_outgoing_federation_event(pdu))
.collect(); .collect();
Ok(get_backfill::v1::Response { Ok(get_backfill::v1::Response {
@ -1799,13 +1787,6 @@ pub async fn get_devices_route(
return Err(Error::bad_config("Federation is disabled.")); return Err(Error::bad_config("Federation is disabled."));
} }
if body.user_id.server_name() != services().globals.server_name() {
return Err(Error::BadRequest(
ErrorKind::InvalidParam,
"Tried to access user from other server.",
));
}
let sender_servername = body let sender_servername = body
.sender_servername .sender_servername
.as_ref() .as_ref()
@ -1880,13 +1861,6 @@ pub async fn get_profile_information_route(
return Err(Error::bad_config("Federation is disabled.")); return Err(Error::bad_config("Federation is disabled."));
} }
if body.user_id.server_name() != services().globals.server_name() {
return Err(Error::BadRequest(
ErrorKind::InvalidParam,
"Tried to access user from other server.",
));
}
let mut displayname = None; let mut displayname = None;
let mut avatar_url = None; let mut avatar_url = None;
let mut blurhash = None; let mut blurhash = None;
@ -1923,17 +1897,6 @@ pub async fn get_keys_route(body: Ruma<get_keys::v1::Request>) -> Result<get_key
return Err(Error::bad_config("Federation is disabled.")); return Err(Error::bad_config("Federation is disabled."));
} }
if body
.device_keys
.iter()
.any(|(u, _)| u.server_name() != services().globals.server_name())
{
return Err(Error::BadRequest(
ErrorKind::InvalidParam,
"Tried to access user from other server.",
));
}
let result = get_keys_helper(None, &body.device_keys, |u| { let result = get_keys_helper(None, &body.device_keys, |u| {
Some(u.server_name()) == body.sender_servername.as_deref() Some(u.server_name()) == body.sender_servername.as_deref()
}) })
@ -1956,17 +1919,6 @@ pub async fn claim_keys_route(
return Err(Error::bad_config("Federation is disabled.")); return Err(Error::bad_config("Federation is disabled."));
} }
if body
.one_time_keys
.iter()
.any(|(u, _)| u.server_name() != services().globals.server_name())
{
return Err(Error::BadRequest(
ErrorKind::InvalidParam,
"Tried to access user from other server.",
));
}
let result = claim_keys_helper(&body.one_time_keys).await?; let result = claim_keys_helper(&body.one_time_keys).await?;
Ok(claim_keys::v1::Response { Ok(claim_keys::v1::Response {

View file

@ -29,9 +29,7 @@ use crate::Result;
/// would be used for `ordinary.onion`, `matrix.myspecial.onion`, but not `hello.myspecial.onion`. /// would be used for `ordinary.onion`, `matrix.myspecial.onion`, but not `hello.myspecial.onion`.
#[derive(Clone, Debug, Deserialize)] #[derive(Clone, Debug, Deserialize)]
#[serde(rename_all = "snake_case")] #[serde(rename_all = "snake_case")]
#[derive(Default)]
pub enum ProxyConfig { pub enum ProxyConfig {
#[default]
None, None,
Global { Global {
#[serde(deserialize_with = "crate::utils::deserialize_from_str")] #[serde(deserialize_with = "crate::utils::deserialize_from_str")]
@ -50,6 +48,11 @@ impl ProxyConfig {
}) })
} }
} }
impl Default for ProxyConfig {
fn default() -> Self {
ProxyConfig::None
}
}
#[derive(Clone, Debug, Deserialize)] #[derive(Clone, Debug, Deserialize)]
pub struct PartialProxyConfig { pub struct PartialProxyConfig {

View file

@ -116,7 +116,7 @@ impl KvTree for PersyTree {
match iter { match iter {
Ok(iter) => Box::new(iter.filter_map(|(k, v)| { Ok(iter) => Box::new(iter.filter_map(|(k, v)| {
v.into_iter() v.into_iter()
.map(|val| ((*k).to_owned(), (*val).to_owned())) .map(|val| ((*k).to_owned().into(), (*val).to_owned().into()))
.next() .next()
})), })),
Err(e) => { Err(e) => {
@ -142,7 +142,7 @@ impl KvTree for PersyTree {
Ok(iter) => { Ok(iter) => {
let map = iter.filter_map(|(k, v)| { let map = iter.filter_map(|(k, v)| {
v.into_iter() v.into_iter()
.map(|val| ((*k).to_owned(), (*val).to_owned())) .map(|val| ((*k).to_owned().into(), (*val).to_owned().into()))
.next() .next()
}); });
if backwards { if backwards {
@ -179,7 +179,7 @@ impl KvTree for PersyTree {
iter.take_while(move |(k, _)| (*k).starts_with(&owned_prefix)) iter.take_while(move |(k, _)| (*k).starts_with(&owned_prefix))
.filter_map(|(k, v)| { .filter_map(|(k, v)| {
v.into_iter() v.into_iter()
.map(|val| ((*k).to_owned(), (*val).to_owned())) .map(|val| ((*k).to_owned().into(), (*val).to_owned().into()))
.next() .next()
}), }),
) )

View file

@ -33,7 +33,7 @@ impl Iterator for PreparedStatementIterator<'_> {
struct NonAliasingBox<T>(*mut T); struct NonAliasingBox<T>(*mut T);
impl<T> Drop for NonAliasingBox<T> { impl<T> Drop for NonAliasingBox<T> {
fn drop(&mut self) { fn drop(&mut self) {
drop(unsafe { Box::from_raw(self.0) }); unsafe { Box::from_raw(self.0) };
} }
} }

View file

@ -8,7 +8,6 @@ use tokio::sync::watch;
#[derive(Default)] #[derive(Default)]
pub(super) struct Watchers { pub(super) struct Watchers {
#[allow(clippy::type_complexity)]
watchers: RwLock<HashMap<Vec<u8>, (watch::Sender<()>, watch::Receiver<()>)>>, watchers: RwLock<HashMap<Vec<u8>, (watch::Sender<()>, watch::Receiver<()>)>>,
} }

View file

@ -123,12 +123,13 @@ impl service::account_data::Data for KeyValueDatabase {
.take_while(move |(k, _)| k.starts_with(&prefix)) .take_while(move |(k, _)| k.starts_with(&prefix))
.map(|(k, v)| { .map(|(k, v)| {
Ok::<_, Error>(( Ok::<_, Error>((
RoomAccountDataEventType::from( RoomAccountDataEventType::try_from(
utils::string_from_bytes(k.rsplit(|&b| b == 0xff).next().ok_or_else( utils::string_from_bytes(k.rsplit(|&b| b == 0xff).next().ok_or_else(
|| Error::bad_database("RoomUserData ID in db is invalid."), || Error::bad_database("RoomUserData ID in db is invalid."),
)?) )?)
.map_err(|_| Error::bad_database("RoomUserData ID in db is invalid."))?, .map_err(|_| Error::bad_database("RoomUserData ID in db is invalid."))?,
), )
.map_err(|_| Error::bad_database("RoomUserData ID in db is invalid."))?,
serde_json::from_slice::<Raw<AnyEphemeralRoomEvent>>(&v).map_err(|_| { serde_json::from_slice::<Raw<AnyEphemeralRoomEvent>>(&v).map_err(|_| {
Error::bad_database("Database contains invalid account data.") Error::bad_database("Database contains invalid account data.")
})?, })?,

View file

@ -256,8 +256,8 @@ lasttimelinecount_cache: {lasttimelinecount_cache}\n"
.. ..
} = new_keys; } = new_keys;
keys.verify_keys.extend(verify_keys); keys.verify_keys.extend(verify_keys.into_iter());
keys.old_verify_keys.extend(old_verify_keys); keys.old_verify_keys.extend(old_verify_keys.into_iter());
self.server_signingkeys.insert( self.server_signingkeys.insert(
origin.as_bytes(), origin.as_bytes(),

View file

@ -157,9 +157,10 @@ impl service::rooms::short::Data for KeyValueDatabase {
.ok_or_else(|| Error::bad_database("Invalid statekey in shortstatekey_statekey."))?; .ok_or_else(|| Error::bad_database("Invalid statekey in shortstatekey_statekey."))?;
let event_type = let event_type =
StateEventType::from(utils::string_from_bytes(eventtype_bytes).map_err(|_| { StateEventType::try_from(utils::string_from_bytes(eventtype_bytes).map_err(|_| {
Error::bad_database("Event type in shortstatekey_statekey is invalid unicode.") Error::bad_database("Event type in shortstatekey_statekey is invalid unicode.")
})?); })?)
.map_err(|_| Error::bad_database("Event type in shortstatekey_statekey is invalid."))?;
let state_key = utils::string_from_bytes(statekey_bytes).map_err(|_| { let state_key = utils::string_from_bytes(statekey_bytes).map_err(|_| {
Error::bad_database("Statekey in shortstatekey_statekey is invalid unicode.") Error::bad_database("Statekey in shortstatekey_statekey is invalid unicode.")

View file

@ -20,7 +20,7 @@ impl service::rooms::state_accessor::Data for KeyValueDatabase {
let parsed = services() let parsed = services()
.rooms .rooms
.state_compressor .state_compressor
.parse_compressed_state_event(compressed)?; .parse_compressed_state_event(&compressed)?;
result.insert(parsed.0, parsed.1); result.insert(parsed.0, parsed.1);
i += 1; i += 1;
@ -49,7 +49,7 @@ impl service::rooms::state_accessor::Data for KeyValueDatabase {
let (_, eventid) = services() let (_, eventid) = services()
.rooms .rooms
.state_compressor .state_compressor
.parse_compressed_state_event(compressed)?; .parse_compressed_state_event(&compressed)?;
if let Some(pdu) = services().rooms.timeline.get_pdu(&eventid)? { if let Some(pdu) = services().rooms.timeline.get_pdu(&eventid)? {
result.insert( result.insert(
( (
@ -101,7 +101,7 @@ impl service::rooms::state_accessor::Data for KeyValueDatabase {
services() services()
.rooms .rooms
.state_compressor .state_compressor
.parse_compressed_state_event(compressed) .parse_compressed_state_event(&compressed)
.ok() .ok()
.map(|(_, id)| id) .map(|(_, id)| id)
})) }))

View file

@ -471,7 +471,6 @@ impl service::rooms::state_cache::Data for KeyValueDatabase {
} }
/// Returns an iterator over all rooms a user was invited to. /// Returns an iterator over all rooms a user was invited to.
#[allow(clippy::type_complexity)]
#[tracing::instrument(skip(self))] #[tracing::instrument(skip(self))]
fn rooms_invited<'a>( fn rooms_invited<'a>(
&'a self, &'a self,
@ -550,7 +549,6 @@ impl service::rooms::state_cache::Data for KeyValueDatabase {
} }
/// Returns an iterator over all rooms a user left. /// Returns an iterator over all rooms a user left.
#[allow(clippy::type_complexity)]
#[tracing::instrument(skip(self))] #[tracing::instrument(skip(self))]
fn rooms_left<'a>( fn rooms_left<'a>(
&'a self, &'a self,

View file

@ -10,7 +10,7 @@ impl service::rooms::threads::Data for KeyValueDatabase {
user_id: &'a UserId, user_id: &'a UserId,
room_id: &'a RoomId, room_id: &'a RoomId,
until: u64, until: u64,
_include: &'a IncludeThreads, include: &'a IncludeThreads,
) -> Result<Box<dyn Iterator<Item = Result<(u64, PduEvent)>> + 'a>> { ) -> Result<Box<dyn Iterator<Item = Result<(u64, PduEvent)>> + 'a>> {
let prefix = services() let prefix = services()
.rooms .rooms
@ -27,7 +27,7 @@ impl service::rooms::threads::Data for KeyValueDatabase {
self.threadid_userids self.threadid_userids
.iter_from(&current, true) .iter_from(&current, true)
.take_while(move |(k, _)| k.starts_with(&prefix)) .take_while(move |(k, _)| k.starts_with(&prefix))
.map(move |(pduid, _users)| { .map(move |(pduid, users)| {
let count = utils::u64_from_bytes(&pduid[(mem::size_of::<u64>())..]) let count = utils::u64_from_bytes(&pduid[(mem::size_of::<u64>())..])
.map_err(|_| Error::bad_database("Invalid pduid in threadid_userids."))?; .map_err(|_| Error::bad_database("Invalid pduid in threadid_userids."))?;
let mut pdu = services() let mut pdu = services()
@ -52,13 +52,13 @@ impl service::rooms::threads::Data for KeyValueDatabase {
.collect::<Vec<_>>() .collect::<Vec<_>>()
.join(&[0xff][..]); .join(&[0xff][..]);
self.threadid_userids.insert(root_id, &users)?; self.threadid_userids.insert(&root_id, &users)?;
Ok(()) Ok(())
} }
fn get_participants(&self, root_id: &[u8]) -> Result<Option<Vec<OwnedUserId>>> { fn get_participants(&self, root_id: &[u8]) -> Result<Option<Vec<OwnedUserId>>> {
if let Some(users) = self.threadid_userids.get(root_id)? { if let Some(users) = self.threadid_userids.get(&root_id)? {
Ok(Some( Ok(Some(
users users
.split(|b| *b == 0xff) .split(|b| *b == 0xff)

View file

@ -39,10 +39,11 @@ impl service::rooms::timeline::Data for KeyValueDatabase {
/// Returns the `count` of this pdu's id. /// Returns the `count` of this pdu's id.
fn get_pdu_count(&self, event_id: &EventId) -> Result<Option<PduCount>> { fn get_pdu_count(&self, event_id: &EventId) -> Result<Option<PduCount>> {
self.eventid_pduid Ok(self
.eventid_pduid
.get(event_id.as_bytes())? .get(event_id.as_bytes())?
.map(|pdu_id| pdu_count(&pdu_id)) .map(|pdu_id| pdu_count(&pdu_id))
.transpose() .transpose()?)
} }
/// Returns the json of a pdu. /// Returns the json of a pdu.
@ -79,10 +80,12 @@ impl service::rooms::timeline::Data for KeyValueDatabase {
/// Returns the pdu's id. /// Returns the pdu's id.
fn get_pdu_id(&self, event_id: &EventId) -> Result<Option<Vec<u8>>> { fn get_pdu_id(&self, event_id: &EventId) -> Result<Option<Vec<u8>>> {
self.eventid_pduid.get(event_id.as_bytes()) Ok(self.eventid_pduid.get(event_id.as_bytes())?)
} }
/// Returns the pdu. /// Returns the pdu.
///
/// Checks the `eventid_outlierpdu` Tree if not found in the timeline.
fn get_non_outlier_pdu(&self, event_id: &EventId) -> Result<Option<PduEvent>> { fn get_non_outlier_pdu(&self, event_id: &EventId) -> Result<Option<PduEvent>> {
self.eventid_pduid self.eventid_pduid
.get(event_id.as_bytes())? .get(event_id.as_bytes())?
@ -229,7 +232,7 @@ impl service::rooms::timeline::Data for KeyValueDatabase {
room_id: &RoomId, room_id: &RoomId,
until: PduCount, until: PduCount,
) -> Result<Box<dyn Iterator<Item = Result<(PduCount, PduEvent)>> + 'a>> { ) -> Result<Box<dyn Iterator<Item = Result<(PduCount, PduEvent)>> + 'a>> {
let (prefix, current) = count_to_id(room_id, until, 1, true)?; let (prefix, current) = count_to_id(&room_id, until, 1, true)?;
let user_id = user_id.to_owned(); let user_id = user_id.to_owned();
@ -256,7 +259,7 @@ impl service::rooms::timeline::Data for KeyValueDatabase {
room_id: &RoomId, room_id: &RoomId,
from: PduCount, from: PduCount,
) -> Result<Box<dyn Iterator<Item = Result<(PduCount, PduEvent)>> + 'a>> { ) -> Result<Box<dyn Iterator<Item = Result<(PduCount, PduEvent)>> + 'a>> {
let (prefix, current) = count_to_id(room_id, from, 1, false)?; let (prefix, current) = count_to_id(&room_id, from, 1, false)?;
let user_id = user_id.to_owned(); let user_id = user_id.to_owned();
@ -331,7 +334,7 @@ fn count_to_id(
.rooms .rooms
.short .short
.get_shortroomid(room_id)? .get_shortroomid(room_id)?
.ok_or_else(|| Error::bad_database("Looked for bad shortroomid in timeline"))? .expect("room exists")
.to_be_bytes() .to_be_bytes()
.to_vec(); .to_vec();
let mut pdu_id = prefix.clone(); let mut pdu_id = prefix.clone();

View file

@ -146,9 +146,10 @@ impl service::users::Data for KeyValueDatabase {
self.userid_avatarurl self.userid_avatarurl
.get(user_id.as_bytes())? .get(user_id.as_bytes())?
.map(|bytes| { .map(|bytes| {
utils::string_from_bytes(&bytes) let s = utils::string_from_bytes(&bytes)
.map_err(|_| Error::bad_database("Avatar URL in db is invalid."))?;
s.try_into()
.map_err(|_| Error::bad_database("Avatar URL in db is invalid.")) .map_err(|_| Error::bad_database("Avatar URL in db is invalid."))
.map(Into::into)
}) })
.transpose() .transpose()
} }

View file

@ -852,9 +852,7 @@ impl KeyValueDatabase {
if rule.is_some() { if rule.is_some() {
let mut rule = rule.unwrap().clone(); let mut rule = rule.unwrap().clone();
rule.rule_id = content_rule_transformation[1].to_owned(); rule.rule_id = content_rule_transformation[1].to_owned();
rules_list rules_list.content.remove(content_rule_transformation[0]);
.content
.shift_remove(content_rule_transformation[0]);
rules_list.content.insert(rule); rules_list.content.insert(rule);
} }
} }
@ -877,7 +875,7 @@ impl KeyValueDatabase {
if let Some(rule) = rule { if let Some(rule) = rule {
let mut rule = rule.clone(); let mut rule = rule.clone();
rule.rule_id = transformation[1].to_owned(); rule.rule_id = transformation[1].to_owned();
rules_list.underride.shift_remove(transformation[0]); rules_list.underride.remove(transformation[0]);
rules_list.underride.insert(rule); rules_list.underride.insert(rule);
} }
} }

View file

@ -1,3 +1,12 @@
#![warn(
rust_2018_idioms,
unused_qualifications,
clippy::cloned_instead_of_copied,
clippy::str_to_string
)]
#![allow(clippy::suspicious_else_formatting)]
#![deny(clippy::dbg_macro)]
pub mod api; pub mod api;
mod config; mod config;
mod database; mod database;

View file

@ -1,3 +1,13 @@
#![warn(
rust_2018_idioms,
unused_qualifications,
clippy::cloned_instead_of_copied,
clippy::str_to_string,
clippy::future_not_send
)]
#![allow(clippy::suspicious_else_formatting)]
#![deny(clippy::dbg_macro)]
use std::{future::Future, io, net::SocketAddr, sync::atomic, time::Duration}; use std::{future::Future, io, net::SocketAddr, sync::atomic, time::Duration};
use axum::{ use axum::{
@ -228,7 +238,7 @@ async fn spawn_task<B: Send + 'static>(
.map_err(|_| StatusCode::INTERNAL_SERVER_ERROR) .map_err(|_| StatusCode::INTERNAL_SERVER_ERROR)
} }
async fn unrecognized_method<B: Send>( async fn unrecognized_method<B>(
req: axum::http::Request<B>, req: axum::http::Request<B>,
next: axum::middleware::Next<B>, next: axum::middleware::Next<B>,
) -> std::result::Result<axum::response::Response, StatusCode> { ) -> std::result::Result<axum::response::Response, StatusCode> {

View file

@ -50,7 +50,7 @@ enum AdminCommand {
/// Registering a new bridge using the ID of an existing bridge will replace /// Registering a new bridge using the ID of an existing bridge will replace
/// the old one. /// the old one.
/// ///
/// [commandbody]() /// [commandbody]
/// # ``` /// # ```
/// # yaml content here /// # yaml content here
/// # ``` /// # ```
@ -96,7 +96,7 @@ enum AdminCommand {
/// Removing a mass amount of users from a room may cause a significant amount of leave events. /// Removing a mass amount of users from a room may cause a significant amount of leave events.
/// The time to leave rooms may depend significantly on joined rooms and servers. /// The time to leave rooms may depend significantly on joined rooms and servers.
/// ///
/// [commandbody]() /// [commandbody]
/// # ``` /// # ```
/// # User list here /// # User list here
/// # ``` /// # ```
@ -121,7 +121,7 @@ enum AdminCommand {
/// The PDU event is only checked for validity and is not added to the /// The PDU event is only checked for validity and is not added to the
/// database. /// database.
/// ///
/// [commandbody]() /// [commandbody]
/// # ``` /// # ```
/// # PDU json content here /// # PDU json content here
/// # ``` /// # ```
@ -165,14 +165,14 @@ enum AdminCommand {
EnableRoom { room_id: Box<RoomId> }, EnableRoom { room_id: Box<RoomId> },
/// Verify json signatures /// Verify json signatures
/// [commandbody]() /// [commandbody]
/// # ``` /// # ```
/// # json here /// # json here
/// # ``` /// # ```
SignJson, SignJson,
/// Verify json signatures /// Verify json signatures
/// [commandbody]() /// [commandbody]
/// # ``` /// # ```
/// # json here /// # json here
/// # ``` /// # ```
@ -858,15 +858,12 @@ impl Service {
.expect("Regex compilation should not fail"); .expect("Regex compilation should not fail");
let text = re.replace_all(&text, "<code>$1</code>: $4"); let text = re.replace_all(&text, "<code>$1</code>: $4");
// Look for a `[commandbody]()` tag. If it exists, use all lines below it that // Look for a `[commandbody]` tag. If it exists, use all lines below it that
// start with a `#` in the USAGE section. // start with a `#` in the USAGE section.
let mut text_lines: Vec<&str> = text.lines().collect(); let mut text_lines: Vec<&str> = text.lines().collect();
let mut command_body = String::new(); let mut command_body = String::new();
if let Some(line_index) = text_lines if let Some(line_index) = text_lines.iter().position(|line| *line == "[commandbody]") {
.iter()
.position(|line| *line == "[commandbody]()")
{
text_lines.remove(line_index); text_lines.remove(line_index);
while text_lines while text_lines
@ -935,7 +932,7 @@ impl Service {
services().users.create(&conduit_user, None)?; services().users.create(&conduit_user, None)?;
let mut content = RoomCreateEventContent::new_v1(conduit_user.clone()); let mut content = RoomCreateEventContent::new(conduit_user.clone());
content.federate = true; content.federate = true;
content.predecessor = None; content.predecessor = None;
content.room_version = services().globals.default_room_version(); content.room_version = services().globals.default_room_version();
@ -1051,7 +1048,7 @@ impl Service {
services().rooms.timeline.build_and_append_pdu( services().rooms.timeline.build_and_append_pdu(
PduBuilder { PduBuilder {
event_type: TimelineEventType::RoomName, event_type: TimelineEventType::RoomName,
content: to_raw_value(&RoomNameEventContent::new(room_name)) content: to_raw_value(&RoomNameEventContent::new(Some(room_name)))
.expect("event is valid, we just created it"), .expect("event is valid, we just created it"),
unsigned: None, unsigned: None,
state_key: Some("".to_owned()), state_key: Some("".to_owned()),

View file

@ -8,12 +8,6 @@ use ruma::{
use crate::api::server_server::FedDest; use crate::api::server_server::FedDest;
use crate::{services, Config, Error, Result}; use crate::{services, Config, Error, Result};
use futures_util::FutureExt;
use hyper::{
client::connect::dns::{GaiResolver, Name},
service::Service as HyperService,
};
use reqwest::dns::{Addrs, Resolve, Resolving};
use ruma::{ use ruma::{
api::{ api::{
client::sync::sync_events, client::sync::sync_events,
@ -23,10 +17,8 @@ use ruma::{
}; };
use std::{ use std::{
collections::{BTreeMap, HashMap}, collections::{BTreeMap, HashMap},
error::Error as StdError,
fs, fs,
future::{self, Future}, future::Future,
iter,
net::{IpAddr, SocketAddr}, net::{IpAddr, SocketAddr},
path::PathBuf, path::PathBuf,
sync::{ sync::{
@ -64,7 +56,6 @@ pub struct Service {
pub unstable_room_versions: Vec<RoomVersionId>, pub unstable_room_versions: Vec<RoomVersionId>,
pub bad_event_ratelimiter: Arc<RwLock<HashMap<OwnedEventId, RateLimitState>>>, pub bad_event_ratelimiter: Arc<RwLock<HashMap<OwnedEventId, RateLimitState>>>,
pub bad_signature_ratelimiter: Arc<RwLock<HashMap<Vec<String>, RateLimitState>>>, pub bad_signature_ratelimiter: Arc<RwLock<HashMap<Vec<String>, RateLimitState>>>,
pub bad_query_ratelimiter: Arc<RwLock<HashMap<OwnedServerName, RateLimitState>>>,
pub servername_ratelimiter: Arc<RwLock<HashMap<OwnedServerName, Arc<Semaphore>>>>, pub servername_ratelimiter: Arc<RwLock<HashMap<OwnedServerName, Arc<Semaphore>>>>,
pub sync_receivers: RwLock<HashMap<(OwnedUserId, OwnedDeviceId), SyncHandle>>, pub sync_receivers: RwLock<HashMap<(OwnedUserId, OwnedDeviceId), SyncHandle>>,
pub roomid_mutex_insert: RwLock<HashMap<OwnedRoomId, Arc<Mutex<()>>>>, pub roomid_mutex_insert: RwLock<HashMap<OwnedRoomId, Arc<Mutex<()>>>>,
@ -107,45 +98,6 @@ impl Default for RotationHandler {
} }
} }
pub struct Resolver {
inner: GaiResolver,
overrides: Arc<RwLock<TlsNameMap>>,
}
impl Resolver {
pub fn new(overrides: Arc<RwLock<TlsNameMap>>) -> Self {
Resolver {
inner: GaiResolver::new(),
overrides,
}
}
}
impl Resolve for Resolver {
fn resolve(&self, name: Name) -> Resolving {
self.overrides
.read()
.expect("lock should not be poisoned")
.get(name.as_str())
.and_then(|(override_name, port)| {
override_name.first().map(|first_name| {
let x: Box<dyn Iterator<Item = SocketAddr> + Send> =
Box::new(iter::once(SocketAddr::new(*first_name, *port)));
let x: Resolving = Box::pin(future::ready(Ok(x)));
x
})
})
.unwrap_or_else(|| {
let this = &mut self.inner.clone();
Box::pin(HyperService::<Name>::call(this, name).map(|result| {
result
.map(|addrs| -> Addrs { Box::new(addrs) })
.map_err(|err| -> Box<dyn StdError + Send + Sync> { Box::new(err) })
}))
})
}
}
impl Service { impl Service {
pub fn load(db: &'static dyn Data, config: Config) -> Result<Self> { pub fn load(db: &'static dyn Data, config: Config) -> Result<Self> {
let keypair = db.load_keypair(); let keypair = db.load_keypair();
@ -167,8 +119,14 @@ impl Service {
.map(|secret| jsonwebtoken::DecodingKey::from_secret(secret.as_bytes())); .map(|secret| jsonwebtoken::DecodingKey::from_secret(secret.as_bytes()));
let default_client = reqwest_client_builder(&config)?.build()?; let default_client = reqwest_client_builder(&config)?.build()?;
let name_override = Arc::clone(&tls_name_override);
let federation_client = reqwest_client_builder(&config)? let federation_client = reqwest_client_builder(&config)?
.dns_resolver(Arc::new(Resolver::new(tls_name_override.clone()))) .resolve_fn(move |domain| {
let read_guard = name_override.read().unwrap();
let (override_name, port) = read_guard.get(&domain)?;
let first_name = override_name.get(0)?;
Some(SocketAddr::new(*first_name, *port))
})
.build()?; .build()?;
// Supported and stable room versions // Supported and stable room versions
@ -202,7 +160,6 @@ impl Service {
unstable_room_versions, unstable_room_versions,
bad_event_ratelimiter: Arc::new(RwLock::new(HashMap::new())), bad_event_ratelimiter: Arc::new(RwLock::new(HashMap::new())),
bad_signature_ratelimiter: Arc::new(RwLock::new(HashMap::new())), bad_signature_ratelimiter: Arc::new(RwLock::new(HashMap::new())),
bad_query_ratelimiter: Arc::new(RwLock::new(HashMap::new())),
servername_ratelimiter: Arc::new(RwLock::new(HashMap::new())), servername_ratelimiter: Arc::new(RwLock::new(HashMap::new())),
roomid_mutex_state: RwLock::new(HashMap::new()), roomid_mutex_state: RwLock::new(HashMap::new()),
roomid_mutex_insert: RwLock::new(HashMap::new()), roomid_mutex_insert: RwLock::new(HashMap::new()),

View file

@ -385,7 +385,7 @@ impl PartialEq for PduEvent {
} }
impl PartialOrd for PduEvent { impl PartialOrd for PduEvent {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> { fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(self.cmp(other)) self.event_id.partial_cmp(&other.event_id)
} }
} }
impl Ord for PduEvent { impl Ord for PduEvent {

View file

@ -1,6 +1,6 @@
mod data; mod data;
pub use data::Data; pub use data::Data;
use ruma::{events::AnySyncTimelineEvent, push::PushConditionPowerLevelsCtx}; use ruma::events::AnySyncTimelineEvent;
use crate::{services, Error, PduEvent, Result}; use crate::{services, Error, PduEvent, Result};
use bytes::BytesMut; use bytes::BytesMut;
@ -193,12 +193,6 @@ impl Service {
pdu: &Raw<AnySyncTimelineEvent>, pdu: &Raw<AnySyncTimelineEvent>,
room_id: &RoomId, room_id: &RoomId,
) -> Result<&'a [Action]> { ) -> Result<&'a [Action]> {
let power_levels = PushConditionPowerLevelsCtx {
users: power_levels.users.clone(),
users_default: power_levels.users_default,
notifications: power_levels.notifications.clone(),
};
let ctx = PushConditionRoomCtx { let ctx = PushConditionRoomCtx {
room_id: room_id.to_owned(), room_id: room_id.to_owned(),
member_count: 10_u32.into(), // TODO: get member count efficiently member_count: 10_u32.into(), // TODO: get member count efficiently
@ -207,7 +201,9 @@ impl Service {
.users .users
.displayname(user)? .displayname(user)?
.unwrap_or_else(|| user.localpart().to_owned()), .unwrap_or_else(|| user.localpart().to_owned()),
power_levels: Some(power_levels), users_power_levels: power_levels.users.clone(),
default_power_level: power_levels.users_default,
notification_power_levels: power_levels.notifications.clone(),
}; };
Ok(ruleset.get_actions(pdu, &ctx)) Ok(ruleset.get_actions(pdu, &ctx))

View file

@ -11,7 +11,6 @@ pub trait Data: Send + Sync {
) -> Result<()>; ) -> Result<()>;
/// Returns an iterator over the most recent read_receipts in a room that happened after the event with id `since`. /// Returns an iterator over the most recent read_receipts in a room that happened after the event with id `since`.
#[allow(clippy::type_complexity)]
fn readreceipts_since<'a>( fn readreceipts_since<'a>(
&'a self, &'a self,
room_id: &RoomId, room_id: &RoomId,

View file

@ -92,8 +92,6 @@ impl Service {
)); ));
} }
services().rooms.event_handler.acl_check(origin, room_id)?;
// 1. Skip the PDU if we already have it as a timeline event // 1. Skip the PDU if we already have it as a timeline event
if let Some(pdu_id) = services().rooms.timeline.get_pdu_id(event_id)? { if let Some(pdu_id) = services().rooms.timeline.get_pdu_id(event_id)? {
return Ok(Some(pdu_id.to_vec())); return Ok(Some(pdu_id.to_vec()));
@ -119,15 +117,7 @@ impl Service {
.ok_or_else(|| Error::bad_database("Failed to find first pdu in db."))?; .ok_or_else(|| Error::bad_database("Failed to find first pdu in db."))?;
let (incoming_pdu, val) = self let (incoming_pdu, val) = self
.handle_outlier_pdu( .handle_outlier_pdu(origin, &create_event, event_id, room_id, value, pub_key_map)
origin,
&create_event,
event_id,
room_id,
value,
false,
pub_key_map,
)
.await?; .await?;
self.check_room_id(room_id, &incoming_pdu)?; self.check_room_id(room_id, &incoming_pdu)?;
@ -184,22 +174,7 @@ impl Service {
} }
if errors >= 5 { if errors >= 5 {
// Timeout other events break;
match services()
.globals
.bad_event_ratelimiter
.write()
.unwrap()
.entry((*prev_id).to_owned())
{
hash_map::Entry::Vacant(e) => {
e.insert((Instant::now(), 1));
}
hash_map::Entry::Occupied(mut e) => {
*e.get_mut() = (Instant::now(), e.get().1 + 1)
}
}
continue;
} }
if let Some((pdu, json)) = eventid_info.remove(&*prev_id) { if let Some((pdu, json)) = eventid_info.remove(&*prev_id) {
@ -251,7 +226,7 @@ impl Service {
.write() .write()
.unwrap() .unwrap()
.remove(&room_id.to_owned()); .remove(&room_id.to_owned());
debug!( warn!(
"Handling prev event {} took {}m{}s", "Handling prev event {} took {}m{}s",
prev_id, prev_id,
elapsed.as_secs() / 60, elapsed.as_secs() / 60,
@ -291,7 +266,6 @@ impl Service {
r r
} }
#[allow(clippy::type_complexity, clippy::too_many_arguments)]
#[tracing::instrument(skip(self, create_event, value, pub_key_map))] #[tracing::instrument(skip(self, create_event, value, pub_key_map))]
fn handle_outlier_pdu<'a>( fn handle_outlier_pdu<'a>(
&'a self, &'a self,
@ -300,7 +274,6 @@ impl Service {
event_id: &'a EventId, event_id: &'a EventId,
room_id: &'a RoomId, room_id: &'a RoomId,
mut value: BTreeMap<String, CanonicalJsonValue>, mut value: BTreeMap<String, CanonicalJsonValue>,
auth_events_known: bool,
pub_key_map: &'a RwLock<BTreeMap<String, BTreeMap<String, Base64>>>, pub_key_map: &'a RwLock<BTreeMap<String, BTreeMap<String, Base64>>>,
) -> AsyncRecursiveType<'a, Result<(Arc<PduEvent>, BTreeMap<String, CanonicalJsonValue>)>> { ) -> AsyncRecursiveType<'a, Result<(Arc<PduEvent>, BTreeMap<String, CanonicalJsonValue>)>> {
Box::pin(async move { Box::pin(async move {
@ -342,7 +315,7 @@ impl Service {
Ok(ruma::signatures::Verified::Signatures) => { Ok(ruma::signatures::Verified::Signatures) => {
// Redact // Redact
warn!("Calculated hash does not match: {}", event_id); warn!("Calculated hash does not match: {}", event_id);
let obj = match ruma::canonical_json::redact(value, room_version_id, None) { match ruma::canonical_json::redact(value, room_version_id, None) {
Ok(obj) => obj, Ok(obj) => obj,
Err(_) => { Err(_) => {
return Err(Error::BadRequest( return Err(Error::BadRequest(
@ -350,17 +323,7 @@ impl Service {
"Redaction failed", "Redaction failed",
)) ))
} }
};
// Skip the PDU if it is redacted and we already have it as an outlier event
if services().rooms.timeline.get_pdu_json(event_id)?.is_some() {
return Err(Error::BadRequest(
ErrorKind::InvalidParam,
"Event was redacted and we already knew about it",
));
} }
obj
} }
Ok(ruma::signatures::Verified::All) => value, Ok(ruma::signatures::Verified::All) => value,
}; };
@ -378,7 +341,6 @@ impl Service {
self.check_room_id(room_id, &incoming_pdu)?; self.check_room_id(room_id, &incoming_pdu)?;
if !auth_events_known {
// 4. fetch any missing auth events doing all checks listed here starting at 1. These are not timeline events // 4. fetch any missing auth events doing all checks listed here starting at 1. These are not timeline events
// 5. Reject "due to auth events" if can't get all the auth events or some of the auth events are also rejected "due to auth events" // 5. Reject "due to auth events" if can't get all the auth events or some of the auth events are also rejected "due to auth events"
// NOTE: Step 5 is not applied anymore because it failed too often // NOTE: Step 5 is not applied anymore because it failed too often
@ -396,7 +358,6 @@ impl Service {
pub_key_map, pub_key_map,
) )
.await; .await;
}
// 6. Reject "due to auth events" if the event doesn't pass auth based on the auth events // 6. Reject "due to auth events" if the event doesn't pass auth based on the auth events
debug!( debug!(
@ -981,21 +942,14 @@ impl Service {
debug!("Resolving state"); debug!("Resolving state");
let fetch_event = |id: &_| { let lock = services().globals.stateres_mutex.lock();
let state = match state_res::resolve(room_version_id, &fork_states, auth_chain_sets, |id| {
let res = services().rooms.timeline.get_pdu(id); let res = services().rooms.timeline.get_pdu(id);
if let Err(e) = &res { if let Err(e) = &res {
error!("LOOK AT ME Failed to fetch event: {}", e); error!("LOOK AT ME Failed to fetch event: {}", e);
} }
res.ok().flatten() res.ok().flatten()
}; }) {
let lock = services().globals.stateres_mutex.lock();
let state = match state_res::resolve(
room_version_id,
&fork_states,
auth_chain_sets,
fetch_event,
) {
Ok(new_state) => new_state, Ok(new_state) => new_state,
Err(_) => { Err(_) => {
return Err(Error::bad_database("State resolution failed, either an event could not be found or deserialization")); return Err(Error::bad_database("State resolution failed, either an event could not be found or deserialization"));
@ -1032,7 +986,6 @@ impl Service {
/// b. Look at outlier pdu tree /// b. Look at outlier pdu tree
/// c. Ask origin server over federation /// c. Ask origin server over federation
/// d. TODO: Ask other servers over federation? /// d. TODO: Ask other servers over federation?
#[allow(clippy::type_complexity)]
#[tracing::instrument(skip_all)] #[tracing::instrument(skip_all)]
pub(crate) fn fetch_and_handle_outliers<'a>( pub(crate) fn fetch_and_handle_outliers<'a>(
&'a self, &'a self,
@ -1060,6 +1013,26 @@ impl Service {
let mut pdus = vec![]; let mut pdus = vec![];
for id in events { for id in events {
if let Some((time, tries)) = services()
.globals
.bad_event_ratelimiter
.read()
.unwrap()
.get(&**id)
{
// Exponential backoff
let mut min_elapsed_duration =
Duration::from_secs(5 * 60) * (*tries) * (*tries);
if min_elapsed_duration > Duration::from_secs(60 * 60 * 24) {
min_elapsed_duration = Duration::from_secs(60 * 60 * 24);
}
if time.elapsed() < min_elapsed_duration {
info!("Backing off from {}", id);
continue;
}
}
// a. Look in the main timeline (pduid_pdu tree) // a. Look in the main timeline (pduid_pdu tree)
// b. Look at outlier pdu tree // b. Look at outlier pdu tree
// (get_pdu_json checks both) // (get_pdu_json checks both)
@ -1077,26 +1050,6 @@ impl Service {
let mut events_all = HashSet::new(); let mut events_all = HashSet::new();
let mut i = 0; let mut i = 0;
while let Some(next_id) = todo_auth_events.pop() { while let Some(next_id) = todo_auth_events.pop() {
if let Some((time, tries)) = services()
.globals
.bad_event_ratelimiter
.read()
.unwrap()
.get(&*next_id)
{
// Exponential backoff
let mut min_elapsed_duration =
Duration::from_secs(5 * 60) * (*tries) * (*tries);
if min_elapsed_duration > Duration::from_secs(60 * 60 * 24) {
min_elapsed_duration = Duration::from_secs(60 * 60 * 24);
}
if time.elapsed() < min_elapsed_duration {
info!("Backing off from {}", next_id);
continue;
}
}
if events_all.contains(&next_id) { if events_all.contains(&next_id) {
continue; continue;
} }
@ -1107,7 +1060,7 @@ impl Service {
} }
if let Ok(Some(_)) = services().rooms.timeline.get_pdu(&next_id) { if let Ok(Some(_)) = services().rooms.timeline.get_pdu(&next_id) {
trace!("Found {} in db", next_id); trace!("Found {} in db", id);
continue; continue;
} }
@ -1166,26 +1119,6 @@ impl Service {
} }
for (next_id, value) in events_in_reverse_order.iter().rev() { for (next_id, value) in events_in_reverse_order.iter().rev() {
if let Some((time, tries)) = services()
.globals
.bad_event_ratelimiter
.read()
.unwrap()
.get(&**next_id)
{
// Exponential backoff
let mut min_elapsed_duration =
Duration::from_secs(5 * 60) * (*tries) * (*tries);
if min_elapsed_duration > Duration::from_secs(60 * 60 * 24) {
min_elapsed_duration = Duration::from_secs(60 * 60 * 24);
}
if time.elapsed() < min_elapsed_duration {
info!("Backing off from {}", next_id);
continue;
}
}
match self match self
.handle_outlier_pdu( .handle_outlier_pdu(
origin, origin,
@ -1193,7 +1126,6 @@ impl Service {
next_id, next_id,
room_id, room_id,
value.clone(), value.clone(),
true,
pub_key_map, pub_key_map,
) )
.await .await
@ -1598,11 +1530,6 @@ impl Service {
} }
}; };
if acl_event_content.allow.is_empty() {
// Ignore broken acl events
return Ok(());
}
if acl_event_content.is_allowed(server_name) { if acl_event_content.is_allowed(server_name) {
Ok(()) Ok(())
} else { } else {

View file

@ -14,7 +14,6 @@ use super::timeline::PduCount;
pub struct Service { pub struct Service {
pub db: &'static dyn Data, pub db: &'static dyn Data,
#[allow(clippy::type_complexity)]
pub lazy_load_waiting: pub lazy_load_waiting:
Mutex<HashMap<(OwnedUserId, OwnedDeviceId, OwnedRoomId, PduCount), HashSet<OwnedUserId>>>, Mutex<HashMap<(OwnedUserId, OwnedDeviceId, OwnedRoomId, PduCount), HashSet<OwnedUserId>>>,
} }

View file

@ -5,7 +5,6 @@ use ruma::{EventId, RoomId, UserId};
pub trait Data: Send + Sync { pub trait Data: Send + Sync {
fn add_relation(&self, from: u64, to: u64) -> Result<()>; fn add_relation(&self, from: u64, to: u64) -> Result<()>;
#[allow(clippy::type_complexity)]
fn relations_until<'a>( fn relations_until<'a>(
&'a self, &'a self,
user_id: &'a UserId, user_id: &'a UserId,

View file

@ -40,7 +40,6 @@ impl Service {
} }
} }
#[allow(clippy::too_many_arguments)]
pub fn paginate_relations_with_filter( pub fn paginate_relations_with_filter(
&self, &self,
sender_user: &UserId, sender_user: &UserId,
@ -83,7 +82,7 @@ impl Service {
services() services()
.rooms .rooms
.state_accessor .state_accessor
.user_can_see_event(sender_user, room_id, &pdu.event_id) .user_can_see_event(sender_user, &room_id, &pdu.event_id)
.unwrap_or(false) .unwrap_or(false)
}) })
.take_while(|&(k, _)| Some(k) != to) // Stop at `to` .take_while(|&(k, _)| Some(k) != to) // Stop at `to`
@ -107,7 +106,7 @@ impl Service {
let events_before: Vec<_> = services() let events_before: Vec<_> = services()
.rooms .rooms
.pdu_metadata .pdu_metadata
.relations_until(sender_user, room_id, target, from)? .relations_until(sender_user, &room_id, target, from)?
.filter(|r| { .filter(|r| {
r.as_ref().map_or(true, |(_, pdu)| { r.as_ref().map_or(true, |(_, pdu)| {
filter_event_type.as_ref().map_or(true, |t| &pdu.kind == t) filter_event_type.as_ref().map_or(true, |t| &pdu.kind == t)
@ -130,7 +129,7 @@ impl Service {
services() services()
.rooms .rooms
.state_accessor .state_accessor
.user_can_see_event(sender_user, room_id, &pdu.event_id) .user_can_see_event(sender_user, &room_id, &pdu.event_id)
.unwrap_or(false) .unwrap_or(false)
}) })
.take_while(|&(k, _)| Some(k) != to) // Stop at `to` .take_while(|&(k, _)| Some(k) != to) // Stop at `to`

View file

@ -4,7 +4,6 @@ use ruma::RoomId;
pub trait Data: Send + Sync { pub trait Data: Send + Sync {
fn index_pdu(&self, shortroomid: u64, pdu_id: &[u8], message_body: &str) -> Result<()>; fn index_pdu(&self, shortroomid: u64, pdu_id: &[u8], message_body: &str) -> Result<()>;
#[allow(clippy::type_complexity)]
fn search_pdus<'a>( fn search_pdus<'a>(
&'a self, &'a self,
room_id: &RoomId, room_id: &RoomId,

View file

@ -134,7 +134,7 @@ impl Service {
if serde_json::from_str::<SpaceChildEventContent>(pdu.content.get()) if serde_json::from_str::<SpaceChildEventContent>(pdu.content.get())
.ok() .ok()
.map(|c| c.via) .and_then(|c| c.via)
.map_or(true, |v| v.is_empty()) .map_or(true, |v| v.is_empty())
{ {
continue; continue;
@ -185,9 +185,7 @@ impl Service {
stack.push(children_ids); stack.push(children_ids);
} }
} else { } else {
let server = current_room let server = current_room.server_name();
.server_name()
.expect("Room IDs should always have a server name");
if server == services().globals.server_name() { if server == services().globals.server_name() {
continue; continue;
} }
@ -195,11 +193,11 @@ impl Service {
// Early return so the client can see some data already // Early return so the client can see some data already
break; break;
} }
debug!("Asking {server} for /hierarchy"); warn!("Asking {server} for /hierarchy");
if let Ok(response) = services() if let Ok(response) = services()
.sending .sending
.send_federation_request( .send_federation_request(
server, &server,
federation::space::get_hierarchy::v1::Request { federation::space::get_hierarchy::v1::Request {
room_id: current_room.to_owned(), room_id: current_room.to_owned(),
suggested_only, suggested_only,
@ -237,7 +235,7 @@ impl Service {
.room .room
.allowed_room_ids .allowed_room_ids
.into_iter() .into_iter()
.map(AllowRule::room_membership) .map(|room| AllowRule::room_membership(room))
.collect(), .collect(),
}) })
} }
@ -247,7 +245,7 @@ impl Service {
.room .room
.allowed_room_ids .allowed_room_ids
.into_iter() .into_iter()
.map(AllowRule::room_membership) .map(|room| AllowRule::room_membership(room))
.collect(), .collect(),
}) })
} }
@ -315,7 +313,7 @@ impl Service {
canonical_alias: services() canonical_alias: services()
.rooms .rooms
.state_accessor .state_accessor
.room_state_get(room_id, &StateEventType::RoomCanonicalAlias, "")? .room_state_get(&room_id, &StateEventType::RoomCanonicalAlias, "")?
.map_or(Ok(None), |s| { .map_or(Ok(None), |s| {
serde_json::from_str(s.content.get()) serde_json::from_str(s.content.get())
.map(|c: RoomCanonicalAliasEventContent| c.alias) .map(|c: RoomCanonicalAliasEventContent| c.alias)
@ -323,11 +321,11 @@ impl Service {
Error::bad_database("Invalid canonical alias event in database.") Error::bad_database("Invalid canonical alias event in database.")
}) })
})?, })?,
name: services().rooms.state_accessor.get_name(room_id)?, name: services().rooms.state_accessor.get_name(&room_id)?,
num_joined_members: services() num_joined_members: services()
.rooms .rooms
.state_cache .state_cache
.room_joined_count(room_id)? .room_joined_count(&room_id)?
.unwrap_or_else(|| { .unwrap_or_else(|| {
warn!("Room {} has no member count", room_id); warn!("Room {} has no member count", room_id);
0 0
@ -338,7 +336,7 @@ impl Service {
topic: services() topic: services()
.rooms .rooms
.state_accessor .state_accessor
.room_state_get(room_id, &StateEventType::RoomTopic, "")? .room_state_get(&room_id, &StateEventType::RoomTopic, "")?
.map_or(Ok(None), |s| { .map_or(Ok(None), |s| {
serde_json::from_str(s.content.get()) serde_json::from_str(s.content.get())
.map(|c: RoomTopicEventContent| Some(c.topic)) .map(|c: RoomTopicEventContent| Some(c.topic))
@ -350,7 +348,7 @@ impl Service {
world_readable: services() world_readable: services()
.rooms .rooms
.state_accessor .state_accessor
.room_state_get(room_id, &StateEventType::RoomHistoryVisibility, "")? .room_state_get(&room_id, &StateEventType::RoomHistoryVisibility, "")?
.map_or(Ok(false), |s| { .map_or(Ok(false), |s| {
serde_json::from_str(s.content.get()) serde_json::from_str(s.content.get())
.map(|c: RoomHistoryVisibilityEventContent| { .map(|c: RoomHistoryVisibilityEventContent| {
@ -365,7 +363,7 @@ impl Service {
guest_can_join: services() guest_can_join: services()
.rooms .rooms
.state_accessor .state_accessor
.room_state_get(room_id, &StateEventType::RoomGuestAccess, "")? .room_state_get(&room_id, &StateEventType::RoomGuestAccess, "")?
.map_or(Ok(false), |s| { .map_or(Ok(false), |s| {
serde_json::from_str(s.content.get()) serde_json::from_str(s.content.get())
.map(|c: RoomGuestAccessEventContent| { .map(|c: RoomGuestAccessEventContent| {
@ -378,7 +376,7 @@ impl Service {
avatar_url: services() avatar_url: services()
.rooms .rooms
.state_accessor .state_accessor
.room_state_get(room_id, &StateEventType::RoomAvatar, "")? .room_state_get(&room_id, &StateEventType::RoomAvatar, "")?
.map(|s| { .map(|s| {
serde_json::from_str(s.content.get()) serde_json::from_str(s.content.get())
.map(|c: RoomAvatarEventContent| c.url) .map(|c: RoomAvatarEventContent| c.url)
@ -391,7 +389,7 @@ impl Service {
let join_rule = services() let join_rule = services()
.rooms .rooms
.state_accessor .state_accessor
.room_state_get(room_id, &StateEventType::RoomJoinRules, "")? .room_state_get(&room_id, &StateEventType::RoomJoinRules, "")?
.map(|s| { .map(|s| {
serde_json::from_str(s.content.get()) serde_json::from_str(s.content.get())
.map(|c: RoomJoinRulesEventContent| c.join_rule) .map(|c: RoomJoinRulesEventContent| c.join_rule)
@ -417,7 +415,7 @@ impl Service {
room_type: services() room_type: services()
.rooms .rooms
.state_accessor .state_accessor
.room_state_get(room_id, &StateEventType::RoomCreate, "")? .room_state_get(&room_id, &StateEventType::RoomCreate, "")?
.map(|s| { .map(|s| {
serde_json::from_str::<RoomCreateEventContent>(s.content.get()).map_err(|e| { serde_json::from_str::<RoomCreateEventContent>(s.content.get()).map_err(|e| {
error!("Invalid room create event in database: {}", e); error!("Invalid room create event in database: {}", e);
@ -457,7 +455,7 @@ impl Service {
SpaceRoomJoinRule::Invite => services() SpaceRoomJoinRule::Invite => services()
.rooms .rooms
.state_cache .state_cache
.is_joined(sender_user, room_id)?, .is_joined(sender_user, &room_id)?,
_ => false, _ => false,
}; };
@ -481,7 +479,8 @@ impl Service {
match join_rule { match join_rule {
JoinRule::Restricted(r) => { JoinRule::Restricted(r) => {
for rule in &r.allow { for rule in &r.allow {
if let join_rules::AllowRule::RoomMembership(rm) = rule { match rule {
join_rules::AllowRule::RoomMembership(rm) => {
if let Ok(true) = services() if let Ok(true) = services()
.rooms .rooms
.state_cache .state_cache
@ -490,6 +489,8 @@ impl Service {
return Ok(true); return Ok(true);
} }
} }
_ => {}
}
} }
Ok(false) Ok(false)

View file

@ -6,7 +6,6 @@ use std::{
pub use data::Data; pub use data::Data;
use ruma::{ use ruma::{
api::client::error::ErrorKind,
events::{ events::{
room::{create::RoomCreateEventContent, member::MembershipState}, room::{create::RoomCreateEventContent, member::MembershipState},
AnyStrippedStateEvent, StateEventType, TimelineEventType, AnyStrippedStateEvent, StateEventType, TimelineEventType,
@ -41,7 +40,7 @@ impl Service {
services() services()
.rooms .rooms
.state_compressor .state_compressor
.parse_compressed_state_event(new) .parse_compressed_state_event(&new)
.ok() .ok()
.map(|(_, id)| id) .map(|(_, id)| id)
}) { }) {
@ -332,7 +331,7 @@ impl Service {
"", "",
)?; )?;
let create_event_content: RoomCreateEventContent = create_event let create_event_content: Option<RoomCreateEventContent> = create_event
.as_ref() .as_ref()
.map(|create_event| { .map(|create_event| {
serde_json::from_str(create_event.content.get()).map_err(|e| { serde_json::from_str(create_event.content.get()).map_err(|e| {
@ -340,10 +339,14 @@ impl Service {
Error::bad_database("Invalid create event in db.") Error::bad_database("Invalid create event in db.")
}) })
}) })
.transpose()? .transpose()?;
.ok_or_else(|| Error::BadRequest(ErrorKind::InvalidParam, "No create event found"))?; let room_version = create_event_content
.map(|create_event| create_event.room_version)
Ok(create_event_content.room_version) .ok_or_else(|| {
warn!("Invalid room version for room {room_id}");
Error::BadDatabase("Invalid room version")
})?;
Ok(room_version)
} }
pub fn get_room_shortstatehash(&self, room_id: &RoomId) -> Result<Option<u64>> { pub fn get_room_shortstatehash(&self, room_id: &RoomId) -> Result<Option<u64>> {
@ -412,7 +415,7 @@ impl Service {
services() services()
.rooms .rooms
.state_compressor .state_compressor
.parse_compressed_state_event(compressed) .parse_compressed_state_event(&compressed)
.ok() .ok()
}) })
.filter_map(|(shortstatekey, event_id)| { .filter_map(|(shortstatekey, event_id)| {

View file

@ -16,7 +16,7 @@ use ruma::{
}, },
StateEventType, StateEventType,
}, },
EventId, JsOption, OwnedServerName, OwnedUserId, RoomId, ServerName, UserId, EventId, OwnedServerName, OwnedUserId, RoomId, ServerName, UserId,
}; };
use tracing::error; use tracing::error;
@ -180,7 +180,7 @@ impl Service {
return Ok(*visibility); return Ok(*visibility);
} }
let currently_member = services().rooms.state_cache.is_joined(user_id, room_id)?; let currently_member = services().rooms.state_cache.is_joined(&user_id, &room_id)?;
let history_visibility = self let history_visibility = self
.state_get(shortstatehash, &StateEventType::RoomHistoryVisibility, "")? .state_get(shortstatehash, &StateEventType::RoomHistoryVisibility, "")?
@ -197,11 +197,11 @@ impl Service {
HistoryVisibility::Shared => currently_member, HistoryVisibility::Shared => currently_member,
HistoryVisibility::Invited => { HistoryVisibility::Invited => {
// Allow if any member on requesting server was AT LEAST invited, else deny // Allow if any member on requesting server was AT LEAST invited, else deny
self.user_was_invited(shortstatehash, user_id) self.user_was_invited(shortstatehash, &user_id)
} }
HistoryVisibility::Joined => { HistoryVisibility::Joined => {
// Allow if any member on requested server was joined, else deny // Allow if any member on requested server was joined, else deny
self.user_was_joined(shortstatehash, user_id) self.user_was_joined(shortstatehash, &user_id)
} }
_ => { _ => {
error!("Unknown history visibility {history_visibility}"); error!("Unknown history visibility {history_visibility}");
@ -221,10 +221,10 @@ impl Service {
/// the room's history_visibility at that event's state. /// the room's history_visibility at that event's state.
#[tracing::instrument(skip(self, user_id, room_id))] #[tracing::instrument(skip(self, user_id, room_id))]
pub fn user_can_see_state_events(&self, user_id: &UserId, room_id: &RoomId) -> Result<bool> { pub fn user_can_see_state_events(&self, user_id: &UserId, room_id: &RoomId) -> Result<bool> {
let currently_member = services().rooms.state_cache.is_joined(user_id, room_id)?; let currently_member = services().rooms.state_cache.is_joined(&user_id, &room_id)?;
let history_visibility = self let history_visibility = self
.room_state_get(room_id, &StateEventType::RoomHistoryVisibility, "")? .room_state_get(&room_id, &StateEventType::RoomHistoryVisibility, "")?
.map_or(Ok(HistoryVisibility::Shared), |s| { .map_or(Ok(HistoryVisibility::Shared), |s| {
serde_json::from_str(s.content.get()) serde_json::from_str(s.content.get())
.map(|c: RoomHistoryVisibilityEventContent| c.history_visibility) .map(|c: RoomHistoryVisibilityEventContent| c.history_visibility)
@ -276,26 +276,20 @@ impl Service {
services() services()
.rooms .rooms
.state_accessor .state_accessor
.room_state_get(room_id, &StateEventType::RoomName, "")? .room_state_get(&room_id, &StateEventType::RoomName, "")?
.map_or(Ok(None), |s| { .map_or(Ok(None), |s| {
serde_json::from_str(s.content.get()) serde_json::from_str(s.content.get())
.map(|c: RoomNameEventContent| Some(c.name)) .map(|c: RoomNameEventContent| c.name)
.map_err(|e| { .map_err(|_| Error::bad_database("Invalid room name event in database."))
error!(
"Invalid room name event in database for room {}. {}",
room_id, e
);
Error::bad_database("Invalid room name event in database.")
})
}) })
} }
pub fn get_avatar(&self, room_id: &RoomId) -> Result<JsOption<RoomAvatarEventContent>> { pub fn get_avatar(&self, room_id: &RoomId) -> Result<Option<RoomAvatarEventContent>> {
services() services()
.rooms .rooms
.state_accessor .state_accessor
.room_state_get(room_id, &StateEventType::RoomAvatar, "")? .room_state_get(&room_id, &StateEventType::RoomAvatar, "")?
.map_or(Ok(JsOption::Undefined), |s| { .map_or(Ok(None), |s| {
serde_json::from_str(s.content.get()) serde_json::from_str(s.content.get())
.map_err(|_| Error::bad_database("Invalid room avatar event in database.")) .map_err(|_| Error::bad_database("Invalid room avatar event in database."))
}) })
@ -309,7 +303,7 @@ impl Service {
services() services()
.rooms .rooms
.state_accessor .state_accessor
.room_state_get(room_id, &StateEventType::RoomMember, user_id.as_str())? .room_state_get(&room_id, &StateEventType::RoomMember, user_id.as_str())?
.map_or(Ok(None), |s| { .map_or(Ok(None), |s| {
serde_json::from_str(s.content.get()) serde_json::from_str(s.content.get())
.map_err(|_| Error::bad_database("Invalid room member event in database.")) .map_err(|_| Error::bad_database("Invalid room member event in database."))

View file

@ -78,7 +78,6 @@ pub trait Data: Send + Sync {
) -> Box<dyn Iterator<Item = Result<OwnedRoomId>> + 'a>; ) -> Box<dyn Iterator<Item = Result<OwnedRoomId>> + 'a>;
/// Returns an iterator over all rooms a user was invited to. /// Returns an iterator over all rooms a user was invited to.
#[allow(clippy::type_complexity)]
fn rooms_invited<'a>( fn rooms_invited<'a>(
&'a self, &'a self,
user_id: &UserId, user_id: &UserId,
@ -97,7 +96,6 @@ pub trait Data: Send + Sync {
) -> Result<Option<Vec<Raw<AnyStrippedStateEvent>>>>; ) -> Result<Option<Vec<Raw<AnyStrippedStateEvent>>>>;
/// Returns an iterator over all rooms a user left. /// Returns an iterator over all rooms a user left.
#[allow(clippy::type_complexity)]
fn rooms_left<'a>( fn rooms_left<'a>(
&'a self, &'a self,
user_id: &UserId, user_id: &UserId,

View file

@ -16,7 +16,6 @@ use self::data::StateDiff;
pub struct Service { pub struct Service {
pub db: &'static dyn Data, pub db: &'static dyn Data,
#[allow(clippy::type_complexity)]
pub stateinfo_cache: Mutex< pub stateinfo_cache: Mutex<
LruCache< LruCache<
u64, u64,
@ -34,7 +33,6 @@ pub type CompressedStateEvent = [u8; 2 * size_of::<u64>()];
impl Service { impl Service {
/// Returns a stack with info on shortstatehash, full state, added diff and removed diff for the selected shortstatehash and each parent layer. /// Returns a stack with info on shortstatehash, full state, added diff and removed diff for the selected shortstatehash and each parent layer.
#[allow(clippy::type_complexity)]
#[tracing::instrument(skip(self))] #[tracing::instrument(skip(self))]
pub fn load_shortstatehash_info( pub fn load_shortstatehash_info(
&self, &self,
@ -133,7 +131,6 @@ impl Service {
/// * `statediffremoved` - Removed from base. Each vec is shortstatekey+shorteventid /// * `statediffremoved` - Removed from base. Each vec is shortstatekey+shorteventid
/// * `diff_to_sibling` - Approximately how much the diff grows each time for this layer /// * `diff_to_sibling` - Approximately how much the diff grows each time for this layer
/// * `parent_states` - A stack with info on shortstatehash, full state, added diff and removed diff for each parent layer /// * `parent_states` - A stack with info on shortstatehash, full state, added diff and removed diff for each parent layer
#[allow(clippy::type_complexity)]
#[tracing::instrument(skip( #[tracing::instrument(skip(
self, self,
statediffnew, statediffnew,
@ -167,7 +164,7 @@ impl Service {
for removed in statediffremoved.iter() { for removed in statediffremoved.iter() {
if !parent_new.remove(removed) { if !parent_new.remove(removed) {
// It was not added in the parent and we removed it // It was not added in the parent and we removed it
parent_removed.insert(*removed); parent_removed.insert(removed.clone());
} }
// Else it was added in the parent and we removed it again. We can forget this change // Else it was added in the parent and we removed it again. We can forget this change
} }
@ -175,7 +172,7 @@ impl Service {
for new in statediffnew.iter() { for new in statediffnew.iter() {
if !parent_removed.remove(new) { if !parent_removed.remove(new) {
// It was not touched in the parent and we added it // It was not touched in the parent and we added it
parent_new.insert(*new); parent_new.insert(new.clone());
} }
// Else it was removed in the parent and we added it again. We can forget this change // Else it was removed in the parent and we added it again. We can forget this change
} }
@ -220,7 +217,7 @@ impl Service {
for removed in statediffremoved.iter() { for removed in statediffremoved.iter() {
if !parent_new.remove(removed) { if !parent_new.remove(removed) {
// It was not added in the parent and we removed it // It was not added in the parent and we removed it
parent_removed.insert(*removed); parent_removed.insert(removed.clone());
} }
// Else it was added in the parent and we removed it again. We can forget this change // Else it was added in the parent and we removed it again. We can forget this change
} }
@ -228,7 +225,7 @@ impl Service {
for new in statediffnew.iter() { for new in statediffnew.iter() {
if !parent_removed.remove(new) { if !parent_removed.remove(new) {
// It was not touched in the parent and we added it // It was not touched in the parent and we added it
parent_new.insert(*new); parent_new.insert(new.clone());
} }
// Else it was removed in the parent and we added it again. We can forget this change // Else it was removed in the parent and we added it again. We can forget this change
} }
@ -256,7 +253,6 @@ impl Service {
} }
/// Returns the new shortstatehash, and the state diff from the previous room state /// Returns the new shortstatehash, and the state diff from the previous room state
#[allow(clippy::type_complexity)]
pub fn save_state( pub fn save_state(
&self, &self,
room_id: &RoomId, room_id: &RoomId,

View file

@ -2,7 +2,6 @@ use crate::{PduEvent, Result};
use ruma::{api::client::threads::get_threads::v1::IncludeThreads, OwnedUserId, RoomId, UserId}; use ruma::{api::client::threads::get_threads::v1::IncludeThreads, OwnedUserId, RoomId, UserId};
pub trait Data: Send + Sync { pub trait Data: Send + Sync {
#[allow(clippy::type_complexity)]
fn threads_until<'a>( fn threads_until<'a>(
&'a self, &'a self,
user_id: &'a UserId, user_id: &'a UserId,

View file

@ -26,7 +26,7 @@ impl Service {
self.db.threads_until(user_id, room_id, until, include) self.db.threads_until(user_id, room_id, until, include)
} }
pub fn add_to_thread(&self, root_event_id: &EventId, pdu: &PduEvent) -> Result<()> { pub fn add_to_thread<'a>(&'a self, root_event_id: &EventId, pdu: &PduEvent) -> Result<()> {
let root_id = &services() let root_id = &services()
.rooms .rooms
.timeline .timeline
@ -103,7 +103,7 @@ impl Service {
} }
let mut users = Vec::new(); let mut users = Vec::new();
if let Some(userids) = self.db.get_participants(root_id)? { if let Some(userids) = self.db.get_participants(&root_id)? {
users.extend_from_slice(&userids); users.extend_from_slice(&userids);
users.push(pdu.sender.clone()); users.push(pdu.sender.clone());
} else { } else {

View file

@ -66,7 +66,6 @@ pub trait Data: Send + Sync {
/// Returns an iterator over all events and their tokens in a room that happened before the /// Returns an iterator over all events and their tokens in a room that happened before the
/// event with id `until` in reverse-chronological order. /// event with id `until` in reverse-chronological order.
#[allow(clippy::type_complexity)]
fn pdus_until<'a>( fn pdus_until<'a>(
&'a self, &'a self,
user_id: &UserId, user_id: &UserId,
@ -76,7 +75,6 @@ pub trait Data: Send + Sync {
/// Returns an iterator over all events in a room that happened after the event with id `from` /// Returns an iterator over all events in a room that happened after the event with id `from`
/// in chronological order. /// in chronological order.
#[allow(clippy::type_complexity)]
fn pdus_after<'a>( fn pdus_after<'a>(
&'a self, &'a self,
user_id: &UserId, user_id: &UserId,

View file

@ -58,8 +58,8 @@ impl PduCount {
} }
pub fn try_from_string(token: &str) -> Result<Self> { pub fn try_from_string(token: &str) -> Result<Self> {
if let Some(stripped) = token.strip_prefix('-') { if token.starts_with('-') {
stripped.parse().map(PduCount::Backfilled) token[1..].parse().map(PduCount::Backfilled)
} else { } else {
token.parse().map(PduCount::Normal) token.parse().map(PduCount::Normal)
} }
@ -90,6 +90,18 @@ impl Ord for PduCount {
} }
} }
} }
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn comparisons() {
assert!(PduCount::Normal(1) < PduCount::Normal(2));
assert!(PduCount::Backfilled(2) < PduCount::Backfilled(1));
assert!(PduCount::Normal(1) > PduCount::Backfilled(1));
assert!(PduCount::Backfilled(1) < PduCount::Normal(1));
}
}
pub struct Service { pub struct Service {
pub db: &'static dyn Data, pub db: &'static dyn Data,
@ -100,7 +112,7 @@ pub struct Service {
impl Service { impl Service {
#[tracing::instrument(skip(self))] #[tracing::instrument(skip(self))]
pub fn first_pdu_in_room(&self, room_id: &RoomId) -> Result<Option<Arc<PduEvent>>> { pub fn first_pdu_in_room(&self, room_id: &RoomId) -> Result<Option<Arc<PduEvent>>> {
self.all_pdus(user_id!("@doesntmatter:conduit.rs"), room_id)? self.all_pdus(&user_id!("@doesntmatter:conduit.rs"), &room_id)?
.next() .next()
.map(|o| o.map(|(_, p)| Arc::new(p))) .map(|o| o.map(|(_, p)| Arc::new(p)))
.transpose() .transpose()
@ -308,25 +320,12 @@ impl Service {
let mut notifies = Vec::new(); let mut notifies = Vec::new();
let mut highlights = Vec::new(); let mut highlights = Vec::new();
let mut push_target = services() for user in services()
.rooms .rooms
.state_cache .state_cache
.get_our_real_users(&pdu.room_id)?; .get_our_real_users(&pdu.room_id)?
.iter()
if pdu.kind == TimelineEventType::RoomMember { {
if let Some(state_key) = &pdu.state_key {
let target_user_id = UserId::parse(state_key.clone())
.expect("This state_key was previously validated");
if !push_target.contains(&target_user_id) {
let mut target = push_target.as_ref().clone();
target.insert(target_user_id);
push_target = Arc::new(target);
}
}
}
for user in push_target.iter() {
// Don't notify the user of their own events // Don't notify the user of their own events
if user == &pdu.sender { if user == &pdu.sender {
continue; continue;
@ -459,7 +458,7 @@ impl Service {
let to_conduit = body.starts_with(&format!("{server_user}: ")) let to_conduit = body.starts_with(&format!("{server_user}: "))
|| body.starts_with(&format!("{server_user} ")) || body.starts_with(&format!("{server_user} "))
|| body == format!("{server_user}:") || body == format!("{server_user}:")
|| body == server_user; || body == format!("{server_user}");
// This will evaluate to false if the emergency password is set up so that // This will evaluate to false if the emergency password is set up so that
// the administrator can execute commands as conduit // the administrator can execute commands as conduit
@ -843,7 +842,7 @@ impl Service {
let target = pdu let target = pdu
.state_key() .state_key()
.filter(|v| v.starts_with('@')) .filter(|v| v.starts_with("@"))
.unwrap_or(sender.as_str()); .unwrap_or(sender.as_str());
let server_name = services().globals.server_name(); let server_name = services().globals.server_name();
let server_user = format!("@conduit:{}", server_name); let server_user = format!("@conduit:{}", server_name);
@ -851,7 +850,7 @@ impl Service {
.map_err(|_| Error::bad_database("Invalid content in pdu."))?; .map_err(|_| Error::bad_database("Invalid content in pdu."))?;
if content.membership == MembershipState::Leave { if content.membership == MembershipState::Leave {
if target == server_user { if target == &server_user {
warn!("Conduit user cannot leave from admins room"); warn!("Conduit user cannot leave from admins room");
return Err(Error::BadRequest( return Err(Error::BadRequest(
ErrorKind::Forbidden, ErrorKind::Forbidden,
@ -877,7 +876,7 @@ impl Service {
} }
if content.membership == MembershipState::Ban && pdu.state_key().is_some() { if content.membership == MembershipState::Ban && pdu.state_key().is_some() {
if target == server_user { if target == &server_user {
warn!("Conduit user cannot be banned in admins room"); warn!("Conduit user cannot be banned in admins room");
return Err(Error::BadRequest( return Err(Error::BadRequest(
ErrorKind::Forbidden, ErrorKind::Forbidden,
@ -1049,7 +1048,7 @@ impl Service {
#[tracing::instrument(skip(self, room_id))] #[tracing::instrument(skip(self, room_id))]
pub async fn backfill_if_required(&self, room_id: &RoomId, from: PduCount) -> Result<()> { pub async fn backfill_if_required(&self, room_id: &RoomId, from: PduCount) -> Result<()> {
let first_pdu = self let first_pdu = self
.all_pdus(user_id!("@doesntmatter:conduit.rs"), room_id)? .all_pdus(&user_id!("@doesntmatter:conduit.rs"), &room_id)?
.next() .next()
.expect("Room is not empty")?; .expect("Room is not empty")?;
@ -1061,7 +1060,7 @@ impl Service {
let power_levels: RoomPowerLevelsEventContent = services() let power_levels: RoomPowerLevelsEventContent = services()
.rooms .rooms
.state_accessor .state_accessor
.room_state_get(room_id, &StateEventType::RoomPowerLevels, "")? .room_state_get(&room_id, &StateEventType::RoomPowerLevels, "")?
.map(|ev| { .map(|ev| {
serde_json::from_str(ev.content.get()) serde_json::from_str(ev.content.get())
.map_err(|_| Error::bad_database("invalid m.room.power_levels event")) .map_err(|_| Error::bad_database("invalid m.room.power_levels event"))
@ -1092,9 +1091,11 @@ impl Service {
.await; .await;
match response { match response {
Ok(response) => { Ok(response) => {
let pub_key_map = RwLock::new(BTreeMap::new()); let mut pub_key_map = RwLock::new(BTreeMap::new());
for pdu in response.pdus { for pdu in response.pdus {
if let Err(e) = self.backfill_pdu(backfill_server, pdu, &pub_key_map).await if let Err(e) = self
.backfill_pdu(backfill_server, pdu, &mut pub_key_map)
.await
{ {
warn!("Failed to add backfilled pdu: {e}"); warn!("Failed to add backfilled pdu: {e}");
} }
@ -1141,7 +1142,7 @@ impl Service {
services() services()
.rooms .rooms
.event_handler .event_handler
.handle_incoming_pdu(origin, &event_id, &room_id, value, false, pub_key_map) .handle_incoming_pdu(origin, &event_id, &room_id, value, false, &pub_key_map)
.await?; .await?;
let value = self.get_pdu_json(&event_id)?.expect("We just created it"); let value = self.get_pdu_json(&event_id)?.expect("We just created it");
@ -1174,7 +1175,8 @@ impl Service {
drop(insert_lock); drop(insert_lock);
if pdu.kind == TimelineEventType::RoomMessage { match pdu.kind {
TimelineEventType::RoomMessage => {
#[derive(Deserialize)] #[derive(Deserialize)]
struct ExtractBody { struct ExtractBody {
body: Option<String>, body: Option<String>,
@ -1190,22 +1192,11 @@ impl Service {
.index_pdu(shortroomid, &pdu_id, &body)?; .index_pdu(shortroomid, &pdu_id, &body)?;
} }
} }
_ => {}
}
drop(mutex_lock); drop(mutex_lock);
info!("Prepended backfill pdu"); info!("Prepended backfill pdu");
Ok(()) Ok(())
} }
} }
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn comparisons() {
assert!(PduCount::Normal(1) < PduCount::Normal(2));
assert!(PduCount::Backfilled(2) < PduCount::Backfilled(1));
assert!(PduCount::Normal(1) > PduCount::Backfilled(1));
assert!(PduCount::Backfilled(1) < PduCount::Normal(1));
}
}

View file

@ -5,7 +5,6 @@ use crate::Result;
use super::{OutgoingKind, SendingEventType}; use super::{OutgoingKind, SendingEventType};
pub trait Data: Send + Sync { pub trait Data: Send + Sync {
#[allow(clippy::type_complexity)]
fn active_requests<'a>( fn active_requests<'a>(
&'a self, &'a self,
) -> Box<dyn Iterator<Item = Result<(Vec<u8>, OutgoingKind, SendingEventType)>> + 'a>; ) -> Box<dyn Iterator<Item = Result<(Vec<u8>, OutgoingKind, SendingEventType)>> + 'a>;

View file

@ -131,7 +131,7 @@ impl Service {
for (key, outgoing_kind, event) in self.db.active_requests().filter_map(|r| r.ok()) { for (key, outgoing_kind, event) in self.db.active_requests().filter_map(|r| r.ok()) {
let entry = initial_transactions let entry = initial_transactions
.entry(outgoing_kind.clone()) .entry(outgoing_kind.clone())
.or_default(); .or_insert_with(Vec::new);
if entry.len() > 30 { if entry.len() > 30 {
warn!( warn!(

View file

@ -1,6 +1,6 @@
mod data; mod data;
use std::{ use std::{
collections::{BTreeMap, BTreeSet}, collections::BTreeMap,
mem, mem,
sync::{Arc, Mutex}, sync::{Arc, Mutex},
}; };
@ -28,13 +28,12 @@ use crate::{services, Error, Result};
pub struct SlidingSyncCache { pub struct SlidingSyncCache {
lists: BTreeMap<String, SyncRequestList>, lists: BTreeMap<String, SyncRequestList>,
subscriptions: BTreeMap<OwnedRoomId, sync_events::v4::RoomSubscription>, subscriptions: BTreeMap<OwnedRoomId, sync_events::v4::RoomSubscription>,
known_rooms: BTreeMap<String, BTreeMap<OwnedRoomId, u64>>, // For every room, the roomsince number known_rooms: BTreeMap<String, BTreeMap<OwnedRoomId, bool>>,
extensions: ExtensionsConfig, extensions: ExtensionsConfig,
} }
pub struct Service { pub struct Service {
pub db: &'static dyn Data, pub db: &'static dyn Data,
#[allow(clippy::type_complexity)]
pub connections: pub connections:
Mutex<BTreeMap<(OwnedUserId, OwnedDeviceId, String), Arc<Mutex<SlidingSyncCache>>>>, Mutex<BTreeMap<(OwnedUserId, OwnedDeviceId, String), Arc<Mutex<SlidingSyncCache>>>>,
} }
@ -62,7 +61,7 @@ impl Service {
user_id: OwnedUserId, user_id: OwnedUserId,
device_id: OwnedDeviceId, device_id: OwnedDeviceId,
request: &mut sync_events::v4::Request, request: &mut sync_events::v4::Request,
) -> BTreeMap<String, BTreeMap<OwnedRoomId, u64>> { ) -> BTreeMap<String, BTreeMap<OwnedRoomId, bool>> {
let Some(conn_id) = request.conn_id.clone() else { let Some(conn_id) = request.conn_id.clone() else {
return BTreeMap::new(); return BTreeMap::new();
}; };
@ -128,7 +127,6 @@ impl Service {
} }
} }
(_, Some(cached_filters)) => list.filters = Some(cached_filters), (_, Some(cached_filters)) => list.filters = Some(cached_filters),
(Some(list_filters), _) => list.filters = Some(list_filters.clone()),
(_, _) => {} (_, _) => {}
} }
if list.bump_event_types.is_empty() { if list.bump_event_types.is_empty() {
@ -138,18 +136,12 @@ impl Service {
cached.lists.insert(list_id.clone(), list.clone()); cached.lists.insert(list_id.clone(), list.clone());
} }
cached.subscriptions.extend(
request
.room_subscriptions
.iter()
.map(|(k, v)| (k.clone(), v.clone())),
);
request.room_subscriptions.extend(
cached cached
.subscriptions .subscriptions
.iter() .extend(request.room_subscriptions.clone().into_iter());
.map(|(k, v)| (k.clone(), v.clone())), request
); .room_subscriptions
.extend(cached.subscriptions.clone().into_iter());
request.extensions.e2ee.enabled = request request.extensions.e2ee.enabled = request
.extensions .extensions
@ -218,8 +210,7 @@ impl Service {
device_id: OwnedDeviceId, device_id: OwnedDeviceId,
conn_id: String, conn_id: String,
list_id: String, list_id: String,
new_cached_rooms: BTreeSet<OwnedRoomId>, new_cached_rooms: BTreeMap<OwnedRoomId, bool>,
globalsince: u64,
) { ) {
let mut cache = self.connections.lock().unwrap(); let mut cache = self.connections.lock().unwrap();
let cached = Arc::clone( let cached = Arc::clone(
@ -237,20 +228,7 @@ impl Service {
let cached = &mut cached.lock().unwrap(); let cached = &mut cached.lock().unwrap();
drop(cache); drop(cache);
for (roomid, lastsince) in cached cached.known_rooms.insert(list_id, new_cached_rooms);
.known_rooms
.entry(list_id.clone())
.or_default()
.iter_mut()
{
if !new_cached_rooms.contains(roomid) {
*lastsince = 0;
}
}
let list = cached.known_rooms.entry(list_id).or_default();
for roomid in new_cached_rooms {
list.insert(roomid, globalsince);
}
} }
/// Check if account is deactivated /// Check if account is deactivated

View file

@ -116,11 +116,9 @@ impl Error {
Self::BadRequest(kind, _) => ( Self::BadRequest(kind, _) => (
kind.clone(), kind.clone(),
match kind { match kind {
WrongRoomKeysVersion { .. } Forbidden | GuestAccessForbidden | ThreepidAuthFailed | ThreepidDenied => {
| Forbidden StatusCode::FORBIDDEN
| GuestAccessForbidden }
| ThreepidAuthFailed
| ThreepidDenied => StatusCode::FORBIDDEN,
Unauthorized | UnknownToken { .. } | MissingToken => StatusCode::UNAUTHORIZED, Unauthorized | UnknownToken { .. } | MissingToken => StatusCode::UNAUTHORIZED,
NotFound | Unrecognized => StatusCode::NOT_FOUND, NotFound | Unrecognized => StatusCode::NOT_FOUND,
LimitExceeded { .. } => StatusCode::TOO_MANY_REQUESTS, LimitExceeded { .. } => StatusCode::TOO_MANY_REQUESTS,