Compare commits


No commits in common. "next" and "master" have entirely different histories.
next...master

68 changed files with 1842 additions and 2335 deletions

.envrc (4 changed lines)

@@ -1,5 +1 @@
#!/usr/bin/env bash
use flake
PATH_add bin

.gitignore (3 changed lines)

@@ -68,6 +68,3 @@ cached_target
# Direnv cache
/.direnv
# Gitlab CI cache
/.gitlab-ci.d

.gitlab-ci.yml

@@ -1,180 +1,244 @@
stages:
- ci
- artifacts
- publish
- build
- build docker image
- test
- upload artifacts
variables:
# Makes some things print in color
TERM: ansi
# Make GitLab CI go fast:
GIT_SUBMODULE_STRATEGY: recursive
FF_USE_FASTZIP: 1
CACHE_COMPRESSION_LEVEL: fastest
before_script:
# Enable nix-command and flakes
- if command -v nix > /dev/null; then echo "experimental-features = nix-command flakes" >> /etc/nix/nix.conf; fi
# --------------------------------------------------------------------- #
# Create and publish docker image #
# --------------------------------------------------------------------- #
# Add our own binary cache
- if command -v nix > /dev/null; then echo "extra-substituters = https://nix.computer.surgery/conduit" >> /etc/nix/nix.conf; fi
- if command -v nix > /dev/null; then echo "extra-trusted-public-keys = conduit:ZGAf6P6LhNvnoJJ3Me3PRg7tlLSrPxcQ2RiE5LIppjo=" >> /etc/nix/nix.conf; fi
# Add crane binary cache
- if command -v nix > /dev/null; then echo "extra-substituters = https://crane.cachix.org" >> /etc/nix/nix.conf; fi
- if command -v nix > /dev/null; then echo "extra-trusted-public-keys = crane.cachix.org-1:8Scfpmn9w+hGdXH/Q9tTLiYAE/2dnJYRJP7kl80GuRk=" >> /etc/nix/nix.conf; fi
# Add nix-community binary cache
- if command -v nix > /dev/null; then echo "extra-substituters = https://nix-community.cachix.org" >> /etc/nix/nix.conf; fi
- if command -v nix > /dev/null; then echo "extra-trusted-public-keys = nix-community.cachix.org-1:mB9FSh9qf2dCimDSUo8Zy7bkq5CX+/rkCWyvRCYg3Fs=" >> /etc/nix/nix.conf; fi
# Install direnv and nix-direnv
- if command -v nix > /dev/null; then nix-env -iA nixpkgs.direnv nixpkgs.nix-direnv; fi
# Allow .envrc
- if command -v nix > /dev/null; then direnv allow; fi
# Set CARGO_HOME to a cacheable path
- export CARGO_HOME="$(git rev-parse --show-toplevel)/.gitlab-ci.d/cargo"
ci:
stage: ci
image: nixos/nix:2.19.2
script:
- direnv exec . engage
cache:
key: nix
paths:
- target
- .gitlab-ci.d
static:x86_64-unknown-linux-musl:
stage: artifacts
image: nixos/nix:2.19.2
script:
# Push artifacts and build requirements to binary cache
- ./bin/nix-build-and-cache .#static-x86_64-unknown-linux-musl
# Make the output less difficult to find
- cp result/bin/conduit conduit
artifacts:
paths:
- conduit
static:aarch64-unknown-linux-musl:
stage: artifacts
image: nixos/nix:2.19.2
script:
# Push artifacts and build requirements to binary cache
- ./bin/nix-build-and-cache .#static-aarch64-unknown-linux-musl
# Make the output less difficult to find
- cp result/bin/conduit conduit
artifacts:
paths:
- conduit
# Note that although we have an `oci-image-x86_64-unknown-linux-musl` output,
# we don't build it because it would be largely redundant with this one since
# it's all containerized anyway.
oci-image:x86_64-unknown-linux-gnu:
stage: artifacts
image: nixos/nix:2.19.2
script:
# Push artifacts and build requirements to binary cache
#
# Since the OCI image package is based on the binary package, this has the
# fun side effect of uploading the normal binary too. Conduit users who are
# deploying with Nix can leverage this fact by adding our binary cache to
# their systems.
- ./bin/nix-build-and-cache .#oci-image
# Make the output less difficult to find
- cp result oci-image-amd64.tar.gz
artifacts:
paths:
- oci-image-amd64.tar.gz
oci-image:aarch64-unknown-linux-musl:
stage: artifacts
needs:
# Wait for the static binary job to finish before starting so we don't have
# to build that twice for no reason
- static:aarch64-unknown-linux-musl
image: nixos/nix:2.19.2
script:
# Push artifacts and build requirements to binary cache
- ./bin/nix-build-and-cache .#oci-image-aarch64-unknown-linux-musl
# Make the output less difficult to find
- cp result oci-image-arm64v8.tar.gz
artifacts:
paths:
- oci-image-arm64v8.tar.gz
debian:x86_64-unknown-linux-gnu:
stage: artifacts
# See also `rust-toolchain.toml`
image: rust:1.75.0
script:
- apt-get update && apt-get install -y --no-install-recommends libclang-dev
- cargo install cargo-deb
- cargo deb
# Make the output less difficult to find
- mv target/debian/*.deb conduit.deb
artifacts:
paths:
- conduit.deb
cache:
key: debian
paths:
- target
- .gitlab-ci.d
.push-oci-image:
stage: publish
image: docker:25.0.0
.docker-shared-settings:
stage: "build docker image"
needs: []
tags: [ "docker" ]
variables:
# Docker in Docker:
DOCKER_BUILDKIT: 1
image:
name: docker.io/docker
services:
- docker:25.0.0-dind
variables:
IMAGE_SUFFIX_AMD64: amd64
IMAGE_SUFFIX_ARM64V8: arm64v8
- name: docker.io/docker:dind
alias: docker
script:
- docker load -i oci-image-amd64.tar.gz
- IMAGE_ID_AMD64=$(docker images -q conduit:next)
- docker load -i oci-image-arm64v8.tar.gz
- IMAGE_ID_ARM64V8=$(docker images -q conduit:next)
# Tag and push the architecture specific images
- docker tag $IMAGE_ID_AMD64 $IMAGE_NAME:$CI_COMMIT_SHA-$IMAGE_SUFFIX_AMD64
- docker tag $IMAGE_ID_ARM64V8 $IMAGE_NAME:$CI_COMMIT_SHA-$IMAGE_SUFFIX_ARM64V8
- docker push $IMAGE_NAME:$CI_COMMIT_SHA-$IMAGE_SUFFIX_AMD64
- docker push $IMAGE_NAME:$CI_COMMIT_SHA-$IMAGE_SUFFIX_ARM64V8
# Tag the multi-arch image
- docker manifest create $IMAGE_NAME:$CI_COMMIT_SHA --amend $IMAGE_NAME:$CI_COMMIT_SHA-$IMAGE_SUFFIX_AMD64 --amend $IMAGE_NAME:$CI_COMMIT_SHA-$IMAGE_SUFFIX_ARM64V8
- docker manifest push $IMAGE_NAME:$CI_COMMIT_SHA
# Tag and push the git ref
- docker manifest create $IMAGE_NAME:$CI_COMMIT_REF_NAME --amend $IMAGE_NAME:$CI_COMMIT_SHA-$IMAGE_SUFFIX_AMD64 --amend $IMAGE_NAME:$CI_COMMIT_SHA-$IMAGE_SUFFIX_ARM64V8
- docker manifest push $IMAGE_NAME:$CI_COMMIT_REF_NAME
# Tag git tags as 'latest'
- |
if [[ -n "$CI_COMMIT_TAG" ]]; then
docker manifest create $IMAGE_NAME:latest --amend $IMAGE_NAME:$CI_COMMIT_SHA-$IMAGE_SUFFIX_AMD64 --amend $IMAGE_NAME:$CI_COMMIT_SHA-$IMAGE_SUFFIX_ARM64V8
docker manifest push $IMAGE_NAME:latest
fi
dependencies:
- oci-image:x86_64-unknown-linux-gnu
- oci-image:aarch64-unknown-linux-musl
only:
- next
- master
- tags
- apk add openssh-client
- eval $(ssh-agent -s)
- mkdir -p ~/.ssh && chmod 700 ~/.ssh
- printf "Host *\n\tStrictHostKeyChecking no\n\n" >> ~/.ssh/config
- sh .gitlab/setup-buildx-remote-builders.sh
# Authorize against this project's own image registry:
- docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY
# Build multiplatform image and push to temporary tag:
- >
docker buildx build
--platform "linux/arm/v7,linux/arm64,linux/amd64"
--pull
--tag "$CI_REGISTRY_IMAGE/temporary-ci-images:$CI_JOB_ID"
--push
--provenance=false
--file "Dockerfile" .
# Build multiplatform image to deb stage and extract their .deb files:
- >
docker buildx build
--platform "linux/arm/v7,linux/arm64,linux/amd64"
--target "packager-result"
--output="type=local,dest=/tmp/build-output"
--provenance=false
--file "Dockerfile" .
# Build multiplatform image to binary stage and extract their binaries:
- >
docker buildx build
--platform "linux/arm/v7,linux/arm64,linux/amd64"
--target "builder-result"
--output="type=local,dest=/tmp/build-output"
--provenance=false
--file "Dockerfile" .
# Copy to GitLab container registry:
- >
docker buildx imagetools create
--tag "$CI_REGISTRY_IMAGE/$TAG"
--tag "$CI_REGISTRY_IMAGE/$TAG-bullseye"
--tag "$CI_REGISTRY_IMAGE/$TAG-commit-$CI_COMMIT_SHORT_SHA"
"$CI_REGISTRY_IMAGE/temporary-ci-images:$CI_JOB_ID"
# if DockerHub credentials exist, also copy to dockerhub:
- if [ -n "${DOCKER_HUB}" ]; then docker login -u "$DOCKER_HUB_USER" -p "$DOCKER_HUB_PASSWORD" "$DOCKER_HUB"; fi
- >
if [ -n "${DOCKER_HUB}" ]; then
docker buildx imagetools create
--tag "$DOCKER_HUB_IMAGE/$TAG"
--tag "$DOCKER_HUB_IMAGE/$TAG-bullseye"
--tag "$DOCKER_HUB_IMAGE/$TAG-commit-$CI_COMMIT_SHORT_SHA"
"$CI_REGISTRY_IMAGE/temporary-ci-images:$CI_JOB_ID"
; fi
- mv /tmp/build-output ./
artifacts:
paths:
- "./build-output/"
oci-image:push-gitlab:
extends: .push-oci-image
docker:next:
extends: .docker-shared-settings
rules:
- if: '$BUILD_SERVER_SSH_PRIVATE_KEY && $CI_COMMIT_BRANCH == "next"'
variables:
IMAGE_NAME: $CI_REGISTRY_IMAGE/matrix-conduit
before_script:
- docker login -u $CI_REGISTRY_USER -p $CI_REGISTRY_PASSWORD $CI_REGISTRY
TAG: "matrix-conduit:next"
oci-image:push-dockerhub:
extends: .push-oci-image
docker:master:
extends: .docker-shared-settings
rules:
- if: '$BUILD_SERVER_SSH_PRIVATE_KEY && $CI_COMMIT_BRANCH == "master"'
variables:
IMAGE_NAME: matrixconduit/matrix-conduit
TAG: "matrix-conduit:latest"
docker:tags:
extends: .docker-shared-settings
rules:
- if: "$BUILD_SERVER_SSH_PRIVATE_KEY && $CI_COMMIT_TAG"
variables:
TAG: "matrix-conduit:$CI_COMMIT_TAG"
docker build debugging:
extends: .docker-shared-settings
rules:
- if: "$CI_MERGE_REQUEST_TITLE =~ /.*[Dd]ocker.*/"
variables:
TAG: "matrix-conduit-docker-tests:latest"
# --------------------------------------------------------------------- #
# Run tests #
# --------------------------------------------------------------------- #
cargo check:
stage: test
image: docker.io/rust:1.70.0-bullseye
needs: []
interruptible: true
before_script:
- docker login -u $DOCKER_HUB_USER -p $DOCKER_HUB_PASSWORD
- "rustup show && rustc --version && cargo --version" # Print version info for debugging
- apt-get update && apt-get -y --no-install-recommends install libclang-dev # dependency for rocksdb
script:
- cargo check
.test-shared-settings:
stage: "test"
needs: []
image: "registry.gitlab.com/jfowl/conduit-containers/rust-with-tools:latest"
tags: ["docker"]
variables:
CARGO_INCREMENTAL: "false" # https://matklad.github.io/2021/09/04/fast-rust-builds.html#ci-workflow
interruptible: true
test:cargo:
extends: .test-shared-settings
before_script:
- apt-get update && apt-get -y --no-install-recommends install libclang-dev # dependency for rocksdb
script:
- rustc --version && cargo --version # Print version info for debugging
- "cargo test --color always --workspace --verbose --locked --no-fail-fast"
test:clippy:
extends: .test-shared-settings
allow_failure: true
before_script:
- rustup component add clippy
- apt-get update && apt-get -y --no-install-recommends install libclang-dev # dependency for rocksdb
script:
- rustc --version && cargo --version # Print version info for debugging
- "cargo clippy --color always --verbose --message-format=json | gitlab-report -p clippy > $CI_PROJECT_DIR/gl-code-quality-report.json"
artifacts:
when: always
reports:
codequality: gl-code-quality-report.json
test:format:
extends: .test-shared-settings
before_script:
- rustup component add rustfmt
script:
- cargo fmt --all -- --check
test:audit:
extends: .test-shared-settings
allow_failure: true
script:
- cargo audit --color always || true
- cargo audit --stale --json | gitlab-report -p audit > gl-sast-report.json
artifacts:
when: always
reports:
sast: gl-sast-report.json
test:dockerlint:
stage: "test"
needs: []
image: "ghcr.io/hadolint/hadolint@sha256:6c4b7c23f96339489dd35f21a711996d7ce63047467a9a562287748a03ad5242" # 2.8.0-alpine
interruptible: true
script:
- hadolint --version
# First pass: Print for CI log:
- >
hadolint
--no-fail --verbose
./Dockerfile
# Then output the results into a json for GitLab to pretty-print this in the MR:
- >
hadolint
--format gitlab_codeclimate
--failure-threshold error
./Dockerfile > dockerlint.json
artifacts:
when: always
reports:
codequality: dockerlint.json
paths:
- dockerlint.json
rules:
- if: '$CI_COMMIT_REF_NAME != "master"'
changes:
- docker/*Dockerfile
- Dockerfile
- .gitlab-ci.yml
- if: '$CI_COMMIT_REF_NAME == "master"'
- if: '$CI_COMMIT_REF_NAME == "next"'
# --------------------------------------------------------------------- #
# Store binaries as package so they have download urls #
# --------------------------------------------------------------------- #
# DISABLED FOR NOW, NEEDS TO BE FIXED AT A LATER TIME:
#publish:package:
# stage: "upload artifacts"
# needs:
# - "docker:tags"
# rules:
# - if: "$CI_COMMIT_TAG"
# image: curlimages/curl:latest
# tags: ["docker"]
# variables:
# GIT_STRATEGY: "none" # Don't need a clean copy of the code, we just operate on artifacts
# script:
# - 'BASE_URL="${CI_API_V4_URL}/projects/${CI_PROJECT_ID}/packages/generic/conduit-${CI_COMMIT_REF_SLUG}/build-${CI_PIPELINE_ID}"'
# - 'curl --header "JOB-TOKEN: $CI_JOB_TOKEN" --upload-file build-output/linux_amd64/conduit "${BASE_URL}/conduit-x86_64-unknown-linux-gnu"'
# - 'curl --header "JOB-TOKEN: $CI_JOB_TOKEN" --upload-file build-output/linux_arm_v7/conduit "${BASE_URL}/conduit-armv7-unknown-linux-gnu"'
# - 'curl --header "JOB-TOKEN: $CI_JOB_TOKEN" --upload-file build-output/linux_arm64/conduit "${BASE_URL}/conduit-aarch64-unknown-linux-gnu"'
# - 'curl --header "JOB-TOKEN: $CI_JOB_TOKEN" --upload-file build-output/linux_amd64/conduit.deb "${BASE_URL}/conduit-x86_64-unknown-linux-gnu.deb"'
# - 'curl --header "JOB-TOKEN: $CI_JOB_TOKEN" --upload-file build-output/linux_arm_v7/conduit.deb "${BASE_URL}/conduit-armv7-unknown-linux-gnu.deb"'
# - 'curl --header "JOB-TOKEN: $CI_JOB_TOKEN" --upload-file build-output/linux_arm64/conduit.deb "${BASE_URL}/conduit-aarch64-unknown-linux-gnu.deb"'
# Avoid duplicate pipelines
# See: https://docs.gitlab.com/ee/ci/yaml/workflow.html#switch-between-branch-pipelines-and-merge-request-pipelines
workflow:
rules:
- if: '$CI_PIPELINE_SOURCE == "merge_request_event"'
- if: "$CI_COMMIT_BRANCH && $CI_OPEN_MERGE_REQUESTS"
when: never
- if: "$CI_COMMIT_BRANCH"
- if: "$CI_COMMIT_TAG"

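For reference, here is a sketch of what the Nix-guarded `before_script` lines above leave in `/etc/nix/nix.conf` on a Nix-based image; the content is taken verbatim from the `echo` commands:

```conf
experimental-features = nix-command flakes
extra-substituters = https://nix.computer.surgery/conduit
extra-trusted-public-keys = conduit:ZGAf6P6LhNvnoJJ3Me3PRg7tlLSrPxcQ2RiE5LIppjo=
extra-substituters = https://crane.cachix.org
extra-trusted-public-keys = crane.cachix.org-1:8Scfpmn9w+hGdXH/Q9tTLiYAE/2dnJYRJP7kl80GuRk=
extra-substituters = https://nix-community.cachix.org
extra-trusted-public-keys = nix-community.cachix.org-1:mB9FSh9qf2dCimDSUo8Zy7bkq5CX+/rkCWyvRCYg3Fs=
```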
Cargo.lock (generated, 1659 changed lines)

File diff suppressed because it is too large.

Cargo.toml

@@ -1,14 +1,3 @@
# Keep alphabetically sorted
[workspace.lints.rust]
explicit_outlives_requirements = "warn"
unused_qualifications = "warn"
# Keep alphabetically sorted
[workspace.lints.clippy]
cloned_instead_of_copied = "warn"
dbg_macro = "warn"
str_to_string = "warn"
[package]
name = "conduit"
description = "A Matrix homeserver written in Rust"
@@ -17,17 +6,17 @@ authors = ["timokoesters <timo@koesters.xyz>"]
homepage = "https://conduit.rs"
repository = "https://gitlab.com/famedly/conduit"
readme = "README.md"
version = "0.7.0-alpha"
version = "0.6.0"
edition = "2021"
# See also `rust-toolchain.toml`
rust-version = "1.75.0"
# When changing this, make sure to update the `flake.lock` file by running
# `nix flake update`. If you don't have Nix installed or otherwise don't know
# how to do this, ping `@charles:computer.surgery` or `@dusk:gaze.systems` in
# the matrix room.
rust-version = "1.70.0"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[lints]
workspace = true
[dependencies]
# Web framework
axum = { version = "0.6.18", default-features = false, features = ["form", "headers", "http1", "http2", "json", "matched-path"], optional = true }
@@ -37,7 +26,7 @@ tower-http = { version = "0.4.1", features = ["add-extension", "cors", "sensitiv
# Used for matrix spec type definitions and helpers
#ruma = { version = "0.4.0", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] }
ruma = { git = "https://github.com/ruma/ruma", rev = "1a1c61ee1e8f0936e956a3b69c931ce12ee28475", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-msc2448", "unstable-msc3575", "unstable-exhaustive-types", "ring-compat", "unstable-unspecified" ] }
ruma = { git = "https://github.com/ruma/ruma", rev = "3bd58e3c899457c2d55c45268dcb8a65ae682d54", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-msc2448", "unstable-msc3575", "unstable-exhaustive-types", "ring-compat", "unstable-unspecified" ] }
#ruma = { git = "https://github.com/timokoesters/ruma", rev = "4ec9c69bb7e09391add2382b3ebac97b6e8f4c64", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-msc2448", "unstable-msc3575", "unstable-exhaustive-types", "ring-compat", "unstable-unspecified" ] }
#ruma = { path = "../ruma/crates/ruma", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-msc2448", "unstable-msc3575", "unstable-exhaustive-types", "ring-compat", "unstable-unspecified" ] }
@@ -64,8 +53,7 @@ rand = "0.8.5"
# Used to hash passwords
rust-argon2 = "1.0.0"
# Used to send requests
hyper = "0.14.26"
reqwest = { version = "0.11.18", default-features = false, features = ["rustls-tls-native-roots", "socks"] }
reqwest = { default-features = false, features = ["rustls-tls-native-roots", "socks"], git = "https://github.com/timokoesters/reqwest", rev = "57b7cf4feb921573dfafad7d34b9ac6e44ead0bd" }
# Used for conduit::Error type
thiserror = "1.0.40"
# Used to generate thumbnails for images
@@ -73,13 +61,13 @@ image = { version = "0.24.6", default-features = false, features = ["jpeg", "png
# Used to encode server public key
base64 = "0.21.2"
# Used when hashing the state
ring = "0.17.7"
ring = "0.16.20"
# Used when querying the SRV record of other servers
trust-dns-resolver = "0.22.0"
# Used to find matching events for appservices
regex = "1.8.1"
# jwt jsonwebtokens
jsonwebtoken = "9.2.0"
jsonwebtoken = "8.3.0"
# Performance measurements
tracing = { version = "0.1.37", features = [] }
tracing-subscriber = { version = "0.3.17", features = ["env-filter"] }
@@ -90,10 +78,10 @@ tracing-opentelemetry = "0.18.0"
lru-cache = "0.1.2"
rusqlite = { version = "0.29.0", optional = true, features = ["bundled"] }
parking_lot = { version = "0.12.1", optional = true }
# crossbeam = { version = "0.8.2", optional = true }
crossbeam = { version = "0.8.2", optional = true }
num_cpus = "1.15.0"
threadpool = "1.8.1"
# heed = { git = "https://github.com/timokoesters/heed.git", rev = "f6f825da7fb2c758867e05ad973ef800a6fe1d5d", optional = true }
heed = { git = "https://github.com/timokoesters/heed.git", rev = "f6f825da7fb2c758867e05ad973ef800a6fe1d5d", optional = true }
# Used for ruma wrapper
serde_html_form = "0.2.0"
@@ -124,7 +112,7 @@ default = ["conduit_bin", "backend_sqlite", "backend_rocksdb", "systemd"]
#backend_sled = ["sled"]
backend_persy = ["persy", "parking_lot"]
backend_sqlite = ["sqlite"]
#backend_heed = ["heed", "crossbeam"]
backend_heed = ["heed", "crossbeam"]
backend_rocksdb = ["rocksdb"]
jemalloc = ["tikv-jemalloc-ctl", "tikv-jemallocator"]
sqlite = ["rusqlite", "parking_lot", "tokio/signal"]

@@ -12,13 +12,11 @@ only offer Linux binaries.
You may simply download the binary that fits your machine. Run `uname -m` to see what you need. Now copy the appropriate url:
**Stable versions:**
| CPU Architecture | Download stable version |
| ------------------------------------------- | --------------------------------------------------------------- |
| x86_64 / amd64 (Most servers and computers) | [Binary][x84_64-glibc-master] / [.deb][x84_64-glibc-master-deb] |
| armv7 (e.g. Raspberry Pi by default) | [Binary][armv7-glibc-master] / [.deb][armv7-glibc-master-deb] |
| armv8 / aarch64 | [Binary][armv8-glibc-master] / [.deb][armv8-glibc-master-deb] |
| CPU Architecture | Download stable version | Download development version |
| ------------------------------------------- | --------------------------------------------------------------- | ----------------------------------------------------------- |
| x86_64 / amd64 (Most servers and computers) | [Binary][x84_64-glibc-master] / [.deb][x84_64-glibc-master-deb] | [Binary][x84_64-glibc-next] / [.deb][x84_64-glibc-next-deb] |
| armv7 (e.g. Raspberry Pi by default) | [Binary][armv7-glibc-master] / [.deb][armv7-glibc-master-deb] | [Binary][armv7-glibc-next] / [.deb][armv7-glibc-next-deb] |
| armv8 / aarch64 | [Binary][armv8-glibc-master] / [.deb][armv8-glibc-master-deb] | [Binary][armv8-glibc-next] / [.deb][armv8-glibc-next-deb] |
These builds were created on and linked against the glibc version shipped with Debian bullseye.
If you use a system with an older glibc version (e.g. RHEL8), you might need to compile Conduit yourself.
@@ -26,19 +24,15 @@ If you use a system with an older glibc version (e.g. RHEL8), you might need to
[x84_64-glibc-master]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/build-output/linux_amd64/conduit?job=docker:master
[armv7-glibc-master]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/build-output/linux_arm_v7/conduit?job=docker:master
[armv8-glibc-master]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/build-output/linux_arm64/conduit?job=docker:master
[x84_64-glibc-next]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/next/raw/build-output/linux_amd64/conduit?job=docker:next
[armv7-glibc-next]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/next/raw/build-output/linux_arm_v7/conduit?job=docker:next
[armv8-glibc-next]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/next/raw/build-output/linux_arm64/conduit?job=docker:next
[x84_64-glibc-master-deb]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/build-output/linux_amd64/conduit.deb?job=docker:master
[armv7-glibc-master-deb]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/build-output/linux_arm_v7/conduit.deb?job=docker:master
[armv8-glibc-master-deb]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/build-output/linux_arm64/conduit.deb?job=docker:master
**Latest versions:**
| Target | Type | Download |
|-|-|-|
| `x86_64-unknown-linux-gnu` | Dynamically linked Debian package | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/next/raw/conduit.deb?job=debian:x86_64-unknown-linux-gnu) |
| `x86_64-unknown-linux-musl` | Statically linked binary | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/next/raw/conduit?job=static:x86_64-unknown-linux-musl) |
| `aarch64-unknown-linux-musl` | Statically linked binary | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/next/raw/conduit?job=static:aarch64-unknown-linux-musl) |
| `x86_64-unknown-linux-musl` | OCI image | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/next/raw/oci-image-amd64.tar.gz?job=oci-image:x86_64-unknown-linux-musl) |
| `aarch64-unknown-linux-musl` | OCI image | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/next/raw/oci-image-arm64v8.tar.gz?job=oci-image:aarch64-unknown-linux-musl) |
[x84_64-glibc-next-deb]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/next/raw/build-output/linux_amd64/conduit.deb?job=docker:next
[armv7-glibc-next-deb]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/next/raw/build-output/linux_arm_v7/conduit.deb?job=docker:next
[armv8-glibc-next-deb]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/next/raw/build-output/linux_arm64/conduit.deb?job=docker:next
```bash
$ sudo wget -O /usr/local/bin/matrix-conduit <url>
@@ -279,7 +273,7 @@ server {
client_max_body_size 20M;
location /_matrix/ {
proxy_pass http://127.0.0.1:6167;
proxy_pass http://127.0.0.1:6167$request_uri;
proxy_set_header Host $http_host;
proxy_buffering off;
proxy_read_timeout 5m;

Dockerfile (new file, 132 lines)

@@ -0,0 +1,132 @@
# syntax=docker/dockerfile:1
FROM docker.io/rust:1.70-bullseye AS base
FROM base AS builder
WORKDIR /usr/src/conduit
# Install required packages to build Conduit and its dependencies
RUN apt-get update && \
apt-get -y --no-install-recommends install libclang-dev=1:11.0-51+nmu5
# == Build dependencies without our own code separately for caching ==
#
# Need a fake main.rs since Cargo refuses to build anything otherwise.
#
# See https://github.com/rust-lang/cargo/issues/2644 for a Cargo feature
# request that would allow just dependencies to be compiled, presumably
# regardless of whether source files are available.
RUN mkdir src && touch src/lib.rs && echo 'fn main() {}' > src/main.rs
COPY Cargo.toml Cargo.lock ./
RUN cargo build --release && rm -r src
# Copy over actual Conduit sources
COPY src src
# main.rs and lib.rs need their timestamps updated for this to work correctly since
# otherwise the build with the fake main.rs from above is newer than the
# source files (COPY preserves timestamps).
#
# Builds conduit and places the binary at /usr/src/conduit/target/release/conduit
RUN touch src/main.rs && touch src/lib.rs && cargo build --release
# ONLY USEFUL FOR CI: target stage to extract build artifacts
FROM scratch AS builder-result
COPY --from=builder /usr/src/conduit/target/release/conduit /conduit
# ---------------------------------------------------------------------------------------------------------------
# Build cargo-deb, a tool to package up rust binaries into .deb packages for Debian/Ubuntu based systems:
# ---------------------------------------------------------------------------------------------------------------
FROM base AS build-cargo-deb
RUN apt-get update && \
apt-get install -y --no-install-recommends \
dpkg \
dpkg-dev \
liblzma-dev
RUN cargo install cargo-deb
# => binary is in /usr/local/cargo/bin/cargo-deb
# ---------------------------------------------------------------------------------------------------------------
# Package conduit build-result into a .deb package:
# ---------------------------------------------------------------------------------------------------------------
FROM builder AS packager
WORKDIR /usr/src/conduit
COPY ./LICENSE ./LICENSE
COPY ./README.md ./README.md
COPY debian ./debian
COPY --from=build-cargo-deb /usr/local/cargo/bin/cargo-deb /usr/local/cargo/bin/cargo-deb
# --no-build makes cargo-deb reuse already compiled project
RUN cargo deb --no-build
# => Package is in /usr/src/conduit/target/debian/<project_name>_<version>_<arch>.deb
# ONLY USEFUL FOR CI: target stage to extract build artifacts
FROM scratch AS packager-result
COPY --from=packager /usr/src/conduit/target/debian/*.deb /conduit.deb
# ---------------------------------------------------------------------------------------------------------------
# Stuff below this line actually ends up in the resulting docker image
# ---------------------------------------------------------------------------------------------------------------
FROM docker.io/debian:bullseye-slim AS runner
# Standard port on which Conduit launches.
# You still need to map the port when using the docker command or docker-compose.
EXPOSE 6167
ARG DEFAULT_DB_PATH=/var/lib/matrix-conduit
ENV CONDUIT_PORT=6167 \
CONDUIT_ADDRESS="0.0.0.0" \
CONDUIT_DATABASE_PATH=${DEFAULT_DB_PATH} \
CONDUIT_CONFIG=''
# └─> Set no config file to do all configuration with env vars
# Conduit needs:
# dpkg: to install conduit.deb
# ca-certificates: for https
# iproute2 & wget: for the healthcheck script
RUN apt-get update && apt-get -y --no-install-recommends install \
dpkg \
ca-certificates \
iproute2 \
wget \
&& rm -rf /var/lib/apt/lists/*
# Test if Conduit is still alive, uses the same endpoint as Element
COPY ./docker/healthcheck.sh /srv/conduit/healthcheck.sh
HEALTHCHECK --start-period=5s --interval=5s CMD ./healthcheck.sh
# Install conduit.deb:
COPY --from=packager /usr/src/conduit/target/debian/*.deb /srv/conduit/
RUN dpkg -i /srv/conduit/*.deb
# Improve security: Don't run stuff as root, that does not need to run as root
# Most distros also use 1000:1000 for the first real user, so this should resolve volume mounting problems.
ARG USER_ID=1000
ARG GROUP_ID=1000
RUN set -x ; \
groupadd -r -g ${GROUP_ID} conduit ; \
useradd -l -r -M -d /srv/conduit -o -u ${USER_ID} -g conduit conduit && exit 0 ; exit 1
# Create database directory, change ownership of Conduit files to conduit user and group and make the healthcheck executable:
RUN chown -cR conduit:conduit /srv/conduit && \
chmod +x /srv/conduit/healthcheck.sh && \
mkdir -p ${DEFAULT_DB_PATH} && \
chown -cR conduit:conduit ${DEFAULT_DB_PATH}
# Change user to conduit, no root permissions afterwards:
USER conduit
# Set container home directory
WORKDIR /srv/conduit
# Run Conduit and print backtraces on panics
ENV RUST_BACKTRACE=1
ENTRYPOINT [ "/usr/sbin/matrix-conduit" ]

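As a usage sketch for the image this Dockerfile produces (the tag, server name, and volume name below are illustrative, not taken from the diff):

```bash
# Build the runner image from the repository root
docker build -t matrix-conduit .

# Run it, publishing the exposed port. Since CONDUIT_CONFIG is empty,
# all configuration happens through CONDUIT_* environment variables.
docker run -d -p 6167:6167 \
  -e CONDUIT_SERVER_NAME="your.server.name" \
  -v conduit-db:/var/lib/matrix-conduit \
  matrix-conduit
```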
bin/complement

@@ -1,37 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
# Path to Complement's source code
COMPLEMENT_SRC="$1"
# A `.jsonl` file to write test logs to
LOG_FILE="$2"
# A `.jsonl` file to write test results to
RESULTS_FILE="$3"
OCI_IMAGE="complement-conduit:dev"
env \
-C "$(git rev-parse --show-toplevel)" \
docker build \
--tag "$OCI_IMAGE" \
--file complement/Dockerfile \
.
# It's okay (likely, even) that `go test` exits nonzero
set +o pipefail
env \
-C "$COMPLEMENT_SRC" \
COMPLEMENT_BASE_IMAGE="$OCI_IMAGE" \
go test -json ./tests | tee "$LOG_FILE"
set -o pipefail
# Post-process the results into an easy-to-compare format
cat "$LOG_FILE" | jq -c '
select(
(.Action == "pass" or .Action == "fail" or .Action == "skip")
and .Test != null
) | {Action: .Action, Test: .Test}
' | sort > "$RESULTS_FILE"

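For context, a hedged sketch of how this script is invoked; the three positional arguments match the variables at the top of the script, and the paths are illustrative:

```bash
# <Complement source checkout> <log file (.jsonl)> <results file (.jsonl)>
./bin/complement ~/src/complement complement.log.jsonl complement.results.jsonl
```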
bin/nix-build-and-cache

@@ -1,31 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
# The first argument must be the desired installable
INSTALLABLE="$1"
# Build the installable and forward any other arguments too
nix build "$@"
if [ ! -z ${ATTIC_TOKEN+x} ]; then
nix run --inputs-from . attic -- login \
conduit \
https://nix.computer.surgery/conduit \
"$ATTIC_TOKEN"
push_args=(
# Attic and its build dependencies
"$(nix path-info --inputs-from . attic)"
"$(nix path-info --inputs-from . attic --derivation)"
# The target installable and its build dependencies
"$(nix path-info "$INSTALLABLE" --derivation)"
"$(nix path-info "$INSTALLABLE")"
)
nix run --inputs-from . attic -- push conduit "${push_args[@]}"
else
echo "\$ATTIC_TOKEN is unset, skipping uploading to the binary cache"
fi

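The `artifacts`-stage jobs in the `.gitlab-ci.yml` diff above invoke this script with a flake output name, for example:

```bash
# Build the static musl binary and, if ATTIC_TOKEN is set, push it and its
# build requirements to the Attic binary cache
./bin/nix-build-and-cache .#static-x86_64-unknown-linux-musl
```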
complement/Dockerfile

@@ -1,30 +1,26 @@
FROM rust:1.75.0
# For use in our CI only. This requires a build artifact created by a previous pipeline stage run to be placed in cached_target/release/conduit
FROM registry.gitlab.com/jfowl/conduit-containers/rust-with-tools:commit-16a08e9b as builder
#FROM rust:latest as builder
WORKDIR /workdir
RUN apt-get update && apt-get install -y --no-install-recommends \
libclang-dev
ARG RUSTC_WRAPPER
ARG AWS_ACCESS_KEY_ID
ARG AWS_SECRET_ACCESS_KEY
ARG SCCACHE_BUCKET
ARG SCCACHE_ENDPOINT
ARG SCCACHE_S3_USE_SSL
COPY Cargo.toml Cargo.toml
COPY Cargo.lock Cargo.lock
COPY src src
RUN cargo build --release \
&& mv target/release/conduit conduit \
&& rm -rf target
COPY . .
RUN mkdir -p target/release
RUN test -e cached_target/release/conduit && cp cached_target/release/conduit target/release/conduit || cargo build --release
## Actual image
FROM debian:bullseye
WORKDIR /workdir
# Install caddy
RUN apt-get update \
&& apt-get install -y \
debian-keyring \
debian-archive-keyring \
apt-transport-https \
curl \
&& curl -1sLf 'https://dl.cloudsmith.io/public/caddy/testing/gpg.key' \
| gpg --dearmor -o /usr/share/keyrings/caddy-testing-archive-keyring.gpg \
&& curl -1sLf 'https://dl.cloudsmith.io/public/caddy/testing/debian.deb.txt' \
| tee /etc/apt/sources.list.d/caddy-testing.list \
&& apt-get update \
&& apt-get install -y caddy
RUN apt-get update && apt-get install -y debian-keyring debian-archive-keyring apt-transport-https curl && curl -1sLf 'https://dl.cloudsmith.io/public/caddy/testing/gpg.key' | gpg --dearmor -o /usr/share/keyrings/caddy-testing-archive-keyring.gpg && curl -1sLf 'https://dl.cloudsmith.io/public/caddy/testing/debian.deb.txt' | tee /etc/apt/sources.list.d/caddy-testing.list && apt-get update && apt-get install -y caddy
COPY conduit-example.toml conduit.toml
COPY complement/caddy.json caddy.json
@@ -33,9 +29,16 @@ ENV SERVER_NAME=localhost
ENV CONDUIT_CONFIG=/workdir/conduit.toml
RUN sed -i "s/port = 6167/port = 8008/g" conduit.toml
RUN echo "allow_federation = true" >> conduit.toml
RUN echo "allow_check_for_updates = true" >> conduit.toml
RUN echo "allow_encryption = true" >> conduit.toml
RUN echo "allow_registration = true" >> conduit.toml
RUN echo "log = \"warn,_=off,sled=off\"" >> conduit.toml
RUN sed -i "s/address = \"127.0.0.1\"/address = \"0.0.0.0\"/g" conduit.toml
COPY --from=builder /workdir/target/release/conduit /workdir/conduit
RUN chmod +x /workdir/conduit
EXPOSE 8008 8448
CMD uname -a && \

complement/README.md

@@ -1,11 +1,13 @@
# Complement
# Running Conduit on Complement
## What's that?
This assumes that you're familiar with Complement; if not, please read
[their readme](https://github.com/matrix-org/complement#running).
Have a look at [its repository](https://github.com/matrix-org/complement).
Complement works with "base images"; this directory (and its Dockerfile) helps build the Conduit Complement-ready Docker image.
## How do I use it with Conduit?
To build, `cd` to the base directory of the workspace, and run this:
The script at [`../bin/complement`](../bin/complement) has automation for this.
It takes a few command-line arguments; you can read the script to find out what those are.
`docker build -t complement-conduit:dev -f complement/Dockerfile .`
Then use `complement-conduit:dev` as a base image for running complement tests.

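Putting those two commands together, a sketch of a complete Complement run against that base image (the Complement checkout path is illustrative):

```bash
# Build the Conduit base image for Complement
docker build -t complement-conduit:dev -f complement/Dockerfile .

# Run the Complement suite against it, from a Complement checkout
cd ~/src/complement
COMPLEMENT_BASE_IMAGE=complement-conduit:dev go test ./tests
```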
default.nix

@@ -1,10 +0,0 @@
(import
(
let lock = builtins.fromJSON (builtins.readFile ./flake.lock); in
fetchTarball {
url = lock.nodes.flake-compat.locked.url or "https://github.com/edolstra/flake-compat/archive/${lock.nodes.flake-compat.locked.rev}.tar.gz";
sha256 = lock.nodes.flake-compat.locked.narHash;
}
)
{ src = ./.; }
).defaultNix

docker/README.md

@@ -76,7 +76,7 @@ to pass `-e CONDUIT_CONFIG=""` into your container. For an overview of possible
If you just want to test Conduit for a short time, you can use the `--rm` flag, which will clean up everything related to your container after you stop it.
### Docker-compose
## Docker-compose
If the `docker run` command is not for you or your setup, you can also use one of the provided `docker-compose` files.
@@ -161,58 +161,3 @@ So...step by step:
6. Run `docker-compose up -d`
7. Connect to your homeserver with your preferred client and create a user. You should do this immediately after starting Conduit, because the first created user is the admin.
## Voice communication
In order to make or receive calls, a TURN server is required. Conduit suggests using [Coturn](https://github.com/coturn/coturn) for this purpose, which is also available as a Docker image. Before proceeding with the software installation, it is essential to have the necessary configurations in place.
### Configuration
Create a configuration file called `coturn.conf` containing:
```conf
use-auth-secret
static-auth-secret=<a secret key>
realm=<your server domain>
```
A common way to generate a suitable alphanumeric secret key is by using `pwgen -s 64 1`.
These same values need to be set in conduit. You can either modify conduit.toml to include these lines:
```
turn_uris = ["turn:<your server domain>?transport=udp", "turn:<your server domain>?transport=tcp"]
turn_secret = "<secret key from coturn configuration>"
```
or append the following to the docker environment variables depending on which configuration method you used earlier:
```yml
CONDUIT_TURN_URIS: '["turn:<your server domain>?transport=udp", "turn:<your server domain>?transport=tcp"]'
CONDUIT_TURN_SECRET: "<secret key from coturn configuration>"
```
Restart Conduit to apply these changes.
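For example, if Conduit runs in a container named `conduit` (the name is assumed):

```bash
docker restart conduit
```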
### Run
Run the [Coturn](https://hub.docker.com/r/coturn/coturn) image using
```bash
docker run -d --network=host -v $(pwd)/coturn.conf:/etc/coturn/turnserver.conf coturn/coturn
```
or docker-compose. For the latter, paste the following section into a file called `docker-compose.yml`
and run `docker-compose up -d` in the same directory.
```yml
version: "3"
services:
turn:
container_name: coturn-server
image: docker.io/coturn/coturn
restart: unless-stopped
network_mode: "host"
volumes:
- ./coturn.conf:/etc/coturn/turnserver.conf
```
To understand why the host networking mode is used and explore alternative configuration options, please visit the following link: https://github.com/coturn/coturn/blob/master/docker/coturn/README.md.
For security recommendations see Synapse's [Coturn documentation](https://github.com/matrix-org/synapse/blob/develop/docs/setup/turn/coturn.md#configuration).

engage.toml

@@ -1,64 +0,0 @@
interpreter = ["bash", "-euo", "pipefail", "-c"]
[[task]]
name = "engage"
group = "versions"
script = "engage --version"
[[task]]
name = "rustc"
group = "versions"
script = "rustc --version"
[[task]]
name = "cargo"
group = "versions"
script = "cargo --version"
[[task]]
name = "cargo-fmt"
group = "versions"
script = "cargo fmt --version"
[[task]]
name = "rustdoc"
group = "versions"
script = "rustdoc --version"
[[task]]
name = "cargo-clippy"
group = "versions"
script = "cargo clippy -- --version"
[[task]]
name = "cargo-fmt"
group = "lints"
script = "cargo fmt --check -- --color=always"
[[task]]
name = "cargo-doc"
group = "lints"
script = """
RUSTDOCFLAGS="-D warnings" cargo doc \
--workspace \
--no-deps \
--document-private-items \
--color always
"""
[[task]]
name = "cargo-clippy"
group = "lints"
script = "cargo clippy --workspace --all-targets --color=always -- -D warnings"
[[task]]
name = "cargo"
group = "tests"
script = """
cargo test \
--workspace \
--all-targets \
--color=always \
-- \
--color=always
"""

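For context, the `ci` job in the `.gitlab-ci.yml` diff above runs every task defined here in one shot:

```bash
# engage reads engage.toml and runs the versions, lints, and tests groups
direnv exec . engage
```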
flake.lock

@@ -1,41 +1,22 @@
{
"nodes": {
"attic": {
"inputs": {
"crane": "crane",
"flake-compat": "flake-compat",
"flake-utils": "flake-utils",
"nixpkgs": "nixpkgs",
"nixpkgs-stable": "nixpkgs-stable"
},
"locked": {
"lastModified": 1705617092,
"narHash": "sha256-n9PK4O4X4S1JkwpkMuYm1wHZYJzRqif8g3RuVIPD+rY=",
"owner": "zhaofengli",
"repo": "attic",
"rev": "fbe252a5c21febbe920c025560cbd63b20e24f3b",
"type": "github"
},
"original": {
"owner": "zhaofengli",
"ref": "main",
"repo": "attic",
"type": "github"
}
},
"crane": {
"inputs": {
"flake-compat": "flake-compat",
"flake-utils": [
"flake-utils"
],
"nixpkgs": [
"attic",
"nixpkgs"
]
],
"rust-overlay": "rust-overlay"
},
"locked": {
"lastModified": 1702918879,
"narHash": "sha256-tWJqzajIvYcaRWxn+cLUB9L9Pv4dQ3Bfit/YjU5ze3g=",
"lastModified": 1688772518,
"narHash": "sha256-ol7gZxwvgLnxNSZwFTDJJ49xVY5teaSvF7lzlo3YQfM=",
"owner": "ipetkov",
"repo": "crane",
"rev": "7195c00c272fdd92fc74e7d5a0a2844b9fadb2fb",
"rev": "8b08e96c9af8c6e3a2b69af5a7fa168750fcf88e",
"type": "github"
},
"original": {
@@ -44,27 +25,6 @@
"type": "github"
}
},
"crane_2": {
"inputs": {
"nixpkgs": [
"nixpkgs"
]
},
"locked": {
"lastModified": 1706473964,
"narHash": "sha256-Fq6xleee/TsX6NbtoRuI96bBuDHMU57PrcK9z1QEKbk=",
"owner": "ipetkov",
"repo": "crane",
"rev": "c798790eabec3e3da48190ae3698ac227aab770c",
"type": "github"
},
"original": {
"owner": "ipetkov",
"ref": "master",
"repo": "crane",
"type": "github"
}
},
"fenix": {
"inputs": {
"nixpkgs": [
@@ -73,11 +33,11 @@
"rust-analyzer-src": "rust-analyzer-src"
},
"locked": {
"lastModified": 1705559032,
"narHash": "sha256-Cb+Jd1+Gz4Wi+8elPnUIHnqQmE1qjDRZ+PsJaPaAffY=",
"lastModified": 1689488573,
"narHash": "sha256-diVASflKCCryTYv0djvMnP2444mFsIG0ge5pa7ahauQ=",
"owner": "nix-community",
"repo": "fenix",
"rev": "e132ea0eb0c799a2109a91688e499d7bf4962801",
"rev": "39096fe3f379036ff4a5fa198950b8e79defe939",
"type": "github"
},
"original": {
@@ -102,47 +62,16 @@
"type": "github"
}
},
"flake-compat_2": {
"flake": false,
"locked": {
"lastModified": 1696426674,
"narHash": "sha256-kvjfFW7WAETZlt09AgDn1MrtKzP7t90Vf7vypd3OL1U=",
"owner": "edolstra",
"repo": "flake-compat",
"rev": "0f9255e01c2351cc7d116c072cb317785dd33b33",
"type": "github"
},
"original": {
"owner": "edolstra",
"repo": "flake-compat",
"type": "github"
}
},
"flake-utils": {
"locked": {
"lastModified": 1667395993,
"narHash": "sha256-nuEHfE/LcWyuSWnS8t12N1wc105Qtau+/OdUAjtQ0rA=",
"owner": "numtide",
"repo": "flake-utils",
"rev": "5aed5285a952e0b949eb3ba02c12fa4fcfef535f",
"type": "github"
},
"original": {
"owner": "numtide",
"repo": "flake-utils",
"type": "github"
}
},
"flake-utils_2": {
"inputs": {
"systems": "systems"
},
"locked": {
"lastModified": 1705309234,
"narHash": "sha256-uNRRNRKmJyCRC/8y1RqBkqWBLM034y4qN7EprSdmgyA=",
"lastModified": 1689068808,
"narHash": "sha256-6ixXo3wt24N/melDWjq70UuHQLxGV8jZvooRanIHXw0=",
"owner": "numtide",
"repo": "flake-utils",
"rev": "1ef2e671c3b0c19053962c07dbda38332dcebf26",
"rev": "919d646de7be200f3bf08cb76ae1f09402b6f9b4",
"type": "github"
},
"original": {
@@ -151,60 +80,13 @@
"type": "github"
}
},
"nix-filter": {
"locked": {
"lastModified": 1705332318,
"narHash": "sha256-kcw1yFeJe9N4PjQji9ZeX47jg0p9A0DuU4djKvg1a7I=",
"owner": "numtide",
"repo": "nix-filter",
"rev": "3449dc925982ad46246cfc36469baf66e1b64f17",
"type": "github"
},
"original": {
"owner": "numtide",
"repo": "nix-filter",
"type": "github"
}
},
"nixpkgs": {
"locked": {
"lastModified": 1702539185,
"narHash": "sha256-KnIRG5NMdLIpEkZTnN5zovNYc0hhXjAgv6pfd5Z4c7U=",
"lastModified": 1689444953,
"narHash": "sha256-0o56bfb2LC38wrinPdCGLDScd77LVcr7CrH1zK7qvDg=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "aa9d4729cbc99dabacb50e3994dcefb3ea0f7447",
"type": "github"
},
"original": {
"owner": "NixOS",
"ref": "nixpkgs-unstable",
"repo": "nixpkgs",
"type": "github"
}
},
"nixpkgs-stable": {
"locked": {
"lastModified": 1702780907,
"narHash": "sha256-blbrBBXjjZt6OKTcYX1jpe9SRof2P9ZYWPzq22tzXAA=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "1e2e384c5b7c50dbf8e9c441a9e58d85f408b01f",
"type": "github"
},
"original": {
"owner": "NixOS",
"ref": "nixos-23.11",
"repo": "nixpkgs",
"type": "github"
}
},
"nixpkgs_2": {
"locked": {
"lastModified": 1705496572,
"narHash": "sha256-rPIe9G5EBLXdBdn9ilGc0nq082lzQd0xGGe092R/5QE=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "842d9d80cfd4560648c785f8a4e6f3b096790e19",
"rev": "8acef304efe70152463a6399f73e636bcc363813",
"type": "github"
},
"original": {
@@ -216,23 +98,20 @@
},
"root": {
"inputs": {
"attic": "attic",
"crane": "crane_2",
"crane": "crane",
"fenix": "fenix",
"flake-compat": "flake-compat_2",
"flake-utils": "flake-utils_2",
"nix-filter": "nix-filter",
"nixpkgs": "nixpkgs_2"
"flake-utils": "flake-utils",
"nixpkgs": "nixpkgs"
}
},
"rust-analyzer-src": {
"flake": false,
"locked": {
"lastModified": 1705523001,
"narHash": "sha256-TWq5vJ6m+9HGSDMsQAmz1TMegMi79R3TTyKjnPWsQp8=",
"lastModified": 1689441253,
"narHash": "sha256-4MSDZaFI4DOfsLIZYPMBl0snzWhX1/OqR/QHir382CY=",
"owner": "rust-lang",
"repo": "rust-analyzer",
"rev": "9d9b34354d2f13e33568c9c55b226dd014a146a0",
"rev": "996e054f1eb1dbfc8455ecabff0f6ff22ba7f7c8",
"type": "github"
},
"original": {
@@ -242,6 +121,31 @@
"type": "github"
}
},
"rust-overlay": {
"inputs": {
"flake-utils": [
"crane",
"flake-utils"
],
"nixpkgs": [
"crane",
"nixpkgs"
]
},
"locked": {
"lastModified": 1688351637,
"narHash": "sha256-CLTufJ29VxNOIZ8UTg0lepsn3X03AmopmaLTTeHDCL4=",
"owner": "oxalica",
"repo": "rust-overlay",
"rev": "f9b92316727af9e6c7fee4a761242f7f46880329",
"type": "github"
},
"original": {
"owner": "oxalica",
"repo": "rust-overlay",
"type": "github"
}
},
"systems": {
"locked": {
"lastModified": 1681028828,

flake.nix (264 changed lines)

@@ -2,258 +2,92 @@
inputs = {
nixpkgs.url = "github:NixOS/nixpkgs?ref=nixos-unstable";
flake-utils.url = "github:numtide/flake-utils";
nix-filter.url = "github:numtide/nix-filter";
flake-compat = {
url = "github:edolstra/flake-compat";
flake = false;
};
fenix = {
url = "github:nix-community/fenix";
inputs.nixpkgs.follows = "nixpkgs";
};
crane = {
url = "github:ipetkov/crane?ref=master";
url = "github:ipetkov/crane";
inputs.nixpkgs.follows = "nixpkgs";
inputs.flake-utils.follows = "flake-utils";
};
attic.url = "github:zhaofengli/attic?ref=main";
};
outputs =
{ self
, nixpkgs
, flake-utils
, nix-filter
, fenix
, crane
, ...
}: flake-utils.lib.eachDefaultSystem (system:
let
pkgsHost = nixpkgs.legacyPackages.${system};
pkgs = nixpkgs.legacyPackages.${system};
# Use mold on Linux
stdenv = if pkgs.stdenv.isLinux then
pkgs.stdenvAdapters.useMoldLinker pkgs.stdenv
else
pkgs.stdenv;
# Nix-accessible `Cargo.toml`
cargoToml = builtins.fromTOML (builtins.readFile ./Cargo.toml);
# The Rust toolchain to use
toolchain = fenix.packages.${system}.fromToolchainFile {
file = ./rust-toolchain.toml;
toolchain = fenix.packages.${system}.toolchainOf {
# Use the Rust version defined in `Cargo.toml`
channel = cargoToml.package.rust-version;
# See also `rust-toolchain.toml`
sha256 = "sha256-SXRtAuO4IqNOQq+nLbrsDFbVk+3aVA8NNpSZsKlVH/8=";
# THE rust-version HASH
sha256 = "sha256-gdYqng0y9iHYzYPAdkC/ka3DRny3La/S5G8ASj0Ayyc=";
};
builder = pkgs:
((crane.mkLib pkgs).overrideToolchain toolchain).buildPackage;
nativeBuildInputs = pkgs: [
# bindgen needs the build platform's libclang. Apparently due to
# "splicing weirdness", pkgs.rustPlatform.bindgenHook on its own doesn't
# quite do the right thing here.
pkgs.buildPackages.rustPlatform.bindgenHook
];
env = pkgs: {
# The system's RocksDB
ROCKSDB_INCLUDE_DIR = "${pkgs.rocksdb}/include";
ROCKSDB_LIB_DIR = "${pkgs.rocksdb}/lib";
}
// pkgs.lib.optionalAttrs pkgs.stdenv.hostPlatform.isStatic {
ROCKSDB_STATIC = "";
}
// {
CARGO_BUILD_RUSTFLAGS = let inherit (pkgs) lib stdenv; in
lib.concatStringsSep " " ([]
++ lib.optionals
# This disables PIE for static builds, which isn't great in terms
# of security. Unfortunately, my hand is forced because nixpkgs'
# `libstdc++.a` is built without `-fPIE`, which precludes us from
# leaving PIE enabled.
stdenv.hostPlatform.isStatic
["-C" "relocation-model=static"]
++ lib.optionals
(stdenv.buildPlatform.config != stdenv.hostPlatform.config)
["-l" "c"]
++ lib.optionals
# This check has to match the one [here][0]. We only need to set
# these flags when using a different linker. Don't ask me why,
# though, because I don't know. All I know is it breaks otherwise.
#
# [0]: https://github.com/NixOS/nixpkgs/blob/612f97239e2cc474c13c9dafa0df378058c5ad8d/pkgs/build-support/rust/lib/default.nix#L36-L39
(
# Nixpkgs doesn't check for x86_64 here but we do, because I
# observed a failure building statically for x86_64 without
# including it here. Linkers are weird.
(stdenv.hostPlatform.isAarch64 || stdenv.hostPlatform.isx86_64)
&& stdenv.hostPlatform.isStatic
&& !stdenv.isDarwin
&& !stdenv.cc.bintools.isLLVM
)
[
"-l"
"stdc++"
"-L"
"${stdenv.cc.cc.lib}/${stdenv.hostPlatform.config}/lib"
]
);
}
# What follows is stolen from [here][0]. Its purpose is to properly
# configure compilers and linkers for various stages of the build, and
# even covers the case of build scripts that need native code compiled and
# run on the build platform (I think).
#
# [0]: https://github.com/NixOS/nixpkgs/blob/612f97239e2cc474c13c9dafa0df378058c5ad8d/pkgs/build-support/rust/lib/default.nix#L64-L78
// (
let
inherit (pkgs.rust.lib) envVars;
in
pkgs.lib.optionalAttrs
(pkgs.stdenv.targetPlatform.rust.rustcTarget
!= pkgs.stdenv.hostPlatform.rust.rustcTarget)
(
let
inherit (pkgs.stdenv.targetPlatform.rust) cargoEnvVarTarget;
in
{
"CC_${cargoEnvVarTarget}" = envVars.ccForTarget;
"CXX_${cargoEnvVarTarget}" = envVars.cxxForTarget;
"CARGO_TARGET_${cargoEnvVarTarget}_LINKER" =
envVars.linkerForTarget;
}
)
// (
let
inherit (pkgs.stdenv.hostPlatform.rust) cargoEnvVarTarget rustcTarget;
in
{
"CC_${cargoEnvVarTarget}" = envVars.ccForHost;
"CXX_${cargoEnvVarTarget}" = envVars.cxxForHost;
"CARGO_TARGET_${cargoEnvVarTarget}_LINKER" = envVars.linkerForHost;
CARGO_BUILD_TARGET = rustcTarget;
}
)
// (
let
inherit (pkgs.stdenv.buildPlatform.rust) cargoEnvVarTarget;
in
{
"CC_${cargoEnvVarTarget}" = envVars.ccForBuild;
"CXX_${cargoEnvVarTarget}" = envVars.cxxForBuild;
"CARGO_TARGET_${cargoEnvVarTarget}_LINKER" = envVars.linkerForBuild;
HOST_CC = "${pkgs.buildPackages.stdenv.cc}/bin/cc";
HOST_CXX = "${pkgs.buildPackages.stdenv.cc}/bin/c++";
}
));
# Shared between the package and the devShell
nativeBuildInputs = (with pkgs.rustPlatform; [
bindgenHook
]);
package = pkgs: builder pkgs {
src = nix-filter {
root = ./.;
include = [
"src"
"Cargo.toml"
"Cargo.lock"
];
builder =
((crane.mkLib pkgs).overrideToolchain toolchain.toolchain).buildPackage;
in
{
packages.default = builder {
src = ./.;
inherit
stdenv
nativeBuildInputs
ROCKSDB_INCLUDE_DIR
ROCKSDB_LIB_DIR;
};
# This is redundant with CI
doCheck = false;
env = env pkgs;
nativeBuildInputs = nativeBuildInputs pkgs;
meta.mainProgram = cargoToml.package.name;
};
mkOciImage = pkgs: package:
pkgs.dockerTools.buildImage {
name = package.pname;
tag = "next";
copyToRoot = [
pkgs.dockerTools.caCertificates
];
config = {
# Use the `tini` init system so that signals (e.g. ctrl+c/SIGINT)
# are handled as expected
Entrypoint = [
"${pkgs.lib.getExe' pkgs.tini "tini"}"
"--"
];
Cmd = [
"${pkgs.lib.getExe package}"
];
};
};
in
{
packages = {
default = package pkgsHost;
oci-image = mkOciImage pkgsHost self.packages.${system}.default;
}
//
builtins.listToAttrs
(builtins.concatLists
(builtins.map
(crossSystem:
let
binaryName = "static-${crossSystem}";
pkgsCrossStatic =
(import nixpkgs {
inherit system;
crossSystem = {
config = crossSystem;
};
}).pkgsStatic;
in
[
# An output for a statically-linked binary
{
name = binaryName;
value = package pkgsCrossStatic;
}
# An output for an OCI image based on that binary
{
name = "oci-image-${crossSystem}";
value = mkOciImage
pkgsCrossStatic
self.packages.${system}.${binaryName};
}
]
)
[
"x86_64-unknown-linux-musl"
"aarch64-unknown-linux-musl"
]
)
);
devShells.default = pkgsHost.mkShell {
env = env pkgsHost // {
devShells.default = (pkgs.mkShell.override { inherit stdenv; }) {
# Rust Analyzer needs to be able to find the path to default crate
# sources, and it can read this environment variable to do so. The
# `rust-src` component is required in order for this to work.
RUST_SRC_PATH = "${toolchain}/lib/rustlib/src/rust/library";
};
# sources, and it can read this environment variable to do so
RUST_SRC_PATH = "${toolchain.rust-src}/lib/rustlib/src/rust/library";
inherit
ROCKSDB_INCLUDE_DIR
ROCKSDB_LIB_DIR;
# Development tools
nativeBuildInputs = nativeBuildInputs pkgsHost ++ [
# Always use nightly rustfmt because most of its options are unstable
#
# This needs to come before `toolchain` in this list, otherwise
# `$PATH` will have stable rustfmt instead.
fenix.packages.${system}.latest.rustfmt
toolchain
] ++ (with pkgsHost; [
engage
# Needed for Complement
go
olm
# Needed for our script for Complement
jq
nativeBuildInputs = nativeBuildInputs ++ (with toolchain; [
cargo
clippy
rust-src
rustc
rustfmt
]);
};
checks = {
packagesDefault = self.packages.${system}.default;
devShellsDefault = self.devShells.${system}.default;
};
});
}

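As a usage sketch, the flake outputs defined above can be built locally with `nix build` (assuming flakes are enabled):

```bash
# The default package
nix build .

# A statically linked binary (output names come from the listToAttrs block)
nix build .#static-x86_64-unknown-linux-musl

# The OCI image tarball; CI copies `result` to oci-image-amd64.tar.gz
nix build .#oci-image
```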
rust-toolchain.toml

@@ -1,22 +0,0 @@
# This is the authoritative configuration of this project's Rust toolchain.
#
# Other files that need upkeep when this changes:
#
# * `.gitlab-ci.yml`
# * `Cargo.toml`
# * `flake.nix`
#
# Search in those files for `rust-toolchain.toml` to find the relevant places.
# If you're having trouble making the relevant changes, bug a maintainer.
[toolchain]
channel = "1.75.0"
components = [
# For rust-analyzer
"rust-src",
]
targets = [
"x86_64-unknown-linux-gnu",
"x86_64-unknown-linux-musl",
"aarch64-unknown-linux-musl",
]

src/api/client_server/keys.rs

@@ -17,11 +17,7 @@ use ruma::{
DeviceKeyAlgorithm, OwnedDeviceId, OwnedUserId, UserId,
};
use serde_json::json;
use std::{
collections::{hash_map, BTreeMap, HashMap, HashSet},
time::{Duration, Instant},
};
use tracing::debug;
use std::collections::{BTreeMap, HashMap, HashSet};
/// # `POST /_matrix/client/r0/keys/upload`
///
@@ -339,68 +335,31 @@ pub(crate) async fn get_keys_helper<F: Fn(&UserId) -> bool>(
let mut failures = BTreeMap::new();
let back_off = |id| match services()
.globals
.bad_query_ratelimiter
.write()
.unwrap()
.entry(id)
{
hash_map::Entry::Vacant(e) => {
e.insert((Instant::now(), 1));
}
hash_map::Entry::Occupied(mut e) => *e.get_mut() = (Instant::now(), e.get().1 + 1),
};
let mut futures: FuturesUnordered<_> = get_over_federation
.into_iter()
.map(|(server, vec)| async move {
if let Some((time, tries)) = services()
.globals
.bad_query_ratelimiter
.read()
.unwrap()
.get(server)
{
// Exponential backoff
let mut min_elapsed_duration = Duration::from_secs(30) * (*tries) * (*tries);
if min_elapsed_duration > Duration::from_secs(60 * 60 * 24) {
min_elapsed_duration = Duration::from_secs(60 * 60 * 24);
}
if time.elapsed() < min_elapsed_duration {
debug!("Backing off query from {:?}", server);
return (
server,
Err(Error::BadServerResponse("bad query, still backing off")),
);
}
}
let mut device_keys_input_fed = BTreeMap::new();
for (user_id, keys) in vec {
device_keys_input_fed.insert(user_id.to_owned(), keys.clone());
}
(
server,
tokio::time::timeout(
Duration::from_secs(25),
services().sending.send_federation_request(
services()
.sending
.send_federation_request(
server,
federation::keys::get_keys::v1::Request {
device_keys: device_keys_input_fed,
},
),
)
.await
.map_err(|_e| Error::BadServerResponse("Query took too long")),
.await,
)
})
.collect();
while let Some((server, response)) = futures.next().await {
match response {
Ok(Ok(response)) => {
Ok(response) => {
for (user, masterkey) in response.master_keys {
let (master_key_id, mut master_key) =
services().users.parse_master_key(&user, &masterkey)?;
@@ -427,8 +386,7 @@ pub(crate) async fn get_keys_helper<F: Fn(&UserId) -> bool>(
self_signing_keys.extend(response.self_signing_keys);
device_keys.extend(response.device_keys);
}
_ => {
back_off(server.to_owned());
Err(_e) => {
failures.insert(server.to_string(), json!({}));
}
}

src/api/client_server/media.rs

@@ -51,7 +51,7 @@ pub async fn create_content_route(
.await?;
Ok(create_content::v3::Response {
content_uri: mxc.into(),
content_uri: mxc.try_into().expect("Invalid mxc:// URI"),
blurhash: None,
})
}

src/api/client_server/membership.rs

@@ -64,12 +64,7 @@
.map(|user| user.server_name().to_owned()),
);
servers.push(
body.room_id
.server_name()
.expect("Room IDs should always have a server name")
.into(),
);
servers.push(body.room_id.server_name().to_owned());
join_room_by_id_helper(
body.sender_user.as_deref(),
@@ -110,12 +105,7 @@ pub async fn join_room_by_id_or_alias_route(
.map(|user| user.server_name().to_owned()),
);
servers.push(
room_id
.server_name()
.expect("Room IDs should always have a server name")
.into(),
);
servers.push(room_id.server_name().to_owned());
(servers, room_id)
}
@@ -410,7 +400,7 @@ pub async fn get_member_events_route(
if !services()
.rooms
.state_accessor
.user_can_see_state_events(sender_user, &body.room_id)?
.user_can_see_state_events(&sender_user, &body.room_id)?
{
return Err(Error::BadRequest(
ErrorKind::Forbidden,
@@ -445,7 +435,7 @@ pub async fn joined_members_route(
if !services()
.rooms
.state_accessor
.user_can_see_state_events(sender_user, &body.room_id)?
.user_can_see_state_events(&sender_user, &body.room_id)?
{
return Err(Error::BadRequest(
ErrorKind::Forbidden,
@@ -629,7 +619,7 @@ async fn join_room_by_id_helper(
));
}
match signed_value["signatures"]
if let Ok(signature) = signed_value["signatures"]
.as_object()
.ok_or(Error::BadRequest(
ErrorKind::InvalidParam,
@@ -640,22 +630,20 @@
ErrorKind::InvalidParam,
"Server did not send its signature",
))
}) {
Ok(signature) => {
})
{
join_event
.get_mut("signatures")
.expect("we created a valid pdu")
.as_object_mut()
.expect("we created a valid pdu")
.insert(remote_server.to_string(), signature.clone());
}
Err(e) => {
} else {
warn!(
"Server {remote_server} sent invalid signature in sendjoin signatures for event {signed_value:?}: {e:?}",
"Server {remote_server} sent invalid signature in sendjoin signatures for event {signed_value:?}",
);
}
}
}
services().rooms.short.get_or_create_shortroomid(room_id)?;
@@ -722,7 +710,7 @@ }
}
info!("Running send_join auth check");
let authenticated = state_res::event_auth::auth_check(
if !state_res::event_auth::auth_check(
&state_res::RoomVersion::new(&room_version_id).expect("room version is supported"),
&parsed_join_pdu,
None::<PduEvent>, // TODO: third party invite
@@ -745,9 +733,7 @@
.map_err(|e| {
warn!("Auth check failed: {e}");
Error::BadRequest(ErrorKind::InvalidParam, "Auth check failed")
})?;
if !authenticated {
})? {
return Err(Error::BadRequest(
ErrorKind::InvalidParam,
"Auth check failed",
@@ -1376,7 +1362,7 @@ pub async fn leave_all_rooms(user_id: &UserId) -> Result<()> {
pub async fn leave_room(user_id: &UserId, room_id: &RoomId, reason: Option<String>) -> Result<()> {
// Ask a remote server if we don't have this room
if !services().rooms.metadata.exists(room_id)?
&& room_id.server_name() != Some(services().globals.server_name())
&& room_id.server_name() != services().globals.server_name()
{
if let Err(e) = remote_leave_room(user_id, room_id).await {
warn!("Failed to leave room {} remotely: {}", user_id, e);

src/api/client_server/message.rs

@@ -124,7 +124,7 @@ pub async fn get_message_events_route(
let to = body
.to
.as_ref()
.and_then(|t| PduCount::try_from_string(t).ok());
.and_then(|t| PduCount::try_from_string(&t).ok());
services().rooms.lazy_loading.lazy_load_confirm_delivery(
sender_user,

View file

@ -1,8 +1,5 @@
use crate::{services, utils, Error, Result, Ruma};
use ruma::api::client::{
error::ErrorKind,
presence::{get_presence, set_presence},
};
use crate::{services, utils, Result, Ruma};
use ruma::api::client::presence::{get_presence, set_presence};
use std::time::Duration;
/// # `PUT /_matrix/client/r0/presence/{userId}/status`
@ -82,9 +79,6 @@ pub async fn get_presence_route(
presence: presence.content.presence,
})
} else {
Err(Error::BadRequest(
ErrorKind::NotFound,
"Presence state for this user was not found",
))
todo!();
}
}
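
The next side replaces master's todo!() with a structured ErrorKind::NotFound response; a panic in a request handler would tear down the task instead of producing an HTTP error the client can act on. A minimal sketch of the pattern, with hypothetical error types standing in for the handler's real plumbing:

    // Hypothetical stand-ins for the handler's error plumbing.
    #[derive(Debug)]
    enum ErrorKind {
        NotFound,
    }

    #[derive(Debug)]
    struct Error(ErrorKind, &'static str);

    fn get_presence(found: bool) -> Result<&'static str, Error> {
        if found {
            Ok("online")
        } else {
            // Returning an error keeps the failure in the Result channel,
            // where the router can map it to an HTTP status; todo!() would
            // panic and abort the request task instead.
            Err(Error(
                ErrorKind::NotFound,
                "Presence state for this user was not found",
            ))
        }
    }

    fn main() {
        assert!(get_presence(true).is_ok());
        if let Err(Error(kind, msg)) = get_presence(false) {
            println!("{kind:?}: {msg}");
        }
    }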

View file

@ -34,7 +34,6 @@ pub async fn redact_event_route(
PduBuilder {
event_type: TimelineEventType::RoomRedaction,
content: to_raw_value(&RoomRedactionEventContent {
redacts: Some(body.event_id.clone()),
reason: body.reason.clone(),
})
.expect("event is valid, we just created it"),

View file

@ -23,7 +23,7 @@ pub async fn get_relating_events_with_rel_type_and_event_type_route(
let to = body
.to
.as_ref()
.and_then(|t| PduCount::try_from_string(t).ok());
.and_then(|t| PduCount::try_from_string(&t).ok());
// Use limit or else 10, with maximum 100
let limit = body
@ -73,7 +73,7 @@ pub async fn get_relating_events_with_rel_type_route(
let to = body
.to
.as_ref()
.and_then(|t| PduCount::try_from_string(t).ok());
.and_then(|t| PduCount::try_from_string(&t).ok());
// Use limit or else 10, with maximum 100
let limit = body
@ -121,7 +121,7 @@ pub async fn get_relating_events_route(
let to = body
.to
.as_ref()
.and_then(|t| PduCount::try_from_string(t).ok());
.and_then(|t| PduCount::try_from_string(&t).ok());
// Use limit or else 10, with maximum 100
let limit = body

View file

@ -142,9 +142,8 @@ pub async fn create_room_route(
content
}
None => {
// TODO: Add correct value for v11
let mut content = serde_json::from_str::<CanonicalJsonObject>(
to_raw_value(&RoomCreateEventContent::new_v1(sender_user.clone()))
to_raw_value(&RoomCreateEventContent::new(sender_user.clone()))
.map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Invalid creation content"))?
.get(),
)
@ -366,7 +365,7 @@ pub async fn create_room_route(
services().rooms.timeline.build_and_append_pdu(
PduBuilder {
event_type: TimelineEventType::RoomName,
content: to_raw_value(&RoomNameEventContent::new(name.clone()))
content: to_raw_value(&RoomNameEventContent::new(Some(name.clone())))
.expect("event is valid, we just created it"),
unsigned: None,
state_key: Some("".to_owned()),
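
Two ruma constructor changes are visible in this file: next calls RoomCreateEventContent::new_v1 (the creator-taking constructor was apparently renamed once room v11 dropped the creator field, hence the "Add correct value for v11" TODO), and RoomNameEventContent::new takes the name directly rather than an Option. A rough sketch of the versioned-constructor idea with hypothetical types:

    // Hypothetical mirror of splitting one constructor into versioned ones
    // after a newer room version dropped a field (here: the creator).
    struct CreateContentV1 {
        creator: String,
    }

    struct CreateContentV11; // the creator is implied by the event's sender

    impl CreateContentV1 {
        fn new_v1(creator: String) -> Self {
            Self { creator }
        }
    }

    impl CreateContentV11 {
        fn new_v11() -> Self {
            Self
        }
    }

    fn main() {
        let v1 = CreateContentV1::new_v1("@alice:example.org".to_owned());
        let _v11 = CreateContentV11::new_v11();
        println!("v1 creator: {}", v1.creator);
    }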

View file

@ -42,31 +42,24 @@ pub async fn get_login_types_route(
/// Note: You can use [`GET /_matrix/client/r0/login`](fn.get_supported_versions_route.html) to see
/// supported login types.
pub async fn login_route(body: Ruma<login::v3::Request>) -> Result<login::v3::Response> {
// To allow deprecated login methods
#![allow(deprecated)]
// Validate login method
// TODO: Other login methods
let user_id = match &body.login_info {
login::v3::LoginInfo::Password(login::v3::Password {
identifier,
password,
user,
address: _,
medium: _,
}) => {
let user_id = if let Some(UserIdentifier::UserIdOrLocalpart(user_id)) = identifier {
UserId::parse_with_server_name(
user_id.to_lowercase(),
services().globals.server_name(),
)
} else if let Some(user) = user {
UserId::parse(user)
let username = if let UserIdentifier::UserIdOrLocalpart(user_id) = identifier {
user_id.to_lowercase()
} else {
warn!("Bad login type: {:?}", &body.login_info);
return Err(Error::BadRequest(ErrorKind::Forbidden, "Bad login type."));
}
.map_err(|_| Error::BadRequest(ErrorKind::InvalidUsername, "Username is invalid."))?;
};
let user_id =
UserId::parse_with_server_name(username, services().globals.server_name())
.map_err(|_| {
Error::BadRequest(ErrorKind::InvalidUsername, "Username is invalid.")
})?;
let hash = services()
.users
.password_hash(&user_id)?
@ -112,28 +105,24 @@ pub async fn login_route(body: Ruma<login::v3::Request>) -> Result<login::v3::Re
));
}
}
login::v3::LoginInfo::ApplicationService(login::v3::ApplicationService {
identifier,
user,
}) => {
login::v3::LoginInfo::ApplicationService(login::v3::ApplicationService { identifier }) => {
if !body.from_appservice {
return Err(Error::BadRequest(
ErrorKind::Forbidden,
"Forbidden login type.",
));
};
if let Some(UserIdentifier::UserIdOrLocalpart(user_id)) = identifier {
UserId::parse_with_server_name(
user_id.to_lowercase(),
services().globals.server_name(),
)
} else if let Some(user) = user {
UserId::parse(user)
let username = if let UserIdentifier::UserIdOrLocalpart(user_id) = identifier {
user_id.to_lowercase()
} else {
warn!("Bad login type: {:?}", &body.login_info);
return Err(Error::BadRequest(ErrorKind::Forbidden, "Bad login type."));
}
.map_err(|_| Error::BadRequest(ErrorKind::InvalidUsername, "Username is invalid."))?
};
let user_id =
UserId::parse_with_server_name(username, services().globals.server_name())
.map_err(|_| {
Error::BadRequest(ErrorKind::InvalidUsername, "Username is invalid.")
})?;
user_id
}
_ => {
warn!("Unsupported or unknown login type: {:?}", &body.login_info);
@ -174,8 +163,6 @@ pub async fn login_route(body: Ruma<login::v3::Request>) -> Result<login::v3::Re
info!("{} logged in", user_id);
// Homeservers are still required to send the `home_server` field
#[allow(deprecated)]
Ok(login::v3::Response {
user_id,
access_token: token,
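
Both branches normalize a bare localpart with to_lowercase() before qualifying it with the homeserver's name; they differ only in whether the deprecated user field is still accepted. A hypothetical helper showing the normalization step (real validation happens in UserId::parse_with_server_name):

    // Hypothetical sketch: lowercase the supplied localpart, then qualify
    // it with the homeserver's name. Not the real MXID grammar check.
    fn qualify(localpart_or_id: &str, server_name: &str) -> Result<String, &'static str> {
        let lower = localpart_or_id.to_lowercase();
        let user_id = if lower.starts_with('@') {
            lower
        } else {
            format!("@{lower}:{server_name}")
        };
        if user_id.contains(':') {
            Ok(user_id)
        } else {
            Err("Username is invalid.")
        }
    }

    fn main() {
        assert_eq!(qualify("Alice", "example.org").unwrap(), "@alice:example.org");
    }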

View file

@ -85,7 +85,7 @@ pub async fn get_state_events_route(
if !services()
.rooms
.state_accessor
.user_can_see_state_events(sender_user, &body.room_id)?
.user_can_see_state_events(&sender_user, &body.room_id)?
{
return Err(Error::BadRequest(
ErrorKind::Forbidden,
@ -118,7 +118,7 @@ pub async fn get_state_events_for_key_route(
if !services()
.rooms
.state_accessor
.user_can_see_state_events(sender_user, &body.room_id)?
.user_can_see_state_events(&sender_user, &body.room_id)?
{
return Err(Error::BadRequest(
ErrorKind::Forbidden,
@ -157,7 +157,7 @@ pub async fn get_state_events_for_empty_key_route(
if !services()
.rooms
.state_accessor
.user_can_see_state_events(sender_user, &body.room_id)?
.user_can_see_state_events(&sender_user, &body.room_id)?
{
return Err(Error::BadRequest(
ErrorKind::Forbidden,

View file

@ -20,7 +20,7 @@ use ruma::{
StateEventType, TimelineEventType,
},
serde::Raw,
uint, DeviceId, JsOption, OwnedDeviceId, OwnedUserId, RoomId, UInt, UserId,
uint, DeviceId, OwnedDeviceId, OwnedUserId, RoomId, UInt, UserId,
};
use std::{
collections::{hash_map::Entry, BTreeMap, BTreeSet, HashMap, HashSet},
@ -554,7 +554,6 @@ async fn sync_helper(
}
}
#[allow(clippy::too_many_arguments)]
async fn load_joined_room(
sender_user: &UserId,
sender_device: &DeviceId,
@ -591,7 +590,7 @@ async fn load_joined_room(
|| services()
.rooms
.user
.last_notification_read(sender_user, room_id)?
.last_notification_read(&sender_user, &room_id)?
> since;
let mut timeline_users = HashSet::new();
@ -600,16 +599,16 @@ async fn load_joined_room(
}
services().rooms.lazy_loading.lazy_load_confirm_delivery(
sender_user,
sender_device,
room_id,
&sender_user,
&sender_device,
&room_id,
sincecount,
)?;
// Database queries:
let current_shortstatehash =
if let Some(s) = services().rooms.state.get_room_shortstatehash(room_id)? {
if let Some(s) = services().rooms.state.get_room_shortstatehash(&room_id)? {
s
} else {
error!("Room {} has no state", room_id);
@ -619,7 +618,7 @@ async fn load_joined_room(
let since_shortstatehash = services()
.rooms
.user
.get_token_shortstatehash(room_id, since)?;
.get_token_shortstatehash(&room_id, since)?;
let (heroes, joined_member_count, invited_member_count, joined_since_last_sync, state_events) =
if timeline_pdus.is_empty() && since_shortstatehash == Some(current_shortstatehash) {
@ -631,12 +630,12 @@ async fn load_joined_room(
let joined_member_count = services()
.rooms
.state_cache
.room_joined_count(room_id)?
.room_joined_count(&room_id)?
.unwrap_or(0);
let invited_member_count = services()
.rooms
.state_cache
.room_invited_count(room_id)?
.room_invited_count(&room_id)?
.unwrap_or(0);
// Recalculate heroes (first 5 members)
@ -649,7 +648,7 @@ async fn load_joined_room(
for hero in services()
.rooms
.timeline
.all_pdus(sender_user, room_id)?
.all_pdus(&sender_user, &room_id)?
.filter_map(|pdu| pdu.ok()) // Ignore all broken pdus
.filter(|(_, pdu)| pdu.kind == TimelineEventType::RoomMember)
.map(|(_, pdu)| {
@ -670,11 +669,11 @@ async fn load_joined_room(
) && (services()
.rooms
.state_cache
.is_joined(&user_id, room_id)?
.is_joined(&user_id, &room_id)?
|| services()
.rooms
.state_cache
.is_invited(&user_id, room_id)?)
.is_invited(&user_id, &room_id)?)
{
Ok::<_, Error>(Some(state_key.clone()))
} else {
@ -790,17 +789,17 @@ async fn load_joined_room(
// Reset lazy loading because this is an initial sync
services().rooms.lazy_loading.lazy_load_reset(
sender_user,
sender_device,
room_id,
&sender_user,
&sender_device,
&room_id,
)?;
// The state_events above should contain all timeline_users; mark them as
// lazy-loaded.
services().rooms.lazy_loading.lazy_load_mark_sent(
sender_user,
sender_device,
room_id,
&sender_user,
&sender_device,
&room_id,
lazy_loaded,
next_batchcount,
);
@ -867,14 +866,14 @@ async fn load_joined_room(
}
if !services().rooms.lazy_loading.lazy_load_was_sent_before(
sender_user,
sender_device,
room_id,
&sender_user,
&sender_device,
&room_id,
&event.sender,
)? || lazy_load_send_redundant
{
if let Some(member_event) = services().rooms.state_accessor.room_state_get(
room_id,
&room_id,
&StateEventType::RoomMember,
event.sender.as_str(),
)? {
@ -885,9 +884,9 @@ async fn load_joined_room(
}
services().rooms.lazy_loading.lazy_load_mark_sent(
sender_user,
sender_device,
room_id,
&sender_user,
&sender_device,
&room_id,
lazy_loaded,
next_batchcount,
);
@ -935,7 +934,7 @@ async fn load_joined_room(
match new_membership {
MembershipState::Join => {
// A new user joined an encrypted room
if !share_encrypted_room(sender_user, &user_id, room_id)? {
if !share_encrypted_room(&sender_user, &user_id, &room_id)? {
device_list_updates.insert(user_id);
}
}
@ -955,15 +954,15 @@ async fn load_joined_room(
services()
.rooms
.state_cache
.room_members(room_id)
.room_members(&room_id)
.flatten()
.filter(|user_id| {
// Don't send key updates from the sender to the sender
sender_user != user_id
&sender_user != user_id
})
.filter(|user_id| {
// Only send keys if the sender doesn't share an encrypted room with the target already
!share_encrypted_room(sender_user, user_id, room_id)
!share_encrypted_room(&sender_user, user_id, &room_id)
.unwrap_or(false)
}),
);
@ -998,7 +997,7 @@ async fn load_joined_room(
services()
.rooms
.user
.notification_count(sender_user, room_id)?
.notification_count(&sender_user, &room_id)?
.try_into()
.expect("notification count can't go that high"),
)
@ -1011,7 +1010,7 @@ async fn load_joined_room(
services()
.rooms
.user
.highlight_count(sender_user, room_id)?
.highlight_count(&sender_user, &room_id)?
.try_into()
.expect("highlight count can't go that high"),
)
@ -1040,15 +1039,15 @@ async fn load_joined_room(
.rooms
.edus
.read_receipt
.readreceipts_since(room_id, since)
.readreceipts_since(&room_id, since)
.filter_map(|r| r.ok()) // Filter out buggy events
.map(|(_, _, v)| v)
.collect();
if services().rooms.edus.typing.last_typing_update(room_id)? > since {
if services().rooms.edus.typing.last_typing_update(&room_id)? > since {
edus.push(
serde_json::from_str(
&serde_json::to_string(&services().rooms.edus.typing.typings_all(room_id)?)
&serde_json::to_string(&services().rooms.edus.typing.typings_all(&room_id)?)
.expect("event is valid, we just created it"),
)
.expect("event is valid, we just created it"),
@ -1057,7 +1056,7 @@ async fn load_joined_room(
// Save the state after this sync so we can send the correct state diff next sync
services().rooms.user.associate_token_shortstatehash(
room_id,
&room_id,
next_batch,
current_shortstatehash,
)?;
@ -1066,7 +1065,7 @@ async fn load_joined_room(
account_data: RoomAccountData {
events: services()
.account_data
.changes_since(Some(room_id), sender_user, since)?
.changes_since(Some(&room_id), &sender_user, since)?
.into_iter()
.filter_map(|(_, v)| {
serde_json::from_str(v.json().get())
@ -1103,7 +1102,7 @@ async fn load_joined_room(
fn load_timeline(
sender_user: &UserId,
room_id: &RoomId,
roomsincecount: PduCount,
sincecount: PduCount,
limit: u64,
) -> Result<(Vec<(PduCount, PduEvent)>, bool), Error> {
let timeline_pdus;
@ -1111,13 +1110,13 @@ fn load_timeline(
if services()
.rooms
.timeline
.last_timeline_count(sender_user, room_id)?
> roomsincecount
.last_timeline_count(&sender_user, &room_id)?
> sincecount
{
let mut non_timeline_pdus = services()
.rooms
.timeline
.pdus_until(sender_user, room_id, PduCount::max())?
.pdus_until(&sender_user, &room_id, PduCount::max())?
.filter_map(|r| {
// Filter out buggy events
if r.is_err() {
@ -1125,7 +1124,7 @@ fn load_timeline(
}
r.ok()
})
.take_while(|(pducount, _)| pducount > &roomsincecount);
.take_while(|(pducount, _)| pducount > &sincecount);
// Take the last events for the timeline
timeline_pdus = non_timeline_pdus
@ -1179,15 +1178,16 @@ pub async fn sync_events_v4_route(
// Setup watchers, so if there's no response, we can wait for them
let watcher = services().globals.watch(&sender_user, &sender_device);
let next_batch = services().globals.next_count()?;
let next_batch = services().globals.current_count()?;
let globalsince = body
let since = body
.pos
.as_ref()
.and_then(|string| string.parse().ok())
.unwrap_or(0);
let sincecount = PduCount::Normal(since);
if globalsince == 0 {
if since == 0 {
if let Some(conn_id) = &body.conn_id {
services().users.forget_sync_request_connection(
sender_user.clone(),
@ -1214,7 +1214,7 @@ pub async fn sync_events_v4_route(
if body.extensions.to_device.enabled.unwrap_or(false) {
services()
.users
.remove_to_device_events(&sender_user, &sender_device, globalsince)?;
.remove_to_device_events(&sender_user, &sender_device, since)?;
}
let mut left_encrypted_users = HashSet::new(); // Users that have left any encrypted rooms the sender was in
@ -1226,13 +1226,13 @@ pub async fn sync_events_v4_route(
device_list_changes.extend(
services()
.users
.keys_changed(sender_user.as_ref(), globalsince, None)
.keys_changed(sender_user.as_ref(), since, None)
.filter_map(|r| r.ok()),
);
for room_id in &all_joined_rooms {
let current_shortstatehash =
if let Some(s) = services().rooms.state.get_room_shortstatehash(room_id)? {
if let Some(s) = services().rooms.state.get_room_shortstatehash(&room_id)? {
s
} else {
error!("Room {} has no state", room_id);
@ -1242,7 +1242,7 @@ pub async fn sync_events_v4_route(
let since_shortstatehash = services()
.rooms
.user
.get_token_shortstatehash(room_id, globalsince)?;
.get_token_shortstatehash(&room_id, since)?;
let since_sender_member: Option<RoomMemberEventContent> = since_shortstatehash
.and_then(|shortstatehash| {
@ -1331,7 +1331,7 @@ pub async fn sync_events_v4_route(
if !share_encrypted_room(
&sender_user,
&user_id,
room_id,
&room_id,
)? {
device_list_changes.insert(user_id);
}
@ -1352,7 +1352,7 @@ pub async fn sync_events_v4_route(
services()
.rooms
.state_cache
.room_members(room_id)
.room_members(&room_id)
.flatten()
.filter(|user_id| {
// Don't send key updates from the sender to the sender
@ -1360,7 +1360,7 @@ pub async fn sync_events_v4_route(
})
.filter(|user_id| {
// Only send keys if the sender doesn't share an encrypted room with the target already
!share_encrypted_room(&sender_user, user_id, room_id)
!share_encrypted_room(&sender_user, user_id, &room_id)
.unwrap_or(false)
}),
);
@ -1371,7 +1371,7 @@ pub async fn sync_events_v4_route(
device_list_changes.extend(
services()
.users
.keys_changed(room_id.as_ref(), globalsince, None)
.keys_changed(room_id.as_ref(), since, None)
.filter_map(|r| r.ok()),
);
}
@ -1408,7 +1408,7 @@ pub async fn sync_events_v4_route(
continue;
}
let mut new_known_rooms = BTreeSet::new();
let mut new_known_rooms = BTreeMap::new();
lists.insert(
list_id.clone(),
@ -1424,12 +1424,12 @@ pub async fn sync_events_v4_route(
let room_ids = all_joined_rooms
[(u64::from(r.0) as usize)..=(u64::from(r.1) as usize)]
.to_vec();
new_known_rooms.extend(room_ids.iter().cloned());
new_known_rooms.extend(room_ids.iter().cloned().map(|r| (r, true)));
for room_id in &room_ids {
let todo_room = todo_rooms.entry(room_id.clone()).or_insert((
BTreeSet::new(),
0,
u64::MAX,
true,
));
let limit = list
.room_details
@ -1440,18 +1440,14 @@ pub async fn sync_events_v4_route(
.0
.extend(list.room_details.required_state.iter().cloned());
todo_room.1 = todo_room.1.max(limit);
// 0 means unknown because it got out of date
todo_room.2 = todo_room.2.min(
known_rooms
.get(&list_id)
.and_then(|k| k.get(room_id))
.copied()
.unwrap_or(0),
);
if known_rooms.get(&list_id).and_then(|k| k.get(room_id)) != Some(&true)
{
todo_room.2 = false;
}
}
sync_events::v4::SyncOp {
op: SlidingOp::Sync,
range: Some(r),
range: Some(r.clone()),
index: None,
room_ids,
room_id: None,
@ -1469,31 +1465,26 @@ pub async fn sync_events_v4_route(
conn_id.clone(),
list_id,
new_known_rooms,
globalsince,
);
}
}
let mut known_subscription_rooms = BTreeSet::new();
let mut known_subscription_rooms = BTreeMap::new();
for (room_id, room) in &body.room_subscriptions {
if !services().rooms.metadata.exists(room_id)? {
continue;
}
let todo_room = todo_rooms
.entry(room_id.clone())
.or_insert((BTreeSet::new(), 0, u64::MAX));
.or_insert((BTreeSet::new(), 0, true));
let limit = room.timeline_limit.map_or(10, u64::from).min(100);
todo_room.0.extend(room.required_state.iter().cloned());
todo_room.1 = todo_room.1.max(limit);
// 0 means unknown because it got out of date
todo_room.2 = todo_room.2.min(
known_rooms
if known_rooms
.get("subscriptions")
.and_then(|k| k.get(room_id))
.copied()
.unwrap_or(0),
);
known_subscription_rooms.insert(room_id.clone());
!= Some(&true)
{
todo_room.2 = false;
}
known_subscription_rooms.insert(room_id.clone(), true);
}
for r in body.unsubscribe_rooms {
@ -1508,7 +1499,6 @@ pub async fn sync_events_v4_route(
conn_id.clone(),
"subscriptions".to_owned(),
known_subscription_rooms,
globalsince,
);
}
@ -1522,13 +1512,12 @@ pub async fn sync_events_v4_route(
}
let mut rooms = BTreeMap::new();
for (room_id, (required_state_request, timeline_limit, roomsince)) in &todo_rooms {
let roomsincecount = PduCount::Normal(*roomsince);
for (room_id, (required_state_request, timeline_limit, known)) in &todo_rooms {
// TODO: per-room sync tokens
let (timeline_pdus, limited) =
load_timeline(&sender_user, room_id, roomsincecount, *timeline_limit)?;
load_timeline(&sender_user, &room_id, sincecount, *timeline_limit)?;
if roomsince != &0 && timeline_pdus.is_empty() {
if *known && timeline_pdus.is_empty() {
continue;
}
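
The sliding-sync bookkeeping differs in kind, not just shape: next stores a per-room u64 since-count in known_rooms (0 meaning "unknown / out of date"), which allows per-room incremental responses, while master stores a bool and can only distinguish known from unknown. A reduced sketch of the u64 variant:

    use std::collections::BTreeMap;

    // Sketch: a per-room counter records *when* the client last saw each
    // room; a plain bool could only record *whether* it saw the room.
    fn needs_initial_state(known_rooms: &BTreeMap<String, u64>, room_id: &str) -> bool {
        // 0 doubles as "never seen / out of date", forcing a full resync
        // of just that room rather than of everything.
        known_rooms.get(room_id).copied().unwrap_or(0) == 0
    }

    fn main() {
        let mut known = BTreeMap::new();
        known.insert("!a:example.org".to_owned(), 42u64);
        assert!(!needs_initial_state(&known, "!a:example.org"));
        assert!(needs_initial_state(&known, "!b:example.org"));
    }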
@ -1544,8 +1533,8 @@ pub async fn sync_events_v4_route(
}))
})?
.or_else(|| {
if roomsince != &0 {
Some(roomsince.to_string())
if since != 0 {
Some(since.to_string())
} else {
None
}
@ -1558,31 +1547,30 @@ pub async fn sync_events_v4_route(
let required_state = required_state_request
.iter()
.flat_map(|state| {
.map(|state| {
services()
.rooms
.state_accessor
.room_state_get(room_id, &state.0, &state.1)
.ok()
.flatten()
.map(|state| state.to_sync_state_event())
.room_state_get(&room_id, &state.0, &state.1)
})
.filter_map(|r| r.ok())
.filter_map(|o| o)
.map(|state| state.to_sync_state_event())
.collect();
// Heroes
let heroes = services()
.rooms
.state_cache
.room_members(room_id)
.room_members(&room_id)
.filter_map(|r| r.ok())
.filter(|member| member != &sender_user)
.flat_map(|member| {
.map(|member| {
Ok::<_, Error>(
services()
.rooms
.state_accessor
.get_member(room_id, &member)
.ok()
.flatten()
.get_member(&room_id, &member)?
.map(|memberevent| {
(
memberevent
@ -1590,26 +1578,32 @@ pub async fn sync_events_v4_route(
.unwrap_or_else(|| member.to_string()),
memberevent.avatar_url,
)
}),
)
})
})
.filter_map(|r| r.ok())
.filter_map(|o| o)
.take(5)
.collect::<Vec<_>>();
let name = match &heroes[..] {
[] => None,
[only] => Some(only.0.clone()),
[firsts @ .., last] => Some(
firsts
let name = if heroes.len() > 1 {
let last = heroes[0].0.clone();
Some(
heroes[1..]
.iter()
.map(|h| h.0.clone())
.collect::<Vec<_>>()
.join(", ")
+ " and "
+ &last.0,
),
+ &last,
)
} else if heroes.len() == 1 {
Some(heroes[0].0.clone())
} else {
None
};
let avatar = if let [only] = &heroes[..] {
only.1.clone()
let avatar = if heroes.len() == 1 {
heroes[0].1.clone()
} else {
None
};
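
Both versions build the same "A, B and C" summary from the hero list, though they disagree on ordering: the slice-pattern form keeps list order, while the indexed form moves the first hero to the end. A standalone comparison:

    // Sketch of the two name-joining strategies above, on plain strings.
    fn join_slice_pattern(heroes: &[&str]) -> Option<String> {
        match heroes {
            [] => None,
            [only] => Some((*only).to_string()),
            [firsts @ .., last] => Some(format!("{} and {}", firsts.join(", "), last)),
        }
    }

    fn join_indexed(heroes: &[&str]) -> Option<String> {
        if heroes.len() > 1 {
            // Treats the *first* hero as the trailing name.
            Some(format!("{} and {}", heroes[1..].join(", "), heroes[0]))
        } else {
            heroes.first().map(|h| (*h).to_string())
        }
    }

    fn main() {
        let heroes = ["Alice", "Bob", "Carol"];
        assert_eq!(join_slice_pattern(&heroes).unwrap(), "Alice, Bob and Carol");
        assert_eq!(join_indexed(&heroes).unwrap(), "Bob, Carol and Alice");
    }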
@ -1617,17 +1611,17 @@ pub async fn sync_events_v4_route(
rooms.insert(
room_id.clone(),
sync_events::v4::SlidingSyncRoom {
name: services().rooms.state_accessor.get_name(room_id)?.or(name),
avatar: if let Some(avatar) = avatar {
JsOption::Some(avatar)
} else {
match services().rooms.state_accessor.get_avatar(room_id)? {
JsOption::Some(avatar) => JsOption::from_option(avatar.url),
JsOption::Null => JsOption::Null,
JsOption::Undefined => JsOption::Undefined,
}
},
initial: Some(roomsince == &0),
name: services()
.rooms
.state_accessor
.get_name(&room_id)?
.or_else(|| name),
avatar: services()
.rooms
.state_accessor
.get_avatar(&room_id)?
.map_or(avatar, |a| a.url),
initial: Some(!known),
is_dm: None,
invite_state: None,
unread_notifications: UnreadNotificationsCount {
@ -1635,7 +1629,7 @@ pub async fn sync_events_v4_route(
services()
.rooms
.user
.highlight_count(&sender_user, room_id)?
.highlight_count(&sender_user, &room_id)?
.try_into()
.expect("notification count can't go that high"),
),
@ -1643,7 +1637,7 @@ pub async fn sync_events_v4_route(
services()
.rooms
.user
.notification_count(&sender_user, room_id)?
.notification_count(&sender_user, &room_id)?
.try_into()
.expect("notification count can't go that high"),
),
@ -1656,7 +1650,7 @@ pub async fn sync_events_v4_route(
(services()
.rooms
.state_cache
.room_joined_count(room_id)?
.room_joined_count(&room_id)?
.unwrap_or(0) as u32)
.into(),
),
@ -1664,12 +1658,11 @@ pub async fn sync_events_v4_route(
(services()
.rooms
.state_cache
.room_invited_count(room_id)?
.room_invited_count(&room_id)?
.unwrap_or(0) as u32)
.into(),
),
num_live: None, // Count events in timeline greater than global sync counter
timestamp: None,
},
);
}
@ -1688,7 +1681,7 @@ pub async fn sync_events_v4_route(
}
Ok(sync_events::v4::Response {
initial: globalsince == 0,
initial: since == 0,
txn_id: body.txn_id.clone(),
pos: next_batch.to_string(),
lists,
@ -1719,7 +1712,7 @@ pub async fn sync_events_v4_route(
global: if body.extensions.account_data.enabled.unwrap_or(false) {
services()
.account_data
.changes_since(None, &sender_user, globalsince)?
.changes_since(None, &sender_user, since)?
.into_iter()
.filter_map(|(_, v)| {
serde_json::from_str(v.json().get())

View file

@ -26,7 +26,6 @@ pub async fn get_supported_versions_route(
"v1.2".to_owned(),
"v1.3".to_owned(),
"v1.4".to_owned(),
"v1.5".to_owned(),
],
unstable_features: BTreeMap::from_iter([("org.matrix.e2e_cross_signing".to_owned(), true)]),
};

View file

@ -341,7 +341,7 @@ fn add_port_to_hostname(destination_str: &str) -> FedDest {
}
/// Returns: actual_destination, host header
/// Implemented according to the specification at <https://matrix.org/docs/spec/server_server/r0.1.4#resolving-server-names>
/// Implemented according to the specification at https://matrix.org/docs/spec/server_server/r0.1.4#resolving-server-names
/// Numbers in comments below refer to bullet points in linked section of specification
async fn find_actual_destination(destination: &'_ ServerName) -> (FedDest, FedDest) {
debug!("Finding actual destination for {destination}");
@ -666,7 +666,7 @@ pub fn parse_incoming_pdu(
let room_version_id = services().rooms.state.get_room_version(&room_id)?;
let (event_id, value) = match gen_event_id_canonical_json(pdu, &room_version_id) {
let (event_id, value) = match gen_event_id_canonical_json(&pdu, &room_version_id) {
Ok(t) => t,
Err(_) => {
// Event could not be converted to canonical json
@ -707,24 +707,7 @@ pub async fn send_transaction_message_route(
// let mut auth_cache = EventMap::new();
for pdu in &body.pdus {
let value: CanonicalJsonObject = serde_json::from_str(pdu.get()).map_err(|e| {
warn!("Error parsing incoming event {:?}: {:?}", pdu, e);
Error::BadServerResponse("Invalid PDU in server response")
})?;
let room_id: OwnedRoomId = value
.get("room_id")
.and_then(|id| RoomId::parse(id.as_str()?).ok())
.ok_or(Error::BadRequest(
ErrorKind::InvalidParam,
"Invalid room id in pdu",
))?;
if services().rooms.state.get_room_version(&room_id).is_err() {
debug!("Server is not in room {room_id}");
continue;
}
let r = parse_incoming_pdu(pdu);
let r = parse_incoming_pdu(&pdu);
let (event_id, value, room_id) = match r {
Ok(t) => t,
Err(e) => {
@ -735,6 +718,11 @@ pub async fn send_transaction_message_route(
};
// We do not add the event_id field to the pdu here because of the signature and hash checks
services()
.rooms
.event_handler
.acl_check(sender_servername, &room_id)?;
let mutex = Arc::clone(
services()
.globals
@ -992,7 +980,7 @@ pub async fn get_event_route(
if !services().rooms.state_accessor.server_can_see_event(
sender_servername,
room_id,
&room_id,
&body.event_id,
)? {
return Err(Error::BadRequest(
@ -1058,7 +1046,7 @@ pub async fn get_backfill_route(
let all_events = services()
.rooms
.timeline
.pdus_until(user_id!("@doesntmatter:conduit.rs"), &body.room_id, until)?
.pdus_until(&user_id!("@doesntmatter:conduit.rs"), &body.room_id, until)?
.take(limit.try_into().unwrap());
let events = all_events
@ -1075,7 +1063,7 @@ pub async fn get_backfill_route(
})
.map(|(_, pdu)| services().rooms.timeline.get_pdu_json(&pdu.event_id))
.filter_map(|r| r.ok().flatten())
.map(PduEvent::convert_to_outgoing_federation_event)
.map(|pdu| PduEvent::convert_to_outgoing_federation_event(pdu))
.collect();
Ok(get_backfill::v1::Response {
@ -1799,13 +1787,6 @@ pub async fn get_devices_route(
return Err(Error::bad_config("Federation is disabled."));
}
if body.user_id.server_name() != services().globals.server_name() {
return Err(Error::BadRequest(
ErrorKind::InvalidParam,
"Tried to access user from other server.",
));
}
let sender_servername = body
.sender_servername
.as_ref()
@ -1880,13 +1861,6 @@ pub async fn get_profile_information_route(
return Err(Error::bad_config("Federation is disabled."));
}
if body.user_id.server_name() != services().globals.server_name() {
return Err(Error::BadRequest(
ErrorKind::InvalidParam,
"Tried to access user from other server.",
));
}
let mut displayname = None;
let mut avatar_url = None;
let mut blurhash = None;
@ -1923,17 +1897,6 @@ pub async fn get_keys_route(body: Ruma<get_keys::v1::Request>) -> Result<get_key
return Err(Error::bad_config("Federation is disabled."));
}
if body
.device_keys
.iter()
.any(|(u, _)| u.server_name() != services().globals.server_name())
{
return Err(Error::BadRequest(
ErrorKind::InvalidParam,
"Tried to access user from other server.",
));
}
let result = get_keys_helper(None, &body.device_keys, |u| {
Some(u.server_name()) == body.sender_servername.as_deref()
})
@ -1956,17 +1919,6 @@ pub async fn claim_keys_route(
return Err(Error::bad_config("Federation is disabled."));
}
if body
.one_time_keys
.iter()
.any(|(u, _)| u.server_name() != services().globals.server_name())
{
return Err(Error::BadRequest(
ErrorKind::InvalidParam,
"Tried to access user from other server.",
));
}
let result = claim_keys_helper(&body.one_time_keys).await?;
Ok(claim_keys::v1::Response {

View file

@ -29,9 +29,7 @@ use crate::Result;
/// would be used for `ordinary.onion`, `matrix.myspecial.onion`, but not `hello.myspecial.onion`.
#[derive(Clone, Debug, Deserialize)]
#[serde(rename_all = "snake_case")]
#[derive(Default)]
pub enum ProxyConfig {
#[default]
None,
Global {
#[serde(deserialize_with = "crate::utils::deserialize_from_str")]
@ -50,6 +48,11 @@ impl ProxyConfig {
})
}
}
impl Default for ProxyConfig {
fn default() -> Self {
ProxyConfig::None
}
}
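
These two forms are equivalent: deriving Default with a #[default] attribute on a variant (stable since Rust 1.62) versus writing the impl by hand. Side by side on a standalone enum:

    // The derived form and the manual impl below behave identically.
    #[derive(Debug, Default, PartialEq)]
    enum Proxy {
        #[default]
        None,
        Global,
    }

    #[derive(Debug, PartialEq)]
    enum ProxyManual {
        None,
        Global,
    }

    impl Default for ProxyManual {
        fn default() -> Self {
            ProxyManual::None
        }
    }

    fn main() {
        assert_eq!(Proxy::default(), Proxy::None);
        assert_eq!(ProxyManual::default(), ProxyManual::None);
        let _ = (Proxy::Global, ProxyManual::Global);
    }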
#[derive(Clone, Debug, Deserialize)]
pub struct PartialProxyConfig {

View file

@ -116,7 +116,7 @@ impl KvTree for PersyTree {
match iter {
Ok(iter) => Box::new(iter.filter_map(|(k, v)| {
v.into_iter()
.map(|val| ((*k).to_owned(), (*val).to_owned()))
.map(|val| ((*k).to_owned().into(), (*val).to_owned().into()))
.next()
})),
Err(e) => {
@ -142,7 +142,7 @@ impl KvTree for PersyTree {
Ok(iter) => {
let map = iter.filter_map(|(k, v)| {
v.into_iter()
.map(|val| ((*k).to_owned(), (*val).to_owned()))
.map(|val| ((*k).to_owned().into(), (*val).to_owned().into()))
.next()
});
if backwards {
@ -179,7 +179,7 @@ impl KvTree for PersyTree {
iter.take_while(move |(k, _)| (*k).starts_with(&owned_prefix))
.filter_map(|(k, v)| {
v.into_iter()
.map(|val| ((*k).to_owned(), (*val).to_owned()))
.map(|val| ((*k).to_owned().into(), (*val).to_owned().into()))
.next()
}),
)

View file

@ -33,7 +33,7 @@ impl Iterator for PreparedStatementIterator<'_> {
struct NonAliasingBox<T>(*mut T);
impl<T> Drop for NonAliasingBox<T> {
fn drop(&mut self) {
drop(unsafe { Box::from_raw(self.0) });
unsafe { Box::from_raw(self.0) };
}
}
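
Both bodies free the allocation: the temporary Box produced by Box::from_raw is dropped at the end of the statement either way. The explicit drop(...) form documents the intent and satisfies Box::from_raw's #[must_use] annotation. A self-contained demonstration:

    fn main() {
        // Reconstructing a Box from a raw pointer transfers ownership back;
        // dropping the Box frees the allocation.
        let raw: *mut String = Box::into_raw(Box::new(String::from("hello")));
        // SAFETY: `raw` came from Box::into_raw above and is not used again.
        drop(unsafe { Box::from_raw(raw) });
    }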

View file

@ -8,7 +8,6 @@ use tokio::sync::watch;
#[derive(Default)]
pub(super) struct Watchers {
#[allow(clippy::type_complexity)]
watchers: RwLock<HashMap<Vec<u8>, (watch::Sender<()>, watch::Receiver<()>)>>,
}

View file

@ -123,12 +123,13 @@ impl service::account_data::Data for KeyValueDatabase {
.take_while(move |(k, _)| k.starts_with(&prefix))
.map(|(k, v)| {
Ok::<_, Error>((
RoomAccountDataEventType::from(
RoomAccountDataEventType::try_from(
utils::string_from_bytes(k.rsplit(|&b| b == 0xff).next().ok_or_else(
|| Error::bad_database("RoomUserData ID in db is invalid."),
)?)
.map_err(|_| Error::bad_database("RoomUserData ID in db is invalid."))?,
),
)
.map_err(|_| Error::bad_database("RoomUserData ID in db is invalid."))?,
serde_json::from_slice::<Raw<AnyEphemeralRoomEvent>>(&v).map_err(|_| {
Error::bad_database("Database contains invalid account data.")
})?,

View file

@ -256,8 +256,8 @@ lasttimelinecount_cache: {lasttimelinecount_cache}\n"
..
} = new_keys;
keys.verify_keys.extend(verify_keys);
keys.old_verify_keys.extend(old_verify_keys);
keys.verify_keys.extend(verify_keys.into_iter());
keys.old_verify_keys.extend(old_verify_keys.into_iter());
self.server_signingkeys.insert(
origin.as_bytes(),

View file

@ -157,9 +157,10 @@ impl service::rooms::short::Data for KeyValueDatabase {
.ok_or_else(|| Error::bad_database("Invalid statekey in shortstatekey_statekey."))?;
let event_type =
StateEventType::from(utils::string_from_bytes(eventtype_bytes).map_err(|_| {
StateEventType::try_from(utils::string_from_bytes(eventtype_bytes).map_err(|_| {
Error::bad_database("Event type in shortstatekey_statekey is invalid unicode.")
})?);
})?)
.map_err(|_| Error::bad_database("Event type in shortstatekey_statekey is invalid."))?;
let state_key = utils::string_from_bytes(statekey_bytes).map_err(|_| {
Error::bad_database("Statekey in shortstatekey_statekey is invalid unicode.")
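
The From-versus-TryFrom pairs in these database hunks appear to follow a ruma API move: event-type enums became infallibly convertible from strings, with unknown types kept in a catch-all variant, so the "invalid event type" database error path disappears. A reduced sketch:

    // Sketch of moving from TryFrom<String> to From<String>: instead of
    // rejecting unknown event types, keep them in a catch-all variant.
    #[derive(Debug, PartialEq)]
    enum StateEventType {
        RoomMember,
        Custom(String),
    }

    impl From<String> for StateEventType {
        fn from(s: String) -> Self {
            match s.as_str() {
                "m.room.member" => StateEventType::RoomMember,
                _ => StateEventType::Custom(s),
            }
        }
    }

    fn main() {
        // Every string converts; nothing is "invalid" any more.
        assert_eq!(
            StateEventType::from("m.room.member".to_owned()),
            StateEventType::RoomMember
        );
        assert!(matches!(
            StateEventType::from("org.example.x".to_owned()),
            StateEventType::Custom(_)
        ));
    }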

View file

@ -20,7 +20,7 @@ impl service::rooms::state_accessor::Data for KeyValueDatabase {
let parsed = services()
.rooms
.state_compressor
.parse_compressed_state_event(compressed)?;
.parse_compressed_state_event(&compressed)?;
result.insert(parsed.0, parsed.1);
i += 1;
@ -49,7 +49,7 @@ impl service::rooms::state_accessor::Data for KeyValueDatabase {
let (_, eventid) = services()
.rooms
.state_compressor
.parse_compressed_state_event(compressed)?;
.parse_compressed_state_event(&compressed)?;
if let Some(pdu) = services().rooms.timeline.get_pdu(&eventid)? {
result.insert(
(
@ -101,7 +101,7 @@ impl service::rooms::state_accessor::Data for KeyValueDatabase {
services()
.rooms
.state_compressor
.parse_compressed_state_event(compressed)
.parse_compressed_state_event(&compressed)
.ok()
.map(|(_, id)| id)
}))

View file

@ -471,7 +471,6 @@ impl service::rooms::state_cache::Data for KeyValueDatabase {
}
/// Returns an iterator over all rooms a user was invited to.
#[allow(clippy::type_complexity)]
#[tracing::instrument(skip(self))]
fn rooms_invited<'a>(
&'a self,
@ -550,7 +549,6 @@ impl service::rooms::state_cache::Data for KeyValueDatabase {
}
/// Returns an iterator over all rooms a user left.
#[allow(clippy::type_complexity)]
#[tracing::instrument(skip(self))]
fn rooms_left<'a>(
&'a self,

View file

@ -10,7 +10,7 @@ impl service::rooms::threads::Data for KeyValueDatabase {
user_id: &'a UserId,
room_id: &'a RoomId,
until: u64,
_include: &'a IncludeThreads,
include: &'a IncludeThreads,
) -> Result<Box<dyn Iterator<Item = Result<(u64, PduEvent)>> + 'a>> {
let prefix = services()
.rooms
@ -27,7 +27,7 @@ impl service::rooms::threads::Data for KeyValueDatabase {
self.threadid_userids
.iter_from(&current, true)
.take_while(move |(k, _)| k.starts_with(&prefix))
.map(move |(pduid, _users)| {
.map(move |(pduid, users)| {
let count = utils::u64_from_bytes(&pduid[(mem::size_of::<u64>())..])
.map_err(|_| Error::bad_database("Invalid pduid in threadid_userids."))?;
let mut pdu = services()
@ -52,13 +52,13 @@ impl service::rooms::threads::Data for KeyValueDatabase {
.collect::<Vec<_>>()
.join(&[0xff][..]);
self.threadid_userids.insert(root_id, &users)?;
self.threadid_userids.insert(&root_id, &users)?;
Ok(())
}
fn get_participants(&self, root_id: &[u8]) -> Result<Option<Vec<OwnedUserId>>> {
if let Some(users) = self.threadid_userids.get(root_id)? {
if let Some(users) = self.threadid_userids.get(&root_id)? {
Ok(Some(
users
.split(|b| *b == 0xff)

View file

@ -39,10 +39,11 @@ impl service::rooms::timeline::Data for KeyValueDatabase {
/// Returns the `count` of this pdu's id.
fn get_pdu_count(&self, event_id: &EventId) -> Result<Option<PduCount>> {
self.eventid_pduid
Ok(self
.eventid_pduid
.get(event_id.as_bytes())?
.map(|pdu_id| pdu_count(&pdu_id))
.transpose()
.transpose()?)
}
/// Returns the json of a pdu.
@ -79,10 +80,12 @@ impl service::rooms::timeline::Data for KeyValueDatabase {
/// Returns the pdu's id.
fn get_pdu_id(&self, event_id: &EventId) -> Result<Option<Vec<u8>>> {
self.eventid_pduid.get(event_id.as_bytes())
Ok(self.eventid_pduid.get(event_id.as_bytes())?)
}
/// Returns the pdu.
///
/// Checks the `eventid_outlierpdu` Tree if not found in the timeline.
fn get_non_outlier_pdu(&self, event_id: &EventId) -> Result<Option<PduEvent>> {
self.eventid_pduid
.get(event_id.as_bytes())?
@ -229,7 +232,7 @@ impl service::rooms::timeline::Data for KeyValueDatabase {
room_id: &RoomId,
until: PduCount,
) -> Result<Box<dyn Iterator<Item = Result<(PduCount, PduEvent)>> + 'a>> {
let (prefix, current) = count_to_id(room_id, until, 1, true)?;
let (prefix, current) = count_to_id(&room_id, until, 1, true)?;
let user_id = user_id.to_owned();
@ -256,7 +259,7 @@ impl service::rooms::timeline::Data for KeyValueDatabase {
room_id: &RoomId,
from: PduCount,
) -> Result<Box<dyn Iterator<Item = Result<(PduCount, PduEvent)>> + 'a>> {
let (prefix, current) = count_to_id(room_id, from, 1, false)?;
let (prefix, current) = count_to_id(&room_id, from, 1, false)?;
let user_id = user_id.to_owned();
@ -331,7 +334,7 @@ fn count_to_id(
.rooms
.short
.get_shortroomid(room_id)?
.ok_or_else(|| Error::bad_database("Looked for bad shortroomid in timeline"))?
.expect("room exists")
.to_be_bytes()
.to_vec();
let mut pdu_id = prefix.clone();
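
The get_shortroomid difference in count_to_id is an error-handling strategy: next maps a missing row to Error::bad_database, while master's .expect("room exists") would panic the whole process on a corrupt or racy database. A sketch of the recoverable form:

    // Hypothetical sketch: turn a missing lookup into an error, not a panic.
    #[derive(Debug)]
    struct Error(&'static str);

    fn get_shortroomid(known: bool) -> Option<u64> {
        known.then_some(42u64)
    }

    fn count_to_id(known: bool) -> Result<Vec<u8>, Error> {
        let prefix = get_shortroomid(known)
            // expect("room exists") here would crash the server process;
            // an Err is recoverable per-request.
            .ok_or(Error("Looked for bad shortroomid in timeline"))?
            .to_be_bytes()
            .to_vec();
        Ok(prefix)
    }

    fn main() {
        assert!(count_to_id(true).is_ok());
        assert!(count_to_id(false).is_err());
    }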

View file

@ -146,9 +146,10 @@ impl service::users::Data for KeyValueDatabase {
self.userid_avatarurl
.get(user_id.as_bytes())?
.map(|bytes| {
utils::string_from_bytes(&bytes)
let s = utils::string_from_bytes(&bytes)
.map_err(|_| Error::bad_database("Avatar URL in db is invalid."))?;
s.try_into()
.map_err(|_| Error::bad_database("Avatar URL in db is invalid."))
.map(Into::into)
})
.transpose()
}

View file

@ -852,9 +852,7 @@ impl KeyValueDatabase {
if rule.is_some() {
let mut rule = rule.unwrap().clone();
rule.rule_id = content_rule_transformation[1].to_owned();
rules_list
.content
.shift_remove(content_rule_transformation[0]);
rules_list.content.remove(content_rule_transformation[0]);
rules_list.content.insert(rule);
}
}
@ -877,7 +875,7 @@ impl KeyValueDatabase {
if let Some(rule) = rule {
let mut rule = rule.clone();
rule.rule_id = transformation[1].to_owned();
rules_list.underride.shift_remove(transformation[0]);
rules_list.underride.remove(transformation[0]);
rules_list.underride.insert(rule);
}
}

View file

@ -1,3 +1,12 @@
#![warn(
rust_2018_idioms,
unused_qualifications,
clippy::cloned_instead_of_copied,
clippy::str_to_string
)]
#![allow(clippy::suspicious_else_formatting)]
#![deny(clippy::dbg_macro)]
pub mod api;
mod config;
mod database;

View file

@ -1,3 +1,13 @@
#![warn(
rust_2018_idioms,
unused_qualifications,
clippy::cloned_instead_of_copied,
clippy::str_to_string,
clippy::future_not_send
)]
#![allow(clippy::suspicious_else_formatting)]
#![deny(clippy::dbg_macro)]
use std::{future::Future, io, net::SocketAddr, sync::atomic, time::Duration};
use axum::{
@ -228,7 +238,7 @@ async fn spawn_task<B: Send + 'static>(
.map_err(|_| StatusCode::INTERNAL_SERVER_ERROR)
}
async fn unrecognized_method<B: Send>(
async fn unrecognized_method<B>(
req: axum::http::Request<B>,
next: axum::middleware::Next<B>,
) -> std::result::Result<axum::response::Response, StatusCode> {

View file

@ -50,7 +50,7 @@ enum AdminCommand {
/// Registering a new bridge using the ID of an existing bridge will replace
/// the old one.
///
/// [commandbody]()
/// [commandbody]
/// # ```
/// # yaml content here
/// # ```
@ -96,7 +96,7 @@ enum AdminCommand {
/// Removing a large number of users from a room may generate a significant number of leave events.
/// The time needed to leave rooms depends heavily on the rooms and servers involved.
///
/// [commandbody]()
/// [commandbody]
/// # ```
/// # User list here
/// # ```
@ -121,7 +121,7 @@ enum AdminCommand {
/// The PDU event is only checked for validity and is not added to the
/// database.
///
/// [commandbody]()
/// [commandbody]
/// # ```
/// # PDU json content here
/// # ```
@ -165,14 +165,14 @@ enum AdminCommand {
EnableRoom { room_id: Box<RoomId> },
/// Verify json signatures
/// [commandbody]()
/// [commandbody]
/// # ```
/// # json here
/// # ```
SignJson,
/// Verify json signatures
/// [commandbody]()
/// [commandbody]
/// # ```
/// # json here
/// # ```
@ -858,15 +858,12 @@ impl Service {
.expect("Regex compilation should not fail");
let text = re.replace_all(&text, "<code>$1</code>: $4");
// Look for a `[commandbody]()` tag. If it exists, use all lines below it that
// Look for a `[commandbody]` tag. If it exists, use all lines below it that
// start with a `#` in the USAGE section.
let mut text_lines: Vec<&str> = text.lines().collect();
let mut command_body = String::new();
if let Some(line_index) = text_lines
.iter()
.position(|line| *line == "[commandbody]()")
{
if let Some(line_index) = text_lines.iter().position(|line| *line == "[commandbody]") {
text_lines.remove(line_index);
while text_lines
@ -935,7 +932,7 @@ impl Service {
services().users.create(&conduit_user, None)?;
let mut content = RoomCreateEventContent::new_v1(conduit_user.clone());
let mut content = RoomCreateEventContent::new(conduit_user.clone());
content.federate = true;
content.predecessor = None;
content.room_version = services().globals.default_room_version();
@ -1051,7 +1048,7 @@ impl Service {
services().rooms.timeline.build_and_append_pdu(
PduBuilder {
event_type: TimelineEventType::RoomName,
content: to_raw_value(&RoomNameEventContent::new(room_name))
content: to_raw_value(&RoomNameEventContent::new(Some(room_name)))
.expect("event is valid, we just created it"),
unsigned: None,
state_key: Some("".to_owned()),
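
The [commandbody]() / [commandbody] change earlier in this file only swaps the marker literal the help-text parser searches for; the mechanism is the same either way: find the marker line, drop it, then collect the #-prefixed lines beneath it as the command body. A reduced sketch, assuming the doc-comment conventions shown above:

    // Sketch of the help-text convention both sides parse: a marker line
    // flags the start of a usage body, and following `#` lines belong to it.
    fn extract_command_body(text: &str, marker: &str) -> Option<String> {
        let mut lines: Vec<&str> = text.lines().collect();
        let idx = lines.iter().position(|line| *line == marker)?;
        lines.remove(idx);
        let body: Vec<&str> = lines[idx..]
            .iter()
            .take_while(|line| line.starts_with('#'))
            .map(|line| line.trim_start_matches('#').trim())
            .collect();
        Some(body.join("\n"))
    }

    fn main() {
        let text = "USAGE\n[commandbody]\n# ```\n# yaml content here\n# ```";
        let body = extract_command_body(text, "[commandbody]").unwrap();
        assert_eq!(body, "```\nyaml content here\n```");
    }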

View file

@ -8,12 +8,6 @@ use ruma::{
use crate::api::server_server::FedDest;
use crate::{services, Config, Error, Result};
use futures_util::FutureExt;
use hyper::{
client::connect::dns::{GaiResolver, Name},
service::Service as HyperService,
};
use reqwest::dns::{Addrs, Resolve, Resolving};
use ruma::{
api::{
client::sync::sync_events,
@ -23,10 +17,8 @@ use ruma::{
};
use std::{
collections::{BTreeMap, HashMap},
error::Error as StdError,
fs,
future::{self, Future},
iter,
future::Future,
net::{IpAddr, SocketAddr},
path::PathBuf,
sync::{
@ -64,7 +56,6 @@ pub struct Service {
pub unstable_room_versions: Vec<RoomVersionId>,
pub bad_event_ratelimiter: Arc<RwLock<HashMap<OwnedEventId, RateLimitState>>>,
pub bad_signature_ratelimiter: Arc<RwLock<HashMap<Vec<String>, RateLimitState>>>,
pub bad_query_ratelimiter: Arc<RwLock<HashMap<OwnedServerName, RateLimitState>>>,
pub servername_ratelimiter: Arc<RwLock<HashMap<OwnedServerName, Arc<Semaphore>>>>,
pub sync_receivers: RwLock<HashMap<(OwnedUserId, OwnedDeviceId), SyncHandle>>,
pub roomid_mutex_insert: RwLock<HashMap<OwnedRoomId, Arc<Mutex<()>>>>,
@ -107,45 +98,6 @@ impl Default for RotationHandler {
}
}
pub struct Resolver {
inner: GaiResolver,
overrides: Arc<RwLock<TlsNameMap>>,
}
impl Resolver {
pub fn new(overrides: Arc<RwLock<TlsNameMap>>) -> Self {
Resolver {
inner: GaiResolver::new(),
overrides,
}
}
}
impl Resolve for Resolver {
fn resolve(&self, name: Name) -> Resolving {
self.overrides
.read()
.expect("lock should not be poisoned")
.get(name.as_str())
.and_then(|(override_name, port)| {
override_name.first().map(|first_name| {
let x: Box<dyn Iterator<Item = SocketAddr> + Send> =
Box::new(iter::once(SocketAddr::new(*first_name, *port)));
let x: Resolving = Box::pin(future::ready(Ok(x)));
x
})
})
.unwrap_or_else(|| {
let this = &mut self.inner.clone();
Box::pin(HyperService::<Name>::call(this, name).map(|result| {
result
.map(|addrs| -> Addrs { Box::new(addrs) })
.map_err(|err| -> Box<dyn StdError + Send + Sync> { Box::new(err) })
}))
})
}
}
impl Service {
pub fn load(db: &'static dyn Data, config: Config) -> Result<Self> {
let keypair = db.load_keypair();
@ -167,8 +119,14 @@ impl Service {
.map(|secret| jsonwebtoken::DecodingKey::from_secret(secret.as_bytes()));
let default_client = reqwest_client_builder(&config)?.build()?;
let name_override = Arc::clone(&tls_name_override);
let federation_client = reqwest_client_builder(&config)?
.dns_resolver(Arc::new(Resolver::new(tls_name_override.clone())))
.resolve_fn(move |domain| {
let read_guard = name_override.read().unwrap();
let (override_name, port) = read_guard.get(&domain)?;
let first_name = override_name.get(0)?;
Some(SocketAddr::new(*first_name, *port))
})
.build()?;
// Supported and stable room versions
@ -202,7 +160,6 @@ impl Service {
unstable_room_versions,
bad_event_ratelimiter: Arc::new(RwLock::new(HashMap::new())),
bad_signature_ratelimiter: Arc::new(RwLock::new(HashMap::new())),
bad_query_ratelimiter: Arc::new(RwLock::new(HashMap::new())),
servername_ratelimiter: Arc::new(RwLock::new(HashMap::new())),
roomid_mutex_state: RwLock::new(HashMap::new()),
roomid_mutex_insert: RwLock::new(HashMap::new()),
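
Both resolvers in the hunk above implement the same override: if the TLS name map has an entry for the hostname (populated during federation server resolution), hand reqwest a pre-resolved socket address; otherwise fall back to ordinary DNS. They differ only in plumbing — a dns_resolver trait object on next versus a resolve_fn closure on master's client builder. The shared lookup, reduced to a standalone sketch:

    use std::collections::HashMap;
    use std::net::{IpAddr, Ipv4Addr, SocketAddr};
    use std::sync::{Arc, RwLock};

    // Sketch of the override table: server name -> pre-resolved addresses
    // plus the port discovered during server-name resolution.
    type TlsNameMap = HashMap<String, (Vec<IpAddr>, u16)>;

    fn lookup(overrides: &Arc<RwLock<TlsNameMap>>, domain: &str) -> Option<SocketAddr> {
        let read_guard = overrides.read().expect("lock should not be poisoned");
        let (ips, port) = read_guard.get(domain)?;
        // Returning None means "no override": fall back to ordinary DNS.
        ips.first().map(|ip| SocketAddr::new(*ip, *port))
    }

    fn main() {
        let overrides: Arc<RwLock<TlsNameMap>> = Arc::new(RwLock::new(HashMap::new()));
        overrides.write().unwrap().insert(
            "matrix.example.org".to_owned(),
            (vec![IpAddr::V4(Ipv4Addr::new(203, 0, 113, 7))], 8448),
        );
        assert!(lookup(&overrides, "matrix.example.org").is_some());
        assert!(lookup(&overrides, "other.example").is_none());
    }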

View file

@ -385,7 +385,7 @@ impl PartialEq for PduEvent {
}
impl PartialOrd for PduEvent {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(self.cmp(other))
self.event_id.partial_cmp(&other.event_id)
}
}
impl Ord for PduEvent {
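
The partial_cmp change matters for consistency: when Ord is also implemented (as the impl Ord above shows), PartialOrd should delegate with Some(self.cmp(other)) so the two orderings can never disagree; clippy flags the hand-rolled version as non_canonical_partial_ord_impl. The canonical pairing on a standalone type:

    use std::cmp::Ordering;

    #[derive(PartialEq, Eq)]
    struct Pdu {
        event_id: String,
    }

    impl Ord for Pdu {
        fn cmp(&self, other: &Self) -> Ordering {
            self.event_id.cmp(&other.event_id)
        }
    }

    impl PartialOrd for Pdu {
        fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
            // Delegating guarantees PartialOrd and Ord never drift apart.
            Some(self.cmp(other))
        }
    }

    fn main() {
        let a = Pdu { event_id: "$a".into() };
        let b = Pdu { event_id: "$b".into() };
        assert!(a < b);
    }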

View file

@ -1,6 +1,6 @@
mod data;
pub use data::Data;
use ruma::{events::AnySyncTimelineEvent, push::PushConditionPowerLevelsCtx};
use ruma::events::AnySyncTimelineEvent;
use crate::{services, Error, PduEvent, Result};
use bytes::BytesMut;
@ -193,12 +193,6 @@ impl Service {
pdu: &Raw<AnySyncTimelineEvent>,
room_id: &RoomId,
) -> Result<&'a [Action]> {
let power_levels = PushConditionPowerLevelsCtx {
users: power_levels.users.clone(),
users_default: power_levels.users_default,
notifications: power_levels.notifications.clone(),
};
let ctx = PushConditionRoomCtx {
room_id: room_id.to_owned(),
member_count: 10_u32.into(), // TODO: get member count efficiently
@ -207,7 +201,9 @@ impl Service {
.users
.displayname(user)?
.unwrap_or_else(|| user.localpart().to_owned()),
power_levels: Some(power_levels),
users_power_levels: power_levels.users.clone(),
default_power_level: power_levels.users_default,
notification_power_levels: power_levels.notifications.clone(),
};
Ok(ruleset.get_actions(pdu, &ctx))

View file

@ -11,7 +11,6 @@ pub trait Data: Send + Sync {
) -> Result<()>;
/// Returns an iterator over the most recent read_receipts in a room that happened after the event with id `since`.
#[allow(clippy::type_complexity)]
fn readreceipts_since<'a>(
&'a self,
room_id: &RoomId,

View file

@ -92,8 +92,6 @@ impl Service {
));
}
services().rooms.event_handler.acl_check(origin, room_id)?;
// 1. Skip the PDU if we already have it as a timeline event
if let Some(pdu_id) = services().rooms.timeline.get_pdu_id(event_id)? {
return Ok(Some(pdu_id.to_vec()));
@ -119,15 +117,7 @@ impl Service {
.ok_or_else(|| Error::bad_database("Failed to find first pdu in db."))?;
let (incoming_pdu, val) = self
.handle_outlier_pdu(
origin,
&create_event,
event_id,
room_id,
value,
false,
pub_key_map,
)
.handle_outlier_pdu(origin, &create_event, event_id, room_id, value, pub_key_map)
.await?;
self.check_room_id(room_id, &incoming_pdu)?;
@ -184,22 +174,7 @@ impl Service {
}
if errors >= 5 {
// Timeout other events
match services()
.globals
.bad_event_ratelimiter
.write()
.unwrap()
.entry((*prev_id).to_owned())
{
hash_map::Entry::Vacant(e) => {
e.insert((Instant::now(), 1));
}
hash_map::Entry::Occupied(mut e) => {
*e.get_mut() = (Instant::now(), e.get().1 + 1)
}
}
continue;
break;
}
if let Some((pdu, json)) = eventid_info.remove(&*prev_id) {
@ -251,7 +226,7 @@ impl Service {
.write()
.unwrap()
.remove(&room_id.to_owned());
debug!(
warn!(
"Handling prev event {} took {}m{}s",
prev_id,
elapsed.as_secs() / 60,
@ -291,7 +266,6 @@ impl Service {
r
}
#[allow(clippy::type_complexity, clippy::too_many_arguments)]
#[tracing::instrument(skip(self, create_event, value, pub_key_map))]
fn handle_outlier_pdu<'a>(
&'a self,
@ -300,7 +274,6 @@ impl Service {
event_id: &'a EventId,
room_id: &'a RoomId,
mut value: BTreeMap<String, CanonicalJsonValue>,
auth_events_known: bool,
pub_key_map: &'a RwLock<BTreeMap<String, BTreeMap<String, Base64>>>,
) -> AsyncRecursiveType<'a, Result<(Arc<PduEvent>, BTreeMap<String, CanonicalJsonValue>)>> {
Box::pin(async move {
@ -342,7 +315,7 @@ impl Service {
Ok(ruma::signatures::Verified::Signatures) => {
// Redact
warn!("Calculated hash does not match: {}", event_id);
let obj = match ruma::canonical_json::redact(value, room_version_id, None) {
match ruma::canonical_json::redact(value, room_version_id, None) {
Ok(obj) => obj,
Err(_) => {
return Err(Error::BadRequest(
@ -350,17 +323,7 @@ impl Service {
"Redaction failed",
))
}
};
// Skip the PDU if it is redacted and we already have it as an outlier event
if services().rooms.timeline.get_pdu_json(event_id)?.is_some() {
return Err(Error::BadRequest(
ErrorKind::InvalidParam,
"Event was redacted and we already knew about it",
));
}
obj
}
Ok(ruma::signatures::Verified::All) => value,
};
@ -378,7 +341,6 @@ impl Service {
self.check_room_id(room_id, &incoming_pdu)?;
if !auth_events_known {
// 4. fetch any missing auth events doing all checks listed here starting at 1. These are not timeline events
// 5. Reject "due to auth events" if can't get all the auth events or some of the auth events are also rejected "due to auth events"
// NOTE: Step 5 is not applied anymore because it failed too often
@ -396,7 +358,6 @@ impl Service {
pub_key_map,
)
.await;
}
// 6. Reject "due to auth events" if the event doesn't pass auth based on the auth events
debug!(
@ -981,21 +942,14 @@ impl Service {
debug!("Resolving state");
let fetch_event = |id: &_| {
let lock = services().globals.stateres_mutex.lock();
let state = match state_res::resolve(room_version_id, &fork_states, auth_chain_sets, |id| {
let res = services().rooms.timeline.get_pdu(id);
if let Err(e) = &res {
error!("LOOK AT ME Failed to fetch event: {}", e);
}
res.ok().flatten()
};
let lock = services().globals.stateres_mutex.lock();
let state = match state_res::resolve(
room_version_id,
&fork_states,
auth_chain_sets,
fetch_event,
) {
}) {
Ok(new_state) => new_state,
Err(_) => {
return Err(Error::bad_database("State resolution failed, either an event could not be found or deserialization"));
@ -1032,7 +986,6 @@ impl Service {
/// b. Look at outlier pdu tree
/// c. Ask origin server over federation
/// d. TODO: Ask other servers over federation?
#[allow(clippy::type_complexity)]
#[tracing::instrument(skip_all)]
pub(crate) fn fetch_and_handle_outliers<'a>(
&'a self,
@ -1060,6 +1013,26 @@ impl Service {
let mut pdus = vec![];
for id in events {
if let Some((time, tries)) = services()
.globals
.bad_event_ratelimiter
.read()
.unwrap()
.get(&**id)
{
// Exponential backoff
let mut min_elapsed_duration =
Duration::from_secs(5 * 60) * (*tries) * (*tries);
if min_elapsed_duration > Duration::from_secs(60 * 60 * 24) {
min_elapsed_duration = Duration::from_secs(60 * 60 * 24);
}
if time.elapsed() < min_elapsed_duration {
info!("Backing off from {}", id);
continue;
}
}
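
The backoff blocks that move between loops in this hunk all apply the same formula: five minutes times the square of the retry count, capped at one day. Worked out as a standalone function:

    use std::time::Duration;

    // 5 minutes * tries^2, capped at 24 hours — the formula used above.
    fn backoff(tries: u32) -> Duration {
        let d = Duration::from_secs(5 * 60) * tries * tries;
        d.min(Duration::from_secs(60 * 60 * 24))
    }

    fn main() {
        assert_eq!(backoff(1), Duration::from_secs(300)); // 5 minutes
        assert_eq!(backoff(2), Duration::from_secs(1200)); // 20 minutes
        assert_eq!(backoff(17), Duration::from_secs(86_400)); // capped at 24 h
    }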
// a. Look in the main timeline (pduid_pdu tree)
// b. Look at outlier pdu tree
// (get_pdu_json checks both)
@ -1077,26 +1050,6 @@ impl Service {
let mut events_all = HashSet::new();
let mut i = 0;
while let Some(next_id) = todo_auth_events.pop() {
if let Some((time, tries)) = services()
.globals
.bad_event_ratelimiter
.read()
.unwrap()
.get(&*next_id)
{
// Exponential backoff
let mut min_elapsed_duration =
Duration::from_secs(5 * 60) * (*tries) * (*tries);
if min_elapsed_duration > Duration::from_secs(60 * 60 * 24) {
min_elapsed_duration = Duration::from_secs(60 * 60 * 24);
}
if time.elapsed() < min_elapsed_duration {
info!("Backing off from {}", next_id);
continue;
}
}
if events_all.contains(&next_id) {
continue;
}
@ -1107,7 +1060,7 @@ impl Service {
}
if let Ok(Some(_)) = services().rooms.timeline.get_pdu(&next_id) {
trace!("Found {} in db", next_id);
trace!("Found {} in db", id);
continue;
}
@ -1166,26 +1119,6 @@ impl Service {
}
for (next_id, value) in events_in_reverse_order.iter().rev() {
if let Some((time, tries)) = services()
.globals
.bad_event_ratelimiter
.read()
.unwrap()
.get(&**next_id)
{
// Exponential backoff
let mut min_elapsed_duration =
Duration::from_secs(5 * 60) * (*tries) * (*tries);
if min_elapsed_duration > Duration::from_secs(60 * 60 * 24) {
min_elapsed_duration = Duration::from_secs(60 * 60 * 24);
}
if time.elapsed() < min_elapsed_duration {
info!("Backing off from {}", next_id);
continue;
}
}
match self
.handle_outlier_pdu(
origin,
@ -1193,7 +1126,6 @@ impl Service {
next_id,
room_id,
value.clone(),
true,
pub_key_map,
)
.await
@ -1598,11 +1530,6 @@ impl Service {
}
};
if acl_event_content.allow.is_empty() {
// Ignore broken acl events
return Ok(());
}
if acl_event_content.is_allowed(server_name) {
Ok(())
} else {

View file

@ -14,7 +14,6 @@ use super::timeline::PduCount;
pub struct Service {
pub db: &'static dyn Data,
#[allow(clippy::type_complexity)]
pub lazy_load_waiting:
Mutex<HashMap<(OwnedUserId, OwnedDeviceId, OwnedRoomId, PduCount), HashSet<OwnedUserId>>>,
}

View file

@ -5,7 +5,6 @@ use ruma::{EventId, RoomId, UserId};
pub trait Data: Send + Sync {
fn add_relation(&self, from: u64, to: u64) -> Result<()>;
#[allow(clippy::type_complexity)]
fn relations_until<'a>(
&'a self,
user_id: &'a UserId,

View file

@ -40,7 +40,6 @@ impl Service {
}
}
#[allow(clippy::too_many_arguments)]
pub fn paginate_relations_with_filter(
&self,
sender_user: &UserId,
@ -83,7 +82,7 @@ impl Service {
services()
.rooms
.state_accessor
.user_can_see_event(sender_user, room_id, &pdu.event_id)
.user_can_see_event(sender_user, &room_id, &pdu.event_id)
.unwrap_or(false)
})
.take_while(|&(k, _)| Some(k) != to) // Stop at `to`
@ -107,7 +106,7 @@ impl Service {
let events_before: Vec<_> = services()
.rooms
.pdu_metadata
.relations_until(sender_user, room_id, target, from)?
.relations_until(sender_user, &room_id, target, from)?
.filter(|r| {
r.as_ref().map_or(true, |(_, pdu)| {
filter_event_type.as_ref().map_or(true, |t| &pdu.kind == t)
@ -130,7 +129,7 @@ impl Service {
services()
.rooms
.state_accessor
.user_can_see_event(sender_user, room_id, &pdu.event_id)
.user_can_see_event(sender_user, &room_id, &pdu.event_id)
.unwrap_or(false)
})
.take_while(|&(k, _)| Some(k) != to) // Stop at `to`

View file

@ -4,7 +4,6 @@ use ruma::RoomId;
pub trait Data: Send + Sync {
fn index_pdu(&self, shortroomid: u64, pdu_id: &[u8], message_body: &str) -> Result<()>;
#[allow(clippy::type_complexity)]
fn search_pdus<'a>(
&'a self,
room_id: &RoomId,

View file

@ -134,7 +134,7 @@ impl Service {
if serde_json::from_str::<SpaceChildEventContent>(pdu.content.get())
.ok()
.map(|c| c.via)
.and_then(|c| c.via)
.map_or(true, |v| v.is_empty())
{
continue;
@ -185,9 +185,7 @@ impl Service {
stack.push(children_ids);
}
} else {
let server = current_room
.server_name()
.expect("Room IDs should always have a server name");
let server = current_room.server_name();
if server == services().globals.server_name() {
continue;
}
@ -195,11 +193,11 @@ impl Service {
// Early return so the client can see some data already
break;
}
debug!("Asking {server} for /hierarchy");
warn!("Asking {server} for /hierarchy");
if let Ok(response) = services()
.sending
.send_federation_request(
server,
&server,
federation::space::get_hierarchy::v1::Request {
room_id: current_room.to_owned(),
suggested_only,
@ -237,7 +235,7 @@ impl Service {
.room
.allowed_room_ids
.into_iter()
.map(AllowRule::room_membership)
.map(|room| AllowRule::room_membership(room))
.collect(),
})
}
@ -247,7 +245,7 @@ impl Service {
.room
.allowed_room_ids
.into_iter()
.map(AllowRule::room_membership)
.map(|room| AllowRule::room_membership(room))
.collect(),
})
}
@ -315,7 +313,7 @@ impl Service {
canonical_alias: services()
.rooms
.state_accessor
.room_state_get(room_id, &StateEventType::RoomCanonicalAlias, "")?
.room_state_get(&room_id, &StateEventType::RoomCanonicalAlias, "")?
.map_or(Ok(None), |s| {
serde_json::from_str(s.content.get())
.map(|c: RoomCanonicalAliasEventContent| c.alias)
@ -323,11 +321,11 @@ impl Service {
Error::bad_database("Invalid canonical alias event in database.")
})
})?,
name: services().rooms.state_accessor.get_name(room_id)?,
name: services().rooms.state_accessor.get_name(&room_id)?,
num_joined_members: services()
.rooms
.state_cache
.room_joined_count(room_id)?
.room_joined_count(&room_id)?
.unwrap_or_else(|| {
warn!("Room {} has no member count", room_id);
0
@ -338,7 +336,7 @@ impl Service {
topic: services()
.rooms
.state_accessor
.room_state_get(room_id, &StateEventType::RoomTopic, "")?
.room_state_get(&room_id, &StateEventType::RoomTopic, "")?
.map_or(Ok(None), |s| {
serde_json::from_str(s.content.get())
.map(|c: RoomTopicEventContent| Some(c.topic))
@ -350,7 +348,7 @@ impl Service {
world_readable: services()
.rooms
.state_accessor
.room_state_get(room_id, &StateEventType::RoomHistoryVisibility, "")?
.room_state_get(&room_id, &StateEventType::RoomHistoryVisibility, "")?
.map_or(Ok(false), |s| {
serde_json::from_str(s.content.get())
.map(|c: RoomHistoryVisibilityEventContent| {
@ -365,7 +363,7 @@ impl Service {
guest_can_join: services()
.rooms
.state_accessor
.room_state_get(room_id, &StateEventType::RoomGuestAccess, "")?
.room_state_get(&room_id, &StateEventType::RoomGuestAccess, "")?
.map_or(Ok(false), |s| {
serde_json::from_str(s.content.get())
.map(|c: RoomGuestAccessEventContent| {
@ -378,7 +376,7 @@ impl Service {
avatar_url: services()
.rooms
.state_accessor
.room_state_get(room_id, &StateEventType::RoomAvatar, "")?
.room_state_get(&room_id, &StateEventType::RoomAvatar, "")?
.map(|s| {
serde_json::from_str(s.content.get())
.map(|c: RoomAvatarEventContent| c.url)
@ -391,7 +389,7 @@ impl Service {
let join_rule = services()
.rooms
.state_accessor
.room_state_get(room_id, &StateEventType::RoomJoinRules, "")?
.room_state_get(&room_id, &StateEventType::RoomJoinRules, "")?
.map(|s| {
serde_json::from_str(s.content.get())
.map(|c: RoomJoinRulesEventContent| c.join_rule)
@ -417,7 +415,7 @@ impl Service {
room_type: services()
.rooms
.state_accessor
.room_state_get(room_id, &StateEventType::RoomCreate, "")?
.room_state_get(&room_id, &StateEventType::RoomCreate, "")?
.map(|s| {
serde_json::from_str::<RoomCreateEventContent>(s.content.get()).map_err(|e| {
error!("Invalid room create event in database: {}", e);
@ -457,7 +455,7 @@ impl Service {
SpaceRoomJoinRule::Invite => services()
.rooms
.state_cache
.is_joined(sender_user, room_id)?,
.is_joined(sender_user, &room_id)?,
_ => false,
};
@ -481,7 +479,8 @@ impl Service {
match join_rule {
JoinRule::Restricted(r) => {
for rule in &r.allow {
if let join_rules::AllowRule::RoomMembership(rm) = rule {
match rule {
join_rules::AllowRule::RoomMembership(rm) => {
if let Ok(true) = services()
.rooms
.state_cache
@ -490,6 +489,8 @@ impl Service {
return Ok(true);
}
}
_ => {}
}
}
Ok(false)

View file

@ -6,7 +6,6 @@ use std::{
pub use data::Data;
use ruma::{
api::client::error::ErrorKind,
events::{
room::{create::RoomCreateEventContent, member::MembershipState},
AnyStrippedStateEvent, StateEventType, TimelineEventType,
@ -41,7 +40,7 @@ impl Service {
services()
.rooms
.state_compressor
.parse_compressed_state_event(new)
.parse_compressed_state_event(&new)
.ok()
.map(|(_, id)| id)
}) {
@ -332,7 +331,7 @@ impl Service {
"",
)?;
let create_event_content: RoomCreateEventContent = create_event
let create_event_content: Option<RoomCreateEventContent> = create_event
.as_ref()
.map(|create_event| {
serde_json::from_str(create_event.content.get()).map_err(|e| {
@ -340,10 +339,14 @@ impl Service {
Error::bad_database("Invalid create event in db.")
})
})
.transpose()?
.ok_or_else(|| Error::BadRequest(ErrorKind::InvalidParam, "No create event found"))?;
Ok(create_event_content.room_version)
.transpose()?;
let room_version = create_event_content
.map(|create_event| create_event.room_version)
.ok_or_else(|| {
warn!("Invalid room version for room {room_id}");
Error::BadDatabase("Invalid room version")
})?;
Ok(room_version)
}
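
Both variants read the room version out of the m.room.create state event; they differ only in how a missing or malformed event is reported (BadRequest versus BadDatabase plus a warning). For reference, the content being deserialized looks roughly like this (a sketch with invented values; the exact set of required fields depends on the ruma version in use):

let raw = r#"{"creator": "@alice:conduit.rs", "room_version": "10"}"#;
let content: RoomCreateEventContent = serde_json::from_str(raw).unwrap();
assert_eq!(content.room_version, ruma::RoomVersionId::V10);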
pub fn get_room_shortstatehash(&self, room_id: &RoomId) -> Result<Option<u64>> {
@ -412,7 +415,7 @@ impl Service {
services()
.rooms
.state_compressor
.parse_compressed_state_event(compressed)
.parse_compressed_state_event(&compressed)
.ok()
})
.filter_map(|(shortstatekey, event_id)| {

View file

@ -16,7 +16,7 @@ use ruma::{
},
StateEventType,
},
EventId, JsOption, OwnedServerName, OwnedUserId, RoomId, ServerName, UserId,
EventId, OwnedServerName, OwnedUserId, RoomId, ServerName, UserId,
};
use tracing::error;
@ -180,7 +180,7 @@ impl Service {
return Ok(*visibility);
}
let currently_member = services().rooms.state_cache.is_joined(user_id, room_id)?;
let currently_member = services().rooms.state_cache.is_joined(&user_id, &room_id)?;
let history_visibility = self
.state_get(shortstatehash, &StateEventType::RoomHistoryVisibility, "")?
@ -197,11 +197,11 @@ impl Service {
HistoryVisibility::Shared => currently_member,
HistoryVisibility::Invited => {
// Allow if any member on requesting server was AT LEAST invited, else deny
self.user_was_invited(shortstatehash, user_id)
self.user_was_invited(shortstatehash, &user_id)
}
HistoryVisibility::Joined => {
// Allow if any member on requested server was joined, else deny
self.user_was_joined(shortstatehash, user_id)
self.user_was_joined(shortstatehash, &user_id)
}
_ => {
error!("Unknown history visibility {history_visibility}");
@ -221,10 +221,10 @@ impl Service {
/// the room's history_visibility at that event's state.
#[tracing::instrument(skip(self, user_id, room_id))]
pub fn user_can_see_state_events(&self, user_id: &UserId, room_id: &RoomId) -> Result<bool> {
let currently_member = services().rooms.state_cache.is_joined(user_id, room_id)?;
let currently_member = services().rooms.state_cache.is_joined(&user_id, &room_id)?;
let history_visibility = self
.room_state_get(room_id, &StateEventType::RoomHistoryVisibility, "")?
.room_state_get(&room_id, &StateEventType::RoomHistoryVisibility, "")?
.map_or(Ok(HistoryVisibility::Shared), |s| {
serde_json::from_str(s.content.get())
.map(|c: RoomHistoryVisibilityEventContent| c.history_visibility)
@ -276,26 +276,20 @@ impl Service {
services()
.rooms
.state_accessor
.room_state_get(room_id, &StateEventType::RoomName, "")?
.room_state_get(&room_id, &StateEventType::RoomName, "")?
.map_or(Ok(None), |s| {
serde_json::from_str(s.content.get())
.map(|c: RoomNameEventContent| Some(c.name))
.map_err(|e| {
error!(
"Invalid room name event in database for room {}. {}",
room_id, e
);
Error::bad_database("Invalid room name event in database.")
})
.map(|c: RoomNameEventContent| c.name)
.map_err(|_| Error::bad_database("Invalid room name event in database."))
})
}
pub fn get_avatar(&self, room_id: &RoomId) -> Result<JsOption<RoomAvatarEventContent>> {
pub fn get_avatar(&self, room_id: &RoomId) -> Result<Option<RoomAvatarEventContent>> {
services()
.rooms
.state_accessor
.room_state_get(room_id, &StateEventType::RoomAvatar, "")?
.map_or(Ok(JsOption::Undefined), |s| {
.room_state_get(&room_id, &StateEventType::RoomAvatar, "")?
.map_or(Ok(None), |s| {
serde_json::from_str(s.content.get())
.map_err(|_| Error::bad_database("Invalid room avatar event in database."))
})
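
The type change here is the interesting part: ruma's JsOption (imported in the next branch above) is a three-state Option that distinguishes a JSON field that is absent entirely from one that is present but explicitly null; master collapses that into a plain Option. A short sketch of the semantics:

use ruma::JsOption;

let set: JsOption<&str> = JsOption::Some("mxc://conduit.rs/avatar"); // field present
let cleared: JsOption<&str> = JsOption::Null;                        // present, explicitly null
let untouched: JsOption<&str> = JsOption::Undefined;                 // absent from the JSON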
@ -309,7 +303,7 @@ impl Service {
services()
.rooms
.state_accessor
.room_state_get(room_id, &StateEventType::RoomMember, user_id.as_str())?
.room_state_get(&room_id, &StateEventType::RoomMember, user_id.as_str())?
.map_or(Ok(None), |s| {
serde_json::from_str(s.content.get())
.map_err(|_| Error::bad_database("Invalid room member event in database."))

View file

@ -78,7 +78,6 @@ pub trait Data: Send + Sync {
) -> Box<dyn Iterator<Item = Result<OwnedRoomId>> + 'a>;
/// Returns an iterator over all rooms a user was invited to.
#[allow(clippy::type_complexity)]
fn rooms_invited<'a>(
&'a self,
user_id: &UserId,
@ -97,7 +96,6 @@ pub trait Data: Send + Sync {
) -> Result<Option<Vec<Raw<AnyStrippedStateEvent>>>>;
/// Returns an iterator over all rooms a user left.
#[allow(clippy::type_complexity)]
fn rooms_left<'a>(
&'a self,
user_id: &UserId,

View file

@ -16,7 +16,6 @@ use self::data::StateDiff;
pub struct Service {
pub db: &'static dyn Data,
#[allow(clippy::type_complexity)]
pub stateinfo_cache: Mutex<
LruCache<
u64,
@ -34,7 +33,6 @@ pub type CompressedStateEvent = [u8; 2 * size_of::<u64>()];
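
A CompressedStateEvent packs two u64 shortids, the shortstatekey followed by the shorteventid, into a fixed 16-byte array, which keeps full room states cheap to store and to diff as flat byte sets. A sketch of the packing; the big-endian byte order here is an assumption for illustration, the real layout is a detail of the database layer:

type Compressed = [u8; 16];

fn compress(shortstatekey: u64, shorteventid: u64) -> Compressed {
    let mut out = [0u8; 16];
    out[..8].copy_from_slice(&shortstatekey.to_be_bytes());
    out[8..].copy_from_slice(&shorteventid.to_be_bytes());
    out
}

fn decompress(c: &Compressed) -> (u64, u64) {
    (
        u64::from_be_bytes(c[..8].try_into().unwrap()),
        u64::from_be_bytes(c[8..].try_into().unwrap()),
    )
}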
impl Service {
/// Returns a stack with info on shortstatehash, full state, added diff and removed diff for the selected shortstatehash and each parent layer.
#[allow(clippy::type_complexity)]
#[tracing::instrument(skip(self))]
pub fn load_shortstatehash_info(
&self,
@ -133,7 +131,6 @@ impl Service {
/// * `statediffremoved` - Removed from base. Each vec is shortstatekey+shorteventid
/// * `diff_to_sibling` - Approximately how much the diff grows each time for this layer
/// * `parent_states` - A stack with info on shortstatehash, full state, added diff and removed diff for each parent layer
#[allow(clippy::type_complexity)]
#[tracing::instrument(skip(
self,
statediffnew,
@ -167,7 +164,7 @@ impl Service {
for removed in statediffremoved.iter() {
if !parent_new.remove(removed) {
// It was not added in the parent and we removed it
parent_removed.insert(*removed);
parent_removed.insert(removed.clone());
}
// Else it was added in the parent and we removed it again. We can forget this change
}
@ -175,7 +172,7 @@ impl Service {
for new in statediffnew.iter() {
if !parent_removed.remove(new) {
// It was not touched in the parent and we added it
parent_new.insert(*new);
parent_new.insert(new.clone());
}
// Else it was removed in the parent and we added it again. We can forget this change
}
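
The two loops in this hunk fold a child layer's diff into its parent: a removal cancels a matching addition in the parent and is recorded only if nothing was cancelled, and additions behave symmetrically. A toy version of the same set algebra, using u8 in place of CompressedStateEvent:

use std::collections::HashSet;

// The parent added {1, 2}; the child then removed 2 (cancels the parent's
// addition) and 3 (untouched by the parent, so it must be recorded).
let mut parent_new: HashSet<u8> = HashSet::from([1, 2]);
let mut parent_removed: HashSet<u8> = HashSet::new();
for removed in [2u8, 3] {
    if !parent_new.remove(&removed) {
        parent_removed.insert(removed);
    }
}
assert_eq!(parent_new, HashSet::from([1]));
assert_eq!(parent_removed, HashSet::from([3]));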
@ -220,7 +217,7 @@ impl Service {
for removed in statediffremoved.iter() {
if !parent_new.remove(removed) {
// It was not added in the parent and we removed it
parent_removed.insert(*removed);
parent_removed.insert(removed.clone());
}
// Else it was added in the parent and we removed it again. We can forget this change
}
@ -228,7 +225,7 @@ impl Service {
for new in statediffnew.iter() {
if !parent_removed.remove(new) {
// It was not touched in the parent and we added it
parent_new.insert(*new);
parent_new.insert(new.clone());
}
// Else it was removed in the parent and we added it again. We can forget this change
}
@ -256,7 +253,6 @@ impl Service {
}
/// Returns the new shortstatehash, and the state diff from the previous room state
#[allow(clippy::type_complexity)]
pub fn save_state(
&self,
room_id: &RoomId,

View file

@ -2,7 +2,6 @@ use crate::{PduEvent, Result};
use ruma::{api::client::threads::get_threads::v1::IncludeThreads, OwnedUserId, RoomId, UserId};
pub trait Data: Send + Sync {
#[allow(clippy::type_complexity)]
fn threads_until<'a>(
&'a self,
user_id: &'a UserId,

View file

@ -26,7 +26,7 @@ impl Service {
self.db.threads_until(user_id, room_id, until, include)
}
pub fn add_to_thread(&self, root_event_id: &EventId, pdu: &PduEvent) -> Result<()> {
pub fn add_to_thread<'a>(&'a self, root_event_id: &EventId, pdu: &PduEvent) -> Result<()> {
let root_id = &services()
.rooms
.timeline
@ -103,7 +103,7 @@ impl Service {
}
let mut users = Vec::new();
if let Some(userids) = self.db.get_participants(root_id)? {
if let Some(userids) = self.db.get_participants(&root_id)? {
users.extend_from_slice(&userids);
users.push(pdu.sender.clone());
} else {

View file

@ -66,7 +66,6 @@ pub trait Data: Send + Sync {
/// Returns an iterator over all events and their tokens in a room that happened before the
/// event with id `until` in reverse-chronological order.
#[allow(clippy::type_complexity)]
fn pdus_until<'a>(
&'a self,
user_id: &UserId,
@ -76,7 +75,6 @@ pub trait Data: Send + Sync {
/// Returns an iterator over all events in a room that happened after the event with id `from`
/// in chronological order.
#[allow(clippy::type_complexity)]
fn pdus_after<'a>(
&'a self,
user_id: &UserId,

View file

@ -58,8 +58,8 @@ impl PduCount {
}
pub fn try_from_string(token: &str) -> Result<Self> {
if let Some(stripped) = token.strip_prefix('-') {
stripped.parse().map(PduCount::Backfilled)
if token.starts_with('-') {
token[1..].parse().map(PduCount::Backfilled)
} else {
token.parse().map(PduCount::Normal)
}
@ -90,6 +90,18 @@ impl Ord for PduCount {
}
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn comparisons() {
assert!(PduCount::Normal(1) < PduCount::Normal(2));
assert!(PduCount::Backfilled(2) < PduCount::Backfilled(1));
assert!(PduCount::Normal(1) > PduCount::Backfilled(1));
assert!(PduCount::Backfilled(1) < PduCount::Normal(1));
}
}
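
These assertions pin down the token ordering: Normal counts grow forward in time, Backfilled counts grow backwards (a higher backfill count lies further in the past), and every backfilled event sorts before every normal one. An Ord implementation consistent with the tests would look roughly like this (a sketch, not necessarily the exact code elided from this diff):

use std::cmp::Ordering;

impl Ord for PduCount {
    fn cmp(&self, other: &Self) -> Ordering {
        match (self, other) {
            (PduCount::Normal(a), PduCount::Normal(b)) => a.cmp(b),
            (PduCount::Backfilled(a), PduCount::Backfilled(b)) => b.cmp(a), // reversed
            (PduCount::Normal(_), PduCount::Backfilled(_)) => Ordering::Greater,
            (PduCount::Backfilled(_), PduCount::Normal(_)) => Ordering::Less,
        }
    }
}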
pub struct Service {
pub db: &'static dyn Data,
@ -100,7 +112,7 @@ pub struct Service {
impl Service {
#[tracing::instrument(skip(self))]
pub fn first_pdu_in_room(&self, room_id: &RoomId) -> Result<Option<Arc<PduEvent>>> {
self.all_pdus(user_id!("@doesntmatter:conduit.rs"), room_id)?
self.all_pdus(&user_id!("@doesntmatter:conduit.rs"), &room_id)?
.next()
.map(|o| o.map(|(_, p)| Arc::new(p)))
.transpose()
@ -308,25 +320,12 @@ impl Service {
let mut notifies = Vec::new();
let mut highlights = Vec::new();
let mut push_target = services()
for user in services()
.rooms
.state_cache
.get_our_real_users(&pdu.room_id)?;
if pdu.kind == TimelineEventType::RoomMember {
if let Some(state_key) = &pdu.state_key {
let target_user_id = UserId::parse(state_key.clone())
.expect("This state_key was previously validated");
if !push_target.contains(&target_user_id) {
let mut target = push_target.as_ref().clone();
target.insert(target_user_id);
push_target = Arc::new(target);
}
}
}
for user in push_target.iter() {
.get_our_real_users(&pdu.room_id)?
.iter()
{
// Don't notify the user of their own events
if user == &pdu.sender {
continue;
@ -459,7 +458,7 @@ impl Service {
let to_conduit = body.starts_with(&format!("{server_user}: "))
|| body.starts_with(&format!("{server_user} "))
|| body == format!("{server_user}:")
|| body == server_user;
|| body == format!("{server_user}");
// This will evaluate to false if the emergency password is set up so that
// the administrator can execute commands as conduit
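
With server_user being "@conduit:<server_name>", the checks above treat a message as addressed to the admin bot when the body starts with the mention followed by ": " or a space, or consists of the mention alone (with or without a trailing colon). A hypothetical extraction of the predicate, with a usage example on an invented server name:

fn addressed_to_conduit(body: &str, server_user: &str) -> bool {
    body.starts_with(&format!("{server_user}: "))
        || body.starts_with(&format!("{server_user} "))
        || body == format!("{server_user}:")
        || body == server_user
}

assert!(addressed_to_conduit("@conduit:example.com: help", "@conduit:example.com"));
assert!(addressed_to_conduit("@conduit:example.com", "@conduit:example.com"));
assert!(!addressed_to_conduit("hello @conduit:example.com", "@conduit:example.com"));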
@ -843,7 +842,7 @@ impl Service {
let target = pdu
.state_key()
.filter(|v| v.starts_with('@'))
.filter(|v| v.starts_with("@"))
.unwrap_or(sender.as_str());
let server_name = services().globals.server_name();
let server_user = format!("@conduit:{}", server_name);
@ -851,7 +850,7 @@ impl Service {
.map_err(|_| Error::bad_database("Invalid content in pdu."))?;
if content.membership == MembershipState::Leave {
if target == server_user {
if target == &server_user {
warn!("Conduit user cannot leave from admins room");
return Err(Error::BadRequest(
ErrorKind::Forbidden,
@ -877,7 +876,7 @@ impl Service {
}
if content.membership == MembershipState::Ban && pdu.state_key().is_some() {
if target == server_user {
if target == &server_user {
warn!("Conduit user cannot be banned in admins room");
return Err(Error::BadRequest(
ErrorKind::Forbidden,
@ -1049,7 +1048,7 @@ impl Service {
#[tracing::instrument(skip(self, room_id))]
pub async fn backfill_if_required(&self, room_id: &RoomId, from: PduCount) -> Result<()> {
let first_pdu = self
.all_pdus(user_id!("@doesntmatter:conduit.rs"), room_id)?
.all_pdus(&user_id!("@doesntmatter:conduit.rs"), &room_id)?
.next()
.expect("Room is not empty")?;
@ -1061,7 +1060,7 @@ impl Service {
let power_levels: RoomPowerLevelsEventContent = services()
.rooms
.state_accessor
.room_state_get(room_id, &StateEventType::RoomPowerLevels, "")?
.room_state_get(&room_id, &StateEventType::RoomPowerLevels, "")?
.map(|ev| {
serde_json::from_str(ev.content.get())
.map_err(|_| Error::bad_database("invalid m.room.power_levels event"))
@ -1092,9 +1091,11 @@ impl Service {
.await;
match response {
Ok(response) => {
let pub_key_map = RwLock::new(BTreeMap::new());
let mut pub_key_map = RwLock::new(BTreeMap::new());
for pdu in response.pdus {
if let Err(e) = self.backfill_pdu(backfill_server, pdu, &pub_key_map).await
if let Err(e) = self
.backfill_pdu(backfill_server, pdu, &mut pub_key_map)
.await
{
warn!("Failed to add backfilled pdu: {e}");
}
@ -1141,7 +1142,7 @@ impl Service {
services()
.rooms
.event_handler
.handle_incoming_pdu(origin, &event_id, &room_id, value, false, pub_key_map)
.handle_incoming_pdu(origin, &event_id, &room_id, value, false, &pub_key_map)
.await?;
let value = self.get_pdu_json(&event_id)?.expect("We just created it");
@ -1174,7 +1175,8 @@ impl Service {
drop(insert_lock);
if pdu.kind == TimelineEventType::RoomMessage {
match pdu.kind {
TimelineEventType::RoomMessage => {
#[derive(Deserialize)]
struct ExtractBody {
body: Option<String>,
@ -1190,22 +1192,11 @@ impl Service {
.index_pdu(shortroomid, &pdu_id, &body)?;
}
}
_ => {}
}
drop(mutex_lock);
info!("Prepended backfill pdu");
Ok(())
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn comparisons() {
assert!(PduCount::Normal(1) < PduCount::Normal(2));
assert!(PduCount::Backfilled(2) < PduCount::Backfilled(1));
assert!(PduCount::Normal(1) > PduCount::Backfilled(1));
assert!(PduCount::Backfilled(1) < PduCount::Normal(1));
}
}

View file

@ -5,7 +5,6 @@ use crate::Result;
use super::{OutgoingKind, SendingEventType};
pub trait Data: Send + Sync {
#[allow(clippy::type_complexity)]
fn active_requests<'a>(
&'a self,
) -> Box<dyn Iterator<Item = Result<(Vec<u8>, OutgoingKind, SendingEventType)>> + 'a>;

View file

@ -131,7 +131,7 @@ impl Service {
for (key, outgoing_kind, event) in self.db.active_requests().filter_map(|r| r.ok()) {
let entry = initial_transactions
.entry(outgoing_kind.clone())
.or_default();
.or_insert_with(Vec::new);
if entry.len() > 30 {
warn!(

View file

@ -1,6 +1,6 @@
mod data;
use std::{
collections::{BTreeMap, BTreeSet},
collections::BTreeMap,
mem,
sync::{Arc, Mutex},
};
@ -28,13 +28,12 @@ use crate::{services, Error, Result};
pub struct SlidingSyncCache {
lists: BTreeMap<String, SyncRequestList>,
subscriptions: BTreeMap<OwnedRoomId, sync_events::v4::RoomSubscription>,
known_rooms: BTreeMap<String, BTreeMap<OwnedRoomId, u64>>, // For every room, the roomsince number
known_rooms: BTreeMap<String, BTreeMap<OwnedRoomId, bool>>,
extensions: ExtensionsConfig,
}
pub struct Service {
pub db: &'static dyn Data,
#[allow(clippy::type_complexity)]
pub connections:
Mutex<BTreeMap<(OwnedUserId, OwnedDeviceId, String), Arc<Mutex<SlidingSyncCache>>>>,
}
@ -62,7 +61,7 @@ impl Service {
user_id: OwnedUserId,
device_id: OwnedDeviceId,
request: &mut sync_events::v4::Request,
) -> BTreeMap<String, BTreeMap<OwnedRoomId, u64>> {
) -> BTreeMap<String, BTreeMap<OwnedRoomId, bool>> {
let Some(conn_id) = request.conn_id.clone() else {
return BTreeMap::new();
};
@ -128,7 +127,6 @@ impl Service {
}
}
(_, Some(cached_filters)) => list.filters = Some(cached_filters),
(Some(list_filters), _) => list.filters = Some(list_filters.clone()),
(_, _) => {}
}
if list.bump_event_types.is_empty() {
@ -138,18 +136,12 @@ impl Service {
cached.lists.insert(list_id.clone(), list.clone());
}
cached.subscriptions.extend(
request
.room_subscriptions
.iter()
.map(|(k, v)| (k.clone(), v.clone())),
);
request.room_subscriptions.extend(
cached
.subscriptions
.iter()
.map(|(k, v)| (k.clone(), v.clone())),
);
.extend(request.room_subscriptions.clone().into_iter());
request
.room_subscriptions
.extend(cached.subscriptions.clone().into_iter());
request.extensions.e2ee.enabled = request
.extensions
@ -218,8 +210,7 @@ impl Service {
device_id: OwnedDeviceId,
conn_id: String,
list_id: String,
new_cached_rooms: BTreeSet<OwnedRoomId>,
globalsince: u64,
new_cached_rooms: BTreeMap<OwnedRoomId, bool>,
) {
let mut cache = self.connections.lock().unwrap();
let cached = Arc::clone(
@ -237,20 +228,7 @@ impl Service {
let cached = &mut cached.lock().unwrap();
drop(cache);
for (roomid, lastsince) in cached
.known_rooms
.entry(list_id.clone())
.or_default()
.iter_mut()
{
if !new_cached_rooms.contains(roomid) {
*lastsince = 0;
}
}
let list = cached.known_rooms.entry(list_id).or_default();
for roomid in new_cached_rooms {
list.insert(roomid, globalsince);
}
cached.known_rooms.insert(list_id, new_cached_rooms);
}
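
The two branches differ in what the per-list cache remembers: next keeps, for every room, the globalsince at which the client last saw it, resetting rooms that dropped out of the response to 0 so they are resent in full, while master simply replaces the map with booleans. A toy version of next's bookkeeping, using plain Strings for room ids to stay self-contained:

use std::collections::{BTreeMap, BTreeSet};

fn update_known_rooms(
    known: &mut BTreeMap<String, BTreeMap<String, u64>>, // list_id -> room -> last since
    list_id: String,
    new_rooms: BTreeSet<String>,
    globalsince: u64,
) {
    let list = known.entry(list_id).or_default();
    // Rooms no longer in the response are reset so the next request resends them fully.
    for (room, lastsince) in list.iter_mut() {
        if !new_rooms.contains(room) {
            *lastsince = 0;
        }
    }
    for room in new_rooms {
        list.insert(room, globalsince);
    }
}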
/// Check if account is deactivated

View file

@ -116,11 +116,9 @@ impl Error {
Self::BadRequest(kind, _) => (
kind.clone(),
match kind {
WrongRoomKeysVersion { .. }
| Forbidden
| GuestAccessForbidden
| ThreepidAuthFailed
| ThreepidDenied => StatusCode::FORBIDDEN,
Forbidden | GuestAccessForbidden | ThreepidAuthFailed | ThreepidDenied => {
StatusCode::FORBIDDEN
}
Unauthorized | UnknownToken { .. } | MissingToken => StatusCode::UNAUTHORIZED,
NotFound | Unrecognized => StatusCode::NOT_FOUND,
LimitExceeded { .. } => StatusCode::TOO_MANY_REQUESTS,