Compare commits
No commits in common. "next" and "v0.3.0" have entirely different histories.
219 changed files with 20728 additions and 29916 deletions
|
@ -25,4 +25,4 @@ docker-compose*
|
||||||
rustfmt.toml
|
rustfmt.toml
|
||||||
|
|
||||||
# Documentation
|
# Documentation
|
||||||
#*.md
|
*.md
|
||||||
|
|
5
.envrc
5
.envrc
|
@ -1,5 +0,0 @@
|
||||||
#!/usr/bin/env bash
|
|
||||||
|
|
||||||
use flake
|
|
||||||
|
|
||||||
PATH_add bin
|
|
12
.gitignore
vendored
12
.gitignore
vendored
|
@ -31,6 +31,7 @@ modules.xml
|
||||||
|
|
||||||
### vscode ###
|
### vscode ###
|
||||||
.vscode/*
|
.vscode/*
|
||||||
|
!.vscode/settings.json
|
||||||
!.vscode/tasks.json
|
!.vscode/tasks.json
|
||||||
!.vscode/launch.json
|
!.vscode/launch.json
|
||||||
!.vscode/extensions.json
|
!.vscode/extensions.json
|
||||||
|
@ -56,18 +57,9 @@ $RECYCLE.BIN/
|
||||||
*.lnk
|
*.lnk
|
||||||
|
|
||||||
# Conduit
|
# Conduit
|
||||||
|
Rocket.toml
|
||||||
conduit.toml
|
conduit.toml
|
||||||
conduit.db
|
conduit.db
|
||||||
|
|
||||||
# Etc.
|
# Etc.
|
||||||
**/*.rs.bk
|
**/*.rs.bk
|
||||||
cached_target
|
|
||||||
|
|
||||||
# Nix artifacts
|
|
||||||
/result*
|
|
||||||
|
|
||||||
# Direnv cache
|
|
||||||
/.direnv
|
|
||||||
|
|
||||||
# Gitlab CI cache
|
|
||||||
/.gitlab-ci.d
|
|
||||||
|
|
433
.gitlab-ci.yml
433
.gitlab-ci.yml
|
@ -1,180 +1,315 @@
|
||||||
stages:
|
stages:
|
||||||
- ci
|
- build
|
||||||
- artifacts
|
- build docker image
|
||||||
- publish
|
- test
|
||||||
|
- upload artifacts
|
||||||
|
|
||||||
variables:
|
variables:
|
||||||
# Makes some things print in color
|
GIT_SUBMODULE_STRATEGY: recursive
|
||||||
TERM: ansi
|
FF_USE_FASTZIP: 1
|
||||||
|
CACHE_COMPRESSION_LEVEL: fastest
|
||||||
|
# Docker in Docker
|
||||||
|
DOCKER_HOST: tcp://docker:2375/
|
||||||
|
DOCKER_TLS_CERTDIR: ""
|
||||||
|
DOCKER_DRIVER: overlay2
|
||||||
|
|
||||||
|
# --------------------------------------------------------------------- #
|
||||||
|
# Cargo: Compiling for different architectures #
|
||||||
|
# --------------------------------------------------------------------- #
|
||||||
|
|
||||||
|
.build-cargo-shared-settings:
|
||||||
|
stage: "build"
|
||||||
|
needs: []
|
||||||
|
rules:
|
||||||
|
- if: '$CI_COMMIT_BRANCH == "master"'
|
||||||
|
- if: '$CI_COMMIT_BRANCH == "next"'
|
||||||
|
- if: "$CI_COMMIT_TAG"
|
||||||
|
- if: '($CI_MERGE_REQUEST_APPROVED == "true") || $BUILD_EVERYTHING' # Once MR is approved, test all builds. Or if BUILD_EVERYTHING is set.
|
||||||
|
interruptible: true
|
||||||
|
image: "registry.gitlab.com/jfowl/conduit-containers/rust-with-tools:latest"
|
||||||
|
tags: ["docker"]
|
||||||
|
services: ["docker:dind"]
|
||||||
|
variables:
|
||||||
|
SHARED_PATH: $CI_PROJECT_DIR/shared
|
||||||
|
CARGO_PROFILE_RELEASE_LTO: "true"
|
||||||
|
CARGO_PROFILE_RELEASE_CODEGEN_UNITS: "1"
|
||||||
|
CARGO_INCREMENTAL: "false" # https://matklad.github.io/2021/09/04/fast-rust-builds.html#ci-workflow
|
||||||
before_script:
|
before_script:
|
||||||
# Enable nix-command and flakes
|
- 'echo "Building for target $TARGET"'
|
||||||
- if command -v nix > /dev/null; then echo "experimental-features = nix-command flakes" >> /etc/nix/nix.conf; fi
|
- "rustup show && rustc --version && cargo --version" # Print version info for debugging
|
||||||
|
# fix cargo and rustup mounts from this container (https://gitlab.com/gitlab-org/gitlab-foss/-/issues/41227)
|
||||||
# Add our own binary cache
|
- "mkdir -p $SHARED_PATH/cargo"
|
||||||
- if command -v nix > /dev/null; then echo "extra-substituters = https://nix.computer.surgery/conduit" >> /etc/nix/nix.conf; fi
|
- "cp -r $CARGO_HOME/bin $SHARED_PATH/cargo"
|
||||||
- if command -v nix > /dev/null; then echo "extra-trusted-public-keys = conduit:ZGAf6P6LhNvnoJJ3Me3PRg7tlLSrPxcQ2RiE5LIppjo=" >> /etc/nix/nix.conf; fi
|
- "cp -r $RUSTUP_HOME $SHARED_PATH"
|
||||||
|
- "export CARGO_HOME=$SHARED_PATH/cargo RUSTUP_HOME=$SHARED_PATH/rustup"
|
||||||
# Add crane binary cache
|
# If provided, bring in caching through sccache, which uses an external S3 endpoint to store compilation results.
|
||||||
- if command -v nix > /dev/null; then echo "extra-substituters = https://crane.cachix.org" >> /etc/nix/nix.conf; fi
|
- if [ -n "${SCCACHE_ENDPOINT}" ]; then export RUSTC_WRAPPER=/sccache; fi
|
||||||
- if command -v nix > /dev/null; then echo "extra-trusted-public-keys = crane.cachix.org-1:8Scfpmn9w+hGdXH/Q9tTLiYAE/2dnJYRJP7kl80GuRk=" >> /etc/nix/nix.conf; fi
|
|
||||||
|
|
||||||
# Add nix-community binary cache
|
|
||||||
- if command -v nix > /dev/null; then echo "extra-substituters = https://nix-community.cachix.org" >> /etc/nix/nix.conf; fi
|
|
||||||
- if command -v nix > /dev/null; then echo "extra-trusted-public-keys = nix-community.cachix.org-1:mB9FSh9qf2dCimDSUo8Zy7bkq5CX+/rkCWyvRCYg3Fs=" >> /etc/nix/nix.conf; fi
|
|
||||||
|
|
||||||
# Install direnv and nix-direnv
|
|
||||||
- if command -v nix > /dev/null; then nix-env -iA nixpkgs.direnv nixpkgs.nix-direnv; fi
|
|
||||||
|
|
||||||
# Allow .envrc
|
|
||||||
- if command -v nix > /dev/null; then direnv allow; fi
|
|
||||||
|
|
||||||
# Set CARGO_HOME to a cacheable path
|
|
||||||
- export CARGO_HOME="$(git rev-parse --show-toplevel)/.gitlab-ci.d/cargo"
|
|
||||||
|
|
||||||
ci:
|
|
||||||
stage: ci
|
|
||||||
image: nixos/nix:2.19.2
|
|
||||||
script:
|
script:
|
||||||
- direnv exec . engage
|
# cross-compile conduit for target
|
||||||
|
- 'time cross build --target="$TARGET" --locked --release'
|
||||||
|
- 'mv "target/$TARGET/release/conduit" "conduit-$TARGET"'
|
||||||
|
# print information about linking for debugging
|
||||||
|
- "file conduit-$TARGET" # print file information
|
||||||
|
- 'readelf --dynamic conduit-$TARGET | sed -e "/NEEDED/q1"' # ensure statically linked
|
||||||
cache:
|
cache:
|
||||||
key: nix
|
# https://doc.rust-lang.org/cargo/guide/cargo-home.html#caching-the-cargo-home-in-ci
|
||||||
|
key: "cargo-cache-$TARGET"
|
||||||
paths:
|
paths:
|
||||||
- target
|
- $SHARED_PATH/cargo/registry/index
|
||||||
- .gitlab-ci.d
|
- $SHARED_PATH/cargo/registry/cache
|
||||||
|
- $SHARED_PATH/cargo/git/db
|
||||||
static:x86_64-unknown-linux-musl:
|
|
||||||
stage: artifacts
|
|
||||||
image: nixos/nix:2.19.2
|
|
||||||
script:
|
|
||||||
# Push artifacts and build requirements to binary cache
|
|
||||||
- ./bin/nix-build-and-cache .#static-x86_64-unknown-linux-musl
|
|
||||||
|
|
||||||
# Make the output less difficult to find
|
|
||||||
- cp result/bin/conduit conduit
|
|
||||||
artifacts:
|
artifacts:
|
||||||
paths:
|
expire_in: never
|
||||||
- conduit
|
|
||||||
|
|
||||||
static:aarch64-unknown-linux-musl:
|
build:release:cargo:x86_64-unknown-linux-musl-with-debug:
|
||||||
stage: artifacts
|
extends: .build-cargo-shared-settings
|
||||||
image: nixos/nix:2.19.2
|
variables:
|
||||||
script:
|
CARGO_PROFILE_RELEASE_DEBUG: 2 # Enable debug info for flamegraph profiling
|
||||||
# Push artifacts and build requirements to binary cache
|
TARGET: "x86_64-unknown-linux-musl"
|
||||||
- ./bin/nix-build-and-cache .#static-aarch64-unknown-linux-musl
|
after_script:
|
||||||
|
- "mv ./conduit-x86_64-unknown-linux-musl ./conduit-x86_64-unknown-linux-musl-with-debug"
|
||||||
# Make the output less difficult to find
|
|
||||||
- cp result/bin/conduit conduit
|
|
||||||
artifacts:
|
artifacts:
|
||||||
|
name: "conduit-x86_64-unknown-linux-musl-with-debug"
|
||||||
paths:
|
paths:
|
||||||
- conduit
|
- "conduit-x86_64-unknown-linux-musl-with-debug"
|
||||||
|
expose_as: "Conduit for x86_64-unknown-linux-musl-with-debug"
|
||||||
|
|
||||||
# Note that although we have an `oci-image-x86_64-unknown-linux-musl` output,
|
build:release:cargo:x86_64-unknown-linux-musl:
|
||||||
# we don't build it because it would be largely redundant to this one since it's
|
extends: .build-cargo-shared-settings
|
||||||
# all containerized anyway.
|
variables:
|
||||||
oci-image:x86_64-unknown-linux-gnu:
|
TARGET: "x86_64-unknown-linux-musl"
|
||||||
stage: artifacts
|
|
||||||
image: nixos/nix:2.19.2
|
|
||||||
script:
|
|
||||||
# Push artifacts and build requirements to binary cache
|
|
||||||
#
|
|
||||||
# Since the OCI image package is based on the binary package, this has the
|
|
||||||
# fun side effect of uploading the normal binary too. Conduit users who are
|
|
||||||
# deploying with Nix can leverage this fact by adding our binary cache to
|
|
||||||
# their systems.
|
|
||||||
- ./bin/nix-build-and-cache .#oci-image
|
|
||||||
|
|
||||||
# Make the output less difficult to find
|
|
||||||
- cp result oci-image-amd64.tar.gz
|
|
||||||
artifacts:
|
artifacts:
|
||||||
|
name: "conduit-x86_64-unknown-linux-musl"
|
||||||
paths:
|
paths:
|
||||||
- oci-image-amd64.tar.gz
|
- "conduit-x86_64-unknown-linux-musl"
|
||||||
|
expose_as: "Conduit for x86_64-unknown-linux-musl"
|
||||||
|
|
||||||
oci-image:aarch64-unknown-linux-musl:
|
build:release:cargo:arm-unknown-linux-musleabihf:
|
||||||
stage: artifacts
|
extends: .build-cargo-shared-settings
|
||||||
needs:
|
variables:
|
||||||
# Wait for the static binary job to finish before starting so we don't have
|
TARGET: "arm-unknown-linux-musleabihf"
|
||||||
# to build that twice for no reason
|
|
||||||
- static:aarch64-unknown-linux-musl
|
|
||||||
image: nixos/nix:2.19.2
|
|
||||||
script:
|
|
||||||
# Push artifacts and build requirements to binary cache
|
|
||||||
- ./bin/nix-build-and-cache .#oci-image-aarch64-unknown-linux-musl
|
|
||||||
|
|
||||||
# Make the output less difficult to find
|
|
||||||
- cp result oci-image-arm64v8.tar.gz
|
|
||||||
artifacts:
|
artifacts:
|
||||||
|
name: "conduit-arm-unknown-linux-musleabihf"
|
||||||
paths:
|
paths:
|
||||||
- oci-image-arm64v8.tar.gz
|
- "conduit-arm-unknown-linux-musleabihf"
|
||||||
|
expose_as: "Conduit for arm-unknown-linux-musleabihf"
|
||||||
|
|
||||||
debian:x86_64-unknown-linux-gnu:
|
build:release:cargo:armv7-unknown-linux-musleabihf:
|
||||||
stage: artifacts
|
extends: .build-cargo-shared-settings
|
||||||
# See also `rust-toolchain.toml`
|
variables:
|
||||||
image: rust:1.75.0
|
TARGET: "armv7-unknown-linux-musleabihf"
|
||||||
script:
|
|
||||||
- apt-get update && apt-get install -y --no-install-recommends libclang-dev
|
|
||||||
- cargo install cargo-deb
|
|
||||||
- cargo deb
|
|
||||||
|
|
||||||
# Make the output less difficult to find
|
|
||||||
- mv target/debian/*.deb conduit.deb
|
|
||||||
artifacts:
|
artifacts:
|
||||||
|
name: "conduit-armv7-unknown-linux-musleabihf"
|
||||||
paths:
|
paths:
|
||||||
- conduit.deb
|
- "conduit-armv7-unknown-linux-musleabihf"
|
||||||
|
expose_as: "Conduit for armv7-unknown-linux-musleabihf"
|
||||||
|
|
||||||
|
build:release:cargo:aarch64-unknown-linux-musl:
|
||||||
|
extends: .build-cargo-shared-settings
|
||||||
|
variables:
|
||||||
|
TARGET: "aarch64-unknown-linux-musl"
|
||||||
|
artifacts:
|
||||||
|
name: "conduit-aarch64-unknown-linux-musl"
|
||||||
|
paths:
|
||||||
|
- "conduit-aarch64-unknown-linux-musl"
|
||||||
|
expose_as: "Conduit for aarch64-unknown-linux-musl"
|
||||||
|
|
||||||
|
.cargo-debug-shared-settings:
|
||||||
|
extends: ".build-cargo-shared-settings"
|
||||||
|
rules:
|
||||||
|
- when: "always"
|
||||||
cache:
|
cache:
|
||||||
key: debian
|
key: "build_cache--$TARGET--$CI_COMMIT_BRANCH--debug"
|
||||||
paths:
|
script:
|
||||||
- target
|
# cross-compile conduit for target
|
||||||
- .gitlab-ci.d
|
- 'time time cross build --target="$TARGET" --locked'
|
||||||
|
- 'mv "target/$TARGET/debug/conduit" "conduit-debug-$TARGET"'
|
||||||
|
# print information about linking for debugging
|
||||||
|
- "file conduit-debug-$TARGET" # print file information
|
||||||
|
- 'readelf --dynamic conduit-debug-$TARGET | sed -e "/NEEDED/q1"' # ensure statically linked
|
||||||
|
artifacts:
|
||||||
|
expire_in: 4 weeks
|
||||||
|
|
||||||
.push-oci-image:
|
build:debug:cargo:x86_64-unknown-linux-musl:
|
||||||
stage: publish
|
extends: ".cargo-debug-shared-settings"
|
||||||
image: docker:25.0.0
|
variables:
|
||||||
|
TARGET: "x86_64-unknown-linux-musl"
|
||||||
|
artifacts:
|
||||||
|
name: "conduit-debug-x86_64-unknown-linux-musl"
|
||||||
|
paths:
|
||||||
|
- "conduit-debug-x86_64-unknown-linux-musl"
|
||||||
|
expose_as: "Conduit DEBUG for x86_64-unknown-linux-musl"
|
||||||
|
|
||||||
|
# --------------------------------------------------------------------- #
|
||||||
|
# Create and publish docker image #
|
||||||
|
# --------------------------------------------------------------------- #
|
||||||
|
|
||||||
|
.docker-shared-settings:
|
||||||
|
stage: "build docker image"
|
||||||
|
image: jdrouet/docker-with-buildx:stable
|
||||||
|
tags: ["docker"]
|
||||||
services:
|
services:
|
||||||
- docker:25.0.0-dind
|
- docker:dind
|
||||||
|
needs:
|
||||||
|
- "build:release:cargo:x86_64-unknown-linux-musl"
|
||||||
|
- "build:release:cargo:arm-unknown-linux-musleabihf"
|
||||||
|
- "build:release:cargo:armv7-unknown-linux-musleabihf"
|
||||||
|
- "build:release:cargo:aarch64-unknown-linux-musl"
|
||||||
variables:
|
variables:
|
||||||
IMAGE_SUFFIX_AMD64: amd64
|
PLATFORMS: "linux/arm/v6,linux/arm/v7,linux/arm64,linux/amd64"
|
||||||
IMAGE_SUFFIX_ARM64V8: arm64v8
|
DOCKER_FILE: "docker/ci-binaries-packaging.Dockerfile"
|
||||||
|
cache:
|
||||||
|
paths:
|
||||||
|
- docker_cache
|
||||||
|
key: "$CI_JOB_NAME"
|
||||||
|
before_script:
|
||||||
|
- docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY
|
||||||
|
# Only log in to Dockerhub if the credentials are given:
|
||||||
|
- if [ -n "${DOCKER_HUB}" ]; then docker login -u "$DOCKER_HUB_USER" -p "$DOCKER_HUB_PASSWORD" "$DOCKER_HUB"; fi
|
||||||
script:
|
script:
|
||||||
- docker load -i oci-image-amd64.tar.gz
|
# Prepare buildx to build multiarch stuff:
|
||||||
- IMAGE_ID_AMD64=$(docker images -q conduit:next)
|
- docker context create 'ci-context'
|
||||||
- docker load -i oci-image-arm64v8.tar.gz
|
- docker buildx create --name 'multiarch-builder' --use 'ci-context'
|
||||||
- IMAGE_ID_ARM64V8=$(docker images -q conduit:next)
|
# Copy binaries to their docker arch path
|
||||||
# Tag and push the architecture specific images
|
- mkdir -p linux/ && mv ./conduit-x86_64-unknown-linux-musl linux/amd64
|
||||||
- docker tag $IMAGE_ID_AMD64 $IMAGE_NAME:$CI_COMMIT_SHA-$IMAGE_SUFFIX_AMD64
|
- mkdir -p linux/arm/ && mv ./conduit-arm-unknown-linux-musleabihf linux/arm/v6
|
||||||
- docker tag $IMAGE_ID_ARM64V8 $IMAGE_NAME:$CI_COMMIT_SHA-$IMAGE_SUFFIX_ARM64V8
|
- mkdir -p linux/arm/ && mv ./conduit-armv7-unknown-linux-musleabihf linux/arm/v7
|
||||||
- docker push $IMAGE_NAME:$CI_COMMIT_SHA-$IMAGE_SUFFIX_AMD64
|
- mv ./conduit-aarch64-unknown-linux-musl linux/arm64
|
||||||
- docker push $IMAGE_NAME:$CI_COMMIT_SHA-$IMAGE_SUFFIX_ARM64V8
|
- 'export CREATED=$(date -u +''%Y-%m-%dT%H:%M:%SZ'') && echo "Docker image creation date: $CREATED"'
|
||||||
# Tag the multi-arch image
|
# Build and push image:
|
||||||
- docker manifest create $IMAGE_NAME:$CI_COMMIT_SHA --amend $IMAGE_NAME:$CI_COMMIT_SHA-$IMAGE_SUFFIX_AMD64 --amend $IMAGE_NAME:$CI_COMMIT_SHA-$IMAGE_SUFFIX_ARM64V8
|
- >
|
||||||
- docker manifest push $IMAGE_NAME:$CI_COMMIT_SHA
|
docker buildx build
|
||||||
# Tag and push the git ref
|
--pull
|
||||||
- docker manifest create $IMAGE_NAME:$CI_COMMIT_REF_NAME --amend $IMAGE_NAME:$CI_COMMIT_SHA-$IMAGE_SUFFIX_AMD64 --amend $IMAGE_NAME:$CI_COMMIT_SHA-$IMAGE_SUFFIX_ARM64V8
|
--push
|
||||||
- docker manifest push $IMAGE_NAME:$CI_COMMIT_REF_NAME
|
--cache-from=type=local,src=$CI_PROJECT_DIR/docker_cache
|
||||||
# Tag git tags as 'latest'
|
--cache-to=type=local,dest=$CI_PROJECT_DIR/docker_cache
|
||||||
- |
|
--build-arg CREATED=$CREATED
|
||||||
if [[ -n "$CI_COMMIT_TAG" ]]; then
|
--build-arg VERSION=$(grep -m1 -o '[0-9].[0-9].[0-9]' Cargo.toml)
|
||||||
docker manifest create $IMAGE_NAME:latest --amend $IMAGE_NAME:$CI_COMMIT_SHA-$IMAGE_SUFFIX_AMD64 --amend $IMAGE_NAME:$CI_COMMIT_SHA-$IMAGE_SUFFIX_ARM64V8
|
--build-arg "GIT_REF=$CI_COMMIT_SHORT_SHA"
|
||||||
docker manifest push $IMAGE_NAME:latest
|
--platform "$PLATFORMS"
|
||||||
fi
|
--tag "$TAG"
|
||||||
dependencies:
|
--tag "$TAG-alpine"
|
||||||
- oci-image:x86_64-unknown-linux-gnu
|
--tag "$TAG-commit-$CI_COMMIT_SHORT_SHA"
|
||||||
- oci-image:aarch64-unknown-linux-musl
|
--file "$DOCKER_FILE" .
|
||||||
only:
|
|
||||||
- next
|
|
||||||
- master
|
|
||||||
- tags
|
|
||||||
|
|
||||||
oci-image:push-gitlab:
|
docker:next:gitlab:
|
||||||
extends: .push-oci-image
|
extends: .docker-shared-settings
|
||||||
|
rules:
|
||||||
|
- if: '$CI_COMMIT_BRANCH == "next"'
|
||||||
variables:
|
variables:
|
||||||
IMAGE_NAME: $CI_REGISTRY_IMAGE/matrix-conduit
|
TAG: "$CI_REGISTRY_IMAGE/matrix-conduit:next"
|
||||||
before_script:
|
|
||||||
- docker login -u $CI_REGISTRY_USER -p $CI_REGISTRY_PASSWORD $CI_REGISTRY
|
|
||||||
|
|
||||||
oci-image:push-dockerhub:
|
docker:next:dockerhub:
|
||||||
extends: .push-oci-image
|
extends: .docker-shared-settings
|
||||||
|
rules:
|
||||||
|
- if: '$CI_COMMIT_BRANCH == "next" && $DOCKER_HUB'
|
||||||
variables:
|
variables:
|
||||||
IMAGE_NAME: matrixconduit/matrix-conduit
|
TAG: "$DOCKER_HUB_IMAGE/matrixconduit/matrix-conduit:next"
|
||||||
|
|
||||||
|
docker:master:gitlab:
|
||||||
|
extends: .docker-shared-settings
|
||||||
|
rules:
|
||||||
|
- if: '$CI_COMMIT_BRANCH == "master"'
|
||||||
|
variables:
|
||||||
|
TAG: "$CI_REGISTRY_IMAGE/matrix-conduit:latest"
|
||||||
|
|
||||||
|
docker:master:dockerhub:
|
||||||
|
extends: .docker-shared-settings
|
||||||
|
rules:
|
||||||
|
- if: '$CI_COMMIT_BRANCH == "master" && $DOCKER_HUB'
|
||||||
|
variables:
|
||||||
|
TAG: "$DOCKER_HUB_IMAGE/matrixconduit/matrix-conduit:latest"
|
||||||
|
|
||||||
|
# --------------------------------------------------------------------- #
|
||||||
|
# Run tests #
|
||||||
|
# --------------------------------------------------------------------- #
|
||||||
|
|
||||||
|
test:cargo:
|
||||||
|
stage: "test"
|
||||||
|
needs: []
|
||||||
|
image: "registry.gitlab.com/jfowl/conduit-containers/rust-with-tools:latest"
|
||||||
|
tags: ["docker"]
|
||||||
|
variables:
|
||||||
|
CARGO_INCREMENTAL: "false" # https://matklad.github.io/2021/09/04/fast-rust-builds.html#ci-workflow
|
||||||
|
interruptible: true
|
||||||
before_script:
|
before_script:
|
||||||
- docker login -u $DOCKER_HUB_USER -p $DOCKER_HUB_PASSWORD
|
- rustup component add clippy rustfmt
|
||||||
|
# If provided, bring in caching through sccache, which uses an external S3 endpoint to store compilation results:
|
||||||
|
- if [ -n "${SCCACHE_ENDPOINT}" ]; then export RUSTC_WRAPPER=/usr/local/cargo/bin/sccache; fi
|
||||||
|
script:
|
||||||
|
- rustc --version && cargo --version # Print version info for debugging
|
||||||
|
- cargo fmt --all -- --check
|
||||||
|
- "cargo test --color always --workspace --verbose --locked --no-fail-fast -- -Z unstable-options --format json | gitlab-report -p test > $CI_PROJECT_DIR/report.xml"
|
||||||
|
- "cargo clippy --color always --verbose --message-format=json | gitlab-report -p clippy > $CI_PROJECT_DIR/gl-code-quality-report.json"
|
||||||
|
artifacts:
|
||||||
|
when: always
|
||||||
|
reports:
|
||||||
|
junit: report.xml
|
||||||
|
codequality: gl-code-quality-report.json
|
||||||
|
|
||||||
|
test:sytest:
|
||||||
|
stage: "test"
|
||||||
|
allow_failure: true
|
||||||
|
needs:
|
||||||
|
- "build:debug:cargo:x86_64-unknown-linux-musl"
|
||||||
|
image:
|
||||||
|
name: "valkum/sytest-conduit:latest"
|
||||||
|
entrypoint: [""]
|
||||||
|
tags: ["docker"]
|
||||||
|
variables:
|
||||||
|
PLUGINS: "https://github.com/valkum/sytest_conduit/archive/master.tar.gz"
|
||||||
|
before_script:
|
||||||
|
- "mkdir -p /app"
|
||||||
|
- "cp ./conduit-debug-x86_64-unknown-linux-musl /app/conduit"
|
||||||
|
- "chmod +x /app/conduit"
|
||||||
|
- "rm -rf /src && ln -s $CI_PROJECT_DIR/ /src"
|
||||||
|
- "mkdir -p /work/server-0/database/ && mkdir -p /work/server-1/database/ && mkdir -p /work/server-2/database/"
|
||||||
|
- "cd /"
|
||||||
|
script:
|
||||||
|
- "SYTEST_EXIT_CODE=0"
|
||||||
|
- "/bootstrap.sh conduit || SYTEST_EXIT_CODE=1"
|
||||||
|
- 'perl /sytest/tap-to-junit-xml.pl --puretap --input /logs/results.tap --output $CI_PROJECT_DIR/sytest.xml "Sytest" && cp /logs/results.tap $CI_PROJECT_DIR/results.tap'
|
||||||
|
- "exit $SYTEST_EXIT_CODE"
|
||||||
|
artifacts:
|
||||||
|
when: always
|
||||||
|
paths:
|
||||||
|
- "$CI_PROJECT_DIR/sytest.xml"
|
||||||
|
- "$CI_PROJECT_DIR/results.tap"
|
||||||
|
reports:
|
||||||
|
junit: "$CI_PROJECT_DIR/sytest.xml"
|
||||||
|
|
||||||
|
# --------------------------------------------------------------------- #
|
||||||
|
# Store binaries as package so they have download urls #
|
||||||
|
# --------------------------------------------------------------------- #
|
||||||
|
|
||||||
|
publish:package:
|
||||||
|
stage: "upload artifacts"
|
||||||
|
needs:
|
||||||
|
- "build:release:cargo:x86_64-unknown-linux-musl"
|
||||||
|
- "build:release:cargo:arm-unknown-linux-musleabihf"
|
||||||
|
- "build:release:cargo:armv7-unknown-linux-musleabihf"
|
||||||
|
- "build:release:cargo:aarch64-unknown-linux-musl"
|
||||||
|
# - "build:cargo-deb:x86_64-unknown-linux-gnu"
|
||||||
|
rules:
|
||||||
|
- if: '$CI_COMMIT_BRANCH == "master"'
|
||||||
|
- if: '$CI_COMMIT_BRANCH == "next"'
|
||||||
|
- if: "$CI_COMMIT_TAG"
|
||||||
|
image: curlimages/curl:latest
|
||||||
|
tags: ["docker"]
|
||||||
|
variables:
|
||||||
|
GIT_STRATEGY: "none" # Don't need a clean copy of the code, we just operate on artifacts
|
||||||
|
script:
|
||||||
|
- 'BASE_URL="${CI_API_V4_URL}/projects/${CI_PROJECT_ID}/packages/generic/conduit-${CI_COMMIT_REF_SLUG}/build-${CI_PIPELINE_ID}"'
|
||||||
|
- 'curl --header "JOB-TOKEN: $CI_JOB_TOKEN" --upload-file conduit-x86_64-unknown-linux-musl "${BASE_URL}/conduit-x86_64-unknown-linux-musl"'
|
||||||
|
- 'curl --header "JOB-TOKEN: $CI_JOB_TOKEN" --upload-file conduit-arm-unknown-linux-musleabihf "${BASE_URL}/conduit-arm-unknown-linux-musleabihf"'
|
||||||
|
- 'curl --header "JOB-TOKEN: $CI_JOB_TOKEN" --upload-file conduit-armv7-unknown-linux-musleabihf "${BASE_URL}/conduit-armv7-unknown-linux-musleabihf"'
|
||||||
|
- 'curl --header "JOB-TOKEN: $CI_JOB_TOKEN" --upload-file conduit-aarch64-unknown-linux-musl "${BASE_URL}/conduit-aarch64-unknown-linux-musl"'
|
||||||
|
|
||||||
|
# Avoid duplicate pipelines
|
||||||
|
# See: https://docs.gitlab.com/ee/ci/yaml/workflow.html#switch-between-branch-pipelines-and-merge-request-pipelines
|
||||||
|
workflow:
|
||||||
|
rules:
|
||||||
|
- if: '$CI_PIPELINE_SOURCE == "merge_request_event"'
|
||||||
|
- if: "$CI_COMMIT_BRANCH && $CI_OPEN_MERGE_REQUESTS"
|
||||||
|
when: never
|
||||||
|
- if: "$CI_COMMIT_BRANCH"
|
||||||
|
|
|
@ -1,5 +0,0 @@
|
||||||
# Nix things
|
|
||||||
.envrc @CobaltCause
|
|
||||||
flake.lock @CobaltCause
|
|
||||||
flake.nix @CobaltCause
|
|
||||||
nix/ @CobaltCause
|
|
|
@ -1,37 +0,0 @@
|
||||||
#!/bin/sh
|
|
||||||
set -eux
|
|
||||||
|
|
||||||
# --------------------------------------------------------------------- #
|
|
||||||
# #
|
|
||||||
# Configures docker buildx to use a remote server for arm building. #
|
|
||||||
# Expects $SSH_PRIVATE_KEY to be a valid ssh ed25519 private key with #
|
|
||||||
# access to the server $ARM_SERVER_USER@$ARM_SERVER_IP #
|
|
||||||
# #
|
|
||||||
# This is expected to only be used in the official CI/CD pipeline! #
|
|
||||||
# #
|
|
||||||
# Requirements: openssh-client, docker buildx #
|
|
||||||
# Inspired by: https://depot.dev/blog/building-arm-containers #
|
|
||||||
# #
|
|
||||||
# --------------------------------------------------------------------- #
|
|
||||||
|
|
||||||
cat "$BUILD_SERVER_SSH_PRIVATE_KEY" | ssh-add -
|
|
||||||
|
|
||||||
# Test server connections:
|
|
||||||
ssh "$ARM_SERVER_USER@$ARM_SERVER_IP" "uname -a"
|
|
||||||
ssh "$AMD_SERVER_USER@$AMD_SERVER_IP" "uname -a"
|
|
||||||
|
|
||||||
# Connect remote arm64 server for all arm builds:
|
|
||||||
docker buildx create \
|
|
||||||
--name "multi" \
|
|
||||||
--driver "docker-container" \
|
|
||||||
--platform "linux/arm64,linux/arm/v7" \
|
|
||||||
"ssh://$ARM_SERVER_USER@$ARM_SERVER_IP"
|
|
||||||
|
|
||||||
# Connect remote amd64 server for adm64 builds:
|
|
||||||
docker buildx create --append \
|
|
||||||
--name "multi" \
|
|
||||||
--driver "docker-container" \
|
|
||||||
--platform "linux/amd64" \
|
|
||||||
"ssh://$AMD_SERVER_USER@$AMD_SERVER_IP"
|
|
||||||
|
|
||||||
docker buildx use multi
|
|
11
.vscode/extensions.json
vendored
11
.vscode/extensions.json
vendored
|
@ -1,11 +0,0 @@
|
||||||
{
|
|
||||||
"recommendations": [
|
|
||||||
"rust-lang.rust-analyzer",
|
|
||||||
"bungcip.better-toml",
|
|
||||||
"ms-azuretools.vscode-docker",
|
|
||||||
"eamodio.gitlens",
|
|
||||||
"serayuzgur.crates",
|
|
||||||
"vadimcn.vscode-lldb",
|
|
||||||
"timonwong.shellcheck"
|
|
||||||
]
|
|
||||||
}
|
|
35
.vscode/launch.json
vendored
35
.vscode/launch.json
vendored
|
@ -1,35 +0,0 @@
|
||||||
{
|
|
||||||
// Use IntelliSense to learn about possible attributes.
|
|
||||||
// Hover to view descriptions of existing attributes.
|
|
||||||
// For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387
|
|
||||||
"version": "0.2.0",
|
|
||||||
"configurations": [
|
|
||||||
{
|
|
||||||
"type": "lldb",
|
|
||||||
"request": "launch",
|
|
||||||
"name": "Debug conduit",
|
|
||||||
"sourceLanguages": ["rust"],
|
|
||||||
"cargo": {
|
|
||||||
"args": [
|
|
||||||
"build",
|
|
||||||
"--bin=conduit",
|
|
||||||
"--package=conduit"
|
|
||||||
],
|
|
||||||
"filter": {
|
|
||||||
"name": "conduit",
|
|
||||||
"kind": "bin"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"args": [],
|
|
||||||
"env": {
|
|
||||||
"RUST_BACKTRACE": "1",
|
|
||||||
"CONDUIT_CONFIG": "",
|
|
||||||
"CONDUIT_SERVER_NAME": "localhost",
|
|
||||||
"CONDUIT_DATABASE_PATH": "/tmp",
|
|
||||||
"CONDUIT_ADDRESS": "0.0.0.0",
|
|
||||||
"CONDUIT_PORT": "6167"
|
|
||||||
},
|
|
||||||
"cwd": "${workspaceFolder}"
|
|
||||||
}
|
|
||||||
]
|
|
||||||
}
|
|
3
.vscode/settings.json
vendored
Normal file
3
.vscode/settings.json
vendored
Normal file
|
@ -0,0 +1,3 @@
|
||||||
|
{
|
||||||
|
"rust-analyzer.procMacro.enable": true
|
||||||
|
}
|
|
@ -1,134 +0,0 @@
|
||||||
|
|
||||||
# Contributor Covenant Code of Conduct
|
|
||||||
|
|
||||||
## Our Pledge
|
|
||||||
|
|
||||||
We as members, contributors, and leaders pledge to make participation in our
|
|
||||||
community a harassment-free experience for everyone, regardless of age, body
|
|
||||||
size, visible or invisible disability, ethnicity, sex characteristics, gender
|
|
||||||
identity and expression, level of experience, education, socio-economic status,
|
|
||||||
nationality, personal appearance, race, caste, color, religion, or sexual
|
|
||||||
identity and orientation.
|
|
||||||
|
|
||||||
We pledge to act and interact in ways that contribute to an open, welcoming,
|
|
||||||
diverse, inclusive, and healthy community.
|
|
||||||
|
|
||||||
## Our Standards
|
|
||||||
|
|
||||||
Examples of behavior that contributes to a positive environment for our
|
|
||||||
community include:
|
|
||||||
|
|
||||||
* Demonstrating empathy and kindness toward other people
|
|
||||||
* Being respectful of differing opinions, viewpoints, and experiences
|
|
||||||
* Giving and gracefully accepting constructive feedback
|
|
||||||
* Accepting responsibility and apologizing to those affected by our mistakes,
|
|
||||||
and learning from the experience
|
|
||||||
* Focusing on what is best not just for us as individuals, but for the overall
|
|
||||||
community
|
|
||||||
|
|
||||||
Examples of unacceptable behavior include:
|
|
||||||
|
|
||||||
* The use of sexualized language or imagery, and sexual attention or advances of
|
|
||||||
any kind
|
|
||||||
* Trolling, insulting or derogatory comments, and personal or political attacks
|
|
||||||
* Public or private harassment
|
|
||||||
* Publishing others' private information, such as a physical or email address,
|
|
||||||
without their explicit permission
|
|
||||||
* Other conduct which could reasonably be considered inappropriate in a
|
|
||||||
professional setting
|
|
||||||
|
|
||||||
## Enforcement Responsibilities
|
|
||||||
|
|
||||||
Community leaders are responsible for clarifying and enforcing our standards of
|
|
||||||
acceptable behavior and will take appropriate and fair corrective action in
|
|
||||||
response to any behavior that they deem inappropriate, threatening, offensive,
|
|
||||||
or harmful.
|
|
||||||
|
|
||||||
Community leaders have the right and responsibility to remove, edit, or reject
|
|
||||||
comments, commits, code, wiki edits, issues, and other contributions that are
|
|
||||||
not aligned to this Code of Conduct, and will communicate reasons for moderation
|
|
||||||
decisions when appropriate.
|
|
||||||
|
|
||||||
## Scope
|
|
||||||
|
|
||||||
This Code of Conduct applies within all community spaces, and also applies when
|
|
||||||
an individual is officially representing the community in public spaces.
|
|
||||||
Examples of representing our community include using an official e-mail address,
|
|
||||||
posting via an official social media account, or acting as an appointed
|
|
||||||
representative at an online or offline event.
|
|
||||||
|
|
||||||
## Enforcement
|
|
||||||
|
|
||||||
Instances of abusive, harassing, or otherwise unacceptable behavior may be
|
|
||||||
reported to the community leaders responsible for enforcement over email at
|
|
||||||
coc@koesters.xyz or over Matrix at @timo:conduit.rs.
|
|
||||||
All complaints will be reviewed and investigated promptly and fairly.
|
|
||||||
|
|
||||||
All community leaders are obligated to respect the privacy and security of the
|
|
||||||
reporter of any incident.
|
|
||||||
|
|
||||||
## Enforcement Guidelines
|
|
||||||
|
|
||||||
Community leaders will follow these Community Impact Guidelines in determining
|
|
||||||
the consequences for any action they deem in violation of this Code of Conduct:
|
|
||||||
|
|
||||||
### 1. Correction
|
|
||||||
|
|
||||||
**Community Impact**: Use of inappropriate language or other behavior deemed
|
|
||||||
unprofessional or unwelcome in the community.
|
|
||||||
|
|
||||||
**Consequence**: A private, written warning from community leaders, providing
|
|
||||||
clarity around the nature of the violation and an explanation of why the
|
|
||||||
behavior was inappropriate. A public apology may be requested.
|
|
||||||
|
|
||||||
### 2. Warning
|
|
||||||
|
|
||||||
**Community Impact**: A violation through a single incident or series of
|
|
||||||
actions.
|
|
||||||
|
|
||||||
**Consequence**: A warning with consequences for continued behavior. No
|
|
||||||
interaction with the people involved, including unsolicited interaction with
|
|
||||||
those enforcing the Code of Conduct, for a specified period of time. This
|
|
||||||
includes avoiding interactions in community spaces as well as external channels
|
|
||||||
like social media. Violating these terms may lead to a temporary or permanent
|
|
||||||
ban.
|
|
||||||
|
|
||||||
### 3. Temporary Ban
|
|
||||||
|
|
||||||
**Community Impact**: A serious violation of community standards, including
|
|
||||||
sustained inappropriate behavior.
|
|
||||||
|
|
||||||
**Consequence**: A temporary ban from any sort of interaction or public
|
|
||||||
communication with the community for a specified period of time. No public or
|
|
||||||
private interaction with the people involved, including unsolicited interaction
|
|
||||||
with those enforcing the Code of Conduct, is allowed during this period.
|
|
||||||
Violating these terms may lead to a permanent ban.
|
|
||||||
|
|
||||||
### 4. Permanent Ban
|
|
||||||
|
|
||||||
**Community Impact**: Demonstrating a pattern of violation of community
|
|
||||||
standards, including sustained inappropriate behavior, harassment of an
|
|
||||||
individual, or aggression toward or disparagement of classes of individuals.
|
|
||||||
|
|
||||||
**Consequence**: A permanent ban from any sort of public interaction within the
|
|
||||||
community.
|
|
||||||
|
|
||||||
## Attribution
|
|
||||||
|
|
||||||
This Code of Conduct is adapted from the [Contributor Covenant][homepage],
|
|
||||||
version 2.1, available at
|
|
||||||
[https://www.contributor-covenant.org/version/2/1/code_of_conduct.html][v2.1].
|
|
||||||
|
|
||||||
Community Impact Guidelines were inspired by
|
|
||||||
[Mozilla's code of conduct enforcement ladder][Mozilla CoC].
|
|
||||||
|
|
||||||
For answers to common questions about this code of conduct, see the FAQ at
|
|
||||||
[https://www.contributor-covenant.org/faq][FAQ]. Translations are available at
|
|
||||||
[https://www.contributor-covenant.org/translations][translations].
|
|
||||||
|
|
||||||
[homepage]: https://www.contributor-covenant.org
|
|
||||||
[v2.1]: https://www.contributor-covenant.org/version/2/1/code_of_conduct.html
|
|
||||||
[Mozilla CoC]: https://github.com/mozilla/diversity
|
|
||||||
[FAQ]: https://www.contributor-covenant.org/faq
|
|
||||||
[translations]: https://www.contributor-covenant.org/translations
|
|
||||||
|
|
3220
Cargo.lock
generated
3220
Cargo.lock
generated
File diff suppressed because it is too large
Load diff
137
Cargo.toml
137
Cargo.toml
|
@ -1,14 +1,3 @@
|
||||||
# Keep alphabetically sorted
|
|
||||||
[workspace.lints.rust]
|
|
||||||
explicit_outlives_requirements = "warn"
|
|
||||||
unused_qualifications = "warn"
|
|
||||||
|
|
||||||
# Keep alphabetically sorted
|
|
||||||
[workspace.lints.clippy]
|
|
||||||
cloned_instead_of_copied = "warn"
|
|
||||||
dbg_macro = "warn"
|
|
||||||
str_to_string = "warn"
|
|
||||||
|
|
||||||
[package]
|
[package]
|
||||||
name = "conduit"
|
name = "conduit"
|
||||||
description = "A Matrix homeserver written in Rust"
|
description = "A Matrix homeserver written in Rust"
|
||||||
|
@ -17,119 +6,99 @@ authors = ["timokoesters <timo@koesters.xyz>"]
|
||||||
homepage = "https://conduit.rs"
|
homepage = "https://conduit.rs"
|
||||||
repository = "https://gitlab.com/famedly/conduit"
|
repository = "https://gitlab.com/famedly/conduit"
|
||||||
readme = "README.md"
|
readme = "README.md"
|
||||||
version = "0.7.0-alpha"
|
version = "0.3.0"
|
||||||
|
rust-version = "1.56"
|
||||||
edition = "2021"
|
edition = "2021"
|
||||||
|
|
||||||
# See also `rust-toolchain.toml`
|
|
||||||
rust-version = "1.75.0"
|
|
||||||
|
|
||||||
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
|
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
|
||||||
|
|
||||||
[lints]
|
|
||||||
workspace = true
|
|
||||||
|
|
||||||
[dependencies]
|
[dependencies]
|
||||||
# Web framework
|
# Used to handle requests
|
||||||
axum = { version = "0.6.18", default-features = false, features = ["form", "headers", "http1", "http2", "json", "matched-path"], optional = true }
|
# TODO: This can become optional as soon as proper configs are supported
|
||||||
axum-server = { version = "0.5.1", features = ["tls-rustls"] }
|
# rocket = { git = "https://github.com/SergioBenitez/Rocket.git", rev = "801e04bd5369eb39e126c75f6d11e1e9597304d8", features = ["tls"] } # Used to handle requests
|
||||||
tower = { version = "0.4.13", features = ["util"] }
|
rocket = { version = "0.5.0-rc.1", features = ["tls"] } # Used to handle requests
|
||||||
tower-http = { version = "0.4.1", features = ["add-extension", "cors", "sensitive-headers", "trace", "util"] }
|
|
||||||
|
|
||||||
# Used for matrix spec type definitions and helpers
|
# Used for matrix spec type definitions and helpers
|
||||||
#ruma = { version = "0.4.0", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] }
|
#ruma = { version = "0.4.0", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] }
|
||||||
ruma = { git = "https://github.com/ruma/ruma", rev = "1a1c61ee1e8f0936e956a3b69c931ce12ee28475", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-msc2448", "unstable-msc3575", "unstable-exhaustive-types", "ring-compat", "unstable-unspecified" ] }
|
ruma = { git = "https://github.com/ruma/ruma", rev = "f7a10a7e471b59d3096be2695c2a05d407d80df1", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] }
|
||||||
#ruma = { git = "https://github.com/timokoesters/ruma", rev = "4ec9c69bb7e09391add2382b3ebac97b6e8f4c64", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-msc2448", "unstable-msc3575", "unstable-exhaustive-types", "ring-compat", "unstable-unspecified" ] }
|
#ruma = { git = "https://github.com/timokoesters/ruma", rev = "50c1db7e0a3a21fc794b0cce3b64285a4c750c71", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] }
|
||||||
#ruma = { path = "../ruma/crates/ruma", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-msc2448", "unstable-msc3575", "unstable-exhaustive-types", "ring-compat", "unstable-unspecified" ] }
|
#ruma = { path = "../ruma/crates/ruma", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] }
|
||||||
|
|
||||||
# Async runtime and utilities
|
# Used for long polling and federation sender, should be the same as rocket::tokio
|
||||||
tokio = { version = "1.28.1", features = ["fs", "macros", "signal", "sync"] }
|
tokio = "1.11.0"
|
||||||
# Used for storing data permanently
|
# Used for storing data permanently
|
||||||
#sled = { version = "0.34.7", features = ["compression", "no_metrics"], optional = true }
|
sled = { version = "0.34.6", features = ["compression", "no_metrics"], optional = true }
|
||||||
#sled = { git = "https://github.com/spacejam/sled.git", rev = "e4640e0773595229f398438886f19bca6f7326a2", features = ["compression"] }
|
#sled = { git = "https://github.com/spacejam/sled.git", rev = "e4640e0773595229f398438886f19bca6f7326a2", features = ["compression"] }
|
||||||
persy = { version = "1.4.4", optional = true, features = ["background_ops"] }
|
persy = { version = "1.2" , optional = true, features=["background_ops"] }
|
||||||
|
|
||||||
# Used for the http request / response body type for Ruma endpoints used with reqwest
|
# Used for the http request / response body type for Ruma endpoints used with reqwest
|
||||||
bytes = "1.4.0"
|
bytes = "1.1.0"
|
||||||
http = "0.2.9"
|
# Used for rocket<->ruma conversions
|
||||||
|
http = "0.2.4"
|
||||||
# Used to find data directory for default db path
|
# Used to find data directory for default db path
|
||||||
directories = "4.0.1"
|
directories = "3.0.2"
|
||||||
# Used for ruma wrapper
|
# Used for ruma wrapper
|
||||||
serde_json = { version = "1.0.96", features = ["raw_value"] }
|
serde_json = { version = "1.0.70", features = ["raw_value"] }
|
||||||
# Used for appservice registration files
|
# Used for appservice registration files
|
||||||
serde_yaml = "0.9.21"
|
serde_yaml = "0.8.20"
|
||||||
# Used for pdu definition
|
# Used for pdu definition
|
||||||
serde = { version = "1.0.163", features = ["rc"] }
|
serde = { version = "1.0.130", features = ["rc"] }
|
||||||
# Used for secure identifiers
|
# Used for secure identifiers
|
||||||
rand = "0.8.5"
|
rand = "0.8.4"
|
||||||
# Used to hash passwords
|
# Used to hash passwords
|
||||||
rust-argon2 = "1.0.0"
|
rust-argon2 = "0.8.3"
|
||||||
# Used to send requests
|
# Used to send requests
|
||||||
hyper = "0.14.26"
|
reqwest = { default-features = false, features = ["rustls-tls", "socks"], git = "https://github.com/timokoesters/reqwest", rev = "57b7cf4feb921573dfafad7d34b9ac6e44ead0bd" }
|
||||||
reqwest = { version = "0.11.18", default-features = false, features = ["rustls-tls-native-roots", "socks"] }
|
|
||||||
# Used for conduit::Error type
|
# Used for conduit::Error type
|
||||||
thiserror = "1.0.40"
|
thiserror = "1.0.28"
|
||||||
# Used to generate thumbnails for images
|
# Used to generate thumbnails for images
|
||||||
image = { version = "0.24.6", default-features = false, features = ["jpeg", "png", "gif"] }
|
image = { version = "0.23.14", default-features = false, features = ["jpeg", "png", "gif"] }
|
||||||
# Used to encode server public key
|
# Used to encode server public key
|
||||||
base64 = "0.21.2"
|
base64 = "0.13.0"
|
||||||
# Used when hashing the state
|
# Used when hashing the state
|
||||||
ring = "0.17.7"
|
ring = "0.16.20"
|
||||||
# Used when querying the SRV record of other servers
|
# Used when querying the SRV record of other servers
|
||||||
trust-dns-resolver = "0.22.0"
|
trust-dns-resolver = "0.20.3"
|
||||||
# Used to find matching events for appservices
|
# Used to find matching events for appservices
|
||||||
regex = "1.8.1"
|
regex = "1.5.4"
|
||||||
# jwt jsonwebtokens
|
# jwt jsonwebtokens
|
||||||
jsonwebtoken = "9.2.0"
|
jsonwebtoken = "7.2.0"
|
||||||
# Performance measurements
|
# Performance measurements
|
||||||
tracing = { version = "0.1.37", features = [] }
|
tracing = { version = "0.1.26", features = ["release_max_level_warn"] }
|
||||||
tracing-subscriber = { version = "0.3.17", features = ["env-filter"] }
|
tracing-subscriber = "0.2.20"
|
||||||
tracing-flame = "0.2.0"
|
tracing-flame = "0.1.0"
|
||||||
opentelemetry = { version = "0.18.0", features = ["rt-tokio"] }
|
opentelemetry = { version = "0.16.0", features = ["rt-tokio"] }
|
||||||
opentelemetry-jaeger = { version = "0.17.0", features = ["rt-tokio"] }
|
opentelemetry-jaeger = { version = "0.15.0", features = ["rt-tokio"] }
|
||||||
tracing-opentelemetry = "0.18.0"
|
|
||||||
lru-cache = "0.1.2"
|
lru-cache = "0.1.2"
|
||||||
rusqlite = { version = "0.29.0", optional = true, features = ["bundled"] }
|
rusqlite = { version = "0.25.3", optional = true, features = ["bundled"] }
|
||||||
parking_lot = { version = "0.12.1", optional = true }
|
parking_lot = { version = "0.11.2", optional = true }
|
||||||
# crossbeam = { version = "0.8.2", optional = true }
|
crossbeam = { version = "0.8.1", optional = true }
|
||||||
num_cpus = "1.15.0"
|
num_cpus = "1.13.0"
|
||||||
threadpool = "1.8.1"
|
threadpool = "1.8.1"
|
||||||
# heed = { git = "https://github.com/timokoesters/heed.git", rev = "f6f825da7fb2c758867e05ad973ef800a6fe1d5d", optional = true }
|
heed = { git = "https://github.com/timokoesters/heed.git", rev = "f6f825da7fb2c758867e05ad973ef800a6fe1d5d", optional = true }
|
||||||
# Used for ruma wrapper
|
rocksdb = { version = "0.17.0", default-features = false, features = ["multi-threaded-cf", "zstd"], optional = true }
|
||||||
serde_html_form = "0.2.0"
|
|
||||||
|
|
||||||
rocksdb = { version = "0.21.0", default-features = true, features = ["multi-threaded-cf", "zstd"], optional = true }
|
thread_local = "1.1.3"
|
||||||
|
|
||||||
thread_local = "1.1.7"
|
|
||||||
# used for TURN server authentication
|
# used for TURN server authentication
|
||||||
hmac = "0.12.1"
|
hmac = "0.11.0"
|
||||||
sha-1 = "0.10.1"
|
sha-1 = "0.9.8"
|
||||||
# used for conduit's CLI and admin room command parsing
|
# used for conduit's CLI and admin room command parsing
|
||||||
clap = { version = "4.3.0", default-features = false, features = ["std", "derive", "help", "usage", "error-context"] }
|
clap = { version = "3.0.10", default-features = false, features = ["std", "derive"] }
|
||||||
futures-util = { version = "0.3.28", default-features = false }
|
maplit = "1.0.2"
|
||||||
# Used for reading the configuration from conduit.toml & environment variables
|
|
||||||
figment = { version = "0.10.8", features = ["env", "toml"] }
|
|
||||||
|
|
||||||
tikv-jemalloc-ctl = { version = "0.5.0", features = ["use_std"], optional = true }
|
tikv-jemalloc-ctl = { version = "0.4.2", features = ["use_std"], optional = true }
|
||||||
tikv-jemallocator = { version = "0.5.0", features = ["unprefixed_malloc_on_supported_platforms"], optional = true }
|
tikv-jemallocator = { version = "0.4.1", features = ["unprefixed_malloc_on_supported_platforms"], optional = true }
|
||||||
lazy_static = "1.4.0"
|
|
||||||
async-trait = "0.1.68"
|
|
||||||
|
|
||||||
sd-notify = { version = "0.4.1", optional = true }
|
|
||||||
|
|
||||||
[target.'cfg(unix)'.dependencies]
|
|
||||||
nix = { version = "0.26.2", features = ["resource"] }
|
|
||||||
|
|
||||||
[features]
|
[features]
|
||||||
default = ["conduit_bin", "backend_sqlite", "backend_rocksdb", "systemd"]
|
default = ["conduit_bin", "backend_sqlite", "backend_rocksdb", "jemalloc"]
|
||||||
#backend_sled = ["sled"]
|
backend_sled = ["sled"]
|
||||||
backend_persy = ["persy", "parking_lot"]
|
backend_persy = ["persy", "parking_lot"]
|
||||||
backend_sqlite = ["sqlite"]
|
backend_sqlite = ["sqlite"]
|
||||||
#backend_heed = ["heed", "crossbeam"]
|
backend_heed = ["heed", "crossbeam"]
|
||||||
backend_rocksdb = ["rocksdb"]
|
backend_rocksdb = ["rocksdb"]
|
||||||
jemalloc = ["tikv-jemalloc-ctl", "tikv-jemallocator"]
|
jemalloc = ["tikv-jemalloc-ctl", "tikv-jemallocator"]
|
||||||
sqlite = ["rusqlite", "parking_lot", "tokio/signal"]
|
sqlite = ["rusqlite", "parking_lot", "tokio/signal"]
|
||||||
conduit_bin = ["axum"]
|
conduit_bin = [] # TODO: add rocket to this when it is optional
|
||||||
systemd = ["sd-notify"]
|
|
||||||
|
|
||||||
[[bin]]
|
[[bin]]
|
||||||
name = "conduit"
|
name = "conduit"
|
||||||
|
@ -152,7 +121,7 @@ instead of a server that has high scalability."""
|
||||||
section = "net"
|
section = "net"
|
||||||
priority = "optional"
|
priority = "optional"
|
||||||
assets = [
|
assets = [
|
||||||
["debian/README.md", "usr/share/doc/matrix-conduit/README.Debian", "644"],
|
["debian/README.Debian", "usr/share/doc/matrix-conduit/", "644"],
|
||||||
["README.md", "usr/share/doc/matrix-conduit/", "644"],
|
["README.md", "usr/share/doc/matrix-conduit/", "644"],
|
||||||
["target/release/conduit", "usr/sbin/matrix-conduit", "755"],
|
["target/release/conduit", "usr/sbin/matrix-conduit", "755"],
|
||||||
]
|
]
|
||||||
|
|
23
Cross.toml
Normal file
23
Cross.toml
Normal file
|
@ -0,0 +1,23 @@
|
||||||
|
[build.env]
|
||||||
|
# CI uses an S3 endpoint to store sccache artifacts, so their config needs to
|
||||||
|
# be available in the cross container as well
|
||||||
|
passthrough = [
|
||||||
|
"RUSTC_WRAPPER",
|
||||||
|
"AWS_ACCESS_KEY_ID",
|
||||||
|
"AWS_SECRET_ACCESS_KEY",
|
||||||
|
"SCCACHE_BUCKET",
|
||||||
|
"SCCACHE_ENDPOINT",
|
||||||
|
"SCCACHE_S3_USE_SSL",
|
||||||
|
]
|
||||||
|
|
||||||
|
[target.aarch64-unknown-linux-musl]
|
||||||
|
image = "registry.gitlab.com/jfowl/conduit-containers/rust-cross-aarch64-unknown-linux-musl:latest"
|
||||||
|
|
||||||
|
[target.arm-unknown-linux-musleabihf]
|
||||||
|
image = "registry.gitlab.com/jfowl/conduit-containers/rust-cross-arm-unknown-linux-musleabihf:latest"
|
||||||
|
|
||||||
|
[target.armv7-unknown-linux-musleabihf]
|
||||||
|
image = "registry.gitlab.com/jfowl/conduit-containers/rust-cross-armv7-unknown-linux-musleabihf:latest"
|
||||||
|
|
||||||
|
[target.x86_64-unknown-linux-musl]
|
||||||
|
image = "registry.gitlab.com/jfowl/conduit-containers/rust-cross-x86_64-unknown-linux-musl:latest"
|
203
DEPLOY.md
203
DEPLOY.md
|
@ -2,113 +2,62 @@
|
||||||
|
|
||||||
> ## Getting help
|
> ## Getting help
|
||||||
>
|
>
|
||||||
> If you run into any problems while setting up Conduit, write an email to `conduit@koesters.xyz`, ask us
|
> If you run into any problems while setting up Conduit, write an email to `timo@koesters.xyz`, ask us
|
||||||
> in `#conduit:fachschaften.org` or [open an issue on GitLab](https://gitlab.com/famedly/conduit/-/issues/new).
|
> in `#conduit:fachschaften.org` or [open an issue on GitLab](https://gitlab.com/famedly/conduit/-/issues/new).
|
||||||
|
|
||||||
## Installing Conduit
|
## Installing Conduit
|
||||||
|
|
||||||
Although you might be able to compile Conduit for Windows, we do recommend running it on a Linux server. We therefore
|
Although you might be able to compile Conduit for Windows, we do recommend running it on a linux server. We therefore
|
||||||
only offer Linux binaries.
|
only offer Linux binaries.
|
||||||
|
|
||||||
You may simply download the binary that fits your machine. Run `uname -m` to see what you need. Now copy the appropriate url:
|
You may simply download the binary that fits your machine. Run `uname -m` to see what you need. Now copy the right url:
|
||||||
|
|
||||||
**Stable versions:**
|
| CPU Architecture | Download stable version | Download development version |
|
||||||
|
| ------------------------------------------- | ------------------------------ | ---------------------------- |
|
||||||
|
| x84_64 / amd64 (Most servers and computers) | [Download][x84_64-musl-master] | [Download][x84_64-musl-next] |
|
||||||
|
| armv6 | [Download][armv6-musl-master] | [Download][armv6-musl-next] |
|
||||||
|
| armv7 (e.g. Raspberry Pi by default) | [Download][armv7-musl-master] | [Download][armv7-musl-next] |
|
||||||
|
| armv8 / aarch64 | [Download][armv8-musl-master] | [Download][armv8-musl-next] |
|
||||||
|
|
||||||
| CPU Architecture | Download stable version |
|
[x84_64-musl-master]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/conduit-x86_64-unknown-linux-musl?job=build:release:cargo:x86_64-unknown-linux-musl
|
||||||
| ------------------------------------------- | --------------------------------------------------------------- |
|
[armv6-musl-master]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/conduit-arm-unknown-linux-musleabihf?job=build:release:cargo:arm-unknown-linux-musleabihf
|
||||||
| x84_64 / amd64 (Most servers and computers) | [Binary][x84_64-glibc-master] / [.deb][x84_64-glibc-master-deb] |
|
[armv7-musl-master]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/conduit-armv7-unknown-linux-musleabihf?job=build:release:cargo:armv7-unknown-linux-musleabihf
|
||||||
| armv7 (e.g. Raspberry Pi by default) | [Binary][armv7-glibc-master] / [.deb][armv7-glibc-master-deb] |
|
[armv8-musl-master]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/conduit-aarch64-unknown-linux-musl?job=build:release:cargo:aarch64-unknown-linux-musl
|
||||||
| armv8 / aarch64 | [Binary][armv8-glibc-master] / [.deb][armv8-glibc-master-deb] |
|
[x84_64-musl-next]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/next/raw/conduit-x86_64-unknown-linux-musl?job=build:release:cargo:x86_64-unknown-linux-musl
|
||||||
|
[armv6-musl-next]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/next/raw/conduit-arm-unknown-linux-musleabihf?job=build:release:cargo:arm-unknown-linux-musleabihf
|
||||||
These builds were created on and linked against the glibc version shipped with Debian bullseye.
|
[armv7-musl-next]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/next/raw/conduit-armv7-unknown-linux-musleabihf?job=build:release:cargo:armv7-unknown-linux-musleabihf
|
||||||
If you use a system with an older glibc version (e.g. RHEL8), you might need to compile Conduit yourself.
|
[armv8-musl-next]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/next/raw/conduit-aarch64-unknown-linux-musl?job=build:release:cargo:aarch64-unknown-linux-musl
|
||||||
|
|
||||||
[x84_64-glibc-master]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/build-output/linux_amd64/conduit?job=docker:master
|
|
||||||
[armv7-glibc-master]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/build-output/linux_arm_v7/conduit?job=docker:master
|
|
||||||
[armv8-glibc-master]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/build-output/linux_arm64/conduit?job=docker:master
|
|
||||||
[x84_64-glibc-master-deb]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/build-output/linux_amd64/conduit.deb?job=docker:master
|
|
||||||
[armv7-glibc-master-deb]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/build-output/linux_arm_v7/conduit.deb?job=docker:master
|
|
||||||
[armv8-glibc-master-deb]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/build-output/linux_arm64/conduit.deb?job=docker:master
|
|
||||||
|
|
||||||
**Latest versions:**
|
|
||||||
|
|
||||||
| Target | Type | Download |
|
|
||||||
|-|-|-|
|
|
||||||
| `x86_64-unknown-linux-gnu` | Dynamically linked Debian package | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/next/raw/conduit.deb?job=debian:x86_64-unknown-linux-gnu) |
|
|
||||||
| `x86_64-unknown-linux-musl` | Statically linked binary | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/next/raw/conduit?job=static:x86_64-unknown-linux-musl) |
|
|
||||||
| `aarch64-unknown-linux-musl` | Statically linked binary | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/next/raw/conduit?job=static:aarch64-unknown-linux-musl) |
|
|
||||||
| `x86_64-unknown-linux-musl` | OCI image | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/next/raw/oci-image-amd64.tar.gz?job=oci-image:x86_64-unknown-linux-musl) |
|
|
||||||
| `aarch64-unknown-linux-musl` | OCI image | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/next/raw/oci-image-arm64v8.tar.gz?job=oci-image:aarch64-unknown-linux-musl) |
|
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
$ sudo wget -O /usr/local/bin/matrix-conduit <url>
|
$ sudo wget -O /usr/local/bin/matrix-conduit <url>
|
||||||
$ sudo chmod +x /usr/local/bin/matrix-conduit
|
$ sudo chmod +x /usr/local/bin/matrix-conduit
|
||||||
```
|
```
|
||||||
|
|
||||||
Alternatively, you may compile the binary yourself. First, install any dependencies:
|
Alternatively, you may compile the binary yourself
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
# Debian
|
|
||||||
$ sudo apt install libclang-dev build-essential
|
$ sudo apt install libclang-dev build-essential
|
||||||
|
|
||||||
# RHEL
|
|
||||||
$ sudo dnf install clang
|
|
||||||
```
|
```
|
||||||
Then, `cd` into the source tree of conduit-next and run:
|
|
||||||
```bash
|
```bash
|
||||||
$ cargo build --release
|
$ cargo build --release
|
||||||
```
|
```
|
||||||
|
|
||||||
If you want to cross compile Conduit to another architecture, read the guide below.
|
Note that this currently requires Rust 1.50.
|
||||||
|
|
||||||
<details>
|
If you want to cross compile Conduit to another architecture, read the [Cross-Compile Guide](cross/README.md).
|
||||||
<summary>Cross compilation</summary>
|
|
||||||
|
|
||||||
As easiest way to compile conduit for another platform [cross-rs](https://github.com/cross-rs/cross) is recommended, so install it first.
|
|
||||||
|
|
||||||
In order to use RockDB as storage backend append `-latomic` to linker flags.
|
|
||||||
|
|
||||||
For example, to build a binary for Raspberry Pi Zero W (ARMv6) you need `arm-unknown-linux-gnueabihf` as compilation
|
|
||||||
target.
|
|
||||||
|
|
||||||
```bash
|
|
||||||
git clone https://gitlab.com/famedly/conduit.git
|
|
||||||
cd conduit
|
|
||||||
export RUSTFLAGS='-C link-arg=-lgcc -Clink-arg=-latomic -Clink-arg=-static-libgcc'
|
|
||||||
cross build --release --no-default-features --features conduit_bin,backend_rocksdb,jemalloc --target=arm-unknown-linux-gnueabihf
|
|
||||||
```
|
|
||||||
</details>
|
|
||||||
|
|
||||||
## Adding a Conduit user
|
## Adding a Conduit user
|
||||||
|
|
||||||
While Conduit can run as any user it is usually better to use dedicated users for different services. This also allows
|
While Conduit can run as any user it is usually better to use dedicated users for different services. This also allows
|
||||||
you to make sure that the file permissions are correctly set up.
|
you to make sure that the file permissions are correctly set up.
|
||||||
|
|
||||||
In Debian or RHEL, you can use this command to create a Conduit user:
|
In Debian you can use this command to create a Conduit user:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
sudo adduser --system conduit --group --disabled-login --no-create-home
|
sudo adduser --system conduit --no-create-home
|
||||||
```
|
```
|
||||||
|
|
||||||
## Forwarding ports in the firewall or the router
|
|
||||||
|
|
||||||
Conduit uses the ports 443 and 8448 both of which need to be open in the firewall.
|
|
||||||
|
|
||||||
If Conduit runs behind a router or in a container and has a different public IP address than the host system these public ports need to be forwarded directly or indirectly to the port mentioned in the config.
|
|
||||||
|
|
||||||
## Optional: Avoid port 8448
|
|
||||||
|
|
||||||
If Conduit runs behind Cloudflare reverse proxy, which doesn't support port 8448 on free plans, [delegation](https://matrix-org.github.io/synapse/latest/delegate.html) can be set up to have federation traffic routed to port 443:
|
|
||||||
```apache
|
|
||||||
# .well-known delegation on Apache
|
|
||||||
<Files "/.well-known/matrix/server">
|
|
||||||
ErrorDocument 200 '{"m.server": "your.server.name:443"}'
|
|
||||||
Header always set Content-Type application/json
|
|
||||||
Header always set Access-Control-Allow-Origin *
|
|
||||||
</Files>
|
|
||||||
```
|
|
||||||
[SRV DNS record](https://spec.matrix.org/latest/server-server-api/#resolving-server-names) delegation is also [possible](https://www.cloudflare.com/en-gb/learning/dns/dns-records/dns-srv-record/).
|
|
||||||
|
|
||||||
## Setting up a systemd service
|
## Setting up a systemd service
|
||||||
|
|
||||||
Now we'll set up a systemd service for Conduit, so it's easy to start/stop Conduit and set it to autostart when your
|
Now we'll set up a systemd service for Conduit, so it's easy to start/stop Conduit and set it to autostart when your
|
||||||
|
@ -123,7 +72,7 @@ After=network.target
|
||||||
[Service]
|
[Service]
|
||||||
Environment="CONDUIT_CONFIG=/etc/matrix-conduit/conduit.toml"
|
Environment="CONDUIT_CONFIG=/etc/matrix-conduit/conduit.toml"
|
||||||
User=conduit
|
User=conduit
|
||||||
Group=conduit
|
Group=nogroup
|
||||||
Restart=always
|
Restart=always
|
||||||
ExecStart=/usr/local/bin/matrix-conduit
|
ExecStart=/usr/local/bin/matrix-conduit
|
||||||
|
|
||||||
|
@ -141,34 +90,27 @@ $ sudo systemctl daemon-reload
|
||||||
|
|
||||||
Now we need to create the Conduit's config file in `/etc/matrix-conduit/conduit.toml`. Paste this in **and take a moment
|
Now we need to create the Conduit's config file in `/etc/matrix-conduit/conduit.toml`. Paste this in **and take a moment
|
||||||
to read it. You need to change at least the server name.**
|
to read it. You need to change at least the server name.**
|
||||||
You can also choose to use a different database backend, but right now only `rocksdb` and `sqlite` are recommended.
|
|
||||||
|
|
||||||
```toml
|
```toml
|
||||||
[global]
|
[global]
|
||||||
# The server_name is the pretty name of this server. It is used as a suffix for user
|
# The server_name is the name of this server. It is used as a suffix for user
|
||||||
# and room ids. Examples: matrix.org, conduit.rs
|
# and room ids. Examples: matrix.org, conduit.rs
|
||||||
|
# The Conduit server needs to be reachable at https://your.server.name/ on port
|
||||||
# The Conduit server needs all /_matrix/ requests to be reachable at
|
# 443 (client-server) and 8448 (federation) OR you can create /.well-known
|
||||||
# https://your.server.name/ on port 443 (client-server) and 8448 (federation).
|
# files to redirect requests. See
|
||||||
|
|
||||||
# If that's not possible for you, you can create /.well-known files to redirect
|
|
||||||
# requests. See
|
|
||||||
# https://matrix.org/docs/spec/client_server/latest#get-well-known-matrix-client
|
# https://matrix.org/docs/spec/client_server/latest#get-well-known-matrix-client
|
||||||
# and
|
# and https://matrix.org/docs/spec/server_server/r0.1.4#get-well-known-matrix-server
|
||||||
# https://matrix.org/docs/spec/server_server/r0.1.4#get-well-known-matrix-server
|
|
||||||
# for more information
|
# for more information
|
||||||
|
|
||||||
# YOU NEED TO EDIT THIS
|
# YOU NEED TO EDIT THIS
|
||||||
#server_name = "your.server.name"
|
#server_name = "your.server.name"
|
||||||
|
|
||||||
# This is the only directory where Conduit will save its data
|
# This is the only directory where Conduit will save its data
|
||||||
database_path = "/var/lib/matrix-conduit/"
|
database_path = "/var/lib/matrix-conduit/conduit_db"
|
||||||
database_backend = "rocksdb"
|
|
||||||
|
|
||||||
# The port Conduit will be running on. You need to set up a reverse proxy in
|
# The port Conduit will be running on. You need to set up a reverse proxy in
|
||||||
# your web server (e.g. apache or nginx), so all requests to /_matrix on port
|
# your web server (e.g. apache or nginx), so all requests to /_matrix on port
|
||||||
# 443 and 8448 will be forwarded to the Conduit instance running on this port
|
# 443 and 8448 will be forwarded to the Conduit instance running on this port
|
||||||
# Docker users: Don't change this, you'll need to map an external port to this.
|
|
||||||
port = 6167
|
port = 6167
|
||||||
|
|
||||||
# Max size for uploads
|
# Max size for uploads
|
||||||
|
@ -177,51 +119,47 @@ max_request_size = 20_000_000 # in bytes
|
||||||
# Enables registration. If set to false, no users can register on this server.
|
# Enables registration. If set to false, no users can register on this server.
|
||||||
allow_registration = true
|
allow_registration = true
|
||||||
|
|
||||||
|
# Disable encryption, so no new encrypted rooms can be created
|
||||||
|
# Note: existing rooms will continue to work
|
||||||
|
allow_encryption = true
|
||||||
allow_federation = true
|
allow_federation = true
|
||||||
allow_check_for_updates = true
|
|
||||||
|
|
||||||
# Server to get public keys from. You probably shouldn't change this
|
|
||||||
trusted_servers = ["matrix.org"]
|
trusted_servers = ["matrix.org"]
|
||||||
|
|
||||||
#max_concurrent_requests = 100 # How many requests Conduit sends to other servers at the same time
|
#max_concurrent_requests = 100 # How many requests Conduit sends to other servers at the same time
|
||||||
#log = "warn,state_res=warn,rocket=off,_=off,sled=off"
|
#workers = 4 # default: cpu core count * 2
|
||||||
|
|
||||||
address = "127.0.0.1" # This makes sure Conduit can only be reached using the reverse proxy
|
address = "127.0.0.1" # This makes sure Conduit can only be reached using the reverse proxy
|
||||||
#address = "0.0.0.0" # If Conduit is running in a container, make sure the reverse proxy (ie. Traefik) can reach it.
|
|
||||||
|
# The total amount of memory that the database will use.
|
||||||
|
#db_cache_capacity_mb = 200
|
||||||
```
|
```
|
||||||
|
|
||||||
## Setting the correct file permissions
|
## Setting the correct file permissions
|
||||||
|
|
||||||
As we are using a Conduit specific user we need to allow it to read the config. To do that you can run this command on
|
As we are using a Conduit specific user we need to allow it to read the config. To do that you can run this command on
|
||||||
Debian or RHEL:
|
Debian:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
sudo chown -R root:root /etc/matrix-conduit
|
sudo chown -R conduit:nogroup /etc/matrix-conduit
|
||||||
sudo chmod 755 /etc/matrix-conduit
|
|
||||||
```
|
```
|
||||||
|
|
||||||
If you use the default database path you also need to run this:
|
If you use the default database path you also need to run this:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
sudo mkdir -p /var/lib/matrix-conduit/
|
sudo mkdir -p /var/lib/matrix-conduit/conduit_db
|
||||||
sudo chown -R conduit:conduit /var/lib/matrix-conduit/
|
sudo chown -R conduit:nogroup /var/lib/matrix-conduit/conduit_db
|
||||||
sudo chmod 700 /var/lib/matrix-conduit/
|
|
||||||
```
|
```
|
||||||
|
|
||||||
## Setting up the Reverse Proxy
|
## Setting up the Reverse Proxy
|
||||||
|
|
||||||
This depends on whether you use Apache, Caddy, Nginx or another web server.
|
This depends on whether you use Apache, Nginx or another web server.
|
||||||
|
|
||||||
### Apache
|
### Apache
|
||||||
|
|
||||||
Create `/etc/apache2/sites-enabled/050-conduit.conf` and copy-and-paste this:
|
Create `/etc/apache2/sites-enabled/050-conduit.conf` and copy-and-paste this:
|
||||||
|
|
||||||
```apache
|
```apache
|
||||||
# Requires mod_proxy and mod_proxy_http
|
|
||||||
#
|
|
||||||
# On Apache instance compiled from source,
|
|
||||||
# paste into httpd-ssl.conf or httpd.conf
|
|
||||||
|
|
||||||
Listen 8448
|
Listen 8448
|
||||||
|
|
||||||
<VirtualHost *:443 *:8448>
|
<VirtualHost *:443 *:8448>
|
||||||
|
@ -229,7 +167,7 @@ Listen 8448
|
||||||
ServerName your.server.name # EDIT THIS
|
ServerName your.server.name # EDIT THIS
|
||||||
|
|
||||||
AllowEncodedSlashes NoDecode
|
AllowEncodedSlashes NoDecode
|
||||||
ProxyPass /_matrix/ http://127.0.0.1:6167/_matrix/ timeout=300 nocanon
|
ProxyPass /_matrix/ http://127.0.0.1:6167/_matrix/ nocanon
|
||||||
ProxyPassReverse /_matrix/ http://127.0.0.1:6167/_matrix/
|
ProxyPassReverse /_matrix/ http://127.0.0.1:6167/_matrix/
|
||||||
|
|
||||||
</VirtualHost>
|
</VirtualHost>
|
||||||
|
@ -238,27 +176,7 @@ ProxyPassReverse /_matrix/ http://127.0.0.1:6167/_matrix/
|
||||||
**You need to make some edits again.** When you are done, run
|
**You need to make some edits again.** When you are done, run
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
# Debian
|
|
||||||
$ sudo systemctl reload apache2
|
$ sudo systemctl reload apache2
|
||||||
|
|
||||||
# Installed from source
|
|
||||||
$ sudo apachectl -k graceful
|
|
||||||
```
|
|
||||||
|
|
||||||
### Caddy
|
|
||||||
|
|
||||||
Create `/etc/caddy/conf.d/conduit_caddyfile` and enter this (substitute for your server name).
|
|
||||||
|
|
||||||
```caddy
|
|
||||||
your.server.name, your.server.name:8448 {
|
|
||||||
reverse_proxy /_matrix/* 127.0.0.1:6167
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
That's it! Just start or enable the service and you're set.
|
|
||||||
|
|
||||||
```bash
|
|
||||||
$ sudo systemctl enable caddy
|
|
||||||
```
|
```
|
||||||
|
|
||||||
### Nginx
|
### Nginx
|
||||||
|
@ -274,15 +192,10 @@ server {
|
||||||
server_name your.server.name; # EDIT THIS
|
server_name your.server.name; # EDIT THIS
|
||||||
merge_slashes off;
|
merge_slashes off;
|
||||||
|
|
||||||
# Nginx defaults to only allow 1MB uploads
|
|
||||||
# Increase this to allow posting large files such as videos
|
|
||||||
client_max_body_size 20M;
|
|
||||||
|
|
||||||
location /_matrix/ {
|
location /_matrix/ {
|
||||||
proxy_pass http://127.0.0.1:6167;
|
proxy_pass http://127.0.0.1:6167$request_uri;
|
||||||
proxy_set_header Host $http_host;
|
proxy_set_header Host $http_host;
|
||||||
proxy_buffering off;
|
proxy_buffering off;
|
||||||
proxy_read_timeout 5m;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
ssl_certificate /etc/letsencrypt/live/your.server.name/fullchain.pem; # EDIT THIS
|
ssl_certificate /etc/letsencrypt/live/your.server.name/fullchain.pem; # EDIT THIS
|
||||||
|
@ -300,21 +213,11 @@ $ sudo systemctl reload nginx
|
||||||
|
|
||||||
## SSL Certificate
|
## SSL Certificate
|
||||||
|
|
||||||
If you chose Caddy as your web proxy SSL certificates are handled automatically and you can skip this step.
|
The easiest way to get an SSL certificate, if you don't have one already, is to install `certbot` and run this:
|
||||||
|
|
||||||
The easiest way to get an SSL certificate, if you don't have one already, is to [install](https://certbot.eff.org/instructions) `certbot` and run this:
|
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
# To use ECC for the private key,
|
|
||||||
# paste into /etc/letsencrypt/cli.ini:
|
|
||||||
# key-type = ecdsa
|
|
||||||
# elliptic-curve = secp384r1
|
|
||||||
|
|
||||||
$ sudo certbot -d your.server.name
|
$ sudo certbot -d your.server.name
|
||||||
```
|
```
|
||||||
[Automated renewal](https://eff-certbot.readthedocs.io/en/stable/using.html#automated-renewals) is usually preconfigured.
|
|
||||||
|
|
||||||
If using Cloudflare, configure instead the edge and origin certificates in dashboard. In case you’re already running a website on the same Apache server, you can just copy-and-paste the SSL configuration from your main virtual host on port 443 into the above-mentioned vhost.
|
|
||||||
|
|
||||||
## You're done!
|
## You're done!
|
||||||
|
|
||||||
|
@ -338,20 +241,8 @@ You can also use these commands as a quick health check.
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
$ curl https://your.server.name/_matrix/client/versions
|
$ curl https://your.server.name/_matrix/client/versions
|
||||||
|
|
||||||
# If using port 8448
|
|
||||||
$ curl https://your.server.name:8448/_matrix/client/versions
|
$ curl https://your.server.name:8448/_matrix/client/versions
|
||||||
```
|
```
|
||||||
|
|
||||||
- To check if your server can talk with other homeservers, you can use the [Matrix Federation Tester](https://federationtester.matrix.org/).
|
- To check if your server can talk with other homeservers, you can use the [Matrix Federation Tester](https://federationtester.matrix.org/)
|
||||||
If you can register but cannot join federated rooms check your config again and also check if the port 8448 is open and forwarded correctly.
|
- If you want to set up an appservice, take a look at the [Appservice Guide](APPSERVICES.md).
|
||||||
|
|
||||||
# What's next?
|
|
||||||
|
|
||||||
## Audio/Video calls
|
|
||||||
|
|
||||||
For Audio/Video call functionality see the [TURN Guide](TURN.md).
|
|
||||||
|
|
||||||
## Appservices
|
|
||||||
|
|
||||||
If you want to set up an appservice, take a look at the [Appservice Guide](APPSERVICES.md).
|
|
||||||
|
|
80
Dockerfile
Normal file
80
Dockerfile
Normal file
|
@ -0,0 +1,80 @@
|
||||||
|
# syntax=docker/dockerfile:1
|
||||||
|
FROM docker.io/rust:1.58-bullseye AS builder
|
||||||
|
WORKDIR /usr/src/conduit
|
||||||
|
|
||||||
|
# Install required packages to build Conduit and it's dependencies
|
||||||
|
RUN apt update && apt -y install libclang-dev
|
||||||
|
|
||||||
|
# == Build dependencies without our own code separately for caching ==
|
||||||
|
#
|
||||||
|
# Need a fake main.rs since Cargo refuses to build anything otherwise.
|
||||||
|
#
|
||||||
|
# See https://github.com/rust-lang/cargo/issues/2644 for a Cargo feature
|
||||||
|
# request that would allow just dependencies to be compiled, presumably
|
||||||
|
# regardless of whether source files are available.
|
||||||
|
RUN mkdir src && touch src/lib.rs && echo 'fn main() {}' > src/main.rs
|
||||||
|
COPY Cargo.toml Cargo.lock ./
|
||||||
|
RUN cargo build --release && rm -r src
|
||||||
|
|
||||||
|
# Copy over actual Conduit sources
|
||||||
|
COPY src src
|
||||||
|
|
||||||
|
# main.rs and lib.rs need their timestamp updated for this to work correctly since
|
||||||
|
# otherwise the build with the fake main.rs from above is newer than the
|
||||||
|
# source files (COPY preserves timestamps).
|
||||||
|
#
|
||||||
|
# Builds conduit and places the binary at /usr/src/conduit/target/release/conduit
|
||||||
|
RUN touch src/main.rs && touch src/lib.rs && cargo build --release
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------------------------------------------
|
||||||
|
# Stuff below this line actually ends up in the resulting docker image
|
||||||
|
# ---------------------------------------------------------------------------------------------------------------
|
||||||
|
FROM docker.io/debian:bullseye-slim AS runner
|
||||||
|
|
||||||
|
# Standard port on which Conduit launches.
|
||||||
|
# You still need to map the port when using the docker command or docker-compose.
|
||||||
|
EXPOSE 6167
|
||||||
|
|
||||||
|
# Note from @jfowl: I would like to remove the config file in the future and just have the Docker version be configured with envs.
|
||||||
|
ENV CONDUIT_CONFIG="/srv/conduit/conduit.toml" \
|
||||||
|
CONDUIT_PORT=6167
|
||||||
|
|
||||||
|
# Conduit needs:
|
||||||
|
# ca-certificates: for https
|
||||||
|
# iproute2 & wget: for the healthcheck script
|
||||||
|
RUN apt update && apt -y install \
|
||||||
|
ca-certificates \
|
||||||
|
iproute2 \
|
||||||
|
wget \
|
||||||
|
&& rm -rf /var/lib/apt/lists/*
|
||||||
|
|
||||||
|
# Created directory for the database and media files
|
||||||
|
RUN mkdir -p /srv/conduit/.local/share/conduit
|
||||||
|
|
||||||
|
# Test if Conduit is still alive, uses the same endpoint as Element
|
||||||
|
COPY ./docker/healthcheck.sh /srv/conduit/healthcheck.sh
|
||||||
|
HEALTHCHECK --start-period=5s --interval=5s CMD ./healthcheck.sh
|
||||||
|
|
||||||
|
# Copy over the actual Conduit binary from the builder stage
|
||||||
|
COPY --from=builder /usr/src/conduit/target/release/conduit /srv/conduit/conduit
|
||||||
|
|
||||||
|
# Improve security: Don't run stuff as root, that does not need to run as root
|
||||||
|
# Add 'conduit' user and group (100:82). The UID:GID choice is to be compatible
|
||||||
|
# with previous, Alpine-based containers, where the user and group were both
|
||||||
|
# named 'www-data'.
|
||||||
|
RUN set -x ; \
|
||||||
|
groupadd -r -g 82 conduit ; \
|
||||||
|
useradd -r -M -d /srv/conduit -o -u 100 -g conduit conduit && exit 0 ; exit 1
|
||||||
|
|
||||||
|
# Change ownership of Conduit files to conduit user and group and make the healthcheck executable:
|
||||||
|
RUN chown -cR conduit:conduit /srv/conduit && \
|
||||||
|
chmod +x /srv/conduit/healthcheck.sh
|
||||||
|
|
||||||
|
# Change user to conduit, no root permissions afterwards:
|
||||||
|
USER conduit
|
||||||
|
# Set container home directory
|
||||||
|
WORKDIR /srv/conduit
|
||||||
|
|
||||||
|
# Run Conduit and print backtraces on panics
|
||||||
|
ENV RUST_BACKTRACE=1
|
||||||
|
ENTRYPOINT [ "/srv/conduit/conduit" ]
|
34
README.md
34
README.md
|
@ -1,11 +1,6 @@
|
||||||
# Conduit
|
# Conduit
|
||||||
### A Matrix homeserver written in Rust
|
|
||||||
|
|
||||||
#### What is Matrix?
|
### A Matrix homeserver written in Rust
|
||||||
[Matrix](https://matrix.org) is an open network for secure and decentralized
|
|
||||||
communication. Users from every Matrix homeserver can chat with users from all
|
|
||||||
other Matrix servers. You can even use bridges (also called Matrix appservices)
|
|
||||||
to communicate with users outside of Matrix, like a community on Discord.
|
|
||||||
|
|
||||||
#### What is the goal?
|
#### What is the goal?
|
||||||
|
|
||||||
|
@ -16,28 +11,29 @@ friends or company.
|
||||||
#### Can I try it out?
|
#### Can I try it out?
|
||||||
|
|
||||||
Yes! You can test our Conduit instance by opening a Matrix client (<https://app.element.io> or Element Android for
|
Yes! You can test our Conduit instance by opening a Matrix client (<https://app.element.io> or Element Android for
|
||||||
example) and registering on the `conduit.rs` homeserver. The registration token is "for_testing_only". Don't share personal information.
|
example) and registering on the `conduit.rs` homeserver.
|
||||||
|
|
||||||
Server hosting for conduit.rs is donated by the Matrix.org Foundation.
|
It is hosted on a ODROID HC 2 with 2GB RAM and a SAMSUNG Exynos 5422 CPU, which
|
||||||
|
was used in the Samsung Galaxy S5. It joined many big rooms including Matrix
|
||||||
|
HQ.
|
||||||
|
|
||||||
#### What is the current status?
|
#### What is the current status?
|
||||||
|
|
||||||
Conduit is Beta, meaning you can join and participate in most
|
As of 2021-09-01, Conduit is Beta, meaning you can join and participate in most
|
||||||
Matrix rooms, but not all features are supported and you might run into bugs
|
Matrix rooms, but not all features are supported and you might run into bugs
|
||||||
from time to time.
|
from time to time.
|
||||||
|
|
||||||
There are still a few important features missing:
|
There are still a few important features missing:
|
||||||
|
|
||||||
- E2EE emoji comparison over federation (E2EE chat works)
|
- E2EE verification over federation
|
||||||
- Outgoing read receipts, typing, presence over federation (incoming works)
|
- Outgoing read receipts, typing, presence over federation
|
||||||
|
|
||||||
Check out the [Conduit 1.0 Release Milestone](https://gitlab.com/famedly/conduit/-/milestones/3).
|
Check out the [Conduit 1.0 Release Milestone](https://gitlab.com/famedly/conduit/-/milestones/3).
|
||||||
|
|
||||||
#### How can I deploy my own?
|
#### How can I deploy my own?
|
||||||
|
|
||||||
- Simple install (this was tested the most): [DEPLOY.md](DEPLOY.md)
|
- Simple install (this was tested the most): [DEPLOY.md](DEPLOY.md)
|
||||||
- Debian package: [debian/README.md](debian/README.md)
|
- Debian package: [debian/README.Debian](debian/README.Debian)
|
||||||
- Nix/NixOS: [nix/README.md](nix/README.md)
|
|
||||||
- Docker: [docker/README.md](docker/README.md)
|
- Docker: [docker/README.md](docker/README.md)
|
||||||
|
|
||||||
If you want to connect an Appservice to Conduit, take a look at [APPSERVICES.md](APPSERVICES.md).
|
If you want to connect an Appservice to Conduit, take a look at [APPSERVICES.md](APPSERVICES.md).
|
||||||
|
@ -53,20 +49,12 @@ If you want to connect an Appservice to Conduit, take a look at [APPSERVICES.md]
|
||||||
|
|
||||||
#### Thanks to
|
#### Thanks to
|
||||||
|
|
||||||
Thanks to FUTO, Famedly, Prototype Fund (DLR and German BMBF) and all individuals for financially supporting this project.
|
Thanks to Famedly, Prototype Fund (DLR and German BMBF) and all other individuals for financially supporting this project.
|
||||||
|
|
||||||
Thanks to the contributors to Conduit and all libraries we use, for example:
|
Thanks to the contributors to Conduit and all libraries we use, for example:
|
||||||
|
|
||||||
- Ruma: A clean library for the Matrix Spec in Rust
|
- Ruma: A clean library for the Matrix Spec in Rust
|
||||||
- axum: A modular web framework
|
- Rocket: A flexible web framework
|
||||||
|
|
||||||
#### Contact
|
|
||||||
|
|
||||||
If you run into any question, feel free to
|
|
||||||
- Ask us in `#conduit:fachschaften.org` on Matrix
|
|
||||||
- Write an E-Mail to `conduit@koesters.xyz`
|
|
||||||
- Send an direct message to `timokoesters@fachschaften.org` on Matrix
|
|
||||||
- [Open an issue on GitLab](https://gitlab.com/famedly/conduit/-/issues/new)
|
|
||||||
|
|
||||||
#### Donate
|
#### Donate
|
||||||
|
|
||||||
|
|
25
TURN.md
25
TURN.md
|
@ -1,25 +0,0 @@
|
||||||
# Setting up TURN/STURN
|
|
||||||
|
|
||||||
## General instructions
|
|
||||||
|
|
||||||
* It is assumed you have a [Coturn server](https://github.com/coturn/coturn) up and running. See [Synapse reference implementation](https://github.com/matrix-org/synapse/blob/develop/docs/turn-howto.md).
|
|
||||||
|
|
||||||
## Edit/Add a few settings to your existing conduit.toml
|
|
||||||
|
|
||||||
```
|
|
||||||
# Refer to your Coturn settings.
|
|
||||||
# `your.turn.url` has to match the REALM setting of your Coturn as well as `transport`.
|
|
||||||
turn_uris = ["turn:your.turn.url?transport=udp", "turn:your.turn.url?transport=tcp"]
|
|
||||||
|
|
||||||
# static-auth-secret of your turnserver
|
|
||||||
turn_secret = "ADD SECRET HERE"
|
|
||||||
|
|
||||||
# If you have your TURN server configured to use a username and password
|
|
||||||
# you can provide these information too. In this case comment out `turn_secret above`!
|
|
||||||
#turn_username = ""
|
|
||||||
#turn_password = ""
|
|
||||||
```
|
|
||||||
|
|
||||||
## Apply settings
|
|
||||||
|
|
||||||
Restart Conduit.
|
|
|
@ -1,37 +0,0 @@
|
||||||
#!/usr/bin/env bash
|
|
||||||
|
|
||||||
set -euo pipefail
|
|
||||||
|
|
||||||
# Path to Complement's source code
|
|
||||||
COMPLEMENT_SRC="$1"
|
|
||||||
|
|
||||||
# A `.jsonl` file to write test logs to
|
|
||||||
LOG_FILE="$2"
|
|
||||||
|
|
||||||
# A `.jsonl` file to write test results to
|
|
||||||
RESULTS_FILE="$3"
|
|
||||||
|
|
||||||
OCI_IMAGE="complement-conduit:dev"
|
|
||||||
|
|
||||||
env \
|
|
||||||
-C "$(git rev-parse --show-toplevel)" \
|
|
||||||
docker build \
|
|
||||||
--tag "$OCI_IMAGE" \
|
|
||||||
--file complement/Dockerfile \
|
|
||||||
.
|
|
||||||
|
|
||||||
# It's okay (likely, even) that `go test` exits nonzero
|
|
||||||
set +o pipefail
|
|
||||||
env \
|
|
||||||
-C "$COMPLEMENT_SRC" \
|
|
||||||
COMPLEMENT_BASE_IMAGE="$OCI_IMAGE" \
|
|
||||||
go test -json ./tests | tee "$LOG_FILE"
|
|
||||||
set -o pipefail
|
|
||||||
|
|
||||||
# Post-process the results into an easy-to-compare format
|
|
||||||
cat "$LOG_FILE" | jq -c '
|
|
||||||
select(
|
|
||||||
(.Action == "pass" or .Action == "fail" or .Action == "skip")
|
|
||||||
and .Test != null
|
|
||||||
) | {Action: .Action, Test: .Test}
|
|
||||||
' | sort > "$RESULTS_FILE"
|
|
|
@ -1,31 +0,0 @@
|
||||||
#!/usr/bin/env bash
|
|
||||||
|
|
||||||
set -euo pipefail
|
|
||||||
|
|
||||||
# The first argument must be the desired installable
|
|
||||||
INSTALLABLE="$1"
|
|
||||||
|
|
||||||
# Build the installable and forward any other arguments too
|
|
||||||
nix build "$@"
|
|
||||||
|
|
||||||
if [ ! -z ${ATTIC_TOKEN+x} ]; then
|
|
||||||
|
|
||||||
nix run --inputs-from . attic -- login \
|
|
||||||
conduit \
|
|
||||||
https://nix.computer.surgery/conduit \
|
|
||||||
"$ATTIC_TOKEN"
|
|
||||||
|
|
||||||
push_args=(
|
|
||||||
# Attic and its build dependencies
|
|
||||||
"$(nix path-info --inputs-from . attic)"
|
|
||||||
"$(nix path-info --inputs-from . attic --derivation)"
|
|
||||||
|
|
||||||
# The target installable and its build dependencies
|
|
||||||
"$(nix path-info "$INSTALLABLE" --derivation)"
|
|
||||||
"$(nix path-info "$INSTALLABLE")"
|
|
||||||
)
|
|
||||||
|
|
||||||
nix run --inputs-from . attic -- push conduit "${push_args[@]}"
|
|
||||||
else
|
|
||||||
echo "\$ATTIC_TOKEN is unset, skipping uploading to the binary cache"
|
|
||||||
fi
|
|
|
@ -1,45 +0,0 @@
|
||||||
FROM rust:1.75.0
|
|
||||||
|
|
||||||
WORKDIR /workdir
|
|
||||||
|
|
||||||
RUN apt-get update && apt-get install -y --no-install-recommends \
|
|
||||||
libclang-dev
|
|
||||||
|
|
||||||
COPY Cargo.toml Cargo.toml
|
|
||||||
COPY Cargo.lock Cargo.lock
|
|
||||||
COPY src src
|
|
||||||
RUN cargo build --release \
|
|
||||||
&& mv target/release/conduit conduit \
|
|
||||||
&& rm -rf target
|
|
||||||
|
|
||||||
# Install caddy
|
|
||||||
RUN apt-get update \
|
|
||||||
&& apt-get install -y \
|
|
||||||
debian-keyring \
|
|
||||||
debian-archive-keyring \
|
|
||||||
apt-transport-https \
|
|
||||||
curl \
|
|
||||||
&& curl -1sLf 'https://dl.cloudsmith.io/public/caddy/testing/gpg.key' \
|
|
||||||
| gpg --dearmor -o /usr/share/keyrings/caddy-testing-archive-keyring.gpg \
|
|
||||||
&& curl -1sLf 'https://dl.cloudsmith.io/public/caddy/testing/debian.deb.txt' \
|
|
||||||
| tee /etc/apt/sources.list.d/caddy-testing.list \
|
|
||||||
&& apt-get update \
|
|
||||||
&& apt-get install -y caddy
|
|
||||||
|
|
||||||
COPY conduit-example.toml conduit.toml
|
|
||||||
COPY complement/caddy.json caddy.json
|
|
||||||
|
|
||||||
ENV SERVER_NAME=localhost
|
|
||||||
ENV CONDUIT_CONFIG=/workdir/conduit.toml
|
|
||||||
|
|
||||||
RUN sed -i "s/port = 6167/port = 8008/g" conduit.toml
|
|
||||||
RUN echo "log = \"warn,_=off,sled=off\"" >> conduit.toml
|
|
||||||
RUN sed -i "s/address = \"127.0.0.1\"/address = \"0.0.0.0\"/g" conduit.toml
|
|
||||||
|
|
||||||
EXPOSE 8008 8448
|
|
||||||
|
|
||||||
CMD uname -a && \
|
|
||||||
sed -i "s/#server_name = \"your.server.name\"/server_name = \"${SERVER_NAME}\"/g" conduit.toml && \
|
|
||||||
sed -i "s/your.server.name/${SERVER_NAME}/g" caddy.json && \
|
|
||||||
caddy start --config caddy.json > /dev/null && \
|
|
||||||
/workdir/conduit
|
|
|
@ -1,11 +0,0 @@
|
||||||
# Complement
|
|
||||||
|
|
||||||
## What's that?
|
|
||||||
|
|
||||||
Have a look at [its repository](https://github.com/matrix-org/complement).
|
|
||||||
|
|
||||||
## How do I use it with Conduit?
|
|
||||||
|
|
||||||
The script at [`../bin/complement`](../bin/complement) has automation for this.
|
|
||||||
It takes a few command line arguments, you can read the script to find out what
|
|
||||||
those are.
|
|
|
@ -1,72 +0,0 @@
|
||||||
{
|
|
||||||
"logging": {
|
|
||||||
"logs": {
|
|
||||||
"default": {
|
|
||||||
"level": "WARN"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"apps": {
|
|
||||||
"http": {
|
|
||||||
"https_port": 8448,
|
|
||||||
"servers": {
|
|
||||||
"srv0": {
|
|
||||||
"listen": [":8448"],
|
|
||||||
"routes": [{
|
|
||||||
"match": [{
|
|
||||||
"host": ["your.server.name"]
|
|
||||||
}],
|
|
||||||
"handle": [{
|
|
||||||
"handler": "subroute",
|
|
||||||
"routes": [{
|
|
||||||
"handle": [{
|
|
||||||
"handler": "reverse_proxy",
|
|
||||||
"upstreams": [{
|
|
||||||
"dial": "127.0.0.1:8008"
|
|
||||||
}]
|
|
||||||
}]
|
|
||||||
}]
|
|
||||||
}],
|
|
||||||
"terminal": true
|
|
||||||
}],
|
|
||||||
"tls_connection_policies": [{
|
|
||||||
"match": {
|
|
||||||
"sni": ["your.server.name"]
|
|
||||||
}
|
|
||||||
}]
|
|
||||||
}
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"pki": {
|
|
||||||
"certificate_authorities": {
|
|
||||||
"local": {
|
|
||||||
"name": "Complement CA",
|
|
||||||
"root": {
|
|
||||||
"certificate": "/complement/ca/ca.crt",
|
|
||||||
"private_key": "/complement/ca/ca.key"
|
|
||||||
},
|
|
||||||
"intermediate": {
|
|
||||||
"certificate": "/complement/ca/ca.crt",
|
|
||||||
"private_key": "/complement/ca/ca.key"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"tls": {
|
|
||||||
"automation": {
|
|
||||||
"policies": [{
|
|
||||||
"subjects": ["your.server.name"],
|
|
||||||
"issuers": [{
|
|
||||||
"module": "internal"
|
|
||||||
}],
|
|
||||||
"on_demand": true
|
|
||||||
}, {
|
|
||||||
"issuers": [{
|
|
||||||
"module": "internal",
|
|
||||||
"ca": "local"
|
|
||||||
}]
|
|
||||||
}]
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
|
@ -1,10 +1,3 @@
|
||||||
# =============================================================================
|
|
||||||
# This is the official example config for Conduit.
|
|
||||||
# If you use it for your server, you will need to adjust it to your own needs.
|
|
||||||
# At the very least, change the server_name field!
|
|
||||||
# =============================================================================
|
|
||||||
|
|
||||||
|
|
||||||
[global]
|
[global]
|
||||||
# The server_name is the pretty name of this server. It is used as a suffix for user
|
# The server_name is the pretty name of this server. It is used as a suffix for user
|
||||||
# and room ids. Examples: matrix.org, conduit.rs
|
# and room ids. Examples: matrix.org, conduit.rs
|
||||||
|
@ -23,7 +16,7 @@
|
||||||
#server_name = "your.server.name"
|
#server_name = "your.server.name"
|
||||||
|
|
||||||
# This is the only directory where Conduit will save its data
|
# This is the only directory where Conduit will save its data
|
||||||
database_path = "/var/lib/matrix-conduit/"
|
database_path = "/var/lib/conduit/"
|
||||||
database_backend = "rocksdb"
|
database_backend = "rocksdb"
|
||||||
|
|
||||||
# The port Conduit will be running on. You need to set up a reverse proxy in
|
# The port Conduit will be running on. You need to set up a reverse proxy in
|
||||||
|
@ -38,20 +31,24 @@ max_request_size = 20_000_000 # in bytes
|
||||||
# Enables registration. If set to false, no users can register on this server.
|
# Enables registration. If set to false, no users can register on this server.
|
||||||
allow_registration = true
|
allow_registration = true
|
||||||
|
|
||||||
allow_federation = true
|
# Disable encryption, so no new encrypted rooms can be created
|
||||||
allow_check_for_updates = true
|
# Note: existing rooms will continue to work
|
||||||
|
#allow_encryption = false
|
||||||
|
#allow_federation = false
|
||||||
|
|
||||||
# Enable the display name lightning bolt on registration.
|
# Enable jaeger to support monitoring and troubleshooting through jaeger
|
||||||
enable_lightning_bolt = true
|
#allow_jaeger = false
|
||||||
|
|
||||||
# Servers listed here will be used to gather public keys of other servers.
|
|
||||||
# Generally, copying this exactly should be enough. (Currently, Conduit doesn't
|
|
||||||
# support batched key requests, so this list should only contain Synapse
|
|
||||||
# servers.)
|
|
||||||
trusted_servers = ["matrix.org"]
|
trusted_servers = ["matrix.org"]
|
||||||
|
|
||||||
#max_concurrent_requests = 100 # How many requests Conduit sends to other servers at the same time
|
#max_concurrent_requests = 100 # How many requests Conduit sends to other servers at the same time
|
||||||
#log = "warn,state_res=warn,rocket=off,_=off,sled=off"
|
#log = "info,state_res=warn,rocket=off,_=off,sled=off"
|
||||||
|
#workers = 4 # default: cpu core count * 2
|
||||||
|
|
||||||
address = "127.0.0.1" # This makes sure Conduit can only be reached using the reverse proxy
|
address = "127.0.0.1" # This makes sure Conduit can only be reached using the reverse proxy
|
||||||
#address = "0.0.0.0" # If Conduit is running in a container, make sure the reverse proxy (ie. Traefik) can reach it.
|
#address = "0.0.0.0" # If Conduit is running in a container, make sure the reverse proxy (ie. Traefik) can reach it.
|
||||||
|
|
||||||
|
proxy = "none" # more examples can be found at src/database/proxy.rs:6
|
||||||
|
|
||||||
|
# The total amount of memory that the database will use.
|
||||||
|
#db_cache_capacity_mb = 200
|
||||||
|
|
37
cross/README.md
Normal file
37
cross/README.md
Normal file
|
@ -0,0 +1,37 @@
|
||||||
|
## Cross compilation
|
||||||
|
|
||||||
|
The `cross` folder contains a set of convenience scripts (`build.sh` and `test.sh`) for cross-compiling Conduit.
|
||||||
|
|
||||||
|
Currently supported targets are
|
||||||
|
|
||||||
|
- aarch64-unknown-linux-musl
|
||||||
|
- arm-unknown-linux-musleabihf
|
||||||
|
- armv7-unknown-linux-musleabihf
|
||||||
|
- x86\_64-unknown-linux-musl
|
||||||
|
|
||||||
|
### Install prerequisites
|
||||||
|
#### Docker
|
||||||
|
[Installation guide](https://docs.docker.com/get-docker/).
|
||||||
|
```sh
|
||||||
|
$ sudo apt install docker
|
||||||
|
$ sudo systemctl start docker
|
||||||
|
$ sudo usermod -aG docker $USER
|
||||||
|
$ newgrp docker
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Cross
|
||||||
|
[Installation guide](https://github.com/rust-embedded/cross/#installation).
|
||||||
|
```sh
|
||||||
|
$ cargo install cross
|
||||||
|
```
|
||||||
|
|
||||||
|
### Buiding Conduit
|
||||||
|
```sh
|
||||||
|
$ TARGET=armv7-unknown-linux-musleabihf ./cross/build.sh --release
|
||||||
|
```
|
||||||
|
The cross-compiled binary is at `target/armv7-unknown-linux-musleabihf/release/conduit`
|
||||||
|
|
||||||
|
### Testing Conduit
|
||||||
|
```sh
|
||||||
|
$ TARGET=armv7-unknown-linux-musleabihf ./cross/test.sh --release
|
||||||
|
```
|
18
debian/README.md → debian/README.Debian
vendored
18
debian/README.md → debian/README.Debian
vendored
|
@ -1,36 +1,28 @@
|
||||||
Conduit for Debian
|
Conduit for Debian
|
||||||
==================
|
==================
|
||||||
|
|
||||||
Installation
|
|
||||||
------------
|
|
||||||
|
|
||||||
Information about downloading, building and deploying the Debian package, see
|
|
||||||
the "Installing Conduit" section in [DEPLOY.md](../DEPLOY.md).
|
|
||||||
All following sections until "Setting up the Reverse Proxy" be ignored because
|
|
||||||
this is handled automatically by the packaging.
|
|
||||||
|
|
||||||
Configuration
|
Configuration
|
||||||
-------------
|
-------------
|
||||||
|
|
||||||
When installed, Debconf generates the configuration of the homeserver
|
When installed, Debconf generates the configuration of the homeserver
|
||||||
(host)name, the address and port it listens on. This configuration ends up in
|
(host)name, the address and port it listens on. This configuration ends up in
|
||||||
`/etc/matrix-conduit/conduit.toml`.
|
/etc/matrix-conduit/conduit.toml.
|
||||||
|
|
||||||
You can tweak more detailed settings by uncommenting and setting the variables
|
You can tweak more detailed settings by uncommenting and setting the variables
|
||||||
in `/etc/matrix-conduit/conduit.toml`. This involves settings such as the maximum
|
in /etc/matrix-conduit/conduit.toml. This involves settings such as the maximum
|
||||||
file size for download/upload, enabling federation, etc.
|
file size for download/upload, enabling federation, etc.
|
||||||
|
|
||||||
Running
|
Running
|
||||||
-------
|
-------
|
||||||
|
|
||||||
The package uses the `matrix-conduit.service` systemd unit file to start and
|
The package uses the matrix-conduit.service systemd unit file to start and
|
||||||
stop Conduit. It loads the configuration file mentioned above to set up the
|
stop Conduit. It loads the configuration file mentioned above to set up the
|
||||||
environment before running the server.
|
environment before running the server.
|
||||||
|
|
||||||
This package assumes by default that Conduit will be placed behind a reverse
|
This package assumes by default that Conduit will be placed behind a reverse
|
||||||
proxy such as Apache or nginx. This default deployment entails just listening
|
proxy such as Apache or nginx. This default deployment entails just listening
|
||||||
on `127.0.0.1` and the free port `6167` and is reachable via a client using the URL
|
on 127.0.0.1 and the free port 6167 and is reachable via a client using the URL
|
||||||
<http://localhost:6167>.
|
http://localhost:6167.
|
||||||
|
|
||||||
At a later stage this packaging may support also setting up TLS and running
|
At a later stage this packaging may support also setting up TLS and running
|
||||||
stand-alone. In this case, however, you need to set up some certificates and
|
stand-alone. In this case, however, you need to set up some certificates and
|
1
debian/matrix-conduit.service
vendored
1
debian/matrix-conduit.service
vendored
|
@ -3,7 +3,6 @@ Description=Conduit Matrix homeserver
|
||||||
After=network.target
|
After=network.target
|
||||||
|
|
||||||
[Service]
|
[Service]
|
||||||
DynamicUser=yes
|
|
||||||
User=_matrix-conduit
|
User=_matrix-conduit
|
||||||
Group=_matrix-conduit
|
Group=_matrix-conduit
|
||||||
Type=simple
|
Type=simple
|
||||||
|
|
46
debian/postinst
vendored
46
debian/postinst
vendored
|
@ -5,7 +5,7 @@ set -e
|
||||||
|
|
||||||
CONDUIT_CONFIG_PATH=/etc/matrix-conduit
|
CONDUIT_CONFIG_PATH=/etc/matrix-conduit
|
||||||
CONDUIT_CONFIG_FILE="${CONDUIT_CONFIG_PATH}/conduit.toml"
|
CONDUIT_CONFIG_FILE="${CONDUIT_CONFIG_PATH}/conduit.toml"
|
||||||
CONDUIT_DATABASE_PATH=/var/lib/matrix-conduit/
|
CONDUIT_DATABASE_PATH=/var/lib/matrix-conduit/conduit_db
|
||||||
|
|
||||||
case "$1" in
|
case "$1" in
|
||||||
configure)
|
configure)
|
||||||
|
@ -19,11 +19,11 @@ case "$1" in
|
||||||
_matrix-conduit
|
_matrix-conduit
|
||||||
fi
|
fi
|
||||||
|
|
||||||
# Create the database path if it does not exist yet and fix up ownership
|
# Create the database path if it does not exist yet.
|
||||||
# and permissions.
|
if [ ! -d "$CONDUIT_DATABASE_PATH" ]; then
|
||||||
mkdir -p "$CONDUIT_DATABASE_PATH"
|
mkdir -p "$CONDUIT_DATABASE_PATH"
|
||||||
chown _matrix-conduit "$CONDUIT_DATABASE_PATH"
|
chown _matrix-conduit "$CONDUIT_DATABASE_PATH"
|
||||||
chmod 700 "$CONDUIT_DATABASE_PATH"
|
fi
|
||||||
|
|
||||||
if [ ! -e "$CONDUIT_CONFIG_FILE" ]; then
|
if [ ! -e "$CONDUIT_CONFIG_FILE" ]; then
|
||||||
# Write the debconf values in the config.
|
# Write the debconf values in the config.
|
||||||
|
@ -36,24 +36,18 @@ case "$1" in
|
||||||
mkdir -p "$CONDUIT_CONFIG_PATH"
|
mkdir -p "$CONDUIT_CONFIG_PATH"
|
||||||
cat > "$CONDUIT_CONFIG_FILE" << EOF
|
cat > "$CONDUIT_CONFIG_FILE" << EOF
|
||||||
[global]
|
[global]
|
||||||
# The server_name is the pretty name of this server. It is used as a suffix for
|
# The server_name is the name of this server. It is used as a suffix for user
|
||||||
# user and room ids. Examples: matrix.org, conduit.rs
|
# and room ids. Examples: matrix.org, conduit.rs
|
||||||
|
# The Conduit server needs to be reachable at https://your.server.name/ on port
|
||||||
# The Conduit server needs all /_matrix/ requests to be reachable at
|
# 443 (client-server) and 8448 (federation) OR you can create /.well-known
|
||||||
# https://your.server.name/ on port 443 (client-server) and 8448 (federation).
|
# files to redirect requests. See
|
||||||
|
|
||||||
# If that's not possible for you, you can create /.well-known files to redirect
|
|
||||||
# requests. See
|
|
||||||
# https://matrix.org/docs/spec/client_server/latest#get-well-known-matrix-client
|
# https://matrix.org/docs/spec/client_server/latest#get-well-known-matrix-client
|
||||||
# and
|
# and https://matrix.org/docs/spec/server_server/r0.1.4#get-well-known-matrix-server
|
||||||
# https://matrix.org/docs/spec/server_server/r0.1.4#get-well-known-matrix-server
|
# for more information.
|
||||||
# for more information
|
|
||||||
|
|
||||||
server_name = "${CONDUIT_SERVER_NAME}"
|
server_name = "${CONDUIT_SERVER_NAME}"
|
||||||
|
|
||||||
# This is the only directory where Conduit will save its data.
|
# This is the only directory where Conduit will save its data.
|
||||||
database_path = "${CONDUIT_DATABASE_PATH}"
|
database_path = "${CONDUIT_DATABASE_PATH}"
|
||||||
database_backend = "rocksdb"
|
|
||||||
|
|
||||||
# The address Conduit will be listening on.
|
# The address Conduit will be listening on.
|
||||||
# By default the server listens on address 0.0.0.0. Change this to 127.0.0.1 to
|
# By default the server listens on address 0.0.0.0. Change this to 127.0.0.1 to
|
||||||
|
@ -62,8 +56,7 @@ address = "${CONDUIT_ADDRESS}"
|
||||||
|
|
||||||
# The port Conduit will be running on. You need to set up a reverse proxy in
|
# The port Conduit will be running on. You need to set up a reverse proxy in
|
||||||
# your web server (e.g. apache or nginx), so all requests to /_matrix on port
|
# your web server (e.g. apache or nginx), so all requests to /_matrix on port
|
||||||
# 443 and 8448 will be forwarded to the Conduit instance running on this port
|
# 443 and 8448 will be forwarded to the Conduit instance running on this port.
|
||||||
# Docker users: Don't change this, you'll need to map an external port to this.
|
|
||||||
port = ${CONDUIT_PORT}
|
port = ${CONDUIT_PORT}
|
||||||
|
|
||||||
# Max size for uploads
|
# Max size for uploads
|
||||||
|
@ -72,13 +65,20 @@ max_request_size = 20_000_000 # in bytes
|
||||||
# Enables registration. If set to false, no users can register on this server.
|
# Enables registration. If set to false, no users can register on this server.
|
||||||
allow_registration = true
|
allow_registration = true
|
||||||
|
|
||||||
allow_federation = true
|
# Disable encryption, so no new encrypted rooms can be created.
|
||||||
allow_check_for_updates = true
|
# Note: Existing rooms will continue to work.
|
||||||
|
#allow_encryption = false
|
||||||
|
#allow_federation = false
|
||||||
|
|
||||||
trusted_servers = ["matrix.org"]
|
# Enable jaeger to support monitoring and troubleshooting through jaeger.
|
||||||
|
#allow_jaeger = false
|
||||||
|
|
||||||
#max_concurrent_requests = 100 # How many requests Conduit sends to other servers at the same time
|
#max_concurrent_requests = 100 # How many requests Conduit sends to other servers at the same time
|
||||||
#log = "warn,state_res=warn,rocket=off,_=off,sled=off"
|
#log = "info,state_res=warn,rocket=off,_=off,sled=off"
|
||||||
|
#workers = 4 # default: cpu core count * 2
|
||||||
|
|
||||||
|
# The total amount of memory that the database will use.
|
||||||
|
#db_cache_capacity_mb = 200
|
||||||
EOF
|
EOF
|
||||||
fi
|
fi
|
||||||
;;
|
;;
|
||||||
|
|
10
default.nix
10
default.nix
|
@ -1,10 +0,0 @@
|
||||||
(import
|
|
||||||
(
|
|
||||||
let lock = builtins.fromJSON (builtins.readFile ./flake.lock); in
|
|
||||||
fetchTarball {
|
|
||||||
url = lock.nodes.flake-compat.locked.url or "https://github.com/edolstra/flake-compat/archive/${lock.nodes.flake-compat.locked.rev}.tar.gz";
|
|
||||||
sha256 = lock.nodes.flake-compat.locked.narHash;
|
|
||||||
}
|
|
||||||
)
|
|
||||||
{ src = ./.; }
|
|
||||||
).defaultNix
|
|
|
@ -20,22 +20,27 @@ services:
|
||||||
ports:
|
ports:
|
||||||
- 8448:6167
|
- 8448:6167
|
||||||
volumes:
|
volumes:
|
||||||
- db:/var/lib/matrix-conduit/
|
- db:/srv/conduit/.local/share/conduit
|
||||||
|
### Uncomment if you want to use conduit.toml to configure Conduit
|
||||||
|
### Note: Set env vars will override conduit.toml values
|
||||||
|
# - ./conduit.toml:/srv/conduit/conduit.toml
|
||||||
environment:
|
environment:
|
||||||
CONDUIT_SERVER_NAME: your.server.name # EDIT THIS
|
CONDUIT_SERVER_NAME: localhost:6167 # replace with your own name
|
||||||
CONDUIT_DATABASE_PATH: /var/lib/matrix-conduit/
|
|
||||||
CONDUIT_DATABASE_BACKEND: rocksdb
|
|
||||||
CONDUIT_PORT: 6167
|
|
||||||
CONDUIT_MAX_REQUEST_SIZE: 20_000_000 # in bytes, ~20 MB
|
|
||||||
CONDUIT_ALLOW_REGISTRATION: 'true'
|
|
||||||
CONDUIT_ALLOW_FEDERATION: 'true'
|
|
||||||
CONDUIT_ALLOW_CHECK_FOR_UPDATES: 'true'
|
|
||||||
CONDUIT_TRUSTED_SERVERS: '["matrix.org"]'
|
CONDUIT_TRUSTED_SERVERS: '["matrix.org"]'
|
||||||
#CONDUIT_MAX_CONCURRENT_REQUESTS: 100
|
CONDUIT_ALLOW_REGISTRATION: 'true'
|
||||||
#CONDUIT_LOG: warn,rocket=off,_=off,sled=off
|
### Uncomment and change values as desired
|
||||||
CONDUIT_ADDRESS: 0.0.0.0
|
# CONDUIT_ADDRESS: 0.0.0.0
|
||||||
CONDUIT_CONFIG: '' # Ignore this
|
# CONDUIT_PORT: 6167
|
||||||
#
|
# CONDUIT_CONFIG: '/srv/conduit/conduit.toml' # if you want to configure purely by env vars, set this to an empty string ''
|
||||||
|
# Available levels are: error, warn, info, debug, trace - more info at: https://docs.rs/env_logger/*/env_logger/#enabling-logging
|
||||||
|
# CONDUIT_LOG: info # default is: "info,rocket=off,_=off,sled=off"
|
||||||
|
# CONDUIT_ALLOW_JAEGER: 'false'
|
||||||
|
# CONDUIT_ALLOW_ENCRYPTION: 'false'
|
||||||
|
# CONDUIT_ALLOW_FEDERATION: 'false'
|
||||||
|
# CONDUIT_DATABASE_PATH: /srv/conduit/.local/share/conduit
|
||||||
|
# CONDUIT_WORKERS: 10
|
||||||
|
# CONDUIT_MAX_REQUEST_SIZE: 20_000_000 # in bytes, ~20 MB
|
||||||
|
|
||||||
### Uncomment if you want to use your own Element-Web App.
|
### Uncomment if you want to use your own Element-Web App.
|
||||||
### Note: You need to provide a config.json for Element and you also need a second
|
### Note: You need to provide a config.json for Element and you also need a second
|
||||||
### Domain or Subdomain for the communication between Element and Conduit
|
### Domain or Subdomain for the communication between Element and Conduit
|
153
docker/README.md
153
docker/README.md
|
@ -4,36 +4,7 @@
|
||||||
|
|
||||||
## Docker
|
## Docker
|
||||||
|
|
||||||
To run Conduit with Docker you can either build the image yourself or pull it from a registry.
|
### Build & Dockerfile
|
||||||
|
|
||||||
|
|
||||||
### Use a registry
|
|
||||||
|
|
||||||
OCI images for Conduit are available in the registries listed below. We recommend using the image tagged as `latest` from GitLab's own registry.
|
|
||||||
|
|
||||||
| Registry | Image | Size | Notes |
|
|
||||||
| --------------- | --------------------------------------------------------------- | ----------------------------- | ---------------------- |
|
|
||||||
| GitLab Registry | [registry.gitlab.com/famedly/conduit/matrix-conduit:latest][gl] | ![Image Size][shield-latest] | Stable image. |
|
|
||||||
| Docker Hub | [docker.io/matrixconduit/matrix-conduit:latest][dh] | ![Image Size][shield-latest] | Stable image. |
|
|
||||||
| GitLab Registry | [registry.gitlab.com/famedly/conduit/matrix-conduit:next][gl] | ![Image Size][shield-next] | Development version. |
|
|
||||||
| Docker Hub | [docker.io/matrixconduit/matrix-conduit:next][dh] | ![Image Size][shield-next] | Development version. |
|
|
||||||
|
|
||||||
|
|
||||||
[dh]: https://hub.docker.com/r/matrixconduit/matrix-conduit
|
|
||||||
[gl]: https://gitlab.com/famedly/conduit/container_registry/2497937
|
|
||||||
[shield-latest]: https://img.shields.io/docker/image-size/matrixconduit/matrix-conduit/latest
|
|
||||||
[shield-next]: https://img.shields.io/docker/image-size/matrixconduit/matrix-conduit/next
|
|
||||||
|
|
||||||
|
|
||||||
Use
|
|
||||||
```bash
|
|
||||||
docker image pull <link>
|
|
||||||
```
|
|
||||||
to pull it to your machine.
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
### Build using a dockerfile
|
|
||||||
|
|
||||||
The Dockerfile provided by Conduit has two stages, each of which creates an image.
|
The Dockerfile provided by Conduit has two stages, each of which creates an image.
|
||||||
|
|
||||||
|
@ -48,27 +19,24 @@ docker build --tag matrixconduit/matrix-conduit:latest .
|
||||||
|
|
||||||
which also will tag the resulting image as `matrixconduit/matrix-conduit:latest`.
|
which also will tag the resulting image as `matrixconduit/matrix-conduit:latest`.
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
### Run
|
### Run
|
||||||
|
|
||||||
When you have the image you can simply run it with
|
After building the image you can simply run it with
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
docker run -d -p 8448:6167 \
|
docker run -d -p 8448:6167 -v ~/conduit.toml:/srv/conduit/conduit.toml -v db:/srv/conduit/.local/share/conduit matrixconduit/matrix-conduit:latest
|
||||||
-v db:/var/lib/matrix-conduit/ \
|
|
||||||
-e CONDUIT_SERVER_NAME="your.server.name" \
|
|
||||||
-e CONDUIT_DATABASE_BACKEND="rocksdb" \
|
|
||||||
-e CONDUIT_ALLOW_REGISTRATION=true \
|
|
||||||
-e CONDUIT_ALLOW_FEDERATION=true \
|
|
||||||
-e CONDUIT_MAX_REQUEST_SIZE="20_000_000" \
|
|
||||||
-e CONDUIT_TRUSTED_SERVERS="[\"matrix.org\"]" \
|
|
||||||
-e CONDUIT_MAX_CONCURRENT_REQUESTS="100" \
|
|
||||||
-e CONDUIT_LOG="warn,rocket=off,_=off,sled=off" \
|
|
||||||
--name conduit <link>
|
|
||||||
```
|
```
|
||||||
|
|
||||||
or you can use [docker-compose](#docker-compose).
|
or you can skip the build step and pull the image from one of the following registries:
|
||||||
|
|
||||||
|
| Registry | Image | Size |
|
||||||
|
| --------------- | --------------------------------------------------------------- | --------------------- |
|
||||||
|
| Docker Hub | [matrixconduit/matrix-conduit:latest][dh] | ![Image Size][shield] |
|
||||||
|
| GitLab Registry | [registry.gitlab.com/famedly/conduit/matrix-conduit:latest][gl] | ![Image Size][shield] |
|
||||||
|
|
||||||
|
[dh]: https://hub.docker.com/r/matrixconduit/matrix-conduit
|
||||||
|
[gl]: https://gitlab.com/famedly/conduit/container_registry/2497937
|
||||||
|
[shield]: https://img.shields.io/docker/image-size/matrixconduit/matrix-conduit/latest
|
||||||
|
|
||||||
The `-d` flag lets the container run in detached mode. You now need to supply a `conduit.toml` config file, an example can be found [here](../conduit-example.toml).
|
The `-d` flag lets the container run in detached mode. You now need to supply a `conduit.toml` config file, an example can be found [here](../conduit-example.toml).
|
||||||
You can pass in different env vars to change config values on the fly. You can even configure Conduit completely by using env vars, but for that you need
|
You can pass in different env vars to change config values on the fly. You can even configure Conduit completely by using env vars, but for that you need
|
||||||
|
@ -76,20 +44,10 @@ to pass `-e CONDUIT_CONFIG=""` into your container. For an overview of possible
|
||||||
|
|
||||||
If you just want to test Conduit for a short time, you can use the `--rm` flag, which will clean up everything related to your container after you stop it.
|
If you just want to test Conduit for a short time, you can use the `--rm` flag, which will clean up everything related to your container after you stop it.
|
||||||
|
|
||||||
### Docker-compose
|
## Docker-compose
|
||||||
|
|
||||||
If the `docker run` command is not for you or your setup, you can also use one of the provided `docker-compose` files.
|
If the docker command is not for you or your setup, you can also use one of the provided `docker-compose` files. Depending on your proxy setup, use the [`docker-compose.traefik.yml`](docker-compose.traefik.yml) and [`docker-compose.override.traefik.yml`](docker-compose.override.traefik.yml) for Traefik (don't forget to remove `.traefik` from the filenames) or the normal [`docker-compose.yml`](../docker-compose.yml) for every other reverse proxy. Additional info about deploying
|
||||||
|
Conduit can be found [here](../DEPLOY.md).
|
||||||
Depending on your proxy setup, you can use one of the following files;
|
|
||||||
- If you already have a `traefik` instance set up, use [`docker-compose.for-traefik.yml`](docker-compose.for-traefik.yml)
|
|
||||||
- If you don't have a `traefik` instance set up (or any other reverse proxy), use [`docker-compose.with-traefik.yml`](docker-compose.with-traefik.yml)
|
|
||||||
- For any other reverse proxy, use [`docker-compose.yml`](docker-compose.yml)
|
|
||||||
|
|
||||||
When picking the traefik-related compose file, rename it so it matches `docker-compose.yml`, and
|
|
||||||
rename the override file to `docker-compose.override.yml`. Edit the latter with the values you want
|
|
||||||
for your server.
|
|
||||||
|
|
||||||
Additional info about deploying Conduit can be found [here](../DEPLOY.md).
|
|
||||||
|
|
||||||
### Build
|
### Build
|
||||||
|
|
||||||
|
@ -113,23 +71,17 @@ docker-compose up -d
|
||||||
|
|
||||||
### Use Traefik as Proxy
|
### Use Traefik as Proxy
|
||||||
|
|
||||||
As a container user, you probably know about Traefik. It is a easy to use reverse proxy for making
|
As a container user, you probably know about Traefik. It is a easy to use reverse proxy for making containerized app and services available through the web. With the
|
||||||
containerized app and services available through the web. With the two provided files,
|
two provided files, [`docker-compose.traefik.yml`](docker-compose.traefik.yml) and [`docker-compose.override.traefik.yml`](docker-compose.override.traefik.yml), it is
|
||||||
[`docker-compose.for-traefik.yml`](docker-compose.for-traefik.yml) (or
|
equally easy to deploy and use Conduit, with a little caveat. If you already took a look at the files, then you should have seen the `well-known` service, and that is
|
||||||
[`docker-compose.with-traefik.yml`](docker-compose.with-traefik.yml)) and
|
the little caveat. Traefik is simply a proxy and loadbalancer and is not able to serve any kind of content, but for Conduit to federate, we need to either expose ports
|
||||||
[`docker-compose.override.yml`](docker-compose.override.yml), it is equally easy to deploy
|
`443` and `8448` or serve two endpoints `.well-known/matrix/client` and `.well-known/matrix/server`.
|
||||||
and use Conduit, with a little caveat. If you already took a look at the files, then you should have
|
|
||||||
seen the `well-known` service, and that is the little caveat. Traefik is simply a proxy and
|
|
||||||
loadbalancer and is not able to serve any kind of content, but for Conduit to federate, we need to
|
|
||||||
either expose ports `443` and `8448` or serve two endpoints `.well-known/matrix/client` and
|
|
||||||
`.well-known/matrix/server`.
|
|
||||||
|
|
||||||
With the service `well-known` we use a single `nginx` container that will serve those two files.
|
With the service `well-known` we use a single `nginx` container that will serve those two files.
|
||||||
|
|
||||||
So...step by step:
|
So...step by step:
|
||||||
|
|
||||||
1. Copy [`docker-compose.for-traefik.yml`](docker-compose.for-traefik.yml) (or
|
1. Copy [`docker-compose.traefik.yml`](docker-compose.traefik.yml) and [`docker-compose.override.traefik.yml`](docker-compose.override.traefik.yml) from the repository and remove `.traefik` from the filenames.
|
||||||
[`docker-compose.with-traefik.yml`](docker-compose.with-traefik.yml)) and [`docker-compose.override.yml`](docker-compose.override.yml) from the repository and remove `.for-traefik` (or `.with-traefik`) from the filename.
|
|
||||||
2. Open both files and modify/adjust them to your needs. Meaning, change the `CONDUIT_SERVER_NAME` and the volume host mappings according to your needs.
|
2. Open both files and modify/adjust them to your needs. Meaning, change the `CONDUIT_SERVER_NAME` and the volume host mappings according to your needs.
|
||||||
3. Create the `conduit.toml` config file, an example can be found [here](../conduit-example.toml), or set `CONDUIT_CONFIG=""` and configure Conduit per env vars.
|
3. Create the `conduit.toml` config file, an example can be found [here](../conduit-example.toml), or set `CONDUIT_CONFIG=""` and configure Conduit per env vars.
|
||||||
4. Uncomment the `element-web` service if you want to host your own Element Web Client and create a `element_config.json`.
|
4. Uncomment the `element-web` service if you want to host your own Element Web Client and create a `element_config.json`.
|
||||||
|
@ -144,12 +96,12 @@ So...step by step:
|
||||||
|
|
||||||
location /.well-known/matrix/server {
|
location /.well-known/matrix/server {
|
||||||
return 200 '{"m.server": "<SUBDOMAIN>.<DOMAIN>:443"}';
|
return 200 '{"m.server": "<SUBDOMAIN>.<DOMAIN>:443"}';
|
||||||
types { } default_type "application/json; charset=utf-8";
|
add_header Content-Type application/json;
|
||||||
}
|
}
|
||||||
|
|
||||||
location /.well-known/matrix/client {
|
location /.well-known/matrix/client {
|
||||||
return 200 '{"m.homeserver": {"base_url": "https://<SUBDOMAIN>.<DOMAIN>"}}';
|
return 200 '{"m.homeserver": {"base_url": "https://<SUBDOMAIN>.<DOMAIN>"}}';
|
||||||
types { } default_type "application/json; charset=utf-8";
|
add_header Content-Type application/json;
|
||||||
add_header "Access-Control-Allow-Origin" *;
|
add_header "Access-Control-Allow-Origin" *;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -160,59 +112,4 @@ So...step by step:
|
||||||
```
|
```
|
||||||
|
|
||||||
6. Run `docker-compose up -d`
|
6. Run `docker-compose up -d`
|
||||||
7. Connect to your homeserver with your preferred client and create a user. You should do this immediately after starting Conduit, because the first created user is the admin.
|
7. Connect to your homeserver with your preferred client and create a user. You should do this immediatly after starting Conduit, because the first created user is the admin.
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
## Voice communication
|
|
||||||
|
|
||||||
In order to make or receive calls, a TURN server is required. Conduit suggests using [Coturn](https://github.com/coturn/coturn) for this purpose, which is also available as a Docker image. Before proceeding with the software installation, it is essential to have the necessary configurations in place.
|
|
||||||
|
|
||||||
### Configuration
|
|
||||||
|
|
||||||
Create a configuration file called `coturn.conf` containing:
|
|
||||||
|
|
||||||
```conf
|
|
||||||
use-auth-secret
|
|
||||||
static-auth-secret=<a secret key>
|
|
||||||
realm=<your server domain>
|
|
||||||
```
|
|
||||||
A common way to generate a suitable alphanumeric secret key is by using `pwgen -s 64 1`.
|
|
||||||
|
|
||||||
These same values need to be set in conduit. You can either modify conduit.toml to include these lines:
|
|
||||||
```
|
|
||||||
turn_uris = ["turn:<your server domain>?transport=udp", "turn:<your server domain>?transport=tcp"]
|
|
||||||
turn_secret = "<secret key from coturn configuration>"
|
|
||||||
```
|
|
||||||
or append the following to the docker environment variables dependig on which configuration method you used earlier:
|
|
||||||
```yml
|
|
||||||
CONDUIT_TURN_URIS: '["turn:<your server domain>?transport=udp", "turn:<your server domain>?transport=tcp"]'
|
|
||||||
CONDUIT_TURN_SECRET: "<secret key from coturn configuration>"
|
|
||||||
```
|
|
||||||
Restart Conduit to apply these changes.
|
|
||||||
|
|
||||||
### Run
|
|
||||||
Run the [Coturn](https://hub.docker.com/r/coturn/coturn) image using
|
|
||||||
```bash
|
|
||||||
docker run -d --network=host -v $(pwd)/coturn.conf:/etc/coturn/turnserver.conf coturn/coturn
|
|
||||||
```
|
|
||||||
|
|
||||||
or docker-compose. For the latter, paste the following section into a file called `docker-compose.yml`
|
|
||||||
and run `docker-compose up -d` in the same directory.
|
|
||||||
|
|
||||||
```yml
|
|
||||||
version: 3
|
|
||||||
services:
|
|
||||||
turn:
|
|
||||||
container_name: coturn-server
|
|
||||||
image: docker.io/coturn/coturn
|
|
||||||
restart: unless-stopped
|
|
||||||
network_mode: "host"
|
|
||||||
volumes:
|
|
||||||
- ./coturn.conf:/etc/coturn/turnserver.conf
|
|
||||||
```
|
|
||||||
|
|
||||||
To understand why the host networking mode is used and explore alternative configuration options, please visit the following link: https://github.com/coturn/coturn/blob/master/docker/coturn/README.md.
|
|
||||||
For security recommendations see Synapse's [Coturn documentation](https://github.com/matrix-org/synapse/blob/develop/docs/setup/turn/coturn.md#configuration).
|
|
||||||
|
|
||||||
|
|
|
@ -7,21 +7,16 @@
|
||||||
# Credit's for the original Dockerfile: Weasy666.
|
# Credit's for the original Dockerfile: Weasy666.
|
||||||
# ---------------------------------------------------------------------------------------------------------
|
# ---------------------------------------------------------------------------------------------------------
|
||||||
|
|
||||||
FROM docker.io/alpine:3.16.0@sha256:4ff3ca91275773af45cb4b0834e12b7eb47d1c18f770a0b151381cd227f4c253 AS runner
|
FROM docker.io/alpine:3.15.0 AS runner
|
||||||
|
|
||||||
|
|
||||||
# Standard port on which Conduit launches.
|
# Standard port on which Conduit launches.
|
||||||
# You still need to map the port when using the docker command or docker-compose.
|
# You still need to map the port when using the docker command or docker-compose.
|
||||||
EXPOSE 6167
|
EXPOSE 6167
|
||||||
|
|
||||||
# Users are expected to mount a volume to this directory:
|
# Note from @jfowl: I would like to remove the config file in the future and just have the Docker version be configured with envs.
|
||||||
ARG DEFAULT_DB_PATH=/var/lib/matrix-conduit
|
ENV CONDUIT_CONFIG="/srv/conduit/conduit.toml" \
|
||||||
|
CONDUIT_PORT=6167
|
||||||
ENV CONDUIT_PORT=6167 \
|
|
||||||
CONDUIT_ADDRESS="0.0.0.0" \
|
|
||||||
CONDUIT_DATABASE_PATH=${DEFAULT_DB_PATH} \
|
|
||||||
CONDUIT_CONFIG=''
|
|
||||||
# └─> Set no config file to do all configuration with env vars
|
|
||||||
|
|
||||||
# Conduit needs:
|
# Conduit needs:
|
||||||
# ca-certificates: for https
|
# ca-certificates: for https
|
||||||
|
@ -30,6 +25,7 @@ RUN apk add --no-cache \
|
||||||
ca-certificates \
|
ca-certificates \
|
||||||
iproute2
|
iproute2
|
||||||
|
|
||||||
|
|
||||||
ARG CREATED
|
ARG CREATED
|
||||||
ARG VERSION
|
ARG VERSION
|
||||||
ARG GIT_REF
|
ARG GIT_REF
|
||||||
|
@ -48,37 +44,37 @@ LABEL org.opencontainers.image.created=${CREATED} \
|
||||||
org.opencontainers.image.documentation="https://gitlab.com/famedly/conduit" \
|
org.opencontainers.image.documentation="https://gitlab.com/famedly/conduit" \
|
||||||
org.opencontainers.image.ref.name=""
|
org.opencontainers.image.ref.name=""
|
||||||
|
|
||||||
|
# Created directory for the database and media files
|
||||||
|
RUN mkdir -p /srv/conduit/.local/share/conduit
|
||||||
|
|
||||||
# Test if Conduit is still alive, uses the same endpoint as Element
|
# Test if Conduit is still alive, uses the same endpoint as Element
|
||||||
COPY ./docker/healthcheck.sh /srv/conduit/healthcheck.sh
|
COPY ./docker/healthcheck.sh /srv/conduit/healthcheck.sh
|
||||||
HEALTHCHECK --start-period=5s --interval=5s CMD ./healthcheck.sh
|
HEALTHCHECK --start-period=5s --interval=5s CMD ./healthcheck.sh
|
||||||
|
|
||||||
|
|
||||||
|
# Depending on the target platform (e.g. "linux/arm/v7", "linux/arm64/v8", or "linux/amd64")
|
||||||
|
# copy the matching binary into this docker image
|
||||||
|
ARG TARGETPLATFORM
|
||||||
|
COPY ./$TARGETPLATFORM /srv/conduit/conduit
|
||||||
|
|
||||||
|
|
||||||
# Improve security: Don't run stuff as root, that does not need to run as root:
|
# Improve security: Don't run stuff as root, that does not need to run as root:
|
||||||
# Most distros also use 1000:1000 for the first real user, so this should resolve volume mounting problems.
|
# Add www-data user and group with UID 82, as used by alpine
|
||||||
ARG USER_ID=1000
|
# https://git.alpinelinux.org/aports/tree/main/nginx/nginx.pre-install
|
||||||
ARG GROUP_ID=1000
|
|
||||||
RUN set -x ; \
|
RUN set -x ; \
|
||||||
deluser --remove-home www-data ; \
|
addgroup -Sg 82 www-data 2>/dev/null ; \
|
||||||
addgroup -S -g ${GROUP_ID} conduit 2>/dev/null ; \
|
adduser -S -D -H -h /srv/conduit -G www-data -g www-data www-data 2>/dev/null ; \
|
||||||
adduser -S -u ${USER_ID} -D -H -h /srv/conduit -G conduit -g conduit conduit 2>/dev/null ; \
|
addgroup www-data www-data 2>/dev/null && exit 0 ; exit 1
|
||||||
addgroup conduit conduit 2>/dev/null && exit 0 ; exit 1
|
|
||||||
|
|
||||||
# Change ownership of Conduit files to conduit user and group
|
# Change ownership of Conduit files to www-data user and group
|
||||||
RUN chown -cR conduit:conduit /srv/conduit && \
|
RUN chown -cR www-data:www-data /srv/conduit
|
||||||
chmod +x /srv/conduit/healthcheck.sh && \
|
RUN chmod +x /srv/conduit/healthcheck.sh
|
||||||
mkdir -p ${DEFAULT_DB_PATH} && \
|
|
||||||
chown -cR conduit:conduit ${DEFAULT_DB_PATH}
|
|
||||||
|
|
||||||
# Change user to conduit
|
# Change user to www-data
|
||||||
USER conduit
|
USER www-data
|
||||||
# Set container home directory
|
# Set container home directory
|
||||||
WORKDIR /srv/conduit
|
WORKDIR /srv/conduit
|
||||||
|
|
||||||
# Run Conduit and print backtraces on panics
|
# Run Conduit and print backtraces on panics
|
||||||
ENV RUST_BACKTRACE=1
|
ENV RUST_BACKTRACE=1
|
||||||
ENTRYPOINT [ "/srv/conduit/conduit" ]
|
ENTRYPOINT [ "/srv/conduit/conduit" ]
|
||||||
|
|
||||||
# Depending on the target platform (e.g. "linux/arm/v7", "linux/arm64/v8", or "linux/amd64")
|
|
||||||
# copy the matching binary into this docker image
|
|
||||||
ARG TARGETPLATFORM
|
|
||||||
COPY --chown=conduit:conduit ./$TARGETPLATFORM /srv/conduit/conduit
|
|
||||||
|
|
|
@ -1,69 +0,0 @@
|
||||||
# Conduit - Behind Traefik Reverse Proxy
|
|
||||||
version: '3'
|
|
||||||
|
|
||||||
services:
|
|
||||||
homeserver:
|
|
||||||
### If you already built the Conduit image with 'docker build' or want to use the Docker Hub image,
|
|
||||||
### then you are ready to go.
|
|
||||||
image: matrixconduit/matrix-conduit:latest
|
|
||||||
### If you want to build a fresh image from the sources, then comment the image line and uncomment the
|
|
||||||
### build lines. If you want meaningful labels in your built Conduit image, you should run docker-compose like this:
|
|
||||||
### CREATED=$(date -u +'%Y-%m-%dT%H:%M:%SZ') VERSION=$(grep -m1 -o '[0-9].[0-9].[0-9]' Cargo.toml) docker-compose up -d
|
|
||||||
# build:
|
|
||||||
# context: .
|
|
||||||
# args:
|
|
||||||
# CREATED: '2021-03-16T08:18:27Z'
|
|
||||||
# VERSION: '0.1.0'
|
|
||||||
# LOCAL: 'false'
|
|
||||||
# GIT_REF: origin/master
|
|
||||||
restart: unless-stopped
|
|
||||||
volumes:
|
|
||||||
- db:/var/lib/matrix-conduit/
|
|
||||||
networks:
|
|
||||||
- proxy
|
|
||||||
environment:
|
|
||||||
CONDUIT_SERVER_NAME: your.server.name # EDIT THIS
|
|
||||||
CONDUIT_DATABASE_PATH: /var/lib/matrix-conduit/
|
|
||||||
CONDUIT_DATABASE_BACKEND: rocksdb
|
|
||||||
CONDUIT_PORT: 6167
|
|
||||||
CONDUIT_MAX_REQUEST_SIZE: 20_000_000 # in bytes, ~20 MB
|
|
||||||
CONDUIT_ALLOW_REGISTRATION: 'true'
|
|
||||||
CONDUIT_ALLOW_FEDERATION: 'true'
|
|
||||||
CONDUIT_ALLOW_CHECK_FOR_UPDATES: 'true'
|
|
||||||
CONDUIT_TRUSTED_SERVERS: '["matrix.org"]'
|
|
||||||
#CONDUIT_MAX_CONCURRENT_REQUESTS: 100
|
|
||||||
#CONDUIT_LOG: warn,rocket=off,_=off,sled=off
|
|
||||||
CONDUIT_ADDRESS: 0.0.0.0
|
|
||||||
CONDUIT_CONFIG: '' # Ignore this
|
|
||||||
|
|
||||||
# We need some way to server the client and server .well-known json. The simplest way is to use a nginx container
|
|
||||||
# to serve those two as static files. If you want to use a different way, delete or comment the below service, here
|
|
||||||
# and in the docker-compose override file.
|
|
||||||
well-known:
|
|
||||||
image: nginx:latest
|
|
||||||
restart: unless-stopped
|
|
||||||
volumes:
|
|
||||||
- ./nginx/matrix.conf:/etc/nginx/conf.d/matrix.conf # the config to serve the .well-known/matrix files
|
|
||||||
- ./nginx/www:/var/www/ # location of the client and server .well-known-files
|
|
||||||
### Uncomment if you want to use your own Element-Web App.
|
|
||||||
### Note: You need to provide a config.json for Element and you also need a second
|
|
||||||
### Domain or Subdomain for the communication between Element and Conduit
|
|
||||||
### Config-Docs: https://github.com/vector-im/element-web/blob/develop/docs/config.md
|
|
||||||
# element-web:
|
|
||||||
# image: vectorim/element-web:latest
|
|
||||||
# restart: unless-stopped
|
|
||||||
# volumes:
|
|
||||||
# - ./element_config.json:/app/config.json
|
|
||||||
# networks:
|
|
||||||
# - proxy
|
|
||||||
# depends_on:
|
|
||||||
# - homeserver
|
|
||||||
|
|
||||||
volumes:
|
|
||||||
db:
|
|
||||||
|
|
||||||
networks:
|
|
||||||
# This is the network Traefik listens to, if your network has a different
|
|
||||||
# name, don't forget to change it here and in the docker-compose.override.yml
|
|
||||||
proxy:
|
|
||||||
external: true
|
|
|
@ -33,11 +33,10 @@ services:
|
||||||
# CONDUIT_PORT: 6167
|
# CONDUIT_PORT: 6167
|
||||||
# CONDUIT_CONFIG: '/srv/conduit/conduit.toml' # if you want to configure purely by env vars, set this to an empty string ''
|
# CONDUIT_CONFIG: '/srv/conduit/conduit.toml' # if you want to configure purely by env vars, set this to an empty string ''
|
||||||
# Available levels are: error, warn, info, debug, trace - more info at: https://docs.rs/env_logger/*/env_logger/#enabling-logging
|
# Available levels are: error, warn, info, debug, trace - more info at: https://docs.rs/env_logger/*/env_logger/#enabling-logging
|
||||||
# CONDUIT_LOG: info # default is: "warn,_=off,sled=off"
|
# CONDUIT_LOG: info # default is: "info,rocket=off,_=off,sled=off"
|
||||||
# CONDUIT_ALLOW_JAEGER: 'false'
|
# CONDUIT_ALLOW_JAEGER: 'false'
|
||||||
# CONDUIT_ALLOW_ENCRYPTION: 'true'
|
# CONDUIT_ALLOW_ENCRYPTION: 'false'
|
||||||
# CONDUIT_ALLOW_FEDERATION: 'true'
|
# CONDUIT_ALLOW_FEDERATION: 'false'
|
||||||
# CONDUIT_ALLOW_CHECK_FOR_UPDATES: 'true'
|
|
||||||
# CONDUIT_DATABASE_PATH: /srv/conduit/.local/share/conduit
|
# CONDUIT_DATABASE_PATH: /srv/conduit/.local/share/conduit
|
||||||
# CONDUIT_WORKERS: 10
|
# CONDUIT_WORKERS: 10
|
||||||
# CONDUIT_MAX_REQUEST_SIZE: 20_000_000 # in bytes, ~20 MB
|
# CONDUIT_MAX_REQUEST_SIZE: 20_000_000 # in bytes, ~20 MB
|
||||||
|
@ -66,33 +65,11 @@ services:
|
||||||
# depends_on:
|
# depends_on:
|
||||||
# - homeserver
|
# - homeserver
|
||||||
|
|
||||||
traefik:
|
|
||||||
image: "traefik:latest"
|
|
||||||
container_name: "traefik"
|
|
||||||
restart: "unless-stopped"
|
|
||||||
ports:
|
|
||||||
- "80:80"
|
|
||||||
- "443:443"
|
|
||||||
volumes:
|
|
||||||
- "/var/run/docker.sock:/var/run/docker.sock"
|
|
||||||
# - "./traefik_config:/etc/traefik"
|
|
||||||
- "acme:/etc/traefik/acme"
|
|
||||||
labels:
|
|
||||||
- "traefik.enable=true"
|
|
||||||
|
|
||||||
# middleware redirect
|
|
||||||
- "traefik.http.middlewares.redirect-to-https.redirectscheme.scheme=https"
|
|
||||||
# global redirect to https
|
|
||||||
- "traefik.http.routers.redirs.rule=hostregexp(`{host:.+}`)"
|
|
||||||
- "traefik.http.routers.redirs.entrypoints=http"
|
|
||||||
- "traefik.http.routers.redirs.middlewares=redirect-to-https"
|
|
||||||
|
|
||||||
networks:
|
|
||||||
- proxy
|
|
||||||
|
|
||||||
volumes:
|
volumes:
|
||||||
db:
|
db:
|
||||||
acme:
|
|
||||||
|
|
||||||
networks:
|
networks:
|
||||||
|
# This is the network Traefik listens to, if your network has a different
|
||||||
|
# name, don't forget to change it here and in the docker-compose.override.yml
|
||||||
proxy:
|
proxy:
|
||||||
|
external: true
|
|
@ -6,14 +6,9 @@ if [ -z "${CONDUIT_PORT}" ]; then
|
||||||
CONDUIT_PORT=$(ss -tlpn | grep conduit | grep -m1 -o ':[0-9]*' | grep -m1 -o '[0-9]*')
|
CONDUIT_PORT=$(ss -tlpn | grep conduit | grep -m1 -o ':[0-9]*' | grep -m1 -o '[0-9]*')
|
||||||
fi
|
fi
|
||||||
|
|
||||||
# If CONDUIT_ADDRESS is not set try to get the address from the process list
|
|
||||||
if [ -z "${CONDUIT_ADDRESS}" ]; then
|
|
||||||
CONDUIT_ADDRESS=$(ss -tlpn | awk -F ' +|:' '/conduit/ { print $4 }')
|
|
||||||
fi
|
|
||||||
|
|
||||||
# The actual health check.
|
# The actual health check.
|
||||||
# We try to first get a response on HTTP and when that fails on HTTPS and when that fails, we exit with code 1.
|
# We try to first get a response on HTTP and when that fails on HTTPS and when that fails, we exit with code 1.
|
||||||
# TODO: Change this to a single wget call. Do we have a config value that we can check for that?
|
# TODO: Change this to a single wget call. Do we have a config value that we can check for that?
|
||||||
wget --no-verbose --tries=1 --spider "http://${CONDUIT_ADDRESS}:${CONDUIT_PORT}/_matrix/client/versions" || \
|
wget --no-verbose --tries=1 --spider "http://localhost:${CONDUIT_PORT}/_matrix/client/versions" || \
|
||||||
wget --no-verbose --tries=1 --spider "https://${CONDUIT_ADDRESS}:${CONDUIT_PORT}/_matrix/client/versions" || \
|
wget --no-verbose --tries=1 --spider "https://localhost:${CONDUIT_PORT}/_matrix/client/versions" || \
|
||||||
exit 1
|
exit 1
|
||||||
|
|
64
engage.toml
64
engage.toml
|
@ -1,64 +0,0 @@
|
||||||
interpreter = ["bash", "-euo", "pipefail", "-c"]
|
|
||||||
|
|
||||||
[[task]]
|
|
||||||
name = "engage"
|
|
||||||
group = "versions"
|
|
||||||
script = "engage --version"
|
|
||||||
|
|
||||||
[[task]]
|
|
||||||
name = "rustc"
|
|
||||||
group = "versions"
|
|
||||||
script = "rustc --version"
|
|
||||||
|
|
||||||
[[task]]
|
|
||||||
name = "cargo"
|
|
||||||
group = "versions"
|
|
||||||
script = "cargo --version"
|
|
||||||
|
|
||||||
[[task]]
|
|
||||||
name = "cargo-fmt"
|
|
||||||
group = "versions"
|
|
||||||
script = "cargo fmt --version"
|
|
||||||
|
|
||||||
[[task]]
|
|
||||||
name = "rustdoc"
|
|
||||||
group = "versions"
|
|
||||||
script = "rustdoc --version"
|
|
||||||
|
|
||||||
[[task]]
|
|
||||||
name = "cargo-clippy"
|
|
||||||
group = "versions"
|
|
||||||
script = "cargo clippy -- --version"
|
|
||||||
|
|
||||||
[[task]]
|
|
||||||
name = "cargo-fmt"
|
|
||||||
group = "lints"
|
|
||||||
script = "cargo fmt --check -- --color=always"
|
|
||||||
|
|
||||||
[[task]]
|
|
||||||
name = "cargo-doc"
|
|
||||||
group = "lints"
|
|
||||||
script = """
|
|
||||||
RUSTDOCFLAGS="-D warnings" cargo doc \
|
|
||||||
--workspace \
|
|
||||||
--no-deps \
|
|
||||||
--document-private-items \
|
|
||||||
--color always
|
|
||||||
"""
|
|
||||||
|
|
||||||
[[task]]
|
|
||||||
name = "cargo-clippy"
|
|
||||||
group = "lints"
|
|
||||||
script = "cargo clippy --workspace --all-targets --color=always -- -D warnings"
|
|
||||||
|
|
||||||
[[task]]
|
|
||||||
name = "cargo"
|
|
||||||
group = "tests"
|
|
||||||
script = """
|
|
||||||
cargo test \
|
|
||||||
--workspace \
|
|
||||||
--all-targets \
|
|
||||||
--color=always \
|
|
||||||
-- \
|
|
||||||
--color=always
|
|
||||||
"""
|
|
263
flake.lock
263
flake.lock
|
@ -1,263 +0,0 @@
|
||||||
{
|
|
||||||
"nodes": {
|
|
||||||
"attic": {
|
|
||||||
"inputs": {
|
|
||||||
"crane": "crane",
|
|
||||||
"flake-compat": "flake-compat",
|
|
||||||
"flake-utils": "flake-utils",
|
|
||||||
"nixpkgs": "nixpkgs",
|
|
||||||
"nixpkgs-stable": "nixpkgs-stable"
|
|
||||||
},
|
|
||||||
"locked": {
|
|
||||||
"lastModified": 1705617092,
|
|
||||||
"narHash": "sha256-n9PK4O4X4S1JkwpkMuYm1wHZYJzRqif8g3RuVIPD+rY=",
|
|
||||||
"owner": "zhaofengli",
|
|
||||||
"repo": "attic",
|
|
||||||
"rev": "fbe252a5c21febbe920c025560cbd63b20e24f3b",
|
|
||||||
"type": "github"
|
|
||||||
},
|
|
||||||
"original": {
|
|
||||||
"owner": "zhaofengli",
|
|
||||||
"ref": "main",
|
|
||||||
"repo": "attic",
|
|
||||||
"type": "github"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"crane": {
|
|
||||||
"inputs": {
|
|
||||||
"nixpkgs": [
|
|
||||||
"attic",
|
|
||||||
"nixpkgs"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
"locked": {
|
|
||||||
"lastModified": 1702918879,
|
|
||||||
"narHash": "sha256-tWJqzajIvYcaRWxn+cLUB9L9Pv4dQ3Bfit/YjU5ze3g=",
|
|
||||||
"owner": "ipetkov",
|
|
||||||
"repo": "crane",
|
|
||||||
"rev": "7195c00c272fdd92fc74e7d5a0a2844b9fadb2fb",
|
|
||||||
"type": "github"
|
|
||||||
},
|
|
||||||
"original": {
|
|
||||||
"owner": "ipetkov",
|
|
||||||
"repo": "crane",
|
|
||||||
"type": "github"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"crane_2": {
|
|
||||||
"inputs": {
|
|
||||||
"nixpkgs": [
|
|
||||||
"nixpkgs"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
"locked": {
|
|
||||||
"lastModified": 1706473964,
|
|
||||||
"narHash": "sha256-Fq6xleee/TsX6NbtoRuI96bBuDHMU57PrcK9z1QEKbk=",
|
|
||||||
"owner": "ipetkov",
|
|
||||||
"repo": "crane",
|
|
||||||
"rev": "c798790eabec3e3da48190ae3698ac227aab770c",
|
|
||||||
"type": "github"
|
|
||||||
},
|
|
||||||
"original": {
|
|
||||||
"owner": "ipetkov",
|
|
||||||
"ref": "master",
|
|
||||||
"repo": "crane",
|
|
||||||
"type": "github"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"fenix": {
|
|
||||||
"inputs": {
|
|
||||||
"nixpkgs": [
|
|
||||||
"nixpkgs"
|
|
||||||
],
|
|
||||||
"rust-analyzer-src": "rust-analyzer-src"
|
|
||||||
},
|
|
||||||
"locked": {
|
|
||||||
"lastModified": 1705559032,
|
|
||||||
"narHash": "sha256-Cb+Jd1+Gz4Wi+8elPnUIHnqQmE1qjDRZ+PsJaPaAffY=",
|
|
||||||
"owner": "nix-community",
|
|
||||||
"repo": "fenix",
|
|
||||||
"rev": "e132ea0eb0c799a2109a91688e499d7bf4962801",
|
|
||||||
"type": "github"
|
|
||||||
},
|
|
||||||
"original": {
|
|
||||||
"owner": "nix-community",
|
|
||||||
"repo": "fenix",
|
|
||||||
"type": "github"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"flake-compat": {
|
|
||||||
"flake": false,
|
|
||||||
"locked": {
|
|
||||||
"lastModified": 1673956053,
|
|
||||||
"narHash": "sha256-4gtG9iQuiKITOjNQQeQIpoIB6b16fm+504Ch3sNKLd8=",
|
|
||||||
"owner": "edolstra",
|
|
||||||
"repo": "flake-compat",
|
|
||||||
"rev": "35bb57c0c8d8b62bbfd284272c928ceb64ddbde9",
|
|
||||||
"type": "github"
|
|
||||||
},
|
|
||||||
"original": {
|
|
||||||
"owner": "edolstra",
|
|
||||||
"repo": "flake-compat",
|
|
||||||
"type": "github"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"flake-compat_2": {
|
|
||||||
"flake": false,
|
|
||||||
"locked": {
|
|
||||||
"lastModified": 1696426674,
|
|
||||||
"narHash": "sha256-kvjfFW7WAETZlt09AgDn1MrtKzP7t90Vf7vypd3OL1U=",
|
|
||||||
"owner": "edolstra",
|
|
||||||
"repo": "flake-compat",
|
|
||||||
"rev": "0f9255e01c2351cc7d116c072cb317785dd33b33",
|
|
||||||
"type": "github"
|
|
||||||
},
|
|
||||||
"original": {
|
|
||||||
"owner": "edolstra",
|
|
||||||
"repo": "flake-compat",
|
|
||||||
"type": "github"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"flake-utils": {
|
|
||||||
"locked": {
|
|
||||||
"lastModified": 1667395993,
|
|
||||||
"narHash": "sha256-nuEHfE/LcWyuSWnS8t12N1wc105Qtau+/OdUAjtQ0rA=",
|
|
||||||
"owner": "numtide",
|
|
||||||
"repo": "flake-utils",
|
|
||||||
"rev": "5aed5285a952e0b949eb3ba02c12fa4fcfef535f",
|
|
||||||
"type": "github"
|
|
||||||
},
|
|
||||||
"original": {
|
|
||||||
"owner": "numtide",
|
|
||||||
"repo": "flake-utils",
|
|
||||||
"type": "github"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"flake-utils_2": {
|
|
||||||
"inputs": {
|
|
||||||
"systems": "systems"
|
|
||||||
},
|
|
||||||
"locked": {
|
|
||||||
"lastModified": 1705309234,
|
|
||||||
"narHash": "sha256-uNRRNRKmJyCRC/8y1RqBkqWBLM034y4qN7EprSdmgyA=",
|
|
||||||
"owner": "numtide",
|
|
||||||
"repo": "flake-utils",
|
|
||||||
"rev": "1ef2e671c3b0c19053962c07dbda38332dcebf26",
|
|
||||||
"type": "github"
|
|
||||||
},
|
|
||||||
"original": {
|
|
||||||
"owner": "numtide",
|
|
||||||
"repo": "flake-utils",
|
|
||||||
"type": "github"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"nix-filter": {
|
|
||||||
"locked": {
|
|
||||||
"lastModified": 1705332318,
|
|
||||||
"narHash": "sha256-kcw1yFeJe9N4PjQji9ZeX47jg0p9A0DuU4djKvg1a7I=",
|
|
||||||
"owner": "numtide",
|
|
||||||
"repo": "nix-filter",
|
|
||||||
"rev": "3449dc925982ad46246cfc36469baf66e1b64f17",
|
|
||||||
"type": "github"
|
|
||||||
},
|
|
||||||
"original": {
|
|
||||||
"owner": "numtide",
|
|
||||||
"repo": "nix-filter",
|
|
||||||
"type": "github"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"nixpkgs": {
|
|
||||||
"locked": {
|
|
||||||
"lastModified": 1702539185,
|
|
||||||
"narHash": "sha256-KnIRG5NMdLIpEkZTnN5zovNYc0hhXjAgv6pfd5Z4c7U=",
|
|
||||||
"owner": "NixOS",
|
|
||||||
"repo": "nixpkgs",
|
|
||||||
"rev": "aa9d4729cbc99dabacb50e3994dcefb3ea0f7447",
|
|
||||||
"type": "github"
|
|
||||||
},
|
|
||||||
"original": {
|
|
||||||
"owner": "NixOS",
|
|
||||||
"ref": "nixpkgs-unstable",
|
|
||||||
"repo": "nixpkgs",
|
|
||||||
"type": "github"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"nixpkgs-stable": {
|
|
||||||
"locked": {
|
|
||||||
"lastModified": 1702780907,
|
|
||||||
"narHash": "sha256-blbrBBXjjZt6OKTcYX1jpe9SRof2P9ZYWPzq22tzXAA=",
|
|
||||||
"owner": "NixOS",
|
|
||||||
"repo": "nixpkgs",
|
|
||||||
"rev": "1e2e384c5b7c50dbf8e9c441a9e58d85f408b01f",
|
|
||||||
"type": "github"
|
|
||||||
},
|
|
||||||
"original": {
|
|
||||||
"owner": "NixOS",
|
|
||||||
"ref": "nixos-23.11",
|
|
||||||
"repo": "nixpkgs",
|
|
||||||
"type": "github"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"nixpkgs_2": {
|
|
||||||
"locked": {
|
|
||||||
"lastModified": 1705496572,
|
|
||||||
"narHash": "sha256-rPIe9G5EBLXdBdn9ilGc0nq082lzQd0xGGe092R/5QE=",
|
|
||||||
"owner": "NixOS",
|
|
||||||
"repo": "nixpkgs",
|
|
||||||
"rev": "842d9d80cfd4560648c785f8a4e6f3b096790e19",
|
|
||||||
"type": "github"
|
|
||||||
},
|
|
||||||
"original": {
|
|
||||||
"owner": "NixOS",
|
|
||||||
"ref": "nixos-unstable",
|
|
||||||
"repo": "nixpkgs",
|
|
||||||
"type": "github"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"root": {
|
|
||||||
"inputs": {
|
|
||||||
"attic": "attic",
|
|
||||||
"crane": "crane_2",
|
|
||||||
"fenix": "fenix",
|
|
||||||
"flake-compat": "flake-compat_2",
|
|
||||||
"flake-utils": "flake-utils_2",
|
|
||||||
"nix-filter": "nix-filter",
|
|
||||||
"nixpkgs": "nixpkgs_2"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"rust-analyzer-src": {
|
|
||||||
"flake": false,
|
|
||||||
"locked": {
|
|
||||||
"lastModified": 1705523001,
|
|
||||||
"narHash": "sha256-TWq5vJ6m+9HGSDMsQAmz1TMegMi79R3TTyKjnPWsQp8=",
|
|
||||||
"owner": "rust-lang",
|
|
||||||
"repo": "rust-analyzer",
|
|
||||||
"rev": "9d9b34354d2f13e33568c9c55b226dd014a146a0",
|
|
||||||
"type": "github"
|
|
||||||
},
|
|
||||||
"original": {
|
|
||||||
"owner": "rust-lang",
|
|
||||||
"ref": "nightly",
|
|
||||||
"repo": "rust-analyzer",
|
|
||||||
"type": "github"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"systems": {
|
|
||||||
"locked": {
|
|
||||||
"lastModified": 1681028828,
|
|
||||||
"narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
|
|
||||||
"owner": "nix-systems",
|
|
||||||
"repo": "default",
|
|
||||||
"rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
|
|
||||||
"type": "github"
|
|
||||||
},
|
|
||||||
"original": {
|
|
||||||
"owner": "nix-systems",
|
|
||||||
"repo": "default",
|
|
||||||
"type": "github"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"root": "root",
|
|
||||||
"version": 7
|
|
||||||
}
|
|
259
flake.nix
259
flake.nix
|
@ -1,259 +0,0 @@
|
||||||
{
|
|
||||||
inputs = {
|
|
||||||
nixpkgs.url = "github:NixOS/nixpkgs?ref=nixos-unstable";
|
|
||||||
flake-utils.url = "github:numtide/flake-utils";
|
|
||||||
nix-filter.url = "github:numtide/nix-filter";
|
|
||||||
flake-compat = {
|
|
||||||
url = "github:edolstra/flake-compat";
|
|
||||||
flake = false;
|
|
||||||
};
|
|
||||||
|
|
||||||
fenix = {
|
|
||||||
url = "github:nix-community/fenix";
|
|
||||||
inputs.nixpkgs.follows = "nixpkgs";
|
|
||||||
};
|
|
||||||
crane = {
|
|
||||||
url = "github:ipetkov/crane?ref=master";
|
|
||||||
inputs.nixpkgs.follows = "nixpkgs";
|
|
||||||
};
|
|
||||||
attic.url = "github:zhaofengli/attic?ref=main";
|
|
||||||
};
|
|
||||||
|
|
||||||
outputs =
|
|
||||||
{ self
|
|
||||||
, nixpkgs
|
|
||||||
, flake-utils
|
|
||||||
, nix-filter
|
|
||||||
|
|
||||||
, fenix
|
|
||||||
, crane
|
|
||||||
, ...
|
|
||||||
}: flake-utils.lib.eachDefaultSystem (system:
|
|
||||||
let
|
|
||||||
pkgsHost = nixpkgs.legacyPackages.${system};
|
|
||||||
|
|
||||||
# Nix-accessible `Cargo.toml`
|
|
||||||
cargoToml = builtins.fromTOML (builtins.readFile ./Cargo.toml);
|
|
||||||
|
|
||||||
# The Rust toolchain to use
|
|
||||||
toolchain = fenix.packages.${system}.fromToolchainFile {
|
|
||||||
file = ./rust-toolchain.toml;
|
|
||||||
|
|
||||||
# See also `rust-toolchain.toml`
|
|
||||||
sha256 = "sha256-SXRtAuO4IqNOQq+nLbrsDFbVk+3aVA8NNpSZsKlVH/8=";
|
|
||||||
};
|
|
||||||
|
|
||||||
builder = pkgs:
|
|
||||||
((crane.mkLib pkgs).overrideToolchain toolchain).buildPackage;
|
|
||||||
|
|
||||||
nativeBuildInputs = pkgs: [
|
|
||||||
# bindgen needs the build platform's libclang. Apparently due to
|
|
||||||
# "splicing weirdness", pkgs.rustPlatform.bindgenHook on its own doesn't
|
|
||||||
# quite do the right thing here.
|
|
||||||
pkgs.buildPackages.rustPlatform.bindgenHook
|
|
||||||
];
|
|
||||||
|
|
||||||
env = pkgs: {
|
|
||||||
ROCKSDB_INCLUDE_DIR = "${pkgs.rocksdb}/include";
|
|
||||||
ROCKSDB_LIB_DIR = "${pkgs.rocksdb}/lib";
|
|
||||||
}
|
|
||||||
// pkgs.lib.optionalAttrs pkgs.stdenv.hostPlatform.isStatic {
|
|
||||||
ROCKSDB_STATIC = "";
|
|
||||||
}
|
|
||||||
// {
|
|
||||||
CARGO_BUILD_RUSTFLAGS = let inherit (pkgs) lib stdenv; in
|
|
||||||
lib.concatStringsSep " " ([]
|
|
||||||
++ lib.optionals
|
|
||||||
# This disables PIE for static builds, which isn't great in terms
|
|
||||||
# of security. Unfortunately, my hand is forced because nixpkgs'
|
|
||||||
# `libstdc++.a` is built without `-fPIE`, which precludes us from
|
|
||||||
# leaving PIE enabled.
|
|
||||||
stdenv.hostPlatform.isStatic
|
|
||||||
["-C" "relocation-model=static"]
|
|
||||||
++ lib.optionals
|
|
||||||
(stdenv.buildPlatform.config != stdenv.hostPlatform.config)
|
|
||||||
["-l" "c"]
|
|
||||||
++ lib.optionals
|
|
||||||
# This check has to match the one [here][0]. We only need to set
|
|
||||||
# these flags when using a different linker. Don't ask me why,
|
|
||||||
# though, because I don't know. All I know is it breaks otherwise.
|
|
||||||
#
|
|
||||||
# [0]: https://github.com/NixOS/nixpkgs/blob/612f97239e2cc474c13c9dafa0df378058c5ad8d/pkgs/build-support/rust/lib/default.nix#L36-L39
|
|
||||||
(
|
|
||||||
# Nixpkgs doesn't check for x86_64 here but we do, because I
|
|
||||||
# observed a failure building statically for x86_64 without
|
|
||||||
# including it here. Linkers are weird.
|
|
||||||
(stdenv.hostPlatform.isAarch64 || stdenv.hostPlatform.isx86_64)
|
|
||||||
&& stdenv.hostPlatform.isStatic
|
|
||||||
&& !stdenv.isDarwin
|
|
||||||
&& !stdenv.cc.bintools.isLLVM
|
|
||||||
)
|
|
||||||
[
|
|
||||||
"-l"
|
|
||||||
"stdc++"
|
|
||||||
"-L"
|
|
||||||
"${stdenv.cc.cc.lib}/${stdenv.hostPlatform.config}/lib"
|
|
||||||
]
|
|
||||||
);
|
|
||||||
}
|
|
||||||
|
|
||||||
# What follows is stolen from [here][0]. Its purpose is to properly
|
|
||||||
# configure compilers and linkers for various stages of the build, and
|
|
||||||
# even covers the case of build scripts that need native code compiled and
|
|
||||||
# run on the build platform (I think).
|
|
||||||
#
|
|
||||||
# [0]: https://github.com/NixOS/nixpkgs/blob/612f97239e2cc474c13c9dafa0df378058c5ad8d/pkgs/build-support/rust/lib/default.nix#L64-L78
|
|
||||||
// (
|
|
||||||
let
|
|
||||||
inherit (pkgs.rust.lib) envVars;
|
|
||||||
in
|
|
||||||
pkgs.lib.optionalAttrs
|
|
||||||
(pkgs.stdenv.targetPlatform.rust.rustcTarget
|
|
||||||
!= pkgs.stdenv.hostPlatform.rust.rustcTarget)
|
|
||||||
(
|
|
||||||
let
|
|
||||||
inherit (pkgs.stdenv.targetPlatform.rust) cargoEnvVarTarget;
|
|
||||||
in
|
|
||||||
{
|
|
||||||
"CC_${cargoEnvVarTarget}" = envVars.ccForTarget;
|
|
||||||
"CXX_${cargoEnvVarTarget}" = envVars.cxxForTarget;
|
|
||||||
"CARGO_TARGET_${cargoEnvVarTarget}_LINKER" =
|
|
||||||
envVars.linkerForTarget;
|
|
||||||
}
|
|
||||||
)
|
|
||||||
// (
|
|
||||||
let
|
|
||||||
inherit (pkgs.stdenv.hostPlatform.rust) cargoEnvVarTarget rustcTarget;
|
|
||||||
in
|
|
||||||
{
|
|
||||||
"CC_${cargoEnvVarTarget}" = envVars.ccForHost;
|
|
||||||
"CXX_${cargoEnvVarTarget}" = envVars.cxxForHost;
|
|
||||||
"CARGO_TARGET_${cargoEnvVarTarget}_LINKER" = envVars.linkerForHost;
|
|
||||||
CARGO_BUILD_TARGET = rustcTarget;
|
|
||||||
}
|
|
||||||
)
|
|
||||||
// (
|
|
||||||
let
|
|
||||||
inherit (pkgs.stdenv.buildPlatform.rust) cargoEnvVarTarget;
|
|
||||||
in
|
|
||||||
{
|
|
||||||
"CC_${cargoEnvVarTarget}" = envVars.ccForBuild;
|
|
||||||
"CXX_${cargoEnvVarTarget}" = envVars.cxxForBuild;
|
|
||||||
"CARGO_TARGET_${cargoEnvVarTarget}_LINKER" = envVars.linkerForBuild;
|
|
||||||
HOST_CC = "${pkgs.buildPackages.stdenv.cc}/bin/cc";
|
|
||||||
HOST_CXX = "${pkgs.buildPackages.stdenv.cc}/bin/c++";
|
|
||||||
}
|
|
||||||
));
|
|
||||||
|
|
||||||
package = pkgs: builder pkgs {
|
|
||||||
src = nix-filter {
|
|
||||||
root = ./.;
|
|
||||||
include = [
|
|
||||||
"src"
|
|
||||||
"Cargo.toml"
|
|
||||||
"Cargo.lock"
|
|
||||||
];
|
|
||||||
};
|
|
||||||
|
|
||||||
# This is redundant with CI
|
|
||||||
doCheck = false;
|
|
||||||
|
|
||||||
env = env pkgs;
|
|
||||||
nativeBuildInputs = nativeBuildInputs pkgs;
|
|
||||||
|
|
||||||
meta.mainProgram = cargoToml.package.name;
|
|
||||||
};
|
|
||||||
|
|
||||||
mkOciImage = pkgs: package:
|
|
||||||
pkgs.dockerTools.buildImage {
|
|
||||||
name = package.pname;
|
|
||||||
tag = "next";
|
|
||||||
copyToRoot = [
|
|
||||||
pkgs.dockerTools.caCertificates
|
|
||||||
];
|
|
||||||
config = {
|
|
||||||
# Use the `tini` init system so that signals (e.g. ctrl+c/SIGINT)
|
|
||||||
# are handled as expected
|
|
||||||
Entrypoint = [
|
|
||||||
"${pkgs.lib.getExe' pkgs.tini "tini"}"
|
|
||||||
"--"
|
|
||||||
];
|
|
||||||
Cmd = [
|
|
||||||
"${pkgs.lib.getExe package}"
|
|
||||||
];
|
|
||||||
};
|
|
||||||
};
|
|
||||||
in
|
|
||||||
{
|
|
||||||
packages = {
|
|
||||||
default = package pkgsHost;
|
|
||||||
oci-image = mkOciImage pkgsHost self.packages.${system}.default;
|
|
||||||
}
|
|
||||||
//
|
|
||||||
builtins.listToAttrs
|
|
||||||
(builtins.concatLists
|
|
||||||
(builtins.map
|
|
||||||
(crossSystem:
|
|
||||||
let
|
|
||||||
binaryName = "static-${crossSystem}";
|
|
||||||
pkgsCrossStatic =
|
|
||||||
(import nixpkgs {
|
|
||||||
inherit system;
|
|
||||||
crossSystem = {
|
|
||||||
config = crossSystem;
|
|
||||||
};
|
|
||||||
}).pkgsStatic;
|
|
||||||
in
|
|
||||||
[
|
|
||||||
# An output for a statically-linked binary
|
|
||||||
{
|
|
||||||
name = binaryName;
|
|
||||||
value = package pkgsCrossStatic;
|
|
||||||
}
|
|
||||||
|
|
||||||
# An output for an OCI image based on that binary
|
|
||||||
{
|
|
||||||
name = "oci-image-${crossSystem}";
|
|
||||||
value = mkOciImage
|
|
||||||
pkgsCrossStatic
|
|
||||||
self.packages.${system}.${binaryName};
|
|
||||||
}
|
|
||||||
]
|
|
||||||
)
|
|
||||||
[
|
|
||||||
"x86_64-unknown-linux-musl"
|
|
||||||
"aarch64-unknown-linux-musl"
|
|
||||||
]
|
|
||||||
)
|
|
||||||
);
|
|
||||||
|
|
||||||
devShells.default = pkgsHost.mkShell {
|
|
||||||
env = env pkgsHost // {
|
|
||||||
# Rust Analyzer needs to be able to find the path to default crate
|
|
||||||
# sources, and it can read this environment variable to do so. The
|
|
||||||
# `rust-src` component is required in order for this to work.
|
|
||||||
RUST_SRC_PATH = "${toolchain}/lib/rustlib/src/rust/library";
|
|
||||||
};
|
|
||||||
|
|
||||||
# Development tools
|
|
||||||
nativeBuildInputs = nativeBuildInputs pkgsHost ++ [
|
|
||||||
# Always use nightly rustfmt because most of its options are unstable
|
|
||||||
#
|
|
||||||
# This needs to come before `toolchain` in this list, otherwise
|
|
||||||
# `$PATH` will have stable rustfmt instead.
|
|
||||||
fenix.packages.${system}.latest.rustfmt
|
|
||||||
|
|
||||||
toolchain
|
|
||||||
] ++ (with pkgsHost; [
|
|
||||||
engage
|
|
||||||
|
|
||||||
# Needed for Complement
|
|
||||||
go
|
|
||||||
olm
|
|
||||||
|
|
||||||
# Needed for our script for Complement
|
|
||||||
jq
|
|
||||||
]);
|
|
||||||
};
|
|
||||||
});
|
|
||||||
}
|
|
198
nix/README.md
198
nix/README.md
|
@ -1,198 +0,0 @@
|
||||||
# Conduit for Nix/NixOS
|
|
||||||
|
|
||||||
This guide assumes you have a recent version of Nix (^2.4) installed.
|
|
||||||
|
|
||||||
Since Conduit ships as a Nix flake, you'll first need to [enable
|
|
||||||
flakes][enable_flakes].
|
|
||||||
|
|
||||||
You can now use the usual Nix commands to interact with Conduit's flake. For
|
|
||||||
example, `nix run gitlab:famedly/conduit` will run Conduit (though you'll need
|
|
||||||
to provide configuration and such manually as usual).
|
|
||||||
|
|
||||||
If your NixOS configuration is defined as a flake, you can depend on this flake
|
|
||||||
to provide a more up-to-date version than provided by `nixpkgs`. In your flake,
|
|
||||||
add the following to your `inputs`:
|
|
||||||
|
|
||||||
```nix
|
|
||||||
conduit = {
|
|
||||||
url = "gitlab:famedly/conduit";
|
|
||||||
|
|
||||||
# Assuming you have an input for nixpkgs called `nixpkgs`. If you experience
|
|
||||||
# build failures while using this, try commenting/deleting this line. This
|
|
||||||
# will probably also require you to always build from source.
|
|
||||||
inputs.nixpkgs.follows = "nixpkgs";
|
|
||||||
};
|
|
||||||
```
|
|
||||||
|
|
||||||
Next, make sure you're passing your flake inputs to the `specialArgs` argument
|
|
||||||
of `nixpkgs.lib.nixosSystem` [as explained here][specialargs]. This guide will
|
|
||||||
assume you've named the group `flake-inputs`.
|
|
||||||
|
|
||||||
Now you can configure Conduit and a reverse proxy for it. Add the following to
|
|
||||||
a new Nix file and include it in your configuration:
|
|
||||||
|
|
||||||
```nix
|
|
||||||
{ config
|
|
||||||
, pkgs
|
|
||||||
, flake-inputs
|
|
||||||
, ...
|
|
||||||
}:
|
|
||||||
|
|
||||||
let
|
|
||||||
# You'll need to edit these values
|
|
||||||
|
|
||||||
# The hostname that will appear in your user and room IDs
|
|
||||||
server_name = "example.com";
|
|
||||||
|
|
||||||
# The hostname that Conduit actually runs on
|
|
||||||
#
|
|
||||||
# This can be the same as `server_name` if you want. This is only necessary
|
|
||||||
# when Conduit is running on a different machine than the one hosting your
|
|
||||||
# root domain. This configuration also assumes this is all running on a single
|
|
||||||
# machine, some tweaks will need to be made if this is not the case.
|
|
||||||
matrix_hostname = "matrix.${server_name}";
|
|
||||||
|
|
||||||
# An admin email for TLS certificate notifications
|
|
||||||
admin_email = "admin@${server_name}";
|
|
||||||
|
|
||||||
# These ones you can leave alone
|
|
||||||
|
|
||||||
# Build a dervation that stores the content of `${server_name}/.well-known/matrix/server`
|
|
||||||
well_known_server = pkgs.writeText "well-known-matrix-server" ''
|
|
||||||
{
|
|
||||||
"m.server": "${matrix_hostname}"
|
|
||||||
}
|
|
||||||
'';
|
|
||||||
|
|
||||||
# Build a dervation that stores the content of `${server_name}/.well-known/matrix/client`
|
|
||||||
well_known_client = pkgs.writeText "well-known-matrix-client" ''
|
|
||||||
{
|
|
||||||
"m.homeserver": {
|
|
||||||
"base_url": "https://${matrix_hostname}"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
'';
|
|
||||||
in
|
|
||||||
|
|
||||||
{
|
|
||||||
# Configure Conduit itself
|
|
||||||
services.matrix-conduit = {
|
|
||||||
enable = true;
|
|
||||||
|
|
||||||
# This causes NixOS to use the flake defined in this repository instead of
|
|
||||||
# the build of Conduit built into nixpkgs.
|
|
||||||
package = flake-inputs.conduit.packages.${pkgs.system}.default;
|
|
||||||
|
|
||||||
settings.global = {
|
|
||||||
inherit server_name;
|
|
||||||
};
|
|
||||||
};
|
|
||||||
|
|
||||||
# Configure automated TLS acquisition/renewal
|
|
||||||
security.acme = {
|
|
||||||
acceptTerms = true;
|
|
||||||
defaults = {
|
|
||||||
email = admin_email;
|
|
||||||
};
|
|
||||||
};
|
|
||||||
|
|
||||||
# ACME data must be readable by the NGINX user
|
|
||||||
users.users.nginx.extraGroups = [
|
|
||||||
"acme"
|
|
||||||
];
|
|
||||||
|
|
||||||
# Configure NGINX as a reverse proxy
|
|
||||||
services.nginx = {
|
|
||||||
enable = true;
|
|
||||||
recommendedProxySettings = true;
|
|
||||||
|
|
||||||
virtualHosts = {
|
|
||||||
"${matrix_hostname}" = {
|
|
||||||
forceSSL = true;
|
|
||||||
enableACME = true;
|
|
||||||
|
|
||||||
listen = [
|
|
||||||
{
|
|
||||||
addr = "0.0.0.0";
|
|
||||||
port = 443;
|
|
||||||
ssl = true;
|
|
||||||
}
|
|
||||||
{
|
|
||||||
addr = "[::]";
|
|
||||||
port = 443;
|
|
||||||
ssl = true;
|
|
||||||
} {
|
|
||||||
addr = "0.0.0.0";
|
|
||||||
port = 8448;
|
|
||||||
ssl = true;
|
|
||||||
}
|
|
||||||
{
|
|
||||||
addr = "[::]";
|
|
||||||
port = 8448;
|
|
||||||
ssl = true;
|
|
||||||
}
|
|
||||||
];
|
|
||||||
|
|
||||||
locations."/_matrix/" = {
|
|
||||||
proxyPass = "http://backend_conduit$request_uri";
|
|
||||||
proxyWebsockets = true;
|
|
||||||
extraConfig = ''
|
|
||||||
proxy_set_header Host $host;
|
|
||||||
proxy_buffering off;
|
|
||||||
'';
|
|
||||||
};
|
|
||||||
|
|
||||||
extraConfig = ''
|
|
||||||
merge_slashes off;
|
|
||||||
'';
|
|
||||||
};
|
|
||||||
|
|
||||||
"${server_name}" = {
|
|
||||||
forceSSL = true;
|
|
||||||
enableACME = true;
|
|
||||||
|
|
||||||
locations."=/.well-known/matrix/server" = {
|
|
||||||
# Use the contents of the derivation built previously
|
|
||||||
alias = "${well_known_server}";
|
|
||||||
|
|
||||||
extraConfig = ''
|
|
||||||
# Set the header since by default NGINX thinks it's just bytes
|
|
||||||
default_type application/json;
|
|
||||||
'';
|
|
||||||
};
|
|
||||||
|
|
||||||
locations."=/.well-known/matrix/client" = {
|
|
||||||
# Use the contents of the derivation built previously
|
|
||||||
alias = "${well_known_client}";
|
|
||||||
|
|
||||||
extraConfig = ''
|
|
||||||
# Set the header since by default NGINX thinks it's just bytes
|
|
||||||
default_type application/json;
|
|
||||||
|
|
||||||
# https://matrix.org/docs/spec/client_server/r0.4.0#web-browser-clients
|
|
||||||
add_header Access-Control-Allow-Origin "*";
|
|
||||||
'';
|
|
||||||
};
|
|
||||||
};
|
|
||||||
};
|
|
||||||
|
|
||||||
upstreams = {
|
|
||||||
"backend_conduit" = {
|
|
||||||
servers = {
|
|
||||||
"[::1]:${toString config.services.matrix-conduit.settings.global.port}" = { };
|
|
||||||
};
|
|
||||||
};
|
|
||||||
};
|
|
||||||
};
|
|
||||||
|
|
||||||
# Open firewall ports for HTTP, HTTPS, and Matrix federation
|
|
||||||
networking.firewall.allowedTCPPorts = [ 80 443 8448 ];
|
|
||||||
networking.firewall.allowedUDPPorts = [ 80 443 8448 ];
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
Now you can rebuild your system configuration and you should be good to go!
|
|
||||||
|
|
||||||
[enable_flakes]: https://nixos.wiki/wiki/Flakes#Enable_flakes
|
|
||||||
|
|
||||||
[specialargs]: https://nixos.wiki/wiki/Flakes#Using_nix_flakes_with_NixOS
|
|
|
@ -1,22 +0,0 @@
|
||||||
# This is the authoritiative configuration of this project's Rust toolchain.
|
|
||||||
#
|
|
||||||
# Other files that need upkeep when this changes:
|
|
||||||
#
|
|
||||||
# * `.gitlab-ci.yml`
|
|
||||||
# * `Cargo.toml`
|
|
||||||
# * `flake.nix`
|
|
||||||
#
|
|
||||||
# Search in those files for `rust-toolchain.toml` to find the relevant places.
|
|
||||||
# If you're having trouble making the relevant changes, bug a maintainer.
|
|
||||||
|
|
||||||
[toolchain]
|
|
||||||
channel = "1.75.0"
|
|
||||||
components = [
|
|
||||||
# For rust-analyzer
|
|
||||||
"rust-src",
|
|
||||||
]
|
|
||||||
targets = [
|
|
||||||
"x86_64-unknown-linux-gnu",
|
|
||||||
"x86_64-unknown-linux-musl",
|
|
||||||
"aarch64-unknown-linux-musl",
|
|
||||||
]
|
|
|
@ -1,362 +0,0 @@
|
||||||
use crate::{services, Error, Result, Ruma};
|
|
||||||
use ruma::api::client::{
|
|
||||||
backup::{
|
|
||||||
add_backup_keys, add_backup_keys_for_room, add_backup_keys_for_session,
|
|
||||||
create_backup_version, delete_backup_keys, delete_backup_keys_for_room,
|
|
||||||
delete_backup_keys_for_session, delete_backup_version, get_backup_info, get_backup_keys,
|
|
||||||
get_backup_keys_for_room, get_backup_keys_for_session, get_latest_backup_info,
|
|
||||||
update_backup_version,
|
|
||||||
},
|
|
||||||
error::ErrorKind,
|
|
||||||
};
|
|
||||||
|
|
||||||
/// # `POST /_matrix/client/r0/room_keys/version`
|
|
||||||
///
|
|
||||||
/// Creates a new backup.
|
|
||||||
pub async fn create_backup_version_route(
|
|
||||||
body: Ruma<create_backup_version::v3::Request>,
|
|
||||||
) -> Result<create_backup_version::v3::Response> {
|
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
|
||||||
let version = services()
|
|
||||||
.key_backups
|
|
||||||
.create_backup(sender_user, &body.algorithm)?;
|
|
||||||
|
|
||||||
Ok(create_backup_version::v3::Response { version })
|
|
||||||
}
|
|
||||||
|
|
||||||
/// # `PUT /_matrix/client/r0/room_keys/version/{version}`
|
|
||||||
///
|
|
||||||
/// Update information about an existing backup. Only `auth_data` can be modified.
|
|
||||||
pub async fn update_backup_version_route(
|
|
||||||
body: Ruma<update_backup_version::v3::Request>,
|
|
||||||
) -> Result<update_backup_version::v3::Response> {
|
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
|
||||||
services()
|
|
||||||
.key_backups
|
|
||||||
.update_backup(sender_user, &body.version, &body.algorithm)?;
|
|
||||||
|
|
||||||
Ok(update_backup_version::v3::Response {})
|
|
||||||
}
|
|
||||||
|
|
||||||
/// # `GET /_matrix/client/r0/room_keys/version`
|
|
||||||
///
|
|
||||||
/// Get information about the latest backup version.
|
|
||||||
pub async fn get_latest_backup_info_route(
|
|
||||||
body: Ruma<get_latest_backup_info::v3::Request>,
|
|
||||||
) -> Result<get_latest_backup_info::v3::Response> {
|
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
|
||||||
|
|
||||||
let (version, algorithm) = services()
|
|
||||||
.key_backups
|
|
||||||
.get_latest_backup(sender_user)?
|
|
||||||
.ok_or(Error::BadRequest(
|
|
||||||
ErrorKind::NotFound,
|
|
||||||
"Key backup does not exist.",
|
|
||||||
))?;
|
|
||||||
|
|
||||||
Ok(get_latest_backup_info::v3::Response {
|
|
||||||
algorithm,
|
|
||||||
count: (services().key_backups.count_keys(sender_user, &version)? as u32).into(),
|
|
||||||
etag: services().key_backups.get_etag(sender_user, &version)?,
|
|
||||||
version,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
/// # `GET /_matrix/client/r0/room_keys/version`
|
|
||||||
///
|
|
||||||
/// Get information about an existing backup.
|
|
||||||
pub async fn get_backup_info_route(
|
|
||||||
body: Ruma<get_backup_info::v3::Request>,
|
|
||||||
) -> Result<get_backup_info::v3::Response> {
|
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
|
||||||
let algorithm = services()
|
|
||||||
.key_backups
|
|
||||||
.get_backup(sender_user, &body.version)?
|
|
||||||
.ok_or(Error::BadRequest(
|
|
||||||
ErrorKind::NotFound,
|
|
||||||
"Key backup does not exist.",
|
|
||||||
))?;
|
|
||||||
|
|
||||||
Ok(get_backup_info::v3::Response {
|
|
||||||
algorithm,
|
|
||||||
count: (services()
|
|
||||||
.key_backups
|
|
||||||
.count_keys(sender_user, &body.version)? as u32)
|
|
||||||
.into(),
|
|
||||||
etag: services()
|
|
||||||
.key_backups
|
|
||||||
.get_etag(sender_user, &body.version)?,
|
|
||||||
version: body.version.to_owned(),
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
/// # `DELETE /_matrix/client/r0/room_keys/version/{version}`
|
|
||||||
///
|
|
||||||
/// Delete an existing key backup.
|
|
||||||
///
|
|
||||||
/// - Deletes both information about the backup, as well as all key data related to the backup
|
|
||||||
pub async fn delete_backup_version_route(
|
|
||||||
body: Ruma<delete_backup_version::v3::Request>,
|
|
||||||
) -> Result<delete_backup_version::v3::Response> {
|
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
|
||||||
|
|
||||||
services()
|
|
||||||
.key_backups
|
|
||||||
.delete_backup(sender_user, &body.version)?;
|
|
||||||
|
|
||||||
Ok(delete_backup_version::v3::Response {})
|
|
||||||
}
|
|
||||||
|
|
||||||
/// # `PUT /_matrix/client/r0/room_keys/keys`
|
|
||||||
///
|
|
||||||
/// Add the received backup keys to the database.
|
|
||||||
///
|
|
||||||
/// - Only manipulating the most recently created version of the backup is allowed
|
|
||||||
/// - Adds the keys to the backup
|
|
||||||
/// - Returns the new number of keys in this backup and the etag
|
|
||||||
pub async fn add_backup_keys_route(
|
|
||||||
body: Ruma<add_backup_keys::v3::Request>,
|
|
||||||
) -> Result<add_backup_keys::v3::Response> {
|
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
|
||||||
|
|
||||||
if Some(&body.version)
|
|
||||||
!= services()
|
|
||||||
.key_backups
|
|
||||||
.get_latest_backup_version(sender_user)?
|
|
||||||
.as_ref()
|
|
||||||
{
|
|
||||||
return Err(Error::BadRequest(
|
|
||||||
ErrorKind::InvalidParam,
|
|
||||||
"You may only manipulate the most recently created version of the backup.",
|
|
||||||
));
|
|
||||||
}
|
|
||||||
|
|
||||||
for (room_id, room) in &body.rooms {
|
|
||||||
for (session_id, key_data) in &room.sessions {
|
|
||||||
services().key_backups.add_key(
|
|
||||||
sender_user,
|
|
||||||
&body.version,
|
|
||||||
room_id,
|
|
||||||
session_id,
|
|
||||||
key_data,
|
|
||||||
)?
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(add_backup_keys::v3::Response {
|
|
||||||
count: (services()
|
|
||||||
.key_backups
|
|
||||||
.count_keys(sender_user, &body.version)? as u32)
|
|
||||||
.into(),
|
|
||||||
etag: services()
|
|
||||||
.key_backups
|
|
||||||
.get_etag(sender_user, &body.version)?,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
/// # `PUT /_matrix/client/r0/room_keys/keys/{roomId}`
|
|
||||||
///
|
|
||||||
/// Add the received backup keys to the database.
|
|
||||||
///
|
|
||||||
/// - Only manipulating the most recently created version of the backup is allowed
|
|
||||||
/// - Adds the keys to the backup
|
|
||||||
/// - Returns the new number of keys in this backup and the etag
|
|
||||||
pub async fn add_backup_keys_for_room_route(
|
|
||||||
body: Ruma<add_backup_keys_for_room::v3::Request>,
|
|
||||||
) -> Result<add_backup_keys_for_room::v3::Response> {
|
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
|
||||||
|
|
||||||
if Some(&body.version)
|
|
||||||
!= services()
|
|
||||||
.key_backups
|
|
||||||
.get_latest_backup_version(sender_user)?
|
|
||||||
.as_ref()
|
|
||||||
{
|
|
||||||
return Err(Error::BadRequest(
|
|
||||||
ErrorKind::InvalidParam,
|
|
||||||
"You may only manipulate the most recently created version of the backup.",
|
|
||||||
));
|
|
||||||
}
|
|
||||||
|
|
||||||
for (session_id, key_data) in &body.sessions {
|
|
||||||
services().key_backups.add_key(
|
|
||||||
sender_user,
|
|
||||||
&body.version,
|
|
||||||
&body.room_id,
|
|
||||||
session_id,
|
|
||||||
key_data,
|
|
||||||
)?
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(add_backup_keys_for_room::v3::Response {
|
|
||||||
count: (services()
|
|
||||||
.key_backups
|
|
||||||
.count_keys(sender_user, &body.version)? as u32)
|
|
||||||
.into(),
|
|
||||||
etag: services()
|
|
||||||
.key_backups
|
|
||||||
.get_etag(sender_user, &body.version)?,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
/// # `PUT /_matrix/client/r0/room_keys/keys/{roomId}/{sessionId}`
|
|
||||||
///
|
|
||||||
/// Add the received backup key to the database.
|
|
||||||
///
|
|
||||||
/// - Only manipulating the most recently created version of the backup is allowed
|
|
||||||
/// - Adds the keys to the backup
|
|
||||||
/// - Returns the new number of keys in this backup and the etag
|
|
||||||
pub async fn add_backup_keys_for_session_route(
|
|
||||||
body: Ruma<add_backup_keys_for_session::v3::Request>,
|
|
||||||
) -> Result<add_backup_keys_for_session::v3::Response> {
|
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
|
||||||
|
|
||||||
if Some(&body.version)
|
|
||||||
!= services()
|
|
||||||
.key_backups
|
|
||||||
.get_latest_backup_version(sender_user)?
|
|
||||||
.as_ref()
|
|
||||||
{
|
|
||||||
return Err(Error::BadRequest(
|
|
||||||
ErrorKind::InvalidParam,
|
|
||||||
"You may only manipulate the most recently created version of the backup.",
|
|
||||||
));
|
|
||||||
}
|
|
||||||
|
|
||||||
services().key_backups.add_key(
|
|
||||||
sender_user,
|
|
||||||
&body.version,
|
|
||||||
&body.room_id,
|
|
||||||
&body.session_id,
|
|
||||||
&body.session_data,
|
|
||||||
)?;
|
|
||||||
|
|
||||||
Ok(add_backup_keys_for_session::v3::Response {
|
|
||||||
count: (services()
|
|
||||||
.key_backups
|
|
||||||
.count_keys(sender_user, &body.version)? as u32)
|
|
||||||
.into(),
|
|
||||||
etag: services()
|
|
||||||
.key_backups
|
|
||||||
.get_etag(sender_user, &body.version)?,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
/// # `GET /_matrix/client/r0/room_keys/keys`
|
|
||||||
///
|
|
||||||
/// Retrieves all keys from the backup.
|
|
||||||
pub async fn get_backup_keys_route(
|
|
||||||
body: Ruma<get_backup_keys::v3::Request>,
|
|
||||||
) -> Result<get_backup_keys::v3::Response> {
|
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
|
||||||
|
|
||||||
let rooms = services().key_backups.get_all(sender_user, &body.version)?;
|
|
||||||
|
|
||||||
Ok(get_backup_keys::v3::Response { rooms })
|
|
||||||
}
|
|
||||||
|
|
||||||
/// # `GET /_matrix/client/r0/room_keys/keys/{roomId}`
|
|
||||||
///
|
|
||||||
/// Retrieves all keys from the backup for a given room.
|
|
||||||
pub async fn get_backup_keys_for_room_route(
|
|
||||||
body: Ruma<get_backup_keys_for_room::v3::Request>,
|
|
||||||
) -> Result<get_backup_keys_for_room::v3::Response> {
|
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
|
||||||
|
|
||||||
let sessions = services()
|
|
||||||
.key_backups
|
|
||||||
.get_room(sender_user, &body.version, &body.room_id)?;
|
|
||||||
|
|
||||||
Ok(get_backup_keys_for_room::v3::Response { sessions })
|
|
||||||
}
|
|
||||||
|
|
||||||
/// # `GET /_matrix/client/r0/room_keys/keys/{roomId}/{sessionId}`
|
|
||||||
///
|
|
||||||
/// Retrieves a key from the backup.
|
|
||||||
pub async fn get_backup_keys_for_session_route(
|
|
||||||
body: Ruma<get_backup_keys_for_session::v3::Request>,
|
|
||||||
) -> Result<get_backup_keys_for_session::v3::Response> {
|
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
|
||||||
|
|
||||||
let key_data = services()
|
|
||||||
.key_backups
|
|
||||||
.get_session(sender_user, &body.version, &body.room_id, &body.session_id)?
|
|
||||||
.ok_or(Error::BadRequest(
|
|
||||||
ErrorKind::NotFound,
|
|
||||||
"Backup key not found for this user's session.",
|
|
||||||
))?;
|
|
||||||
|
|
||||||
Ok(get_backup_keys_for_session::v3::Response { key_data })
|
|
||||||
}
|
|
||||||
|
|
||||||
/// # `DELETE /_matrix/client/r0/room_keys/keys`
|
|
||||||
///
|
|
||||||
/// Delete the keys from the backup.
|
|
||||||
pub async fn delete_backup_keys_route(
|
|
||||||
body: Ruma<delete_backup_keys::v3::Request>,
|
|
||||||
) -> Result<delete_backup_keys::v3::Response> {
|
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
|
||||||
|
|
||||||
services()
|
|
||||||
.key_backups
|
|
||||||
.delete_all_keys(sender_user, &body.version)?;
|
|
||||||
|
|
||||||
Ok(delete_backup_keys::v3::Response {
|
|
||||||
count: (services()
|
|
||||||
.key_backups
|
|
||||||
.count_keys(sender_user, &body.version)? as u32)
|
|
||||||
.into(),
|
|
||||||
etag: services()
|
|
||||||
.key_backups
|
|
||||||
.get_etag(sender_user, &body.version)?,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
/// # `DELETE /_matrix/client/r0/room_keys/keys/{roomId}`
|
|
||||||
///
|
|
||||||
/// Delete the keys from the backup for a given room.
|
|
||||||
pub async fn delete_backup_keys_for_room_route(
|
|
||||||
body: Ruma<delete_backup_keys_for_room::v3::Request>,
|
|
||||||
) -> Result<delete_backup_keys_for_room::v3::Response> {
|
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
|
||||||
|
|
||||||
services()
|
|
||||||
.key_backups
|
|
||||||
.delete_room_keys(sender_user, &body.version, &body.room_id)?;
|
|
||||||
|
|
||||||
Ok(delete_backup_keys_for_room::v3::Response {
|
|
||||||
count: (services()
|
|
||||||
.key_backups
|
|
||||||
.count_keys(sender_user, &body.version)? as u32)
|
|
||||||
.into(),
|
|
||||||
etag: services()
|
|
||||||
.key_backups
|
|
||||||
.get_etag(sender_user, &body.version)?,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
/// # `DELETE /_matrix/client/r0/room_keys/keys/{roomId}/{sessionId}`
|
|
||||||
///
|
|
||||||
/// Delete a key from the backup.
|
|
||||||
pub async fn delete_backup_keys_for_session_route(
|
|
||||||
body: Ruma<delete_backup_keys_for_session::v3::Request>,
|
|
||||||
) -> Result<delete_backup_keys_for_session::v3::Response> {
|
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
|
||||||
|
|
||||||
services().key_backups.delete_room_key(
|
|
||||||
sender_user,
|
|
||||||
&body.version,
|
|
||||||
&body.room_id,
|
|
||||||
&body.session_id,
|
|
||||||
)?;
|
|
||||||
|
|
||||||
Ok(delete_backup_keys_for_session::v3::Response {
|
|
||||||
count: (services()
|
|
||||||
.key_backups
|
|
||||||
.count_keys(sender_user, &body.version)? as u32)
|
|
||||||
.into(),
|
|
||||||
etag: services()
|
|
||||||
.key_backups
|
|
||||||
.get_etag(sender_user, &body.version)?,
|
|
||||||
})
|
|
||||||
}
|
|
|
@ -1,28 +0,0 @@
|
||||||
use crate::{services, Result, Ruma};
|
|
||||||
use ruma::api::client::discovery::get_capabilities::{
|
|
||||||
self, Capabilities, RoomVersionStability, RoomVersionsCapability,
|
|
||||||
};
|
|
||||||
use std::collections::BTreeMap;
|
|
||||||
|
|
||||||
/// # `GET /_matrix/client/r0/capabilities`
|
|
||||||
///
|
|
||||||
/// Get information on the supported feature set and other relevent capabilities of this server.
|
|
||||||
pub async fn get_capabilities_route(
|
|
||||||
_body: Ruma<get_capabilities::v3::Request>,
|
|
||||||
) -> Result<get_capabilities::v3::Response> {
|
|
||||||
let mut available = BTreeMap::new();
|
|
||||||
for room_version in &services().globals.unstable_room_versions {
|
|
||||||
available.insert(room_version.clone(), RoomVersionStability::Unstable);
|
|
||||||
}
|
|
||||||
for room_version in &services().globals.stable_room_versions {
|
|
||||||
available.insert(room_version.clone(), RoomVersionStability::Stable);
|
|
||||||
}
|
|
||||||
|
|
||||||
let mut capabilities = Capabilities::new();
|
|
||||||
capabilities.room_versions = RoomVersionsCapability {
|
|
||||||
default: services().globals.default_room_version(),
|
|
||||||
available,
|
|
||||||
};
|
|
||||||
|
|
||||||
Ok(get_capabilities::v3::Response { capabilities })
|
|
||||||
}
|
|
|
@ -1,34 +0,0 @@
|
||||||
use crate::{services, Error, Result, Ruma};
|
|
||||||
use ruma::api::client::{
|
|
||||||
error::ErrorKind,
|
|
||||||
filter::{create_filter, get_filter},
|
|
||||||
};
|
|
||||||
|
|
||||||
/// # `GET /_matrix/client/r0/user/{userId}/filter/{filterId}`
|
|
||||||
///
|
|
||||||
/// Loads a filter that was previously created.
|
|
||||||
///
|
|
||||||
/// - A user can only access their own filters
|
|
||||||
pub async fn get_filter_route(
|
|
||||||
body: Ruma<get_filter::v3::Request>,
|
|
||||||
) -> Result<get_filter::v3::Response> {
|
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
|
||||||
let filter = match services().users.get_filter(sender_user, &body.filter_id)? {
|
|
||||||
Some(filter) => filter,
|
|
||||||
None => return Err(Error::BadRequest(ErrorKind::NotFound, "Filter not found.")),
|
|
||||||
};
|
|
||||||
|
|
||||||
Ok(get_filter::v3::Response::new(filter))
|
|
||||||
}
|
|
||||||
|
|
||||||
/// # `PUT /_matrix/client/r0/user/{userId}/filter`
|
|
||||||
///
|
|
||||||
/// Creates a new filter to be used by other endpoints.
|
|
||||||
pub async fn create_filter_route(
|
|
||||||
body: Ruma<create_filter::v3::Request>,
|
|
||||||
) -> Result<create_filter::v3::Response> {
|
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
|
||||||
Ok(create_filter::v3::Response::new(
|
|
||||||
services().users.create_filter(sender_user, &body.filter)?,
|
|
||||||
))
|
|
||||||
}
|
|
File diff suppressed because it is too large
Load diff
|
@ -1,432 +0,0 @@
|
||||||
use crate::{services, Error, Result, Ruma};
|
|
||||||
use ruma::{
|
|
||||||
api::client::{
|
|
||||||
error::ErrorKind,
|
|
||||||
push::{
|
|
||||||
delete_pushrule, get_pushers, get_pushrule, get_pushrule_actions, get_pushrule_enabled,
|
|
||||||
get_pushrules_all, set_pusher, set_pushrule, set_pushrule_actions,
|
|
||||||
set_pushrule_enabled, RuleScope,
|
|
||||||
},
|
|
||||||
},
|
|
||||||
events::{push_rules::PushRulesEvent, GlobalAccountDataEventType},
|
|
||||||
push::{InsertPushRuleError, RemovePushRuleError},
|
|
||||||
};
|
|
||||||
|
|
||||||
/// # `GET /_matrix/client/r0/pushrules`
|
|
||||||
///
|
|
||||||
/// Retrieves the push rules event for this user.
|
|
||||||
pub async fn get_pushrules_all_route(
|
|
||||||
body: Ruma<get_pushrules_all::v3::Request>,
|
|
||||||
) -> Result<get_pushrules_all::v3::Response> {
|
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
|
||||||
|
|
||||||
let event = services()
|
|
||||||
.account_data
|
|
||||||
.get(
|
|
||||||
None,
|
|
||||||
sender_user,
|
|
||||||
GlobalAccountDataEventType::PushRules.to_string().into(),
|
|
||||||
)?
|
|
||||||
.ok_or(Error::BadRequest(
|
|
||||||
ErrorKind::NotFound,
|
|
||||||
"PushRules event not found.",
|
|
||||||
))?;
|
|
||||||
|
|
||||||
let account_data = serde_json::from_str::<PushRulesEvent>(event.get())
|
|
||||||
.map_err(|_| Error::bad_database("Invalid account data event in db."))?
|
|
||||||
.content;
|
|
||||||
|
|
||||||
Ok(get_pushrules_all::v3::Response {
|
|
||||||
global: account_data.global,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
/// # `GET /_matrix/client/r0/pushrules/{scope}/{kind}/{ruleId}`
|
|
||||||
///
|
|
||||||
/// Retrieves a single specified push rule for this user.
|
|
||||||
pub async fn get_pushrule_route(
|
|
||||||
body: Ruma<get_pushrule::v3::Request>,
|
|
||||||
) -> Result<get_pushrule::v3::Response> {
|
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
|
||||||
|
|
||||||
let event = services()
|
|
||||||
.account_data
|
|
||||||
.get(
|
|
||||||
None,
|
|
||||||
sender_user,
|
|
||||||
GlobalAccountDataEventType::PushRules.to_string().into(),
|
|
||||||
)?
|
|
||||||
.ok_or(Error::BadRequest(
|
|
||||||
ErrorKind::NotFound,
|
|
||||||
"PushRules event not found.",
|
|
||||||
))?;
|
|
||||||
|
|
||||||
let account_data = serde_json::from_str::<PushRulesEvent>(event.get())
|
|
||||||
.map_err(|_| Error::bad_database("Invalid account data event in db."))?
|
|
||||||
.content;
|
|
||||||
|
|
||||||
let rule = account_data
|
|
||||||
.global
|
|
||||||
.get(body.kind.clone(), &body.rule_id)
|
|
||||||
.map(Into::into);
|
|
||||||
|
|
||||||
if let Some(rule) = rule {
|
|
||||||
Ok(get_pushrule::v3::Response { rule })
|
|
||||||
} else {
|
|
||||||
Err(Error::BadRequest(
|
|
||||||
ErrorKind::NotFound,
|
|
||||||
"Push rule not found.",
|
|
||||||
))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// # `PUT /_matrix/client/r0/pushrules/{scope}/{kind}/{ruleId}`
///
/// Creates a single specified push rule for this user.
pub async fn set_pushrule_route(
    body: Ruma<set_pushrule::v3::Request>,
) -> Result<set_pushrule::v3::Response> {
    let sender_user = body.sender_user.as_ref().expect("user is authenticated");
    // Move the request body out so its fields can be consumed below.
    let body = body.body;

    // Only the global scope is implemented.
    if body.scope != RuleScope::Global {
        return Err(Error::BadRequest(
            ErrorKind::InvalidParam,
            "Scopes other than 'global' are not supported.",
        ));
    }

    // Push rules are stored inside the user's global `m.push_rules` account data.
    let event = services()
        .account_data
        .get(
            None,
            sender_user,
            GlobalAccountDataEventType::PushRules.to_string().into(),
        )?
        .ok_or(Error::BadRequest(
            ErrorKind::NotFound,
            "PushRules event not found.",
        ))?;

    let mut account_data = serde_json::from_str::<PushRulesEvent>(event.get())
        .map_err(|_| Error::bad_database("Invalid account data event in db."))?;

    // Insert the rule at the requested position (relative to `before`/`after`),
    // translating ruma's insertion errors into client-visible Matrix errors.
    if let Err(error) = account_data.content.global.insert(
        body.rule.clone(),
        body.after.as_deref(),
        body.before.as_deref(),
    ) {
        let err = match error {
            InsertPushRuleError::ServerDefaultRuleId => Error::BadRequest(
                ErrorKind::InvalidParam,
                "Rule IDs starting with a dot are reserved for server-default rules.",
            ),
            InsertPushRuleError::InvalidRuleId => Error::BadRequest(
                ErrorKind::InvalidParam,
                "Rule ID containing invalid characters.",
            ),
            InsertPushRuleError::RelativeToServerDefaultRule => Error::BadRequest(
                ErrorKind::InvalidParam,
                "Can't place a push rule relatively to a server-default rule.",
            ),
            InsertPushRuleError::UnknownRuleId => Error::BadRequest(
                ErrorKind::NotFound,
                "The before or after rule could not be found.",
            ),
            InsertPushRuleError::BeforeHigherThanAfter => Error::BadRequest(
                ErrorKind::InvalidParam,
                "The before rule has a higher priority than the after rule.",
            ),
            // InsertPushRuleError is non-exhaustive; treat unknown variants as bad input.
            _ => Error::BadRequest(ErrorKind::InvalidParam, "Invalid data."),
        };

        return Err(err);
    }

    // Persist the modified rule set back into account data.
    services().account_data.update(
        None,
        sender_user,
        GlobalAccountDataEventType::PushRules.to_string().into(),
        &serde_json::to_value(account_data).expect("to json value always works"),
    )?;

    Ok(set_pushrule::v3::Response {})
}
|
|
||||||
|
|
||||||
/// # `GET /_matrix/client/r0/pushrules/{scope}/{kind}/{ruleId}/actions`
|
|
||||||
///
|
|
||||||
/// Gets the actions of a single specified push rule for this user.
|
|
||||||
pub async fn get_pushrule_actions_route(
|
|
||||||
body: Ruma<get_pushrule_actions::v3::Request>,
|
|
||||||
) -> Result<get_pushrule_actions::v3::Response> {
|
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
|
||||||
|
|
||||||
if body.scope != RuleScope::Global {
|
|
||||||
return Err(Error::BadRequest(
|
|
||||||
ErrorKind::InvalidParam,
|
|
||||||
"Scopes other than 'global' are not supported.",
|
|
||||||
));
|
|
||||||
}
|
|
||||||
|
|
||||||
let event = services()
|
|
||||||
.account_data
|
|
||||||
.get(
|
|
||||||
None,
|
|
||||||
sender_user,
|
|
||||||
GlobalAccountDataEventType::PushRules.to_string().into(),
|
|
||||||
)?
|
|
||||||
.ok_or(Error::BadRequest(
|
|
||||||
ErrorKind::NotFound,
|
|
||||||
"PushRules event not found.",
|
|
||||||
))?;
|
|
||||||
|
|
||||||
let account_data = serde_json::from_str::<PushRulesEvent>(event.get())
|
|
||||||
.map_err(|_| Error::bad_database("Invalid account data event in db."))?
|
|
||||||
.content;
|
|
||||||
|
|
||||||
let global = account_data.global;
|
|
||||||
let actions = global
|
|
||||||
.get(body.kind.clone(), &body.rule_id)
|
|
||||||
.map(|rule| rule.actions().to_owned())
|
|
||||||
.ok_or(Error::BadRequest(
|
|
||||||
ErrorKind::NotFound,
|
|
||||||
"Push rule not found.",
|
|
||||||
))?;
|
|
||||||
|
|
||||||
Ok(get_pushrule_actions::v3::Response { actions })
|
|
||||||
}
|
|
||||||
|
|
||||||
/// # `PUT /_matrix/client/r0/pushrules/{scope}/{kind}/{ruleId}/actions`
|
|
||||||
///
|
|
||||||
/// Sets the actions of a single specified push rule for this user.
|
|
||||||
pub async fn set_pushrule_actions_route(
|
|
||||||
body: Ruma<set_pushrule_actions::v3::Request>,
|
|
||||||
) -> Result<set_pushrule_actions::v3::Response> {
|
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
|
||||||
|
|
||||||
if body.scope != RuleScope::Global {
|
|
||||||
return Err(Error::BadRequest(
|
|
||||||
ErrorKind::InvalidParam,
|
|
||||||
"Scopes other than 'global' are not supported.",
|
|
||||||
));
|
|
||||||
}
|
|
||||||
|
|
||||||
let event = services()
|
|
||||||
.account_data
|
|
||||||
.get(
|
|
||||||
None,
|
|
||||||
sender_user,
|
|
||||||
GlobalAccountDataEventType::PushRules.to_string().into(),
|
|
||||||
)?
|
|
||||||
.ok_or(Error::BadRequest(
|
|
||||||
ErrorKind::NotFound,
|
|
||||||
"PushRules event not found.",
|
|
||||||
))?;
|
|
||||||
|
|
||||||
let mut account_data = serde_json::from_str::<PushRulesEvent>(event.get())
|
|
||||||
.map_err(|_| Error::bad_database("Invalid account data event in db."))?;
|
|
||||||
|
|
||||||
if account_data
|
|
||||||
.content
|
|
||||||
.global
|
|
||||||
.set_actions(body.kind.clone(), &body.rule_id, body.actions.clone())
|
|
||||||
.is_err()
|
|
||||||
{
|
|
||||||
return Err(Error::BadRequest(
|
|
||||||
ErrorKind::NotFound,
|
|
||||||
"Push rule not found.",
|
|
||||||
));
|
|
||||||
}
|
|
||||||
|
|
||||||
services().account_data.update(
|
|
||||||
None,
|
|
||||||
sender_user,
|
|
||||||
GlobalAccountDataEventType::PushRules.to_string().into(),
|
|
||||||
&serde_json::to_value(account_data).expect("to json value always works"),
|
|
||||||
)?;
|
|
||||||
|
|
||||||
Ok(set_pushrule_actions::v3::Response {})
|
|
||||||
}
|
|
||||||
|
|
||||||
/// # `GET /_matrix/client/r0/pushrules/{scope}/{kind}/{ruleId}/enabled`
|
|
||||||
///
|
|
||||||
/// Gets the enabled status of a single specified push rule for this user.
|
|
||||||
pub async fn get_pushrule_enabled_route(
|
|
||||||
body: Ruma<get_pushrule_enabled::v3::Request>,
|
|
||||||
) -> Result<get_pushrule_enabled::v3::Response> {
|
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
|
||||||
|
|
||||||
if body.scope != RuleScope::Global {
|
|
||||||
return Err(Error::BadRequest(
|
|
||||||
ErrorKind::InvalidParam,
|
|
||||||
"Scopes other than 'global' are not supported.",
|
|
||||||
));
|
|
||||||
}
|
|
||||||
|
|
||||||
let event = services()
|
|
||||||
.account_data
|
|
||||||
.get(
|
|
||||||
None,
|
|
||||||
sender_user,
|
|
||||||
GlobalAccountDataEventType::PushRules.to_string().into(),
|
|
||||||
)?
|
|
||||||
.ok_or(Error::BadRequest(
|
|
||||||
ErrorKind::NotFound,
|
|
||||||
"PushRules event not found.",
|
|
||||||
))?;
|
|
||||||
|
|
||||||
let account_data = serde_json::from_str::<PushRulesEvent>(event.get())
|
|
||||||
.map_err(|_| Error::bad_database("Invalid account data event in db."))?;
|
|
||||||
|
|
||||||
let global = account_data.content.global;
|
|
||||||
let enabled = global
|
|
||||||
.get(body.kind.clone(), &body.rule_id)
|
|
||||||
.map(|r| r.enabled())
|
|
||||||
.ok_or(Error::BadRequest(
|
|
||||||
ErrorKind::NotFound,
|
|
||||||
"Push rule not found.",
|
|
||||||
))?;
|
|
||||||
|
|
||||||
Ok(get_pushrule_enabled::v3::Response { enabled })
|
|
||||||
}
|
|
||||||
|
|
||||||
/// # `PUT /_matrix/client/r0/pushrules/{scope}/{kind}/{ruleId}/enabled`
|
|
||||||
///
|
|
||||||
/// Sets the enabled status of a single specified push rule for this user.
|
|
||||||
pub async fn set_pushrule_enabled_route(
|
|
||||||
body: Ruma<set_pushrule_enabled::v3::Request>,
|
|
||||||
) -> Result<set_pushrule_enabled::v3::Response> {
|
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
|
||||||
|
|
||||||
if body.scope != RuleScope::Global {
|
|
||||||
return Err(Error::BadRequest(
|
|
||||||
ErrorKind::InvalidParam,
|
|
||||||
"Scopes other than 'global' are not supported.",
|
|
||||||
));
|
|
||||||
}
|
|
||||||
|
|
||||||
let event = services()
|
|
||||||
.account_data
|
|
||||||
.get(
|
|
||||||
None,
|
|
||||||
sender_user,
|
|
||||||
GlobalAccountDataEventType::PushRules.to_string().into(),
|
|
||||||
)?
|
|
||||||
.ok_or(Error::BadRequest(
|
|
||||||
ErrorKind::NotFound,
|
|
||||||
"PushRules event not found.",
|
|
||||||
))?;
|
|
||||||
|
|
||||||
let mut account_data = serde_json::from_str::<PushRulesEvent>(event.get())
|
|
||||||
.map_err(|_| Error::bad_database("Invalid account data event in db."))?;
|
|
||||||
|
|
||||||
if account_data
|
|
||||||
.content
|
|
||||||
.global
|
|
||||||
.set_enabled(body.kind.clone(), &body.rule_id, body.enabled)
|
|
||||||
.is_err()
|
|
||||||
{
|
|
||||||
return Err(Error::BadRequest(
|
|
||||||
ErrorKind::NotFound,
|
|
||||||
"Push rule not found.",
|
|
||||||
));
|
|
||||||
}
|
|
||||||
|
|
||||||
services().account_data.update(
|
|
||||||
None,
|
|
||||||
sender_user,
|
|
||||||
GlobalAccountDataEventType::PushRules.to_string().into(),
|
|
||||||
&serde_json::to_value(account_data).expect("to json value always works"),
|
|
||||||
)?;
|
|
||||||
|
|
||||||
Ok(set_pushrule_enabled::v3::Response {})
|
|
||||||
}
|
|
||||||
|
|
||||||
/// # `DELETE /_matrix/client/r0/pushrules/{scope}/{kind}/{ruleId}`
///
/// Deletes a single specified push rule for this user.
pub async fn delete_pushrule_route(
    body: Ruma<delete_pushrule::v3::Request>,
) -> Result<delete_pushrule::v3::Response> {
    let sender_user = body.sender_user.as_ref().expect("user is authenticated");

    // Only the global scope is implemented.
    if body.scope != RuleScope::Global {
        return Err(Error::BadRequest(
            ErrorKind::InvalidParam,
            "Scopes other than 'global' are not supported.",
        ));
    }

    // Push rules are stored inside the user's global `m.push_rules` account data.
    let event = services()
        .account_data
        .get(
            None,
            sender_user,
            GlobalAccountDataEventType::PushRules.to_string().into(),
        )?
        .ok_or(Error::BadRequest(
            ErrorKind::NotFound,
            "PushRules event not found.",
        ))?;

    let mut account_data = serde_json::from_str::<PushRulesEvent>(event.get())
        .map_err(|_| Error::bad_database("Invalid account data event in db."))?;

    // Remove the rule, translating ruma's removal errors into Matrix errors.
    if let Err(error) = account_data
        .content
        .global
        .remove(body.kind.clone(), &body.rule_id)
    {
        let err = match error {
            RemovePushRuleError::ServerDefault => Error::BadRequest(
                ErrorKind::InvalidParam,
                "Cannot delete a server-default pushrule.",
            ),
            RemovePushRuleError::NotFound => {
                Error::BadRequest(ErrorKind::NotFound, "Push rule not found.")
            }
            // RemovePushRuleError is non-exhaustive; treat unknown variants as bad input.
            _ => Error::BadRequest(ErrorKind::InvalidParam, "Invalid data."),
        };

        return Err(err);
    }

    // Persist the modified rule set back into account data.
    services().account_data.update(
        None,
        sender_user,
        GlobalAccountDataEventType::PushRules.to_string().into(),
        &serde_json::to_value(account_data).expect("to json value always works"),
    )?;

    Ok(delete_pushrule::v3::Response {})
}
|
|
||||||
|
|
||||||
/// # `GET /_matrix/client/r0/pushers`
|
|
||||||
///
|
|
||||||
/// Gets all currently active pushers for the sender user.
|
|
||||||
pub async fn get_pushers_route(
|
|
||||||
body: Ruma<get_pushers::v3::Request>,
|
|
||||||
) -> Result<get_pushers::v3::Response> {
|
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
|
||||||
|
|
||||||
Ok(get_pushers::v3::Response {
|
|
||||||
pushers: services().pusher.get_pushers(sender_user)?,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
/// # `POST /_matrix/client/r0/pushers/set`
|
|
||||||
///
|
|
||||||
/// Adds a pusher for the sender user.
|
|
||||||
///
|
|
||||||
/// - TODO: Handle `append`
|
|
||||||
pub async fn set_pushers_route(
|
|
||||||
body: Ruma<set_pusher::v3::Request>,
|
|
||||||
) -> Result<set_pusher::v3::Response> {
|
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
|
||||||
|
|
||||||
services()
|
|
||||||
.pusher
|
|
||||||
.set_pusher(sender_user, body.action.clone())?;
|
|
||||||
|
|
||||||
Ok(set_pusher::v3::Response::default())
|
|
||||||
}
|
|
|
@ -1,182 +0,0 @@
|
||||||
use crate::{service::rooms::timeline::PduCount, services, Error, Result, Ruma};
|
|
||||||
use ruma::{
|
|
||||||
api::client::{error::ErrorKind, read_marker::set_read_marker, receipt::create_receipt},
|
|
||||||
events::{
|
|
||||||
receipt::{ReceiptThread, ReceiptType},
|
|
||||||
RoomAccountDataEventType,
|
|
||||||
},
|
|
||||||
MilliSecondsSinceUnixEpoch,
|
|
||||||
};
|
|
||||||
use std::collections::BTreeMap;
|
|
||||||
|
|
||||||
/// # `POST /_matrix/client/r0/rooms/{roomId}/read_markers`
///
/// Sets different types of read markers.
///
/// - Updates fully-read account data event to `fully_read`
/// - If `read_receipt` is set: Update private marker and public read receipt EDU
pub async fn set_read_marker_route(
    body: Ruma<set_read_marker::v3::Request>,
) -> Result<set_read_marker::v3::Response> {
    let sender_user = body.sender_user.as_ref().expect("user is authenticated");

    if let Some(fully_read) = &body.fully_read {
        // The fully-read marker is stored as per-room account data for this user.
        let fully_read_event = ruma::events::fully_read::FullyReadEvent {
            content: ruma::events::fully_read::FullyReadEventContent {
                event_id: fully_read.clone(),
            },
        };
        services().account_data.update(
            Some(&body.room_id),
            sender_user,
            RoomAccountDataEventType::FullyRead,
            &serde_json::to_value(fully_read_event).expect("to json value always works"),
        )?;
    }

    // Either kind of read receipt means the user has caught up in this room.
    if body.private_read_receipt.is_some() || body.read_receipt.is_some() {
        services()
            .rooms
            .user
            .reset_notification_counts(sender_user, &body.room_id)?;
    }

    if let Some(event) = &body.private_read_receipt {
        // Private receipts are stored by PDU count, so the event must be known locally.
        let count = services()
            .rooms
            .timeline
            .get_pdu_count(event)?
            .ok_or(Error::BadRequest(
                ErrorKind::InvalidParam,
                "Event does not exist.",
            ))?;
        // Backfilled events have no usable forward-ordered count.
        let count = match count {
            PduCount::Backfilled(_) => {
                return Err(Error::BadRequest(
                    ErrorKind::InvalidParam,
                    "Read receipt is in backfilled timeline",
                ))
            }
            PduCount::Normal(c) => c,
        };
        services()
            .rooms
            .edus
            .read_receipt
            .private_read_set(&body.room_id, sender_user, count)?;
    }

    if let Some(event) = &body.read_receipt {
        // Public receipts are sent as an EDU shaped event_id -> type -> user -> receipt.
        let mut user_receipts = BTreeMap::new();
        user_receipts.insert(
            sender_user.clone(),
            ruma::events::receipt::Receipt {
                ts: Some(MilliSecondsSinceUnixEpoch::now()),
                thread: ReceiptThread::Unthreaded,
            },
        );

        let mut receipts = BTreeMap::new();
        receipts.insert(ReceiptType::Read, user_receipts);

        let mut receipt_content = BTreeMap::new();
        receipt_content.insert(event.to_owned(), receipts);

        services().rooms.edus.read_receipt.readreceipt_update(
            sender_user,
            &body.room_id,
            ruma::events::receipt::ReceiptEvent {
                content: ruma::events::receipt::ReceiptEventContent(receipt_content),
                room_id: body.room_id.clone(),
            },
        )?;
    }

    Ok(set_read_marker::v3::Response {})
}
|
|
||||||
|
|
||||||
/// # `POST /_matrix/client/r0/rooms/{roomId}/receipt/{receiptType}/{eventId}`
|
|
||||||
///
|
|
||||||
/// Sets private read marker and public read receipt EDU.
|
|
||||||
pub async fn create_receipt_route(
|
|
||||||
body: Ruma<create_receipt::v3::Request>,
|
|
||||||
) -> Result<create_receipt::v3::Response> {
|
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
|
||||||
|
|
||||||
if matches!(
|
|
||||||
&body.receipt_type,
|
|
||||||
create_receipt::v3::ReceiptType::Read | create_receipt::v3::ReceiptType::ReadPrivate
|
|
||||||
) {
|
|
||||||
services()
|
|
||||||
.rooms
|
|
||||||
.user
|
|
||||||
.reset_notification_counts(sender_user, &body.room_id)?;
|
|
||||||
}
|
|
||||||
|
|
||||||
match body.receipt_type {
|
|
||||||
create_receipt::v3::ReceiptType::FullyRead => {
|
|
||||||
let fully_read_event = ruma::events::fully_read::FullyReadEvent {
|
|
||||||
content: ruma::events::fully_read::FullyReadEventContent {
|
|
||||||
event_id: body.event_id.clone(),
|
|
||||||
},
|
|
||||||
};
|
|
||||||
services().account_data.update(
|
|
||||||
Some(&body.room_id),
|
|
||||||
sender_user,
|
|
||||||
RoomAccountDataEventType::FullyRead,
|
|
||||||
&serde_json::to_value(fully_read_event).expect("to json value always works"),
|
|
||||||
)?;
|
|
||||||
}
|
|
||||||
create_receipt::v3::ReceiptType::Read => {
|
|
||||||
let mut user_receipts = BTreeMap::new();
|
|
||||||
user_receipts.insert(
|
|
||||||
sender_user.clone(),
|
|
||||||
ruma::events::receipt::Receipt {
|
|
||||||
ts: Some(MilliSecondsSinceUnixEpoch::now()),
|
|
||||||
thread: ReceiptThread::Unthreaded,
|
|
||||||
},
|
|
||||||
);
|
|
||||||
let mut receipts = BTreeMap::new();
|
|
||||||
receipts.insert(ReceiptType::Read, user_receipts);
|
|
||||||
|
|
||||||
let mut receipt_content = BTreeMap::new();
|
|
||||||
receipt_content.insert(body.event_id.to_owned(), receipts);
|
|
||||||
|
|
||||||
services().rooms.edus.read_receipt.readreceipt_update(
|
|
||||||
sender_user,
|
|
||||||
&body.room_id,
|
|
||||||
ruma::events::receipt::ReceiptEvent {
|
|
||||||
content: ruma::events::receipt::ReceiptEventContent(receipt_content),
|
|
||||||
room_id: body.room_id.clone(),
|
|
||||||
},
|
|
||||||
)?;
|
|
||||||
}
|
|
||||||
create_receipt::v3::ReceiptType::ReadPrivate => {
|
|
||||||
let count = services()
|
|
||||||
.rooms
|
|
||||||
.timeline
|
|
||||||
.get_pdu_count(&body.event_id)?
|
|
||||||
.ok_or(Error::BadRequest(
|
|
||||||
ErrorKind::InvalidParam,
|
|
||||||
"Event does not exist.",
|
|
||||||
))?;
|
|
||||||
let count = match count {
|
|
||||||
PduCount::Backfilled(_) => {
|
|
||||||
return Err(Error::BadRequest(
|
|
||||||
ErrorKind::InvalidParam,
|
|
||||||
"Read receipt is in backfilled timeline",
|
|
||||||
))
|
|
||||||
}
|
|
||||||
PduCount::Normal(c) => c,
|
|
||||||
};
|
|
||||||
services().rooms.edus.read_receipt.private_read_set(
|
|
||||||
&body.room_id,
|
|
||||||
sender_user,
|
|
||||||
count,
|
|
||||||
)?;
|
|
||||||
}
|
|
||||||
_ => return Err(Error::bad_database("Unsupported receipt type")),
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(create_receipt::v3::Response {})
|
|
||||||
}
|
|
|
@ -1,146 +0,0 @@
|
||||||
use ruma::api::client::relations::{
|
|
||||||
get_relating_events, get_relating_events_with_rel_type,
|
|
||||||
get_relating_events_with_rel_type_and_event_type,
|
|
||||||
};
|
|
||||||
|
|
||||||
use crate::{service::rooms::timeline::PduCount, services, Result, Ruma};
|
|
||||||
|
|
||||||
/// # `GET /_matrix/client/r0/rooms/{roomId}/relations/{eventId}/{relType}/{eventType}`
|
|
||||||
pub async fn get_relating_events_with_rel_type_and_event_type_route(
|
|
||||||
body: Ruma<get_relating_events_with_rel_type_and_event_type::v1::Request>,
|
|
||||||
) -> Result<get_relating_events_with_rel_type_and_event_type::v1::Response> {
|
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
|
||||||
|
|
||||||
let from = match body.from.clone() {
|
|
||||||
Some(from) => PduCount::try_from_string(&from)?,
|
|
||||||
None => match ruma::api::Direction::Backward {
|
|
||||||
// TODO: fix ruma so `body.dir` exists
|
|
||||||
ruma::api::Direction::Forward => PduCount::min(),
|
|
||||||
ruma::api::Direction::Backward => PduCount::max(),
|
|
||||||
},
|
|
||||||
};
|
|
||||||
|
|
||||||
let to = body
|
|
||||||
.to
|
|
||||||
.as_ref()
|
|
||||||
.and_then(|t| PduCount::try_from_string(t).ok());
|
|
||||||
|
|
||||||
// Use limit or else 10, with maximum 100
|
|
||||||
let limit = body
|
|
||||||
.limit
|
|
||||||
.and_then(|u| u32::try_from(u).ok())
|
|
||||||
.map_or(10_usize, |u| u as usize)
|
|
||||||
.min(100);
|
|
||||||
|
|
||||||
let res = services()
|
|
||||||
.rooms
|
|
||||||
.pdu_metadata
|
|
||||||
.paginate_relations_with_filter(
|
|
||||||
sender_user,
|
|
||||||
&body.room_id,
|
|
||||||
&body.event_id,
|
|
||||||
Some(body.event_type.clone()),
|
|
||||||
Some(body.rel_type.clone()),
|
|
||||||
from,
|
|
||||||
to,
|
|
||||||
limit,
|
|
||||||
)?;
|
|
||||||
|
|
||||||
Ok(
|
|
||||||
get_relating_events_with_rel_type_and_event_type::v1::Response {
|
|
||||||
chunk: res.chunk,
|
|
||||||
next_batch: res.next_batch,
|
|
||||||
prev_batch: res.prev_batch,
|
|
||||||
},
|
|
||||||
)
|
|
||||||
}
|
|
||||||
|
|
||||||
/// # `GET /_matrix/client/r0/rooms/{roomId}/relations/{eventId}/{relType}`
|
|
||||||
pub async fn get_relating_events_with_rel_type_route(
|
|
||||||
body: Ruma<get_relating_events_with_rel_type::v1::Request>,
|
|
||||||
) -> Result<get_relating_events_with_rel_type::v1::Response> {
|
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
|
||||||
|
|
||||||
let from = match body.from.clone() {
|
|
||||||
Some(from) => PduCount::try_from_string(&from)?,
|
|
||||||
None => match ruma::api::Direction::Backward {
|
|
||||||
// TODO: fix ruma so `body.dir` exists
|
|
||||||
ruma::api::Direction::Forward => PduCount::min(),
|
|
||||||
ruma::api::Direction::Backward => PduCount::max(),
|
|
||||||
},
|
|
||||||
};
|
|
||||||
|
|
||||||
let to = body
|
|
||||||
.to
|
|
||||||
.as_ref()
|
|
||||||
.and_then(|t| PduCount::try_from_string(t).ok());
|
|
||||||
|
|
||||||
// Use limit or else 10, with maximum 100
|
|
||||||
let limit = body
|
|
||||||
.limit
|
|
||||||
.and_then(|u| u32::try_from(u).ok())
|
|
||||||
.map_or(10_usize, |u| u as usize)
|
|
||||||
.min(100);
|
|
||||||
|
|
||||||
let res = services()
|
|
||||||
.rooms
|
|
||||||
.pdu_metadata
|
|
||||||
.paginate_relations_with_filter(
|
|
||||||
sender_user,
|
|
||||||
&body.room_id,
|
|
||||||
&body.event_id,
|
|
||||||
None,
|
|
||||||
Some(body.rel_type.clone()),
|
|
||||||
from,
|
|
||||||
to,
|
|
||||||
limit,
|
|
||||||
)?;
|
|
||||||
|
|
||||||
Ok(get_relating_events_with_rel_type::v1::Response {
|
|
||||||
chunk: res.chunk,
|
|
||||||
next_batch: res.next_batch,
|
|
||||||
prev_batch: res.prev_batch,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
/// # `GET /_matrix/client/r0/rooms/{roomId}/relations/{eventId}`
|
|
||||||
pub async fn get_relating_events_route(
|
|
||||||
body: Ruma<get_relating_events::v1::Request>,
|
|
||||||
) -> Result<get_relating_events::v1::Response> {
|
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
|
||||||
|
|
||||||
let from = match body.from.clone() {
|
|
||||||
Some(from) => PduCount::try_from_string(&from)?,
|
|
||||||
None => match ruma::api::Direction::Backward {
|
|
||||||
// TODO: fix ruma so `body.dir` exists
|
|
||||||
ruma::api::Direction::Forward => PduCount::min(),
|
|
||||||
ruma::api::Direction::Backward => PduCount::max(),
|
|
||||||
},
|
|
||||||
};
|
|
||||||
|
|
||||||
let to = body
|
|
||||||
.to
|
|
||||||
.as_ref()
|
|
||||||
.and_then(|t| PduCount::try_from_string(t).ok());
|
|
||||||
|
|
||||||
// Use limit or else 10, with maximum 100
|
|
||||||
let limit = body
|
|
||||||
.limit
|
|
||||||
.and_then(|u| u32::try_from(u).ok())
|
|
||||||
.map_or(10_usize, |u| u as usize)
|
|
||||||
.min(100);
|
|
||||||
|
|
||||||
services()
|
|
||||||
.rooms
|
|
||||||
.pdu_metadata
|
|
||||||
.paginate_relations_with_filter(
|
|
||||||
sender_user,
|
|
||||||
&body.room_id,
|
|
||||||
&body.event_id,
|
|
||||||
None,
|
|
||||||
None,
|
|
||||||
from,
|
|
||||||
to,
|
|
||||||
limit,
|
|
||||||
)
|
|
||||||
}
|
|
|
@ -1,34 +0,0 @@
|
||||||
use crate::{services, Result, Ruma};
|
|
||||||
use ruma::api::client::space::get_hierarchy;
|
|
||||||
|
|
||||||
/// # `GET /_matrix/client/v1/rooms/{room_id}/hierarchy``
|
|
||||||
///
|
|
||||||
/// Paginates over the space tree in a depth-first manner to locate child rooms of a given space.
|
|
||||||
pub async fn get_hierarchy_route(
|
|
||||||
body: Ruma<get_hierarchy::v1::Request>,
|
|
||||||
) -> Result<get_hierarchy::v1::Response> {
|
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
|
||||||
|
|
||||||
let skip = body
|
|
||||||
.from
|
|
||||||
.as_ref()
|
|
||||||
.and_then(|s| s.parse::<usize>().ok())
|
|
||||||
.unwrap_or(0);
|
|
||||||
|
|
||||||
let limit = body.limit.map_or(10, u64::from).min(100) as usize;
|
|
||||||
|
|
||||||
let max_depth = body.max_depth.map_or(3, u64::from).min(10) as usize + 1; // +1 to skip the space room itself
|
|
||||||
|
|
||||||
services()
|
|
||||||
.rooms
|
|
||||||
.spaces
|
|
||||||
.get_hierarchy(
|
|
||||||
sender_user,
|
|
||||||
&body.room_id,
|
|
||||||
limit,
|
|
||||||
skip,
|
|
||||||
max_depth,
|
|
||||||
body.suggested_only,
|
|
||||||
)
|
|
||||||
.await
|
|
||||||
}
|
|
|
@ -1,250 +0,0 @@
|
||||||
use std::sync::Arc;
|
|
||||||
|
|
||||||
use crate::{service::pdu::PduBuilder, services, Error, Result, Ruma, RumaResponse};
|
|
||||||
use ruma::{
|
|
||||||
api::client::{
|
|
||||||
error::ErrorKind,
|
|
||||||
state::{get_state_events, get_state_events_for_key, send_state_event},
|
|
||||||
},
|
|
||||||
events::{
|
|
||||||
room::canonical_alias::RoomCanonicalAliasEventContent, AnyStateEventContent, StateEventType,
|
|
||||||
},
|
|
||||||
serde::Raw,
|
|
||||||
EventId, RoomId, UserId,
|
|
||||||
};
|
|
||||||
use tracing::log::warn;
|
|
||||||
|
|
||||||
/// # `PUT /_matrix/client/r0/rooms/{roomId}/state/{eventType}/{stateKey}`
|
|
||||||
///
|
|
||||||
/// Sends a state event into the room.
|
|
||||||
///
|
|
||||||
/// - The only requirement for the content is that it has to be valid json
|
|
||||||
/// - Tries to send the event into the room, auth rules will determine if it is allowed
|
|
||||||
/// - If event is new canonical_alias: Rejects if alias is incorrect
|
|
||||||
pub async fn send_state_event_for_key_route(
|
|
||||||
body: Ruma<send_state_event::v3::Request>,
|
|
||||||
) -> Result<send_state_event::v3::Response> {
|
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
|
||||||
|
|
||||||
let event_id = send_state_event_for_key_helper(
|
|
||||||
sender_user,
|
|
||||||
&body.room_id,
|
|
||||||
&body.event_type,
|
|
||||||
&body.body.body, // Yes, I hate it too
|
|
||||||
body.state_key.to_owned(),
|
|
||||||
)
|
|
||||||
.await?;
|
|
||||||
|
|
||||||
let event_id = (*event_id).to_owned();
|
|
||||||
Ok(send_state_event::v3::Response { event_id })
|
|
||||||
}
|
|
||||||
|
|
||||||
/// # `PUT /_matrix/client/r0/rooms/{roomId}/state/{eventType}`
|
|
||||||
///
|
|
||||||
/// Sends a state event into the room.
|
|
||||||
///
|
|
||||||
/// - The only requirement for the content is that it has to be valid json
|
|
||||||
/// - Tries to send the event into the room, auth rules will determine if it is allowed
|
|
||||||
/// - If event is new canonical_alias: Rejects if alias is incorrect
|
|
||||||
pub async fn send_state_event_for_empty_key_route(
|
|
||||||
body: Ruma<send_state_event::v3::Request>,
|
|
||||||
) -> Result<RumaResponse<send_state_event::v3::Response>> {
|
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
|
||||||
|
|
||||||
// Forbid m.room.encryption if encryption is disabled
|
|
||||||
if body.event_type == StateEventType::RoomEncryption && !services().globals.allow_encryption() {
|
|
||||||
return Err(Error::BadRequest(
|
|
||||||
ErrorKind::Forbidden,
|
|
||||||
"Encryption has been disabled",
|
|
||||||
));
|
|
||||||
}
|
|
||||||
|
|
||||||
let event_id = send_state_event_for_key_helper(
|
|
||||||
sender_user,
|
|
||||||
&body.room_id,
|
|
||||||
&body.event_type.to_string().into(),
|
|
||||||
&body.body.body,
|
|
||||||
body.state_key.to_owned(),
|
|
||||||
)
|
|
||||||
.await?;
|
|
||||||
|
|
||||||
let event_id = (*event_id).to_owned();
|
|
||||||
Ok(send_state_event::v3::Response { event_id }.into())
|
|
||||||
}
|
|
||||||
|
|
||||||
/// # `GET /_matrix/client/r0/rooms/{roomid}/state`
|
|
||||||
///
|
|
||||||
/// Get all state events for a room.
|
|
||||||
///
|
|
||||||
/// - If not joined: Only works if current room history visibility is world readable
|
|
||||||
pub async fn get_state_events_route(
|
|
||||||
body: Ruma<get_state_events::v3::Request>,
|
|
||||||
) -> Result<get_state_events::v3::Response> {
|
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
|
||||||
|
|
||||||
if !services()
|
|
||||||
.rooms
|
|
||||||
.state_accessor
|
|
||||||
.user_can_see_state_events(sender_user, &body.room_id)?
|
|
||||||
{
|
|
||||||
return Err(Error::BadRequest(
|
|
||||||
ErrorKind::Forbidden,
|
|
||||||
"You don't have permission to view the room state.",
|
|
||||||
));
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(get_state_events::v3::Response {
|
|
||||||
room_state: services()
|
|
||||||
.rooms
|
|
||||||
.state_accessor
|
|
||||||
.room_state_full(&body.room_id)
|
|
||||||
.await?
|
|
||||||
.values()
|
|
||||||
.map(|pdu| pdu.to_state_event())
|
|
||||||
.collect(),
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
/// # `GET /_matrix/client/r0/rooms/{roomid}/state/{eventType}/{stateKey}`
|
|
||||||
///
|
|
||||||
/// Get single state event of a room.
|
|
||||||
///
|
|
||||||
/// - If not joined: Only works if current room history visibility is world readable
|
|
||||||
pub async fn get_state_events_for_key_route(
|
|
||||||
body: Ruma<get_state_events_for_key::v3::Request>,
|
|
||||||
) -> Result<get_state_events_for_key::v3::Response> {
|
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
|
||||||
|
|
||||||
if !services()
|
|
||||||
.rooms
|
|
||||||
.state_accessor
|
|
||||||
.user_can_see_state_events(sender_user, &body.room_id)?
|
|
||||||
{
|
|
||||||
return Err(Error::BadRequest(
|
|
||||||
ErrorKind::Forbidden,
|
|
||||||
"You don't have permission to view the room state.",
|
|
||||||
));
|
|
||||||
}
|
|
||||||
|
|
||||||
let event = services()
|
|
||||||
.rooms
|
|
||||||
.state_accessor
|
|
||||||
.room_state_get(&body.room_id, &body.event_type, &body.state_key)?
|
|
||||||
.ok_or_else(|| {
|
|
||||||
warn!(
|
|
||||||
"State event {:?} not found in room {:?}",
|
|
||||||
&body.event_type, &body.room_id
|
|
||||||
);
|
|
||||||
Error::BadRequest(ErrorKind::NotFound, "State event not found.")
|
|
||||||
})?;
|
|
||||||
|
|
||||||
Ok(get_state_events_for_key::v3::Response {
|
|
||||||
content: serde_json::from_str(event.content.get())
|
|
||||||
.map_err(|_| Error::bad_database("Invalid event content in database"))?,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
/// # `GET /_matrix/client/r0/rooms/{roomid}/state/{eventType}`
|
|
||||||
///
|
|
||||||
/// Get single state event of a room.
|
|
||||||
///
|
|
||||||
/// - If not joined: Only works if current room history visibility is world readable
|
|
||||||
pub async fn get_state_events_for_empty_key_route(
|
|
||||||
body: Ruma<get_state_events_for_key::v3::Request>,
|
|
||||||
) -> Result<RumaResponse<get_state_events_for_key::v3::Response>> {
|
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
|
||||||
|
|
||||||
if !services()
|
|
||||||
.rooms
|
|
||||||
.state_accessor
|
|
||||||
.user_can_see_state_events(sender_user, &body.room_id)?
|
|
||||||
{
|
|
||||||
return Err(Error::BadRequest(
|
|
||||||
ErrorKind::Forbidden,
|
|
||||||
"You don't have permission to view the room state.",
|
|
||||||
));
|
|
||||||
}
|
|
||||||
|
|
||||||
let event = services()
|
|
||||||
.rooms
|
|
||||||
.state_accessor
|
|
||||||
.room_state_get(&body.room_id, &body.event_type, "")?
|
|
||||||
.ok_or_else(|| {
|
|
||||||
warn!(
|
|
||||||
"State event {:?} not found in room {:?}",
|
|
||||||
&body.event_type, &body.room_id
|
|
||||||
);
|
|
||||||
Error::BadRequest(ErrorKind::NotFound, "State event not found.")
|
|
||||||
})?;
|
|
||||||
|
|
||||||
Ok(get_state_events_for_key::v3::Response {
|
|
||||||
content: serde_json::from_str(event.content.get())
|
|
||||||
.map_err(|_| Error::bad_database("Invalid event content in database"))?,
|
|
||||||
}
|
|
||||||
.into())
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn send_state_event_for_key_helper(
|
|
||||||
sender: &UserId,
|
|
||||||
room_id: &RoomId,
|
|
||||||
event_type: &StateEventType,
|
|
||||||
json: &Raw<AnyStateEventContent>,
|
|
||||||
state_key: String,
|
|
||||||
) -> Result<Arc<EventId>> {
|
|
||||||
let sender_user = sender;
|
|
||||||
|
|
||||||
// TODO: Review this check, error if event is unparsable, use event type, allow alias if it
|
|
||||||
// previously existed
|
|
||||||
if let Ok(canonical_alias) =
|
|
||||||
serde_json::from_str::<RoomCanonicalAliasEventContent>(json.json().get())
|
|
||||||
{
|
|
||||||
let mut aliases = canonical_alias.alt_aliases.clone();
|
|
||||||
|
|
||||||
if let Some(alias) = canonical_alias.alias {
|
|
||||||
aliases.push(alias);
|
|
||||||
}
|
|
||||||
|
|
||||||
for alias in aliases {
|
|
||||||
if alias.server_name() != services().globals.server_name()
|
|
||||||
|| services()
|
|
||||||
.rooms
|
|
||||||
.alias
|
|
||||||
.resolve_local_alias(&alias)?
|
|
||||||
.filter(|room| room == room_id) // Make sure it's the right room
|
|
||||||
.is_none()
|
|
||||||
{
|
|
||||||
return Err(Error::BadRequest(
|
|
||||||
ErrorKind::Forbidden,
|
|
||||||
"You are only allowed to send canonical_alias \
|
|
||||||
events when it's aliases already exists",
|
|
||||||
));
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
let mutex_state = Arc::clone(
|
|
||||||
services()
|
|
||||||
.globals
|
|
||||||
.roomid_mutex_state
|
|
||||||
.write()
|
|
||||||
.unwrap()
|
|
||||||
.entry(room_id.to_owned())
|
|
||||||
.or_default(),
|
|
||||||
);
|
|
||||||
let state_lock = mutex_state.lock().await;
|
|
||||||
|
|
||||||
let event_id = services().rooms.timeline.build_and_append_pdu(
|
|
||||||
PduBuilder {
|
|
||||||
event_type: event_type.to_string().into(),
|
|
||||||
content: serde_json::from_str(json.json().get()).expect("content is valid json"),
|
|
||||||
unsigned: None,
|
|
||||||
state_key: Some(state_key),
|
|
||||||
redacts: None,
|
|
||||||
},
|
|
||||||
sender_user,
|
|
||||||
room_id,
|
|
||||||
&state_lock,
|
|
||||||
)?;
|
|
||||||
|
|
||||||
Ok(event_id)
|
|
||||||
}
|
|
File diff suppressed because it is too large
Load diff
|
@ -1,126 +0,0 @@
|
||||||
use crate::{services, Error, Result, Ruma};
|
|
||||||
use ruma::{
|
|
||||||
api::client::tag::{create_tag, delete_tag, get_tags},
|
|
||||||
events::{
|
|
||||||
tag::{TagEvent, TagEventContent},
|
|
||||||
RoomAccountDataEventType,
|
|
||||||
},
|
|
||||||
};
|
|
||||||
use std::collections::BTreeMap;
|
|
||||||
|
|
||||||
/// # `PUT /_matrix/client/r0/user/{userId}/rooms/{roomId}/tags/{tag}`
|
|
||||||
///
|
|
||||||
/// Adds a tag to the room.
|
|
||||||
///
|
|
||||||
/// - Inserts the tag into the tag event of the room account data.
|
|
||||||
pub async fn update_tag_route(
|
|
||||||
body: Ruma<create_tag::v3::Request>,
|
|
||||||
) -> Result<create_tag::v3::Response> {
|
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
|
||||||
|
|
||||||
let event = services().account_data.get(
|
|
||||||
Some(&body.room_id),
|
|
||||||
sender_user,
|
|
||||||
RoomAccountDataEventType::Tag,
|
|
||||||
)?;
|
|
||||||
|
|
||||||
let mut tags_event = event
|
|
||||||
.map(|e| {
|
|
||||||
serde_json::from_str(e.get())
|
|
||||||
.map_err(|_| Error::bad_database("Invalid account data event in db."))
|
|
||||||
})
|
|
||||||
.unwrap_or_else(|| {
|
|
||||||
Ok(TagEvent {
|
|
||||||
content: TagEventContent {
|
|
||||||
tags: BTreeMap::new(),
|
|
||||||
},
|
|
||||||
})
|
|
||||||
})?;
|
|
||||||
|
|
||||||
tags_event
|
|
||||||
.content
|
|
||||||
.tags
|
|
||||||
.insert(body.tag.clone().into(), body.tag_info.clone());
|
|
||||||
|
|
||||||
services().account_data.update(
|
|
||||||
Some(&body.room_id),
|
|
||||||
sender_user,
|
|
||||||
RoomAccountDataEventType::Tag,
|
|
||||||
&serde_json::to_value(tags_event).expect("to json value always works"),
|
|
||||||
)?;
|
|
||||||
|
|
||||||
Ok(create_tag::v3::Response {})
|
|
||||||
}
|
|
||||||
|
|
||||||
/// # `DELETE /_matrix/client/r0/user/{userId}/rooms/{roomId}/tags/{tag}`
|
|
||||||
///
|
|
||||||
/// Deletes a tag from the room.
|
|
||||||
///
|
|
||||||
/// - Removes the tag from the tag event of the room account data.
|
|
||||||
pub async fn delete_tag_route(
|
|
||||||
body: Ruma<delete_tag::v3::Request>,
|
|
||||||
) -> Result<delete_tag::v3::Response> {
|
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
|
||||||
|
|
||||||
let event = services().account_data.get(
|
|
||||||
Some(&body.room_id),
|
|
||||||
sender_user,
|
|
||||||
RoomAccountDataEventType::Tag,
|
|
||||||
)?;
|
|
||||||
|
|
||||||
let mut tags_event = event
|
|
||||||
.map(|e| {
|
|
||||||
serde_json::from_str(e.get())
|
|
||||||
.map_err(|_| Error::bad_database("Invalid account data event in db."))
|
|
||||||
})
|
|
||||||
.unwrap_or_else(|| {
|
|
||||||
Ok(TagEvent {
|
|
||||||
content: TagEventContent {
|
|
||||||
tags: BTreeMap::new(),
|
|
||||||
},
|
|
||||||
})
|
|
||||||
})?;
|
|
||||||
|
|
||||||
tags_event.content.tags.remove(&body.tag.clone().into());
|
|
||||||
|
|
||||||
services().account_data.update(
|
|
||||||
Some(&body.room_id),
|
|
||||||
sender_user,
|
|
||||||
RoomAccountDataEventType::Tag,
|
|
||||||
&serde_json::to_value(tags_event).expect("to json value always works"),
|
|
||||||
)?;
|
|
||||||
|
|
||||||
Ok(delete_tag::v3::Response {})
|
|
||||||
}
|
|
||||||
|
|
||||||
/// # `GET /_matrix/client/r0/user/{userId}/rooms/{roomId}/tags`
|
|
||||||
///
|
|
||||||
/// Returns tags on the room.
|
|
||||||
///
|
|
||||||
/// - Gets the tag event of the room account data.
|
|
||||||
pub async fn get_tags_route(body: Ruma<get_tags::v3::Request>) -> Result<get_tags::v3::Response> {
|
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
|
||||||
|
|
||||||
let event = services().account_data.get(
|
|
||||||
Some(&body.room_id),
|
|
||||||
sender_user,
|
|
||||||
RoomAccountDataEventType::Tag,
|
|
||||||
)?;
|
|
||||||
|
|
||||||
let tags_event = event
|
|
||||||
.map(|e| {
|
|
||||||
serde_json::from_str(e.get())
|
|
||||||
.map_err(|_| Error::bad_database("Invalid account data event in db."))
|
|
||||||
})
|
|
||||||
.unwrap_or_else(|| {
|
|
||||||
Ok(TagEvent {
|
|
||||||
content: TagEventContent {
|
|
||||||
tags: BTreeMap::new(),
|
|
||||||
},
|
|
||||||
})
|
|
||||||
})?;
|
|
||||||
|
|
||||||
Ok(get_tags::v3::Response {
|
|
||||||
tags: tags_event.content.tags,
|
|
||||||
})
|
|
||||||
}
|
|
|
@ -1,16 +0,0 @@
|
||||||
use crate::{Result, Ruma};
|
|
||||||
use ruma::api::client::thirdparty::get_protocols;
|
|
||||||
|
|
||||||
use std::collections::BTreeMap;
|
|
||||||
|
|
||||||
/// # `GET /_matrix/client/r0/thirdparty/protocols`
|
|
||||||
///
|
|
||||||
/// TODO: Fetches all metadata about protocols supported by the homeserver.
|
|
||||||
pub async fn get_protocols_route(
|
|
||||||
_body: Ruma<get_protocols::v3::Request>,
|
|
||||||
) -> Result<get_protocols::v3::Response> {
|
|
||||||
// TODO
|
|
||||||
Ok(get_protocols::v3::Response {
|
|
||||||
protocols: BTreeMap::new(),
|
|
||||||
})
|
|
||||||
}
|
|
|
@ -1,49 +0,0 @@
|
||||||
use ruma::api::client::{error::ErrorKind, threads::get_threads};
|
|
||||||
|
|
||||||
use crate::{services, Error, Result, Ruma};
|
|
||||||
|
|
||||||
/// # `GET /_matrix/client/r0/rooms/{roomId}/threads`
|
|
||||||
pub async fn get_threads_route(
|
|
||||||
body: Ruma<get_threads::v1::Request>,
|
|
||||||
) -> Result<get_threads::v1::Response> {
|
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
|
||||||
|
|
||||||
// Use limit or else 10, with maximum 100
|
|
||||||
let limit = body
|
|
||||||
.limit
|
|
||||||
.and_then(|l| l.try_into().ok())
|
|
||||||
.unwrap_or(10)
|
|
||||||
.min(100);
|
|
||||||
|
|
||||||
let from = if let Some(from) = &body.from {
|
|
||||||
from.parse()
|
|
||||||
.map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, ""))?
|
|
||||||
} else {
|
|
||||||
u64::MAX
|
|
||||||
};
|
|
||||||
|
|
||||||
let threads = services()
|
|
||||||
.rooms
|
|
||||||
.threads
|
|
||||||
.threads_until(sender_user, &body.room_id, from, &body.include)?
|
|
||||||
.take(limit)
|
|
||||||
.filter_map(|r| r.ok())
|
|
||||||
.filter(|(_, pdu)| {
|
|
||||||
services()
|
|
||||||
.rooms
|
|
||||||
.state_accessor
|
|
||||||
.user_can_see_event(sender_user, &body.room_id, &pdu.event_id)
|
|
||||||
.unwrap_or(false)
|
|
||||||
})
|
|
||||||
.collect::<Vec<_>>();
|
|
||||||
|
|
||||||
let next_batch = threads.last().map(|(count, _)| count.to_string());
|
|
||||||
|
|
||||||
Ok(get_threads::v1::Response {
|
|
||||||
chunk: threads
|
|
||||||
.into_iter()
|
|
||||||
.map(|(_, pdu)| pdu.to_room_event())
|
|
||||||
.collect(),
|
|
||||||
next_batch,
|
|
||||||
})
|
|
||||||
}
|
|
|
@ -1,40 +0,0 @@
|
||||||
use crate::{services, utils, Error, Result, Ruma};
|
|
||||||
use ruma::api::client::{error::ErrorKind, typing::create_typing_event};
|
|
||||||
|
|
||||||
/// # `PUT /_matrix/client/r0/rooms/{roomId}/typing/{userId}`
|
|
||||||
///
|
|
||||||
/// Sets the typing state of the sender user.
|
|
||||||
pub async fn create_typing_event_route(
|
|
||||||
body: Ruma<create_typing_event::v3::Request>,
|
|
||||||
) -> Result<create_typing_event::v3::Response> {
|
|
||||||
use create_typing_event::v3::Typing;
|
|
||||||
|
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
|
||||||
|
|
||||||
if !services()
|
|
||||||
.rooms
|
|
||||||
.state_cache
|
|
||||||
.is_joined(sender_user, &body.room_id)?
|
|
||||||
{
|
|
||||||
return Err(Error::BadRequest(
|
|
||||||
ErrorKind::Forbidden,
|
|
||||||
"You are not in this room.",
|
|
||||||
));
|
|
||||||
}
|
|
||||||
|
|
||||||
if let Typing::Yes(duration) = body.state {
|
|
||||||
services().rooms.edus.typing.typing_add(
|
|
||||||
sender_user,
|
|
||||||
&body.room_id,
|
|
||||||
duration.as_millis() as u64 + utils::millis_since_unix_epoch(),
|
|
||||||
)?;
|
|
||||||
} else {
|
|
||||||
services()
|
|
||||||
.rooms
|
|
||||||
.edus
|
|
||||||
.typing
|
|
||||||
.typing_remove(sender_user, &body.room_id)?;
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(create_typing_event::v3::Response {})
|
|
||||||
}
|
|
|
@ -1,50 +0,0 @@
|
||||||
use std::{collections::BTreeMap, iter::FromIterator};
|
|
||||||
|
|
||||||
use axum::{response::IntoResponse, Json};
|
|
||||||
use ruma::api::client::{discovery::get_supported_versions, error::ErrorKind};
|
|
||||||
|
|
||||||
use crate::{services, Error, Result, Ruma};
|
|
||||||
|
|
||||||
/// # `GET /_matrix/client/versions`
|
|
||||||
///
|
|
||||||
/// Get the versions of the specification and unstable features supported by this server.
|
|
||||||
///
|
|
||||||
/// - Versions take the form MAJOR.MINOR.PATCH
|
|
||||||
/// - Only the latest PATCH release will be reported for each MAJOR.MINOR value
|
|
||||||
/// - Unstable features are namespaced and may include version information in their name
|
|
||||||
///
|
|
||||||
/// Note: Unstable features are used while developing new features. Clients should avoid using
|
|
||||||
/// unstable features in their stable releases
|
|
||||||
pub async fn get_supported_versions_route(
|
|
||||||
_body: Ruma<get_supported_versions::Request>,
|
|
||||||
) -> Result<get_supported_versions::Response> {
|
|
||||||
let resp = get_supported_versions::Response {
|
|
||||||
versions: vec![
|
|
||||||
"r0.5.0".to_owned(),
|
|
||||||
"r0.6.0".to_owned(),
|
|
||||||
"v1.1".to_owned(),
|
|
||||||
"v1.2".to_owned(),
|
|
||||||
"v1.3".to_owned(),
|
|
||||||
"v1.4".to_owned(),
|
|
||||||
"v1.5".to_owned(),
|
|
||||||
],
|
|
||||||
unstable_features: BTreeMap::from_iter([("org.matrix.e2e_cross_signing".to_owned(), true)]),
|
|
||||||
};
|
|
||||||
|
|
||||||
Ok(resp)
|
|
||||||
}
|
|
||||||
|
|
||||||
/// # `GET /.well-known/matrix/client`
|
|
||||||
pub async fn well_known_client_route(
|
|
||||||
_body: Ruma<get_supported_versions::Request>,
|
|
||||||
) -> Result<impl IntoResponse> {
|
|
||||||
let client_url = match services().globals.well_known_client() {
|
|
||||||
Some(url) => url.clone(),
|
|
||||||
None => return Err(Error::BadRequest(ErrorKind::NotFound, "Not found.")),
|
|
||||||
};
|
|
||||||
|
|
||||||
Ok(Json(serde_json::json!({
|
|
||||||
"m.homeserver": {"base_url": client_url},
|
|
||||||
"org.matrix.msc3575.proxy": {"url": client_url}
|
|
||||||
})))
|
|
||||||
}
|
|
|
@ -1,94 +0,0 @@
|
||||||
use crate::{services, Result, Ruma};
|
|
||||||
use ruma::{
|
|
||||||
api::client::user_directory::search_users,
|
|
||||||
events::{
|
|
||||||
room::join_rules::{JoinRule, RoomJoinRulesEventContent},
|
|
||||||
StateEventType,
|
|
||||||
},
|
|
||||||
};
|
|
||||||
|
|
||||||
/// # `POST /_matrix/client/r0/user_directory/search`
|
|
||||||
///
|
|
||||||
/// Searches all known users for a match.
|
|
||||||
///
|
|
||||||
/// - Hides any local users that aren't in any public rooms (i.e. those that have the join rule set to public)
|
|
||||||
/// and don't share a room with the sender
|
|
||||||
pub async fn search_users_route(
|
|
||||||
body: Ruma<search_users::v3::Request>,
|
|
||||||
) -> Result<search_users::v3::Response> {
|
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
|
||||||
let limit = u64::from(body.limit) as usize;
|
|
||||||
|
|
||||||
let mut users = services().users.iter().filter_map(|user_id| {
|
|
||||||
// Filter out buggy users (they should not exist, but you never know...)
|
|
||||||
let user_id = user_id.ok()?;
|
|
||||||
|
|
||||||
let user = search_users::v3::User {
|
|
||||||
user_id: user_id.clone(),
|
|
||||||
display_name: services().users.displayname(&user_id).ok()?,
|
|
||||||
avatar_url: services().users.avatar_url(&user_id).ok()?,
|
|
||||||
};
|
|
||||||
|
|
||||||
let user_id_matches = user
|
|
||||||
.user_id
|
|
||||||
.to_string()
|
|
||||||
.to_lowercase()
|
|
||||||
.contains(&body.search_term.to_lowercase());
|
|
||||||
|
|
||||||
let user_displayname_matches = user
|
|
||||||
.display_name
|
|
||||||
.as_ref()
|
|
||||||
.filter(|name| {
|
|
||||||
name.to_lowercase()
|
|
||||||
.contains(&body.search_term.to_lowercase())
|
|
||||||
})
|
|
||||||
.is_some();
|
|
||||||
|
|
||||||
if !user_id_matches && !user_displayname_matches {
|
|
||||||
return None;
|
|
||||||
}
|
|
||||||
|
|
||||||
let user_is_in_public_rooms = services()
|
|
||||||
.rooms
|
|
||||||
.state_cache
|
|
||||||
.rooms_joined(&user_id)
|
|
||||||
.filter_map(|r| r.ok())
|
|
||||||
.any(|room| {
|
|
||||||
services()
|
|
||||||
.rooms
|
|
||||||
.state_accessor
|
|
||||||
.room_state_get(&room, &StateEventType::RoomJoinRules, "")
|
|
||||||
.map_or(false, |event| {
|
|
||||||
event.map_or(false, |event| {
|
|
||||||
serde_json::from_str(event.content.get())
|
|
||||||
.map_or(false, |r: RoomJoinRulesEventContent| {
|
|
||||||
r.join_rule == JoinRule::Public
|
|
||||||
})
|
|
||||||
})
|
|
||||||
})
|
|
||||||
});
|
|
||||||
|
|
||||||
if user_is_in_public_rooms {
|
|
||||||
return Some(user);
|
|
||||||
}
|
|
||||||
|
|
||||||
let user_is_in_shared_rooms = services()
|
|
||||||
.rooms
|
|
||||||
.user
|
|
||||||
.get_shared_rooms(vec![sender_user.clone(), user_id])
|
|
||||||
.ok()?
|
|
||||||
.next()
|
|
||||||
.is_some();
|
|
||||||
|
|
||||||
if user_is_in_shared_rooms {
|
|
||||||
return Some(user);
|
|
||||||
}
|
|
||||||
|
|
||||||
None
|
|
||||||
});
|
|
||||||
|
|
||||||
let results = users.by_ref().take(limit).collect();
|
|
||||||
let limited = users.next().is_some();
|
|
||||||
|
|
||||||
Ok(search_users::v3::Response { results, limited })
|
|
||||||
}
|
|
|
@ -1,48 +0,0 @@
|
||||||
use crate::{services, Result, Ruma};
|
|
||||||
use base64::{engine::general_purpose, Engine as _};
|
|
||||||
use hmac::{Hmac, Mac};
|
|
||||||
use ruma::{api::client::voip::get_turn_server_info, SecondsSinceUnixEpoch};
|
|
||||||
use sha1::Sha1;
|
|
||||||
use std::time::{Duration, SystemTime};
|
|
||||||
|
|
||||||
type HmacSha1 = Hmac<Sha1>;
|
|
||||||
|
|
||||||
/// # `GET /_matrix/client/r0/voip/turnServer`
|
|
||||||
///
|
|
||||||
/// TODO: Returns information about the recommended turn server.
|
|
||||||
pub async fn turn_server_route(
|
|
||||||
body: Ruma<get_turn_server_info::v3::Request>,
|
|
||||||
) -> Result<get_turn_server_info::v3::Response> {
|
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
|
||||||
|
|
||||||
let turn_secret = services().globals.turn_secret().clone();
|
|
||||||
|
|
||||||
let (username, password) = if !turn_secret.is_empty() {
|
|
||||||
let expiry = SecondsSinceUnixEpoch::from_system_time(
|
|
||||||
SystemTime::now() + Duration::from_secs(services().globals.turn_ttl()),
|
|
||||||
)
|
|
||||||
.expect("time is valid");
|
|
||||||
|
|
||||||
let username: String = format!("{}:{}", expiry.get(), sender_user);
|
|
||||||
|
|
||||||
let mut mac = HmacSha1::new_from_slice(turn_secret.as_bytes())
|
|
||||||
.expect("HMAC can take key of any size");
|
|
||||||
mac.update(username.as_bytes());
|
|
||||||
|
|
||||||
let password: String = general_purpose::STANDARD.encode(mac.finalize().into_bytes());
|
|
||||||
|
|
||||||
(username, password)
|
|
||||||
} else {
|
|
||||||
(
|
|
||||||
services().globals.turn_username().clone(),
|
|
||||||
services().globals.turn_password().clone(),
|
|
||||||
)
|
|
||||||
};
|
|
||||||
|
|
||||||
Ok(get_turn_server_info::v3::Response {
|
|
||||||
username,
|
|
||||||
password,
|
|
||||||
uris: services().globals.turn_uris().to_vec(),
|
|
||||||
ttl: Duration::from_secs(services().globals.turn_ttl()),
|
|
||||||
})
|
|
||||||
}
|
|
|
@ -1,4 +0,0 @@
|
||||||
pub mod appservice_server;
|
|
||||||
pub mod client_server;
|
|
||||||
pub mod ruma_wrapper;
|
|
||||||
pub mod server_server;
|
|
|
@ -1,427 +0,0 @@
|
||||||
use std::{collections::BTreeMap, iter::FromIterator, str};
|
|
||||||
|
|
||||||
use axum::{
|
|
||||||
async_trait,
|
|
||||||
body::{Full, HttpBody},
|
|
||||||
extract::{rejection::TypedHeaderRejectionReason, FromRequest, Path, TypedHeader},
|
|
||||||
headers::{
|
|
||||||
authorization::{Bearer, Credentials},
|
|
||||||
Authorization,
|
|
||||||
},
|
|
||||||
response::{IntoResponse, Response},
|
|
||||||
BoxError, RequestExt, RequestPartsExt,
|
|
||||||
};
|
|
||||||
use bytes::{Buf, BufMut, Bytes, BytesMut};
|
|
||||||
use http::{Request, StatusCode};
|
|
||||||
use ruma::{
|
|
||||||
api::{client::error::ErrorKind, AuthScheme, IncomingRequest, OutgoingResponse},
|
|
||||||
CanonicalJsonValue, OwnedDeviceId, OwnedServerName, UserId,
|
|
||||||
};
|
|
||||||
use serde::Deserialize;
|
|
||||||
use tracing::{debug, error, warn};
|
|
||||||
|
|
||||||
use super::{Ruma, RumaResponse};
|
|
||||||
use crate::{services, Error, Result};
|
|
||||||
|
|
||||||
#[async_trait]
|
|
||||||
impl<T, S, B> FromRequest<S, B> for Ruma<T>
|
|
||||||
where
|
|
||||||
T: IncomingRequest,
|
|
||||||
B: HttpBody + Send + 'static,
|
|
||||||
B::Data: Send,
|
|
||||||
B::Error: Into<BoxError>,
|
|
||||||
{
|
|
||||||
type Rejection = Error;
|
|
||||||
|
|
||||||
async fn from_request(req: Request<B>, _state: &S) -> Result<Self, Self::Rejection> {
|
|
||||||
#[derive(Deserialize)]
|
|
||||||
struct QueryParams {
|
|
||||||
access_token: Option<String>,
|
|
||||||
user_id: Option<String>,
|
|
||||||
}
|
|
||||||
|
|
||||||
let (mut parts, mut body) = match req.with_limited_body() {
|
|
||||||
Ok(limited_req) => {
|
|
||||||
let (parts, body) = limited_req.into_parts();
|
|
||||||
let body = to_bytes(body)
|
|
||||||
.await
|
|
||||||
.map_err(|_| Error::BadRequest(ErrorKind::MissingToken, "Missing token."))?;
|
|
||||||
(parts, body)
|
|
||||||
}
|
|
||||||
Err(original_req) => {
|
|
||||||
let (parts, body) = original_req.into_parts();
|
|
||||||
let body = to_bytes(body)
|
|
||||||
.await
|
|
||||||
.map_err(|_| Error::BadRequest(ErrorKind::MissingToken, "Missing token."))?;
|
|
||||||
(parts, body)
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
let metadata = T::METADATA;
|
|
||||||
let auth_header: Option<TypedHeader<Authorization<Bearer>>> = parts.extract().await?;
|
|
||||||
let path_params: Path<Vec<String>> = parts.extract().await?;
|
|
||||||
|
|
||||||
let query = parts.uri.query().unwrap_or_default();
|
|
||||||
let query_params: QueryParams = match serde_html_form::from_str(query) {
|
|
||||||
Ok(params) => params,
|
|
||||||
Err(e) => {
|
|
||||||
error!(%query, "Failed to deserialize query parameters: {}", e);
|
|
||||||
return Err(Error::BadRequest(
|
|
||||||
ErrorKind::Unknown,
|
|
||||||
"Failed to read query parameters",
|
|
||||||
));
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
let token = match &auth_header {
|
|
||||||
Some(TypedHeader(Authorization(bearer))) => Some(bearer.token()),
|
|
||||||
None => query_params.access_token.as_deref(),
|
|
||||||
};
|
|
||||||
|
|
||||||
let mut json_body = serde_json::from_slice::<CanonicalJsonValue>(&body).ok();
|
|
||||||
|
|
||||||
let appservices = services().appservice.all().unwrap();
|
|
||||||
let appservice_registration = appservices.iter().find(|(_id, registration)| {
|
|
||||||
registration
|
|
||||||
.get("as_token")
|
|
||||||
.and_then(|as_token| as_token.as_str())
|
|
||||||
.map_or(false, |as_token| token == Some(as_token))
|
|
||||||
});
|
|
||||||
|
|
||||||
let (sender_user, sender_device, sender_servername, from_appservice) =
|
|
||||||
if let Some((_id, registration)) = appservice_registration {
|
|
||||||
match metadata.authentication {
|
|
||||||
AuthScheme::AccessToken => {
|
|
||||||
let user_id = query_params.user_id.map_or_else(
|
|
||||||
|| {
|
|
||||||
UserId::parse_with_server_name(
|
|
||||||
registration
|
|
||||||
.get("sender_localpart")
|
|
||||||
.unwrap()
|
|
||||||
.as_str()
|
|
||||||
.unwrap(),
|
|
||||||
services().globals.server_name(),
|
|
||||||
)
|
|
||||||
.unwrap()
|
|
||||||
},
|
|
||||||
|s| UserId::parse(s).unwrap(),
|
|
||||||
);
|
|
||||||
|
|
||||||
if !services().users.exists(&user_id).unwrap() {
|
|
||||||
return Err(Error::BadRequest(
|
|
||||||
ErrorKind::Forbidden,
|
|
||||||
"User does not exist.",
|
|
||||||
));
|
|
||||||
}
|
|
||||||
|
|
||||||
// TODO: Check if appservice is allowed to be that user
|
|
||||||
(Some(user_id), None, None, true)
|
|
||||||
}
|
|
||||||
AuthScheme::ServerSignatures => (None, None, None, true),
|
|
||||||
AuthScheme::None => (None, None, None, true),
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
match metadata.authentication {
|
|
||||||
AuthScheme::AccessToken => {
|
|
||||||
let token = match token {
|
|
||||||
Some(token) => token,
|
|
||||||
_ => {
|
|
||||||
return Err(Error::BadRequest(
|
|
||||||
ErrorKind::MissingToken,
|
|
||||||
"Missing access token.",
|
|
||||||
))
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
match services().users.find_from_token(token).unwrap() {
|
|
||||||
None => {
|
|
||||||
return Err(Error::BadRequest(
|
|
||||||
ErrorKind::UnknownToken { soft_logout: false },
|
|
||||||
"Unknown access token.",
|
|
||||||
))
|
|
||||||
}
|
|
||||||
Some((user_id, device_id)) => (
|
|
||||||
Some(user_id),
|
|
||||||
Some(OwnedDeviceId::from(device_id)),
|
|
||||||
None,
|
|
||||||
false,
|
|
||||||
),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
AuthScheme::ServerSignatures => {
|
|
||||||
let TypedHeader(Authorization(x_matrix)) = parts
|
|
||||||
.extract::<TypedHeader<Authorization<XMatrix>>>()
|
|
||||||
.await
|
|
||||||
.map_err(|e| {
|
|
||||||
warn!("Missing or invalid Authorization header: {}", e);
|
|
||||||
|
|
||||||
let msg = match e.reason() {
|
|
||||||
TypedHeaderRejectionReason::Missing => {
|
|
||||||
"Missing Authorization header."
|
|
||||||
}
|
|
||||||
TypedHeaderRejectionReason::Error(_) => {
|
|
||||||
"Invalid X-Matrix signatures."
|
|
||||||
}
|
|
||||||
_ => "Unknown header-related error",
|
|
||||||
};
|
|
||||||
|
|
||||||
Error::BadRequest(ErrorKind::Forbidden, msg)
|
|
||||||
})?;
|
|
||||||
|
|
||||||
let origin_signatures = BTreeMap::from_iter([(
|
|
||||||
x_matrix.key.clone(),
|
|
||||||
CanonicalJsonValue::String(x_matrix.sig),
|
|
||||||
)]);
|
|
||||||
|
|
||||||
let signatures = BTreeMap::from_iter([(
|
|
||||||
x_matrix.origin.as_str().to_owned(),
|
|
||||||
CanonicalJsonValue::Object(origin_signatures),
|
|
||||||
)]);
|
|
||||||
|
|
||||||
let mut request_map = BTreeMap::from_iter([
|
|
||||||
(
|
|
||||||
"method".to_owned(),
|
|
||||||
CanonicalJsonValue::String(parts.method.to_string()),
|
|
||||||
),
|
|
||||||
(
|
|
||||||
"uri".to_owned(),
|
|
||||||
CanonicalJsonValue::String(parts.uri.to_string()),
|
|
||||||
),
|
|
||||||
(
|
|
||||||
"origin".to_owned(),
|
|
||||||
CanonicalJsonValue::String(x_matrix.origin.as_str().to_owned()),
|
|
||||||
),
|
|
||||||
(
|
|
||||||
"destination".to_owned(),
|
|
||||||
CanonicalJsonValue::String(
|
|
||||||
services().globals.server_name().as_str().to_owned(),
|
|
||||||
),
|
|
||||||
),
|
|
||||||
(
|
|
||||||
"signatures".to_owned(),
|
|
||||||
CanonicalJsonValue::Object(signatures),
|
|
||||||
),
|
|
||||||
]);
|
|
||||||
|
|
||||||
if let Some(json_body) = &json_body {
|
|
||||||
request_map.insert("content".to_owned(), json_body.clone());
|
|
||||||
};
|
|
||||||
|
|
||||||
let keys_result = services()
|
|
||||||
.rooms
|
|
||||||
.event_handler
|
|
||||||
.fetch_signing_keys(&x_matrix.origin, vec![x_matrix.key.to_owned()])
|
|
||||||
.await;
|
|
||||||
|
|
||||||
let keys = match keys_result {
|
|
||||||
Ok(b) => b,
|
|
||||||
Err(e) => {
|
|
||||||
warn!("Failed to fetch signing keys: {}", e);
|
|
||||||
return Err(Error::BadRequest(
|
|
||||||
ErrorKind::Forbidden,
|
|
||||||
"Failed to fetch signing keys.",
|
|
||||||
));
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
let pub_key_map =
|
|
||||||
BTreeMap::from_iter([(x_matrix.origin.as_str().to_owned(), keys)]);
|
|
||||||
|
|
||||||
match ruma::signatures::verify_json(&pub_key_map, &request_map) {
|
|
||||||
Ok(()) => (None, None, Some(x_matrix.origin), false),
|
|
||||||
Err(e) => {
|
|
||||||
warn!(
|
|
||||||
"Failed to verify json request from {}: {}\n{:?}",
|
|
||||||
x_matrix.origin, e, request_map
|
|
||||||
);
|
|
||||||
|
|
||||||
if parts.uri.to_string().contains('@') {
|
|
||||||
warn!(
|
|
||||||
"Request uri contained '@' character. Make sure your \
|
|
||||||
reverse proxy gives Conduit the raw uri (apache: use \
|
|
||||||
nocanon)"
|
|
||||||
);
|
|
||||||
}
|
|
||||||
|
|
||||||
return Err(Error::BadRequest(
|
|
||||||
ErrorKind::Forbidden,
|
|
||||||
"Failed to verify X-Matrix signatures.",
|
|
||||||
));
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
AuthScheme::None => (None, None, None, false),
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
let mut http_request = http::Request::builder().uri(parts.uri).method(parts.method);
|
|
||||||
*http_request.headers_mut().unwrap() = parts.headers;
|
|
||||||
|
|
||||||
if let Some(CanonicalJsonValue::Object(json_body)) = &mut json_body {
|
|
||||||
let user_id = sender_user.clone().unwrap_or_else(|| {
|
|
||||||
UserId::parse_with_server_name("", services().globals.server_name())
|
|
||||||
.expect("we know this is valid")
|
|
||||||
});
|
|
||||||
|
|
||||||
let uiaa_request = json_body
|
|
||||||
.get("auth")
|
|
||||||
.and_then(|auth| auth.as_object())
|
|
||||||
.and_then(|auth| auth.get("session"))
|
|
||||||
.and_then(|session| session.as_str())
|
|
||||||
.and_then(|session| {
|
|
||||||
services().uiaa.get_uiaa_request(
|
|
||||||
&user_id,
|
|
||||||
&sender_device.clone().unwrap_or_else(|| "".into()),
|
|
||||||
session,
|
|
||||||
)
|
|
||||||
});
|
|
||||||
|
|
||||||
if let Some(CanonicalJsonValue::Object(initial_request)) = uiaa_request {
|
|
||||||
for (key, value) in initial_request {
|
|
||||||
json_body.entry(key).or_insert(value);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
let mut buf = BytesMut::new().writer();
|
|
||||||
serde_json::to_writer(&mut buf, json_body).expect("value serialization can't fail");
|
|
||||||
body = buf.into_inner().freeze();
|
|
||||||
}
|
|
||||||
|
|
||||||
let http_request = http_request.body(&*body).unwrap();
|
|
||||||
|
|
||||||
debug!("{:?}", http_request);
|
|
||||||
|
|
||||||
let body = T::try_from_http_request(http_request, &path_params).map_err(|e| {
|
|
||||||
warn!("try_from_http_request failed: {:?}", e);
|
|
||||||
debug!("JSON body: {:?}", json_body);
|
|
||||||
Error::BadRequest(ErrorKind::BadJson, "Failed to deserialize request.")
|
|
||||||
})?;
|
|
||||||
|
|
||||||
Ok(Ruma {
|
|
||||||
body,
|
|
||||||
sender_user,
|
|
||||||
sender_device,
|
|
||||||
sender_servername,
|
|
||||||
from_appservice,
|
|
||||||
json_body,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
struct XMatrix {
|
|
||||||
origin: OwnedServerName,
|
|
||||||
key: String, // KeyName?
|
|
||||||
sig: String,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Credentials for XMatrix {
|
|
||||||
const SCHEME: &'static str = "X-Matrix";
|
|
||||||
|
|
||||||
fn decode(value: &http::HeaderValue) -> Option<Self> {
|
|
||||||
debug_assert!(
|
|
||||||
value.as_bytes().starts_with(b"X-Matrix "),
|
|
||||||
"HeaderValue to decode should start with \"X-Matrix ..\", received = {value:?}",
|
|
||||||
);
|
|
||||||
|
|
||||||
let parameters = str::from_utf8(&value.as_bytes()["X-Matrix ".len()..])
|
|
||||||
.ok()?
|
|
||||||
.trim_start();
|
|
||||||
|
|
||||||
let mut origin = None;
|
|
||||||
let mut key = None;
|
|
||||||
let mut sig = None;
|
|
||||||
|
|
||||||
for entry in parameters.split_terminator(',') {
|
|
||||||
let (name, value) = entry.split_once('=')?;
|
|
||||||
|
|
||||||
// It's not at all clear why some fields are quoted and others not in the spec,
|
|
||||||
// let's simply accept either form for every field.
|
|
||||||
let value = value
|
|
||||||
.strip_prefix('"')
|
|
||||||
.and_then(|rest| rest.strip_suffix('"'))
|
|
||||||
.unwrap_or(value);
|
|
||||||
|
|
||||||
// FIXME: Catch multiple fields of the same name
|
|
||||||
match name {
|
|
||||||
"origin" => origin = Some(value.try_into().ok()?),
|
|
||||||
"key" => key = Some(value.to_owned()),
|
|
||||||
"sig" => sig = Some(value.to_owned()),
|
|
||||||
_ => debug!(
|
|
||||||
"Unexpected field `{}` in X-Matrix Authorization header",
|
|
||||||
name
|
|
||||||
),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
Some(Self {
|
|
||||||
origin: origin?,
|
|
||||||
key: key?,
|
|
||||||
sig: sig?,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
fn encode(&self) -> http::HeaderValue {
|
|
||||||
todo!()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<T: OutgoingResponse> IntoResponse for RumaResponse<T> {
|
|
||||||
fn into_response(self) -> Response {
|
|
||||||
match self.0.try_into_http_response::<BytesMut>() {
|
|
||||||
Ok(res) => res.map(BytesMut::freeze).map(Full::new).into_response(),
|
|
||||||
Err(_) => StatusCode::INTERNAL_SERVER_ERROR.into_response(),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// copied from hyper under the following license:
|
|
||||||
// Copyright (c) 2014-2021 Sean McArthur
|
|
||||||
|
|
||||||
// Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
||||||
// of this software and associated documentation files (the "Software"), to deal
|
|
||||||
// in the Software without restriction, including without limitation the rights
|
|
||||||
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
||||||
// copies of the Software, and to permit persons to whom the Software is
|
|
||||||
// furnished to do so, subject to the following conditions:
|
|
||||||
|
|
||||||
// The above copyright notice and this permission notice shall be included in
|
|
||||||
// all copies or substantial portions of the Software.
|
|
||||||
|
|
||||||
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
||||||
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
||||||
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
||||||
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
||||||
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
||||||
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
|
||||||
// THE SOFTWARE.
|
|
||||||
pub(crate) async fn to_bytes<T>(body: T) -> Result<Bytes, T::Error>
|
|
||||||
where
|
|
||||||
T: HttpBody,
|
|
||||||
{
|
|
||||||
futures_util::pin_mut!(body);
|
|
||||||
|
|
||||||
// If there's only 1 chunk, we can just return Buf::to_bytes()
|
|
||||||
let mut first = if let Some(buf) = body.data().await {
|
|
||||||
buf?
|
|
||||||
} else {
|
|
||||||
return Ok(Bytes::new());
|
|
||||||
};
|
|
||||||
|
|
||||||
let second = if let Some(buf) = body.data().await {
|
|
||||||
buf?
|
|
||||||
} else {
|
|
||||||
return Ok(first.copy_to_bytes(first.remaining()));
|
|
||||||
};
|
|
||||||
|
|
||||||
// With more than 1 buf, we gotta flatten into a Vec first.
|
|
||||||
let cap = first.remaining() + second.remaining() + body.size_hint().lower() as usize;
|
|
||||||
let mut vec = Vec::with_capacity(cap);
|
|
||||||
vec.put(first);
|
|
||||||
vec.put(second);
|
|
||||||
|
|
||||||
while let Some(buf) = body.data().await {
|
|
||||||
vec.put(buf?);
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(vec.into())
|
|
||||||
}
|
|
|
@ -1,43 +0,0 @@
|
||||||
use crate::Error;
|
|
||||||
use ruma::{
|
|
||||||
api::client::uiaa::UiaaResponse, CanonicalJsonValue, OwnedDeviceId, OwnedServerName,
|
|
||||||
OwnedUserId,
|
|
||||||
};
|
|
||||||
use std::ops::Deref;
|
|
||||||
|
|
||||||
#[cfg(feature = "conduit_bin")]
|
|
||||||
mod axum;
|
|
||||||
|
|
||||||
/// Extractor for Ruma request structs
|
|
||||||
pub struct Ruma<T> {
|
|
||||||
pub body: T,
|
|
||||||
pub sender_user: Option<OwnedUserId>,
|
|
||||||
pub sender_device: Option<OwnedDeviceId>,
|
|
||||||
pub sender_servername: Option<OwnedServerName>,
|
|
||||||
// This is None when body is not a valid string
|
|
||||||
pub json_body: Option<CanonicalJsonValue>,
|
|
||||||
pub from_appservice: bool,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<T> Deref for Ruma<T> {
|
|
||||||
type Target = T;
|
|
||||||
|
|
||||||
fn deref(&self) -> &Self::Target {
|
|
||||||
&self.body
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Clone)]
|
|
||||||
pub struct RumaResponse<T>(pub T);
|
|
||||||
|
|
||||||
impl<T> From<T> for RumaResponse<T> {
|
|
||||||
fn from(t: T) -> Self {
|
|
||||||
Self(t)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl From<Error> for RumaResponse<UiaaResponse> {
|
|
||||||
fn from(t: Error) -> Self {
|
|
||||||
t.to_response()
|
|
||||||
}
|
|
||||||
}
|
|
File diff suppressed because it is too large
Load diff
|
@ -1,11 +1,11 @@
|
||||||
use crate::{services, utils, Error, Result};
|
use crate::{utils, Error, Result};
|
||||||
use bytes::BytesMut;
|
use bytes::BytesMut;
|
||||||
use ruma::api::{IncomingResponse, MatrixVersion, OutgoingRequest, SendAccessToken};
|
use ruma::api::{IncomingResponse, OutgoingRequest, SendAccessToken};
|
||||||
use std::{fmt::Debug, mem, time::Duration};
|
use std::{fmt::Debug, mem, time::Duration};
|
||||||
use tracing::warn;
|
use tracing::warn;
|
||||||
|
|
||||||
#[tracing::instrument(skip(request))]
|
|
||||||
pub(crate) async fn send_request<T: OutgoingRequest>(
|
pub(crate) async fn send_request<T: OutgoingRequest>(
|
||||||
|
globals: &crate::database::globals::Globals,
|
||||||
registration: serde_yaml::Value,
|
registration: serde_yaml::Value,
|
||||||
request: T,
|
request: T,
|
||||||
) -> Result<T::IncomingResponse>
|
) -> Result<T::IncomingResponse>
|
||||||
|
@ -16,11 +16,7 @@ where
|
||||||
let hs_token = registration.get("hs_token").unwrap().as_str().unwrap();
|
let hs_token = registration.get("hs_token").unwrap().as_str().unwrap();
|
||||||
|
|
||||||
let mut http_request = request
|
let mut http_request = request
|
||||||
.try_into_http_request::<BytesMut>(
|
.try_into_http_request::<BytesMut>(destination, SendAccessToken::IfRequired(""))
|
||||||
destination,
|
|
||||||
SendAccessToken::IfRequired(hs_token),
|
|
||||||
&[MatrixVersion::V1_0],
|
|
||||||
)
|
|
||||||
.unwrap()
|
.unwrap()
|
||||||
.map(|body| body.freeze());
|
.map(|body| body.freeze());
|
||||||
|
|
||||||
|
@ -45,23 +41,7 @@ where
|
||||||
*reqwest_request.timeout_mut() = Some(Duration::from_secs(30));
|
*reqwest_request.timeout_mut() = Some(Duration::from_secs(30));
|
||||||
|
|
||||||
let url = reqwest_request.url().clone();
|
let url = reqwest_request.url().clone();
|
||||||
let mut response = match services()
|
let mut response = globals.default_client().execute(reqwest_request).await?;
|
||||||
.globals
|
|
||||||
.default_client()
|
|
||||||
.execute(reqwest_request)
|
|
||||||
.await
|
|
||||||
{
|
|
||||||
Ok(r) => r,
|
|
||||||
Err(e) => {
|
|
||||||
warn!(
|
|
||||||
"Could not send request to appservice {:?} at {}: {}",
|
|
||||||
registration.get("id"),
|
|
||||||
destination,
|
|
||||||
e
|
|
||||||
);
|
|
||||||
return Err(e.into());
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
// reqwest::Response -> http::Response conversion
|
// reqwest::Response -> http::Response conversion
|
||||||
let status = response.status();
|
let status = response.status();
|
|
@ -1,23 +1,36 @@
|
||||||
|
use std::sync::Arc;
|
||||||
|
|
||||||
use super::{DEVICE_ID_LENGTH, SESSION_ID_LENGTH, TOKEN_LENGTH};
|
use super::{DEVICE_ID_LENGTH, SESSION_ID_LENGTH, TOKEN_LENGTH};
|
||||||
use crate::{api::client_server, services, utils, Error, Result, Ruma};
|
use crate::{
|
||||||
|
database::{admin::make_user_admin, DatabaseGuard},
|
||||||
|
pdu::PduBuilder,
|
||||||
|
utils, ConduitResult, Error, Ruma,
|
||||||
|
};
|
||||||
use ruma::{
|
use ruma::{
|
||||||
api::client::{
|
api::client::{
|
||||||
|
error::ErrorKind,
|
||||||
|
r0::{
|
||||||
account::{
|
account::{
|
||||||
change_password, deactivate, get_3pids, get_username_availability, register,
|
change_password, deactivate, get_3pids, get_username_availability, register,
|
||||||
request_3pid_management_token_via_email, request_3pid_management_token_via_msisdn,
|
|
||||||
whoami, ThirdPartyIdRemovalStatus,
|
whoami, ThirdPartyIdRemovalStatus,
|
||||||
},
|
},
|
||||||
error::ErrorKind,
|
|
||||||
uiaa::{AuthFlow, AuthType, UiaaInfo},
|
uiaa::{AuthFlow, AuthType, UiaaInfo},
|
||||||
},
|
},
|
||||||
events::{room::message::RoomMessageEventContent, GlobalAccountDataEventType},
|
},
|
||||||
|
events::{
|
||||||
|
room::member::{MembershipState, RoomMemberEventContent},
|
||||||
|
EventType,
|
||||||
|
},
|
||||||
push, UserId,
|
push, UserId,
|
||||||
};
|
};
|
||||||
|
use serde_json::value::to_raw_value;
|
||||||
use tracing::{info, warn};
|
use tracing::{info, warn};
|
||||||
|
|
||||||
use register::RegistrationKind;
|
use register::RegistrationKind;
|
||||||
|
#[cfg(feature = "conduit_bin")]
|
||||||
|
use rocket::{get, post};
|
||||||
|
|
||||||
const RANDOM_USER_ID_LENGTH: usize = 10;
|
const GUEST_NAME_LENGTH: usize = 10;
|
||||||
|
|
||||||
/// # `GET /_matrix/client/r0/register/available`
|
/// # `GET /_matrix/client/r0/register/available`
|
||||||
///
|
///
|
||||||
|
@ -29,17 +42,21 @@ const RANDOM_USER_ID_LENGTH: usize = 10;
|
||||||
/// - No user or appservice on this server already claimed this username
|
/// - No user or appservice on this server already claimed this username
|
||||||
///
|
///
|
||||||
/// Note: This will not reserve the username, so the username might become invalid when trying to register
|
/// Note: This will not reserve the username, so the username might become invalid when trying to register
|
||||||
|
#[cfg_attr(
|
||||||
|
feature = "conduit_bin",
|
||||||
|
get("/_matrix/client/r0/register/available", data = "<body>")
|
||||||
|
)]
|
||||||
|
#[tracing::instrument(skip(db, body))]
|
||||||
pub async fn get_register_available_route(
|
pub async fn get_register_available_route(
|
||||||
body: Ruma<get_username_availability::v3::Request>,
|
db: DatabaseGuard,
|
||||||
) -> Result<get_username_availability::v3::Response> {
|
body: Ruma<get_username_availability::Request<'_>>,
|
||||||
|
) -> ConduitResult<get_username_availability::Response> {
|
||||||
// Validate user id
|
// Validate user id
|
||||||
let user_id = UserId::parse_with_server_name(
|
let user_id =
|
||||||
body.username.to_lowercase(),
|
UserId::parse_with_server_name(body.username.to_lowercase(), db.globals.server_name())
|
||||||
services().globals.server_name(),
|
|
||||||
)
|
|
||||||
.ok()
|
.ok()
|
||||||
.filter(|user_id| {
|
.filter(|user_id| {
|
||||||
!user_id.is_historical() && user_id.server_name() == services().globals.server_name()
|
!user_id.is_historical() && user_id.server_name() == db.globals.server_name()
|
||||||
})
|
})
|
||||||
.ok_or(Error::BadRequest(
|
.ok_or(Error::BadRequest(
|
||||||
ErrorKind::InvalidUsername,
|
ErrorKind::InvalidUsername,
|
||||||
|
@ -47,7 +64,7 @@ pub async fn get_register_available_route(
|
||||||
))?;
|
))?;
|
||||||
|
|
||||||
// Check if username is creative enough
|
// Check if username is creative enough
|
||||||
if services().users.exists(&user_id)? {
|
if db.users.exists(&user_id)? {
|
||||||
return Err(Error::BadRequest(
|
return Err(Error::BadRequest(
|
||||||
ErrorKind::UserInUse,
|
ErrorKind::UserInUse,
|
||||||
"Desired user ID is already taken.",
|
"Desired user ID is already taken.",
|
||||||
|
@ -57,7 +74,7 @@ pub async fn get_register_available_route(
|
||||||
// TODO add check for appservice namespaces
|
// TODO add check for appservice namespaces
|
||||||
|
|
||||||
// If no if check is true we have an username that's available to be used.
|
// If no if check is true we have an username that's available to be used.
|
||||||
Ok(get_username_availability::v3::Response { available: true })
|
Ok(get_username_availability::Response { available: true }.into())
|
||||||
}
|
}
|
||||||
|
|
||||||
/// # `POST /_matrix/client/r0/register`
|
/// # `POST /_matrix/client/r0/register`
|
||||||
|
@ -73,11 +90,16 @@ pub async fn get_register_available_route(
|
||||||
/// - If type is not guest and no username is given: Always fails after UIAA check
|
/// - If type is not guest and no username is given: Always fails after UIAA check
|
||||||
/// - Creates a new account and populates it with default account data
|
/// - Creates a new account and populates it with default account data
|
||||||
/// - If `inhibit_login` is false: Creates a device and returns device id and access_token
|
/// - If `inhibit_login` is false: Creates a device and returns device id and access_token
|
||||||
pub async fn register_route(body: Ruma<register::v3::Request>) -> Result<register::v3::Response> {
|
#[cfg_attr(
|
||||||
if !services().globals.allow_registration()
|
feature = "conduit_bin",
|
||||||
&& !body.from_appservice
|
post("/_matrix/client/r0/register", data = "<body>")
|
||||||
&& services().globals.config.registration_token.is_none()
|
)]
|
||||||
{
|
#[tracing::instrument(skip(db, body))]
|
||||||
|
pub async fn register_route(
|
||||||
|
db: DatabaseGuard,
|
||||||
|
body: Ruma<register::Request<'_>>,
|
||||||
|
) -> ConduitResult<register::Response> {
|
||||||
|
if !db.globals.allow_registration() && !body.from_appservice {
|
||||||
return Err(Error::BadRequest(
|
return Err(Error::BadRequest(
|
||||||
ErrorKind::Forbidden,
|
ErrorKind::Forbidden,
|
||||||
"Registration has been disabled.",
|
"Registration has been disabled.",
|
||||||
|
@ -86,49 +108,43 @@ pub async fn register_route(body: Ruma<register::v3::Request>) -> Result<registe
|
||||||
|
|
||||||
let is_guest = body.kind == RegistrationKind::Guest;
|
let is_guest = body.kind == RegistrationKind::Guest;
|
||||||
|
|
||||||
let user_id = match (&body.username, is_guest) {
|
let mut missing_username = false;
|
||||||
(Some(username), false) => {
|
|
||||||
let proposed_user_id = UserId::parse_with_server_name(
|
// Validate user id
|
||||||
username.to_lowercase(),
|
let user_id = UserId::parse_with_server_name(
|
||||||
services().globals.server_name(),
|
if is_guest {
|
||||||
|
utils::random_string(GUEST_NAME_LENGTH)
|
||||||
|
} else {
|
||||||
|
body.username.clone().unwrap_or_else(|| {
|
||||||
|
// If the user didn't send a username field, that means the client is just trying
|
||||||
|
// the get an UIAA error to see available flows
|
||||||
|
missing_username = true;
|
||||||
|
// Just give the user a random name. He won't be able to register with it anyway.
|
||||||
|
utils::random_string(GUEST_NAME_LENGTH)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
.to_lowercase(),
|
||||||
|
db.globals.server_name(),
|
||||||
)
|
)
|
||||||
.ok()
|
.ok()
|
||||||
.filter(|user_id| {
|
.filter(|user_id| !user_id.is_historical() && user_id.server_name() == db.globals.server_name())
|
||||||
!user_id.is_historical()
|
|
||||||
&& user_id.server_name() == services().globals.server_name()
|
|
||||||
})
|
|
||||||
.ok_or(Error::BadRequest(
|
.ok_or(Error::BadRequest(
|
||||||
ErrorKind::InvalidUsername,
|
ErrorKind::InvalidUsername,
|
||||||
"Username is invalid.",
|
"Username is invalid.",
|
||||||
))?;
|
))?;
|
||||||
if services().users.exists(&proposed_user_id)? {
|
|
||||||
|
// Check if username is creative enough
|
||||||
|
if db.users.exists(&user_id)? {
|
||||||
return Err(Error::BadRequest(
|
return Err(Error::BadRequest(
|
||||||
ErrorKind::UserInUse,
|
ErrorKind::UserInUse,
|
||||||
"Desired user ID is already taken.",
|
"Desired user ID is already taken.",
|
||||||
));
|
));
|
||||||
}
|
}
|
||||||
proposed_user_id
|
|
||||||
}
|
|
||||||
_ => loop {
|
|
||||||
let proposed_user_id = UserId::parse_with_server_name(
|
|
||||||
utils::random_string(RANDOM_USER_ID_LENGTH).to_lowercase(),
|
|
||||||
services().globals.server_name(),
|
|
||||||
)
|
|
||||||
.unwrap();
|
|
||||||
if !services().users.exists(&proposed_user_id)? {
|
|
||||||
break proposed_user_id;
|
|
||||||
}
|
|
||||||
},
|
|
||||||
};
|
|
||||||
|
|
||||||
// UIAA
|
// UIAA
|
||||||
let mut uiaainfo = UiaaInfo {
|
let mut uiaainfo = UiaaInfo {
|
||||||
flows: vec![AuthFlow {
|
flows: vec![AuthFlow {
|
||||||
stages: if services().globals.config.registration_token.is_some() {
|
stages: vec![AuthType::Dummy],
|
||||||
vec![AuthType::RegistrationToken]
|
|
||||||
} else {
|
|
||||||
vec![AuthType::Dummy]
|
|
||||||
},
|
|
||||||
}],
|
}],
|
||||||
completed: Vec::new(),
|
completed: Vec::new(),
|
||||||
params: Default::default(),
|
params: Default::default(),
|
||||||
|
@ -136,14 +152,16 @@ pub async fn register_route(body: Ruma<register::v3::Request>) -> Result<registe
|
||||||
auth_error: None,
|
auth_error: None,
|
||||||
};
|
};
|
||||||
|
|
||||||
if !body.from_appservice && !is_guest {
|
if !body.from_appservice {
|
||||||
if let Some(auth) = &body.auth {
|
if let Some(auth) = &body.auth {
|
||||||
let (worked, uiaainfo) = services().uiaa.try_auth(
|
let (worked, uiaainfo) = db.uiaa.try_auth(
|
||||||
&UserId::parse_with_server_name("", services().globals.server_name())
|
&UserId::parse_with_server_name("", db.globals.server_name())
|
||||||
.expect("we know this is valid"),
|
.expect("we know this is valid"),
|
||||||
"".into(),
|
"".into(),
|
||||||
auth,
|
auth,
|
||||||
&uiaainfo,
|
&uiaainfo,
|
||||||
|
&db.users,
|
||||||
|
&db.globals,
|
||||||
)?;
|
)?;
|
||||||
if !worked {
|
if !worked {
|
||||||
return Err(Error::Uiaa(uiaainfo));
|
return Err(Error::Uiaa(uiaainfo));
|
||||||
|
@ -151,8 +169,8 @@ pub async fn register_route(body: Ruma<register::v3::Request>) -> Result<registe
|
||||||
// Success!
|
// Success!
|
||||||
} else if let Some(json) = body.json_body {
|
} else if let Some(json) = body.json_body {
|
||||||
uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH));
|
uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH));
|
||||||
services().uiaa.create(
|
db.uiaa.create(
|
||||||
&UserId::parse_with_server_name("", services().globals.server_name())
|
&UserId::parse_with_server_name("", db.globals.server_name())
|
||||||
.expect("we know this is valid"),
|
.expect("we know this is valid"),
|
||||||
"".into(),
|
"".into(),
|
||||||
&uiaainfo,
|
&uiaainfo,
|
||||||
|
@ -164,6 +182,13 @@ pub async fn register_route(body: Ruma<register::v3::Request>) -> Result<registe
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if missing_username {
|
||||||
|
return Err(Error::BadRequest(
|
||||||
|
ErrorKind::MissingParam,
|
||||||
|
"Missing username field.",
|
||||||
|
));
|
||||||
|
}
|
||||||
|
|
||||||
let password = if is_guest {
|
let password = if is_guest {
|
||||||
None
|
None
|
||||||
} else {
|
} else {
|
||||||
|
@ -171,42 +196,34 @@ pub async fn register_route(body: Ruma<register::v3::Request>) -> Result<registe
|
||||||
};
|
};
|
||||||
|
|
||||||
// Create user
|
// Create user
|
||||||
services().users.create(&user_id, password)?;
|
db.users.create(&user_id, password)?;
|
||||||
|
|
||||||
// Default to pretty displayname
|
// Default to pretty displayname
|
||||||
let mut displayname = user_id.localpart().to_owned();
|
let displayname = format!("{} ⚡️", user_id.localpart());
|
||||||
|
db.users
|
||||||
// If enabled append lightning bolt to display name (default true)
|
|
||||||
if services().globals.enable_lightning_bolt() {
|
|
||||||
displayname.push_str(" ⚡️");
|
|
||||||
}
|
|
||||||
|
|
||||||
services()
|
|
||||||
.users
|
|
||||||
.set_displayname(&user_id, Some(displayname.clone()))?;
|
.set_displayname(&user_id, Some(displayname.clone()))?;
|
||||||
|
|
||||||
// Initial account data
|
// Initial account data
|
||||||
services().account_data.update(
|
db.account_data.update(
|
||||||
None,
|
None,
|
||||||
&user_id,
|
&user_id,
|
||||||
GlobalAccountDataEventType::PushRules.to_string().into(),
|
EventType::PushRules,
|
||||||
&serde_json::to_value(ruma::events::push_rules::PushRulesEvent {
|
&ruma::events::push_rules::PushRulesEvent {
|
||||||
content: ruma::events::push_rules::PushRulesEventContent {
|
content: ruma::events::push_rules::PushRulesEventContent {
|
||||||
global: push::Ruleset::server_default(&user_id),
|
global: push::Ruleset::server_default(&user_id),
|
||||||
},
|
},
|
||||||
})
|
},
|
||||||
.expect("to json always works"),
|
&db.globals,
|
||||||
)?;
|
)?;
|
||||||
|
|
||||||
// Inhibit login does not work for guests
|
// Inhibit login does not work for guests
|
||||||
if !is_guest && body.inhibit_login {
|
if !is_guest && body.inhibit_login {
|
||||||
return Ok(register::v3::Response {
|
return Ok(register::Response {
|
||||||
access_token: None,
|
access_token: None,
|
||||||
user_id,
|
user_id,
|
||||||
device_id: None,
|
device_id: None,
|
||||||
refresh_token: None,
|
}
|
||||||
expires_in: None,
|
.into());
|
||||||
});
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Generate new device id if the user didn't specify one
|
// Generate new device id if the user didn't specify one
|
||||||
|
@ -221,40 +238,31 @@ pub async fn register_route(body: Ruma<register::v3::Request>) -> Result<registe
|
||||||
let token = utils::random_string(TOKEN_LENGTH);
|
let token = utils::random_string(TOKEN_LENGTH);
|
||||||
|
|
||||||
// Create device for this account
|
// Create device for this account
|
||||||
services().users.create_device(
|
db.users.create_device(
|
||||||
&user_id,
|
&user_id,
|
||||||
&device_id,
|
&device_id,
|
||||||
&token,
|
&token,
|
||||||
body.initial_device_display_name.clone(),
|
body.initial_device_display_name.clone(),
|
||||||
)?;
|
)?;
|
||||||
|
|
||||||
info!("New user {} registered on this server.", user_id);
|
info!("{} registered on this server", user_id);
|
||||||
if !body.from_appservice && !is_guest {
|
|
||||||
services()
|
|
||||||
.admin
|
|
||||||
.send_message(RoomMessageEventContent::notice_plain(format!(
|
|
||||||
"New user {user_id} registered on this server."
|
|
||||||
)));
|
|
||||||
}
|
|
||||||
|
|
||||||
// If this is the first real user, grant them admin privileges
|
// If this is the first real user, grant them admin privileges
|
||||||
// Note: the server user, @conduit:servername, is generated first
|
// Note: the server user, @conduit:servername, is generated first
|
||||||
if services().users.count()? == 2 {
|
if db.users.count()? == 2 {
|
||||||
services()
|
make_user_admin(&db, &user_id, displayname).await?;
|
||||||
.admin
|
|
||||||
.make_user_admin(&user_id, displayname)
|
|
||||||
.await?;
|
|
||||||
|
|
||||||
warn!("Granting {} admin privileges as the first user", user_id);
|
warn!("Granting {} admin privileges as the first user", user_id);
|
||||||
}
|
}
|
||||||
|
|
||||||
Ok(register::v3::Response {
|
db.flush()?;
|
||||||
|
|
||||||
|
Ok(register::Response {
|
||||||
access_token: Some(token),
|
access_token: Some(token),
|
||||||
user_id,
|
user_id,
|
||||||
device_id: Some(device_id),
|
device_id: Some(device_id),
|
||||||
refresh_token: None,
|
}
|
||||||
expires_in: None,
|
.into())
|
||||||
})
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/// # `POST /_matrix/client/r0/account/password`
|
/// # `POST /_matrix/client/r0/account/password`
|
||||||
|
@ -271,9 +279,15 @@ pub async fn register_route(body: Ruma<register::v3::Request>) -> Result<registe
|
||||||
/// - Deletes device metadata (device id, device display name, last seen ip, last seen ts)
|
/// - Deletes device metadata (device id, device display name, last seen ip, last seen ts)
|
||||||
/// - Forgets to-device events
|
/// - Forgets to-device events
|
||||||
/// - Triggers device list updates
|
/// - Triggers device list updates
|
||||||
|
#[cfg_attr(
|
||||||
|
feature = "conduit_bin",
|
||||||
|
post("/_matrix/client/r0/account/password", data = "<body>")
|
||||||
|
)]
|
||||||
|
#[tracing::instrument(skip(db, body))]
|
||||||
pub async fn change_password_route(
|
pub async fn change_password_route(
|
||||||
body: Ruma<change_password::v3::Request>,
|
db: DatabaseGuard,
|
||||||
) -> Result<change_password::v3::Response> {
|
body: Ruma<change_password::Request<'_>>,
|
||||||
|
) -> ConduitResult<change_password::Response> {
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
let sender_device = body.sender_device.as_ref().expect("user is authenticated");
|
let sender_device = body.sender_device.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
|
@ -288,48 +302,45 @@ pub async fn change_password_route(
|
||||||
};
|
};
|
||||||
|
|
||||||
if let Some(auth) = &body.auth {
|
if let Some(auth) = &body.auth {
|
||||||
let (worked, uiaainfo) =
|
let (worked, uiaainfo) = db.uiaa.try_auth(
|
||||||
services()
|
sender_user,
|
||||||
.uiaa
|
sender_device,
|
||||||
.try_auth(sender_user, sender_device, auth, &uiaainfo)?;
|
auth,
|
||||||
|
&uiaainfo,
|
||||||
|
&db.users,
|
||||||
|
&db.globals,
|
||||||
|
)?;
|
||||||
if !worked {
|
if !worked {
|
||||||
return Err(Error::Uiaa(uiaainfo));
|
return Err(Error::Uiaa(uiaainfo));
|
||||||
}
|
}
|
||||||
// Success!
|
// Success!
|
||||||
} else if let Some(json) = body.json_body {
|
} else if let Some(json) = body.json_body {
|
||||||
uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH));
|
uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH));
|
||||||
services()
|
db.uiaa
|
||||||
.uiaa
|
|
||||||
.create(sender_user, sender_device, &uiaainfo, &json)?;
|
.create(sender_user, sender_device, &uiaainfo, &json)?;
|
||||||
return Err(Error::Uiaa(uiaainfo));
|
return Err(Error::Uiaa(uiaainfo));
|
||||||
} else {
|
} else {
|
||||||
return Err(Error::BadRequest(ErrorKind::NotJson, "Not json."));
|
return Err(Error::BadRequest(ErrorKind::NotJson, "Not json."));
|
||||||
}
|
}
|
||||||
|
|
||||||
services()
|
db.users
|
||||||
.users
|
|
||||||
.set_password(sender_user, Some(&body.new_password))?;
|
.set_password(sender_user, Some(&body.new_password))?;
|
||||||
|
|
||||||
if body.logout_devices {
|
if body.logout_devices {
|
||||||
// Logout all devices except the current one
|
// Logout all devices except the current one
|
||||||
for id in services()
|
for id in db
|
||||||
.users
|
.users
|
||||||
.all_device_ids(sender_user)
|
.all_device_ids(sender_user)
|
||||||
.filter_map(|id| id.ok())
|
.filter_map(|id| id.ok())
|
||||||
.filter(|id| id != sender_device)
|
.filter(|id| id != sender_device)
|
||||||
{
|
{
|
||||||
services().users.remove_device(sender_user, &id)?;
|
db.users.remove_device(sender_user, &id)?;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
info!("User {} changed their password.", sender_user);
|
db.flush()?;
|
||||||
services()
|
|
||||||
.admin
|
|
||||||
.send_message(RoomMessageEventContent::notice_plain(format!(
|
|
||||||
"User {sender_user} changed their password."
|
|
||||||
)));
|
|
||||||
|
|
||||||
Ok(change_password::v3::Response {})
|
Ok(change_password::Response {}.into())
|
||||||
}
|
}
|
||||||
|
|
||||||
/// # `GET _matrix/client/r0/account/whoami`
|
/// # `GET _matrix/client/r0/account/whoami`
|
||||||
|
@ -337,15 +348,17 @@ pub async fn change_password_route(
|
||||||
/// Get user_id of the sender user.
|
/// Get user_id of the sender user.
|
||||||
///
|
///
|
||||||
/// Note: Also works for Application Services
|
/// Note: Also works for Application Services
|
||||||
pub async fn whoami_route(body: Ruma<whoami::v3::Request>) -> Result<whoami::v3::Response> {
|
#[cfg_attr(
|
||||||
|
feature = "conduit_bin",
|
||||||
|
get("/_matrix/client/r0/account/whoami", data = "<body>")
|
||||||
|
)]
|
||||||
|
#[tracing::instrument(skip(body))]
|
||||||
|
pub async fn whoami_route(body: Ruma<whoami::Request>) -> ConduitResult<whoami::Response> {
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
let device_id = body.sender_device.as_ref().cloned();
|
Ok(whoami::Response {
|
||||||
|
|
||||||
Ok(whoami::v3::Response {
|
|
||||||
user_id: sender_user.clone(),
|
user_id: sender_user.clone(),
|
||||||
device_id,
|
}
|
||||||
is_guest: services().users.is_deactivated(sender_user)? && !body.from_appservice,
|
.into())
|
||||||
})
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/// # `POST /_matrix/client/r0/account/deactivate`
|
/// # `POST /_matrix/client/r0/account/deactivate`
|
||||||
|
@ -358,9 +371,15 @@ pub async fn whoami_route(body: Ruma<whoami::v3::Request>) -> Result<whoami::v3:
|
||||||
/// - Forgets all to-device events
|
/// - Forgets all to-device events
|
||||||
/// - Triggers device list updates
|
/// - Triggers device list updates
|
||||||
/// - Removes ability to log in again
|
/// - Removes ability to log in again
|
||||||
|
#[cfg_attr(
|
||||||
|
feature = "conduit_bin",
|
||||||
|
post("/_matrix/client/r0/account/deactivate", data = "<body>")
|
||||||
|
)]
|
||||||
|
#[tracing::instrument(skip(db, body))]
|
||||||
pub async fn deactivate_route(
|
pub async fn deactivate_route(
|
||||||
body: Ruma<deactivate::v3::Request>,
|
db: DatabaseGuard,
|
||||||
) -> Result<deactivate::v3::Response> {
|
body: Ruma<deactivate::Request<'_>>,
|
||||||
|
) -> ConduitResult<deactivate::Response> {
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
let sender_device = body.sender_device.as_ref().expect("user is authenticated");
|
let sender_device = body.sender_device.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
|
@ -375,79 +394,103 @@ pub async fn deactivate_route(
|
||||||
};
|
};
|
||||||
|
|
||||||
if let Some(auth) = &body.auth {
|
if let Some(auth) = &body.auth {
|
||||||
let (worked, uiaainfo) =
|
let (worked, uiaainfo) = db.uiaa.try_auth(
|
||||||
services()
|
sender_user,
|
||||||
.uiaa
|
sender_device,
|
||||||
.try_auth(sender_user, sender_device, auth, &uiaainfo)?;
|
auth,
|
||||||
|
&uiaainfo,
|
||||||
|
&db.users,
|
||||||
|
&db.globals,
|
||||||
|
)?;
|
||||||
if !worked {
|
if !worked {
|
||||||
return Err(Error::Uiaa(uiaainfo));
|
return Err(Error::Uiaa(uiaainfo));
|
||||||
}
|
}
|
||||||
// Success!
|
// Success!
|
||||||
} else if let Some(json) = body.json_body {
|
} else if let Some(json) = body.json_body {
|
||||||
uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH));
|
uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH));
|
||||||
services()
|
db.uiaa
|
||||||
.uiaa
|
|
||||||
.create(sender_user, sender_device, &uiaainfo, &json)?;
|
.create(sender_user, sender_device, &uiaainfo, &json)?;
|
||||||
return Err(Error::Uiaa(uiaainfo));
|
return Err(Error::Uiaa(uiaainfo));
|
||||||
} else {
|
} else {
|
||||||
return Err(Error::BadRequest(ErrorKind::NotJson, "Not json."));
|
return Err(Error::BadRequest(ErrorKind::NotJson, "Not json."));
|
||||||
}
|
}
|
||||||
|
|
||||||
// Make the user leave all rooms before deactivation
|
// Leave all joined rooms and reject all invitations
|
||||||
client_server::leave_all_rooms(sender_user).await?;
|
// TODO: work over federation invites
|
||||||
|
let all_rooms = db
|
||||||
|
.rooms
|
||||||
|
.rooms_joined(sender_user)
|
||||||
|
.chain(
|
||||||
|
db.rooms
|
||||||
|
.rooms_invited(sender_user)
|
||||||
|
.map(|t| t.map(|(r, _)| r)),
|
||||||
|
)
|
||||||
|
.collect::<Vec<_>>();
|
||||||
|
|
||||||
// Remove devices and mark account as deactivated
|
for room_id in all_rooms {
|
||||||
services().users.deactivate_account(sender_user)?;
|
let room_id = room_id?;
|
||||||
|
let event = RoomMemberEventContent {
|
||||||
|
membership: MembershipState::Leave,
|
||||||
|
displayname: None,
|
||||||
|
avatar_url: None,
|
||||||
|
is_direct: None,
|
||||||
|
third_party_invite: None,
|
||||||
|
blurhash: None,
|
||||||
|
reason: None,
|
||||||
|
join_authorized_via_users_server: None,
|
||||||
|
};
|
||||||
|
|
||||||
info!("User {} deactivated their account.", sender_user);
|
let mutex_state = Arc::clone(
|
||||||
services()
|
db.globals
|
||||||
.admin
|
.roomid_mutex_state
|
||||||
.send_message(RoomMessageEventContent::notice_plain(format!(
|
.write()
|
||||||
"User {sender_user} deactivated their account."
|
.unwrap()
|
||||||
)));
|
.entry(room_id.clone())
|
||||||
|
.or_default(),
|
||||||
|
);
|
||||||
|
let state_lock = mutex_state.lock().await;
|
||||||
|
|
||||||
Ok(deactivate::v3::Response {
|
db.rooms.build_and_append_pdu(
|
||||||
id_server_unbind_result: ThirdPartyIdRemovalStatus::NoSupport,
|
PduBuilder {
|
||||||
})
|
event_type: EventType::RoomMember,
|
||||||
|
content: to_raw_value(&event).expect("event is valid, we just created it"),
|
||||||
|
unsigned: None,
|
||||||
|
state_key: Some(sender_user.to_string()),
|
||||||
|
redacts: None,
|
||||||
|
},
|
||||||
|
sender_user,
|
||||||
|
&room_id,
|
||||||
|
&db,
|
||||||
|
&state_lock,
|
||||||
|
)?;
|
||||||
}
|
}
|
||||||
|
|
||||||
/// # `GET _matrix/client/v3/account/3pid`
|
// Remove devices and mark account as deactivated
|
||||||
|
db.users.deactivate_account(sender_user)?;
|
||||||
|
|
||||||
|
info!("{} deactivated their account", sender_user);
|
||||||
|
|
||||||
|
db.flush()?;
|
||||||
|
|
||||||
|
Ok(deactivate::Response {
|
||||||
|
id_server_unbind_result: ThirdPartyIdRemovalStatus::NoSupport,
|
||||||
|
}
|
||||||
|
.into())
|
||||||
|
}
|
||||||
|
|
||||||
|
/// # `GET _matrix/client/r0/account/3pid`
|
||||||
///
|
///
|
||||||
/// Get a list of third party identifiers associated with this account.
|
/// Get a list of third party identifiers associated with this account.
|
||||||
///
|
///
|
||||||
/// - Currently always returns empty list
|
/// - Currently always returns empty list
|
||||||
|
#[cfg_attr(
|
||||||
|
feature = "conduit_bin",
|
||||||
|
get("/_matrix/client/r0/account/3pid", data = "<body>")
|
||||||
|
)]
|
||||||
pub async fn third_party_route(
|
pub async fn third_party_route(
|
||||||
body: Ruma<get_3pids::v3::Request>,
|
body: Ruma<get_3pids::Request>,
|
||||||
) -> Result<get_3pids::v3::Response> {
|
) -> ConduitResult<get_3pids::Response> {
|
||||||
let _sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
let _sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
Ok(get_3pids::v3::Response::new(Vec::new()))
|
Ok(get_3pids::Response::new(Vec::new()).into())
|
||||||
}
|
|
||||||
|
|
||||||
/// # `POST /_matrix/client/v3/account/3pid/email/requestToken`
|
|
||||||
///
|
|
||||||
/// "This API should be used to request validation tokens when adding an email address to an account"
|
|
||||||
///
|
|
||||||
/// - 403 signals that The homeserver does not allow the third party identifier as a contact option.
|
|
||||||
pub async fn request_3pid_management_token_via_email_route(
|
|
||||||
_body: Ruma<request_3pid_management_token_via_email::v3::Request>,
|
|
||||||
) -> Result<request_3pid_management_token_via_email::v3::Response> {
|
|
||||||
Err(Error::BadRequest(
|
|
||||||
ErrorKind::ThreepidDenied,
|
|
||||||
"Third party identifier is not allowed",
|
|
||||||
))
|
|
||||||
}
|
|
||||||
|
|
||||||
/// # `POST /_matrix/client/v3/account/3pid/msisdn/requestToken`
|
|
||||||
///
|
|
||||||
/// "This API should be used to request validation tokens when adding an phone number to an account"
|
|
||||||
///
|
|
||||||
/// - 403 signals that The homeserver does not allow the third party identifier as a contact option.
|
|
||||||
pub async fn request_3pid_management_token_via_msisdn_route(
|
|
||||||
_body: Ruma<request_3pid_management_token_via_msisdn::v3::Request>,
|
|
||||||
) -> Result<request_3pid_management_token_via_msisdn::v3::Response> {
|
|
||||||
Err(Error::BadRequest(
|
|
||||||
ErrorKind::ThreepidDenied,
|
|
||||||
"Third party identifier is not allowed",
|
|
||||||
))
|
|
||||||
}
|
}
|
|
@ -1,46 +1,49 @@
|
||||||
use crate::{services, Error, Result, Ruma};
|
use crate::{database::DatabaseGuard, ConduitResult, Database, Error, Ruma};
|
||||||
use rand::seq::SliceRandom;
|
|
||||||
use regex::Regex;
|
use regex::Regex;
|
||||||
use ruma::{
|
use ruma::{
|
||||||
api::{
|
api::{
|
||||||
appservice,
|
appservice,
|
||||||
client::{
|
client::{
|
||||||
alias::{create_alias, delete_alias, get_alias},
|
|
||||||
error::ErrorKind,
|
error::ErrorKind,
|
||||||
|
r0::alias::{create_alias, delete_alias, get_alias},
|
||||||
},
|
},
|
||||||
federation,
|
federation,
|
||||||
},
|
},
|
||||||
OwnedRoomAliasId,
|
RoomAliasId,
|
||||||
};
|
};
|
||||||
|
|
||||||
|
#[cfg(feature = "conduit_bin")]
|
||||||
|
use rocket::{delete, get, put};
|
||||||
|
|
||||||
/// # `PUT /_matrix/client/r0/directory/room/{roomAlias}`
|
/// # `PUT /_matrix/client/r0/directory/room/{roomAlias}`
|
||||||
///
|
///
|
||||||
/// Creates a new room alias on this server.
|
/// Creates a new room alias on this server.
|
||||||
|
#[cfg_attr(
|
||||||
|
feature = "conduit_bin",
|
||||||
|
put("/_matrix/client/r0/directory/room/<_>", data = "<body>")
|
||||||
|
)]
|
||||||
|
#[tracing::instrument(skip(db, body))]
|
||||||
pub async fn create_alias_route(
|
pub async fn create_alias_route(
|
||||||
body: Ruma<create_alias::v3::Request>,
|
db: DatabaseGuard,
|
||||||
) -> Result<create_alias::v3::Response> {
|
body: Ruma<create_alias::Request<'_>>,
|
||||||
if body.room_alias.server_name() != services().globals.server_name() {
|
) -> ConduitResult<create_alias::Response> {
|
||||||
|
if body.room_alias.server_name() != db.globals.server_name() {
|
||||||
return Err(Error::BadRequest(
|
return Err(Error::BadRequest(
|
||||||
ErrorKind::InvalidParam,
|
ErrorKind::InvalidParam,
|
||||||
"Alias is from another server.",
|
"Alias is from another server.",
|
||||||
));
|
));
|
||||||
}
|
}
|
||||||
|
|
||||||
if services()
|
if db.rooms.id_from_alias(&body.room_alias)?.is_some() {
|
||||||
.rooms
|
|
||||||
.alias
|
|
||||||
.resolve_local_alias(&body.room_alias)?
|
|
||||||
.is_some()
|
|
||||||
{
|
|
||||||
return Err(Error::Conflict("Alias already exists."));
|
return Err(Error::Conflict("Alias already exists."));
|
||||||
}
|
}
|
||||||
|
|
||||||
services()
|
db.rooms
|
||||||
.rooms
|
.set_alias(&body.room_alias, Some(&body.room_id), &db.globals)?;
|
||||||
.alias
|
|
||||||
.set_alias(&body.room_alias, &body.room_id)?;
|
|
||||||
|
|
||||||
Ok(create_alias::v3::Response::new())
|
db.flush()?;
|
||||||
|
|
||||||
|
Ok(create_alias::Response::new().into())
|
||||||
}
|
}
|
||||||
|
|
||||||
/// # `DELETE /_matrix/client/r0/directory/room/{roomAlias}`
|
/// # `DELETE /_matrix/client/r0/directory/room/{roomAlias}`
|
||||||
|
@ -49,21 +52,29 @@ pub async fn create_alias_route(
|
||||||
///
|
///
|
||||||
/// - TODO: additional access control checks
|
/// - TODO: additional access control checks
|
||||||
/// - TODO: Update canonical alias event
|
/// - TODO: Update canonical alias event
|
||||||
|
#[cfg_attr(
|
||||||
|
feature = "conduit_bin",
|
||||||
|
delete("/_matrix/client/r0/directory/room/<_>", data = "<body>")
|
||||||
|
)]
|
||||||
|
#[tracing::instrument(skip(db, body))]
|
||||||
pub async fn delete_alias_route(
|
pub async fn delete_alias_route(
|
||||||
body: Ruma<delete_alias::v3::Request>,
|
db: DatabaseGuard,
|
||||||
) -> Result<delete_alias::v3::Response> {
|
body: Ruma<delete_alias::Request<'_>>,
|
||||||
if body.room_alias.server_name() != services().globals.server_name() {
|
) -> ConduitResult<delete_alias::Response> {
|
||||||
|
if body.room_alias.server_name() != db.globals.server_name() {
|
||||||
return Err(Error::BadRequest(
|
return Err(Error::BadRequest(
|
||||||
ErrorKind::InvalidParam,
|
ErrorKind::InvalidParam,
|
||||||
"Alias is from another server.",
|
"Alias is from another server.",
|
||||||
));
|
));
|
||||||
}
|
}
|
||||||
|
|
||||||
services().rooms.alias.remove_alias(&body.room_alias)?;
|
db.rooms.set_alias(&body.room_alias, None, &db.globals)?;
|
||||||
|
|
||||||
// TODO: update alt_aliases?
|
// TODO: update alt_aliases?
|
||||||
|
|
||||||
Ok(delete_alias::v3::Response::new())
|
db.flush()?;
|
||||||
|
|
||||||
|
Ok(delete_alias::Response::new().into())
|
||||||
}
|
}
|
||||||
|
|
||||||
/// # `GET /_matrix/client/r0/directory/room/{roomAlias}`
|
/// # `GET /_matrix/client/r0/directory/room/{roomAlias}`
|
||||||
|
@ -71,37 +82,40 @@ pub async fn delete_alias_route(
|
||||||
/// Resolve an alias locally or over federation.
|
/// Resolve an alias locally or over federation.
|
||||||
///
|
///
|
||||||
/// - TODO: Suggest more servers to join via
|
/// - TODO: Suggest more servers to join via
|
||||||
|
#[cfg_attr(
|
||||||
|
feature = "conduit_bin",
|
||||||
|
get("/_matrix/client/r0/directory/room/<_>", data = "<body>")
|
||||||
|
)]
|
||||||
|
#[tracing::instrument(skip(db, body))]
|
||||||
pub async fn get_alias_route(
|
pub async fn get_alias_route(
|
||||||
body: Ruma<get_alias::v3::Request>,
|
db: DatabaseGuard,
|
||||||
) -> Result<get_alias::v3::Response> {
|
body: Ruma<get_alias::Request<'_>>,
|
||||||
get_alias_helper(body.body.room_alias).await
|
) -> ConduitResult<get_alias::Response> {
|
||||||
|
get_alias_helper(&db, &body.room_alias).await
|
||||||
}
|
}
|
||||||
|
|
||||||
pub(crate) async fn get_alias_helper(
|
pub(crate) async fn get_alias_helper(
|
||||||
room_alias: OwnedRoomAliasId,
|
db: &Database,
|
||||||
) -> Result<get_alias::v3::Response> {
|
room_alias: &RoomAliasId,
|
||||||
if room_alias.server_name() != services().globals.server_name() {
|
) -> ConduitResult<get_alias::Response> {
|
||||||
let response = services()
|
if room_alias.server_name() != db.globals.server_name() {
|
||||||
|
let response = db
|
||||||
.sending
|
.sending
|
||||||
.send_federation_request(
|
.send_federation_request(
|
||||||
|
&db.globals,
|
||||||
room_alias.server_name(),
|
room_alias.server_name(),
|
||||||
federation::query::get_room_information::v1::Request {
|
federation::query::get_room_information::v1::Request { room_alias },
|
||||||
room_alias: room_alias.to_owned(),
|
|
||||||
},
|
|
||||||
)
|
)
|
||||||
.await?;
|
.await?;
|
||||||
|
|
||||||
let mut servers = response.servers;
|
return Ok(get_alias::Response::new(response.room_id, response.servers).into());
|
||||||
servers.shuffle(&mut rand::thread_rng());
|
|
||||||
|
|
||||||
return Ok(get_alias::v3::Response::new(response.room_id, servers));
|
|
||||||
}
|
}
|
||||||
|
|
||||||
let mut room_id = None;
|
let mut room_id = None;
|
||||||
match services().rooms.alias.resolve_local_alias(&room_alias)? {
|
match db.rooms.id_from_alias(room_alias)? {
|
||||||
Some(r) => room_id = Some(r),
|
Some(r) => room_id = Some(r),
|
||||||
None => {
|
None => {
|
||||||
for (_id, registration) in services().appservice.all()? {
|
for (_id, registration) in db.appservice.all()? {
|
||||||
let aliases = registration
|
let aliases = registration
|
||||||
.get("namespaces")
|
.get("namespaces")
|
||||||
.and_then(|ns| ns.get("aliases"))
|
.and_then(|ns| ns.get("aliases"))
|
||||||
|
@ -116,26 +130,19 @@ pub(crate) async fn get_alias_helper(
|
||||||
if aliases
|
if aliases
|
||||||
.iter()
|
.iter()
|
||||||
.any(|aliases| aliases.is_match(room_alias.as_str()))
|
.any(|aliases| aliases.is_match(room_alias.as_str()))
|
||||||
&& services()
|
&& db
|
||||||
.sending
|
.sending
|
||||||
.send_appservice_request(
|
.send_appservice_request(
|
||||||
|
&db.globals,
|
||||||
registration,
|
registration,
|
||||||
appservice::query::query_room_alias::v1::Request {
|
appservice::query::query_room_alias::v1::Request { room_alias },
|
||||||
room_alias: room_alias.clone(),
|
|
||||||
},
|
|
||||||
)
|
)
|
||||||
.await
|
.await
|
||||||
.is_ok()
|
.is_ok()
|
||||||
{
|
{
|
||||||
room_id = Some(
|
room_id = Some(db.rooms.id_from_alias(room_alias)?.ok_or_else(|| {
|
||||||
services()
|
|
||||||
.rooms
|
|
||||||
.alias
|
|
||||||
.resolve_local_alias(&room_alias)?
|
|
||||||
.ok_or_else(|| {
|
|
||||||
Error::bad_config("Appservice lied to us. Room does not exist.")
|
Error::bad_config("Appservice lied to us. Room does not exist.")
|
||||||
})?,
|
})?);
|
||||||
);
|
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -152,8 +159,5 @@ pub(crate) async fn get_alias_helper(
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
Ok(get_alias::v3::Response::new(
|
Ok(get_alias::Response::new(room_id, vec![db.globals.server_name().to_owned()]).into())
|
||||||
room_id,
|
|
||||||
vec![services().globals.server_name().to_owned()],
|
|
||||||
))
|
|
||||||
}
|
}
|
432
src/client_server/backup.rs
Normal file
432
src/client_server/backup.rs
Normal file
|
@ -0,0 +1,432 @@
|
||||||
|
use crate::{database::DatabaseGuard, ConduitResult, Error, Ruma};
|
||||||
|
use ruma::api::client::{
|
||||||
|
error::ErrorKind,
|
||||||
|
r0::backup::{
|
||||||
|
add_backup_key_session, add_backup_key_sessions, add_backup_keys, create_backup,
|
||||||
|
delete_backup, delete_backup_key_session, delete_backup_key_sessions, delete_backup_keys,
|
||||||
|
get_backup, get_backup_key_session, get_backup_key_sessions, get_backup_keys,
|
||||||
|
get_latest_backup, update_backup,
|
||||||
|
},
|
||||||
|
};
|
||||||
|
|
||||||
|
#[cfg(feature = "conduit_bin")]
|
||||||
|
use rocket::{delete, get, post, put};
|
||||||
|
|
||||||
|
/// # `POST /_matrix/client/r0/room_keys/version`
|
||||||
|
///
|
||||||
|
/// Creates a new backup.
|
||||||
|
#[cfg_attr(
|
||||||
|
feature = "conduit_bin",
|
||||||
|
post("/_matrix/client/unstable/room_keys/version", data = "<body>")
|
||||||
|
)]
|
||||||
|
#[tracing::instrument(skip(db, body))]
|
||||||
|
pub async fn create_backup_route(
|
||||||
|
db: DatabaseGuard,
|
||||||
|
body: Ruma<create_backup::Request>,
|
||||||
|
) -> ConduitResult<create_backup::Response> {
|
||||||
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
let version = db
|
||||||
|
.key_backups
|
||||||
|
.create_backup(sender_user, &body.algorithm, &db.globals)?;
|
||||||
|
|
||||||
|
db.flush()?;
|
||||||
|
|
||||||
|
Ok(create_backup::Response { version }.into())
|
||||||
|
}
|
||||||
|
|
||||||
|
/// # `PUT /_matrix/client/r0/room_keys/version/{version}`
|
||||||
|
///
|
||||||
|
/// Update information about an existing backup. Only `auth_data` can be modified.
|
||||||
|
#[cfg_attr(
|
||||||
|
feature = "conduit_bin",
|
||||||
|
put("/_matrix/client/unstable/room_keys/version/<_>", data = "<body>")
|
||||||
|
)]
|
||||||
|
#[tracing::instrument(skip(db, body))]
|
||||||
|
pub async fn update_backup_route(
|
||||||
|
db: DatabaseGuard,
|
||||||
|
body: Ruma<update_backup::Request<'_>>,
|
||||||
|
) -> ConduitResult<update_backup::Response> {
|
||||||
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
db.key_backups
|
||||||
|
.update_backup(sender_user, &body.version, &body.algorithm, &db.globals)?;
|
||||||
|
|
||||||
|
db.flush()?;
|
||||||
|
|
||||||
|
Ok(update_backup::Response {}.into())
|
||||||
|
}
|
||||||
|
|
||||||
|
/// # `GET /_matrix/client/r0/room_keys/version`
|
||||||
|
///
|
||||||
|
/// Get information about the latest backup version.
|
||||||
|
#[cfg_attr(
|
||||||
|
feature = "conduit_bin",
|
||||||
|
get("/_matrix/client/unstable/room_keys/version", data = "<body>")
|
||||||
|
)]
|
||||||
|
#[tracing::instrument(skip(db, body))]
|
||||||
|
pub async fn get_latest_backup_route(
|
||||||
|
db: DatabaseGuard,
|
||||||
|
body: Ruma<get_latest_backup::Request>,
|
||||||
|
) -> ConduitResult<get_latest_backup::Response> {
|
||||||
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
|
let (version, algorithm) =
|
||||||
|
db.key_backups
|
||||||
|
.get_latest_backup(sender_user)?
|
||||||
|
.ok_or(Error::BadRequest(
|
||||||
|
ErrorKind::NotFound,
|
||||||
|
"Key backup does not exist.",
|
||||||
|
))?;
|
||||||
|
|
||||||
|
Ok(get_latest_backup::Response {
|
||||||
|
algorithm,
|
||||||
|
count: (db.key_backups.count_keys(sender_user, &version)? as u32).into(),
|
||||||
|
etag: db.key_backups.get_etag(sender_user, &version)?,
|
||||||
|
version,
|
||||||
|
}
|
||||||
|
.into())
|
||||||
|
}
|
||||||
|
|
||||||
|
/// # `GET /_matrix/client/r0/room_keys/version`
|
||||||
|
///
|
||||||
|
/// Get information about an existing backup.
|
||||||
|
#[cfg_attr(
|
||||||
|
feature = "conduit_bin",
|
||||||
|
get("/_matrix/client/unstable/room_keys/version/<_>", data = "<body>")
|
||||||
|
)]
|
||||||
|
#[tracing::instrument(skip(db, body))]
|
||||||
|
pub async fn get_backup_route(
|
||||||
|
db: DatabaseGuard,
|
||||||
|
body: Ruma<get_backup::Request<'_>>,
|
||||||
|
) -> ConduitResult<get_backup::Response> {
|
||||||
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
let algorithm = db
|
||||||
|
.key_backups
|
||||||
|
.get_backup(sender_user, &body.version)?
|
||||||
|
.ok_or(Error::BadRequest(
|
||||||
|
ErrorKind::NotFound,
|
||||||
|
"Key backup does not exist.",
|
||||||
|
))?;
|
||||||
|
|
||||||
|
Ok(get_backup::Response {
|
||||||
|
algorithm,
|
||||||
|
count: (db.key_backups.count_keys(sender_user, &body.version)? as u32).into(),
|
||||||
|
etag: db.key_backups.get_etag(sender_user, &body.version)?,
|
||||||
|
version: body.version.to_owned(),
|
||||||
|
}
|
||||||
|
.into())
|
||||||
|
}
|
||||||
|
|
||||||
|
/// # `DELETE /_matrix/client/r0/room_keys/version/{version}`
|
||||||
|
///
|
||||||
|
/// Delete an existing key backup.
|
||||||
|
///
|
||||||
|
/// - Deletes both information about the backup, as well as all key data related to the backup
|
||||||
|
#[cfg_attr(
|
||||||
|
feature = "conduit_bin",
|
||||||
|
delete("/_matrix/client/unstable/room_keys/version/<_>", data = "<body>")
|
||||||
|
)]
|
||||||
|
#[tracing::instrument(skip(db, body))]
|
||||||
|
pub async fn delete_backup_route(
|
||||||
|
db: DatabaseGuard,
|
||||||
|
body: Ruma<delete_backup::Request<'_>>,
|
||||||
|
) -> ConduitResult<delete_backup::Response> {
|
||||||
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
|
db.key_backups.delete_backup(sender_user, &body.version)?;
|
||||||
|
|
||||||
|
db.flush()?;
|
||||||
|
|
||||||
|
Ok(delete_backup::Response {}.into())
|
||||||
|
}
|
||||||
|
|
||||||
|
/// # `PUT /_matrix/client/r0/room_keys/keys`
|
||||||
|
///
|
||||||
|
/// Add the received backup keys to the database.
|
||||||
|
///
|
||||||
|
/// - Only manipulating the most recently created version of the backup is allowed
|
||||||
|
/// - Adds the keys to the backup
|
||||||
|
/// - Returns the new number of keys in this backup and the etag
|
||||||
|
#[cfg_attr(
|
||||||
|
feature = "conduit_bin",
|
||||||
|
put("/_matrix/client/unstable/room_keys/keys", data = "<body>")
|
||||||
|
)]
|
||||||
|
#[tracing::instrument(skip(db, body))]
|
||||||
|
pub async fn add_backup_keys_route(
|
||||||
|
db: DatabaseGuard,
|
||||||
|
body: Ruma<add_backup_keys::Request<'_>>,
|
||||||
|
) -> ConduitResult<add_backup_keys::Response> {
|
||||||
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
|
if Some(&body.version)
|
||||||
|
!= db
|
||||||
|
.key_backups
|
||||||
|
.get_latest_backup_version(sender_user)?
|
||||||
|
.as_ref()
|
||||||
|
{
|
||||||
|
return Err(Error::BadRequest(
|
||||||
|
ErrorKind::InvalidParam,
|
||||||
|
"You may only manipulate the most recently created version of the backup.",
|
||||||
|
));
|
||||||
|
}
|
||||||
|
|
||||||
|
for (room_id, room) in &body.rooms {
|
||||||
|
for (session_id, key_data) in &room.sessions {
|
||||||
|
db.key_backups.add_key(
|
||||||
|
sender_user,
|
||||||
|
&body.version,
|
||||||
|
room_id,
|
||||||
|
session_id,
|
||||||
|
key_data,
|
||||||
|
&db.globals,
|
||||||
|
)?
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
db.flush()?;
|
||||||
|
|
||||||
|
Ok(add_backup_keys::Response {
|
||||||
|
count: (db.key_backups.count_keys(sender_user, &body.version)? as u32).into(),
|
||||||
|
etag: db.key_backups.get_etag(sender_user, &body.version)?,
|
||||||
|
}
|
||||||
|
.into())
|
||||||
|
}
|
||||||
|
|
||||||
|
/// # `PUT /_matrix/client/r0/room_keys/keys/{roomId}`
|
||||||
|
///
|
||||||
|
/// Add the received backup keys to the database.
|
||||||
|
///
|
||||||
|
/// - Only manipulating the most recently created version of the backup is allowed
|
||||||
|
/// - Adds the keys to the backup
|
||||||
|
/// - Returns the new number of keys in this backup and the etag
|
||||||
|
#[cfg_attr(
|
||||||
|
feature = "conduit_bin",
|
||||||
|
put("/_matrix/client/unstable/room_keys/keys/<_>", data = "<body>")
|
||||||
|
)]
|
||||||
|
#[tracing::instrument(skip(db, body))]
|
||||||
|
pub async fn add_backup_key_sessions_route(
|
||||||
|
db: DatabaseGuard,
|
||||||
|
body: Ruma<add_backup_key_sessions::Request<'_>>,
|
||||||
|
) -> ConduitResult<add_backup_key_sessions::Response> {
|
||||||
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
|
if Some(&body.version)
|
||||||
|
!= db
|
||||||
|
.key_backups
|
||||||
|
.get_latest_backup_version(sender_user)?
|
||||||
|
.as_ref()
|
||||||
|
{
|
||||||
|
return Err(Error::BadRequest(
|
||||||
|
ErrorKind::InvalidParam,
|
||||||
|
"You may only manipulate the most recently created version of the backup.",
|
||||||
|
));
|
||||||
|
}
|
||||||
|
|
||||||
|
for (session_id, key_data) in &body.sessions {
|
||||||
|
db.key_backups.add_key(
|
||||||
|
sender_user,
|
||||||
|
&body.version,
|
||||||
|
&body.room_id,
|
||||||
|
session_id,
|
||||||
|
key_data,
|
||||||
|
&db.globals,
|
||||||
|
)?
|
||||||
|
}
|
||||||
|
|
||||||
|
db.flush()?;
|
||||||
|
|
||||||
|
Ok(add_backup_key_sessions::Response {
|
||||||
|
count: (db.key_backups.count_keys(sender_user, &body.version)? as u32).into(),
|
||||||
|
etag: db.key_backups.get_etag(sender_user, &body.version)?,
|
||||||
|
}
|
||||||
|
.into())
|
||||||
|
}
|
||||||
|
|
||||||
|
/// # `PUT /_matrix/client/r0/room_keys/keys/{roomId}/{sessionId}`
|
||||||
|
///
|
||||||
|
/// Add the received backup key to the database.
|
||||||
|
///
|
||||||
|
/// - Only manipulating the most recently created version of the backup is allowed
|
||||||
|
/// - Adds the keys to the backup
|
||||||
|
/// - Returns the new number of keys in this backup and the etag
|
||||||
|
#[cfg_attr(
|
||||||
|
feature = "conduit_bin",
|
||||||
|
put("/_matrix/client/unstable/room_keys/keys/<_>/<_>", data = "<body>")
|
||||||
|
)]
|
||||||
|
#[tracing::instrument(skip(db, body))]
|
||||||
|
pub async fn add_backup_key_session_route(
|
||||||
|
db: DatabaseGuard,
|
||||||
|
body: Ruma<add_backup_key_session::Request<'_>>,
|
||||||
|
) -> ConduitResult<add_backup_key_session::Response> {
|
||||||
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
|
if Some(&body.version)
|
||||||
|
!= db
|
||||||
|
.key_backups
|
||||||
|
.get_latest_backup_version(sender_user)?
|
||||||
|
.as_ref()
|
||||||
|
{
|
||||||
|
return Err(Error::BadRequest(
|
||||||
|
ErrorKind::InvalidParam,
|
||||||
|
"You may only manipulate the most recently created version of the backup.",
|
||||||
|
));
|
||||||
|
}
|
||||||
|
|
||||||
|
db.key_backups.add_key(
|
||||||
|
sender_user,
|
||||||
|
&body.version,
|
||||||
|
&body.room_id,
|
||||||
|
&body.session_id,
|
||||||
|
&body.session_data,
|
||||||
|
&db.globals,
|
||||||
|
)?;
|
||||||
|
|
||||||
|
db.flush()?;
|
||||||
|
|
||||||
|
Ok(add_backup_key_session::Response {
|
||||||
|
count: (db.key_backups.count_keys(sender_user, &body.version)? as u32).into(),
|
||||||
|
etag: db.key_backups.get_etag(sender_user, &body.version)?,
|
||||||
|
}
|
||||||
|
.into())
|
||||||
|
}
|
||||||
|
|
||||||
|
/// # `GET /_matrix/client/r0/room_keys/keys`
|
||||||
|
///
|
||||||
|
/// Retrieves all keys from the backup.
|
||||||
|
#[cfg_attr(
|
||||||
|
feature = "conduit_bin",
|
||||||
|
get("/_matrix/client/unstable/room_keys/keys", data = "<body>")
|
||||||
|
)]
|
||||||
|
#[tracing::instrument(skip(db, body))]
|
||||||
|
pub async fn get_backup_keys_route(
|
||||||
|
db: DatabaseGuard,
|
||||||
|
body: Ruma<get_backup_keys::Request<'_>>,
|
||||||
|
) -> ConduitResult<get_backup_keys::Response> {
|
||||||
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
|
let rooms = db.key_backups.get_all(sender_user, &body.version)?;
|
||||||
|
|
||||||
|
Ok(get_backup_keys::Response { rooms }.into())
|
||||||
|
}
|
||||||
|
|
||||||
|
/// # `GET /_matrix/client/r0/room_keys/keys/{roomId}`
|
||||||
|
///
|
||||||
|
/// Retrieves all keys from the backup for a given room.
|
||||||
|
#[cfg_attr(
|
||||||
|
feature = "conduit_bin",
|
||||||
|
get("/_matrix/client/unstable/room_keys/keys/<_>", data = "<body>")
|
||||||
|
)]
|
||||||
|
#[tracing::instrument(skip(db, body))]
|
||||||
|
pub async fn get_backup_key_sessions_route(
|
||||||
|
db: DatabaseGuard,
|
||||||
|
body: Ruma<get_backup_key_sessions::Request<'_>>,
|
||||||
|
) -> ConduitResult<get_backup_key_sessions::Response> {
|
||||||
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
|
let sessions = db
|
||||||
|
.key_backups
|
||||||
|
.get_room(sender_user, &body.version, &body.room_id)?;
|
||||||
|
|
||||||
|
Ok(get_backup_key_sessions::Response { sessions }.into())
|
||||||
|
}
|
||||||
|
|
||||||
|
/// # `GET /_matrix/client/r0/room_keys/keys/{roomId}/{sessionId}`
|
||||||
|
///
|
||||||
|
/// Retrieves a key from the backup.
|
||||||
|
#[cfg_attr(
|
||||||
|
feature = "conduit_bin",
|
||||||
|
get("/_matrix/client/unstable/room_keys/keys/<_>/<_>", data = "<body>")
|
||||||
|
)]
|
||||||
|
#[tracing::instrument(skip(db, body))]
|
||||||
|
pub async fn get_backup_key_session_route(
|
||||||
|
db: DatabaseGuard,
|
||||||
|
body: Ruma<get_backup_key_session::Request<'_>>,
|
||||||
|
) -> ConduitResult<get_backup_key_session::Response> {
|
||||||
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
|
let key_data = db
|
||||||
|
.key_backups
|
||||||
|
.get_session(sender_user, &body.version, &body.room_id, &body.session_id)?
|
||||||
|
.ok_or(Error::BadRequest(
|
||||||
|
ErrorKind::NotFound,
|
||||||
|
"Backup key not found for this user's session.",
|
||||||
|
))?;
|
||||||
|
|
||||||
|
Ok(get_backup_key_session::Response { key_data }.into())
|
||||||
|
}
|
||||||
|
|
||||||
|
/// # `DELETE /_matrix/client/r0/room_keys/keys`
|
||||||
|
///
|
||||||
|
/// Delete the keys from the backup.
|
||||||
|
#[cfg_attr(
|
||||||
|
feature = "conduit_bin",
|
||||||
|
delete("/_matrix/client/unstable/room_keys/keys", data = "<body>")
|
||||||
|
)]
|
||||||
|
#[tracing::instrument(skip(db, body))]
|
||||||
|
pub async fn delete_backup_keys_route(
|
||||||
|
db: DatabaseGuard,
|
||||||
|
body: Ruma<delete_backup_keys::Request<'_>>,
|
||||||
|
) -> ConduitResult<delete_backup_keys::Response> {
|
||||||
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
|
db.key_backups.delete_all_keys(sender_user, &body.version)?;
|
||||||
|
|
||||||
|
db.flush()?;
|
||||||
|
|
||||||
|
Ok(delete_backup_keys::Response {
|
||||||
|
count: (db.key_backups.count_keys(sender_user, &body.version)? as u32).into(),
|
||||||
|
etag: db.key_backups.get_etag(sender_user, &body.version)?,
|
||||||
|
}
|
||||||
|
.into())
|
||||||
|
}
|
||||||
|
|
||||||
|
/// # `DELETE /_matrix/client/r0/room_keys/keys/{roomId}`
|
||||||
|
///
|
||||||
|
/// Delete the keys from the backup for a given room.
|
||||||
|
#[cfg_attr(
|
||||||
|
feature = "conduit_bin",
|
||||||
|
delete("/_matrix/client/unstable/room_keys/keys/<_>", data = "<body>")
|
||||||
|
)]
|
||||||
|
#[tracing::instrument(skip(db, body))]
|
||||||
|
pub async fn delete_backup_key_sessions_route(
|
||||||
|
db: DatabaseGuard,
|
||||||
|
body: Ruma<delete_backup_key_sessions::Request<'_>>,
|
||||||
|
) -> ConduitResult<delete_backup_key_sessions::Response> {
|
||||||
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
|
db.key_backups
|
||||||
|
.delete_room_keys(sender_user, &body.version, &body.room_id)?;
|
||||||
|
|
||||||
|
db.flush()?;
|
||||||
|
|
||||||
|
Ok(delete_backup_key_sessions::Response {
|
||||||
|
count: (db.key_backups.count_keys(sender_user, &body.version)? as u32).into(),
|
||||||
|
etag: db.key_backups.get_etag(sender_user, &body.version)?,
|
||||||
|
}
|
||||||
|
.into())
|
||||||
|
}
|
||||||
|
|
||||||
|
/// # `DELETE /_matrix/client/r0/room_keys/keys/{roomId}/{sessionId}`
|
||||||
|
///
|
||||||
|
/// Delete a key from the backup.
|
||||||
|
#[cfg_attr(
|
||||||
|
feature = "conduit_bin",
|
||||||
|
delete("/_matrix/client/unstable/room_keys/keys/<_>/<_>", data = "<body>")
|
||||||
|
)]
|
||||||
|
#[tracing::instrument(skip(db, body))]
|
||||||
|
pub async fn delete_backup_key_session_route(
|
||||||
|
db: DatabaseGuard,
|
||||||
|
body: Ruma<delete_backup_key_session::Request<'_>>,
|
||||||
|
) -> ConduitResult<delete_backup_key_session::Response> {
|
||||||
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
|
db.key_backups
|
||||||
|
.delete_room_key(sender_user, &body.version, &body.room_id, &body.session_id)?;
|
||||||
|
|
||||||
|
db.flush()?;
|
||||||
|
|
||||||
|
Ok(delete_backup_key_session::Response {
|
||||||
|
count: (db.key_backups.count_keys(sender_user, &body.version)? as u32).into(),
|
||||||
|
etag: db.key_backups.get_etag(sender_user, &body.version)?,
|
||||||
|
}
|
||||||
|
.into())
|
||||||
|
}
|
35
src/client_server/capabilities.rs
Normal file
35
src/client_server/capabilities.rs
Normal file
|
@ -0,0 +1,35 @@
|
||||||
|
use crate::{ConduitResult, Ruma};
|
||||||
|
use ruma::{
|
||||||
|
api::client::r0::capabilities::{
|
||||||
|
get_capabilities, Capabilities, RoomVersionStability, RoomVersionsCapability,
|
||||||
|
},
|
||||||
|
RoomVersionId,
|
||||||
|
};
|
||||||
|
use std::collections::BTreeMap;
|
||||||
|
|
||||||
|
#[cfg(feature = "conduit_bin")]
|
||||||
|
use rocket::get;
|
||||||
|
|
||||||
|
/// # `GET /_matrix/client/r0/capabilities`
|
||||||
|
///
|
||||||
|
/// Get information on the supported feature set and other relevent capabilities of this server.
|
||||||
|
#[cfg_attr(
|
||||||
|
feature = "conduit_bin",
|
||||||
|
get("/_matrix/client/r0/capabilities", data = "<_body>")
|
||||||
|
)]
|
||||||
|
#[tracing::instrument(skip(_body))]
|
||||||
|
pub async fn get_capabilities_route(
|
||||||
|
_body: Ruma<get_capabilities::Request>,
|
||||||
|
) -> ConduitResult<get_capabilities::Response> {
|
||||||
|
let mut available = BTreeMap::new();
|
||||||
|
available.insert(RoomVersionId::V5, RoomVersionStability::Stable);
|
||||||
|
available.insert(RoomVersionId::V6, RoomVersionStability::Stable);
|
||||||
|
|
||||||
|
let mut capabilities = Capabilities::new();
|
||||||
|
capabilities.room_versions = RoomVersionsCapability {
|
||||||
|
default: RoomVersionId::V6,
|
||||||
|
available,
|
||||||
|
};
|
||||||
|
|
||||||
|
Ok(get_capabilities::Response { capabilities }.into())
|
||||||
|
}
|
|
@ -1,11 +1,11 @@
|
||||||
use crate::{services, Error, Result, Ruma};
|
use crate::{database::DatabaseGuard, ConduitResult, Error, Ruma};
|
||||||
use ruma::{
|
use ruma::{
|
||||||
api::client::{
|
api::client::{
|
||||||
config::{
|
error::ErrorKind,
|
||||||
|
r0::config::{
|
||||||
get_global_account_data, get_room_account_data, set_global_account_data,
|
get_global_account_data, get_room_account_data, set_global_account_data,
|
||||||
set_room_account_data,
|
set_room_account_data,
|
||||||
},
|
},
|
||||||
error::ErrorKind,
|
|
||||||
},
|
},
|
||||||
events::{AnyGlobalAccountDataEventContent, AnyRoomAccountDataEventContent},
|
events::{AnyGlobalAccountDataEventContent, AnyRoomAccountDataEventContent},
|
||||||
serde::Raw,
|
serde::Raw,
|
||||||
|
@ -13,20 +13,29 @@ use ruma::{
|
||||||
use serde::Deserialize;
|
use serde::Deserialize;
|
||||||
use serde_json::{json, value::RawValue as RawJsonValue};
|
use serde_json::{json, value::RawValue as RawJsonValue};
|
||||||
|
|
||||||
|
#[cfg(feature = "conduit_bin")]
|
||||||
|
use rocket::{get, put};
|
||||||
|
|
||||||
/// # `PUT /_matrix/client/r0/user/{userId}/account_data/{type}`
|
/// # `PUT /_matrix/client/r0/user/{userId}/account_data/{type}`
|
||||||
///
|
///
|
||||||
/// Sets some account data for the sender user.
|
/// Sets some account data for the sender user.
|
||||||
|
#[cfg_attr(
|
||||||
|
feature = "conduit_bin",
|
||||||
|
put("/_matrix/client/r0/user/<_>/account_data/<_>", data = "<body>")
|
||||||
|
)]
|
||||||
|
#[tracing::instrument(skip(db, body))]
|
||||||
pub async fn set_global_account_data_route(
|
pub async fn set_global_account_data_route(
|
||||||
body: Ruma<set_global_account_data::v3::Request>,
|
db: DatabaseGuard,
|
||||||
) -> Result<set_global_account_data::v3::Response> {
|
body: Ruma<set_global_account_data::Request<'_>>,
|
||||||
|
) -> ConduitResult<set_global_account_data::Response> {
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
let data: serde_json::Value = serde_json::from_str(body.data.json().get())
|
let data: serde_json::Value = serde_json::from_str(body.data.get())
|
||||||
.map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Data is invalid."))?;
|
.map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Data is invalid."))?;
|
||||||
|
|
||||||
let event_type = body.event_type.to_string();
|
let event_type = body.event_type.to_string();
|
||||||
|
|
||||||
services().account_data.update(
|
db.account_data.update(
|
||||||
None,
|
None,
|
||||||
sender_user,
|
sender_user,
|
||||||
event_type.clone().into(),
|
event_type.clone().into(),
|
||||||
|
@ -34,25 +43,37 @@ pub async fn set_global_account_data_route(
|
||||||
"type": event_type,
|
"type": event_type,
|
||||||
"content": data,
|
"content": data,
|
||||||
}),
|
}),
|
||||||
|
&db.globals,
|
||||||
)?;
|
)?;
|
||||||
|
|
||||||
Ok(set_global_account_data::v3::Response {})
|
db.flush()?;
|
||||||
|
|
||||||
|
Ok(set_global_account_data::Response {}.into())
|
||||||
}
|
}
|
||||||
|
|
||||||
/// # `PUT /_matrix/client/r0/user/{userId}/rooms/{roomId}/account_data/{type}`
|
/// # `PUT /_matrix/client/r0/user/{userId}/rooms/{roomId}/account_data/{type}`
|
||||||
///
|
///
|
||||||
/// Sets some room account data for the sender user.
|
/// Sets some room account data for the sender user.
|
||||||
|
#[cfg_attr(
|
||||||
|
feature = "conduit_bin",
|
||||||
|
put(
|
||||||
|
"/_matrix/client/r0/user/<_>/rooms/<_>/account_data/<_>",
|
||||||
|
data = "<body>"
|
||||||
|
)
|
||||||
|
)]
|
||||||
|
#[tracing::instrument(skip(db, body))]
|
||||||
pub async fn set_room_account_data_route(
|
pub async fn set_room_account_data_route(
|
||||||
body: Ruma<set_room_account_data::v3::Request>,
|
db: DatabaseGuard,
|
||||||
) -> Result<set_room_account_data::v3::Response> {
|
body: Ruma<set_room_account_data::Request<'_>>,
|
||||||
|
) -> ConduitResult<set_room_account_data::Response> {
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
let data: serde_json::Value = serde_json::from_str(body.data.json().get())
|
let data: serde_json::Value = serde_json::from_str(body.data.get())
|
||||||
.map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Data is invalid."))?;
|
.map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Data is invalid."))?;
|
||||||
|
|
||||||
let event_type = body.event_type.to_string();
|
let event_type = body.event_type.to_string();
|
||||||
|
|
||||||
services().account_data.update(
|
db.account_data.update(
|
||||||
Some(&body.room_id),
|
Some(&body.room_id),
|
||||||
sender_user,
|
sender_user,
|
||||||
event_type.clone().into(),
|
event_type.clone().into(),
|
||||||
|
@ -60,49 +81,71 @@ pub async fn set_room_account_data_route(
|
||||||
"type": event_type,
|
"type": event_type,
|
||||||
"content": data,
|
"content": data,
|
||||||
}),
|
}),
|
||||||
|
&db.globals,
|
||||||
)?;
|
)?;
|
||||||
|
|
||||||
Ok(set_room_account_data::v3::Response {})
|
db.flush()?;
|
||||||
|
|
||||||
|
Ok(set_room_account_data::Response {}.into())
|
||||||
}
|
}
|
||||||
|
|
||||||
/// # `GET /_matrix/client/r0/user/{userId}/account_data/{type}`
|
/// # `GET /_matrix/client/r0/user/{userId}/account_data/{type}`
|
||||||
///
|
///
|
||||||
/// Gets some account data for the sender user.
|
/// Gets some account data for the sender user.
|
||||||
|
#[cfg_attr(
|
||||||
|
feature = "conduit_bin",
|
||||||
|
get("/_matrix/client/r0/user/<_>/account_data/<_>", data = "<body>")
|
||||||
|
)]
|
||||||
|
#[tracing::instrument(skip(db, body))]
|
||||||
pub async fn get_global_account_data_route(
|
pub async fn get_global_account_data_route(
|
||||||
body: Ruma<get_global_account_data::v3::Request>,
|
db: DatabaseGuard,
|
||||||
) -> Result<get_global_account_data::v3::Response> {
|
body: Ruma<get_global_account_data::Request<'_>>,
|
||||||
|
) -> ConduitResult<get_global_account_data::Response> {
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
let event: Box<RawJsonValue> = services()
|
let event: Box<RawJsonValue> = db
|
||||||
.account_data
|
.account_data
|
||||||
.get(None, sender_user, body.event_type.to_string().into())?
|
.get(None, sender_user, body.event_type.clone().into())?
|
||||||
.ok_or(Error::BadRequest(ErrorKind::NotFound, "Data not found."))?;
|
.ok_or(Error::BadRequest(ErrorKind::NotFound, "Data not found."))?;
|
||||||
|
|
||||||
let account_data = serde_json::from_str::<ExtractGlobalEventContent>(event.get())
|
let account_data = serde_json::from_str::<ExtractGlobalEventContent>(event.get())
|
||||||
.map_err(|_| Error::bad_database("Invalid account data event in db."))?
|
.map_err(|_| Error::bad_database("Invalid account data event in db."))?
|
||||||
.content;
|
.content;
|
||||||
|
|
||||||
Ok(get_global_account_data::v3::Response { account_data })
|
Ok(get_global_account_data::Response { account_data }.into())
|
||||||
}
|
}
|
||||||
|
|
||||||
/// # `GET /_matrix/client/r0/user/{userId}/rooms/{roomId}/account_data/{type}`
|
/// # `GET /_matrix/client/r0/user/{userId}/rooms/{roomId}/account_data/{type}`
|
||||||
///
|
///
|
||||||
/// Gets some room account data for the sender user.
|
/// Gets some room account data for the sender user.
|
||||||
|
#[cfg_attr(
|
||||||
|
feature = "conduit_bin",
|
||||||
|
get(
|
||||||
|
"/_matrix/client/r0/user/<_>/rooms/<_>/account_data/<_>",
|
||||||
|
data = "<body>"
|
||||||
|
)
|
||||||
|
)]
|
||||||
|
#[tracing::instrument(skip(db, body))]
|
||||||
pub async fn get_room_account_data_route(
|
pub async fn get_room_account_data_route(
|
||||||
body: Ruma<get_room_account_data::v3::Request>,
|
db: DatabaseGuard,
|
||||||
) -> Result<get_room_account_data::v3::Response> {
|
body: Ruma<get_room_account_data::Request<'_>>,
|
||||||
|
) -> ConduitResult<get_room_account_data::Response> {
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
let event: Box<RawJsonValue> = services()
|
let event: Box<RawJsonValue> = db
|
||||||
.account_data
|
.account_data
|
||||||
.get(Some(&body.room_id), sender_user, body.event_type.clone())?
|
.get(
|
||||||
|
Some(&body.room_id),
|
||||||
|
sender_user,
|
||||||
|
body.event_type.clone().into(),
|
||||||
|
)?
|
||||||
.ok_or(Error::BadRequest(ErrorKind::NotFound, "Data not found."))?;
|
.ok_or(Error::BadRequest(ErrorKind::NotFound, "Data not found."))?;
|
||||||
|
|
||||||
let account_data = serde_json::from_str::<ExtractRoomEventContent>(event.get())
|
let account_data = serde_json::from_str::<ExtractRoomEventContent>(event.get())
|
||||||
.map_err(|_| Error::bad_database("Invalid account data event in db."))?
|
.map_err(|_| Error::bad_database("Invalid account data event in db."))?
|
||||||
.content;
|
.content;
|
||||||
|
|
||||||
Ok(get_room_account_data::v3::Response { account_data })
|
Ok(get_room_account_data::Response { account_data }.into())
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Deserialize)]
|
#[derive(Deserialize)]
|
|
@ -1,46 +1,60 @@
|
||||||
use crate::{services, Error, Result, Ruma};
|
use crate::{database::DatabaseGuard, ConduitResult, Error, Ruma};
|
||||||
use ruma::{
|
use ruma::{
|
||||||
api::client::{context::get_context, error::ErrorKind, filter::LazyLoadOptions},
|
api::client::{
|
||||||
events::StateEventType,
|
error::ErrorKind,
|
||||||
|
r0::{context::get_context, filter::LazyLoadOptions},
|
||||||
|
},
|
||||||
|
events::EventType,
|
||||||
};
|
};
|
||||||
use std::collections::HashSet;
|
use std::{collections::HashSet, convert::TryFrom};
|
||||||
use tracing::error;
|
use tracing::error;
|
||||||
|
|
||||||
|
#[cfg(feature = "conduit_bin")]
|
||||||
|
use rocket::get;
|
||||||
|
|
||||||
/// # `GET /_matrix/client/r0/rooms/{roomId}/context`
|
/// # `GET /_matrix/client/r0/rooms/{roomId}/context`
|
||||||
///
|
///
|
||||||
/// Allows loading room history around an event.
|
/// Allows loading room history around an event.
|
||||||
///
|
///
|
||||||
/// - Only works if the user is joined (TODO: always allow, but only show events if the user was
|
/// - Only works if the user is joined (TODO: always allow, but only show events if the user was
|
||||||
/// joined, depending on history_visibility)
|
/// joined, depending on history_visibility)
|
||||||
|
#[cfg_attr(
|
||||||
|
feature = "conduit_bin",
|
||||||
|
get("/_matrix/client/r0/rooms/<_>/context/<_>", data = "<body>")
|
||||||
|
)]
|
||||||
|
#[tracing::instrument(skip(db, body))]
|
||||||
pub async fn get_context_route(
|
pub async fn get_context_route(
|
||||||
body: Ruma<get_context::v3::Request>,
|
db: DatabaseGuard,
|
||||||
) -> Result<get_context::v3::Response> {
|
body: Ruma<get_context::Request<'_>>,
|
||||||
|
) -> ConduitResult<get_context::Response> {
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
let sender_device = body.sender_device.as_ref().expect("user is authenticated");
|
let sender_device = body.sender_device.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
let (lazy_load_enabled, lazy_load_send_redundant) = match &body.filter.lazy_load_options {
|
// Load filter
|
||||||
|
let filter = body.filter.clone().unwrap_or_default();
|
||||||
|
|
||||||
|
let (lazy_load_enabled, lazy_load_send_redundant) = match filter.lazy_load_options {
|
||||||
LazyLoadOptions::Enabled {
|
LazyLoadOptions::Enabled {
|
||||||
include_redundant_members,
|
include_redundant_members: redundant,
|
||||||
} => (true, *include_redundant_members),
|
} => (true, redundant),
|
||||||
_ => (false, false),
|
_ => (false, false),
|
||||||
};
|
};
|
||||||
|
|
||||||
let mut lazy_loaded = HashSet::new();
|
let mut lazy_loaded = HashSet::new();
|
||||||
|
|
||||||
let base_token = services()
|
let base_pdu_id = db
|
||||||
.rooms
|
.rooms
|
||||||
.timeline
|
.get_pdu_id(&body.event_id)?
|
||||||
.get_pdu_count(&body.event_id)?
|
|
||||||
.ok_or(Error::BadRequest(
|
.ok_or(Error::BadRequest(
|
||||||
ErrorKind::NotFound,
|
ErrorKind::NotFound,
|
||||||
"Base event id not found.",
|
"Base event id not found.",
|
||||||
))?;
|
))?;
|
||||||
|
|
||||||
let base_event =
|
let base_token = db.rooms.pdu_count(&base_pdu_id)?;
|
||||||
services()
|
|
||||||
|
let base_event = db
|
||||||
.rooms
|
.rooms
|
||||||
.timeline
|
.get_pdu_from_id(&base_pdu_id)?
|
||||||
.get_pdu(&body.event_id)?
|
|
||||||
.ok_or(Error::BadRequest(
|
.ok_or(Error::BadRequest(
|
||||||
ErrorKind::NotFound,
|
ErrorKind::NotFound,
|
||||||
"Base event not found.",
|
"Base event not found.",
|
||||||
|
@ -48,18 +62,14 @@ pub async fn get_context_route(
|
||||||
|
|
||||||
let room_id = base_event.room_id.clone();
|
let room_id = base_event.room_id.clone();
|
||||||
|
|
||||||
if !services()
|
if !db.rooms.is_joined(sender_user, &room_id)? {
|
||||||
.rooms
|
|
||||||
.state_accessor
|
|
||||||
.user_can_see_event(sender_user, &room_id, &body.event_id)?
|
|
||||||
{
|
|
||||||
return Err(Error::BadRequest(
|
return Err(Error::BadRequest(
|
||||||
ErrorKind::Forbidden,
|
ErrorKind::Forbidden,
|
||||||
"You don't have permission to view this event.",
|
"You don't have permission to view this room.",
|
||||||
));
|
));
|
||||||
}
|
}
|
||||||
|
|
||||||
if !services().rooms.lazy_loading.lazy_load_was_sent_before(
|
if !db.rooms.lazy_load_was_sent_before(
|
||||||
sender_user,
|
sender_user,
|
||||||
sender_device,
|
sender_device,
|
||||||
&room_id,
|
&room_id,
|
||||||
|
@ -69,28 +79,22 @@ pub async fn get_context_route(
|
||||||
lazy_loaded.insert(base_event.sender.as_str().to_owned());
|
lazy_loaded.insert(base_event.sender.as_str().to_owned());
|
||||||
}
|
}
|
||||||
|
|
||||||
// Use limit with maximum 100
|
|
||||||
let limit = u64::from(body.limit).min(100) as usize;
|
|
||||||
|
|
||||||
let base_event = base_event.to_room_event();
|
let base_event = base_event.to_room_event();
|
||||||
|
|
||||||
let events_before: Vec<_> = services()
|
let events_before: Vec<_> = db
|
||||||
.rooms
|
.rooms
|
||||||
.timeline
|
|
||||||
.pdus_until(sender_user, &room_id, base_token)?
|
.pdus_until(sender_user, &room_id, base_token)?
|
||||||
.take(limit / 2)
|
.take(
|
||||||
|
u32::try_from(body.limit).map_err(|_| {
|
||||||
|
Error::BadRequest(ErrorKind::InvalidParam, "Limit value is invalid.")
|
||||||
|
})? as usize
|
||||||
|
/ 2,
|
||||||
|
)
|
||||||
.filter_map(|r| r.ok()) // Remove buggy events
|
.filter_map(|r| r.ok()) // Remove buggy events
|
||||||
.filter(|(_, pdu)| {
|
|
||||||
services()
|
|
||||||
.rooms
|
|
||||||
.state_accessor
|
|
||||||
.user_can_see_event(sender_user, &room_id, &pdu.event_id)
|
|
||||||
.unwrap_or(false)
|
|
||||||
})
|
|
||||||
.collect();
|
.collect();
|
||||||
|
|
||||||
for (_, event) in &events_before {
|
for (_, event) in &events_before {
|
||||||
if !services().rooms.lazy_loading.lazy_load_was_sent_before(
|
if !db.rooms.lazy_load_was_sent_before(
|
||||||
sender_user,
|
sender_user,
|
||||||
sender_device,
|
sender_device,
|
||||||
&room_id,
|
&room_id,
|
||||||
|
@ -103,31 +107,28 @@ pub async fn get_context_route(
|
||||||
|
|
||||||
let start_token = events_before
|
let start_token = events_before
|
||||||
.last()
|
.last()
|
||||||
.map(|(count, _)| count.stringify())
|
.and_then(|(pdu_id, _)| db.rooms.pdu_count(pdu_id).ok())
|
||||||
.unwrap_or_else(|| base_token.stringify());
|
.map(|count| count.to_string());
|
||||||
|
|
||||||
let events_before: Vec<_> = events_before
|
let events_before: Vec<_> = events_before
|
||||||
.into_iter()
|
.into_iter()
|
||||||
.map(|(_, pdu)| pdu.to_room_event())
|
.map(|(_, pdu)| pdu.to_room_event())
|
||||||
.collect();
|
.collect();
|
||||||
|
|
||||||
let events_after: Vec<_> = services()
|
let events_after: Vec<_> = db
|
||||||
.rooms
|
.rooms
|
||||||
.timeline
|
|
||||||
.pdus_after(sender_user, &room_id, base_token)?
|
.pdus_after(sender_user, &room_id, base_token)?
|
||||||
.take(limit / 2)
|
.take(
|
||||||
|
u32::try_from(body.limit).map_err(|_| {
|
||||||
|
Error::BadRequest(ErrorKind::InvalidParam, "Limit value is invalid.")
|
||||||
|
})? as usize
|
||||||
|
/ 2,
|
||||||
|
)
|
||||||
.filter_map(|r| r.ok()) // Remove buggy events
|
.filter_map(|r| r.ok()) // Remove buggy events
|
||||||
.filter(|(_, pdu)| {
|
|
||||||
services()
|
|
||||||
.rooms
|
|
||||||
.state_accessor
|
|
||||||
.user_can_see_event(sender_user, &room_id, &pdu.event_id)
|
|
||||||
.unwrap_or(false)
|
|
||||||
})
|
|
||||||
.collect();
|
.collect();
|
||||||
|
|
||||||
for (_, event) in &events_after {
|
for (_, event) in &events_after {
|
||||||
if !services().rooms.lazy_loading.lazy_load_was_sent_before(
|
if !db.rooms.lazy_load_was_sent_before(
|
||||||
sender_user,
|
sender_user,
|
||||||
sender_device,
|
sender_device,
|
||||||
&room_id,
|
&room_id,
|
||||||
|
@ -138,29 +139,24 @@ pub async fn get_context_route(
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
let shortstatehash = match services().rooms.state_accessor.pdu_shortstatehash(
|
let shortstatehash = match db.rooms.pdu_shortstatehash(
|
||||||
events_after
|
events_after
|
||||||
.last()
|
.last()
|
||||||
.map_or(&*body.event_id, |(_, e)| &*e.event_id),
|
.map_or(&*body.event_id, |(_, e)| &*e.event_id),
|
||||||
)? {
|
)? {
|
||||||
Some(s) => s,
|
Some(s) => s,
|
||||||
None => services()
|
None => db
|
||||||
.rooms
|
.rooms
|
||||||
.state
|
.current_shortstatehash(&room_id)?
|
||||||
.get_room_shortstatehash(&room_id)?
|
|
||||||
.expect("All rooms have state"),
|
.expect("All rooms have state"),
|
||||||
};
|
};
|
||||||
|
|
||||||
let state_ids = services()
|
let state_ids = db.rooms.state_full_ids(shortstatehash)?;
|
||||||
.rooms
|
|
||||||
.state_accessor
|
|
||||||
.state_full_ids(shortstatehash)
|
|
||||||
.await?;
|
|
||||||
|
|
||||||
let end_token = events_after
|
let end_token = events_after
|
||||||
.last()
|
.last()
|
||||||
.map(|(count, _)| count.stringify())
|
.and_then(|(pdu_id, _)| db.rooms.pdu_count(pdu_id).ok())
|
||||||
.unwrap_or_else(|| base_token.stringify());
|
.map(|count| count.to_string());
|
||||||
|
|
||||||
let events_after: Vec<_> = events_after
|
let events_after: Vec<_> = events_after
|
||||||
.into_iter()
|
.into_iter()
|
||||||
|
@ -170,13 +166,10 @@ pub async fn get_context_route(
|
||||||
let mut state = Vec::new();
|
let mut state = Vec::new();
|
||||||
|
|
||||||
for (shortstatekey, id) in state_ids {
|
for (shortstatekey, id) in state_ids {
|
||||||
let (event_type, state_key) = services()
|
let (event_type, state_key) = db.rooms.get_statekey_from_short(shortstatekey)?;
|
||||||
.rooms
|
|
||||||
.short
|
|
||||||
.get_statekey_from_short(shortstatekey)?;
|
|
||||||
|
|
||||||
if event_type != StateEventType::RoomMember {
|
if event_type != EventType::RoomMember {
|
||||||
let pdu = match services().rooms.timeline.get_pdu(&id)? {
|
let pdu = match db.rooms.get_pdu(&id)? {
|
||||||
Some(pdu) => pdu,
|
Some(pdu) => pdu,
|
||||||
None => {
|
None => {
|
||||||
error!("Pdu in state not found: {}", id);
|
error!("Pdu in state not found: {}", id);
|
||||||
|
@ -185,7 +178,7 @@ pub async fn get_context_route(
|
||||||
};
|
};
|
||||||
state.push(pdu.to_state_event());
|
state.push(pdu.to_state_event());
|
||||||
} else if !lazy_load_enabled || lazy_loaded.contains(&state_key) {
|
} else if !lazy_load_enabled || lazy_loaded.contains(&state_key) {
|
||||||
let pdu = match services().rooms.timeline.get_pdu(&id)? {
|
let pdu = match db.rooms.get_pdu(&id)? {
|
||||||
Some(pdu) => pdu,
|
Some(pdu) => pdu,
|
||||||
None => {
|
None => {
|
||||||
error!("Pdu in state not found: {}", id);
|
error!("Pdu in state not found: {}", id);
|
||||||
|
@ -196,14 +189,14 @@ pub async fn get_context_route(
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
let resp = get_context::v3::Response {
|
let resp = get_context::Response {
|
||||||
start: Some(start_token),
|
start: start_token,
|
||||||
end: Some(end_token),
|
end: end_token,
|
||||||
events_before,
|
events_before,
|
||||||
event: Some(base_event),
|
event: Some(base_event),
|
||||||
events_after,
|
events_after,
|
||||||
state,
|
state,
|
||||||
};
|
};
|
||||||
|
|
||||||
Ok(resp)
|
Ok(resp.into())
|
||||||
}
|
}
|
|
@ -1,65 +1,88 @@
|
||||||
use crate::{services, utils, Error, Result, Ruma};
|
use crate::{database::DatabaseGuard, utils, ConduitResult, Error, Ruma};
|
||||||
use ruma::api::client::{
|
use ruma::api::client::{
|
||||||
device::{self, delete_device, delete_devices, get_device, get_devices, update_device},
|
|
||||||
error::ErrorKind,
|
error::ErrorKind,
|
||||||
|
r0::{
|
||||||
|
device::{self, delete_device, delete_devices, get_device, get_devices, update_device},
|
||||||
uiaa::{AuthFlow, AuthType, UiaaInfo},
|
uiaa::{AuthFlow, AuthType, UiaaInfo},
|
||||||
|
},
|
||||||
};
|
};
|
||||||
|
|
||||||
use super::SESSION_ID_LENGTH;
|
use super::SESSION_ID_LENGTH;
|
||||||
|
#[cfg(feature = "conduit_bin")]
|
||||||
|
use rocket::{delete, get, post, put};
|
||||||
|
|
||||||
/// # `GET /_matrix/client/r0/devices`
|
/// # `GET /_matrix/client/r0/devices`
|
||||||
///
|
///
|
||||||
/// Get metadata on all devices of the sender user.
|
/// Get metadata on all devices of the sender user.
|
||||||
|
#[cfg_attr(
|
||||||
|
feature = "conduit_bin",
|
||||||
|
get("/_matrix/client/r0/devices", data = "<body>")
|
||||||
|
)]
|
||||||
|
#[tracing::instrument(skip(db, body))]
|
||||||
pub async fn get_devices_route(
|
pub async fn get_devices_route(
|
||||||
body: Ruma<get_devices::v3::Request>,
|
db: DatabaseGuard,
|
||||||
) -> Result<get_devices::v3::Response> {
|
body: Ruma<get_devices::Request>,
|
||||||
|
) -> ConduitResult<get_devices::Response> {
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
let devices: Vec<device::Device> = services()
|
let devices: Vec<device::Device> = db
|
||||||
.users
|
.users
|
||||||
.all_devices_metadata(sender_user)
|
.all_devices_metadata(sender_user)
|
||||||
.filter_map(|r| r.ok()) // Filter out buggy devices
|
.filter_map(|r| r.ok()) // Filter out buggy devices
|
||||||
.collect();
|
.collect();
|
||||||
|
|
||||||
Ok(get_devices::v3::Response { devices })
|
Ok(get_devices::Response { devices }.into())
|
||||||
}
|
}
|
||||||
|
|
||||||
/// # `GET /_matrix/client/r0/devices/{deviceId}`
|
/// # `GET /_matrix/client/r0/devices/{deviceId}`
|
||||||
///
|
///
|
||||||
/// Get metadata on a single device of the sender user.
|
/// Get metadata on a single device of the sender user.
|
||||||
|
#[cfg_attr(
|
||||||
|
feature = "conduit_bin",
|
||||||
|
get("/_matrix/client/r0/devices/<_>", data = "<body>")
|
||||||
|
)]
|
||||||
|
#[tracing::instrument(skip(db, body))]
|
||||||
pub async fn get_device_route(
|
pub async fn get_device_route(
|
||||||
body: Ruma<get_device::v3::Request>,
|
db: DatabaseGuard,
|
||||||
) -> Result<get_device::v3::Response> {
|
body: Ruma<get_device::Request<'_>>,
|
||||||
|
) -> ConduitResult<get_device::Response> {
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
let device = services()
|
let device = db
|
||||||
.users
|
.users
|
||||||
.get_device_metadata(sender_user, &body.body.device_id)?
|
.get_device_metadata(sender_user, &body.body.device_id)?
|
||||||
.ok_or(Error::BadRequest(ErrorKind::NotFound, "Device not found."))?;
|
.ok_or(Error::BadRequest(ErrorKind::NotFound, "Device not found."))?;
|
||||||
|
|
||||||
Ok(get_device::v3::Response { device })
|
Ok(get_device::Response { device }.into())
|
||||||
}
|
}
|
||||||
|
|
||||||
/// # `PUT /_matrix/client/r0/devices/{deviceId}`
|
/// # `PUT /_matrix/client/r0/devices/{deviceId}`
|
||||||
///
|
///
|
||||||
/// Updates the metadata on a given device of the sender user.
|
/// Updates the metadata on a given device of the sender user.
|
||||||
|
#[cfg_attr(
|
||||||
|
feature = "conduit_bin",
|
||||||
|
put("/_matrix/client/r0/devices/<_>", data = "<body>")
|
||||||
|
)]
|
||||||
|
#[tracing::instrument(skip(db, body))]
|
||||||
pub async fn update_device_route(
|
pub async fn update_device_route(
|
||||||
body: Ruma<update_device::v3::Request>,
|
db: DatabaseGuard,
|
||||||
) -> Result<update_device::v3::Response> {
|
body: Ruma<update_device::Request<'_>>,
|
||||||
|
) -> ConduitResult<update_device::Response> {
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
let mut device = services()
|
let mut device = db
|
||||||
.users
|
.users
|
||||||
.get_device_metadata(sender_user, &body.device_id)?
|
.get_device_metadata(sender_user, &body.device_id)?
|
||||||
.ok_or(Error::BadRequest(ErrorKind::NotFound, "Device not found."))?;
|
.ok_or(Error::BadRequest(ErrorKind::NotFound, "Device not found."))?;
|
||||||
|
|
||||||
device.display_name = body.display_name.clone();
|
device.display_name = body.display_name.clone();
|
||||||
|
|
||||||
services()
|
db.users
|
||||||
.users
|
|
||||||
.update_device_metadata(sender_user, &body.device_id, &device)?;
|
.update_device_metadata(sender_user, &body.device_id, &device)?;
|
||||||
|
|
||||||
Ok(update_device::v3::Response {})
|
db.flush()?;
|
||||||
|
|
||||||
|
Ok(update_device::Response {}.into())
|
||||||
}
|
}
|
||||||
|
|
||||||
/// # `DELETE /_matrix/client/r0/devices/{deviceId}`
|
/// # `DELETE /_matrix/client/r0/devices/{deviceId}`
|
||||||
|
@ -71,9 +94,15 @@ pub async fn update_device_route(
|
||||||
/// - Deletes device metadata (device id, device display name, last seen ip, last seen ts)
|
/// - Deletes device metadata (device id, device display name, last seen ip, last seen ts)
|
||||||
/// - Forgets to-device events
|
/// - Forgets to-device events
|
||||||
/// - Triggers device list updates
|
/// - Triggers device list updates
|
||||||
|
#[cfg_attr(
|
||||||
|
feature = "conduit_bin",
|
||||||
|
delete("/_matrix/client/r0/devices/<_>", data = "<body>")
|
||||||
|
)]
|
||||||
|
#[tracing::instrument(skip(db, body))]
|
||||||
pub async fn delete_device_route(
|
pub async fn delete_device_route(
|
||||||
body: Ruma<delete_device::v3::Request>,
|
db: DatabaseGuard,
|
||||||
) -> Result<delete_device::v3::Response> {
|
body: Ruma<delete_device::Request<'_>>,
|
||||||
|
) -> ConduitResult<delete_device::Response> {
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
let sender_device = body.sender_device.as_ref().expect("user is authenticated");
|
let sender_device = body.sender_device.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
|
@ -89,29 +118,32 @@ pub async fn delete_device_route(
|
||||||
};
|
};
|
||||||
|
|
||||||
if let Some(auth) = &body.auth {
|
if let Some(auth) = &body.auth {
|
||||||
let (worked, uiaainfo) =
|
let (worked, uiaainfo) = db.uiaa.try_auth(
|
||||||
services()
|
sender_user,
|
||||||
.uiaa
|
sender_device,
|
||||||
.try_auth(sender_user, sender_device, auth, &uiaainfo)?;
|
auth,
|
||||||
|
&uiaainfo,
|
||||||
|
&db.users,
|
||||||
|
&db.globals,
|
||||||
|
)?;
|
||||||
if !worked {
|
if !worked {
|
||||||
return Err(Error::Uiaa(uiaainfo));
|
return Err(Error::Uiaa(uiaainfo));
|
||||||
}
|
}
|
||||||
// Success!
|
// Success!
|
||||||
} else if let Some(json) = body.json_body {
|
} else if let Some(json) = body.json_body {
|
||||||
uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH));
|
uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH));
|
||||||
services()
|
db.uiaa
|
||||||
.uiaa
|
|
||||||
.create(sender_user, sender_device, &uiaainfo, &json)?;
|
.create(sender_user, sender_device, &uiaainfo, &json)?;
|
||||||
return Err(Error::Uiaa(uiaainfo));
|
return Err(Error::Uiaa(uiaainfo));
|
||||||
} else {
|
} else {
|
||||||
return Err(Error::BadRequest(ErrorKind::NotJson, "Not json."));
|
return Err(Error::BadRequest(ErrorKind::NotJson, "Not json."));
|
||||||
}
|
}
|
||||||
|
|
||||||
services()
|
db.users.remove_device(sender_user, &body.device_id)?;
|
||||||
.users
|
|
||||||
.remove_device(sender_user, &body.device_id)?;
|
|
||||||
|
|
||||||
Ok(delete_device::v3::Response {})
|
db.flush()?;
|
||||||
|
|
||||||
|
Ok(delete_device::Response {}.into())
|
||||||
}
|
}
|
||||||
|
|
||||||
/// # `PUT /_matrix/client/r0/devices/{deviceId}`
|
/// # `PUT /_matrix/client/r0/devices/{deviceId}`
|
||||||
|
@ -125,9 +157,15 @@ pub async fn delete_device_route(
|
||||||
/// - Deletes device metadata (device id, device display name, last seen ip, last seen ts)
|
/// - Deletes device metadata (device id, device display name, last seen ip, last seen ts)
|
||||||
/// - Forgets to-device events
|
/// - Forgets to-device events
|
||||||
/// - Triggers device list updates
|
/// - Triggers device list updates
|
||||||
|
#[cfg_attr(
|
||||||
|
feature = "conduit_bin",
|
||||||
|
post("/_matrix/client/r0/delete_devices", data = "<body>")
|
||||||
|
)]
|
||||||
|
#[tracing::instrument(skip(db, body))]
|
||||||
pub async fn delete_devices_route(
|
pub async fn delete_devices_route(
|
||||||
body: Ruma<delete_devices::v3::Request>,
|
db: DatabaseGuard,
|
||||||
) -> Result<delete_devices::v3::Response> {
|
body: Ruma<delete_devices::Request<'_>>,
|
||||||
|
) -> ConduitResult<delete_devices::Response> {
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
let sender_device = body.sender_device.as_ref().expect("user is authenticated");
|
let sender_device = body.sender_device.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
|
@ -143,18 +181,21 @@ pub async fn delete_devices_route(
|
||||||
};
|
};
|
||||||
|
|
||||||
if let Some(auth) = &body.auth {
|
if let Some(auth) = &body.auth {
|
||||||
let (worked, uiaainfo) =
|
let (worked, uiaainfo) = db.uiaa.try_auth(
|
||||||
services()
|
sender_user,
|
||||||
.uiaa
|
sender_device,
|
||||||
.try_auth(sender_user, sender_device, auth, &uiaainfo)?;
|
auth,
|
||||||
|
&uiaainfo,
|
||||||
|
&db.users,
|
||||||
|
&db.globals,
|
||||||
|
)?;
|
||||||
if !worked {
|
if !worked {
|
||||||
return Err(Error::Uiaa(uiaainfo));
|
return Err(Error::Uiaa(uiaainfo));
|
||||||
}
|
}
|
||||||
// Success!
|
// Success!
|
||||||
} else if let Some(json) = body.json_body {
|
} else if let Some(json) = body.json_body {
|
||||||
uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH));
|
uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH));
|
||||||
services()
|
db.uiaa
|
||||||
.uiaa
|
|
||||||
.create(sender_user, sender_device, &uiaainfo, &json)?;
|
.create(sender_user, sender_device, &uiaainfo, &json)?;
|
||||||
return Err(Error::Uiaa(uiaainfo));
|
return Err(Error::Uiaa(uiaainfo));
|
||||||
} else {
|
} else {
|
||||||
|
@ -162,8 +203,10 @@ pub async fn delete_devices_route(
|
||||||
}
|
}
|
||||||
|
|
||||||
for device_id in &body.devices {
|
for device_id in &body.devices {
|
||||||
services().users.remove_device(sender_user, device_id)?
|
db.users.remove_device(sender_user, device_id)?
|
||||||
}
|
}
|
||||||
|
|
||||||
Ok(delete_devices::v3::Response {})
|
db.flush()?;
|
||||||
|
|
||||||
|
Ok(delete_devices::Response {}.into())
|
||||||
}
|
}
|
|
@ -1,42 +1,53 @@
|
||||||
use crate::{services, Error, Result, Ruma};
|
use crate::{database::DatabaseGuard, ConduitResult, Database, Error, Result, Ruma};
|
||||||
use ruma::{
|
use ruma::{
|
||||||
api::{
|
api::{
|
||||||
client::{
|
client::{
|
||||||
|
error::ErrorKind,
|
||||||
|
r0::{
|
||||||
directory::{
|
directory::{
|
||||||
get_public_rooms, get_public_rooms_filtered, get_room_visibility,
|
get_public_rooms, get_public_rooms_filtered, get_room_visibility,
|
||||||
set_room_visibility,
|
set_room_visibility,
|
||||||
},
|
},
|
||||||
error::ErrorKind,
|
|
||||||
room,
|
room,
|
||||||
},
|
},
|
||||||
|
},
|
||||||
federation,
|
federation,
|
||||||
},
|
},
|
||||||
directory::{Filter, PublicRoomJoinRule, PublicRoomsChunk, RoomNetwork},
|
directory::{Filter, IncomingFilter, IncomingRoomNetwork, PublicRoomsChunk, RoomNetwork},
|
||||||
events::{
|
events::{
|
||||||
room::{
|
room::{
|
||||||
avatar::RoomAvatarEventContent,
|
avatar::RoomAvatarEventContent,
|
||||||
canonical_alias::RoomCanonicalAliasEventContent,
|
canonical_alias::RoomCanonicalAliasEventContent,
|
||||||
create::RoomCreateEventContent,
|
|
||||||
guest_access::{GuestAccess, RoomGuestAccessEventContent},
|
guest_access::{GuestAccess, RoomGuestAccessEventContent},
|
||||||
history_visibility::{HistoryVisibility, RoomHistoryVisibilityEventContent},
|
history_visibility::{HistoryVisibility, RoomHistoryVisibilityEventContent},
|
||||||
join_rules::{JoinRule, RoomJoinRulesEventContent},
|
name::RoomNameEventContent,
|
||||||
topic::RoomTopicEventContent,
|
topic::RoomTopicEventContent,
|
||||||
},
|
},
|
||||||
StateEventType,
|
EventType,
|
||||||
},
|
},
|
||||||
ServerName, UInt,
|
ServerName, UInt,
|
||||||
};
|
};
|
||||||
use tracing::{error, info, warn};
|
use tracing::{info, warn};
|
||||||
|
|
||||||
|
#[cfg(feature = "conduit_bin")]
|
||||||
|
use rocket::{get, post, put};
|
||||||
|
|
||||||
/// # `POST /_matrix/client/r0/publicRooms`
|
/// # `POST /_matrix/client/r0/publicRooms`
|
||||||
///
|
///
|
||||||
/// Lists the public rooms on this server.
|
/// Lists the public rooms on this server.
|
||||||
///
|
///
|
||||||
/// - Rooms are ordered by the number of joined members
|
/// - Rooms are ordered by the number of joined members
|
||||||
|
#[cfg_attr(
|
||||||
|
feature = "conduit_bin",
|
||||||
|
post("/_matrix/client/r0/publicRooms", data = "<body>")
|
||||||
|
)]
|
||||||
|
#[tracing::instrument(skip(db, body))]
|
||||||
pub async fn get_public_rooms_filtered_route(
|
pub async fn get_public_rooms_filtered_route(
|
||||||
body: Ruma<get_public_rooms_filtered::v3::Request>,
|
db: DatabaseGuard,
|
||||||
) -> Result<get_public_rooms_filtered::v3::Response> {
|
body: Ruma<get_public_rooms_filtered::Request<'_>>,
|
||||||
|
) -> ConduitResult<get_public_rooms_filtered::Response> {
|
||||||
get_public_rooms_filtered_helper(
|
get_public_rooms_filtered_helper(
|
||||||
|
&db,
|
||||||
body.server.as_deref(),
|
body.server.as_deref(),
|
||||||
body.limit,
|
body.limit,
|
||||||
body.since.as_deref(),
|
body.since.as_deref(),
|
||||||
|
@ -51,24 +62,33 @@ pub async fn get_public_rooms_filtered_route(
|
||||||
/// Lists the public rooms on this server.
|
/// Lists the public rooms on this server.
|
||||||
///
|
///
|
||||||
/// - Rooms are ordered by the number of joined members
|
/// - Rooms are ordered by the number of joined members
|
||||||
|
#[cfg_attr(
|
||||||
|
feature = "conduit_bin",
|
||||||
|
get("/_matrix/client/r0/publicRooms", data = "<body>")
|
||||||
|
)]
|
||||||
|
#[tracing::instrument(skip(db, body))]
|
||||||
pub async fn get_public_rooms_route(
|
pub async fn get_public_rooms_route(
|
||||||
body: Ruma<get_public_rooms::v3::Request>,
|
db: DatabaseGuard,
|
||||||
) -> Result<get_public_rooms::v3::Response> {
|
body: Ruma<get_public_rooms::Request<'_>>,
|
||||||
|
) -> ConduitResult<get_public_rooms::Response> {
|
||||||
let response = get_public_rooms_filtered_helper(
|
let response = get_public_rooms_filtered_helper(
|
||||||
|
&db,
|
||||||
body.server.as_deref(),
|
body.server.as_deref(),
|
||||||
body.limit,
|
body.limit,
|
||||||
body.since.as_deref(),
|
body.since.as_deref(),
|
||||||
&Filter::default(),
|
&IncomingFilter::default(),
|
||||||
&RoomNetwork::Matrix,
|
&IncomingRoomNetwork::Matrix,
|
||||||
)
|
)
|
||||||
.await?;
|
.await?
|
||||||
|
.0;
|
||||||
|
|
||||||
Ok(get_public_rooms::v3::Response {
|
Ok(get_public_rooms::Response {
|
||||||
chunk: response.chunk,
|
chunk: response.chunk,
|
||||||
prev_batch: response.prev_batch,
|
prev_batch: response.prev_batch,
|
||||||
next_batch: response.next_batch,
|
next_batch: response.next_batch,
|
||||||
total_room_count_estimate: response.total_room_count_estimate,
|
total_room_count_estimate: response.total_room_count_estimate,
|
||||||
})
|
}
|
||||||
|
.into())
|
||||||
}
|
}
|
||||||
|
|
||||||
/// # `PUT /_matrix/client/r0/directory/list/room/{roomId}`
|
/// # `PUT /_matrix/client/r0/directory/list/room/{roomId}`
|
||||||
|
@ -76,22 +96,23 @@ pub async fn get_public_rooms_route(
|
||||||
/// Sets the visibility of a given room in the room directory.
|
/// Sets the visibility of a given room in the room directory.
|
||||||
///
|
///
|
||||||
/// - TODO: Access control checks
|
/// - TODO: Access control checks
|
||||||
|
#[cfg_attr(
|
||||||
|
feature = "conduit_bin",
|
||||||
|
put("/_matrix/client/r0/directory/list/room/<_>", data = "<body>")
|
||||||
|
)]
|
||||||
|
#[tracing::instrument(skip(db, body))]
|
||||||
pub async fn set_room_visibility_route(
|
pub async fn set_room_visibility_route(
|
||||||
body: Ruma<set_room_visibility::v3::Request>,
|
db: DatabaseGuard,
|
||||||
) -> Result<set_room_visibility::v3::Response> {
|
body: Ruma<set_room_visibility::Request<'_>>,
|
||||||
|
) -> ConduitResult<set_room_visibility::Response> {
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
if !services().rooms.metadata.exists(&body.room_id)? {
|
|
||||||
// Return 404 if the room doesn't exist
|
|
||||||
return Err(Error::BadRequest(ErrorKind::NotFound, "Room not found"));
|
|
||||||
}
|
|
||||||
|
|
||||||
match &body.visibility {
|
match &body.visibility {
|
||||||
room::Visibility::Public => {
|
room::Visibility::Public => {
|
||||||
services().rooms.directory.set_public(&body.room_id)?;
|
db.rooms.set_public(&body.room_id, true)?;
|
||||||
info!("{} made {} public", sender_user, body.room_id);
|
info!("{} made {} public", sender_user, body.room_id);
|
||||||
}
|
}
|
||||||
room::Visibility::Private => services().rooms.directory.set_not_public(&body.room_id)?,
|
room::Visibility::Private => db.rooms.set_public(&body.room_id, false)?,
|
||||||
_ => {
|
_ => {
|
||||||
return Err(Error::BadRequest(
|
return Err(Error::BadRequest(
|
||||||
ErrorKind::InvalidParam,
|
ErrorKind::InvalidParam,
|
||||||
|
@ -100,61 +121,78 @@ pub async fn set_room_visibility_route(
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
Ok(set_room_visibility::v3::Response {})
|
db.flush()?;
|
||||||
|
|
||||||
|
Ok(set_room_visibility::Response {}.into())
|
||||||
}
|
}
|
||||||
|
|
||||||
/// # `GET /_matrix/client/r0/directory/list/room/{roomId}`
|
/// # `GET /_matrix/client/r0/directory/list/room/{roomId}`
|
||||||
///
|
///
|
||||||
/// Gets the visibility of a given room in the room directory.
|
/// Gets the visibility of a given room in the room directory.
|
||||||
|
#[cfg_attr(
|
||||||
|
feature = "conduit_bin",
|
||||||
|
get("/_matrix/client/r0/directory/list/room/<_>", data = "<body>")
|
||||||
|
)]
|
||||||
|
#[tracing::instrument(skip(db, body))]
|
||||||
pub async fn get_room_visibility_route(
|
pub async fn get_room_visibility_route(
|
||||||
body: Ruma<get_room_visibility::v3::Request>,
|
db: DatabaseGuard,
|
||||||
) -> Result<get_room_visibility::v3::Response> {
|
body: Ruma<get_room_visibility::Request<'_>>,
|
||||||
if !services().rooms.metadata.exists(&body.room_id)? {
|
) -> ConduitResult<get_room_visibility::Response> {
|
||||||
// Return 404 if the room doesn't exist
|
Ok(get_room_visibility::Response {
|
||||||
return Err(Error::BadRequest(ErrorKind::NotFound, "Room not found"));
|
visibility: if db.rooms.is_public_room(&body.room_id)? {
|
||||||
}
|
|
||||||
|
|
||||||
Ok(get_room_visibility::v3::Response {
|
|
||||||
visibility: if services().rooms.directory.is_public_room(&body.room_id)? {
|
|
||||||
room::Visibility::Public
|
room::Visibility::Public
|
||||||
} else {
|
} else {
|
||||||
room::Visibility::Private
|
room::Visibility::Private
|
||||||
},
|
},
|
||||||
})
|
}
|
||||||
|
.into())
|
||||||
}
|
}
|
||||||
|
|
||||||
pub(crate) async fn get_public_rooms_filtered_helper(
|
pub(crate) async fn get_public_rooms_filtered_helper(
|
||||||
|
db: &Database,
|
||||||
server: Option<&ServerName>,
|
server: Option<&ServerName>,
|
||||||
limit: Option<UInt>,
|
limit: Option<UInt>,
|
||||||
since: Option<&str>,
|
since: Option<&str>,
|
||||||
filter: &Filter,
|
filter: &IncomingFilter,
|
||||||
_network: &RoomNetwork,
|
_network: &IncomingRoomNetwork,
|
||||||
) -> Result<get_public_rooms_filtered::v3::Response> {
|
) -> ConduitResult<get_public_rooms_filtered::Response> {
|
||||||
if let Some(other_server) =
|
if let Some(other_server) = server.filter(|server| *server != db.globals.server_name().as_str())
|
||||||
server.filter(|server| *server != services().globals.server_name().as_str())
|
|
||||||
{
|
{
|
||||||
let response = services()
|
let response = db
|
||||||
.sending
|
.sending
|
||||||
.send_federation_request(
|
.send_federation_request(
|
||||||
|
&db.globals,
|
||||||
other_server,
|
other_server,
|
||||||
federation::directory::get_public_rooms_filtered::v1::Request {
|
federation::directory::get_public_rooms_filtered::v1::Request {
|
||||||
limit,
|
limit,
|
||||||
since: since.map(ToOwned::to_owned),
|
since,
|
||||||
filter: Filter {
|
filter: Filter {
|
||||||
generic_search_term: filter.generic_search_term.clone(),
|
generic_search_term: filter.generic_search_term.as_deref(),
|
||||||
room_types: filter.room_types.clone(),
|
|
||||||
},
|
},
|
||||||
room_network: RoomNetwork::Matrix,
|
room_network: RoomNetwork::Matrix,
|
||||||
},
|
},
|
||||||
)
|
)
|
||||||
.await?;
|
.await?;
|
||||||
|
|
||||||
return Ok(get_public_rooms_filtered::v3::Response {
|
return Ok(get_public_rooms_filtered::Response {
|
||||||
chunk: response.chunk,
|
chunk: response
|
||||||
|
.chunk
|
||||||
|
.into_iter()
|
||||||
|
.map(|c| {
|
||||||
|
// Convert ruma::api::federation::directory::get_public_rooms::v1::PublicRoomsChunk
|
||||||
|
// to ruma::api::client::r0::directory::PublicRoomsChunk
|
||||||
|
serde_json::from_str(
|
||||||
|
&serde_json::to_string(&c)
|
||||||
|
.expect("PublicRoomsChunk::to_string always works"),
|
||||||
|
)
|
||||||
|
.expect("federation and client-server PublicRoomsChunk are the same type")
|
||||||
|
})
|
||||||
|
.collect(),
|
||||||
prev_batch: response.prev_batch,
|
prev_batch: response.prev_batch,
|
||||||
next_batch: response.next_batch,
|
next_batch: response.next_batch,
|
||||||
total_room_count_estimate: response.total_room_count_estimate,
|
total_room_count_estimate: response.total_room_count_estimate,
|
||||||
});
|
}
|
||||||
|
.into());
|
||||||
}
|
}
|
||||||
|
|
||||||
let limit = limit.map_or(10, u64::from);
|
let limit = limit.map_or(10, u64::from);
|
||||||
|
@ -183,18 +221,17 @@ pub(crate) async fn get_public_rooms_filtered_helper(
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
let mut all_rooms: Vec<_> = services()
|
let mut all_rooms: Vec<_> = db
|
||||||
.rooms
|
.rooms
|
||||||
.directory
|
|
||||||
.public_rooms()
|
.public_rooms()
|
||||||
.map(|room_id| {
|
.map(|room_id| {
|
||||||
let room_id = room_id?;
|
let room_id = room_id?;
|
||||||
|
|
||||||
let chunk = PublicRoomsChunk {
|
let chunk = PublicRoomsChunk {
|
||||||
canonical_alias: services()
|
aliases: Vec::new(),
|
||||||
|
canonical_alias: db
|
||||||
.rooms
|
.rooms
|
||||||
.state_accessor
|
.room_state_get(&room_id, &EventType::RoomCanonicalAlias, "")?
|
||||||
.room_state_get(&room_id, &StateEventType::RoomCanonicalAlias, "")?
|
|
||||||
.map_or(Ok(None), |s| {
|
.map_or(Ok(None), |s| {
|
||||||
serde_json::from_str(s.content.get())
|
serde_json::from_str(s.content.get())
|
||||||
.map(|c: RoomCanonicalAliasEventContent| c.alias)
|
.map(|c: RoomCanonicalAliasEventContent| c.alias)
|
||||||
|
@ -202,10 +239,18 @@ pub(crate) async fn get_public_rooms_filtered_helper(
|
||||||
Error::bad_database("Invalid canonical alias event in database.")
|
Error::bad_database("Invalid canonical alias event in database.")
|
||||||
})
|
})
|
||||||
})?,
|
})?,
|
||||||
name: services().rooms.state_accessor.get_name(&room_id)?,
|
name: db
|
||||||
num_joined_members: services()
|
.rooms
|
||||||
|
.room_state_get(&room_id, &EventType::RoomName, "")?
|
||||||
|
.map_or(Ok(None), |s| {
|
||||||
|
serde_json::from_str(s.content.get())
|
||||||
|
.map(|c: RoomNameEventContent| c.name)
|
||||||
|
.map_err(|_| {
|
||||||
|
Error::bad_database("Invalid room name event in database.")
|
||||||
|
})
|
||||||
|
})?,
|
||||||
|
num_joined_members: db
|
||||||
.rooms
|
.rooms
|
||||||
.state_cache
|
|
||||||
.room_joined_count(&room_id)?
|
.room_joined_count(&room_id)?
|
||||||
.unwrap_or_else(|| {
|
.unwrap_or_else(|| {
|
||||||
warn!("Room {} has no member count", room_id);
|
warn!("Room {} has no member count", room_id);
|
||||||
|
@ -213,22 +258,19 @@ pub(crate) async fn get_public_rooms_filtered_helper(
|
||||||
})
|
})
|
||||||
.try_into()
|
.try_into()
|
||||||
.expect("user count should not be that big"),
|
.expect("user count should not be that big"),
|
||||||
topic: services()
|
topic: db
|
||||||
.rooms
|
.rooms
|
||||||
.state_accessor
|
.room_state_get(&room_id, &EventType::RoomTopic, "")?
|
||||||
.room_state_get(&room_id, &StateEventType::RoomTopic, "")?
|
|
||||||
.map_or(Ok(None), |s| {
|
.map_or(Ok(None), |s| {
|
||||||
serde_json::from_str(s.content.get())
|
serde_json::from_str(s.content.get())
|
||||||
.map(|c: RoomTopicEventContent| Some(c.topic))
|
.map(|c: RoomTopicEventContent| Some(c.topic))
|
||||||
.map_err(|_| {
|
.map_err(|_| {
|
||||||
error!("Invalid room topic event in database for room {}", room_id);
|
|
||||||
Error::bad_database("Invalid room topic event in database.")
|
Error::bad_database("Invalid room topic event in database.")
|
||||||
})
|
})
|
||||||
})?,
|
})?,
|
||||||
world_readable: services()
|
world_readable: db
|
||||||
.rooms
|
.rooms
|
||||||
.state_accessor
|
.room_state_get(&room_id, &EventType::RoomHistoryVisibility, "")?
|
||||||
.room_state_get(&room_id, &StateEventType::RoomHistoryVisibility, "")?
|
|
||||||
.map_or(Ok(false), |s| {
|
.map_or(Ok(false), |s| {
|
||||||
serde_json::from_str(s.content.get())
|
serde_json::from_str(s.content.get())
|
||||||
.map(|c: RoomHistoryVisibilityEventContent| {
|
.map(|c: RoomHistoryVisibilityEventContent| {
|
||||||
|
@ -240,10 +282,9 @@ pub(crate) async fn get_public_rooms_filtered_helper(
|
||||||
)
|
)
|
||||||
})
|
})
|
||||||
})?,
|
})?,
|
||||||
guest_can_join: services()
|
guest_can_join: db
|
||||||
.rooms
|
.rooms
|
||||||
.state_accessor
|
.room_state_get(&room_id, &EventType::RoomGuestAccess, "")?
|
||||||
.room_state_get(&room_id, &StateEventType::RoomGuestAccess, "")?
|
|
||||||
.map_or(Ok(false), |s| {
|
.map_or(Ok(false), |s| {
|
||||||
serde_json::from_str(s.content.get())
|
serde_json::from_str(s.content.get())
|
||||||
.map(|c: RoomGuestAccessEventContent| {
|
.map(|c: RoomGuestAccessEventContent| {
|
||||||
|
@ -253,10 +294,9 @@ pub(crate) async fn get_public_rooms_filtered_helper(
|
||||||
Error::bad_database("Invalid room guest access event in database.")
|
Error::bad_database("Invalid room guest access event in database.")
|
||||||
})
|
})
|
||||||
})?,
|
})?,
|
||||||
avatar_url: services()
|
avatar_url: db
|
||||||
.rooms
|
.rooms
|
||||||
.state_accessor
|
.room_state_get(&room_id, &EventType::RoomAvatar, "")?
|
||||||
.room_state_get(&room_id, &StateEventType::RoomAvatar, "")?
|
|
||||||
.map(|s| {
|
.map(|s| {
|
||||||
serde_json::from_str(s.content.get())
|
serde_json::from_str(s.content.get())
|
||||||
.map(|c: RoomAvatarEventContent| c.url)
|
.map(|c: RoomAvatarEventContent| c.url)
|
||||||
|
@ -267,39 +307,6 @@ pub(crate) async fn get_public_rooms_filtered_helper(
|
||||||
.transpose()?
|
.transpose()?
|
||||||
// url is now an Option<String> so we must flatten
|
// url is now an Option<String> so we must flatten
|
||||||
.flatten(),
|
.flatten(),
|
||||||
join_rule: services()
|
|
||||||
.rooms
|
|
||||||
.state_accessor
|
|
||||||
.room_state_get(&room_id, &StateEventType::RoomJoinRules, "")?
|
|
||||||
.map(|s| {
|
|
||||||
serde_json::from_str(s.content.get())
|
|
||||||
.map(|c: RoomJoinRulesEventContent| match c.join_rule {
|
|
||||||
JoinRule::Public => Some(PublicRoomJoinRule::Public),
|
|
||||||
JoinRule::Knock => Some(PublicRoomJoinRule::Knock),
|
|
||||||
_ => None,
|
|
||||||
})
|
|
||||||
.map_err(|e| {
|
|
||||||
error!("Invalid room join rule event in database: {}", e);
|
|
||||||
Error::BadDatabase("Invalid room join rule event in database.")
|
|
||||||
})
|
|
||||||
})
|
|
||||||
.transpose()?
|
|
||||||
.flatten()
|
|
||||||
.ok_or_else(|| Error::bad_database("Missing room join rule event for room."))?,
|
|
||||||
room_type: services()
|
|
||||||
.rooms
|
|
||||||
.state_accessor
|
|
||||||
.room_state_get(&room_id, &StateEventType::RoomCreate, "")?
|
|
||||||
.map(|s| {
|
|
||||||
serde_json::from_str::<RoomCreateEventContent>(s.content.get()).map_err(
|
|
||||||
|e| {
|
|
||||||
error!("Invalid room create event in database: {}", e);
|
|
||||||
Error::BadDatabase("Invalid room create event in database.")
|
|
||||||
},
|
|
||||||
)
|
|
||||||
})
|
|
||||||
.transpose()?
|
|
||||||
.and_then(|e| e.room_type),
|
|
||||||
room_id,
|
room_id,
|
||||||
};
|
};
|
||||||
Ok(chunk)
|
Ok(chunk)
|
||||||
|
@ -351,7 +358,7 @@ pub(crate) async fn get_public_rooms_filtered_helper(
|
||||||
let prev_batch = if num_since == 0 {
|
let prev_batch = if num_since == 0 {
|
||||||
None
|
None
|
||||||
} else {
|
} else {
|
||||||
Some(format!("p{num_since}"))
|
Some(format!("p{}", num_since))
|
||||||
};
|
};
|
||||||
|
|
||||||
let next_batch = if chunk.len() < limit as usize {
|
let next_batch = if chunk.len() < limit as usize {
|
||||||
|
@ -360,10 +367,11 @@ pub(crate) async fn get_public_rooms_filtered_helper(
|
||||||
Some(format!("n{}", num_since + limit))
|
Some(format!("n{}", num_since + limit))
|
||||||
};
|
};
|
||||||
|
|
||||||
Ok(get_public_rooms_filtered::v3::Response {
|
Ok(get_public_rooms_filtered::Response {
|
||||||
chunk,
|
chunk,
|
||||||
prev_batch,
|
prev_batch,
|
||||||
next_batch,
|
next_batch,
|
||||||
total_room_count_estimate: Some(total_room_count_estimate),
|
total_room_count_estimate: Some(total_room_count_estimate),
|
||||||
})
|
}
|
||||||
|
.into())
|
||||||
}
|
}
|
47
src/client_server/filter.rs
Normal file
47
src/client_server/filter.rs
Normal file
|
@ -0,0 +1,47 @@
|
||||||
|
use crate::{database::DatabaseGuard, ConduitResult, Error, Ruma};
|
||||||
|
use ruma::api::client::{
|
||||||
|
error::ErrorKind,
|
||||||
|
r0::filter::{create_filter, get_filter},
|
||||||
|
};
|
||||||
|
|
||||||
|
#[cfg(feature = "conduit_bin")]
|
||||||
|
use rocket::{get, post};
|
||||||
|
|
||||||
|
/// # `GET /_matrix/client/r0/user/{userId}/filter/{filterId}`
|
||||||
|
///
|
||||||
|
/// Loads a filter that was previously created.
|
||||||
|
///
|
||||||
|
/// - A user can only access their own filters
|
||||||
|
#[cfg_attr(
|
||||||
|
feature = "conduit_bin",
|
||||||
|
get("/_matrix/client/r0/user/<_>/filter/<_>", data = "<body>")
|
||||||
|
)]
|
||||||
|
#[tracing::instrument(skip(db, body))]
|
||||||
|
pub async fn get_filter_route(
|
||||||
|
db: DatabaseGuard,
|
||||||
|
body: Ruma<get_filter::Request<'_>>,
|
||||||
|
) -> ConduitResult<get_filter::Response> {
|
||||||
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
let filter = match db.users.get_filter(sender_user, &body.filter_id)? {
|
||||||
|
Some(filter) => filter,
|
||||||
|
None => return Err(Error::BadRequest(ErrorKind::NotFound, "Filter not found.")),
|
||||||
|
};
|
||||||
|
|
||||||
|
Ok(get_filter::Response::new(filter).into())
|
||||||
|
}
|
||||||
|
|
||||||
|
/// # `PUT /_matrix/client/r0/user/{userId}/filter`
|
||||||
|
///
|
||||||
|
/// Creates a new filter to be used by other endpoints.
|
||||||
|
#[cfg_attr(
|
||||||
|
feature = "conduit_bin",
|
||||||
|
post("/_matrix/client/r0/user/<_>/filter", data = "<body>")
|
||||||
|
)]
|
||||||
|
#[tracing::instrument(skip(db, body))]
|
||||||
|
pub async fn create_filter_route(
|
||||||
|
db: DatabaseGuard,
|
||||||
|
body: Ruma<create_filter::Request<'_>>,
|
||||||
|
) -> ConduitResult<create_filter::Response> {
|
||||||
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
Ok(create_filter::Response::new(db.users.create_filter(sender_user, &body.filter)?).into())
|
||||||
|
}
|
|
@ -1,27 +1,28 @@
|
||||||
use super::SESSION_ID_LENGTH;
|
use super::SESSION_ID_LENGTH;
|
||||||
use crate::{services, utils, Error, Result, Ruma};
|
use crate::{database::DatabaseGuard, utils, ConduitResult, Database, Error, Result, Ruma};
|
||||||
use futures_util::{stream::FuturesUnordered, StreamExt};
|
use rocket::futures::{prelude::*, stream::FuturesUnordered};
|
||||||
use ruma::{
|
use ruma::{
|
||||||
api::{
|
api::{
|
||||||
client::{
|
client::{
|
||||||
error::ErrorKind,
|
error::ErrorKind,
|
||||||
|
r0::{
|
||||||
keys::{
|
keys::{
|
||||||
claim_keys, get_key_changes, get_keys, upload_keys, upload_signatures,
|
claim_keys, get_key_changes, get_keys, upload_keys, upload_signatures,
|
||||||
upload_signing_keys,
|
upload_signing_keys,
|
||||||
},
|
},
|
||||||
uiaa::{AuthFlow, AuthType, UiaaInfo},
|
uiaa::{AuthFlow, AuthType, UiaaInfo},
|
||||||
},
|
},
|
||||||
|
},
|
||||||
federation,
|
federation,
|
||||||
},
|
},
|
||||||
serde::Raw,
|
serde::Raw,
|
||||||
DeviceKeyAlgorithm, OwnedDeviceId, OwnedUserId, UserId,
|
DeviceId, DeviceKeyAlgorithm, UserId,
|
||||||
};
|
};
|
||||||
use serde_json::json;
|
use serde_json::json;
|
||||||
use std::{
|
use std::collections::{BTreeMap, HashMap, HashSet};
|
||||||
collections::{hash_map, BTreeMap, HashMap, HashSet},
|
|
||||||
time::{Duration, Instant},
|
#[cfg(feature = "conduit_bin")]
|
||||||
};
|
use rocket::{get, post};
|
||||||
use tracing::debug;
|
|
||||||
|
|
||||||
/// # `POST /_matrix/client/r0/keys/upload`
|
/// # `POST /_matrix/client/r0/keys/upload`
|
||||||
///
|
///
|
||||||
|
@ -29,37 +30,47 @@ use tracing::debug;
|
||||||
///
|
///
|
||||||
/// - Adds one time keys
|
/// - Adds one time keys
|
||||||
/// - If there are no device keys yet: Adds device keys (TODO: merge with existing keys?)
|
/// - If there are no device keys yet: Adds device keys (TODO: merge with existing keys?)
|
||||||
|
#[cfg_attr(
|
||||||
|
feature = "conduit_bin",
|
||||||
|
post("/_matrix/client/r0/keys/upload", data = "<body>")
|
||||||
|
)]
|
||||||
|
#[tracing::instrument(skip(db, body))]
|
||||||
pub async fn upload_keys_route(
|
pub async fn upload_keys_route(
|
||||||
body: Ruma<upload_keys::v3::Request>,
|
db: DatabaseGuard,
|
||||||
) -> Result<upload_keys::v3::Response> {
|
body: Ruma<upload_keys::Request>,
|
||||||
|
) -> ConduitResult<upload_keys::Response> {
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
let sender_device = body.sender_device.as_ref().expect("user is authenticated");
|
let sender_device = body.sender_device.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
for (key_key, key_value) in &body.one_time_keys {
|
for (key_key, key_value) in &body.one_time_keys {
|
||||||
services()
|
db.users
|
||||||
.users
|
.add_one_time_key(sender_user, sender_device, key_key, key_value, &db.globals)?;
|
||||||
.add_one_time_key(sender_user, sender_device, key_key, key_value)?;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
if let Some(device_keys) = &body.device_keys {
|
if let Some(device_keys) = &body.device_keys {
|
||||||
// TODO: merge this and the existing event?
|
// TODO: merge this and the existing event?
|
||||||
// This check is needed to assure that signatures are kept
|
// This check is needed to assure that signatures are kept
|
||||||
if services()
|
if db
|
||||||
.users
|
.users
|
||||||
.get_device_keys(sender_user, sender_device)?
|
.get_device_keys(sender_user, sender_device)?
|
||||||
.is_none()
|
.is_none()
|
||||||
{
|
{
|
||||||
services()
|
db.users.add_device_keys(
|
||||||
.users
|
sender_user,
|
||||||
.add_device_keys(sender_user, sender_device, device_keys)?;
|
sender_device,
|
||||||
|
device_keys,
|
||||||
|
&db.rooms,
|
||||||
|
&db.globals,
|
||||||
|
)?;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
Ok(upload_keys::v3::Response {
|
db.flush()?;
|
||||||
one_time_key_counts: services()
|
|
||||||
.users
|
Ok(upload_keys::Response {
|
||||||
.count_one_time_keys(sender_user, sender_device)?,
|
one_time_key_counts: db.users.count_one_time_keys(sender_user, sender_device)?,
|
||||||
})
|
}
|
||||||
|
.into())
|
||||||
}
|
}
|
||||||
|
|
||||||
/// # `POST /_matrix/client/r0/keys/query`
|
/// # `POST /_matrix/client/r0/keys/query`
|
||||||
|
@ -69,24 +80,45 @@ pub async fn upload_keys_route(
|
||||||
/// - Always fetches users from other servers over federation
|
/// - Always fetches users from other servers over federation
|
||||||
/// - Gets master keys, self-signing keys, user signing keys and device keys.
|
/// - Gets master keys, self-signing keys, user signing keys and device keys.
|
||||||
/// - The master and self-signing keys contain signatures that the user is allowed to see
|
/// - The master and self-signing keys contain signatures that the user is allowed to see
|
||||||
pub async fn get_keys_route(body: Ruma<get_keys::v3::Request>) -> Result<get_keys::v3::Response> {
|
#[cfg_attr(
|
||||||
|
feature = "conduit_bin",
|
||||||
|
post("/_matrix/client/r0/keys/query", data = "<body>")
|
||||||
|
)]
|
||||||
|
#[tracing::instrument(skip(db, body))]
|
||||||
|
pub async fn get_keys_route(
|
||||||
|
db: DatabaseGuard,
|
||||||
|
body: Ruma<get_keys::Request<'_>>,
|
||||||
|
) -> ConduitResult<get_keys::Response> {
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
let response =
|
let response = get_keys_helper(
|
||||||
get_keys_helper(Some(sender_user), &body.device_keys, |u| u == sender_user).await?;
|
Some(sender_user),
|
||||||
|
&body.device_keys,
|
||||||
|
|u| u == sender_user,
|
||||||
|
&db,
|
||||||
|
)
|
||||||
|
.await?;
|
||||||
|
|
||||||
Ok(response)
|
Ok(response.into())
|
||||||
}
|
}
|
||||||
|
|
||||||
/// # `POST /_matrix/client/r0/keys/claim`
|
/// # `POST /_matrix/client/r0/keys/claim`
|
||||||
///
|
///
|
||||||
/// Claims one-time keys
|
/// Claims one-time keys
|
||||||
|
#[cfg_attr(
|
||||||
|
feature = "conduit_bin",
|
||||||
|
post("/_matrix/client/r0/keys/claim", data = "<body>")
|
||||||
|
)]
|
||||||
|
#[tracing::instrument(skip(db, body))]
|
||||||
pub async fn claim_keys_route(
|
pub async fn claim_keys_route(
|
||||||
body: Ruma<claim_keys::v3::Request>,
|
db: DatabaseGuard,
|
||||||
) -> Result<claim_keys::v3::Response> {
|
body: Ruma<claim_keys::Request>,
|
||||||
let response = claim_keys_helper(&body.one_time_keys).await?;
|
) -> ConduitResult<claim_keys::Response> {
|
||||||
|
let response = claim_keys_helper(&body.one_time_keys, &db).await?;
|
||||||
|
|
||||||
Ok(response)
|
db.flush()?;
|
||||||
|
|
||||||
|
Ok(response.into())
|
||||||
}
|
}
|
||||||
|
|
||||||
/// # `POST /_matrix/client/r0/keys/device_signing/upload`
|
/// # `POST /_matrix/client/r0/keys/device_signing/upload`
|
||||||
|
@ -94,9 +126,15 @@ pub async fn claim_keys_route(
|
||||||
/// Uploads end-to-end key information for the sender user.
|
/// Uploads end-to-end key information for the sender user.
|
||||||
///
|
///
|
||||||
/// - Requires UIAA to verify password
|
/// - Requires UIAA to verify password
|
||||||
|
#[cfg_attr(
|
||||||
|
feature = "conduit_bin",
|
||||||
|
post("/_matrix/client/unstable/keys/device_signing/upload", data = "<body>")
|
||||||
|
)]
|
||||||
|
#[tracing::instrument(skip(db, body))]
|
||||||
pub async fn upload_signing_keys_route(
|
pub async fn upload_signing_keys_route(
|
||||||
body: Ruma<upload_signing_keys::v3::Request>,
|
db: DatabaseGuard,
|
||||||
) -> Result<upload_signing_keys::v3::Response> {
|
body: Ruma<upload_signing_keys::Request<'_>>,
|
||||||
|
) -> ConduitResult<upload_signing_keys::Response> {
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
let sender_device = body.sender_device.as_ref().expect("user is authenticated");
|
let sender_device = body.sender_device.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
|
@ -112,18 +150,21 @@ pub async fn upload_signing_keys_route(
|
||||||
};
|
};
|
||||||
|
|
||||||
if let Some(auth) = &body.auth {
|
if let Some(auth) = &body.auth {
|
||||||
let (worked, uiaainfo) =
|
let (worked, uiaainfo) = db.uiaa.try_auth(
|
||||||
services()
|
sender_user,
|
||||||
.uiaa
|
sender_device,
|
||||||
.try_auth(sender_user, sender_device, auth, &uiaainfo)?;
|
auth,
|
||||||
|
&uiaainfo,
|
||||||
|
&db.users,
|
||||||
|
&db.globals,
|
||||||
|
)?;
|
||||||
if !worked {
|
if !worked {
|
||||||
return Err(Error::Uiaa(uiaainfo));
|
return Err(Error::Uiaa(uiaainfo));
|
||||||
}
|
}
|
||||||
// Success!
|
// Success!
|
||||||
} else if let Some(json) = body.json_body {
|
} else if let Some(json) = body.json_body {
|
||||||
uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH));
|
uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH));
|
||||||
services()
|
db.uiaa
|
||||||
.uiaa
|
|
||||||
.create(sender_user, sender_device, &uiaainfo, &json)?;
|
.create(sender_user, sender_device, &uiaainfo, &json)?;
|
||||||
return Err(Error::Uiaa(uiaainfo));
|
return Err(Error::Uiaa(uiaainfo));
|
||||||
} else {
|
} else {
|
||||||
|
@ -131,32 +172,38 @@ pub async fn upload_signing_keys_route(
|
||||||
}
|
}
|
||||||
|
|
||||||
if let Some(master_key) = &body.master_key {
|
if let Some(master_key) = &body.master_key {
|
||||||
services().users.add_cross_signing_keys(
|
db.users.add_cross_signing_keys(
|
||||||
sender_user,
|
sender_user,
|
||||||
master_key,
|
master_key,
|
||||||
&body.self_signing_key,
|
&body.self_signing_key,
|
||||||
&body.user_signing_key,
|
&body.user_signing_key,
|
||||||
true, // notify so that other users see the new keys
|
&db.rooms,
|
||||||
|
&db.globals,
|
||||||
)?;
|
)?;
|
||||||
}
|
}
|
||||||
|
|
||||||
Ok(upload_signing_keys::v3::Response {})
|
db.flush()?;
|
||||||
|
|
||||||
|
Ok(upload_signing_keys::Response {}.into())
|
||||||
}
|
}
|
||||||
|
|
||||||
/// # `POST /_matrix/client/r0/keys/signatures/upload`
|
/// # `POST /_matrix/client/r0/keys/signatures/upload`
|
||||||
///
|
///
|
||||||
/// Uploads end-to-end key signatures from the sender user.
|
/// Uploads end-to-end key signatures from the sender user.
|
||||||
|
#[cfg_attr(
|
||||||
|
feature = "conduit_bin",
|
||||||
|
post("/_matrix/client/unstable/keys/signatures/upload", data = "<body>")
|
||||||
|
)]
|
||||||
|
#[tracing::instrument(skip(db, body))]
|
||||||
pub async fn upload_signatures_route(
|
pub async fn upload_signatures_route(
|
||||||
body: Ruma<upload_signatures::v3::Request>,
|
db: DatabaseGuard,
|
||||||
) -> Result<upload_signatures::v3::Response> {
|
body: Ruma<upload_signatures::Request>,
|
||||||
|
) -> ConduitResult<upload_signatures::Response> {
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
for (user_id, keys) in &body.signed_keys {
|
for (user_id, signed_keys) in &body.signed_keys {
|
||||||
for (key_id, key) in keys {
|
for (key_id, signed_key) in signed_keys {
|
||||||
let key = serde_json::to_value(key)
|
for signature in signed_key
|
||||||
.map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid key JSON"))?;
|
|
||||||
|
|
||||||
for signature in key
|
|
||||||
.get("signatures")
|
.get("signatures")
|
||||||
.ok_or(Error::BadRequest(
|
.ok_or(Error::BadRequest(
|
||||||
ErrorKind::InvalidParam,
|
ErrorKind::InvalidParam,
|
||||||
|
@ -187,16 +234,21 @@ pub async fn upload_signatures_route(
|
||||||
))?
|
))?
|
||||||
.to_owned(),
|
.to_owned(),
|
||||||
);
|
);
|
||||||
services()
|
db.users.sign_key(
|
||||||
.users
|
user_id,
|
||||||
.sign_key(user_id, key_id, signature, sender_user)?;
|
key_id,
|
||||||
|
signature,
|
||||||
|
sender_user,
|
||||||
|
&db.rooms,
|
||||||
|
&db.globals,
|
||||||
|
)?;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
Ok(upload_signatures::v3::Response {
|
db.flush()?;
|
||||||
failures: BTreeMap::new(), // TODO: integrate
|
|
||||||
})
|
Ok(upload_signatures::Response {}.into())
|
||||||
}
|
}
|
||||||
|
|
||||||
/// # `POST /_matrix/client/r0/keys/changes`
|
/// # `POST /_matrix/client/r0/keys/changes`
|
||||||
|
@ -204,16 +256,21 @@ pub async fn upload_signatures_route(
|
||||||
/// Gets a list of users who have updated their device identity keys since the previous sync token.
|
/// Gets a list of users who have updated their device identity keys since the previous sync token.
|
||||||
///
|
///
|
||||||
/// - TODO: left users
|
/// - TODO: left users
|
||||||
|
#[cfg_attr(
|
||||||
|
feature = "conduit_bin",
|
||||||
|
get("/_matrix/client/r0/keys/changes", data = "<body>")
|
||||||
|
)]
|
||||||
|
#[tracing::instrument(skip(db, body))]
|
||||||
pub async fn get_key_changes_route(
|
pub async fn get_key_changes_route(
|
||||||
body: Ruma<get_key_changes::v3::Request>,
|
db: DatabaseGuard,
|
||||||
) -> Result<get_key_changes::v3::Response> {
|
body: Ruma<get_key_changes::Request<'_>>,
|
||||||
|
) -> ConduitResult<get_key_changes::Response> {
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
let mut device_list_updates = HashSet::new();
|
let mut device_list_updates = HashSet::new();
|
||||||
|
|
||||||
device_list_updates.extend(
|
device_list_updates.extend(
|
||||||
services()
|
db.users
|
||||||
.users
|
|
||||||
.keys_changed(
|
.keys_changed(
|
||||||
sender_user.as_str(),
|
sender_user.as_str(),
|
||||||
body.from
|
body.from
|
||||||
|
@ -228,17 +285,11 @@ pub async fn get_key_changes_route(
|
||||||
.filter_map(|r| r.ok()),
|
.filter_map(|r| r.ok()),
|
||||||
);
|
);
|
||||||
|
|
||||||
for room_id in services()
|
for room_id in db.rooms.rooms_joined(sender_user).filter_map(|r| r.ok()) {
|
||||||
.rooms
|
|
||||||
.state_cache
|
|
||||||
.rooms_joined(sender_user)
|
|
||||||
.filter_map(|r| r.ok())
|
|
||||||
{
|
|
||||||
device_list_updates.extend(
|
device_list_updates.extend(
|
||||||
services()
|
db.users
|
||||||
.users
|
|
||||||
.keys_changed(
|
.keys_changed(
|
||||||
room_id.as_ref(),
|
&room_id.to_string(),
|
||||||
body.from.parse().map_err(|_| {
|
body.from.parse().map_err(|_| {
|
||||||
Error::BadRequest(ErrorKind::InvalidParam, "Invalid `from`.")
|
Error::BadRequest(ErrorKind::InvalidParam, "Invalid `from`.")
|
||||||
})?,
|
})?,
|
||||||
|
@ -249,17 +300,19 @@ pub async fn get_key_changes_route(
|
||||||
.filter_map(|r| r.ok()),
|
.filter_map(|r| r.ok()),
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
Ok(get_key_changes::v3::Response {
|
Ok(get_key_changes::Response {
|
||||||
changed: device_list_updates.into_iter().collect(),
|
changed: device_list_updates.into_iter().collect(),
|
||||||
left: Vec::new(), // TODO
|
left: Vec::new(), // TODO
|
||||||
})
|
}
|
||||||
|
.into())
|
||||||
}
|
}
|
||||||
|
|
||||||
pub(crate) async fn get_keys_helper<F: Fn(&UserId) -> bool>(
|
pub(crate) async fn get_keys_helper<F: Fn(&UserId) -> bool>(
|
||||||
sender_user: Option<&UserId>,
|
sender_user: Option<&UserId>,
|
||||||
device_keys_input: &BTreeMap<OwnedUserId, Vec<OwnedDeviceId>>,
|
device_keys_input: &BTreeMap<Box<UserId>, Vec<Box<DeviceId>>>,
|
||||||
allowed_signatures: F,
|
allowed_signatures: F,
|
||||||
) -> Result<get_keys::v3::Response> {
|
db: &Database,
|
||||||
|
) -> Result<get_keys::Response> {
|
||||||
let mut master_keys = BTreeMap::new();
|
let mut master_keys = BTreeMap::new();
|
||||||
let mut self_signing_keys = BTreeMap::new();
|
let mut self_signing_keys = BTreeMap::new();
|
||||||
let mut user_signing_keys = BTreeMap::new();
|
let mut user_signing_keys = BTreeMap::new();
|
||||||
|
@ -268,9 +321,9 @@ pub(crate) async fn get_keys_helper<F: Fn(&UserId) -> bool>(
|
||||||
let mut get_over_federation = HashMap::new();
|
let mut get_over_federation = HashMap::new();
|
||||||
|
|
||||||
for (user_id, device_ids) in device_keys_input {
|
for (user_id, device_ids) in device_keys_input {
|
||||||
let user_id: &UserId = user_id;
|
let user_id: &UserId = &**user_id;
|
||||||
|
|
||||||
if user_id.server_name() != services().globals.server_name() {
|
if user_id.server_name() != db.globals.server_name() {
|
||||||
get_over_federation
|
get_over_federation
|
||||||
.entry(user_id.server_name())
|
.entry(user_id.server_name())
|
||||||
.or_insert_with(Vec::new)
|
.or_insert_with(Vec::new)
|
||||||
|
@ -280,10 +333,10 @@ pub(crate) async fn get_keys_helper<F: Fn(&UserId) -> bool>(
|
||||||
|
|
||||||
if device_ids.is_empty() {
|
if device_ids.is_empty() {
|
||||||
let mut container = BTreeMap::new();
|
let mut container = BTreeMap::new();
|
||||||
for device_id in services().users.all_device_ids(user_id) {
|
for device_id in db.users.all_device_ids(user_id) {
|
||||||
let device_id = device_id?;
|
let device_id = device_id?;
|
||||||
if let Some(mut keys) = services().users.get_device_keys(user_id, &device_id)? {
|
if let Some(mut keys) = db.users.get_device_keys(user_id, &device_id)? {
|
||||||
let metadata = services()
|
let metadata = db
|
||||||
.users
|
.users
|
||||||
.get_device_metadata(user_id, &device_id)?
|
.get_device_metadata(user_id, &device_id)?
|
||||||
.ok_or_else(|| {
|
.ok_or_else(|| {
|
||||||
|
@ -299,14 +352,13 @@ pub(crate) async fn get_keys_helper<F: Fn(&UserId) -> bool>(
|
||||||
} else {
|
} else {
|
||||||
for device_id in device_ids {
|
for device_id in device_ids {
|
||||||
let mut container = BTreeMap::new();
|
let mut container = BTreeMap::new();
|
||||||
if let Some(mut keys) = services().users.get_device_keys(user_id, device_id)? {
|
if let Some(mut keys) = db.users.get_device_keys(user_id, device_id)? {
|
||||||
let metadata = services()
|
let metadata = db.users.get_device_metadata(user_id, device_id)?.ok_or(
|
||||||
.users
|
Error::BadRequest(
|
||||||
.get_device_metadata(user_id, device_id)?
|
|
||||||
.ok_or(Error::BadRequest(
|
|
||||||
ErrorKind::InvalidParam,
|
ErrorKind::InvalidParam,
|
||||||
"Tried to get keys for nonexistent device.",
|
"Tried to get keys for nonexistent device.",
|
||||||
))?;
|
),
|
||||||
|
)?;
|
||||||
|
|
||||||
add_unsigned_device_display_name(&mut keys, metadata)
|
add_unsigned_device_display_name(&mut keys, metadata)
|
||||||
.map_err(|_| Error::bad_database("invalid device keys in database"))?;
|
.map_err(|_| Error::bad_database("invalid device keys in database"))?;
|
||||||
|
@ -316,22 +368,17 @@ pub(crate) async fn get_keys_helper<F: Fn(&UserId) -> bool>(
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if let Some(master_key) =
|
if let Some(master_key) = db.users.get_master_key(user_id, &allowed_signatures)? {
|
||||||
services()
|
|
||||||
.users
|
|
||||||
.get_master_key(sender_user, user_id, &allowed_signatures)?
|
|
||||||
{
|
|
||||||
master_keys.insert(user_id.to_owned(), master_key);
|
master_keys.insert(user_id.to_owned(), master_key);
|
||||||
}
|
}
|
||||||
if let Some(self_signing_key) =
|
if let Some(self_signing_key) = db
|
||||||
services()
|
|
||||||
.users
|
.users
|
||||||
.get_self_signing_key(sender_user, user_id, &allowed_signatures)?
|
.get_self_signing_key(user_id, &allowed_signatures)?
|
||||||
{
|
{
|
||||||
self_signing_keys.insert(user_id.to_owned(), self_signing_key);
|
self_signing_keys.insert(user_id.to_owned(), self_signing_key);
|
||||||
}
|
}
|
||||||
if Some(user_id) == sender_user {
|
if Some(user_id) == sender_user {
|
||||||
if let Some(user_signing_key) = services().users.get_user_signing_key(user_id)? {
|
if let Some(user_signing_key) = db.users.get_user_signing_key(user_id)? {
|
||||||
user_signing_keys.insert(user_id.to_owned(), user_signing_key);
|
user_signing_keys.insert(user_id.to_owned(), user_signing_key);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -339,102 +386,42 @@ pub(crate) async fn get_keys_helper<F: Fn(&UserId) -> bool>(
|
||||||
|
|
||||||
let mut failures = BTreeMap::new();
|
let mut failures = BTreeMap::new();
|
||||||
|
|
||||||
let back_off = |id| match services()
|
|
||||||
.globals
|
|
||||||
.bad_query_ratelimiter
|
|
||||||
.write()
|
|
||||||
.unwrap()
|
|
||||||
.entry(id)
|
|
||||||
{
|
|
||||||
hash_map::Entry::Vacant(e) => {
|
|
||||||
e.insert((Instant::now(), 1));
|
|
||||||
}
|
|
||||||
hash_map::Entry::Occupied(mut e) => *e.get_mut() = (Instant::now(), e.get().1 + 1),
|
|
||||||
};
|
|
||||||
|
|
||||||
let mut futures: FuturesUnordered<_> = get_over_federation
|
let mut futures: FuturesUnordered<_> = get_over_federation
|
||||||
.into_iter()
|
.into_iter()
|
||||||
.map(|(server, vec)| async move {
|
.map(|(server, vec)| async move {
|
||||||
if let Some((time, tries)) = services()
|
|
||||||
.globals
|
|
||||||
.bad_query_ratelimiter
|
|
||||||
.read()
|
|
||||||
.unwrap()
|
|
||||||
.get(server)
|
|
||||||
{
|
|
||||||
// Exponential backoff
|
|
||||||
let mut min_elapsed_duration = Duration::from_secs(30) * (*tries) * (*tries);
|
|
||||||
if min_elapsed_duration > Duration::from_secs(60 * 60 * 24) {
|
|
||||||
min_elapsed_duration = Duration::from_secs(60 * 60 * 24);
|
|
||||||
}
|
|
||||||
|
|
||||||
if time.elapsed() < min_elapsed_duration {
|
|
||||||
debug!("Backing off query from {:?}", server);
|
|
||||||
return (
|
|
||||||
server,
|
|
||||||
Err(Error::BadServerResponse("bad query, still backing off")),
|
|
||||||
);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
let mut device_keys_input_fed = BTreeMap::new();
|
let mut device_keys_input_fed = BTreeMap::new();
|
||||||
for (user_id, keys) in vec {
|
for (user_id, keys) in vec {
|
||||||
device_keys_input_fed.insert(user_id.to_owned(), keys.clone());
|
device_keys_input_fed.insert(user_id.to_owned(), keys.clone());
|
||||||
}
|
}
|
||||||
(
|
(
|
||||||
server,
|
server,
|
||||||
tokio::time::timeout(
|
db.sending
|
||||||
Duration::from_secs(25),
|
.send_federation_request(
|
||||||
services().sending.send_federation_request(
|
&db.globals,
|
||||||
server,
|
server,
|
||||||
federation::keys::get_keys::v1::Request {
|
federation::keys::get_keys::v1::Request {
|
||||||
device_keys: device_keys_input_fed,
|
device_keys: device_keys_input_fed,
|
||||||
},
|
},
|
||||||
),
|
|
||||||
)
|
)
|
||||||
.await
|
.await,
|
||||||
.map_err(|_e| Error::BadServerResponse("Query took too long")),
|
|
||||||
)
|
)
|
||||||
})
|
})
|
||||||
.collect();
|
.collect();
|
||||||
|
|
||||||
while let Some((server, response)) = futures.next().await {
|
while let Some((server, response)) = futures.next().await {
|
||||||
match response {
|
match response {
|
||||||
Ok(Ok(response)) => {
|
Ok(response) => {
|
||||||
for (user, masterkey) in response.master_keys {
|
master_keys.extend(response.master_keys);
|
||||||
let (master_key_id, mut master_key) =
|
|
||||||
services().users.parse_master_key(&user, &masterkey)?;
|
|
||||||
|
|
||||||
if let Some(our_master_key) = services().users.get_key(
|
|
||||||
&master_key_id,
|
|
||||||
sender_user,
|
|
||||||
&user,
|
|
||||||
&allowed_signatures,
|
|
||||||
)? {
|
|
||||||
let (_, our_master_key) =
|
|
||||||
services().users.parse_master_key(&user, &our_master_key)?;
|
|
||||||
master_key.signatures.extend(our_master_key.signatures);
|
|
||||||
}
|
|
||||||
let json = serde_json::to_value(master_key).expect("to_value always works");
|
|
||||||
let raw = serde_json::from_value(json).expect("Raw::from_value always works");
|
|
||||||
services().users.add_cross_signing_keys(
|
|
||||||
&user, &raw, &None, &None,
|
|
||||||
false, // Dont notify. A notification would trigger another key request resulting in an endless loop
|
|
||||||
)?;
|
|
||||||
master_keys.insert(user, raw);
|
|
||||||
}
|
|
||||||
|
|
||||||
self_signing_keys.extend(response.self_signing_keys);
|
self_signing_keys.extend(response.self_signing_keys);
|
||||||
device_keys.extend(response.device_keys);
|
device_keys.extend(response.device_keys);
|
||||||
}
|
}
|
||||||
_ => {
|
Err(_e) => {
|
||||||
back_off(server.to_owned());
|
|
||||||
failures.insert(server.to_string(), json!({}));
|
failures.insert(server.to_string(), json!({}));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
Ok(get_keys::v3::Response {
|
Ok(get_keys::Response {
|
||||||
master_keys,
|
master_keys,
|
||||||
self_signing_keys,
|
self_signing_keys,
|
||||||
user_signing_keys,
|
user_signing_keys,
|
||||||
|
@ -445,7 +432,7 @@ pub(crate) async fn get_keys_helper<F: Fn(&UserId) -> bool>(
|
||||||
|
|
||||||
fn add_unsigned_device_display_name(
|
fn add_unsigned_device_display_name(
|
||||||
keys: &mut Raw<ruma::encryption::DeviceKeys>,
|
keys: &mut Raw<ruma::encryption::DeviceKeys>,
|
||||||
metadata: ruma::api::client::device::Device,
|
metadata: ruma::api::client::r0::device::Device,
|
||||||
) -> serde_json::Result<()> {
|
) -> serde_json::Result<()> {
|
||||||
if let Some(display_name) = metadata.display_name {
|
if let Some(display_name) = metadata.display_name {
|
||||||
let mut object = keys.deserialize_as::<serde_json::Map<String, serde_json::Value>>()?;
|
let mut object = keys.deserialize_as::<serde_json::Map<String, serde_json::Value>>()?;
|
||||||
|
@ -462,14 +449,15 @@ fn add_unsigned_device_display_name(
|
||||||
}
|
}
|
||||||
|
|
||||||
pub(crate) async fn claim_keys_helper(
|
pub(crate) async fn claim_keys_helper(
|
||||||
one_time_keys_input: &BTreeMap<OwnedUserId, BTreeMap<OwnedDeviceId, DeviceKeyAlgorithm>>,
|
one_time_keys_input: &BTreeMap<Box<UserId>, BTreeMap<Box<DeviceId>, DeviceKeyAlgorithm>>,
|
||||||
) -> Result<claim_keys::v3::Response> {
|
db: &Database,
|
||||||
|
) -> Result<claim_keys::Response> {
|
||||||
let mut one_time_keys = BTreeMap::new();
|
let mut one_time_keys = BTreeMap::new();
|
||||||
|
|
||||||
let mut get_over_federation = BTreeMap::new();
|
let mut get_over_federation = BTreeMap::new();
|
||||||
|
|
||||||
for (user_id, map) in one_time_keys_input {
|
for (user_id, map) in one_time_keys_input {
|
||||||
if user_id.server_name() != services().globals.server_name() {
|
if user_id.server_name() != db.globals.server_name() {
|
||||||
get_over_federation
|
get_over_federation
|
||||||
.entry(user_id.server_name())
|
.entry(user_id.server_name())
|
||||||
.or_insert_with(Vec::new)
|
.or_insert_with(Vec::new)
|
||||||
|
@ -479,9 +467,8 @@ pub(crate) async fn claim_keys_helper(
|
||||||
let mut container = BTreeMap::new();
|
let mut container = BTreeMap::new();
|
||||||
for (device_id, key_algorithm) in map {
|
for (device_id, key_algorithm) in map {
|
||||||
if let Some(one_time_keys) =
|
if let Some(one_time_keys) =
|
||||||
services()
|
db.users
|
||||||
.users
|
.take_one_time_key(user_id, device_id, key_algorithm, &db.globals)?
|
||||||
.take_one_time_key(user_id, device_id, key_algorithm)?
|
|
||||||
{
|
{
|
||||||
let mut c = BTreeMap::new();
|
let mut c = BTreeMap::new();
|
||||||
c.insert(one_time_keys.0, one_time_keys.1);
|
c.insert(one_time_keys.0, one_time_keys.1);
|
||||||
|
@ -493,40 +480,30 @@ pub(crate) async fn claim_keys_helper(
|
||||||
|
|
||||||
let mut failures = BTreeMap::new();
|
let mut failures = BTreeMap::new();
|
||||||
|
|
||||||
let mut futures: FuturesUnordered<_> = get_over_federation
|
for (server, vec) in get_over_federation {
|
||||||
.into_iter()
|
|
||||||
.map(|(server, vec)| async move {
|
|
||||||
let mut one_time_keys_input_fed = BTreeMap::new();
|
let mut one_time_keys_input_fed = BTreeMap::new();
|
||||||
for (user_id, keys) in vec {
|
for (user_id, keys) in vec {
|
||||||
one_time_keys_input_fed.insert(user_id.clone(), keys.clone());
|
one_time_keys_input_fed.insert(user_id.clone(), keys.clone());
|
||||||
}
|
}
|
||||||
(
|
// Ignore failures
|
||||||
server,
|
if let Ok(keys) = db
|
||||||
services()
|
|
||||||
.sending
|
.sending
|
||||||
.send_federation_request(
|
.send_federation_request(
|
||||||
|
&db.globals,
|
||||||
server,
|
server,
|
||||||
federation::keys::claim_keys::v1::Request {
|
federation::keys::claim_keys::v1::Request {
|
||||||
one_time_keys: one_time_keys_input_fed,
|
one_time_keys: one_time_keys_input_fed,
|
||||||
},
|
},
|
||||||
)
|
)
|
||||||
.await,
|
.await
|
||||||
)
|
{
|
||||||
})
|
|
||||||
.collect();
|
|
||||||
|
|
||||||
while let Some((server, response)) = futures.next().await {
|
|
||||||
match response {
|
|
||||||
Ok(keys) => {
|
|
||||||
one_time_keys.extend(keys.one_time_keys);
|
one_time_keys.extend(keys.one_time_keys);
|
||||||
}
|
} else {
|
||||||
Err(_e) => {
|
|
||||||
failures.insert(server.to_string(), json!({}));
|
failures.insert(server.to_string(), json!({}));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
|
||||||
|
|
||||||
Ok(claim_keys::v3::Response {
|
Ok(claim_keys::Response {
|
||||||
failures,
|
failures,
|
||||||
one_time_keys,
|
one_time_keys,
|
||||||
})
|
})
|
|
@ -1,25 +1,32 @@
|
||||||
use std::time::Duration;
|
use crate::{
|
||||||
|
database::{media::FileMeta, DatabaseGuard},
|
||||||
use crate::{service::media::FileMeta, services, utils, Error, Result, Ruma};
|
utils, ConduitResult, Error, Ruma,
|
||||||
|
};
|
||||||
use ruma::api::client::{
|
use ruma::api::client::{
|
||||||
error::ErrorKind,
|
error::ErrorKind,
|
||||||
media::{
|
r0::media::{
|
||||||
create_content, get_content, get_content_as_filename, get_content_thumbnail,
|
create_content, get_content, get_content_as_filename, get_content_thumbnail,
|
||||||
get_media_config,
|
get_media_config,
|
||||||
},
|
},
|
||||||
};
|
};
|
||||||
|
|
||||||
|
#[cfg(feature = "conduit_bin")]
|
||||||
|
use rocket::{get, post};
|
||||||
|
|
||||||
const MXC_LENGTH: usize = 32;
|
const MXC_LENGTH: usize = 32;
|
||||||
|
|
||||||
/// # `GET /_matrix/media/r0/config`
|
/// # `GET /_matrix/media/r0/config`
|
||||||
///
|
///
|
||||||
/// Returns max upload size.
|
/// Returns max upload size.
|
||||||
|
#[cfg_attr(feature = "conduit_bin", get("/_matrix/media/r0/config"))]
|
||||||
|
#[tracing::instrument(skip(db))]
|
||||||
pub async fn get_media_config_route(
|
pub async fn get_media_config_route(
|
||||||
_body: Ruma<get_media_config::v3::Request>,
|
db: DatabaseGuard,
|
||||||
) -> Result<get_media_config::v3::Response> {
|
) -> ConduitResult<get_media_config::Response> {
|
||||||
Ok(get_media_config::v3::Response {
|
Ok(get_media_config::Response {
|
||||||
upload_size: services().globals.max_request_size().into(),
|
upload_size: db.globals.max_request_size().into(),
|
||||||
})
|
}
|
||||||
|
.into())
|
||||||
}
|
}
|
||||||
|
|
||||||
/// # `POST /_matrix/media/r0/upload`
|
/// # `POST /_matrix/media/r0/upload`
|
||||||
|
@ -28,59 +35,69 @@ pub async fn get_media_config_route(
|
||||||
///
|
///
|
||||||
/// - Some metadata will be saved in the database
|
/// - Some metadata will be saved in the database
|
||||||
/// - Media will be saved in the media/ directory
|
/// - Media will be saved in the media/ directory
|
||||||
|
#[cfg_attr(
|
||||||
|
feature = "conduit_bin",
|
||||||
|
post("/_matrix/media/r0/upload", data = "<body>")
|
||||||
|
)]
|
||||||
|
#[tracing::instrument(skip(db, body))]
|
||||||
pub async fn create_content_route(
|
pub async fn create_content_route(
|
||||||
body: Ruma<create_content::v3::Request>,
|
db: DatabaseGuard,
|
||||||
) -> Result<create_content::v3::Response> {
|
body: Ruma<create_content::Request<'_>>,
|
||||||
|
) -> ConduitResult<create_content::Response> {
|
||||||
let mxc = format!(
|
let mxc = format!(
|
||||||
"mxc://{}/{}",
|
"mxc://{}/{}",
|
||||||
services().globals.server_name(),
|
db.globals.server_name(),
|
||||||
utils::random_string(MXC_LENGTH)
|
utils::random_string(MXC_LENGTH)
|
||||||
);
|
);
|
||||||
|
|
||||||
services()
|
db.media
|
||||||
.media
|
|
||||||
.create(
|
.create(
|
||||||
mxc.clone(),
|
mxc.clone(),
|
||||||
body.filename
|
&db.globals,
|
||||||
|
&body
|
||||||
|
.filename
|
||||||
.as_ref()
|
.as_ref()
|
||||||
.map(|filename| "inline; filename=".to_owned() + filename)
|
.map(|filename| "inline; filename=".to_owned() + filename)
|
||||||
.as_deref(),
|
.as_deref(),
|
||||||
body.content_type.as_deref(),
|
&body.content_type.as_deref(),
|
||||||
&body.file,
|
&body.file,
|
||||||
)
|
)
|
||||||
.await?;
|
.await?;
|
||||||
|
|
||||||
Ok(create_content::v3::Response {
|
db.flush()?;
|
||||||
content_uri: mxc.into(),
|
|
||||||
|
Ok(create_content::Response {
|
||||||
|
content_uri: mxc.try_into().expect("Invalid mxc:// URI"),
|
||||||
blurhash: None,
|
blurhash: None,
|
||||||
})
|
}
|
||||||
|
.into())
|
||||||
}
|
}
|
||||||
|
|
||||||
pub async fn get_remote_content(
|
pub async fn get_remote_content(
|
||||||
|
db: &DatabaseGuard,
|
||||||
mxc: &str,
|
mxc: &str,
|
||||||
server_name: &ruma::ServerName,
|
server_name: &ruma::ServerName,
|
||||||
media_id: String,
|
media_id: &str,
|
||||||
) -> Result<get_content::v3::Response, Error> {
|
) -> Result<get_content::Response, Error> {
|
||||||
let content_response = services()
|
let content_response = db
|
||||||
.sending
|
.sending
|
||||||
.send_federation_request(
|
.send_federation_request(
|
||||||
|
&db.globals,
|
||||||
server_name,
|
server_name,
|
||||||
get_content::v3::Request {
|
get_content::Request {
|
||||||
allow_remote: false,
|
allow_remote: false,
|
||||||
server_name: server_name.to_owned(),
|
server_name,
|
||||||
media_id,
|
media_id,
|
||||||
timeout_ms: Duration::from_secs(20),
|
|
||||||
allow_redirect: false,
|
|
||||||
},
|
},
|
||||||
)
|
)
|
||||||
.await?;
|
.await?;
|
||||||
|
|
||||||
services()
|
db.media
|
||||||
.media
|
|
||||||
.create(
|
.create(
|
||||||
mxc.to_owned(),
|
mxc.to_string(),
|
||||||
content_response.content_disposition.as_deref(),
|
&db.globals,
|
||||||
content_response.content_type.as_deref(),
|
&content_response.content_disposition.as_deref(),
|
||||||
|
&content_response.content_type.as_deref(),
|
||||||
&content_response.file,
|
&content_response.file,
|
||||||
)
|
)
|
||||||
.await?;
|
.await?;
|
||||||
|
@ -93,27 +110,33 @@ pub async fn get_remote_content(
|
||||||
/// Load media from our server or over federation.
|
/// Load media from our server or over federation.
|
||||||
///
|
///
|
||||||
/// - Only allows federation if `allow_remote` is true
|
/// - Only allows federation if `allow_remote` is true
|
||||||
|
#[cfg_attr(
|
||||||
|
feature = "conduit_bin",
|
||||||
|
get("/_matrix/media/r0/download/<_>/<_>", data = "<body>")
|
||||||
|
)]
|
||||||
|
#[tracing::instrument(skip(db, body))]
|
||||||
pub async fn get_content_route(
|
pub async fn get_content_route(
|
||||||
body: Ruma<get_content::v3::Request>,
|
db: DatabaseGuard,
|
||||||
) -> Result<get_content::v3::Response> {
|
body: Ruma<get_content::Request<'_>>,
|
||||||
|
) -> ConduitResult<get_content::Response> {
|
||||||
let mxc = format!("mxc://{}/{}", body.server_name, body.media_id);
|
let mxc = format!("mxc://{}/{}", body.server_name, body.media_id);
|
||||||
|
|
||||||
if let Some(FileMeta {
|
if let Some(FileMeta {
|
||||||
content_disposition,
|
content_disposition,
|
||||||
content_type,
|
content_type,
|
||||||
file,
|
file,
|
||||||
}) = services().media.get(mxc.clone()).await?
|
}) = db.media.get(&db.globals, &mxc).await?
|
||||||
{
|
{
|
||||||
Ok(get_content::v3::Response {
|
Ok(get_content::Response {
|
||||||
file,
|
file,
|
||||||
content_type,
|
content_type,
|
||||||
content_disposition,
|
content_disposition,
|
||||||
cross_origin_resource_policy: Some("cross-origin".to_owned()),
|
}
|
||||||
})
|
.into())
|
||||||
} else if &*body.server_name != services().globals.server_name() && body.allow_remote {
|
} else if &*body.server_name != db.globals.server_name() && body.allow_remote {
|
||||||
let remote_content_response =
|
let remote_content_response =
|
||||||
get_remote_content(&mxc, &body.server_name, body.media_id.clone()).await?;
|
get_remote_content(&db, &mxc, &body.server_name, &body.media_id).await?;
|
||||||
Ok(remote_content_response)
|
Ok(remote_content_response.into())
|
||||||
} else {
|
} else {
|
||||||
Err(Error::BadRequest(ErrorKind::NotFound, "Media not found."))
|
Err(Error::BadRequest(ErrorKind::NotFound, "Media not found."))
|
||||||
}
|
}
|
||||||
|
@ -124,33 +147,39 @@ pub async fn get_content_route(
|
||||||
/// Load media from our server or over federation, permitting desired filename.
|
/// Load media from our server or over federation, permitting desired filename.
|
||||||
///
|
///
|
||||||
/// - Only allows federation if `allow_remote` is true
|
/// - Only allows federation if `allow_remote` is true
|
||||||
|
#[cfg_attr(
|
||||||
|
feature = "conduit_bin",
|
||||||
|
get("/_matrix/media/r0/download/<_>/<_>/<_>", data = "<body>")
|
||||||
|
)]
|
||||||
|
#[tracing::instrument(skip(db, body))]
|
||||||
pub async fn get_content_as_filename_route(
|
pub async fn get_content_as_filename_route(
|
||||||
body: Ruma<get_content_as_filename::v3::Request>,
|
db: DatabaseGuard,
|
||||||
) -> Result<get_content_as_filename::v3::Response> {
|
body: Ruma<get_content_as_filename::Request<'_>>,
|
||||||
|
) -> ConduitResult<get_content_as_filename::Response> {
|
||||||
let mxc = format!("mxc://{}/{}", body.server_name, body.media_id);
|
let mxc = format!("mxc://{}/{}", body.server_name, body.media_id);
|
||||||
|
|
||||||
if let Some(FileMeta {
|
if let Some(FileMeta {
|
||||||
content_disposition: _,
|
content_disposition: _,
|
||||||
content_type,
|
content_type,
|
||||||
file,
|
file,
|
||||||
}) = services().media.get(mxc.clone()).await?
|
}) = db.media.get(&db.globals, &mxc).await?
|
||||||
{
|
{
|
||||||
Ok(get_content_as_filename::v3::Response {
|
Ok(get_content_as_filename::Response {
|
||||||
file,
|
file,
|
||||||
content_type,
|
content_type,
|
||||||
content_disposition: Some(format!("inline; filename={}", body.filename)),
|
content_disposition: Some(format!("inline; filename={}", body.filename)),
|
||||||
cross_origin_resource_policy: Some("cross-origin".to_owned()),
|
}
|
||||||
})
|
.into())
|
||||||
} else if &*body.server_name != services().globals.server_name() && body.allow_remote {
|
} else if &*body.server_name != db.globals.server_name() && body.allow_remote {
|
||||||
let remote_content_response =
|
let remote_content_response =
|
||||||
get_remote_content(&mxc, &body.server_name, body.media_id.clone()).await?;
|
get_remote_content(&db, &mxc, &body.server_name, &body.media_id).await?;
|
||||||
|
|
||||||
Ok(get_content_as_filename::v3::Response {
|
Ok(get_content_as_filename::Response {
|
||||||
content_disposition: Some(format!("inline: filename={}", body.filename)),
|
content_disposition: Some(format!("inline: filename={}", body.filename)),
|
||||||
content_type: remote_content_response.content_type,
|
content_type: remote_content_response.content_type,
|
||||||
file: remote_content_response.file,
|
file: remote_content_response.file,
|
||||||
cross_origin_resource_policy: Some("cross-origin".to_owned()),
|
}
|
||||||
})
|
.into())
|
||||||
} else {
|
} else {
|
||||||
Err(Error::BadRequest(ErrorKind::NotFound, "Media not found."))
|
Err(Error::BadRequest(ErrorKind::NotFound, "Media not found."))
|
||||||
}
|
}
|
||||||
|
@ -161,17 +190,24 @@ pub async fn get_content_as_filename_route(
|
||||||
/// Load media thumbnail from our server or over federation.
|
/// Load media thumbnail from our server or over federation.
|
||||||
///
|
///
|
||||||
/// - Only allows federation if `allow_remote` is true
|
/// - Only allows federation if `allow_remote` is true
|
||||||
|
#[cfg_attr(
|
||||||
|
feature = "conduit_bin",
|
||||||
|
get("/_matrix/media/r0/thumbnail/<_>/<_>", data = "<body>")
|
||||||
|
)]
|
||||||
|
#[tracing::instrument(skip(db, body))]
|
||||||
pub async fn get_content_thumbnail_route(
|
pub async fn get_content_thumbnail_route(
|
||||||
body: Ruma<get_content_thumbnail::v3::Request>,
|
db: DatabaseGuard,
|
||||||
) -> Result<get_content_thumbnail::v3::Response> {
|
body: Ruma<get_content_thumbnail::Request<'_>>,
|
||||||
|
) -> ConduitResult<get_content_thumbnail::Response> {
|
||||||
let mxc = format!("mxc://{}/{}", body.server_name, body.media_id);
|
let mxc = format!("mxc://{}/{}", body.server_name, body.media_id);
|
||||||
|
|
||||||
if let Some(FileMeta {
|
if let Some(FileMeta {
|
||||||
content_type, file, ..
|
content_type, file, ..
|
||||||
}) = services()
|
}) = db
|
||||||
.media
|
.media
|
||||||
.get_thumbnail(
|
.get_thumbnail(
|
||||||
mxc.clone(),
|
&mxc,
|
||||||
|
&db.globals,
|
||||||
body.width
|
body.width
|
||||||
.try_into()
|
.try_into()
|
||||||
.map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Width is invalid."))?,
|
.map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Width is invalid."))?,
|
||||||
|
@ -181,42 +217,37 @@ pub async fn get_content_thumbnail_route(
|
||||||
)
|
)
|
||||||
.await?
|
.await?
|
||||||
{
|
{
|
||||||
Ok(get_content_thumbnail::v3::Response {
|
Ok(get_content_thumbnail::Response { file, content_type }.into())
|
||||||
file,
|
} else if &*body.server_name != db.globals.server_name() && body.allow_remote {
|
||||||
content_type,
|
let get_thumbnail_response = db
|
||||||
cross_origin_resource_policy: Some("cross-origin".to_owned()),
|
|
||||||
})
|
|
||||||
} else if &*body.server_name != services().globals.server_name() && body.allow_remote {
|
|
||||||
let get_thumbnail_response = services()
|
|
||||||
.sending
|
.sending
|
||||||
.send_federation_request(
|
.send_federation_request(
|
||||||
|
&db.globals,
|
||||||
&body.server_name,
|
&body.server_name,
|
||||||
get_content_thumbnail::v3::Request {
|
get_content_thumbnail::Request {
|
||||||
allow_remote: false,
|
allow_remote: false,
|
||||||
height: body.height,
|
height: body.height,
|
||||||
width: body.width,
|
width: body.width,
|
||||||
method: body.method.clone(),
|
method: body.method.clone(),
|
||||||
server_name: body.server_name.clone(),
|
server_name: &body.server_name,
|
||||||
media_id: body.media_id.clone(),
|
media_id: &body.media_id,
|
||||||
timeout_ms: Duration::from_secs(20),
|
|
||||||
allow_redirect: false,
|
|
||||||
},
|
},
|
||||||
)
|
)
|
||||||
.await?;
|
.await?;
|
||||||
|
|
||||||
services()
|
db.media
|
||||||
.media
|
|
||||||
.upload_thumbnail(
|
.upload_thumbnail(
|
||||||
mxc,
|
mxc,
|
||||||
None,
|
&db.globals,
|
||||||
get_thumbnail_response.content_type.as_deref(),
|
&None,
|
||||||
|
&get_thumbnail_response.content_type,
|
||||||
body.width.try_into().expect("all UInts are valid u32s"),
|
body.width.try_into().expect("all UInts are valid u32s"),
|
||||||
body.height.try_into().expect("all UInts are valid u32s"),
|
body.height.try_into().expect("all UInts are valid u32s"),
|
||||||
&get_thumbnail_response.file,
|
&get_thumbnail_response.file,
|
||||||
)
|
)
|
||||||
.await?;
|
.await?;
|
||||||
|
|
||||||
Ok(get_thumbnail_response)
|
Ok(get_thumbnail_response.into())
|
||||||
} else {
|
} else {
|
||||||
Err(Error::BadRequest(ErrorKind::NotFound, "Media not found."))
|
Err(Error::BadRequest(ErrorKind::NotFound, "Media not found."))
|
||||||
}
|
}
|
1140
src/client_server/membership.rs
Normal file
1140
src/client_server/membership.rs
Normal file
File diff suppressed because it is too large
Load diff
|
@ -1,19 +1,19 @@
|
||||||
use crate::{
|
use crate::{database::DatabaseGuard, pdu::PduBuilder, utils, ConduitResult, Error, Ruma};
|
||||||
service::{pdu::PduBuilder, rooms::timeline::PduCount},
|
|
||||||
services, utils, Error, Result, Ruma,
|
|
||||||
};
|
|
||||||
use ruma::{
|
use ruma::{
|
||||||
api::client::{
|
api::client::{
|
||||||
error::ErrorKind,
|
error::ErrorKind,
|
||||||
message::{get_message_events, send_message_event},
|
r0::message::{get_message_events, send_message_event},
|
||||||
},
|
},
|
||||||
events::{StateEventType, TimelineEventType},
|
events::EventType,
|
||||||
};
|
};
|
||||||
use std::{
|
use std::{
|
||||||
collections::{BTreeMap, HashSet},
|
collections::{BTreeMap, HashSet},
|
||||||
sync::Arc,
|
sync::Arc,
|
||||||
};
|
};
|
||||||
|
|
||||||
|
#[cfg(feature = "conduit_bin")]
|
||||||
|
use rocket::{get, put};
|
||||||
|
|
||||||
/// # `PUT /_matrix/client/r0/rooms/{roomId}/send/{eventType}/{txnId}`
|
/// # `PUT /_matrix/client/r0/rooms/{roomId}/send/{eventType}/{txnId}`
|
||||||
///
|
///
|
||||||
/// Send a message event into the room.
|
/// Send a message event into the room.
|
||||||
|
@ -21,15 +21,20 @@ use std::{
|
||||||
/// - Is a NOOP if the txn id was already used before and returns the same event id again
|
/// - Is a NOOP if the txn id was already used before and returns the same event id again
|
||||||
/// - The only requirement for the content is that it has to be valid json
|
/// - The only requirement for the content is that it has to be valid json
|
||||||
/// - Tries to send the event into the room, auth rules will determine if it is allowed
|
/// - Tries to send the event into the room, auth rules will determine if it is allowed
|
||||||
|
#[cfg_attr(
|
||||||
|
feature = "conduit_bin",
|
||||||
|
put("/_matrix/client/r0/rooms/<_>/send/<_>/<_>", data = "<body>")
|
||||||
|
)]
|
||||||
|
#[tracing::instrument(skip(db, body))]
|
||||||
pub async fn send_message_event_route(
|
pub async fn send_message_event_route(
|
||||||
body: Ruma<send_message_event::v3::Request>,
|
db: DatabaseGuard,
|
||||||
) -> Result<send_message_event::v3::Response> {
|
body: Ruma<send_message_event::Request<'_>>,
|
||||||
|
) -> ConduitResult<send_message_event::Response> {
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
let sender_device = body.sender_device.as_deref();
|
let sender_device = body.sender_device.as_deref();
|
||||||
|
|
||||||
let mutex_state = Arc::clone(
|
let mutex_state = Arc::clone(
|
||||||
services()
|
db.globals
|
||||||
.globals
|
|
||||||
.roomid_mutex_state
|
.roomid_mutex_state
|
||||||
.write()
|
.write()
|
||||||
.unwrap()
|
.unwrap()
|
||||||
|
@ -39,9 +44,7 @@ pub async fn send_message_event_route(
|
||||||
let state_lock = mutex_state.lock().await;
|
let state_lock = mutex_state.lock().await;
|
||||||
|
|
||||||
// Forbid m.room.encrypted if encryption is disabled
|
// Forbid m.room.encrypted if encryption is disabled
|
||||||
if TimelineEventType::RoomEncrypted == body.event_type.to_string().into()
|
if &body.event_type == "m.room.encrypted" && !db.globals.allow_encryption() {
|
||||||
&& !services().globals.allow_encryption()
|
|
||||||
{
|
|
||||||
return Err(Error::BadRequest(
|
return Err(Error::BadRequest(
|
||||||
ErrorKind::Forbidden,
|
ErrorKind::Forbidden,
|
||||||
"Encryption has been disabled",
|
"Encryption has been disabled",
|
||||||
|
@ -50,8 +53,7 @@ pub async fn send_message_event_route(
|
||||||
|
|
||||||
// Check if this is a new transaction id
|
// Check if this is a new transaction id
|
||||||
if let Some(response) =
|
if let Some(response) =
|
||||||
services()
|
db.transaction_ids
|
||||||
.transaction_ids
|
|
||||||
.existing_txnid(sender_user, sender_device, &body.txn_id)?
|
.existing_txnid(sender_user, sender_device, &body.txn_id)?
|
||||||
{
|
{
|
||||||
// The client might have sent a txnid of the /sendToDevice endpoint
|
// The client might have sent a txnid of the /sendToDevice endpoint
|
||||||
|
@ -67,15 +69,15 @@ pub async fn send_message_event_route(
|
||||||
.map_err(|_| Error::bad_database("Invalid txnid bytes in database."))?
|
.map_err(|_| Error::bad_database("Invalid txnid bytes in database."))?
|
||||||
.try_into()
|
.try_into()
|
||||||
.map_err(|_| Error::bad_database("Invalid event id in txnid data."))?;
|
.map_err(|_| Error::bad_database("Invalid event id in txnid data."))?;
|
||||||
return Ok(send_message_event::v3::Response { event_id });
|
return Ok(send_message_event::Response { event_id }.into());
|
||||||
}
|
}
|
||||||
|
|
||||||
let mut unsigned = BTreeMap::new();
|
let mut unsigned = BTreeMap::new();
|
||||||
unsigned.insert("transaction_id".to_owned(), body.txn_id.to_string().into());
|
unsigned.insert("transaction_id".to_owned(), body.txn_id.to_string().into());
|
||||||
|
|
||||||
let event_id = services().rooms.timeline.build_and_append_pdu(
|
let event_id = db.rooms.build_and_append_pdu(
|
||||||
PduBuilder {
|
PduBuilder {
|
||||||
event_type: body.event_type.to_string().into(),
|
event_type: EventType::from(&*body.event_type),
|
||||||
content: serde_json::from_str(body.body.body.json().get())
|
content: serde_json::from_str(body.body.body.json().get())
|
||||||
.map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Invalid JSON body."))?,
|
.map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Invalid JSON body."))?,
|
||||||
unsigned: Some(unsigned),
|
unsigned: Some(unsigned),
|
||||||
|
@ -84,10 +86,11 @@ pub async fn send_message_event_route(
|
||||||
},
|
},
|
||||||
sender_user,
|
sender_user,
|
||||||
&body.room_id,
|
&body.room_id,
|
||||||
|
&db,
|
||||||
&state_lock,
|
&state_lock,
|
||||||
)?;
|
)?;
|
||||||
|
|
||||||
services().transaction_ids.add_txnid(
|
db.transaction_ids.add_txnid(
|
||||||
sender_user,
|
sender_user,
|
||||||
sender_device,
|
sender_device,
|
||||||
&body.txn_id,
|
&body.txn_id,
|
||||||
|
@ -96,9 +99,9 @@ pub async fn send_message_event_route(
|
||||||
|
|
||||||
drop(state_lock);
|
drop(state_lock);
|
||||||
|
|
||||||
Ok(send_message_event::v3::Response::new(
|
db.flush()?;
|
||||||
(*event_id).to_owned(),
|
|
||||||
))
|
Ok(send_message_event::Response::new((*event_id).to_owned()).into())
|
||||||
}
|
}
|
||||||
|
|
||||||
/// # `GET /_matrix/client/r0/rooms/{roomId}/messages`
|
/// # `GET /_matrix/client/r0/rooms/{roomId}/messages`
|
||||||
|
@ -107,63 +110,63 @@ pub async fn send_message_event_route(
|
||||||
///
|
///
|
||||||
/// - Only works if the user is joined (TODO: always allow, but only show events where the user was
|
/// - Only works if the user is joined (TODO: always allow, but only show events where the user was
|
||||||
/// joined, depending on history_visibility)
|
/// joined, depending on history_visibility)
|
||||||
|
#[cfg_attr(
|
||||||
|
feature = "conduit_bin",
|
||||||
|
get("/_matrix/client/r0/rooms/<_>/messages", data = "<body>")
|
||||||
|
)]
|
||||||
|
#[tracing::instrument(skip(db, body))]
|
||||||
pub async fn get_message_events_route(
|
pub async fn get_message_events_route(
|
||||||
body: Ruma<get_message_events::v3::Request>,
|
db: DatabaseGuard,
|
||||||
) -> Result<get_message_events::v3::Response> {
|
body: Ruma<get_message_events::Request<'_>>,
|
||||||
|
) -> ConduitResult<get_message_events::Response> {
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
let sender_device = body.sender_device.as_ref().expect("user is authenticated");
|
let sender_device = body.sender_device.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
let from = match body.from.clone() {
|
if !db.rooms.is_joined(sender_user, &body.room_id)? {
|
||||||
Some(from) => PduCount::try_from_string(&from)?,
|
return Err(Error::BadRequest(
|
||||||
None => match body.dir {
|
ErrorKind::Forbidden,
|
||||||
ruma::api::Direction::Forward => PduCount::min(),
|
"You don't have permission to view this room.",
|
||||||
ruma::api::Direction::Backward => PduCount::max(),
|
));
|
||||||
},
|
}
|
||||||
};
|
|
||||||
|
|
||||||
let to = body
|
let from = body
|
||||||
.to
|
.from
|
||||||
.as_ref()
|
.clone()
|
||||||
.and_then(|t| PduCount::try_from_string(t).ok());
|
.parse()
|
||||||
|
.map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid `from` value."))?;
|
||||||
|
|
||||||
services().rooms.lazy_loading.lazy_load_confirm_delivery(
|
let to = body.to.as_ref().map(|t| t.parse());
|
||||||
sender_user,
|
|
||||||
sender_device,
|
|
||||||
&body.room_id,
|
|
||||||
from,
|
|
||||||
)?;
|
|
||||||
|
|
||||||
let limit = u64::from(body.limit).min(100) as usize;
|
db.rooms
|
||||||
|
.lazy_load_confirm_delivery(sender_user, sender_device, &body.room_id, from)?;
|
||||||
|
|
||||||
|
// Use limit or else 10
|
||||||
|
let limit = body.limit.try_into().map_or(10_usize, |l: u32| l as usize);
|
||||||
|
|
||||||
let next_token;
|
let next_token;
|
||||||
|
|
||||||
let mut resp = get_message_events::v3::Response::new();
|
let mut resp = get_message_events::Response::new();
|
||||||
|
|
||||||
let mut lazy_loaded = HashSet::new();
|
let mut lazy_loaded = HashSet::new();
|
||||||
|
|
||||||
match body.dir {
|
match body.dir {
|
||||||
ruma::api::Direction::Forward => {
|
get_message_events::Direction::Forward => {
|
||||||
let events_after: Vec<_> = services()
|
let events_after: Vec<_> = db
|
||||||
.rooms
|
.rooms
|
||||||
.timeline
|
|
||||||
.pdus_after(sender_user, &body.room_id, from)?
|
.pdus_after(sender_user, &body.room_id, from)?
|
||||||
.take(limit)
|
.take(limit)
|
||||||
.filter_map(|r| r.ok()) // Filter out buggy events
|
.filter_map(|r| r.ok()) // Filter out buggy events
|
||||||
.filter(|(_, pdu)| {
|
.filter_map(|(pdu_id, pdu)| {
|
||||||
services()
|
db.rooms
|
||||||
.rooms
|
.pdu_count(&pdu_id)
|
||||||
.state_accessor
|
.map(|pdu_count| (pdu_count, pdu))
|
||||||
.user_can_see_event(sender_user, &body.room_id, &pdu.event_id)
|
.ok()
|
||||||
.unwrap_or(false)
|
|
||||||
})
|
})
|
||||||
.take_while(|&(k, _)| Some(k) != to) // Stop at `to`
|
.take_while(|&(k, _)| Some(Ok(k)) != to) // Stop at `to`
|
||||||
.collect();
|
.collect();
|
||||||
|
|
||||||
for (_, event) in &events_after {
|
for (_, event) in &events_after {
|
||||||
/* TODO: Remove this when these are resolved:
|
if !db.rooms.lazy_load_was_sent_before(
|
||||||
* https://github.com/vector-im/element-android/issues/3417
|
|
||||||
* https://github.com/vector-im/element-web/issues/21034
|
|
||||||
if !services().rooms.lazy_loading.lazy_load_was_sent_before(
|
|
||||||
sender_user,
|
sender_user,
|
||||||
sender_device,
|
sender_device,
|
||||||
&body.room_id,
|
&body.room_id,
|
||||||
|
@ -171,8 +174,6 @@ pub async fn get_message_events_route(
|
||||||
)? {
|
)? {
|
||||||
lazy_loaded.insert(event.sender.clone());
|
lazy_loaded.insert(event.sender.clone());
|
||||||
}
|
}
|
||||||
*/
|
|
||||||
lazy_loaded.insert(event.sender.clone());
|
|
||||||
}
|
}
|
||||||
|
|
||||||
next_token = events_after.last().map(|(count, _)| count).copied();
|
next_token = events_after.last().map(|(count, _)| count).copied();
|
||||||
|
@ -182,37 +183,27 @@ pub async fn get_message_events_route(
|
||||||
.map(|(_, pdu)| pdu.to_room_event())
|
.map(|(_, pdu)| pdu.to_room_event())
|
||||||
.collect();
|
.collect();
|
||||||
|
|
||||||
resp.start = from.stringify();
|
resp.start = body.from.to_owned();
|
||||||
resp.end = next_token.map(|count| count.stringify());
|
resp.end = next_token.map(|count| count.to_string());
|
||||||
resp.chunk = events_after;
|
resp.chunk = events_after;
|
||||||
}
|
}
|
||||||
ruma::api::Direction::Backward => {
|
get_message_events::Direction::Backward => {
|
||||||
services()
|
let events_before: Vec<_> = db
|
||||||
.rooms
|
.rooms
|
||||||
.timeline
|
|
||||||
.backfill_if_required(&body.room_id, from)
|
|
||||||
.await?;
|
|
||||||
let events_before: Vec<_> = services()
|
|
||||||
.rooms
|
|
||||||
.timeline
|
|
||||||
.pdus_until(sender_user, &body.room_id, from)?
|
.pdus_until(sender_user, &body.room_id, from)?
|
||||||
.take(limit)
|
.take(limit)
|
||||||
.filter_map(|r| r.ok()) // Filter out buggy events
|
.filter_map(|r| r.ok()) // Filter out buggy events
|
||||||
.filter(|(_, pdu)| {
|
.filter_map(|(pdu_id, pdu)| {
|
||||||
services()
|
db.rooms
|
||||||
.rooms
|
.pdu_count(&pdu_id)
|
||||||
.state_accessor
|
.map(|pdu_count| (pdu_count, pdu))
|
||||||
.user_can_see_event(sender_user, &body.room_id, &pdu.event_id)
|
.ok()
|
||||||
.unwrap_or(false)
|
|
||||||
})
|
})
|
||||||
.take_while(|&(k, _)| Some(k) != to) // Stop at `to`
|
.take_while(|&(k, _)| Some(Ok(k)) != to) // Stop at `to`
|
||||||
.collect();
|
.collect();
|
||||||
|
|
||||||
for (_, event) in &events_before {
|
for (_, event) in &events_before {
|
||||||
/* TODO: Remove this when these are resolved:
|
if !db.rooms.lazy_load_was_sent_before(
|
||||||
* https://github.com/vector-im/element-android/issues/3417
|
|
||||||
* https://github.com/vector-im/element-web/issues/21034
|
|
||||||
if !services().rooms.lazy_loading.lazy_load_was_sent_before(
|
|
||||||
sender_user,
|
sender_user,
|
||||||
sender_device,
|
sender_device,
|
||||||
&body.room_id,
|
&body.room_id,
|
||||||
|
@ -220,8 +211,6 @@ pub async fn get_message_events_route(
|
||||||
)? {
|
)? {
|
||||||
lazy_loaded.insert(event.sender.clone());
|
lazy_loaded.insert(event.sender.clone());
|
||||||
}
|
}
|
||||||
*/
|
|
||||||
lazy_loaded.insert(event.sender.clone());
|
|
||||||
}
|
}
|
||||||
|
|
||||||
next_token = events_before.last().map(|(count, _)| count).copied();
|
next_token = events_before.last().map(|(count, _)| count).copied();
|
||||||
|
@ -231,27 +220,24 @@ pub async fn get_message_events_route(
|
||||||
.map(|(_, pdu)| pdu.to_room_event())
|
.map(|(_, pdu)| pdu.to_room_event())
|
||||||
.collect();
|
.collect();
|
||||||
|
|
||||||
resp.start = from.stringify();
|
resp.start = body.from.to_owned();
|
||||||
resp.end = next_token.map(|count| count.stringify());
|
resp.end = next_token.map(|count| count.to_string());
|
||||||
resp.chunk = events_before;
|
resp.chunk = events_before;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
resp.state = Vec::new();
|
resp.state = Vec::new();
|
||||||
for ll_id in &lazy_loaded {
|
for ll_id in &lazy_loaded {
|
||||||
if let Some(member_event) = services().rooms.state_accessor.room_state_get(
|
if let Some(member_event) =
|
||||||
&body.room_id,
|
db.rooms
|
||||||
&StateEventType::RoomMember,
|
.room_state_get(&body.room_id, &EventType::RoomMember, ll_id.as_str())?
|
||||||
ll_id.as_str(),
|
{
|
||||||
)? {
|
|
||||||
resp.state.push(member_event.to_state_event());
|
resp.state.push(member_event.to_state_event());
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// TODO: enable again when we are sure clients can handle it
|
|
||||||
/*
|
|
||||||
if let Some(next_token) = next_token {
|
if let Some(next_token) = next_token {
|
||||||
services().rooms.lazy_loading.lazy_load_mark_sent(
|
db.rooms.lazy_load_mark_sent(
|
||||||
sender_user,
|
sender_user,
|
||||||
sender_device,
|
sender_device,
|
||||||
&body.room_id,
|
&body.room_id,
|
||||||
|
@ -259,7 +245,6 @@ pub async fn get_message_events_route(
|
||||||
next_token,
|
next_token,
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
*/
|
|
||||||
|
|
||||||
Ok(resp)
|
Ok(resp.into())
|
||||||
}
|
}
|
|
@ -16,17 +16,14 @@ mod profile;
|
||||||
mod push;
|
mod push;
|
||||||
mod read_marker;
|
mod read_marker;
|
||||||
mod redact;
|
mod redact;
|
||||||
mod relations;
|
|
||||||
mod report;
|
mod report;
|
||||||
mod room;
|
mod room;
|
||||||
mod search;
|
mod search;
|
||||||
mod session;
|
mod session;
|
||||||
mod space;
|
|
||||||
mod state;
|
mod state;
|
||||||
mod sync;
|
mod sync;
|
||||||
mod tag;
|
mod tag;
|
||||||
mod thirdparty;
|
mod thirdparty;
|
||||||
mod threads;
|
|
||||||
mod to_device;
|
mod to_device;
|
||||||
mod typing;
|
mod typing;
|
||||||
mod unversioned;
|
mod unversioned;
|
||||||
|
@ -51,24 +48,37 @@ pub use profile::*;
|
||||||
pub use push::*;
|
pub use push::*;
|
||||||
pub use read_marker::*;
|
pub use read_marker::*;
|
||||||
pub use redact::*;
|
pub use redact::*;
|
||||||
pub use relations::*;
|
|
||||||
pub use report::*;
|
pub use report::*;
|
||||||
pub use room::*;
|
pub use room::*;
|
||||||
pub use search::*;
|
pub use search::*;
|
||||||
pub use session::*;
|
pub use session::*;
|
||||||
pub use space::*;
|
|
||||||
pub use state::*;
|
pub use state::*;
|
||||||
pub use sync::*;
|
pub use sync::*;
|
||||||
pub use tag::*;
|
pub use tag::*;
|
||||||
pub use thirdparty::*;
|
pub use thirdparty::*;
|
||||||
pub use threads::*;
|
|
||||||
pub use to_device::*;
|
pub use to_device::*;
|
||||||
pub use typing::*;
|
pub use typing::*;
|
||||||
pub use unversioned::*;
|
pub use unversioned::*;
|
||||||
pub use user_directory::*;
|
pub use user_directory::*;
|
||||||
pub use voip::*;
|
pub use voip::*;
|
||||||
|
|
||||||
|
#[cfg(not(feature = "conduit_bin"))]
|
||||||
|
use super::State;
|
||||||
|
#[cfg(feature = "conduit_bin")]
|
||||||
|
use {
|
||||||
|
crate::ConduitResult, rocket::options, ruma::api::client::r0::to_device::send_event_to_device,
|
||||||
|
};
|
||||||
|
|
||||||
pub const DEVICE_ID_LENGTH: usize = 10;
|
pub const DEVICE_ID_LENGTH: usize = 10;
|
||||||
pub const TOKEN_LENGTH: usize = 32;
|
pub const TOKEN_LENGTH: usize = 256;
|
||||||
pub const SESSION_ID_LENGTH: usize = 32;
|
pub const SESSION_ID_LENGTH: usize = 256;
|
||||||
pub const AUTO_GEN_PASSWORD_LENGTH: usize = 15;
|
|
||||||
|
/// # `OPTIONS`
|
||||||
|
///
|
||||||
|
/// Web clients use this to get CORS headers.
|
||||||
|
#[cfg(feature = "conduit_bin")]
|
||||||
|
#[options("/<_..>")]
|
||||||
|
#[tracing::instrument]
|
||||||
|
pub async fn options_route() -> ConduitResult<send_event_to_device::Response> {
|
||||||
|
Ok(send_event_to_device::Response {}.into())
|
||||||
|
}
|
|
@ -1,29 +1,35 @@
|
||||||
use crate::{services, utils, Error, Result, Ruma};
|
use crate::{database::DatabaseGuard, utils, ConduitResult, Ruma};
|
||||||
use ruma::api::client::{
|
use ruma::api::client::r0::presence::{get_presence, set_presence};
|
||||||
error::ErrorKind,
|
|
||||||
presence::{get_presence, set_presence},
|
|
||||||
};
|
|
||||||
use std::time::Duration;
|
use std::time::Duration;
|
||||||
|
|
||||||
|
#[cfg(feature = "conduit_bin")]
|
||||||
|
use rocket::{get, put};
|
||||||
|
|
||||||
/// # `PUT /_matrix/client/r0/presence/{userId}/status`
|
/// # `PUT /_matrix/client/r0/presence/{userId}/status`
|
||||||
///
|
///
|
||||||
/// Sets the presence state of the sender user.
|
/// Sets the presence state of the sender user.
|
||||||
|
#[cfg_attr(
|
||||||
|
feature = "conduit_bin",
|
||||||
|
put("/_matrix/client/r0/presence/<_>/status", data = "<body>")
|
||||||
|
)]
|
||||||
|
#[tracing::instrument(skip(db, body))]
|
||||||
pub async fn set_presence_route(
|
pub async fn set_presence_route(
|
||||||
body: Ruma<set_presence::v3::Request>,
|
db: DatabaseGuard,
|
||||||
) -> Result<set_presence::v3::Response> {
|
body: Ruma<set_presence::Request<'_>>,
|
||||||
|
) -> ConduitResult<set_presence::Response> {
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
for room_id in services().rooms.state_cache.rooms_joined(sender_user) {
|
for room_id in db.rooms.rooms_joined(sender_user) {
|
||||||
let room_id = room_id?;
|
let room_id = room_id?;
|
||||||
|
|
||||||
services().rooms.edus.presence.update_presence(
|
db.rooms.edus.update_presence(
|
||||||
sender_user,
|
sender_user,
|
||||||
&room_id,
|
&room_id,
|
||||||
ruma::events::presence::PresenceEvent {
|
ruma::events::presence::PresenceEvent {
|
||||||
content: ruma::events::presence::PresenceEventContent {
|
content: ruma::events::presence::PresenceEventContent {
|
||||||
avatar_url: services().users.avatar_url(sender_user)?,
|
avatar_url: db.users.avatar_url(sender_user)?,
|
||||||
currently_active: None,
|
currently_active: None,
|
||||||
displayname: services().users.displayname(sender_user)?,
|
displayname: db.users.displayname(sender_user)?,
|
||||||
last_active_ago: Some(
|
last_active_ago: Some(
|
||||||
utils::millis_since_unix_epoch()
|
utils::millis_since_unix_epoch()
|
||||||
.try_into()
|
.try_into()
|
||||||
|
@ -34,10 +40,13 @@ pub async fn set_presence_route(
|
||||||
},
|
},
|
||||||
sender: sender_user.clone(),
|
sender: sender_user.clone(),
|
||||||
},
|
},
|
||||||
|
&db.globals,
|
||||||
)?;
|
)?;
|
||||||
}
|
}
|
||||||
|
|
||||||
Ok(set_presence::v3::Response {})
|
db.flush()?;
|
||||||
|
|
||||||
|
Ok(set_presence::Response {}.into())
|
||||||
}
|
}
|
||||||
|
|
||||||
/// # `GET /_matrix/client/r0/presence/{userId}/status`
|
/// # `GET /_matrix/client/r0/presence/{userId}/status`
|
||||||
|
@ -45,24 +54,28 @@ pub async fn set_presence_route(
|
||||||
/// Gets the presence state of the given user.
|
/// Gets the presence state of the given user.
|
||||||
///
|
///
|
||||||
/// - Only works if you share a room with the user
|
/// - Only works if you share a room with the user
|
||||||
|
#[cfg_attr(
|
||||||
|
feature = "conduit_bin",
|
||||||
|
get("/_matrix/client/r0/presence/<_>/status", data = "<body>")
|
||||||
|
)]
|
||||||
|
#[tracing::instrument(skip(db, body))]
|
||||||
pub async fn get_presence_route(
|
pub async fn get_presence_route(
|
||||||
body: Ruma<get_presence::v3::Request>,
|
db: DatabaseGuard,
|
||||||
) -> Result<get_presence::v3::Response> {
|
body: Ruma<get_presence::Request<'_>>,
|
||||||
|
) -> ConduitResult<get_presence::Response> {
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
let mut presence_event = None;
|
let mut presence_event = None;
|
||||||
|
|
||||||
for room_id in services()
|
for room_id in db
|
||||||
.rooms
|
.rooms
|
||||||
.user
|
|
||||||
.get_shared_rooms(vec![sender_user.clone(), body.user_id.clone()])?
|
.get_shared_rooms(vec![sender_user.clone(), body.user_id.clone()])?
|
||||||
{
|
{
|
||||||
let room_id = room_id?;
|
let room_id = room_id?;
|
||||||
|
|
||||||
if let Some(presence) = services()
|
if let Some(presence) = db
|
||||||
.rooms
|
.rooms
|
||||||
.edus
|
.edus
|
||||||
.presence
|
|
||||||
.get_last_presence_event(sender_user, &room_id)?
|
.get_last_presence_event(sender_user, &room_id)?
|
||||||
{
|
{
|
||||||
presence_event = Some(presence);
|
presence_event = Some(presence);
|
||||||
|
@ -71,7 +84,7 @@ pub async fn get_presence_route(
|
||||||
}
|
}
|
||||||
|
|
||||||
if let Some(presence) = presence_event {
|
if let Some(presence) = presence_event {
|
||||||
Ok(get_presence::v3::Response {
|
Ok(get_presence::Response {
|
||||||
// TODO: Should ruma just use the presenceeventcontent type here?
|
// TODO: Should ruma just use the presenceeventcontent type here?
|
||||||
status_msg: presence.content.status_msg,
|
status_msg: presence.content.status_msg,
|
||||||
currently_active: presence.content.currently_active,
|
currently_active: presence.content.currently_active,
|
||||||
|
@ -80,11 +93,9 @@ pub async fn get_presence_route(
|
||||||
.last_active_ago
|
.last_active_ago
|
||||||
.map(|millis| Duration::from_millis(millis.into())),
|
.map(|millis| Duration::from_millis(millis.into())),
|
||||||
presence: presence.content.presence,
|
presence: presence.content.presence,
|
||||||
})
|
}
|
||||||
|
.into())
|
||||||
} else {
|
} else {
|
||||||
Err(Error::BadRequest(
|
todo!();
|
||||||
ErrorKind::NotFound,
|
|
||||||
"Presence state for this user was not found",
|
|
||||||
))
|
|
||||||
}
|
}
|
||||||
}
|
}
|
|
@ -1,52 +1,57 @@
|
||||||
use crate::{service::pdu::PduBuilder, services, utils, Error, Result, Ruma};
|
use crate::{database::DatabaseGuard, pdu::PduBuilder, utils, ConduitResult, Error, Ruma};
|
||||||
use ruma::{
|
use ruma::{
|
||||||
api::{
|
api::{
|
||||||
client::{
|
client::{
|
||||||
error::ErrorKind,
|
error::ErrorKind,
|
||||||
profile::{
|
r0::profile::{
|
||||||
get_avatar_url, get_display_name, get_profile, set_avatar_url, set_display_name,
|
get_avatar_url, get_display_name, get_profile, set_avatar_url, set_display_name,
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
federation::{self, query::get_profile_information::v1::ProfileField},
|
federation::{self, query::get_profile_information::v1::ProfileField},
|
||||||
},
|
},
|
||||||
events::{room::member::RoomMemberEventContent, StateEventType, TimelineEventType},
|
events::{room::member::RoomMemberEventContent, EventType},
|
||||||
};
|
};
|
||||||
use serde_json::value::to_raw_value;
|
use serde_json::value::to_raw_value;
|
||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
|
|
||||||
|
#[cfg(feature = "conduit_bin")]
|
||||||
|
use rocket::{get, put};
|
||||||
|
|
||||||
/// # `PUT /_matrix/client/r0/profile/{userId}/displayname`
|
/// # `PUT /_matrix/client/r0/profile/{userId}/displayname`
|
||||||
///
|
///
|
||||||
/// Updates the displayname.
|
/// Updates the displayname.
|
||||||
///
|
///
|
||||||
/// - Also makes sure other users receive the update using presence EDUs
|
/// - Also makes sure other users receive the update using presence EDUs
|
||||||
|
#[cfg_attr(
|
||||||
|
feature = "conduit_bin",
|
||||||
|
put("/_matrix/client/r0/profile/<_>/displayname", data = "<body>")
|
||||||
|
)]
|
||||||
|
#[tracing::instrument(skip(db, body))]
|
||||||
pub async fn set_displayname_route(
|
pub async fn set_displayname_route(
|
||||||
body: Ruma<set_display_name::v3::Request>,
|
db: DatabaseGuard,
|
||||||
) -> Result<set_display_name::v3::Response> {
|
body: Ruma<set_display_name::Request<'_>>,
|
||||||
|
) -> ConduitResult<set_display_name::Response> {
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
services()
|
db.users
|
||||||
.users
|
|
||||||
.set_displayname(sender_user, body.displayname.clone())?;
|
.set_displayname(sender_user, body.displayname.clone())?;
|
||||||
|
|
||||||
// Send a new membership event and presence update into all joined rooms
|
// Send a new membership event and presence update into all joined rooms
|
||||||
let all_rooms_joined: Vec<_> = services()
|
let all_rooms_joined: Vec<_> = db
|
||||||
.rooms
|
.rooms
|
||||||
.state_cache
|
|
||||||
.rooms_joined(sender_user)
|
.rooms_joined(sender_user)
|
||||||
.filter_map(|r| r.ok())
|
.filter_map(|r| r.ok())
|
||||||
.map(|room_id| {
|
.map(|room_id| {
|
||||||
Ok::<_, Error>((
|
Ok::<_, Error>((
|
||||||
PduBuilder {
|
PduBuilder {
|
||||||
event_type: TimelineEventType::RoomMember,
|
event_type: EventType::RoomMember,
|
||||||
content: to_raw_value(&RoomMemberEventContent {
|
content: to_raw_value(&RoomMemberEventContent {
|
||||||
displayname: body.displayname.clone(),
|
displayname: body.displayname.clone(),
|
||||||
..serde_json::from_str(
|
..serde_json::from_str(
|
||||||
services()
|
db.rooms
|
||||||
.rooms
|
|
||||||
.state_accessor
|
|
||||||
.room_state_get(
|
.room_state_get(
|
||||||
&room_id,
|
&room_id,
|
||||||
&StateEventType::RoomMember,
|
&EventType::RoomMember,
|
||||||
sender_user.as_str(),
|
sender_user.as_str(),
|
||||||
)?
|
)?
|
||||||
.ok_or_else(|| {
|
.ok_or_else(|| {
|
||||||
|
@ -73,8 +78,7 @@ pub async fn set_displayname_route(
|
||||||
|
|
||||||
for (pdu_builder, room_id) in all_rooms_joined {
|
for (pdu_builder, room_id) in all_rooms_joined {
|
||||||
let mutex_state = Arc::clone(
|
let mutex_state = Arc::clone(
|
||||||
services()
|
db.globals
|
||||||
.globals
|
|
||||||
.roomid_mutex_state
|
.roomid_mutex_state
|
||||||
.write()
|
.write()
|
||||||
.unwrap()
|
.unwrap()
|
||||||
|
@ -83,22 +87,19 @@ pub async fn set_displayname_route(
|
||||||
);
|
);
|
||||||
let state_lock = mutex_state.lock().await;
|
let state_lock = mutex_state.lock().await;
|
||||||
|
|
||||||
let _ = services().rooms.timeline.build_and_append_pdu(
|
let _ = db
|
||||||
pdu_builder,
|
.rooms
|
||||||
sender_user,
|
.build_and_append_pdu(pdu_builder, sender_user, &room_id, &db, &state_lock);
|
||||||
&room_id,
|
|
||||||
&state_lock,
|
|
||||||
);
|
|
||||||
|
|
||||||
// Presence update
|
// Presence update
|
||||||
services().rooms.edus.presence.update_presence(
|
db.rooms.edus.update_presence(
|
||||||
sender_user,
|
sender_user,
|
||||||
&room_id,
|
&room_id,
|
||||||
ruma::events::presence::PresenceEvent {
|
ruma::events::presence::PresenceEvent {
|
||||||
content: ruma::events::presence::PresenceEventContent {
|
content: ruma::events::presence::PresenceEventContent {
|
||||||
avatar_url: services().users.avatar_url(sender_user)?,
|
avatar_url: db.users.avatar_url(sender_user)?,
|
||||||
currently_active: None,
|
currently_active: None,
|
||||||
displayname: services().users.displayname(sender_user)?,
|
displayname: db.users.displayname(sender_user)?,
|
||||||
last_active_ago: Some(
|
last_active_ago: Some(
|
||||||
utils::millis_since_unix_epoch()
|
utils::millis_since_unix_epoch()
|
||||||
.try_into()
|
.try_into()
|
||||||
|
@ -109,10 +110,13 @@ pub async fn set_displayname_route(
|
||||||
},
|
},
|
||||||
sender: sender_user.clone(),
|
sender: sender_user.clone(),
|
||||||
},
|
},
|
||||||
|
&db.globals,
|
||||||
)?;
|
)?;
|
||||||
}
|
}
|
||||||
|
|
||||||
Ok(set_display_name::v3::Response {})
|
db.flush()?;
|
||||||
|
|
||||||
|
Ok(set_display_name::Response {}.into())
|
||||||
}
|
}
|
||||||
|
|
||||||
/// # `GET /_matrix/client/r0/profile/{userId}/displayname`
|
/// # `GET /_matrix/client/r0/profile/{userId}/displayname`
|
||||||
|
@ -120,29 +124,38 @@ pub async fn set_displayname_route(
|
||||||
/// Returns the displayname of the user.
|
/// Returns the displayname of the user.
|
||||||
///
|
///
|
||||||
/// - If user is on another server: Fetches displayname over federation
|
/// - If user is on another server: Fetches displayname over federation
|
||||||
|
#[cfg_attr(
|
||||||
|
feature = "conduit_bin",
|
||||||
|
get("/_matrix/client/r0/profile/<_>/displayname", data = "<body>")
|
||||||
|
)]
|
||||||
|
#[tracing::instrument(skip(db, body))]
|
||||||
pub async fn get_displayname_route(
|
pub async fn get_displayname_route(
|
||||||
body: Ruma<get_display_name::v3::Request>,
|
db: DatabaseGuard,
|
||||||
) -> Result<get_display_name::v3::Response> {
|
body: Ruma<get_display_name::Request<'_>>,
|
||||||
if body.user_id.server_name() != services().globals.server_name() {
|
) -> ConduitResult<get_display_name::Response> {
|
||||||
let response = services()
|
if body.user_id.server_name() != db.globals.server_name() {
|
||||||
|
let response = db
|
||||||
.sending
|
.sending
|
||||||
.send_federation_request(
|
.send_federation_request(
|
||||||
|
&db.globals,
|
||||||
body.user_id.server_name(),
|
body.user_id.server_name(),
|
||||||
federation::query::get_profile_information::v1::Request {
|
federation::query::get_profile_information::v1::Request {
|
||||||
user_id: body.user_id.clone(),
|
user_id: &body.user_id,
|
||||||
field: Some(ProfileField::DisplayName),
|
field: Some(&ProfileField::DisplayName),
|
||||||
},
|
},
|
||||||
)
|
)
|
||||||
.await?;
|
.await?;
|
||||||
|
|
||||||
return Ok(get_display_name::v3::Response {
|
return Ok(get_display_name::Response {
|
||||||
displayname: response.displayname,
|
displayname: response.displayname,
|
||||||
});
|
}
|
||||||
|
.into());
|
||||||
}
|
}
|
||||||
|
|
||||||
Ok(get_display_name::v3::Response {
|
Ok(get_display_name::Response {
|
||||||
displayname: services().users.displayname(&body.user_id)?,
|
displayname: db.users.displayname(&body.user_id)?,
|
||||||
})
|
}
|
||||||
|
.into())
|
||||||
}
|
}
|
||||||
|
|
||||||
/// # `PUT /_matrix/client/r0/profile/{userId}/avatar_url`
|
/// # `PUT /_matrix/client/r0/profile/{userId}/avatar_url`
|
||||||
|
@ -150,38 +163,38 @@ pub async fn get_displayname_route(
|
||||||
/// Updates the avatar_url and blurhash.
|
/// Updates the avatar_url and blurhash.
|
||||||
///
|
///
|
||||||
/// - Also makes sure other users receive the update using presence EDUs
|
/// - Also makes sure other users receive the update using presence EDUs
|
||||||
|
#[cfg_attr(
|
||||||
|
feature = "conduit_bin",
|
||||||
|
put("/_matrix/client/r0/profile/<_>/avatar_url", data = "<body>")
|
||||||
|
)]
|
||||||
|
#[tracing::instrument(skip(db, body))]
|
||||||
pub async fn set_avatar_url_route(
|
pub async fn set_avatar_url_route(
|
||||||
body: Ruma<set_avatar_url::v3::Request>,
|
db: DatabaseGuard,
|
||||||
) -> Result<set_avatar_url::v3::Response> {
|
body: Ruma<set_avatar_url::Request<'_>>,
|
||||||
|
) -> ConduitResult<set_avatar_url::Response> {
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
services()
|
db.users
|
||||||
.users
|
|
||||||
.set_avatar_url(sender_user, body.avatar_url.clone())?;
|
.set_avatar_url(sender_user, body.avatar_url.clone())?;
|
||||||
|
|
||||||
services()
|
db.users.set_blurhash(sender_user, body.blurhash.clone())?;
|
||||||
.users
|
|
||||||
.set_blurhash(sender_user, body.blurhash.clone())?;
|
|
||||||
|
|
||||||
// Send a new membership event and presence update into all joined rooms
|
// Send a new membership event and presence update into all joined rooms
|
||||||
let all_joined_rooms: Vec<_> = services()
|
let all_joined_rooms: Vec<_> = db
|
||||||
.rooms
|
.rooms
|
||||||
.state_cache
|
|
||||||
.rooms_joined(sender_user)
|
.rooms_joined(sender_user)
|
||||||
.filter_map(|r| r.ok())
|
.filter_map(|r| r.ok())
|
||||||
.map(|room_id| {
|
.map(|room_id| {
|
||||||
Ok::<_, Error>((
|
Ok::<_, Error>((
|
||||||
PduBuilder {
|
PduBuilder {
|
||||||
event_type: TimelineEventType::RoomMember,
|
event_type: EventType::RoomMember,
|
||||||
content: to_raw_value(&RoomMemberEventContent {
|
content: to_raw_value(&RoomMemberEventContent {
|
||||||
avatar_url: body.avatar_url.clone(),
|
avatar_url: body.avatar_url.clone(),
|
||||||
..serde_json::from_str(
|
..serde_json::from_str(
|
||||||
services()
|
db.rooms
|
||||||
.rooms
|
|
||||||
.state_accessor
|
|
||||||
.room_state_get(
|
.room_state_get(
|
||||||
&room_id,
|
&room_id,
|
||||||
&StateEventType::RoomMember,
|
&EventType::RoomMember,
|
||||||
sender_user.as_str(),
|
sender_user.as_str(),
|
||||||
)?
|
)?
|
||||||
.ok_or_else(|| {
|
.ok_or_else(|| {
|
||||||
|
@ -208,8 +221,7 @@ pub async fn set_avatar_url_route(
|
||||||
|
|
||||||
for (pdu_builder, room_id) in all_joined_rooms {
|
for (pdu_builder, room_id) in all_joined_rooms {
|
||||||
let mutex_state = Arc::clone(
|
let mutex_state = Arc::clone(
|
||||||
services()
|
db.globals
|
||||||
.globals
|
|
||||||
.roomid_mutex_state
|
.roomid_mutex_state
|
||||||
.write()
|
.write()
|
||||||
.unwrap()
|
.unwrap()
|
||||||
|
@ -218,22 +230,19 @@ pub async fn set_avatar_url_route(
|
||||||
);
|
);
|
||||||
let state_lock = mutex_state.lock().await;
|
let state_lock = mutex_state.lock().await;
|
||||||
|
|
||||||
let _ = services().rooms.timeline.build_and_append_pdu(
|
let _ = db
|
||||||
pdu_builder,
|
.rooms
|
||||||
sender_user,
|
.build_and_append_pdu(pdu_builder, sender_user, &room_id, &db, &state_lock);
|
||||||
&room_id,
|
|
||||||
&state_lock,
|
|
||||||
);
|
|
||||||
|
|
||||||
// Presence update
|
// Presence update
|
||||||
services().rooms.edus.presence.update_presence(
|
db.rooms.edus.update_presence(
|
||||||
sender_user,
|
sender_user,
|
||||||
&room_id,
|
&room_id,
|
||||||
ruma::events::presence::PresenceEvent {
|
ruma::events::presence::PresenceEvent {
|
||||||
content: ruma::events::presence::PresenceEventContent {
|
content: ruma::events::presence::PresenceEventContent {
|
||||||
avatar_url: services().users.avatar_url(sender_user)?,
|
avatar_url: db.users.avatar_url(sender_user)?,
|
||||||
currently_active: None,
|
currently_active: None,
|
||||||
displayname: services().users.displayname(sender_user)?,
|
displayname: db.users.displayname(sender_user)?,
|
||||||
last_active_ago: Some(
|
last_active_ago: Some(
|
||||||
utils::millis_since_unix_epoch()
|
utils::millis_since_unix_epoch()
|
||||||
.try_into()
|
.try_into()
|
||||||
|
@ -244,10 +253,13 @@ pub async fn set_avatar_url_route(
|
||||||
},
|
},
|
||||||
sender: sender_user.clone(),
|
sender: sender_user.clone(),
|
||||||
},
|
},
|
||||||
|
&db.globals,
|
||||||
)?;
|
)?;
|
||||||
}
|
}
|
||||||
|
|
||||||
Ok(set_avatar_url::v3::Response {})
|
db.flush()?;
|
||||||
|
|
||||||
|
Ok(set_avatar_url::Response {}.into())
|
||||||
}
|
}
|
||||||
|
|
||||||
/// # `GET /_matrix/client/r0/profile/{userId}/avatar_url`
|
/// # `GET /_matrix/client/r0/profile/{userId}/avatar_url`
|
||||||
|
@ -255,31 +267,40 @@ pub async fn set_avatar_url_route(
|
||||||
/// Returns the avatar_url and blurhash of the user.
|
/// Returns the avatar_url and blurhash of the user.
|
||||||
///
|
///
|
||||||
/// - If user is on another server: Fetches avatar_url and blurhash over federation
|
/// - If user is on another server: Fetches avatar_url and blurhash over federation
|
||||||
|
#[cfg_attr(
|
||||||
|
feature = "conduit_bin",
|
||||||
|
get("/_matrix/client/r0/profile/<_>/avatar_url", data = "<body>")
|
||||||
|
)]
|
||||||
|
#[tracing::instrument(skip(db, body))]
|
||||||
pub async fn get_avatar_url_route(
|
pub async fn get_avatar_url_route(
|
||||||
body: Ruma<get_avatar_url::v3::Request>,
|
db: DatabaseGuard,
|
||||||
) -> Result<get_avatar_url::v3::Response> {
|
body: Ruma<get_avatar_url::Request<'_>>,
|
||||||
if body.user_id.server_name() != services().globals.server_name() {
|
) -> ConduitResult<get_avatar_url::Response> {
|
||||||
let response = services()
|
if body.user_id.server_name() != db.globals.server_name() {
|
||||||
|
let response = db
|
||||||
.sending
|
.sending
|
||||||
.send_federation_request(
|
.send_federation_request(
|
||||||
|
&db.globals,
|
||||||
body.user_id.server_name(),
|
body.user_id.server_name(),
|
||||||
federation::query::get_profile_information::v1::Request {
|
federation::query::get_profile_information::v1::Request {
|
||||||
user_id: body.user_id.clone(),
|
user_id: &body.user_id,
|
||||||
field: Some(ProfileField::AvatarUrl),
|
field: Some(&ProfileField::AvatarUrl),
|
||||||
},
|
},
|
||||||
)
|
)
|
||||||
.await?;
|
.await?;
|
||||||
|
|
||||||
return Ok(get_avatar_url::v3::Response {
|
return Ok(get_avatar_url::Response {
|
||||||
avatar_url: response.avatar_url,
|
avatar_url: response.avatar_url,
|
||||||
blurhash: response.blurhash,
|
blurhash: response.blurhash,
|
||||||
});
|
}
|
||||||
|
.into());
|
||||||
}
|
}
|
||||||
|
|
||||||
Ok(get_avatar_url::v3::Response {
|
Ok(get_avatar_url::Response {
|
||||||
avatar_url: services().users.avatar_url(&body.user_id)?,
|
avatar_url: db.users.avatar_url(&body.user_id)?,
|
||||||
blurhash: services().users.blurhash(&body.user_id)?,
|
blurhash: db.users.blurhash(&body.user_id)?,
|
||||||
})
|
}
|
||||||
|
.into())
|
||||||
}
|
}
|
||||||
|
|
||||||
/// # `GET /_matrix/client/r0/profile/{userId}`
|
/// # `GET /_matrix/client/r0/profile/{userId}`
|
||||||
|
@ -287,29 +308,37 @@ pub async fn get_avatar_url_route(
|
||||||
/// Returns the displayname, avatar_url and blurhash of the user.
|
/// Returns the displayname, avatar_url and blurhash of the user.
|
||||||
///
|
///
|
||||||
/// - If user is on another server: Fetches profile over federation
|
/// - If user is on another server: Fetches profile over federation
|
||||||
|
#[cfg_attr(
|
||||||
|
feature = "conduit_bin",
|
||||||
|
get("/_matrix/client/r0/profile/<_>", data = "<body>")
|
||||||
|
)]
|
||||||
|
#[tracing::instrument(skip(db, body))]
|
||||||
pub async fn get_profile_route(
|
pub async fn get_profile_route(
|
||||||
body: Ruma<get_profile::v3::Request>,
|
db: DatabaseGuard,
|
||||||
) -> Result<get_profile::v3::Response> {
|
body: Ruma<get_profile::Request<'_>>,
|
||||||
if body.user_id.server_name() != services().globals.server_name() {
|
) -> ConduitResult<get_profile::Response> {
|
||||||
let response = services()
|
if body.user_id.server_name() != db.globals.server_name() {
|
||||||
|
let response = db
|
||||||
.sending
|
.sending
|
||||||
.send_federation_request(
|
.send_federation_request(
|
||||||
|
&db.globals,
|
||||||
body.user_id.server_name(),
|
body.user_id.server_name(),
|
||||||
federation::query::get_profile_information::v1::Request {
|
federation::query::get_profile_information::v1::Request {
|
||||||
user_id: body.user_id.clone(),
|
user_id: &body.user_id,
|
||||||
field: None,
|
field: None,
|
||||||
},
|
},
|
||||||
)
|
)
|
||||||
.await?;
|
.await?;
|
||||||
|
|
||||||
return Ok(get_profile::v3::Response {
|
return Ok(get_profile::Response {
|
||||||
displayname: response.displayname,
|
displayname: response.displayname,
|
||||||
avatar_url: response.avatar_url,
|
avatar_url: response.avatar_url,
|
||||||
blurhash: response.blurhash,
|
blurhash: response.blurhash,
|
||||||
});
|
}
|
||||||
|
.into());
|
||||||
}
|
}
|
||||||
|
|
||||||
if !services().users.exists(&body.user_id)? {
|
if !db.users.exists(&body.user_id)? {
|
||||||
// Return 404 if this user doesn't exist
|
// Return 404 if this user doesn't exist
|
||||||
return Err(Error::BadRequest(
|
return Err(Error::BadRequest(
|
||||||
ErrorKind::NotFound,
|
ErrorKind::NotFound,
|
||||||
|
@ -317,9 +346,10 @@ pub async fn get_profile_route(
|
||||||
));
|
));
|
||||||
}
|
}
|
||||||
|
|
||||||
Ok(get_profile::v3::Response {
|
Ok(get_profile::Response {
|
||||||
avatar_url: services().users.avatar_url(&body.user_id)?,
|
avatar_url: db.users.avatar_url(&body.user_id)?,
|
||||||
blurhash: services().users.blurhash(&body.user_id)?,
|
blurhash: db.users.blurhash(&body.user_id)?,
|
||||||
displayname: services().users.displayname(&body.user_id)?,
|
displayname: db.users.displayname(&body.user_id)?,
|
||||||
})
|
}
|
||||||
|
.into())
|
||||||
}
|
}
|
588
src/client_server/push.rs
Normal file
588
src/client_server/push.rs
Normal file
|
@ -0,0 +1,588 @@
|
||||||
|
use crate::{database::DatabaseGuard, ConduitResult, Error, Ruma};
|
||||||
|
use ruma::{
|
||||||
|
api::client::{
|
||||||
|
error::ErrorKind,
|
||||||
|
r0::push::{
|
||||||
|
delete_pushrule, get_pushers, get_pushrule, get_pushrule_actions, get_pushrule_enabled,
|
||||||
|
get_pushrules_all, set_pusher, set_pushrule, set_pushrule_actions,
|
||||||
|
set_pushrule_enabled, RuleKind,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
events::{push_rules::PushRulesEvent, EventType},
|
||||||
|
push::{ConditionalPushRuleInit, PatternedPushRuleInit, SimplePushRuleInit},
|
||||||
|
};
|
||||||
|
|
||||||
|
#[cfg(feature = "conduit_bin")]
|
||||||
|
use rocket::{delete, get, post, put};
|
||||||
|
|
||||||
|
/// # `GET /_matrix/client/r0/pushrules`
|
||||||
|
///
|
||||||
|
/// Retrieves the push rules event for this user.
|
||||||
|
#[cfg_attr(
|
||||||
|
feature = "conduit_bin",
|
||||||
|
get("/_matrix/client/r0/pushrules", data = "<body>")
|
||||||
|
)]
|
||||||
|
#[tracing::instrument(skip(db, body))]
|
||||||
|
pub async fn get_pushrules_all_route(
|
||||||
|
db: DatabaseGuard,
|
||||||
|
body: Ruma<get_pushrules_all::Request>,
|
||||||
|
) -> ConduitResult<get_pushrules_all::Response> {
|
||||||
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
|
let event: PushRulesEvent = db
|
||||||
|
.account_data
|
||||||
|
.get(None, sender_user, EventType::PushRules)?
|
||||||
|
.ok_or(Error::BadRequest(
|
||||||
|
ErrorKind::NotFound,
|
||||||
|
"PushRules event not found.",
|
||||||
|
))?;
|
||||||
|
|
||||||
|
Ok(get_pushrules_all::Response {
|
||||||
|
global: event.content.global,
|
||||||
|
}
|
||||||
|
.into())
|
||||||
|
}
|
||||||
|
|
||||||
|
/// # `GET /_matrix/client/r0/pushrules/{scope}/{kind}/{ruleId}`
|
||||||
|
///
|
||||||
|
/// Retrieves a single specified push rule for this user.
|
||||||
|
#[cfg_attr(
|
||||||
|
feature = "conduit_bin",
|
||||||
|
get("/_matrix/client/r0/pushrules/<_>/<_>/<_>", data = "<body>")
|
||||||
|
)]
|
||||||
|
#[tracing::instrument(skip(db, body))]
|
||||||
|
pub async fn get_pushrule_route(
|
||||||
|
db: DatabaseGuard,
|
||||||
|
body: Ruma<get_pushrule::Request<'_>>,
|
||||||
|
) -> ConduitResult<get_pushrule::Response> {
|
||||||
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
|
let event: PushRulesEvent = db
|
||||||
|
.account_data
|
||||||
|
.get(None, sender_user, EventType::PushRules)?
|
||||||
|
.ok_or(Error::BadRequest(
|
||||||
|
ErrorKind::NotFound,
|
||||||
|
"PushRules event not found.",
|
||||||
|
))?;
|
||||||
|
|
||||||
|
let global = event.content.global;
|
||||||
|
let rule = match body.kind {
|
||||||
|
RuleKind::Override => global
|
||||||
|
.override_
|
||||||
|
.get(body.rule_id.as_str())
|
||||||
|
.map(|rule| rule.clone().into()),
|
||||||
|
RuleKind::Underride => global
|
||||||
|
.underride
|
||||||
|
.get(body.rule_id.as_str())
|
||||||
|
.map(|rule| rule.clone().into()),
|
||||||
|
RuleKind::Sender => global
|
||||||
|
.sender
|
||||||
|
.get(body.rule_id.as_str())
|
||||||
|
.map(|rule| rule.clone().into()),
|
||||||
|
RuleKind::Room => global
|
||||||
|
.room
|
||||||
|
.get(body.rule_id.as_str())
|
||||||
|
.map(|rule| rule.clone().into()),
|
||||||
|
RuleKind::Content => global
|
||||||
|
.content
|
||||||
|
.get(body.rule_id.as_str())
|
||||||
|
.map(|rule| rule.clone().into()),
|
||||||
|
_ => None,
|
||||||
|
};
|
||||||
|
|
||||||
|
if let Some(rule) = rule {
|
||||||
|
Ok(get_pushrule::Response { rule }.into())
|
||||||
|
} else {
|
||||||
|
Err(Error::BadRequest(
|
||||||
|
ErrorKind::NotFound,
|
||||||
|
"Push rule not found.",
|
||||||
|
))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// # `PUT /_matrix/client/r0/pushrules/{scope}/{kind}/{ruleId}`
|
||||||
|
///
|
||||||
|
/// Creates a single specified push rule for this user.
|
||||||
|
#[cfg_attr(
|
||||||
|
feature = "conduit_bin",
|
||||||
|
put("/_matrix/client/r0/pushrules/<_>/<_>/<_>", data = "<body>")
|
||||||
|
)]
|
||||||
|
#[tracing::instrument(skip(db, body))]
|
||||||
|
pub async fn set_pushrule_route(
|
||||||
|
db: DatabaseGuard,
|
||||||
|
body: Ruma<set_pushrule::Request<'_>>,
|
||||||
|
) -> ConduitResult<set_pushrule::Response> {
|
||||||
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
let body = body.body;
|
||||||
|
|
||||||
|
if body.scope != "global" {
|
||||||
|
return Err(Error::BadRequest(
|
||||||
|
ErrorKind::InvalidParam,
|
||||||
|
"Scopes other than 'global' are not supported.",
|
||||||
|
));
|
||||||
|
}
|
||||||
|
|
||||||
|
let mut event: PushRulesEvent = db
|
||||||
|
.account_data
|
||||||
|
.get(None, sender_user, EventType::PushRules)?
|
||||||
|
.ok_or(Error::BadRequest(
|
||||||
|
ErrorKind::NotFound,
|
||||||
|
"PushRules event not found.",
|
||||||
|
))?;
|
||||||
|
|
||||||
|
let global = &mut event.content.global;
|
||||||
|
match body.kind {
|
||||||
|
RuleKind::Override => {
|
||||||
|
global.override_.replace(
|
||||||
|
ConditionalPushRuleInit {
|
||||||
|
actions: body.actions,
|
||||||
|
default: false,
|
||||||
|
enabled: true,
|
||||||
|
rule_id: body.rule_id,
|
||||||
|
conditions: body.conditions,
|
||||||
|
}
|
||||||
|
.into(),
|
||||||
|
);
|
||||||
|
}
|
||||||
|
RuleKind::Underride => {
|
||||||
|
global.underride.replace(
|
||||||
|
ConditionalPushRuleInit {
|
||||||
|
actions: body.actions,
|
||||||
|
default: false,
|
||||||
|
enabled: true,
|
||||||
|
rule_id: body.rule_id,
|
||||||
|
conditions: body.conditions,
|
||||||
|
}
|
||||||
|
.into(),
|
||||||
|
);
|
||||||
|
}
|
||||||
|
RuleKind::Sender => {
|
||||||
|
global.sender.replace(
|
||||||
|
SimplePushRuleInit {
|
||||||
|
actions: body.actions,
|
||||||
|
default: false,
|
||||||
|
enabled: true,
|
||||||
|
rule_id: body.rule_id,
|
||||||
|
}
|
||||||
|
.into(),
|
||||||
|
);
|
||||||
|
}
|
||||||
|
RuleKind::Room => {
|
||||||
|
global.room.replace(
|
||||||
|
SimplePushRuleInit {
|
||||||
|
actions: body.actions,
|
||||||
|
default: false,
|
||||||
|
enabled: true,
|
||||||
|
rule_id: body.rule_id,
|
||||||
|
}
|
||||||
|
.into(),
|
||||||
|
);
|
||||||
|
}
|
||||||
|
RuleKind::Content => {
|
||||||
|
global.content.replace(
|
||||||
|
PatternedPushRuleInit {
|
||||||
|
actions: body.actions,
|
||||||
|
default: false,
|
||||||
|
enabled: true,
|
||||||
|
rule_id: body.rule_id,
|
||||||
|
pattern: body.pattern.unwrap_or_default(),
|
||||||
|
}
|
||||||
|
.into(),
|
||||||
|
);
|
||||||
|
}
|
||||||
|
_ => {}
|
||||||
|
}
|
||||||
|
|
||||||
|
db.account_data
|
||||||
|
.update(None, sender_user, EventType::PushRules, &event, &db.globals)?;
|
||||||
|
|
||||||
|
db.flush()?;
|
||||||
|
|
||||||
|
Ok(set_pushrule::Response {}.into())
|
||||||
|
}
|
||||||
|
|
||||||
|
/// # `GET /_matrix/client/r0/pushrules/{scope}/{kind}/{ruleId}/actions`
|
||||||
|
///
|
||||||
|
/// Gets the actions of a single specified push rule for this user.
|
||||||
|
#[cfg_attr(
|
||||||
|
feature = "conduit_bin",
|
||||||
|
get("/_matrix/client/r0/pushrules/<_>/<_>/<_>/actions", data = "<body>")
|
||||||
|
)]
|
||||||
|
#[tracing::instrument(skip(db, body))]
|
||||||
|
pub async fn get_pushrule_actions_route(
|
||||||
|
db: DatabaseGuard,
|
||||||
|
body: Ruma<get_pushrule_actions::Request<'_>>,
|
||||||
|
) -> ConduitResult<get_pushrule_actions::Response> {
|
||||||
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
|
if body.scope != "global" {
|
||||||
|
return Err(Error::BadRequest(
|
||||||
|
ErrorKind::InvalidParam,
|
||||||
|
"Scopes other than 'global' are not supported.",
|
||||||
|
));
|
||||||
|
}
|
||||||
|
|
||||||
|
let mut event: PushRulesEvent = db
|
||||||
|
.account_data
|
||||||
|
.get(None, sender_user, EventType::PushRules)?
|
||||||
|
.ok_or(Error::BadRequest(
|
||||||
|
ErrorKind::NotFound,
|
||||||
|
"PushRules event not found.",
|
||||||
|
))?;
|
||||||
|
|
||||||
|
let global = &mut event.content.global;
|
||||||
|
let actions = match body.kind {
|
||||||
|
RuleKind::Override => global
|
||||||
|
.override_
|
||||||
|
.get(body.rule_id.as_str())
|
||||||
|
.map(|rule| rule.actions.clone()),
|
||||||
|
RuleKind::Underride => global
|
||||||
|
.underride
|
||||||
|
.get(body.rule_id.as_str())
|
||||||
|
.map(|rule| rule.actions.clone()),
|
||||||
|
RuleKind::Sender => global
|
||||||
|
.sender
|
||||||
|
.get(body.rule_id.as_str())
|
||||||
|
.map(|rule| rule.actions.clone()),
|
||||||
|
RuleKind::Room => global
|
||||||
|
.room
|
||||||
|
.get(body.rule_id.as_str())
|
||||||
|
.map(|rule| rule.actions.clone()),
|
||||||
|
RuleKind::Content => global
|
||||||
|
.content
|
||||||
|
.get(body.rule_id.as_str())
|
||||||
|
.map(|rule| rule.actions.clone()),
|
||||||
|
_ => None,
|
||||||
|
};
|
||||||
|
|
||||||
|
db.flush()?;
|
||||||
|
|
||||||
|
Ok(get_pushrule_actions::Response {
|
||||||
|
actions: actions.unwrap_or_default(),
|
||||||
|
}
|
||||||
|
.into())
|
||||||
|
}
|
||||||
|
|
||||||
|
/// # `PUT /_matrix/client/r0/pushrules/{scope}/{kind}/{ruleId}/actions`
|
||||||
|
///
|
||||||
|
/// Sets the actions of a single specified push rule for this user.
|
||||||
|
#[cfg_attr(
|
||||||
|
feature = "conduit_bin",
|
||||||
|
put("/_matrix/client/r0/pushrules/<_>/<_>/<_>/actions", data = "<body>")
|
||||||
|
)]
|
||||||
|
#[tracing::instrument(skip(db, body))]
|
||||||
|
pub async fn set_pushrule_actions_route(
|
||||||
|
db: DatabaseGuard,
|
||||||
|
body: Ruma<set_pushrule_actions::Request<'_>>,
|
||||||
|
) -> ConduitResult<set_pushrule_actions::Response> {
|
||||||
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
|
if body.scope != "global" {
|
||||||
|
return Err(Error::BadRequest(
|
||||||
|
ErrorKind::InvalidParam,
|
||||||
|
"Scopes other than 'global' are not supported.",
|
||||||
|
));
|
||||||
|
}
|
||||||
|
|
||||||
|
let mut event: PushRulesEvent = db
|
||||||
|
.account_data
|
||||||
|
.get(None, sender_user, EventType::PushRules)?
|
||||||
|
.ok_or(Error::BadRequest(
|
||||||
|
ErrorKind::NotFound,
|
||||||
|
"PushRules event not found.",
|
||||||
|
))?;
|
||||||
|
|
||||||
|
let global = &mut event.content.global;
|
||||||
|
match body.kind {
|
||||||
|
RuleKind::Override => {
|
||||||
|
if let Some(mut rule) = global.override_.get(body.rule_id.as_str()).cloned() {
|
||||||
|
rule.actions = body.actions.clone();
|
||||||
|
global.override_.replace(rule);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
RuleKind::Underride => {
|
||||||
|
if let Some(mut rule) = global.underride.get(body.rule_id.as_str()).cloned() {
|
||||||
|
rule.actions = body.actions.clone();
|
||||||
|
global.underride.replace(rule);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
RuleKind::Sender => {
|
||||||
|
if let Some(mut rule) = global.sender.get(body.rule_id.as_str()).cloned() {
|
||||||
|
rule.actions = body.actions.clone();
|
||||||
|
global.sender.replace(rule);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
RuleKind::Room => {
|
||||||
|
if let Some(mut rule) = global.room.get(body.rule_id.as_str()).cloned() {
|
||||||
|
rule.actions = body.actions.clone();
|
||||||
|
global.room.replace(rule);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
RuleKind::Content => {
|
||||||
|
if let Some(mut rule) = global.content.get(body.rule_id.as_str()).cloned() {
|
||||||
|
rule.actions = body.actions.clone();
|
||||||
|
global.content.replace(rule);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
_ => {}
|
||||||
|
};
|
||||||
|
|
||||||
|
db.account_data
|
||||||
|
.update(None, sender_user, EventType::PushRules, &event, &db.globals)?;
|
||||||
|
|
||||||
|
db.flush()?;
|
||||||
|
|
||||||
|
Ok(set_pushrule_actions::Response {}.into())
|
||||||
|
}
|
||||||
|
|
||||||
|
/// # `GET /_matrix/client/r0/pushrules/{scope}/{kind}/{ruleId}/enabled`
|
||||||
|
///
|
||||||
|
/// Gets the enabled status of a single specified push rule for this user.
|
||||||
|
#[cfg_attr(
|
||||||
|
feature = "conduit_bin",
|
||||||
|
get("/_matrix/client/r0/pushrules/<_>/<_>/<_>/enabled", data = "<body>")
|
||||||
|
)]
|
||||||
|
#[tracing::instrument(skip(db, body))]
|
||||||
|
pub async fn get_pushrule_enabled_route(
|
||||||
|
db: DatabaseGuard,
|
||||||
|
body: Ruma<get_pushrule_enabled::Request<'_>>,
|
||||||
|
) -> ConduitResult<get_pushrule_enabled::Response> {
|
||||||
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
|
if body.scope != "global" {
|
||||||
|
return Err(Error::BadRequest(
|
||||||
|
ErrorKind::InvalidParam,
|
||||||
|
"Scopes other than 'global' are not supported.",
|
||||||
|
));
|
||||||
|
}
|
||||||
|
|
||||||
|
let mut event: PushRulesEvent = db
|
||||||
|
.account_data
|
||||||
|
.get(None, sender_user, EventType::PushRules)?
|
||||||
|
.ok_or(Error::BadRequest(
|
||||||
|
ErrorKind::NotFound,
|
||||||
|
"PushRules event not found.",
|
||||||
|
))?;
|
||||||
|
|
||||||
|
let global = &mut event.content.global;
|
||||||
|
let enabled = match body.kind {
|
||||||
|
RuleKind::Override => global
|
||||||
|
.override_
|
||||||
|
.iter()
|
||||||
|
.find(|rule| rule.rule_id == body.rule_id)
|
||||||
|
.map_or(false, |rule| rule.enabled),
|
||||||
|
RuleKind::Underride => global
|
||||||
|
.underride
|
||||||
|
.iter()
|
||||||
|
.find(|rule| rule.rule_id == body.rule_id)
|
||||||
|
.map_or(false, |rule| rule.enabled),
|
||||||
|
RuleKind::Sender => global
|
||||||
|
.sender
|
||||||
|
.iter()
|
||||||
|
.find(|rule| rule.rule_id == body.rule_id)
|
||||||
|
.map_or(false, |rule| rule.enabled),
|
||||||
|
RuleKind::Room => global
|
||||||
|
.room
|
||||||
|
.iter()
|
||||||
|
.find(|rule| rule.rule_id == body.rule_id)
|
||||||
|
.map_or(false, |rule| rule.enabled),
|
||||||
|
RuleKind::Content => global
|
||||||
|
.content
|
||||||
|
.iter()
|
||||||
|
.find(|rule| rule.rule_id == body.rule_id)
|
||||||
|
.map_or(false, |rule| rule.enabled),
|
||||||
|
_ => false,
|
||||||
|
};
|
||||||
|
|
||||||
|
db.flush()?;
|
||||||
|
|
||||||
|
Ok(get_pushrule_enabled::Response { enabled }.into())
|
||||||
|
}
|
||||||
|
|
||||||
|
/// # `PUT /_matrix/client/r0/pushrules/{scope}/{kind}/{ruleId}/enabled`
|
||||||
|
///
|
||||||
|
/// Sets the enabled status of a single specified push rule for this user.
|
||||||
|
#[cfg_attr(
|
||||||
|
feature = "conduit_bin",
|
||||||
|
put("/_matrix/client/r0/pushrules/<_>/<_>/<_>/enabled", data = "<body>")
|
||||||
|
)]
|
||||||
|
#[tracing::instrument(skip(db, body))]
|
||||||
|
pub async fn set_pushrule_enabled_route(
|
||||||
|
db: DatabaseGuard,
|
||||||
|
body: Ruma<set_pushrule_enabled::Request<'_>>,
|
||||||
|
) -> ConduitResult<set_pushrule_enabled::Response> {
|
||||||
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
|
if body.scope != "global" {
|
||||||
|
return Err(Error::BadRequest(
|
||||||
|
ErrorKind::InvalidParam,
|
||||||
|
"Scopes other than 'global' are not supported.",
|
||||||
|
));
|
||||||
|
}
|
||||||
|
|
||||||
|
let mut event: PushRulesEvent = db
|
||||||
|
.account_data
|
||||||
|
.get(None, sender_user, EventType::PushRules)?
|
||||||
|
.ok_or(Error::BadRequest(
|
||||||
|
ErrorKind::NotFound,
|
||||||
|
"PushRules event not found.",
|
||||||
|
))?;
|
||||||
|
|
||||||
|
let global = &mut event.content.global;
|
||||||
|
match body.kind {
|
||||||
|
RuleKind::Override => {
|
||||||
|
if let Some(mut rule) = global.override_.get(body.rule_id.as_str()).cloned() {
|
||||||
|
global.override_.remove(&rule);
|
||||||
|
rule.enabled = body.enabled;
|
||||||
|
global.override_.insert(rule);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
RuleKind::Underride => {
|
||||||
|
if let Some(mut rule) = global.underride.get(body.rule_id.as_str()).cloned() {
|
||||||
|
global.underride.remove(&rule);
|
||||||
|
rule.enabled = body.enabled;
|
||||||
|
global.underride.insert(rule);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
RuleKind::Sender => {
|
||||||
|
if let Some(mut rule) = global.sender.get(body.rule_id.as_str()).cloned() {
|
||||||
|
global.sender.remove(&rule);
|
||||||
|
rule.enabled = body.enabled;
|
||||||
|
global.sender.insert(rule);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
RuleKind::Room => {
|
||||||
|
if let Some(mut rule) = global.room.get(body.rule_id.as_str()).cloned() {
|
||||||
|
global.room.remove(&rule);
|
||||||
|
rule.enabled = body.enabled;
|
||||||
|
global.room.insert(rule);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
RuleKind::Content => {
|
||||||
|
if let Some(mut rule) = global.content.get(body.rule_id.as_str()).cloned() {
|
||||||
|
global.content.remove(&rule);
|
||||||
|
rule.enabled = body.enabled;
|
||||||
|
global.content.insert(rule);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
_ => {}
|
||||||
|
}
|
||||||
|
|
||||||
|
db.account_data
|
||||||
|
.update(None, sender_user, EventType::PushRules, &event, &db.globals)?;
|
||||||
|
|
||||||
|
db.flush()?;
|
||||||
|
|
||||||
|
Ok(set_pushrule_enabled::Response {}.into())
|
||||||
|
}
|
||||||
|
|
||||||
|
/// # `DELETE /_matrix/client/r0/pushrules/{scope}/{kind}/{ruleId}`
|
||||||
|
///
|
||||||
|
/// Deletes a single specified push rule for this user.
|
||||||
|
#[cfg_attr(
|
||||||
|
feature = "conduit_bin",
|
||||||
|
delete("/_matrix/client/r0/pushrules/<_>/<_>/<_>", data = "<body>")
|
||||||
|
)]
|
||||||
|
#[tracing::instrument(skip(db, body))]
|
||||||
|
pub async fn delete_pushrule_route(
|
||||||
|
db: DatabaseGuard,
|
||||||
|
body: Ruma<delete_pushrule::Request<'_>>,
|
||||||
|
) -> ConduitResult<delete_pushrule::Response> {
|
||||||
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
|
if body.scope != "global" {
|
||||||
|
return Err(Error::BadRequest(
|
||||||
|
ErrorKind::InvalidParam,
|
||||||
|
"Scopes other than 'global' are not supported.",
|
||||||
|
));
|
||||||
|
}
|
||||||
|
|
||||||
|
let mut event: PushRulesEvent = db
|
||||||
|
.account_data
|
||||||
|
.get(None, sender_user, EventType::PushRules)?
|
||||||
|
.ok_or(Error::BadRequest(
|
||||||
|
ErrorKind::NotFound,
|
||||||
|
"PushRules event not found.",
|
||||||
|
))?;
|
||||||
|
|
||||||
|
let global = &mut event.content.global;
|
||||||
|
match body.kind {
|
||||||
|
RuleKind::Override => {
|
||||||
|
if let Some(rule) = global.override_.get(body.rule_id.as_str()).cloned() {
|
||||||
|
global.override_.remove(&rule);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
RuleKind::Underride => {
|
||||||
|
if let Some(rule) = global.underride.get(body.rule_id.as_str()).cloned() {
|
||||||
|
global.underride.remove(&rule);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
RuleKind::Sender => {
|
||||||
|
if let Some(rule) = global.sender.get(body.rule_id.as_str()).cloned() {
|
||||||
|
global.sender.remove(&rule);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
RuleKind::Room => {
|
||||||
|
if let Some(rule) = global.room.get(body.rule_id.as_str()).cloned() {
|
||||||
|
global.room.remove(&rule);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
RuleKind::Content => {
|
||||||
|
if let Some(rule) = global.content.get(body.rule_id.as_str()).cloned() {
|
||||||
|
global.content.remove(&rule);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
_ => {}
|
||||||
|
}
|
||||||
|
|
||||||
|
db.account_data
|
||||||
|
.update(None, sender_user, EventType::PushRules, &event, &db.globals)?;
|
||||||
|
|
||||||
|
db.flush()?;
|
||||||
|
|
||||||
|
Ok(delete_pushrule::Response {}.into())
|
||||||
|
}
|
||||||
|
|
||||||
|
/// # `GET /_matrix/client/r0/pushers`
|
||||||
|
///
|
||||||
|
/// Gets all currently active pushers for the sender user.
|
||||||
|
#[cfg_attr(
|
||||||
|
feature = "conduit_bin",
|
||||||
|
get("/_matrix/client/r0/pushers", data = "<body>")
|
||||||
|
)]
|
||||||
|
#[tracing::instrument(skip(db, body))]
|
||||||
|
pub async fn get_pushers_route(
|
||||||
|
db: DatabaseGuard,
|
||||||
|
body: Ruma<get_pushers::Request>,
|
||||||
|
) -> ConduitResult<get_pushers::Response> {
|
||||||
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
|
Ok(get_pushers::Response {
|
||||||
|
pushers: db.pusher.get_pushers(sender_user)?,
|
||||||
|
}
|
||||||
|
.into())
|
||||||
|
}
|
||||||
|
|
||||||
|
/// # `POST /_matrix/client/r0/pushers/set`
|
||||||
|
///
|
||||||
|
/// Adds a pusher for the sender user.
|
||||||
|
///
|
||||||
|
/// - TODO: Handle `append`
|
||||||
|
#[cfg_attr(
|
||||||
|
feature = "conduit_bin",
|
||||||
|
post("/_matrix/client/r0/pushers/set", data = "<body>")
|
||||||
|
)]
|
||||||
|
#[tracing::instrument(skip(db, body))]
|
||||||
|
pub async fn set_pushers_route(
|
||||||
|
db: DatabaseGuard,
|
||||||
|
body: Ruma<set_pusher::Request>,
|
||||||
|
) -> ConduitResult<set_pusher::Response> {
|
||||||
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
let pusher = body.pusher.clone();
|
||||||
|
|
||||||
|
db.pusher.set_pusher(sender_user, pusher)?;
|
||||||
|
|
||||||
|
db.flush()?;
|
||||||
|
|
||||||
|
Ok(set_pusher::Response::default().into())
|
||||||
|
}
|
143
src/client_server/read_marker.rs
Normal file
143
src/client_server/read_marker.rs
Normal file
|
@ -0,0 +1,143 @@
|
||||||
|
use crate::{database::DatabaseGuard, ConduitResult, Error, Ruma};
|
||||||
|
use ruma::{
|
||||||
|
api::client::{
|
||||||
|
error::ErrorKind,
|
||||||
|
r0::{read_marker::set_read_marker, receipt::create_receipt},
|
||||||
|
},
|
||||||
|
events::{AnyEphemeralRoomEvent, EventType},
|
||||||
|
receipt::ReceiptType,
|
||||||
|
MilliSecondsSinceUnixEpoch,
|
||||||
|
};
|
||||||
|
use std::collections::BTreeMap;
|
||||||
|
|
||||||
|
#[cfg(feature = "conduit_bin")]
|
||||||
|
use rocket::post;
|
||||||
|
|
||||||
|
/// # `POST /_matrix/client/r0/rooms/{roomId}/read_markers`
|
||||||
|
///
|
||||||
|
/// Sets different types of read markers.
|
||||||
|
///
|
||||||
|
/// - Updates fully-read account data event to `fully_read`
|
||||||
|
/// - If `read_receipt` is set: Update private marker and public read receipt EDU
|
||||||
|
#[cfg_attr(
|
||||||
|
feature = "conduit_bin",
|
||||||
|
post("/_matrix/client/r0/rooms/<_>/read_markers", data = "<body>")
|
||||||
|
)]
|
||||||
|
#[tracing::instrument(skip(db, body))]
|
||||||
|
pub async fn set_read_marker_route(
|
||||||
|
db: DatabaseGuard,
|
||||||
|
body: Ruma<set_read_marker::Request<'_>>,
|
||||||
|
) -> ConduitResult<set_read_marker::Response> {
|
||||||
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
|
let fully_read_event = ruma::events::fully_read::FullyReadEvent {
|
||||||
|
content: ruma::events::fully_read::FullyReadEventContent {
|
||||||
|
event_id: body.fully_read.clone(),
|
||||||
|
},
|
||||||
|
};
|
||||||
|
db.account_data.update(
|
||||||
|
Some(&body.room_id),
|
||||||
|
sender_user,
|
||||||
|
EventType::FullyRead,
|
||||||
|
&fully_read_event,
|
||||||
|
&db.globals,
|
||||||
|
)?;
|
||||||
|
|
||||||
|
if let Some(event) = &body.read_receipt {
|
||||||
|
db.rooms.edus.private_read_set(
|
||||||
|
&body.room_id,
|
||||||
|
sender_user,
|
||||||
|
db.rooms.get_pdu_count(event)?.ok_or(Error::BadRequest(
|
||||||
|
ErrorKind::InvalidParam,
|
||||||
|
"Event does not exist.",
|
||||||
|
))?,
|
||||||
|
&db.globals,
|
||||||
|
)?;
|
||||||
|
db.rooms
|
||||||
|
.reset_notification_counts(sender_user, &body.room_id)?;
|
||||||
|
|
||||||
|
let mut user_receipts = BTreeMap::new();
|
||||||
|
user_receipts.insert(
|
||||||
|
sender_user.clone(),
|
||||||
|
ruma::events::receipt::Receipt {
|
||||||
|
ts: Some(MilliSecondsSinceUnixEpoch::now()),
|
||||||
|
},
|
||||||
|
);
|
||||||
|
|
||||||
|
let mut receipts = BTreeMap::new();
|
||||||
|
receipts.insert(ReceiptType::Read, user_receipts);
|
||||||
|
|
||||||
|
let mut receipt_content = BTreeMap::new();
|
||||||
|
receipt_content.insert(event.to_owned(), receipts);
|
||||||
|
|
||||||
|
db.rooms.edus.readreceipt_update(
|
||||||
|
sender_user,
|
||||||
|
&body.room_id,
|
||||||
|
AnyEphemeralRoomEvent::Receipt(ruma::events::receipt::ReceiptEvent {
|
||||||
|
content: ruma::events::receipt::ReceiptEventContent(receipt_content),
|
||||||
|
room_id: body.room_id.clone(),
|
||||||
|
}),
|
||||||
|
&db.globals,
|
||||||
|
)?;
|
||||||
|
}
|
||||||
|
|
||||||
|
db.flush()?;
|
||||||
|
|
||||||
|
Ok(set_read_marker::Response {}.into())
|
||||||
|
}
|
||||||
|
|
||||||
|
/// # `POST /_matrix/client/r0/rooms/{roomId}/receipt/{receiptType}/{eventId}`
|
||||||
|
///
|
||||||
|
/// Sets private read marker and public read receipt EDU.
|
||||||
|
#[cfg_attr(
|
||||||
|
feature = "conduit_bin",
|
||||||
|
post("/_matrix/client/r0/rooms/<_>/receipt/<_>/<_>", data = "<body>")
|
||||||
|
)]
|
||||||
|
#[tracing::instrument(skip(db, body))]
|
||||||
|
pub async fn create_receipt_route(
|
||||||
|
db: DatabaseGuard,
|
||||||
|
body: Ruma<create_receipt::Request<'_>>,
|
||||||
|
) -> ConduitResult<create_receipt::Response> {
|
||||||
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
|
db.rooms.edus.private_read_set(
|
||||||
|
&body.room_id,
|
||||||
|
sender_user,
|
||||||
|
db.rooms
|
||||||
|
.get_pdu_count(&body.event_id)?
|
||||||
|
.ok_or(Error::BadRequest(
|
||||||
|
ErrorKind::InvalidParam,
|
||||||
|
"Event does not exist.",
|
||||||
|
))?,
|
||||||
|
&db.globals,
|
||||||
|
)?;
|
||||||
|
db.rooms
|
||||||
|
.reset_notification_counts(sender_user, &body.room_id)?;
|
||||||
|
|
||||||
|
let mut user_receipts = BTreeMap::new();
|
||||||
|
user_receipts.insert(
|
||||||
|
sender_user.clone(),
|
||||||
|
ruma::events::receipt::Receipt {
|
||||||
|
ts: Some(MilliSecondsSinceUnixEpoch::now()),
|
||||||
|
},
|
||||||
|
);
|
||||||
|
let mut receipts = BTreeMap::new();
|
||||||
|
receipts.insert(ReceiptType::Read, user_receipts);
|
||||||
|
|
||||||
|
let mut receipt_content = BTreeMap::new();
|
||||||
|
receipt_content.insert(body.event_id.to_owned(), receipts);
|
||||||
|
|
||||||
|
db.rooms.edus.readreceipt_update(
|
||||||
|
sender_user,
|
||||||
|
&body.room_id,
|
||||||
|
AnyEphemeralRoomEvent::Receipt(ruma::events::receipt::ReceiptEvent {
|
||||||
|
content: ruma::events::receipt::ReceiptEventContent(receipt_content),
|
||||||
|
room_id: body.room_id.clone(),
|
||||||
|
}),
|
||||||
|
&db.globals,
|
||||||
|
)?;
|
||||||
|
|
||||||
|
db.flush()?;
|
||||||
|
|
||||||
|
Ok(create_receipt::Response {}.into())
|
||||||
|
}
|
|
@ -1,11 +1,13 @@
|
||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
|
|
||||||
use crate::{service::pdu::PduBuilder, services, Result, Ruma};
|
use crate::{database::DatabaseGuard, pdu::PduBuilder, ConduitResult, Ruma};
|
||||||
use ruma::{
|
use ruma::{
|
||||||
api::client::redact::redact_event,
|
api::client::r0::redact::redact_event,
|
||||||
events::{room::redaction::RoomRedactionEventContent, TimelineEventType},
|
events::{room::redaction::RoomRedactionEventContent, EventType},
|
||||||
};
|
};
|
||||||
|
|
||||||
|
#[cfg(feature = "conduit_bin")]
|
||||||
|
use rocket::put;
|
||||||
use serde_json::value::to_raw_value;
|
use serde_json::value::to_raw_value;
|
||||||
|
|
||||||
/// # `PUT /_matrix/client/r0/rooms/{roomId}/redact/{eventId}/{txnId}`
|
/// # `PUT /_matrix/client/r0/rooms/{roomId}/redact/{eventId}/{txnId}`
|
||||||
|
@ -13,15 +15,20 @@ use serde_json::value::to_raw_value;
|
||||||
/// Tries to send a redaction event into the room.
|
/// Tries to send a redaction event into the room.
|
||||||
///
|
///
|
||||||
/// - TODO: Handle txn id
|
/// - TODO: Handle txn id
|
||||||
|
#[cfg_attr(
|
||||||
|
feature = "conduit_bin",
|
||||||
|
put("/_matrix/client/r0/rooms/<_>/redact/<_>/<_>", data = "<body>")
|
||||||
|
)]
|
||||||
|
#[tracing::instrument(skip(db, body))]
|
||||||
pub async fn redact_event_route(
|
pub async fn redact_event_route(
|
||||||
body: Ruma<redact_event::v3::Request>,
|
db: DatabaseGuard,
|
||||||
) -> Result<redact_event::v3::Response> {
|
body: Ruma<redact_event::Request<'_>>,
|
||||||
|
) -> ConduitResult<redact_event::Response> {
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
let body = body.body;
|
let body = body.body;
|
||||||
|
|
||||||
let mutex_state = Arc::clone(
|
let mutex_state = Arc::clone(
|
||||||
services()
|
db.globals
|
||||||
.globals
|
|
||||||
.roomid_mutex_state
|
.roomid_mutex_state
|
||||||
.write()
|
.write()
|
||||||
.unwrap()
|
.unwrap()
|
||||||
|
@ -30,11 +37,10 @@ pub async fn redact_event_route(
|
||||||
);
|
);
|
||||||
let state_lock = mutex_state.lock().await;
|
let state_lock = mutex_state.lock().await;
|
||||||
|
|
||||||
let event_id = services().rooms.timeline.build_and_append_pdu(
|
let event_id = db.rooms.build_and_append_pdu(
|
||||||
PduBuilder {
|
PduBuilder {
|
||||||
event_type: TimelineEventType::RoomRedaction,
|
event_type: EventType::RoomRedaction,
|
||||||
content: to_raw_value(&RoomRedactionEventContent {
|
content: to_raw_value(&RoomRedactionEventContent {
|
||||||
redacts: Some(body.event_id.clone()),
|
|
||||||
reason: body.reason.clone(),
|
reason: body.reason.clone(),
|
||||||
})
|
})
|
||||||
.expect("event is valid, we just created it"),
|
.expect("event is valid, we just created it"),
|
||||||
|
@ -44,11 +50,14 @@ pub async fn redact_event_route(
|
||||||
},
|
},
|
||||||
sender_user,
|
sender_user,
|
||||||
&body.room_id,
|
&body.room_id,
|
||||||
|
&db,
|
||||||
&state_lock,
|
&state_lock,
|
||||||
)?;
|
)?;
|
||||||
|
|
||||||
drop(state_lock);
|
drop(state_lock);
|
||||||
|
|
||||||
|
db.flush()?;
|
||||||
|
|
||||||
let event_id = (*event_id).to_owned();
|
let event_id = (*event_id).to_owned();
|
||||||
Ok(redact_event::v3::Response { event_id })
|
Ok(redact_event::Response { event_id }.into())
|
||||||
}
|
}
|
|
@ -1,20 +1,29 @@
|
||||||
use crate::{services, utils::HtmlEscape, Error, Result, Ruma};
|
use crate::{database::DatabaseGuard, ConduitResult, Error, Ruma};
|
||||||
use ruma::{
|
use ruma::{
|
||||||
api::client::{error::ErrorKind, room::report_content},
|
api::client::{error::ErrorKind, r0::room::report_content},
|
||||||
events::room::message,
|
events::room::message,
|
||||||
int,
|
int,
|
||||||
};
|
};
|
||||||
|
|
||||||
|
#[cfg(feature = "conduit_bin")]
|
||||||
|
use rocket::{http::RawStr, post};
|
||||||
|
|
||||||
/// # `POST /_matrix/client/r0/rooms/{roomId}/report/{eventId}`
|
/// # `POST /_matrix/client/r0/rooms/{roomId}/report/{eventId}`
|
||||||
///
|
///
|
||||||
/// Reports an inappropriate event to homeserver admins
|
/// Reports an inappropriate event to homeserver admins
|
||||||
///
|
///
|
||||||
|
#[cfg_attr(
|
||||||
|
feature = "conduit_bin",
|
||||||
|
post("/_matrix/client/r0/rooms/<_>/report/<_>", data = "<body>")
|
||||||
|
)]
|
||||||
|
#[tracing::instrument(skip(db, body))]
|
||||||
pub async fn report_event_route(
|
pub async fn report_event_route(
|
||||||
body: Ruma<report_content::v3::Request>,
|
db: DatabaseGuard,
|
||||||
) -> Result<report_content::v3::Response> {
|
body: Ruma<report_content::Request<'_>>,
|
||||||
|
) -> ConduitResult<report_content::Response> {
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
let pdu = match services().rooms.timeline.get_pdu(&body.event_id)? {
|
let pdu = match db.rooms.get_pdu(&body.event_id)? {
|
||||||
Some(pdu) => pdu,
|
Some(pdu) => pdu,
|
||||||
_ => {
|
_ => {
|
||||||
return Err(Error::BadRequest(
|
return Err(Error::BadRequest(
|
||||||
|
@ -24,46 +33,48 @@ pub async fn report_event_route(
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
if let Some(true) = body.score.map(|s| s > int!(0) || s < int!(-100)) {
|
if body.score > int!(0) || body.score < int!(-100) {
|
||||||
return Err(Error::BadRequest(
|
return Err(Error::BadRequest(
|
||||||
ErrorKind::InvalidParam,
|
ErrorKind::InvalidParam,
|
||||||
"Invalid score, must be within 0 to -100",
|
"Invalid score, must be within 0 to -100",
|
||||||
));
|
));
|
||||||
};
|
};
|
||||||
|
|
||||||
if let Some(true) = body.reason.clone().map(|s| s.chars().count() > 250) {
|
if body.reason.chars().count() > 250 {
|
||||||
return Err(Error::BadRequest(
|
return Err(Error::BadRequest(
|
||||||
ErrorKind::InvalidParam,
|
ErrorKind::InvalidParam,
|
||||||
"Reason too long, should be 250 characters or fewer",
|
"Reason too long, should be 250 characters or fewer",
|
||||||
));
|
));
|
||||||
};
|
};
|
||||||
|
|
||||||
services().admin
|
db.admin
|
||||||
.send_message(message::RoomMessageEventContent::text_html(
|
.send_message(message::RoomMessageEventContent::text_html(
|
||||||
format!(
|
format!(
|
||||||
"Report received from: {}\n\n\
|
"Report received from: {}\n\n\
|
||||||
Event ID: {:?}\n\
|
Event ID: {}\n\
|
||||||
Room ID: {:?}\n\
|
Room ID: {}\n\
|
||||||
Sent By: {:?}\n\n\
|
Sent By: {}\n\n\
|
||||||
Report Score: {:?}\n\
|
Report Score: {}\n\
|
||||||
Report Reason: {:?}",
|
Report Reason: {}",
|
||||||
sender_user, pdu.event_id, pdu.room_id, pdu.sender, body.score, body.reason
|
sender_user, pdu.event_id, pdu.room_id, pdu.sender, body.score, body.reason
|
||||||
),
|
),
|
||||||
format!(
|
format!(
|
||||||
"<details><summary>Report received from: <a href=\"https://matrix.to/#/{0:?}\">{0:?}\
|
"<details><summary>Report received from: <a href=\"https://matrix.to/#/{0}\">{0}\
|
||||||
</a></summary><ul><li>Event Info<ul><li>Event ID: <code>{1:?}</code>\
|
</a></summary><ul><li>Event Info<ul><li>Event ID: <code>{1}</code>\
|
||||||
<a href=\"https://matrix.to/#/{2:?}/{1:?}\">🔗</a></li><li>Room ID: <code>{2:?}</code>\
|
<a href=\"https://matrix.to/#/{2}/{1}\">🔗</a></li><li>Room ID: <code>{2}</code>\
|
||||||
</li><li>Sent By: <a href=\"https://matrix.to/#/{3:?}\">{3:?}</a></li></ul></li><li>\
|
</li><li>Sent By: <a href=\"https://matrix.to/#/{3}\">{3}</a></li></ul></li><li>\
|
||||||
Report Info<ul><li>Report Score: {4:?}</li><li>Report Reason: {5}</li></ul></li>\
|
Report Info<ul><li>Report Score: {4}</li><li>Report Reason: {5}</li></ul></li>\
|
||||||
</ul></details>",
|
</ul></details>",
|
||||||
sender_user,
|
sender_user,
|
||||||
pdu.event_id,
|
pdu.event_id,
|
||||||
pdu.room_id,
|
pdu.room_id,
|
||||||
pdu.sender,
|
pdu.sender,
|
||||||
body.score,
|
body.score,
|
||||||
HtmlEscape(body.reason.as_deref().unwrap_or(""))
|
RawStr::new(&body.reason).html_escape()
|
||||||
),
|
),
|
||||||
));
|
));
|
||||||
|
|
||||||
Ok(report_content::v3::Response {})
|
db.flush()?;
|
||||||
|
|
||||||
|
Ok(report_content::Response {}.into())
|
||||||
}
|
}
|
|
@ -1,10 +1,11 @@
|
||||||
use crate::{
|
use crate::{
|
||||||
api::client_server::invite_helper, service::pdu::PduBuilder, services, Error, Result, Ruma,
|
client_server::invite_helper, database::DatabaseGuard, pdu::PduBuilder, ConduitResult, Error,
|
||||||
|
Ruma,
|
||||||
};
|
};
|
||||||
use ruma::{
|
use ruma::{
|
||||||
api::client::{
|
api::client::{
|
||||||
error::ErrorKind,
|
error::ErrorKind,
|
||||||
room::{self, aliases, create_room, get_room_event, upgrade_room},
|
r0::room::{self, aliases, create_room, get_room_event, upgrade_room},
|
||||||
},
|
},
|
||||||
events::{
|
events::{
|
||||||
room::{
|
room::{
|
||||||
|
@ -19,16 +20,19 @@ use ruma::{
|
||||||
tombstone::RoomTombstoneEventContent,
|
tombstone::RoomTombstoneEventContent,
|
||||||
topic::RoomTopicEventContent,
|
topic::RoomTopicEventContent,
|
||||||
},
|
},
|
||||||
StateEventType, TimelineEventType,
|
EventType,
|
||||||
},
|
},
|
||||||
int,
|
int,
|
||||||
serde::JsonObject,
|
serde::{CanonicalJsonObject, JsonObject},
|
||||||
CanonicalJsonObject, OwnedRoomAliasId, RoomAliasId, RoomId,
|
RoomAliasId, RoomId, RoomVersionId,
|
||||||
};
|
};
|
||||||
use serde_json::{json, value::to_raw_value};
|
use serde_json::{json, value::to_raw_value};
|
||||||
use std::{cmp::max, collections::BTreeMap, sync::Arc};
|
use std::{cmp::max, collections::BTreeMap, sync::Arc};
|
||||||
use tracing::{info, warn};
|
use tracing::{info, warn};
|
||||||
|
|
||||||
|
#[cfg(feature = "conduit_bin")]
|
||||||
|
use rocket::{get, post};
|
||||||
|
|
||||||
/// # `POST /_matrix/client/r0/createRoom`
|
/// # `POST /_matrix/client/r0/createRoom`
|
||||||
///
|
///
|
||||||
/// Creates a new room.
|
/// Creates a new room.
|
||||||
|
@ -45,20 +49,23 @@ use tracing::{info, warn};
|
||||||
/// - Send events listed in initial state
|
/// - Send events listed in initial state
|
||||||
/// - Send events implied by `name` and `topic`
|
/// - Send events implied by `name` and `topic`
|
||||||
/// - Send invite events
|
/// - Send invite events
|
||||||
|
#[cfg_attr(
|
||||||
|
feature = "conduit_bin",
|
||||||
|
post("/_matrix/client/r0/createRoom", data = "<body>")
|
||||||
|
)]
|
||||||
|
#[tracing::instrument(skip(db, body))]
|
||||||
pub async fn create_room_route(
|
pub async fn create_room_route(
|
||||||
body: Ruma<create_room::v3::Request>,
|
db: DatabaseGuard,
|
||||||
) -> Result<create_room::v3::Response> {
|
body: Ruma<create_room::Request<'_>>,
|
||||||
use create_room::v3::RoomPreset;
|
) -> ConduitResult<create_room::Response> {
|
||||||
|
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
let room_id = RoomId::new(services().globals.server_name());
|
let room_id = RoomId::new(db.globals.server_name());
|
||||||
|
|
||||||
services().rooms.short.get_or_create_shortroomid(&room_id)?;
|
db.rooms.get_or_create_shortroomid(&room_id, &db.globals)?;
|
||||||
|
|
||||||
let mutex_state = Arc::clone(
|
let mutex_state = Arc::clone(
|
||||||
services()
|
db.globals
|
||||||
.globals
|
|
||||||
.roomid_mutex_state
|
.roomid_mutex_state
|
||||||
.write()
|
.write()
|
||||||
.unwrap()
|
.unwrap()
|
||||||
|
@ -67,9 +74,9 @@ pub async fn create_room_route(
|
||||||
);
|
);
|
||||||
let state_lock = mutex_state.lock().await;
|
let state_lock = mutex_state.lock().await;
|
||||||
|
|
||||||
if !services().globals.allow_room_creation()
|
if !db.globals.allow_room_creation()
|
||||||
&& !body.from_appservice
|
&& !body.from_appservice
|
||||||
&& !services().users.is_admin(sender_user)?
|
&& !db.users.is_admin(sender_user, &db.rooms, &db.globals)?
|
||||||
{
|
{
|
||||||
return Err(Error::BadRequest(
|
return Err(Error::BadRequest(
|
||||||
ErrorKind::Forbidden,
|
ErrorKind::Forbidden,
|
||||||
|
@ -77,24 +84,18 @@ pub async fn create_room_route(
|
||||||
));
|
));
|
||||||
}
|
}
|
||||||
|
|
||||||
let alias: Option<OwnedRoomAliasId> =
|
let alias: Option<Box<RoomAliasId>> =
|
||||||
body.room_alias_name
|
body.room_alias_name
|
||||||
.as_ref()
|
.as_ref()
|
||||||
.map_or(Ok(None), |localpart| {
|
.map_or(Ok(None), |localpart| {
|
||||||
// TODO: Check for invalid characters and maximum length
|
// TODO: Check for invalid characters and maximum length
|
||||||
let alias = RoomAliasId::parse(format!(
|
let alias =
|
||||||
"#{}:{}",
|
RoomAliasId::parse(format!("#{}:{}", localpart, db.globals.server_name()))
|
||||||
localpart,
|
.map_err(|_| {
|
||||||
services().globals.server_name()
|
Error::BadRequest(ErrorKind::InvalidParam, "Invalid alias.")
|
||||||
))
|
})?;
|
||||||
.map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid alias."))?;
|
|
||||||
|
|
||||||
if services()
|
if db.rooms.id_from_alias(&alias)?.is_some() {
|
||||||
.rooms
|
|
||||||
.alias
|
|
||||||
.resolve_local_alias(&alias)?
|
|
||||||
.is_some()
|
|
||||||
{
|
|
||||||
Err(Error::BadRequest(
|
Err(Error::BadRequest(
|
||||||
ErrorKind::RoomInUse,
|
ErrorKind::RoomInUse,
|
||||||
"Room alias already exists.",
|
"Room alias already exists.",
|
||||||
|
@ -106,11 +107,7 @@ pub async fn create_room_route(
|
||||||
|
|
||||||
let room_version = match body.room_version.clone() {
|
let room_version = match body.room_version.clone() {
|
||||||
Some(room_version) => {
|
Some(room_version) => {
|
||||||
if services()
|
if room_version == RoomVersionId::V5 || room_version == RoomVersionId::V6 {
|
||||||
.globals
|
|
||||||
.supported_room_versions()
|
|
||||||
.contains(&room_version)
|
|
||||||
{
|
|
||||||
room_version
|
room_version
|
||||||
} else {
|
} else {
|
||||||
return Err(Error::BadRequest(
|
return Err(Error::BadRequest(
|
||||||
|
@ -119,7 +116,7 @@ pub async fn create_room_route(
|
||||||
));
|
));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
None => services().globals.default_room_version(),
|
None => RoomVersionId::V6,
|
||||||
};
|
};
|
||||||
|
|
||||||
let content = match &body.creation_content {
|
let content = match &body.creation_content {
|
||||||
|
@ -142,9 +139,8 @@ pub async fn create_room_route(
|
||||||
content
|
content
|
||||||
}
|
}
|
||||||
None => {
|
None => {
|
||||||
// TODO: Add correct value for v11
|
|
||||||
let mut content = serde_json::from_str::<CanonicalJsonObject>(
|
let mut content = serde_json::from_str::<CanonicalJsonObject>(
|
||||||
to_raw_value(&RoomCreateEventContent::new_v1(sender_user.clone()))
|
to_raw_value(&RoomCreateEventContent::new(sender_user.clone()))
|
||||||
.map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Invalid creation content"))?
|
.map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Invalid creation content"))?
|
||||||
.get(),
|
.get(),
|
||||||
)
|
)
|
||||||
|
@ -174,9 +170,9 @@ pub async fn create_room_route(
|
||||||
}
|
}
|
||||||
|
|
||||||
// 1. The room create event
|
// 1. The room create event
|
||||||
services().rooms.timeline.build_and_append_pdu(
|
db.rooms.build_and_append_pdu(
|
||||||
PduBuilder {
|
PduBuilder {
|
||||||
event_type: TimelineEventType::RoomCreate,
|
event_type: EventType::RoomCreate,
|
||||||
content: to_raw_value(&content).expect("event is valid, we just created it"),
|
content: to_raw_value(&content).expect("event is valid, we just created it"),
|
||||||
unsigned: None,
|
unsigned: None,
|
||||||
state_key: Some("".to_owned()),
|
state_key: Some("".to_owned()),
|
||||||
|
@ -184,20 +180,21 @@ pub async fn create_room_route(
|
||||||
},
|
},
|
||||||
sender_user,
|
sender_user,
|
||||||
&room_id,
|
&room_id,
|
||||||
|
&db,
|
||||||
&state_lock,
|
&state_lock,
|
||||||
)?;
|
)?;
|
||||||
|
|
||||||
// 2. Let the room creator join
|
// 2. Let the room creator join
|
||||||
services().rooms.timeline.build_and_append_pdu(
|
db.rooms.build_and_append_pdu(
|
||||||
PduBuilder {
|
PduBuilder {
|
||||||
event_type: TimelineEventType::RoomMember,
|
event_type: EventType::RoomMember,
|
||||||
content: to_raw_value(&RoomMemberEventContent {
|
content: to_raw_value(&RoomMemberEventContent {
|
||||||
membership: MembershipState::Join,
|
membership: MembershipState::Join,
|
||||||
displayname: services().users.displayname(sender_user)?,
|
displayname: db.users.displayname(sender_user)?,
|
||||||
avatar_url: services().users.avatar_url(sender_user)?,
|
avatar_url: db.users.avatar_url(sender_user)?,
|
||||||
is_direct: Some(body.is_direct),
|
is_direct: Some(body.is_direct),
|
||||||
third_party_invite: None,
|
third_party_invite: None,
|
||||||
blurhash: services().users.blurhash(sender_user)?,
|
blurhash: db.users.blurhash(sender_user)?,
|
||||||
reason: None,
|
reason: None,
|
||||||
join_authorized_via_users_server: None,
|
join_authorized_via_users_server: None,
|
||||||
})
|
})
|
||||||
|
@ -208,22 +205,26 @@ pub async fn create_room_route(
|
||||||
},
|
},
|
||||||
sender_user,
|
sender_user,
|
||||||
&room_id,
|
&room_id,
|
||||||
|
&db,
|
||||||
&state_lock,
|
&state_lock,
|
||||||
)?;
|
)?;
|
||||||
|
|
||||||
// 3. Power levels
|
// 3. Power levels
|
||||||
|
|
||||||
// Figure out preset. We need it for preset specific events
|
// Figure out preset. We need it for preset specific events
|
||||||
let preset = body.preset.clone().unwrap_or(match &body.visibility {
|
let preset = body
|
||||||
room::Visibility::Private => RoomPreset::PrivateChat,
|
.preset
|
||||||
room::Visibility::Public => RoomPreset::PublicChat,
|
.clone()
|
||||||
_ => RoomPreset::PrivateChat, // Room visibility should not be custom
|
.unwrap_or_else(|| match &body.visibility {
|
||||||
|
room::Visibility::Private => create_room::RoomPreset::PrivateChat,
|
||||||
|
room::Visibility::Public => create_room::RoomPreset::PublicChat,
|
||||||
|
_ => create_room::RoomPreset::PrivateChat, // Room visibility should not be custom
|
||||||
});
|
});
|
||||||
|
|
||||||
let mut users = BTreeMap::new();
|
let mut users = BTreeMap::new();
|
||||||
users.insert(sender_user.clone(), int!(100));
|
users.insert(sender_user.clone(), int!(100));
|
||||||
|
|
||||||
if preset == RoomPreset::TrustedPrivateChat {
|
if preset == create_room::RoomPreset::TrustedPrivateChat {
|
||||||
for invite_ in &body.invite {
|
for invite_ in &body.invite {
|
||||||
users.insert(invite_.clone(), int!(100));
|
users.insert(invite_.clone(), int!(100));
|
||||||
}
|
}
|
||||||
|
@ -246,9 +247,9 @@ pub async fn create_room_route(
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
services().rooms.timeline.build_and_append_pdu(
|
db.rooms.build_and_append_pdu(
|
||||||
PduBuilder {
|
PduBuilder {
|
||||||
event_type: TimelineEventType::RoomPowerLevels,
|
event_type: EventType::RoomPowerLevels,
|
||||||
content: to_raw_value(&power_levels_content)
|
content: to_raw_value(&power_levels_content)
|
||||||
.expect("to_raw_value always works on serde_json::Value"),
|
.expect("to_raw_value always works on serde_json::Value"),
|
||||||
unsigned: None,
|
unsigned: None,
|
||||||
|
@ -257,14 +258,15 @@ pub async fn create_room_route(
|
||||||
},
|
},
|
||||||
sender_user,
|
sender_user,
|
||||||
&room_id,
|
&room_id,
|
||||||
|
&db,
|
||||||
&state_lock,
|
&state_lock,
|
||||||
)?;
|
)?;
|
||||||
|
|
||||||
// 4. Canonical room alias
|
// 4. Canonical room alias
|
||||||
if let Some(room_alias_id) = &alias {
|
if let Some(room_alias_id) = &alias {
|
||||||
services().rooms.timeline.build_and_append_pdu(
|
db.rooms.build_and_append_pdu(
|
||||||
PduBuilder {
|
PduBuilder {
|
||||||
event_type: TimelineEventType::RoomCanonicalAlias,
|
event_type: EventType::RoomCanonicalAlias,
|
||||||
content: to_raw_value(&RoomCanonicalAliasEventContent {
|
content: to_raw_value(&RoomCanonicalAliasEventContent {
|
||||||
alias: Some(room_alias_id.to_owned()),
|
alias: Some(room_alias_id.to_owned()),
|
||||||
alt_aliases: vec![],
|
alt_aliases: vec![],
|
||||||
|
@ -276,6 +278,7 @@ pub async fn create_room_route(
|
||||||
},
|
},
|
||||||
sender_user,
|
sender_user,
|
||||||
&room_id,
|
&room_id,
|
||||||
|
&db,
|
||||||
&state_lock,
|
&state_lock,
|
||||||
)?;
|
)?;
|
||||||
}
|
}
|
||||||
|
@ -283,11 +286,11 @@ pub async fn create_room_route(
|
||||||
// 5. Events set by preset
|
// 5. Events set by preset
|
||||||
|
|
||||||
// 5.1 Join Rules
|
// 5.1 Join Rules
|
||||||
services().rooms.timeline.build_and_append_pdu(
|
db.rooms.build_and_append_pdu(
|
||||||
PduBuilder {
|
PduBuilder {
|
||||||
event_type: TimelineEventType::RoomJoinRules,
|
event_type: EventType::RoomJoinRules,
|
||||||
content: to_raw_value(&RoomJoinRulesEventContent::new(match preset {
|
content: to_raw_value(&RoomJoinRulesEventContent::new(match preset {
|
||||||
RoomPreset::PublicChat => JoinRule::Public,
|
create_room::RoomPreset::PublicChat => JoinRule::Public,
|
||||||
// according to spec "invite" is the default
|
// according to spec "invite" is the default
|
||||||
_ => JoinRule::Invite,
|
_ => JoinRule::Invite,
|
||||||
}))
|
}))
|
||||||
|
@ -298,13 +301,14 @@ pub async fn create_room_route(
|
||||||
},
|
},
|
||||||
sender_user,
|
sender_user,
|
||||||
&room_id,
|
&room_id,
|
||||||
|
&db,
|
||||||
&state_lock,
|
&state_lock,
|
||||||
)?;
|
)?;
|
||||||
|
|
||||||
// 5.2 History Visibility
|
// 5.2 History Visibility
|
||||||
services().rooms.timeline.build_and_append_pdu(
|
db.rooms.build_and_append_pdu(
|
||||||
PduBuilder {
|
PduBuilder {
|
||||||
event_type: TimelineEventType::RoomHistoryVisibility,
|
event_type: EventType::RoomHistoryVisibility,
|
||||||
content: to_raw_value(&RoomHistoryVisibilityEventContent::new(
|
content: to_raw_value(&RoomHistoryVisibilityEventContent::new(
|
||||||
HistoryVisibility::Shared,
|
HistoryVisibility::Shared,
|
||||||
))
|
))
|
||||||
|
@ -315,15 +319,16 @@ pub async fn create_room_route(
|
||||||
},
|
},
|
||||||
sender_user,
|
sender_user,
|
||||||
&room_id,
|
&room_id,
|
||||||
|
&db,
|
||||||
&state_lock,
|
&state_lock,
|
||||||
)?;
|
)?;
|
||||||
|
|
||||||
// 5.3 Guest Access
|
// 5.3 Guest Access
|
||||||
services().rooms.timeline.build_and_append_pdu(
|
db.rooms.build_and_append_pdu(
|
||||||
PduBuilder {
|
PduBuilder {
|
||||||
event_type: TimelineEventType::RoomGuestAccess,
|
event_type: EventType::RoomGuestAccess,
|
||||||
content: to_raw_value(&RoomGuestAccessEventContent::new(match preset {
|
content: to_raw_value(&RoomGuestAccessEventContent::new(match preset {
|
||||||
RoomPreset::PublicChat => GuestAccess::Forbidden,
|
create_room::RoomPreset::PublicChat => GuestAccess::Forbidden,
|
||||||
_ => GuestAccess::CanJoin,
|
_ => GuestAccess::CanJoin,
|
||||||
}))
|
}))
|
||||||
.expect("event is valid, we just created it"),
|
.expect("event is valid, we just created it"),
|
||||||
|
@ -333,6 +338,7 @@ pub async fn create_room_route(
|
||||||
},
|
},
|
||||||
sender_user,
|
sender_user,
|
||||||
&room_id,
|
&room_id,
|
||||||
|
&db,
|
||||||
&state_lock,
|
&state_lock,
|
||||||
)?;
|
)?;
|
||||||
|
|
||||||
|
@ -347,26 +353,20 @@ pub async fn create_room_route(
|
||||||
pdu_builder.state_key.get_or_insert_with(|| "".to_owned());
|
pdu_builder.state_key.get_or_insert_with(|| "".to_owned());
|
||||||
|
|
||||||
// Silently skip encryption events if they are not allowed
|
// Silently skip encryption events if they are not allowed
|
||||||
if pdu_builder.event_type == TimelineEventType::RoomEncryption
|
if pdu_builder.event_type == EventType::RoomEncryption && !db.globals.allow_encryption() {
|
||||||
&& !services().globals.allow_encryption()
|
|
||||||
{
|
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
|
|
||||||
services().rooms.timeline.build_and_append_pdu(
|
db.rooms
|
||||||
pdu_builder,
|
.build_and_append_pdu(pdu_builder, sender_user, &room_id, &db, &state_lock)?;
|
||||||
sender_user,
|
|
||||||
&room_id,
|
|
||||||
&state_lock,
|
|
||||||
)?;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// 7. Events implied by name and topic
|
// 7. Events implied by name and topic
|
||||||
if let Some(name) = &body.name {
|
if let Some(name) = &body.name {
|
||||||
services().rooms.timeline.build_and_append_pdu(
|
db.rooms.build_and_append_pdu(
|
||||||
PduBuilder {
|
PduBuilder {
|
||||||
event_type: TimelineEventType::RoomName,
|
event_type: EventType::RoomName,
|
||||||
content: to_raw_value(&RoomNameEventContent::new(name.clone()))
|
content: to_raw_value(&RoomNameEventContent::new(Some(name.clone())))
|
||||||
.expect("event is valid, we just created it"),
|
.expect("event is valid, we just created it"),
|
||||||
unsigned: None,
|
unsigned: None,
|
||||||
state_key: Some("".to_owned()),
|
state_key: Some("".to_owned()),
|
||||||
|
@ -374,14 +374,15 @@ pub async fn create_room_route(
|
||||||
},
|
},
|
||||||
sender_user,
|
sender_user,
|
||||||
&room_id,
|
&room_id,
|
||||||
|
&db,
|
||||||
&state_lock,
|
&state_lock,
|
||||||
)?;
|
)?;
|
||||||
}
|
}
|
||||||
|
|
||||||
if let Some(topic) = &body.topic {
|
if let Some(topic) = &body.topic {
|
||||||
services().rooms.timeline.build_and_append_pdu(
|
db.rooms.build_and_append_pdu(
|
||||||
PduBuilder {
|
PduBuilder {
|
||||||
event_type: TimelineEventType::RoomTopic,
|
event_type: EventType::RoomTopic,
|
||||||
content: to_raw_value(&RoomTopicEventContent {
|
content: to_raw_value(&RoomTopicEventContent {
|
||||||
topic: topic.clone(),
|
topic: topic.clone(),
|
||||||
})
|
})
|
||||||
|
@ -392,6 +393,7 @@ pub async fn create_room_route(
|
||||||
},
|
},
|
||||||
sender_user,
|
sender_user,
|
||||||
&room_id,
|
&room_id,
|
||||||
|
&db,
|
||||||
&state_lock,
|
&state_lock,
|
||||||
)?;
|
)?;
|
||||||
}
|
}
|
||||||
|
@ -399,21 +401,23 @@ pub async fn create_room_route(
|
||||||
// 8. Events implied by invite (and TODO: invite_3pid)
|
// 8. Events implied by invite (and TODO: invite_3pid)
|
||||||
drop(state_lock);
|
drop(state_lock);
|
||||||
for user_id in &body.invite {
|
for user_id in &body.invite {
|
||||||
let _ = invite_helper(sender_user, user_id, &room_id, None, body.is_direct).await;
|
let _ = invite_helper(sender_user, user_id, &room_id, &db, body.is_direct).await;
|
||||||
}
|
}
|
||||||
|
|
||||||
// Homeserver specific stuff
|
// Homeserver specific stuff
|
||||||
if let Some(alias) = alias {
|
if let Some(alias) = alias {
|
||||||
services().rooms.alias.set_alias(&alias, &room_id)?;
|
db.rooms.set_alias(&alias, Some(&room_id), &db.globals)?;
|
||||||
}
|
}
|
||||||
|
|
||||||
if body.visibility == room::Visibility::Public {
|
if body.visibility == room::Visibility::Public {
|
||||||
services().rooms.directory.set_public(&room_id)?;
|
db.rooms.set_public(&room_id, true)?;
|
||||||
}
|
}
|
||||||
|
|
||||||
info!("{} created a room", sender_user);
|
info!("{} created a room", sender_user);
|
||||||
|
|
||||||
Ok(create_room::v3::Response::new(room_id))
|
db.flush()?;
|
||||||
|
|
||||||
|
Ok(create_room::Response::new(room_id).into())
|
||||||
}
|
}
|
||||||
|
|
||||||
/// # `GET /_matrix/client/r0/rooms/{roomId}/event/{eventId}`
|
/// # `GET /_matrix/client/r0/rooms/{roomId}/event/{eventId}`
|
||||||
|
@ -421,37 +425,32 @@ pub async fn create_room_route(
|
||||||
/// Gets a single event.
|
/// Gets a single event.
|
||||||
///
|
///
|
||||||
/// - You have to currently be joined to the room (TODO: Respect history visibility)
|
/// - You have to currently be joined to the room (TODO: Respect history visibility)
|
||||||
|
#[cfg_attr(
|
||||||
|
feature = "conduit_bin",
|
||||||
|
get("/_matrix/client/r0/rooms/<_>/event/<_>", data = "<body>")
|
||||||
|
)]
|
||||||
|
#[tracing::instrument(skip(db, body))]
|
||||||
pub async fn get_room_event_route(
|
pub async fn get_room_event_route(
|
||||||
body: Ruma<get_room_event::v3::Request>,
|
db: DatabaseGuard,
|
||||||
) -> Result<get_room_event::v3::Response> {
|
body: Ruma<get_room_event::Request<'_>>,
|
||||||
|
) -> ConduitResult<get_room_event::Response> {
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
let event = services()
|
if !db.rooms.is_joined(sender_user, &body.room_id)? {
|
||||||
.rooms
|
|
||||||
.timeline
|
|
||||||
.get_pdu(&body.event_id)?
|
|
||||||
.ok_or_else(|| {
|
|
||||||
warn!("Event not found, event ID: {:?}", &body.event_id);
|
|
||||||
Error::BadRequest(ErrorKind::NotFound, "Event not found.")
|
|
||||||
})?;
|
|
||||||
|
|
||||||
if !services().rooms.state_accessor.user_can_see_event(
|
|
||||||
sender_user,
|
|
||||||
&event.room_id,
|
|
||||||
&body.event_id,
|
|
||||||
)? {
|
|
||||||
return Err(Error::BadRequest(
|
return Err(Error::BadRequest(
|
||||||
ErrorKind::Forbidden,
|
ErrorKind::Forbidden,
|
||||||
"You don't have permission to view this event.",
|
"You don't have permission to view this room.",
|
||||||
));
|
));
|
||||||
}
|
}
|
||||||
|
|
||||||
let mut event = (*event).clone();
|
Ok(get_room_event::Response {
|
||||||
event.add_age()?;
|
event: db
|
||||||
|
.rooms
|
||||||
Ok(get_room_event::v3::Response {
|
.get_pdu(&body.event_id)?
|
||||||
event: event.to_room_event(),
|
.ok_or(Error::BadRequest(ErrorKind::NotFound, "Event not found."))?
|
||||||
})
|
.to_room_event(),
|
||||||
|
}
|
||||||
|
.into())
|
||||||
}
|
}
|
||||||
|
|
||||||
/// # `GET /_matrix/client/r0/rooms/{roomId}/aliases`
|
/// # `GET /_matrix/client/r0/rooms/{roomId}/aliases`
|
||||||
|
@ -459,30 +458,32 @@ pub async fn get_room_event_route(
|
||||||
/// Lists all aliases of the room.
|
/// Lists all aliases of the room.
|
||||||
///
|
///
|
||||||
/// - Only users joined to the room are allowed to call this TODO: Allow any user to call it if history_visibility is world readable
|
/// - Only users joined to the room are allowed to call this TODO: Allow any user to call it if history_visibility is world readable
|
||||||
|
#[cfg_attr(
|
||||||
|
feature = "conduit_bin",
|
||||||
|
get("/_matrix/client/r0/rooms/<_>/aliases", data = "<body>")
|
||||||
|
)]
|
||||||
|
#[tracing::instrument(skip(db, body))]
|
||||||
pub async fn get_room_aliases_route(
|
pub async fn get_room_aliases_route(
|
||||||
body: Ruma<aliases::v3::Request>,
|
db: DatabaseGuard,
|
||||||
) -> Result<aliases::v3::Response> {
|
body: Ruma<aliases::Request<'_>>,
|
||||||
|
) -> ConduitResult<aliases::Response> {
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
if !services()
|
if !db.rooms.is_joined(sender_user, &body.room_id)? {
|
||||||
.rooms
|
|
||||||
.state_cache
|
|
||||||
.is_joined(sender_user, &body.room_id)?
|
|
||||||
{
|
|
||||||
return Err(Error::BadRequest(
|
return Err(Error::BadRequest(
|
||||||
ErrorKind::Forbidden,
|
ErrorKind::Forbidden,
|
||||||
"You don't have permission to view this room.",
|
"You don't have permission to view this room.",
|
||||||
));
|
));
|
||||||
}
|
}
|
||||||
|
|
||||||
Ok(aliases::v3::Response {
|
Ok(aliases::Response {
|
||||||
aliases: services()
|
aliases: db
|
||||||
.rooms
|
.rooms
|
||||||
.alias
|
.room_aliases(&body.room_id)
|
||||||
.local_aliases_for_room(&body.room_id)
|
|
||||||
.filter_map(|a| a.ok())
|
.filter_map(|a| a.ok())
|
||||||
.collect(),
|
.collect(),
|
||||||
})
|
}
|
||||||
|
.into())
|
||||||
}
|
}
|
||||||
|
|
||||||
/// # `POST /_matrix/client/r0/rooms/{roomId}/upgrade`
|
/// # `POST /_matrix/client/r0/rooms/{roomId}/upgrade`
|
||||||
|
@ -495,16 +496,18 @@ pub async fn get_room_aliases_route(
|
||||||
/// - Transfers some state events
|
/// - Transfers some state events
|
||||||
/// - Moves local aliases
|
/// - Moves local aliases
|
||||||
/// - Modifies old room power levels to prevent users from speaking
|
/// - Modifies old room power levels to prevent users from speaking
|
||||||
|
#[cfg_attr(
|
||||||
|
feature = "conduit_bin",
|
||||||
|
post("/_matrix/client/r0/rooms/<_>/upgrade", data = "<body>")
|
||||||
|
)]
|
||||||
|
#[tracing::instrument(skip(db, body))]
|
||||||
pub async fn upgrade_room_route(
|
pub async fn upgrade_room_route(
|
||||||
body: Ruma<upgrade_room::v3::Request>,
|
db: DatabaseGuard,
|
||||||
) -> Result<upgrade_room::v3::Response> {
|
body: Ruma<upgrade_room::Request<'_>>,
|
||||||
|
) -> ConduitResult<upgrade_room::Response> {
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
if !services()
|
if !matches!(body.new_version, RoomVersionId::V5 | RoomVersionId::V6) {
|
||||||
.globals
|
|
||||||
.supported_room_versions()
|
|
||||||
.contains(&body.new_version)
|
|
||||||
{
|
|
||||||
return Err(Error::BadRequest(
|
return Err(Error::BadRequest(
|
||||||
ErrorKind::UnsupportedRoomVersion,
|
ErrorKind::UnsupportedRoomVersion,
|
||||||
"This server does not support that room version.",
|
"This server does not support that room version.",
|
||||||
|
@ -512,15 +515,12 @@ pub async fn upgrade_room_route(
|
||||||
}
|
}
|
||||||
|
|
||||||
// Create a replacement room
|
// Create a replacement room
|
||||||
let replacement_room = RoomId::new(services().globals.server_name());
|
let replacement_room = RoomId::new(db.globals.server_name());
|
||||||
services()
|
db.rooms
|
||||||
.rooms
|
.get_or_create_shortroomid(&replacement_room, &db.globals)?;
|
||||||
.short
|
|
||||||
.get_or_create_shortroomid(&replacement_room)?;
|
|
||||||
|
|
||||||
let mutex_state = Arc::clone(
|
let mutex_state = Arc::clone(
|
||||||
services()
|
db.globals
|
||||||
.globals
|
|
||||||
.roomid_mutex_state
|
.roomid_mutex_state
|
||||||
.write()
|
.write()
|
||||||
.unwrap()
|
.unwrap()
|
||||||
|
@ -531,9 +531,9 @@ pub async fn upgrade_room_route(
|
||||||
|
|
||||||
// Send a m.room.tombstone event to the old room to indicate that it is not intended to be used any further
|
// Send a m.room.tombstone event to the old room to indicate that it is not intended to be used any further
|
||||||
// Fail if the sender does not have the required permissions
|
// Fail if the sender does not have the required permissions
|
||||||
let tombstone_event_id = services().rooms.timeline.build_and_append_pdu(
|
let tombstone_event_id = db.rooms.build_and_append_pdu(
|
||||||
PduBuilder {
|
PduBuilder {
|
||||||
event_type: TimelineEventType::RoomTombstone,
|
event_type: EventType::RoomTombstone,
|
||||||
content: to_raw_value(&RoomTombstoneEventContent {
|
content: to_raw_value(&RoomTombstoneEventContent {
|
||||||
body: "This room has been replaced".to_owned(),
|
body: "This room has been replaced".to_owned(),
|
||||||
replacement_room: replacement_room.clone(),
|
replacement_room: replacement_room.clone(),
|
||||||
|
@ -545,14 +545,14 @@ pub async fn upgrade_room_route(
|
||||||
},
|
},
|
||||||
sender_user,
|
sender_user,
|
||||||
&body.room_id,
|
&body.room_id,
|
||||||
|
&db,
|
||||||
&state_lock,
|
&state_lock,
|
||||||
)?;
|
)?;
|
||||||
|
|
||||||
// Change lock to replacement room
|
// Change lock to replacement room
|
||||||
drop(state_lock);
|
drop(state_lock);
|
||||||
let mutex_state = Arc::clone(
|
let mutex_state = Arc::clone(
|
||||||
services()
|
db.globals
|
||||||
.globals
|
|
||||||
.roomid_mutex_state
|
.roomid_mutex_state
|
||||||
.write()
|
.write()
|
||||||
.unwrap()
|
.unwrap()
|
||||||
|
@ -563,10 +563,8 @@ pub async fn upgrade_room_route(
|
||||||
|
|
||||||
// Get the old room creation event
|
// Get the old room creation event
|
||||||
let mut create_event_content = serde_json::from_str::<CanonicalJsonObject>(
|
let mut create_event_content = serde_json::from_str::<CanonicalJsonObject>(
|
||||||
services()
|
db.rooms
|
||||||
.rooms
|
.room_state_get(&body.room_id, &EventType::RoomCreate, "")?
|
||||||
.state_accessor
|
|
||||||
.room_state_get(&body.room_id, &StateEventType::RoomCreate, "")?
|
|
||||||
.ok_or_else(|| Error::bad_database("Found room without m.room.create event."))?
|
.ok_or_else(|| Error::bad_database("Found room without m.room.create event."))?
|
||||||
.content
|
.content
|
||||||
.get(),
|
.get(),
|
||||||
|
@ -613,9 +611,9 @@ pub async fn upgrade_room_route(
|
||||||
));
|
));
|
||||||
}
|
}
|
||||||
|
|
||||||
services().rooms.timeline.build_and_append_pdu(
|
db.rooms.build_and_append_pdu(
|
||||||
PduBuilder {
|
PduBuilder {
|
||||||
event_type: TimelineEventType::RoomCreate,
|
event_type: EventType::RoomCreate,
|
||||||
content: to_raw_value(&create_event_content)
|
content: to_raw_value(&create_event_content)
|
||||||
.expect("event is valid, we just created it"),
|
.expect("event is valid, we just created it"),
|
||||||
unsigned: None,
|
unsigned: None,
|
||||||
|
@ -624,20 +622,21 @@ pub async fn upgrade_room_route(
|
||||||
},
|
},
|
||||||
sender_user,
|
sender_user,
|
||||||
&replacement_room,
|
&replacement_room,
|
||||||
|
&db,
|
||||||
&state_lock,
|
&state_lock,
|
||||||
)?;
|
)?;
|
||||||
|
|
||||||
// Join the new room
|
// Join the new room
|
||||||
services().rooms.timeline.build_and_append_pdu(
|
db.rooms.build_and_append_pdu(
|
||||||
PduBuilder {
|
PduBuilder {
|
||||||
event_type: TimelineEventType::RoomMember,
|
event_type: EventType::RoomMember,
|
||||||
content: to_raw_value(&RoomMemberEventContent {
|
content: to_raw_value(&RoomMemberEventContent {
|
||||||
membership: MembershipState::Join,
|
membership: MembershipState::Join,
|
||||||
displayname: services().users.displayname(sender_user)?,
|
displayname: db.users.displayname(sender_user)?,
|
||||||
avatar_url: services().users.avatar_url(sender_user)?,
|
avatar_url: db.users.avatar_url(sender_user)?,
|
||||||
is_direct: None,
|
is_direct: None,
|
||||||
third_party_invite: None,
|
third_party_invite: None,
|
||||||
blurhash: services().users.blurhash(sender_user)?,
|
blurhash: db.users.blurhash(sender_user)?,
|
||||||
reason: None,
|
reason: None,
|
||||||
join_authorized_via_users_server: None,
|
join_authorized_via_users_server: None,
|
||||||
})
|
})
|
||||||
|
@ -648,37 +647,33 @@ pub async fn upgrade_room_route(
|
||||||
},
|
},
|
||||||
sender_user,
|
sender_user,
|
||||||
&replacement_room,
|
&replacement_room,
|
||||||
|
&db,
|
||||||
&state_lock,
|
&state_lock,
|
||||||
)?;
|
)?;
|
||||||
|
|
||||||
// Recommended transferable state events list from the specs
|
// Recommended transferable state events list from the specs
|
||||||
let transferable_state_events = vec![
|
let transferable_state_events = vec![
|
||||||
StateEventType::RoomServerAcl,
|
EventType::RoomServerAcl,
|
||||||
StateEventType::RoomEncryption,
|
EventType::RoomEncryption,
|
||||||
StateEventType::RoomName,
|
EventType::RoomName,
|
||||||
StateEventType::RoomAvatar,
|
EventType::RoomAvatar,
|
||||||
StateEventType::RoomTopic,
|
EventType::RoomTopic,
|
||||||
StateEventType::RoomGuestAccess,
|
EventType::RoomGuestAccess,
|
||||||
StateEventType::RoomHistoryVisibility,
|
EventType::RoomHistoryVisibility,
|
||||||
StateEventType::RoomJoinRules,
|
EventType::RoomJoinRules,
|
||||||
StateEventType::RoomPowerLevels,
|
EventType::RoomPowerLevels,
|
||||||
];
|
];
|
||||||
|
|
||||||
// Replicate transferable state events to the new room
|
// Replicate transferable state events to the new room
|
||||||
for event_type in transferable_state_events {
|
for event_type in transferable_state_events {
|
||||||
let event_content =
|
let event_content = match db.rooms.room_state_get(&body.room_id, &event_type, "")? {
|
||||||
match services()
|
|
||||||
.rooms
|
|
||||||
.state_accessor
|
|
||||||
.room_state_get(&body.room_id, &event_type, "")?
|
|
||||||
{
|
|
||||||
Some(v) => v.content.clone(),
|
Some(v) => v.content.clone(),
|
||||||
None => continue, // Skipping missing events.
|
None => continue, // Skipping missing events.
|
||||||
};
|
};
|
||||||
|
|
||||||
services().rooms.timeline.build_and_append_pdu(
|
db.rooms.build_and_append_pdu(
|
||||||
PduBuilder {
|
PduBuilder {
|
||||||
event_type: event_type.to_string().into(),
|
event_type,
|
||||||
content: event_content,
|
content: event_content,
|
||||||
unsigned: None,
|
unsigned: None,
|
||||||
state_key: Some("".to_owned()),
|
state_key: Some("".to_owned()),
|
||||||
|
@ -686,29 +681,21 @@ pub async fn upgrade_room_route(
|
||||||
},
|
},
|
||||||
sender_user,
|
sender_user,
|
||||||
&replacement_room,
|
&replacement_room,
|
||||||
|
&db,
|
||||||
&state_lock,
|
&state_lock,
|
||||||
)?;
|
)?;
|
||||||
}
|
}
|
||||||
|
|
||||||
// Moves any local aliases to the new room
|
// Moves any local aliases to the new room
|
||||||
for alias in services()
|
for alias in db.rooms.room_aliases(&body.room_id).filter_map(|r| r.ok()) {
|
||||||
.rooms
|
db.rooms
|
||||||
.alias
|
.set_alias(&alias, Some(&replacement_room), &db.globals)?;
|
||||||
.local_aliases_for_room(&body.room_id)
|
|
||||||
.filter_map(|r| r.ok())
|
|
||||||
{
|
|
||||||
services()
|
|
||||||
.rooms
|
|
||||||
.alias
|
|
||||||
.set_alias(&alias, &replacement_room)?;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Get the old room power levels
|
// Get the old room power levels
|
||||||
let mut power_levels_event_content: RoomPowerLevelsEventContent = serde_json::from_str(
|
let mut power_levels_event_content: RoomPowerLevelsEventContent = serde_json::from_str(
|
||||||
services()
|
db.rooms
|
||||||
.rooms
|
.room_state_get(&body.room_id, &EventType::RoomPowerLevels, "")?
|
||||||
.state_accessor
|
|
||||||
.room_state_get(&body.room_id, &StateEventType::RoomPowerLevels, "")?
|
|
||||||
.ok_or_else(|| Error::bad_database("Found room without m.room.create event."))?
|
.ok_or_else(|| Error::bad_database("Found room without m.room.create event."))?
|
||||||
.content
|
.content
|
||||||
.get(),
|
.get(),
|
||||||
|
@ -721,9 +708,9 @@ pub async fn upgrade_room_route(
|
||||||
power_levels_event_content.invite = new_level;
|
power_levels_event_content.invite = new_level;
|
||||||
|
|
||||||
// Modify the power levels in the old room to prevent sending of events and inviting new users
|
// Modify the power levels in the old room to prevent sending of events and inviting new users
|
||||||
let _ = services().rooms.timeline.build_and_append_pdu(
|
let _ = db.rooms.build_and_append_pdu(
|
||||||
PduBuilder {
|
PduBuilder {
|
||||||
event_type: TimelineEventType::RoomPowerLevels,
|
event_type: EventType::RoomPowerLevels,
|
||||||
content: to_raw_value(&power_levels_event_content)
|
content: to_raw_value(&power_levels_event_content)
|
||||||
.expect("event is valid, we just created it"),
|
.expect("event is valid, we just created it"),
|
||||||
unsigned: None,
|
unsigned: None,
|
||||||
|
@ -732,11 +719,14 @@ pub async fn upgrade_room_route(
|
||||||
},
|
},
|
||||||
sender_user,
|
sender_user,
|
||||||
&body.room_id,
|
&body.room_id,
|
||||||
|
&db,
|
||||||
&state_lock,
|
&state_lock,
|
||||||
)?;
|
)?;
|
||||||
|
|
||||||
drop(state_lock);
|
drop(state_lock);
|
||||||
|
|
||||||
|
db.flush()?;
|
||||||
|
|
||||||
// Return the replacement room id
|
// Return the replacement room id
|
||||||
Ok(upgrade_room::v3::Response { replacement_room })
|
Ok(upgrade_room::Response { replacement_room }.into())
|
||||||
}
|
}
|
|
@ -1,12 +1,9 @@
|
||||||
use crate::{services, Error, Result, Ruma};
|
use crate::{database::DatabaseGuard, ConduitResult, Error, Ruma};
|
||||||
use ruma::api::client::{
|
use ruma::api::client::{error::ErrorKind, r0::search::search_events};
|
||||||
error::ErrorKind,
|
|
||||||
search::search_events::{
|
|
||||||
self,
|
|
||||||
v3::{EventContextResult, ResultCategories, ResultRoomEvents, SearchResult},
|
|
||||||
},
|
|
||||||
};
|
|
||||||
|
|
||||||
|
#[cfg(feature = "conduit_bin")]
|
||||||
|
use rocket::post;
|
||||||
|
use search_events::{EventContextResult, ResultCategories, ResultRoomEvents, SearchResult};
|
||||||
use std::collections::BTreeMap;
|
use std::collections::BTreeMap;
|
||||||
|
|
||||||
/// # `POST /_matrix/client/r0/search`
|
/// # `POST /_matrix/client/r0/search`
|
||||||
|
@ -14,43 +11,41 @@ use std::collections::BTreeMap;
|
||||||
/// Searches rooms for messages.
|
/// Searches rooms for messages.
|
||||||
///
|
///
|
||||||
/// - Only works if the user is currently joined to the room (TODO: Respect history visibility)
|
/// - Only works if the user is currently joined to the room (TODO: Respect history visibility)
|
||||||
|
#[cfg_attr(
|
||||||
|
feature = "conduit_bin",
|
||||||
|
post("/_matrix/client/r0/search", data = "<body>")
|
||||||
|
)]
|
||||||
|
#[tracing::instrument(skip(db, body))]
|
||||||
pub async fn search_events_route(
|
pub async fn search_events_route(
|
||||||
body: Ruma<search_events::v3::Request>,
|
db: DatabaseGuard,
|
||||||
) -> Result<search_events::v3::Response> {
|
body: Ruma<search_events::Request<'_>>,
|
||||||
|
) -> ConduitResult<search_events::Response> {
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
let search_criteria = body.search_categories.room_events.as_ref().unwrap();
|
let search_criteria = body.search_categories.room_events.as_ref().unwrap();
|
||||||
let filter = &search_criteria.filter;
|
let filter = search_criteria.filter.clone().unwrap_or_default();
|
||||||
|
|
||||||
let room_ids = filter.rooms.clone().unwrap_or_else(|| {
|
let room_ids = filter.rooms.clone().unwrap_or_else(|| {
|
||||||
services()
|
db.rooms
|
||||||
.rooms
|
|
||||||
.state_cache
|
|
||||||
.rooms_joined(sender_user)
|
.rooms_joined(sender_user)
|
||||||
.filter_map(|r| r.ok())
|
.filter_map(|r| r.ok())
|
||||||
.collect()
|
.collect()
|
||||||
});
|
});
|
||||||
|
|
||||||
// Use limit or else 10, with maximum 100
|
let limit = filter.limit.map_or(10, |l| u64::from(l) as usize);
|
||||||
let limit = filter.limit.map_or(10, u64::from).min(100) as usize;
|
|
||||||
|
|
||||||
let mut searches = Vec::new();
|
let mut searches = Vec::new();
|
||||||
|
|
||||||
for room_id in room_ids {
|
for room_id in room_ids {
|
||||||
if !services()
|
if !db.rooms.is_joined(sender_user, &room_id)? {
|
||||||
.rooms
|
|
||||||
.state_cache
|
|
||||||
.is_joined(sender_user, &room_id)?
|
|
||||||
{
|
|
||||||
return Err(Error::BadRequest(
|
return Err(Error::BadRequest(
|
||||||
ErrorKind::Forbidden,
|
ErrorKind::Forbidden,
|
||||||
"You don't have permission to view this room.",
|
"You don't have permission to view this room.",
|
||||||
));
|
));
|
||||||
}
|
}
|
||||||
|
|
||||||
if let Some(search) = services()
|
if let Some(search) = db
|
||||||
.rooms
|
.rooms
|
||||||
.search
|
|
||||||
.search_pdus(&room_id, &search_criteria.search_term)?
|
.search_pdus(&room_id, &search_criteria.search_term)?
|
||||||
{
|
{
|
||||||
searches.push(search.0.peekable());
|
searches.push(search.0.peekable());
|
||||||
|
@ -82,21 +77,6 @@ pub async fn search_events_route(
|
||||||
|
|
||||||
let results: Vec<_> = results
|
let results: Vec<_> = results
|
||||||
.iter()
|
.iter()
|
||||||
.filter_map(|result| {
|
|
||||||
services()
|
|
||||||
.rooms
|
|
||||||
.timeline
|
|
||||||
.get_pdu_from_id(result)
|
|
||||||
.ok()?
|
|
||||||
.filter(|pdu| {
|
|
||||||
services()
|
|
||||||
.rooms
|
|
||||||
.state_accessor
|
|
||||||
.user_can_see_event(sender_user, &pdu.room_id, &pdu.event_id)
|
|
||||||
.unwrap_or(false)
|
|
||||||
})
|
|
||||||
.map(|pdu| pdu.to_room_event())
|
|
||||||
})
|
|
||||||
.map(|result| {
|
.map(|result| {
|
||||||
Ok::<_, Error>(SearchResult {
|
Ok::<_, Error>(SearchResult {
|
||||||
context: EventContextResult {
|
context: EventContextResult {
|
||||||
|
@ -107,7 +87,10 @@ pub async fn search_events_route(
|
||||||
start: None,
|
start: None,
|
||||||
},
|
},
|
||||||
rank: None,
|
rank: None,
|
||||||
result: Some(result),
|
result: db
|
||||||
|
.rooms
|
||||||
|
.get_pdu_from_id(result)?
|
||||||
|
.map(|pdu| pdu.to_room_event()),
|
||||||
})
|
})
|
||||||
})
|
})
|
||||||
.filter_map(|r| r.ok())
|
.filter_map(|r| r.ok())
|
||||||
|
@ -115,13 +98,13 @@ pub async fn search_events_route(
|
||||||
.take(limit)
|
.take(limit)
|
||||||
.collect();
|
.collect();
|
||||||
|
|
||||||
let next_batch = if results.len() < limit {
|
let next_batch = if results.len() < limit as usize {
|
||||||
None
|
None
|
||||||
} else {
|
} else {
|
||||||
Some((skip + limit).to_string())
|
Some((skip + limit).to_string())
|
||||||
};
|
};
|
||||||
|
|
||||||
Ok(search_events::v3::Response::new(ResultCategories {
|
Ok(search_events::Response::new(ResultCategories {
|
||||||
room_events: ResultRoomEvents {
|
room_events: ResultRoomEvents {
|
||||||
count: Some((results.len() as u32).into()), // TODO: set this to none. Element shouldn't depend on it
|
count: Some((results.len() as u32).into()), // TODO: set this to none. Element shouldn't depend on it
|
||||||
groups: BTreeMap::new(), // TODO
|
groups: BTreeMap::new(), // TODO
|
||||||
|
@ -134,5 +117,6 @@ pub async fn search_events_route(
|
||||||
.map(str::to_lowercase)
|
.map(str::to_lowercase)
|
||||||
.collect(),
|
.collect(),
|
||||||
},
|
},
|
||||||
}))
|
})
|
||||||
|
.into())
|
||||||
}
|
}
|
|
@ -1,33 +1,40 @@
|
||||||
use super::{DEVICE_ID_LENGTH, TOKEN_LENGTH};
|
use super::{DEVICE_ID_LENGTH, TOKEN_LENGTH};
|
||||||
use crate::{services, utils, Error, Result, Ruma};
|
use crate::{database::DatabaseGuard, utils, ConduitResult, Error, Ruma};
|
||||||
use ruma::{
|
use ruma::{
|
||||||
api::client::{
|
api::client::{
|
||||||
error::ErrorKind,
|
error::ErrorKind,
|
||||||
|
r0::{
|
||||||
session::{get_login_types, login, logout, logout_all},
|
session::{get_login_types, login, logout, logout_all},
|
||||||
uiaa::UserIdentifier,
|
uiaa::IncomingUserIdentifier,
|
||||||
|
},
|
||||||
},
|
},
|
||||||
UserId,
|
UserId,
|
||||||
};
|
};
|
||||||
use serde::Deserialize;
|
use serde::Deserialize;
|
||||||
use tracing::{info, warn};
|
use tracing::info;
|
||||||
|
|
||||||
#[derive(Debug, Deserialize)]
|
#[derive(Debug, Deserialize)]
|
||||||
struct Claims {
|
struct Claims {
|
||||||
sub: String,
|
sub: String,
|
||||||
//exp: usize,
|
exp: usize,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[cfg(feature = "conduit_bin")]
|
||||||
|
use rocket::{get, post};
|
||||||
|
|
||||||
/// # `GET /_matrix/client/r0/login`
|
/// # `GET /_matrix/client/r0/login`
|
||||||
///
|
///
|
||||||
/// Get the supported login types of this server. One of these should be used as the `type` field
|
/// Get the supported login types of this server. One of these should be used as the `type` field
|
||||||
/// when logging in.
|
/// when logging in.
|
||||||
pub async fn get_login_types_route(
|
#[cfg_attr(feature = "conduit_bin", get("/_matrix/client/r0/login"))]
|
||||||
_body: Ruma<get_login_types::v3::Request>,
|
#[tracing::instrument]
|
||||||
) -> Result<get_login_types::v3::Response> {
|
pub async fn get_login_types_route() -> ConduitResult<get_login_types::Response> {
|
||||||
Ok(get_login_types::v3::Response::new(vec![
|
Ok(
|
||||||
get_login_types::v3::LoginType::Password(Default::default()),
|
get_login_types::Response::new(vec![get_login_types::LoginType::Password(
|
||||||
get_login_types::v3::LoginType::ApplicationService(Default::default()),
|
Default::default(),
|
||||||
]))
|
)])
|
||||||
|
.into(),
|
||||||
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
/// # `POST /_matrix/client/r0/login`
|
/// # `POST /_matrix/client/r0/login`
|
||||||
|
@ -41,36 +48,33 @@ pub async fn get_login_types_route(
|
||||||
///
|
///
|
||||||
/// Note: You can use [`GET /_matrix/client/r0/login`](fn.get_supported_versions_route.html) to see
|
/// Note: You can use [`GET /_matrix/client/r0/login`](fn.get_supported_versions_route.html) to see
|
||||||
/// supported login types.
|
/// supported login types.
|
||||||
pub async fn login_route(body: Ruma<login::v3::Request>) -> Result<login::v3::Response> {
|
#[cfg_attr(
|
||||||
// To allow deprecated login methods
|
feature = "conduit_bin",
|
||||||
#![allow(deprecated)]
|
post("/_matrix/client/r0/login", data = "<body>")
|
||||||
|
)]
|
||||||
|
#[tracing::instrument(skip(db, body))]
|
||||||
|
pub async fn login_route(
|
||||||
|
db: DatabaseGuard,
|
||||||
|
body: Ruma<login::Request<'_>>,
|
||||||
|
) -> ConduitResult<login::Response> {
|
||||||
// Validate login method
|
// Validate login method
|
||||||
// TODO: Other login methods
|
// TODO: Other login methods
|
||||||
let user_id = match &body.login_info {
|
let user_id = match &body.login_info {
|
||||||
login::v3::LoginInfo::Password(login::v3::Password {
|
login::IncomingLoginInfo::Password(login::IncomingPassword {
|
||||||
identifier,
|
identifier,
|
||||||
password,
|
password,
|
||||||
user,
|
|
||||||
address: _,
|
|
||||||
medium: _,
|
|
||||||
}) => {
|
}) => {
|
||||||
let user_id = if let Some(UserIdentifier::UserIdOrLocalpart(user_id)) = identifier {
|
let username = if let IncomingUserIdentifier::MatrixId(matrix_id) = identifier {
|
||||||
UserId::parse_with_server_name(
|
matrix_id
|
||||||
user_id.to_lowercase(),
|
|
||||||
services().globals.server_name(),
|
|
||||||
)
|
|
||||||
} else if let Some(user) = user {
|
|
||||||
UserId::parse(user)
|
|
||||||
} else {
|
} else {
|
||||||
warn!("Bad login type: {:?}", &body.login_info);
|
|
||||||
return Err(Error::BadRequest(ErrorKind::Forbidden, "Bad login type."));
|
return Err(Error::BadRequest(ErrorKind::Forbidden, "Bad login type."));
|
||||||
}
|
};
|
||||||
.map_err(|_| Error::BadRequest(ErrorKind::InvalidUsername, "Username is invalid."))?;
|
let user_id =
|
||||||
|
UserId::parse_with_server_name(username.to_owned(), db.globals.server_name())
|
||||||
let hash = services()
|
.map_err(|_| {
|
||||||
.users
|
Error::BadRequest(ErrorKind::InvalidUsername, "Username is invalid.")
|
||||||
.password_hash(&user_id)?
|
})?;
|
||||||
.ok_or(Error::BadRequest(
|
let hash = db.users.password_hash(&user_id)?.ok_or(Error::BadRequest(
|
||||||
ErrorKind::Forbidden,
|
ErrorKind::Forbidden,
|
||||||
"Wrong username or password.",
|
"Wrong username or password.",
|
||||||
))?;
|
))?;
|
||||||
|
@ -93,16 +97,16 @@ pub async fn login_route(body: Ruma<login::v3::Request>) -> Result<login::v3::Re
|
||||||
|
|
||||||
user_id
|
user_id
|
||||||
}
|
}
|
||||||
login::v3::LoginInfo::Token(login::v3::Token { token }) => {
|
login::IncomingLoginInfo::Token(login::IncomingToken { token }) => {
|
||||||
if let Some(jwt_decoding_key) = services().globals.jwt_decoding_key() {
|
if let Some(jwt_decoding_key) = db.globals.jwt_decoding_key() {
|
||||||
let token = jsonwebtoken::decode::<Claims>(
|
let token = jsonwebtoken::decode::<Claims>(
|
||||||
token,
|
token,
|
||||||
jwt_decoding_key,
|
jwt_decoding_key,
|
||||||
&jsonwebtoken::Validation::default(),
|
&jsonwebtoken::Validation::default(),
|
||||||
)
|
)
|
||||||
.map_err(|_| Error::BadRequest(ErrorKind::InvalidUsername, "Token is invalid."))?;
|
.map_err(|_| Error::BadRequest(ErrorKind::InvalidUsername, "Token is invalid."))?;
|
||||||
let username = token.claims.sub.to_lowercase();
|
let username = token.claims.sub;
|
||||||
UserId::parse_with_server_name(username, services().globals.server_name()).map_err(
|
UserId::parse_with_server_name(username, db.globals.server_name()).map_err(
|
||||||
|_| Error::BadRequest(ErrorKind::InvalidUsername, "Username is invalid."),
|
|_| Error::BadRequest(ErrorKind::InvalidUsername, "Username is invalid."),
|
||||||
)?
|
)?
|
||||||
} else {
|
} else {
|
||||||
|
@ -112,31 +116,7 @@ pub async fn login_route(body: Ruma<login::v3::Request>) -> Result<login::v3::Re
|
||||||
));
|
));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
login::v3::LoginInfo::ApplicationService(login::v3::ApplicationService {
|
|
||||||
identifier,
|
|
||||||
user,
|
|
||||||
}) => {
|
|
||||||
if !body.from_appservice {
|
|
||||||
return Err(Error::BadRequest(
|
|
||||||
ErrorKind::Forbidden,
|
|
||||||
"Forbidden login type.",
|
|
||||||
));
|
|
||||||
};
|
|
||||||
if let Some(UserIdentifier::UserIdOrLocalpart(user_id)) = identifier {
|
|
||||||
UserId::parse_with_server_name(
|
|
||||||
user_id.to_lowercase(),
|
|
||||||
services().globals.server_name(),
|
|
||||||
)
|
|
||||||
} else if let Some(user) = user {
|
|
||||||
UserId::parse(user)
|
|
||||||
} else {
|
|
||||||
warn!("Bad login type: {:?}", &body.login_info);
|
|
||||||
return Err(Error::BadRequest(ErrorKind::Forbidden, "Bad login type."));
|
|
||||||
}
|
|
||||||
.map_err(|_| Error::BadRequest(ErrorKind::InvalidUsername, "Username is invalid."))?
|
|
||||||
}
|
|
||||||
_ => {
|
_ => {
|
||||||
warn!("Unsupported or unknown login type: {:?}", &body.login_info);
|
|
||||||
return Err(Error::BadRequest(
|
return Err(Error::BadRequest(
|
||||||
ErrorKind::Unknown,
|
ErrorKind::Unknown,
|
||||||
"Unsupported login type.",
|
"Unsupported login type.",
|
||||||
|
@ -155,16 +135,15 @@ pub async fn login_route(body: Ruma<login::v3::Request>) -> Result<login::v3::Re
|
||||||
|
|
||||||
// Determine if device_id was provided and exists in the db for this user
|
// Determine if device_id was provided and exists in the db for this user
|
||||||
let device_exists = body.device_id.as_ref().map_or(false, |device_id| {
|
let device_exists = body.device_id.as_ref().map_or(false, |device_id| {
|
||||||
services()
|
db.users
|
||||||
.users
|
|
||||||
.all_device_ids(&user_id)
|
.all_device_ids(&user_id)
|
||||||
.any(|x| x.as_ref().map_or(false, |v| v == device_id))
|
.any(|x| x.as_ref().map_or(false, |v| v == device_id))
|
||||||
});
|
});
|
||||||
|
|
||||||
if device_exists {
|
if device_exists {
|
||||||
services().users.set_token(&user_id, &device_id, &token)?;
|
db.users.set_token(&user_id, &device_id, &token)?;
|
||||||
} else {
|
} else {
|
||||||
services().users.create_device(
|
db.users.create_device(
|
||||||
&user_id,
|
&user_id,
|
||||||
&device_id,
|
&device_id,
|
||||||
&token,
|
&token,
|
||||||
|
@ -174,17 +153,16 @@ pub async fn login_route(body: Ruma<login::v3::Request>) -> Result<login::v3::Re
|
||||||
|
|
||||||
info!("{} logged in", user_id);
|
info!("{} logged in", user_id);
|
||||||
|
|
||||||
// Homeservers are still required to send the `home_server` field
|
db.flush()?;
|
||||||
#[allow(deprecated)]
|
|
||||||
Ok(login::v3::Response {
|
Ok(login::Response {
|
||||||
user_id,
|
user_id,
|
||||||
access_token: token,
|
access_token: token,
|
||||||
home_server: Some(services().globals.server_name().to_owned()),
|
home_server: Some(db.globals.server_name().to_owned()),
|
||||||
device_id,
|
device_id,
|
||||||
well_known: None,
|
well_known: None,
|
||||||
refresh_token: None,
|
}
|
||||||
expires_in: None,
|
.into())
|
||||||
})
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/// # `POST /_matrix/client/r0/logout`
|
/// # `POST /_matrix/client/r0/logout`
|
||||||
|
@ -195,13 +173,23 @@ pub async fn login_route(body: Ruma<login::v3::Request>) -> Result<login::v3::Re
|
||||||
/// - Deletes device metadata (device id, device display name, last seen ip, last seen ts)
|
/// - Deletes device metadata (device id, device display name, last seen ip, last seen ts)
|
||||||
/// - Forgets to-device events
|
/// - Forgets to-device events
|
||||||
/// - Triggers device list updates
|
/// - Triggers device list updates
|
||||||
pub async fn logout_route(body: Ruma<logout::v3::Request>) -> Result<logout::v3::Response> {
|
#[cfg_attr(
|
||||||
|
feature = "conduit_bin",
|
||||||
|
post("/_matrix/client/r0/logout", data = "<body>")
|
||||||
|
)]
|
||||||
|
#[tracing::instrument(skip(db, body))]
|
||||||
|
pub async fn logout_route(
|
||||||
|
db: DatabaseGuard,
|
||||||
|
body: Ruma<logout::Request>,
|
||||||
|
) -> ConduitResult<logout::Response> {
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
let sender_device = body.sender_device.as_ref().expect("user is authenticated");
|
let sender_device = body.sender_device.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
services().users.remove_device(sender_user, sender_device)?;
|
db.users.remove_device(sender_user, sender_device)?;
|
||||||
|
|
||||||
Ok(logout::v3::Response::new())
|
db.flush()?;
|
||||||
|
|
||||||
|
Ok(logout::Response::new().into())
|
||||||
}
|
}
|
||||||
|
|
||||||
/// # `POST /_matrix/client/r0/logout/all`
|
/// # `POST /_matrix/client/r0/logout/all`
|
||||||
|
@ -215,14 +203,22 @@ pub async fn logout_route(body: Ruma<logout::v3::Request>) -> Result<logout::v3:
|
||||||
///
|
///
|
||||||
/// Note: This is equivalent to calling [`GET /_matrix/client/r0/logout`](fn.logout_route.html)
|
/// Note: This is equivalent to calling [`GET /_matrix/client/r0/logout`](fn.logout_route.html)
|
||||||
/// from each device of this user.
|
/// from each device of this user.
|
||||||
|
#[cfg_attr(
|
||||||
|
feature = "conduit_bin",
|
||||||
|
post("/_matrix/client/r0/logout/all", data = "<body>")
|
||||||
|
)]
|
||||||
|
#[tracing::instrument(skip(db, body))]
|
||||||
pub async fn logout_all_route(
|
pub async fn logout_all_route(
|
||||||
body: Ruma<logout_all::v3::Request>,
|
db: DatabaseGuard,
|
||||||
) -> Result<logout_all::v3::Response> {
|
body: Ruma<logout_all::Request>,
|
||||||
|
) -> ConduitResult<logout_all::Response> {
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
for device_id in services().users.all_device_ids(sender_user).flatten() {
|
for device_id in db.users.all_device_ids(sender_user).flatten() {
|
||||||
services().users.remove_device(sender_user, &device_id)?;
|
db.users.remove_device(sender_user, &device_id)?;
|
||||||
}
|
}
|
||||||
|
|
||||||
Ok(logout_all::v3::Response::new())
|
db.flush()?;
|
||||||
|
|
||||||
|
Ok(logout_all::Response::new().into())
|
||||||
}
|
}
|
328
src/client_server/state.rs
Normal file
328
src/client_server/state.rs
Normal file
|
@ -0,0 +1,328 @@
|
||||||
|
use std::sync::Arc;
|
||||||
|
|
||||||
|
use crate::{
|
||||||
|
database::DatabaseGuard, pdu::PduBuilder, ConduitResult, Database, Error, Result, Ruma,
|
||||||
|
};
|
||||||
|
use ruma::{
|
||||||
|
api::client::{
|
||||||
|
error::ErrorKind,
|
||||||
|
r0::state::{get_state_events, get_state_events_for_key, send_state_event},
|
||||||
|
},
|
||||||
|
events::{
|
||||||
|
room::{
|
||||||
|
canonical_alias::RoomCanonicalAliasEventContent,
|
||||||
|
history_visibility::{HistoryVisibility, RoomHistoryVisibilityEventContent},
|
||||||
|
},
|
||||||
|
AnyStateEventContent, EventType,
|
||||||
|
},
|
||||||
|
serde::Raw,
|
||||||
|
EventId, RoomId, UserId,
|
||||||
|
};
|
||||||
|
|
||||||
|
#[cfg(feature = "conduit_bin")]
|
||||||
|
use rocket::{get, put};
|
||||||
|
|
||||||
|
/// # `PUT /_matrix/client/r0/rooms/{roomId}/state/{eventType}/{stateKey}`
|
||||||
|
///
|
||||||
|
/// Sends a state event into the room.
|
||||||
|
///
|
||||||
|
/// - The only requirement for the content is that it has to be valid json
|
||||||
|
/// - Tries to send the event into the room, auth rules will determine if it is allowed
|
||||||
|
/// - If event is new canonical_alias: Rejects if alias is incorrect
|
||||||
|
#[cfg_attr(
|
||||||
|
feature = "conduit_bin",
|
||||||
|
put("/_matrix/client/r0/rooms/<_>/state/<_>/<_>", data = "<body>")
|
||||||
|
)]
|
||||||
|
#[tracing::instrument(skip(db, body))]
|
||||||
|
pub async fn send_state_event_for_key_route(
|
||||||
|
db: DatabaseGuard,
|
||||||
|
body: Ruma<send_state_event::Request<'_>>,
|
||||||
|
) -> ConduitResult<send_state_event::Response> {
|
||||||
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
|
let event_id = send_state_event_for_key_helper(
|
||||||
|
&db,
|
||||||
|
sender_user,
|
||||||
|
&body.room_id,
|
||||||
|
EventType::from(&*body.event_type),
|
||||||
|
&body.body.body, // Yes, I hate it too
|
||||||
|
body.state_key.to_owned(),
|
||||||
|
)
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
db.flush()?;
|
||||||
|
|
||||||
|
let event_id = (*event_id).to_owned();
|
||||||
|
Ok(send_state_event::Response { event_id }.into())
|
||||||
|
}
|
||||||
|
|
||||||
|
/// # `PUT /_matrix/client/r0/rooms/{roomId}/state/{eventType}`
|
||||||
|
///
|
||||||
|
/// Sends a state event into the room.
|
||||||
|
///
|
||||||
|
/// - The only requirement for the content is that it has to be valid json
|
||||||
|
/// - Tries to send the event into the room, auth rules will determine if it is allowed
|
||||||
|
/// - If event is new canonical_alias: Rejects if alias is incorrect
|
||||||
|
#[cfg_attr(
|
||||||
|
feature = "conduit_bin",
|
||||||
|
put("/_matrix/client/r0/rooms/<_>/state/<_>", data = "<body>")
|
||||||
|
)]
|
||||||
|
#[tracing::instrument(skip(db, body))]
|
||||||
|
pub async fn send_state_event_for_empty_key_route(
|
||||||
|
db: DatabaseGuard,
|
||||||
|
body: Ruma<send_state_event::Request<'_>>,
|
||||||
|
) -> ConduitResult<send_state_event::Response> {
|
||||||
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
|
// Forbid m.room.encryption if encryption is disabled
|
||||||
|
if &body.event_type == "m.room.encryption" && !db.globals.allow_encryption() {
|
||||||
|
return Err(Error::BadRequest(
|
||||||
|
ErrorKind::Forbidden,
|
||||||
|
"Encryption has been disabled",
|
||||||
|
));
|
||||||
|
}
|
||||||
|
|
||||||
|
let event_id = send_state_event_for_key_helper(
|
||||||
|
&db,
|
||||||
|
sender_user,
|
||||||
|
&body.room_id,
|
||||||
|
EventType::from(&*body.event_type),
|
||||||
|
&body.body.body,
|
||||||
|
body.state_key.to_owned(),
|
||||||
|
)
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
db.flush()?;
|
||||||
|
|
||||||
|
let event_id = (*event_id).to_owned();
|
||||||
|
Ok(send_state_event::Response { event_id }.into())
|
||||||
|
}
|
||||||
|
|
||||||
|
/// # `GET /_matrix/client/r0/rooms/{roomid}/state`
|
||||||
|
///
|
||||||
|
/// Get all state events for a room.
|
||||||
|
///
|
||||||
|
/// - If not joined: Only works if current room history visibility is world readable
|
||||||
|
#[cfg_attr(
|
||||||
|
feature = "conduit_bin",
|
||||||
|
get("/_matrix/client/r0/rooms/<_>/state", data = "<body>")
|
||||||
|
)]
|
||||||
|
#[tracing::instrument(skip(db, body))]
|
||||||
|
pub async fn get_state_events_route(
|
||||||
|
db: DatabaseGuard,
|
||||||
|
body: Ruma<get_state_events::Request<'_>>,
|
||||||
|
) -> ConduitResult<get_state_events::Response> {
|
||||||
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
|
#[allow(clippy::blocks_in_if_conditions)]
|
||||||
|
// Users not in the room should not be able to access the state unless history_visibility is
|
||||||
|
// WorldReadable
|
||||||
|
if !db.rooms.is_joined(sender_user, &body.room_id)?
|
||||||
|
&& !matches!(
|
||||||
|
db.rooms
|
||||||
|
.room_state_get(&body.room_id, &EventType::RoomHistoryVisibility, "")?
|
||||||
|
.map(|event| {
|
||||||
|
serde_json::from_str(event.content.get())
|
||||||
|
.map(|e: RoomHistoryVisibilityEventContent| e.history_visibility)
|
||||||
|
.map_err(|_| {
|
||||||
|
Error::bad_database(
|
||||||
|
"Invalid room history visibility event in database.",
|
||||||
|
)
|
||||||
|
})
|
||||||
|
}),
|
||||||
|
Some(Ok(HistoryVisibility::WorldReadable))
|
||||||
|
)
|
||||||
|
{
|
||||||
|
return Err(Error::BadRequest(
|
||||||
|
ErrorKind::Forbidden,
|
||||||
|
"You don't have permission to view the room state.",
|
||||||
|
));
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(get_state_events::Response {
|
||||||
|
room_state: db
|
||||||
|
.rooms
|
||||||
|
.room_state_full(&body.room_id)?
|
||||||
|
.values()
|
||||||
|
.map(|pdu| pdu.to_state_event())
|
||||||
|
.collect(),
|
||||||
|
}
|
||||||
|
.into())
|
||||||
|
}
|
||||||
|
|
||||||
|
/// # `GET /_matrix/client/r0/rooms/{roomid}/state/{eventType}/{stateKey}`
|
||||||
|
///
|
||||||
|
/// Get single state event of a room.
|
||||||
|
///
|
||||||
|
/// - If not joined: Only works if current room history visibility is world readable
|
||||||
|
#[cfg_attr(
|
||||||
|
feature = "conduit_bin",
|
||||||
|
get("/_matrix/client/r0/rooms/<_>/state/<_>/<_>", data = "<body>")
|
||||||
|
)]
|
||||||
|
#[tracing::instrument(skip(db, body))]
|
||||||
|
pub async fn get_state_events_for_key_route(
|
||||||
|
db: DatabaseGuard,
|
||||||
|
body: Ruma<get_state_events_for_key::Request<'_>>,
|
||||||
|
) -> ConduitResult<get_state_events_for_key::Response> {
|
||||||
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
|
#[allow(clippy::blocks_in_if_conditions)]
|
||||||
|
// Users not in the room should not be able to access the state unless history_visibility is
|
||||||
|
// WorldReadable
|
||||||
|
if !db.rooms.is_joined(sender_user, &body.room_id)?
|
||||||
|
&& !matches!(
|
||||||
|
db.rooms
|
||||||
|
.room_state_get(&body.room_id, &EventType::RoomHistoryVisibility, "")?
|
||||||
|
.map(|event| {
|
||||||
|
serde_json::from_str(event.content.get())
|
||||||
|
.map(|e: RoomHistoryVisibilityEventContent| e.history_visibility)
|
||||||
|
.map_err(|_| {
|
||||||
|
Error::bad_database(
|
||||||
|
"Invalid room history visibility event in database.",
|
||||||
|
)
|
||||||
|
})
|
||||||
|
}),
|
||||||
|
Some(Ok(HistoryVisibility::WorldReadable))
|
||||||
|
)
|
||||||
|
{
|
||||||
|
return Err(Error::BadRequest(
|
||||||
|
ErrorKind::Forbidden,
|
||||||
|
"You don't have permission to view the room state.",
|
||||||
|
));
|
||||||
|
}
|
||||||
|
|
||||||
|
let event = db
|
||||||
|
.rooms
|
||||||
|
.room_state_get(&body.room_id, &body.event_type, &body.state_key)?
|
||||||
|
.ok_or(Error::BadRequest(
|
||||||
|
ErrorKind::NotFound,
|
||||||
|
"State event not found.",
|
||||||
|
))?;
|
||||||
|
|
||||||
|
Ok(get_state_events_for_key::Response {
|
||||||
|
content: serde_json::from_str(event.content.get())
|
||||||
|
.map_err(|_| Error::bad_database("Invalid event content in database"))?,
|
||||||
|
}
|
||||||
|
.into())
|
||||||
|
}
|
||||||
|
|
||||||
|
/// # `GET /_matrix/client/r0/rooms/{roomid}/state/{eventType}`
|
||||||
|
///
|
||||||
|
/// Get single state event of a room.
|
||||||
|
///
|
||||||
|
/// - If not joined: Only works if current room history visibility is world readable
|
||||||
|
#[cfg_attr(
|
||||||
|
feature = "conduit_bin",
|
||||||
|
get("/_matrix/client/r0/rooms/<_>/state/<_>", data = "<body>")
|
||||||
|
)]
|
||||||
|
#[tracing::instrument(skip(db, body))]
|
||||||
|
pub async fn get_state_events_for_empty_key_route(
|
||||||
|
db: DatabaseGuard,
|
||||||
|
body: Ruma<get_state_events_for_key::Request<'_>>,
|
||||||
|
) -> ConduitResult<get_state_events_for_key::Response> {
|
||||||
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
|
#[allow(clippy::blocks_in_if_conditions)]
|
||||||
|
// Users not in the room should not be able to access the state unless history_visibility is
|
||||||
|
// WorldReadable
|
||||||
|
if !db.rooms.is_joined(sender_user, &body.room_id)?
|
||||||
|
&& !matches!(
|
||||||
|
db.rooms
|
||||||
|
.room_state_get(&body.room_id, &EventType::RoomHistoryVisibility, "")?
|
||||||
|
.map(|event| {
|
||||||
|
serde_json::from_str(event.content.get())
|
||||||
|
.map(|e: RoomHistoryVisibilityEventContent| e.history_visibility)
|
||||||
|
.map_err(|_| {
|
||||||
|
Error::bad_database(
|
||||||
|
"Invalid room history visibility event in database.",
|
||||||
|
)
|
||||||
|
})
|
||||||
|
}),
|
||||||
|
Some(Ok(HistoryVisibility::WorldReadable))
|
||||||
|
)
|
||||||
|
{
|
||||||
|
return Err(Error::BadRequest(
|
||||||
|
ErrorKind::Forbidden,
|
||||||
|
"You don't have permission to view the room state.",
|
||||||
|
));
|
||||||
|
}
|
||||||
|
|
||||||
|
let event = db
|
||||||
|
.rooms
|
||||||
|
.room_state_get(&body.room_id, &body.event_type, "")?
|
||||||
|
.ok_or(Error::BadRequest(
|
||||||
|
ErrorKind::NotFound,
|
||||||
|
"State event not found.",
|
||||||
|
))?;
|
||||||
|
|
||||||
|
Ok(get_state_events_for_key::Response {
|
||||||
|
content: serde_json::from_str(event.content.get())
|
||||||
|
.map_err(|_| Error::bad_database("Invalid event content in database"))?,
|
||||||
|
}
|
||||||
|
.into())
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn send_state_event_for_key_helper(
|
||||||
|
db: &Database,
|
||||||
|
sender: &UserId,
|
||||||
|
room_id: &RoomId,
|
||||||
|
event_type: EventType,
|
||||||
|
json: &Raw<AnyStateEventContent>,
|
||||||
|
state_key: String,
|
||||||
|
) -> Result<Arc<EventId>> {
|
||||||
|
let sender_user = sender;
|
||||||
|
|
||||||
|
// TODO: Review this check, error if event is unparsable, use event type, allow alias if it
|
||||||
|
// previously existed
|
||||||
|
if let Ok(canonical_alias) =
|
||||||
|
serde_json::from_str::<RoomCanonicalAliasEventContent>(json.json().get())
|
||||||
|
{
|
||||||
|
let mut aliases = canonical_alias.alt_aliases.clone();
|
||||||
|
|
||||||
|
if let Some(alias) = canonical_alias.alias {
|
||||||
|
aliases.push(alias);
|
||||||
|
}
|
||||||
|
|
||||||
|
for alias in aliases {
|
||||||
|
if alias.server_name() != db.globals.server_name()
|
||||||
|
|| db
|
||||||
|
.rooms
|
||||||
|
.id_from_alias(&alias)?
|
||||||
|
.filter(|room| room == room_id) // Make sure it's the right room
|
||||||
|
.is_none()
|
||||||
|
{
|
||||||
|
return Err(Error::BadRequest(
|
||||||
|
ErrorKind::Forbidden,
|
||||||
|
"You are only allowed to send canonical_alias \
|
||||||
|
events when it's aliases already exists",
|
||||||
|
));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
let mutex_state = Arc::clone(
|
||||||
|
db.globals
|
||||||
|
.roomid_mutex_state
|
||||||
|
.write()
|
||||||
|
.unwrap()
|
||||||
|
.entry(room_id.to_owned())
|
||||||
|
.or_default(),
|
||||||
|
);
|
||||||
|
let state_lock = mutex_state.lock().await;
|
||||||
|
|
||||||
|
let event_id = db.rooms.build_and_append_pdu(
|
||||||
|
PduBuilder {
|
||||||
|
event_type,
|
||||||
|
content: serde_json::from_str(json.json().get()).expect("content is valid json"),
|
||||||
|
unsigned: None,
|
||||||
|
state_key: Some(state_key),
|
||||||
|
redacts: None,
|
||||||
|
},
|
||||||
|
sender_user,
|
||||||
|
room_id,
|
||||||
|
db,
|
||||||
|
&state_lock,
|
||||||
|
)?;
|
||||||
|
|
||||||
|
Ok(event_id)
|
||||||
|
}
|
928
src/client_server/sync.rs
Normal file
928
src/client_server/sync.rs
Normal file
|
@ -0,0 +1,928 @@
|
||||||
|
use crate::{database::DatabaseGuard, ConduitResult, Database, Error, Result, Ruma, RumaResponse};
|
||||||
|
use ruma::{
|
||||||
|
api::client::r0::{
|
||||||
|
filter::{IncomingFilterDefinition, LazyLoadOptions},
|
||||||
|
sync::sync_events,
|
||||||
|
uiaa::UiaaResponse,
|
||||||
|
},
|
||||||
|
events::{
|
||||||
|
room::member::{MembershipState, RoomMemberEventContent},
|
||||||
|
AnySyncEphemeralRoomEvent, EventType,
|
||||||
|
},
|
||||||
|
serde::Raw,
|
||||||
|
DeviceId, RoomId, UserId,
|
||||||
|
};
|
||||||
|
use std::{
|
||||||
|
collections::{hash_map::Entry, BTreeMap, HashMap, HashSet},
|
||||||
|
sync::Arc,
|
||||||
|
time::Duration,
|
||||||
|
};
|
||||||
|
use tokio::sync::watch::Sender;
|
||||||
|
use tracing::error;
|
||||||
|
|
||||||
|
#[cfg(feature = "conduit_bin")]
|
||||||
|
use rocket::{get, tokio};
|
||||||
|
|
||||||
|
/// # `GET /_matrix/client/r0/sync`
///
/// Synchronize the client's state with the latest state on the server.
///
/// - This endpoint takes a `since` parameter which should be the `next_batch` value from a
/// previous request for incremental syncs.
///
/// Calling this endpoint without a `since` parameter returns:
/// - Some of the most recent events of each timeline
/// - Notification counts for each room
/// - Joined and invited member counts, heroes
/// - All state events
///
/// Calling this endpoint with a `since` parameter from a previous `next_batch` returns:
/// For joined rooms:
/// - Some of the most recent events of each timeline that happened after since
/// - If user joined the room after since: All state events (unless lazy loading is activated) and
/// all device list updates in that room
/// - If the user was already in the room: A list of all events that are in the state now, but were
/// not in the state at `since`
/// - If the state we send contains a member event: Joined and invited member counts, heroes
/// - Device list updates that happened after `since`
/// - If there are events in the timeline we send or the user send updated his read mark: Notification counts
/// - EDUs that are active now (read receipts, typing updates, presence)
/// - TODO: Allow multiple sync streams to support Pantalaimon
///
/// For invited rooms:
/// - If the user was invited after `since`: A subset of the state of the room at the point of the invite
///
/// For left rooms:
/// - If the user left after `since`: prev_batch token, empty state (TODO: subset of the state at the point of the leave)
///
/// - Sync is handled in an async task, multiple requests from the same device with the same
/// `since` will be cached
#[cfg_attr(
    feature = "conduit_bin",
    get("/_matrix/client/r0/sync", data = "<body>")
)]
#[tracing::instrument(skip(db, body))]
pub async fn sync_events_route(
    db: DatabaseGuard,
    body: Ruma<sync_events::Request<'_>>,
) -> Result<RumaResponse<sync_events::Response>, RumaResponse<UiaaResponse>> {
    let sender_user = body.sender_user.expect("user is authenticated");
    let sender_device = body.sender_device.expect("user is authenticated");
    let body = body.body;

    // The database guard is moved into the spawned sync task, so it must be
    // reference-counted to also be usable from this request handler.
    let arc_db = Arc::new(db);

    // `sync_receivers` maps (user, device) -> (since, watch receiver). It
    // deduplicates concurrent /sync requests: only the first request for a
    // given `since` token spawns a `sync_helper_wrapper` task; later requests
    // with the same token just subscribe to the existing watch channel.
    let mut rx = match arc_db
        .globals
        .sync_receivers
        .write()
        .unwrap()
        .entry((sender_user.clone(), sender_device.clone()))
    {
        Entry::Vacant(v) => {
            // No sync in flight for this device: start one. The channel
            // starts at `None`; the task sends `Some(result)` when done.
            let (tx, rx) = tokio::sync::watch::channel(None);

            v.insert((body.since.clone(), rx.clone()));

            tokio::spawn(sync_helper_wrapper(
                Arc::clone(&arc_db),
                sender_user.clone(),
                sender_device.clone(),
                body,
                tx,
            ));

            rx
        }
        Entry::Occupied(mut o) => {
            if o.get().0 != body.since {
                // The device moved to a new `since` token: replace the cached
                // entry and spawn a fresh sync task for the new token.
                let (tx, rx) = tokio::sync::watch::channel(None);

                o.insert((body.since.clone(), rx.clone()));

                tokio::spawn(sync_helper_wrapper(
                    Arc::clone(&arc_db),
                    sender_user.clone(),
                    sender_device.clone(),
                    body,
                    tx,
                ));

                rx
            } else {
                // Same `since` token as a sync already in flight (or cached):
                // reuse its receiver instead of doing the work again.
                o.get().1.clone()
            }
        }
    };

    // `None` means the task has not published a result yet; wait for the
    // first change. (A cached, already-finished sync is `Some` immediately.)
    let we_have_to_wait = rx.borrow().is_none();
    if we_have_to_wait {
        if let Err(e) = rx.changed().await {
            error!("Error waiting for sync: {}", e);
        }
    }

    let result = match rx
        .borrow()
        .as_ref()
        .expect("When sync channel changes it's always set to some")
    {
        Ok(response) => Ok(response.clone()),
        Err(error) => Err(error.to_response()),
    };

    result
}
|
||||||
|
|
||||||
|
/// Runs [`sync_helper`] for one `(user, device)` pair and publishes the result
/// on the watch channel that `sync_events_route` handed out to waiters.
///
/// If the helper reports that its result must not be cached, the receiver
/// registered in `sync_receivers` for this `since` token is removed again so
/// that the next request with the same token recomputes the response.
async fn sync_helper_wrapper(
    db: Arc<DatabaseGuard>,
    sender_user: Box<UserId>,
    sender_device: Box<DeviceId>,
    body: sync_events::IncomingRequest,
    tx: Sender<Option<ConduitResult<sync_events::Response>>>,
) {
    // Remember the token this task was spawned for, so we only evict our own
    // cache entry below (the device may have started a newer sync meanwhile).
    let since = body.since.clone();

    let r = sync_helper(
        Arc::clone(&db),
        sender_user.clone(),
        sender_device.clone(),
        body,
    )
    .await;

    if let Ok((_, caching_allowed)) = r {
        if !caching_allowed {
            match db
                .globals
                .sync_receivers
                .write()
                .unwrap()
                .entry((sender_user, sender_device))
            {
                Entry::Occupied(o) => {
                    // Only remove if the device didn't start a different /sync already
                    if o.get().0 == since {
                        o.remove();
                    }
                }
                Entry::Vacant(_) => {}
            }
        }
    }

    // Release the database guard before notifying waiters.
    drop(db);

    // Waiters may all have gone away; a send error is fine to ignore.
    let _ = tx.send(Some(r.map(|(r, _)| r.into())));
}
|
||||||
|
|
||||||
|
async fn sync_helper(
|
||||||
|
db: Arc<DatabaseGuard>,
|
||||||
|
sender_user: Box<UserId>,
|
||||||
|
sender_device: Box<DeviceId>,
|
||||||
|
body: sync_events::IncomingRequest,
|
||||||
|
// bool = caching allowed
|
||||||
|
) -> Result<(sync_events::Response, bool), Error> {
|
||||||
|
// TODO: match body.set_presence {
|
||||||
|
db.rooms.edus.ping_presence(&sender_user)?;
|
||||||
|
|
||||||
|
// Setup watchers, so if there's no response, we can wait for them
|
||||||
|
let watcher = db.watch(&sender_user, &sender_device);
|
||||||
|
|
||||||
|
let next_batch = db.globals.current_count()?;
|
||||||
|
let next_batch_string = next_batch.to_string();
|
||||||
|
|
||||||
|
// Load filter
|
||||||
|
let filter = match body.filter {
|
||||||
|
None => IncomingFilterDefinition::default(),
|
||||||
|
Some(sync_events::IncomingFilter::FilterDefinition(filter)) => filter,
|
||||||
|
Some(sync_events::IncomingFilter::FilterId(filter_id)) => db
|
||||||
|
.users
|
||||||
|
.get_filter(&sender_user, &filter_id)?
|
||||||
|
.unwrap_or_default(),
|
||||||
|
};
|
||||||
|
|
||||||
|
let (lazy_load_enabled, lazy_load_send_redundant) = match filter.room.state.lazy_load_options {
|
||||||
|
LazyLoadOptions::Enabled {
|
||||||
|
include_redundant_members: redundant,
|
||||||
|
} => (true, redundant),
|
||||||
|
_ => (false, false),
|
||||||
|
};
|
||||||
|
|
||||||
|
let mut joined_rooms = BTreeMap::new();
|
||||||
|
let since = body
|
||||||
|
.since
|
||||||
|
.clone()
|
||||||
|
.and_then(|string| string.parse().ok())
|
||||||
|
.unwrap_or(0);
|
||||||
|
|
||||||
|
let mut presence_updates = HashMap::new();
|
||||||
|
let mut left_encrypted_users = HashSet::new(); // Users that have left any encrypted rooms the sender was in
|
||||||
|
let mut device_list_updates = HashSet::new();
|
||||||
|
let mut device_list_left = HashSet::new();
|
||||||
|
|
||||||
|
// Look for device list updates of this account
|
||||||
|
device_list_updates.extend(
|
||||||
|
db.users
|
||||||
|
.keys_changed(&sender_user.to_string(), since, None)
|
||||||
|
.filter_map(|r| r.ok()),
|
||||||
|
);
|
||||||
|
|
||||||
|
let all_joined_rooms = db.rooms.rooms_joined(&sender_user).collect::<Vec<_>>();
|
||||||
|
for room_id in all_joined_rooms {
|
||||||
|
let room_id = room_id?;
|
||||||
|
|
||||||
|
// Get and drop the lock to wait for remaining operations to finish
|
||||||
|
// This will make sure the we have all events until next_batch
|
||||||
|
let mutex_insert = Arc::clone(
|
||||||
|
db.globals
|
||||||
|
.roomid_mutex_insert
|
||||||
|
.write()
|
||||||
|
.unwrap()
|
||||||
|
.entry(room_id.clone())
|
||||||
|
.or_default(),
|
||||||
|
);
|
||||||
|
let insert_lock = mutex_insert.lock().unwrap();
|
||||||
|
drop(insert_lock);
|
||||||
|
|
||||||
|
let mut non_timeline_pdus = db
|
||||||
|
.rooms
|
||||||
|
.pdus_until(&sender_user, &room_id, u64::MAX)?
|
||||||
|
.filter_map(|r| {
|
||||||
|
// Filter out buggy events
|
||||||
|
if r.is_err() {
|
||||||
|
error!("Bad pdu in pdus_since: {:?}", r);
|
||||||
|
}
|
||||||
|
r.ok()
|
||||||
|
})
|
||||||
|
.take_while(|(pduid, _)| {
|
||||||
|
db.rooms
|
||||||
|
.pdu_count(pduid)
|
||||||
|
.map_or(false, |count| count > since)
|
||||||
|
});
|
||||||
|
|
||||||
|
// Take the last 10 events for the timeline
|
||||||
|
let timeline_pdus: Vec<_> = non_timeline_pdus
|
||||||
|
.by_ref()
|
||||||
|
.take(10)
|
||||||
|
.collect::<Vec<_>>()
|
||||||
|
.into_iter()
|
||||||
|
.rev()
|
||||||
|
.collect();
|
||||||
|
|
||||||
|
let send_notification_counts = !timeline_pdus.is_empty()
|
||||||
|
|| db
|
||||||
|
.rooms
|
||||||
|
.edus
|
||||||
|
.last_privateread_update(&sender_user, &room_id)?
|
||||||
|
> since;
|
||||||
|
|
||||||
|
// They /sync response doesn't always return all messages, so we say the output is
|
||||||
|
// limited unless there are events in non_timeline_pdus
|
||||||
|
let limited = non_timeline_pdus.next().is_some();
|
||||||
|
|
||||||
|
let mut timeline_users = HashSet::new();
|
||||||
|
for (_, event) in &timeline_pdus {
|
||||||
|
timeline_users.insert(event.sender.as_str().to_owned());
|
||||||
|
}
|
||||||
|
|
||||||
|
db.rooms
|
||||||
|
.lazy_load_confirm_delivery(&sender_user, &sender_device, &room_id, since)?;
|
||||||
|
|
||||||
|
// Database queries:
|
||||||
|
|
||||||
|
let current_shortstatehash = db
|
||||||
|
.rooms
|
||||||
|
.current_shortstatehash(&room_id)?
|
||||||
|
.expect("All rooms have state");
|
||||||
|
|
||||||
|
let since_shortstatehash = db.rooms.get_token_shortstatehash(&room_id, since)?;
|
||||||
|
|
||||||
|
// Calculates joined_member_count, invited_member_count and heroes
|
||||||
|
let calculate_counts = || {
|
||||||
|
let joined_member_count = db.rooms.room_joined_count(&room_id)?.unwrap_or(0);
|
||||||
|
let invited_member_count = db.rooms.room_invited_count(&room_id)?.unwrap_or(0);
|
||||||
|
|
||||||
|
// Recalculate heroes (first 5 members)
|
||||||
|
let mut heroes = Vec::new();
|
||||||
|
|
||||||
|
if joined_member_count + invited_member_count <= 5 {
|
||||||
|
// Go through all PDUs and for each member event, check if the user is still joined or
|
||||||
|
// invited until we have 5 or we reach the end
|
||||||
|
|
||||||
|
for hero in db
|
||||||
|
.rooms
|
||||||
|
.all_pdus(&sender_user, &room_id)?
|
||||||
|
.filter_map(|pdu| pdu.ok()) // Ignore all broken pdus
|
||||||
|
.filter(|(_, pdu)| pdu.kind == EventType::RoomMember)
|
||||||
|
.map(|(_, pdu)| {
|
||||||
|
let content: RoomMemberEventContent =
|
||||||
|
serde_json::from_str(pdu.content.get()).map_err(|_| {
|
||||||
|
Error::bad_database("Invalid member event in database.")
|
||||||
|
})?;
|
||||||
|
|
||||||
|
if let Some(state_key) = &pdu.state_key {
|
||||||
|
let user_id = UserId::parse(state_key.clone()).map_err(|_| {
|
||||||
|
Error::bad_database("Invalid UserId in member PDU.")
|
||||||
|
})?;
|
||||||
|
|
||||||
|
// The membership was and still is invite or join
|
||||||
|
if matches!(
|
||||||
|
content.membership,
|
||||||
|
MembershipState::Join | MembershipState::Invite
|
||||||
|
) && (db.rooms.is_joined(&user_id, &room_id)?
|
||||||
|
|| db.rooms.is_invited(&user_id, &room_id)?)
|
||||||
|
{
|
||||||
|
Ok::<_, Error>(Some(state_key.clone()))
|
||||||
|
} else {
|
||||||
|
Ok(None)
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
Ok(None)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
// Filter out buggy users
|
||||||
|
.filter_map(|u| u.ok())
|
||||||
|
// Filter for possible heroes
|
||||||
|
.flatten()
|
||||||
|
{
|
||||||
|
if heroes.contains(&hero) || hero == sender_user.as_str() {
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
|
||||||
|
heroes.push(hero);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok::<_, Error>((
|
||||||
|
Some(joined_member_count),
|
||||||
|
Some(invited_member_count),
|
||||||
|
heroes,
|
||||||
|
))
|
||||||
|
};
|
||||||
|
|
||||||
|
let (
|
||||||
|
heroes,
|
||||||
|
joined_member_count,
|
||||||
|
invited_member_count,
|
||||||
|
joined_since_last_sync,
|
||||||
|
state_events,
|
||||||
|
) = if since_shortstatehash.is_none() {
|
||||||
|
// Probably since = 0, we will do an initial sync
|
||||||
|
|
||||||
|
let (joined_member_count, invited_member_count, heroes) = calculate_counts()?;
|
||||||
|
|
||||||
|
let current_state_ids = db.rooms.state_full_ids(current_shortstatehash)?;
|
||||||
|
|
||||||
|
let mut state_events = Vec::new();
|
||||||
|
let mut lazy_loaded = HashSet::new();
|
||||||
|
|
||||||
|
for (shortstatekey, id) in current_state_ids {
|
||||||
|
let (event_type, state_key) = db.rooms.get_statekey_from_short(shortstatekey)?;
|
||||||
|
|
||||||
|
if event_type != EventType::RoomMember {
|
||||||
|
let pdu = match db.rooms.get_pdu(&id)? {
|
||||||
|
Some(pdu) => pdu,
|
||||||
|
None => {
|
||||||
|
error!("Pdu in state not found: {}", id);
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
};
|
||||||
|
state_events.push(pdu);
|
||||||
|
} else if !lazy_load_enabled
|
||||||
|
|| body.full_state
|
||||||
|
|| timeline_users.contains(&state_key)
|
||||||
|
{
|
||||||
|
let pdu = match db.rooms.get_pdu(&id)? {
|
||||||
|
Some(pdu) => pdu,
|
||||||
|
None => {
|
||||||
|
error!("Pdu in state not found: {}", id);
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
};
|
||||||
|
lazy_loaded.insert(
|
||||||
|
UserId::parse(state_key.as_ref())
|
||||||
|
.expect("they are in timeline_users, so they should be correct"),
|
||||||
|
);
|
||||||
|
state_events.push(pdu);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Reset lazy loading because this is an initial sync
|
||||||
|
db.rooms
|
||||||
|
.lazy_load_reset(&sender_user, &sender_device, &room_id)?;
|
||||||
|
|
||||||
|
// The state_events above should contain all timeline_users, let's mark them as lazy
|
||||||
|
// loaded.
|
||||||
|
db.rooms.lazy_load_mark_sent(
|
||||||
|
&sender_user,
|
||||||
|
&sender_device,
|
||||||
|
&room_id,
|
||||||
|
lazy_loaded,
|
||||||
|
next_batch,
|
||||||
|
);
|
||||||
|
|
||||||
|
(
|
||||||
|
heroes,
|
||||||
|
joined_member_count,
|
||||||
|
invited_member_count,
|
||||||
|
true,
|
||||||
|
state_events,
|
||||||
|
)
|
||||||
|
} else if timeline_pdus.is_empty() && since_shortstatehash == Some(current_shortstatehash) {
|
||||||
|
// No state changes
|
||||||
|
(Vec::new(), None, None, false, Vec::new())
|
||||||
|
} else {
|
||||||
|
// Incremental /sync
|
||||||
|
let since_shortstatehash = since_shortstatehash.unwrap();
|
||||||
|
|
||||||
|
let since_sender_member: Option<RoomMemberEventContent> = db
|
||||||
|
.rooms
|
||||||
|
.state_get(
|
||||||
|
since_shortstatehash,
|
||||||
|
&EventType::RoomMember,
|
||||||
|
sender_user.as_str(),
|
||||||
|
)?
|
||||||
|
.and_then(|pdu| {
|
||||||
|
serde_json::from_str(pdu.content.get())
|
||||||
|
.map_err(|_| Error::bad_database("Invalid PDU in database."))
|
||||||
|
.ok()
|
||||||
|
});
|
||||||
|
|
||||||
|
let joined_since_last_sync = since_sender_member
|
||||||
|
.map_or(true, |member| member.membership != MembershipState::Join);
|
||||||
|
|
||||||
|
let mut state_events = Vec::new();
|
||||||
|
let mut lazy_loaded = HashSet::new();
|
||||||
|
|
||||||
|
if since_shortstatehash != current_shortstatehash {
|
||||||
|
let current_state_ids = db.rooms.state_full_ids(current_shortstatehash)?;
|
||||||
|
let since_state_ids = db.rooms.state_full_ids(since_shortstatehash)?;
|
||||||
|
|
||||||
|
for (key, id) in current_state_ids {
|
||||||
|
if body.full_state || since_state_ids.get(&key) != Some(&id) {
|
||||||
|
let pdu = match db.rooms.get_pdu(&id)? {
|
||||||
|
Some(pdu) => pdu,
|
||||||
|
None => {
|
||||||
|
error!("Pdu in state not found: {}", id);
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
if pdu.kind == EventType::RoomMember {
|
||||||
|
match UserId::parse(
|
||||||
|
pdu.state_key
|
||||||
|
.as_ref()
|
||||||
|
.expect("State event has state key")
|
||||||
|
.clone(),
|
||||||
|
) {
|
||||||
|
Ok(state_key_userid) => {
|
||||||
|
lazy_loaded.insert(state_key_userid);
|
||||||
|
}
|
||||||
|
Err(e) => error!("Invalid state key for member event: {}", e),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
state_events.push(pdu);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
for (_, event) in &timeline_pdus {
|
||||||
|
if lazy_loaded.contains(&event.sender) {
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
|
||||||
|
if !db.rooms.lazy_load_was_sent_before(
|
||||||
|
&sender_user,
|
||||||
|
&sender_device,
|
||||||
|
&room_id,
|
||||||
|
&event.sender,
|
||||||
|
)? || lazy_load_send_redundant
|
||||||
|
{
|
||||||
|
if let Some(member_event) = db.rooms.room_state_get(
|
||||||
|
&room_id,
|
||||||
|
&EventType::RoomMember,
|
||||||
|
event.sender.as_str(),
|
||||||
|
)? {
|
||||||
|
lazy_loaded.insert(event.sender.clone());
|
||||||
|
state_events.push(member_event);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
db.rooms.lazy_load_mark_sent(
|
||||||
|
&sender_user,
|
||||||
|
&sender_device,
|
||||||
|
&room_id,
|
||||||
|
lazy_loaded,
|
||||||
|
next_batch,
|
||||||
|
);
|
||||||
|
|
||||||
|
let encrypted_room = db
|
||||||
|
.rooms
|
||||||
|
.state_get(current_shortstatehash, &EventType::RoomEncryption, "")?
|
||||||
|
.is_some();
|
||||||
|
|
||||||
|
let since_encryption =
|
||||||
|
db.rooms
|
||||||
|
.state_get(since_shortstatehash, &EventType::RoomEncryption, "")?;
|
||||||
|
|
||||||
|
// Calculations:
|
||||||
|
let new_encrypted_room = encrypted_room && since_encryption.is_none();
|
||||||
|
|
||||||
|
let send_member_count = state_events
|
||||||
|
.iter()
|
||||||
|
.any(|event| event.kind == EventType::RoomMember);
|
||||||
|
|
||||||
|
if encrypted_room {
|
||||||
|
for state_event in &state_events {
|
||||||
|
if state_event.kind != EventType::RoomMember {
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
|
||||||
|
if let Some(state_key) = &state_event.state_key {
|
||||||
|
let user_id = UserId::parse(state_key.clone())
|
||||||
|
.map_err(|_| Error::bad_database("Invalid UserId in member PDU."))?;
|
||||||
|
|
||||||
|
if user_id == sender_user {
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
|
||||||
|
let new_membership = serde_json::from_str::<RoomMemberEventContent>(
|
||||||
|
state_event.content.get(),
|
||||||
|
)
|
||||||
|
.map_err(|_| Error::bad_database("Invalid PDU in database."))?
|
||||||
|
.membership;
|
||||||
|
|
||||||
|
match new_membership {
|
||||||
|
MembershipState::Join => {
|
||||||
|
// A new user joined an encrypted room
|
||||||
|
if !share_encrypted_room(&db, &sender_user, &user_id, &room_id)? {
|
||||||
|
device_list_updates.insert(user_id);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
MembershipState::Leave => {
|
||||||
|
// Write down users that have left encrypted rooms we are in
|
||||||
|
left_encrypted_users.insert(user_id);
|
||||||
|
}
|
||||||
|
_ => {}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if joined_since_last_sync && encrypted_room || new_encrypted_room {
|
||||||
|
// If the user is in a new encrypted room, give them all joined users
|
||||||
|
device_list_updates.extend(
|
||||||
|
db.rooms
|
||||||
|
.room_members(&room_id)
|
||||||
|
.flatten()
|
||||||
|
.filter(|user_id| {
|
||||||
|
// Don't send key updates from the sender to the sender
|
||||||
|
&sender_user != user_id
|
||||||
|
})
|
||||||
|
.filter(|user_id| {
|
||||||
|
// Only send keys if the sender doesn't share an encrypted room with the target already
|
||||||
|
!share_encrypted_room(&db, &sender_user, user_id, &room_id)
|
||||||
|
.unwrap_or(false)
|
||||||
|
}),
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
let (joined_member_count, invited_member_count, heroes) = if send_member_count {
|
||||||
|
calculate_counts()?
|
||||||
|
} else {
|
||||||
|
(None, None, Vec::new())
|
||||||
|
};
|
||||||
|
|
||||||
|
(
|
||||||
|
heroes,
|
||||||
|
joined_member_count,
|
||||||
|
invited_member_count,
|
||||||
|
joined_since_last_sync,
|
||||||
|
state_events,
|
||||||
|
)
|
||||||
|
};
|
||||||
|
|
||||||
|
// Look for device list updates in this room
|
||||||
|
device_list_updates.extend(
|
||||||
|
db.users
|
||||||
|
.keys_changed(&room_id.to_string(), since, None)
|
||||||
|
.filter_map(|r| r.ok()),
|
||||||
|
);
|
||||||
|
|
||||||
|
let notification_count = if send_notification_counts {
|
||||||
|
Some(
|
||||||
|
db.rooms
|
||||||
|
.notification_count(&sender_user, &room_id)?
|
||||||
|
.try_into()
|
||||||
|
.expect("notification count can't go that high"),
|
||||||
|
)
|
||||||
|
} else {
|
||||||
|
None
|
||||||
|
};
|
||||||
|
|
||||||
|
let highlight_count = if send_notification_counts {
|
||||||
|
Some(
|
||||||
|
db.rooms
|
||||||
|
.highlight_count(&sender_user, &room_id)?
|
||||||
|
.try_into()
|
||||||
|
.expect("highlight count can't go that high"),
|
||||||
|
)
|
||||||
|
} else {
|
||||||
|
None
|
||||||
|
};
|
||||||
|
|
||||||
|
let prev_batch = timeline_pdus
|
||||||
|
.first()
|
||||||
|
.map_or(Ok::<_, Error>(None), |(pdu_id, _)| {
|
||||||
|
Ok(Some(db.rooms.pdu_count(pdu_id)?.to_string()))
|
||||||
|
})?;
|
||||||
|
|
||||||
|
let room_events: Vec<_> = timeline_pdus
|
||||||
|
.iter()
|
||||||
|
.map(|(_, pdu)| pdu.to_sync_room_event())
|
||||||
|
.collect();
|
||||||
|
|
||||||
|
let mut edus: Vec<_> = db
|
||||||
|
.rooms
|
||||||
|
.edus
|
||||||
|
.readreceipts_since(&room_id, since)
|
||||||
|
.filter_map(|r| r.ok()) // Filter out buggy events
|
||||||
|
.map(|(_, _, v)| v)
|
||||||
|
.collect();
|
||||||
|
|
||||||
|
if db.rooms.edus.last_typing_update(&room_id, &db.globals)? > since {
|
||||||
|
edus.push(
|
||||||
|
serde_json::from_str(
|
||||||
|
&serde_json::to_string(&AnySyncEphemeralRoomEvent::Typing(
|
||||||
|
db.rooms.edus.typings_all(&room_id)?,
|
||||||
|
))
|
||||||
|
.expect("event is valid, we just created it"),
|
||||||
|
)
|
||||||
|
.expect("event is valid, we just created it"),
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Save the state after this sync so we can send the correct state diff next sync
|
||||||
|
db.rooms
|
||||||
|
.associate_token_shortstatehash(&room_id, next_batch, current_shortstatehash)?;
|
||||||
|
|
||||||
|
let joined_room = sync_events::JoinedRoom {
|
||||||
|
account_data: sync_events::RoomAccountData {
|
||||||
|
events: db
|
||||||
|
.account_data
|
||||||
|
.changes_since(Some(&room_id), &sender_user, since)?
|
||||||
|
.into_iter()
|
||||||
|
.filter_map(|(_, v)| {
|
||||||
|
serde_json::from_str(v.json().get())
|
||||||
|
.map_err(|_| Error::bad_database("Invalid account event in database."))
|
||||||
|
.ok()
|
||||||
|
})
|
||||||
|
.collect(),
|
||||||
|
},
|
||||||
|
summary: sync_events::RoomSummary {
|
||||||
|
heroes,
|
||||||
|
joined_member_count: joined_member_count.map(|n| (n as u32).into()),
|
||||||
|
invited_member_count: invited_member_count.map(|n| (n as u32).into()),
|
||||||
|
},
|
||||||
|
unread_notifications: sync_events::UnreadNotificationsCount {
|
||||||
|
highlight_count,
|
||||||
|
notification_count,
|
||||||
|
},
|
||||||
|
timeline: sync_events::Timeline {
|
||||||
|
limited: limited || joined_since_last_sync,
|
||||||
|
prev_batch,
|
||||||
|
events: room_events,
|
||||||
|
},
|
||||||
|
state: sync_events::State {
|
||||||
|
events: state_events
|
||||||
|
.iter()
|
||||||
|
.map(|pdu| pdu.to_sync_state_event())
|
||||||
|
.collect(),
|
||||||
|
},
|
||||||
|
ephemeral: sync_events::Ephemeral { events: edus },
|
||||||
|
};
|
||||||
|
|
||||||
|
if !joined_room.is_empty() {
|
||||||
|
joined_rooms.insert(room_id.clone(), joined_room);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Take presence updates from this room
|
||||||
|
for (user_id, presence) in
|
||||||
|
db.rooms
|
||||||
|
.edus
|
||||||
|
.presence_since(&room_id, since, &db.rooms, &db.globals)?
|
||||||
|
{
|
||||||
|
match presence_updates.entry(user_id) {
|
||||||
|
Entry::Vacant(v) => {
|
||||||
|
v.insert(presence);
|
||||||
|
}
|
||||||
|
Entry::Occupied(mut o) => {
|
||||||
|
let p = o.get_mut();
|
||||||
|
|
||||||
|
// Update existing presence event with more info
|
||||||
|
p.content.presence = presence.content.presence;
|
||||||
|
if let Some(status_msg) = presence.content.status_msg {
|
||||||
|
p.content.status_msg = Some(status_msg);
|
||||||
|
}
|
||||||
|
if let Some(last_active_ago) = presence.content.last_active_ago {
|
||||||
|
p.content.last_active_ago = Some(last_active_ago);
|
||||||
|
}
|
||||||
|
if let Some(displayname) = presence.content.displayname {
|
||||||
|
p.content.displayname = Some(displayname);
|
||||||
|
}
|
||||||
|
if let Some(avatar_url) = presence.content.avatar_url {
|
||||||
|
p.content.avatar_url = Some(avatar_url);
|
||||||
|
}
|
||||||
|
if let Some(currently_active) = presence.content.currently_active {
|
||||||
|
p.content.currently_active = Some(currently_active);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
let mut left_rooms = BTreeMap::new();
|
||||||
|
let all_left_rooms: Vec<_> = db.rooms.rooms_left(&sender_user).collect();
|
||||||
|
for result in all_left_rooms {
|
||||||
|
let (room_id, left_state_events) = result?;
|
||||||
|
|
||||||
|
// Get and drop the lock to wait for remaining operations to finish
|
||||||
|
let mutex_insert = Arc::clone(
|
||||||
|
db.globals
|
||||||
|
.roomid_mutex_insert
|
||||||
|
.write()
|
||||||
|
.unwrap()
|
||||||
|
.entry(room_id.clone())
|
||||||
|
.or_default(),
|
||||||
|
);
|
||||||
|
let insert_lock = mutex_insert.lock().unwrap();
|
||||||
|
drop(insert_lock);
|
||||||
|
|
||||||
|
let left_count = db.rooms.get_left_count(&room_id, &sender_user)?;
|
||||||
|
|
||||||
|
// Left before last sync
|
||||||
|
if Some(since) >= left_count {
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
|
||||||
|
left_rooms.insert(
|
||||||
|
room_id.clone(),
|
||||||
|
sync_events::LeftRoom {
|
||||||
|
account_data: sync_events::RoomAccountData { events: Vec::new() },
|
||||||
|
timeline: sync_events::Timeline {
|
||||||
|
limited: false,
|
||||||
|
prev_batch: Some(next_batch_string.clone()),
|
||||||
|
events: Vec::new(),
|
||||||
|
},
|
||||||
|
state: sync_events::State {
|
||||||
|
events: left_state_events,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
let mut invited_rooms = BTreeMap::new();
|
||||||
|
let all_invited_rooms: Vec<_> = db.rooms.rooms_invited(&sender_user).collect();
|
||||||
|
for result in all_invited_rooms {
|
||||||
|
let (room_id, invite_state_events) = result?;
|
||||||
|
|
||||||
|
// Get and drop the lock to wait for remaining operations to finish
|
||||||
|
let mutex_insert = Arc::clone(
|
||||||
|
db.globals
|
||||||
|
.roomid_mutex_insert
|
||||||
|
.write()
|
||||||
|
.unwrap()
|
||||||
|
.entry(room_id.clone())
|
||||||
|
.or_default(),
|
||||||
|
);
|
||||||
|
let insert_lock = mutex_insert.lock().unwrap();
|
||||||
|
drop(insert_lock);
|
||||||
|
|
||||||
|
let invite_count = db.rooms.get_invite_count(&room_id, &sender_user)?;
|
||||||
|
|
||||||
|
// Invited before last sync
|
||||||
|
if Some(since) >= invite_count {
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
|
||||||
|
invited_rooms.insert(
|
||||||
|
room_id.clone(),
|
||||||
|
sync_events::InvitedRoom {
|
||||||
|
invite_state: sync_events::InviteState {
|
||||||
|
events: invite_state_events,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
for user_id in left_encrypted_users {
|
||||||
|
let still_share_encrypted_room = db
|
||||||
|
.rooms
|
||||||
|
.get_shared_rooms(vec![sender_user.clone(), user_id.clone()])?
|
||||||
|
.filter_map(|r| r.ok())
|
||||||
|
.filter_map(|other_room_id| {
|
||||||
|
Some(
|
||||||
|
db.rooms
|
||||||
|
.room_state_get(&other_room_id, &EventType::RoomEncryption, "")
|
||||||
|
.ok()?
|
||||||
|
.is_some(),
|
||||||
|
)
|
||||||
|
})
|
||||||
|
.all(|encrypted| !encrypted);
|
||||||
|
// If the user doesn't share an encrypted room with the target anymore, we need to tell
|
||||||
|
// them
|
||||||
|
if still_share_encrypted_room {
|
||||||
|
device_list_left.insert(user_id);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Remove all to-device events the device received *last time*
|
||||||
|
db.users
|
||||||
|
.remove_to_device_events(&sender_user, &sender_device, since)?;
|
||||||
|
|
||||||
|
let response = sync_events::Response {
|
||||||
|
next_batch: next_batch_string,
|
||||||
|
rooms: sync_events::Rooms {
|
||||||
|
leave: left_rooms,
|
||||||
|
join: joined_rooms,
|
||||||
|
invite: invited_rooms,
|
||||||
|
knock: BTreeMap::new(), // TODO
|
||||||
|
},
|
||||||
|
presence: sync_events::Presence {
|
||||||
|
events: presence_updates
|
||||||
|
.into_iter()
|
||||||
|
.map(|(_, v)| Raw::new(&v).expect("PresenceEvent always serializes successfully"))
|
||||||
|
.collect(),
|
||||||
|
},
|
||||||
|
account_data: sync_events::GlobalAccountData {
|
||||||
|
events: db
|
||||||
|
.account_data
|
||||||
|
.changes_since(None, &sender_user, since)?
|
||||||
|
.into_iter()
|
||||||
|
.filter_map(|(_, v)| {
|
||||||
|
serde_json::from_str(v.json().get())
|
||||||
|
.map_err(|_| Error::bad_database("Invalid account event in database."))
|
||||||
|
.ok()
|
||||||
|
})
|
||||||
|
.collect(),
|
||||||
|
},
|
||||||
|
device_lists: sync_events::DeviceLists {
|
||||||
|
changed: device_list_updates.into_iter().collect(),
|
||||||
|
left: device_list_left.into_iter().collect(),
|
||||||
|
},
|
||||||
|
device_one_time_keys_count: db.users.count_one_time_keys(&sender_user, &sender_device)?,
|
||||||
|
to_device: sync_events::ToDevice {
|
||||||
|
events: db
|
||||||
|
.users
|
||||||
|
.get_to_device_events(&sender_user, &sender_device)?,
|
||||||
|
},
|
||||||
|
// Fallback keys are not yet supported
|
||||||
|
device_unused_fallback_key_types: None,
|
||||||
|
};
|
||||||
|
|
||||||
|
// TODO: Retry the endpoint instead of returning (waiting for #118)
|
||||||
|
if !body.full_state
|
||||||
|
&& response.rooms.is_empty()
|
||||||
|
&& response.presence.is_empty()
|
||||||
|
&& response.account_data.is_empty()
|
||||||
|
&& response.device_lists.is_empty()
|
||||||
|
&& response.to_device.is_empty()
|
||||||
|
{
|
||||||
|
// Hang a few seconds so requests are not spammed
|
||||||
|
// Stop hanging if new info arrives
|
||||||
|
let mut duration = body.timeout.unwrap_or_default();
|
||||||
|
if duration.as_secs() > 30 {
|
||||||
|
duration = Duration::from_secs(30);
|
||||||
|
}
|
||||||
|
let _ = tokio::time::timeout(duration, watcher).await;
|
||||||
|
Ok((response, false))
|
||||||
|
} else {
|
||||||
|
Ok((response, since != next_batch)) // Only cache if we made progress
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[tracing::instrument(skip(db))]
|
||||||
|
fn share_encrypted_room(
|
||||||
|
db: &Database,
|
||||||
|
sender_user: &UserId,
|
||||||
|
user_id: &UserId,
|
||||||
|
ignore_room: &RoomId,
|
||||||
|
) -> Result<bool> {
|
||||||
|
Ok(db
|
||||||
|
.rooms
|
||||||
|
.get_shared_rooms(vec![sender_user.to_owned(), user_id.to_owned()])?
|
||||||
|
.filter_map(|r| r.ok())
|
||||||
|
.filter(|room_id| room_id != ignore_room)
|
||||||
|
.filter_map(|other_room_id| {
|
||||||
|
Some(
|
||||||
|
db.rooms
|
||||||
|
.room_state_get(&other_room_id, &EventType::RoomEncryption, "")
|
||||||
|
.ok()?
|
||||||
|
.is_some(),
|
||||||
|
)
|
||||||
|
})
|
||||||
|
.any(|encrypted| encrypted))
|
||||||
|
}
|
124
src/client_server/tag.rs
Normal file
124
src/client_server/tag.rs
Normal file
|
@ -0,0 +1,124 @@
|
||||||
|
use crate::{database::DatabaseGuard, ConduitResult, Ruma};
|
||||||
|
use ruma::{
|
||||||
|
api::client::r0::tag::{create_tag, delete_tag, get_tags},
|
||||||
|
events::{
|
||||||
|
tag::{TagEvent, TagEventContent},
|
||||||
|
EventType,
|
||||||
|
},
|
||||||
|
};
|
||||||
|
use std::collections::BTreeMap;
|
||||||
|
|
||||||
|
#[cfg(feature = "conduit_bin")]
|
||||||
|
use rocket::{delete, get, put};
|
||||||
|
|
||||||
|
/// # `PUT /_matrix/client/r0/user/{userId}/rooms/{roomId}/tags/{tag}`
|
||||||
|
///
|
||||||
|
/// Adds a tag to the room.
|
||||||
|
///
|
||||||
|
/// - Inserts the tag into the tag event of the room account data.
|
||||||
|
#[cfg_attr(
|
||||||
|
feature = "conduit_bin",
|
||||||
|
put("/_matrix/client/r0/user/<_>/rooms/<_>/tags/<_>", data = "<body>")
|
||||||
|
)]
|
||||||
|
#[tracing::instrument(skip(db, body))]
|
||||||
|
pub async fn update_tag_route(
|
||||||
|
db: DatabaseGuard,
|
||||||
|
body: Ruma<create_tag::Request<'_>>,
|
||||||
|
) -> ConduitResult<create_tag::Response> {
|
||||||
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
|
let mut tags_event = db
|
||||||
|
.account_data
|
||||||
|
.get(Some(&body.room_id), sender_user, EventType::Tag)?
|
||||||
|
.unwrap_or_else(|| TagEvent {
|
||||||
|
content: TagEventContent {
|
||||||
|
tags: BTreeMap::new(),
|
||||||
|
},
|
||||||
|
});
|
||||||
|
tags_event
|
||||||
|
.content
|
||||||
|
.tags
|
||||||
|
.insert(body.tag.clone().into(), body.tag_info.clone());
|
||||||
|
|
||||||
|
db.account_data.update(
|
||||||
|
Some(&body.room_id),
|
||||||
|
sender_user,
|
||||||
|
EventType::Tag,
|
||||||
|
&tags_event,
|
||||||
|
&db.globals,
|
||||||
|
)?;
|
||||||
|
|
||||||
|
db.flush()?;
|
||||||
|
|
||||||
|
Ok(create_tag::Response {}.into())
|
||||||
|
}
|
||||||
|
|
||||||
|
/// # `DELETE /_matrix/client/r0/user/{userId}/rooms/{roomId}/tags/{tag}`
|
||||||
|
///
|
||||||
|
/// Deletes a tag from the room.
|
||||||
|
///
|
||||||
|
/// - Removes the tag from the tag event of the room account data.
|
||||||
|
#[cfg_attr(
|
||||||
|
feature = "conduit_bin",
|
||||||
|
delete("/_matrix/client/r0/user/<_>/rooms/<_>/tags/<_>", data = "<body>")
|
||||||
|
)]
|
||||||
|
#[tracing::instrument(skip(db, body))]
|
||||||
|
pub async fn delete_tag_route(
|
||||||
|
db: DatabaseGuard,
|
||||||
|
body: Ruma<delete_tag::Request<'_>>,
|
||||||
|
) -> ConduitResult<delete_tag::Response> {
|
||||||
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
|
let mut tags_event = db
|
||||||
|
.account_data
|
||||||
|
.get(Some(&body.room_id), sender_user, EventType::Tag)?
|
||||||
|
.unwrap_or_else(|| TagEvent {
|
||||||
|
content: TagEventContent {
|
||||||
|
tags: BTreeMap::new(),
|
||||||
|
},
|
||||||
|
});
|
||||||
|
tags_event.content.tags.remove(&body.tag.clone().into());
|
||||||
|
|
||||||
|
db.account_data.update(
|
||||||
|
Some(&body.room_id),
|
||||||
|
sender_user,
|
||||||
|
EventType::Tag,
|
||||||
|
&tags_event,
|
||||||
|
&db.globals,
|
||||||
|
)?;
|
||||||
|
|
||||||
|
db.flush()?;
|
||||||
|
|
||||||
|
Ok(delete_tag::Response {}.into())
|
||||||
|
}
|
||||||
|
|
||||||
|
/// # `GET /_matrix/client/r0/user/{userId}/rooms/{roomId}/tags`
|
||||||
|
///
|
||||||
|
/// Returns tags on the room.
|
||||||
|
///
|
||||||
|
/// - Gets the tag event of the room account data.
|
||||||
|
#[cfg_attr(
|
||||||
|
feature = "conduit_bin",
|
||||||
|
get("/_matrix/client/r0/user/<_>/rooms/<_>/tags", data = "<body>")
|
||||||
|
)]
|
||||||
|
#[tracing::instrument(skip(db, body))]
|
||||||
|
pub async fn get_tags_route(
|
||||||
|
db: DatabaseGuard,
|
||||||
|
body: Ruma<get_tags::Request<'_>>,
|
||||||
|
) -> ConduitResult<get_tags::Response> {
|
||||||
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
|
Ok(get_tags::Response {
|
||||||
|
tags: db
|
||||||
|
.account_data
|
||||||
|
.get(Some(&body.room_id), sender_user, EventType::Tag)?
|
||||||
|
.unwrap_or_else(|| TagEvent {
|
||||||
|
content: TagEventContent {
|
||||||
|
tags: BTreeMap::new(),
|
||||||
|
},
|
||||||
|
})
|
||||||
|
.content
|
||||||
|
.tags,
|
||||||
|
}
|
||||||
|
.into())
|
||||||
|
}
|
22
src/client_server/thirdparty.rs
Normal file
22
src/client_server/thirdparty.rs
Normal file
|
@ -0,0 +1,22 @@
|
||||||
|
use crate::ConduitResult;
|
||||||
|
use ruma::api::client::r0::thirdparty::get_protocols;
|
||||||
|
|
||||||
|
#[cfg(feature = "conduit_bin")]
|
||||||
|
use rocket::get;
|
||||||
|
use std::collections::BTreeMap;
|
||||||
|
|
||||||
|
/// # `GET /_matrix/client/r0/thirdparty/protocols`
|
||||||
|
///
|
||||||
|
/// TODO: Fetches all metadata about protocols supported by the homeserver.
|
||||||
|
#[cfg_attr(
|
||||||
|
feature = "conduit_bin",
|
||||||
|
get("/_matrix/client/r0/thirdparty/protocols")
|
||||||
|
)]
|
||||||
|
#[tracing::instrument]
|
||||||
|
pub async fn get_protocols_route() -> ConduitResult<get_protocols::Response> {
|
||||||
|
// TODO
|
||||||
|
Ok(get_protocols::Response {
|
||||||
|
protocols: BTreeMap::new(),
|
||||||
|
}
|
||||||
|
.into())
|
||||||
|
}
|
|
@ -1,81 +1,93 @@
|
||||||
use std::collections::BTreeMap;
|
use std::collections::BTreeMap;
|
||||||
|
|
||||||
use crate::{services, Error, Result, Ruma};
|
use crate::{database::DatabaseGuard, ConduitResult, Error, Ruma};
|
||||||
use ruma::{
|
use ruma::{
|
||||||
api::{
|
api::{
|
||||||
client::{error::ErrorKind, to_device::send_event_to_device},
|
client::{error::ErrorKind, r0::to_device::send_event_to_device},
|
||||||
federation::{self, transactions::edu::DirectDeviceContent},
|
federation::{self, transactions::edu::DirectDeviceContent},
|
||||||
},
|
},
|
||||||
|
events::EventType,
|
||||||
to_device::DeviceIdOrAllDevices,
|
to_device::DeviceIdOrAllDevices,
|
||||||
};
|
};
|
||||||
|
|
||||||
|
#[cfg(feature = "conduit_bin")]
|
||||||
|
use rocket::put;
|
||||||
|
|
||||||
/// # `PUT /_matrix/client/r0/sendToDevice/{eventType}/{txnId}`
|
/// # `PUT /_matrix/client/r0/sendToDevice/{eventType}/{txnId}`
|
||||||
///
|
///
|
||||||
/// Send a to-device event to a set of client devices.
|
/// Send a to-device event to a set of client devices.
|
||||||
|
#[cfg_attr(
|
||||||
|
feature = "conduit_bin",
|
||||||
|
put("/_matrix/client/r0/sendToDevice/<_>/<_>", data = "<body>")
|
||||||
|
)]
|
||||||
|
#[tracing::instrument(skip(db, body))]
|
||||||
pub async fn send_event_to_device_route(
|
pub async fn send_event_to_device_route(
|
||||||
body: Ruma<send_event_to_device::v3::Request>,
|
db: DatabaseGuard,
|
||||||
) -> Result<send_event_to_device::v3::Response> {
|
body: Ruma<send_event_to_device::Request<'_>>,
|
||||||
|
) -> ConduitResult<send_event_to_device::Response> {
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
let sender_device = body.sender_device.as_deref();
|
let sender_device = body.sender_device.as_deref();
|
||||||
|
|
||||||
|
// TODO: uncomment when https://github.com/vector-im/element-android/issues/3589 is solved
|
||||||
// Check if this is a new transaction id
|
// Check if this is a new transaction id
|
||||||
if services()
|
/*
|
||||||
|
if db
|
||||||
.transaction_ids
|
.transaction_ids
|
||||||
.existing_txnid(sender_user, sender_device, &body.txn_id)?
|
.existing_txnid(sender_user, sender_device, &body.txn_id)?
|
||||||
.is_some()
|
.is_some()
|
||||||
{
|
{
|
||||||
return Ok(send_event_to_device::v3::Response {});
|
return Ok(send_event_to_device::Response.into());
|
||||||
}
|
}
|
||||||
|
*/
|
||||||
|
|
||||||
for (target_user_id, map) in &body.messages {
|
for (target_user_id, map) in &body.messages {
|
||||||
for (target_device_id_maybe, event) in map {
|
for (target_device_id_maybe, event) in map {
|
||||||
if target_user_id.server_name() != services().globals.server_name() {
|
if target_user_id.server_name() != db.globals.server_name() {
|
||||||
let mut map = BTreeMap::new();
|
let mut map = BTreeMap::new();
|
||||||
map.insert(target_device_id_maybe.clone(), event.clone());
|
map.insert(target_device_id_maybe.clone(), event.clone());
|
||||||
let mut messages = BTreeMap::new();
|
let mut messages = BTreeMap::new();
|
||||||
messages.insert(target_user_id.clone(), map);
|
messages.insert(target_user_id.clone(), map);
|
||||||
let count = services().globals.next_count()?;
|
|
||||||
|
|
||||||
services().sending.send_reliable_edu(
|
db.sending.send_reliable_edu(
|
||||||
target_user_id.server_name(),
|
target_user_id.server_name(),
|
||||||
serde_json::to_vec(&federation::transactions::edu::Edu::DirectToDevice(
|
serde_json::to_vec(&federation::transactions::edu::Edu::DirectToDevice(
|
||||||
DirectDeviceContent {
|
DirectDeviceContent {
|
||||||
sender: sender_user.clone(),
|
sender: sender_user.clone(),
|
||||||
ev_type: body.event_type.clone(),
|
ev_type: EventType::from(&*body.event_type),
|
||||||
message_id: count.to_string().into(),
|
message_id: body.txn_id.clone(),
|
||||||
messages,
|
messages,
|
||||||
},
|
},
|
||||||
))
|
))
|
||||||
.expect("DirectToDevice EDU can be serialized"),
|
.expect("DirectToDevice EDU can be serialized"),
|
||||||
count,
|
db.globals.next_count()?,
|
||||||
)?;
|
)?;
|
||||||
|
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
|
|
||||||
match target_device_id_maybe {
|
match target_device_id_maybe {
|
||||||
DeviceIdOrAllDevices::DeviceId(target_device_id) => {
|
DeviceIdOrAllDevices::DeviceId(target_device_id) => db.users.add_to_device_event(
|
||||||
services().users.add_to_device_event(
|
|
||||||
sender_user,
|
sender_user,
|
||||||
target_user_id,
|
target_user_id,
|
||||||
target_device_id,
|
target_device_id,
|
||||||
&body.event_type.to_string(),
|
&body.event_type,
|
||||||
event.deserialize_as().map_err(|_| {
|
event.deserialize_as().map_err(|_| {
|
||||||
Error::BadRequest(ErrorKind::InvalidParam, "Event is invalid")
|
Error::BadRequest(ErrorKind::InvalidParam, "Event is invalid")
|
||||||
})?,
|
})?,
|
||||||
)?
|
&db.globals,
|
||||||
}
|
)?,
|
||||||
|
|
||||||
DeviceIdOrAllDevices::AllDevices => {
|
DeviceIdOrAllDevices::AllDevices => {
|
||||||
for target_device_id in services().users.all_device_ids(target_user_id) {
|
for target_device_id in db.users.all_device_ids(target_user_id) {
|
||||||
services().users.add_to_device_event(
|
db.users.add_to_device_event(
|
||||||
sender_user,
|
sender_user,
|
||||||
target_user_id,
|
target_user_id,
|
||||||
&target_device_id?,
|
&target_device_id?,
|
||||||
&body.event_type.to_string(),
|
&body.event_type,
|
||||||
event.deserialize_as().map_err(|_| {
|
event.deserialize_as().map_err(|_| {
|
||||||
Error::BadRequest(ErrorKind::InvalidParam, "Event is invalid")
|
Error::BadRequest(ErrorKind::InvalidParam, "Event is invalid")
|
||||||
})?,
|
})?,
|
||||||
|
&db.globals,
|
||||||
)?;
|
)?;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -84,9 +96,10 @@ pub async fn send_event_to_device_route(
|
||||||
}
|
}
|
||||||
|
|
||||||
// Save transaction id with empty data
|
// Save transaction id with empty data
|
||||||
services()
|
db.transaction_ids
|
||||||
.transaction_ids
|
|
||||||
.add_txnid(sender_user, sender_device, &body.txn_id, &[])?;
|
.add_txnid(sender_user, sender_device, &body.txn_id, &[])?;
|
||||||
|
|
||||||
Ok(send_event_to_device::v3::Response {})
|
db.flush()?;
|
||||||
|
|
||||||
|
Ok(send_event_to_device::Response {}.into())
|
||||||
}
|
}
|
36
src/client_server/typing.rs
Normal file
36
src/client_server/typing.rs
Normal file
|
@ -0,0 +1,36 @@
|
||||||
|
use crate::{database::DatabaseGuard, utils, ConduitResult, Ruma};
|
||||||
|
use create_typing_event::Typing;
|
||||||
|
use ruma::api::client::r0::typing::create_typing_event;
|
||||||
|
|
||||||
|
#[cfg(feature = "conduit_bin")]
|
||||||
|
use rocket::put;
|
||||||
|
|
||||||
|
/// # `PUT /_matrix/client/r0/rooms/{roomId}/typing/{userId}`
|
||||||
|
///
|
||||||
|
/// Sets the typing state of the sender user.
|
||||||
|
#[cfg_attr(
|
||||||
|
feature = "conduit_bin",
|
||||||
|
put("/_matrix/client/r0/rooms/<_>/typing/<_>", data = "<body>")
|
||||||
|
)]
|
||||||
|
#[tracing::instrument(skip(db, body))]
|
||||||
|
pub fn create_typing_event_route(
|
||||||
|
db: DatabaseGuard,
|
||||||
|
body: Ruma<create_typing_event::Request<'_>>,
|
||||||
|
) -> ConduitResult<create_typing_event::Response> {
|
||||||
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
|
if let Typing::Yes(duration) = body.state {
|
||||||
|
db.rooms.edus.typing_add(
|
||||||
|
sender_user,
|
||||||
|
&body.room_id,
|
||||||
|
duration.as_millis() as u64 + utils::millis_since_unix_epoch(),
|
||||||
|
&db.globals,
|
||||||
|
)?;
|
||||||
|
} else {
|
||||||
|
db.rooms
|
||||||
|
.edus
|
||||||
|
.typing_remove(sender_user, &body.room_id, &db.globals)?;
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(create_typing_event::Response {}.into())
|
||||||
|
}
|
28
src/client_server/unversioned.rs
Normal file
28
src/client_server/unversioned.rs
Normal file
|
@ -0,0 +1,28 @@
|
||||||
|
use std::{collections::BTreeMap, iter::FromIterator};
|
||||||
|
|
||||||
|
use crate::ConduitResult;
|
||||||
|
use ruma::api::client::unversioned::get_supported_versions;
|
||||||
|
|
||||||
|
#[cfg(feature = "conduit_bin")]
|
||||||
|
use rocket::get;
|
||||||
|
|
||||||
|
/// # `GET /_matrix/client/versions`
|
||||||
|
///
|
||||||
|
/// Get the versions of the specification and unstable features supported by this server.
|
||||||
|
///
|
||||||
|
/// - Versions take the form MAJOR.MINOR.PATCH
|
||||||
|
/// - Only the latest PATCH release will be reported for each MAJOR.MINOR value
|
||||||
|
/// - Unstable features are namespaced and may include version information in their name
|
||||||
|
///
|
||||||
|
/// Note: Unstable features are used while developing new features. Clients should avoid using
|
||||||
|
/// unstable features in their stable releases
|
||||||
|
#[cfg_attr(feature = "conduit_bin", get("/_matrix/client/versions"))]
|
||||||
|
#[tracing::instrument]
|
||||||
|
pub async fn get_supported_versions_route() -> ConduitResult<get_supported_versions::Response> {
|
||||||
|
let resp = get_supported_versions::Response {
|
||||||
|
versions: vec!["r0.5.0".to_owned(), "r0.6.0".to_owned()],
|
||||||
|
unstable_features: BTreeMap::from_iter([("org.matrix.e2e_cross_signing".to_owned(), true)]),
|
||||||
|
};
|
||||||
|
|
||||||
|
Ok(resp.into())
|
||||||
|
}
|
59
src/client_server/user_directory.rs
Normal file
59
src/client_server/user_directory.rs
Normal file
|
@ -0,0 +1,59 @@
|
||||||
|
use crate::{database::DatabaseGuard, ConduitResult, Ruma};
|
||||||
|
use ruma::api::client::r0::user_directory::search_users;
|
||||||
|
|
||||||
|
#[cfg(feature = "conduit_bin")]
|
||||||
|
use rocket::post;
|
||||||
|
|
||||||
|
/// # `POST /_matrix/client/r0/user_directory/search`
|
||||||
|
///
|
||||||
|
/// Searches all known users for a match.
|
||||||
|
///
|
||||||
|
/// - TODO: Hide users that are not in any public rooms?
|
||||||
|
#[cfg_attr(
|
||||||
|
feature = "conduit_bin",
|
||||||
|
post("/_matrix/client/r0/user_directory/search", data = "<body>")
|
||||||
|
)]
|
||||||
|
#[tracing::instrument(skip(db, body))]
|
||||||
|
pub async fn search_users_route(
|
||||||
|
db: DatabaseGuard,
|
||||||
|
body: Ruma<search_users::Request<'_>>,
|
||||||
|
) -> ConduitResult<search_users::Response> {
|
||||||
|
let limit = u64::from(body.limit) as usize;
|
||||||
|
|
||||||
|
let mut users = db.users.iter().filter_map(|user_id| {
|
||||||
|
// Filter out buggy users (they should not exist, but you never know...)
|
||||||
|
let user_id = user_id.ok()?;
|
||||||
|
|
||||||
|
let user = search_users::User {
|
||||||
|
user_id: user_id.clone(),
|
||||||
|
display_name: db.users.displayname(&user_id).ok()?,
|
||||||
|
avatar_url: db.users.avatar_url(&user_id).ok()?,
|
||||||
|
};
|
||||||
|
|
||||||
|
let user_id_matches = user
|
||||||
|
.user_id
|
||||||
|
.to_string()
|
||||||
|
.to_lowercase()
|
||||||
|
.contains(&body.search_term.to_lowercase());
|
||||||
|
|
||||||
|
let user_displayname_matches = user
|
||||||
|
.display_name
|
||||||
|
.as_ref()
|
||||||
|
.filter(|name| {
|
||||||
|
name.to_lowercase()
|
||||||
|
.contains(&body.search_term.to_lowercase())
|
||||||
|
})
|
||||||
|
.is_some();
|
||||||
|
|
||||||
|
if !user_id_matches && !user_displayname_matches {
|
||||||
|
return None;
|
||||||
|
}
|
||||||
|
|
||||||
|
Some(user)
|
||||||
|
});
|
||||||
|
|
||||||
|
let results = users.by_ref().take(limit).collect();
|
||||||
|
let limited = users.next().is_some();
|
||||||
|
|
||||||
|
Ok(search_users::Response { results, limited }.into())
|
||||||
|
}
|
58
src/client_server/voip.rs
Normal file
58
src/client_server/voip.rs
Normal file
|
@ -0,0 +1,58 @@
|
||||||
|
use crate::{database::DatabaseGuard, ConduitResult, Ruma};
|
||||||
|
use hmac::{Hmac, Mac, NewMac};
|
||||||
|
use ruma::api::client::r0::voip::get_turn_server_info;
|
||||||
|
use ruma::SecondsSinceUnixEpoch;
|
||||||
|
use sha1::Sha1;
|
||||||
|
use std::time::{Duration, SystemTime};
|
||||||
|
|
||||||
|
type HmacSha1 = Hmac<Sha1>;
|
||||||
|
|
||||||
|
#[cfg(feature = "conduit_bin")]
|
||||||
|
use rocket::get;
|
||||||
|
|
||||||
|
/// # `GET /_matrix/client/r0/voip/turnServer`
|
||||||
|
///
|
||||||
|
/// TODO: Returns information about the recommended turn server.
|
||||||
|
#[cfg_attr(
|
||||||
|
feature = "conduit_bin",
|
||||||
|
get("/_matrix/client/r0/voip/turnServer", data = "<body>")
|
||||||
|
)]
|
||||||
|
#[tracing::instrument(skip(body, db))]
|
||||||
|
pub async fn turn_server_route(
|
||||||
|
body: Ruma<get_turn_server_info::Request>,
|
||||||
|
db: DatabaseGuard,
|
||||||
|
) -> ConduitResult<get_turn_server_info::Response> {
|
||||||
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
|
let turn_secret = db.globals.turn_secret();
|
||||||
|
|
||||||
|
let (username, password) = if !turn_secret.is_empty() {
|
||||||
|
let expiry = SecondsSinceUnixEpoch::from_system_time(
|
||||||
|
SystemTime::now() + Duration::from_secs(db.globals.turn_ttl()),
|
||||||
|
)
|
||||||
|
.expect("time is valid");
|
||||||
|
|
||||||
|
let username: String = format!("{}:{}", expiry.get(), sender_user);
|
||||||
|
|
||||||
|
let mut mac = HmacSha1::new_from_slice(turn_secret.as_bytes())
|
||||||
|
.expect("HMAC can take key of any size");
|
||||||
|
mac.update(username.as_bytes());
|
||||||
|
|
||||||
|
let password: String = base64::encode_config(mac.finalize().into_bytes(), base64::STANDARD);
|
||||||
|
|
||||||
|
(username, password)
|
||||||
|
} else {
|
||||||
|
(
|
||||||
|
db.globals.turn_username().clone(),
|
||||||
|
db.globals.turn_password().clone(),
|
||||||
|
)
|
||||||
|
};
|
||||||
|
|
||||||
|
Ok(get_turn_server_info::Response {
|
||||||
|
username,
|
||||||
|
password,
|
||||||
|
uris: db.globals.turn_uris().to_vec(),
|
||||||
|
ttl: Duration::from_secs(db.globals.turn_ttl()),
|
||||||
|
}
|
||||||
|
.into())
|
||||||
|
}
|
131
src/config.rs
Normal file
131
src/config.rs
Normal file
|
@ -0,0 +1,131 @@
|
||||||
|
use std::collections::BTreeMap;
|
||||||
|
|
||||||
|
use ruma::ServerName;
|
||||||
|
use serde::{de::IgnoredAny, Deserialize};
|
||||||
|
use tracing::warn;
|
||||||
|
|
||||||
|
mod proxy;
|
||||||
|
|
||||||
|
use self::proxy::ProxyConfig;
|
||||||
|
|
||||||
|
#[derive(Clone, Debug, Deserialize)]
|
||||||
|
pub struct Config {
|
||||||
|
pub server_name: Box<ServerName>,
|
||||||
|
#[serde(default = "default_database_backend")]
|
||||||
|
pub database_backend: String,
|
||||||
|
pub database_path: String,
|
||||||
|
#[serde(default = "default_db_cache_capacity_mb")]
|
||||||
|
pub db_cache_capacity_mb: f64,
|
||||||
|
#[serde(default = "default_conduit_cache_capacity_modifier")]
|
||||||
|
pub conduit_cache_capacity_modifier: f64,
|
||||||
|
#[serde(default = "default_rocksdb_max_open_files")]
|
||||||
|
pub rocksdb_max_open_files: i32,
|
||||||
|
#[serde(default = "default_pdu_cache_capacity")]
|
||||||
|
pub pdu_cache_capacity: u32,
|
||||||
|
#[serde(default = "default_cleanup_second_interval")]
|
||||||
|
pub cleanup_second_interval: u32,
|
||||||
|
#[serde(default = "default_max_request_size")]
|
||||||
|
pub max_request_size: u32,
|
||||||
|
#[serde(default = "default_max_concurrent_requests")]
|
||||||
|
pub max_concurrent_requests: u16,
|
||||||
|
#[serde(default = "false_fn")]
|
||||||
|
pub allow_registration: bool,
|
||||||
|
#[serde(default = "true_fn")]
|
||||||
|
pub allow_encryption: bool,
|
||||||
|
#[serde(default = "false_fn")]
|
||||||
|
pub allow_federation: bool,
|
||||||
|
#[serde(default = "true_fn")]
|
||||||
|
pub allow_room_creation: bool,
|
||||||
|
#[serde(default = "false_fn")]
|
||||||
|
pub allow_jaeger: bool,
|
||||||
|
#[serde(default = "false_fn")]
|
||||||
|
pub tracing_flame: bool,
|
||||||
|
#[serde(default)]
|
||||||
|
pub proxy: ProxyConfig,
|
||||||
|
pub jwt_secret: Option<String>,
|
||||||
|
#[serde(default = "Vec::new")]
|
||||||
|
pub trusted_servers: Vec<Box<ServerName>>,
|
||||||
|
#[serde(default = "default_log")]
|
||||||
|
pub log: String,
|
||||||
|
#[serde(default)]
|
||||||
|
pub turn_username: String,
|
||||||
|
#[serde(default)]
|
||||||
|
pub turn_password: String,
|
||||||
|
#[serde(default = "Vec::new")]
|
||||||
|
pub turn_uris: Vec<String>,
|
||||||
|
#[serde(default)]
|
||||||
|
pub turn_secret: String,
|
||||||
|
#[serde(default = "default_turn_ttl")]
|
||||||
|
pub turn_ttl: u64,
|
||||||
|
|
||||||
|
#[serde(flatten)]
|
||||||
|
pub catchall: BTreeMap<String, IgnoredAny>,
|
||||||
|
}
|
||||||
|
|
||||||
|
const DEPRECATED_KEYS: &[&str] = &["cache_capacity"];
|
||||||
|
|
||||||
|
impl Config {
|
||||||
|
pub fn warn_deprecated(&self) {
|
||||||
|
let mut was_deprecated = false;
|
||||||
|
for key in self
|
||||||
|
.catchall
|
||||||
|
.keys()
|
||||||
|
.filter(|key| DEPRECATED_KEYS.iter().any(|s| s == key))
|
||||||
|
{
|
||||||
|
warn!("Config parameter {} is deprecated", key);
|
||||||
|
was_deprecated = true;
|
||||||
|
}
|
||||||
|
|
||||||
|
if was_deprecated {
|
||||||
|
warn!("Read conduit documentation and check your configuration if any new configuration parameters should be adjusted");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn false_fn() -> bool {
|
||||||
|
false
|
||||||
|
}
|
||||||
|
|
||||||
|
fn true_fn() -> bool {
|
||||||
|
true
|
||||||
|
}
|
||||||
|
|
||||||
|
fn default_database_backend() -> String {
|
||||||
|
"sqlite".to_owned()
|
||||||
|
}
|
||||||
|
|
||||||
|
fn default_db_cache_capacity_mb() -> f64 {
|
||||||
|
10.0
|
||||||
|
}
|
||||||
|
|
||||||
|
fn default_conduit_cache_capacity_modifier() -> f64 {
|
||||||
|
1.0
|
||||||
|
}
|
||||||
|
|
||||||
|
fn default_rocksdb_max_open_files() -> i32 {
|
||||||
|
20
|
||||||
|
}
|
||||||
|
|
||||||
|
fn default_pdu_cache_capacity() -> u32 {
|
||||||
|
150_000
|
||||||
|
}
|
||||||
|
|
||||||
|
fn default_cleanup_second_interval() -> u32 {
|
||||||
|
1 * 60 // every minute
|
||||||
|
}
|
||||||
|
|
||||||
|
fn default_max_request_size() -> u32 {
|
||||||
|
20 * 1024 * 1024 // Default to 20 MB
|
||||||
|
}
|
||||||
|
|
||||||
|
fn default_max_concurrent_requests() -> u16 {
|
||||||
|
100
|
||||||
|
}
|
||||||
|
|
||||||
|
fn default_log() -> String {
|
||||||
|
"info,state_res=warn,rocket=off,_=off,sled=off".to_owned()
|
||||||
|
}
|
||||||
|
|
||||||
|
fn default_turn_ttl() -> u64 {
|
||||||
|
60 * 60 * 24
|
||||||
|
}
|
|
@ -1,277 +0,0 @@
|
||||||
use std::{
|
|
||||||
collections::BTreeMap,
|
|
||||||
fmt,
|
|
||||||
net::{IpAddr, Ipv4Addr},
|
|
||||||
};
|
|
||||||
|
|
||||||
use ruma::{OwnedServerName, RoomVersionId};
|
|
||||||
use serde::{de::IgnoredAny, Deserialize};
|
|
||||||
use tracing::warn;
|
|
||||||
|
|
||||||
mod proxy;
|
|
||||||
|
|
||||||
use self::proxy::ProxyConfig;
|
|
||||||
|
|
||||||
#[derive(Clone, Debug, Deserialize)]
|
|
||||||
pub struct Config {
|
|
||||||
#[serde(default = "default_address")]
|
|
||||||
pub address: IpAddr,
|
|
||||||
#[serde(default = "default_port")]
|
|
||||||
pub port: u16,
|
|
||||||
pub tls: Option<TlsConfig>,
|
|
||||||
|
|
||||||
pub server_name: OwnedServerName,
|
|
||||||
#[serde(default = "default_database_backend")]
|
|
||||||
pub database_backend: String,
|
|
||||||
pub database_path: String,
|
|
||||||
#[serde(default = "default_db_cache_capacity_mb")]
|
|
||||||
pub db_cache_capacity_mb: f64,
|
|
||||||
#[serde(default = "true_fn")]
|
|
||||||
pub enable_lightning_bolt: bool,
|
|
||||||
#[serde(default = "true_fn")]
|
|
||||||
pub allow_check_for_updates: bool,
|
|
||||||
#[serde(default = "default_conduit_cache_capacity_modifier")]
|
|
||||||
pub conduit_cache_capacity_modifier: f64,
|
|
||||||
#[serde(default = "default_rocksdb_max_open_files")]
|
|
||||||
pub rocksdb_max_open_files: i32,
|
|
||||||
#[serde(default = "default_pdu_cache_capacity")]
|
|
||||||
pub pdu_cache_capacity: u32,
|
|
||||||
#[serde(default = "default_cleanup_second_interval")]
|
|
||||||
pub cleanup_second_interval: u32,
|
|
||||||
#[serde(default = "default_max_request_size")]
|
|
||||||
pub max_request_size: u32,
|
|
||||||
#[serde(default = "default_max_concurrent_requests")]
|
|
||||||
pub max_concurrent_requests: u16,
|
|
||||||
#[serde(default = "default_max_fetch_prev_events")]
|
|
||||||
pub max_fetch_prev_events: u16,
|
|
||||||
#[serde(default = "false_fn")]
|
|
||||||
pub allow_registration: bool,
|
|
||||||
pub registration_token: Option<String>,
|
|
||||||
#[serde(default = "true_fn")]
|
|
||||||
pub allow_encryption: bool,
|
|
||||||
#[serde(default = "false_fn")]
|
|
||||||
pub allow_federation: bool,
|
|
||||||
#[serde(default = "true_fn")]
|
|
||||||
pub allow_room_creation: bool,
|
|
||||||
#[serde(default = "true_fn")]
|
|
||||||
pub allow_unstable_room_versions: bool,
|
|
||||||
#[serde(default = "default_default_room_version")]
|
|
||||||
pub default_room_version: RoomVersionId,
|
|
||||||
pub well_known_client: Option<String>,
|
|
||||||
#[serde(default = "false_fn")]
|
|
||||||
pub allow_jaeger: bool,
|
|
||||||
#[serde(default = "false_fn")]
|
|
||||||
pub tracing_flame: bool,
|
|
||||||
#[serde(default)]
|
|
||||||
pub proxy: ProxyConfig,
|
|
||||||
pub jwt_secret: Option<String>,
|
|
||||||
#[serde(default = "default_trusted_servers")]
|
|
||||||
pub trusted_servers: Vec<OwnedServerName>,
|
|
||||||
#[serde(default = "default_log")]
|
|
||||||
pub log: String,
|
|
||||||
#[serde(default)]
|
|
||||||
pub turn_username: String,
|
|
||||||
#[serde(default)]
|
|
||||||
pub turn_password: String,
|
|
||||||
#[serde(default = "Vec::new")]
|
|
||||||
pub turn_uris: Vec<String>,
|
|
||||||
#[serde(default)]
|
|
||||||
pub turn_secret: String,
|
|
||||||
#[serde(default = "default_turn_ttl")]
|
|
||||||
pub turn_ttl: u64,
|
|
||||||
|
|
||||||
pub emergency_password: Option<String>,
|
|
||||||
|
|
||||||
#[serde(flatten)]
|
|
||||||
pub catchall: BTreeMap<String, IgnoredAny>,
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Clone, Debug, Deserialize)]
|
|
||||||
pub struct TlsConfig {
|
|
||||||
pub certs: String,
|
|
||||||
pub key: String,
|
|
||||||
}
|
|
||||||
|
|
||||||
const DEPRECATED_KEYS: &[&str] = &["cache_capacity"];
|
|
||||||
|
|
||||||
impl Config {
|
|
||||||
pub fn warn_deprecated(&self) {
|
|
||||||
let mut was_deprecated = false;
|
|
||||||
for key in self
|
|
||||||
.catchall
|
|
||||||
.keys()
|
|
||||||
.filter(|key| DEPRECATED_KEYS.iter().any(|s| s == key))
|
|
||||||
{
|
|
||||||
warn!("Config parameter {} is deprecated", key);
|
|
||||||
was_deprecated = true;
|
|
||||||
}
|
|
||||||
|
|
||||||
if was_deprecated {
|
|
||||||
warn!("Read conduit documentation and check your configuration if any new configuration parameters should be adjusted");
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl fmt::Display for Config {
|
|
||||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
|
||||||
// Prepare a list of config values to show
|
|
||||||
let lines = [
|
|
||||||
("Server name", self.server_name.host()),
|
|
||||||
("Database backend", &self.database_backend),
|
|
||||||
("Database path", &self.database_path),
|
|
||||||
(
|
|
||||||
"Database cache capacity (MB)",
|
|
||||||
&self.db_cache_capacity_mb.to_string(),
|
|
||||||
),
|
|
||||||
(
|
|
||||||
"Cache capacity modifier",
|
|
||||||
&self.conduit_cache_capacity_modifier.to_string(),
|
|
||||||
),
|
|
||||||
#[cfg(feature = "rocksdb")]
|
|
||||||
(
|
|
||||||
"Maximum open files for RocksDB",
|
|
||||||
&self.rocksdb_max_open_files.to_string(),
|
|
||||||
),
|
|
||||||
("PDU cache capacity", &self.pdu_cache_capacity.to_string()),
|
|
||||||
(
|
|
||||||
"Cleanup interval in seconds",
|
|
||||||
&self.cleanup_second_interval.to_string(),
|
|
||||||
),
|
|
||||||
("Maximum request size", &self.max_request_size.to_string()),
|
|
||||||
(
|
|
||||||
"Maximum concurrent requests",
|
|
||||||
&self.max_concurrent_requests.to_string(),
|
|
||||||
),
|
|
||||||
("Allow registration", &self.allow_registration.to_string()),
|
|
||||||
(
|
|
||||||
"Enabled lightning bolt",
|
|
||||||
&self.enable_lightning_bolt.to_string(),
|
|
||||||
),
|
|
||||||
("Allow encryption", &self.allow_encryption.to_string()),
|
|
||||||
("Allow federation", &self.allow_federation.to_string()),
|
|
||||||
("Allow room creation", &self.allow_room_creation.to_string()),
|
|
||||||
(
|
|
||||||
"JWT secret",
|
|
||||||
match self.jwt_secret {
|
|
||||||
Some(_) => "set",
|
|
||||||
None => "not set",
|
|
||||||
},
|
|
||||||
),
|
|
||||||
("Trusted servers", {
|
|
||||||
let mut lst = vec![];
|
|
||||||
for server in &self.trusted_servers {
|
|
||||||
lst.push(server.host());
|
|
||||||
}
|
|
||||||
&lst.join(", ")
|
|
||||||
}),
|
|
||||||
(
|
|
||||||
"TURN username",
|
|
||||||
if self.turn_username.is_empty() {
|
|
||||||
"not set"
|
|
||||||
} else {
|
|
||||||
&self.turn_username
|
|
||||||
},
|
|
||||||
),
|
|
||||||
("TURN password", {
|
|
||||||
if self.turn_password.is_empty() {
|
|
||||||
"not set"
|
|
||||||
} else {
|
|
||||||
"set"
|
|
||||||
}
|
|
||||||
}),
|
|
||||||
("TURN secret", {
|
|
||||||
if self.turn_secret.is_empty() {
|
|
||||||
"not set"
|
|
||||||
} else {
|
|
||||||
"set"
|
|
||||||
}
|
|
||||||
}),
|
|
||||||
("Turn TTL", &self.turn_ttl.to_string()),
|
|
||||||
("Turn URIs", {
|
|
||||||
let mut lst = vec![];
|
|
||||||
for item in self.turn_uris.iter().cloned().enumerate() {
|
|
||||||
let (_, uri): (usize, String) = item;
|
|
||||||
lst.push(uri);
|
|
||||||
}
|
|
||||||
&lst.join(", ")
|
|
||||||
}),
|
|
||||||
];
|
|
||||||
|
|
||||||
let mut msg: String = "Active config values:\n\n".to_owned();
|
|
||||||
|
|
||||||
for line in lines.into_iter().enumerate() {
|
|
||||||
msg += &format!("{}: {}\n", line.1 .0, line.1 .1);
|
|
||||||
}
|
|
||||||
|
|
||||||
write!(f, "{msg}")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fn false_fn() -> bool {
|
|
||||||
false
|
|
||||||
}
|
|
||||||
|
|
||||||
fn true_fn() -> bool {
|
|
||||||
true
|
|
||||||
}
|
|
||||||
|
|
||||||
fn default_address() -> IpAddr {
|
|
||||||
Ipv4Addr::LOCALHOST.into()
|
|
||||||
}
|
|
||||||
|
|
||||||
fn default_port() -> u16 {
|
|
||||||
8000
|
|
||||||
}
|
|
||||||
|
|
||||||
fn default_database_backend() -> String {
|
|
||||||
"sqlite".to_owned()
|
|
||||||
}
|
|
||||||
|
|
||||||
fn default_db_cache_capacity_mb() -> f64 {
|
|
||||||
300.0
|
|
||||||
}
|
|
||||||
|
|
||||||
fn default_conduit_cache_capacity_modifier() -> f64 {
|
|
||||||
1.0
|
|
||||||
}
|
|
||||||
|
|
||||||
fn default_rocksdb_max_open_files() -> i32 {
|
|
||||||
1000
|
|
||||||
}
|
|
||||||
|
|
||||||
fn default_pdu_cache_capacity() -> u32 {
|
|
||||||
150_000
|
|
||||||
}
|
|
||||||
|
|
||||||
fn default_cleanup_second_interval() -> u32 {
|
|
||||||
60 // every minute
|
|
||||||
}
|
|
||||||
|
|
||||||
fn default_max_request_size() -> u32 {
|
|
||||||
20 * 1024 * 1024 // Default to 20 MB
|
|
||||||
}
|
|
||||||
|
|
||||||
fn default_max_concurrent_requests() -> u16 {
|
|
||||||
100
|
|
||||||
}
|
|
||||||
|
|
||||||
fn default_max_fetch_prev_events() -> u16 {
|
|
||||||
100_u16
|
|
||||||
}
|
|
||||||
|
|
||||||
fn default_trusted_servers() -> Vec<OwnedServerName> {
|
|
||||||
vec![OwnedServerName::try_from("matrix.org").unwrap()]
|
|
||||||
}
|
|
||||||
|
|
||||||
fn default_log() -> String {
|
|
||||||
"warn,state_res=warn,_=off,sled=off".to_owned()
|
|
||||||
}
|
|
||||||
|
|
||||||
fn default_turn_ttl() -> u64 {
|
|
||||||
60 * 60 * 24
|
|
||||||
}
|
|
||||||
|
|
||||||
// I know, it's a great name
|
|
||||||
pub fn default_default_room_version() -> RoomVersionId {
|
|
||||||
RoomVersionId::V9
|
|
||||||
}
|
|
|
@ -10,13 +10,13 @@ use crate::Result;
|
||||||
/// ```
|
/// ```
|
||||||
/// - Global proxy
|
/// - Global proxy
|
||||||
/// ```toml
|
/// ```toml
|
||||||
/// [global.proxy]
|
/// [proxy]
|
||||||
/// global = { url = "socks5h://localhost:9050" }
|
/// global = { url = "socks5h://localhost:9050" }
|
||||||
/// ```
|
/// ```
|
||||||
/// - Proxy some domains
|
/// - Proxy some domains
|
||||||
/// ```toml
|
/// ```toml
|
||||||
/// [global.proxy]
|
/// [proxy]
|
||||||
/// [[global.proxy.by_domain]]
|
/// [[proxy.by_domain]]
|
||||||
/// url = "socks5h://localhost:9050"
|
/// url = "socks5h://localhost:9050"
|
||||||
/// include = ["*.onion", "matrix.myspecial.onion"]
|
/// include = ["*.onion", "matrix.myspecial.onion"]
|
||||||
/// exclude = ["*.myspecial.onion"]
|
/// exclude = ["*.myspecial.onion"]
|
||||||
|
@ -29,9 +29,7 @@ use crate::Result;
|
||||||
/// would be used for `ordinary.onion`, `matrix.myspecial.onion`, but not `hello.myspecial.onion`.
|
/// would be used for `ordinary.onion`, `matrix.myspecial.onion`, but not `hello.myspecial.onion`.
|
||||||
#[derive(Clone, Debug, Deserialize)]
|
#[derive(Clone, Debug, Deserialize)]
|
||||||
#[serde(rename_all = "snake_case")]
|
#[serde(rename_all = "snake_case")]
|
||||||
#[derive(Default)]
|
|
||||||
pub enum ProxyConfig {
|
pub enum ProxyConfig {
|
||||||
#[default]
|
|
||||||
None,
|
None,
|
||||||
Global {
|
Global {
|
||||||
#[serde(deserialize_with = "crate::utils::deserialize_from_str")]
|
#[serde(deserialize_with = "crate::utils::deserialize_from_str")]
|
||||||
|
@ -50,6 +48,11 @@ impl ProxyConfig {
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
impl Default for ProxyConfig {
|
||||||
|
fn default() -> Self {
|
||||||
|
ProxyConfig::None
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
#[derive(Clone, Debug, Deserialize)]
|
#[derive(Clone, Debug, Deserialize)]
|
||||||
pub struct PartialProxyConfig {
|
pub struct PartialProxyConfig {
|
||||||
|
|
966
src/database.rs
Normal file
966
src/database.rs
Normal file
|
@ -0,0 +1,966 @@
|
||||||
|
pub mod abstraction;
|
||||||
|
|
||||||
|
pub mod account_data;
|
||||||
|
pub mod admin;
|
||||||
|
pub mod appservice;
|
||||||
|
pub mod globals;
|
||||||
|
pub mod key_backups;
|
||||||
|
pub mod media;
|
||||||
|
pub mod pusher;
|
||||||
|
pub mod rooms;
|
||||||
|
pub mod sending;
|
||||||
|
pub mod transaction_ids;
|
||||||
|
pub mod uiaa;
|
||||||
|
pub mod users;
|
||||||
|
|
||||||
|
use crate::{utils, Config, Error, Result};
|
||||||
|
use abstraction::DatabaseEngine;
|
||||||
|
use directories::ProjectDirs;
|
||||||
|
use lru_cache::LruCache;
|
||||||
|
use rocket::{
|
||||||
|
futures::{channel::mpsc, stream::FuturesUnordered, StreamExt},
|
||||||
|
outcome::{try_outcome, IntoOutcome},
|
||||||
|
request::{FromRequest, Request},
|
||||||
|
Shutdown, State,
|
||||||
|
};
|
||||||
|
use ruma::{DeviceId, EventId, RoomId, UserId};
|
||||||
|
use std::{
|
||||||
|
collections::{BTreeMap, HashMap, HashSet},
|
||||||
|
fs::{self, remove_dir_all},
|
||||||
|
io::Write,
|
||||||
|
mem::size_of,
|
||||||
|
ops::Deref,
|
||||||
|
path::Path,
|
||||||
|
sync::{Arc, Mutex, RwLock},
|
||||||
|
};
|
||||||
|
use tokio::sync::{OwnedRwLockReadGuard, RwLock as TokioRwLock, Semaphore};
|
||||||
|
use tracing::{debug, error, info, warn};
|
||||||
|
|
||||||
|
use self::admin::create_admin_room;
|
||||||
|
|
||||||
|
pub struct Database {
|
||||||
|
_db: Arc<dyn DatabaseEngine>,
|
||||||
|
pub globals: globals::Globals,
|
||||||
|
pub users: users::Users,
|
||||||
|
pub uiaa: uiaa::Uiaa,
|
||||||
|
pub rooms: rooms::Rooms,
|
||||||
|
pub account_data: account_data::AccountData,
|
||||||
|
pub media: media::Media,
|
||||||
|
pub key_backups: key_backups::KeyBackups,
|
||||||
|
pub transaction_ids: transaction_ids::TransactionIds,
|
||||||
|
pub sending: sending::Sending,
|
||||||
|
pub admin: admin::Admin,
|
||||||
|
pub appservice: appservice::Appservice,
|
||||||
|
pub pusher: pusher::PushData,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Database {
|
||||||
|
/// Tries to remove the old database but ignores all errors.
|
||||||
|
pub fn try_remove(server_name: &str) -> Result<()> {
|
||||||
|
let mut path = ProjectDirs::from("xyz", "koesters", "conduit")
|
||||||
|
.ok_or_else(|| Error::bad_config("The OS didn't return a valid home directory path."))?
|
||||||
|
.data_dir()
|
||||||
|
.to_path_buf();
|
||||||
|
path.push(server_name);
|
||||||
|
let _ = remove_dir_all(path);
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
fn check_db_setup(config: &Config) -> Result<()> {
|
||||||
|
let path = Path::new(&config.database_path);
|
||||||
|
|
||||||
|
let sled_exists = path.join("db").exists();
|
||||||
|
let sqlite_exists = path.join("conduit.db").exists();
|
||||||
|
let rocksdb_exists = path.join("IDENTITY").exists();
|
||||||
|
|
||||||
|
let mut count = 0;
|
||||||
|
|
||||||
|
if sled_exists {
|
||||||
|
count += 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
if sqlite_exists {
|
||||||
|
count += 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
if rocksdb_exists {
|
||||||
|
count += 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
if count > 1 {
|
||||||
|
warn!("Multiple databases at database_path detected");
|
||||||
|
return Ok(());
|
||||||
|
}
|
||||||
|
|
||||||
|
if sled_exists && config.database_backend != "sled" {
|
||||||
|
return Err(Error::bad_config(
|
||||||
|
"Found sled at database_path, but is not specified in config.",
|
||||||
|
));
|
||||||
|
}
|
||||||
|
|
||||||
|
if sqlite_exists && config.database_backend != "sqlite" {
|
||||||
|
return Err(Error::bad_config(
|
||||||
|
"Found sqlite at database_path, but is not specified in config.",
|
||||||
|
));
|
||||||
|
}
|
||||||
|
|
||||||
|
if rocksdb_exists && config.database_backend != "rocksdb" {
|
||||||
|
return Err(Error::bad_config(
|
||||||
|
"Found rocksdb at database_path, but is not specified in config.",
|
||||||
|
));
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Load an existing database or create a new one.
|
||||||
|
pub async fn load_or_create(config: &Config) -> Result<Arc<TokioRwLock<Self>>> {
|
||||||
|
Self::check_db_setup(config)?;
|
||||||
|
|
||||||
|
if !Path::new(&config.database_path).exists() {
|
||||||
|
std::fs::create_dir_all(&config.database_path)
|
||||||
|
.map_err(|_| Error::BadConfig("Database folder doesn't exists and couldn't be created (e.g. due to missing permissions). Please create the database folder yourself."))?;
|
||||||
|
}
|
||||||
|
|
||||||
|
let builder: Arc<dyn DatabaseEngine> = match &*config.database_backend {
|
||||||
|
"sqlite" => {
|
||||||
|
#[cfg(not(feature = "sqlite"))]
|
||||||
|
return Err(Error::BadConfig("Database backend not found."));
|
||||||
|
#[cfg(feature = "sqlite")]
|
||||||
|
Arc::new(Arc::<abstraction::sqlite::Engine>::open(config)?)
|
||||||
|
}
|
||||||
|
"rocksdb" => {
|
||||||
|
#[cfg(not(feature = "rocksdb"))]
|
||||||
|
return Err(Error::BadConfig("Database backend not found."));
|
||||||
|
#[cfg(feature = "rocksdb")]
|
||||||
|
Arc::new(Arc::<abstraction::rocksdb::Engine>::open(config)?)
|
||||||
|
}
|
||||||
|
"persy" => {
|
||||||
|
#[cfg(not(feature = "persy"))]
|
||||||
|
return Err(Error::BadConfig("Database backend not found."));
|
||||||
|
#[cfg(feature = "persy")]
|
||||||
|
Arc::new(Arc::<abstraction::persy::Engine>::open(config)?)
|
||||||
|
}
|
||||||
|
_ => {
|
||||||
|
return Err(Error::BadConfig("Database backend not found."));
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
if config.max_request_size < 1024 {
|
||||||
|
eprintln!("ERROR: Max request size is less than 1KB. Please increase it.");
|
||||||
|
}
|
||||||
|
|
||||||
|
let (admin_sender, admin_receiver) = mpsc::unbounded();
|
||||||
|
let (sending_sender, sending_receiver) = mpsc::unbounded();
|
||||||
|
|
||||||
|
let db = Arc::new(TokioRwLock::from(Self {
|
||||||
|
_db: builder.clone(),
|
||||||
|
users: users::Users {
|
||||||
|
userid_password: builder.open_tree("userid_password")?,
|
||||||
|
userid_displayname: builder.open_tree("userid_displayname")?,
|
||||||
|
userid_avatarurl: builder.open_tree("userid_avatarurl")?,
|
||||||
|
userid_blurhash: builder.open_tree("userid_blurhash")?,
|
||||||
|
userdeviceid_token: builder.open_tree("userdeviceid_token")?,
|
||||||
|
userdeviceid_metadata: builder.open_tree("userdeviceid_metadata")?,
|
||||||
|
userid_devicelistversion: builder.open_tree("userid_devicelistversion")?,
|
||||||
|
token_userdeviceid: builder.open_tree("token_userdeviceid")?,
|
||||||
|
onetimekeyid_onetimekeys: builder.open_tree("onetimekeyid_onetimekeys")?,
|
||||||
|
userid_lastonetimekeyupdate: builder.open_tree("userid_lastonetimekeyupdate")?,
|
||||||
|
keychangeid_userid: builder.open_tree("keychangeid_userid")?,
|
||||||
|
keyid_key: builder.open_tree("keyid_key")?,
|
||||||
|
userid_masterkeyid: builder.open_tree("userid_masterkeyid")?,
|
||||||
|
userid_selfsigningkeyid: builder.open_tree("userid_selfsigningkeyid")?,
|
||||||
|
userid_usersigningkeyid: builder.open_tree("userid_usersigningkeyid")?,
|
||||||
|
userfilterid_filter: builder.open_tree("userfilterid_filter")?,
|
||||||
|
todeviceid_events: builder.open_tree("todeviceid_events")?,
|
||||||
|
},
|
||||||
|
uiaa: uiaa::Uiaa {
|
||||||
|
userdevicesessionid_uiaainfo: builder.open_tree("userdevicesessionid_uiaainfo")?,
|
||||||
|
userdevicesessionid_uiaarequest: RwLock::new(BTreeMap::new()),
|
||||||
|
},
|
||||||
|
rooms: rooms::Rooms {
|
||||||
|
edus: rooms::RoomEdus {
|
||||||
|
readreceiptid_readreceipt: builder.open_tree("readreceiptid_readreceipt")?,
|
||||||
|
roomuserid_privateread: builder.open_tree("roomuserid_privateread")?, // "Private" read receipt
|
||||||
|
roomuserid_lastprivatereadupdate: builder
|
||||||
|
.open_tree("roomuserid_lastprivatereadupdate")?,
|
||||||
|
typingid_userid: builder.open_tree("typingid_userid")?,
|
||||||
|
roomid_lasttypingupdate: builder.open_tree("roomid_lasttypingupdate")?,
|
||||||
|
presenceid_presence: builder.open_tree("presenceid_presence")?,
|
||||||
|
userid_lastpresenceupdate: builder.open_tree("userid_lastpresenceupdate")?,
|
||||||
|
},
|
||||||
|
pduid_pdu: builder.open_tree("pduid_pdu")?,
|
||||||
|
eventid_pduid: builder.open_tree("eventid_pduid")?,
|
||||||
|
roomid_pduleaves: builder.open_tree("roomid_pduleaves")?,
|
||||||
|
|
||||||
|
alias_roomid: builder.open_tree("alias_roomid")?,
|
||||||
|
aliasid_alias: builder.open_tree("aliasid_alias")?,
|
||||||
|
publicroomids: builder.open_tree("publicroomids")?,
|
||||||
|
|
||||||
|
tokenids: builder.open_tree("tokenids")?,
|
||||||
|
|
||||||
|
roomserverids: builder.open_tree("roomserverids")?,
|
||||||
|
serverroomids: builder.open_tree("serverroomids")?,
|
||||||
|
userroomid_joined: builder.open_tree("userroomid_joined")?,
|
||||||
|
roomuserid_joined: builder.open_tree("roomuserid_joined")?,
|
||||||
|
roomid_joinedcount: builder.open_tree("roomid_joinedcount")?,
|
||||||
|
roomid_invitedcount: builder.open_tree("roomid_invitedcount")?,
|
||||||
|
roomuseroncejoinedids: builder.open_tree("roomuseroncejoinedids")?,
|
||||||
|
userroomid_invitestate: builder.open_tree("userroomid_invitestate")?,
|
||||||
|
roomuserid_invitecount: builder.open_tree("roomuserid_invitecount")?,
|
||||||
|
userroomid_leftstate: builder.open_tree("userroomid_leftstate")?,
|
||||||
|
roomuserid_leftcount: builder.open_tree("roomuserid_leftcount")?,
|
||||||
|
|
||||||
|
lazyloadedids: builder.open_tree("lazyloadedids")?,
|
||||||
|
|
||||||
|
userroomid_notificationcount: builder.open_tree("userroomid_notificationcount")?,
|
||||||
|
userroomid_highlightcount: builder.open_tree("userroomid_highlightcount")?,
|
||||||
|
|
||||||
|
statekey_shortstatekey: builder.open_tree("statekey_shortstatekey")?,
|
||||||
|
shortstatekey_statekey: builder.open_tree("shortstatekey_statekey")?,
|
||||||
|
|
||||||
|
shorteventid_authchain: builder.open_tree("shorteventid_authchain")?,
|
||||||
|
|
||||||
|
roomid_shortroomid: builder.open_tree("roomid_shortroomid")?,
|
||||||
|
|
||||||
|
shortstatehash_statediff: builder.open_tree("shortstatehash_statediff")?,
|
||||||
|
eventid_shorteventid: builder.open_tree("eventid_shorteventid")?,
|
||||||
|
shorteventid_eventid: builder.open_tree("shorteventid_eventid")?,
|
||||||
|
shorteventid_shortstatehash: builder.open_tree("shorteventid_shortstatehash")?,
|
||||||
|
roomid_shortstatehash: builder.open_tree("roomid_shortstatehash")?,
|
||||||
|
roomsynctoken_shortstatehash: builder.open_tree("roomsynctoken_shortstatehash")?,
|
||||||
|
statehash_shortstatehash: builder.open_tree("statehash_shortstatehash")?,
|
||||||
|
|
||||||
|
eventid_outlierpdu: builder.open_tree("eventid_outlierpdu")?,
|
||||||
|
softfailedeventids: builder.open_tree("softfailedeventids")?,
|
||||||
|
|
||||||
|
referencedevents: builder.open_tree("referencedevents")?,
|
||||||
|
pdu_cache: Mutex::new(LruCache::new(
|
||||||
|
config
|
||||||
|
.pdu_cache_capacity
|
||||||
|
.try_into()
|
||||||
|
.expect("pdu cache capacity fits into usize"),
|
||||||
|
)),
|
||||||
|
auth_chain_cache: Mutex::new(LruCache::new(
|
||||||
|
(100_000.0 * config.conduit_cache_capacity_modifier) as usize,
|
||||||
|
)),
|
||||||
|
shorteventid_cache: Mutex::new(LruCache::new(
|
||||||
|
(100_000.0 * config.conduit_cache_capacity_modifier) as usize,
|
||||||
|
)),
|
||||||
|
eventidshort_cache: Mutex::new(LruCache::new(
|
||||||
|
(100_000.0 * config.conduit_cache_capacity_modifier) as usize,
|
||||||
|
)),
|
||||||
|
shortstatekey_cache: Mutex::new(LruCache::new(
|
||||||
|
(100_000.0 * config.conduit_cache_capacity_modifier) as usize,
|
||||||
|
)),
|
||||||
|
statekeyshort_cache: Mutex::new(LruCache::new(
|
||||||
|
(100_000.0 * config.conduit_cache_capacity_modifier) as usize,
|
||||||
|
)),
|
||||||
|
our_real_users_cache: RwLock::new(HashMap::new()),
|
||||||
|
appservice_in_room_cache: RwLock::new(HashMap::new()),
|
||||||
|
lazy_load_waiting: Mutex::new(HashMap::new()),
|
||||||
|
stateinfo_cache: Mutex::new(LruCache::new(
|
||||||
|
(100.0 * config.conduit_cache_capacity_modifier) as usize,
|
||||||
|
)),
|
||||||
|
},
|
||||||
|
account_data: account_data::AccountData {
|
||||||
|
roomuserdataid_accountdata: builder.open_tree("roomuserdataid_accountdata")?,
|
||||||
|
roomusertype_roomuserdataid: builder.open_tree("roomusertype_roomuserdataid")?,
|
||||||
|
},
|
||||||
|
media: media::Media {
|
||||||
|
mediaid_file: builder.open_tree("mediaid_file")?,
|
||||||
|
},
|
||||||
|
key_backups: key_backups::KeyBackups {
|
||||||
|
backupid_algorithm: builder.open_tree("backupid_algorithm")?,
|
||||||
|
backupid_etag: builder.open_tree("backupid_etag")?,
|
||||||
|
backupkeyid_backup: builder.open_tree("backupkeyid_backup")?,
|
||||||
|
},
|
||||||
|
transaction_ids: transaction_ids::TransactionIds {
|
||||||
|
userdevicetxnid_response: builder.open_tree("userdevicetxnid_response")?,
|
||||||
|
},
|
||||||
|
sending: sending::Sending {
|
||||||
|
servername_educount: builder.open_tree("servername_educount")?,
|
||||||
|
servernameevent_data: builder.open_tree("servernameevent_data")?,
|
||||||
|
servercurrentevent_data: builder.open_tree("servercurrentevent_data")?,
|
||||||
|
maximum_requests: Arc::new(Semaphore::new(config.max_concurrent_requests as usize)),
|
||||||
|
sender: sending_sender,
|
||||||
|
},
|
||||||
|
admin: admin::Admin {
|
||||||
|
sender: admin_sender,
|
||||||
|
},
|
||||||
|
appservice: appservice::Appservice {
|
||||||
|
cached_registrations: Arc::new(RwLock::new(HashMap::new())),
|
||||||
|
id_appserviceregistrations: builder.open_tree("id_appserviceregistrations")?,
|
||||||
|
},
|
||||||
|
pusher: pusher::PushData {
|
||||||
|
senderkey_pusher: builder.open_tree("senderkey_pusher")?,
|
||||||
|
},
|
||||||
|
globals: globals::Globals::load(
|
||||||
|
builder.open_tree("global")?,
|
||||||
|
builder.open_tree("server_signingkeys")?,
|
||||||
|
config.clone(),
|
||||||
|
)?,
|
||||||
|
}));
|
||||||
|
|
||||||
|
let guard = db.read().await;
|
||||||
|
|
||||||
|
// Matrix resource ownership is based on the server name; changing it
|
||||||
|
// requires recreating the database from scratch.
|
||||||
|
if guard.users.count()? > 0 {
|
||||||
|
let conduit_user =
|
||||||
|
UserId::parse_with_server_name("conduit", guard.globals.server_name())
|
||||||
|
.expect("@conduit:server_name is valid");
|
||||||
|
|
||||||
|
if !guard.users.exists(&conduit_user)? {
|
||||||
|
error!(
|
||||||
|
"The {} server user does not exist, and the database is not new.",
|
||||||
|
conduit_user
|
||||||
|
);
|
||||||
|
return Err(Error::bad_database(
|
||||||
|
"Cannot reuse an existing database after changing the server name, please delete the old one first."
|
||||||
|
));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// If the database has any data, perform data migrations before starting
|
||||||
|
let latest_database_version = 11;
|
||||||
|
|
||||||
|
if guard.users.count()? > 0 {
|
||||||
|
let db = &*guard;
|
||||||
|
// MIGRATIONS
|
||||||
|
if db.globals.database_version()? < 1 {
|
||||||
|
for (roomserverid, _) in db.rooms.roomserverids.iter() {
|
||||||
|
let mut parts = roomserverid.split(|&b| b == 0xff);
|
||||||
|
let room_id = parts.next().expect("split always returns one element");
|
||||||
|
let servername = match parts.next() {
|
||||||
|
Some(s) => s,
|
||||||
|
None => {
|
||||||
|
error!("Migration: Invalid roomserverid in db.");
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
};
|
||||||
|
let mut serverroomid = servername.to_vec();
|
||||||
|
serverroomid.push(0xff);
|
||||||
|
serverroomid.extend_from_slice(room_id);
|
||||||
|
|
||||||
|
db.rooms.serverroomids.insert(&serverroomid, &[])?;
|
||||||
|
}
|
||||||
|
|
||||||
|
db.globals.bump_database_version(1)?;
|
||||||
|
|
||||||
|
warn!("Migration: 0 -> 1 finished");
|
||||||
|
}
|
||||||
|
|
||||||
|
if db.globals.database_version()? < 2 {
|
||||||
|
// We accidentally inserted hashed versions of "" into the db instead of just ""
|
||||||
|
for (userid, password) in db.users.userid_password.iter() {
|
||||||
|
let password = utils::string_from_bytes(&password);
|
||||||
|
|
||||||
|
let empty_hashed_password = password.map_or(false, |password| {
|
||||||
|
argon2::verify_encoded(&password, b"").unwrap_or(false)
|
||||||
|
});
|
||||||
|
|
||||||
|
if empty_hashed_password {
|
||||||
|
db.users.userid_password.insert(&userid, b"")?;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
db.globals.bump_database_version(2)?;
|
||||||
|
|
||||||
|
warn!("Migration: 1 -> 2 finished");
|
||||||
|
}
|
||||||
|
|
||||||
|
if db.globals.database_version()? < 3 {
|
||||||
|
// Move media to filesystem
|
||||||
|
for (key, content) in db.media.mediaid_file.iter() {
|
||||||
|
if content.is_empty() {
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
|
||||||
|
let path = db.globals.get_media_file(&key);
|
||||||
|
let mut file = fs::File::create(path)?;
|
||||||
|
file.write_all(&content)?;
|
||||||
|
db.media.mediaid_file.insert(&key, &[])?;
|
||||||
|
}
|
||||||
|
|
||||||
|
db.globals.bump_database_version(3)?;
|
||||||
|
|
||||||
|
warn!("Migration: 2 -> 3 finished");
|
||||||
|
}
|
||||||
|
|
||||||
|
if db.globals.database_version()? < 4 {
|
||||||
|
// Add federated users to db as deactivated
|
||||||
|
for our_user in db.users.iter() {
|
||||||
|
let our_user = our_user?;
|
||||||
|
if db.users.is_deactivated(&our_user)? {
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
for room in db.rooms.rooms_joined(&our_user) {
|
||||||
|
for user in db.rooms.room_members(&room?) {
|
||||||
|
let user = user?;
|
||||||
|
if user.server_name() != db.globals.server_name() {
|
||||||
|
println!("Migration: Creating user {}", user);
|
||||||
|
db.users.create(&user, None)?;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
db.globals.bump_database_version(4)?;
|
||||||
|
|
||||||
|
warn!("Migration: 3 -> 4 finished");
|
||||||
|
}
|
||||||
|
|
||||||
|
if db.globals.database_version()? < 5 {
|
||||||
|
// Upgrade user data store
|
||||||
|
for (roomuserdataid, _) in db.account_data.roomuserdataid_accountdata.iter() {
|
||||||
|
let mut parts = roomuserdataid.split(|&b| b == 0xff);
|
||||||
|
let room_id = parts.next().unwrap();
|
||||||
|
let user_id = parts.next().unwrap();
|
||||||
|
let event_type = roomuserdataid.rsplit(|&b| b == 0xff).next().unwrap();
|
||||||
|
|
||||||
|
let mut key = room_id.to_vec();
|
||||||
|
key.push(0xff);
|
||||||
|
key.extend_from_slice(user_id);
|
||||||
|
key.push(0xff);
|
||||||
|
key.extend_from_slice(event_type);
|
||||||
|
|
||||||
|
db.account_data
|
||||||
|
.roomusertype_roomuserdataid
|
||||||
|
.insert(&key, &roomuserdataid)?;
|
||||||
|
}
|
||||||
|
|
||||||
|
db.globals.bump_database_version(5)?;
|
||||||
|
|
||||||
|
warn!("Migration: 4 -> 5 finished");
|
||||||
|
}
|
||||||
|
|
||||||
|
if db.globals.database_version()? < 6 {
|
||||||
|
// Set room member count
|
||||||
|
for (roomid, _) in db.rooms.roomid_shortstatehash.iter() {
|
||||||
|
let string = utils::string_from_bytes(&roomid).unwrap();
|
||||||
|
let room_id = <&RoomId>::try_from(string.as_str()).unwrap();
|
||||||
|
db.rooms.update_joined_count(room_id, &db)?;
|
||||||
|
}
|
||||||
|
|
||||||
|
db.globals.bump_database_version(6)?;
|
||||||
|
|
||||||
|
warn!("Migration: 5 -> 6 finished");
|
||||||
|
}
|
||||||
|
|
||||||
|
if db.globals.database_version()? < 7 {
|
||||||
|
// Upgrade state store
|
||||||
|
let mut last_roomstates: HashMap<Box<RoomId>, u64> = HashMap::new();
|
||||||
|
let mut current_sstatehash: Option<u64> = None;
|
||||||
|
let mut current_room = None;
|
||||||
|
let mut current_state = HashSet::new();
|
||||||
|
let mut counter = 0;
|
||||||
|
|
||||||
|
let mut handle_state =
|
||||||
|
|current_sstatehash: u64,
|
||||||
|
current_room: &RoomId,
|
||||||
|
current_state: HashSet<_>,
|
||||||
|
last_roomstates: &mut HashMap<_, _>| {
|
||||||
|
counter += 1;
|
||||||
|
println!("counter: {}", counter);
|
||||||
|
let last_roomsstatehash = last_roomstates.get(current_room);
|
||||||
|
|
||||||
|
let states_parents = last_roomsstatehash.map_or_else(
|
||||||
|
|| Ok(Vec::new()),
|
||||||
|
|&last_roomsstatehash| {
|
||||||
|
db.rooms.load_shortstatehash_info(dbg!(last_roomsstatehash))
|
||||||
|
},
|
||||||
|
)?;
|
||||||
|
|
||||||
|
let (statediffnew, statediffremoved) =
|
||||||
|
if let Some(parent_stateinfo) = states_parents.last() {
|
||||||
|
let statediffnew = current_state
|
||||||
|
.difference(&parent_stateinfo.1)
|
||||||
|
.copied()
|
||||||
|
.collect::<HashSet<_>>();
|
||||||
|
|
||||||
|
let statediffremoved = parent_stateinfo
|
||||||
|
.1
|
||||||
|
.difference(¤t_state)
|
||||||
|
.copied()
|
||||||
|
.collect::<HashSet<_>>();
|
||||||
|
|
||||||
|
(statediffnew, statediffremoved)
|
||||||
|
} else {
|
||||||
|
(current_state, HashSet::new())
|
||||||
|
};
|
||||||
|
|
||||||
|
db.rooms.save_state_from_diff(
|
||||||
|
dbg!(current_sstatehash),
|
||||||
|
statediffnew,
|
||||||
|
statediffremoved,
|
||||||
|
2, // every state change is 2 event changes on average
|
||||||
|
states_parents,
|
||||||
|
)?;
|
||||||
|
|
||||||
|
/*
|
||||||
|
let mut tmp = db.rooms.load_shortstatehash_info(¤t_sstatehash, &db)?;
|
||||||
|
let state = tmp.pop().unwrap();
|
||||||
|
println!(
|
||||||
|
"{}\t{}{:?}: {:?} + {:?} - {:?}",
|
||||||
|
current_room,
|
||||||
|
" ".repeat(tmp.len()),
|
||||||
|
utils::u64_from_bytes(¤t_sstatehash).unwrap(),
|
||||||
|
tmp.last().map(|b| utils::u64_from_bytes(&b.0).unwrap()),
|
||||||
|
state
|
||||||
|
.2
|
||||||
|
.iter()
|
||||||
|
.map(|b| utils::u64_from_bytes(&b[size_of::<u64>()..]).unwrap())
|
||||||
|
.collect::<Vec<_>>(),
|
||||||
|
state
|
||||||
|
.3
|
||||||
|
.iter()
|
||||||
|
.map(|b| utils::u64_from_bytes(&b[size_of::<u64>()..]).unwrap())
|
||||||
|
.collect::<Vec<_>>()
|
||||||
|
);
|
||||||
|
*/
|
||||||
|
|
||||||
|
Ok::<_, Error>(())
|
||||||
|
};
|
||||||
|
|
||||||
|
for (k, seventid) in db._db.open_tree("stateid_shorteventid")?.iter() {
|
||||||
|
let sstatehash = utils::u64_from_bytes(&k[0..size_of::<u64>()])
|
||||||
|
.expect("number of bytes is correct");
|
||||||
|
let sstatekey = k[size_of::<u64>()..].to_vec();
|
||||||
|
if Some(sstatehash) != current_sstatehash {
|
||||||
|
if let Some(current_sstatehash) = current_sstatehash {
|
||||||
|
handle_state(
|
||||||
|
current_sstatehash,
|
||||||
|
current_room.as_deref().unwrap(),
|
||||||
|
current_state,
|
||||||
|
&mut last_roomstates,
|
||||||
|
)?;
|
||||||
|
last_roomstates
|
||||||
|
.insert(current_room.clone().unwrap(), current_sstatehash);
|
||||||
|
}
|
||||||
|
current_state = HashSet::new();
|
||||||
|
current_sstatehash = Some(sstatehash);
|
||||||
|
|
||||||
|
let event_id = db
|
||||||
|
.rooms
|
||||||
|
.shorteventid_eventid
|
||||||
|
.get(&seventid)
|
||||||
|
.unwrap()
|
||||||
|
.unwrap();
|
||||||
|
let string = utils::string_from_bytes(&event_id).unwrap();
|
||||||
|
let event_id = <&EventId>::try_from(string.as_str()).unwrap();
|
||||||
|
let pdu = db.rooms.get_pdu(event_id).unwrap().unwrap();
|
||||||
|
|
||||||
|
if Some(&pdu.room_id) != current_room.as_ref() {
|
||||||
|
current_room = Some(pdu.room_id.clone());
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
let mut val = sstatekey;
|
||||||
|
val.extend_from_slice(&seventid);
|
||||||
|
current_state.insert(val.try_into().expect("size is correct"));
|
||||||
|
}
|
||||||
|
|
||||||
|
if let Some(current_sstatehash) = current_sstatehash {
|
||||||
|
handle_state(
|
||||||
|
current_sstatehash,
|
||||||
|
current_room.as_deref().unwrap(),
|
||||||
|
current_state,
|
||||||
|
&mut last_roomstates,
|
||||||
|
)?;
|
||||||
|
}
|
||||||
|
|
||||||
|
db.globals.bump_database_version(7)?;
|
||||||
|
|
||||||
|
warn!("Migration: 6 -> 7 finished");
|
||||||
|
}
|
||||||
|
|
||||||
|
if db.globals.database_version()? < 8 {
|
||||||
|
// Generate short room ids for all rooms
|
||||||
|
for (room_id, _) in db.rooms.roomid_shortstatehash.iter() {
|
||||||
|
let shortroomid = db.globals.next_count()?.to_be_bytes();
|
||||||
|
db.rooms.roomid_shortroomid.insert(&room_id, &shortroomid)?;
|
||||||
|
info!("Migration: 8");
|
||||||
|
}
|
||||||
|
// Update pduids db layout
|
||||||
|
let mut batch = db.rooms.pduid_pdu.iter().filter_map(|(key, v)| {
|
||||||
|
if !key.starts_with(b"!") {
|
||||||
|
return None;
|
||||||
|
}
|
||||||
|
let mut parts = key.splitn(2, |&b| b == 0xff);
|
||||||
|
let room_id = parts.next().unwrap();
|
||||||
|
let count = parts.next().unwrap();
|
||||||
|
|
||||||
|
let short_room_id = db
|
||||||
|
.rooms
|
||||||
|
.roomid_shortroomid
|
||||||
|
.get(room_id)
|
||||||
|
.unwrap()
|
||||||
|
.expect("shortroomid should exist");
|
||||||
|
|
||||||
|
let mut new_key = short_room_id;
|
||||||
|
new_key.extend_from_slice(count);
|
||||||
|
|
||||||
|
Some((new_key, v))
|
||||||
|
});
|
||||||
|
|
||||||
|
db.rooms.pduid_pdu.insert_batch(&mut batch)?;
|
||||||
|
|
||||||
|
let mut batch2 = db.rooms.eventid_pduid.iter().filter_map(|(k, value)| {
|
||||||
|
if !value.starts_with(b"!") {
|
||||||
|
return None;
|
||||||
|
}
|
||||||
|
let mut parts = value.splitn(2, |&b| b == 0xff);
|
||||||
|
let room_id = parts.next().unwrap();
|
||||||
|
let count = parts.next().unwrap();
|
||||||
|
|
||||||
|
let short_room_id = db
|
||||||
|
.rooms
|
||||||
|
.roomid_shortroomid
|
||||||
|
.get(room_id)
|
||||||
|
.unwrap()
|
||||||
|
.expect("shortroomid should exist");
|
||||||
|
|
||||||
|
let mut new_value = short_room_id;
|
||||||
|
new_value.extend_from_slice(count);
|
||||||
|
|
||||||
|
Some((k, new_value))
|
||||||
|
});
|
||||||
|
|
||||||
|
db.rooms.eventid_pduid.insert_batch(&mut batch2)?;
|
||||||
|
|
||||||
|
db.globals.bump_database_version(8)?;
|
||||||
|
|
||||||
|
warn!("Migration: 7 -> 8 finished");
|
||||||
|
}
|
||||||
|
|
||||||
|
if db.globals.database_version()? < 9 {
|
||||||
|
// Update tokenids db layout
|
||||||
|
let mut iter = db
|
||||||
|
.rooms
|
||||||
|
.tokenids
|
||||||
|
.iter()
|
||||||
|
.filter_map(|(key, _)| {
|
||||||
|
if !key.starts_with(b"!") {
|
||||||
|
return None;
|
||||||
|
}
|
||||||
|
let mut parts = key.splitn(4, |&b| b == 0xff);
|
||||||
|
let room_id = parts.next().unwrap();
|
||||||
|
let word = parts.next().unwrap();
|
||||||
|
let _pdu_id_room = parts.next().unwrap();
|
||||||
|
let pdu_id_count = parts.next().unwrap();
|
||||||
|
|
||||||
|
let short_room_id = db
|
||||||
|
.rooms
|
||||||
|
.roomid_shortroomid
|
||||||
|
.get(room_id)
|
||||||
|
.unwrap()
|
||||||
|
.expect("shortroomid should exist");
|
||||||
|
let mut new_key = short_room_id;
|
||||||
|
new_key.extend_from_slice(word);
|
||||||
|
new_key.push(0xff);
|
||||||
|
new_key.extend_from_slice(pdu_id_count);
|
||||||
|
println!("old {:?}", key);
|
||||||
|
println!("new {:?}", new_key);
|
||||||
|
Some((new_key, Vec::new()))
|
||||||
|
})
|
||||||
|
.peekable();
|
||||||
|
|
||||||
|
while iter.peek().is_some() {
|
||||||
|
db.rooms
|
||||||
|
.tokenids
|
||||||
|
.insert_batch(&mut iter.by_ref().take(1000))?;
|
||||||
|
println!("smaller batch done");
|
||||||
|
}
|
||||||
|
|
||||||
|
info!("Deleting starts");
|
||||||
|
|
||||||
|
let batch2: Vec<_> = db
|
||||||
|
.rooms
|
||||||
|
.tokenids
|
||||||
|
.iter()
|
||||||
|
.filter_map(|(key, _)| {
|
||||||
|
if key.starts_with(b"!") {
|
||||||
|
println!("del {:?}", key);
|
||||||
|
Some(key)
|
||||||
|
} else {
|
||||||
|
None
|
||||||
|
}
|
||||||
|
})
|
||||||
|
.collect();
|
||||||
|
|
||||||
|
for key in batch2 {
|
||||||
|
println!("del");
|
||||||
|
db.rooms.tokenids.remove(&key)?;
|
||||||
|
}
|
||||||
|
|
||||||
|
db.globals.bump_database_version(9)?;
|
||||||
|
|
||||||
|
warn!("Migration: 8 -> 9 finished");
|
||||||
|
}
|
||||||
|
|
||||||
|
if db.globals.database_version()? < 10 {
|
||||||
|
// Add other direction for shortstatekeys
|
||||||
|
for (statekey, shortstatekey) in db.rooms.statekey_shortstatekey.iter() {
|
||||||
|
db.rooms
|
||||||
|
.shortstatekey_statekey
|
||||||
|
.insert(&shortstatekey, &statekey)?;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Force E2EE device list updates so we can send them over federation
|
||||||
|
for user_id in db.users.iter().filter_map(|r| r.ok()) {
|
||||||
|
db.users
|
||||||
|
.mark_device_key_update(&user_id, &db.rooms, &db.globals)?;
|
||||||
|
}
|
||||||
|
|
||||||
|
db.globals.bump_database_version(10)?;
|
||||||
|
|
||||||
|
warn!("Migration: 9 -> 10 finished");
|
||||||
|
}
|
||||||
|
|
||||||
|
if db.globals.database_version()? < 11 {
|
||||||
|
db._db
|
||||||
|
.open_tree("userdevicesessionid_uiaarequest")?
|
||||||
|
.clear()?;
|
||||||
|
db.globals.bump_database_version(11)?;
|
||||||
|
|
||||||
|
warn!("Migration: 10 -> 11 finished");
|
||||||
|
}
|
||||||
|
|
||||||
|
assert_eq!(11, latest_database_version);
|
||||||
|
|
||||||
|
info!(
|
||||||
|
"Loaded {} database with version {}",
|
||||||
|
config.database_backend, latest_database_version
|
||||||
|
);
|
||||||
|
} else {
|
||||||
|
guard
|
||||||
|
.globals
|
||||||
|
.bump_database_version(latest_database_version)?;
|
||||||
|
|
||||||
|
// Create the admin room and server user on first run
|
||||||
|
create_admin_room(&guard).await?;
|
||||||
|
|
||||||
|
warn!(
|
||||||
|
"Created new {} database with version {}",
|
||||||
|
config.database_backend, latest_database_version
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
// This data is probably outdated
|
||||||
|
guard.rooms.edus.presenceid_presence.clear()?;
|
||||||
|
|
||||||
|
guard.admin.start_handler(Arc::clone(&db), admin_receiver);
|
||||||
|
guard
|
||||||
|
.sending
|
||||||
|
.start_handler(Arc::clone(&db), sending_receiver);
|
||||||
|
|
||||||
|
drop(guard);
|
||||||
|
|
||||||
|
Self::start_cleanup_task(Arc::clone(&db), config).await;
|
||||||
|
|
||||||
|
Ok(db)
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(feature = "conduit_bin")]
|
||||||
|
pub async fn start_on_shutdown_tasks(db: Arc<TokioRwLock<Self>>, shutdown: Shutdown) {
|
||||||
|
tokio::spawn(async move {
|
||||||
|
shutdown.await;
|
||||||
|
|
||||||
|
info!(target: "shutdown-sync", "Received shutdown notification, notifying sync helpers...");
|
||||||
|
|
||||||
|
db.read().await.globals.rotate.fire();
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
pub async fn watch(&self, user_id: &UserId, device_id: &DeviceId) {
|
||||||
|
let userid_bytes = user_id.as_bytes().to_vec();
|
||||||
|
let mut userid_prefix = userid_bytes.clone();
|
||||||
|
userid_prefix.push(0xff);
|
||||||
|
|
||||||
|
let mut userdeviceid_prefix = userid_prefix.clone();
|
||||||
|
userdeviceid_prefix.extend_from_slice(device_id.as_bytes());
|
||||||
|
userdeviceid_prefix.push(0xff);
|
||||||
|
|
||||||
|
let mut futures = FuturesUnordered::new();
|
||||||
|
|
||||||
|
// Return when *any* user changed his key
|
||||||
|
// TODO: only send for user they share a room with
|
||||||
|
futures.push(
|
||||||
|
self.users
|
||||||
|
.todeviceid_events
|
||||||
|
.watch_prefix(&userdeviceid_prefix),
|
||||||
|
);
|
||||||
|
|
||||||
|
futures.push(self.rooms.userroomid_joined.watch_prefix(&userid_prefix));
|
||||||
|
futures.push(
|
||||||
|
self.rooms
|
||||||
|
.userroomid_invitestate
|
||||||
|
.watch_prefix(&userid_prefix),
|
||||||
|
);
|
||||||
|
futures.push(self.rooms.userroomid_leftstate.watch_prefix(&userid_prefix));
|
||||||
|
futures.push(
|
||||||
|
self.rooms
|
||||||
|
.userroomid_notificationcount
|
||||||
|
.watch_prefix(&userid_prefix),
|
||||||
|
);
|
||||||
|
futures.push(
|
||||||
|
self.rooms
|
||||||
|
.userroomid_highlightcount
|
||||||
|
.watch_prefix(&userid_prefix),
|
||||||
|
);
|
||||||
|
|
||||||
|
// Events for rooms we are in
|
||||||
|
for room_id in self.rooms.rooms_joined(user_id).filter_map(|r| r.ok()) {
|
||||||
|
let short_roomid = self
|
||||||
|
.rooms
|
||||||
|
.get_shortroomid(&room_id)
|
||||||
|
.ok()
|
||||||
|
.flatten()
|
||||||
|
.expect("room exists")
|
||||||
|
.to_be_bytes()
|
||||||
|
.to_vec();
|
||||||
|
|
||||||
|
let roomid_bytes = room_id.as_bytes().to_vec();
|
||||||
|
let mut roomid_prefix = roomid_bytes.clone();
|
||||||
|
roomid_prefix.push(0xff);
|
||||||
|
|
||||||
|
// PDUs
|
||||||
|
futures.push(self.rooms.pduid_pdu.watch_prefix(&short_roomid));
|
||||||
|
|
||||||
|
// EDUs
|
||||||
|
futures.push(
|
||||||
|
self.rooms
|
||||||
|
.edus
|
||||||
|
.roomid_lasttypingupdate
|
||||||
|
.watch_prefix(&roomid_bytes),
|
||||||
|
);
|
||||||
|
|
||||||
|
futures.push(
|
||||||
|
self.rooms
|
||||||
|
.edus
|
||||||
|
.readreceiptid_readreceipt
|
||||||
|
.watch_prefix(&roomid_prefix),
|
||||||
|
);
|
||||||
|
|
||||||
|
// Key changes
|
||||||
|
futures.push(self.users.keychangeid_userid.watch_prefix(&roomid_prefix));
|
||||||
|
|
||||||
|
// Room account data
|
||||||
|
let mut roomuser_prefix = roomid_prefix.clone();
|
||||||
|
roomuser_prefix.extend_from_slice(&userid_prefix);
|
||||||
|
|
||||||
|
futures.push(
|
||||||
|
self.account_data
|
||||||
|
.roomusertype_roomuserdataid
|
||||||
|
.watch_prefix(&roomuser_prefix),
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
let mut globaluserdata_prefix = vec![0xff];
|
||||||
|
globaluserdata_prefix.extend_from_slice(&userid_prefix);
|
||||||
|
|
||||||
|
futures.push(
|
||||||
|
self.account_data
|
||||||
|
.roomusertype_roomuserdataid
|
||||||
|
.watch_prefix(&globaluserdata_prefix),
|
||||||
|
);
|
||||||
|
|
||||||
|
// More key changes (used when user is not joined to any rooms)
|
||||||
|
futures.push(self.users.keychangeid_userid.watch_prefix(&userid_prefix));
|
||||||
|
|
||||||
|
// One time keys
|
||||||
|
futures.push(
|
||||||
|
self.users
|
||||||
|
.userid_lastonetimekeyupdate
|
||||||
|
.watch_prefix(&userid_bytes),
|
||||||
|
);
|
||||||
|
|
||||||
|
futures.push(Box::pin(self.globals.rotate.watch()));
|
||||||
|
|
||||||
|
// Wait until one of them finds something
|
||||||
|
futures.next().await;
|
||||||
|
}
|
||||||
|
|
||||||
|
#[tracing::instrument(skip(self))]
|
||||||
|
pub fn flush(&self) -> Result<()> {
|
||||||
|
let start = std::time::Instant::now();
|
||||||
|
|
||||||
|
let res = self._db.flush();
|
||||||
|
|
||||||
|
debug!("flush: took {:?}", start.elapsed());
|
||||||
|
|
||||||
|
res
|
||||||
|
}
|
||||||
|
|
||||||
|
#[tracing::instrument(skip(db, config))]
|
||||||
|
pub async fn start_cleanup_task(db: Arc<TokioRwLock<Self>>, config: &Config) {
|
||||||
|
use tokio::time::interval;
|
||||||
|
|
||||||
|
#[cfg(unix)]
|
||||||
|
use tokio::signal::unix::{signal, SignalKind};
|
||||||
|
use tracing::info;
|
||||||
|
|
||||||
|
use std::time::{Duration, Instant};
|
||||||
|
|
||||||
|
let timer_interval = Duration::from_secs(config.cleanup_second_interval as u64);
|
||||||
|
|
||||||
|
tokio::spawn(async move {
|
||||||
|
let mut i = interval(timer_interval);
|
||||||
|
#[cfg(unix)]
|
||||||
|
let mut s = signal(SignalKind::hangup()).unwrap();
|
||||||
|
|
||||||
|
loop {
|
||||||
|
#[cfg(unix)]
|
||||||
|
tokio::select! {
|
||||||
|
_ = i.tick() => {
|
||||||
|
info!("cleanup: Timer ticked");
|
||||||
|
}
|
||||||
|
_ = s.recv() => {
|
||||||
|
info!("cleanup: Received SIGHUP");
|
||||||
|
}
|
||||||
|
};
|
||||||
|
#[cfg(not(unix))]
|
||||||
|
{
|
||||||
|
i.tick().await;
|
||||||
|
info!("cleanup: Timer ticked")
|
||||||
|
}
|
||||||
|
|
||||||
|
let start = Instant::now();
|
||||||
|
if let Err(e) = db.read().await._db.cleanup() {
|
||||||
|
error!("cleanup: Errored: {}", e);
|
||||||
|
} else {
|
||||||
|
info!("cleanup: Finished in {:?}", start.elapsed());
|
||||||
|
}
|
||||||
|
}
|
||||||
|
});
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub struct DatabaseGuard(OwnedRwLockReadGuard<Database>);
|
||||||
|
|
||||||
|
impl Deref for DatabaseGuard {
|
||||||
|
type Target = OwnedRwLockReadGuard<Database>;
|
||||||
|
|
||||||
|
fn deref(&self) -> &Self::Target {
|
||||||
|
&self.0
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[rocket::async_trait]
|
||||||
|
impl<'r> FromRequest<'r> for DatabaseGuard {
|
||||||
|
type Error = ();
|
||||||
|
|
||||||
|
async fn from_request(req: &'r Request<'_>) -> rocket::request::Outcome<Self, ()> {
|
||||||
|
let db = try_outcome!(req.guard::<&State<Arc<TokioRwLock<Database>>>>().await);
|
||||||
|
|
||||||
|
Ok(DatabaseGuard(Arc::clone(db).read_owned().await)).or_forward(())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl From<OwnedRwLockReadGuard<Database>> for DatabaseGuard {
|
||||||
|
fn from(val: OwnedRwLockReadGuard<Database>) -> Self {
|
||||||
|
Self(val)
|
||||||
|
}
|
||||||
|
}
|
|
@ -26,11 +26,11 @@ pub mod persy;
|
||||||
))]
|
))]
|
||||||
pub mod watchers;
|
pub mod watchers;
|
||||||
|
|
||||||
pub trait KeyValueDatabaseEngine: Send + Sync {
|
pub trait DatabaseEngine: Send + Sync {
|
||||||
fn open(config: &Config) -> Result<Self>
|
fn open(config: &Config) -> Result<Self>
|
||||||
where
|
where
|
||||||
Self: Sized;
|
Self: Sized;
|
||||||
fn open_tree(&self, name: &'static str) -> Result<Arc<dyn KvTree>>;
|
fn open_tree(&self, name: &'static str) -> Result<Arc<dyn Tree>>;
|
||||||
fn flush(&self) -> Result<()>;
|
fn flush(&self) -> Result<()>;
|
||||||
fn cleanup(&self) -> Result<()> {
|
fn cleanup(&self) -> Result<()> {
|
||||||
Ok(())
|
Ok(())
|
||||||
|
@ -38,10 +38,9 @@ pub trait KeyValueDatabaseEngine: Send + Sync {
|
||||||
fn memory_usage(&self) -> Result<String> {
|
fn memory_usage(&self) -> Result<String> {
|
||||||
Ok("Current database engine does not support memory usage reporting.".to_owned())
|
Ok("Current database engine does not support memory usage reporting.".to_owned())
|
||||||
}
|
}
|
||||||
fn clear_caches(&self) {}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
pub trait KvTree: Send + Sync {
|
pub trait Tree: Send + Sync {
|
||||||
fn get(&self, key: &[u8]) -> Result<Option<Vec<u8>>>;
|
fn get(&self, key: &[u8]) -> Result<Option<Vec<u8>>>;
|
||||||
|
|
||||||
fn insert(&self, key: &[u8], value: &[u8]) -> Result<()>;
|
fn insert(&self, key: &[u8], value: &[u8]) -> Result<()>;
|
||||||
|
|
|
@ -69,6 +69,7 @@ impl DatabaseEngine for Engine {
|
||||||
}
|
}
|
||||||
|
|
||||||
impl EngineTree {
|
impl EngineTree {
|
||||||
|
#[tracing::instrument(skip(self, tree, from, backwards))]
|
||||||
fn iter_from_thread(
|
fn iter_from_thread(
|
||||||
&self,
|
&self,
|
||||||
tree: Arc<heed::UntypedDatabase>,
|
tree: Arc<heed::UntypedDatabase>,
|
||||||
|
@ -93,6 +94,7 @@ impl EngineTree {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[tracing::instrument(skip(tree, txn, from, backwards))]
|
||||||
fn iter_from_thread_work(
|
fn iter_from_thread_work(
|
||||||
tree: Arc<heed::UntypedDatabase>,
|
tree: Arc<heed::UntypedDatabase>,
|
||||||
txn: &heed::RoTxn<'_>,
|
txn: &heed::RoTxn<'_>,
|
||||||
|
@ -124,6 +126,7 @@ fn iter_from_thread_work(
|
||||||
}
|
}
|
||||||
|
|
||||||
impl Tree for EngineTree {
|
impl Tree for EngineTree {
|
||||||
|
#[tracing::instrument(skip(self, key))]
|
||||||
fn get(&self, key: &[u8]) -> Result<Option<Vec<u8>>> {
|
fn get(&self, key: &[u8]) -> Result<Option<Vec<u8>>> {
|
||||||
let txn = self.engine.env.read_txn().map_err(convert_error)?;
|
let txn = self.engine.env.read_txn().map_err(convert_error)?;
|
||||||
Ok(self
|
Ok(self
|
||||||
|
@ -133,6 +136,7 @@ impl Tree for EngineTree {
|
||||||
.map(|s| s.to_vec()))
|
.map(|s| s.to_vec()))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[tracing::instrument(skip(self, key, value))]
|
||||||
fn insert(&self, key: &[u8], value: &[u8]) -> Result<()> {
|
fn insert(&self, key: &[u8], value: &[u8]) -> Result<()> {
|
||||||
let mut txn = self.engine.env.write_txn().map_err(convert_error)?;
|
let mut txn = self.engine.env.write_txn().map_err(convert_error)?;
|
||||||
self.tree
|
self.tree
|
||||||
|
@ -143,6 +147,7 @@ impl Tree for EngineTree {
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[tracing::instrument(skip(self, key))]
|
||||||
fn remove(&self, key: &[u8]) -> Result<()> {
|
fn remove(&self, key: &[u8]) -> Result<()> {
|
||||||
let mut txn = self.engine.env.write_txn().map_err(convert_error)?;
|
let mut txn = self.engine.env.write_txn().map_err(convert_error)?;
|
||||||
self.tree.delete(&mut txn, &key).map_err(convert_error)?;
|
self.tree.delete(&mut txn, &key).map_err(convert_error)?;
|
||||||
|
@ -150,10 +155,12 @@ impl Tree for EngineTree {
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[tracing::instrument(skip(self))]
|
||||||
fn iter<'a>(&'a self) -> Box<dyn Iterator<Item = (Vec<u8>, Vec<u8>)> + Send + 'a> {
|
fn iter<'a>(&'a self) -> Box<dyn Iterator<Item = (Vec<u8>, Vec<u8>)> + Send + 'a> {
|
||||||
self.iter_from(&[], false)
|
self.iter_from(&[], false)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[tracing::instrument(skip(self, from, backwards))]
|
||||||
fn iter_from(
|
fn iter_from(
|
||||||
&self,
|
&self,
|
||||||
from: &[u8],
|
from: &[u8],
|
||||||
|
@ -162,6 +169,7 @@ impl Tree for EngineTree {
|
||||||
self.iter_from_thread(Arc::clone(&self.tree), from.to_vec(), backwards)
|
self.iter_from_thread(Arc::clone(&self.tree), from.to_vec(), backwards)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[tracing::instrument(skip(self, key))]
|
||||||
fn increment(&self, key: &[u8]) -> Result<Vec<u8>> {
|
fn increment(&self, key: &[u8]) -> Result<Vec<u8>> {
|
||||||
let mut txn = self.engine.env.write_txn().map_err(convert_error)?;
|
let mut txn = self.engine.env.write_txn().map_err(convert_error)?;
|
||||||
|
|
||||||
|
@ -178,6 +186,7 @@ impl Tree for EngineTree {
|
||||||
Ok(new)
|
Ok(new)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[tracing::instrument(skip(self, prefix))]
|
||||||
fn scan_prefix<'a>(
|
fn scan_prefix<'a>(
|
||||||
&'a self,
|
&'a self,
|
||||||
prefix: Vec<u8>,
|
prefix: Vec<u8>,
|
||||||
|
@ -188,6 +197,7 @@ impl Tree for EngineTree {
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[tracing::instrument(skip(self, prefix))]
|
||||||
fn watch_prefix<'a>(&'a self, prefix: &[u8]) -> Pin<Box<dyn Future<Output = ()> + Send + 'a>> {
|
fn watch_prefix<'a>(&'a self, prefix: &[u8]) -> Pin<Box<dyn Future<Output = ()> + Send + 'a>> {
|
||||||
self.watchers.watch(prefix)
|
self.watchers.watch(prefix)
|
||||||
}
|
}
|
||||||
|
|
Some files were not shown because too many files have changed in this diff Show more
Loading…
Reference in a new issue