diff --git a/.envrc b/.envrc index b9238c3b..3550a30f 100644 --- a/.envrc +++ b/.envrc @@ -1,3 +1 @@ -#!/usr/bin/env bash - use flake diff --git a/.gitignore b/.gitignore index 1b5d37b8..19f05ce3 100644 --- a/.gitignore +++ b/.gitignore @@ -31,6 +31,7 @@ modules.xml ### vscode ### .vscode/* +!.vscode/settings.json !.vscode/tasks.json !.vscode/launch.json !.vscode/extensions.json @@ -61,7 +62,6 @@ conduit.db # Etc. **/*.rs.bk -cached_target # Nix artifacts /result* diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index f5ab4246..91258ea5 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -16,16 +16,16 @@ variables: .docker-shared-settings: stage: "build docker image" + image: jdrouet/docker-with-buildx:20.10.21-0.9.1 needs: [] - tags: [ "docker" ] + tags: ["docker"] variables: # Docker in Docker: - DOCKER_BUILDKIT: 1 - image: - name: docker.io/docker + DOCKER_HOST: tcp://docker:2375/ + DOCKER_TLS_CERTDIR: "" + DOCKER_DRIVER: overlay2 services: - - name: docker.io/docker:dind - alias: docker + - docker:dind script: - apk add openssh-client - eval $(ssh-agent -s) @@ -41,7 +41,6 @@ variables: --pull --tag "$CI_REGISTRY_IMAGE/temporary-ci-images:$CI_JOB_ID" --push - --provenance=false --file "Dockerfile" . # Build multiplatform image to deb stage and extract their .deb files: - > @@ -49,7 +48,6 @@ variables: --platform "linux/arm/v7,linux/arm64,linux/amd64" --target "packager-result" --output="type=local,dest=/tmp/build-output" - --provenance=false --file "Dockerfile" . # Build multiplatform image to binary stage and extract their binaries: - > @@ -57,7 +55,6 @@ variables: --platform "linux/arm/v7,linux/arm64,linux/amd64" --target "builder-result" --output="type=local,dest=/tmp/build-output" - --provenance=false --file "Dockerfile" . # Copy to GitLab container registry: - > @@ -103,20 +100,13 @@ docker:tags: TAG: "matrix-conduit:$CI_COMMIT_TAG" -docker build debugging: - extends: .docker-shared-settings - rules: - - if: "$CI_MERGE_REQUEST_TITLE =~ /.*[Dd]ocker.*/" - variables: - TAG: "matrix-conduit-docker-tests:latest" - # --------------------------------------------------------------------- # # Run tests # # --------------------------------------------------------------------- # cargo check: stage: test - image: docker.io/rust:1.70.0-bullseye + image: docker.io/rust:1.64.0-bullseye needs: [] interruptible: true before_script: @@ -141,7 +131,11 @@ test:cargo: - apt-get update && apt-get -y --no-install-recommends install libclang-dev # dependency for rocksdb script: - rustc --version && cargo --version # Print version info for debugging - - "cargo test --color always --workspace --verbose --locked --no-fail-fast" + - "cargo test --color always --workspace --verbose --locked --no-fail-fast -- -Z unstable-options --format json | gitlab-report -p test > $CI_PROJECT_DIR/report.xml" + artifacts: + when: always + reports: + junit: report.xml test:clippy: extends: .test-shared-settings diff --git a/.vscode/settings.json b/.vscode/settings.json new file mode 100644 index 00000000..95294d48 --- /dev/null +++ b/.vscode/settings.json @@ -0,0 +1,3 @@ +{ + "rust-analyzer.procMacro.enable": true, +} \ No newline at end of file diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md deleted file mode 100644 index 1b060350..00000000 --- a/CODE_OF_CONDUCT.md +++ /dev/null @@ -1,134 +0,0 @@ - -# Contributor Covenant Code of Conduct - -## Our Pledge - -We as members, contributors, and leaders pledge to make participation in our -community a harassment-free experience for everyone, regardless of age, body -size, 
visible or invisible disability, ethnicity, sex characteristics, gender -identity and expression, level of experience, education, socio-economic status, -nationality, personal appearance, race, caste, color, religion, or sexual -identity and orientation. - -We pledge to act and interact in ways that contribute to an open, welcoming, -diverse, inclusive, and healthy community. - -## Our Standards - -Examples of behavior that contributes to a positive environment for our -community include: - -* Demonstrating empathy and kindness toward other people -* Being respectful of differing opinions, viewpoints, and experiences -* Giving and gracefully accepting constructive feedback -* Accepting responsibility and apologizing to those affected by our mistakes, - and learning from the experience -* Focusing on what is best not just for us as individuals, but for the overall - community - -Examples of unacceptable behavior include: - -* The use of sexualized language or imagery, and sexual attention or advances of - any kind -* Trolling, insulting or derogatory comments, and personal or political attacks -* Public or private harassment -* Publishing others' private information, such as a physical or email address, - without their explicit permission -* Other conduct which could reasonably be considered inappropriate in a - professional setting - -## Enforcement Responsibilities - -Community leaders are responsible for clarifying and enforcing our standards of -acceptable behavior and will take appropriate and fair corrective action in -response to any behavior that they deem inappropriate, threatening, offensive, -or harmful. - -Community leaders have the right and responsibility to remove, edit, or reject -comments, commits, code, wiki edits, issues, and other contributions that are -not aligned to this Code of Conduct, and will communicate reasons for moderation -decisions when appropriate. - -## Scope - -This Code of Conduct applies within all community spaces, and also applies when -an individual is officially representing the community in public spaces. -Examples of representing our community include using an official e-mail address, -posting via an official social media account, or acting as an appointed -representative at an online or offline event. - -## Enforcement - -Instances of abusive, harassing, or otherwise unacceptable behavior may be -reported to the community leaders responsible for enforcement over email at -coc@koesters.xyz or over Matrix at @timo:conduit.rs. -All complaints will be reviewed and investigated promptly and fairly. - -All community leaders are obligated to respect the privacy and security of the -reporter of any incident. - -## Enforcement Guidelines - -Community leaders will follow these Community Impact Guidelines in determining -the consequences for any action they deem in violation of this Code of Conduct: - -### 1. Correction - -**Community Impact**: Use of inappropriate language or other behavior deemed -unprofessional or unwelcome in the community. - -**Consequence**: A private, written warning from community leaders, providing -clarity around the nature of the violation and an explanation of why the -behavior was inappropriate. A public apology may be requested. - -### 2. Warning - -**Community Impact**: A violation through a single incident or series of -actions. - -**Consequence**: A warning with consequences for continued behavior. 
No -interaction with the people involved, including unsolicited interaction with -those enforcing the Code of Conduct, for a specified period of time. This -includes avoiding interactions in community spaces as well as external channels -like social media. Violating these terms may lead to a temporary or permanent -ban. - -### 3. Temporary Ban - -**Community Impact**: A serious violation of community standards, including -sustained inappropriate behavior. - -**Consequence**: A temporary ban from any sort of interaction or public -communication with the community for a specified period of time. No public or -private interaction with the people involved, including unsolicited interaction -with those enforcing the Code of Conduct, is allowed during this period. -Violating these terms may lead to a permanent ban. - -### 4. Permanent Ban - -**Community Impact**: Demonstrating a pattern of violation of community -standards, including sustained inappropriate behavior, harassment of an -individual, or aggression toward or disparagement of classes of individuals. - -**Consequence**: A permanent ban from any sort of public interaction within the -community. - -## Attribution - -This Code of Conduct is adapted from the [Contributor Covenant][homepage], -version 2.1, available at -[https://www.contributor-covenant.org/version/2/1/code_of_conduct.html][v2.1]. - -Community Impact Guidelines were inspired by -[Mozilla's code of conduct enforcement ladder][Mozilla CoC]. - -For answers to common questions about this code of conduct, see the FAQ at -[https://www.contributor-covenant.org/faq][FAQ]. Translations are available at -[https://www.contributor-covenant.org/translations][translations]. - -[homepage]: https://www.contributor-covenant.org -[v2.1]: https://www.contributor-covenant.org/version/2/1/code_of_conduct.html -[Mozilla CoC]: https://github.com/mozilla/diversity -[FAQ]: https://www.contributor-covenant.org/faq -[translations]: https://www.contributor-covenant.org/translations - diff --git a/Cargo.lock b/Cargo.lock index 9c8596aa..db8ee9d0 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -10,59 +10,56 @@ checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" [[package]] name = "ahash" -version = "0.8.3" +version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c99f64d1e06488f620f932677e24bc6e2897582980441ae90a671415bd7ec2f" +checksum = "fcb51a0695d8f838b1ee009b3fbf66bda078cd64590202a864a8f3e8c4315c47" dependencies = [ - "cfg-if", + "getrandom 0.2.8", "once_cell", "version_check", ] [[package]] name = "aho-corasick" -version = "1.0.2" +version = "0.7.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43f6cb1bf222025340178f382c426f13757b2960e89779dfcb319c32542a5a41" +checksum = "cc936419f96fa211c1b9166887b38e5e40b19958e5b895be7c1f93adec7071ac" dependencies = [ "memchr", ] [[package]] -name = "allocator-api2" -version = "0.2.15" +name = "alloc-no-stdlib" +version = "2.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56fc6cf8dc8c4158eed8649f9b8b0ea1518eb62b544fe9490d66fa0b349eafe9" +checksum = "cc7bb162ec39d46ab1ca8c77bf72e890535becd1751bb45f64c597edb4c8c6b3" [[package]] -name = "anstyle" -version = "1.0.1" +name = "alloc-stdlib" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3a30da5c5f2d5e72842e00bcb57657162cdabef0931f40e2deb9b4140440cecd" +checksum = "94fb8275041c72129eb51b7d0322c29b8387a0386127718b096429201a5d6ece" 
+dependencies = [ + "alloc-no-stdlib", +] [[package]] name = "arc-swap" -version = "1.6.0" +version = "1.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bddcadddf5e9015d310179a59bb28c4d4b9920ad0f11e8e14dbadf654890c9a6" +checksum = "983cd8b9d4b02a6dc6ffa557262eb5858a27a0038ffffe21a0f133eaa819a164" [[package]] name = "arrayref" -version = "0.3.7" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b4930d2cb77ce62f89ee5d5289b4ac049559b1c45539271f5ed4fdc7db34545" +checksum = "a4c527152e37cf757a3f78aae5a06fbeefdb07ccc535c980a3208ee3060dd544" [[package]] name = "arrayvec" -version = "0.7.4" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96d30a06541fbafbc7f82ed10c06164cfbd2c401138f6addd8404629c4b16711" - -[[package]] -name = "as_variant" -version = "1.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f38fa22307249f86fb7fad906fcae77f2564caeb56d7209103c551cd1cf4798f" +checksum = "8da52d66c7071e2e3fa2a1e5c6d088fec47b593032b254f5e980de8ea54454d6" [[package]] name = "assign" @@ -71,21 +68,38 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5f093eed78becd229346bf859eec0aa4dd7ddde0757287b2b4107a1f09c80002" [[package]] -name = "async-trait" -version = "0.1.68" +name = "async-compression" +version = "0.3.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b9ccdd8f2a161be9bd5c023df56f1b2a0bd1d83872ae53b71a84a12c9bf6e842" +checksum = "942c7cd7ae39e91bde4820d74132e9862e62c2f386c3aa90ccf55949f5bad63a" +dependencies = [ + "brotli", + "flate2", + "futures-core", + "memchr", + "pin-project-lite", + "tokio", +] + +[[package]] +name = "async-trait" +version = "0.1.58" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e805d94e6b5001b651426cf4cd446b1ab5f319d27bab5c644f61de0a804360c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.21", + "syn", ] [[package]] name = "atomic" -version = "0.5.3" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c59bdb34bc650a32731b31bd8f0829cc15d24a708ee31559e0bb34f2bc320cba" +checksum = "b88d82667eca772c4aa12f0f1348b3ae643424c8876448f3f7bd5787032e234c" +dependencies = [ + "autocfg", +] [[package]] name = "autocfg" @@ -95,13 +109,13 @@ checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" [[package]] name = "axum" -version = "0.6.18" +version = "0.5.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8175979259124331c1d7bf6586ee7e0da434155e4b2d48ec2c8386281d8df39" +checksum = "acee9fd5073ab6b045a275b3e709c163dd36c90685219cb21804a147b58dba43" dependencies = [ "async-trait", "axum-core", - "bitflags 1.3.2", + "bitflags", "bytes", "futures-util", "headers", @@ -114,22 +128,22 @@ dependencies = [ "mime", "percent-encoding", "pin-project-lite", - "rustversion", "serde", "serde_json", - "serde_path_to_error", "serde_urlencoded", "sync_wrapper", + "tokio", "tower", + "tower-http", "tower-layer", "tower-service", ] [[package]] name = "axum-core" -version = "0.3.4" +version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "759fa577a247914fd3f7f76d62972792636412fbfd634cd452f6a385a74d2d2c" +checksum = "37e5939e02c56fecd5c017c37df4238c0a839fa76b7f97acdd7efb804fd181cc" dependencies = [ "async-trait", "bytes", @@ -137,16 +151,15 @@ dependencies = [ "http", "http-body", "mime", - "rustversion", "tower-layer", 
"tower-service", ] [[package]] name = "axum-server" -version = "0.5.1" +version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "447f28c85900215cc1bea282f32d4a2f22d55c5a300afdfbc661c8d6a632e063" +checksum = "8456dab8f11484979a86651da8e619b355ede5d61a160755155f6c344bd18c47" dependencies = [ "arc-swap", "bytes", @@ -155,10 +168,10 @@ dependencies = [ "http-body", "hyper", "pin-project-lite", - "rustls 0.21.2", - "rustls-pemfile 1.0.2", + "rustls", + "rustls-pemfile 1.0.1", "tokio", - "tokio-rustls 0.24.1", + "tokio-rustls", "tower-service", ] @@ -168,17 +181,11 @@ version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9e1b586273c5702936fe7b7d6896644d8be71e6314cfe09d3167c95f712589e8" -[[package]] -name = "base64" -version = "0.21.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "604178f6c5c21f02dc555784810edfb88d34ac2c73b2eae109655649ee73ce3d" - [[package]] name = "base64ct" -version = "1.6.0" +version = "1.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c3c1a368f70d6cf7302d78f8f7093da241fb8e8807c05cc9e51a125895a6d5b" +checksum = "b645a089122eccb6111b4f81cbc1a49f5900ac4666bb93ac027feaecf15607bf" [[package]] name = "bincode" @@ -191,23 +198,21 @@ dependencies = [ [[package]] name = "bindgen" -version = "0.65.1" +version = "0.59.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cfdf7b466f9a4903edc73f95d6d2bcd5baf8ae620638762244d3f60143643cc5" +checksum = "2bd2a9a458e8f4304c52c43ebb0cfbd520289f8379a52e329a38afda99bf8eb8" dependencies = [ - "bitflags 1.3.2", + "bitflags", "cexpr", "clang-sys", "lazy_static", "lazycell", "peeking_take_while", - "prettyplease", "proc-macro2", "quote", "regex", "rustc-hash", "shlex", - "syn 2.0.21", ] [[package]] @@ -216,43 +221,67 @@ version = "1.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" -[[package]] -name = "bitflags" -version = "2.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6dbe3c979c178231552ecba20214a8272df4e09f232a87aef4320cf06539aded" - [[package]] name = "blake2b_simd" -version = "1.0.1" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c2f0dc9a68c6317d884f97cc36cf5a3d20ba14ce404227df55e1af708ab04bc" +checksum = "72936ee4afc7f8f736d1c38383b56480b5497b4617b4a77bdbf1d2ababc76127" dependencies = [ "arrayref", "arrayvec", - "constant_time_eq 0.2.6", + "constant_time_eq", ] [[package]] name = "block-buffer" -version = "0.10.4" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3078c7629b62d3f0439517fa394996acacc5cbc91c5a20d8c658e77abd503a71" +checksum = "4152116fd6e9dadb291ae18fc1ec3575ed6d84c29642d97890f4b4a3417297e4" dependencies = [ "generic-array", ] [[package]] -name = "bumpalo" -version = "3.13.0" +name = "block-buffer" +version = "0.10.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a3e2c3daef883ecc1b5d58c15adae93470a91d425f3532ba1695849656af3fc1" +checksum = "69cce20737498f97b993470a6e536b8523f0af7892a4f928cceb1ac5e52ebe7e" +dependencies = [ + "generic-array", +] + +[[package]] +name = "brotli" +version = "3.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a1a0b1dbcc8ae29329621f8d4f0d835787c1c38bb1401979b49d13b0b305ff68" +dependencies = [ + "alloc-no-stdlib", + "alloc-stdlib", 
+ "brotli-decompressor", +] + +[[package]] +name = "brotli-decompressor" +version = "2.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "59ad2d4653bf5ca36ae797b1f4bb4dbddb60ce49ca4aed8a2ce4829f60425b80" +dependencies = [ + "alloc-no-stdlib", + "alloc-stdlib", +] + +[[package]] +name = "bumpalo" +version = "3.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "572f695136211188308f16ad2ca5c851a712c464060ae6974944458eb83880ba" [[package]] name = "bytemuck" -version = "1.13.1" +version = "1.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "17febce684fd15d89027105661fec94afb475cb995fbc59d2865198446ba2eea" +checksum = "aaa3a8d9a1ca92e282c96a32d6511b695d7d994d1d102ba85d279f9b2756947f" [[package]] name = "byteorder" @@ -262,26 +291,15 @@ checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610" [[package]] name = "bytes" -version = "1.4.0" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89b2fd2a0dcf38d7971e2194b6b6eebab45ae01067456a7fd93d5547a61b70be" - -[[package]] -name = "bzip2-sys" -version = "0.1.11+1.0.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "736a955f3fa7875102d57c82b8cac37ec45224a07fd32d58f9f7a186b6cd4cdc" -dependencies = [ - "cc", - "libc", - "pkg-config", -] +checksum = "dfb24e866b15a1af2a1b663f10c6b6b8f397a84aadb828f12e5b289ec23a3a3c" [[package]] name = "cc" -version = "1.0.79" +version = "1.0.77" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50d30906286121d95be3d479533b458f87493b30a4b5f79a607db8f5d11aa91f" +checksum = "e9f73505338f7d905b19d18738976aae232eb46b8efc15554ffc56deb5d9ebe4" dependencies = [ "jobserver", ] @@ -303,9 +321,9 @@ checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" [[package]] name = "clang-sys" -version = "1.6.1" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c688fc74432808e3eb684cae8830a86be1d66a2bd58e1f248ed0960a590baf6f" +checksum = "fa2e27ae6ab525c3d369ded447057bca5438d86dc3a68f6faafb8269ba82ebf3" dependencies = [ "glob", "libc", @@ -314,43 +332,37 @@ dependencies = [ [[package]] name = "clap" -version = "4.3.8" +version = "4.0.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d9394150f5b4273a1763355bd1c2ec54cc5a2593f790587bcd6b2c947cfa9211" +checksum = "0acbd8d28a0a60d7108d7ae850af6ba34cf2d1257fc646980e5f97ce14275966" dependencies = [ - "clap_builder", + "bitflags", "clap_derive", + "clap_lex", "once_cell", ] -[[package]] -name = "clap_builder" -version = "4.3.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a78fbdd3cc2914ddf37ba444114bc7765bbdcb55ec9cbe6fa054f0137400717" -dependencies = [ - "anstyle", - "bitflags 1.3.2", - "clap_lex", -] - [[package]] name = "clap_derive" -version = "4.3.2" +version = "4.0.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b8cd2b2a819ad6eec39e8f1d6b53001af1e5469f8c177579cdaeb313115b825f" +checksum = "0177313f9f02afc995627906bbd8967e2be069f5261954222dac78290c2b9014" dependencies = [ "heck", + "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.21", + "syn", ] [[package]] name = "clap_lex" -version = "0.5.0" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2da6da31387c7e4ef160ffab6d5e7f00c42626fe39aea70a7b0f1773f7dd6c1b" +checksum = 
"0d4198f73e42b4936b35b5bb248d81d2b595ecb170da0bac7655c54eedfa8da8" +dependencies = [ + "os_str_bytes", +] [[package]] name = "color_quant" @@ -360,12 +372,12 @@ checksum = "3d7b894f5411737b7867f4827955924d7c254fc9f4d91a6aad6b097804b1018b" [[package]] name = "conduit" -version = "0.7.0-alpha" +version = "0.4.0-next" dependencies = [ "async-trait", "axum", "axum-server", - "base64 0.21.2", + "base64", "bytes", "clap", "crossbeam", @@ -379,13 +391,12 @@ dependencies = [ "jsonwebtoken", "lazy_static", "lru-cache", - "nix", "num_cpus", "opentelemetry", "opentelemetry-jaeger", "parking_lot", "persy", - "rand", + "rand 0.8.5", "regex", "reqwest", "ring", @@ -393,9 +404,7 @@ dependencies = [ "ruma", "rusqlite", "rust-argon2", - "sd-notify", "serde", - "serde_html_form", "serde_json", "serde_yaml", "sha-1", @@ -409,22 +418,15 @@ dependencies = [ "tower-http", "tracing", "tracing-flame", - "tracing-opentelemetry", "tracing-subscriber", "trust-dns-resolver", ] [[package]] name = "const-oid" -version = "0.9.2" +version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "520fbf3c07483f94e3e3ca9d0cfd913d7718ef2483d2cfd91c0d9e91474ab913" - -[[package]] -name = "const_panic" -version = "0.2.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6051f239ecec86fde3410901ab7860d458d160371533842974fc61f96d15879b" +checksum = "cec318a675afcb6a1ea1d4340e2d377e56e47c266f28043ceccbf4412ddfdd3b" [[package]] name = "constant_time_eq" @@ -432,12 +434,6 @@ version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "245097e9a4535ee1e3e3931fcfcd55a796a44c643e8596ff6566d68f09b87bbc" -[[package]] -name = "constant_time_eq" -version = "0.2.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21a53c0a4d288377e7415b53dcfc3c04da5cdc2cc95c8d5ac178b58f0b861ad6" - [[package]] name = "core-foundation" version = "0.9.3" @@ -450,33 +446,33 @@ dependencies = [ [[package]] name = "core-foundation-sys" -version = "0.8.4" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e496a50fda8aacccc86d7529e2c1e0892dbd0f898a6b5645b5561b89c3210efa" +checksum = "5827cebf4670468b8772dd191856768aedcb1b0278a04f989f7766351917b9dc" [[package]] name = "cpufeatures" -version = "0.2.8" +version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03e69e28e9f7f77debdedbaafa2866e1de9ba56df55a8bd7cfc724c25a09987c" +checksum = "28d997bd5e24a5928dd43e46dc529867e207907fe0b239c3477d924f7f2ca320" dependencies = [ "libc", ] [[package]] name = "crc" -version = "3.0.1" +version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "86ec7a15cbe22e59248fc7eadb1907dab5ba09372595da4d73dd805ed4417dfe" +checksum = "49fc9a695bca7f35f5f4c15cddc84415f66a74ea78eef08e90c5024f2b540e23" dependencies = [ "crc-catalog", ] [[package]] name = "crc-catalog" -version = "2.2.0" +version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9cace84e55f07e7301bae1c519df89cdad8cc3cd868413d3fdbdeca9ff3db484" +checksum = "ccaeedb56da03b09f598226e25e80088cb4cd25f316e6e4df7d695f0feeb1403" [[package]] name = "crc32fast" @@ -503,9 +499,9 @@ dependencies = [ [[package]] name = "crossbeam-channel" -version = "0.5.8" +version = "0.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a33c2bf77f2df06183c3aa30d1e96c0695a313d4f9c453cc3762a6db39f99200" +checksum = 
"c2dd04ddaf88237dc3b8d8f9a3c1004b506b54b3313403944054d23c0870c521" dependencies = [ "cfg-if", "crossbeam-utils", @@ -513,9 +509,9 @@ dependencies = [ [[package]] name = "crossbeam-deque" -version = "0.8.3" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce6fd6f855243022dcecf8702fef0c297d4338e226845fe067f6341ad9fa0cef" +checksum = "715e8152b692bba2d374b53d4875445368fdf21a94751410af607a5ac677d1fc" dependencies = [ "cfg-if", "crossbeam-epoch", @@ -524,14 +520,14 @@ dependencies = [ [[package]] name = "crossbeam-epoch" -version = "0.9.15" +version = "0.9.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae211234986c545741a7dc064309f67ee1e5ad243d0e48335adc0484d960bcc7" +checksum = "01a9af1f4c2ef74bb8aa1f7e19706bc72d03598c8a570bb5de72243c7a9d9d5a" dependencies = [ "autocfg", "cfg-if", "crossbeam-utils", - "memoffset 0.9.0", + "memoffset", "scopeguard", ] @@ -547,9 +543,9 @@ dependencies = [ [[package]] name = "crossbeam-utils" -version = "0.8.16" +version = "0.8.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a22b2d63d4d1dc0b7f1b6b2747dd0088008a9be28b6ddf0b1e7d335e3037294" +checksum = "4fb766fa798726286dbbb842f174001dab8abc7b627a1dd86e0b7222a95d929f" dependencies = [ "cfg-if", ] @@ -566,56 +562,28 @@ dependencies = [ [[package]] name = "curve25519-dalek" -version = "4.1.0" +version = "3.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "622178105f911d937a42cdb140730ba4a3ed2becd8ae6ce39c7d28b5d75d4588" +checksum = "0b9fdf9972b2bd6af2d913799d9ebc165ea4d2e65878e329d9c6b372c4491b61" dependencies = [ - "cfg-if", - "cpufeatures", - "curve25519-dalek-derive", - "digest", - "fiat-crypto", - "platforms", - "rustc_version", + "byteorder", + "digest 0.9.0", + "rand_core 0.5.1", "subtle", "zeroize", ] -[[package]] -name = "curve25519-dalek-derive" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "83fdaf97f4804dcebfa5862639bc9ce4121e82140bec2a987ac5140294865b5b" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.21", -] - -[[package]] -name = "dashmap" -version = "5.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "907076dfda823b0b36d2a1bb5f90c96660a5bbcd7729e10727f07858f22c4edc" -dependencies = [ - "cfg-if", - "hashbrown 0.12.3", - "lock_api", - "once_cell", - "parking_lot_core", -] - [[package]] name = "data-encoding" -version = "2.4.0" +version = "2.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c2e66c9d817f1720209181c316d28635c050fa304f9c79e47a520882661b7308" +checksum = "3ee2393c4a91429dffb4bedf19f4d6abf27d8a732c8ce4980305d782e5426d57" [[package]] name = "der" -version = "0.7.8" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fffa369a668c8af7dbf8b5e56c9f744fbd399949ed171606040001947de40b1c" +checksum = "13dd2ae565c0a381dde7fade45fce95984c568bdcb4700a4fdbe3175e0380b2f" dependencies = [ "const-oid", "zeroize", @@ -623,11 +591,20 @@ dependencies = [ [[package]] name = "digest" -version = "0.10.7" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292" +checksum = "d3dd60d1080a57a05ab032377049e0591415d2b31afd7028356dbf3cc6dcb066" dependencies = [ - "block-buffer", + "generic-array", +] + +[[package]] +name = "digest" +version = "0.10.6" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "8168378f4e5023e7218c89c891c0fd8ecdb5e5e4f18cb78f38cf245dd021e76f" +dependencies = [ + "block-buffer 0.10.3", "crypto-common", "subtle", ] @@ -654,23 +631,22 @@ dependencies = [ [[package]] name = "ed25519" -version = "2.2.2" +version = "1.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "60f6d271ca33075c88028be6f04d502853d63a5ece419d269c15315d4fc1cf1d" +checksum = "1e9c280362032ea4203659fc489832d0204ef09f247a0506f170dafcac08c369" dependencies = [ - "pkcs8", "signature", ] [[package]] name = "ed25519-dalek" -version = "2.0.0" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7277392b266383ef8396db7fdeb1e77b6c52fed775f5df15bb24f35b72156980" +checksum = "c762bae6dcaf24c4c84667b8579785430908723d5c889f469d76a41d59cc7a9d" dependencies = [ "curve25519-dalek", "ed25519", - "rand_core", + "rand 0.7.3", "serde", "sha2", "zeroize", @@ -678,15 +654,15 @@ dependencies = [ [[package]] name = "either" -version = "1.8.1" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fcaabb2fef8c910e7f4c7ce9f67a1283a1715879a7c230ca9d6d1ae31f16d91" +checksum = "90e5c1c8368803113bf0c9584fc495a58b86dc8a29edbf8fe877d21d9507e797" [[package]] name = "encoding_rs" -version = "0.8.32" +version = "0.8.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "071a31f4ee85403370b58aca746f01041ede6f0da2730960ad001edc2b71b394" +checksum = "9852635589dc9f9ea1b6fe9f05b50ef208c85c834a562f0c6abb1c475736ec2b" dependencies = [ "cfg-if", ] @@ -700,15 +676,9 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 1.0.109", + "syn", ] -[[package]] -name = "equivalent" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "88bffebc5d80432c9b140ee17875ff173a8ab62faad5b257da912bd2f6c1c0a1" - [[package]] name = "fallible-iterator" version = "0.2.0" @@ -721,26 +691,11 @@ version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7360491ce676a36bf9bb3c56c1aa791658183a54d2744120f27285738d90465a" -[[package]] -name = "fdeflate" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d329bdeac514ee06249dabc27877490f17f5d371ec693360768b838e19f3ae10" -dependencies = [ - "simd-adler32", -] - -[[package]] -name = "fiat-crypto" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d0870c84016d4b481be5c9f323c24f65e31e901ae618f0e80f4308fb00de1d2d" - [[package]] name = "figment" -version = "0.10.10" +version = "0.10.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4547e226f4c9ab860571e070a9034192b3175580ecea38da34fcdb53a018c9a5" +checksum = "4e56602b469b2201400dec66a66aec5a9b8761ee97cd1b8c96ab2483fcc16cc9" dependencies = [ "atomic", "pear", @@ -752,9 +707,9 @@ dependencies = [ [[package]] name = "flate2" -version = "1.0.26" +version = "1.0.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b9429470923de8e8cbd4d2dc513535400b4b3fef0319fb5c4e1f520a7bef743" +checksum = "a8a2db397cb1c8772f31494cb8917e48cd1e64f0fa7efac59fbd741a0a8ce841" dependencies = [ "crc32fast", "miniz_oxide", @@ -768,9 +723,9 @@ checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" [[package]] name = "form_urlencoded" -version = "1.2.0" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "a62bc1cf6f830c2ec14a513a9fb124d0a213a629668a4186f329db21fe045652" +checksum = "a9c384f161156f5260c24a097c56119f9be8c798586aecc13afbcbe7b7e26bf8" dependencies = [ "percent-encoding", ] @@ -786,10 +741,16 @@ dependencies = [ ] [[package]] -name = "futures" -version = "0.3.28" +name = "fs_extra" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23342abe12aba583913b2e62f22225ff9c950774065e4bfb61a19cd9770fec40" +checksum = "2022715d62ab30faffd124d40b76f4134a550a87792276512b18d63272333394" + +[[package]] +name = "futures" +version = "0.3.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "38390104763dc37a5145a53c29c63c1290b5d316d6086ec32c293f6736051bb0" dependencies = [ "futures-channel", "futures-core", @@ -802,9 +763,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.28" +version = "0.3.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "955518d47e09b25bbebc7a18df10b81f0c766eaf4c4f1cccef2fca5f2a4fb5f2" +checksum = "52ba265a92256105f45b719605a571ffe2d1f0fea3807304b522c1d778f79eed" dependencies = [ "futures-core", "futures-sink", @@ -812,15 +773,15 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.28" +version = "0.3.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4bca583b7e26f571124fe5b7561d49cb2868d79116cfa0eefce955557c6fee8c" +checksum = "04909a7a7e4633ae6c4a9ab280aeb86da1236243a77b694a49eacd659a4bd3ac" [[package]] name = "futures-executor" -version = "0.3.28" +version = "0.3.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ccecee823288125bd88b4d7f565c9e58e41858e47ab72e8ea2d64e93624386e0" +checksum = "7acc85df6714c176ab5edf386123fafe217be88c0840ec11f199441134a074e2" dependencies = [ "futures-core", "futures-task", @@ -829,38 +790,38 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.28" +version = "0.3.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4fff74096e71ed47f8e023204cfd0aa1289cd54ae5430a9523be060cdb849964" +checksum = "00f5fb52a06bdcadeb54e8d3671f8888a39697dcb0b81b23b55174030427f4eb" [[package]] name = "futures-macro" -version = "0.3.28" +version = "0.3.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89ca545a94061b6365f2c7355b4b32bd20df3ff95f02da9329b34ccc3bd6ee72" +checksum = "bdfb8ce053d86b91919aad980c220b1fb8401a9394410e1c289ed7e66b61835d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.21", + "syn", ] [[package]] name = "futures-sink" -version = "0.3.28" +version = "0.3.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f43be4fe21a13b9781a69afa4985b0f6ee0e1afab2c6f454a8cf30e2b2237b6e" +checksum = "39c15cf1a4aa79df40f1bb462fb39676d0ad9e366c2a33b590d7c66f4f81fcf9" [[package]] name = "futures-task" -version = "0.3.28" +version = "0.3.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "76d3d132be6c0e6aa1534069c705a74a5997a356c0dc2f86a47765e5617c5b65" +checksum = "2ffb393ac5d9a6eaa9d3fdf37ae2776656b706e200c8e16b1bdb227f5198e6ea" [[package]] name = "futures-util" -version = "0.3.28" +version = "0.3.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26b01e40b772d54cf6c6d721c1d1abd0647a0106a12ecaa1c186273392a69533" +checksum = "197676987abd2f9cadff84926f410af1c183608d36641465df73ae8211dc65d6" dependencies = [ "futures-channel", "futures-core", @@ -876,9 +837,9 @@ dependencies = [ 
[[package]] name = "generic-array" -version = "0.14.7" +version = "0.14.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a" +checksum = "bff49e947297f3312447abdca79f45f4738097cc82b06e72054d2223f601f1b9" dependencies = [ "typenum", "version_check", @@ -886,20 +847,31 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.2.10" +version = "0.1.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be4136b2a15dd319360be1c07d9933517ccf0be8f16bf62a3bee4f0d618df427" +checksum = "8fc3cb4d91f53b50155bdcfd23f6a4c39ae1969c2ae85982b135750cccaf5fce" dependencies = [ "cfg-if", "libc", - "wasi", + "wasi 0.9.0+wasi-snapshot-preview1", +] + +[[package]] +name = "getrandom" +version = "0.2.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c05aeb6a22b8f62540c194aac980f2115af067bfe15a0734d7277a768d396b31" +dependencies = [ + "cfg-if", + "libc", + "wasi 0.11.0+wasi-snapshot-preview1", ] [[package]] name = "gif" -version = "0.12.0" +version = "0.11.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "80792593675e051cf94a4b111980da2ba60d4a83e43e0048c5693baab3977045" +checksum = "3edd93c6756b4dfaf2709eafcc345ba2636565295c198a9cfbf75fa5e3e00b06" dependencies = [ "color_quant", "weezl", @@ -907,15 +879,15 @@ dependencies = [ [[package]] name = "glob" -version = "0.3.1" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b" +checksum = "9b919933a397b79c37e33b77bb2aa3dc8eb6e165ad809e58ff75bc7db2e34574" [[package]] name = "h2" -version = "0.3.19" +version = "0.3.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d357c7ae988e7d2182f7d7871d0b963962420b0678b0997ce7de72001aeab782" +checksum = "5f9f29bc9dda355256b2916cf526ab02ce0aeaaaf2bad60d65ef3f12f11dd0f4" dependencies = [ "bytes", "fnv", @@ -923,7 +895,7 @@ dependencies = [ "futures-sink", "futures-util", "http", - "indexmap 1.9.3", + "indexmap", "slab", "tokio", "tokio-util", @@ -935,24 +907,17 @@ name = "hashbrown" version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" - -[[package]] -name = "hashbrown" -version = "0.14.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c6201b9ff9fd90a5a3bac2e56a830d0caa509576f0e503818ee82c181b3437a" dependencies = [ "ahash", - "allocator-api2", ] [[package]] name = "hashlink" -version = "0.8.3" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "312f66718a2d7789ffef4f4b7b213138ed9f1eb3aa1d0d82fc99f88fb3ffd26f" +checksum = "69fe1fcf8b4278d860ad0548329f892a3631fb63f82574df68275f34cdbe0ffa" dependencies = [ - "hashbrown 0.14.0", + "hashbrown", ] [[package]] @@ -961,8 +926,8 @@ version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f3e372db8e5c0d213e0cd0b9be18be2aca3d44cf2fe30a9d46a65581cd454584" dependencies = [ - "base64 0.13.1", - "bitflags 1.3.2", + "base64", + "bitflags", "bytes", "headers-core", "http", @@ -982,9 +947,9 @@ dependencies = [ [[package]] name = "heck" -version = "0.4.1" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8" +checksum = 
"2540771e65fc8cb83cd6e8a237f70c319bd5c29f78ed1084ba5d50eeac86f7f9" [[package]] name = "heed" @@ -1024,9 +989,9 @@ dependencies = [ [[package]] name = "hermit-abi" -version = "0.2.6" +version = "0.1.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee512640fe35acbfb4bb779db6f0d80704c2cacfa2e39b601ef3e3f47d1ae4c7" +checksum = "62b467343b94ba476dcb2500d242dadbb39557df889310ac77c5d99100aaac33" dependencies = [ "libc", ] @@ -1037,7 +1002,7 @@ version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6c49c37c09c17a53d937dfbb742eb3a961d65a994e6bcdcf37e7399d0cc8ab5e" dependencies = [ - "digest", + "digest 0.10.6", ] [[package]] @@ -1053,9 +1018,9 @@ dependencies = [ [[package]] name = "http" -version = "0.2.9" +version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd6effc99afb63425aff9b05836f029929e345a6148a14b7ecd5ab67af944482" +checksum = "75f43d41e26995c17e71ee126451dd3941010b0514a81a9d11f3b341debc2399" dependencies = [ "bytes", "fnv", @@ -1093,9 +1058,9 @@ checksum = "c4a1e36c821dbe04574f602848a19f742f4fb3c98d40449f11bcad18d6b17421" [[package]] name = "hyper" -version = "0.14.26" +version = "0.14.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab302d72a6f11a3b910431ff93aae7e773078c769f0a3ef15fb9ec692ed147d4" +checksum = "034711faac9d2166cb1baf1a2fb0b60b1f277f8492fd72176c17f3515e1abd3c" dependencies = [ "bytes", "futures-channel", @@ -1108,7 +1073,7 @@ dependencies = [ "httpdate", "itoa", "pin-project-lite", - "socket2 0.4.9", + "socket2", "tokio", "tower-service", "tracing", @@ -1117,15 +1082,15 @@ dependencies = [ [[package]] name = "hyper-rustls" -version = "0.23.2" +version = "0.23.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1788965e61b367cd03a62950836d5cd41560c3577d90e40e0819373194d1661c" +checksum = "59df7c4e19c950e6e0e868dcc0a300b09a9b88e9ec55bd879ca819087a77355d" dependencies = [ "http", "hyper", - "rustls 0.20.8", + "rustls", "tokio", - "tokio-rustls 0.23.4", + "tokio-rustls", ] [[package]] @@ -1141,9 +1106,9 @@ dependencies = [ [[package]] name = "idna" -version = "0.4.0" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d20d6b07bfbc108882d88ed8e37d39636dcc260e15e30c45e6ba089610b917c" +checksum = "e14ddfc70884202db2244c223200c204c2bda1bc6e0998d11b5e024d657209e6" dependencies = [ "unicode-bidi", "unicode-normalization", @@ -1151,9 +1116,9 @@ dependencies = [ [[package]] name = "image" -version = "0.24.6" +version = "0.24.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "527909aa81e20ac3a44803521443a765550f09b5130c2c2fa1ea59c2f8f50a3a" +checksum = "69b7ea949b537b0fd0af141fff8c77690f2ce96f4f41f042ccb6c69c6c965945" dependencies = [ "bytemuck", "byteorder", @@ -1167,22 +1132,12 @@ dependencies = [ [[package]] name = "indexmap" -version = "1.9.3" +version = "1.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd070e393353796e801d209ad339e89596eb4c8d430d18ede6a1cced8fafbd99" +checksum = "1885e79c1fc4b10f0e172c475f458b7f7b93061064d98c3293e98c5ba0c8b399" dependencies = [ "autocfg", - "hashbrown 0.12.3", -] - -[[package]] -name = "indexmap" -version = "2.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d5477fe2230a79769d8dc68e0eabf5437907c0457a5614a9e8dddb67f65eb65d" -dependencies = [ - "equivalent", - "hashbrown 0.14.0", + "hashbrown", "serde", ] @@ -1200,42 +1155,42 @@ 
checksum = "8bb03732005da905c88227371639bf1ad885cc712789c011c31c5fb3ab3ccf02" [[package]] name = "ipconfig" -version = "0.3.2" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b58db92f96b720de98181bbbe63c831e87005ab460c1bf306eb2622b4707997f" +checksum = "bd302af1b90f2463a98fa5ad469fc212c8e3175a41c3068601bfa2727591c5be" dependencies = [ - "socket2 0.5.3", + "socket2", "widestring", - "windows-sys 0.48.0", - "winreg 0.50.0", + "winapi", + "winreg 0.10.1", ] [[package]] name = "ipnet" -version = "2.7.2" +version = "2.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12b6ee2129af8d4fb011108c73d99a1b83a85977f23b82460c0ae2e25bb4b57f" +checksum = "f88c5561171189e69df9d98bcf18fd5f9558300f7ea7b801eb8a0fd748bd8745" [[package]] name = "itertools" -version = "0.11.0" +version = "0.10.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1c173a5686ce8bfa551b3563d0c2170bf24ca44da99c7ca4bfdab5418c3fe57" +checksum = "b0fd2260e829bddf4cb6ea802289de2f86d6a7a690192fbe91b3f46e0f2c8473" dependencies = [ "either", ] [[package]] name = "itoa" -version = "1.0.6" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "453ad9f582a441959e5f0d088b02ce04cfe8d51a8eaf077f12ac6d3e94164ca6" +checksum = "4217ad341ebadf8d8e724e264f13e593e0648f5b3e94b3896a5df283be015ecc" [[package]] name = "jobserver" -version = "0.1.26" +version = "0.1.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "936cfd212a0155903bcbc060e316fb6cc7cbf2e1907329391ebadc1fe0ce77c2" +checksum = "068b1ee6743e4d11fb9c6a1e6064b3693a1b600e7f5f5988047d98b3dc9fb90b" dependencies = [ "libc", ] @@ -1248,9 +1203,9 @@ checksum = "bc0000e42512c92e31c2252315bda326620a4e034105e900c98ec492fa077b3e" [[package]] name = "js-sys" -version = "0.3.64" +version = "0.3.60" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c5f195fe497f702db0f318b07fdd68edb16955aed830df8363d837542f8f935a" +checksum = "49409df3e3bf0856b916e2ceaca09ee28e6871cf7d9ce97a692cacfdb2a25a47" dependencies = [ "wasm-bindgen", ] @@ -1275,11 +1230,11 @@ dependencies = [ [[package]] name = "jsonwebtoken" -version = "8.3.0" +version = "8.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6971da4d9c3aa03c3d8f3ff0f4155b534aad021292003895a469716b2a230378" +checksum = "1aa4b4af834c6cfd35d8763d359661b90f2e45d8f750a0849156c7f4671af09c" dependencies = [ - "base64 0.21.2", + "base64", "pem", "ring", "serde", @@ -1289,23 +1244,25 @@ dependencies = [ [[package]] name = "konst" -version = "0.3.5" +version = "0.2.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d9a8bb6c7c71d151b25936b03e012a4c00daea99e3a3797c6ead66b0a0d55e2" +checksum = "330f0e13e6483b8c34885f7e6c9f19b1a7bd449c673fbb948a51c99d66ef74f4" dependencies = [ - "const_panic", - "konst_kernel", - "typewit", + "konst_macro_rules", + "konst_proc_macros", ] [[package]] -name = "konst_kernel" -version = "0.3.5" +name = "konst_macro_rules" +version = "0.2.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "55d2ab266022e7309df89ed712bddc753e3a3c395c3ced1bb2e4470ec2a8146d" -dependencies = [ - "typewit", -] +checksum = "a4933f3f57a8e9d9da04db23fb153356ecaf00cbd14aee46279c33dc80925c37" + +[[package]] +name = "konst_proc_macros" +version = "0.2.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"984e109462d46ad18314f10e392c286c3d47bce203088a09012de1015b45b737" [[package]] name = "lazy_static" @@ -1321,9 +1278,9 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "libc" -version = "0.2.146" +version = "0.2.137" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f92be4933c13fd498862a9e02a3055f8a8d9c039ce33db97306fd5a6caa7f29b" +checksum = "fc7fcc620a3bff7cdd7a365be3376c97191aeaccc2a603e600951e452615bf89" [[package]] name = "libloading" @@ -1337,36 +1294,21 @@ dependencies = [ [[package]] name = "librocksdb-sys" -version = "0.11.0+8.1.1" +version = "6.20.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d3386f101bcb4bd252d8e9d2fb41ec3b0862a15a62b478c355b2982efa469e3e" +checksum = "c309a9d2470844aceb9a4a098cf5286154d20596868b75a6b36357d2bb9ca25d" dependencies = [ "bindgen", - "bzip2-sys", "cc", "glob", "libc", - "libz-sys", - "lz4-sys", - "zstd-sys", ] [[package]] name = "libsqlite3-sys" -version = "0.26.0" +version = "0.25.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "afc22eff61b133b115c6e8c74e818c628d6d5e7a502afea6f64dee076dd94326" -dependencies = [ - "cc", - "pkg-config", - "vcpkg", -] - -[[package]] -name = "libz-sys" -version = "1.1.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56ee889ecc9568871456d42f603d6a0ce59ff328d291063a45cbdf0036baf6db" +checksum = "29f835d03d717946d28b1d1ed632eb6f0e24a299388ee623d0c23118d3e8a7fa" dependencies = [ "cc", "pkg-config", @@ -1392,9 +1334,9 @@ dependencies = [ [[package]] name = "lock_api" -version = "0.4.10" +version = "0.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1cc9717a20b1bb222f333e6a92fd32f7d8a18ddc5a3191a11af45dcbf4dcd16" +checksum = "435011366fe56583b16cf956f9df0095b405b82d76425bc8981c0e22e60ec4df" dependencies = [ "autocfg", "scopeguard", @@ -1402,9 +1344,12 @@ dependencies = [ [[package]] name = "log" -version = "0.4.19" +version = "0.4.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b06a4cde4c0f271a446782e3eff8de789548ce57dbc8eca9292c27f4a42004b4" +checksum = "abb12e687cfb44aa40f41fc3978ef76448f9b6038cad6aef4259d3c095a2382e" +dependencies = [ + "cfg-if", +] [[package]] name = "lru-cache" @@ -1415,16 +1360,6 @@ dependencies = [ "linked-hash-map", ] -[[package]] -name = "lz4-sys" -version = "1.9.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57d27b317e207b10f69f5e75494119e391a96f48861ae870d1da6edac98ca900" -dependencies = [ - "cc", - "libc", -] - [[package]] name = "maplit" version = "1.0.2" @@ -1448,15 +1383,15 @@ dependencies = [ [[package]] name = "matches" -version = "0.1.10" +version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2532096657941c2fea9c289d370a250971c689d4f143798ff67113ec042024a5" +checksum = "a3e378b66a060d48947b590737b30a1be76706c8dd7b8ba0f2fe3989c68a853f" [[package]] name = "matchit" -version = "0.7.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b87248edafb776e59e6ee64a79086f65890d3510f2c656c000bf2a7e8a0aea40" +checksum = "73cbba799671b762df5a175adf59ce145165747bb891505c43d09aefbbf38beb" [[package]] name = "memchr" @@ -1473,20 +1408,11 @@ dependencies = [ "autocfg", ] -[[package]] -name = "memoffset" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"5a634b1c61a95585bd15607c6ab0c4e5b226e695ff2800ba0cdccddf208c406c" -dependencies = [ - "autocfg", -] - [[package]] name = "mime" -version = "0.3.17" +version = "0.3.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a" +checksum = "2a60c7ce501c71e03a9c9c0d35b861413ae925bd979cc7a4e30d060069aaac8d" [[package]] name = "minimal-lexical" @@ -1496,44 +1422,30 @@ checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" [[package]] name = "miniz_oxide" -version = "0.7.1" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e7810e0be55b428ada41041c41f32c9f1a42817901b4ccf45fa3d4b6561e74c7" +checksum = "b275950c28b37e794e8c55d88aeb5e139d0ce23fdbbeda68f8d7174abdf9e8fa" dependencies = [ "adler", - "simd-adler32", ] [[package]] name = "mio" -version = "0.8.8" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "927a765cd3fc26206e66b296465fa9d3e5ab003e651c1b3c060e7956d96b19d2" +checksum = "e5d732bc30207a6423068df043e3d02e0735b155ad7ce1a6f76fe2baa5b158de" dependencies = [ "libc", - "wasi", - "windows-sys 0.48.0", -] - -[[package]] -name = "nix" -version = "0.26.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bfdda3d196821d6af13126e40375cdf7da646a96114af134d5f417a9a1dc8e1a" -dependencies = [ - "bitflags 1.3.2", - "cfg-if", - "libc", - "memoffset 0.7.1", - "pin-utils", - "static_assertions", + "log", + "wasi 0.11.0+wasi-snapshot-preview1", + "windows-sys 0.42.0", ] [[package]] name = "nom" -version = "7.1.3" +version = "7.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d273983c5a657a70a3e8f2a01329822f3b8c8172b73826411a55751e404a0a4a" +checksum = "a8903e5a29a317527874d0402f867152a3d21c908bb0b933e416c65e301d4c36" dependencies = [ "memchr", "minimal-lexical", @@ -1592,9 +1504,9 @@ dependencies = [ [[package]] name = "num_cpus" -version = "1.15.0" +version = "1.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0fac9e2da13b5eb447a6ce3d392f23a29d8694bff781bf03a16cd9ac8697593b" +checksum = "f6058e64324c71e02bc2b150e4f3bc8286db6c83092132ffa3f6b1eab0f9def5" dependencies = [ "hermit-abi", "libc", @@ -1602,9 +1514,15 @@ dependencies = [ [[package]] name = "once_cell" -version = "1.18.0" +version = "1.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd8b5dd2ae5ed71462c540258bedcb51965123ad7e7ccf4b9a8cafaa4a63576d" +checksum = "86f0b0d4bf799edbc74508c1e8bf170ff5f41238e5f8225603ca7caaae2b7860" + +[[package]] +name = "opaque-debug" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5" [[package]] name = "openssl-probe" @@ -1654,10 +1572,9 @@ version = "0.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c24f96e21e7acc813c7a8394ee94978929db2bcc46cf6b5014fc612bf7760c22" dependencies = [ - "fnv", "futures-channel", "futures-util", - "indexmap 1.9.3", + "indexmap", "js-sys", "once_cell", "pin-project-lite", @@ -1672,15 +1589,13 @@ checksum = "1ca41c4933371b61c2a2f214bf16931499af4ec90543604ec828f7a625c09113" dependencies = [ "async-trait", "crossbeam-channel", - "dashmap", - "fnv", "futures-channel", "futures-executor", "futures-util", "once_cell", "opentelemetry_api", "percent-encoding", - "rand", + "rand 0.8.5", "thiserror", "tokio", "tokio-stream", 
@@ -1695,6 +1610,12 @@ dependencies = [ "num-traits", ] +[[package]] +name = "os_str_bytes" +version = "6.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b7820b9daea5457c9f21c69448905d723fbd21136ccf521748f23fd49e723ee" + [[package]] name = "overload" version = "0.1.1" @@ -1723,28 +1644,28 @@ dependencies = [ [[package]] name = "parking_lot_core" -version = "0.9.8" +version = "0.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "93f00c865fe7cabf650081affecd3871070f26767e7b2070a3ffae14c654b447" +checksum = "4dc9e0dc2adc1c69d09143aff38d3d30c5c3f0df0dad82e6d25547af174ebec0" dependencies = [ "cfg-if", "libc", - "redox_syscall 0.3.5", + "redox_syscall", "smallvec", - "windows-targets", + "windows-sys 0.42.0", ] [[package]] name = "paste" -version = "1.0.12" +version = "1.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f746c4065a8fa3fe23974dd82f15431cc8d40779821001404d10d2e79ca7d79" +checksum = "b1de2e551fb905ac83f73f7aedf2f0cb4a0da7e35efa24a202a936269f1f18e1" [[package]] name = "pear" -version = "0.2.4" +version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ec95680a7087503575284e5063e14b694b7a9c0b065e5dceec661e0497127e8" +checksum = "15e44241c5e4c868e3eaa78b7c1848cadd6344ed4f54d029832d32b415a58702" dependencies = [ "inlinable_string", "pear_codegen", @@ -1753,14 +1674,14 @@ dependencies = [ [[package]] name = "pear_codegen" -version = "0.2.4" +version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9661a3a53f93f09f2ea882018e4d7c88f6ff2956d809a276060476fd8c879d3c" +checksum = "82a5ca643c2303ecb740d506539deba189e16f2754040a42901cd8105d0282d0" dependencies = [ "proc-macro2", "proc-macro2-diagnostics", "quote", - "syn 2.0.21", + "syn", ] [[package]] @@ -1771,30 +1692,30 @@ checksum = "19b17cddbe7ec3f8bc800887bab5e717348c95ea2ca0b1bf0837fb964dc67099" [[package]] name = "pem" -version = "1.1.1" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8835c273a76a90455d7344889b0964598e3316e2a79ede8e36f16bdcf2228b8" +checksum = "03c64931a1a212348ec4f3b4362585eca7159d0d09cbdf4a7f74f02173596fd4" dependencies = [ - "base64 0.13.1", + "base64", ] [[package]] name = "percent-encoding" -version = "2.3.0" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b2a4787296e9989611394c33f193f676704af1686e70b8f8033ab5ba9a35a94" +checksum = "478c572c3d73181ff3c2539045f6eb99e5491218eae919370993b890cdbdd98e" [[package]] name = "persy" -version = "1.4.4" +version = "1.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3712821f12453814409ec149071bd4832a8ec458e648579c104aee30ed70b300" +checksum = "5511189f4dbd737283b0dd2ff6715f2e35fd0d3e1ddf953ed6a772e439e1f73f" dependencies = [ "crc", "data-encoding", "fs2", "linked-hash-map", - "rand", + "rand 0.8.5", "thiserror", "unsigned-varint", "zigzag", @@ -1802,22 +1723,22 @@ dependencies = [ [[package]] name = "pin-project" -version = "1.1.0" +version = "1.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c95a7476719eab1e366eaf73d0260af3021184f18177925b07f54b30089ceead" +checksum = "ad29a609b6bcd67fee905812e544992d216af9d755757c05ed2d0e15a74c6ecc" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "1.1.0" +version = "1.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"39407670928234ebc5e6e580247dd567ad73a3578460c5990f9503df207e8f07" +checksum = "069bdb1e05adc7a8990dce9cc75370895fbe4e3d58b9b73bf1aee56359344a55" dependencies = [ "proc-macro2", "quote", - "syn 2.0.21", + "syn", ] [[package]] @@ -1834,9 +1755,9 @@ checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" [[package]] name = "pkcs8" -version = "0.10.2" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f950b2377845cebe5cf8b5165cb3cc1a5e0fa5cfa3e1f7f55707d8fd82e0a7b7" +checksum = "9eca2c590a5f85da82668fa685c09ce2888b9430e83299debf1f34b65fd4a4ba" dependencies = [ "der", "spki", @@ -1844,25 +1765,18 @@ dependencies = [ [[package]] name = "pkg-config" -version = "0.3.27" +version = "0.3.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26072860ba924cbfa98ea39c8c19b4dd6a4a25423dbdf219c1eca91aa0cf6964" - -[[package]] -name = "platforms" -version = "3.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4503fa043bf02cee09a9582e9554b4c6403b2ef55e4612e96561d294419429f8" +checksum = "6ac9a59f73473f1b8d852421e59e64809f025994837ef743615c6d0c5b305160" [[package]] name = "png" -version = "0.17.9" +version = "0.17.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59871cc5b6cce7eaccca5a802b4173377a1c2ba90654246789a8fa2334426d11" +checksum = "5d708eaf860a19b19ce538740d2b4bdeeb8337fa53f7738455e706623ad5c638" dependencies = [ - "bitflags 1.3.2", + "bitflags", "crc32fast", - "fdeflate", "flate2", "miniz_oxide", ] @@ -1874,43 +1788,58 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" [[package]] -name = "prettyplease" -version = "0.2.9" +name = "proc-macro-crate" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9825a04601d60621feed79c4e6b56d65db77cdca55cef43b46b0de1096d1c282" +checksum = "eda0fc3b0fb7c975631757e14d9049da17374063edb6ebbcbc54d880d4fe94e9" dependencies = [ - "proc-macro2", - "syn 2.0.21", + "once_cell", + "thiserror", + "toml", ] [[package]] -name = "proc-macro-crate" -version = "1.3.1" +name = "proc-macro-error" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f4c021e1093a56626774e81216a4ce732a735e5bad4868a03f3ed65ca0c3919" +checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c" dependencies = [ - "once_cell", - "toml_edit", + "proc-macro-error-attr", + "proc-macro2", + "quote", + "syn", + "version_check", +] + +[[package]] +name = "proc-macro-error-attr" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869" +dependencies = [ + "proc-macro2", + "quote", + "version_check", ] [[package]] name = "proc-macro2" -version = "1.0.61" +version = "1.0.47" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "363a6f739a0c0addeaf6ed75150b95743aa18643a3c6f40409ed7b6db3a6911f" +checksum = "5ea3d908b0e36316caf9e9e2c4625cdde190a7e6f440d794667ed17a1855e725" dependencies = [ "unicode-ident", ] [[package]] name = "proc-macro2-diagnostics" -version = "0.10.0" +version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "606c4ba35817e2922a308af55ad51bab3645b59eae5c570d4a6cf07e36bd493b" +checksum = "4bf29726d67464d49fa6224a1d07936a8c08bb3fba727c7493f6cf1616fdaada" dependencies = [ 
"proc-macro2", "quote", - "syn 2.0.21", + "syn", "version_check", "yansi", ] @@ -1923,13 +1852,26 @@ checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" [[package]] name = "quote" -version = "1.0.28" +version = "1.0.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b9ab9c7eadfd8df19006f1cf1a4aed13540ed5cbc047010ece5826e10825488" +checksum = "bbe448f377a7d6961e30f5955f9b8d106c3f5e449d493ee1b125c1d43c2b5179" dependencies = [ "proc-macro2", ] +[[package]] +name = "rand" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6a6b1679d49b24bbfe0c803429aa1874472f50d9b363131f0e89fc356b544d03" +dependencies = [ + "getrandom 0.1.16", + "libc", + "rand_chacha 0.2.2", + "rand_core 0.5.1", + "rand_hc", +] + [[package]] name = "rand" version = "0.8.5" @@ -1937,8 +1879,18 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" dependencies = [ "libc", - "rand_chacha", - "rand_core", + "rand_chacha 0.3.1", + "rand_core 0.6.4", +] + +[[package]] +name = "rand_chacha" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f4c8ed856279c9737206bf725bf36935d8666ead7aa69b52be55af369d193402" +dependencies = [ + "ppv-lite86", + "rand_core 0.5.1", ] [[package]] @@ -1948,7 +1900,16 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" dependencies = [ "ppv-lite86", - "rand_core", + "rand_core 0.6.4", +] + +[[package]] +name = "rand_core" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "90bde5296fc891b0cef12a6d03ddccc162ce7b2aff54160af9338f8d40df6d19" +dependencies = [ + "getrandom 0.1.16", ] [[package]] @@ -1957,7 +1918,16 @@ version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" dependencies = [ - "getrandom", + "getrandom 0.2.8", +] + +[[package]] +name = "rand_hc" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ca3129af7b92a17112d59ad498c6f81eaf463253766b90396d39ea7a39d6613c" +dependencies = [ + "rand_core 0.5.1", ] [[package]] @@ -1966,16 +1936,7 @@ version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fb5a58c1855b4b6819d59012155603f0b22ad30cad752600aadfcb695265519a" dependencies = [ - "bitflags 1.3.2", -] - -[[package]] -name = "redox_syscall" -version = "0.3.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "567664f262709473930a4bf9e51bf2ebf3348f2e748ccc50dea20646858f8f29" -dependencies = [ - "bitflags 1.3.2", + "bitflags", ] [[package]] @@ -1984,20 +1945,20 @@ version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b033d837a7cf162d7993aded9304e30a83213c648b6e389db233191f891e5c2b" dependencies = [ - "getrandom", - "redox_syscall 0.2.16", + "getrandom 0.2.8", + "redox_syscall", "thiserror", ] [[package]] name = "regex" -version = "1.8.4" +version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d0ab3ca65655bb1e41f2a8c8cd662eb4fb035e67c3f78da1d61dffe89d07300f" +checksum = "e076559ef8e241f2ae3479e36f97bd5741c0330689e217ad51ce2c76808b868a" dependencies = [ "aho-corasick", "memchr", - "regex-syntax 0.7.2", + "regex-syntax", ] [[package]] @@ 
-2006,27 +1967,21 @@ version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6c230d73fb8d8c1b9c0b3135c5142a8acee3a0558fb8db5cf1cb65f8d7862132" dependencies = [ - "regex-syntax 0.6.29", + "regex-syntax", ] [[package]] name = "regex-syntax" -version = "0.6.29" +version = "0.6.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" - -[[package]] -name = "regex-syntax" -version = "0.7.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "436b050e76ed2903236f032a59761c1eb99e1b0aead2c257922771dab1fc8c78" +checksum = "456c603be3e8d448b072f410900c09faf164fbce2d480456f50eea6e25f9c848" [[package]] name = "reqwest" version = "0.11.9" source = "git+https://github.com/timokoesters/reqwest?rev=57b7cf4feb921573dfafad7d34b9ac6e44ead0bd#57b7cf4feb921573dfafad7d34b9ac6e44ead0bd" dependencies = [ - "base64 0.13.1", + "base64", "bytes", "encoding_rs", "futures-core", @@ -2043,14 +1998,14 @@ dependencies = [ "mime", "percent-encoding", "pin-project-lite", - "rustls 0.20.8", + "rustls", "rustls-native-certs", "rustls-pemfile 0.2.1", "serde", "serde_json", "serde_urlencoded", "tokio", - "tokio-rustls 0.23.4", + "tokio-rustls", "tokio-socks", "url", "wasm-bindgen", @@ -2086,9 +2041,9 @@ dependencies = [ [[package]] name = "rocksdb" -version = "0.21.0" +version = "0.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb6f170a4041d50a0ce04b0d2e14916d6ca863ea2e422689a5b694395d299ffe" +checksum = "7a62eca5cacf2c8261128631bed9f045598d40bfbe4b29f5163f0f802f8f44a7" dependencies = [ "libc", "librocksdb-sys", @@ -2096,8 +2051,8 @@ dependencies = [ [[package]] name = "ruma" -version = "0.8.2" -source = "git+https://github.com/ruma/ruma?rev=b4853aa8fa5e3a24e3689fc88044de9915f6ab67#b4853aa8fa5e3a24e3689fc88044de9915f6ab67" +version = "0.7.4" +source = "git+https://github.com/ruma/ruma?rev=2bd5c131f49b2239750c39ed63b623cd5a01c965#2bd5c131f49b2239750c39ed63b623cd5a01c965" dependencies = [ "assign", "js_int", @@ -2105,7 +2060,6 @@ dependencies = [ "ruma-appservice-api", "ruma-client-api", "ruma-common", - "ruma-events", "ruma-federation-api", "ruma-identity-service-api", "ruma-push-gateway-api", @@ -2115,20 +2069,19 @@ dependencies = [ [[package]] name = "ruma-appservice-api" -version = "0.8.1" -source = "git+https://github.com/ruma/ruma?rev=b4853aa8fa5e3a24e3689fc88044de9915f6ab67#b4853aa8fa5e3a24e3689fc88044de9915f6ab67" +version = "0.7.0" +source = "git+https://github.com/ruma/ruma?rev=2bd5c131f49b2239750c39ed63b623cd5a01c965#2bd5c131f49b2239750c39ed63b623cd5a01c965" dependencies = [ "js_int", "ruma-common", - "ruma-events", "serde", "serde_json", ] [[package]] name = "ruma-client-api" -version = "0.16.2" -source = "git+https://github.com/ruma/ruma?rev=b4853aa8fa5e3a24e3689fc88044de9915f6ab67#b4853aa8fa5e3a24e3689fc88044de9915f6ab67" +version = "0.15.3" +source = "git+https://github.com/ruma/ruma?rev=2bd5c131f49b2239750c39ed63b623cd5a01c965#2bd5c131f49b2239750c39ed63b623cd5a01c965" dependencies = [ "assign", "bytes", @@ -2136,33 +2089,32 @@ dependencies = [ "js_int", "js_option", "maplit", + "percent-encoding", "ruma-common", - "ruma-events", "serde", - "serde_html_form", "serde_json", ] [[package]] name = "ruma-common" -version = "0.11.3" -source = "git+https://github.com/ruma/ruma?rev=b4853aa8fa5e3a24e3689fc88044de9915f6ab67#b4853aa8fa5e3a24e3689fc88044de9915f6ab67" +version = "0.10.5" +source = 
"git+https://github.com/ruma/ruma?rev=2bd5c131f49b2239750c39ed63b623cd5a01c965#2bd5c131f49b2239750c39ed63b623cd5a01c965" dependencies = [ - "as_variant", - "base64 0.21.2", + "base64", "bytes", "form_urlencoded", "http", - "indexmap 2.0.0", + "indexmap", + "itoa", "js_int", + "js_option", "konst", "percent-encoding", - "rand", + "rand 0.8.5", "regex", "ruma-identifiers-validation", "ruma-macros", "serde", - "serde_html_form", "serde_json", "thiserror", "tracing", @@ -2171,44 +2123,21 @@ dependencies = [ "wildmatch", ] -[[package]] -name = "ruma-events" -version = "0.26.0" -source = "git+https://github.com/ruma/ruma?rev=b4853aa8fa5e3a24e3689fc88044de9915f6ab67#b4853aa8fa5e3a24e3689fc88044de9915f6ab67" -dependencies = [ - "as_variant", - "indexmap 2.0.0", - "js_int", - "js_option", - "percent-encoding", - "regex", - "ruma-common", - "ruma-identifiers-validation", - "ruma-macros", - "serde", - "serde_json", - "thiserror", - "tracing", - "url", - "wildmatch", -] - [[package]] name = "ruma-federation-api" -version = "0.7.1" -source = "git+https://github.com/ruma/ruma?rev=b4853aa8fa5e3a24e3689fc88044de9915f6ab67#b4853aa8fa5e3a24e3689fc88044de9915f6ab67" +version = "0.6.0" +source = "git+https://github.com/ruma/ruma?rev=2bd5c131f49b2239750c39ed63b623cd5a01c965#2bd5c131f49b2239750c39ed63b623cd5a01c965" dependencies = [ "js_int", "ruma-common", - "ruma-events", "serde", "serde_json", ] [[package]] name = "ruma-identifiers-validation" -version = "0.9.1" -source = "git+https://github.com/ruma/ruma?rev=b4853aa8fa5e3a24e3689fc88044de9915f6ab67#b4853aa8fa5e3a24e3689fc88044de9915f6ab67" +version = "0.9.0" +source = "git+https://github.com/ruma/ruma?rev=2bd5c131f49b2239750c39ed63b623cd5a01c965#2bd5c131f49b2239750c39ed63b623cd5a01c965" dependencies = [ "js_int", "thiserror", @@ -2216,8 +2145,8 @@ dependencies = [ [[package]] name = "ruma-identity-service-api" -version = "0.7.1" -source = "git+https://github.com/ruma/ruma?rev=b4853aa8fa5e3a24e3689fc88044de9915f6ab67#b4853aa8fa5e3a24e3689fc88044de9915f6ab67" +version = "0.6.0" +source = "git+https://github.com/ruma/ruma?rev=2bd5c131f49b2239750c39ed63b623cd5a01c965#2bd5c131f49b2239750c39ed63b623cd5a01c965" dependencies = [ "js_int", "ruma-common", @@ -2226,8 +2155,8 @@ dependencies = [ [[package]] name = "ruma-macros" -version = "0.11.3" -source = "git+https://github.com/ruma/ruma?rev=b4853aa8fa5e3a24e3689fc88044de9915f6ab67#b4853aa8fa5e3a24e3689fc88044de9915f6ab67" +version = "0.10.5" +source = "git+https://github.com/ruma/ruma?rev=2bd5c131f49b2239750c39ed63b623cd5a01c965#2bd5c131f49b2239750c39ed63b623cd5a01c965" dependencies = [ "once_cell", "proc-macro-crate", @@ -2235,31 +2164,30 @@ dependencies = [ "quote", "ruma-identifiers-validation", "serde", - "syn 2.0.21", + "syn", "toml", ] [[package]] name = "ruma-push-gateway-api" -version = "0.7.1" -source = "git+https://github.com/ruma/ruma?rev=b4853aa8fa5e3a24e3689fc88044de9915f6ab67#b4853aa8fa5e3a24e3689fc88044de9915f6ab67" +version = "0.6.0" +source = "git+https://github.com/ruma/ruma?rev=2bd5c131f49b2239750c39ed63b623cd5a01c965#2bd5c131f49b2239750c39ed63b623cd5a01c965" dependencies = [ "js_int", "ruma-common", - "ruma-events", "serde", "serde_json", ] [[package]] name = "ruma-signatures" -version = "0.13.1" -source = "git+https://github.com/ruma/ruma?rev=b4853aa8fa5e3a24e3689fc88044de9915f6ab67#b4853aa8fa5e3a24e3689fc88044de9915f6ab67" +version = "0.12.0" +source = "git+https://github.com/ruma/ruma?rev=2bd5c131f49b2239750c39ed63b623cd5a01c965#2bd5c131f49b2239750c39ed63b623cd5a01c965" dependencies = [ - 
"base64 0.21.2", + "base64", "ed25519-dalek", "pkcs8", - "rand", + "rand 0.7.3", "ruma-common", "serde_json", "sha2", @@ -2269,13 +2197,12 @@ dependencies = [ [[package]] name = "ruma-state-res" -version = "0.9.1" -source = "git+https://github.com/ruma/ruma?rev=b4853aa8fa5e3a24e3689fc88044de9915f6ab67#b4853aa8fa5e3a24e3689fc88044de9915f6ab67" +version = "0.8.0" +source = "git+https://github.com/ruma/ruma?rev=2bd5c131f49b2239750c39ed63b623cd5a01c965#2bd5c131f49b2239750c39ed63b623cd5a01c965" dependencies = [ "itertools", "js_int", "ruma-common", - "ruma-events", "serde", "serde_json", "thiserror", @@ -2284,11 +2211,11 @@ dependencies = [ [[package]] name = "rusqlite" -version = "0.29.0" +version = "0.28.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "549b9d036d571d42e6e85d1c1425e2ac83491075078ca9a15be021c56b1641f2" +checksum = "01e213bc3ecb39ac32e81e51ebe31fd888a940515173e3a18a35f8c6e896422a" dependencies = [ - "bitflags 2.3.2", + "bitflags", "fallible-iterator", "fallible-streaming-iterator", "hashlink", @@ -2302,9 +2229,9 @@ version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b50162d19404029c1ceca6f6980fe40d45c8b369f6f44446fa14bb39573b5bb9" dependencies = [ - "base64 0.13.1", + "base64", "blake2b_simd", - "constant_time_eq 0.1.5", + "constant_time_eq", "crossbeam-utils", ] @@ -2314,20 +2241,11 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2" -[[package]] -name = "rustc_version" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366" -dependencies = [ - "semver", -] - [[package]] name = "rustls" -version = "0.20.8" +version = "0.20.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fff78fc74d175294f4e83b28343315ffcfb114b156f0185e9741cb5570f50e2f" +checksum = "539a2bfe908f471bfa933876bd1eb6a19cf2176d375f82ef7f99530a40e48c2c" dependencies = [ "log", "ring", @@ -2335,26 +2253,14 @@ dependencies = [ "webpki", ] -[[package]] -name = "rustls" -version = "0.21.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e32ca28af694bc1bbf399c33a516dbdf1c90090b8ab23c2bc24f834aa2247f5f" -dependencies = [ - "log", - "ring", - "rustls-webpki", - "sct", -] - [[package]] name = "rustls-native-certs" -version = "0.6.3" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9aace74cb666635c918e9c12bc0d348266037aa8eb599b5cba565709a8dff00" +checksum = "0167bac7a9f490495f3c33013e7722b53cb087ecbe082fb0c6387c96f634ea50" dependencies = [ "openssl-probe", - "rustls-pemfile 1.0.2", + "rustls-pemfile 1.0.1", "schannel", "security-framework", ] @@ -2365,47 +2271,32 @@ version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5eebeaeb360c87bfb72e84abdb3447159c0eaececf1bef2aecd65a8be949d1c9" dependencies = [ - "base64 0.13.1", + "base64", ] [[package]] name = "rustls-pemfile" -version = "1.0.2" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d194b56d58803a43635bdc398cd17e383d6f71f9182b9a192c127ca42494a59b" +checksum = "0864aeff53f8c05aa08d86e5ef839d3dfcf07aeba2db32f12db0ef716e87bd55" dependencies = [ - "base64 0.21.2", + "base64", ] -[[package]] -name = "rustls-webpki" -version = "0.100.1" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "d6207cd5ed3d8dca7816f8f3725513a34609c0c765bf652b8c3cb4cfd87db46b" -dependencies = [ - "ring", - "untrusted", -] - -[[package]] -name = "rustversion" -version = "1.0.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4f3208ce4d8448b3f3e7d168a73f5e0c43a61e32930de3bceeccedb388b6bf06" - [[package]] name = "ryu" -version = "1.0.13" +version = "1.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f91339c0467de62360649f8d3e185ca8de4224ff281f66000de5eb2a77a79041" +checksum = "4501abdff3ae82a1c1b477a17252eb69cee9e66eb915c1abaa4f44d873df9f09" [[package]] name = "schannel" -version = "0.1.21" +version = "0.1.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "713cfb06c7059f3588fb8044c0fad1d09e3c01d225e25b9220dbfdcf16dbb1b3" +checksum = "88d6731146462ea25d9244b2ed5fd1d716d25c52e4d54aa4fb0f3c4e9854dbe2" dependencies = [ - "windows-sys 0.42.0", + "lazy_static", + "windows-sys 0.36.1", ] [[package]] @@ -2424,19 +2315,13 @@ dependencies = [ "untrusted", ] -[[package]] -name = "sd-notify" -version = "0.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "621e3680f3e07db4c9c2c3fb07c6223ab2fab2e54bd3c04c3ae037990f428c32" - [[package]] name = "security-framework" -version = "2.9.1" +version = "2.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1fc758eb7bffce5b308734e9b0c1468893cae9ff70ebf13e7090be8dcbcc83a8" +checksum = "2bc1bb97804af6631813c55739f771071e0f2ed33ee20b68c86ec505d906356c" dependencies = [ - "bitflags 1.3.2", + "bitflags", "core-foundation", "core-foundation-sys", "libc", @@ -2445,82 +2330,45 @@ dependencies = [ [[package]] name = "security-framework-sys" -version = "2.9.0" +version = "2.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f51d0c0d83bec45f16480d0ce0058397a69e48fcdc52d1dc8855fb68acbd31a7" +checksum = "0160a13a177a45bfb43ce71c01580998474f556ad854dcbca936dd2841a5c556" dependencies = [ "core-foundation-sys", "libc", ] -[[package]] -name = "semver" -version = "1.0.18" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0293b4b29daaf487284529cc2f5675b8e57c61f70167ba415a463651fd6a918" - [[package]] name = "serde" -version = "1.0.164" +version = "1.0.147" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e8c8cf938e98f769bc164923b06dce91cea1751522f46f8466461af04c9027d" +checksum = "d193d69bae983fc11a79df82342761dfbf28a99fc8d203dca4c3c1b590948965" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.164" +version = "1.0.147" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d9735b638ccc51c28bf6914d90a2e9725b377144fc612c49a611fddd1b631d68" +checksum = "4f1d362ca8fc9c3e3a7484440752472d68a6caa98f1ab81d99b5dfe517cec852" dependencies = [ "proc-macro2", "quote", - "syn 2.0.21", -] - -[[package]] -name = "serde_html_form" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "53192e38d5c88564b924dbe9b60865ecbb71b81d38c4e61c817cffd3e36ef696" -dependencies = [ - "form_urlencoded", - "indexmap 1.9.3", - "itoa", - "ryu", - "serde", + "syn", ] [[package]] name = "serde_json" -version = "1.0.99" +version = "1.0.89" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "46266871c240a00b8f503b877622fe33430b3c7d963bdc0f2adc511e54a1eae3" +checksum = 
"020ff22c755c2ed3f8cf162dbb41a7268d934702f3ed3631656ea597e08fc3db" dependencies = [ "itoa", "ryu", "serde", ] -[[package]] -name = "serde_path_to_error" -version = "0.1.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7f05c1d5476066defcdfacce1f52fc3cae3af1d3089727100c02ae92e5abbe0" -dependencies = [ - "serde", -] - -[[package]] -name = "serde_spanned" -version = "0.6.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96426c9936fd7a0124915f9185ea1d20aa9445cc9821142f0a73bc9207a2e186" -dependencies = [ - "serde", -] - [[package]] name = "serde_urlencoded" version = "0.7.1" @@ -2535,11 +2383,11 @@ dependencies = [ [[package]] name = "serde_yaml" -version = "0.9.22" +version = "0.9.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "452e67b9c20c37fa79df53201dc03839651086ed9bbe92b3ca585ca9fdaa7d85" +checksum = "6d232d893b10de3eb7258ff01974d6ee20663d8e833263c99409d4b13a0209da" dependencies = [ - "indexmap 2.0.0", + "indexmap", "itoa", "ryu", "serde", @@ -2548,13 +2396,13 @@ dependencies = [ [[package]] name = "sha-1" -version = "0.10.1" +version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f5058ada175748e33390e40e872bd0fe59a19f265d0158daa551c5a88a76009c" +checksum = "028f48d513f9678cda28f6e4064755b3fbb2af6acd672f2c209b62323f7aea0f" dependencies = [ "cfg-if", "cpufeatures", - "digest", + "digest 0.10.6", ] [[package]] @@ -2565,18 +2413,20 @@ checksum = "f04293dc80c3993519f2d7f6f511707ee7094fe0c6d3406feb330cdb3540eba3" dependencies = [ "cfg-if", "cpufeatures", - "digest", + "digest 0.10.6", ] [[package]] name = "sha2" -version = "0.10.7" +version = "0.9.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "479fb9d862239e610720565ca91403019f2f00410f1864c5aa7479b950a76ed8" +checksum = "4d58a1e1bf39749807d89cf2d98ac2dfa0ff1cb3faa38fbb64dd88ac8013d800" dependencies = [ + "block-buffer 0.9.0", "cfg-if", "cpufeatures", - "digest", + "digest 0.9.0", + "opaque-debug", ] [[package]] @@ -2596,24 +2446,18 @@ checksum = "43b2853a4d09f215c24cc5489c992ce46052d359b5109343cbafbf26bc62f8a3" [[package]] name = "signal-hook-registry" -version = "1.4.1" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8229b473baa5980ac72ef434c4415e70c4b5e71b423043adb4ba059f89c99a1" +checksum = "e51e73328dc4ac0c7ccbda3a494dfa03df1de2f46018127f60c693f2648455b0" dependencies = [ "libc", ] [[package]] name = "signature" -version = "2.1.0" +version = "1.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e1788eed21689f9cf370582dfc467ef36ed9c707f073528ddafa8d83e3b8500" - -[[package]] -name = "simd-adler32" -version = "0.3.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "238abfbb77c1915110ad968465608b68e869e0772622c9656714e73e5a1a522f" +checksum = "74233d3b3b2f6d4b006dc19dee745e73e2a6bfb6f93607cd3b02bd5b00797d7c" [[package]] name = "simple_asn1" @@ -2629,9 +2473,9 @@ dependencies = [ [[package]] name = "slab" -version = "0.4.8" +version = "0.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6528351c9bc8ab22353f9d776db39a20288e8d6c37ef8cfe3317cf875eecfc2d" +checksum = "4614a76b2a8be0058caa9dbbaf66d988527d86d003c11a94fbd335d7661edcef" dependencies = [ "autocfg", ] @@ -2644,24 +2488,14 @@ checksum = "a507befe795404456341dfab10cef66ead4c041f62b8b11bbb92bffe5d0953e0" [[package]] name = "socket2" -version = "0.4.9" +version = "0.4.7" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "64a4a911eed85daf18834cfaa86a79b7d266ff93ff5ba14005426219480ed662" +checksum = "02e2d2db9033d13a1567121ddd7a095ee144db4e1ca1b1bda3419bc0da294ebd" dependencies = [ "libc", "winapi", ] -[[package]] -name = "socket2" -version = "0.5.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2538b18701741680e0322a2302176d3253a35388e2e62f172f64f4f16605f877" -dependencies = [ - "libc", - "windows-sys 0.48.0", -] - [[package]] name = "spin" version = "0.5.2" @@ -2670,20 +2504,14 @@ checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" [[package]] name = "spki" -version = "0.7.2" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d1e996ef02c474957d681f1b05213dfb0abab947b446a62d37770b23500184a" +checksum = "67cf02bbac7a337dc36e4f5a693db6c21e7863f45070f7064577eb4367a3212b" dependencies = [ "base64ct", "der", ] -[[package]] -name = "static_assertions" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f" - [[package]] name = "subslice" version = "0.2.3" @@ -2695,26 +2523,15 @@ dependencies = [ [[package]] name = "subtle" -version = "2.5.0" +version = "2.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81cdd64d312baedb58e21336b31bc043b77e01cc99033ce76ef539f78e965ebc" +checksum = "6bdef32e8150c2a081110b42772ffe7d7c9032b606bc226c8260fd97e0976601" [[package]] name = "syn" -version = "1.0.109" +version = "1.0.103" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" -dependencies = [ - "proc-macro2", - "quote", - "unicode-ident", -] - -[[package]] -name = "syn" -version = "2.0.21" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1182caafaab7018eaea9b404afa8184c0baf42a04d5e10ae4f4843c2029c8aab" +checksum = "a864042229133ada95abf3b54fdc62ef5ccabe9515b64717bcb9a1919e59445d" dependencies = [ "proc-macro2", "quote", @@ -2723,9 +2540,9 @@ dependencies = [ [[package]] name = "sync_wrapper" -version = "0.1.2" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2047c6ded9c721764247e62cd3b03c09ffc529b2ba5b10ec482ae507a4a70160" +checksum = "20518fe4a4c9acf048008599e464deb21beeae3d3578418951a189c235a7a9a8" [[package]] name = "synchronoise" @@ -2737,32 +2554,43 @@ dependencies = [ ] [[package]] -name = "thiserror" -version = "1.0.40" +name = "synstructure" +version = "0.12.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "978c9a314bd8dc99be594bc3c175faaa9794be04a5a5e153caba6915336cebac" +checksum = "f36bdaa60a83aca3921b5259d5400cbf5e90fc51931376a9bd4a0eb79aa7210f" +dependencies = [ + "proc-macro2", + "quote", + "syn", + "unicode-xid", +] + +[[package]] +name = "thiserror" +version = "1.0.37" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "10deb33631e3c9018b9baf9dcbbc4f737320d2b576bac10f6aefa048fa407e3e" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.40" +version = "1.0.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f9456a42c5b0d803c8cd86e73dd7cc9edd429499f37a3550d286d5e86720569f" +checksum = "982d17546b47146b28f7c22e3d08465f6b8903d0ea13c1660d9d84a6e7adcdbb" dependencies = [ "proc-macro2", "quote", - "syn 
2.0.21", + "syn", ] [[package]] name = "thread_local" -version = "1.1.7" +version = "1.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3fdd6f064ccff2d6567adcb3873ca630700f00b5ad3f060c25b5dcfd9a4ce152" +checksum = "5516c27b78311c50bf42c071425c560ac799b11c30b31f87e3081965fe5e0180" dependencies = [ - "cfg-if", "once_cell", ] @@ -2801,11 +2629,12 @@ dependencies = [ [[package]] name = "tikv-jemalloc-sys" -version = "0.5.3+5.3.0-patched" +version = "0.5.2+5.3.0-patched" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a678df20055b43e57ef8cddde41cdfda9a3c1a060b67f4c5836dfb1d78543ba8" +checksum = "ec45c14da997d0925c7835883e4d5c181f196fa142f8c19d7643d1e9af2592c3" dependencies = [ "cc", + "fs_extra", "libc", ] @@ -2821,9 +2650,9 @@ dependencies = [ [[package]] name = "time" -version = "0.3.22" +version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea9e1b3cf1243ae005d9e74085d4d542f3125458f3a81af210d901dcd7411efd" +checksum = "a561bf4617eebd33bca6434b988f39ed798e527f51a1e797d0ee4f61c0a38376" dependencies = [ "itoa", "serde", @@ -2833,15 +2662,15 @@ dependencies = [ [[package]] name = "time-core" -version = "0.1.1" +version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7300fbefb4dadc1af235a9cef3737cea692a9d97e1b9cbcd4ebdae6f8868e6fb" +checksum = "2e153e1f1acaef8acc537e68b44906d2db6436e2b35ac2c6b42640fff91f00fd" [[package]] name = "time-macros" -version = "0.2.9" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "372950940a5f07bf38dbe211d7283c9e6d7327df53794992d293e534c733d09b" +checksum = "d967f99f534ca7e495c575c62638eebc2898a8c84c119b89e250477bc4ba16b2" dependencies = [ "time-core", ] @@ -2857,37 +2686,38 @@ dependencies = [ [[package]] name = "tinyvec_macros" -version = "0.1.1" +version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" +checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c" [[package]] name = "tokio" -version = "1.28.2" +version = "1.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94d7b1cfd2aa4011f2de74c2c4c63665e27a71006b0a192dcd2710272e73dfa2" +checksum = "d76ce4a75fb488c605c54bf610f221cea8b0dafb53333c1a67e8ee199dcd2ae3" dependencies = [ "autocfg", "bytes", "libc", + "memchr", "mio", "num_cpus", "pin-project-lite", "signal-hook-registry", - "socket2 0.4.9", + "socket2", "tokio-macros", - "windows-sys 0.48.0", + "winapi", ] [[package]] name = "tokio-macros" -version = "2.1.0" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "630bdcf245f78637c13ec01ffae6187cca34625e8c63150d424b59e55af2675e" +checksum = "9724f9a975fb987ef7a3cd9be0350edcbe130698af5b8f7a631e23d42d052484" dependencies = [ "proc-macro2", "quote", - "syn 2.0.21", + "syn", ] [[package]] @@ -2896,21 +2726,11 @@ version = "0.23.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c43ee83903113e03984cb9e5cebe6c04a5116269e900e3ddba8f068a62adda59" dependencies = [ - "rustls 0.20.8", + "rustls", "tokio", "webpki", ] -[[package]] -name = "tokio-rustls" -version = "0.24.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c28327cf380ac148141087fbfb9de9d7bd4e84ab5d2c28fbc911d753de8a7081" -dependencies = [ - "rustls 0.21.2", - "tokio", -] - [[package]] name = "tokio-socks" version 
= "0.5.1" @@ -2925,9 +2745,9 @@ dependencies = [ [[package]] name = "tokio-stream" -version = "0.1.14" +version = "0.1.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "397c988d37662c7dda6d2208364a706264bf3d6138b11d436cbac0ad38832842" +checksum = "d660770404473ccd7bc9f8b28494a811bc18542b915c0855c51e8f419d5223ce" dependencies = [ "futures-core", "pin-project-lite", @@ -2936,9 +2756,9 @@ dependencies = [ [[package]] name = "tokio-util" -version = "0.7.8" +version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "806fe8c2c87eccc8b3267cbae29ed3ab2d0bd37fca70ab622e46aaa9375ddb7d" +checksum = "0bb2e075f03b3d66d8d8785356224ba688d2906a371015e225beeb65ca92c740" dependencies = [ "bytes", "futures-core", @@ -2950,36 +2770,11 @@ dependencies = [ [[package]] name = "toml" -version = "0.7.5" +version = "0.5.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ebafdf5ad1220cb59e7d17cf4d2c72015297b75b19a10472f99b89225089240" +checksum = "8d82e1a7758622a465f8cee077614c73484dac5b836c02ff6a40d5d1010324d7" dependencies = [ "serde", - "serde_spanned", - "toml_datetime", - "toml_edit", -] - -[[package]] -name = "toml_datetime" -version = "0.6.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7cda73e2f1397b1262d6dfdcef8aafae14d1de7748d66822d3bfeeb6d03e5e4b" -dependencies = [ - "serde", -] - -[[package]] -name = "toml_edit" -version = "0.19.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "266f016b7f039eec8a1a80dfe6156b633d208b9fccca5e4db1d6775b0c4e34a7" -dependencies = [ - "indexmap 2.0.0", - "serde", - "serde_spanned", - "toml_datetime", - "winnow", ] [[package]] @@ -2992,6 +2787,7 @@ dependencies = [ "futures-util", "pin-project", "pin-project-lite", + "tokio", "tower-layer", "tower-service", "tracing", @@ -2999,11 +2795,12 @@ dependencies = [ [[package]] name = "tower-http" -version = "0.4.1" +version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8bd22a874a2d0b70452d5597b12c537331d49060824a95f49f108994f94aa4c" +checksum = "3c530c8675c1dbf98facee631536fa116b5fb6382d7dd6dc1b118d970eafe3ba" dependencies = [ - "bitflags 2.3.2", + "async-compression", + "bitflags", "bytes", "futures-core", "futures-util", @@ -3011,6 +2808,8 @@ dependencies = [ "http-body", "http-range-header", "pin-project-lite", + "tokio", + "tokio-util", "tower", "tower-layer", "tower-service", @@ -3044,20 +2843,20 @@ dependencies = [ [[package]] name = "tracing-attributes" -version = "0.1.26" +version = "0.1.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f4f31f56159e98206da9efd823404b79b6ef3143b4a7ab76e67b1751b25a4ab" +checksum = "4017f8f45139870ca7e672686113917c71c7a6e02d4924eda67186083c03081a" dependencies = [ "proc-macro2", "quote", - "syn 2.0.21", + "syn", ] [[package]] name = "tracing-core" -version = "0.1.31" +version = "0.1.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0955b8137a1df6f1a2e9a37d8a6656291ff0297c1a97c24e0d8425fe2312f79a" +checksum = "24eb03ba0eab1fd845050058ce5e616558e8f8d8fca633e6b163fe25c797213a" dependencies = [ "once_cell", "valuable", @@ -3085,25 +2884,11 @@ dependencies = [ "tracing-core", ] -[[package]] -name = "tracing-opentelemetry" -version = "0.18.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21ebb87a95ea13271332df069020513ab70bdb5637ca42d6e492dc3bbbad48de" -dependencies = [ - "once_cell", - 
"opentelemetry", - "tracing", - "tracing-core", - "tracing-log", - "tracing-subscriber", -] - [[package]] name = "tracing-subscriber" -version = "0.3.17" +version = "0.3.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30a651bc37f915e81f087d86e62a18eec5f79550c7faff886f7090b4ea757c77" +checksum = "a6176eae26dd70d0c919749377897b54a9276bd7061339665dd68777926b5a70" dependencies = [ "matchers", "nu-ansi-term", @@ -3133,7 +2918,7 @@ dependencies = [ "idna 0.2.3", "ipnet", "lazy_static", - "rand", + "rand 0.8.5", "smallvec", "thiserror", "tinyvec", @@ -3164,42 +2949,36 @@ dependencies = [ [[package]] name = "try-lock" -version = "0.2.4" +version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3528ecfd12c466c6f163363caf2d02a71161dd5e1cc6ae7b34207ea2d42d81ed" +checksum = "59547bce71d9c38b83d9c0e92b6066c4253371f15005def0c30d9657f50c7642" [[package]] name = "typenum" -version = "1.16.0" +version = "1.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "497961ef93d974e23eb6f433eb5fe1b7930b659f06d12dec6fc44a8f554c0bba" - -[[package]] -name = "typewit" -version = "1.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4061a10d4d8f3081a8ccc025182afd8434302d8d4b4503ec6d8510d09df08c2d" +checksum = "dcf81ac59edc17cc8697ff311e8f5ef2d99fcbd9817b34cec66f90b6c3dfd987" [[package]] name = "uncased" -version = "0.9.9" +version = "0.9.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b9bc53168a4be7402ab86c3aad243a84dd7381d09be0eddc81280c1da95ca68" +checksum = "09b01702b0fd0b3fadcf98e098780badda8742d4f4a7676615cad90e8ac73622" dependencies = [ "version_check", ] [[package]] name = "unicode-bidi" -version = "0.3.13" +version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "92888ba5573ff080736b3648696b70cafad7d250551175acbaa4e0385b3e1460" +checksum = "099b7128301d285f79ddd55b9a83d5e6b9e97c92e0ea0daebee7263e932de992" [[package]] name = "unicode-ident" -version = "1.0.9" +version = "1.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b15811caf2415fb889178633e7724bad2509101cde276048e013b9def5e51fa0" +checksum = "6ceab39d59e4c9499d4e5a8ee0e2735b891bb7308ac83dfb4e80cad195c9f6f3" [[package]] name = "unicode-normalization" @@ -3211,10 +2990,16 @@ dependencies = [ ] [[package]] -name = "unsafe-libyaml" -version = "0.2.8" +name = "unicode-xid" +version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1865806a559042e51ab5414598446a5871b561d21b6764f2eabb0dd481d880a6" +checksum = "f962df74c8c05a667b5ee8bcf162993134c104e96440b663c8daa176dc772d8c" + +[[package]] +name = "unsafe-libyaml" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c1e5fa573d8ac5f1a856f8d7be41d390ee973daf97c806b2c1a465e4e1406e68" [[package]] name = "unsigned-varint" @@ -3230,22 +3015,22 @@ checksum = "a156c684c91ea7d62626509bce3cb4e1d9ed5c4d978f7b4352658f96a4c26b4a" [[package]] name = "url" -version = "2.4.0" +version = "2.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50bff7831e19200a85b17131d085c25d7811bc4e186efdaf54bbd132994a88cb" +checksum = "0d68c799ae75762b8c3fe375feb6600ef5602c883c5d21eb51c09f22b83c4643" dependencies = [ "form_urlencoded", - "idna 0.4.0", + "idna 0.3.0", "percent-encoding", ] [[package]] name = "uuid" -version = "1.3.4" +version = "1.2.2" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "0fa2982af2eec27de306107c027578ff7f423d65f7250e40ce0fea8f45248b81" +checksum = "422ee0de9031b5b948b97a8fc04e3aa35230001a722ddd27943e0be31564ce4c" dependencies = [ - "getrandom", + "getrandom 0.2.8", ] [[package]] @@ -3268,13 +3053,20 @@ checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" [[package]] name = "want" -version = "0.3.1" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bfa7760aed19e106de2c7c0b581b509f2f25d3dacaf737cb82ac61bc6d760b0e" +checksum = "1ce8a968cb1cd110d136ff8b819a556d6fb6d919363c61534f6860c7eb172ba0" dependencies = [ + "log", "try-lock", ] +[[package]] +name = "wasi" +version = "0.9.0+wasi-snapshot-preview1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519" + [[package]] name = "wasi" version = "0.11.0+wasi-snapshot-preview1" @@ -3283,9 +3075,9 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasm-bindgen" -version = "0.2.87" +version = "0.2.83" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7706a72ab36d8cb1f80ffbf0e071533974a60d0a308d01a5d0375bf60499a342" +checksum = "eaf9f5aceeec8be17c128b2e93e031fb8a4d469bb9c4ae2d7dc1888b26887268" dependencies = [ "cfg-if", "wasm-bindgen-macro", @@ -3293,24 +3085,24 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.87" +version = "0.2.83" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ef2b6d3c510e9625e5fe6f509ab07d66a760f0885d858736483c32ed7809abd" +checksum = "4c8ffb332579b0557b52d268b91feab8df3615f265d5270fec2a8c95b17c1142" dependencies = [ "bumpalo", "log", "once_cell", "proc-macro2", "quote", - "syn 2.0.21", + "syn", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-futures" -version = "0.4.37" +version = "0.4.33" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c02dbc21516f9f1f04f187958890d7e6026df8d16540b7ad9492bc34a67cea03" +checksum = "23639446165ca5a5de86ae1d8896b737ae80319560fbaa4c2887b7da6e7ebd7d" dependencies = [ "cfg-if", "js-sys", @@ -3320,9 +3112,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.87" +version = "0.2.83" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dee495e55982a3bd48105a7b947fd2a9b4a8ae3010041b9e0faab3f9cd028f1d" +checksum = "052be0f94026e6cbc75cdefc9bae13fd6052cdcaf532fa6c45e7ae33a1e6c810" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -3330,28 +3122,28 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.87" +version = "0.2.83" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "54681b18a46765f095758388f2d0cf16eb8d4169b639ab575a8f5693af210c7b" +checksum = "07bc0c051dc5f23e307b13285f9d75df86bfdf816c5721e573dec1f9b8aa193c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.21", + "syn", "wasm-bindgen-backend", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.87" +version = "0.2.83" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca6ad05a4870b2bf5fe995117d3728437bd27d7cd5f06f13c17443ef369775a1" +checksum = "1c38c045535d93ec4f0b4defec448e4291638ee608530863b1e2ba115d4fff7f" [[package]] name = "web-sys" -version = "0.3.64" +version = "0.3.60" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "9b85cbef8c220a6abc02aefd892dfc0fc23afb1c6a426316ec33253a3877249b" +checksum = "bcda906d8be16e728fd5adc5b729afad4e444e106ab28cd1c7256e54fa61510f" dependencies = [ "js-sys", "wasm-bindgen", @@ -3375,9 +3167,9 @@ checksum = "9193164d4de03a926d909d3bc7c30543cecb35400c02114792c2cae20d5e2dbb" [[package]] name = "widestring" -version = "1.0.2" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "653f141f39ec16bba3c5abe400a0c60da7468261cc2cbf36805022876bc721a8" +checksum = "17882f045410753661207383517a6f62ec3dbeb6a4ed2acce01f0728238d1983" [[package]] name = "wildmatch" @@ -3409,135 +3201,103 @@ checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" [[package]] name = "windows-sys" -version = "0.42.0" +version = "0.36.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a3e1820f08b8513f676f7ab6c1f99ff312fb97b553d30ff4dd86f9f15728aa7" +checksum = "ea04155a16a59f9eab786fe12a4a450e75cdb175f9e0d80da1e17db09f55b8d2" dependencies = [ - "windows_aarch64_gnullvm 0.42.2", - "windows_aarch64_msvc 0.42.2", - "windows_i686_gnu 0.42.2", - "windows_i686_msvc 0.42.2", - "windows_x86_64_gnu 0.42.2", - "windows_x86_64_gnullvm 0.42.2", - "windows_x86_64_msvc 0.42.2", + "windows_aarch64_msvc 0.36.1", + "windows_i686_gnu 0.36.1", + "windows_i686_msvc 0.36.1", + "windows_x86_64_gnu 0.36.1", + "windows_x86_64_msvc 0.36.1", ] [[package]] name = "windows-sys" -version = "0.48.0" +version = "0.42.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9" +checksum = "5a3e1820f08b8513f676f7ab6c1f99ff312fb97b553d30ff4dd86f9f15728aa7" dependencies = [ - "windows-targets", -] - -[[package]] -name = "windows-targets" -version = "0.48.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b1eb6f0cd7c80c79759c929114ef071b87354ce476d9d94271031c0497adfd5" -dependencies = [ - "windows_aarch64_gnullvm 0.48.0", - "windows_aarch64_msvc 0.48.0", - "windows_i686_gnu 0.48.0", - "windows_i686_msvc 0.48.0", - "windows_x86_64_gnu 0.48.0", - "windows_x86_64_gnullvm 0.48.0", - "windows_x86_64_msvc 0.48.0", + "windows_aarch64_gnullvm", + "windows_aarch64_msvc 0.42.0", + "windows_i686_gnu 0.42.0", + "windows_i686_msvc 0.42.0", + "windows_x86_64_gnu 0.42.0", + "windows_x86_64_gnullvm", + "windows_x86_64_msvc 0.42.0", ] [[package]] name = "windows_aarch64_gnullvm" -version = "0.42.2" +version = "0.42.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "597a5118570b68bc08d8d59125332c54f1ba9d9adeedeef5b99b02ba2b0698f8" - -[[package]] -name = "windows_aarch64_gnullvm" -version = "0.48.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "91ae572e1b79dba883e0d315474df7305d12f569b400fcf90581b06062f7e1bc" +checksum = "41d2aa71f6f0cbe00ae5167d90ef3cfe66527d6f613ca78ac8024c3ccab9a19e" [[package]] name = "windows_aarch64_msvc" -version = "0.42.2" +version = "0.36.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e08e8864a60f06ef0d0ff4ba04124db8b0fb3be5776a5cd47641e942e58c4d43" +checksum = "9bb8c3fd39ade2d67e9874ac4f3db21f0d710bee00fe7cab16949ec184eeaa47" [[package]] name = "windows_aarch64_msvc" -version = "0.48.0" +version = "0.42.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b2ef27e0d7bdfcfc7b868b317c1d32c641a6fe4629c171b8928c7b08d98d7cf3" +checksum = 
"dd0f252f5a35cac83d6311b2e795981f5ee6e67eb1f9a7f64eb4500fbc4dcdb4" [[package]] name = "windows_i686_gnu" -version = "0.42.2" +version = "0.36.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c61d927d8da41da96a81f029489353e68739737d3beca43145c8afec9a31a84f" +checksum = "180e6ccf01daf4c426b846dfc66db1fc518f074baa793aa7d9b9aaeffad6a3b6" [[package]] name = "windows_i686_gnu" -version = "0.48.0" +version = "0.42.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "622a1962a7db830d6fd0a69683c80a18fda201879f0f447f065a3b7467daa241" +checksum = "fbeae19f6716841636c28d695375df17562ca208b2b7d0dc47635a50ae6c5de7" [[package]] name = "windows_i686_msvc" -version = "0.42.2" +version = "0.36.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44d840b6ec649f480a41c8d80f9c65108b92d89345dd94027bfe06ac444d1060" +checksum = "e2e7917148b2812d1eeafaeb22a97e4813dfa60a3f8f78ebe204bcc88f12f024" [[package]] name = "windows_i686_msvc" -version = "0.48.0" +version = "0.42.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4542c6e364ce21bf45d69fdd2a8e455fa38d316158cfd43b3ac1c5b1b19f8e00" +checksum = "84c12f65daa39dd2babe6e442988fc329d6243fdce47d7d2d155b8d874862246" [[package]] name = "windows_x86_64_gnu" -version = "0.42.2" +version = "0.36.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8de912b8b8feb55c064867cf047dda097f92d51efad5b491dfb98f6bbb70cb36" +checksum = "4dcd171b8776c41b97521e5da127a2d86ad280114807d0b2ab1e462bc764d9e1" [[package]] name = "windows_x86_64_gnu" -version = "0.48.0" +version = "0.42.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca2b8a661f7628cbd23440e50b05d705db3686f894fc9580820623656af974b1" +checksum = "bf7b1b21b5362cbc318f686150e5bcea75ecedc74dd157d874d754a2ca44b0ed" [[package]] name = "windows_x86_64_gnullvm" -version = "0.42.2" +version = "0.42.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26d41b46a36d453748aedef1486d5c7a85db22e56aff34643984ea85514e94a3" - -[[package]] -name = "windows_x86_64_gnullvm" -version = "0.48.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7896dbc1f41e08872e9d5e8f8baa8fdd2677f29468c4e156210174edc7f7b953" +checksum = "09d525d2ba30eeb3297665bd434a54297e4170c7f1a44cad4ef58095b4cd2028" [[package]] name = "windows_x86_64_msvc" -version = "0.42.2" +version = "0.36.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9aec5da331524158c6d1a4ac0ab1541149c0b9505fde06423b02f5ef0106b9f0" +checksum = "c811ca4a8c853ef420abd8592ba53ddbbac90410fab6903b3e79972a631f7680" [[package]] name = "windows_x86_64_msvc" -version = "0.48.0" +version = "0.42.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a515f5799fe4961cb532f983ce2b23082366b898e52ffbce459c86f67c8378a" - -[[package]] -name = "winnow" -version = "0.4.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca0ace3845f0d96209f0375e6d367e3eb87eb65d27d445bdc9f1843a26f39448" -dependencies = [ - "memchr", -] +checksum = "f40009d85759725a34da6d89a94e63d7bdc50a862acf0dbc7c8e488f1edcb6f5" [[package]] name = "winreg" @@ -3550,12 +3310,11 @@ dependencies = [ [[package]] name = "winreg" -version = "0.50.0" +version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "524e57b2c537c0f9b1e69f1965311ec12182b4122e45035b1508cd24d2adadb1" +checksum = 
"80d0f4e272c85def139476380b12f9ac60926689dd2e01d4923222f40580869d" dependencies = [ - "cfg-if", - "windows-sys 0.48.0", + "winapi", ] [[package]] @@ -3566,9 +3325,24 @@ checksum = "09041cd90cf85f7f8b2df60c646f853b7f535ce68f85244eb6731cf89fa498ec" [[package]] name = "zeroize" -version = "1.6.0" +version = "1.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a0956f1ba7c7909bfb66c2e9e4124ab6f6482560f6628b5aaeba39207c9aad9" +checksum = "c394b5bd0c6f669e7275d9c20aa90ae064cb22e75a1cad54e1b34088034b149f" +dependencies = [ + "zeroize_derive", +] + +[[package]] +name = "zeroize_derive" +version = "1.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f8f187641dad4f680d25c4bfc4225b418165984179f26ca76ec4fb6441d3a17" +dependencies = [ + "proc-macro2", + "quote", + "syn", + "synstructure", +] [[package]] name = "zigzag" @@ -3578,14 +3352,3 @@ checksum = "70b40401a28d86ce16a330b863b86fd7dbee4d7c940587ab09ab8c019f9e3fdf" dependencies = [ "num-traits", ] - -[[package]] -name = "zstd-sys" -version = "2.0.8+zstd.1.5.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5556e6ee25d32df2586c098bbfa278803692a20d0ab9565e049480d52707ec8c" -dependencies = [ - "cc", - "libc", - "pkg-config", -] diff --git a/Cargo.toml b/Cargo.toml index ff1785e4..ae519459 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -6,109 +6,95 @@ authors = ["timokoesters "] homepage = "https://conduit.rs" repository = "https://gitlab.com/famedly/conduit" readme = "README.md" -version = "0.7.0-alpha" +version = "0.4.0-next" +rust-version = "1.64" edition = "2021" -# When changing this, make sure to update the `flake.lock` file by running -# `nix flake update`. If you don't have Nix installed or otherwise don't know -# how to do this, ping `@charles:computer.surgery` or `@dusk:gaze.systems` in -# the matrix room. 
-rust-version = "1.70.0" - # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] # Web framework -axum = { version = "0.6.18", default-features = false, features = ["form", "headers", "http1", "http2", "json", "matched-path"], optional = true } -axum-server = { version = "0.5.1", features = ["tls-rustls"] } -tower = { version = "0.4.13", features = ["util"] } -tower-http = { version = "0.4.1", features = ["add-extension", "cors", "sensitive-headers", "trace", "util"] } +axum = { version = "0.5.17", default-features = false, features = ["form", "headers", "http1", "http2", "json", "matched-path"], optional = true } +axum-server = { version = "0.4.0", features = ["tls-rustls"] } +tower = { version = "0.4.8", features = ["util"] } +tower-http = { version = "0.3.4", features = ["add-extension", "cors", "compression-full", "sensitive-headers", "trace", "util"] } # Used for matrix spec type definitions and helpers #ruma = { version = "0.4.0", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } -ruma = { git = "https://github.com/ruma/ruma", rev = "b4853aa8fa5e3a24e3689fc88044de9915f6ab67", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-msc2448", "unstable-msc3575", "unstable-exhaustive-types", "ring-compat", "unstable-unspecified" ] } -#ruma = { git = "https://github.com/timokoesters/ruma", rev = "4ec9c69bb7e09391add2382b3ebac97b6e8f4c64", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-msc2448", "unstable-msc3575", "unstable-exhaustive-types", "ring-compat", "unstable-unspecified" ] } -#ruma = { path = "../ruma/crates/ruma", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-msc2448", "unstable-msc3575", "unstable-exhaustive-types", "ring-compat", "unstable-unspecified" ] } +ruma = { git = "https://github.com/ruma/ruma", rev = "2bd5c131f49b2239750c39ed63b623cd5a01c965", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-msc2448", "unstable-exhaustive-types", "ring-compat", "unstable-unspecified" ] } +#ruma = { git = "https://github.com/timokoesters/ruma", rev = "50c1db7e0a3a21fc794b0cce3b64285a4c750c71", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } +#ruma = { path = "../ruma/crates/ruma", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } # Async runtime and utilities -tokio = { version = "1.28.1", features = ["fs", "macros", "signal", "sync"] } +tokio = { version = "1.11.0", features = ["fs", "macros", "signal", "sync"] } # Used for storing data permanently #sled = { version = "0.34.7", features = ["compression", "no_metrics"], optional = true } #sled = { git = "https://github.com/spacejam/sled.git", rev = "e4640e0773595229f398438886f19bca6f7326a2", features = ["compression"] } -persy = { version = "1.4.4", optional = true, features = ["background_ops"] } +persy = { version = "1.0.0", optional = true, features = ["background_ops"] } # Used for the http request / response body 
type for Ruma endpoints used with reqwest -bytes = "1.4.0" -http = "0.2.9" +bytes = "1.1.0" +http = "0.2.4" # Used to find data directory for default db path -directories = "4.0.1" +directories = "4.0.0" # Used for ruma wrapper -serde_json = { version = "1.0.96", features = ["raw_value"] } +serde_json = { version = "1.0.68", features = ["raw_value"] } # Used for appservice registration files -serde_yaml = "0.9.21" +serde_yaml = "0.9.13" # Used for pdu definition -serde = { version = "1.0.163", features = ["rc"] } +serde = { version = "1.0.130", features = ["rc"] } # Used for secure identifiers -rand = "0.8.5" +rand = "0.8.4" # Used to hash passwords rust-argon2 = "1.0.0" # Used to send requests reqwest = { default-features = false, features = ["rustls-tls-native-roots", "socks"], git = "https://github.com/timokoesters/reqwest", rev = "57b7cf4feb921573dfafad7d34b9ac6e44ead0bd" } # Used for conduit::Error type -thiserror = "1.0.40" +thiserror = "1.0.29" # Used to generate thumbnails for images -image = { version = "0.24.6", default-features = false, features = ["jpeg", "png", "gif"] } +image = { version = "0.24.4", default-features = false, features = ["jpeg", "png", "gif"] } # Used to encode server public key -base64 = "0.21.2" +base64 = "0.13.0" # Used when hashing the state ring = "0.16.20" # Used when querying the SRV record of other servers trust-dns-resolver = "0.22.0" # Used to find matching events for appservices -regex = "1.8.1" +regex = "1.5.4" # jwt jsonwebtokens -jsonwebtoken = "8.3.0" +jsonwebtoken = "8.1.1" # Performance measurements -tracing = { version = "0.1.37", features = [] } -tracing-subscriber = { version = "0.3.17", features = ["env-filter"] } +tracing = { version = "0.1.27", features = [] } +tracing-subscriber = { version = "0.3.16", features = ["env-filter"] } tracing-flame = "0.2.0" opentelemetry = { version = "0.18.0", features = ["rt-tokio"] } opentelemetry-jaeger = { version = "0.17.0", features = ["rt-tokio"] } -tracing-opentelemetry = "0.18.0" lru-cache = "0.1.2" -rusqlite = { version = "0.29.0", optional = true, features = ["bundled"] } +rusqlite = { version = "0.28.0", optional = true, features = ["bundled"] } parking_lot = { version = "0.12.1", optional = true } -crossbeam = { version = "0.8.2", optional = true } -num_cpus = "1.15.0" +crossbeam = { version = "0.8.1", optional = true } +num_cpus = "1.13.0" threadpool = "1.8.1" heed = { git = "https://github.com/timokoesters/heed.git", rev = "f6f825da7fb2c758867e05ad973ef800a6fe1d5d", optional = true } -# Used for ruma wrapper -serde_html_form = "0.2.0" +rocksdb = { version = "0.17.0", default-features = true, features = ["multi-threaded-cf", "zstd"], optional = true } -rocksdb = { version = "0.21.0", default-features = true, features = ["multi-threaded-cf", "zstd"], optional = true } - -thread_local = "1.1.7" +thread_local = "1.1.3" # used for TURN server authentication hmac = "0.12.1" -sha-1 = "0.10.1" +sha-1 = "0.10.0" # used for conduit's CLI and admin room command parsing -clap = { version = "4.3.0", default-features = false, features = ["std", "derive", "help", "usage", "error-context"] } -futures-util = { version = "0.3.28", default-features = false } +clap = { version = "4.0.11", default-features = false, features = ["std", "derive", "help", "usage", "error-context"] } +futures-util = { version = "0.3.17", default-features = false } # Used for reading the configuration from conduit.toml & environment variables -figment = { version = "0.10.8", features = ["env", "toml"] } +figment = { version = 
"0.10.6", features = ["env", "toml"] } tikv-jemalloc-ctl = { version = "0.5.0", features = ["use_std"], optional = true } tikv-jemallocator = { version = "0.5.0", features = ["unprefixed_malloc_on_supported_platforms"], optional = true } lazy_static = "1.4.0" -async-trait = "0.1.68" - -sd-notify = { version = "0.4.1", optional = true } - -[target.'cfg(unix)'.dependencies] -nix = { version = "0.26.2", features = ["resource"] } +async-trait = "0.1.57" [features] -default = ["conduit_bin", "backend_sqlite", "backend_rocksdb", "systemd"] +default = ["conduit_bin", "backend_sqlite", "backend_rocksdb", "jemalloc"] #backend_sled = ["sled"] backend_persy = ["persy", "parking_lot"] backend_sqlite = ["sqlite"] @@ -117,7 +103,6 @@ backend_rocksdb = ["rocksdb"] jemalloc = ["tikv-jemalloc-ctl", "tikv-jemallocator"] sqlite = ["rusqlite", "parking_lot", "tokio/signal"] conduit_bin = ["axum"] -systemd = ["sd-notify"] [[bin]] name = "conduit" @@ -140,7 +125,7 @@ instead of a server that has high scalability.""" section = "net" priority = "optional" assets = [ - ["debian/README.md", "usr/share/doc/matrix-conduit/README.Debian", "644"], + ["debian/README.Debian", "usr/share/doc/matrix-conduit/", "644"], ["README.md", "usr/share/doc/matrix-conduit/", "644"], ["target/release/conduit", "usr/sbin/matrix-conduit", "755"], ] diff --git a/Cross.toml b/Cross.toml new file mode 100644 index 00000000..5d99a358 --- /dev/null +++ b/Cross.toml @@ -0,0 +1,23 @@ +[build.env] +# CI uses an S3 endpoint to store sccache artifacts, so their config needs to +# be available in the cross container as well +passthrough = [ + "RUSTC_WRAPPER", + "AWS_ACCESS_KEY_ID", + "AWS_SECRET_ACCESS_KEY", + "SCCACHE_BUCKET", + "SCCACHE_ENDPOINT", + "SCCACHE_S3_USE_SSL", +] + +[target.aarch64-unknown-linux-musl] +image = "registry.gitlab.com/jfowl/conduit-containers/rust-cross-aarch64-unknown-linux-musl:latest" + +[target.arm-unknown-linux-musleabihf] +image = "registry.gitlab.com/jfowl/conduit-containers/rust-cross-arm-unknown-linux-musleabihf:latest" + +[target.armv7-unknown-linux-musleabihf] +image = "registry.gitlab.com/jfowl/conduit-containers/rust-cross-armv7-unknown-linux-musleabihf:latest" + +[target.x86_64-unknown-linux-musl] +image = "registry.gitlab.com/jfowl/conduit-containers/rust-cross-x86_64-unknown-linux-musl@sha256:b6d689e42f0236c8a38b961bca2a12086018b85ed20e0826310421daf182e2bb" diff --git a/DEPLOY.md b/DEPLOY.md index cb318eee..c484823b 100644 --- a/DEPLOY.md +++ b/DEPLOY.md @@ -2,15 +2,15 @@ > ## Getting help > -> If you run into any problems while setting up Conduit, write an email to `conduit@koesters.xyz`, ask us +> If you run into any problems while setting up Conduit, write an email to `timo@koesters.xyz`, ask us > in `#conduit:fachschaften.org` or [open an issue on GitLab](https://gitlab.com/famedly/conduit/-/issues/new). ## Installing Conduit -Although you might be able to compile Conduit for Windows, we do recommend running it on a Linux server. We therefore +Although you might be able to compile Conduit for Windows, we do recommend running it on a linux server. We therefore only offer Linux binaries. -You may simply download the binary that fits your machine. Run `uname -m` to see what you need. Now copy the appropriate url: +You may simply download the binary that fits your machine. Run `uname -m` to see what you need. 
Now copy the right url: | CPU Architecture | Download stable version | Download development version | | ------------------------------------------- | --------------------------------------------------------------- | ----------------------------------------------------------- | @@ -19,7 +19,7 @@ You may simply download the binary that fits your machine. Run `uname -m` to see | armv8 / aarch64 | [Binary][armv8-glibc-master] / [.deb][armv8-glibc-master-deb] | [Binary][armv8-glibc-next] / [.deb][armv8-glibc-next-deb] | These builds were created on and linked against the glibc version shipped with Debian bullseye. -If you use a system with an older glibc version (e.g. RHEL8), you might need to compile Conduit yourself. +If you use a system with an older glibc version, you might need to compile Conduit yourself. [x84_64-glibc-master]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/build-output/linux_amd64/conduit?job=docker:master [armv7-glibc-master]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/build-output/linux_arm_v7/conduit?job=docker:master @@ -39,49 +39,27 @@ $ sudo wget -O /usr/local/bin/matrix-conduit $ sudo chmod +x /usr/local/bin/matrix-conduit ``` -Alternatively, you may compile the binary yourself. First, install any dependencies: +Alternatively, you may compile the binary yourself ```bash -# Debian $ sudo apt install libclang-dev build-essential - -# RHEL -$ sudo dnf install clang ``` -Then, `cd` into the source tree of conduit-next and run: + ```bash $ cargo build --release ``` -If you want to cross compile Conduit to another architecture, read the guide below. - -
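A note on the `[features]` hunk in the `Cargo.toml` diff above: on the `+` side the default feature set includes `jemalloc`, backed by the `tikv-jemallocator` dependency, so a local `cargo build --release` as shown here will link against jemalloc. The wiring for that is conventionally a single `#[global_allocator]` item behind the feature flag; the following is a minimal sketch under that assumption (the `GLOBAL` name and the `msvc` guard are illustrative, not necessarily Conduit's exact gating):

```rust
// Sketch only: route heap allocations through jemalloc when the
// `jemalloc` cargo feature is enabled (and the target is not MSVC,
// where tikv-jemallocator is not supported).
#[cfg(all(feature = "jemalloc", not(target_env = "msvc")))]
#[global_allocator]
static GLOBAL: tikv_jemallocator::Jemalloc = tikv_jemallocator::Jemalloc;

fn main() {
    // With the feature active, this allocation already goes through jemalloc.
    let greeting = String::from("allocator sketch");
    println!("{greeting}");
}
```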
-Cross compilation - -As easiest way to compile conduit for another platform [cross-rs](https://github.com/cross-rs/cross) is recommended, so install it first. - -In order to use RockDB as storage backend append `-latomic` to linker flags. - -For example, to build a binary for Raspberry Pi Zero W (ARMv6) you need `arm-unknown-linux-gnueabihf` as compilation -target. - -```bash -git clone https://gitlab.com/famedly/conduit.git -cd conduit -export RUSTFLAGS='-C link-arg=-lgcc -Clink-arg=-latomic -Clink-arg=-static-libgcc' -cross build --release --no-default-features --features conduit_bin,backend_rocksdb,jemalloc --target=arm-unknown-linux-gnueabihf -``` -
+If you want to cross compile Conduit to another architecture, read the [Cross-Compile Guide](cross/README.md). ## Adding a Conduit user While Conduit can run as any user it is usually better to use dedicated users for different services. This also allows you to make sure that the file permissions are correctly set up. -In Debian or RHEL, you can use this command to create a Conduit user: +In Debian you can use this command to create a Conduit user: ```bash -sudo adduser --system conduit --group --disabled-login --no-create-home +sudo adduser --system conduit --no-create-home ``` ## Forwarding ports in the firewall or the router @@ -90,19 +68,6 @@ Conduit uses the ports 443 and 8448 both of which need to be open in the firewal If Conduit runs behind a router or in a container and has a different public IP address than the host system these public ports need to be forwarded directly or indirectly to the port mentioned in the config. -## Optional: Avoid port 8448 - -If Conduit runs behind Cloudflare reverse proxy, which doesn't support port 8448 on free plans, [delegation](https://matrix-org.github.io/synapse/latest/delegate.html) can be set up to have federation traffic routed to port 443: -```apache -# .well-known delegation on Apache - - ErrorDocument 200 '{"m.server": "your.server.name:443"}' - Header always set Content-Type application/json - Header always set Access-Control-Allow-Origin * - -``` -[SRV DNS record](https://spec.matrix.org/latest/server-server-api/#resolving-server-names) delegation is also [possible](https://www.cloudflare.com/en-gb/learning/dns/dns-records/dns-srv-record/). - ## Setting up a systemd service Now we'll set up a systemd service for Conduit, so it's easy to start/stop Conduit and set it to autostart when your @@ -117,7 +82,7 @@ After=network.target [Service] Environment="CONDUIT_CONFIG=/etc/matrix-conduit/conduit.toml" User=conduit -Group=conduit +Group=nogroup Restart=always ExecStart=/usr/local/bin/matrix-conduit @@ -172,9 +137,7 @@ max_request_size = 20_000_000 # in bytes allow_registration = true allow_federation = true -allow_check_for_updates = true -# Server to get public keys from. You probably shouldn't change this trusted_servers = ["matrix.org"] #max_concurrent_requests = 100 # How many requests Conduit sends to other servers at the same time @@ -187,7 +150,7 @@ address = "127.0.0.1" # This makes sure Conduit can only be reached using the re ## Setting the correct file permissions As we are using a Conduit specific user we need to allow it to read the config. To do that you can run this command on -Debian or RHEL: +Debian: ```bash sudo chown -R root:root /etc/matrix-conduit @@ -198,7 +161,7 @@ If you use the default database path you also need to run this: ```bash sudo mkdir -p /var/lib/matrix-conduit/ -sudo chown -R conduit:conduit /var/lib/matrix-conduit/ +sudo chown -R conduit:nogroup /var/lib/matrix-conduit/ sudo chmod 700 /var/lib/matrix-conduit/ ``` @@ -211,11 +174,6 @@ This depends on whether you use Apache, Caddy, Nginx or another web server. 
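One more note on the systemd unit shown earlier: the `Cargo.toml` hunk at the top of this diff drops the optional `sd-notify` dependency and the `systemd` feature that enabled it. That crate is the usual way a Rust daemon reports readiness to systemd when a unit declares `Type=notify`; removing it is consistent with the unit above not declaring that type. For reference, a minimal, hypothetical sketch of the pattern (not Conduit's actual startup code):

```rust
use sd_notify::NotifyState;

fn main() {
    // ... bind sockets, open the database, etc. ...

    // Tell systemd we are ready to serve traffic. Passing `false` keeps
    // NOTIFY_SOCKET in the environment; the error is ignored so the binary
    // still runs when it is not started by systemd.
    let _ = sd_notify::notify(false, &[NotifyState::Ready]);

    // ... run the server loop ...
}
```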
Create `/etc/apache2/sites-enabled/050-conduit.conf` and copy-and-paste this: ```apache -# Requires mod_proxy and mod_proxy_http -# -# On Apache instance compiled from source, -# paste into httpd-ssl.conf or httpd.conf - Listen 8448 @@ -223,7 +181,7 @@ Listen 8448 ServerName your.server.name # EDIT THIS AllowEncodedSlashes NoDecode -ProxyPass /_matrix/ http://127.0.0.1:6167/_matrix/ timeout=300 nocanon +ProxyPass /_matrix/ http://127.0.0.1:6167/_matrix/ nocanon ProxyPassReverse /_matrix/ http://127.0.0.1:6167/_matrix/ @@ -232,11 +190,7 @@ ProxyPassReverse /_matrix/ http://127.0.0.1:6167/_matrix/ **You need to make some edits again.** When you are done, run ```bash -# Debian $ sudo systemctl reload apache2 - -# Installed from source -$ sudo apachectl -k graceful ``` ### Caddy @@ -269,14 +223,12 @@ server { merge_slashes off; # Nginx defaults to only allow 1MB uploads - # Increase this to allow posting large files such as videos client_max_body_size 20M; location /_matrix/ { proxy_pass http://127.0.0.1:6167$request_uri; proxy_set_header Host $http_host; proxy_buffering off; - proxy_read_timeout 5m; } ssl_certificate /etc/letsencrypt/live/your.server.name/fullchain.pem; # EDIT THIS @@ -296,19 +248,11 @@ $ sudo systemctl reload nginx If you chose Caddy as your web proxy SSL certificates are handled automatically and you can skip this step. -The easiest way to get an SSL certificate, if you don't have one already, is to [install](https://certbot.eff.org/instructions) `certbot` and run this: +The easiest way to get an SSL certificate, if you don't have one already, is to install `certbot` and run this: ```bash -# To use ECC for the private key, -# paste into /etc/letsencrypt/cli.ini: -# key-type = ecdsa -# elliptic-curve = secp384r1 - $ sudo certbot -d your.server.name ``` -[Automated renewal](https://eff-certbot.readthedocs.io/en/stable/using.html#automated-renewals) is usually preconfigured. - -If using Cloudflare, configure instead the edge and origin certificates in dashboard. In case you’re already running a website on the same Apache server, you can just copy-and-paste the SSL configuration from your main virtual host on port 443 into the above-mentioned vhost. ## You're done! @@ -332,8 +276,6 @@ You can also use these commands as a quick health check. 
```bash $ curl https://your.server.name/_matrix/client/versions - -# If using port 8448 $ curl https://your.server.name:8448/_matrix/client/versions ``` diff --git a/Dockerfile b/Dockerfile index 943f6864..2763b126 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,7 +1,5 @@ # syntax=docker/dockerfile:1 -FROM docker.io/rust:1.70-bullseye AS base - -FROM base AS builder +FROM docker.io/rust:1.64-bullseye AS builder WORKDIR /usr/src/conduit # Install required packages to build Conduit and it's dependencies @@ -39,7 +37,7 @@ COPY --from=builder /usr/src/conduit/target/release/conduit /conduit # --------------------------------------------------------------------------------------------------------------- # Build cargo-deb, a tool to package up rust binaries into .deb packages for Debian/Ubuntu based systems: # --------------------------------------------------------------------------------------------------------------- -FROM base AS build-cargo-deb +FROM docker.io/rust:1.64-bullseye AS build-cargo-deb RUN apt-get update && \ apt-get install -y --no-install-recommends \ @@ -59,7 +57,7 @@ WORKDIR /usr/src/conduit COPY ./LICENSE ./LICENSE COPY ./README.md ./README.md -COPY debian ./debian +COPY debian/README.Debian ./debian/ COPY --from=build-cargo-deb /usr/local/cargo/bin/cargo-deb /usr/local/cargo/bin/cargo-deb # --no-build makes cargo-deb reuse already compiled project diff --git a/README.md b/README.md index 5e01c8c1..ab471769 100644 --- a/README.md +++ b/README.md @@ -1,11 +1,6 @@ # Conduit -### A Matrix homeserver written in Rust -#### What is Matrix? -[Matrix](https://matrix.org) is an open network for secure and decentralized -communication. Users from every Matrix homeserver can chat with users from all -other Matrix servers. You can even use bridges (also called Matrix appservices) -to communicate with users outside of Matrix, like a community on Discord. +### A Matrix homeserver written in Rust #### What is the goal? @@ -16,9 +11,11 @@ friends or company. #### Can I try it out? Yes! You can test our Conduit instance by opening a Matrix client ( or Element Android for -example) and registering on the `conduit.rs` homeserver. The registration token is "for_testing_only". Don't share personal information. +example) and registering on the `conduit.rs` homeserver. -Server hosting for conduit.rs is donated by the Matrix.org Foundation. +It is hosted on a ODROID HC 2 with 2GB RAM and a SAMSUNG Exynos 5422 CPU, which +was used in the Samsung Galaxy S5. It joined many big rooms including Matrix +HQ. #### What is the current status? @@ -28,15 +25,15 @@ from time to time. There are still a few important features missing: -- E2EE emoji comparison over federation (E2EE chat works) -- Outgoing read receipts, typing, presence over federation (incoming works) +- E2EE verification over federation +- Outgoing read receipts, typing, presence over federation Check out the [Conduit 1.0 Release Milestone](https://gitlab.com/famedly/conduit/-/milestones/3). #### How can I deploy my own? - Simple install (this was tested the most): [DEPLOY.md](DEPLOY.md) -- Debian package: [debian/README.md](debian/README.md) +- Debian package: [debian/README.Debian](debian/README.Debian) - Nix/NixOS: [nix/README.md](nix/README.md) - Docker: [docker/README.md](docker/README.md) @@ -53,21 +50,13 @@ If you want to connect an Appservice to Conduit, take a look at [APPSERVICES.md] #### Thanks to -Thanks to FUTO, Famedly, Prototype Fund (DLR and German BMBF) and all individuals for financially supporting this project. 
+Thanks to Famedly, Prototype Fund (DLR and German BMBF) and all other individuals for financially supporting this project. Thanks to the contributors to Conduit and all libraries we use, for example: - Ruma: A clean library for the Matrix Spec in Rust - axum: A modular web framework -#### Contact - -If you run into any question, feel free to -- Ask us in `#conduit:fachschaften.org` on Matrix -- Write an E-Mail to `conduit@koesters.xyz` -- Send an direct message to `timokoesters@fachschaften.org` on Matrix -- [Open an issue on GitLab](https://gitlab.com/famedly/conduit/-/issues/new) - #### Donate Liberapay: \ diff --git a/complement/Dockerfile b/complement/Dockerfile deleted file mode 100644 index 50173a1c..00000000 --- a/complement/Dockerfile +++ /dev/null @@ -1,48 +0,0 @@ -# For use in our CI only. This requires a build artifact created by a previous run pipline stage to be placed in cached_target/release/conduit -FROM registry.gitlab.com/jfowl/conduit-containers/rust-with-tools:commit-16a08e9b as builder -#FROM rust:latest as builder - -WORKDIR /workdir - -ARG RUSTC_WRAPPER -ARG AWS_ACCESS_KEY_ID -ARG AWS_SECRET_ACCESS_KEY -ARG SCCACHE_BUCKET -ARG SCCACHE_ENDPOINT -ARG SCCACHE_S3_USE_SSL - -COPY . . -RUN mkdir -p target/release -RUN test -e cached_target/release/conduit && cp cached_target/release/conduit target/release/conduit || cargo build --release - -## Actual image -FROM debian:bullseye -WORKDIR /workdir - -# Install caddy -RUN apt-get update && apt-get install -y debian-keyring debian-archive-keyring apt-transport-https curl && curl -1sLf 'https://dl.cloudsmith.io/public/caddy/testing/gpg.key' | gpg --dearmor -o /usr/share/keyrings/caddy-testing-archive-keyring.gpg && curl -1sLf 'https://dl.cloudsmith.io/public/caddy/testing/debian.deb.txt' | tee /etc/apt/sources.list.d/caddy-testing.list && apt-get update && apt-get install -y caddy - -COPY conduit-example.toml conduit.toml -COPY complement/caddy.json caddy.json - -ENV SERVER_NAME=localhost -ENV CONDUIT_CONFIG=/workdir/conduit.toml - -RUN sed -i "s/port = 6167/port = 8008/g" conduit.toml -RUN echo "allow_federation = true" >> conduit.toml -RUN echo "allow_check_for_updates = true" >> conduit.toml -RUN echo "allow_encryption = true" >> conduit.toml -RUN echo "allow_registration = true" >> conduit.toml -RUN echo "log = \"warn,_=off,sled=off\"" >> conduit.toml -RUN sed -i "s/address = \"127.0.0.1\"/address = \"0.0.0.0\"/g" conduit.toml - -COPY --from=builder /workdir/target/release/conduit /workdir/conduit -RUN chmod +x /workdir/conduit - -EXPOSE 8008 8448 - -CMD uname -a && \ - sed -i "s/#server_name = \"your.server.name\"/server_name = \"${SERVER_NAME}\"/g" conduit.toml && \ - sed -i "s/your.server.name/${SERVER_NAME}/g" caddy.json && \ - caddy start --config caddy.json > /dev/null && \ - /workdir/conduit diff --git a/complement/README.md b/complement/README.md deleted file mode 100644 index b86aab38..00000000 --- a/complement/README.md +++ /dev/null @@ -1,13 +0,0 @@ -# Running Conduit on Complement - -This assumes that you're familiar with complement, if not, please readme -[their readme](https://github.com/matrix-org/complement#running). - -Complement works with "base images", this directory (and Dockerfile) helps build the conduit complement-ready docker -image. - -To build, `cd` to the base directory of the workspace, and run this: - -`docker build -t complement-conduit:dev -f complement/Dockerfile .` - -Then use `complement-conduit:dev` as a base image for running complement tests. 
diff --git a/complement/caddy.json b/complement/caddy.json deleted file mode 100644 index ea52c2c9..00000000 --- a/complement/caddy.json +++ /dev/null @@ -1,72 +0,0 @@ -{ - "logging": { - "logs": { - "default": { - "level": "WARN" - } - } - }, - "apps": { - "http": { - "https_port": 8448, - "servers": { - "srv0": { - "listen": [":8448"], - "routes": [{ - "match": [{ - "host": ["your.server.name"] - }], - "handle": [{ - "handler": "subroute", - "routes": [{ - "handle": [{ - "handler": "reverse_proxy", - "upstreams": [{ - "dial": "127.0.0.1:8008" - }] - }] - }] - }], - "terminal": true - }], - "tls_connection_policies": [{ - "match": { - "sni": ["your.server.name"] - } - }] - } - } - }, - "pki": { - "certificate_authorities": { - "local": { - "name": "Complement CA", - "root": { - "certificate": "/complement/ca/ca.crt", - "private_key": "/complement/ca/ca.key" - }, - "intermediate": { - "certificate": "/complement/ca/ca.crt", - "private_key": "/complement/ca/ca.key" - } - } - } - }, - "tls": { - "automation": { - "policies": [{ - "subjects": ["your.server.name"], - "issuers": [{ - "module": "internal" - }], - "on_demand": true - }, { - "issuers": [{ - "module": "internal", - "ca": "local" - }] - }] - } - } - } -} \ No newline at end of file diff --git a/conduit-example.toml b/conduit-example.toml index 836db654..fee31020 100644 --- a/conduit-example.toml +++ b/conduit-example.toml @@ -38,16 +38,15 @@ max_request_size = 20_000_000 # in bytes # Enables registration. If set to false, no users can register on this server. allow_registration = true +# Enables federation. If set to false, this server will not federate with others (rooms from other server will not be available). allow_federation = true -allow_check_for_updates = true + +# Enables presence. If set to false, the presence of users (whether they are online, idle or offline) will not be shown or processed. +allow_presence = true # Enable the display name lightning bolt on registration. enable_lightning_bolt = true -# Servers listed here will be used to gather public keys of other servers. -# Generally, copying this exactly should be enough. (Currently, Conduit doesn't -# support batched key requests, so this list should only contain Synapse -# servers.) trusted_servers = ["matrix.org"] #max_concurrent_requests = 100 # How many requests Conduit sends to other servers at the same time diff --git a/debian/README.md b/debian/README.Debian similarity index 58% rename from debian/README.md rename to debian/README.Debian index 443be76b..5f63b5cb 100644 --- a/debian/README.md +++ b/debian/README.Debian @@ -1,36 +1,28 @@ Conduit for Debian ================== -Installation ------------- - -Information about downloading, building and deploying the Debian package, see -the "Installing Conduit" section in [DEPLOY.md](../DEPLOY.md). -All following sections until "Setting up the Reverse Proxy" be ignored because -this is handled automatically by the packaging. - Configuration ------------- When installed, Debconf generates the configuration of the homeserver (host)name, the address and port it listens on. This configuration ends up in -`/etc/matrix-conduit/conduit.toml`. +/etc/matrix-conduit/conduit.toml. You can tweak more detailed settings by uncommenting and setting the variables -in `/etc/matrix-conduit/conduit.toml`. This involves settings such as the maximum +in /etc/matrix-conduit/conduit.toml. This involves settings such as the maximum file size for download/upload, enabling federation, etc. 
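For context on how that file is consumed: the `Cargo.toml` hunk at the top of this diff lists `figment` as the crate "used for reading the configuration from conduit.toml & environment variables". A minimal sketch of that pattern follows; the `Config` struct, its fields, and the default helpers are made up for illustration and are much smaller than Conduit's real configuration type:

```rust
use figment::{
    providers::{Env, Format, Toml},
    Figment,
};
use serde::Deserialize;

// Hypothetical, trimmed-down config for illustration only.
#[derive(Debug, Deserialize)]
struct Config {
    server_name: String,
    #[serde(default = "default_port")]
    port: u16,
    #[serde(default)]
    allow_federation: bool,
}

fn default_port() -> u16 {
    6167
}

fn main() -> Result<(), figment::Error> {
    // TOML values are read first; CONDUIT_* environment variables override them.
    // NB: the real conduit.toml nests its settings under a [global] table,
    // which the actual code handles via figment profiles; this sketch reads
    // top-level keys for brevity.
    let config: Config = Figment::new()
        .merge(Toml::file("/etc/matrix-conduit/conduit.toml"))
        .merge(Env::prefixed("CONDUIT_"))
        .extract()?;

    println!("{config:?}");
    Ok(())
}
```

This is also why the Docker instructions later in the diff can configure the server entirely through `CONDUIT_*` environment variables: both sources feed the same merged configuration.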
Running ------- -The package uses the `matrix-conduit.service` systemd unit file to start and +The package uses the matrix-conduit.service systemd unit file to start and stop Conduit. It loads the configuration file mentioned above to set up the environment before running the server. This package assumes by default that Conduit will be placed behind a reverse proxy such as Apache or nginx. This default deployment entails just listening -on `127.0.0.1` and the free port `6167` and is reachable via a client using the URL -. +on 127.0.0.1 and the free port 6167 and is reachable via a client using the URL +http://localhost:6167. At a later stage this packaging may support also setting up TLS and running stand-alone. In this case, however, you need to set up some certificates and diff --git a/debian/postinst b/debian/postinst index 69a766a0..73e554b7 100644 --- a/debian/postinst +++ b/debian/postinst @@ -19,11 +19,11 @@ case "$1" in _matrix-conduit fi - # Create the database path if it does not exist yet and fix up ownership - # and permissions. - mkdir -p "$CONDUIT_DATABASE_PATH" - chown _matrix-conduit "$CONDUIT_DATABASE_PATH" - chmod 700 "$CONDUIT_DATABASE_PATH" + # Create the database path if it does not exist yet. + if [ ! -d "$CONDUIT_DATABASE_PATH" ]; then + mkdir -p "$CONDUIT_DATABASE_PATH" + chown _matrix-conduit "$CONDUIT_DATABASE_PATH" + fi if [ ! -e "$CONDUIT_CONFIG_FILE" ]; then # Write the debconf values in the config. @@ -73,7 +73,6 @@ max_request_size = 20_000_000 # in bytes allow_registration = true allow_federation = true -allow_check_for_updates = true trusted_servers = ["matrix.org"] diff --git a/docker/docker-compose.yml b/docker-compose.yml similarity index 97% rename from docker/docker-compose.yml rename to docker-compose.yml index 5bcf84f7..d9c32b51 100644 --- a/docker/docker-compose.yml +++ b/docker-compose.yml @@ -29,7 +29,6 @@ services: CONDUIT_MAX_REQUEST_SIZE: 20_000_000 # in bytes, ~20 MB CONDUIT_ALLOW_REGISTRATION: 'true' CONDUIT_ALLOW_FEDERATION: 'true' - CONDUIT_ALLOW_CHECK_FOR_UPDATES: 'true' CONDUIT_TRUSTED_SERVERS: '["matrix.org"]' #CONDUIT_MAX_CONCURRENT_REQUESTS: 100 #CONDUIT_LOG: warn,rocket=off,_=off,sled=off diff --git a/docker/README.md b/docker/README.md index b34f9d87..36717c4f 100644 --- a/docker/README.md +++ b/docker/README.md @@ -4,36 +4,7 @@ ## Docker -To run Conduit with Docker you can either build the image yourself or pull it from a registry. - - -### Use a registry - -OCI images for Conduit are available in the registries listed below. We recommend using the image tagged as `latest` from GitLab's own registry. - -| Registry | Image | Size | Notes | -| --------------- | --------------------------------------------------------------- | ----------------------------- | ---------------------- | -| GitLab Registry | [registry.gitlab.com/famedly/conduit/matrix-conduit:latest][gl] | ![Image Size][shield-latest] | Stable image. | -| Docker Hub | [docker.io/matrixconduit/matrix-conduit:latest][dh] | ![Image Size][shield-latest] | Stable image. | -| GitLab Registry | [registry.gitlab.com/famedly/conduit/matrix-conduit:next][gl] | ![Image Size][shield-next] | Development version. | -| Docker Hub | [docker.io/matrixconduit/matrix-conduit:next][dh] | ![Image Size][shield-next] | Development version. 
| - - -[dh]: https://hub.docker.com/r/matrixconduit/matrix-conduit -[gl]: https://gitlab.com/famedly/conduit/container_registry/2497937 -[shield-latest]: https://img.shields.io/docker/image-size/matrixconduit/matrix-conduit/latest -[shield-next]: https://img.shields.io/docker/image-size/matrixconduit/matrix-conduit/next - - -Use -```bash -docker image pull -``` -to pull it to your machine. - - - -### Build using a dockerfile +### Build & Dockerfile The Dockerfile provided by Conduit has two stages, each of which creates an image. @@ -48,11 +19,9 @@ docker build --tag matrixconduit/matrix-conduit:latest . which also will tag the resulting image as `matrixconduit/matrix-conduit:latest`. - - ### Run -When you have the image you can simply run it with +After building the image you can simply run it with ```bash docker run -d -p 8448:6167 \ @@ -65,10 +34,19 @@ docker run -d -p 8448:6167 \ -e CONDUIT_TRUSTED_SERVERS="[\"matrix.org\"]" \ -e CONDUIT_MAX_CONCURRENT_REQUESTS="100" \ -e CONDUIT_LOG="warn,rocket=off,_=off,sled=off" \ - --name conduit + --name conduit matrixconduit/matrix-conduit:latest ``` -or you can use [docker-compose](#docker-compose). +or you can skip the build step and pull the image from one of the following registries: + +| Registry | Image | Size | +| --------------- | --------------------------------------------------------------- | --------------------- | +| Docker Hub | [matrixconduit/matrix-conduit:latest][dh] | ![Image Size][shield] | +| GitLab Registry | [registry.gitlab.com/famedly/conduit/matrix-conduit:latest][gl] | ![Image Size][shield] | + +[dh]: https://hub.docker.com/r/matrixconduit/matrix-conduit +[gl]: https://gitlab.com/famedly/conduit/container_registry/2497937 +[shield]: https://img.shields.io/docker/image-size/matrixconduit/matrix-conduit/latest The `-d` flag lets the container run in detached mode. You now need to supply a `conduit.toml` config file, an example can be found [here](../conduit-example.toml). You can pass in different env vars to change config values on the fly. You can even configure Conduit completely by using env vars, but for that you need @@ -76,7 +54,7 @@ to pass `-e CONDUIT_CONFIG=""` into your container. For an overview of possible If you just want to test Conduit for a short time, you can use the `--rm` flag, which will clean up everything related to your container after you stop it. -### Docker-compose +## Docker-compose If the `docker run` command is not for you or your setup, you can also use one of the provided `docker-compose` files. @@ -117,7 +95,7 @@ As a container user, you probably know about Traefik. It is a easy to use revers containerized app and services available through the web. With the two provided files, [`docker-compose.for-traefik.yml`](docker-compose.for-traefik.yml) (or [`docker-compose.with-traefik.yml`](docker-compose.with-traefik.yml)) and -[`docker-compose.override.yml`](docker-compose.override.yml), it is equally easy to deploy +[`docker-compose.override.yml`](docker-compose.override.traefik.yml), it is equally easy to deploy and use Conduit, with a little caveat. If you already took a look at the files, then you should have seen the `well-known` service, and that is the little caveat. Traefik is simply a proxy and loadbalancer and is not able to serve any kind of content, but for Conduit to federate, we need to @@ -128,8 +106,7 @@ With the service `well-known` we use a single `nginx` container that will serve So...step by step: -1. 
Copy [`docker-compose.for-traefik.yml`](docker-compose.for-traefik.yml) (or -[`docker-compose.with-traefik.yml`](docker-compose.with-traefik.yml)) and [`docker-compose.override.yml`](docker-compose.override.yml) from the repository and remove `.for-traefik` (or `.with-traefik`) from the filename. +1. Copy [`docker-compose.traefik.yml`](docker-compose.traefik.yml) and [`docker-compose.override.traefik.yml`](docker-compose.override.traefik.yml) from the repository and remove `.traefik` from the filenames. 2. Open both files and modify/adjust them to your needs. Meaning, change the `CONDUIT_SERVER_NAME` and the volume host mappings according to your needs. 3. Create the `conduit.toml` config file, an example can be found [here](../conduit-example.toml), or set `CONDUIT_CONFIG=""` and configure Conduit per env vars. 4. Uncomment the `element-web` service if you want to host your own Element Web Client and create a `element_config.json`. @@ -144,12 +121,12 @@ So...step by step: location /.well-known/matrix/server { return 200 '{"m.server": ".:443"}'; - types { } default_type "application/json; charset=utf-8"; + add_header Content-Type application/json; } location /.well-known/matrix/client { return 200 '{"m.homeserver": {"base_url": "https://."}}'; - types { } default_type "application/json; charset=utf-8"; + add_header Content-Type application/json; add_header "Access-Control-Allow-Origin" *; } @@ -161,58 +138,3 @@ So...step by step: 6. Run `docker-compose up -d` 7. Connect to your homeserver with your preferred client and create a user. You should do this immediately after starting Conduit, because the first created user is the admin. - - - - -## Voice communication - -In order to make or receive calls, a TURN server is required. Conduit suggests using [Coturn](https://github.com/coturn/coturn) for this purpose, which is also available as a Docker image. Before proceeding with the software installation, it is essential to have the necessary configurations in place. - -### Configuration - -Create a configuration file called `coturn.conf` containing: - -```conf -use-auth-secret -static-auth-secret= -realm= -``` -A common way to generate a suitable alphanumeric secret key is by using `pwgen -s 64 1`. - -These same values need to be set in conduit. You can either modify conduit.toml to include these lines: -``` -turn_uris = ["turn:?transport=udp", "turn:?transport=tcp"] -turn_secret = "" -``` -or append the following to the docker environment variables dependig on which configuration method you used earlier: -```yml -CONDUIT_TURN_URIS: '["turn:?transport=udp", "turn:?transport=tcp"]' -CONDUIT_TURN_SECRET: "" -``` -Restart Conduit to apply these changes. - -### Run -Run the [Coturn](https://hub.docker.com/r/coturn/coturn) image using -```bash -docker run -d --network=host -v $(pwd)/coturn.conf:/etc/coturn/turnserver.conf coturn/coturn -``` - -or docker-compose. For the latter, paste the following section into a file called `docker-compose.yml` -and run `docker-compose up -d` in the same directory. - -```yml -version: 3 -services: - turn: - container_name: coturn-server - image: docker.io/coturn/coturn - restart: unless-stopped - network_mode: "host" - volumes: - - ./coturn.conf:/etc/coturn/turnserver.conf -``` - -To understand why the host networking mode is used and explore alternative configuration options, please visit the following link: https://github.com/coturn/coturn/blob/master/docker/coturn/README.md. 
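The `turn_secret` / `static-auth-secret` pairing described above is coturn's shared-secret ("REST API") authentication scheme, and it is what the `hmac` and `sha-1` crates in the `Cargo.toml` hunk are listed for ("used for TURN server authentication"). A rough sketch of how such ephemeral credentials are conventionally derived is below, using the base64 0.13-style `encode` helper from the `+` side of the dependency hunk; the `turn_credentials` function and its signature are hypothetical, not Conduit's exact code:

```rust
use hmac::{Hmac, Mac};
use sha1::Sha1;
use std::time::{Duration, SystemTime, UNIX_EPOCH};

type HmacSha1 = Hmac<Sha1>;

/// Derive a time-limited TURN username/password pair from the shared secret
/// configured as `turn_secret` in conduit.toml and `static-auth-secret` in
/// coturn.conf. The username is an expiry timestamp; the password is
/// base64(HMAC-SHA1(secret, username)).
fn turn_credentials(secret: &str, ttl: Duration) -> (String, String) {
    let expiry = SystemTime::now() + ttl;
    let username = expiry
        .duration_since(UNIX_EPOCH)
        .expect("expiry is after the epoch")
        .as_secs()
        .to_string();

    let mut mac = HmacSha1::new_from_slice(secret.as_bytes())
        .expect("HMAC accepts keys of any length");
    mac.update(username.as_bytes());
    let password = base64::encode(mac.finalize().into_bytes());

    (username, password)
}

fn main() {
    let (user, pass) = turn_credentials("<a secret key>", Duration::from_secs(86400));
    println!("username: {user}\npassword: {pass}");
}
```

Because both sides derive the password from the same secret, Conduit can hand these short-lived credentials to clients without coturn ever contacting the homeserver.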
-For security recommendations see Synapse's [Coturn documentation](https://github.com/matrix-org/synapse/blob/develop/docs/setup/turn/coturn.md#configuration). - diff --git a/docker/docker-compose.for-traefik.yml b/docker/docker-compose.for-traefik.yml index bed734f1..474299f6 100644 --- a/docker/docker-compose.for-traefik.yml +++ b/docker/docker-compose.for-traefik.yml @@ -29,7 +29,6 @@ services: CONDUIT_MAX_REQUEST_SIZE: 20_000_000 # in bytes, ~20 MB CONDUIT_ALLOW_REGISTRATION: 'true' CONDUIT_ALLOW_FEDERATION: 'true' - CONDUIT_ALLOW_CHECK_FOR_UPDATES: 'true' CONDUIT_TRUSTED_SERVERS: '["matrix.org"]' #CONDUIT_MAX_CONCURRENT_REQUESTS: 100 #CONDUIT_LOG: warn,rocket=off,_=off,sled=off diff --git a/docker/docker-compose.with-traefik.yml b/docker/docker-compose.with-traefik.yml index fda942bc..79ebef4b 100644 --- a/docker/docker-compose.with-traefik.yml +++ b/docker/docker-compose.with-traefik.yml @@ -35,9 +35,8 @@ services: # Available levels are: error, warn, info, debug, trace - more info at: https://docs.rs/env_logger/*/env_logger/#enabling-logging # CONDUIT_LOG: info # default is: "warn,_=off,sled=off" # CONDUIT_ALLOW_JAEGER: 'false' - # CONDUIT_ALLOW_ENCRYPTION: 'true' - # CONDUIT_ALLOW_FEDERATION: 'true' - # CONDUIT_ALLOW_CHECK_FOR_UPDATES: 'true' + # CONDUIT_ALLOW_ENCRYPTION: 'false' + # CONDUIT_ALLOW_FEDERATION: 'false' # CONDUIT_DATABASE_PATH: /srv/conduit/.local/share/conduit # CONDUIT_WORKERS: 10 # CONDUIT_MAX_REQUEST_SIZE: 20_000_000 # in bytes, ~20 MB diff --git a/docker/healthcheck.sh b/docker/healthcheck.sh index 62f2f987..42b2e103 100644 --- a/docker/healthcheck.sh +++ b/docker/healthcheck.sh @@ -6,14 +6,9 @@ if [ -z "${CONDUIT_PORT}" ]; then CONDUIT_PORT=$(ss -tlpn | grep conduit | grep -m1 -o ':[0-9]*' | grep -m1 -o '[0-9]*') fi -# If CONDUIT_ADDRESS is not set try to get the address from the process list -if [ -z "${CONDUIT_ADDRESS}" ]; then - CONDUIT_ADDRESS=$(ss -tlpn | awk -F ' +|:' '/conduit/ { print $4 }') -fi - # The actual health check. # We try to first get a response on HTTP and when that fails on HTTPS and when that fails, we exit with code 1. # TODO: Change this to a single wget call. Do we have a config value that we can check for that? 
-wget --no-verbose --tries=1 --spider "http://${CONDUIT_ADDRESS}:${CONDUIT_PORT}/_matrix/client/versions" || \ - wget --no-verbose --tries=1 --spider "https://${CONDUIT_ADDRESS}:${CONDUIT_PORT}/_matrix/client/versions" || \ +wget --no-verbose --tries=1 --spider "http://localhost:${CONDUIT_PORT}/_matrix/client/versions" || \ + wget --no-verbose --tries=1 --spider "https://localhost:${CONDUIT_PORT}/_matrix/client/versions" || \ exit 1 diff --git a/flake.lock b/flake.lock index 00655252..9217ff26 100644 --- a/flake.lock +++ b/flake.lock @@ -1,30 +1,5 @@ { "nodes": { - "crane": { - "inputs": { - "flake-compat": "flake-compat", - "flake-utils": [ - "flake-utils" - ], - "nixpkgs": [ - "nixpkgs" - ], - "rust-overlay": "rust-overlay" - }, - "locked": { - "lastModified": 1688772518, - "narHash": "sha256-ol7gZxwvgLnxNSZwFTDJJ49xVY5teaSvF7lzlo3YQfM=", - "owner": "ipetkov", - "repo": "crane", - "rev": "8b08e96c9af8c6e3a2b69af5a7fa168750fcf88e", - "type": "github" - }, - "original": { - "owner": "ipetkov", - "repo": "crane", - "type": "github" - } - }, "fenix": { "inputs": { "nixpkgs": [ @@ -33,11 +8,11 @@ "rust-analyzer-src": "rust-analyzer-src" }, "locked": { - "lastModified": 1689488573, - "narHash": "sha256-diVASflKCCryTYv0djvMnP2444mFsIG0ge5pa7ahauQ=", + "lastModified": 1665815894, + "narHash": "sha256-Vboo1L4NMGLKZKVLnOPi9OHlae7uoNyfgvyIUm+SVXE=", "owner": "nix-community", "repo": "fenix", - "rev": "39096fe3f379036ff4a5fa198950b8e79defe939", + "rev": "2348450241a5f945f0ba07e44ecbfac2f541d7f4", "type": "github" }, "original": { @@ -46,32 +21,13 @@ "type": "github" } }, - "flake-compat": { - "flake": false, - "locked": { - "lastModified": 1673956053, - "narHash": "sha256-4gtG9iQuiKITOjNQQeQIpoIB6b16fm+504Ch3sNKLd8=", - "owner": "edolstra", - "repo": "flake-compat", - "rev": "35bb57c0c8d8b62bbfd284272c928ceb64ddbde9", - "type": "github" - }, - "original": { - "owner": "edolstra", - "repo": "flake-compat", - "type": "github" - } - }, "flake-utils": { - "inputs": { - "systems": "systems" - }, "locked": { - "lastModified": 1689068808, - "narHash": "sha256-6ixXo3wt24N/melDWjq70UuHQLxGV8jZvooRanIHXw0=", + "lastModified": 1659877975, + "narHash": "sha256-zllb8aq3YO3h8B/U0/J1WBgAL8EX5yWf5pMj3G0NAmc=", "owner": "numtide", "repo": "flake-utils", - "rev": "919d646de7be200f3bf08cb76ae1f09402b6f9b4", + "rev": "c0e246b9b83f637f4681389ecabcb2681b4f3af0", "type": "github" }, "original": { @@ -80,38 +36,57 @@ "type": "github" } }, + "naersk": { + "inputs": { + "nixpkgs": [ + "nixpkgs" + ] + }, + "locked": { + "lastModified": 1662220400, + "narHash": "sha256-9o2OGQqu4xyLZP9K6kNe1pTHnyPz0Wr3raGYnr9AIgY=", + "owner": "nix-community", + "repo": "naersk", + "rev": "6944160c19cb591eb85bbf9b2f2768a935623ed3", + "type": "github" + }, + "original": { + "owner": "nix-community", + "repo": "naersk", + "type": "github" + } + }, "nixpkgs": { "locked": { - "lastModified": 1689444953, - "narHash": "sha256-0o56bfb2LC38wrinPdCGLDScd77LVcr7CrH1zK7qvDg=", + "lastModified": 1665856037, + "narHash": "sha256-/RvIWnGKdTSoIq5Xc2HwPIL0TzRslzU6Rqk4Img6UNg=", "owner": "NixOS", "repo": "nixpkgs", - "rev": "8acef304efe70152463a6399f73e636bcc363813", + "rev": "c95ebc5125ffffcd431df0ad8620f0926b8125b8", "type": "github" }, "original": { "owner": "NixOS", - "ref": "nixos-unstable", "repo": "nixpkgs", "type": "github" } }, "root": { "inputs": { - "crane": "crane", "fenix": "fenix", "flake-utils": "flake-utils", + "naersk": "naersk", "nixpkgs": "nixpkgs" } }, "rust-analyzer-src": { "flake": false, "locked": { - "lastModified": 1689441253, - 
"narHash": "sha256-4MSDZaFI4DOfsLIZYPMBl0snzWhX1/OqR/QHir382CY=", + "lastModified": 1665765556, + "narHash": "sha256-w9L5j0TIB5ay4aRwzGCp8mgvGsu5dVJQvbEFutwr6xE=", "owner": "rust-lang", "repo": "rust-analyzer", - "rev": "996e054f1eb1dbfc8455ecabff0f6ff22ba7f7c8", + "rev": "018b8429cf3fa9d8aed916704e41dfedeb0f4f78", "type": "github" }, "original": { @@ -120,46 +95,6 @@ "repo": "rust-analyzer", "type": "github" } - }, - "rust-overlay": { - "inputs": { - "flake-utils": [ - "crane", - "flake-utils" - ], - "nixpkgs": [ - "crane", - "nixpkgs" - ] - }, - "locked": { - "lastModified": 1688351637, - "narHash": "sha256-CLTufJ29VxNOIZ8UTg0lepsn3X03AmopmaLTTeHDCL4=", - "owner": "oxalica", - "repo": "rust-overlay", - "rev": "f9b92316727af9e6c7fee4a761242f7f46880329", - "type": "github" - }, - "original": { - "owner": "oxalica", - "repo": "rust-overlay", - "type": "github" - } - }, - "systems": { - "locked": { - "lastModified": 1681028828, - "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=", - "owner": "nix-systems", - "repo": "default", - "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e", - "type": "github" - }, - "original": { - "owner": "nix-systems", - "repo": "default", - "type": "github" - } } }, "root": "root", diff --git a/flake.nix b/flake.nix index eb3a31cb..924300cf 100644 --- a/flake.nix +++ b/flake.nix @@ -1,16 +1,15 @@ { inputs = { - nixpkgs.url = "github:NixOS/nixpkgs?ref=nixos-unstable"; + nixpkgs.url = "github:NixOS/nixpkgs"; flake-utils.url = "github:numtide/flake-utils"; fenix = { url = "github:nix-community/fenix"; inputs.nixpkgs.follows = "nixpkgs"; }; - crane = { - url = "github:ipetkov/crane"; + naersk = { + url = "github:nix-community/naersk"; inputs.nixpkgs.follows = "nixpkgs"; - inputs.flake-utils.follows = "flake-utils"; }; }; @@ -20,17 +19,11 @@ , flake-utils , fenix - , crane + , naersk }: flake-utils.lib.eachDefaultSystem (system: let pkgs = nixpkgs.legacyPackages.${system}; - # Use mold on Linux - stdenv = if pkgs.stdenv.isLinux then - pkgs.stdenvAdapters.useMoldLinker pkgs.stdenv - else - pkgs.stdenv; - # Nix-accessible `Cargo.toml` cargoToml = builtins.fromTOML (builtins.readFile ./Cargo.toml); @@ -39,44 +32,33 @@ # Use the Rust version defined in `Cargo.toml` channel = cargoToml.package.rust-version; - # THE rust-version HASH - sha256 = "sha256-gdYqng0y9iHYzYPAdkC/ka3DRny3La/S5G8ASj0Ayyc="; + # This will need to be updated when `package.rust-version` is changed in + # `Cargo.toml` + sha256 = "sha256-KXx+ID0y4mg2B3LHp7IyaiMrdexF6octADnAtFIOjrY="; }; - # The system's RocksDB - ROCKSDB_INCLUDE_DIR = "${pkgs.rocksdb}/include"; - ROCKSDB_LIB_DIR = "${pkgs.rocksdb}/lib"; - - # Shared between the package and the devShell - nativeBuildInputs = (with pkgs.rustPlatform; [ - bindgenHook - ]); - - builder = - ((crane.mkLib pkgs).overrideToolchain toolchain.toolchain).buildPackage; + builder = (pkgs.callPackage naersk { + inherit (toolchain) rustc cargo; + }).buildPackage; in { packages.default = builder { src = ./.; - inherit - stdenv - nativeBuildInputs - ROCKSDB_INCLUDE_DIR - ROCKSDB_LIB_DIR; + nativeBuildInputs = (with pkgs.rustPlatform; [ + bindgenHook + ]); }; - devShells.default = (pkgs.mkShell.override { inherit stdenv; }) { + devShells.default = pkgs.mkShell { # Rust Analyzer needs to be able to find the path to default crate # sources, and it can read this environment variable to do so RUST_SRC_PATH = "${toolchain.rust-src}/lib/rustlib/src/rust/library"; - inherit - ROCKSDB_INCLUDE_DIR - ROCKSDB_LIB_DIR; - # Development tools - nativeBuildInputs = 
nativeBuildInputs ++ (with toolchain; [ + nativeBuildInputs = (with pkgs.rustPlatform; [ + bindgenHook + ]) ++ (with toolchain; [ cargo clippy rust-src diff --git a/nix/README.md b/nix/README.md index bd6f0962..d92f910b 100644 --- a/nix/README.md +++ b/nix/README.md @@ -107,7 +107,7 @@ in recommendedProxySettings = true; virtualHosts = { - "${matrix_hostname}" = { + "${server_name}" = { forceSSL = true; enableACME = true; @@ -118,21 +118,20 @@ in ssl = true; } { - addr = "[::]"; - port = 443; - ssl = true; - } { addr = "0.0.0.0"; port = 8448; ssl = true; } - { - addr = "[::]"; - port = 8448; - ssl = true; - } ]; + extraConfig = '' + merge_slashes off; + ''; + + "${matrix_hostname}" = { + forceSSL = true; + enableACME = true; + locations."/_matrix/" = { proxyPass = "http://backend_conduit$request_uri"; proxyWebsockets = true; @@ -142,15 +141,6 @@ in ''; }; - extraConfig = '' - merge_slashes off; - ''; - }; - - "${server_name}" = { - forceSSL = true; - enableACME = true; - locations."=/.well-known/matrix/server" = { # Use the contents of the derivation built previously alias = "${well_known_server}"; @@ -179,7 +169,7 @@ in upstreams = { "backend_conduit" = { servers = { - "[::1]:${toString config.services.matrix-conduit.settings.global.port}" = { }; + "localhost:${toString config.services.matrix-conduit.settings.global.port}" = { }; }; }; }; diff --git a/src/api/appservice_server.rs b/src/api/appservice_server.rs index 082a1bc2..dc319e2c 100644 --- a/src/api/appservice_server.rs +++ b/src/api/appservice_server.rs @@ -18,7 +18,7 @@ where let mut http_request = request .try_into_http_request::( destination, - SendAccessToken::IfRequired(hs_token), + SendAccessToken::IfRequired(""), &[MatrixVersion::V1_0], ) .unwrap() diff --git a/src/api/client_server/account.rs b/src/api/client_server/account.rs index 46551305..309a3618 100644 --- a/src/api/client_server/account.rs +++ b/src/api/client_server/account.rs @@ -30,7 +30,7 @@ const RANDOM_USER_ID_LENGTH: usize = 10; /// /// Note: This will not reserve the username, so the username might become invalid when trying to register pub async fn get_register_available_route( - body: Ruma, + body: Ruma, ) -> Result { // Validate user id let user_id = UserId::parse_with_server_name( @@ -73,11 +73,10 @@ pub async fn get_register_available_route( /// - If type is not guest and no username is given: Always fails after UIAA check /// - Creates a new account and populates it with default account data /// - If `inhibit_login` is false: Creates a device and returns device id and access_token -pub async fn register_route(body: Ruma) -> Result { - if !services().globals.allow_registration() - && !body.from_appservice - && services().globals.config.registration_token.is_none() - { +pub async fn register_route( + body: Ruma, +) -> Result { + if !services().globals.allow_registration() && !body.from_appservice { return Err(Error::BadRequest( ErrorKind::Forbidden, "Registration has been disabled.", @@ -124,11 +123,7 @@ pub async fn register_route(body: Ruma) -> Result) -> Result) -> Result) -> Result, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_device = body.sender_device.as_ref().expect("user is authenticated"); @@ -326,7 +320,8 @@ pub async fn change_password_route( services() .admin .send_message(RoomMessageEventContent::notice_plain(format!( - "User {sender_user} changed their password." 
+ "User {} changed their password.", + sender_user ))); Ok(change_password::v3::Response {}) @@ -359,7 +354,7 @@ pub async fn whoami_route(body: Ruma) -> Result, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_device = body.sender_device.as_ref().expect("user is authenticated"); @@ -403,7 +398,8 @@ pub async fn deactivate_route( services() .admin .send_message(RoomMessageEventContent::notice_plain(format!( - "User {sender_user} deactivated their account." + "User {} deactivated their account.", + sender_user ))); Ok(deactivate::v3::Response { @@ -430,7 +426,7 @@ pub async fn third_party_route( /// /// - 403 signals that The homeserver does not allow the third party identifier as a contact option. pub async fn request_3pid_management_token_via_email_route( - _body: Ruma, + _body: Ruma, ) -> Result { Err(Error::BadRequest( ErrorKind::ThreepidDenied, @@ -444,7 +440,7 @@ pub async fn request_3pid_management_token_via_email_route( /// /// - 403 signals that The homeserver does not allow the third party identifier as a contact option. pub async fn request_3pid_management_token_via_msisdn_route( - _body: Ruma, + _body: Ruma, ) -> Result { Err(Error::BadRequest( ErrorKind::ThreepidDenied, diff --git a/src/api/client_server/alias.rs b/src/api/client_server/alias.rs index 7660ca2f..b28606c1 100644 --- a/src/api/client_server/alias.rs +++ b/src/api/client_server/alias.rs @@ -1,5 +1,4 @@ use crate::{services, Error, Result, Ruma}; -use rand::seq::SliceRandom; use regex::Regex; use ruma::{ api::{ @@ -10,14 +9,14 @@ use ruma::{ }, federation, }, - OwnedRoomAliasId, + RoomAliasId, }; /// # `PUT /_matrix/client/r0/directory/room/{roomAlias}` /// /// Creates a new room alias on this server. pub async fn create_alias_route( - body: Ruma, + body: Ruma, ) -> Result { if body.room_alias.server_name() != services().globals.server_name() { return Err(Error::BadRequest( @@ -50,7 +49,7 @@ pub async fn create_alias_route( /// - TODO: additional access control checks /// - TODO: Update canonical alias event pub async fn delete_alias_route( - body: Ruma, + body: Ruma, ) -> Result { if body.room_alias.server_name() != services().globals.server_name() { return Err(Error::BadRequest( @@ -72,33 +71,29 @@ pub async fn delete_alias_route( /// /// - TODO: Suggest more servers to join via pub async fn get_alias_route( - body: Ruma, + body: Ruma, ) -> Result { - get_alias_helper(body.body.room_alias).await + get_alias_helper(&body.room_alias).await } -pub(crate) async fn get_alias_helper( - room_alias: OwnedRoomAliasId, -) -> Result { +pub(crate) async fn get_alias_helper(room_alias: &RoomAliasId) -> Result { if room_alias.server_name() != services().globals.server_name() { let response = services() .sending .send_federation_request( room_alias.server_name(), - federation::query::get_room_information::v1::Request { - room_alias: room_alias.to_owned(), - }, + federation::query::get_room_information::v1::Request { room_alias }, ) .await?; - let mut servers = response.servers; - servers.shuffle(&mut rand::thread_rng()); - - return Ok(get_alias::v3::Response::new(response.room_id, servers)); + return Ok(get_alias::v3::Response::new( + response.room_id, + response.servers, + )); } let mut room_id = None; - match services().rooms.alias.resolve_local_alias(&room_alias)? { + match services().rooms.alias.resolve_local_alias(room_alias)? { Some(r) => room_id = Some(r), None => { for (_id, registration) in services().appservice.all()? 
{ @@ -120,9 +115,7 @@ pub(crate) async fn get_alias_helper( .sending .send_appservice_request( registration, - appservice::query::query_room_alias::v1::Request { - room_alias: room_alias.clone(), - }, + appservice::query::query_room_alias::v1::Request { room_alias }, ) .await .is_ok() @@ -131,7 +124,7 @@ pub(crate) async fn get_alias_helper( services() .rooms .alias - .resolve_local_alias(&room_alias)? + .resolve_local_alias(room_alias)? .ok_or_else(|| { Error::bad_config("Appservice lied to us. Room does not exist.") })?, diff --git a/src/api/client_server/backup.rs b/src/api/client_server/backup.rs index 115cba7c..f3d5ddc5 100644 --- a/src/api/client_server/backup.rs +++ b/src/api/client_server/backup.rs @@ -28,7 +28,7 @@ pub async fn create_backup_version_route( /// /// Update information about an existing backup. Only `auth_data` can be modified. pub async fn update_backup_version_route( - body: Ruma, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); services() @@ -66,7 +66,7 @@ pub async fn get_latest_backup_info_route( /// /// Get information about an existing backup. pub async fn get_backup_info_route( - body: Ruma, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let algorithm = services() @@ -96,7 +96,7 @@ pub async fn get_backup_info_route( /// /// - Deletes both information about the backup, as well as all key data related to the backup pub async fn delete_backup_version_route( - body: Ruma, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -115,7 +115,7 @@ pub async fn delete_backup_version_route( /// - Adds the keys to the backup /// - Returns the new number of keys in this backup and the etag pub async fn add_backup_keys_route( - body: Ruma, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -162,7 +162,7 @@ pub async fn add_backup_keys_route( /// - Adds the keys to the backup /// - Returns the new number of keys in this backup and the etag pub async fn add_backup_keys_for_room_route( - body: Ruma, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -207,7 +207,7 @@ pub async fn add_backup_keys_for_room_route( /// - Adds the keys to the backup /// - Returns the new number of keys in this backup and the etag pub async fn add_backup_keys_for_session_route( - body: Ruma, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -246,7 +246,7 @@ pub async fn add_backup_keys_for_session_route( /// /// Retrieves all keys from the backup. pub async fn get_backup_keys_route( - body: Ruma, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -259,7 +259,7 @@ pub async fn get_backup_keys_route( /// /// Retrieves all keys from the backup for a given room. pub async fn get_backup_keys_for_room_route( - body: Ruma, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -274,7 +274,7 @@ pub async fn get_backup_keys_for_room_route( /// /// Retrieves a key from the backup. pub async fn get_backup_keys_for_session_route( - body: Ruma, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -293,7 +293,7 @@ pub async fn get_backup_keys_for_session_route( /// /// Delete the keys from the backup. 
pub async fn delete_backup_keys_route( - body: Ruma, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -316,7 +316,7 @@ pub async fn delete_backup_keys_route( /// /// Delete the keys from the backup for a given room. pub async fn delete_backup_keys_for_room_route( - body: Ruma, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -339,7 +339,7 @@ pub async fn delete_backup_keys_for_room_route( /// /// Delete a key from the backup. pub async fn delete_backup_keys_for_session_route( - body: Ruma, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); diff --git a/src/api/client_server/capabilities.rs b/src/api/client_server/capabilities.rs index 233e3c9c..31d42d2f 100644 --- a/src/api/client_server/capabilities.rs +++ b/src/api/client_server/capabilities.rs @@ -8,7 +8,7 @@ use std::collections::BTreeMap; /// /// Get information on the supported feature set and other relevent capabilities of this server. pub async fn get_capabilities_route( - _body: Ruma, + _body: Ruma, ) -> Result { let mut available = BTreeMap::new(); for room_version in &services().globals.unstable_room_versions { diff --git a/src/api/client_server/config.rs b/src/api/client_server/config.rs index 37279e35..dbd2b2cc 100644 --- a/src/api/client_server/config.rs +++ b/src/api/client_server/config.rs @@ -17,7 +17,7 @@ use serde_json::{json, value::RawValue as RawJsonValue}; /// /// Sets some account data for the sender user. pub async fn set_global_account_data_route( - body: Ruma, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -43,7 +43,7 @@ pub async fn set_global_account_data_route( /// /// Sets some room account data for the sender user. pub async fn set_room_account_data_route( - body: Ruma, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -69,13 +69,13 @@ pub async fn set_room_account_data_route( /// /// Gets some account data for the sender user. pub async fn get_global_account_data_route( - body: Ruma, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let event: Box = services() .account_data - .get(None, sender_user, body.event_type.to_string().into())? + .get(None, sender_user, body.event_type.clone().into())? .ok_or(Error::BadRequest(ErrorKind::NotFound, "Data not found."))?; let account_data = serde_json::from_str::(event.get()) @@ -89,13 +89,17 @@ pub async fn get_global_account_data_route( /// /// Gets some room account data for the sender user. pub async fn get_room_account_data_route( - body: Ruma, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let event: Box = services() .account_data - .get(Some(&body.room_id), sender_user, body.event_type.clone())? + .get( + Some(&body.room_id), + sender_user, + body.event_type.clone().into(), + )? 
.ok_or(Error::BadRequest(ErrorKind::NotFound, "Data not found."))?; let account_data = serde_json::from_str::(event.get()) diff --git a/src/api/client_server/context.rs b/src/api/client_server/context.rs index 8e193e6b..2e0f2576 100644 --- a/src/api/client_server/context.rs +++ b/src/api/client_server/context.rs @@ -3,7 +3,7 @@ use ruma::{ api::client::{context::get_context, error::ErrorKind, filter::LazyLoadOptions}, events::StateEventType, }; -use std::collections::HashSet; +use std::{collections::HashSet, convert::TryFrom}; use tracing::error; /// # `GET /_matrix/client/r0/rooms/{roomId}/context` @@ -13,7 +13,7 @@ use tracing::error; /// - Only works if the user is joined (TODO: always allow, but only show events if the user was /// joined, depending on history_visibility) pub async fn get_context_route( - body: Ruma, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_device = body.sender_device.as_ref().expect("user is authenticated"); @@ -27,35 +27,36 @@ pub async fn get_context_route( let mut lazy_loaded = HashSet::new(); - let base_token = services() + let base_pdu_id = services() .rooms .timeline - .get_pdu_count(&body.event_id)? + .get_pdu_id(&body.event_id)? .ok_or(Error::BadRequest( ErrorKind::NotFound, "Base event id not found.", ))?; - let base_event = - services() - .rooms - .timeline - .get_pdu(&body.event_id)? - .ok_or(Error::BadRequest( - ErrorKind::NotFound, - "Base event not found.", - ))?; + let base_token = services().rooms.timeline.pdu_count(&base_pdu_id)?; + + let base_event = services() + .rooms + .timeline + .get_pdu_from_id(&base_pdu_id)? + .ok_or(Error::BadRequest( + ErrorKind::NotFound, + "Base event not found.", + ))?; let room_id = base_event.room_id.clone(); if !services() .rooms - .state_accessor - .user_can_see_event(sender_user, &room_id, &body.event_id)? + .state_cache + .is_joined(sender_user, &room_id)? { return Err(Error::BadRequest( ErrorKind::Forbidden, - "You don't have permission to view this event.", + "You don't have permission to view this room.", )); } @@ -69,24 +70,19 @@ pub async fn get_context_route( lazy_loaded.insert(base_event.sender.as_str().to_owned()); } - // Use limit with maximum 100 - let limit = u64::from(body.limit).min(100) as usize; - let base_event = base_event.to_room_event(); let events_before: Vec<_> = services() .rooms .timeline .pdus_until(sender_user, &room_id, base_token)? - .take(limit / 2) + .take( + u32::try_from(body.limit).map_err(|_| { + Error::BadRequest(ErrorKind::InvalidParam, "Limit value is invalid.") + })? as usize + / 2, + ) .filter_map(|r| r.ok()) // Remove buggy events - .filter(|(_, pdu)| { - services() - .rooms - .state_accessor - .user_can_see_event(sender_user, &room_id, &pdu.event_id) - .unwrap_or(false) - }) .collect(); for (_, event) in &events_before { @@ -103,8 +99,8 @@ pub async fn get_context_route( let start_token = events_before .last() - .map(|(count, _)| count.stringify()) - .unwrap_or_else(|| base_token.stringify()); + .and_then(|(pdu_id, _)| services().rooms.timeline.pdu_count(pdu_id).ok()) + .map(|count| count.to_string()); let events_before: Vec<_> = events_before .into_iter() @@ -115,15 +111,13 @@ pub async fn get_context_route( .rooms .timeline .pdus_after(sender_user, &room_id, base_token)? - .take(limit / 2) + .take( + u32::try_from(body.limit).map_err(|_| { + Error::BadRequest(ErrorKind::InvalidParam, "Limit value is invalid.") + })? 
as usize + / 2, + ) .filter_map(|r| r.ok()) // Remove buggy events - .filter(|(_, pdu)| { - services() - .rooms - .state_accessor - .user_can_see_event(sender_user, &room_id, &pdu.event_id) - .unwrap_or(false) - }) .collect(); for (_, event) in &events_after { @@ -159,8 +153,8 @@ pub async fn get_context_route( let end_token = events_after .last() - .map(|(count, _)| count.stringify()) - .unwrap_or_else(|| base_token.stringify()); + .and_then(|(pdu_id, _)| services().rooms.timeline.pdu_count(pdu_id).ok()) + .map(|count| count.to_string()); let events_after: Vec<_> = events_after .into_iter() @@ -197,8 +191,8 @@ pub async fn get_context_route( } let resp = get_context::v3::Response { - start: Some(start_token), - end: Some(end_token), + start: start_token, + end: end_token, events_before, event: Some(base_event), events_after, diff --git a/src/api/client_server/device.rs b/src/api/client_server/device.rs index aba061b2..d4c41786 100644 --- a/src/api/client_server/device.rs +++ b/src/api/client_server/device.rs @@ -28,7 +28,7 @@ pub async fn get_devices_route( /// /// Get metadata on a single device of the sender user. pub async fn get_device_route( - body: Ruma, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -44,7 +44,7 @@ pub async fn get_device_route( /// /// Updates the metadata on a given device of the sender user. pub async fn update_device_route( - body: Ruma, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -72,7 +72,7 @@ pub async fn update_device_route( /// - Forgets to-device events /// - Triggers device list updates pub async fn delete_device_route( - body: Ruma, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_device = body.sender_device.as_ref().expect("user is authenticated"); @@ -126,7 +126,7 @@ pub async fn delete_device_route( /// - Forgets to-device events /// - Triggers device list updates pub async fn delete_devices_route( - body: Ruma, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_device = body.sender_device.as_ref().expect("user is authenticated"); diff --git a/src/api/client_server/directory.rs b/src/api/client_server/directory.rs index 50ae9f15..f07a2254 100644 --- a/src/api/client_server/directory.rs +++ b/src/api/client_server/directory.rs @@ -11,7 +11,10 @@ use ruma::{ }, federation, }, - directory::{Filter, PublicRoomJoinRule, PublicRoomsChunk, RoomNetwork}, + directory::{ + Filter, IncomingFilter, IncomingRoomNetwork, PublicRoomJoinRule, PublicRoomsChunk, + RoomNetwork, + }, events::{ room::{ avatar::RoomAvatarEventContent, @@ -20,6 +23,7 @@ use ruma::{ guest_access::{GuestAccess, RoomGuestAccessEventContent}, history_visibility::{HistoryVisibility, RoomHistoryVisibilityEventContent}, join_rules::{JoinRule, RoomJoinRulesEventContent}, + name::RoomNameEventContent, topic::RoomTopicEventContent, }, StateEventType, @@ -34,7 +38,7 @@ use tracing::{error, info, warn}; /// /// - Rooms are ordered by the number of joined members pub async fn get_public_rooms_filtered_route( - body: Ruma, + body: Ruma, ) -> Result { get_public_rooms_filtered_helper( body.server.as_deref(), @@ -52,14 +56,14 @@ pub async fn get_public_rooms_filtered_route( /// /// - Rooms are ordered by the number of joined members pub async fn get_public_rooms_route( - body: Ruma, + body: Ruma, ) -> Result { let response = 
get_public_rooms_filtered_helper( body.server.as_deref(), body.limit, body.since.as_deref(), - &Filter::default(), - &RoomNetwork::Matrix, + &IncomingFilter::default(), + &IncomingRoomNetwork::Matrix, ) .await?; @@ -77,7 +81,7 @@ pub async fn get_public_rooms_route( /// /// - TODO: Access control checks pub async fn set_room_visibility_route( - body: Ruma, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -107,7 +111,7 @@ pub async fn set_room_visibility_route( /// /// Gets the visibility of a given room in the room directory. pub async fn get_room_visibility_route( - body: Ruma, + body: Ruma, ) -> Result { if !services().rooms.metadata.exists(&body.room_id)? { // Return 404 if the room doesn't exist @@ -127,8 +131,8 @@ pub(crate) async fn get_public_rooms_filtered_helper( server: Option<&ServerName>, limit: Option, since: Option<&str>, - filter: &Filter, - _network: &RoomNetwork, + filter: &IncomingFilter, + _network: &IncomingRoomNetwork, ) -> Result { if let Some(other_server) = server.filter(|server| *server != services().globals.server_name().as_str()) @@ -139,9 +143,9 @@ pub(crate) async fn get_public_rooms_filtered_helper( other_server, federation::directory::get_public_rooms_filtered::v1::Request { limit, - since: since.map(ToOwned::to_owned), + since, filter: Filter { - generic_search_term: filter.generic_search_term.clone(), + generic_search_term: filter.generic_search_term.as_deref(), room_types: filter.room_types.clone(), }, room_network: RoomNetwork::Matrix, @@ -202,7 +206,17 @@ pub(crate) async fn get_public_rooms_filtered_helper( Error::bad_database("Invalid canonical alias event in database.") }) })?, - name: services().rooms.state_accessor.get_name(&room_id)?, + name: services() + .rooms + .state_accessor + .room_state_get(&room_id, &StateEventType::RoomName, "")? + .map_or(Ok(None), |s| { + serde_json::from_str(s.content.get()) + .map(|c: RoomNameEventContent| c.name) + .map_err(|_| { + Error::bad_database("Invalid room name event in database.") + }) + })?, num_joined_members: services() .rooms .state_cache @@ -221,7 +235,6 @@ pub(crate) async fn get_public_rooms_filtered_helper( serde_json::from_str(s.content.get()) .map(|c: RoomTopicEventContent| Some(c.topic)) .map_err(|_| { - error!("Invalid room topic event in database for room {}", room_id); Error::bad_database("Invalid room topic event in database.") }) })?, @@ -351,7 +364,7 @@ pub(crate) async fn get_public_rooms_filtered_helper( let prev_batch = if num_since == 0 { None } else { - Some(format!("p{num_since}")) + Some(format!("p{}", num_since)) }; let next_batch = if chunk.len() < limit as usize { diff --git a/src/api/client_server/filter.rs b/src/api/client_server/filter.rs index e9a359d6..a0d5a192 100644 --- a/src/api/client_server/filter.rs +++ b/src/api/client_server/filter.rs @@ -10,7 +10,7 @@ use ruma::api::client::{ /// /// - A user can only access their own filters pub async fn get_filter_route( - body: Ruma, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let filter = match services().users.get_filter(sender_user, &body.filter_id)? { @@ -25,7 +25,7 @@ pub async fn get_filter_route( /// /// Creates a new filter to be used by other endpoints. 
pub async fn create_filter_route( - body: Ruma, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); Ok(create_filter::v3::Response::new( diff --git a/src/api/client_server/keys.rs b/src/api/client_server/keys.rs index 7dbe040d..b649166a 100644 --- a/src/api/client_server/keys.rs +++ b/src/api/client_server/keys.rs @@ -17,11 +17,7 @@ use ruma::{ DeviceKeyAlgorithm, OwnedDeviceId, OwnedUserId, UserId, }; use serde_json::json; -use std::{ - collections::{hash_map, BTreeMap, HashMap, HashSet}, - time::{Duration, Instant}, -}; -use tracing::debug; +use std::collections::{BTreeMap, HashMap, HashSet}; /// # `POST /_matrix/client/r0/keys/upload` /// @@ -69,7 +65,9 @@ pub async fn upload_keys_route( /// - Always fetches users from other servers over federation /// - Gets master keys, self-signing keys, user signing keys and device keys. /// - The master and self-signing keys contain signatures that the user is allowed to see -pub async fn get_keys_route(body: Ruma) -> Result { +pub async fn get_keys_route( + body: Ruma, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let response = @@ -95,7 +93,7 @@ pub async fn claim_keys_route( /// /// - Requires UIAA to verify password pub async fn upload_signing_keys_route( - body: Ruma, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_device = body.sender_device.as_ref().expect("user is authenticated"); @@ -136,7 +134,6 @@ pub async fn upload_signing_keys_route( master_key, &body.self_signing_key, &body.user_signing_key, - true, // notify so that other users see the new keys )?; } @@ -156,6 +153,18 @@ pub async fn upload_signatures_route( let key = serde_json::to_value(key) .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid key JSON"))?; + let is_signed_key = match key.get("usage") { + Some(usage) => usage + .as_array() + .map(|usage| !usage.contains(&json!("master"))) + .unwrap_or(false), + None => true, + }; + + if !is_signed_key { + continue; + } + for signature in key .get("signatures") .ok_or(Error::BadRequest( @@ -205,7 +214,7 @@ pub async fn upload_signatures_route( /// /// - TODO: left users pub async fn get_key_changes_route( - body: Ruma, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -316,17 +325,15 @@ pub(crate) async fn get_keys_helper bool>( } } - if let Some(master_key) = - services() - .users - .get_master_key(sender_user, user_id, &allowed_signatures)? + if let Some(master_key) = services() + .users + .get_master_key(user_id, &allowed_signatures)? { master_keys.insert(user_id.to_owned(), master_key); } - if let Some(self_signing_key) = - services() - .users - .get_self_signing_key(sender_user, user_id, &allowed_signatures)? + if let Some(self_signing_key) = services() + .users + .get_self_signing_key(user_id, &allowed_signatures)? 
{ self_signing_keys.insert(user_id.to_owned(), self_signing_key); } @@ -339,96 +346,36 @@ pub(crate) async fn get_keys_helper bool>( let mut failures = BTreeMap::new(); - let back_off = |id| match services() - .globals - .bad_query_ratelimiter - .write() - .unwrap() - .entry(id) - { - hash_map::Entry::Vacant(e) => { - e.insert((Instant::now(), 1)); - } - hash_map::Entry::Occupied(mut e) => *e.get_mut() = (Instant::now(), e.get().1 + 1), - }; - let mut futures: FuturesUnordered<_> = get_over_federation .into_iter() .map(|(server, vec)| async move { - if let Some((time, tries)) = services() - .globals - .bad_query_ratelimiter - .read() - .unwrap() - .get(&*server) - { - // Exponential backoff - let mut min_elapsed_duration = Duration::from_secs(30) * (*tries) * (*tries); - if min_elapsed_duration > Duration::from_secs(60 * 60 * 24) { - min_elapsed_duration = Duration::from_secs(60 * 60 * 24); - } - - if time.elapsed() < min_elapsed_duration { - debug!("Backing off query from {:?}", server); - return ( - server, - Err(Error::BadServerResponse("bad query, still backing off")), - ); - } - } - let mut device_keys_input_fed = BTreeMap::new(); for (user_id, keys) in vec { device_keys_input_fed.insert(user_id.to_owned(), keys.clone()); } ( server, - tokio::time::timeout( - Duration::from_secs(25), - services().sending.send_federation_request( + services() + .sending + .send_federation_request( server, federation::keys::get_keys::v1::Request { device_keys: device_keys_input_fed, }, - ), - ) - .await - .map_err(|e| Error::BadServerResponse("Query took too long")), + ) + .await, ) }) .collect(); while let Some((server, response)) = futures.next().await { match response { - Ok(Ok(response)) => { - for (user, masterkey) in response.master_keys { - let (master_key_id, mut master_key) = - services().users.parse_master_key(&user, &masterkey)?; - - if let Some(our_master_key) = services().users.get_key( - &master_key_id, - sender_user, - &user, - &allowed_signatures, - )? { - let (_, our_master_key) = - services().users.parse_master_key(&user, &our_master_key)?; - master_key.signatures.extend(our_master_key.signatures); - } - let json = serde_json::to_value(master_key).expect("to_value always works"); - let raw = serde_json::from_value(json).expect("Raw::from_value always works"); - services().users.add_cross_signing_keys( - &user, &raw, &None, &None, - false, // Dont notify. 
A notification would trigger another key request resulting in an endless loop - )?; - master_keys.insert(user, raw); - } - + Ok(response) => { + master_keys.extend(response.master_keys); self_signing_keys.extend(response.self_signing_keys); device_keys.extend(response.device_keys); } - _ => { - back_off(server.to_owned()); + Err(_e) => { failures.insert(server.to_string(), json!({})); } } diff --git a/src/api/client_server/media.rs b/src/api/client_server/media.rs index 75f8e156..fa6def0b 100644 --- a/src/api/client_server/media.rs +++ b/src/api/client_server/media.rs @@ -1,5 +1,3 @@ -use std::time::Duration; - use crate::{service::media::FileMeta, services, utils, Error, Result, Ruma}; use ruma::api::client::{ error::ErrorKind, @@ -29,7 +27,7 @@ pub async fn get_media_config_route( /// - Some metadata will be saved in the database /// - Media will be saved in the media/ directory pub async fn create_content_route( - body: Ruma, + body: Ruma, ) -> Result { let mxc = format!( "mxc://{}/{}", @@ -59,7 +57,7 @@ pub async fn create_content_route( pub async fn get_remote_content( mxc: &str, server_name: &ruma::ServerName, - media_id: String, + media_id: &str, ) -> Result { let content_response = services() .sending @@ -67,10 +65,8 @@ pub async fn get_remote_content( server_name, get_content::v3::Request { allow_remote: false, - server_name: server_name.to_owned(), + server_name, media_id, - timeout_ms: Duration::from_secs(20), - allow_redirect: false, }, ) .await?; @@ -94,7 +90,7 @@ pub async fn get_remote_content( /// /// - Only allows federation if `allow_remote` is true pub async fn get_content_route( - body: Ruma, + body: Ruma, ) -> Result { let mxc = format!("mxc://{}/{}", body.server_name, body.media_id); @@ -112,7 +108,7 @@ pub async fn get_content_route( }) } else if &*body.server_name != services().globals.server_name() && body.allow_remote { let remote_content_response = - get_remote_content(&mxc, &body.server_name, body.media_id.clone()).await?; + get_remote_content(&mxc, &body.server_name, &body.media_id).await?; Ok(remote_content_response) } else { Err(Error::BadRequest(ErrorKind::NotFound, "Media not found.")) @@ -125,7 +121,7 @@ pub async fn get_content_route( /// /// - Only allows federation if `allow_remote` is true pub async fn get_content_as_filename_route( - body: Ruma, + body: Ruma, ) -> Result { let mxc = format!("mxc://{}/{}", body.server_name, body.media_id); @@ -143,7 +139,7 @@ pub async fn get_content_as_filename_route( }) } else if &*body.server_name != services().globals.server_name() && body.allow_remote { let remote_content_response = - get_remote_content(&mxc, &body.server_name, body.media_id.clone()).await?; + get_remote_content(&mxc, &body.server_name, &body.media_id).await?; Ok(get_content_as_filename::v3::Response { content_disposition: Some(format!("inline: filename={}", body.filename)), @@ -162,7 +158,7 @@ pub async fn get_content_as_filename_route( /// /// - Only allows federation if `allow_remote` is true pub async fn get_content_thumbnail_route( - body: Ruma, + body: Ruma, ) -> Result { let mxc = format!("mxc://{}/{}", body.server_name, body.media_id); @@ -196,10 +192,8 @@ pub async fn get_content_thumbnail_route( height: body.height, width: body.width, method: body.method.clone(), - server_name: body.server_name.clone(), - media_id: body.media_id.clone(), - timeout_ms: Duration::from_secs(20), - allow_redirect: false, + server_name: &body.server_name, + media_id: &body.media_id, }, ) .await?; diff --git a/src/api/client_server/membership.rs 
b/src/api/client_server/membership.rs index 346f2575..2267cbf3 100644 --- a/src/api/client_server/membership.rs +++ b/src/api/client_server/membership.rs @@ -5,23 +5,19 @@ use ruma::{ membership::{ ban_user, forget_room, get_member_events, invite_user, join_room_by_id, join_room_by_id_or_alias, joined_members, joined_rooms, kick_user, leave_room, - unban_user, ThirdPartySigned, + unban_user, IncomingThirdPartySigned, }, }, federation::{self, membership::create_invite}, }, canonical_json::to_canonical_value, events::{ - room::{ - join_rules::{AllowRule, JoinRule, RoomJoinRulesEventContent}, - member::{MembershipState, RoomMemberEventContent}, - power_levels::RoomPowerLevelsEventContent, - }, - StateEventType, TimelineEventType, + room::member::{MembershipState, RoomMemberEventContent}, + RoomEventType, StateEventType, }, serde::Base64, - state_res, CanonicalJsonObject, CanonicalJsonValue, EventId, OwnedEventId, OwnedRoomId, - OwnedServerName, OwnedUserId, RoomId, RoomVersionId, UserId, + CanonicalJsonObject, CanonicalJsonValue, EventId, OwnedEventId, OwnedRoomId, OwnedServerName, + OwnedUserId, RoomId, RoomVersionId, UserId, }; use serde_json::value::{to_raw_value, RawValue as RawJsonValue}; use std::{ @@ -29,7 +25,7 @@ use std::{ sync::{Arc, RwLock}, time::{Duration, Instant}, }; -use tracing::{debug, error, info, warn}; +use tracing::{debug, error, warn}; use crate::{ service::pdu::{gen_event_id_canonical_json, PduBuilder}, @@ -45,7 +41,7 @@ use super::get_alias_helper; /// - If the server knowns about this room: creates the join event and does auth rules locally /// - If the server does not know about the room: asks other servers over federation pub async fn join_room_by_id_route( - body: Ruma, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -69,7 +65,6 @@ pub async fn join_room_by_id_route( join_room_by_id_helper( body.sender_user.as_deref(), &body.room_id, - body.reason.clone(), &servers, body.third_party_signed.as_ref(), ) @@ -83,7 +78,7 @@ pub async fn join_room_by_id_route( /// - If the server knowns about this room: creates the join event and does auth rules locally /// - If the server does not know about the room: asks other servers over federation pub async fn join_room_by_id_or_alias_route( - body: Ruma, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_deref().expect("user is authenticated"); let body = body.body; @@ -106,20 +101,18 @@ pub async fn join_room_by_id_or_alias_route( ); servers.push(room_id.server_name().to_owned()); - (servers, room_id) } Err(room_alias) => { - let response = get_alias_helper(room_alias).await?; + let response = get_alias_helper(&room_alias).await?; - (response.servers, response.room_id) + (response.servers.into_iter().collect(), response.room_id) } }; let join_room_response = join_room_by_id_helper( Some(sender_user), &room_id, - body.reason.clone(), &servers, body.third_party_signed.as_ref(), ) @@ -136,11 +129,11 @@ pub async fn join_room_by_id_or_alias_route( /// /// - This should always work if the user is currently joined. pub async fn leave_room_route( - body: Ruma, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - leave_room(sender_user, &body.room_id, body.reason.clone()).await?; + leave_room(sender_user, &body.room_id).await?; Ok(leave_room::v3::Response::new()) } @@ -149,19 +142,12 @@ pub async fn leave_room_route( /// /// Tries to send an invite event into the room. 
pub async fn invite_user_route( - body: Ruma, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - if let invite_user::v3::InvitationRecipient::UserId { user_id } = &body.recipient { - invite_helper( - sender_user, - user_id, - &body.room_id, - body.reason.clone(), - false, - ) - .await?; + if let invite_user::v3::IncomingInvitationRecipient::UserId { user_id } = &body.recipient { + invite_helper(sender_user, user_id, &body.room_id, false).await?; Ok(invite_user::v3::Response {}) } else { Err(Error::BadRequest(ErrorKind::NotFound, "User not found.")) @@ -172,7 +158,7 @@ pub async fn invite_user_route( /// /// Tries to send a kick event into the room. pub async fn kick_user_route( - body: Ruma, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -195,7 +181,7 @@ pub async fn kick_user_route( .map_err(|_| Error::bad_database("Invalid member event in database."))?; event.membership = MembershipState::Leave; - event.reason = body.reason.clone(); + // TODO: reason let mutex_state = Arc::clone( services() @@ -210,7 +196,7 @@ pub async fn kick_user_route( services().rooms.timeline.build_and_append_pdu( PduBuilder { - event_type: TimelineEventType::RoomMember, + event_type: RoomEventType::RoomMember, content: to_raw_value(&event).expect("event is valid, we just created it"), unsigned: None, state_key: Some(body.user_id.to_string()), @@ -229,9 +215,13 @@ pub async fn kick_user_route( /// # `POST /_matrix/client/r0/rooms/{roomId}/ban` /// /// Tries to send a ban event into the room. -pub async fn ban_user_route(body: Ruma) -> Result { +pub async fn ban_user_route( + body: Ruma, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + // TODO: reason + let event = services() .rooms .state_accessor @@ -248,7 +238,7 @@ pub async fn ban_user_route(body: Ruma) -> Result) -> Result) -> Result, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -317,7 +307,6 @@ pub async fn unban_user_route( .map_err(|_| Error::bad_database("Invalid member event in database."))?; event.membership = MembershipState::Leave; - event.reason = body.reason.clone(); let mutex_state = Arc::clone( services() @@ -332,7 +321,7 @@ pub async fn unban_user_route( services().rooms.timeline.build_and_append_pdu( PduBuilder { - event_type: TimelineEventType::RoomMember, + event_type: RoomEventType::RoomMember, content: to_raw_value(&event).expect("event is valid, we just created it"), unsigned: None, state_key: Some(body.user_id.to_string()), @@ -357,7 +346,7 @@ pub async fn unban_user_route( /// Note: Other devices of the user have no way of knowing the room was forgotten, so this has to /// be called from every device pub async fn forget_room_route( - body: Ruma, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -393,14 +382,15 @@ pub async fn joined_rooms_route( /// /// - Only works if the user is currently joined pub async fn get_member_events_route( - body: Ruma, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + // TODO: check history visibility? if !services() .rooms - .state_accessor - .user_can_see_state_events(&sender_user, &body.room_id)? + .state_cache + .is_joined(sender_user, &body.room_id)? 
{ return Err(Error::BadRequest( ErrorKind::Forbidden, @@ -428,18 +418,18 @@ pub async fn get_member_events_route( /// - The sender user must be in the room /// - TODO: An appservice just needs a puppet joined pub async fn joined_members_route( - body: Ruma, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); if !services() .rooms - .state_accessor - .user_can_see_state_events(&sender_user, &body.room_id)? + .state_cache + .is_joined(sender_user, &body.room_id)? { return Err(Error::BadRequest( ErrorKind::Forbidden, - "You don't have permission to view this room.", + "You aren't a member of the room.", )); } @@ -468,9 +458,8 @@ pub async fn joined_members_route( async fn join_room_by_id_helper( sender_user: Option<&UserId>, room_id: &RoomId, - reason: Option, servers: &[OwnedServerName], - _third_party_signed: Option<&ThirdPartySigned>, + _third_party_signed: Option<&IncomingThirdPartySigned>, ) -> Result { let sender_user = sender_user.expect("user is authenticated"); @@ -491,14 +480,33 @@ async fn join_room_by_id_helper( .state_cache .server_in_room(services().globals.server_name(), room_id)? { - info!("Joining {room_id} over federation."); + let mut make_join_response_and_server = Err(Error::BadServerResponse( + "No server available to assist in joining.", + )); - let (make_join_response, remote_server) = - make_join_request(sender_user, room_id, servers).await?; + for remote_server in servers { + let make_join_response = services() + .sending + .send_federation_request( + remote_server, + federation::membership::prepare_join_event::v1::Request { + room_id, + user_id: sender_user, + ver: &services().globals.supported_room_versions(), + }, + ) + .await; - info!("make_join finished"); + make_join_response_and_server = make_join_response.map(|r| (r, remote_server)); - let room_version_id = match make_join_response.room_version { + if make_join_response_and_server.is_ok() { + break; + } + } + + let (make_join_response, remote_server) = make_join_response_and_server?; + + let room_version = match make_join_response.room_version { Some(room_version) if services() .globals @@ -546,7 +554,7 @@ async fn join_room_by_id_helper( is_direct: None, third_party_invite: None, blurhash: services().users.blurhash(sender_user)?, - reason, + reason: None, join_authorized_via_users_server, }) .expect("event is valid, we just created it"), @@ -560,14 +568,14 @@ async fn join_room_by_id_helper( services().globals.server_name().as_str(), services().globals.keypair(), &mut join_event_stub, - &room_version_id, + &room_version, ) .expect("event is valid, we just created it"); // Generate event id let event_id = format!( "${}", - ruma::signatures::reference_hash(&join_event_stub, &room_version_id) + ruma::signatures::reference_hash(&join_event_stub, &room_version) .expect("ruma can calculate reference hashes") ); let event_id = <&EventId>::try_from(event_id.as_str()) @@ -580,95 +588,39 @@ async fn join_room_by_id_helper( ); // It has enough fields to be called a proper event now - let mut join_event = join_event_stub; + let join_event = join_event_stub; - info!("Asking {remote_server} for send_join"); let send_join_response = services() .sending .send_federation_request( - &remote_server, + remote_server, federation::membership::create_join_event::v2::Request { - room_id: room_id.to_owned(), - event_id: event_id.to_owned(), - pdu: PduEvent::convert_to_outgoing_federation_event(join_event.clone()), - omit_members: false, + room_id, + event_id, + pdu: 
&PduEvent::convert_to_outgoing_federation_event(join_event.clone()), }, ) .await?; - info!("send_join finished"); - - if let Some(signed_raw) = &send_join_response.room_state.event { - info!("There is a signed event. This room is probably using restricted joins. Adding signature to our event"); - let (signed_event_id, signed_value) = - match gen_event_id_canonical_json(signed_raw, &room_version_id) { - Ok(t) => t, - Err(_) => { - // Event could not be converted to canonical json - return Err(Error::BadRequest( - ErrorKind::InvalidParam, - "Could not convert event to canonical json.", - )); - } - }; - - if signed_event_id != event_id { - return Err(Error::BadRequest( - ErrorKind::InvalidParam, - "Server sent event with wrong event id", - )); - } - - match signed_value["signatures"] - .as_object() - .ok_or(Error::BadRequest( - ErrorKind::InvalidParam, - "Server sent invalid signatures type", - )) - .and_then(|e| { - e.get(remote_server.as_str()).ok_or(Error::BadRequest( - ErrorKind::InvalidParam, - "Server did not send its signature", - )) - }) { - Ok(signature) => { - join_event - .get_mut("signatures") - .expect("we created a valid pdu") - .as_object_mut() - .expect("we created a valid pdu") - .insert(remote_server.to_string(), signature.clone()); - } - Err(e) => { - warn!( - "Server {remote_server} sent invalid signature in sendjoin signatures for event {signed_value:?}: {e:?}", - ); - } - } - } - services().rooms.short.get_or_create_shortroomid(room_id)?; - info!("Parsing join event"); - let parsed_join_pdu = PduEvent::from_id_val(event_id, join_event.clone()) + let parsed_pdu = PduEvent::from_id_val(event_id, join_event.clone()) .map_err(|_| Error::BadServerResponse("Invalid join event PDU."))?; let mut state = HashMap::new(); let pub_key_map = RwLock::new(BTreeMap::new()); - info!("Fetching join signing keys"); services() .rooms .event_handler - .fetch_join_signing_keys(&send_join_response, &room_version_id, &pub_key_map) + .fetch_join_signing_keys(&send_join_response, &room_version, &pub_key_map) .await?; - info!("Going through send_join response room_state"); for result in send_join_response .room_state .state .iter() - .map(|pdu| validate_and_add_event_id(pdu, &room_version_id, &pub_key_map)) + .map(|pdu| validate_and_add_event_id(pdu, &room_version, &pub_key_map)) { let (event_id, value) = match result { Ok(t) => t, @@ -676,7 +628,7 @@ async fn join_room_by_id_helper( }; let pdu = PduEvent::from_id_val(&event_id, value.clone()).map_err(|e| { - warn!("Invalid PDU in send_join response: {} {:?}", e, value); + warn!("{:?}: {}", value, e); Error::BadServerResponse("Invalid PDU in send_join response.") })?; @@ -693,12 +645,31 @@ async fn join_room_by_id_helper( } } - info!("Going through send_join response auth_chain"); + let incoming_shortstatekey = services().rooms.short.get_or_create_shortstatekey( + &parsed_pdu.kind.to_string().into(), + parsed_pdu + .state_key + .as_ref() + .expect("Pdu is a membership state event"), + )?; + + state.insert(incoming_shortstatekey, parsed_pdu.event_id.clone()); + + let create_shortstatekey = services() + .rooms + .short + .get_shortstatekey(&StateEventType::RoomCreate, "")? 
+ .expect("Room exists"); + + if state.get(&create_shortstatekey).is_none() { + return Err(Error::BadServerResponse("State contained no create event.")); + } + for result in send_join_response .room_state .auth_chain .iter() - .map(|pdu| validate_and_add_event_id(pdu, &room_version_id, &pub_key_map)) + .map(|pdu| validate_and_add_event_id(pdu, &room_version, &pub_key_map)) { let (event_id, value) = match result { Ok(t) => t, @@ -711,51 +682,17 @@ async fn join_room_by_id_helper( .add_pdu_outlier(&event_id, &value)?; } - info!("Running send_join auth check"); - if !state_res::event_auth::auth_check( - &state_res::RoomVersion::new(&room_version_id).expect("room version is supported"), - &parsed_join_pdu, - None::, // TODO: third party invite - |k, s| { - services() - .rooms - .timeline - .get_pdu( - state.get( - &services() - .rooms - .short - .get_or_create_shortstatekey(&k.to_string().into(), s) - .ok()?, - )?, - ) - .ok()? - }, - ) - .map_err(|e| { - warn!("Auth check failed: {e}"); - Error::BadRequest(ErrorKind::InvalidParam, "Auth check failed") - })? { - return Err(Error::BadRequest( - ErrorKind::InvalidParam, - "Auth check failed", - )); - } - - info!("Saving state from send_join"); let (statehash_before_join, new, removed) = services().rooms.state_compressor.save_state( room_id, - Arc::new( - state - .into_iter() - .map(|(k, id)| { - services() - .rooms - .state_compressor - .compress_state_event(k, &id) - }) - .collect::>()?, - ), + state + .into_iter() + .map(|(k, id)| { + services() + .rooms + .state_compressor + .compress_state_event(k, &id) + }) + .collect::>()?, )?; services() @@ -764,22 +701,19 @@ async fn join_room_by_id_helper( .force_state(room_id, statehash_before_join, new, removed, &state_lock) .await?; - info!("Updating joined counts for new room"); services().rooms.state_cache.update_joined_count(room_id)?; // We append to state before appending the pdu, so we don't have a moment in time with the // pdu without it's state. This is okay because append_pdu can't fail. 
- let statehash_after_join = services().rooms.state.append_to_state(&parsed_join_pdu)?; + let statehash_after_join = services().rooms.state.append_to_state(&parsed_pdu)?; - info!("Appending new room join event"); services().rooms.timeline.append_pdu( - &parsed_join_pdu, + &parsed_pdu, join_event, - vec![(*parsed_join_pdu.event_id).to_owned()], + vec![(*parsed_pdu.event_id).to_owned()], &state_lock, )?; - info!("Setting final room state for new room"); // We set the room state after inserting the pdu, so that we never have a moment in time // where events in the current room state do not exist services() @@ -787,97 +721,6 @@ async fn join_room_by_id_helper( .state .set_room_state(room_id, statehash_after_join, &state_lock)?; } else { - info!("We can join locally"); - - let join_rules_event = services().rooms.state_accessor.room_state_get( - room_id, - &StateEventType::RoomJoinRules, - "", - )?; - let power_levels_event = services().rooms.state_accessor.room_state_get( - room_id, - &StateEventType::RoomPowerLevels, - "", - )?; - - let join_rules_event_content: Option = join_rules_event - .as_ref() - .map(|join_rules_event| { - serde_json::from_str(join_rules_event.content.get()).map_err(|e| { - warn!("Invalid join rules event: {}", e); - Error::bad_database("Invalid join rules event in db.") - }) - }) - .transpose()?; - let power_levels_event_content: Option = power_levels_event - .as_ref() - .map(|power_levels_event| { - serde_json::from_str(power_levels_event.content.get()).map_err(|e| { - warn!("Invalid power levels event: {}", e); - Error::bad_database("Invalid power levels event in db.") - }) - }) - .transpose()?; - - let restriction_rooms = match join_rules_event_content { - Some(RoomJoinRulesEventContent { - join_rule: JoinRule::Restricted(restricted), - }) - | Some(RoomJoinRulesEventContent { - join_rule: JoinRule::KnockRestricted(restricted), - }) => restricted - .allow - .into_iter() - .filter_map(|a| match a { - AllowRule::RoomMembership(r) => Some(r.room_id), - _ => None, - }) - .collect(), - _ => Vec::new(), - }; - - let authorized_user = restriction_rooms - .iter() - .find_map(|restriction_room_id| { - if !services() - .rooms - .state_cache - .is_joined(sender_user, restriction_room_id) - .ok()? - { - return None; - } - let authorized_user = power_levels_event_content - .as_ref() - .and_then(|c| { - c.users - .iter() - .filter(|(uid, i)| { - uid.server_name() == services().globals.server_name() - && **i > ruma::int!(0) - && services() - .rooms - .state_cache - .is_joined(uid, restriction_room_id) - .unwrap_or(false) - }) - .max_by_key(|(_, i)| *i) - .map(|(u, _)| u.to_owned()) - }) - .or_else(|| { - // TODO: Check here if user is actually allowed to invite. Currently the auth - // check will just fail in this case. 
- services() - .rooms - .state_cache - .room_members(restriction_room_id) - .filter_map(|r| r.ok()) - .find(|uid| uid.server_name() == services().globals.server_name()) - }); - Some(authorized_user) - }) - .flatten(); - let event = RoomMemberEventContent { membership: MembershipState::Join, displayname: services().users.displayname(sender_user)?, @@ -885,14 +728,13 @@ async fn join_room_by_id_helper( is_direct: None, third_party_invite: None, blurhash: services().users.blurhash(sender_user)?, - reason: reason.clone(), - join_authorized_via_users_server: authorized_user, + reason: None, + join_authorized_via_users_server: None, }; - // Try normal join first - let error = match services().rooms.timeline.build_and_append_pdu( + services().rooms.timeline.build_and_append_pdu( PduBuilder { - event_type: TimelineEventType::RoomMember, + event_type: RoomEventType::RoomMember, content: to_raw_value(&event).expect("event is valid, we just created it"), unsigned: None, state_key: Some(sender_user.to_string()), @@ -901,202 +743,14 @@ async fn join_room_by_id_helper( sender_user, room_id, &state_lock, - ) { - Ok(_event_id) => return Ok(join_room_by_id::v3::Response::new(room_id.to_owned())), - Err(e) => e, - }; - - if !restriction_rooms.is_empty() - && servers - .iter() - .filter(|s| *s != services().globals.server_name()) - .count() - > 0 - { - info!( - "We couldn't do the join locally, maybe federation can help to satisfy the restricted join requirements" - ); - let (make_join_response, remote_server) = - make_join_request(sender_user, room_id, servers).await?; - - let room_version_id = match make_join_response.room_version { - Some(room_version_id) - if services() - .globals - .supported_room_versions() - .contains(&room_version_id) => - { - room_version_id - } - _ => return Err(Error::BadServerResponse("Room version is not supported")), - }; - let mut join_event_stub: CanonicalJsonObject = - serde_json::from_str(make_join_response.event.get()).map_err(|_| { - Error::BadServerResponse("Invalid make_join event json received from server.") - })?; - let join_authorized_via_users_server = join_event_stub - .get("content") - .map(|s| { - s.as_object()? - .get("join_authorised_via_users_server")? - .as_str() - }) - .and_then(|s| OwnedUserId::try_from(s.unwrap_or_default()).ok()); - // TODO: Is origin needed? 
- join_event_stub.insert( - "origin".to_owned(), - CanonicalJsonValue::String(services().globals.server_name().as_str().to_owned()), - ); - join_event_stub.insert( - "origin_server_ts".to_owned(), - CanonicalJsonValue::Integer( - utils::millis_since_unix_epoch() - .try_into() - .expect("Timestamp is valid js_int value"), - ), - ); - join_event_stub.insert( - "content".to_owned(), - to_canonical_value(RoomMemberEventContent { - membership: MembershipState::Join, - displayname: services().users.displayname(sender_user)?, - avatar_url: services().users.avatar_url(sender_user)?, - is_direct: None, - third_party_invite: None, - blurhash: services().users.blurhash(sender_user)?, - reason, - join_authorized_via_users_server, - }) - .expect("event is valid, we just created it"), - ); - - // We don't leave the event id in the pdu because that's only allowed in v1 or v2 rooms - join_event_stub.remove("event_id"); - - // In order to create a compatible ref hash (EventID) the `hashes` field needs to be present - ruma::signatures::hash_and_sign_event( - services().globals.server_name().as_str(), - services().globals.keypair(), - &mut join_event_stub, - &room_version_id, - ) - .expect("event is valid, we just created it"); - - // Generate event id - let event_id = format!( - "${}", - ruma::signatures::reference_hash(&join_event_stub, &room_version_id) - .expect("ruma can calculate reference hashes") - ); - let event_id = <&EventId>::try_from(event_id.as_str()) - .expect("ruma's reference hashes are valid event ids"); - - // Add event_id back - join_event_stub.insert( - "event_id".to_owned(), - CanonicalJsonValue::String(event_id.as_str().to_owned()), - ); - - // It has enough fields to be called a proper event now - let join_event = join_event_stub; - - let send_join_response = services() - .sending - .send_federation_request( - &remote_server, - federation::membership::create_join_event::v2::Request { - room_id: room_id.to_owned(), - event_id: event_id.to_owned(), - pdu: PduEvent::convert_to_outgoing_federation_event(join_event.clone()), - omit_members: false, - }, - ) - .await?; - - if let Some(signed_raw) = send_join_response.room_state.event { - let (signed_event_id, signed_value) = - match gen_event_id_canonical_json(&signed_raw, &room_version_id) { - Ok(t) => t, - Err(_) => { - // Event could not be converted to canonical json - return Err(Error::BadRequest( - ErrorKind::InvalidParam, - "Could not convert event to canonical json.", - )); - } - }; - - if signed_event_id != event_id { - return Err(Error::BadRequest( - ErrorKind::InvalidParam, - "Server sent event with wrong event id", - )); - } - - drop(state_lock); - let pub_key_map = RwLock::new(BTreeMap::new()); - services() - .rooms - .event_handler - .handle_incoming_pdu( - &remote_server, - &signed_event_id, - room_id, - signed_value, - true, - &pub_key_map, - ) - .await?; - } else { - return Err(error); - } - } else { - return Err(error); - } + )?; } + drop(state_lock); + Ok(join_room_by_id::v3::Response::new(room_id.to_owned())) } -async fn make_join_request( - sender_user: &UserId, - room_id: &RoomId, - servers: &[OwnedServerName], -) -> Result<( - federation::membership::prepare_join_event::v1::Response, - OwnedServerName, -)> { - let mut make_join_response_and_server = Err(Error::BadServerResponse( - "No server available to assist in joining.", - )); - - for remote_server in servers { - if remote_server == services().globals.server_name() { - continue; - } - info!("Asking {remote_server} for make_join"); - let make_join_response = 
services() - .sending - .send_federation_request( - remote_server, - federation::membership::prepare_join_event::v1::Request { - room_id: room_id.to_owned(), - user_id: sender_user.to_owned(), - ver: services().globals.supported_room_versions(), - }, - ) - .await; - - make_join_response_and_server = make_join_response.map(|r| (r, remote_server.clone())); - - if make_join_response_and_server.is_ok() { - break; - } - } - - make_join_response_and_server -} - fn validate_and_add_event_id( pdu: &RawJsonValue, room_version: &RoomVersionId, @@ -1169,7 +823,6 @@ pub(crate) async fn invite_helper<'a>( sender_user: &UserId, user_id: &UserId, room_id: &RoomId, - reason: Option, is_direct: bool, ) -> Result<()> { if user_id.server_name() != services().globals.server_name() { @@ -1192,14 +845,14 @@ pub(crate) async fn invite_helper<'a>( membership: MembershipState::Invite, third_party_invite: None, blurhash: None, - reason, + reason: None, join_authorized_via_users_server: None, }) .expect("member event is valid value"); let (pdu, pdu_json) = services().rooms.timeline.create_hash_and_sign_event( PduBuilder { - event_type: TimelineEventType::RoomMember, + event_type: RoomEventType::RoomMember, content, unsigned: None, state_key: Some(user_id.to_string()), @@ -1217,18 +870,16 @@ pub(crate) async fn invite_helper<'a>( (pdu, pdu_json, invite_room_state) }; - let room_version_id = services().rooms.state.get_room_version(room_id)?; - let response = services() .sending .send_federation_request( user_id.server_name(), create_invite::v2::Request { - room_id: room_id.to_owned(), - event_id: (*pdu.event_id).to_owned(), - room_version: room_version_id.clone(), - event: PduEvent::convert_to_outgoing_federation_event(pdu_json.clone()), - invite_room_state, + room_id, + event_id: &pdu.event_id, + room_version: &services().rooms.state.get_room_version(room_id)?, + event: &PduEvent::convert_to_outgoing_federation_event(pdu_json.clone()), + invite_room_state: &invite_room_state, }, ) .await?; @@ -1236,8 +887,7 @@ pub(crate) async fn invite_helper<'a>( let pub_key_map = RwLock::new(BTreeMap::new()); // We do not add the event_id field to the pdu here because of signature and hashes checks - let (event_id, value) = match gen_event_id_canonical_json(&response.event, &room_version_id) - { + let (event_id, value) = match gen_event_id_canonical_json(&response.event) { Ok(t) => t, Err(_) => { // Event could not be converted to canonical json @@ -1248,7 +898,7 @@ pub(crate) async fn invite_helper<'a>( } }; - if *pdu.event_id != *event_id { + if pdu.event_id != event_id { warn!("Server {} changed invite event, that's not allowed in the spec: ours: {:?}, theirs: {:?}", user_id.server_name(), pdu_json, value); } @@ -1308,7 +958,7 @@ pub(crate) async fn invite_helper<'a>( services().rooms.timeline.build_and_append_pdu( PduBuilder { - event_type: TimelineEventType::RoomMember, + event_type: RoomEventType::RoomMember, content: to_raw_value(&RoomMemberEventContent { membership: MembershipState::Invite, displayname: services().users.displayname(user_id)?, @@ -1316,7 +966,7 @@ pub(crate) async fn invite_helper<'a>( is_direct: Some(is_direct), third_party_invite: None, blurhash: services().users.blurhash(user_id)?, - reason, + reason: None, join_authorized_via_users_server: None, }) .expect("event is valid, we just created it"), @@ -1355,13 +1005,13 @@ pub async fn leave_all_rooms(user_id: &UserId) -> Result<()> { Err(_) => continue, }; - let _ = leave_room(user_id, &room_id, None).await; + let _ = leave_room(user_id, &room_id).await; } 
Ok(()) } -pub async fn leave_room(user_id: &UserId, room_id: &RoomId, reason: Option) -> Result<()> { +pub async fn leave_room(user_id: &UserId, room_id: &RoomId) -> Result<()> { // Ask a remote server if we don't have this room if !services().rooms.metadata.exists(room_id)? && room_id.server_name() != services().globals.server_name() @@ -1429,11 +1079,10 @@ pub async fn leave_room(user_id: &UserId, room_id: &RoomId, reason: Option Result<()> { .sending .send_federation_request( &remote_server, - federation::membership::prepare_leave_event::v1::Request { - room_id: room_id.to_owned(), - user_id: user_id.to_owned(), - }, + federation::membership::prepare_leave_event::v1::Request { room_id, user_id }, ) .await; @@ -1556,9 +1202,9 @@ async fn remote_leave_room(user_id: &UserId, room_id: &RoomId) -> Result<()> { .send_federation_request( &remote_server, federation::membership::create_leave_event::v2::Request { - room_id: room_id.to_owned(), - event_id, - pdu: PduEvent::convert_to_outgoing_federation_event(leave_event.clone()), + room_id, + event_id: &event_id, + pdu: &PduEvent::convert_to_outgoing_federation_event(leave_event.clone()), }, ) .await?; diff --git a/src/api/client_server/message.rs b/src/api/client_server/message.rs index 750e0303..b04c2626 100644 --- a/src/api/client_server/message.rs +++ b/src/api/client_server/message.rs @@ -1,13 +1,10 @@ -use crate::{ - service::{pdu::PduBuilder, rooms::timeline::PduCount}, - services, utils, Error, Result, Ruma, -}; +use crate::{service::pdu::PduBuilder, services, utils, Error, Result, Ruma}; use ruma::{ api::client::{ error::ErrorKind, message::{get_message_events, send_message_event}, }, - events::{StateEventType, TimelineEventType}, + events::{RoomEventType, StateEventType}, }; use std::{ collections::{BTreeMap, HashSet}, @@ -22,7 +19,7 @@ use std::{ /// - The only requirement for the content is that it has to be valid json /// - Tries to send the event into the room, auth rules will determine if it is allowed pub async fn send_message_event_route( - body: Ruma, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_device = body.sender_device.as_deref(); @@ -39,7 +36,7 @@ pub async fn send_message_event_route( let state_lock = mutex_state.lock().await; // Forbid m.room.encrypted if encryption is disabled - if TimelineEventType::RoomEncrypted == body.event_type.to_string().into() + if RoomEventType::RoomEncrypted == body.event_type.to_string().into() && !services().globals.allow_encryption() { return Err(Error::BadRequest( @@ -108,23 +105,34 @@ pub async fn send_message_event_route( /// - Only works if the user is joined (TODO: always allow, but only show events where the user was /// joined, depending on history_visibility) pub async fn get_message_events_route( - body: Ruma, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_device = body.sender_device.as_ref().expect("user is authenticated"); + if !services() + .rooms + .state_cache + .is_joined(sender_user, &body.room_id)? 
+ { + return Err(Error::BadRequest( + ErrorKind::Forbidden, + "You don't have permission to view this room.", + )); + } + let from = match body.from.clone() { - Some(from) => PduCount::try_from_string(&from)?, + Some(from) => from + .parse() + .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid `from` value."))?, + None => match body.dir { - ruma::api::Direction::Forward => PduCount::min(), - ruma::api::Direction::Backward => PduCount::max(), + ruma::api::client::Direction::Forward => 0, + ruma::api::client::Direction::Backward => u64::MAX, }, }; - let to = body - .to - .as_ref() - .and_then(|t| PduCount::try_from_string(&t).ok()); + let to = body.to.as_ref().map(|t| t.parse()); services().rooms.lazy_loading.lazy_load_confirm_delivery( sender_user, @@ -133,7 +141,8 @@ pub async fn get_message_events_route( from, )?; - let limit = u64::from(body.limit).min(100) as usize; + // Use limit or else 10 + let limit = body.limit.try_into().map_or(10_usize, |l: u32| l as usize); let next_token; @@ -142,21 +151,22 @@ pub async fn get_message_events_route( let mut lazy_loaded = HashSet::new(); match body.dir { - ruma::api::Direction::Forward => { + ruma::api::client::Direction::Forward => { let events_after: Vec<_> = services() .rooms .timeline .pdus_after(sender_user, &body.room_id, from)? .take(limit) .filter_map(|r| r.ok()) // Filter out buggy events - .filter(|(_, pdu)| { + .filter_map(|(pdu_id, pdu)| { services() .rooms - .state_accessor - .user_can_see_event(sender_user, &body.room_id, &pdu.event_id) - .unwrap_or(false) + .timeline + .pdu_count(&pdu_id) + .map(|pdu_count| (pdu_count, pdu)) + .ok() }) - .take_while(|&(k, _)| Some(k) != to) // Stop at `to` + .take_while(|&(k, _)| Some(Ok(k)) != to) // Stop at `to` .collect(); for (_, event) in &events_after { @@ -182,30 +192,26 @@ pub async fn get_message_events_route( .map(|(_, pdu)| pdu.to_room_event()) .collect(); - resp.start = from.stringify(); - resp.end = next_token.map(|count| count.stringify()); + resp.start = from.to_string(); + resp.end = next_token.map(|count| count.to_string()); resp.chunk = events_after; } - ruma::api::Direction::Backward => { - services() - .rooms - .timeline - .backfill_if_required(&body.room_id, from) - .await?; + ruma::api::client::Direction::Backward => { let events_before: Vec<_> = services() .rooms .timeline .pdus_until(sender_user, &body.room_id, from)? 
.take(limit) .filter_map(|r| r.ok()) // Filter out buggy events - .filter(|(_, pdu)| { + .filter_map(|(pdu_id, pdu)| { services() .rooms - .state_accessor - .user_can_see_event(sender_user, &body.room_id, &pdu.event_id) - .unwrap_or(false) + .timeline + .pdu_count(&pdu_id) + .map(|pdu_count| (pdu_count, pdu)) + .ok() }) - .take_while(|&(k, _)| Some(k) != to) // Stop at `to` + .take_while(|&(k, _)| Some(Ok(k)) != to) // Stop at `to` .collect(); for (_, event) in &events_before { @@ -231,8 +237,8 @@ pub async fn get_message_events_route( .map(|(_, pdu)| pdu.to_room_event()) .collect(); - resp.start = from.stringify(); - resp.end = next_token.map(|count| count.stringify()); + resp.start = from.to_string(); + resp.end = next_token.map(|count| count.to_string()); resp.chunk = events_before; } } diff --git a/src/api/client_server/mod.rs b/src/api/client_server/mod.rs index 54c99aa0..6ed17e76 100644 --- a/src/api/client_server/mod.rs +++ b/src/api/client_server/mod.rs @@ -16,17 +16,14 @@ mod profile; mod push; mod read_marker; mod redact; -mod relations; mod report; mod room; mod search; mod session; -mod space; mod state; mod sync; mod tag; mod thirdparty; -mod threads; mod to_device; mod typing; mod unversioned; @@ -51,17 +48,14 @@ pub use profile::*; pub use push::*; pub use read_marker::*; pub use redact::*; -pub use relations::*; pub use report::*; pub use room::*; pub use search::*; pub use session::*; -pub use space::*; pub use state::*; pub use sync::*; pub use tag::*; pub use thirdparty::*; -pub use threads::*; pub use to_device::*; pub use typing::*; pub use unversioned::*; diff --git a/src/api/client_server/presence.rs b/src/api/client_server/presence.rs index e5cd1b8e..9bcd7ba9 100644 --- a/src/api/client_server/presence.rs +++ b/src/api/client_server/presence.rs @@ -1,7 +1,8 @@ -use crate::{services, utils, Error, Result, Ruma}; -use ruma::api::client::{ - error::ErrorKind, - presence::{get_presence, set_presence}, +use crate::{services, Result, Ruma}; +use ruma::{ + api::client::presence::{get_presence, set_presence}, + presence::PresenceState, + uint, }; use std::time::Duration; @@ -9,7 +10,7 @@ use std::time::Duration; /// /// Sets the presence state of the sender user. pub async fn set_presence_route( - body: Ruma, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -24,16 +25,13 @@ pub async fn set_presence_route( avatar_url: services().users.avatar_url(sender_user)?, currently_active: None, displayname: services().users.displayname(sender_user)?, - last_active_ago: Some( - utils::millis_since_unix_epoch() - .try_into() - .expect("time is valid"), - ), + last_active_ago: Some(uint!(0)), presence: body.presence.clone(), status_msg: body.status_msg.clone(), }, sender: sender_user.clone(), }, + true, )?; } @@ -46,7 +44,7 @@ pub async fn set_presence_route( /// /// - Only works if you share a room with the user pub async fn get_presence_route( - body: Ruma, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -63,7 +61,7 @@ pub async fn get_presence_route( .rooms .edus .presence - .get_last_presence_event(sender_user, &room_id)? + .get_presence_event(sender_user, &room_id)? { presence_event = Some(presence); break; @@ -72,7 +70,6 @@ pub async fn get_presence_route( if let Some(presence) = presence_event { Ok(get_presence::v3::Response { - // TODO: Should ruma just use the presenceeventcontent type here? 
status_msg: presence.content.status_msg, currently_active: presence.content.currently_active, last_active_ago: presence @@ -82,9 +79,6 @@ pub async fn get_presence_route( presence: presence.content.presence, }) } else { - Err(Error::BadRequest( - ErrorKind::NotFound, - "Presence state for this user was not found", - )) + Ok(get_presence::v3::Response::new(PresenceState::Offline)) } } diff --git a/src/api/client_server/profile.rs b/src/api/client_server/profile.rs index 8fb38b59..09f1a5e8 100644 --- a/src/api/client_server/profile.rs +++ b/src/api/client_server/profile.rs @@ -9,7 +9,7 @@ use ruma::{ }, federation::{self, query::get_profile_information::v1::ProfileField}, }, - events::{room::member::RoomMemberEventContent, StateEventType, TimelineEventType}, + events::{room::member::RoomMemberEventContent, RoomEventType, StateEventType}, }; use serde_json::value::to_raw_value; use std::sync::Arc; @@ -20,7 +20,7 @@ use std::sync::Arc; /// /// - Also makes sure other users receive the update using presence EDUs pub async fn set_displayname_route( - body: Ruma, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -37,7 +37,7 @@ pub async fn set_displayname_route( .map(|room_id| { Ok::<_, Error>(( PduBuilder { - event_type: TimelineEventType::RoomMember, + event_type: RoomEventType::RoomMember, content: to_raw_value(&RoomMemberEventContent { displayname: body.displayname.clone(), ..serde_json::from_str( @@ -109,6 +109,7 @@ pub async fn set_displayname_route( }, sender: sender_user.clone(), }, + true, )?; } @@ -121,7 +122,7 @@ pub async fn set_displayname_route( /// /// - If user is on another server: Fetches displayname over federation pub async fn get_displayname_route( - body: Ruma, + body: Ruma, ) -> Result { if body.user_id.server_name() != services().globals.server_name() { let response = services() @@ -129,8 +130,8 @@ pub async fn get_displayname_route( .send_federation_request( body.user_id.server_name(), federation::query::get_profile_information::v1::Request { - user_id: body.user_id.clone(), - field: Some(ProfileField::DisplayName), + user_id: &body.user_id, + field: Some(&ProfileField::DisplayName), }, ) .await?; @@ -151,7 +152,7 @@ pub async fn get_displayname_route( /// /// - Also makes sure other users receive the update using presence EDUs pub async fn set_avatar_url_route( - body: Ruma, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -172,7 +173,7 @@ pub async fn set_avatar_url_route( .map(|room_id| { Ok::<_, Error>(( PduBuilder { - event_type: TimelineEventType::RoomMember, + event_type: RoomEventType::RoomMember, content: to_raw_value(&RoomMemberEventContent { avatar_url: body.avatar_url.clone(), ..serde_json::from_str( @@ -244,6 +245,7 @@ pub async fn set_avatar_url_route( }, sender: sender_user.clone(), }, + true, )?; } @@ -256,7 +258,7 @@ pub async fn set_avatar_url_route( /// /// - If user is on another server: Fetches avatar_url and blurhash over federation pub async fn get_avatar_url_route( - body: Ruma, + body: Ruma, ) -> Result { if body.user_id.server_name() != services().globals.server_name() { let response = services() @@ -264,8 +266,8 @@ pub async fn get_avatar_url_route( .send_federation_request( body.user_id.server_name(), federation::query::get_profile_information::v1::Request { - user_id: body.user_id.clone(), - field: Some(ProfileField::AvatarUrl), + user_id: &body.user_id, + field: Some(&ProfileField::AvatarUrl), }, ) .await?; @@ -288,7 +290,7 @@ 
pub async fn get_avatar_url_route( /// /// - If user is on another server: Fetches profile over federation pub async fn get_profile_route( - body: Ruma, + body: Ruma, ) -> Result { if body.user_id.server_name() != services().globals.server_name() { let response = services() @@ -296,7 +298,7 @@ pub async fn get_profile_route( .send_federation_request( body.user_id.server_name(), federation::query::get_profile_information::v1::Request { - user_id: body.user_id.clone(), + user_id: &body.user_id, field: None, }, ) diff --git a/src/api/client_server/push.rs b/src/api/client_server/push.rs index 72768662..dc936a6c 100644 --- a/src/api/client_server/push.rs +++ b/src/api/client_server/push.rs @@ -5,11 +5,11 @@ use ruma::{ push::{ delete_pushrule, get_pushers, get_pushrule, get_pushrule_actions, get_pushrule_enabled, get_pushrules_all, set_pusher, set_pushrule, set_pushrule_actions, - set_pushrule_enabled, RuleScope, + set_pushrule_enabled, RuleKind, }, }, events::{push_rules::PushRulesEvent, GlobalAccountDataEventType}, - push::{InsertPushRuleError, RemovePushRuleError}, + push::{ConditionalPushRuleInit, PatternedPushRuleInit, SimplePushRuleInit}, }; /// # `GET /_matrix/client/r0/pushrules` @@ -45,7 +45,7 @@ pub async fn get_pushrules_all_route( /// /// Retrieves a single specified push rule for this user. pub async fn get_pushrule_route( - body: Ruma, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -65,10 +65,30 @@ pub async fn get_pushrule_route( .map_err(|_| Error::bad_database("Invalid account data event in db."))? .content; - let rule = account_data - .global - .get(body.kind.clone(), &body.rule_id) - .map(Into::into); + let global = account_data.global; + let rule = match body.kind { + RuleKind::Override => global + .override_ + .get(body.rule_id.as_str()) + .map(|rule| rule.clone().into()), + RuleKind::Underride => global + .underride + .get(body.rule_id.as_str()) + .map(|rule| rule.clone().into()), + RuleKind::Sender => global + .sender + .get(body.rule_id.as_str()) + .map(|rule| rule.clone().into()), + RuleKind::Room => global + .room + .get(body.rule_id.as_str()) + .map(|rule| rule.clone().into()), + RuleKind::Content => global + .content + .get(body.rule_id.as_str()) + .map(|rule| rule.clone().into()), + _ => None, + }; if let Some(rule) = rule { Ok(get_pushrule::v3::Response { rule }) @@ -84,12 +104,12 @@ pub async fn get_pushrule_route( /// /// Creates a single specified push rule for this user. 
pub async fn set_pushrule_route( - body: Ruma, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let body = body.body; - if body.scope != RuleScope::Global { + if body.scope != "global" { return Err(Error::BadRequest( ErrorKind::InvalidParam, "Scopes other than 'global' are not supported.", @@ -111,36 +131,67 @@ pub async fn set_pushrule_route( let mut account_data = serde_json::from_str::(event.get()) .map_err(|_| Error::bad_database("Invalid account data event in db."))?; - if let Err(error) = account_data.content.global.insert( - body.rule.clone(), - body.after.as_deref(), - body.before.as_deref(), - ) { - let err = match error { - InsertPushRuleError::ServerDefaultRuleId => Error::BadRequest( - ErrorKind::InvalidParam, - "Rule IDs starting with a dot are reserved for server-default rules.", - ), - InsertPushRuleError::InvalidRuleId => Error::BadRequest( - ErrorKind::InvalidParam, - "Rule ID containing invalid characters.", - ), - InsertPushRuleError::RelativeToServerDefaultRule => Error::BadRequest( - ErrorKind::InvalidParam, - "Can't place a push rule relatively to a server-default rule.", - ), - InsertPushRuleError::UnknownRuleId => Error::BadRequest( - ErrorKind::NotFound, - "The before or after rule could not be found.", - ), - InsertPushRuleError::BeforeHigherThanAfter => Error::BadRequest( - ErrorKind::InvalidParam, - "The before rule has a higher priority than the after rule.", - ), - _ => Error::BadRequest(ErrorKind::InvalidParam, "Invalid data."), - }; - - return Err(err); + let global = &mut account_data.content.global; + match body.kind { + RuleKind::Override => { + global.override_.replace( + ConditionalPushRuleInit { + actions: body.actions, + default: false, + enabled: true, + rule_id: body.rule_id, + conditions: body.conditions, + } + .into(), + ); + } + RuleKind::Underride => { + global.underride.replace( + ConditionalPushRuleInit { + actions: body.actions, + default: false, + enabled: true, + rule_id: body.rule_id, + conditions: body.conditions, + } + .into(), + ); + } + RuleKind::Sender => { + global.sender.replace( + SimplePushRuleInit { + actions: body.actions, + default: false, + enabled: true, + rule_id: body.rule_id, + } + .into(), + ); + } + RuleKind::Room => { + global.room.replace( + SimplePushRuleInit { + actions: body.actions, + default: false, + enabled: true, + rule_id: body.rule_id, + } + .into(), + ); + } + RuleKind::Content => { + global.content.replace( + PatternedPushRuleInit { + actions: body.actions, + default: false, + enabled: true, + rule_id: body.rule_id, + pattern: body.pattern.unwrap_or_default(), + } + .into(), + ); + } + _ => {} } services().account_data.update( @@ -157,11 +208,11 @@ pub async fn set_pushrule_route( /// /// Gets the actions of a single specified push rule for this user. 
pub async fn get_pushrule_actions_route( - body: Ruma, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - if body.scope != RuleScope::Global { + if body.scope != "global" { return Err(Error::BadRequest( ErrorKind::InvalidParam, "Scopes other than 'global' are not supported.", @@ -185,26 +236,44 @@ pub async fn get_pushrule_actions_route( .content; let global = account_data.global; - let actions = global - .get(body.kind.clone(), &body.rule_id) - .map(|rule| rule.actions().to_owned()) - .ok_or(Error::BadRequest( - ErrorKind::NotFound, - "Push rule not found.", - ))?; + let actions = match body.kind { + RuleKind::Override => global + .override_ + .get(body.rule_id.as_str()) + .map(|rule| rule.actions.clone()), + RuleKind::Underride => global + .underride + .get(body.rule_id.as_str()) + .map(|rule| rule.actions.clone()), + RuleKind::Sender => global + .sender + .get(body.rule_id.as_str()) + .map(|rule| rule.actions.clone()), + RuleKind::Room => global + .room + .get(body.rule_id.as_str()) + .map(|rule| rule.actions.clone()), + RuleKind::Content => global + .content + .get(body.rule_id.as_str()) + .map(|rule| rule.actions.clone()), + _ => None, + }; - Ok(get_pushrule_actions::v3::Response { actions }) + Ok(get_pushrule_actions::v3::Response { + actions: actions.unwrap_or_default(), + }) } /// # `PUT /_matrix/client/r0/pushrules/{scope}/{kind}/{ruleId}/actions` /// /// Sets the actions of a single specified push rule for this user. pub async fn set_pushrule_actions_route( - body: Ruma, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - if body.scope != RuleScope::Global { + if body.scope != "global" { return Err(Error::BadRequest( ErrorKind::InvalidParam, "Scopes other than 'global' are not supported.", @@ -226,17 +295,40 @@ pub async fn set_pushrule_actions_route( let mut account_data = serde_json::from_str::(event.get()) .map_err(|_| Error::bad_database("Invalid account data event in db."))?; - if account_data - .content - .global - .set_actions(body.kind.clone(), &body.rule_id, body.actions.clone()) - .is_err() - { - return Err(Error::BadRequest( - ErrorKind::NotFound, - "Push rule not found.", - )); - } + let global = &mut account_data.content.global; + match body.kind { + RuleKind::Override => { + if let Some(mut rule) = global.override_.get(body.rule_id.as_str()).cloned() { + rule.actions = body.actions.clone(); + global.override_.replace(rule); + } + } + RuleKind::Underride => { + if let Some(mut rule) = global.underride.get(body.rule_id.as_str()).cloned() { + rule.actions = body.actions.clone(); + global.underride.replace(rule); + } + } + RuleKind::Sender => { + if let Some(mut rule) = global.sender.get(body.rule_id.as_str()).cloned() { + rule.actions = body.actions.clone(); + global.sender.replace(rule); + } + } + RuleKind::Room => { + if let Some(mut rule) = global.room.get(body.rule_id.as_str()).cloned() { + rule.actions = body.actions.clone(); + global.room.replace(rule); + } + } + RuleKind::Content => { + if let Some(mut rule) = global.content.get(body.rule_id.as_str()).cloned() { + rule.actions = body.actions.clone(); + global.content.replace(rule); + } + } + _ => {} + }; services().account_data.update( None, @@ -252,11 +344,11 @@ pub async fn set_pushrule_actions_route( /// /// Gets the enabled status of a single specified push rule for this user. 
pub async fn get_pushrule_enabled_route( - body: Ruma, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - if body.scope != RuleScope::Global { + if body.scope != "global" { return Err(Error::BadRequest( ErrorKind::InvalidParam, "Scopes other than 'global' are not supported.", @@ -279,13 +371,34 @@ pub async fn get_pushrule_enabled_route( .map_err(|_| Error::bad_database("Invalid account data event in db."))?; let global = account_data.content.global; - let enabled = global - .get(body.kind.clone(), &body.rule_id) - .map(|r| r.enabled()) - .ok_or(Error::BadRequest( - ErrorKind::NotFound, - "Push rule not found.", - ))?; + let enabled = match body.kind { + RuleKind::Override => global + .override_ + .iter() + .find(|rule| rule.rule_id == body.rule_id) + .map_or(false, |rule| rule.enabled), + RuleKind::Underride => global + .underride + .iter() + .find(|rule| rule.rule_id == body.rule_id) + .map_or(false, |rule| rule.enabled), + RuleKind::Sender => global + .sender + .iter() + .find(|rule| rule.rule_id == body.rule_id) + .map_or(false, |rule| rule.enabled), + RuleKind::Room => global + .room + .iter() + .find(|rule| rule.rule_id == body.rule_id) + .map_or(false, |rule| rule.enabled), + RuleKind::Content => global + .content + .iter() + .find(|rule| rule.rule_id == body.rule_id) + .map_or(false, |rule| rule.enabled), + _ => false, + }; Ok(get_pushrule_enabled::v3::Response { enabled }) } @@ -294,11 +407,11 @@ pub async fn get_pushrule_enabled_route( /// /// Sets the enabled status of a single specified push rule for this user. pub async fn set_pushrule_enabled_route( - body: Ruma, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - if body.scope != RuleScope::Global { + if body.scope != "global" { return Err(Error::BadRequest( ErrorKind::InvalidParam, "Scopes other than 'global' are not supported.", @@ -320,16 +433,44 @@ pub async fn set_pushrule_enabled_route( let mut account_data = serde_json::from_str::(event.get()) .map_err(|_| Error::bad_database("Invalid account data event in db."))?; - if account_data - .content - .global - .set_enabled(body.kind.clone(), &body.rule_id, body.enabled) - .is_err() - { - return Err(Error::BadRequest( - ErrorKind::NotFound, - "Push rule not found.", - )); + let global = &mut account_data.content.global; + match body.kind { + RuleKind::Override => { + if let Some(mut rule) = global.override_.get(body.rule_id.as_str()).cloned() { + global.override_.remove(&rule); + rule.enabled = body.enabled; + global.override_.insert(rule); + } + } + RuleKind::Underride => { + if let Some(mut rule) = global.underride.get(body.rule_id.as_str()).cloned() { + global.underride.remove(&rule); + rule.enabled = body.enabled; + global.underride.insert(rule); + } + } + RuleKind::Sender => { + if let Some(mut rule) = global.sender.get(body.rule_id.as_str()).cloned() { + global.sender.remove(&rule); + rule.enabled = body.enabled; + global.sender.insert(rule); + } + } + RuleKind::Room => { + if let Some(mut rule) = global.room.get(body.rule_id.as_str()).cloned() { + global.room.remove(&rule); + rule.enabled = body.enabled; + global.room.insert(rule); + } + } + RuleKind::Content => { + if let Some(mut rule) = global.content.get(body.rule_id.as_str()).cloned() { + global.content.remove(&rule); + rule.enabled = body.enabled; + global.content.insert(rule); + } + } + _ => {} } services().account_data.update( @@ -346,11 +487,11 @@ pub async fn set_pushrule_enabled_route( /// /// 
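// Illustrative sketch, not from the patch: set elements cannot be mutated in place, which
// is why the enabled toggle above clones the stored rule, removes the old entry, flips the
// flag, and inserts the updated copy. The same three steps on a plain std `HashSet`:
use std::collections::HashSet;

#[derive(Clone, Debug, PartialEq, Eq, Hash)]
struct ToyRule {
    rule_id: String,
    enabled: bool,
}

fn set_enabled(rules: &mut HashSet<ToyRule>, rule_id: &str, enabled: bool) {
    let existing = rules.iter().find(|rule| rule.rule_id == rule_id).cloned();
    if let Some(mut rule) = existing {
        // Remove before mutating: the field change may alter the element's hash.
        rules.remove(&rule);
        rule.enabled = enabled;
        rules.insert(rule);
    }
}

fn main() {
    let mut rules = HashSet::new();
    rules.insert(ToyRule { rule_id: "myrule".to_owned(), enabled: false });
    set_enabled(&mut rules, "myrule", true);
    assert!(rules.iter().any(|rule| rule.enabled));
}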
Deletes a single specified push rule for this user. pub async fn delete_pushrule_route( - body: Ruma, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - if body.scope != RuleScope::Global { + if body.scope != "global" { return Err(Error::BadRequest( ErrorKind::InvalidParam, "Scopes other than 'global' are not supported.", @@ -372,23 +513,34 @@ pub async fn delete_pushrule_route( let mut account_data = serde_json::from_str::(event.get()) .map_err(|_| Error::bad_database("Invalid account data event in db."))?; - if let Err(error) = account_data - .content - .global - .remove(body.kind.clone(), &body.rule_id) - { - let err = match error { - RemovePushRuleError::ServerDefault => Error::BadRequest( - ErrorKind::InvalidParam, - "Cannot delete a server-default pushrule.", - ), - RemovePushRuleError::NotFound => { - Error::BadRequest(ErrorKind::NotFound, "Push rule not found.") + let global = &mut account_data.content.global; + match body.kind { + RuleKind::Override => { + if let Some(rule) = global.override_.get(body.rule_id.as_str()).cloned() { + global.override_.remove(&rule); } - _ => Error::BadRequest(ErrorKind::InvalidParam, "Invalid data."), - }; - - return Err(err); + } + RuleKind::Underride => { + if let Some(rule) = global.underride.get(body.rule_id.as_str()).cloned() { + global.underride.remove(&rule); + } + } + RuleKind::Sender => { + if let Some(rule) = global.sender.get(body.rule_id.as_str()).cloned() { + global.sender.remove(&rule); + } + } + RuleKind::Room => { + if let Some(rule) = global.room.get(body.rule_id.as_str()).cloned() { + global.room.remove(&rule); + } + } + RuleKind::Content => { + if let Some(rule) = global.content.get(body.rule_id.as_str()).cloned() { + global.content.remove(&rule); + } + } + _ => {} } services().account_data.update( diff --git a/src/api/client_server/read_marker.rs b/src/api/client_server/read_marker.rs index a5553d25..d529c6a8 100644 --- a/src/api/client_server/read_marker.rs +++ b/src/api/client_server/read_marker.rs @@ -1,4 +1,4 @@ -use crate::{service::rooms::timeline::PduCount, services, Error, Result, Ruma}; +use crate::{services, Error, Result, Ruma}; use ruma::{ api::client::{error::ErrorKind, read_marker::set_read_marker, receipt::create_receipt}, events::{ @@ -16,7 +16,7 @@ use std::collections::BTreeMap; /// - Updates fully-read account data event to `fully_read` /// - If `read_receipt` is set: Update private marker and public read receipt EDU pub async fn set_read_marker_route( - body: Ruma, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -42,28 +42,18 @@ pub async fn set_read_marker_route( } if let Some(event) = &body.private_read_receipt { - let count = services() - .rooms - .timeline - .get_pdu_count(event)? - .ok_or(Error::BadRequest( - ErrorKind::InvalidParam, - "Event does not exist.", - ))?; - let count = match count { - PduCount::Backfilled(_) => { - return Err(Error::BadRequest( + services().rooms.edus.read_receipt.private_read_set( + &body.room_id, + sender_user, + services() + .rooms + .timeline + .get_pdu_count(event)? 
+ .ok_or(Error::BadRequest( ErrorKind::InvalidParam, - "Read receipt is in backfilled timeline", - )) - } - PduCount::Normal(c) => c, - }; - services() - .rooms - .edus - .read_receipt - .private_read_set(&body.room_id, sender_user, count)?; + "Event does not exist.", + ))?, + )?; } if let Some(event) = &body.read_receipt { @@ -99,7 +89,7 @@ pub async fn set_read_marker_route( /// /// Sets private read marker and public read receipt EDU. pub async fn create_receipt_route( - body: Ruma, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -152,27 +142,17 @@ pub async fn create_receipt_route( )?; } create_receipt::v3::ReceiptType::ReadPrivate => { - let count = services() - .rooms - .timeline - .get_pdu_count(&body.event_id)? - .ok_or(Error::BadRequest( - ErrorKind::InvalidParam, - "Event does not exist.", - ))?; - let count = match count { - PduCount::Backfilled(_) => { - return Err(Error::BadRequest( - ErrorKind::InvalidParam, - "Read receipt is in backfilled timeline", - )) - } - PduCount::Normal(c) => c, - }; services().rooms.edus.read_receipt.private_read_set( &body.room_id, sender_user, - count, + services() + .rooms + .timeline + .get_pdu_count(&body.event_id)? + .ok_or(Error::BadRequest( + ErrorKind::InvalidParam, + "Event does not exist.", + ))?, )?; } _ => return Err(Error::bad_database("Unsupported receipt type")), diff --git a/src/api/client_server/redact.rs b/src/api/client_server/redact.rs index 21da2221..ab586c01 100644 --- a/src/api/client_server/redact.rs +++ b/src/api/client_server/redact.rs @@ -3,7 +3,7 @@ use std::sync::Arc; use crate::{service::pdu::PduBuilder, services, Result, Ruma}; use ruma::{ api::client::redact::redact_event, - events::{room::redaction::RoomRedactionEventContent, TimelineEventType}, + events::{room::redaction::RoomRedactionEventContent, RoomEventType}, }; use serde_json::value::to_raw_value; @@ -14,7 +14,7 @@ use serde_json::value::to_raw_value; /// /// - TODO: Handle txn id pub async fn redact_event_route( - body: Ruma, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let body = body.body; @@ -32,9 +32,8 @@ pub async fn redact_event_route( let event_id = services().rooms.timeline.build_and_append_pdu( PduBuilder { - event_type: TimelineEventType::RoomRedaction, + event_type: RoomEventType::RoomRedaction, content: to_raw_value(&RoomRedactionEventContent { - redacts: Some(body.event_id.clone()), reason: body.reason.clone(), }) .expect("event is valid, we just created it"), diff --git a/src/api/client_server/relations.rs b/src/api/client_server/relations.rs deleted file mode 100644 index a7cea786..00000000 --- a/src/api/client_server/relations.rs +++ /dev/null @@ -1,146 +0,0 @@ -use ruma::api::client::relations::{ - get_relating_events, get_relating_events_with_rel_type, - get_relating_events_with_rel_type_and_event_type, -}; - -use crate::{service::rooms::timeline::PduCount, services, Result, Ruma}; - -/// # `GET /_matrix/client/r0/rooms/{roomId}/relations/{eventId}/{relType}/{eventType}` -pub async fn get_relating_events_with_rel_type_and_event_type_route( - body: Ruma, -) -> Result { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - - let from = match body.from.clone() { - Some(from) => PduCount::try_from_string(&from)?, - None => match ruma::api::Direction::Backward { - // TODO: fix ruma so `body.dir` exists - ruma::api::Direction::Forward => PduCount::min(), - ruma::api::Direction::Backward => 
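// Illustrative sketch, not from the patch: both read-receipt paths above flatten a
// `Result<Option<u64>>` into a hard client error when the event is unknown, using `?`
// followed by `ok_or(..)?`. A stripped-down version of that control flow with toy types:
#[derive(Debug)]
enum ToyError {
    Database(&'static str),
    BadRequest(&'static str),
}

// Stand-in for `services().rooms.timeline.get_pdu_count(..)`: the lookup itself can fail,
// or it can succeed without finding anything.
fn get_pdu_count(event_id: &str) -> Result<Option<u64>, ToyError> {
    match event_id {
        "$known" => Ok(Some(42)),
        "$broken" => Err(ToyError::Database("corrupt index")),
        _ => Ok(None),
    }
}

fn private_read_count(event_id: &str) -> Result<u64, ToyError> {
    // The first `?` propagates database errors; `ok_or(..)?` turns "not found" into a
    // client-visible "Event does not exist." error.
    let count = get_pdu_count(event_id)?.ok_or(ToyError::BadRequest("Event does not exist."))?;
    Ok(count)
}

fn main() {
    assert_eq!(private_read_count("$known").unwrap(), 42);
    assert!(private_read_count("$unknown").is_err());
}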
PduCount::max(), - }, - }; - - let to = body - .to - .as_ref() - .and_then(|t| PduCount::try_from_string(&t).ok()); - - // Use limit or else 10, with maximum 100 - let limit = body - .limit - .and_then(|u| u32::try_from(u).ok()) - .map_or(10_usize, |u| u as usize) - .min(100); - - let res = services() - .rooms - .pdu_metadata - .paginate_relations_with_filter( - sender_user, - &body.room_id, - &body.event_id, - Some(body.event_type.clone()), - Some(body.rel_type.clone()), - from, - to, - limit, - )?; - - Ok( - get_relating_events_with_rel_type_and_event_type::v1::Response { - chunk: res.chunk, - next_batch: res.next_batch, - prev_batch: res.prev_batch, - }, - ) -} - -/// # `GET /_matrix/client/r0/rooms/{roomId}/relations/{eventId}/{relType}` -pub async fn get_relating_events_with_rel_type_route( - body: Ruma, -) -> Result { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - - let from = match body.from.clone() { - Some(from) => PduCount::try_from_string(&from)?, - None => match ruma::api::Direction::Backward { - // TODO: fix ruma so `body.dir` exists - ruma::api::Direction::Forward => PduCount::min(), - ruma::api::Direction::Backward => PduCount::max(), - }, - }; - - let to = body - .to - .as_ref() - .and_then(|t| PduCount::try_from_string(&t).ok()); - - // Use limit or else 10, with maximum 100 - let limit = body - .limit - .and_then(|u| u32::try_from(u).ok()) - .map_or(10_usize, |u| u as usize) - .min(100); - - let res = services() - .rooms - .pdu_metadata - .paginate_relations_with_filter( - sender_user, - &body.room_id, - &body.event_id, - None, - Some(body.rel_type.clone()), - from, - to, - limit, - )?; - - Ok(get_relating_events_with_rel_type::v1::Response { - chunk: res.chunk, - next_batch: res.next_batch, - prev_batch: res.prev_batch, - }) -} - -/// # `GET /_matrix/client/r0/rooms/{roomId}/relations/{eventId}` -pub async fn get_relating_events_route( - body: Ruma, -) -> Result { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - - let from = match body.from.clone() { - Some(from) => PduCount::try_from_string(&from)?, - None => match ruma::api::Direction::Backward { - // TODO: fix ruma so `body.dir` exists - ruma::api::Direction::Forward => PduCount::min(), - ruma::api::Direction::Backward => PduCount::max(), - }, - }; - - let to = body - .to - .as_ref() - .and_then(|t| PduCount::try_from_string(&t).ok()); - - // Use limit or else 10, with maximum 100 - let limit = body - .limit - .and_then(|u| u32::try_from(u).ok()) - .map_or(10_usize, |u| u as usize) - .min(100); - - services() - .rooms - .pdu_metadata - .paginate_relations_with_filter( - sender_user, - &body.room_id, - &body.event_id, - None, - None, - from, - to, - limit, - ) -} diff --git a/src/api/client_server/report.rs b/src/api/client_server/report.rs index ab5027cd..e45820e8 100644 --- a/src/api/client_server/report.rs +++ b/src/api/client_server/report.rs @@ -10,7 +10,7 @@ use ruma::{ /// Reports an inappropriate event to homeserver admins /// pub async fn report_event_route( - body: Ruma, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); diff --git a/src/api/client_server/room.rs b/src/api/client_server/room.rs index 0e2d9326..097f0e14 100644 --- a/src/api/client_server/room.rs +++ b/src/api/client_server/room.rs @@ -19,7 +19,7 @@ use ruma::{ tombstone::RoomTombstoneEventContent, topic::RoomTopicEventContent, }, - StateEventType, TimelineEventType, + RoomEventType, StateEventType, }, int, serde::JsonObject, 
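// Illustrative sketch, not from the patch: the relations endpoints above all derive their
// page size the same way, falling back to 10 when the client sends nothing and never going
// above 100. The same clamping as a small standalone helper over `Option<u64>`:
fn clamp_limit(requested: Option<u64>) -> usize {
    requested
        .and_then(|u| u32::try_from(u).ok()) // values that do not fit in u32 are dropped
        .map_or(10_usize, |u| u as usize)    // default page size
        .min(100)                            // hard upper bound
}

fn main() {
    assert_eq!(clamp_limit(None), 10);
    assert_eq!(clamp_limit(Some(25)), 25);
    assert_eq!(clamp_limit(Some(5_000)), 100);
    assert_eq!(clamp_limit(Some(u64::MAX)), 10); // does not fit in u32, so the default applies
}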
@@ -46,7 +46,7 @@ use tracing::{info, warn}; /// - Send events implied by `name` and `topic` /// - Send invite events pub async fn create_room_route( - body: Ruma, + body: Ruma, ) -> Result { use create_room::v3::RoomPreset; @@ -142,9 +142,8 @@ pub async fn create_room_route( content } None => { - // TODO: Add correct value for v11 let mut content = serde_json::from_str::( - to_raw_value(&RoomCreateEventContent::new_v1(sender_user.clone())) + to_raw_value(&RoomCreateEventContent::new(sender_user.clone())) .map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Invalid creation content"))? .get(), ) @@ -176,7 +175,7 @@ pub async fn create_room_route( // 1. The room create event services().rooms.timeline.build_and_append_pdu( PduBuilder { - event_type: TimelineEventType::RoomCreate, + event_type: RoomEventType::RoomCreate, content: to_raw_value(&content).expect("event is valid, we just created it"), unsigned: None, state_key: Some("".to_owned()), @@ -190,7 +189,7 @@ pub async fn create_room_route( // 2. Let the room creator join services().rooms.timeline.build_and_append_pdu( PduBuilder { - event_type: TimelineEventType::RoomMember, + event_type: RoomEventType::RoomMember, content: to_raw_value(&RoomMemberEventContent { membership: MembershipState::Join, displayname: services().users.displayname(sender_user)?, @@ -248,7 +247,7 @@ pub async fn create_room_route( services().rooms.timeline.build_and_append_pdu( PduBuilder { - event_type: TimelineEventType::RoomPowerLevels, + event_type: RoomEventType::RoomPowerLevels, content: to_raw_value(&power_levels_content) .expect("to_raw_value always works on serde_json::Value"), unsigned: None, @@ -264,7 +263,7 @@ pub async fn create_room_route( if let Some(room_alias_id) = &alias { services().rooms.timeline.build_and_append_pdu( PduBuilder { - event_type: TimelineEventType::RoomCanonicalAlias, + event_type: RoomEventType::RoomCanonicalAlias, content: to_raw_value(&RoomCanonicalAliasEventContent { alias: Some(room_alias_id.to_owned()), alt_aliases: vec![], @@ -285,7 +284,7 @@ pub async fn create_room_route( // 5.1 Join Rules services().rooms.timeline.build_and_append_pdu( PduBuilder { - event_type: TimelineEventType::RoomJoinRules, + event_type: RoomEventType::RoomJoinRules, content: to_raw_value(&RoomJoinRulesEventContent::new(match preset { RoomPreset::PublicChat => JoinRule::Public, // according to spec "invite" is the default @@ -304,7 +303,7 @@ pub async fn create_room_route( // 5.2 History Visibility services().rooms.timeline.build_and_append_pdu( PduBuilder { - event_type: TimelineEventType::RoomHistoryVisibility, + event_type: RoomEventType::RoomHistoryVisibility, content: to_raw_value(&RoomHistoryVisibilityEventContent::new( HistoryVisibility::Shared, )) @@ -321,7 +320,7 @@ pub async fn create_room_route( // 5.3 Guest Access services().rooms.timeline.build_and_append_pdu( PduBuilder { - event_type: TimelineEventType::RoomGuestAccess, + event_type: RoomEventType::RoomGuestAccess, content: to_raw_value(&RoomGuestAccessEventContent::new(match preset { RoomPreset::PublicChat => GuestAccess::Forbidden, _ => GuestAccess::CanJoin, @@ -347,7 +346,7 @@ pub async fn create_room_route( pdu_builder.state_key.get_or_insert_with(|| "".to_owned()); // Silently skip encryption events if they are not allowed - if pdu_builder.event_type == TimelineEventType::RoomEncryption + if pdu_builder.event_type == RoomEventType::RoomEncryption && !services().globals.allow_encryption() { continue; @@ -365,7 +364,7 @@ pub async fn create_room_route( if let Some(name) = 
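// Illustrative sketch, not from the patch: every `PduBuilder` above carries its content as
// pre-serialized raw JSON produced with `to_raw_value(&content)`. The serialization step in
// isolation, with a toy content type (assumes serde's derive feature and serde_json's
// `raw_value` feature are enabled):
use serde::Serialize;
use serde_json::value::{to_raw_value, RawValue};

#[derive(Serialize)]
struct ToyTopicContent {
    topic: String,
}

fn main() {
    let content = ToyTopicContent { topic: "Weekly sync notes".to_owned() };
    // The typed content is serialized once and then handed around as raw JSON, mirroring
    // `content: to_raw_value(&..).expect("event is valid, we just created it")`.
    let raw: Box<RawValue> =
        to_raw_value(&content).expect("serializing a plain struct cannot fail");
    assert_eq!(raw.get(), r#"{"topic":"Weekly sync notes"}"#);
}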
&body.name { services().rooms.timeline.build_and_append_pdu( PduBuilder { - event_type: TimelineEventType::RoomName, + event_type: RoomEventType::RoomName, content: to_raw_value(&RoomNameEventContent::new(Some(name.clone()))) .expect("event is valid, we just created it"), unsigned: None, @@ -381,7 +380,7 @@ pub async fn create_room_route( if let Some(topic) = &body.topic { services().rooms.timeline.build_and_append_pdu( PduBuilder { - event_type: TimelineEventType::RoomTopic, + event_type: RoomEventType::RoomTopic, content: to_raw_value(&RoomTopicEventContent { topic: topic.clone(), }) @@ -399,7 +398,7 @@ pub async fn create_room_route( // 8. Events implied by invite (and TODO: invite_3pid) drop(state_lock); for user_id in &body.invite { - let _ = invite_helper(sender_user, user_id, &room_id, None, body.is_direct).await; + let _ = invite_helper(sender_user, user_id, &room_id, body.is_direct).await; } // Homeserver specific stuff @@ -422,35 +421,28 @@ pub async fn create_room_route( /// /// - You have to currently be joined to the room (TODO: Respect history visibility) pub async fn get_room_event_route( - body: Ruma, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let event = services() + if !services() .rooms - .timeline - .get_pdu(&body.event_id)? - .ok_or_else(|| { - warn!("Event not found, event ID: {:?}", &body.event_id); - Error::BadRequest(ErrorKind::NotFound, "Event not found.") - })?; - - if !services().rooms.state_accessor.user_can_see_event( - sender_user, - &event.room_id, - &body.event_id, - )? { + .state_cache + .is_joined(sender_user, &body.room_id)? + { return Err(Error::BadRequest( ErrorKind::Forbidden, - "You don't have permission to view this event.", + "You don't have permission to view this room.", )); } - let mut event = (*event).clone(); - event.add_age()?; - Ok(get_room_event::v3::Response { - event: event.to_room_event(), + event: services() + .rooms + .timeline + .get_pdu(&body.event_id)? + .ok_or(Error::BadRequest(ErrorKind::NotFound, "Event not found."))? 
+ .to_room_event(), }) } @@ -460,7 +452,7 @@ pub async fn get_room_event_route( /// /// - Only users joined to the room are allowed to call this TODO: Allow any user to call it if history_visibility is world readable pub async fn get_room_aliases_route( - body: Ruma, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -496,7 +488,7 @@ pub async fn get_room_aliases_route( /// - Moves local aliases /// - Modifies old room power levels to prevent users from speaking pub async fn upgrade_room_route( - body: Ruma, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -533,7 +525,7 @@ pub async fn upgrade_room_route( // Fail if the sender does not have the required permissions let tombstone_event_id = services().rooms.timeline.build_and_append_pdu( PduBuilder { - event_type: TimelineEventType::RoomTombstone, + event_type: RoomEventType::RoomTombstone, content: to_raw_value(&RoomTombstoneEventContent { body: "This room has been replaced".to_owned(), replacement_room: replacement_room.clone(), @@ -615,7 +607,7 @@ pub async fn upgrade_room_route( services().rooms.timeline.build_and_append_pdu( PduBuilder { - event_type: TimelineEventType::RoomCreate, + event_type: RoomEventType::RoomCreate, content: to_raw_value(&create_event_content) .expect("event is valid, we just created it"), unsigned: None, @@ -630,7 +622,7 @@ pub async fn upgrade_room_route( // Join the new room services().rooms.timeline.build_and_append_pdu( PduBuilder { - event_type: TimelineEventType::RoomMember, + event_type: RoomEventType::RoomMember, content: to_raw_value(&RoomMemberEventContent { membership: MembershipState::Join, displayname: services().users.displayname(sender_user)?, @@ -723,7 +715,7 @@ pub async fn upgrade_room_route( // Modify the power levels in the old room to prevent sending of events and inviting new users let _ = services().rooms.timeline.build_and_append_pdu( PduBuilder { - event_type: TimelineEventType::RoomPowerLevels, + event_type: RoomEventType::RoomPowerLevels, content: to_raw_value(&power_levels_event_content) .expect("event is valid, we just created it"), unsigned: None, diff --git a/src/api/client_server/search.rs b/src/api/client_server/search.rs index e9fac365..5b634a44 100644 --- a/src/api/client_server/search.rs +++ b/src/api/client_server/search.rs @@ -15,7 +15,7 @@ use std::collections::BTreeMap; /// /// - Only works if the user is currently joined to the room (TODO: Respect history visibility) pub async fn search_events_route( - body: Ruma, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -31,8 +31,7 @@ pub async fn search_events_route( .collect() }); - // Use limit or else 10, with maximum 100 - let limit = filter.limit.map_or(10, u64::from).min(100) as usize; + let limit = filter.limit.map_or(10, |l| u64::from(l) as usize); let mut searches = Vec::new(); @@ -82,21 +81,6 @@ pub async fn search_events_route( let results: Vec<_> = results .iter() - .filter_map(|result| { - services() - .rooms - .timeline - .get_pdu_from_id(result) - .ok()? 
- .filter(|pdu| { - services() - .rooms - .state_accessor - .user_can_see_event(sender_user, &pdu.room_id, &pdu.event_id) - .unwrap_or(false) - }) - .map(|pdu| pdu.to_room_event()) - }) .map(|result| { Ok::<_, Error>(SearchResult { context: EventContextResult { @@ -107,7 +91,11 @@ pub async fn search_events_route( start: None, }, rank: None, - result: Some(result), + result: services() + .rooms + .timeline + .get_pdu_from_id(result)? + .map(|pdu| pdu.to_room_event()), }) }) .filter_map(|r| r.ok()) diff --git a/src/api/client_server/session.rs b/src/api/client_server/session.rs index 5ce62af9..7c8c1288 100644 --- a/src/api/client_server/session.rs +++ b/src/api/client_server/session.rs @@ -4,12 +4,12 @@ use ruma::{ api::client::{ error::ErrorKind, session::{get_login_types, login, logout, logout_all}, - uiaa::UserIdentifier, + uiaa::IncomingUserIdentifier, }, UserId, }; use serde::Deserialize; -use tracing::{info, warn}; +use tracing::info; #[derive(Debug, Deserialize)] struct Claims { @@ -22,11 +22,10 @@ struct Claims { /// Get the supported login types of this server. One of these should be used as the `type` field /// when logging in. pub async fn get_login_types_route( - _body: Ruma, + _body: Ruma, ) -> Result { Ok(get_login_types::v3::Response::new(vec![ get_login_types::v3::LoginType::Password(Default::default()), - get_login_types::v3::LoginType::ApplicationService(Default::default()), ])) } @@ -41,18 +40,17 @@ pub async fn get_login_types_route( /// /// Note: You can use [`GET /_matrix/client/r0/login`](fn.get_supported_versions_route.html) to see /// supported login types. -pub async fn login_route(body: Ruma) -> Result { +pub async fn login_route(body: Ruma) -> Result { // Validate login method // TODO: Other login methods let user_id = match &body.login_info { - login::v3::LoginInfo::Password(login::v3::Password { + login::v3::IncomingLoginInfo::Password(login::v3::IncomingPassword { identifier, password, }) => { - let username = if let UserIdentifier::UserIdOrLocalpart(user_id) = identifier { + let username = if let IncomingUserIdentifier::UserIdOrLocalpart(user_id) = identifier { user_id.to_lowercase() } else { - warn!("Bad login type: {:?}", &body.login_info); return Err(Error::BadRequest(ErrorKind::Forbidden, "Bad login type.")); }; let user_id = @@ -86,7 +84,7 @@ pub async fn login_route(body: Ruma) -> Result { + login::v3::IncomingLoginInfo::Token(login::v3::IncomingToken { token }) => { if let Some(jwt_decoding_key) = services().globals.jwt_decoding_key() { let token = jsonwebtoken::decode::( token, @@ -105,27 +103,7 @@ pub async fn login_route(body: Ruma) -> Result { - if !body.from_appservice { - return Err(Error::BadRequest( - ErrorKind::Forbidden, - "Forbidden login type.", - )); - }; - let username = if let UserIdentifier::UserIdOrLocalpart(user_id) = identifier { - user_id.to_lowercase() - } else { - return Err(Error::BadRequest(ErrorKind::Forbidden, "Bad login type.")); - }; - let user_id = - UserId::parse_with_server_name(username, services().globals.server_name()) - .map_err(|_| { - Error::BadRequest(ErrorKind::InvalidUsername, "Username is invalid.") - })?; - user_id - } _ => { - warn!("Unsupported or unknown login type: {:?}", &body.login_info); return Err(Error::BadRequest( ErrorKind::Unknown, "Unsupported login type.", diff --git a/src/api/client_server/space.rs b/src/api/client_server/space.rs deleted file mode 100644 index e2ea8c34..00000000 --- a/src/api/client_server/space.rs +++ /dev/null @@ -1,34 +0,0 @@ -use crate::{services, Result, Ruma}; -use 
ruma::api::client::space::get_hierarchy; - -/// # `GET /_matrix/client/v1/rooms/{room_id}/hierarchy`` -/// -/// Paginates over the space tree in a depth-first manner to locate child rooms of a given space. -pub async fn get_hierarchy_route( - body: Ruma, -) -> Result { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - - let skip = body - .from - .as_ref() - .and_then(|s| s.parse::().ok()) - .unwrap_or(0); - - let limit = body.limit.map_or(10, u64::from).min(100) as usize; - - let max_depth = body.max_depth.map_or(3, u64::from).min(10) as usize + 1; // +1 to skip the space room itself - - services() - .rooms - .spaces - .get_hierarchy( - sender_user, - &body.room_id, - limit, - skip, - max_depth, - body.suggested_only, - ) - .await -} diff --git a/src/api/client_server/state.rs b/src/api/client_server/state.rs index d6d39390..36466b8f 100644 --- a/src/api/client_server/state.rs +++ b/src/api/client_server/state.rs @@ -7,12 +7,15 @@ use ruma::{ state::{get_state_events, get_state_events_for_key, send_state_event}, }, events::{ - room::canonical_alias::RoomCanonicalAliasEventContent, AnyStateEventContent, StateEventType, + room::{ + canonical_alias::RoomCanonicalAliasEventContent, + history_visibility::{HistoryVisibility, RoomHistoryVisibilityEventContent}, + }, + AnyStateEventContent, StateEventType, }, serde::Raw, EventId, RoomId, UserId, }; -use tracing::log::warn; /// # `PUT /_matrix/client/r0/rooms/{roomId}/state/{eventType}/{stateKey}` /// @@ -22,7 +25,7 @@ use tracing::log::warn; /// - Tries to send the event into the room, auth rules will determine if it is allowed /// - If event is new canonical_alias: Rejects if alias is incorrect pub async fn send_state_event_for_key_route( - body: Ruma, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -47,7 +50,7 @@ pub async fn send_state_event_for_key_route( /// - Tries to send the event into the room, auth rules will determine if it is allowed /// - If event is new canonical_alias: Rejects if alias is incorrect pub async fn send_state_event_for_empty_key_route( - body: Ruma, + body: Ruma, ) -> Result> { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -78,14 +81,33 @@ pub async fn send_state_event_for_empty_key_route( /// /// - If not joined: Only works if current room history visibility is world readable pub async fn get_state_events_route( - body: Ruma, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + #[allow(clippy::blocks_in_if_conditions)] + // Users not in the room should not be able to access the state unless history_visibility is + // WorldReadable if !services() .rooms - .state_accessor - .user_can_see_state_events(&sender_user, &body.room_id)? + .state_cache + .is_joined(sender_user, &body.room_id)? + && !matches!( + services() + .rooms + .state_accessor + .room_state_get(&body.room_id, &StateEventType::RoomHistoryVisibility, "")? 
+ .map(|event| { + serde_json::from_str(event.content.get()) + .map(|e: RoomHistoryVisibilityEventContent| e.history_visibility) + .map_err(|_| { + Error::bad_database( + "Invalid room history visibility event in database.", + ) + }) + }), + Some(Ok(HistoryVisibility::WorldReadable)) + ) { return Err(Error::BadRequest( ErrorKind::Forbidden, @@ -111,14 +133,33 @@ pub async fn get_state_events_route( /// /// - If not joined: Only works if current room history visibility is world readable pub async fn get_state_events_for_key_route( - body: Ruma, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + #[allow(clippy::blocks_in_if_conditions)] + // Users not in the room should not be able to access the state unless history_visibility is + // WorldReadable if !services() .rooms - .state_accessor - .user_can_see_state_events(&sender_user, &body.room_id)? + .state_cache + .is_joined(sender_user, &body.room_id)? + && !matches!( + services() + .rooms + .state_accessor + .room_state_get(&body.room_id, &StateEventType::RoomHistoryVisibility, "")? + .map(|event| { + serde_json::from_str(event.content.get()) + .map(|e: RoomHistoryVisibilityEventContent| e.history_visibility) + .map_err(|_| { + Error::bad_database( + "Invalid room history visibility event in database.", + ) + }) + }), + Some(Ok(HistoryVisibility::WorldReadable)) + ) { return Err(Error::BadRequest( ErrorKind::Forbidden, @@ -130,13 +171,10 @@ pub async fn get_state_events_for_key_route( .rooms .state_accessor .room_state_get(&body.room_id, &body.event_type, &body.state_key)? - .ok_or_else(|| { - warn!( - "State event {:?} not found in room {:?}", - &body.event_type, &body.room_id - ); - Error::BadRequest(ErrorKind::NotFound, "State event not found.") - })?; + .ok_or(Error::BadRequest( + ErrorKind::NotFound, + "State event not found.", + ))?; Ok(get_state_events_for_key::v3::Response { content: serde_json::from_str(event.content.get()) @@ -150,14 +188,33 @@ pub async fn get_state_events_for_key_route( /// /// - If not joined: Only works if current room history visibility is world readable pub async fn get_state_events_for_empty_key_route( - body: Ruma, + body: Ruma, ) -> Result> { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + #[allow(clippy::blocks_in_if_conditions)] + // Users not in the room should not be able to access the state unless history_visibility is + // WorldReadable if !services() .rooms - .state_accessor - .user_can_see_state_events(&sender_user, &body.room_id)? + .state_cache + .is_joined(sender_user, &body.room_id)? + && !matches!( + services() + .rooms + .state_accessor + .room_state_get(&body.room_id, &StateEventType::RoomHistoryVisibility, "")? + .map(|event| { + serde_json::from_str(event.content.get()) + .map(|e: RoomHistoryVisibilityEventContent| e.history_visibility) + .map_err(|_| { + Error::bad_database( + "Invalid room history visibility event in database.", + ) + }) + }), + Some(Ok(HistoryVisibility::WorldReadable)) + ) { return Err(Error::BadRequest( ErrorKind::Forbidden, @@ -169,13 +226,10 @@ pub async fn get_state_events_for_empty_key_route( .rooms .state_accessor .room_state_get(&body.room_id, &body.event_type, "")? 
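// Illustrative sketch, not from the patch: the guard above lets non-members read room state
// only when the `m.room.history_visibility` state content deserializes to `world_readable`.
// The deserialize-and-match step on its own, with a toy content type standing in for
// ruma's RoomHistoryVisibilityEventContent:
use serde::Deserialize;

#[derive(Deserialize, Debug, PartialEq)]
#[serde(rename_all = "snake_case")]
enum ToyHistoryVisibility {
    Invited,
    Joined,
    Shared,
    WorldReadable,
}

#[derive(Deserialize)]
struct ToyHistoryVisibilityContent {
    history_visibility: ToyHistoryVisibility,
}

fn is_world_readable(raw_state_content: &str) -> bool {
    serde_json::from_str::<ToyHistoryVisibilityContent>(raw_state_content)
        .map(|content| content.history_visibility == ToyHistoryVisibility::WorldReadable)
        // A malformed event in the database counts as "not world readable" here.
        .unwrap_or(false)
}

fn main() {
    assert!(is_world_readable(r#"{"history_visibility":"world_readable"}"#));
    assert!(!is_world_readable(r#"{"history_visibility":"shared"}"#));
    assert!(!is_world_readable("not json"));
}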
- .ok_or_else(|| { - warn!( - "State event {:?} not found in room {:?}", - &body.event_type, &body.room_id - ); - Error::BadRequest(ErrorKind::NotFound, "State event not found.") - })?; + .ok_or(Error::BadRequest( + ErrorKind::NotFound, + "State event not found.", + ))?; Ok(get_state_events_for_key::v3::Response { content: serde_json::from_str(event.content.get()) diff --git a/src/api/client_server/sync.rs b/src/api/client_server/sync.rs index a275b066..03ef17a1 100644 --- a/src/api/client_server/sync.rs +++ b/src/api/client_server/sync.rs @@ -1,29 +1,19 @@ -use crate::{ - service::rooms::timeline::PduCount, services, Error, PduEvent, Result, Ruma, RumaResponse, -}; +use crate::{services, Error, Result, Ruma, RumaResponse}; use ruma::{ api::client::{ - filter::{FilterDefinition, LazyLoadOptions}, - sync::sync_events::{ - self, - v3::{ - Ephemeral, Filter, GlobalAccountData, InviteState, InvitedRoom, JoinedRoom, - LeftRoom, Presence, RoomAccountData, RoomSummary, Rooms, State, Timeline, ToDevice, - }, - v4::SlidingOp, - DeviceLists, UnreadNotificationsCount, - }, + filter::{IncomingFilterDefinition, LazyLoadOptions}, + sync::sync_events::{self, DeviceLists, UnreadNotificationsCount}, uiaa::UiaaResponse, }, events::{ room::member::{MembershipState, RoomMemberEventContent}, - StateEventType, TimelineEventType, + RoomEventType, StateEventType, }, serde::Raw, - uint, DeviceId, OwnedDeviceId, OwnedUserId, RoomId, UInt, UserId, + OwnedDeviceId, OwnedUserId, RoomId, UserId, }; use std::{ - collections::{hash_map::Entry, BTreeMap, BTreeSet, HashMap, HashSet}, + collections::{hash_map::Entry, BTreeMap, HashMap, HashSet}, sync::Arc, time::Duration, }; @@ -65,7 +55,7 @@ use tracing::error; /// - Sync is handled in an async task, multiple requests from the same device with the same /// `since` will be cached pub async fn sync_events_route( - body: Ruma, + body: Ruma, ) -> Result> { let sender_user = body.sender_user.expect("user is authenticated"); let sender_device = body.sender_device.expect("user is authenticated"); @@ -134,7 +124,7 @@ pub async fn sync_events_route( async fn sync_helper_wrapper( sender_user: OwnedUserId, sender_device: OwnedDeviceId, - body: sync_events::v3::Request, + body: sync_events::v3::IncomingRequest, tx: Sender>>, ) { let since = body.since.clone(); @@ -167,24 +157,32 @@ async fn sync_helper_wrapper( async fn sync_helper( sender_user: OwnedUserId, sender_device: OwnedDeviceId, - body: sync_events::v3::Request, + body: sync_events::v3::IncomingRequest, // bool = caching allowed ) -> Result<(sync_events::v3::Response, bool), Error> { + use sync_events::v3::{ + Ephemeral, GlobalAccountData, IncomingFilter, InviteState, InvitedRoom, JoinedRoom, + LeftRoom, Presence, RoomAccountData, RoomSummary, Rooms, State, Timeline, ToDevice, + }; + // TODO: match body.set_presence { - services().rooms.edus.presence.ping_presence(&sender_user)?; + services() + .rooms + .edus + .presence + .ping_presence(&sender_user, false, true, true)?; // Setup watchers, so if there's no response, we can wait for them let watcher = services().globals.watch(&sender_user, &sender_device); let next_batch = services().globals.current_count()?; - let next_batchcount = PduCount::Normal(next_batch); let next_batch_string = next_batch.to_string(); // Load filter let filter = match body.filter { - None => FilterDefinition::default(), - Some(Filter::FilterDefinition(filter)) => filter, - Some(Filter::FilterId(filter_id)) => services() + None => IncomingFilterDefinition::default(), + 
Some(IncomingFilter::FilterDefinition(filter)) => filter, + Some(IncomingFilter::FilterId(filter_id)) => services() .users .get_filter(&sender_user, &filter_id)? .unwrap_or_default(), @@ -197,15 +195,12 @@ async fn sync_helper( _ => (false, false), }; - let full_state = body.full_state; - let mut joined_rooms = BTreeMap::new(); let since = body .since - .as_ref() + .clone() .and_then(|string| string.parse().ok()) .unwrap_or(0); - let sincecount = PduCount::Normal(since); let mut presence_updates = HashMap::new(); let mut left_encrypted_users = HashSet::new(); // Users that have left any encrypted rooms the sender was in @@ -227,60 +222,610 @@ async fn sync_helper( .collect::>(); for room_id in all_joined_rooms { let room_id = room_id?; - if let Ok(joined_room) = load_joined_room( + + { + // Get and drop the lock to wait for remaining operations to finish + // This will make sure the we have all events until next_batch + let mutex_insert = Arc::clone( + services() + .globals + .roomid_mutex_insert + .write() + .unwrap() + .entry(room_id.clone()) + .or_default(), + ); + let insert_lock = mutex_insert.lock().unwrap(); + drop(insert_lock); + } + + let timeline_pdus; + let limited; + if services() + .rooms + .timeline + .last_timeline_count(&sender_user, &room_id)? + > since + { + let mut non_timeline_pdus = services() + .rooms + .timeline + .pdus_until(&sender_user, &room_id, u64::MAX)? + .filter_map(|r| { + // Filter out buggy events + if r.is_err() { + error!("Bad pdu in pdus_since: {:?}", r); + } + r.ok() + }) + .take_while(|(pduid, _)| { + services() + .rooms + .timeline + .pdu_count(pduid) + .map_or(false, |count| count > since) + }); + + // Take the last 10 events for the timeline + timeline_pdus = non_timeline_pdus + .by_ref() + .take(10) + .collect::>() + .into_iter() + .rev() + .collect::>(); + + // They /sync response doesn't always return all messages, so we say the output is + // limited unless there are events in non_timeline_pdus + limited = non_timeline_pdus.next().is_some(); + } else { + timeline_pdus = Vec::new(); + limited = false; + } + + let send_notification_counts = !timeline_pdus.is_empty() + || services() + .rooms + .user + .last_notification_read(&sender_user, &room_id)? + > since; + + let mut timeline_users = HashSet::new(); + for (_, event) in &timeline_pdus { + timeline_users.insert(event.sender.as_str().to_owned()); + } + + services().rooms.lazy_loading.lazy_load_confirm_delivery( &sender_user, &sender_device, &room_id, since, - sincecount, - next_batch, - next_batchcount, - lazy_load_enabled, - lazy_load_send_redundant, - full_state, - &mut device_list_updates, - &mut left_encrypted_users, - ) - .await - { - if !joined_room.is_empty() { - joined_rooms.insert(room_id.clone(), joined_room); + )?; + + // Database queries: + + let current_shortstatehash = + if let Some(s) = services().rooms.state.get_room_shortstatehash(&room_id)? { + s + } else { + error!("Room {} has no state", room_id); + continue; + }; + + let since_shortstatehash = services() + .rooms + .user + .get_token_shortstatehash(&room_id, since)?; + + // Calculates joined_member_count, invited_member_count and heroes + let calculate_counts = || { + let joined_member_count = services() + .rooms + .state_cache + .room_joined_count(&room_id)? + .unwrap_or(0); + let invited_member_count = services() + .rooms + .state_cache + .room_invited_count(&room_id)? 
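// Illustrative sketch, not from the patch: the timeline block above walks events newest
// first, stops at the `since` token, keeps only the most recent page, reverses it back into
// chronological order, and marks the room as `limited` when anything was left over. The
// same windowing on plain data:
fn timeline_window(
    events_newest_first: &[(u64, &'static str)],
    since: u64,
    page: usize,
) -> (Vec<(u64, &'static str)>, bool) {
    let mut newer = events_newest_first
        .iter()
        .copied()
        .take_while(|(count, _)| *count > since);

    let mut window: Vec<_> = newer.by_ref().take(page).collect();
    window.reverse(); // oldest first, as the sync response expects

    // If the iterator still yields something, this page did not cover every new event.
    let limited = newer.next().is_some();
    (window, limited)
}

fn main() {
    let events = [(5, "e5"), (4, "e4"), (3, "e3"), (2, "e2"), (1, "e1")];
    let (window, limited) = timeline_window(&events, 1, 2);
    assert_eq!(window, vec![(4, "e4"), (5, "e5")]);
    assert!(limited); // e3 and e2 are newer than `since` but did not fit on the page
}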
+ .unwrap_or(0); + + // Recalculate heroes (first 5 members) + let mut heroes = Vec::new(); + + if joined_member_count + invited_member_count <= 5 { + // Go through all PDUs and for each member event, check if the user is still joined or + // invited until we have 5 or we reach the end + + for hero in services() + .rooms + .timeline + .all_pdus(&sender_user, &room_id)? + .filter_map(|pdu| pdu.ok()) // Ignore all broken pdus + .filter(|(_, pdu)| pdu.kind == RoomEventType::RoomMember) + .map(|(_, pdu)| { + let content: RoomMemberEventContent = + serde_json::from_str(pdu.content.get()).map_err(|_| { + Error::bad_database("Invalid member event in database.") + })?; + + if let Some(state_key) = &pdu.state_key { + let user_id = UserId::parse(state_key.clone()).map_err(|_| { + Error::bad_database("Invalid UserId in member PDU.") + })?; + + // The membership was and still is invite or join + if matches!( + content.membership, + MembershipState::Join | MembershipState::Invite + ) && (services().rooms.state_cache.is_joined(&user_id, &room_id)? + || services() + .rooms + .state_cache + .is_invited(&user_id, &room_id)?) + { + Ok::<_, Error>(Some(state_key.clone())) + } else { + Ok(None) + } + } else { + Ok(None) + } + }) + // Filter out buggy users + .filter_map(|u| u.ok()) + // Filter for possible heroes + .flatten() + { + if heroes.contains(&hero) || hero == sender_user.as_str() { + continue; + } + + heroes.push(hero); + } } - // Take presence updates from this room - for (user_id, presence) in services() - .rooms - .edus - .presence - .presence_since(&room_id, since)? - { - match presence_updates.entry(user_id) { - Entry::Vacant(v) => { - v.insert(presence); - } - Entry::Occupied(mut o) => { - let p = o.get_mut(); + Ok::<_, Error>(( + Some(joined_member_count), + Some(invited_member_count), + heroes, + )) + }; - // Update existing presence event with more info - p.content.presence = presence.content.presence; - if let Some(status_msg) = presence.content.status_msg { - p.content.status_msg = Some(status_msg); + let since_sender_member: Option = since_shortstatehash + .and_then(|shortstatehash| { + services() + .rooms + .state_accessor + .state_get( + shortstatehash, + &StateEventType::RoomMember, + sender_user.as_str(), + ) + .transpose() + }) + .transpose()? + .and_then(|pdu| { + serde_json::from_str(pdu.content.get()) + .map_err(|_| Error::bad_database("Invalid PDU in database.")) + .ok() + }); + + let joined_since_last_sync = + since_sender_member.map_or(true, |member| member.membership != MembershipState::Join); + + let ( + heroes, + joined_member_count, + invited_member_count, + joined_since_last_sync, + state_events, + ) = if since_shortstatehash.is_none() || joined_since_last_sync { + // Probably since = 0, we will do an initial sync + + let (joined_member_count, invited_member_count, heroes) = calculate_counts()?; + + let current_state_ids = services() + .rooms + .state_accessor + .state_full_ids(current_shortstatehash) + .await?; + + let mut state_events = Vec::new(); + let mut lazy_loaded = HashSet::new(); + + let mut i = 0; + for (shortstatekey, id) in current_state_ids { + let (event_type, state_key) = services() + .rooms + .short + .get_statekey_from_short(shortstatekey)?; + + if event_type != StateEventType::RoomMember { + let pdu = match services().rooms.timeline.get_pdu(&id)? 
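// Illustrative sketch, not from the patch: the heroes block above scans member events and
// keeps up to five user IDs that are still joined or invited, skipping the requesting user
// and duplicates. The selection logic reduced to plain strings:
fn pick_heroes<'a>(
    member_events: &[(&'a str, bool)], // (user id from the state key, still joined or invited?)
    sender_user: &str,
) -> Vec<&'a str> {
    let mut heroes = Vec::new();
    for &(user_id, still_in_room) in member_events {
        if !still_in_room || user_id == sender_user || heroes.contains(&user_id) {
            continue;
        }
        heroes.push(user_id);
        if heroes.len() == 5 {
            break;
        }
    }
    heroes
}

fn main() {
    let members = [
        ("@alice:example.org", true),
        ("@me:example.org", true),   // the requesting user never appears as their own hero
        ("@bob:example.org", false), // already left, so not a hero
        ("@alice:example.org", true),
    ];
    assert_eq!(pick_heroes(&members, "@me:example.org"), vec!["@alice:example.org"]);
}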
{ + Some(pdu) => pdu, + None => { + error!("Pdu in state not found: {}", id); + continue; } - if let Some(last_active_ago) = presence.content.last_active_ago { - p.content.last_active_ago = Some(last_active_ago); + }; + state_events.push(pdu); + + i += 1; + if i % 100 == 0 { + tokio::task::yield_now().await; + } + } else if !lazy_load_enabled + || body.full_state + || timeline_users.contains(&state_key) + // TODO: Delete the following line when this is resolved: https://github.com/vector-im/element-web/issues/22565 + || *sender_user == state_key + { + let pdu = match services().rooms.timeline.get_pdu(&id)? { + Some(pdu) => pdu, + None => { + error!("Pdu in state not found: {}", id); + continue; } - if let Some(displayname) = presence.content.displayname { - p.content.displayname = Some(displayname); + }; + + // This check is in case a bad user ID made it into the database + if let Ok(uid) = UserId::parse(&state_key) { + lazy_loaded.insert(uid); + } + state_events.push(pdu); + + i += 1; + if i % 100 == 0 { + tokio::task::yield_now().await; + } + } + } + + // Reset lazy loading because this is an initial sync + services().rooms.lazy_loading.lazy_load_reset( + &sender_user, + &sender_device, + &room_id, + )?; + + // The state_events above should contain all timeline_users, let's mark them as lazy + // loaded. + services().rooms.lazy_loading.lazy_load_mark_sent( + &sender_user, + &sender_device, + &room_id, + lazy_loaded, + next_batch, + ); + + ( + heroes, + joined_member_count, + invited_member_count, + true, + state_events, + ) + } else if timeline_pdus.is_empty() && since_shortstatehash == Some(current_shortstatehash) { + // No state changes + (Vec::new(), None, None, false, Vec::new()) + } else { + // Incremental /sync + let since_shortstatehash = since_shortstatehash.unwrap(); + + let mut state_events = Vec::new(); + let mut lazy_loaded = HashSet::new(); + + if since_shortstatehash != current_shortstatehash { + let current_state_ids = services() + .rooms + .state_accessor + .state_full_ids(current_shortstatehash) + .await?; + let since_state_ids = services() + .rooms + .state_accessor + .state_full_ids(since_shortstatehash) + .await?; + + for (key, id) in current_state_ids { + if body.full_state || since_state_ids.get(&key) != Some(&id) { + let pdu = match services().rooms.timeline.get_pdu(&id)? { + Some(pdu) => pdu, + None => { + error!("Pdu in state not found: {}", id); + continue; + } + }; + + if pdu.kind == RoomEventType::RoomMember { + match UserId::parse( + pdu.state_key + .as_ref() + .expect("State event has state key") + .clone(), + ) { + Ok(state_key_userid) => { + lazy_loaded.insert(state_key_userid); + } + Err(e) => error!("Invalid state key for member event: {}", e), + } } - if let Some(avatar_url) = presence.content.avatar_url { - p.content.avatar_url = Some(avatar_url); + + state_events.push(pdu); + tokio::task::yield_now().await; + } + } + } + + for (_, event) in &timeline_pdus { + if lazy_loaded.contains(&event.sender) { + continue; + } + + if !services().rooms.lazy_loading.lazy_load_was_sent_before( + &sender_user, + &sender_device, + &room_id, + &event.sender, + )? || lazy_load_send_redundant + { + if let Some(member_event) = services().rooms.state_accessor.room_state_get( + &room_id, + &StateEventType::RoomMember, + event.sender.as_str(), + )? 
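// Illustrative sketch, not from the patch: the incremental branch above includes a state
// event only when its entry differs between the `since` snapshot and the current snapshot,
// or when the client asked for full state. The same diff over two plain maps of
// state key to event id:
use std::collections::BTreeMap;

fn changed_state(
    current: &BTreeMap<&'static str, &'static str>,
    since: &BTreeMap<&'static str, &'static str>,
    full_state: bool,
) -> Vec<&'static str> {
    let mut events = Vec::new();
    for (key, id) in current {
        // Mirrors `if body.full_state || since_state_ids.get(&key) != Some(&id)`.
        if full_state || since.get(key) != Some(id) {
            events.push(*id);
        }
    }
    events
}

fn main() {
    let since = BTreeMap::from([("m.room.name", "$old_name"), ("m.room.topic", "$topic")]);
    let current = BTreeMap::from([
        ("m.room.name", "$new_name"), // changed, so included
        ("m.room.topic", "$topic"),   // unchanged, so skipped
        ("m.room.avatar", "$avatar"), // new key, so included
    ]);
    assert_eq!(changed_state(&current, &since, false), vec!["$avatar", "$new_name"]);
}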
{ + lazy_loaded.insert(event.sender.clone()); + state_events.push(member_event); + } + } + } + + services().rooms.lazy_loading.lazy_load_mark_sent( + &sender_user, + &sender_device, + &room_id, + lazy_loaded, + next_batch, + ); + + let encrypted_room = services() + .rooms + .state_accessor + .state_get(current_shortstatehash, &StateEventType::RoomEncryption, "")? + .is_some(); + + let since_encryption = services().rooms.state_accessor.state_get( + since_shortstatehash, + &StateEventType::RoomEncryption, + "", + )?; + + // Calculations: + let new_encrypted_room = encrypted_room && since_encryption.is_none(); + + let send_member_count = state_events + .iter() + .any(|event| event.kind == RoomEventType::RoomMember); + + if encrypted_room { + for state_event in &state_events { + if state_event.kind != RoomEventType::RoomMember { + continue; + } + + if let Some(state_key) = &state_event.state_key { + let user_id = UserId::parse(state_key.clone()) + .map_err(|_| Error::bad_database("Invalid UserId in member PDU."))?; + + if user_id == sender_user { + continue; } - if let Some(currently_active) = presence.content.currently_active { - p.content.currently_active = Some(currently_active); + + let new_membership = serde_json::from_str::( + state_event.content.get(), + ) + .map_err(|_| Error::bad_database("Invalid PDU in database."))? + .membership; + + match new_membership { + MembershipState::Join => { + // A new user joined an encrypted room + if !share_encrypted_room(&sender_user, &user_id, &room_id)? { + device_list_updates.insert(user_id); + } + } + MembershipState::Leave => { + // Write down users that have left encrypted rooms we are in + left_encrypted_users.insert(user_id); + } + _ => {} } } } } + + if joined_since_last_sync && encrypted_room || new_encrypted_room { + // If the user is in a new encrypted room, give them all joined users + device_list_updates.extend( + services() + .rooms + .state_cache + .room_members(&room_id) + .flatten() + .filter(|user_id| { + // Don't send key updates from the sender to the sender + &sender_user != user_id + }) + .filter(|user_id| { + // Only send keys if the sender doesn't share an encrypted room with the target already + !share_encrypted_room(&sender_user, user_id, &room_id).unwrap_or(false) + }), + ); + } + + let (joined_member_count, invited_member_count, heroes) = if send_member_count { + calculate_counts()? + } else { + (None, None, Vec::new()) + }; + + ( + heroes, + joined_member_count, + invited_member_count, + joined_since_last_sync, + state_events, + ) + }; + + // Look for device list updates in this room + device_list_updates.extend( + services() + .users + .keys_changed(room_id.as_ref(), since, None) + .filter_map(|r| r.ok()), + ); + + let notification_count = if send_notification_counts { + Some( + services() + .rooms + .user + .notification_count(&sender_user, &room_id)? + .try_into() + .expect("notification count can't go that high"), + ) + } else { + None + }; + + let highlight_count = if send_notification_counts { + Some( + services() + .rooms + .user + .highlight_count(&sender_user, &room_id)? 
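// Illustrative sketch, not from the patch: in encrypted rooms the loop above turns
// membership changes into E2EE bookkeeping, where joins may require announcing the new
// member's devices and leaves are remembered so `device_lists.left` can be filled in once
// all rooms are processed. The branching on its own, with toy types:
use std::collections::HashSet;

#[derive(Clone, Copy, PartialEq, Eq)]
enum Membership {
    Join,
    Leave,
    Other,
}

fn track_membership_change(
    user_id: &str,
    new_membership: Membership,
    already_shares_encrypted_room: bool,
    device_list_updates: &mut HashSet<String>,
    left_encrypted_users: &mut HashSet<String>,
) {
    match new_membership {
        Membership::Join => {
            // Only announce the new member if the sender does not already receive their
            // device list through another shared encrypted room.
            if !already_shares_encrypted_room {
                device_list_updates.insert(user_id.to_owned());
            }
        }
        Membership::Leave => {
            left_encrypted_users.insert(user_id.to_owned());
        }
        Membership::Other => {}
    }
}

fn main() {
    let (mut updates, mut left) = (HashSet::new(), HashSet::new());
    track_membership_change("@new:example.org", Membership::Join, false, &mut updates, &mut left);
    track_membership_change("@gone:example.org", Membership::Leave, false, &mut updates, &mut left);
    assert!(updates.contains("@new:example.org") && left.contains("@gone:example.org"));
}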
+ .try_into() + .expect("highlight count can't go that high"), + ) + } else { + None + }; + + let prev_batch = timeline_pdus + .first() + .map_or(Ok::<_, Error>(None), |(pdu_id, _)| { + Ok(Some( + services().rooms.timeline.pdu_count(pdu_id)?.to_string(), + )) + })?; + + let room_events: Vec<_> = timeline_pdus + .iter() + .map(|(_, pdu)| pdu.to_sync_room_event()) + .collect(); + + let mut edus: Vec<_> = services() + .rooms + .edus + .read_receipt + .readreceipts_since(&room_id, since) + .filter_map(|r| r.ok()) // Filter out buggy events + .map(|(_, _, v)| v) + .collect(); + + if services().rooms.edus.typing.last_typing_update(&room_id)? > since { + edus.push( + serde_json::from_str( + &serde_json::to_string(&services().rooms.edus.typing.typings_all(&room_id)?) + .expect("event is valid, we just created it"), + ) + .expect("event is valid, we just created it"), + ); + } + + // Save the state after this sync so we can send the correct state diff next sync + services().rooms.user.associate_token_shortstatehash( + &room_id, + next_batch, + current_shortstatehash, + )?; + + let joined_room = JoinedRoom { + account_data: RoomAccountData { + events: services() + .account_data + .changes_since(Some(&room_id), &sender_user, since)? + .into_iter() + .filter_map(|(_, v)| { + serde_json::from_str(v.json().get()) + .map_err(|_| Error::bad_database("Invalid account event in database.")) + .ok() + }) + .collect(), + }, + summary: RoomSummary { + heroes, + joined_member_count: joined_member_count.map(|n| (n as u32).into()), + invited_member_count: invited_member_count.map(|n| (n as u32).into()), + }, + unread_notifications: UnreadNotificationsCount { + highlight_count, + notification_count, + }, + timeline: Timeline { + limited: limited || joined_since_last_sync, + prev_batch, + events: room_events, + }, + state: State { + events: state_events + .iter() + .map(|pdu| pdu.to_sync_state_event()) + .collect(), + }, + ephemeral: Ephemeral { events: edus }, + unread_thread_notifications: BTreeMap::new(), + }; + + if !joined_room.is_empty() { + joined_rooms.insert(room_id.clone(), joined_room); + } + + // Take presence updates from this room + for (user_id, presence) in services() + .rooms + .edus + .presence + .presence_since(&room_id, since)? 
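// Illustrative sketch, not from the patch: the typing EDU above is converted into the sync
// response's event type by serializing it and immediately parsing it back
// (`serde_json::from_str(&serde_json::to_string(..)..)`). That trick moves a value between
// two serde types that share a JSON shape; in miniature, with toy types:
use serde::{Deserialize, Serialize};

#[derive(Serialize)]
struct InternalTyping {
    user_ids: Vec<String>,
}

#[derive(Deserialize, Debug)]
struct WireTypingContent {
    user_ids: Vec<String>,
}

fn main() {
    let internal = InternalTyping { user_ids: vec!["@alice:example.org".to_owned()] };
    // Round-trip through JSON text instead of writing a manual conversion.
    let wire: WireTypingContent =
        serde_json::from_str(&serde_json::to_string(&internal).expect("plain struct serializes"))
            .expect("the shapes match, so parsing cannot fail");
    assert_eq!(wire.user_ids, internal.user_ids);
}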
+ { + match presence_updates.entry(user_id) { + Entry::Vacant(v) => { + v.insert(presence); + } + Entry::Occupied(mut o) => { + let p = o.get_mut(); + + // Update existing presence event with more info + p.content.presence = presence.content.presence; + if let Some(status_msg) = presence.content.status_msg { + p.content.status_msg = Some(status_msg); + } + if let Some(last_active_ago) = presence.content.last_active_ago { + p.content.last_active_ago = Some(last_active_ago); + } + if let Some(displayname) = presence.content.displayname { + p.content.displayname = Some(displayname); + } + if let Some(avatar_url) = presence.content.avatar_url { + p.content.avatar_url = Some(avatar_url); + } + if let Some(currently_active) = presence.content.currently_active { + p.content.currently_active = Some(currently_active); + } + } + } } } @@ -332,7 +877,7 @@ async fn sync_helper( let since_state_ids = match since_shortstatehash { Some(s) => services().rooms.state_accessor.state_full_ids(s).await?, - None => HashMap::new(), + None => BTreeMap::new(), }; let left_event_id = match services().rooms.state_accessor.room_state_get_id( @@ -374,13 +919,13 @@ async fn sync_helper( let mut i = 0; for (key, id) in left_state_ids { - if full_state || since_state_ids.get(&key) != Some(&id) { + if body.full_state || since_state_ids.get(&key) != Some(&id) { let (event_type, state_key) = services().rooms.short.get_statekey_from_short(key)?; if !lazy_load_enabled || event_type != StateEventType::RoomMember - || full_state + || body.full_state // TODO: Delete the following line when this is resolved: https://github.com/vector-im/element-web/issues/22565 || *sender_user == state_key { @@ -463,7 +1008,7 @@ async fn sync_helper( } for user_id in left_encrypted_users { - let dont_share_encrypted_room = services() + let still_share_encrypted_room = services() .rooms .user .get_shared_rooms(vec![sender_user.clone(), user_id.clone()])? @@ -481,7 +1026,7 @@ async fn sync_helper( .all(|encrypted| !encrypted); // If the user doesn't share an encrypted room with the target anymore, we need to tell // them - if dont_share_encrypted_room { + if still_share_encrypted_room { device_list_left.insert(user_id); } } @@ -534,7 +1079,7 @@ async fn sync_helper( }; // TODO: Retry the endpoint instead of returning (waiting for #118) - if !full_state + if !body.full_state && response.rooms.is_empty() && response.presence.is_empty() && response.account_data.is_empty() @@ -554,597 +1099,6 @@ async fn sync_helper( } } -async fn load_joined_room( - sender_user: &UserId, - sender_device: &DeviceId, - room_id: &RoomId, - since: u64, - sincecount: PduCount, - next_batch: u64, - next_batchcount: PduCount, - lazy_load_enabled: bool, - lazy_load_send_redundant: bool, - full_state: bool, - device_list_updates: &mut HashSet, - left_encrypted_users: &mut HashSet, -) -> Result { - { - // Get and drop the lock to wait for remaining operations to finish - // This will make sure the we have all events until next_batch - let mutex_insert = Arc::clone( - services() - .globals - .roomid_mutex_insert - .write() - .unwrap() - .entry(room_id.to_owned()) - .or_default(), - ); - let insert_lock = mutex_insert.lock().unwrap(); - drop(insert_lock); - } - - let (timeline_pdus, limited) = load_timeline(sender_user, room_id, sincecount, 10)?; - - let send_notification_counts = !timeline_pdus.is_empty() - || services() - .rooms - .user - .last_notification_read(&sender_user, &room_id)? 
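// Illustrative sketch, not from the patch: presence events collected from several rooms are
// folded into one update per user with the HashMap entry API above, inserting on first
// sight and otherwise merging the newer optional fields into the stored event. The merge
// with a toy presence type:
use std::collections::hash_map::Entry;
use std::collections::HashMap;

#[derive(Clone, Debug)]
struct ToyPresence {
    presence: String,
    status_msg: Option<String>,
    last_active_ago: Option<u64>,
}

fn merge_presence(updates: &mut HashMap<String, ToyPresence>, user_id: &str, update: ToyPresence) {
    match updates.entry(user_id.to_owned()) {
        Entry::Vacant(v) => {
            v.insert(update);
        }
        Entry::Occupied(mut o) => {
            let stored = o.get_mut();
            // Always take the newest presence state, but only overwrite the optional
            // fields when the incoming event actually carries them.
            stored.presence = update.presence;
            if let Some(status_msg) = update.status_msg {
                stored.status_msg = Some(status_msg);
            }
            if let Some(last_active_ago) = update.last_active_ago {
                stored.last_active_ago = Some(last_active_ago);
            }
        }
    }
}

fn main() {
    let mut updates = HashMap::new();
    merge_presence(&mut updates, "@alice:example.org", ToyPresence {
        presence: "online".to_owned(),
        status_msg: Some("working".to_owned()),
        last_active_ago: None,
    });
    merge_presence(&mut updates, "@alice:example.org", ToyPresence {
        presence: "unavailable".to_owned(),
        status_msg: None,
        last_active_ago: Some(30_000),
    });
    let merged = &updates["@alice:example.org"];
    assert_eq!(merged.presence, "unavailable");
    assert_eq!(merged.status_msg.as_deref(), Some("working")); // kept from the first event
    assert_eq!(merged.last_active_ago, Some(30_000));
}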
- > since; - - let mut timeline_users = HashSet::new(); - for (_, event) in &timeline_pdus { - timeline_users.insert(event.sender.as_str().to_owned()); - } - - services().rooms.lazy_loading.lazy_load_confirm_delivery( - &sender_user, - &sender_device, - &room_id, - sincecount, - )?; - - // Database queries: - - let current_shortstatehash = - if let Some(s) = services().rooms.state.get_room_shortstatehash(&room_id)? { - s - } else { - error!("Room {} has no state", room_id); - return Err(Error::BadDatabase("Room has no state")); - }; - - let since_shortstatehash = services() - .rooms - .user - .get_token_shortstatehash(&room_id, since)?; - - let (heroes, joined_member_count, invited_member_count, joined_since_last_sync, state_events) = - if timeline_pdus.is_empty() && since_shortstatehash == Some(current_shortstatehash) { - // No state changes - (Vec::new(), None, None, false, Vec::new()) - } else { - // Calculates joined_member_count, invited_member_count and heroes - let calculate_counts = || { - let joined_member_count = services() - .rooms - .state_cache - .room_joined_count(&room_id)? - .unwrap_or(0); - let invited_member_count = services() - .rooms - .state_cache - .room_invited_count(&room_id)? - .unwrap_or(0); - - // Recalculate heroes (first 5 members) - let mut heroes = Vec::new(); - - if joined_member_count + invited_member_count <= 5 { - // Go through all PDUs and for each member event, check if the user is still joined or - // invited until we have 5 or we reach the end - - for hero in services() - .rooms - .timeline - .all_pdus(&sender_user, &room_id)? - .filter_map(|pdu| pdu.ok()) // Ignore all broken pdus - .filter(|(_, pdu)| pdu.kind == TimelineEventType::RoomMember) - .map(|(_, pdu)| { - let content: RoomMemberEventContent = - serde_json::from_str(pdu.content.get()).map_err(|_| { - Error::bad_database("Invalid member event in database.") - })?; - - if let Some(state_key) = &pdu.state_key { - let user_id = UserId::parse(state_key.clone()).map_err(|_| { - Error::bad_database("Invalid UserId in member PDU.") - })?; - - // The membership was and still is invite or join - if matches!( - content.membership, - MembershipState::Join | MembershipState::Invite - ) && (services() - .rooms - .state_cache - .is_joined(&user_id, &room_id)? - || services() - .rooms - .state_cache - .is_invited(&user_id, &room_id)?) - { - Ok::<_, Error>(Some(state_key.clone())) - } else { - Ok(None) - } - } else { - Ok(None) - } - }) - // Filter out buggy users - .filter_map(|u| u.ok()) - // Filter for possible heroes - .flatten() - { - if heroes.contains(&hero) || hero == sender_user.as_str() { - continue; - } - - heroes.push(hero); - } - } - - Ok::<_, Error>(( - Some(joined_member_count), - Some(invited_member_count), - heroes, - )) - }; - - let since_sender_member: Option = since_shortstatehash - .and_then(|shortstatehash| { - services() - .rooms - .state_accessor - .state_get( - shortstatehash, - &StateEventType::RoomMember, - sender_user.as_str(), - ) - .transpose() - }) - .transpose()? 
- .and_then(|pdu| { - serde_json::from_str(pdu.content.get()) - .map_err(|_| Error::bad_database("Invalid PDU in database.")) - .ok() - }); - - let joined_since_last_sync = since_sender_member - .map_or(true, |member| member.membership != MembershipState::Join); - - if since_shortstatehash.is_none() || joined_since_last_sync { - // Probably since = 0, we will do an initial sync - - let (joined_member_count, invited_member_count, heroes) = calculate_counts()?; - - let current_state_ids = services() - .rooms - .state_accessor - .state_full_ids(current_shortstatehash) - .await?; - - let mut state_events = Vec::new(); - let mut lazy_loaded = HashSet::new(); - - let mut i = 0; - for (shortstatekey, id) in current_state_ids { - let (event_type, state_key) = services() - .rooms - .short - .get_statekey_from_short(shortstatekey)?; - - if event_type != StateEventType::RoomMember { - let pdu = match services().rooms.timeline.get_pdu(&id)? { - Some(pdu) => pdu, - None => { - error!("Pdu in state not found: {}", id); - continue; - } - }; - state_events.push(pdu); - - i += 1; - if i % 100 == 0 { - tokio::task::yield_now().await; - } - } else if !lazy_load_enabled - || full_state - || timeline_users.contains(&state_key) - // TODO: Delete the following line when this is resolved: https://github.com/vector-im/element-web/issues/22565 - || *sender_user == state_key - { - let pdu = match services().rooms.timeline.get_pdu(&id)? { - Some(pdu) => pdu, - None => { - error!("Pdu in state not found: {}", id); - continue; - } - }; - - // This check is in case a bad user ID made it into the database - if let Ok(uid) = UserId::parse(&state_key) { - lazy_loaded.insert(uid); - } - state_events.push(pdu); - - i += 1; - if i % 100 == 0 { - tokio::task::yield_now().await; - } - } - } - - // Reset lazy loading because this is an initial sync - services().rooms.lazy_loading.lazy_load_reset( - &sender_user, - &sender_device, - &room_id, - )?; - - // The state_events above should contain all timeline_users, let's mark them as lazy - // loaded. - services().rooms.lazy_loading.lazy_load_mark_sent( - &sender_user, - &sender_device, - &room_id, - lazy_loaded, - next_batchcount, - ); - - ( - heroes, - joined_member_count, - invited_member_count, - true, - state_events, - ) - } else { - // Incremental /sync - let since_shortstatehash = since_shortstatehash.unwrap(); - - let mut state_events = Vec::new(); - let mut lazy_loaded = HashSet::new(); - - if since_shortstatehash != current_shortstatehash { - let current_state_ids = services() - .rooms - .state_accessor - .state_full_ids(current_shortstatehash) - .await?; - let since_state_ids = services() - .rooms - .state_accessor - .state_full_ids(since_shortstatehash) - .await?; - - for (key, id) in current_state_ids { - if full_state || since_state_ids.get(&key) != Some(&id) { - let pdu = match services().rooms.timeline.get_pdu(&id)? 
{ - Some(pdu) => pdu, - None => { - error!("Pdu in state not found: {}", id); - continue; - } - }; - - if pdu.kind == TimelineEventType::RoomMember { - match UserId::parse( - pdu.state_key - .as_ref() - .expect("State event has state key") - .clone(), - ) { - Ok(state_key_userid) => { - lazy_loaded.insert(state_key_userid); - } - Err(e) => error!("Invalid state key for member event: {}", e), - } - } - - state_events.push(pdu); - tokio::task::yield_now().await; - } - } - } - - for (_, event) in &timeline_pdus { - if lazy_loaded.contains(&event.sender) { - continue; - } - - if !services().rooms.lazy_loading.lazy_load_was_sent_before( - &sender_user, - &sender_device, - &room_id, - &event.sender, - )? || lazy_load_send_redundant - { - if let Some(member_event) = services().rooms.state_accessor.room_state_get( - &room_id, - &StateEventType::RoomMember, - event.sender.as_str(), - )? { - lazy_loaded.insert(event.sender.clone()); - state_events.push(member_event); - } - } - } - - services().rooms.lazy_loading.lazy_load_mark_sent( - &sender_user, - &sender_device, - &room_id, - lazy_loaded, - next_batchcount, - ); - - let encrypted_room = services() - .rooms - .state_accessor - .state_get(current_shortstatehash, &StateEventType::RoomEncryption, "")? - .is_some(); - - let since_encryption = services().rooms.state_accessor.state_get( - since_shortstatehash, - &StateEventType::RoomEncryption, - "", - )?; - - // Calculations: - let new_encrypted_room = encrypted_room && since_encryption.is_none(); - - let send_member_count = state_events - .iter() - .any(|event| event.kind == TimelineEventType::RoomMember); - - if encrypted_room { - for state_event in &state_events { - if state_event.kind != TimelineEventType::RoomMember { - continue; - } - - if let Some(state_key) = &state_event.state_key { - let user_id = UserId::parse(state_key.clone()).map_err(|_| { - Error::bad_database("Invalid UserId in member PDU.") - })?; - - if user_id == sender_user { - continue; - } - - let new_membership = serde_json::from_str::( - state_event.content.get(), - ) - .map_err(|_| Error::bad_database("Invalid PDU in database."))? - .membership; - - match new_membership { - MembershipState::Join => { - // A new user joined an encrypted room - if !share_encrypted_room(&sender_user, &user_id, &room_id)? { - device_list_updates.insert(user_id); - } - } - MembershipState::Leave => { - // Write down users that have left encrypted rooms we are in - left_encrypted_users.insert(user_id); - } - _ => {} - } - } - } - } - - if joined_since_last_sync && encrypted_room || new_encrypted_room { - // If the user is in a new encrypted room, give them all joined users - device_list_updates.extend( - services() - .rooms - .state_cache - .room_members(&room_id) - .flatten() - .filter(|user_id| { - // Don't send key updates from the sender to the sender - &sender_user != user_id - }) - .filter(|user_id| { - // Only send keys if the sender doesn't share an encrypted room with the target already - !share_encrypted_room(&sender_user, user_id, &room_id) - .unwrap_or(false) - }), - ); - } - - let (joined_member_count, invited_member_count, heroes) = if send_member_count { - calculate_counts()? 
- } else { - (None, None, Vec::new()) - }; - - ( - heroes, - joined_member_count, - invited_member_count, - joined_since_last_sync, - state_events, - ) - } - }; - - // Look for device list updates in this room - device_list_updates.extend( - services() - .users - .keys_changed(room_id.as_ref(), since, None) - .filter_map(|r| r.ok()), - ); - - let notification_count = if send_notification_counts { - Some( - services() - .rooms - .user - .notification_count(&sender_user, &room_id)? - .try_into() - .expect("notification count can't go that high"), - ) - } else { - None - }; - - let highlight_count = if send_notification_counts { - Some( - services() - .rooms - .user - .highlight_count(&sender_user, &room_id)? - .try_into() - .expect("highlight count can't go that high"), - ) - } else { - None - }; - - let prev_batch = timeline_pdus - .first() - .map_or(Ok::<_, Error>(None), |(pdu_count, _)| { - Ok(Some(match pdu_count { - PduCount::Backfilled(_) => { - error!("timeline in backfill state?!"); - "0".to_owned() - } - PduCount::Normal(c) => c.to_string(), - })) - })?; - - let room_events: Vec<_> = timeline_pdus - .iter() - .map(|(_, pdu)| pdu.to_sync_room_event()) - .collect(); - - let mut edus: Vec<_> = services() - .rooms - .edus - .read_receipt - .readreceipts_since(&room_id, since) - .filter_map(|r| r.ok()) // Filter out buggy events - .map(|(_, _, v)| v) - .collect(); - - if services().rooms.edus.typing.last_typing_update(&room_id)? > since { - edus.push( - serde_json::from_str( - &serde_json::to_string(&services().rooms.edus.typing.typings_all(&room_id)?) - .expect("event is valid, we just created it"), - ) - .expect("event is valid, we just created it"), - ); - } - - // Save the state after this sync so we can send the correct state diff next sync - services().rooms.user.associate_token_shortstatehash( - &room_id, - next_batch, - current_shortstatehash, - )?; - - Ok(JoinedRoom { - account_data: RoomAccountData { - events: services() - .account_data - .changes_since(Some(&room_id), &sender_user, since)? - .into_iter() - .filter_map(|(_, v)| { - serde_json::from_str(v.json().get()) - .map_err(|_| Error::bad_database("Invalid account event in database.")) - .ok() - }) - .collect(), - }, - summary: RoomSummary { - heroes, - joined_member_count: joined_member_count.map(|n| (n as u32).into()), - invited_member_count: invited_member_count.map(|n| (n as u32).into()), - }, - unread_notifications: UnreadNotificationsCount { - highlight_count, - notification_count, - }, - timeline: Timeline { - limited: limited || joined_since_last_sync, - prev_batch, - events: room_events, - }, - state: State { - events: state_events - .iter() - .map(|pdu| pdu.to_sync_state_event()) - .collect(), - }, - ephemeral: Ephemeral { events: edus }, - unread_thread_notifications: BTreeMap::new(), - }) -} - -fn load_timeline( - sender_user: &UserId, - room_id: &RoomId, - roomsincecount: PduCount, - limit: u64, -) -> Result<(Vec<(PduCount, PduEvent)>, bool), Error> { - let timeline_pdus; - let limited; - if services() - .rooms - .timeline - .last_timeline_count(&sender_user, &room_id)? - > roomsincecount - { - let mut non_timeline_pdus = services() - .rooms - .timeline - .pdus_until(&sender_user, &room_id, PduCount::max())? 
- .filter_map(|r| { - // Filter out buggy events - if r.is_err() { - error!("Bad pdu in pdus_since: {:?}", r); - } - r.ok() - }) - .take_while(|(pducount, _)| pducount > &roomsincecount); - - // Take the last events for the timeline - timeline_pdus = non_timeline_pdus - .by_ref() - .take(limit as usize) - .collect::>() - .into_iter() - .rev() - .collect::>(); - - // They /sync response doesn't always return all messages, so we say the output is - // limited unless there are events in non_timeline_pdus - limited = non_timeline_pdus.next().is_some(); - } else { - timeline_pdus = Vec::new(); - limited = false; - } - Ok((timeline_pdus, limited)) -} - fn share_encrypted_room( sender_user: &UserId, user_id: &UserId, @@ -1168,581 +1122,3 @@ fn share_encrypted_room( }) .any(|encrypted| encrypted)) } - -pub async fn sync_events_v4_route( - body: Ruma, -) -> Result> { - dbg!(&body.body); - let sender_user = body.sender_user.expect("user is authenticated"); - let sender_device = body.sender_device.expect("user is authenticated"); - let mut body = body.body; - // Setup watchers, so if there's no response, we can wait for them - let watcher = services().globals.watch(&sender_user, &sender_device); - - let next_batch = services().globals.next_count()?; - - let globalsince = body - .pos - .as_ref() - .and_then(|string| string.parse().ok()) - .unwrap_or(0); - - if globalsince == 0 { - if let Some(conn_id) = &body.conn_id { - services().users.forget_sync_request_connection( - sender_user.clone(), - sender_device.clone(), - conn_id.clone(), - ) - } - } - - // Get sticky parameters from cache - let known_rooms = services().users.update_sync_request_with_cache( - sender_user.clone(), - sender_device.clone(), - &mut body, - ); - - let all_joined_rooms = services() - .rooms - .state_cache - .rooms_joined(&sender_user) - .filter_map(|r| r.ok()) - .collect::>(); - - if body.extensions.to_device.enabled.unwrap_or(false) { - services() - .users - .remove_to_device_events(&sender_user, &sender_device, globalsince)?; - } - - let mut left_encrypted_users = HashSet::new(); // Users that have left any encrypted rooms the sender was in - let mut device_list_changes = HashSet::new(); - let mut device_list_left = HashSet::new(); - - if body.extensions.e2ee.enabled.unwrap_or(false) { - // Look for device list updates of this account - device_list_changes.extend( - services() - .users - .keys_changed(sender_user.as_ref(), globalsince, None) - .filter_map(|r| r.ok()), - ); - - for room_id in &all_joined_rooms { - let current_shortstatehash = - if let Some(s) = services().rooms.state.get_room_shortstatehash(&room_id)? { - s - } else { - error!("Room {} has no state", room_id); - continue; - }; - - let since_shortstatehash = services() - .rooms - .user - .get_token_shortstatehash(&room_id, globalsince)?; - - let since_sender_member: Option = since_shortstatehash - .and_then(|shortstatehash| { - services() - .rooms - .state_accessor - .state_get( - shortstatehash, - &StateEventType::RoomMember, - sender_user.as_str(), - ) - .transpose() - }) - .transpose()? - .and_then(|pdu| { - serde_json::from_str(pdu.content.get()) - .map_err(|_| Error::bad_database("Invalid PDU in database.")) - .ok() - }); - - let encrypted_room = services() - .rooms - .state_accessor - .state_get(current_shortstatehash, &StateEventType::RoomEncryption, "")? 
- .is_some(); - - if let Some(since_shortstatehash) = since_shortstatehash { - // Skip if there are only timeline changes - if since_shortstatehash == current_shortstatehash { - continue; - } - - let since_encryption = services().rooms.state_accessor.state_get( - since_shortstatehash, - &StateEventType::RoomEncryption, - "", - )?; - - let joined_since_last_sync = since_sender_member - .map_or(true, |member| member.membership != MembershipState::Join); - - let new_encrypted_room = encrypted_room && since_encryption.is_none(); - if encrypted_room { - let current_state_ids = services() - .rooms - .state_accessor - .state_full_ids(current_shortstatehash) - .await?; - let since_state_ids = services() - .rooms - .state_accessor - .state_full_ids(since_shortstatehash) - .await?; - - for (key, id) in current_state_ids { - if since_state_ids.get(&key) != Some(&id) { - let pdu = match services().rooms.timeline.get_pdu(&id)? { - Some(pdu) => pdu, - None => { - error!("Pdu in state not found: {}", id); - continue; - } - }; - if pdu.kind == TimelineEventType::RoomMember { - if let Some(state_key) = &pdu.state_key { - let user_id = - UserId::parse(state_key.clone()).map_err(|_| { - Error::bad_database("Invalid UserId in member PDU.") - })?; - - if user_id == sender_user { - continue; - } - - let new_membership = serde_json::from_str::< - RoomMemberEventContent, - >( - pdu.content.get() - ) - .map_err(|_| Error::bad_database("Invalid PDU in database."))? - .membership; - - match new_membership { - MembershipState::Join => { - // A new user joined an encrypted room - if !share_encrypted_room( - &sender_user, - &user_id, - &room_id, - )? { - device_list_changes.insert(user_id); - } - } - MembershipState::Leave => { - // Write down users that have left encrypted rooms we are in - left_encrypted_users.insert(user_id); - } - _ => {} - } - } - } - } - } - if joined_since_last_sync || new_encrypted_room { - // If the user is in a new encrypted room, give them all joined users - device_list_changes.extend( - services() - .rooms - .state_cache - .room_members(&room_id) - .flatten() - .filter(|user_id| { - // Don't send key updates from the sender to the sender - &sender_user != user_id - }) - .filter(|user_id| { - // Only send keys if the sender doesn't share an encrypted room with the target already - !share_encrypted_room(&sender_user, user_id, &room_id) - .unwrap_or(false) - }), - ); - } - } - } - // Look for device list updates in this room - device_list_changes.extend( - services() - .users - .keys_changed(room_id.as_ref(), globalsince, None) - .filter_map(|r| r.ok()), - ); - } - for user_id in left_encrypted_users { - let dont_share_encrypted_room = services() - .rooms - .user - .get_shared_rooms(vec![sender_user.clone(), user_id.clone()])? - .filter_map(|r| r.ok()) - .filter_map(|other_room_id| { - Some( - services() - .rooms - .state_accessor - .room_state_get(&other_room_id, &StateEventType::RoomEncryption, "") - .ok()? 
- .is_some(), - ) - }) - .all(|encrypted| !encrypted); - // If the user doesn't share an encrypted room with the target anymore, we need to tell - // them - if dont_share_encrypted_room { - device_list_left.insert(user_id); - } - } - } - - let mut lists = BTreeMap::new(); - let mut todo_rooms = BTreeMap::new(); // and required state - - for (list_id, list) in body.lists { - if list.filters.and_then(|f| f.is_invite).unwrap_or(false) { - continue; - } - - let mut new_known_rooms = BTreeSet::new(); - - lists.insert( - list_id.clone(), - sync_events::v4::SyncList { - ops: list - .ranges - .into_iter() - .map(|mut r| { - r.0 = - r.0.clamp(uint!(0), UInt::from(all_joined_rooms.len() as u32 - 1)); - r.1 = - r.1.clamp(r.0, UInt::from(all_joined_rooms.len() as u32 - 1)); - let room_ids = all_joined_rooms - [(u64::from(r.0) as usize)..=(u64::from(r.1) as usize)] - .to_vec(); - new_known_rooms.extend(room_ids.iter().cloned()); - for room_id in &room_ids { - let todo_room = todo_rooms.entry(room_id.clone()).or_insert(( - BTreeSet::new(), - 0, - u64::MAX, - )); - let limit = list - .room_details - .timeline_limit - .map_or(10, u64::from) - .min(100); - todo_room - .0 - .extend(list.room_details.required_state.iter().cloned()); - todo_room.1 = todo_room.1.max(limit); - // 0 means unknown because it got out of date - todo_room.2 = todo_room.2.min( - known_rooms - .get(&list_id) - .and_then(|k| k.get(room_id)) - .copied() - .unwrap_or(0), - ); - } - sync_events::v4::SyncOp { - op: SlidingOp::Sync, - range: Some(r.clone()), - index: None, - room_ids, - room_id: None, - } - }) - .collect(), - count: UInt::from(all_joined_rooms.len() as u32), - }, - ); - - if let Some(conn_id) = &body.conn_id { - services().users.update_sync_known_rooms( - sender_user.clone(), - sender_device.clone(), - conn_id.clone(), - list_id, - new_known_rooms, - globalsince, - ); - } - } - - let mut known_subscription_rooms = BTreeSet::new(); - for (room_id, room) in &body.room_subscriptions { - let todo_room = todo_rooms - .entry(room_id.clone()) - .or_insert((BTreeSet::new(), 0, u64::MAX)); - let limit = room.timeline_limit.map_or(10, u64::from).min(100); - todo_room.0.extend(room.required_state.iter().cloned()); - todo_room.1 = todo_room.1.max(limit); - // 0 means unknown because it got out of date - todo_room.2 = todo_room.2.min( - known_rooms - .get("subscriptions") - .and_then(|k| k.get(room_id)) - .copied() - .unwrap_or(0), - ); - known_subscription_rooms.insert(room_id.clone()); - } - - for r in body.unsubscribe_rooms { - known_subscription_rooms.remove(&r); - body.room_subscriptions.remove(&r); - } - - if let Some(conn_id) = &body.conn_id { - services().users.update_sync_known_rooms( - sender_user.clone(), - sender_device.clone(), - conn_id.clone(), - "subscriptions".to_owned(), - known_subscription_rooms, - globalsince, - ); - } - - if let Some(conn_id) = &body.conn_id { - services().users.update_sync_subscriptions( - sender_user.clone(), - sender_device.clone(), - conn_id.clone(), - body.room_subscriptions, - ); - } - - let mut rooms = BTreeMap::new(); - for (room_id, (required_state_request, timeline_limit, roomsince)) in &todo_rooms { - let roomsincecount = PduCount::Normal(*roomsince); - - let (timeline_pdus, limited) = - load_timeline(&sender_user, &room_id, roomsincecount, *timeline_limit)?; - - if roomsince != &0 && timeline_pdus.is_empty() { - continue; - } - - let prev_batch = timeline_pdus - .first() - .map_or(Ok::<_, Error>(None), |(pdu_count, _)| { - Ok(Some(match pdu_count { - PduCount::Backfilled(_) => { - 
error!("timeline in backfill state?!"); - "0".to_owned() - } - PduCount::Normal(c) => c.to_string(), - })) - })? - .or_else(|| { - if roomsince != &0 { - Some(roomsince.to_string()) - } else { - None - } - }); - - let room_events: Vec<_> = timeline_pdus - .iter() - .map(|(_, pdu)| pdu.to_sync_room_event()) - .collect(); - - let required_state = required_state_request - .iter() - .map(|state| { - services() - .rooms - .state_accessor - .room_state_get(&room_id, &state.0, &state.1) - }) - .filter_map(|r| r.ok()) - .filter_map(|o| o) - .map(|state| state.to_sync_state_event()) - .collect(); - - // Heroes - let heroes = services() - .rooms - .state_cache - .room_members(&room_id) - .filter_map(|r| r.ok()) - .filter(|member| member != &sender_user) - .map(|member| { - Ok::<_, Error>( - services() - .rooms - .state_accessor - .get_member(&room_id, &member)? - .map(|memberevent| { - ( - memberevent - .displayname - .unwrap_or_else(|| member.to_string()), - memberevent.avatar_url, - ) - }), - ) - }) - .filter_map(|r| r.ok()) - .filter_map(|o| o) - .take(5) - .collect::>(); - let name = if heroes.len() > 1 { - let last = heroes[0].0.clone(); - Some( - heroes[1..] - .iter() - .map(|h| h.0.clone()) - .collect::>() - .join(", ") - + " and " - + &last, - ) - } else if heroes.len() == 1 { - Some(heroes[0].0.clone()) - } else { - None - }; - - let avatar = if heroes.len() == 1 { - heroes[0].1.clone() - } else { - None - }; - - rooms.insert( - room_id.clone(), - sync_events::v4::SlidingSyncRoom { - name: services() - .rooms - .state_accessor - .get_name(&room_id)? - .or_else(|| name), - avatar: services() - .rooms - .state_accessor - .get_avatar(&room_id)? - .map_or(avatar, |a| a.url), - initial: Some(roomsince == &0), - is_dm: None, - invite_state: None, - unread_notifications: UnreadNotificationsCount { - highlight_count: Some( - services() - .rooms - .user - .highlight_count(&sender_user, &room_id)? - .try_into() - .expect("notification count can't go that high"), - ), - notification_count: Some( - services() - .rooms - .user - .notification_count(&sender_user, &room_id)? - .try_into() - .expect("notification count can't go that high"), - ), - }, - timeline: room_events, - required_state, - prev_batch, - limited, - joined_count: Some( - (services() - .rooms - .state_cache - .room_joined_count(&room_id)? - .unwrap_or(0) as u32) - .into(), - ), - invited_count: Some( - (services() - .rooms - .state_cache - .room_invited_count(&room_id)? 
- .unwrap_or(0) as u32) - .into(), - ), - num_live: None, // Count events in timeline greater than global sync counter - timestamp: None, - }, - ); - } - - if rooms - .iter() - .all(|(_, r)| r.timeline.is_empty() && r.required_state.is_empty()) - { - // Hang a few seconds so requests are not spammed - // Stop hanging if new info arrives - let mut duration = body.timeout.unwrap_or(Duration::from_secs(30)); - if duration.as_secs() > 30 { - duration = Duration::from_secs(30); - } - let _ = tokio::time::timeout(duration, watcher).await; - } - - Ok(dbg!(sync_events::v4::Response { - initial: globalsince == 0, - txn_id: body.txn_id.clone(), - pos: next_batch.to_string(), - lists, - rooms, - extensions: sync_events::v4::Extensions { - to_device: if body.extensions.to_device.enabled.unwrap_or(false) { - Some(sync_events::v4::ToDevice { - events: services() - .users - .get_to_device_events(&sender_user, &sender_device)?, - next_batch: next_batch.to_string(), - }) - } else { - None - }, - e2ee: sync_events::v4::E2EE { - device_lists: DeviceLists { - changed: device_list_changes.into_iter().collect(), - left: device_list_left.into_iter().collect(), - }, - device_one_time_keys_count: services() - .users - .count_one_time_keys(&sender_user, &sender_device)?, - // Fallback keys are not yet supported - device_unused_fallback_key_types: None, - }, - account_data: sync_events::v4::AccountData { - global: if body.extensions.account_data.enabled.unwrap_or(false) { - services() - .account_data - .changes_since(None, &sender_user, globalsince)? - .into_iter() - .filter_map(|(_, v)| { - serde_json::from_str(v.json().get()) - .map_err(|_| { - Error::bad_database("Invalid account event in database.") - }) - .ok() - }) - .collect() - } else { - Vec::new() - }, - rooms: BTreeMap::new(), - }, - receipts: sync_events::v4::Receipts { - rooms: BTreeMap::new(), - }, - typing: sync_events::v4::Typing { - rooms: BTreeMap::new(), - }, - }, - delta_token: None, - })) -} diff --git a/src/api/client_server/tag.rs b/src/api/client_server/tag.rs index 16f1600f..c87e2335 100644 --- a/src/api/client_server/tag.rs +++ b/src/api/client_server/tag.rs @@ -14,7 +14,7 @@ use std::collections::BTreeMap; /// /// - Inserts the tag into the tag event of the room account data. pub async fn update_tag_route( - body: Ruma, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -58,7 +58,7 @@ pub async fn update_tag_route( /// /// - Removes the tag from the tag event of the room account data. pub async fn delete_tag_route( - body: Ruma, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -98,7 +98,9 @@ pub async fn delete_tag_route( /// Returns tags on the room. /// /// - Gets the tag event of the room account data. -pub async fn get_tags_route(body: Ruma) -> Result { +pub async fn get_tags_route( + body: Ruma, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let event = services().account_data.get( diff --git a/src/api/client_server/thirdparty.rs b/src/api/client_server/thirdparty.rs index c2c1adfd..5665ad6c 100644 --- a/src/api/client_server/thirdparty.rs +++ b/src/api/client_server/thirdparty.rs @@ -7,7 +7,7 @@ use std::collections::BTreeMap; /// /// TODO: Fetches all metadata about protocols supported by the homeserver. 
pub async fn get_protocols_route( - _body: Ruma, + _body: Ruma, ) -> Result { // TODO Ok(get_protocols::v3::Response { diff --git a/src/api/client_server/threads.rs b/src/api/client_server/threads.rs deleted file mode 100644 index a095b420..00000000 --- a/src/api/client_server/threads.rs +++ /dev/null @@ -1,49 +0,0 @@ -use ruma::api::client::{error::ErrorKind, threads::get_threads}; - -use crate::{services, Error, Result, Ruma}; - -/// # `GET /_matrix/client/r0/rooms/{roomId}/threads` -pub async fn get_threads_route( - body: Ruma, -) -> Result { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - - // Use limit or else 10, with maximum 100 - let limit = body - .limit - .and_then(|l| l.try_into().ok()) - .unwrap_or(10) - .min(100); - - let from = if let Some(from) = &body.from { - from.parse() - .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, ""))? - } else { - u64::MAX - }; - - let threads = services() - .rooms - .threads - .threads_until(sender_user, &body.room_id, from, &body.include)? - .take(limit) - .filter_map(|r| r.ok()) - .filter(|(_, pdu)| { - services() - .rooms - .state_accessor - .user_can_see_event(sender_user, &body.room_id, &pdu.event_id) - .unwrap_or(false) - }) - .collect::>(); - - let next_batch = threads.last().map(|(count, _)| count.to_string()); - - Ok(get_threads::v1::Response { - chunk: threads - .into_iter() - .map(|(_, pdu)| pdu.to_room_event()) - .collect(), - next_batch, - }) -} diff --git a/src/api/client_server/to_device.rs b/src/api/client_server/to_device.rs index 31590fc7..139b845d 100644 --- a/src/api/client_server/to_device.rs +++ b/src/api/client_server/to_device.rs @@ -1,3 +1,4 @@ +use ruma::events::ToDeviceEventType; use std::collections::BTreeMap; use crate::{services, Error, Result, Ruma}; @@ -13,7 +14,7 @@ use ruma::{ /// /// Send a to-device event to a set of client devices. pub async fn send_event_to_device_route( - body: Ruma, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_device = body.sender_device.as_deref(); @@ -41,7 +42,7 @@ pub async fn send_event_to_device_route( serde_json::to_vec(&federation::transactions::edu::Edu::DirectToDevice( DirectDeviceContent { sender: sender_user.clone(), - ev_type: body.event_type.clone(), + ev_type: ToDeviceEventType::from(&*body.event_type), message_id: count.to_string().into(), messages, }, @@ -59,7 +60,7 @@ pub async fn send_event_to_device_route( sender_user, target_user_id, target_device_id, - &body.event_type.to_string(), + &body.event_type, event.deserialize_as().map_err(|_| { Error::BadRequest(ErrorKind::InvalidParam, "Event is invalid") })?, @@ -72,7 +73,7 @@ pub async fn send_event_to_device_route( sender_user, target_user_id, &target_device_id?, - &body.event_type.to_string(), + &body.event_type, event.deserialize_as().map_err(|_| { Error::BadRequest(ErrorKind::InvalidParam, "Event is invalid") })?, diff --git a/src/api/client_server/typing.rs b/src/api/client_server/typing.rs index 43217e1a..ecc926f4 100644 --- a/src/api/client_server/typing.rs +++ b/src/api/client_server/typing.rs @@ -5,7 +5,7 @@ use ruma::api::client::{error::ErrorKind, typing::create_typing_event}; /// /// Sets the typing state of the sender user. 
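// [Editor's note, not part of the diff] Only the extractor signature of
// create_typing_event_route changes in this patch. For context, a minimal sketch of
// how the request's `Typing` state is typically applied; the write-side service
// calls are assumptions and therefore left as comments (only the read side,
// `last_typing_update`/`typings_all`, is visible in the sync hunks earlier in this
// patch).

use ruma::api::client::typing::create_typing_event::v3::Typing;

// Hypothetical helper: translate the requested typing state into an EDU update.
fn apply_typing_state(state: &Typing) {
    match state {
        // Start (or refresh) a typing notification that expires after `timeout`.
        Typing::Yes(timeout) => {
            let _expires_in = *timeout; // a std::time::Duration
            // e.g. services().rooms.edus.typing.typing_add(user, room, now + timeout)
        }
        // Explicitly stop typing for this user in this room.
        Typing::No => {
            // e.g. services().rooms.edus.typing.typing_remove(user, room)
        }
    }
}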
pub async fn create_typing_event_route( - body: Ruma, + body: Ruma, ) -> Result { use create_typing_event::v3::Typing; diff --git a/src/api/client_server/unversioned.rs b/src/api/client_server/unversioned.rs index 797b9529..8a5c3d25 100644 --- a/src/api/client_server/unversioned.rs +++ b/src/api/client_server/unversioned.rs @@ -1,9 +1,8 @@ use std::{collections::BTreeMap, iter::FromIterator}; -use axum::{response::IntoResponse, Json}; -use ruma::api::client::{discovery::get_supported_versions, error::ErrorKind}; +use ruma::api::client::discovery::get_supported_versions; -use crate::{services, Error, Result, Ruma}; +use crate::{Result, Ruma}; /// # `GET /_matrix/client/versions` /// @@ -16,7 +15,7 @@ use crate::{services, Error, Result, Ruma}; /// Note: Unstable features are used while developing new features. Clients should avoid using /// unstable features in their stable releases pub async fn get_supported_versions_route( - _body: Ruma, + _body: Ruma, ) -> Result { let resp = get_supported_versions::Response { versions: vec![ @@ -24,26 +23,9 @@ pub async fn get_supported_versions_route( "r0.6.0".to_owned(), "v1.1".to_owned(), "v1.2".to_owned(), - "v1.3".to_owned(), - "v1.4".to_owned(), ], unstable_features: BTreeMap::from_iter([("org.matrix.e2e_cross_signing".to_owned(), true)]), }; Ok(resp) } - -/// # `GET /.well-known/matrix/client` -pub async fn well_known_client_route( - _body: Ruma, -) -> Result { - let client_url = match services().globals.well_known_client() { - Some(url) => url.clone(), - None => return Err(Error::BadRequest(ErrorKind::NotFound, "Not found.")), - }; - - Ok(Json(serde_json::json!({ - "m.homeserver": {"base_url": client_url}, - "org.matrix.msc3575.proxy": {"url": client_url} - }))) -} diff --git a/src/api/client_server/user_directory.rs b/src/api/client_server/user_directory.rs index c30bac51..518daa5e 100644 --- a/src/api/client_server/user_directory.rs +++ b/src/api/client_server/user_directory.rs @@ -14,7 +14,7 @@ use ruma::{ /// - Hides any local users that aren't in any public rooms (i.e. those that have the join rule set to public) /// and don't share a room with the sender pub async fn search_users_route( - body: Ruma, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let limit = u64::from(body.limit) as usize; diff --git a/src/api/client_server/voip.rs b/src/api/client_server/voip.rs index f0d91f71..6b1ee400 100644 --- a/src/api/client_server/voip.rs +++ b/src/api/client_server/voip.rs @@ -1,5 +1,4 @@ use crate::{services, Result, Ruma}; -use base64::{engine::general_purpose, Engine as _}; use hmac::{Hmac, Mac}; use ruma::{api::client::voip::get_turn_server_info, SecondsSinceUnixEpoch}; use sha1::Sha1; @@ -11,7 +10,7 @@ type HmacSha1 = Hmac; /// /// TODO: Returns information about the recommended turn server. 
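// [Editor's note, not part of the diff] The turn_server_route hunk below only swaps
// the base64 call site (the newer `general_purpose::STANDARD.encode` engine API back
// to the older `base64::encode_config` style); the credential scheme itself is
// unchanged. For context, a minimal sketch of that scheme, assuming the common
// coturn-style shared-secret ("TURN REST API") convention in which the username is
// an expiry timestamp joined to the Matrix user ID. The helper name and parameters
// below are illustrative, not Conduit's actual identifiers.

use hmac::{Hmac, Mac};
use sha1::Sha1;

type HmacSha1 = Hmac<Sha1>;

/// Derive the short-lived TURN password for `username` from the shared secret.
fn turn_password(turn_secret: &str, username: &str) -> String {
    let mut mac = HmacSha1::new_from_slice(turn_secret.as_bytes())
        .expect("HMAC can take key of any size");
    mac.update(username.as_bytes());
    // base64 0.13-style API, matching what this hunk switches to
    base64::encode_config(mac.finalize().into_bytes(), base64::STANDARD)
}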
pub async fn turn_server_route( - body: Ruma, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -29,7 +28,7 @@ pub async fn turn_server_route( .expect("HMAC can take key of any size"); mac.update(username.as_bytes()); - let password: String = general_purpose::STANDARD.encode(mac.finalize().into_bytes()); + let password: String = base64::encode_config(mac.finalize().into_bytes(), base64::STANDARD); (username, password) } else { diff --git a/src/api/ruma_wrapper/axum.rs b/src/api/ruma_wrapper/axum.rs index bbd48614..d056f3f2 100644 --- a/src/api/ruma_wrapper/axum.rs +++ b/src/api/ruma_wrapper/axum.rs @@ -3,16 +3,18 @@ use std::{collections::BTreeMap, iter::FromIterator, str}; use axum::{ async_trait, body::{Full, HttpBody}, - extract::{rejection::TypedHeaderRejectionReason, FromRequest, Path, TypedHeader}, + extract::{ + rejection::TypedHeaderRejectionReason, FromRequest, Path, RequestParts, TypedHeader, + }, headers::{ authorization::{Bearer, Credentials}, Authorization, }, response::{IntoResponse, Response}, - BoxError, RequestExt, RequestPartsExt, + BoxError, }; -use bytes::{Buf, BufMut, Bytes, BytesMut}; -use http::{Request, StatusCode}; +use bytes::{BufMut, Bytes, BytesMut}; +use http::StatusCode; use ruma::{ api::{client::error::ErrorKind, AuthScheme, IncomingRequest, OutgoingResponse}, CanonicalJsonValue, OwnedDeviceId, OwnedServerName, UserId, @@ -24,45 +26,28 @@ use super::{Ruma, RumaResponse}; use crate::{services, Error, Result}; #[async_trait] -impl FromRequest for Ruma +impl FromRequest for Ruma where T: IncomingRequest, - B: HttpBody + Send + 'static, + B: HttpBody + Send, B::Data: Send, B::Error: Into, { type Rejection = Error; - async fn from_request(req: Request, _state: &S) -> Result { + async fn from_request(req: &mut RequestParts) -> Result { #[derive(Deserialize)] struct QueryParams { access_token: Option, user_id: Option, } - let (mut parts, mut body) = match req.with_limited_body() { - Ok(limited_req) => { - let (parts, body) = limited_req.into_parts(); - let body = to_bytes(body) - .await - .map_err(|_| Error::BadRequest(ErrorKind::MissingToken, "Missing token."))?; - (parts, body) - } - Err(original_req) => { - let (parts, body) = original_req.into_parts(); - let body = to_bytes(body) - .await - .map_err(|_| Error::BadRequest(ErrorKind::MissingToken, "Missing token."))?; - (parts, body) - } - }; - let metadata = T::METADATA; - let auth_header: Option>> = parts.extract().await?; - let path_params: Path> = parts.extract().await?; + let auth_header = Option::>>::from_request(req).await?; + let path_params = Path::>::from_request(req).await?; - let query = parts.uri.query().unwrap_or_default(); - let query_params: QueryParams = match serde_html_form::from_str(query) { + let query = req.uri().query().unwrap_or_default(); + let query_params: QueryParams = match ruma::serde::urlencoded::from_str(query) { Ok(params) => params, Err(e) => { error!(%query, "Failed to deserialize query parameters: {}", e); @@ -78,6 +63,10 @@ where None => query_params.access_token.as_deref(), }; + let mut body = Bytes::from_request(req) + .await + .map_err(|_| Error::BadRequest(ErrorKind::MissingToken, "Missing token."))?; + let mut json_body = serde_json::from_slice::(&body).ok(); let appservices = services().appservice.all().unwrap(); @@ -149,24 +138,24 @@ where } } AuthScheme::ServerSignatures => { - let TypedHeader(Authorization(x_matrix)) = parts - .extract::>>() - .await - .map_err(|e| { - warn!("Missing or invalid Authorization 
header: {}", e); + let TypedHeader(Authorization(x_matrix)) = + TypedHeader::>::from_request(req) + .await + .map_err(|e| { + warn!("Missing or invalid Authorization header: {}", e); - let msg = match e.reason() { - TypedHeaderRejectionReason::Missing => { - "Missing Authorization header." - } - TypedHeaderRejectionReason::Error(_) => { - "Invalid X-Matrix signatures." - } - _ => "Unknown header-related error", - }; + let msg = match e.reason() { + TypedHeaderRejectionReason::Missing => { + "Missing Authorization header." + } + TypedHeaderRejectionReason::Error(_) => { + "Invalid X-Matrix signatures." + } + _ => "Unknown header-related error", + }; - Error::BadRequest(ErrorKind::Forbidden, msg) - })?; + Error::BadRequest(ErrorKind::Forbidden, msg) + })?; let origin_signatures = BTreeMap::from_iter([( x_matrix.key.clone(), @@ -181,11 +170,11 @@ where let mut request_map = BTreeMap::from_iter([ ( "method".to_owned(), - CanonicalJsonValue::String(parts.method.to_string()), + CanonicalJsonValue::String(req.method().to_string()), ), ( "uri".to_owned(), - CanonicalJsonValue::String(parts.uri.to_string()), + CanonicalJsonValue::String(req.uri().to_string()), ), ( "origin".to_owned(), @@ -235,7 +224,7 @@ where x_matrix.origin, e, request_map ); - if parts.uri.to_string().contains('@') { + if req.uri().to_string().contains('@') { warn!( "Request uri contained '@' character. Make sure your \ reverse proxy gives Conduit the raw uri (apache: use \ @@ -254,8 +243,8 @@ where } }; - let mut http_request = http::Request::builder().uri(parts.uri).method(parts.method); - *http_request.headers_mut().unwrap() = parts.headers; + let mut http_request = http::Request::builder().uri(req.uri()).method(req.method()); + *http_request.headers_mut().unwrap() = req.headers().clone(); if let Some(CanonicalJsonValue::Object(json_body)) = &mut json_body { let user_id = sender_user.clone().unwrap_or_else(|| { @@ -292,8 +281,7 @@ where debug!("{:?}", http_request); let body = T::try_from_http_request(http_request, &path_params).map_err(|e| { - warn!("try_from_http_request failed: {:?}", e); - debug!("JSON body: {:?}", json_body); + warn!("{:?}\n{:?}", e, json_body); Error::BadRequest(ErrorKind::BadJson, "Failed to deserialize request.") })?; @@ -320,7 +308,8 @@ impl Credentials for XMatrix { fn decode(value: &http::HeaderValue) -> Option { debug_assert!( value.as_bytes().starts_with(b"X-Matrix "), - "HeaderValue to decode should start with \"X-Matrix ..\", received = {value:?}", + "HeaderValue to decode should start with \"X-Matrix ..\", received = {:?}", + value, ); let parameters = str::from_utf8(&value.as_bytes()["X-Matrix ".len()..]) @@ -373,55 +362,3 @@ impl IntoResponse for RumaResponse { } } } - -// copied from hyper under the following license: -// Copyright (c) 2014-2021 Sean McArthur - -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: - -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. 
- -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. -pub(crate) async fn to_bytes(body: T) -> Result -where - T: HttpBody, -{ - futures_util::pin_mut!(body); - - // If there's only 1 chunk, we can just return Buf::to_bytes() - let mut first = if let Some(buf) = body.data().await { - buf? - } else { - return Ok(Bytes::new()); - }; - - let second = if let Some(buf) = body.data().await { - buf? - } else { - return Ok(first.copy_to_bytes(first.remaining())); - }; - - // With more than 1 buf, we gotta flatten into a Vec first. - let cap = first.remaining() + second.remaining() + body.size_hint().lower() as usize; - let mut vec = Vec::with_capacity(cap); - vec.put(first); - vec.put(second); - - while let Some(buf) = body.data().await { - vec.put(buf?); - } - - Ok(vec.into()) -} diff --git a/src/api/server_server.rs b/src/api/server_server.rs index f29de32b..543bd837 100644 --- a/src/api/server_server.rs +++ b/src/api/server_server.rs @@ -1,5 +1,3 @@ -#![allow(deprecated)] - use crate::{ api::client_server::{self, claim_keys_helper, get_keys_helper}, service::pdu::{gen_event_id_canonical_json, PduBuilder}, @@ -14,13 +12,16 @@ use ruma::{ client::error::{Error as RumaError, ErrorKind}, federation::{ authorization::get_event_authorization, - backfill::get_backfill, device::get_devices::{self, v1::UserDevice}, directory::{get_public_rooms, get_public_rooms_filtered}, discovery::{get_server_keys, get_server_version, ServerSigningKeys, VerifyKey}, event::{get_event, get_missing_events, get_room_state, get_room_state_ids}, keys::{claim_keys, get_keys}, - membership::{create_invite, create_join_event, prepare_join_event}, + membership::{ + create_invite, + create_join_event::{self, RoomState}, + prepare_join_event, + }, query::{get_profile_information, get_room_information}, transactions::{ edu::{DeviceListUpdateContent, DirectDeviceContent, Edu, SigningKeyUpdateContent}, @@ -30,20 +31,20 @@ use ruma::{ EndpointError, IncomingResponse, MatrixVersion, OutgoingRequest, OutgoingResponse, SendAccessToken, }, - directory::{Filter, RoomNetwork}, + directory::{IncomingFilter, IncomingRoomNetwork}, events::{ + presence::{PresenceEvent, PresenceEventContent}, receipt::{ReceiptEvent, ReceiptEventContent, ReceiptType}, room::{ join_rules::{JoinRule, RoomJoinRulesEventContent}, member::{MembershipState, RoomMemberEventContent}, }, - StateEventType, TimelineEventType, + RoomEventType, StateEventType, }, serde::{Base64, JsonObject, Raw}, to_device::DeviceIdOrAllDevices, - uint, user_id, CanonicalJsonObject, CanonicalJsonValue, EventId, MilliSecondsSinceUnixEpoch, - OwnedEventId, OwnedRoomId, OwnedServerName, OwnedServerSigningKeyId, OwnedUserId, RoomId, - ServerName, + CanonicalJsonValue, EventId, MilliSecondsSinceUnixEpoch, OwnedEventId, OwnedRoomId, + OwnedServerName, OwnedServerSigningKeyId, OwnedUserId, RoomId, ServerName, }; use serde_json::value::{to_raw_value, RawValue as RawJsonValue}; use std::{ @@ -55,7 +56,7 @@ use std::{ time::{Duration, Instant, SystemTime}, }; -use tracing::{debug, error, trace, warn}; +use tracing::{debug, error, info, warn}; /// Wraps either an literal 
IP address plus port, or a hostname plus complement /// (colon-plus-port if it was specified). @@ -84,8 +85,8 @@ pub enum FedDest { impl FedDest { fn into_https_string(self) -> String { match self { - Self::Literal(addr) => format!("https://{addr}"), - Self::Named(host, port) => format!("https://{host}{port}"), + Self::Literal(addr) => format!("https://{}", addr), + Self::Named(host, port) => format!("https://{}{}", host, port), } } @@ -123,14 +124,6 @@ where return Err(Error::bad_config("Federation is disabled.")); } - if destination == services().globals.server_name() { - return Err(Error::bad_config( - "Won't send federation request to ourselves", - )); - } - - debug!("Preparing to send request to {destination}"); - let mut write_destination_to_cache = false; let cached_result = services() @@ -157,7 +150,7 @@ where .try_into_http_request::>( &actual_destination_str, SendAccessToken::IfRequired(""), - &[MatrixVersion::V1_4], + &[MatrixVersion::V1_0], ) .map_err(|e| { warn!( @@ -237,13 +230,11 @@ where let url = reqwest_request.url().clone(); - debug!("Sending request to {destination} at {url}"); let response = services() .globals .federation_client() .execute(reqwest_request) .await; - debug!("Received response from {destination} at {url}"); match response { Ok(mut response) => { @@ -259,12 +250,10 @@ where .expect("http::response::Builder is usable"), ); - debug!("Getting response bytes from {destination}"); let body = response.bytes().await.unwrap_or_else(|e| { warn!("server error {}", e); Vec::new().into() }); // TODO: handle timeout - debug!("Got response bytes from {destination}"); if status != 200 { warn!( @@ -283,7 +272,6 @@ where .expect("reqwest body is valid http body"); if status == 200 { - debug!("Parsing response bytes from {destination}"); let response = T::IncomingResponse::try_from_http_response(http_response); if response.is_ok() && write_destination_to_cache { services() @@ -305,10 +293,15 @@ where Error::BadServerResponse("Server returned bad 200 response.") }) } else { - debug!("Returning error from {destination}"); Err(Error::FederationError( destination.to_owned(), - RumaError::from_http_response(http_response), + RumaError::try_from_http_response(http_response).map_err(|e| { + warn!( + "Invalid {} response from {} on: {} {}", + status, &destination, url, e + ); + Error::BadServerResponse("Server returned bad error response.") + })?, )) } } @@ -344,38 +337,36 @@ fn add_port_to_hostname(destination_str: &str) -> FedDest { /// Implemented according to the specification at https://matrix.org/docs/spec/server_server/r0.1.4#resolving-server-names /// Numbers in comments below refer to bullet points in linked section of specification async fn find_actual_destination(destination: &'_ ServerName) -> (FedDest, FedDest) { - debug!("Finding actual destination for {destination}"); let destination_str = destination.as_str().to_owned(); let mut hostname = destination_str.clone(); let actual_destination = match get_ip_with_port(&destination_str) { Some(host_port) => { - debug!("1: IP literal with provided or default port"); + // 1: IP literal with provided or default port host_port } None => { if let Some(pos) = destination_str.find(':') { - debug!("2: Hostname with included port"); + // 2: Hostname with included port let (host, port) = destination_str.split_at(pos); FedDest::Named(host.to_owned(), port.to_owned()) } else { - debug!("Requesting well known for {destination}"); match request_well_known(destination.as_str()).await { + // 3: A .well-known file is available 
Some(delegated_hostname) => { - debug!("3: A .well-known file is available"); hostname = add_port_to_hostname(&delegated_hostname).into_uri_string(); match get_ip_with_port(&delegated_hostname) { Some(host_and_port) => host_and_port, // 3.1: IP literal in .well-known file None => { if let Some(pos) = delegated_hostname.find(':') { - debug!("3.2: Hostname with port in .well-known file"); + // 3.2: Hostname with port in .well-known file let (host, port) = delegated_hostname.split_at(pos); FedDest::Named(host.to_owned(), port.to_owned()) } else { - debug!("Delegated hostname has no port in this branch"); + // Delegated hostname has no port in this branch if let Some(hostname_override) = query_srv_record(&delegated_hostname).await { - debug!("3.3: SRV lookup successful"); + // 3.3: SRV lookup successful let force_port = hostname_override.port(); if let Ok(override_ip) = services() @@ -401,23 +392,23 @@ async fn find_actual_destination(destination: &'_ ServerName) -> (FedDest, FedDe } if let Some(port) = force_port { - FedDest::Named(delegated_hostname, format!(":{port}")) + FedDest::Named(delegated_hostname, format!(":{}", port)) } else { add_port_to_hostname(&delegated_hostname) } } else { - debug!("3.4: No SRV records, just use the hostname from .well-known"); + // 3.4: No SRV records, just use the hostname from .well-known add_port_to_hostname(&delegated_hostname) } } } } } + // 4: No .well-known or an error occured None => { - debug!("4: No .well-known or an error occured"); match query_srv_record(&destination_str).await { + // 4: SRV record found Some(hostname_override) => { - debug!("4: SRV record found"); let force_port = hostname_override.port(); if let Ok(override_ip) = services() @@ -443,22 +434,19 @@ async fn find_actual_destination(destination: &'_ ServerName) -> (FedDest, FedDe } if let Some(port) = force_port { - FedDest::Named(hostname.clone(), format!(":{port}")) + FedDest::Named(hostname.clone(), format!(":{}", port)) } else { add_port_to_hostname(&hostname) } } - None => { - debug!("5: No SRV record found"); - add_port_to_hostname(&destination_str) - } + // 5: No SRV record found + None => add_port_to_hostname(&destination_str), } } } } } }; - debug!("Actual destination: {actual_destination:?}"); // Can't use get_ip_with_port here because we don't want to add a port // to an IP address if it wasn't specified @@ -476,11 +464,10 @@ async fn find_actual_destination(destination: &'_ ServerName) -> (FedDest, FedDe } async fn query_srv_record(hostname: &'_ str) -> Option { - let hostname = hostname.trim_end_matches('.'); if let Ok(Some(host_port)) = services() .globals .dns_resolver() - .srv_lookup(format!("_matrix._tcp.{hostname}.")) + .srv_lookup(format!("_matrix._tcp.{}", hostname)) .await .map(|srv| { srv.iter().next().map(|result| { @@ -498,20 +485,22 @@ async fn query_srv_record(hostname: &'_ str) -> Option { } async fn request_well_known(destination: &str) -> Option { - let response = services() - .globals - .default_client() - .get(&format!("https://{destination}/.well-known/matrix/server")) - .send() - .await; - debug!("Got well known response"); - if let Err(e) = &response { - debug!("Well known error: {e:?}"); - return None; - } - let text = response.ok()?.text().await; - debug!("Got well known response text"); - let body: serde_json::Value = serde_json::from_str(&text.ok()?).ok()?; + let body: serde_json::Value = serde_json::from_str( + &services() + .globals + .default_client() + .get(&format!( + "https://{}/.well-known/matrix/server", + destination + )) + .send() + 
.await + .ok()? + .text() + .await + .ok()?, + ) + .ok()?; Some(body.get("m.server")?.as_str()?.to_owned()) } @@ -598,7 +587,7 @@ pub async fn get_server_keys_deprecated_route() -> impl IntoResponse { /// /// Lists the public rooms on this server. pub async fn get_public_rooms_filtered_route( - body: Ruma, + body: Ruma, ) -> Result { if !services().globals.allow_federation() { return Err(Error::bad_config("Federation is disabled.")); @@ -625,7 +614,7 @@ pub async fn get_public_rooms_filtered_route( /// /// Lists the public rooms on this server. pub async fn get_public_rooms_route( - body: Ruma, + body: Ruma, ) -> Result { if !services().globals.allow_federation() { return Err(Error::bad_config("Federation is disabled.")); @@ -635,8 +624,8 @@ pub async fn get_public_rooms_route( None, body.limit, body.since.as_deref(), - &Filter::default(), - &RoomNetwork::Matrix, + &IncomingFilter::default(), + &IncomingRoomNetwork::Matrix, ) .await?; @@ -648,42 +637,11 @@ pub async fn get_public_rooms_route( }) } -pub fn parse_incoming_pdu( - pdu: &RawJsonValue, -) -> Result<(OwnedEventId, CanonicalJsonObject, OwnedRoomId)> { - let value: CanonicalJsonObject = serde_json::from_str(pdu.get()).map_err(|e| { - warn!("Error parsing incoming event {:?}: {:?}", pdu, e); - Error::BadServerResponse("Invalid PDU in server response") - })?; - - let room_id: OwnedRoomId = value - .get("room_id") - .and_then(|id| RoomId::parse(id.as_str()?).ok()) - .ok_or(Error::BadRequest( - ErrorKind::InvalidParam, - "Invalid room id in pdu", - ))?; - - let room_version_id = services().rooms.state.get_room_version(&room_id)?; - - let (event_id, value) = match gen_event_id_canonical_json(&pdu, &room_version_id) { - Ok(t) => t, - Err(_) => { - // Event could not be converted to canonical json - return Err(Error::BadRequest( - ErrorKind::InvalidParam, - "Could not convert event to canonical json.", - )); - } - }; - Ok((event_id, value, room_id)) -} - /// # `PUT /_matrix/federation/v1/send/{txnId}` /// /// Push EDUs and PDUs to this server. pub async fn send_transaction_message_route( - body: Ruma, + body: Ruma, ) -> Result { if !services().globals.allow_federation() { return Err(Error::bad_config("Federation is disabled.")); @@ -707,33 +665,35 @@ pub async fn send_transaction_message_route( // let mut auth_cache = EventMap::new(); for pdu in &body.pdus { - let value: CanonicalJsonObject = serde_json::from_str(pdu.get()).map_err(|e| { - warn!("Error parsing incoming event {:?}: {:?}", pdu, e); - Error::BadServerResponse("Invalid PDU in server response") - })?; - let room_id: OwnedRoomId = value - .get("room_id") - .and_then(|id| RoomId::parse(id.as_str()?).ok()) - .ok_or(Error::BadRequest( - ErrorKind::InvalidParam, - "Invalid room id in pdu", - ))?; - - if services().rooms.state.get_room_version(&room_id).is_err() { - debug!("Server is not in room {room_id}"); - continue; - } - - let r = parse_incoming_pdu(&pdu); - let (event_id, value, room_id) = match r { + // We do not add the event_id field to the pdu here because of signature and hashes checks + let (event_id, value) = match gen_event_id_canonical_json(pdu) { Ok(t) => t, - Err(e) => { - warn!("Could not parse PDU: {e}"); - warn!("Full PDU: {:?}", &pdu); + Err(_) => { + // Event could not be converted to canonical json continue; } }; - // We do not add the event_id field to the pdu here because of signature and hashes checks + + // 0. 
Check the server is in the room + let room_id: OwnedRoomId = match value + .get("room_id") + .and_then(|id| RoomId::parse(id.as_str()?).ok()) + { + Some(id) => id, + None => { + // Event is invalid + resolved_map.insert( + event_id, + Err(Error::bad_database("Event needs a valid RoomId.")), + ); + continue; + } + }; + + services() + .rooms + .event_handler + .acl_check(sender_servername, &room_id)?; let mutex = Arc::clone( services() @@ -787,7 +747,34 @@ pub async fn send_transaction_message_route( .filter_map(|edu| serde_json::from_str::(edu.json().get()).ok()) { match edu { - Edu::Presence(_) => {} + Edu::Presence(presence) => { + for presence_update in presence.push { + let user_id = presence_update.user_id; + for room_id in services() + .rooms + .state_cache + .rooms_joined(&user_id) + .filter_map(|room_id| room_id.ok()) + { + services().rooms.edus.presence.update_presence( + &user_id, + &room_id, + PresenceEvent { + content: PresenceEventContent { + avatar_url: services().users.avatar_url(&user_id)?, + currently_active: Some(presence_update.currently_active), + displayname: services().users.displayname(&user_id)?, + last_active_ago: Some(presence_update.last_active_ago), + presence: presence_update.presence.clone(), + status_msg: presence_update.status_msg.clone(), + }, + sender: user_id.clone(), + }, + true, + )?; + } + } + } Edu::Receipt(receipt) => { for (room_id, room_updates) in receipt.receipts { for (user_id, user_updates) in room_updates.read { @@ -825,7 +812,7 @@ pub async fn send_transaction_message_route( .readreceipt_update(&user_id, &room_id, event)?; } else { // TODO fetch missing events - debug!("No known event ids in read receipt: {:?}", user_updates); + info!("No known event ids in read receipt: {:?}", user_updates); } } } @@ -929,7 +916,6 @@ pub async fn send_transaction_message_route( &master_key, &self_signing_key, &None, - true, )?; } } @@ -940,7 +926,7 @@ pub async fn send_transaction_message_route( Ok(send_transaction_message::v1::Response { pdus: resolved_map .into_iter() - .map(|(e, r)| (e, r.map_err(|e| e.sanitized_error()))) + .map(|(e, r)| (e, r.map_err(|e| e.to_string()))) .collect(), }) } @@ -951,7 +937,7 @@ pub async fn send_transaction_message_route( /// /// - Only works if a user of this server is currently invited or joined the room pub async fn get_event_route( - body: Ruma, + body: Ruma, ) -> Result { if !services().globals.allow_federation() { return Err(Error::bad_config("Federation is disabled.")); @@ -966,10 +952,7 @@ pub async fn get_event_route( .rooms .timeline .get_pdu_json(&body.event_id)? - .ok_or_else(|| { - warn!("Event not found, event ID: {:?}", &body.event_id); - Error::BadRequest(ErrorKind::NotFound, "Event not found.") - })?; + .ok_or(Error::BadRequest(ErrorKind::NotFound, "Event not found."))?; let room_id_str = event .get("room_id") @@ -990,17 +973,6 @@ pub async fn get_event_route( )); } - if !services().rooms.state_accessor.server_can_see_event( - sender_servername, - &room_id, - &body.event_id, - )? { - return Err(Error::BadRequest( - ErrorKind::Forbidden, - "Server is not allowed to see event.", - )); - } - Ok(get_event::v1::Response { origin: services().globals.server_name().to_owned(), origin_server_ts: MilliSecondsSinceUnixEpoch::now(), @@ -1008,88 +980,11 @@ pub async fn get_event_route( }) } -/// # `GET /_matrix/federation/v1/backfill/` -/// -/// Retrieves events from before the sender joined the room, if the room's -/// history visibility allows. 
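// [Editor's note, not part of the diff] The hunk below removes the
// `GET /_matrix/federation/v1/backfill/{roomId}` handler. Summarising the deleted
// implementation for readers skimming the patch: after the usual federation, ACL and
// room-membership checks it (1) resolves each event ID in `v` to its local timeline
// position and starts from the newest one it knows, (2) caps the requested `limit`
// at 100, (3) walks `pdus_until` backwards from that position, (4) keeps only events
// the requesting server may see according to `server_can_see_event` (history
// visibility), and (5) returns the surviving PDUs in outgoing federation format
// together with `origin` and `origin_server_ts`. This is a paraphrase of the code
// visible in the deleted lines, not new behaviour.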
-pub async fn get_backfill_route( - body: Ruma, -) -> Result { - if !services().globals.allow_federation() { - return Err(Error::bad_config("Federation is disabled.")); - } - - let sender_servername = body - .sender_servername - .as_ref() - .expect("server is authenticated"); - - debug!("Got backfill request from: {}", sender_servername); - - if !services() - .rooms - .state_cache - .server_in_room(sender_servername, &body.room_id)? - { - return Err(Error::BadRequest( - ErrorKind::Forbidden, - "Server is not in room.", - )); - } - - services() - .rooms - .event_handler - .acl_check(sender_servername, &body.room_id)?; - - let until = body - .v - .iter() - .map(|eventid| services().rooms.timeline.get_pdu_count(eventid)) - .filter_map(|r| r.ok().flatten()) - .max() - .ok_or(Error::BadRequest( - ErrorKind::InvalidParam, - "No known eventid in v", - ))?; - - let limit = body.limit.min(uint!(100)); - - let all_events = services() - .rooms - .timeline - .pdus_until(&user_id!("@doesntmatter:conduit.rs"), &body.room_id, until)? - .take(limit.try_into().unwrap()); - - let events = all_events - .filter_map(|r| r.ok()) - .filter(|(_, e)| { - matches!( - services().rooms.state_accessor.server_can_see_event( - sender_servername, - &e.room_id, - &e.event_id, - ), - Ok(true), - ) - }) - .map(|(_, pdu)| services().rooms.timeline.get_pdu_json(&pdu.event_id)) - .filter_map(|r| r.ok().flatten()) - .map(|pdu| PduEvent::convert_to_outgoing_federation_event(pdu)) - .collect(); - - Ok(get_backfill::v1::Response { - origin: services().globals.server_name().to_owned(), - origin_server_ts: MilliSecondsSinceUnixEpoch::now(), - pdus: events, - }) -} - /// # `POST /_matrix/federation/v1/get_missing_events/{roomId}` /// /// Retrieves events that the sender is missing. pub async fn get_missing_events_route( - body: Ruma, + body: Ruma, ) -> Result { if !services().globals.allow_federation() { return Err(Error::bad_config("Federation is disabled.")); @@ -1145,16 +1040,6 @@ pub async fn get_missing_events_route( i += 1; continue; } - - if !services().rooms.state_accessor.server_can_see_event( - sender_servername, - &body.room_id, - &queued_events[i], - )? { - i += 1; - continue; - } - queued_events.extend_from_slice( &serde_json::from_value::>( serde_json::to_value(pdu.get("prev_events").cloned().ok_or_else(|| { @@ -1178,7 +1063,7 @@ pub async fn get_missing_events_route( /// /// - This does not include the event itself pub async fn get_event_authorization_route( - body: Ruma, + body: Ruma, ) -> Result { if !services().globals.allow_federation() { return Err(Error::bad_config("Federation is disabled.")); @@ -1209,10 +1094,7 @@ pub async fn get_event_authorization_route( .rooms .timeline .get_pdu_json(&body.event_id)? - .ok_or_else(|| { - warn!("Event not found, event ID: {:?}", &body.event_id); - Error::BadRequest(ErrorKind::NotFound, "Event not found.") - })?; + .ok_or(Error::BadRequest(ErrorKind::NotFound, "Event not found."))?; let room_id_str = event .get("room_id") @@ -1240,7 +1122,7 @@ pub async fn get_event_authorization_route( /// /// Retrieves the current state of the room. pub async fn get_room_state_route( - body: Ruma, + body: Ruma, ) -> Result { if !services().globals.allow_federation() { return Err(Error::bad_config("Federation is disabled.")); @@ -1320,7 +1202,7 @@ pub async fn get_room_state_route( /// /// Retrieves the current state of the room. 
pub async fn get_room_state_ids_route( - body: Ruma, + body: Ruma, ) -> Result { if !services().globals.allow_federation() { return Err(Error::bad_config("Federation is disabled.")); @@ -1381,7 +1263,7 @@ pub async fn get_room_state_ids_route( /// /// Creates a join template. pub async fn create_join_event_template_route( - body: Ruma, + body: Ruma, ) -> Result { if !services().globals.allow_federation() { return Err(Error::bad_config("Federation is disabled.")); @@ -1468,7 +1350,7 @@ pub async fn create_join_event_template_route( let (_pdu, mut pdu_json) = services().rooms.timeline.create_hash_and_sign_event( PduBuilder { - event_type: TimelineEventType::RoomMember, + event_type: RoomEventType::RoomMember, content, unsigned: None, state_key: Some(body.user_id.to_string()), @@ -1493,7 +1375,7 @@ async fn create_join_event( sender_servername: &ServerName, room_id: &RoomId, pdu: &RawJsonValue, -) -> Result { +) -> Result { if !services().globals.allow_federation() { return Err(Error::bad_config("Federation is disabled.")); } @@ -1553,8 +1435,7 @@ async fn create_join_event( // let mut auth_cache = EventMap::new(); // We do not add the event_id field to the pdu here because of signature and hashes checks - let room_version_id = services().rooms.state.get_room_version(room_id)?; - let (event_id, value) = match gen_event_id_canonical_json(pdu, &room_version_id) { + let (event_id, value) = match gen_event_id_canonical_json(pdu) { Ok(t) => t, Err(_) => { // Event could not be converted to canonical json @@ -1615,7 +1496,7 @@ async fn create_join_event( services().sending.send_pdu(servers, &pdu_id)?; - Ok(create_join_event::v1::RoomState { + Ok(RoomState { auth_chain: auth_chain_ids .filter_map(|id| services().rooms.timeline.get_pdu_json(&id).ok().flatten()) .map(PduEvent::convert_to_outgoing_federation_event) @@ -1633,7 +1514,7 @@ async fn create_join_event( /// /// Submits a signed join event. pub async fn create_join_event_v1_route( - body: Ruma, + body: Ruma, ) -> Result { let sender_servername = body .sender_servername @@ -1649,25 +1530,14 @@ pub async fn create_join_event_v1_route( /// /// Submits a signed join event. pub async fn create_join_event_v2_route( - body: Ruma, + body: Ruma, ) -> Result { let sender_servername = body .sender_servername .as_ref() .expect("server is authenticated"); - let create_join_event::v1::RoomState { - auth_chain, - state, - event, - } = create_join_event(sender_servername, &body.room_id, &body.pdu).await?; - let room_state = create_join_event::v2::RoomState { - members_omitted: false, - auth_chain, - state, - event, - servers_in_room: None, - }; + let room_state = create_join_event(sender_servername, &body.room_id, &body.pdu).await?; Ok(create_join_event::v2::Response { room_state }) } @@ -1676,7 +1546,7 @@ pub async fn create_join_event_v2_route( /// /// Invites a remote user to a room. pub async fn create_invite_route( - body: Ruma, + body: Ruma, ) -> Result { if !services().globals.allow_federation() { return Err(Error::bad_config("Federation is disabled.")); @@ -1768,12 +1638,8 @@ pub async fn create_invite_route( invite_state.push(pdu.to_stripped_state_event()); - // If we are active in the room, the remote server will notify us about the join via /send - if !services() - .rooms - .state_cache - .server_in_room(services().globals.server_name(), &body.room_id)? - { + // If the room already exists, the remote server will notify us about the join via /send + if !services().rooms.metadata.exists(&pdu.room_id)? 
{ services().rooms.state_cache.update_membership( &body.room_id, &invited_user, @@ -1793,7 +1659,7 @@ pub async fn create_invite_route( /// /// Gets information on all devices of the user. pub async fn get_devices_route( - body: Ruma, + body: Ruma, ) -> Result { if !services().globals.allow_federation() { return Err(Error::bad_config("Federation is disabled.")); @@ -1827,14 +1693,12 @@ pub async fn get_devices_route( }) }) .collect(), - master_key: services().users.get_master_key(None, &body.user_id, &|u| { - u.server_name() == sender_servername - })?, + master_key: services() + .users + .get_master_key(&body.user_id, &|u| u.server_name() == sender_servername)?, self_signing_key: services() .users - .get_self_signing_key(None, &body.user_id, &|u| { - u.server_name() == sender_servername - })?, + .get_self_signing_key(&body.user_id, &|u| u.server_name() == sender_servername)?, }) } @@ -1842,7 +1706,7 @@ pub async fn get_devices_route( /// /// Resolve a room alias to a room id. pub async fn get_room_information_route( - body: Ruma, + body: Ruma, ) -> Result { if !services().globals.allow_federation() { return Err(Error::bad_config("Federation is disabled.")); @@ -1867,7 +1731,7 @@ pub async fn get_room_information_route( /// /// Gets information on a profile. pub async fn get_profile_information_route( - body: Ruma, + body: Ruma, ) -> Result { if !services().globals.allow_federation() { return Err(Error::bad_config("Federation is disabled.")); diff --git a/src/config/mod.rs b/src/config/mod.rs index a4d7cca4..025969e3 100644 --- a/src/config/mod.rs +++ b/src/config/mod.rs @@ -28,8 +28,6 @@ pub struct Config { pub db_cache_capacity_mb: f64, #[serde(default = "true_fn")] pub enable_lightning_bolt: bool, - #[serde(default = "true_fn")] - pub allow_check_for_updates: bool, #[serde(default = "default_conduit_cache_capacity_modifier")] pub conduit_cache_capacity_modifier: f64, #[serde(default = "default_rocksdb_max_open_files")] @@ -42,11 +40,8 @@ pub struct Config { pub max_request_size: u32, #[serde(default = "default_max_concurrent_requests")] pub max_concurrent_requests: u16, - #[serde(default = "default_max_fetch_prev_events")] - pub max_fetch_prev_events: u16, #[serde(default = "false_fn")] pub allow_registration: bool, - pub registration_token: Option, #[serde(default = "true_fn")] pub allow_encryption: bool, #[serde(default = "false_fn")] @@ -57,7 +52,6 @@ pub struct Config { pub allow_unstable_room_versions: bool, #[serde(default = "default_default_room_version")] pub default_room_version: RoomVersionId, - pub well_known_client: Option, #[serde(default = "false_fn")] pub allow_jaeger: bool, #[serde(default = "false_fn")] @@ -65,7 +59,7 @@ pub struct Config { #[serde(default)] pub proxy: ProxyConfig, pub jwt_secret: Option, - #[serde(default = "default_trusted_servers")] + #[serde(default = "Vec::new")] pub trusted_servers: Vec, #[serde(default = "default_log")] pub log: String, @@ -82,6 +76,19 @@ pub struct Config { pub emergency_password: Option, + #[serde(default = "true_fn")] + pub allow_presence: bool, + + #[serde(default = "default_presence_idle_timeout")] + pub presence_idle_timeout: u64, + #[serde(default = "default_presence_offline_timeout")] + pub presence_offline_timeout: u64, + + #[serde(default = "default_presence_cleanup_period")] + pub presence_cleanup_period: u64, + #[serde(default = "default_presence_cleanup_limit")] + pub presence_cleanup_limit: u64, + #[serde(flatten)] pub catchall: BTreeMap, } @@ -203,7 +210,7 @@ impl fmt::Display for Config { msg += &format!("{}: 
{}\n", line.1 .0, line.1 .1); } - write!(f, "{msg}") + write!(f, "{}", msg) } } @@ -228,7 +235,7 @@ fn default_database_backend() -> String { } fn default_db_cache_capacity_mb() -> f64 { - 300.0 + 10.0 } fn default_conduit_cache_capacity_modifier() -> f64 { @@ -236,7 +243,7 @@ fn default_conduit_cache_capacity_modifier() -> f64 { } fn default_rocksdb_max_open_files() -> i32 { - 1000 + 20 } fn default_pdu_cache_capacity() -> u32 { @@ -255,14 +262,6 @@ fn default_max_concurrent_requests() -> u16 { 100 } -fn default_max_fetch_prev_events() -> u16 { - 100_u16 -} - -fn default_trusted_servers() -> Vec { - vec![OwnedServerName::try_from("matrix.org").unwrap()] -} - fn default_log() -> String { "warn,state_res=warn,_=off,sled=off".to_owned() } @@ -271,6 +270,22 @@ fn default_turn_ttl() -> u64 { 60 * 60 * 24 } +fn default_presence_idle_timeout() -> u64 { + 60 +} + +fn default_presence_offline_timeout() -> u64 { + 30 * 60 +} + +fn default_presence_cleanup_period() -> u64 { + 24 * 60 * 60 +} + +fn default_presence_cleanup_limit() -> u64 { + 24 * 60 * 60 +} + // I know, it's a great name pub fn default_default_room_version() -> RoomVersionId { RoomVersionId::V9 diff --git a/src/database/abstraction.rs b/src/database/abstraction.rs index 0a321054..93660f9f 100644 --- a/src/database/abstraction.rs +++ b/src/database/abstraction.rs @@ -38,7 +38,6 @@ pub trait KeyValueDatabaseEngine: Send + Sync { fn memory_usage(&self) -> Result { Ok("Current database engine does not support memory usage reporting.".to_owned()) } - fn clear_caches(&self) {} } pub trait KvTree: Send + Sync { diff --git a/src/database/abstraction/rocksdb.rs b/src/database/abstraction/rocksdb.rs index b40c4393..34d91d29 100644 --- a/src/database/abstraction/rocksdb.rs +++ b/src/database/abstraction/rocksdb.rs @@ -45,17 +45,6 @@ fn db_options(max_open_files: i32, rocksdb_cache: &rocksdb::Cache) -> rocksdb::O db_opts.set_compaction_style(rocksdb::DBCompactionStyle::Level); db_opts.optimize_level_style_compaction(10 * 1024 * 1024); - // https://github.com/facebook/rocksdb/wiki/Setup-Options-and-Basic-Tuning - db_opts.set_max_background_jobs(6); - db_opts.set_bytes_per_sync(1048576); - - // https://github.com/facebook/rocksdb/wiki/WAL-Recovery-Modes#ktoleratecorruptedtailrecords - // - // Unclean shutdowns of a Matrix homeserver are likely to be fine when - // recovered in this manner as it's likely any lost information will be - // restored via federation. 
- db_opts.set_wal_recovery_mode(rocksdb::DBRecoveryMode::TolerateCorruptedTailRecords); - let prefix_extractor = rocksdb::SliceTransform::create_fixed_prefix(1); db_opts.set_prefix_extractor(prefix_extractor); @@ -65,7 +54,7 @@ fn db_options(max_open_files: i32, rocksdb_cache: &rocksdb::Cache) -> rocksdb::O impl KeyValueDatabaseEngine for Arc { fn open(config: &Config) -> Result { let cache_capacity_bytes = (config.db_cache_capacity_mb * 1024.0 * 1024.0) as usize; - let rocksdb_cache = rocksdb::Cache::new_lru_cache(cache_capacity_bytes); + let rocksdb_cache = rocksdb::Cache::new_lru_cache(cache_capacity_bytes).unwrap(); let db_opts = db_options(config.rocksdb_max_open_files, &rocksdb_cache); @@ -132,8 +121,6 @@ impl KeyValueDatabaseEngine for Arc { self.cache.get_pinned_usage() as f64 / 1024.0 / 1024.0, )) } - - fn clear_caches(&self) {} } impl RocksDbEngineTree<'_> { @@ -174,7 +161,7 @@ impl KvTree for RocksDbEngineTree<'_> { self.db .rocks .iterator_cf(&self.cf(), rocksdb::IteratorMode::Start) - .map(|r| r.unwrap()) + //.map(|r| r.unwrap()) .map(|(k, v)| (Vec::from(k), Vec::from(v))), ) } @@ -198,7 +185,7 @@ impl KvTree for RocksDbEngineTree<'_> { }, ), ) - .map(|r| r.unwrap()) + //.map(|r| r.unwrap()) .map(|(k, v)| (Vec::from(k), Vec::from(v))), ) } @@ -239,7 +226,7 @@ impl KvTree for RocksDbEngineTree<'_> { &self.cf(), rocksdb::IteratorMode::From(&prefix, rocksdb::Direction::Forward), ) - .map(|r| r.unwrap()) + //.map(|r| r.unwrap()) .map(|(k, v)| (Vec::from(k), Vec::from(v))) .take_while(move |(k, _)| k.starts_with(&prefix)), ) diff --git a/src/database/abstraction/sqlite.rs b/src/database/abstraction/sqlite.rs index b69efb61..4961fd74 100644 --- a/src/database/abstraction/sqlite.rs +++ b/src/database/abstraction/sqlite.rs @@ -106,7 +106,7 @@ impl KeyValueDatabaseEngine for Arc { } fn open_tree(&self, name: &str) -> Result> { - self.write_lock().execute(&format!("CREATE TABLE IF NOT EXISTS {name} ( \"key\" BLOB PRIMARY KEY, \"value\" BLOB NOT NULL )"), [])?; + self.write_lock().execute(&format!("CREATE TABLE IF NOT EXISTS {} ( \"key\" BLOB PRIMARY KEY, \"value\" BLOB NOT NULL )", name), [])?; Ok(Arc::new(SqliteTable { engine: Arc::clone(self), @@ -135,6 +135,7 @@ type TupleOfBytes = (Vec, Vec); impl SqliteTable { fn get_with_guard(&self, guard: &Connection, key: &[u8]) -> Result>> { + //dbg!(&self.name); Ok(guard .prepare(format!("SELECT value FROM {} WHERE key = ?", self.name).as_str())? 
.query_row([key], |row| row.get(0)) @@ -142,6 +143,7 @@ impl SqliteTable { } fn insert_with_guard(&self, guard: &Connection, key: &[u8], value: &[u8]) -> Result<()> { + //dbg!(&self.name); guard.execute( format!( "INSERT OR REPLACE INTO {} (key, value) VALUES (?, ?)", @@ -174,7 +176,10 @@ impl SqliteTable { statement .query_map([], |row| Ok((row.get_unwrap(0), row.get_unwrap(1)))) .unwrap() - .map(move |r| r.unwrap()), + .map(move |r| { + //dbg!(&name); + r.unwrap() + }), ); Box::new(PreparedStatementIterator { @@ -271,7 +276,10 @@ impl KvTree for SqliteTable { statement .query_map([from], |row| Ok((row.get_unwrap(0), row.get_unwrap(1)))) .unwrap() - .map(move |r| r.unwrap()), + .map(move |r| { + //dbg!(&name); + r.unwrap() + }), ); Box::new(PreparedStatementIterator { iterator, @@ -293,7 +301,10 @@ impl KvTree for SqliteTable { statement .query_map([from], |row| Ok((row.get_unwrap(0), row.get_unwrap(1)))) .unwrap() - .map(move |r| r.unwrap()), + .map(move |r| { + //dbg!(&name); + r.unwrap() + }), ); Box::new(PreparedStatementIterator { diff --git a/src/database/key_value/globals.rs b/src/database/key_value/globals.rs index 11aa0641..7b7675ca 100644 --- a/src/database/key_value/globals.rs +++ b/src/database/key_value/globals.rs @@ -1,8 +1,7 @@ -use std::collections::{BTreeMap, HashMap}; +use std::collections::BTreeMap; use async_trait::async_trait; use futures_util::{stream::FuturesUnordered, StreamExt}; -use lru_cache::LruCache; use ruma::{ api::federation::discovery::{ServerSigningKeys, VerifyKey}, signatures::Ed25519KeyPair, @@ -12,7 +11,6 @@ use ruma::{ use crate::{database::KeyValueDatabase, service, services, utils, Error, Result}; pub const COUNTER: &[u8] = b"c"; -pub const LAST_CHECK_FOR_UPDATES_COUNT: &[u8] = b"u"; #[async_trait] impl service::globals::Data for KeyValueDatabase { @@ -28,23 +26,6 @@ impl service::globals::Data for KeyValueDatabase { }) } - fn last_check_for_updates_id(&self) -> Result { - self.global - .get(LAST_CHECK_FOR_UPDATES_COUNT)? 
- .map_or(Ok(0_u64), |bytes| { - utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("last check for updates count has invalid bytes.") - }) - }) - } - - fn update_check_for_updates_id(&self, id: u64) -> Result<()> { - self.global - .insert(LAST_CHECK_FOR_UPDATES_COUNT, &id.to_be_bytes())?; - - Ok(()) - } - async fn watch(&self, user_id: &UserId, device_id: &DeviceId) -> Result<()> { let userid_bytes = user_id.as_bytes().to_vec(); let mut userid_prefix = userid_bytes.clone(); @@ -137,67 +118,8 @@ impl service::globals::Data for KeyValueDatabase { self._db.cleanup() } - fn memory_usage(&self) -> String { - let pdu_cache = self.pdu_cache.lock().unwrap().len(); - let shorteventid_cache = self.shorteventid_cache.lock().unwrap().len(); - let auth_chain_cache = self.auth_chain_cache.lock().unwrap().len(); - let eventidshort_cache = self.eventidshort_cache.lock().unwrap().len(); - let statekeyshort_cache = self.statekeyshort_cache.lock().unwrap().len(); - let our_real_users_cache = self.our_real_users_cache.read().unwrap().len(); - let appservice_in_room_cache = self.appservice_in_room_cache.read().unwrap().len(); - let lasttimelinecount_cache = self.lasttimelinecount_cache.lock().unwrap().len(); - - let mut response = format!( - "\ -pdu_cache: {pdu_cache} -shorteventid_cache: {shorteventid_cache} -auth_chain_cache: {auth_chain_cache} -eventidshort_cache: {eventidshort_cache} -statekeyshort_cache: {statekeyshort_cache} -our_real_users_cache: {our_real_users_cache} -appservice_in_room_cache: {appservice_in_room_cache} -lasttimelinecount_cache: {lasttimelinecount_cache}\n" - ); - if let Ok(db_stats) = self._db.memory_usage() { - response += &db_stats; - } - - response - } - - fn clear_caches(&self, amount: u32) { - if amount > 0 { - let c = &mut *self.pdu_cache.lock().unwrap(); - *c = LruCache::new(c.capacity()); - } - if amount > 1 { - let c = &mut *self.shorteventid_cache.lock().unwrap(); - *c = LruCache::new(c.capacity()); - } - if amount > 2 { - let c = &mut *self.auth_chain_cache.lock().unwrap(); - *c = LruCache::new(c.capacity()); - } - if amount > 3 { - let c = &mut *self.eventidshort_cache.lock().unwrap(); - *c = LruCache::new(c.capacity()); - } - if amount > 4 { - let c = &mut *self.statekeyshort_cache.lock().unwrap(); - *c = LruCache::new(c.capacity()); - } - if amount > 5 { - let c = &mut *self.our_real_users_cache.write().unwrap(); - *c = HashMap::new(); - } - if amount > 6 { - let c = &mut *self.appservice_in_room_cache.write().unwrap(); - *c = HashMap::new(); - } - if amount > 7 { - let c = &mut *self.lasttimelinecount_cache.lock().unwrap(); - *c = HashMap::new(); - } + fn memory_usage(&self) -> Result { + self._db.memory_usage() } fn load_keypair(&self) -> Result { diff --git a/src/database/key_value/rooms/edus/presence.rs b/src/database/key_value/rooms/edus/presence.rs index 904b1c44..7732983a 100644 --- a/src/database/key_value/rooms/edus/presence.rs +++ b/src/database/key_value/rooms/edus/presence.rs @@ -1,10 +1,53 @@ -use std::collections::HashMap; +use futures_util::{stream::FuturesUnordered, StreamExt}; +use std::{ + collections::{hash_map::Entry, HashMap}, + mem, + time::Duration, +}; +use tracing::{error, info}; use ruma::{ events::presence::PresenceEvent, presence::PresenceState, OwnedUserId, RoomId, UInt, UserId, }; +use tokio::{sync::mpsc, time::sleep}; -use crate::{database::KeyValueDatabase, service, services, utils, Error, Result}; +use crate::{ + database::KeyValueDatabase, + service::{self, rooms::edus::presence::PresenceIter}, + services, utils, + 
utils::{millis_since_unix_epoch, u64_from_bytes}, + Error, Result, +}; + +pub struct PresenceUpdate { + count: u64, + prev_timestamp: u64, + curr_timestamp: u64, +} + +impl PresenceUpdate { + fn to_be_bytes(&self) -> Vec { + [ + self.count.to_be_bytes(), + self.prev_timestamp.to_be_bytes(), + self.curr_timestamp.to_be_bytes(), + ] + .concat() + } + + fn from_be_bytes(bytes: &[u8]) -> Result { + let (count_bytes, timestamps_bytes) = bytes.split_at(mem::size_of::()); + let (prev_timestamp_bytes, curr_timestamp_bytes) = + timestamps_bytes.split_at(mem::size_of::()); + Ok(Self { + count: u64_from_bytes(count_bytes).expect("count bytes from DB are valid"), + prev_timestamp: u64_from_bytes(prev_timestamp_bytes) + .expect("timestamp bytes from DB are valid"), + curr_timestamp: u64_from_bytes(curr_timestamp_bytes) + .expect("timestamp bytes from DB are valid"), + }) + } +} impl service::rooms::edus::presence::Data for KeyValueDatabase { fn update_presence( @@ -13,45 +56,82 @@ impl service::rooms::edus::presence::Data for KeyValueDatabase { room_id: &RoomId, presence: PresenceEvent, ) -> Result<()> { - // TODO: Remove old entry? Or maybe just wipe completely from time to time? + let roomuser_id = [room_id.as_bytes(), &[0xff], user_id.as_bytes()].concat(); - let count = services().globals.next_count()?.to_be_bytes(); - - let mut presence_id = room_id.as_bytes().to_vec(); - presence_id.push(0xff); - presence_id.extend_from_slice(&count); - presence_id.push(0xff); - presence_id.extend_from_slice(presence.sender.as_bytes()); - - self.presenceid_presence.insert( - &presence_id, - &serde_json::to_vec(&presence).expect("PresenceEvent can be serialized"), + self.roomuserid_presenceevent.insert( + &roomuser_id, + &serde_json::to_vec(&presence).expect("presence event from DB is valid"), )?; - self.userid_lastpresenceupdate.insert( + let timestamp = match presence.content.last_active_ago { + Some(active_ago) => millis_since_unix_epoch().saturating_sub(active_ago.into()), + None => millis_since_unix_epoch(), + }; + + self.userid_presenceupdate.insert( user_id.as_bytes(), - &utils::millis_since_unix_epoch().to_be_bytes(), + &PresenceUpdate { + count: services().globals.next_count()?, + prev_timestamp: timestamp, + curr_timestamp: timestamp, + } + .to_be_bytes(), )?; Ok(()) } - fn ping_presence(&self, user_id: &UserId) -> Result<()> { - self.userid_lastpresenceupdate.insert( - user_id.as_bytes(), - &utils::millis_since_unix_epoch().to_be_bytes(), - )?; + fn ping_presence( + &self, + user_id: &UserId, + update_count: bool, + update_timestamp: bool, + ) -> Result<()> { + let now = millis_since_unix_epoch(); + + let presence = self + .userid_presenceupdate + .get(user_id.as_bytes())? + .map(|presence_bytes| PresenceUpdate::from_be_bytes(&presence_bytes)) + .transpose()?; + + let new_presence = match presence { + Some(presence) => PresenceUpdate { + count: if update_count { + services().globals.next_count()? 
+ } else { + presence.count + }, + prev_timestamp: if update_timestamp { + presence.curr_timestamp + } else { + presence.prev_timestamp + }, + curr_timestamp: if update_timestamp { + now + } else { + presence.curr_timestamp + }, + }, + None => PresenceUpdate { + count: services().globals.current_count()?, + prev_timestamp: now, + curr_timestamp: now, + }, + }; + + self.userid_presenceupdate + .insert(user_id.as_bytes(), &new_presence.to_be_bytes())?; Ok(()) } - fn last_presence_update(&self, user_id: &UserId) -> Result> { - self.userid_lastpresenceupdate + fn last_presence_update(&self, user_id: &UserId) -> Result> { + self.userid_presenceupdate .get(user_id.as_bytes())? .map(|bytes| { - utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Invalid timestamp in userid_lastpresenceupdate.") - }) + PresenceUpdate::from_be_bytes(&bytes) + .map(|update| (update.prev_timestamp, update.curr_timestamp)) }) .transpose() } @@ -60,93 +140,268 @@ impl service::rooms::edus::presence::Data for KeyValueDatabase { &self, room_id: &RoomId, user_id: &UserId, - count: u64, + presence_timestamp: u64, ) -> Result> { - let mut presence_id = room_id.as_bytes().to_vec(); - presence_id.push(0xff); - presence_id.extend_from_slice(&count.to_be_bytes()); - presence_id.push(0xff); - presence_id.extend_from_slice(user_id.as_bytes()); - - self.presenceid_presence - .get(&presence_id)? - .map(|value| parse_presence_event(&value)) + let roomuser_id = [room_id.as_bytes(), &[0xff], user_id.as_bytes()].concat(); + self.roomuserid_presenceevent + .get(&roomuser_id)? + .map(|value| parse_presence_event(&value, presence_timestamp)) .transpose() } - fn presence_since( - &self, - room_id: &RoomId, - since: u64, - ) -> Result> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - let mut first_possible_edu = prefix.clone(); - first_possible_edu.extend_from_slice(&(since + 1).to_be_bytes()); // +1 so we don't send the event at since - let mut hashmap = HashMap::new(); - - for (key, value) in self - .presenceid_presence - .iter_from(&first_possible_edu, false) - .take_while(|(key, _)| key.starts_with(&prefix)) - { - let user_id = UserId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), + fn presence_since<'a>(&'a self, room_id: &RoomId, since: u64) -> Result> { + let user_timestamp: HashMap = self + .userid_presenceupdate + .iter() + .map(|(user_id_bytes, update_bytes)| { + ( + UserId::parse( + utils::string_from_bytes(&user_id_bytes) + .expect("UserID bytes are a valid string"), + ) + .expect("UserID bytes from database are a valid UserID"), + PresenceUpdate::from_be_bytes(&update_bytes) + .expect("PresenceUpdate bytes from database are a valid PresenceUpdate"), ) - .map_err(|_| Error::bad_database("Invalid UserId bytes in presenceid_presence."))?, - ) - .map_err(|_| Error::bad_database("Invalid UserId in presenceid_presence."))?; + }) + .filter_map(|(user_id, presence_update)| { + if presence_update.count <= since + || !services() + .rooms + .state_cache + .is_joined(&user_id, room_id) + .ok()? 
+ { + return None; + } - let presence = parse_presence_event(&value)?; + Some((user_id, presence_update.curr_timestamp)) + }) + .collect(); - hashmap.insert(user_id, presence); - } + Ok(Box::new( + self.roomuserid_presenceevent + .scan_prefix(room_id.as_bytes().to_vec()) + .filter_map(move |(roomuserid_bytes, presence_bytes)| { + let user_id_bytes = roomuserid_bytes.split(|byte| *byte == 0xff).last()?; + let user_id: OwnedUserId = UserId::parse( + utils::string_from_bytes(user_id_bytes) + .expect("UserID bytes are a valid string"), + ) + .expect("UserID bytes from database are a valid UserID"); - Ok(hashmap) + let timestamp = user_timestamp.get(&user_id)?; + let presence_event = parse_presence_event(&presence_bytes, *timestamp) + .expect("PresenceEvent bytes from database are a valid PresenceEvent"); + + Some((user_id, presence_event)) + }), + )) } - /* - fn presence_maintain(&self, db: Arc>) { - // TODO @M0dEx: move this to a timed tasks module - tokio::spawn(async move { - loop { - select! { - Some(user_id) = self.presence_timers.next() { - // TODO @M0dEx: would it be better to acquire the lock outside the loop? - let guard = db.read().await; + fn presence_maintain( + &self, + mut timer_receiver: mpsc::UnboundedReceiver, + ) -> Result<()> { + let mut timers = FuturesUnordered::new(); + let mut timers_timestamp: HashMap = HashMap::new(); - // TODO @M0dEx: add self.presence_timers - // TODO @M0dEx: maintain presence + tokio::spawn(async move { + // Wait for services to be created + sleep(Duration::from_secs(15)).await; + + if !services().globals.allow_presence() { + return; + } + + let idle_timeout = Duration::from_secs(services().globals.presence_idle_timeout()); + let offline_timeout = + Duration::from_secs(services().globals.presence_offline_timeout()); + + // TODO: Get rid of this hack (hinting correct types to rustc) + timers.push(create_presence_timer( + idle_timeout, + UserId::parse_with_server_name("conduit", services().globals.server_name()) + .expect("Conduit user always exists"), + )); + + loop { + tokio::select! 
{ + Some(user_id) = timers.next() => { + info!("Processing timer for user '{}' ({})", user_id.clone(), timers.len()); + let (prev_timestamp, curr_timestamp) = match services().rooms.edus.presence.last_presence_update(&user_id) { + Ok(timestamp_tuple) => match timestamp_tuple { + Some(timestamp_tuple) => timestamp_tuple, + None => continue, + }, + Err(e) => { + error!("{e}"); + continue; + } + }; + + let prev_presence_state = determine_presence_state(prev_timestamp); + let curr_presence_state = determine_presence_state(curr_timestamp); + + // Continue if there is no change in state + if prev_presence_state == curr_presence_state { + continue; + } + + match services().rooms.edus.presence.ping_presence(&user_id, true, false, false) { + Ok(_) => (), + Err(e) => error!("{e}") + } + + // TODO: Notify federation sender + } + Some(user_id) = timer_receiver.recv() => { + let now = millis_since_unix_epoch(); + // Do not create timers if we added timers recently + let should_send = match timers_timestamp.entry(user_id.to_owned()) { + Entry::Occupied(mut entry) => { + if now - entry.get() > 15 * 1000 { + entry.insert(now); + true + } else { + false + } + }, + Entry::Vacant(entry) => { + entry.insert(now); + true + } + }; + + if !should_send { + continue; + } + + // Idle timeout + timers.push(create_presence_timer(idle_timeout, user_id.clone())); + + // Offline timeout + timers.push(create_presence_timer(offline_timeout, user_id.clone())); + + info!("Added timers for user '{}' ({})", user_id, timers.len()); } } } }); + + Ok(()) + } + + fn presence_cleanup(&self) -> Result<()> { + let userid_presenceupdate = self.userid_presenceupdate.clone(); + let roomuserid_presenceevent = self.roomuserid_presenceevent.clone(); + + tokio::spawn(async move { + // Wait for services to be created + sleep(Duration::from_secs(15)).await; + + if !services().globals.allow_presence() { + return; + } + + let period = Duration::from_secs(services().globals.presence_cleanup_period()); + let age_limit = Duration::from_secs(services().globals.presence_cleanup_limit()); + + loop { + let mut removed_events: u64 = 0; + let age_limit_curr = + millis_since_unix_epoch().saturating_sub(age_limit.as_millis() as u64); + + for user_id in userid_presenceupdate + .iter() + .map(|(user_id_bytes, update_bytes)| { + ( + UserId::parse( + utils::string_from_bytes(&user_id_bytes) + .expect("UserID bytes are a valid string"), + ) + .expect("UserID bytes from database are a valid UserID"), + PresenceUpdate::from_be_bytes(&update_bytes).expect( + "PresenceUpdate bytes from database are a valid PresenceUpdate", + ), + ) + }) + .filter_map(|(user_id, presence_update)| { + if presence_update.curr_timestamp < age_limit_curr { + return None; + } + + Some(user_id) + }) + { + match userid_presenceupdate.remove(user_id.as_bytes()) { + Ok(_) => (), + Err(e) => { + error!("An errord occured while removing a stale presence update: {e}") + } + } + + for room_id in services() + .rooms + .state_cache + .rooms_joined(&user_id) + .filter_map(|room_id| room_id.ok()) + { + match roomuserid_presenceevent + .remove(&[room_id.as_bytes(), &[0xff], user_id.as_bytes()].concat()) + { + Ok(_) => removed_events += 1, + Err(e) => error!( + "An errord occured while removing a stale presence event: {e}" + ), + } + } + } + + info!("Cleaned up {removed_events} stale presence events!"); + sleep(period).await; + } + }); + + Ok(()) } - */ } -fn parse_presence_event(bytes: &[u8]) -> Result { +async fn create_presence_timer(duration: Duration, user_id: OwnedUserId) -> OwnedUserId { + 
sleep(duration).await; + + user_id +} + +fn parse_presence_event(bytes: &[u8], presence_timestamp: u64) -> Result { let mut presence: PresenceEvent = serde_json::from_slice(bytes) .map_err(|_| Error::bad_database("Invalid presence event in db."))?; - let current_timestamp: UInt = utils::millis_since_unix_epoch() - .try_into() - .expect("time is valid"); - - if presence.content.presence == PresenceState::Online { - // Don't set last_active_ago when the user is online - presence.content.last_active_ago = None; - } else { - // Convert from timestamp to duration - presence.content.last_active_ago = presence - .content - .last_active_ago - .map(|timestamp| current_timestamp - timestamp); - } + translate_active_ago(&mut presence, presence_timestamp); Ok(presence) } + +fn determine_presence_state(last_active_ago: u64) -> PresenceState { + let globals = &services().globals; + + if last_active_ago < globals.presence_idle_timeout() * 1000 { + PresenceState::Online + } else if last_active_ago < globals.presence_offline_timeout() * 1000 { + PresenceState::Unavailable + } else { + PresenceState::Offline + } +} + +/// Translates the timestamp representing last_active_ago to a diff from now. +fn translate_active_ago(presence_event: &mut PresenceEvent, last_active_ts: u64) { + let last_active_ago = millis_since_unix_epoch().saturating_sub(last_active_ts); + + presence_event.content.presence = determine_presence_state(last_active_ago); + + presence_event.content.last_active_ago = match presence_event.content.presence { + PresenceState::Online => None, + _ => Some(UInt::new_saturating(last_active_ago)), + } +} diff --git a/src/database/key_value/rooms/mod.rs b/src/database/key_value/rooms/mod.rs index e7b53d30..406943ed 100644 --- a/src/database/key_value/rooms/mod.rs +++ b/src/database/key_value/rooms/mod.rs @@ -12,7 +12,6 @@ mod state; mod state_accessor; mod state_cache; mod state_compressor; -mod threads; mod timeline; mod user; diff --git a/src/database/key_value/rooms/pdu_metadata.rs b/src/database/key_value/rooms/pdu_metadata.rs index 0641f9d8..76ec7346 100644 --- a/src/database/key_value/rooms/pdu_metadata.rs +++ b/src/database/key_value/rooms/pdu_metadata.rs @@ -1,64 +1,10 @@ -use std::{mem, sync::Arc}; +use std::sync::Arc; -use ruma::{EventId, RoomId, UserId}; +use ruma::{EventId, RoomId}; -use crate::{ - database::KeyValueDatabase, - service::{self, rooms::timeline::PduCount}, - services, utils, Error, PduEvent, Result, -}; +use crate::{database::KeyValueDatabase, service, Result}; impl service::rooms::pdu_metadata::Data for KeyValueDatabase { - fn add_relation(&self, from: u64, to: u64) -> Result<()> { - let mut key = to.to_be_bytes().to_vec(); - key.extend_from_slice(&from.to_be_bytes()); - self.tofrom_relation.insert(&key, &[])?; - Ok(()) - } - - fn relations_until<'a>( - &'a self, - user_id: &'a UserId, - shortroomid: u64, - target: u64, - until: PduCount, - ) -> Result> + 'a>> { - let prefix = target.to_be_bytes().to_vec(); - let mut current = prefix.clone(); - - let count_raw = match until { - PduCount::Normal(x) => x - 1, - PduCount::Backfilled(x) => { - current.extend_from_slice(&0_u64.to_be_bytes()); - u64::MAX - x - 1 - } - }; - current.extend_from_slice(&count_raw.to_be_bytes()); - - Ok(Box::new( - self.tofrom_relation - .iter_from(¤t, true) - .take_while(move |(k, _)| k.starts_with(&prefix)) - .map(move |(tofrom, _data)| { - let from = utils::u64_from_bytes(&tofrom[(mem::size_of::())..]) - .map_err(|_| Error::bad_database("Invalid count in tofrom_relation."))?; - - let mut pduid = 
shortroomid.to_be_bytes().to_vec(); - pduid.extend_from_slice(&from.to_be_bytes()); - - let mut pdu = services() - .rooms - .timeline - .get_pdu_from_id(&pduid)? - .ok_or_else(|| Error::bad_database("Pdu in tofrom_relation is invalid."))?; - if pdu.sender != user_id { - pdu.remove_transaction_id()?; - } - Ok((PduCount::Normal(from), pdu)) - }), - )) - } - fn mark_as_referenced(&self, room_id: &RoomId, event_ids: &[Arc]) -> Result<()> { for prev in event_ids { let mut key = room_id.as_bytes().to_vec(); diff --git a/src/database/key_value/rooms/search.rs b/src/database/key_value/rooms/search.rs index ad573f06..19ae57b4 100644 --- a/src/database/key_value/rooms/search.rs +++ b/src/database/key_value/rooms/search.rs @@ -1,3 +1,5 @@ +use std::mem::size_of; + use ruma::RoomId; use crate::{database::KeyValueDatabase, service, services, utils, Result}; @@ -13,7 +15,7 @@ impl service::rooms::search::Data for KeyValueDatabase { let mut key = shortroomid.to_be_bytes().to_vec(); key.extend_from_slice(word.as_bytes()); key.push(0xff); - key.extend_from_slice(pdu_id); // TODO: currently we save the room id a second time here + key.extend_from_slice(pdu_id); (key, Vec::new()) }); @@ -32,6 +34,7 @@ impl service::rooms::search::Data for KeyValueDatabase { .expect("room exists") .to_be_bytes() .to_vec(); + let prefix_clone = prefix.clone(); let words: Vec<_> = search_string .split_terminator(|c: char| !c.is_alphanumeric()) @@ -43,7 +46,6 @@ impl service::rooms::search::Data for KeyValueDatabase { let mut prefix2 = prefix.clone(); prefix2.extend_from_slice(word.as_bytes()); prefix2.push(0xff); - let prefix3 = prefix2.clone(); let mut last_possible_id = prefix2.clone(); last_possible_id.extend_from_slice(&u64::MAX.to_be_bytes()); @@ -51,7 +53,7 @@ impl service::rooms::search::Data for KeyValueDatabase { self.tokenids .iter_from(&last_possible_id, true) // Newest pdus first .take_while(move |(k, _)| k.starts_with(&prefix2)) - .map(move |(key, _)| key[prefix3.len()..].to_vec()) + .map(|(key, _)| key[key.len() - size_of::()..].to_vec()) }); let common_elements = match utils::common_elements(iterators, |a, b| { @@ -62,6 +64,12 @@ impl service::rooms::search::Data for KeyValueDatabase { None => return Ok(None), }; - Ok(Some((Box::new(common_elements), words))) + let mapped = common_elements.map(move |id| { + let mut pduid = prefix_clone.clone(); + pduid.extend_from_slice(&id); + pduid + }); + + Ok(Some((Box::new(mapped), words))) } } diff --git a/src/database/key_value/rooms/state_accessor.rs b/src/database/key_value/rooms/state_accessor.rs index ad08f46e..70e59acb 100644 --- a/src/database/key_value/rooms/state_accessor.rs +++ b/src/database/key_value/rooms/state_accessor.rs @@ -1,4 +1,7 @@ -use std::{collections::HashMap, sync::Arc}; +use std::{ + collections::{BTreeMap, HashMap}, + sync::Arc, +}; use crate::{database::KeyValueDatabase, service, services, utils, Error, PduEvent, Result}; use async_trait::async_trait; @@ -6,7 +9,7 @@ use ruma::{events::StateEventType, EventId, RoomId}; #[async_trait] impl service::rooms::state_accessor::Data for KeyValueDatabase { - async fn state_full_ids(&self, shortstatehash: u64) -> Result>> { + async fn state_full_ids(&self, shortstatehash: u64) -> Result>> { let full_state = services() .rooms .state_compressor @@ -14,9 +17,9 @@ impl service::rooms::state_accessor::Data for KeyValueDatabase { .pop() .expect("there is always one layer") .1; - let mut result = HashMap::new(); + let mut result = BTreeMap::new(); let mut i = 0; - for compressed in full_state.iter() { + for 
compressed in full_state.into_iter() { let parsed = services() .rooms .state_compressor @@ -45,7 +48,7 @@ impl service::rooms::state_accessor::Data for KeyValueDatabase { let mut result = HashMap::new(); let mut i = 0; - for compressed in full_state.iter() { + for compressed in full_state { let (_, eventid) = services() .rooms .state_compressor @@ -95,7 +98,7 @@ impl service::rooms::state_accessor::Data for KeyValueDatabase { .expect("there is always one layer") .1; Ok(full_state - .iter() + .into_iter() .find(|bytes| bytes.starts_with(&shortstatekey.to_be_bytes())) .and_then(|compressed| { services() diff --git a/src/database/key_value/rooms/state_compressor.rs b/src/database/key_value/rooms/state_compressor.rs index 65ea603e..d0a9be48 100644 --- a/src/database/key_value/rooms/state_compressor.rs +++ b/src/database/key_value/rooms/state_compressor.rs @@ -1,4 +1,4 @@ -use std::{collections::HashSet, mem::size_of, sync::Arc}; +use std::{collections::HashSet, mem::size_of}; use crate::{ database::KeyValueDatabase, @@ -37,20 +37,20 @@ impl service::rooms::state_compressor::Data for KeyValueDatabase { Ok(StateDiff { parent, - added: Arc::new(added), - removed: Arc::new(removed), + added, + removed, }) } fn save_statediff(&self, shortstatehash: u64, diff: StateDiff) -> Result<()> { let mut value = diff.parent.unwrap_or(0).to_be_bytes().to_vec(); - for new in diff.added.iter() { + for new in &diff.added { value.extend_from_slice(&new[..]); } if !diff.removed.is_empty() { value.extend_from_slice(&0_u64.to_be_bytes()); - for removed in diff.removed.iter() { + for removed in &diff.removed { value.extend_from_slice(&removed[..]); } } diff --git a/src/database/key_value/rooms/threads.rs b/src/database/key_value/rooms/threads.rs deleted file mode 100644 index 4be289b0..00000000 --- a/src/database/key_value/rooms/threads.rs +++ /dev/null @@ -1,78 +0,0 @@ -use std::mem; - -use ruma::{api::client::threads::get_threads::v1::IncludeThreads, OwnedUserId, RoomId, UserId}; - -use crate::{database::KeyValueDatabase, service, services, utils, Error, PduEvent, Result}; - -impl service::rooms::threads::Data for KeyValueDatabase { - fn threads_until<'a>( - &'a self, - user_id: &'a UserId, - room_id: &'a RoomId, - until: u64, - include: &'a IncludeThreads, - ) -> Result> + 'a>> { - let prefix = services() - .rooms - .short - .get_shortroomid(room_id)? - .expect("room exists") - .to_be_bytes() - .to_vec(); - - let mut current = prefix.clone(); - current.extend_from_slice(&(until - 1).to_be_bytes()); - - Ok(Box::new( - self.threadid_userids - .iter_from(¤t, true) - .take_while(move |(k, _)| k.starts_with(&prefix)) - .map(move |(pduid, users)| { - let count = utils::u64_from_bytes(&pduid[(mem::size_of::())..]) - .map_err(|_| Error::bad_database("Invalid pduid in threadid_userids."))?; - let mut pdu = services() - .rooms - .timeline - .get_pdu_from_id(&pduid)? - .ok_or_else(|| { - Error::bad_database("Invalid pduid reference in threadid_userids") - })?; - if pdu.sender != user_id { - pdu.remove_transaction_id()?; - } - Ok((count, pdu)) - }), - )) - } - - fn update_participants(&self, root_id: &[u8], participants: &[OwnedUserId]) -> Result<()> { - let users = participants - .iter() - .map(|user| user.as_bytes()) - .collect::>() - .join(&[0xff][..]); - - self.threadid_userids.insert(&root_id, &users)?; - - Ok(()) - } - - fn get_participants(&self, root_id: &[u8]) -> Result>> { - if let Some(users) = self.threadid_userids.get(&root_id)? 
{ - Ok(Some( - users - .split(|b| *b == 0xff) - .map(|bytes| { - UserId::parse(utils::string_from_bytes(bytes).map_err(|_| { - Error::bad_database("Invalid UserId bytes in threadid_userids.") - })?) - .map_err(|_| Error::bad_database("Invalid UserId in threadid_userids.")) - }) - .filter_map(|r| r.ok()) - .collect(), - )) - } else { - Ok(None) - } - } -} diff --git a/src/database/key_value/rooms/timeline.rs b/src/database/key_value/rooms/timeline.rs index ba1e85ef..336317da 100644 --- a/src/database/key_value/rooms/timeline.rs +++ b/src/database/key_value/rooms/timeline.rs @@ -7,10 +7,30 @@ use tracing::error; use crate::{database::KeyValueDatabase, service, services, utils, Error, PduEvent, Result}; -use service::rooms::timeline::PduCount; - impl service::rooms::timeline::Data for KeyValueDatabase { - fn last_timeline_count(&self, sender_user: &UserId, room_id: &RoomId) -> Result { + fn first_pdu_in_room(&self, room_id: &RoomId) -> Result>> { + let prefix = services() + .rooms + .short + .get_shortroomid(room_id)? + .expect("room exists") + .to_be_bytes() + .to_vec(); + + // Look for PDUs in that room. + self.pduid_pdu + .iter_from(&prefix, false) + .filter(|(k, _)| k.starts_with(&prefix)) + .map(|(_, pdu)| { + serde_json::from_slice(&pdu) + .map_err(|_| Error::bad_database("Invalid first PDU in db.")) + .map(Arc::new) + }) + .next() + .transpose() + } + + fn last_timeline_count(&self, sender_user: &UserId, room_id: &RoomId) -> Result { match self .lasttimelinecount_cache .lock() @@ -19,18 +39,20 @@ impl service::rooms::timeline::Data for KeyValueDatabase { { hash_map::Entry::Vacant(v) => { if let Some(last_count) = self - .pdus_until(sender_user, room_id, PduCount::max())? - .find_map(|r| { + .pdus_until(sender_user, room_id, u64::MAX)? + .filter_map(|r| { // Filter out buggy events if r.is_err() { error!("Bad pdu in pdus_since: {:?}", r); } r.ok() }) + .map(|(pduid, _)| self.pdu_count(&pduid)) + .next() { - Ok(*v.insert(last_count.0)) + Ok(*v.insert(last_count?)) } else { - Ok(PduCount::Normal(0)) + Ok(0) } } hash_map::Entry::Occupied(o) => Ok(*o.get()), @@ -38,28 +60,29 @@ impl service::rooms::timeline::Data for KeyValueDatabase { } /// Returns the `count` of this pdu's id. - fn get_pdu_count(&self, event_id: &EventId) -> Result> { - Ok(self - .eventid_pduid + fn get_pdu_count(&self, event_id: &EventId) -> Result> { + self.eventid_pduid .get(event_id.as_bytes())? - .map(|pdu_id| pdu_count(&pdu_id)) - .transpose()?) + .map(|pdu_id| self.pdu_count(&pdu_id)) + .transpose() } /// Returns the json of a pdu. fn get_pdu_json(&self, event_id: &EventId) -> Result> { - self.get_non_outlier_pdu_json(event_id)?.map_or_else( - || { - self.eventid_outlierpdu - .get(event_id.as_bytes())? - .map(|pdu| { - serde_json::from_slice(&pdu) - .map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - .transpose() - }, - |x| Ok(Some(x)), - ) + self.eventid_pduid + .get(event_id.as_bytes())? + .map_or_else( + || self.eventid_outlierpdu.get(event_id.as_bytes()), + |pduid| { + Ok(Some(self.pduid_pdu.get(&pduid)?.ok_or_else(|| { + Error::bad_database("Invalid pduid in eventid_pduid.") + })?)) + }, + )? + .map(|pdu| { + serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) + }) + .transpose() } /// Returns the json of a pdu. @@ -80,10 +103,12 @@ impl service::rooms::timeline::Data for KeyValueDatabase { /// Returns the pdu's id. fn get_pdu_id(&self, event_id: &EventId) -> Result>> { - Ok(self.eventid_pduid.get(event_id.as_bytes())?) 
+ self.eventid_pduid.get(event_id.as_bytes()) } /// Returns the pdu. + /// + /// Checks the `eventid_outlierpdu` Tree if not found in the timeline. fn get_non_outlier_pdu(&self, event_id: &EventId) -> Result> { self.eventid_pduid .get(event_id.as_bytes())? @@ -108,20 +133,22 @@ impl service::rooms::timeline::Data for KeyValueDatabase { } if let Some(pdu) = self - .get_non_outlier_pdu(event_id)? + .eventid_pduid + .get(event_id.as_bytes())? .map_or_else( - || { - self.eventid_outlierpdu - .get(event_id.as_bytes())? - .map(|pdu| { - serde_json::from_slice(&pdu) - .map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - .transpose() + || self.eventid_outlierpdu.get(event_id.as_bytes()), + |pduid| { + Ok(Some(self.pduid_pdu.get(&pduid)?.ok_or_else(|| { + Error::bad_database("Invalid pduid in eventid_pduid.") + })?)) }, - |x| Ok(Some(x)), )? - .map(Arc::new) + .map(|pdu| { + serde_json::from_slice(&pdu) + .map_err(|_| Error::bad_database("Invalid PDU in db.")) + .map(Arc::new) + }) + .transpose()? { self.pdu_cache .lock() @@ -155,6 +182,12 @@ impl service::rooms::timeline::Data for KeyValueDatabase { }) } + /// Returns the `count` of this pdu's id. + fn pdu_count(&self, pdu_id: &[u8]) -> Result { + utils::u64_from_bytes(&pdu_id[pdu_id.len() - size_of::()..]) + .map_err(|_| Error::bad_database("PDU has invalid count bytes.")) + } + fn append_pdu( &self, pdu_id: &[u8], @@ -170,7 +203,7 @@ impl service::rooms::timeline::Data for KeyValueDatabase { self.lasttimelinecount_cache .lock() .unwrap() - .insert(pdu.room_id.clone(), PduCount::Normal(count)); + .insert(pdu.room_id.clone(), count); self.eventid_pduid.insert(pdu.event_id.as_bytes(), pdu_id)?; self.eventid_outlierpdu.remove(pdu.event_id.as_bytes())?; @@ -178,48 +211,57 @@ impl service::rooms::timeline::Data for KeyValueDatabase { Ok(()) } - fn prepend_backfill_pdu( - &self, - pdu_id: &[u8], - event_id: &EventId, - json: &CanonicalJsonObject, - ) -> Result<()> { - self.pduid_pdu.insert( - pdu_id, - &serde_json::to_vec(json).expect("CanonicalJsonObject is always a valid"), - )?; - - self.eventid_pduid.insert(event_id.as_bytes(), pdu_id)?; - self.eventid_outlierpdu.remove(event_id.as_bytes())?; - - Ok(()) - } - /// Removes a pdu and creates a new one with the same id. - fn replace_pdu( - &self, - pdu_id: &[u8], - pdu_json: &CanonicalJsonObject, - pdu: &PduEvent, - ) -> Result<()> { + fn replace_pdu(&self, pdu_id: &[u8], pdu: &PduEvent) -> Result<()> { if self.pduid_pdu.get(pdu_id)?.is_some() { self.pduid_pdu.insert( pdu_id, - &serde_json::to_vec(pdu_json).expect("CanonicalJsonObject is always a valid"), + &serde_json::to_vec(pdu).expect("CanonicalJsonObject is always a valid"), )?; + Ok(()) } else { - return Err(Error::BadRequest( + Err(Error::BadRequest( ErrorKind::NotFound, "PDU does not exist.", - )); + )) } + } - self.pdu_cache - .lock() - .unwrap() - .remove(&(*pdu.event_id).to_owned()); + /// Returns an iterator over all events in a room that happened after the event with id `since` + /// in chronological order. + fn pdus_since<'a>( + &'a self, + user_id: &UserId, + room_id: &RoomId, + since: u64, + ) -> Result, PduEvent)>> + 'a>> { + let prefix = services() + .rooms + .short + .get_shortroomid(room_id)? 
+ .expect("room exists") + .to_be_bytes() + .to_vec(); - Ok(()) + // Skip the first pdu if it's exactly at since, because we sent that last time + let mut first_pdu_id = prefix.clone(); + first_pdu_id.extend_from_slice(&(since + 1).to_be_bytes()); + + let user_id = user_id.to_owned(); + + Ok(Box::new( + self.pduid_pdu + .iter_from(&first_pdu_id, false) + .take_while(move |(k, _)| k.starts_with(&prefix)) + .map(move |(pdu_id, v)| { + let mut pdu = serde_json::from_slice::(&v) + .map_err(|_| Error::bad_database("PDU in db is invalid."))?; + if pdu.sender != user_id { + pdu.remove_transaction_id()?; + } + Ok((pdu_id, pdu)) + }), + )) } /// Returns an iterator over all events and their tokens in a room that happened before the @@ -228,15 +270,27 @@ impl service::rooms::timeline::Data for KeyValueDatabase { &'a self, user_id: &UserId, room_id: &RoomId, - until: PduCount, - ) -> Result> + 'a>> { - let (prefix, current) = count_to_id(&room_id, until, 1, true)?; + until: u64, + ) -> Result, PduEvent)>> + 'a>> { + // Create the first part of the full pdu id + let prefix = services() + .rooms + .short + .get_shortroomid(room_id)? + .expect("room exists") + .to_be_bytes() + .to_vec(); + + let mut current = prefix.clone(); + current.extend_from_slice(&(until.saturating_sub(1)).to_be_bytes()); // -1 because we don't want event at `until` + + let current: &[u8] = ¤t; let user_id = user_id.to_owned(); Ok(Box::new( self.pduid_pdu - .iter_from(¤t, true) + .iter_from(current, true) .take_while(move |(k, _)| k.starts_with(&prefix)) .map(move |(pdu_id, v)| { let mut pdu = serde_json::from_slice::(&v) @@ -244,9 +298,7 @@ impl service::rooms::timeline::Data for KeyValueDatabase { if pdu.sender != user_id { pdu.remove_transaction_id()?; } - pdu.add_age()?; - let count = pdu_count(&pdu_id)?; - Ok((count, pdu)) + Ok((pdu_id, pdu)) }), )) } @@ -255,15 +307,27 @@ impl service::rooms::timeline::Data for KeyValueDatabase { &'a self, user_id: &UserId, room_id: &RoomId, - from: PduCount, - ) -> Result> + 'a>> { - let (prefix, current) = count_to_id(&room_id, from, 1, false)?; + from: u64, + ) -> Result, PduEvent)>> + 'a>> { + // Create the first part of the full pdu id + let prefix = services() + .rooms + .short + .get_shortroomid(room_id)? + .expect("room exists") + .to_be_bytes() + .to_vec(); + + let mut current = prefix.clone(); + current.extend_from_slice(&(from + 1).to_be_bytes()); // +1 so we don't send the base event + + let current: &[u8] = ¤t; let user_id = user_id.to_owned(); Ok(Box::new( self.pduid_pdu - .iter_from(¤t, false) + .iter_from(current, false) .take_while(move |(k, _)| k.starts_with(&prefix)) .map(move |(pdu_id, v)| { let mut pdu = serde_json::from_slice::(&v) @@ -271,9 +335,7 @@ impl service::rooms::timeline::Data for KeyValueDatabase { if pdu.sender != user_id { pdu.remove_transaction_id()?; } - pdu.add_age()?; - let count = pdu_count(&pdu_id)?; - Ok((count, pdu)) + Ok((pdu_id, pdu)) }), )) } @@ -306,60 +368,3 @@ impl service::rooms::timeline::Data for KeyValueDatabase { Ok(()) } } - -/// Returns the `count` of this pdu's id. 
-fn pdu_count(pdu_id: &[u8]) -> Result { - let last_u64 = utils::u64_from_bytes(&pdu_id[pdu_id.len() - size_of::()..]) - .map_err(|_| Error::bad_database("PDU has invalid count bytes."))?; - let second_last_u64 = utils::u64_from_bytes( - &pdu_id[pdu_id.len() - 2 * size_of::()..pdu_id.len() - size_of::()], - ); - - if matches!(second_last_u64, Ok(0)) { - Ok(PduCount::Backfilled(u64::MAX - last_u64)) - } else { - Ok(PduCount::Normal(last_u64)) - } -} - -fn count_to_id( - room_id: &RoomId, - count: PduCount, - offset: u64, - subtract: bool, -) -> Result<(Vec, Vec)> { - let prefix = services() - .rooms - .short - .get_shortroomid(room_id)? - .expect("room exists") - .to_be_bytes() - .to_vec(); - let mut pdu_id = prefix.clone(); - // +1 so we don't send the base event - let count_raw = match count { - PduCount::Normal(x) => { - if subtract { - x - offset - } else { - x + offset - } - } - PduCount::Backfilled(x) => { - pdu_id.extend_from_slice(&0_u64.to_be_bytes()); - let num = u64::MAX - x; - if subtract { - if num > 0 { - num - offset - } else { - num - } - } else { - num + offset - } - } - }; - pdu_id.extend_from_slice(&count_raw.to_be_bytes()); - - Ok((prefix, pdu_id)) -} diff --git a/src/database/key_value/users.rs b/src/database/key_value/users.rs index 2b09d684..cd5a5352 100644 --- a/src/database/key_value/users.rs +++ b/src/database/key_value/users.rs @@ -1,7 +1,7 @@ use std::{collections::BTreeMap, mem::size_of}; use ruma::{ - api::client::{device::Device, error::ErrorKind, filter::FilterDefinition}, + api::client::{device::Device, error::ErrorKind, filter::IncomingFilterDefinition}, encryption::{CrossSigningKey, DeviceKeys, OneTimeKey}, events::{AnyToDeviceEvent, StateEventType}, serde::Raw, @@ -449,13 +449,33 @@ impl service::users::Data for KeyValueDatabase { master_key: &Raw, self_signing_key: &Option>, user_signing_key: &Option>, - notify: bool, ) -> Result<()> { // TODO: Check signatures + let mut prefix = user_id.as_bytes().to_vec(); prefix.push(0xff); - let (master_key_key, _) = self.parse_master_key(user_id, master_key)?; + // Master key + let mut master_key_ids = master_key + .deserialize() + .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid master key"))? + .keys + .into_values(); + + let master_key_id = master_key_ids.next().ok_or(Error::BadRequest( + ErrorKind::InvalidParam, + "Master key contained no key.", + ))?; + + if master_key_ids.next().is_some() { + return Err(Error::BadRequest( + ErrorKind::InvalidParam, + "Master key contained more than one key.", + )); + } + + let mut master_key_key = prefix.clone(); + master_key_key.extend_from_slice(master_key_id.as_bytes()); self.keyid_key .insert(&master_key_key, master_key.json().get().as_bytes())?; @@ -531,9 +551,7 @@ impl service::users::Data for KeyValueDatabase { .insert(user_id.as_bytes(), &user_signing_key_key)?; } - if notify { - self.mark_device_key_update(user_id)?; - } + self.mark_device_key_update(user_id)?; Ok(()) } @@ -574,6 +592,7 @@ impl service::users::Data for KeyValueDatabase { &serde_json::to_vec(&cross_signing_key).expect("CrossSigningKey::to_vec always works"), )?; + // TODO: Should we notify about this change? 
self.mark_device_key_update(target_id)?; Ok(()) @@ -672,80 +691,45 @@ impl service::users::Data for KeyValueDatabase { }) } - fn parse_master_key( - &self, - user_id: &UserId, - master_key: &Raw, - ) -> Result<(Vec, CrossSigningKey)> { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - - let master_key = master_key - .deserialize() - .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid master key"))?; - let mut master_key_ids = master_key.keys.values(); - let master_key_id = master_key_ids.next().ok_or(Error::BadRequest( - ErrorKind::InvalidParam, - "Master key contained no key.", - ))?; - if master_key_ids.next().is_some() { - return Err(Error::BadRequest( - ErrorKind::InvalidParam, - "Master key contained more than one key.", - )); - } - let mut master_key_key = prefix.clone(); - master_key_key.extend_from_slice(master_key_id.as_bytes()); - Ok((master_key_key, master_key)) - } - - fn get_key( - &self, - key: &[u8], - sender_user: Option<&UserId>, - user_id: &UserId, - allowed_signatures: &dyn Fn(&UserId) -> bool, - ) -> Result>> { - self.keyid_key.get(key)?.map_or(Ok(None), |bytes| { - let mut cross_signing_key = serde_json::from_slice::(&bytes) - .map_err(|_| Error::bad_database("CrossSigningKey in db is invalid."))?; - clean_signatures( - &mut cross_signing_key, - sender_user, - user_id, - allowed_signatures, - )?; - - Ok(Some(Raw::from_json( - serde_json::value::to_raw_value(&cross_signing_key) - .expect("Value to RawValue serialization"), - ))) - }) - } - fn get_master_key( &self, - sender_user: Option<&UserId>, user_id: &UserId, allowed_signatures: &dyn Fn(&UserId) -> bool, ) -> Result>> { self.userid_masterkeyid .get(user_id.as_bytes())? .map_or(Ok(None), |key| { - self.get_key(&key, sender_user, user_id, allowed_signatures) + self.keyid_key.get(&key)?.map_or(Ok(None), |bytes| { + let mut cross_signing_key = serde_json::from_slice::(&bytes) + .map_err(|_| Error::bad_database("CrossSigningKey in db is invalid."))?; + clean_signatures(&mut cross_signing_key, user_id, allowed_signatures)?; + + Ok(Some(Raw::from_json( + serde_json::value::to_raw_value(&cross_signing_key) + .expect("Value to RawValue serialization"), + ))) + }) }) } fn get_self_signing_key( &self, - sender_user: Option<&UserId>, user_id: &UserId, allowed_signatures: &dyn Fn(&UserId) -> bool, ) -> Result>> { self.userid_selfsigningkeyid .get(user_id.as_bytes())? .map_or(Ok(None), |key| { - self.get_key(&key, sender_user, user_id, allowed_signatures) + self.keyid_key.get(&key)?.map_or(Ok(None), |bytes| { + let mut cross_signing_key = serde_json::from_slice::(&bytes) + .map_err(|_| Error::bad_database("CrossSigningKey in db is invalid."))?; + clean_signatures(&mut cross_signing_key, user_id, allowed_signatures)?; + + Ok(Some(Raw::from_json( + serde_json::value::to_raw_value(&cross_signing_key) + .expect("Value to RawValue serialization"), + ))) + }) }) } @@ -915,7 +899,7 @@ impl service::users::Data for KeyValueDatabase { } /// Creates a new sync filter. Returns the filter id. 
- fn create_filter(&self, user_id: &UserId, filter: &FilterDefinition) -> Result { + fn create_filter(&self, user_id: &UserId, filter: &IncomingFilterDefinition) -> Result { let filter_id = utils::random_string(4); let mut key = user_id.as_bytes().to_vec(); @@ -930,7 +914,11 @@ impl service::users::Data for KeyValueDatabase { Ok(filter_id) } - fn get_filter(&self, user_id: &UserId, filter_id: &str) -> Result> { + fn get_filter( + &self, + user_id: &UserId, + filter_id: &str, + ) -> Result> { let mut key = user_id.as_bytes().to_vec(); key.push(0xff); key.extend_from_slice(filter_id.as_bytes()); @@ -946,8 +934,6 @@ impl service::users::Data for KeyValueDatabase { } } -impl KeyValueDatabase {} - /// Will only return with Some(username) if the password was not empty and the /// username could be successfully parsed. /// If utils::string_from_bytes(...) returns an error that username will be skipped diff --git a/src/database/mod.rs b/src/database/mod.rs index e247d9f0..7baa512a 100644 --- a/src/database/mod.rs +++ b/src/database/mod.rs @@ -1,24 +1,19 @@ pub mod abstraction; pub mod key_value; -use crate::{ - service::rooms::timeline::PduCount, services, utils, Config, Error, PduEvent, Result, Services, - SERVICES, -}; +use crate::{services, utils, Config, Error, PduEvent, Result, Services, SERVICES}; use abstraction::{KeyValueDatabaseEngine, KvTree}; use directories::ProjectDirs; use lru_cache::LruCache; use ruma::{ events::{ - push_rules::{PushRulesEvent, PushRulesEventContent}, - room::message::RoomMessageEventContent, + push_rules::PushRulesEventContent, room::message::RoomMessageEventContent, GlobalAccountDataEvent, GlobalAccountDataEventType, StateEventType, }, push::Ruleset, CanonicalJsonValue, EventId, OwnedDeviceId, OwnedEventId, OwnedRoomId, OwnedUserId, RoomId, UserId, }; -use serde::Deserialize; use std::{ collections::{BTreeMap, HashMap, HashSet}, fs::{self, remove_dir_all}, @@ -26,9 +21,7 @@ use std::{ mem::size_of, path::Path, sync::{Arc, Mutex, RwLock}, - time::Duration, }; -use tokio::time::interval; use tracing::{debug, error, info, warn}; @@ -72,8 +65,8 @@ pub struct KeyValueDatabase { pub(super) roomuserid_lastprivatereadupdate: Arc, // LastPrivateReadUpdate = Count pub(super) typingid_userid: Arc, // TypingId = RoomId + TimeoutTime + Count pub(super) roomid_lasttypingupdate: Arc, // LastRoomTypingUpdate = Count - pub(super) presenceid_presence: Arc, // PresenceId = RoomId + Count + UserId - pub(super) userid_lastpresenceupdate: Arc, // LastPresenceUpdate = Count + pub(super) userid_presenceupdate: Arc, // PresenceUpdate = Count + Timestamp + pub(super) roomuserid_presenceevent: Arc, // PresenceEvent //pub rooms: rooms::Rooms, pub(super) pduid_pdu: Arc, // PduId = ShortRoomId + Count @@ -83,8 +76,6 @@ pub struct KeyValueDatabase { pub(super) aliasid_alias: Arc, // AliasId = RoomId + Count pub(super) publicroomids: Arc, - pub(super) threadid_userids: Arc, // ThreadId = RoomId + Count - pub(super) tokenids: Arc, // TokenId = ShortRoomId + Token + PduIdCount /// Participating servers in a room. @@ -133,8 +124,6 @@ pub struct KeyValueDatabase { pub(super) eventid_outlierpdu: Arc, pub(super) softfailedeventids: Arc, - /// ShortEventId + ShortEventId -> (). - pub(super) tofrom_relation: Arc, /// RoomId + EventId -> Parent PDU EventId. 
pub(super) referencedevents: Arc, @@ -171,7 +160,7 @@ pub struct KeyValueDatabase { pub(super) shortstatekey_cache: Mutex>, pub(super) our_real_users_cache: RwLock>>>, pub(super) appservice_in_room_cache: RwLock>>, - pub(super) lasttimelinecount_cache: Mutex>, + pub(super) lasttimelinecount_cache: Mutex>, } impl KeyValueDatabase { @@ -267,12 +256,8 @@ impl KeyValueDatabase { } }; - if config.registration_token == Some(String::new()) { - return Err(Error::bad_config("Registration token is empty")); - } - if config.max_request_size < 1024 { - error!(?config.max_request_size, "Max request size is less than 1KB. Please increase it."); + eprintln!("ERROR: Max request size is less than 1KB. Please increase it."); } let db_raw = Box::new(Self { @@ -303,8 +288,8 @@ impl KeyValueDatabase { .open_tree("roomuserid_lastprivatereadupdate")?, typingid_userid: builder.open_tree("typingid_userid")?, roomid_lasttypingupdate: builder.open_tree("roomid_lasttypingupdate")?, - presenceid_presence: builder.open_tree("presenceid_presence")?, - userid_lastpresenceupdate: builder.open_tree("userid_lastpresenceupdate")?, + userid_presenceupdate: builder.open_tree("userid_presenceupdate")?, + roomuserid_presenceevent: builder.open_tree("roomuserid_presenceevent")?, pduid_pdu: builder.open_tree("pduid_pdu")?, eventid_pduid: builder.open_tree("eventid_pduid")?, roomid_pduleaves: builder.open_tree("roomid_pduleaves")?, @@ -313,8 +298,6 @@ impl KeyValueDatabase { aliasid_alias: builder.open_tree("aliasid_alias")?, publicroomids: builder.open_tree("publicroomids")?, - threadid_userids: builder.open_tree("threadid_userids")?, - tokenids: builder.open_tree("tokenids")?, roomserverids: builder.open_tree("roomserverids")?, @@ -355,7 +338,6 @@ impl KeyValueDatabase { eventid_outlierpdu: builder.open_tree("eventid_outlierpdu")?, softfailedeventids: builder.open_tree("softfailedeventids")?, - tofrom_relation: builder.open_tree("tofrom_relation")?, referencedevents: builder.open_tree("referencedevents")?, roomuserdataid_accountdata: builder.open_tree("roomuserdataid_accountdata")?, roomusertype_roomuserdataid: builder.open_tree("roomusertype_roomuserdataid")?, @@ -425,7 +407,7 @@ impl KeyValueDatabase { } // If the database has any data, perform data migrations before starting - let latest_database_version = 13; + let latest_database_version = 11; if services().users.count()? > 0 { // MIGRATIONS @@ -500,7 +482,7 @@ impl KeyValueDatabase { for user in services().rooms.state_cache.room_members(&room?) 
{ let user = user?; if user.server_name() != services().globals.server_name() { - info!(?user, "Migration: creating user"); + println!("Migration: Creating user {}", user); services().users.create(&user, None)?; } } @@ -562,6 +544,7 @@ impl KeyValueDatabase { current_state: HashSet<_>, last_roomstates: &mut HashMap<_, _>| { counter += 1; + println!("counter: {}", counter); let last_roomsstatehash = last_roomstates.get(current_room); let states_parents = last_roomsstatehash.map_or_else( @@ -594,8 +577,8 @@ impl KeyValueDatabase { services().rooms.state_compressor.save_state_from_diff( current_sstatehash, - Arc::new(statediffnew), - Arc::new(statediffremoved), + statediffnew, + statediffremoved, 2, // every state change is 2 event changes on average states_parents, )?; @@ -758,13 +741,15 @@ impl KeyValueDatabase { new_key.extend_from_slice(word); new_key.push(0xff); new_key.extend_from_slice(pdu_id_count); + println!("old {:?}", key); + println!("new {:?}", new_key); Some((new_key, Vec::new())) }) .peekable(); while iter.peek().is_some() { db.tokenids.insert_batch(&mut iter.by_ref().take(1000))?; - debug!("Inserted smaller batch"); + println!("smaller batch done"); } info!("Deleting starts"); @@ -774,6 +759,7 @@ impl KeyValueDatabase { .iter() .filter_map(|(key, _)| { if key.starts_with(b"!") { + println!("del {:?}", key); Some(key) } else { None @@ -782,6 +768,7 @@ impl KeyValueDatabase { .collect(); for key in batch2 { + println!("del"); db.tokenids.remove(&key)?; } @@ -816,134 +803,7 @@ impl KeyValueDatabase { warn!("Migration: 10 -> 11 finished"); } - if services().globals.database_version()? < 12 { - for username in services().users.list_local_users()? { - let user = match UserId::parse_with_server_name( - username.clone(), - services().globals.server_name(), - ) { - Ok(u) => u, - Err(e) => { - warn!("Invalid username {username}: {e}"); - continue; - } - }; - - let raw_rules_list = services() - .account_data - .get( - None, - &user, - GlobalAccountDataEventType::PushRules.to_string().into(), - ) - .unwrap() - .expect("Username is invalid"); - - let mut account_data = - serde_json::from_str::(raw_rules_list.get()).unwrap(); - let rules_list = &mut account_data.content.global; - - //content rule - { - let content_rule_transformation = - [".m.rules.contains_user_name", ".m.rule.contains_user_name"]; - - let rule = rules_list.content.get(content_rule_transformation[0]); - if rule.is_some() { - let mut rule = rule.unwrap().clone(); - rule.rule_id = content_rule_transformation[1].to_owned(); - rules_list.content.remove(content_rule_transformation[0]); - rules_list.content.insert(rule); - } - } - - //underride rules - { - let underride_rule_transformation = [ - [".m.rules.call", ".m.rule.call"], - [".m.rules.room_one_to_one", ".m.rule.room_one_to_one"], - [ - ".m.rules.encrypted_room_one_to_one", - ".m.rule.encrypted_room_one_to_one", - ], - [".m.rules.message", ".m.rule.message"], - [".m.rules.encrypted", ".m.rule.encrypted"], - ]; - - for transformation in underride_rule_transformation { - let rule = rules_list.underride.get(transformation[0]); - if let Some(rule) = rule { - let mut rule = rule.clone(); - rule.rule_id = transformation[1].to_owned(); - rules_list.underride.remove(transformation[0]); - rules_list.underride.insert(rule); - } - } - } - - services().account_data.update( - None, - &user, - GlobalAccountDataEventType::PushRules.to_string().into(), - &serde_json::to_value(account_data).expect("to json value always works"), - )?; - } - - services().globals.bump_database_version(12)?; 
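// A condensed sketch of the migration pattern this block follows, assuming the same
// `database_version()` / `bump_database_version()` semantics shown above; the closure
// stands in for an arbitrary migration body and the version is kept in memory here
// purely for illustration.
fn run_migration(
    current: &mut u64,
    target: u64,
    migrate: impl FnOnce() -> Result<(), String>,
) -> Result<(), String> {
    if *current < target {
        migrate()?;        // apply the data transformation for this step
        *current = target; // then persist the new version (bump_database_version)
        eprintln!("Migration: {} -> {} finished", target - 1, target);
    }
    Ok(())
}

fn main() -> Result<(), String> {
    let mut version = 10;
    run_migration(&mut version, 11, || Ok(()))?; // e.g. rebuild the tokenids tree
    run_migration(&mut version, 12, || Ok(()))?; // e.g. rename the default push rules
    assert_eq!(version, 12);
    Ok(())
}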
- - warn!("Migration: 11 -> 12 finished"); - } - - // This migration can be reused as-is anytime the server-default rules are updated. - if services().globals.database_version()? < 13 { - for username in services().users.list_local_users()? { - let user = match UserId::parse_with_server_name( - username.clone(), - services().globals.server_name(), - ) { - Ok(u) => u, - Err(e) => { - warn!("Invalid username {username}: {e}"); - continue; - } - }; - - let raw_rules_list = services() - .account_data - .get( - None, - &user, - GlobalAccountDataEventType::PushRules.to_string().into(), - ) - .unwrap() - .expect("Username is invalid"); - - let mut account_data = - serde_json::from_str::(raw_rules_list.get()).unwrap(); - - let user_default_rules = ruma::push::Ruleset::server_default(&user); - account_data - .content - .global - .update_with_server_default(user_default_rules); - - services().account_data.update( - None, - &user, - GlobalAccountDataEventType::PushRules.to_string().into(), - &serde_json::to_value(account_data).expect("to json value always works"), - )?; - } - - services().globals.bump_database_version(13)?; - - warn!("Migration: 12 -> 13 finished"); - } - - assert_eq!( - services().globals.database_version().unwrap(), - latest_database_version - ); + assert_eq!(11, latest_database_version); info!( "Loaded {} database with version {}", @@ -965,9 +825,6 @@ impl KeyValueDatabase { ); } - // This data is probably outdated - db.presenceid_presence.clear()?; - services().admin.start_handler(); // Set emergency access for the conduit user @@ -989,9 +846,6 @@ impl KeyValueDatabase { services().sending.start_handler(); Self::start_cleanup_task().await; - if services().globals.allow_check_for_updates() { - Self::start_check_for_updates_task(); - } Ok(()) } @@ -1007,64 +861,13 @@ impl KeyValueDatabase { res } - #[tracing::instrument] - pub fn start_check_for_updates_task() { - tokio::spawn(async move { - let timer_interval = Duration::from_secs(60 * 60); - let mut i = interval(timer_interval); - loop { - i.tick().await; - let _ = Self::try_handle_updates().await; - } - }); - } - - async fn try_handle_updates() -> Result<()> { - let response = services() - .globals - .default_client() - .get("https://conduit.rs/check-for-updates/stable") - .send() - .await?; - - #[derive(Deserialize)] - struct CheckForUpdatesResponseEntry { - id: u64, - date: String, - message: String, - } - #[derive(Deserialize)] - struct CheckForUpdatesResponse { - updates: Vec, - } - - let response = serde_json::from_str::(&response.text().await?) - .map_err(|_| Error::BadServerResponse("Bad version check response"))?; - - let mut last_update_id = services().globals.last_check_for_updates_id()?; - for update in response.updates { - last_update_id = last_update_id.max(update.id); - if update.id > services().globals.last_check_for_updates_id()? { - println!("{}", update.message); - services() - .admin - .send_message(RoomMessageEventContent::text_plain(format!( - "@room: The following is a message from the Conduit developers. It was sent on '{}':\n\n{}", - update.date, update.message - ))) - } - } - services() - .globals - .update_check_for_updates_id(last_update_id)?; - - Ok(()) - } - #[tracing::instrument] pub async fn start_cleanup_task() { + use tokio::time::interval; + #[cfg(unix)] use tokio::signal::unix::{signal, SignalKind}; + use tracing::info; use std::time::{Duration, Instant}; @@ -1080,23 +883,23 @@ impl KeyValueDatabase { #[cfg(unix)] tokio::select! 
{ _ = i.tick() => { - debug!("cleanup: Timer ticked"); + info!("cleanup: Timer ticked"); } _ = s.recv() => { - debug!("cleanup: Received SIGHUP"); + info!("cleanup: Received SIGHUP"); } }; #[cfg(not(unix))] { i.tick().await; - debug!("cleanup: Timer ticked") + info!("cleanup: Timer ticked") } let start = Instant::now(); if let Err(e) = services().globals.cleanup() { error!("cleanup: Errored: {}", e); } else { - debug!("cleanup: Finished in {:?}", start.elapsed()); + info!("cleanup: Finished in {:?}", start.elapsed()); } } }); diff --git a/src/main.rs b/src/main.rs index c74d6ddb..d2183a39 100644 --- a/src/main.rs +++ b/src/main.rs @@ -2,16 +2,16 @@ rust_2018_idioms, unused_qualifications, clippy::cloned_instead_of_copied, - clippy::str_to_string, - clippy::future_not_send + clippy::str_to_string )] #![allow(clippy::suspicious_else_formatting)] #![deny(clippy::dbg_macro)] -use std::{future::Future, io, net::SocketAddr, sync::atomic, time::Duration}; +use std::{future::Future, io, net::SocketAddr, time::Duration}; use axum::{ - extract::{DefaultBodyLimit, FromRequestParts, MatchedPath}, + extract::{DefaultBodyLimit, FromRequest, MatchedPath}, + handler::Handler, response::IntoResponse, routing::{get, on, MethodFilter}, Router, @@ -26,9 +26,10 @@ use http::{ header::{self, HeaderName}, Method, StatusCode, Uri, }; +use opentelemetry::trace::{FutureExt, Tracer}; use ruma::api::{ client::{ - error::{Error as RumaError, ErrorBody, ErrorKind}, + error::{Error as RumaError, ErrorKind}, uiaa::UiaaResponse, }, IncomingRequest, @@ -40,7 +41,7 @@ use tower_http::{ trace::TraceLayer, ServiceBuilderExt as _, }; -use tracing::{debug, error, info, warn}; +use tracing::{info, warn}; use tracing_subscriber::{prelude::*, EnvFilter}; pub use conduit::*; // Re-export everything from the library crate @@ -54,7 +55,7 @@ static GLOBAL: Jemalloc = Jemalloc; #[tokio::main] async fn main() { - // Initialize config + // Initialize DB let raw_config = Figment::new() .merge( @@ -68,86 +69,65 @@ async fn main() { let config = match raw_config.extract::() { Ok(s) => s, Err(e) => { - eprintln!("It looks like your config is invalid. The following error occurred: {e}"); + eprintln!("It looks like your config is invalid. The following error occured while parsing it: {}", e); std::process::exit(1); } }; config.warn_deprecated(); - let log = format!("{},ruma_state_res=error,_=off,sled=off", config.log); + if let Err(e) = KeyValueDatabase::load_or_create(config).await { + eprintln!( + "The database couldn't be loaded or created. The following error occured: {}", + e + ); + std::process::exit(1); + }; + + let config = &services().globals.config; + + let start = async { + run_server().await.unwrap(); + }; if config.allow_jaeger { opentelemetry::global::set_text_map_propagator(opentelemetry_jaeger::Propagator::new()); let tracer = opentelemetry_jaeger::new_agent_pipeline() - .with_auto_split_batch(true) - .with_service_name("conduit") .install_batch(opentelemetry::runtime::Tokio) .unwrap(); - let telemetry = tracing_opentelemetry::layer().with_tracer(tracer); - let filter_layer = match EnvFilter::try_new(&log) { - Ok(s) => s, - Err(e) => { - eprintln!( - "It looks like your log config is invalid. 
The following error occurred: {e}" - ); - EnvFilter::try_new("warn").unwrap() - } - }; + let span = tracer.start("conduit"); + start.with_current_context().await; + drop(span); - let subscriber = tracing_subscriber::Registry::default() - .with(filter_layer) - .with(telemetry); - tracing::subscriber::set_global_default(subscriber).unwrap(); - } else if config.tracing_flame { - let registry = tracing_subscriber::Registry::default(); - let (flame_layer, _guard) = - tracing_flame::FlameLayer::with_file("./tracing.folded").unwrap(); - let flame_layer = flame_layer.with_empty_samples(false); - - let filter_layer = EnvFilter::new("trace,h2=off"); - - let subscriber = registry.with(filter_layer).with(flame_layer); - tracing::subscriber::set_global_default(subscriber).unwrap(); + println!("exporting"); + opentelemetry::global::shutdown_tracer_provider(); } else { let registry = tracing_subscriber::Registry::default(); - let fmt_layer = tracing_subscriber::fmt::Layer::new(); - let filter_layer = match EnvFilter::try_new(&log) { - Ok(s) => s, - Err(e) => { - eprintln!("It looks like your config is invalid. The following error occured while parsing it: {e}"); - EnvFilter::try_new("warn").unwrap() - } - }; + if config.tracing_flame { + let (flame_layer, _guard) = + tracing_flame::FlameLayer::with_file("./tracing.folded").unwrap(); + let flame_layer = flame_layer.with_empty_samples(false); - let subscriber = registry.with(filter_layer).with(fmt_layer); - tracing::subscriber::set_global_default(subscriber).unwrap(); - } + let filter_layer = EnvFilter::new("trace,h2=off"); - // This is needed for opening lots of file descriptors, which tends to - // happen more often when using RocksDB and making lots of federation - // connections at startup. The soft limit is usually 1024, and the hard - // limit is usually 512000; I've personally seen it hit >2000. - // - // * https://www.freedesktop.org/software/systemd/man/systemd.exec.html#id-1.12.2.1.17.6 - // * https://github.com/systemd/systemd/commit/0abf94923b4a95a7d89bc526efc84e7ca2b71741 - #[cfg(unix)] - maximize_fd_limit().expect("should be able to increase the soft limit to the hard limit"); + let subscriber = registry.with(filter_layer).with(flame_layer); + tracing::subscriber::set_global_default(subscriber).unwrap(); + start.await; + } else { + let fmt_layer = tracing_subscriber::fmt::Layer::new(); + let filter_layer = match EnvFilter::try_new(&config.log) { + Ok(s) => s, + Err(e) => { + eprintln!("It looks like your log config is invalid. 
The following error occurred: {}", e); + EnvFilter::try_new("warn").unwrap() + } + }; - info!("Loading database"); - if let Err(error) = KeyValueDatabase::load_or_create(config).await { - error!(?error, "The database couldn't be loaded or created"); - - std::process::exit(1); - }; - let config = &services().globals.config; - - info!("Starting server"); - run_server().await.unwrap(); - - if config.allow_jaeger { - opentelemetry::global::shutdown_tracer_provider(); + let subscriber = registry.with(filter_layer).with(fmt_layer); + tracing::subscriber::set_global_default(subscriber).unwrap(); + start.await; + } } } @@ -159,7 +139,6 @@ async fn run_server() -> io::Result<()> { let middlewares = ServiceBuilder::new() .sensitive_headers([header::AUTHORIZATION]) - .layer(axum::middleware::from_fn(spawn_task)) .layer( TraceLayer::new_for_http().make_span_with(|request: &http::Request<_>| { let path = if let Some(path) = request.extensions().get::() { @@ -171,6 +150,7 @@ async fn run_server() -> io::Result<()> { tracing::info_span!("http_request", %path) }), ) + .compression() .layer(axum::middleware::from_fn(unrecognized_method)) .layer( CorsLayer::new() @@ -206,36 +186,18 @@ async fn run_server() -> io::Result<()> { match &config.tls { Some(tls) => { let conf = RustlsConfig::from_pem_file(&tls.certs, &tls.key).await?; - let server = bind_rustls(addr, conf).handle(handle).serve(app); - - #[cfg(feature = "systemd")] - let _ = sd_notify::notify(true, &[sd_notify::NotifyState::Ready]); - - server.await? + bind_rustls(addr, conf).handle(handle).serve(app).await?; } None => { - let server = bind(addr).handle(handle).serve(app); - - #[cfg(feature = "systemd")] - let _ = sd_notify::notify(true, &[sd_notify::NotifyState::Ready]); - - server.await? + bind(addr).handle(handle).serve(app).await?; } } - Ok(()) -} + // On shutdown + info!(target: "shutdown-sync", "Received shutdown notification, notifying sync helpers..."); + services().globals.rotate.fire(); -async fn spawn_task( - req: axum::http::Request, - next: axum::middleware::Next, -) -> std::result::Result { - if services().globals.shutdown.load(atomic::Ordering::Relaxed) { - return Err(StatusCode::SERVICE_UNAVAILABLE); - } - tokio::spawn(next.run(req)) - .await - .map_err(|_| StatusCode::INTERNAL_SERVER_ERROR) + Ok(()) } async fn unrecognized_method( @@ -248,10 +210,8 @@ async fn unrecognized_method( if inner.status() == axum::http::StatusCode::METHOD_NOT_ALLOWED { warn!("Method not allowed: {method} {uri}"); return Ok(RumaResponse(UiaaResponse::MatrixError(RumaError { - body: ErrorBody::Standard { - kind: ErrorKind::Unrecognized, - message: "M_UNRECOGNIZED: Unrecognized request".to_owned(), - }, + kind: ErrorKind::Unrecognized, + message: "M_UNRECOGNIZED: Unrecognized request".to_owned(), status_code: StatusCode::METHOD_NOT_ALLOWED, })) .into_response()); @@ -369,7 +329,6 @@ fn routes() -> Router { .put(client_server::send_state_event_for_empty_key_route), ) .ruma_route(client_server::sync_events_route) - .ruma_route(client_server::sync_events_v4_route) .ruma_route(client_server::get_context_route) .ruma_route(client_server::get_message_events_route) .ruma_route(client_server::search_events_route) @@ -395,11 +354,6 @@ fn routes() -> Router { .ruma_route(client_server::set_pushers_route) // .ruma_route(client_server::third_party_route) .ruma_route(client_server::upgrade_room_route) - .ruma_route(client_server::get_threads_route) - .ruma_route(client_server::get_relating_events_with_rel_type_and_event_type_route) - 
.ruma_route(client_server::get_relating_events_with_rel_type_route) - .ruma_route(client_server::get_relating_events_route) - .ruma_route(client_server::get_hierarchy_route) .ruma_route(server_server::get_server_version_route) .route( "/_matrix/key/v2/server", @@ -413,7 +367,6 @@ fn routes() -> Router { .ruma_route(server_server::get_public_rooms_filtered_route) .ruma_route(server_server::send_transaction_message_route) .ruma_route(server_server::get_event_route) - .ruma_route(server_server::get_backfill_route) .ruma_route(server_server::get_missing_events_route) .ruma_route(server_server::get_event_authorization_route) .ruma_route(server_server::get_room_state_route) @@ -435,8 +388,7 @@ fn routes() -> Router { "/_matrix/client/v3/rooms/:room_id/initialSync", get(initial_sync), ) - .route("/", get(it_works)) - .fallback(not_found) + .fallback(not_found.into_service()) } async fn shutdown_signal(handle: ServerHandle) { @@ -466,11 +418,6 @@ async fn shutdown_signal(handle: ServerHandle) { warn!("Received {}, shutting down...", sig); handle.graceful_shutdown(Some(Duration::from_secs(30))); - - services().globals.shutdown(); - - #[cfg(feature = "systemd")] - let _ = sd_notify::notify(true, &[sd_notify::NotifyState::Stopping]); } async fn not_found(uri: Uri) -> impl IntoResponse { @@ -485,10 +432,6 @@ async fn initial_sync(_uri: Uri) -> impl IntoResponse { ) } -async fn it_works() -> &'static str { - "Hello from Conduit!" -} - trait RouterExt { fn ruma_route(self, handler: H) -> Self where @@ -524,7 +467,7 @@ macro_rules! impl_ruma_handler { Fut: Future> + Send, E: IntoResponse, - $( $ty: FromRequestParts<()> + Send + 'static, )* + $( $ty: FromRequest + Send + 'static, )* { fn add_to_router(self, mut router: Router) -> Router { let meta = Req::METADATA; @@ -564,24 +507,6 @@ fn method_to_filter(method: Method) -> MethodFilter { Method::POST => MethodFilter::POST, Method::PUT => MethodFilter::PUT, Method::TRACE => MethodFilter::TRACE, - m => panic!("Unsupported HTTP method: {m:?}"), + m => panic!("Unsupported HTTP method: {:?}", m), } } - -#[cfg(unix)] -#[tracing::instrument(err)] -fn maximize_fd_limit() -> Result<(), nix::errno::Errno> { - use nix::sys::resource::{getrlimit, setrlimit, Resource}; - - let res = Resource::RLIMIT_NOFILE; - - let (soft_limit, hard_limit) = getrlimit(res)?; - - debug!("Current nofile soft limit: {soft_limit}"); - - setrlimit(res, hard_limit, hard_limit)?; - - debug!("Increased nofile soft limit to {hard_limit}"); - - Ok(()) -} diff --git a/src/service/admin/mod.rs b/src/service/admin/mod.rs index b22f8ed4..e2b2fd8d 100644 --- a/src/service/admin/mod.rs +++ b/src/service/admin/mod.rs @@ -1,7 +1,7 @@ use std::{ collections::BTreeMap, convert::{TryFrom, TryInto}, - sync::{Arc, RwLock}, + sync::Arc, time::Instant, }; @@ -21,7 +21,7 @@ use ruma::{ power_levels::RoomPowerLevelsEventContent, topic::RoomTopicEventContent, }, - TimelineEventType, + RoomEventType, }, EventId, OwnedRoomAliasId, RoomAliasId, RoomId, RoomVersionId, ServerName, UserId, }; @@ -134,13 +134,7 @@ enum AdminCommand { }, /// Print database memory usage statistics - MemoryUsage, - - /// Clears all of Conduit's database caches with index smaller than the amount - ClearDatabaseCaches { amount: u32 }, - - /// Clears all of Conduit's service caches with index smaller than the amount - ClearServiceCaches { amount: u32 }, + DatabaseMemoryUsage, /// Show configuration values ShowConfig, @@ -163,20 +157,6 @@ enum AdminCommand { DisableRoom { room_id: Box }, /// Enables incoming federation handling for a room 
again. EnableRoom { room_id: Box }, - - /// Verify json signatures - /// [commandbody] - /// # ``` - /// # json here - /// # ``` - SignJson, - - /// Verify json signatures - /// [commandbody] - /// # ``` - /// # json here - /// # ``` - VerifyJson, } #[derive(Debug)] @@ -232,7 +212,7 @@ impl Service { .timeline .build_and_append_pdu( PduBuilder { - event_type: TimelineEventType::RoomMessage, + event_type: RoomEventType::RoomMessage, content: to_raw_value(&message) .expect("event is valid, we just created it"), unsigned: None, @@ -287,7 +267,7 @@ impl Service { // Parse and process a message from the admin room async fn process_admin_message(&self, room_message: String) -> RoomMessageEventContent { - let mut lines = room_message.lines().filter(|l| !l.trim().is_empty()); + let mut lines = room_message.lines(); let command_line = lines.next().expect("each string has at least one line"); let body: Vec<_> = lines.collect(); @@ -307,11 +287,13 @@ impl Service { Err(error) => { let markdown_message = format!( "Encountered an error while handling the command:\n\ - ```\n{error}\n```", + ```\n{}\n```", + error, ); let html_message = format!( "Encountered an error while handling the command:\n\ -
<pre>\n{error}\n</pre>", + <pre>\n{}\n</pre>
", + error, ); RoomMessageEventContent::text_html(markdown_message, html_message) @@ -356,14 +338,17 @@ impl Service { match parsed_config { Ok(yaml) => match services().appservice.register_appservice(yaml) { Ok(id) => RoomMessageEventContent::text_plain(format!( - "Appservice registered with ID: {id}." + "Appservice registered with ID: {}.", + id )), Err(e) => RoomMessageEventContent::text_plain(format!( - "Failed to register appservice: {e}" + "Failed to register appservice: {}", + e )), }, Err(e) => RoomMessageEventContent::text_plain(format!( - "Could not parse appservice config: {e}" + "Could not parse appservice config: {}", + e )), } } else { @@ -380,7 +365,8 @@ impl Service { { Ok(()) => RoomMessageEventContent::text_plain("Appservice unregistered."), Err(e) => RoomMessageEventContent::text_plain(format!( - "Failed to unregister appservice: {e}" + "Failed to unregister appservice: {}", + e )), }, AdminCommand::ListAppservices => { @@ -473,7 +459,8 @@ impl Service { .count(); let elapsed = start.elapsed(); RoomMessageEventContent::text_plain(format!( - "Loaded auth chain with length {count} in {elapsed:?}" + "Loaded auth chain with length {} in {:?}", + count, elapsed )) } else { RoomMessageEventContent::text_plain("Event not found.") @@ -487,26 +474,30 @@ impl Service { Ok(value) => { match ruma::signatures::reference_hash(&value, &RoomVersionId::V6) { Ok(hash) => { - let event_id = EventId::parse(format!("${hash}")); + let event_id = EventId::parse(format!("${}", hash)); match serde_json::from_value::( serde_json::to_value(value).expect("value is json"), ) { Ok(pdu) => RoomMessageEventContent::text_plain(format!( - "EventId: {event_id:?}\n{pdu:#?}" + "EventId: {:?}\n{:#?}", + event_id, pdu )), Err(e) => RoomMessageEventContent::text_plain(format!( - "EventId: {event_id:?}\nCould not parse event: {e}" + "EventId: {:?}\nCould not parse event: {}", + event_id, e )), } } Err(e) => RoomMessageEventContent::text_plain(format!( - "Could not parse PDU JSON: {e:?}" + "Could not parse PDU JSON: {:?}", + e )), } } Err(e) => RoomMessageEventContent::text_plain(format!( - "Invalid json in command body: {e}" + "Invalid json in command body: {}", + e )), } } else { @@ -551,24 +542,13 @@ impl Service { None => RoomMessageEventContent::text_plain("PDU not found."), } } - AdminCommand::MemoryUsage => { - let response1 = services().memory_usage(); - let response2 = services().globals.db.memory_usage(); - - RoomMessageEventContent::text_plain(format!( - "Services:\n{response1}\n\nDatabase:\n{response2}" - )) - } - AdminCommand::ClearDatabaseCaches { amount } => { - services().globals.db.clear_caches(amount); - - RoomMessageEventContent::text_plain("Done.") - } - AdminCommand::ClearServiceCaches { amount } => { - services().clear_caches(amount); - - RoomMessageEventContent::text_plain("Done.") - } + AdminCommand::DatabaseMemoryUsage => match services().globals.db.memory_usage() { + Ok(response) => RoomMessageEventContent::text_plain(response), + Err(e) => RoomMessageEventContent::text_plain(format!( + "Failed to get database memory usage: {}", + e + )), + }, AdminCommand::ShowConfig => { // Construct and send the response RoomMessageEventContent::text_plain(format!("{}", services().globals.config)) @@ -581,13 +561,15 @@ impl Service { Ok(id) => id, Err(e) => { return Ok(RoomMessageEventContent::text_plain(format!( - "The supplied username is not a valid username: {e}" + "The supplied username is not a valid username: {}", + e ))) } }; // Check if the specified user is valid if 
!services().users.exists(&user_id)? + || services().users.is_deactivated(&user_id)? || user_id == UserId::parse_with_server_name( "conduit", @@ -596,7 +578,7 @@ impl Service { .expect("conduit user exists") { return Ok(RoomMessageEventContent::text_plain( - "The specified user does not exist!", + "The specified user does not exist or is deactivated!", )); } @@ -607,10 +589,12 @@ impl Service { .set_password(&user_id, Some(new_password.as_str())) { Ok(()) => RoomMessageEventContent::text_plain(format!( - "Successfully reset the password for user {user_id}: {new_password}" + "Successfully reset the password for user {}: {}", + user_id, new_password )), Err(e) => RoomMessageEventContent::text_plain(format!( - "Couldn't reset the password for user {user_id}: {e}" + "Couldn't reset the password for user {}: {}", + user_id, e )), } } @@ -625,18 +609,19 @@ impl Service { Ok(id) => id, Err(e) => { return Ok(RoomMessageEventContent::text_plain(format!( - "The supplied username is not a valid username: {e}" + "The supplied username is not a valid username: {}", + e ))) } }; if user_id.is_historical() { return Ok(RoomMessageEventContent::text_plain(format!( - "Userid {user_id} is not allowed due to historical" + "userid {user_id} is not allowed due to historical" ))); } if services().users.exists(&user_id)? { return Ok(RoomMessageEventContent::text_plain(format!( - "Userid {user_id} already exists" + "userid {user_id} already exists" ))); } // Create user @@ -691,7 +676,8 @@ impl Service { let user_id = Arc::::from(user_id); if services().users.exists(&user_id)? { RoomMessageEventContent::text_plain(format!( - "Making {user_id} leave all rooms before deactivation..." + "Making {} leave all rooms before deactivation...", + user_id )); services().users.deactivate_account(&user_id)?; @@ -701,11 +687,13 @@ impl Service { } RoomMessageEventContent::text_plain(format!( - "User {user_id} has been deactivated" + "User {} has been deactivated", + user_id )) } else { RoomMessageEventContent::text_plain(format!( - "User {user_id} doesn't exist on this server" + "User {} doesn't exist on this server", + user_id )) } } @@ -721,7 +709,8 @@ impl Service { Ok(user_id) => user_ids.push(user_id), Err(_) => { return Ok(RoomMessageEventContent::text_plain(format!( - "{username} is not a valid username" + "{} is not a valid username", + username ))) } } @@ -757,7 +746,8 @@ impl Service { if admins.is_empty() { RoomMessageEventContent::text_plain(format!( - "Deactivated {deactivation_count} accounts." + "Deactivated {} accounts.", + deactivation_count )) } else { RoomMessageEventContent::text_plain(format!("Deactivated {} accounts.\nSkipped admin accounts: {:?}. Use --force to deactivate admin accounts", deactivation_count, admins.join(", "))) @@ -768,60 +758,6 @@ impl Service { ) } } - AdminCommand::SignJson => { - if body.len() > 2 && body[0].trim() == "```" && body.last().unwrap().trim() == "```" - { - let string = body[1..body.len() - 1].join("\n"); - match serde_json::from_str(&string) { - Ok(mut value) => { - ruma::signatures::sign_json( - services().globals.server_name().as_str(), - services().globals.keypair(), - &mut value, - ) - .expect("our request json is what ruma expects"); - let json_text = serde_json::to_string_pretty(&value) - .expect("canonical json is valid json"); - RoomMessageEventContent::text_plain(json_text) - } - Err(e) => RoomMessageEventContent::text_plain(format!("Invalid json: {e}")), - } - } else { - RoomMessageEventContent::text_plain( - "Expected code block in command body. 
Add --help for details.", - ) - } - } - AdminCommand::VerifyJson => { - if body.len() > 2 && body[0].trim() == "```" && body.last().unwrap().trim() == "```" - { - let string = body[1..body.len() - 1].join("\n"); - match serde_json::from_str(&string) { - Ok(value) => { - let pub_key_map = RwLock::new(BTreeMap::new()); - - services() - .rooms - .event_handler - .fetch_required_signing_keys(&value, &pub_key_map) - .await?; - - let pub_key_map = pub_key_map.read().unwrap(); - match ruma::signatures::verify_json(&pub_key_map, &value) { - Ok(_) => RoomMessageEventContent::text_plain("Signature correct"), - Err(e) => RoomMessageEventContent::text_plain(format!( - "Signature verification failed: {e}" - )), - } - } - Err(e) => RoomMessageEventContent::text_plain(format!("Invalid json: {e}")), - } - } else { - RoomMessageEventContent::text_plain( - "Expected code block in command body. Add --help for details.", - ) - } - } }; Ok(reply_message_content) @@ -831,8 +767,8 @@ impl Service { fn usage_to_html(&self, text: &str, server_name: &ServerName) -> String { // Replace `@conduit:servername:-subcmdname` with `@conduit:servername: subcmdname` let text = text.replace( - &format!("@conduit:{server_name}:-"), - &format!("@conduit:{server_name}: "), + &format!("@conduit:{}:-", server_name), + &format!("@conduit:{}: ", server_name), ); // For the conduit admin room, subcommands become main commands @@ -932,7 +868,7 @@ impl Service { services().users.create(&conduit_user, None)?; - let mut content = RoomCreateEventContent::new_v1(conduit_user.clone()); + let mut content = RoomCreateEventContent::new(conduit_user.clone()); content.federate = true; content.predecessor = None; content.room_version = services().globals.default_room_version(); @@ -940,7 +876,7 @@ impl Service { // 1. The room create event services().rooms.timeline.build_and_append_pdu( PduBuilder { - event_type: TimelineEventType::RoomCreate, + event_type: RoomEventType::RoomCreate, content: to_raw_value(&content).expect("event is valid, we just created it"), unsigned: None, state_key: Some("".to_owned()), @@ -954,7 +890,7 @@ impl Service { // 2. 
Make conduit bot join services().rooms.timeline.build_and_append_pdu( PduBuilder { - event_type: TimelineEventType::RoomMember, + event_type: RoomEventType::RoomMember, content: to_raw_value(&RoomMemberEventContent { membership: MembershipState::Join, displayname: None, @@ -981,7 +917,7 @@ impl Service { services().rooms.timeline.build_and_append_pdu( PduBuilder { - event_type: TimelineEventType::RoomPowerLevels, + event_type: RoomEventType::RoomPowerLevels, content: to_raw_value(&RoomPowerLevelsEventContent { users, ..Default::default() @@ -999,7 +935,7 @@ impl Service { // 4.1 Join Rules services().rooms.timeline.build_and_append_pdu( PduBuilder { - event_type: TimelineEventType::RoomJoinRules, + event_type: RoomEventType::RoomJoinRules, content: to_raw_value(&RoomJoinRulesEventContent::new(JoinRule::Invite)) .expect("event is valid, we just created it"), unsigned: None, @@ -1014,7 +950,7 @@ impl Service { // 4.2 History Visibility services().rooms.timeline.build_and_append_pdu( PduBuilder { - event_type: TimelineEventType::RoomHistoryVisibility, + event_type: RoomEventType::RoomHistoryVisibility, content: to_raw_value(&RoomHistoryVisibilityEventContent::new( HistoryVisibility::Shared, )) @@ -1031,7 +967,7 @@ impl Service { // 4.3 Guest Access services().rooms.timeline.build_and_append_pdu( PduBuilder { - event_type: TimelineEventType::RoomGuestAccess, + event_type: RoomEventType::RoomGuestAccess, content: to_raw_value(&RoomGuestAccessEventContent::new(GuestAccess::Forbidden)) .expect("event is valid, we just created it"), unsigned: None, @@ -1047,7 +983,7 @@ impl Service { let room_name = format!("{} Admin Room", services().globals.server_name()); services().rooms.timeline.build_and_append_pdu( PduBuilder { - event_type: TimelineEventType::RoomName, + event_type: RoomEventType::RoomName, content: to_raw_value(&RoomNameEventContent::new(Some(room_name))) .expect("event is valid, we just created it"), unsigned: None, @@ -1061,7 +997,7 @@ impl Service { services().rooms.timeline.build_and_append_pdu( PduBuilder { - event_type: TimelineEventType::RoomTopic, + event_type: RoomEventType::RoomTopic, content: to_raw_value(&RoomTopicEventContent { topic: format!("Manage {}", services().globals.server_name()), }) @@ -1082,7 +1018,7 @@ impl Service { services().rooms.timeline.build_and_append_pdu( PduBuilder { - event_type: TimelineEventType::RoomCanonicalAlias, + event_type: RoomEventType::RoomCanonicalAlias, content: to_raw_value(&RoomCanonicalAliasEventContent { alias: Some(alias.clone()), alt_aliases: Vec::new(), @@ -1139,7 +1075,7 @@ impl Service { // Invite and join the real user services().rooms.timeline.build_and_append_pdu( PduBuilder { - event_type: TimelineEventType::RoomMember, + event_type: RoomEventType::RoomMember, content: to_raw_value(&RoomMemberEventContent { membership: MembershipState::Invite, displayname: None, @@ -1161,7 +1097,7 @@ impl Service { )?; services().rooms.timeline.build_and_append_pdu( PduBuilder { - event_type: TimelineEventType::RoomMember, + event_type: RoomEventType::RoomMember, content: to_raw_value(&RoomMemberEventContent { membership: MembershipState::Join, displayname: Some(displayname), @@ -1189,7 +1125,7 @@ impl Service { services().rooms.timeline.build_and_append_pdu( PduBuilder { - event_type: TimelineEventType::RoomPowerLevels, + event_type: RoomEventType::RoomPowerLevels, content: to_raw_value(&RoomPowerLevelsEventContent { users, ..Default::default() @@ -1207,7 +1143,7 @@ impl Service { // Send welcome message 
services().rooms.timeline.build_and_append_pdu( PduBuilder { - event_type: TimelineEventType::RoomMessage, + event_type: RoomEventType::RoomMessage, content: to_raw_value(&RoomMessageEventContent::text_html( format!("## Thank you for trying out Conduit!\n\nConduit is currently in Beta. This means you can join and participate in most Matrix rooms, but not all features are supported and you might run into bugs from time to time.\n\nHelpful links:\n> Website: https://conduit.rs\n> Git and Documentation: https://gitlab.com/famedly/conduit\n> Report issues: https://gitlab.com/famedly/conduit/-/issues\n\nFor a list of available commands, send the following message in this room: `@conduit:{}: --help`\n\nHere are some rooms you can join (by typing the command):\n\nConduit room (Ask questions and get notified on updates):\n`/join #conduit:fachschaften.org`\n\nConduit lounge (Off-topic, only Conduit users are allowed to join)\n`/join #conduit-lounge:conduit.rs`", services().globals.server_name()), format!("

<h2>Thank you for trying out Conduit!</h2>\n<p>Conduit is currently in Beta. This means you can join and participate in most Matrix rooms, but not all features are supported and you might run into bugs from time to time.</p>\n<p>Helpful links:</p>\n<blockquote>\n<p>Website: https://conduit.rs<br>Git and Documentation: https://gitlab.com/famedly/conduit<br>Report issues: https://gitlab.com/famedly/conduit/-/issues</p>\n</blockquote>\n<p>For a list of available commands, send the following message in this room: <code>@conduit:{}: --help</code></p>\n<p>Here are some rooms you can join (by typing the command):</p>\n<p>Conduit room (Ask questions and get notified on updates):<br><code>/join #conduit:fachschaften.org</code></p>\n<p>Conduit lounge (Off-topic, only Conduit users are allowed to join)<br><code>/join #conduit-lounge:conduit.rs</code></p>
\n", services().globals.server_name()), diff --git a/src/service/globals/data.rs b/src/service/globals/data.rs index 8a66751b..04371a0a 100644 --- a/src/service/globals/data.rs +++ b/src/service/globals/data.rs @@ -13,12 +13,9 @@ use crate::Result; pub trait Data: Send + Sync { fn next_count(&self) -> Result; fn current_count(&self) -> Result; - fn last_check_for_updates_id(&self) -> Result; - fn update_check_for_updates_id(&self, id: u64) -> Result<()>; async fn watch(&self, user_id: &UserId, device_id: &DeviceId) -> Result<()>; fn cleanup(&self) -> Result<()>; - fn memory_usage(&self) -> String; - fn clear_caches(&self, amount: u32); + fn memory_usage(&self) -> Result; fn load_keypair(&self) -> Result; fn remove_keypair(&self) -> Result<()>; fn add_signing_key( diff --git a/src/service/globals/mod.rs b/src/service/globals/mod.rs index 9bce8a2c..50c465ce 100644 --- a/src/service/globals/mod.rs +++ b/src/service/globals/mod.rs @@ -1,13 +1,12 @@ mod data; pub use data::Data; use ruma::{ - serde::Base64, OwnedDeviceId, OwnedEventId, OwnedRoomId, OwnedServerName, - OwnedServerSigningKeyId, OwnedUserId, + OwnedDeviceId, OwnedEventId, OwnedRoomId, OwnedServerName, OwnedServerSigningKeyId, OwnedUserId, }; use crate::api::server_server::FedDest; -use crate::{services, Config, Error, Result}; +use crate::{Config, Error, Result}; use ruma::{ api::{ client::sync::sync_events, @@ -21,18 +20,13 @@ use std::{ future::Future, net::{IpAddr, SocketAddr}, path::PathBuf, - sync::{ - atomic::{self, AtomicBool}, - Arc, Mutex, RwLock, - }, + sync::{Arc, Mutex, RwLock}, time::{Duration, Instant}, }; use tokio::sync::{broadcast, watch::Receiver, Mutex as TokioMutex, Semaphore}; -use tracing::{error, info}; +use tracing::error; use trust_dns_resolver::TokioAsyncResolver; -use base64::{engine::general_purpose, Engine as _}; - type WellKnownMap = HashMap; type TlsNameMap = HashMap, u16)>; type RateLimitState = (Instant, u32); // Time if last failed try, number of failed tries @@ -56,7 +50,6 @@ pub struct Service { pub unstable_room_versions: Vec, pub bad_event_ratelimiter: Arc>>, pub bad_signature_ratelimiter: Arc, RateLimitState>>>, - pub bad_query_ratelimiter: Arc>>, pub servername_ratelimiter: Arc>>>, pub sync_receivers: RwLock>, pub roomid_mutex_insert: RwLock>>>, @@ -65,8 +58,6 @@ pub struct Service { pub roomid_federationhandletime: RwLock>, pub stateres_mutex: Arc>, pub rotate: RotationHandler, - - pub shutdown: AtomicBool, } /// Handles "rotation" of long-polling requests. "Rotation" in this context is similar to "rotation" of log files and the like. 
@@ -161,7 +152,6 @@ impl Service { unstable_room_versions, bad_event_ratelimiter: Arc::new(RwLock::new(HashMap::new())), bad_signature_ratelimiter: Arc::new(RwLock::new(HashMap::new())), - bad_query_ratelimiter: Arc::new(RwLock::new(HashMap::new())), servername_ratelimiter: Arc::new(RwLock::new(HashMap::new())), roomid_mutex_state: RwLock::new(HashMap::new()), roomid_mutex_insert: RwLock::new(HashMap::new()), @@ -170,7 +160,6 @@ impl Service { stateres_mutex: Arc::new(Mutex::new(())), sync_receivers: RwLock::new(HashMap::new()), rotate: RotationHandler::new(), - shutdown: AtomicBool::new(false), }; fs::create_dir_all(s.get_media_folder())?; @@ -179,7 +168,7 @@ impl Service { .supported_room_versions() .contains(&s.config.default_room_version) { - error!(config=?s.config.default_room_version, fallback=?crate::config::default_default_room_version(), "Room version in config isn't supported, falling back to default version"); + error!("Room version in config isn't supported, falling back to default version"); s.config.default_room_version = crate::config::default_default_room_version(); }; @@ -213,16 +202,6 @@ impl Service { self.db.current_count() } - #[tracing::instrument(skip(self))] - pub fn last_check_for_updates_id(&self) -> Result { - self.db.last_check_for_updates_id() - } - - #[tracing::instrument(skip(self))] - pub fn update_check_for_updates_id(&self, id: u64) -> Result<()> { - self.db.update_check_for_updates_id(id) - } - pub async fn watch(&self, user_id: &UserId, device_id: &DeviceId) -> Result<()> { self.db.watch(user_id, device_id).await } @@ -231,6 +210,10 @@ impl Service { self.db.cleanup() } + pub fn memory_usage(&self) -> Result { + self.db.memory_usage() + } + pub fn server_name(&self) -> &ServerName { self.config.server_name.as_ref() } @@ -239,10 +222,6 @@ impl Service { self.config.max_request_size } - pub fn max_fetch_prev_events(&self) -> u16 { - self.config.max_fetch_prev_events - } - pub fn allow_registration(&self) -> bool { self.config.allow_registration } @@ -271,10 +250,6 @@ impl Service { self.config.enable_lightning_bolt } - pub fn allow_check_for_updates(&self) -> bool { - self.config.allow_check_for_updates - } - pub fn trusted_servers(&self) -> &[OwnedServerName] { &self.config.trusted_servers } @@ -311,6 +286,26 @@ impl Service { &self.config.emergency_password } + pub fn allow_presence(&self) -> bool { + self.config.allow_presence + } + + pub fn presence_idle_timeout(&self) -> u64 { + self.config.presence_idle_timeout + } + + pub fn presence_offline_timeout(&self) -> u64 { + self.config.presence_offline_timeout + } + + pub fn presence_cleanup_period(&self) -> u64 { + self.config.presence_cleanup_period + } + + pub fn presence_cleanup_limit(&self) -> u64 { + self.config.presence_cleanup_limit + } + pub fn supported_room_versions(&self) -> Vec { let mut room_versions: Vec = vec![]; room_versions.extend(self.stable_room_versions.clone()); @@ -337,19 +332,7 @@ impl Service { &self, origin: &ServerName, ) -> Result> { - let mut keys = self.db.signing_keys_for(origin)?; - if origin == self.server_name() { - keys.insert( - format!("ed25519:{}", services().globals.keypair().version()) - .try_into() - .expect("found invalid server signing keys in DB"), - VerifyKey { - key: Base64::new(self.keypair.public_key().to_vec()), - }, - ); - } - - Ok(keys) + self.db.signing_keys_for(origin) } pub fn database_version(&self) -> Result { @@ -371,25 +354,13 @@ impl Service { let mut r = PathBuf::new(); r.push(self.config.database_path.clone()); r.push("media"); - 
r.push(general_purpose::URL_SAFE_NO_PAD.encode(key)); + r.push(base64::encode_config(key, base64::URL_SAFE_NO_PAD)); r } - - pub fn well_known_client(&self) -> &Option { - &self.config.well_known_client - } - - pub fn shutdown(&self) { - self.shutdown.store(true, atomic::Ordering::Relaxed); - // On shutdown - info!(target: "shutdown-sync", "Received shutdown notification, notifying sync helpers..."); - services().globals.rotate.fire(); - } } fn reqwest_client_builder(config: &Config) -> Result { let mut reqwest_client_builder = reqwest::Client::builder() - .pool_max_idle_per_host(0) .connect_timeout(Duration::from_secs(30)) .timeout(Duration::from_secs(60 * 3)); diff --git a/src/service/media/mod.rs b/src/service/media/mod.rs index fc8fa569..93937533 100644 --- a/src/service/media/mod.rs +++ b/src/service/media/mod.rs @@ -8,7 +8,7 @@ use image::imageops::FilterType; use tokio::{ fs::File, - io::{AsyncReadExt, AsyncWriteExt, BufReader}, + io::{AsyncReadExt, AsyncWriteExt}, }; pub struct FileMeta { @@ -70,9 +70,7 @@ impl Service { { let path = services().globals.get_media_file(&key); let mut file = Vec::new(); - BufReader::new(File::open(path).await?) - .read_to_end(&mut file) - .await?; + File::open(path).await?.read_to_end(&mut file).await?; Ok(Some(FileMeta { content_disposition, diff --git a/src/service/mod.rs b/src/service/mod.rs index f85da788..6858ce1e 100644 --- a/src/service/mod.rs +++ b/src/service/mod.rs @@ -1,5 +1,5 @@ use std::{ - collections::{BTreeMap, HashMap}, + collections::HashMap, sync::{Arc, Mutex}, }; @@ -62,7 +62,7 @@ impl Services { auth_chain: rooms::auth_chain::Service { db }, directory: rooms::directory::Service { db }, edus: rooms::edus::Service { - presence: rooms::edus::presence::Service { db }, + presence: rooms::edus::presence::Service::build(db)?, read_receipt: rooms::edus::read_receipt::Service { db }, typing: rooms::edus::typing::Service { db }, }, @@ -77,15 +77,7 @@ impl Services { search: rooms::search::Service { db }, short: rooms::short::Service { db }, state: rooms::state::Service { db }, - state_accessor: rooms::state_accessor::Service { - db, - server_visibility_cache: Mutex::new(LruCache::new( - (100.0 * config.conduit_cache_capacity_modifier) as usize, - )), - user_visibility_cache: Mutex::new(LruCache::new( - (100.0 * config.conduit_cache_capacity_modifier) as usize, - )), - }, + state_accessor: rooms::state_accessor::Service { db }, state_cache: rooms::state_cache::Service { db }, state_compressor: rooms::state_compressor::Service { db, @@ -97,18 +89,11 @@ impl Services { db, lasttimelinecount_cache: Mutex::new(HashMap::new()), }, - threads: rooms::threads::Service { db }, - spaces: rooms::spaces::Service { - roomid_spacechunk_cache: Mutex::new(LruCache::new(200)), - }, user: rooms::user::Service { db }, }, transaction_ids: transaction_ids::Service { db }, uiaa: uiaa::Service { db }, - users: users::Service { - db, - connections: Mutex::new(BTreeMap::new()), - }, + users: users::Service { db }, account_data: account_data::Service { db }, admin: admin::Service::build(), key_backups: key_backups::Service { db }, @@ -118,109 +103,4 @@ impl Services { globals: globals::Service::load(db, config)?, }) } - fn memory_usage(&self) -> String { - let lazy_load_waiting = self - .rooms - .lazy_loading - .lazy_load_waiting - .lock() - .unwrap() - .len(); - let server_visibility_cache = self - .rooms - .state_accessor - .server_visibility_cache - .lock() - .unwrap() - .len(); - let user_visibility_cache = self - .rooms - .state_accessor - .user_visibility_cache 
- .lock() - .unwrap() - .len(); - let stateinfo_cache = self - .rooms - .state_compressor - .stateinfo_cache - .lock() - .unwrap() - .len(); - let lasttimelinecount_cache = self - .rooms - .timeline - .lasttimelinecount_cache - .lock() - .unwrap() - .len(); - let roomid_spacechunk_cache = self - .rooms - .spaces - .roomid_spacechunk_cache - .lock() - .unwrap() - .len(); - - format!( - "\ -lazy_load_waiting: {lazy_load_waiting} -server_visibility_cache: {server_visibility_cache} -user_visibility_cache: {user_visibility_cache} -stateinfo_cache: {stateinfo_cache} -lasttimelinecount_cache: {lasttimelinecount_cache} -roomid_spacechunk_cache: {roomid_spacechunk_cache}\ - " - ) - } - fn clear_caches(&self, amount: u32) { - if amount > 0 { - self.rooms - .lazy_loading - .lazy_load_waiting - .lock() - .unwrap() - .clear(); - } - if amount > 1 { - self.rooms - .state_accessor - .server_visibility_cache - .lock() - .unwrap() - .clear(); - } - if amount > 2 { - self.rooms - .state_accessor - .user_visibility_cache - .lock() - .unwrap() - .clear(); - } - if amount > 3 { - self.rooms - .state_compressor - .stateinfo_cache - .lock() - .unwrap() - .clear(); - } - if amount > 4 { - self.rooms - .timeline - .lasttimelinecount_cache - .lock() - .unwrap() - .clear(); - } - if amount > 5 { - self.rooms - .spaces - .roomid_spacechunk_cache - .lock() - .unwrap() - .clear(); - } - } } diff --git a/src/service/pdu.rs b/src/service/pdu.rs index 4a170bc2..593a687b 100644 --- a/src/service/pdu.rs +++ b/src/service/pdu.rs @@ -1,13 +1,13 @@ -use crate::Error; +use crate::{services, Error}; use ruma::{ events::{ - room::member::RoomMemberEventContent, space::child::HierarchySpaceChildEvent, - AnyEphemeralRoomEvent, AnyMessageLikeEvent, AnyStateEvent, AnyStrippedStateEvent, - AnySyncStateEvent, AnySyncTimelineEvent, AnyTimelineEvent, StateEvent, TimelineEventType, + room::member::RoomMemberEventContent, AnyEphemeralRoomEvent, AnyStateEvent, + AnyStrippedStateEvent, AnySyncStateEvent, AnySyncTimelineEvent, AnyTimelineEvent, + RoomEventType, StateEvent, }, serde::Raw, state_res, CanonicalJsonObject, CanonicalJsonValue, EventId, MilliSecondsSinceUnixEpoch, - OwnedEventId, OwnedRoomId, OwnedUserId, RoomId, RoomVersionId, UInt, UserId, + OwnedEventId, OwnedRoomId, OwnedUserId, RoomId, UInt, UserId, }; use serde::{Deserialize, Serialize}; use serde_json::{ @@ -31,7 +31,7 @@ pub struct PduEvent { pub sender: OwnedUserId, pub origin_server_ts: UInt, #[serde(rename = "type")] - pub kind: TimelineEventType, + pub kind: RoomEventType, pub content: Box, #[serde(skip_serializing_if = "Option::is_none")] pub state_key: Option, @@ -53,10 +53,10 @@ impl PduEvent { self.unsigned = None; let allowed: &[&str] = match self.kind { - TimelineEventType::RoomMember => &["join_authorised_via_users_server", "membership"], - TimelineEventType::RoomCreate => &["creator"], - TimelineEventType::RoomJoinRules => &["join_rule"], - TimelineEventType::RoomPowerLevels => &[ + RoomEventType::RoomMember => &["join_authorised_via_users_server", "membership"], + RoomEventType::RoomCreate => &["creator"], + RoomEventType::RoomJoinRules => &["join_rule"], + RoomEventType::RoomPowerLevels => &[ "ban", "events", "events_default", @@ -66,7 +66,7 @@ impl PduEvent { "users", "users_default", ], - TimelineEventType::RoomHistoryVisibility => &["history_visibility"], + RoomEventType::RoomHistoryVisibility => &["history_visibility"], _ => &[], }; @@ -103,19 +103,6 @@ impl PduEvent { Ok(()) } - pub fn add_age(&mut self) -> crate::Result<()> { - let mut unsigned: 
BTreeMap> = self - .unsigned - .as_ref() - .map_or_else(|| Ok(BTreeMap::new()), |u| serde_json::from_str(u.get())) - .map_err(|_| Error::bad_database("Invalid unsigned in pdu event"))?; - - unsigned.insert("age".to_owned(), to_raw_value(&1).unwrap()); - self.unsigned = Some(to_raw_value(&unsigned).expect("unsigned is valid")); - - Ok(()) - } - #[tracing::instrument(skip(self))] pub fn to_sync_room_event(&self) -> Raw { let mut json = json!({ @@ -124,11 +111,9 @@ impl PduEvent { "event_id": self.event_id, "sender": self.sender, "origin_server_ts": self.origin_server_ts, + "unsigned": self.unsigned, }); - if let Some(unsigned) = &self.unsigned { - json["unsigned"] = json!(unsigned); - } if let Some(state_key) = &self.state_key { json["state_key"] = json!(state_key); } @@ -148,12 +133,10 @@ impl PduEvent { "event_id": self.event_id, "sender": self.sender, "origin_server_ts": self.origin_server_ts, + "unsigned": self.unsigned, "room_id": self.room_id, }); - if let Some(unsigned) = &self.unsigned { - json["unsigned"] = json!(unsigned); - } if let Some(state_key) = &self.state_key { json["state_key"] = json!(state_key); } @@ -172,36 +155,10 @@ impl PduEvent { "event_id": self.event_id, "sender": self.sender, "origin_server_ts": self.origin_server_ts, + "unsigned": self.unsigned, "room_id": self.room_id, }); - if let Some(unsigned) = &self.unsigned { - json["unsigned"] = json!(unsigned); - } - if let Some(state_key) = &self.state_key { - json["state_key"] = json!(state_key); - } - if let Some(redacts) = &self.redacts { - json["redacts"] = json!(redacts); - } - - serde_json::from_value(json).expect("Raw::from_value always works") - } - - #[tracing::instrument(skip(self))] - pub fn to_message_like_event(&self) -> Raw { - let mut json = json!({ - "content": self.content, - "type": self.kind, - "event_id": self.event_id, - "sender": self.sender, - "origin_server_ts": self.origin_server_ts, - "room_id": self.room_id, - }); - - if let Some(unsigned) = &self.unsigned { - json["unsigned"] = json!(unsigned); - } if let Some(state_key) = &self.state_key { json["state_key"] = json!(state_key); } @@ -214,38 +171,32 @@ impl PduEvent { #[tracing::instrument(skip(self))] pub fn to_state_event(&self) -> Raw { - let mut json = json!({ + let json = json!({ "content": self.content, "type": self.kind, "event_id": self.event_id, "sender": self.sender, "origin_server_ts": self.origin_server_ts, + "unsigned": self.unsigned, "room_id": self.room_id, "state_key": self.state_key, }); - if let Some(unsigned) = &self.unsigned { - json["unsigned"] = json!(unsigned); - } - serde_json::from_value(json).expect("Raw::from_value always works") } #[tracing::instrument(skip(self))] pub fn to_sync_state_event(&self) -> Raw { - let mut json = json!({ + let json = json!({ "content": self.content, "type": self.kind, "event_id": self.event_id, "sender": self.sender, "origin_server_ts": self.origin_server_ts, + "unsigned": self.unsigned, "state_key": self.state_key, }); - if let Some(unsigned) = &self.unsigned { - json["unsigned"] = json!(unsigned); - } - serde_json::from_value(json).expect("Raw::from_value always works") } @@ -261,36 +212,20 @@ impl PduEvent { serde_json::from_value(json).expect("Raw::from_value always works") } - #[tracing::instrument(skip(self))] - pub fn to_stripped_spacechild_state_event(&self) -> Raw { - let json = json!({ - "content": self.content, - "type": self.kind, - "sender": self.sender, - "state_key": self.state_key, - "origin_server_ts": self.origin_server_ts, - }); - - 
serde_json::from_value(json).expect("Raw::from_value always works") - } - #[tracing::instrument(skip(self))] pub fn to_member_event(&self) -> Raw> { - let mut json = json!({ + let json = json!({ "content": self.content, "type": self.kind, "event_id": self.event_id, "sender": self.sender, "origin_server_ts": self.origin_server_ts, "redacts": self.redacts, + "unsigned": self.unsigned, "room_id": self.room_id, "state_key": self.state_key, }); - if let Some(unsigned) = &self.unsigned { - json["unsigned"] = json!(unsigned); - } - serde_json::from_value(json).expect("Raw::from_value always works") } @@ -346,7 +281,7 @@ impl state_res::Event for PduEvent { &self.sender } - fn event_type(&self) -> &TimelineEventType { + fn event_type(&self) -> &RoomEventType { &self.kind } @@ -399,17 +334,23 @@ impl Ord for PduEvent { /// Returns a tuple of the new `EventId` and the PDU as a `BTreeMap`. pub(crate) fn gen_event_id_canonical_json( pdu: &RawJsonValue, - room_version_id: &RoomVersionId, ) -> crate::Result<(OwnedEventId, CanonicalJsonObject)> { let value: CanonicalJsonObject = serde_json::from_str(pdu.get()).map_err(|e| { warn!("Error parsing incoming event {:?}: {:?}", pdu, e); Error::BadServerResponse("Invalid PDU in server response") })?; + let room_id = value + .get("room_id") + .and_then(|id| RoomId::parse(id.as_str()?).ok()) + .ok_or_else(|| Error::bad_database("PDU in db has invalid room_id."))?; + + let room_version_id = services().rooms.state.get_room_version(&room_id); + let event_id = format!( "${}", // Anything higher than version3 behaves the same - ruma::signatures::reference_hash(&value, room_version_id) + ruma::signatures::reference_hash(&value, &room_version_id?) .expect("ruma can calculate reference hashes") ) .try_into() @@ -422,7 +363,7 @@ pub(crate) fn gen_event_id_canonical_json( #[derive(Debug, Deserialize)] pub struct PduBuilder { #[serde(rename = "type")] - pub event_type: TimelineEventType, + pub event_type: RoomEventType, pub content: Box, pub unsigned: Option>, pub state_key: Option, diff --git a/src/service/pusher/mod.rs b/src/service/pusher/mod.rs index 315c5ef0..d3d157c7 100644 --- a/src/service/pusher/mod.rs +++ b/src/service/pusher/mod.rs @@ -13,7 +13,10 @@ use ruma::{ }, IncomingResponse, MatrixVersion, OutgoingRequest, SendAccessToken, }, - events::{room::power_levels::RoomPowerLevelsEventContent, StateEventType, TimelineEventType}, + events::{ + room::{name::RoomNameEventContent, power_levels::RoomPowerLevelsEventContent}, + RoomEventType, StateEventType, + }, push::{Action, PushConditionRoomCtx, PushFormat, Ruleset, Tweak}, serde::Raw, uint, RoomId, UInt, UserId, @@ -159,12 +162,13 @@ impl Service { &pdu.room_id, )? 
{ let n = match action { - Action::Notify => true, + Action::DontNotify => false, + // TODO: Implement proper support for coalesce + Action::Notify | Action::Coalesce => true, Action::SetTweak(tweak) => { tweaks.push(tweak.clone()); continue; } - _ => false, }; if notify.is_some() { @@ -235,16 +239,16 @@ impl Service { device.tweaks = tweaks.clone(); } - let d = vec![device]; + let d = &[device]; let mut notifi = Notification::new(d); notifi.prio = NotificationPriority::Low; - notifi.event_id = Some((*event.event_id).to_owned()); - notifi.room_id = Some((*event.room_id).to_owned()); + notifi.event_id = Some(&event.event_id); + notifi.room_id = Some(&event.room_id); // TODO: missed calls notifi.counts = NotificationCounts::new(unread, uint!(0)); - if event.kind == TimelineEventType::RoomEncrypted + if event.kind == RoomEventType::RoomEncrypted || tweaks .iter() .any(|t| matches!(t, Tweak::Highlight(true) | Tweak::Sound(_))) @@ -256,18 +260,34 @@ impl Service { self.send_request(&http.url, send_event_notification::v1::Request::new(notifi)) .await?; } else { - notifi.sender = Some(event.sender.clone()); - notifi.event_type = Some(event.kind.clone()); - notifi.content = serde_json::value::to_raw_value(&event.content).ok(); + notifi.sender = Some(&event.sender); + notifi.event_type = Some(&event.kind); + let content = serde_json::value::to_raw_value(&event.content).ok(); + notifi.content = content.as_deref(); - if event.kind == TimelineEventType::RoomMember { + if event.kind == RoomEventType::RoomMember { notifi.user_is_target = event.state_key.as_deref() == Some(event.sender.as_str()); } - notifi.sender_display_name = services().users.displayname(&event.sender)?; + let user_name = services().users.displayname(&event.sender)?; + notifi.sender_display_name = user_name.as_deref(); - notifi.room_name = services().rooms.state_accessor.get_name(&event.room_id)?; + let room_name = if let Some(room_name_pdu) = services() + .rooms + .state_accessor + .room_state_get(&event.room_id, &StateEventType::RoomName, "")? + { + serde_json::from_str::(room_name_pdu.content.get()) + .map_err(|_| { + Error::bad_database("Invalid room name event in database.") + })? 
+ .name + } else { + None + }; + + notifi.room_name = room_name.as_deref(); self.send_request(&http.url, send_event_notification::v1::Request::new(notifi)) .await?; diff --git a/src/service/rooms/auth_chain/mod.rs b/src/service/rooms/auth_chain/mod.rs index da1944e2..d3b6e401 100644 --- a/src/service/rooms/auth_chain/mod.rs +++ b/src/service/rooms/auth_chain/mod.rs @@ -6,7 +6,7 @@ use std::{ pub use data::Data; use ruma::{api::client::error::ErrorKind, EventId, RoomId}; -use tracing::{debug, error, warn}; +use tracing::log::warn; use crate::{services, Error, Result}; @@ -15,7 +15,11 @@ pub struct Service { } impl Service { - pub fn get_cached_eventid_authchain(&self, key: &[u64]) -> Result>>> { + #[tracing::instrument(skip(self))] + pub fn get_cached_eventid_authchain<'a>( + &'a self, + key: &[u64], + ) -> Result>>> { self.db.get_cached_eventid_authchain(key) } @@ -85,10 +89,10 @@ impl Service { .rooms .auth_chain .cache_auth_chain(vec![sevent_id], Arc::clone(&auth_chain))?; - debug!( - event_id = ?event_id, - chain_length = ?auth_chain.len(), - "Cache missed event" + println!( + "cache missed event {} with auth chain len {}", + event_id, + auth_chain.len() ); chunk_cache.extend(auth_chain.iter()); @@ -98,11 +102,11 @@ impl Service { } }; } - debug!( - chunk_cache_length = ?chunk_cache.len(), - hits = ?hits2, - misses = ?misses2, - "Chunk missed", + println!( + "chunk missed with len {}, event hits2: {}, misses2: {}", + chunk_cache.len(), + hits2, + misses2 ); let chunk_cache = Arc::new(chunk_cache); services() @@ -112,11 +116,11 @@ impl Service { full_auth_chain.extend(chunk_cache.iter()); } - debug!( - chain_length = ?full_auth_chain.len(), - hits = ?hits, - misses = ?misses, - "Auth chain stats", + println!( + "total: {}, chunk hits: {}, misses: {}", + full_auth_chain.len(), + hits, + misses ); Ok(full_auth_chain @@ -148,10 +152,10 @@ impl Service { } } Ok(None) => { - warn!(?event_id, "Could not find pdu mentioned in auth events"); + warn!("Could not find pdu mentioned in auth events: {}", event_id); } - Err(error) => { - error!(?event_id, ?error, "Could not load event in auth chain"); + Err(e) => { + warn!("Could not load event in auth chain: {} {}", event_id, e); } } } diff --git a/src/service/rooms/edus/presence/data.rs b/src/service/rooms/edus/presence/data.rs index 53329e08..2dd78b6f 100644 --- a/src/service/rooms/edus/presence/data.rs +++ b/src/service/rooms/edus/presence/data.rs @@ -1,7 +1,8 @@ -use std::collections::HashMap; - use crate::Result; use ruma::{events::presence::PresenceEvent, OwnedUserId, RoomId, UserId}; +use tokio::sync::mpsc; + +use super::PresenceIter; pub trait Data: Send + Sync { /// Adds a presence event which will be saved until a new event replaces it. @@ -16,23 +17,29 @@ pub trait Data: Send + Sync { ) -> Result<()>; /// Resets the presence timeout, so the user will stay in their current presence state. - fn ping_presence(&self, user_id: &UserId) -> Result<()>; + fn ping_presence( + &self, + user_id: &UserId, + update_count: bool, + update_timestamp: bool, + ) -> Result<()>; /// Returns the timestamp of the last presence update of this user in millis since the unix epoch. - fn last_presence_update(&self, user_id: &UserId) -> Result>; + fn last_presence_update(&self, user_id: &UserId) -> Result>; /// Returns the presence event with correct last_active_ago. 
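    // A minimal sketch (the helper name and the `now_millis` parameter are illustrative,
    // not part of this trait) of what "correct last_active_ago" means for an implementor:
    // the stored event carries the absolute `presence_timestamp`, so the relative
    // `last_active_ago` has to be recomputed at read time.
    fn example_fix_last_active_ago(
        mut presence: ruma::events::presence::PresenceEvent,
        presence_timestamp: u64,
        now_millis: u64,
    ) -> ruma::events::presence::PresenceEvent {
        // Milliseconds since the user was last active, relative to "now".
        presence.content.last_active_ago =
            ruma::UInt::new(now_millis.saturating_sub(presence_timestamp));
        presence
    }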
fn get_presence_event( &self, room_id: &RoomId, user_id: &UserId, - count: u64, + presence_timestamp: u64, ) -> Result>; /// Returns the most recent presence updates that happened after the event with id `since`. - fn presence_since( - &self, - room_id: &RoomId, - since: u64, - ) -> Result>; + fn presence_since<'a>(&'a self, room_id: &RoomId, since: u64) -> Result>; + + fn presence_maintain(&self, timer_receiver: mpsc::UnboundedReceiver) + -> Result<()>; + + fn presence_cleanup(&self) -> Result<()>; } diff --git a/src/service/rooms/edus/presence/mod.rs b/src/service/rooms/edus/presence/mod.rs index 860aea18..0f3421c9 100644 --- a/src/service/rooms/edus/presence/mod.rs +++ b/src/service/rooms/edus/presence/mod.rs @@ -1,16 +1,55 @@ mod data; -use std::collections::HashMap; pub use data::Data; use ruma::{events::presence::PresenceEvent, OwnedUserId, RoomId, UserId}; +use tokio::sync::mpsc; -use crate::Result; +use crate::{services, Error, Result}; + +pub(crate) type PresenceIter<'a> = Box + 'a>; pub struct Service { pub db: &'static dyn Data, + + // Presence timers + timer_sender: mpsc::UnboundedSender, } impl Service { + /// Builds the service and initialized the presence_maintain task + pub fn build(db: &'static dyn Data) -> Result { + let (sender, receiver) = mpsc::unbounded_channel(); + let service = Self { + db, + timer_sender: sender, + }; + + service.presence_maintain(receiver)?; + service.presence_cleanup()?; + + Ok(service) + } + + /// Resets the presence timeout, so the user will stay in their current presence state. + pub fn ping_presence( + &self, + user_id: &UserId, + update_count: bool, + update_timestamp: bool, + spawn_timer: bool, + ) -> Result<()> { + if !services().globals.allow_presence() { + return Ok(()); + } + + if spawn_timer { + self.spawn_timer(user_id)?; + } + + self.db + .ping_presence(user_id, update_count, update_timestamp) + } + /// Adds a presence event which will be saved until a new event replaces it. /// /// Note: This method takes a RoomId because presence updates are always bound to rooms to @@ -20,103 +59,78 @@ impl Service { user_id: &UserId, room_id: &RoomId, presence: PresenceEvent, + spawn_timer: bool, ) -> Result<()> { + if !services().globals.allow_presence() { + return Ok(()); + } + + if spawn_timer { + self.spawn_timer(user_id)?; + } + self.db.update_presence(user_id, room_id, presence) } - /// Resets the presence timeout, so the user will stay in their current presence state. - pub fn ping_presence(&self, user_id: &UserId) -> Result<()> { - self.db.ping_presence(user_id) + /// Returns the timestamp of when the presence was last updated for the specified user. + pub fn last_presence_update(&self, user_id: &UserId) -> Result> { + if !services().globals.allow_presence() { + return Ok(None); + } + + self.db.last_presence_update(user_id) } - pub fn get_last_presence_event( + /// Returns the saved presence event for this user with actual last_active_ago. + pub fn get_presence_event( &self, user_id: &UserId, room_id: &RoomId, ) -> Result> { + if !services().globals.allow_presence() { + return Ok(None); + } + let last_update = match self.db.last_presence_update(user_id)? { - Some(last) => last, + Some(last) => last.1, None => return Ok(None), }; self.db.get_presence_event(room_id, user_id, last_update) } - /* TODO - /// Sets all users to offline who have been quiet for too long. 
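    // For contrast with the commented-out polling sweep below, a rough sketch of the timer
    // half of the new design (simplified and hypothetical: `set_offline` stands in for
    // whatever state update the database-side `presence_maintain` performs): user IDs pushed
    // into the unbounded channel are received by a background task, which waits out a fixed
    // timeout per ping before marking the user offline.
    async fn example_presence_maintain(
        mut timer_receiver: tokio::sync::mpsc::UnboundedReceiver<ruma::OwnedUserId>,
        set_offline: impl Fn(ruma::OwnedUserId),
    ) {
        while let Some(user_id) = timer_receiver.recv().await {
            // One timer per ping; a real implementation would deduplicate and reset timers
            // instead of sleeping serially.
            tokio::time::sleep(std::time::Duration::from_secs(5 * 60)).await;
            set_offline(user_id);
        }
    }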
- fn _presence_maintain( - &self, - rooms: &super::Rooms, - globals: &super::super::globals::Globals, - ) -> Result<()> { - let current_timestamp = utils::millis_since_unix_epoch(); - - for (user_id_bytes, last_timestamp) in self - .userid_lastpresenceupdate - .iter() - .filter_map(|(k, bytes)| { - Some(( - k, - utils::u64_from_bytes(&bytes) - .map_err(|_| { - Error::bad_database("Invalid timestamp in userid_lastpresenceupdate.") - }) - .ok()?, - )) - }) - .take_while(|(_, timestamp)| current_timestamp.saturating_sub(*timestamp) > 5 * 60_000) - // 5 Minutes - { - // Send new presence events to set the user offline - let count = globals.next_count()?.to_be_bytes(); - let user_id: Box<_> = utils::string_from_bytes(&user_id_bytes) - .map_err(|_| { - Error::bad_database("Invalid UserId bytes in userid_lastpresenceupdate.") - })? - .try_into() - .map_err(|_| Error::bad_database("Invalid UserId in userid_lastpresenceupdate."))?; - for room_id in rooms.rooms_joined(&user_id).filter_map(|r| r.ok()) { - let mut presence_id = room_id.as_bytes().to_vec(); - presence_id.push(0xff); - presence_id.extend_from_slice(&count); - presence_id.push(0xff); - presence_id.extend_from_slice(&user_id_bytes); - - self.presenceid_presence.insert( - &presence_id, - &serde_json::to_vec(&PresenceEvent { - content: PresenceEventContent { - avatar_url: None, - currently_active: None, - displayname: None, - last_active_ago: Some( - last_timestamp.try_into().expect("time is valid"), - ), - presence: PresenceState::Offline, - status_msg: None, - }, - sender: user_id.to_owned(), - }) - .expect("PresenceEvent can be serialized"), - )?; - } - - self.userid_lastpresenceupdate.insert( - user_id.as_bytes(), - &utils::millis_since_unix_epoch().to_be_bytes(), - )?; - } - - Ok(()) - }*/ - /// Returns the most recent presence updates that happened after the event with id `since`. #[tracing::instrument(skip(self, since, room_id))] - pub fn presence_since( - &self, - room_id: &RoomId, - since: u64, - ) -> Result> { + pub fn presence_since<'a>(&'a self, room_id: &RoomId, since: u64) -> Result> { + if !services().globals.allow_presence() { + return Ok(Box::new(std::iter::empty())); + } + self.db.presence_since(room_id, since) } + + /// Spawns a task maintaining presence data + fn presence_maintain( + &self, + timer_receiver: mpsc::UnboundedReceiver, + ) -> Result<()> { + self.db.presence_maintain(timer_receiver) + } + + fn presence_cleanup(&self) -> Result<()> { + self.db.presence_cleanup() + } + + /// Spawns a timer for the user used by the maintenance task + fn spawn_timer(&self, user_id: &UserId) -> Result<()> { + if !services().globals.allow_presence() { + return Ok(()); + } + + self.timer_sender + .send(user_id.into()) + .map_err(|_| Error::bad_database("Sender errored out"))?; + + Ok(()) + } } diff --git a/src/service/rooms/event_handler/mod.rs b/src/service/rooms/event_handler/mod.rs index 29199781..03f1f93f 100644 --- a/src/service/rooms/event_handler/mod.rs +++ b/src/service/rooms/event_handler/mod.rs @@ -7,7 +7,7 @@ use ruma::{ RoomVersionId, }; use std::{ - collections::{hash_map, BTreeMap, HashMap, HashSet}, + collections::{btree_map, hash_map, BTreeMap, HashMap, HashSet}, pin::Pin, sync::{Arc, RwLock, RwLockWriteGuard}, time::{Duration, Instant, SystemTime}, @@ -38,8 +38,6 @@ use tracing::{debug, error, info, trace, warn}; use crate::{service::*, services, Error, PduEvent, Result}; -use super::state_compressor::CompressedStateEvent; - pub struct Service; impl Service { @@ -64,8 +62,9 @@ impl Service { /// 12. 
Ensure that the state is derived from the previous current state (i.e. we calculated by /// doing state res where one of the inputs was a previously trusted set of state, don't just /// trust a set of state we got from a remote) - /// 13. Use state resolution to find new room state - /// 14. Check if the event passes auth based on the "current state" of the room, if not soft fail it + /// 13. Check if the event passes auth based on the "current state" of the room, if not "soft fail" + /// it + /// 14. Use state resolution to find new room state // We use some AsyncRecursiveType hacks here so we can call this async funtion recursively #[tracing::instrument(skip(self, value, is_timeline_event, pub_key_map))] pub(crate) async fn handle_incoming_pdu<'a>( @@ -77,7 +76,6 @@ impl Service { is_timeline_event: bool, pub_key_map: &'a RwLock>>, ) -> Result>> { - // 0. Check the server is in the room if !services().rooms.metadata.exists(room_id)? { return Err(Error::BadRequest( ErrorKind::NotFound, @@ -92,8 +90,6 @@ impl Service { )); } - services().rooms.event_handler.acl_check(origin, &room_id)?; - // 1. Skip the PDU if we already have it as a timeline event if let Some(pdu_id) = services().rooms.timeline.get_pdu_id(event_id)? { return Ok(Some(pdu_id.to_vec())); @@ -105,13 +101,6 @@ impl Service { .room_state_get(room_id, &StateEventType::RoomCreate, "")? .ok_or_else(|| Error::bad_database("Failed to find create event in db."))?; - let create_event_content: RoomCreateEventContent = - serde_json::from_str(create_event.content.get()).map_err(|e| { - error!("Invalid create event: {}", e); - Error::BadDatabase("Invalid create event in db") - })?; - let room_version_id = &create_event_content.room_version; - let first_pdu_in_room = services() .rooms .timeline @@ -119,17 +108,8 @@ impl Service { .ok_or_else(|| Error::bad_database("Failed to find first pdu in db."))?; let (incoming_pdu, val) = self - .handle_outlier_pdu( - origin, - &create_event, - event_id, - room_id, - value, - false, - pub_key_map, - ) + .handle_outlier_pdu(origin, &create_event, event_id, room_id, value, pub_key_map) .await?; - self.check_room_id(room_id, &incoming_pdu)?; // 8. 
if not timeline event: stop if !is_timeline_event { @@ -147,7 +127,6 @@ impl Service { origin, &create_event, room_id, - room_version_id, pub_key_map, incoming_pdu.prev_events.clone(), ) @@ -236,7 +215,7 @@ impl Service { .write() .unwrap() .remove(&room_id.to_owned()); - debug!( + warn!( "Handling prev event {} took {}m{}s", prev_id, elapsed.as_secs() / 60, @@ -284,7 +263,6 @@ impl Service { event_id: &'a EventId, room_id: &'a RoomId, mut value: BTreeMap, - auth_events_known: bool, pub_key_map: &'a RwLock>>, ) -> AsyncRecursiveType<'a, Result<(Arc, BTreeMap)>> { Box::pin(async move { @@ -317,7 +295,7 @@ impl Service { ) { Err(e) => { // Drop - warn!("Dropping bad event {}: {}", event_id, e,); + warn!("Dropping bad event {}: {}", event_id, e); return Err(Error::BadRequest( ErrorKind::InvalidParam, "Signature verification failed", @@ -326,7 +304,7 @@ impl Service { Ok(ruma::signatures::Verified::Signatures) => { // Redact warn!("Calculated hash does not match: {}", event_id); - let obj = match ruma::canonical_json::redact(value, room_version_id, None) { + match ruma::canonical_json::redact(&value, room_version_id) { Ok(obj) => obj, Err(_) => { return Err(Error::BadRequest( @@ -334,17 +312,7 @@ impl Service { "Redaction failed", )) } - }; - - // Skip the PDU if it is redacted and we already have it as an outlier event - if services().rooms.timeline.get_pdu_json(event_id)?.is_some() { - return Err(Error::BadRequest( - ErrorKind::InvalidParam, - "Event was redacted and we already knew about it", - )); } - - obj } Ok(ruma::signatures::Verified::All) => value, }; @@ -360,30 +328,25 @@ impl Service { ) .map_err(|_| Error::bad_database("Event is not a valid PDU."))?; - self.check_room_id(room_id, &incoming_pdu)?; - - if !auth_events_known { - // 4. fetch any missing auth events doing all checks listed here starting at 1. These are not timeline events - // 5. Reject "due to auth events" if can't get all the auth events or some of the auth events are also rejected "due to auth events" - // NOTE: Step 5 is not applied anymore because it failed too often - debug!(event_id = ?incoming_pdu.event_id, "Fetching auth events"); - self.fetch_and_handle_outliers( - origin, - &incoming_pdu - .auth_events - .iter() - .map(|x| Arc::from(&**x)) - .collect::>(), - create_event, - room_id, - room_version_id, - pub_key_map, - ) - .await; - } + // 4. fetch any missing auth events doing all checks listed here starting at 1. These are not timeline events + // 5. Reject "due to auth events" if can't get all the auth events or some of the auth events are also rejected "due to auth events" + // NOTE: Step 5 is not applied anymore because it failed too often + debug!(event_id = ?incoming_pdu.event_id, "Fetching auth events"); + self.fetch_and_handle_outliers( + origin, + &incoming_pdu + .auth_events + .iter() + .map(|x| Arc::from(&**x)) + .collect::>(), + create_event, + room_id, + pub_key_map, + ) + .await; // 6. 
Reject "due to auth events" if the event doesn't pass auth based on the auth events - debug!( + info!( "Auth check for {} based on auth events", incoming_pdu.event_id ); @@ -399,8 +362,6 @@ impl Service { } }; - self.check_room_id(room_id, &auth_event)?; - match auth_events.entry(( auth_event.kind.to_string().into(), auth_event @@ -421,12 +382,11 @@ impl Service { } // The original create event must be in the auth events - if !matches!( - auth_events - .get(&(StateEventType::RoomCreate, "".to_owned())) - .map(|a| a.as_ref()), - Some(_) | None - ) { + if auth_events + .get(&(StateEventType::RoomCreate, "".to_owned())) + .map(|a| a.as_ref()) + != Some(create_event) + { return Err(Error::BadRequest( ErrorKind::InvalidParam, "Incoming event refers to wrong create event.", @@ -447,7 +407,7 @@ impl Service { )); } - debug!("Validation successful."); + info!("Validation successful."); // 7. Persist the event as an outlier. services() @@ -455,7 +415,7 @@ impl Service { .outlier .add_pdu_outlier(&incoming_pdu.event_id, &val)?; - debug!("Added pdu as outlier."); + info!("Added pdu as outlier."); Ok((Arc::new(incoming_pdu), val)) }) @@ -504,7 +464,7 @@ impl Service { // TODO: if we know the prev_events of the incoming event we can avoid the request and build // the state from a known point and resolve if > 1 prev_event - debug!("Requesting state at event"); + info!("Requesting state at event"); let mut state_at_incoming_event = None; if incoming_pdu.prev_events.len() == 1 { @@ -527,7 +487,7 @@ impl Service { }; if let Some(Ok(mut state)) = state { - debug!("Using cached state"); + info!("Using cached state"); let prev_pdu = services() .rooms .timeline @@ -551,7 +511,7 @@ impl Service { state_at_incoming_event = Some(state); } } else { - debug!("Calculating state at event using state res"); + info!("Calculating state at event using state res"); let mut extremity_sstatehashes = HashMap::new(); let mut okay = true; @@ -583,7 +543,7 @@ impl Service { let mut auth_chain_sets = Vec::with_capacity(extremity_sstatehashes.len()); for (sstatehash, prev_event) in extremity_sstatehashes { - let mut leaf_state: HashMap<_, _> = services() + let mut leaf_state: BTreeMap<_, _> = services() .rooms .state_accessor .state_full_ids(sstatehash) @@ -660,7 +620,7 @@ impl Service { } if state_at_incoming_event.is_none() { - debug!("Calling /state_ids"); + info!("Calling /state_ids"); // Call /state_ids to find out what the state at this pdu is. 
We trust the server's // response to some extend, but we still do a lot of checks on the events match services() @@ -668,14 +628,14 @@ impl Service { .send_federation_request( origin, get_room_state_ids::v1::Request { - room_id: room_id.to_owned(), - event_id: (*incoming_pdu.event_id).to_owned(), + room_id, + event_id: &incoming_pdu.event_id, }, ) .await { Ok(res) => { - debug!("Fetching state events at event."); + info!("Fetching state events at event."); let state_vec = self .fetch_and_handle_outliers( origin, @@ -685,12 +645,11 @@ impl Service { .collect::>(), create_event, room_id, - room_version_id, pub_key_map, ) .await; - let mut state: HashMap<_, Arc> = HashMap::new(); + let mut state: BTreeMap<_, Arc> = BTreeMap::new(); for (pdu, _) in state_vec { let state_key = pdu.state_key.clone().ok_or_else(|| { Error::bad_database("Found non-state pdu in state events.") @@ -702,10 +661,10 @@ impl Service { )?; match state.entry(shortstatekey) { - hash_map::Entry::Vacant(v) => { + btree_map::Entry::Vacant(v) => { v.insert(Arc::from(&*pdu.event_id)); } - hash_map::Entry::Occupied(_) => return Err( + btree_map::Entry::Occupied(_) => return Err( Error::bad_database("State event's type and state_key combination exists multiple times."), ), } @@ -738,7 +697,7 @@ impl Service { let state_at_incoming_event = state_at_incoming_event.expect("we always set this to some above"); - debug!("Starting auth check"); + info!("Starting auth check"); // 11. Check the auth of the event passes based on the state of the event let check_result = state_res::event_auth::auth_check( &room_version, @@ -762,28 +721,10 @@ impl Service { "Event has failed auth check with state at the event.", )); } - debug!("Auth check succeeded"); - - // Soft fail check before doing state res - let auth_events = services().rooms.state.get_auth_events( - room_id, - &incoming_pdu.kind, - &incoming_pdu.sender, - incoming_pdu.state_key.as_deref(), - &incoming_pdu.content, - )?; - - let soft_fail = !state_res::event_auth::auth_check( - &room_version, - &incoming_pdu, - None::, - |k, s| auth_events.get(&(k.clone(), s.to_owned())), - ) - .map_err(|_e| Error::BadRequest(ErrorKind::InvalidParam, "Auth check failed."))?; - - // 13. Use state resolution to find new room state + info!("Auth check succeeded"); // We start looking at current room state now, so lets lock the room + let mutex_state = Arc::clone( services() .globals @@ -797,7 +738,7 @@ impl Service { // Now we calculate the set of extremities this room has after the incoming event has been // applied. We start with the previous extremities (aka leaves) - debug!("Calculating extremities"); + info!("Calculating extremities"); let mut extremities = services().rooms.state.get_forward_extremities(room_id)?; // Remove any forward extremities that are referenced by this incoming event's prev_events @@ -818,54 +759,35 @@ impl Service { ) }); - debug!("Compressing state at event"); - let state_ids_compressed = Arc::new( - state_at_incoming_event - .iter() - .map(|(shortstatekey, id)| { - services() - .rooms - .state_compressor - .compress_state_event(*shortstatekey, id) - }) - .collect::>()?, - ); + info!("Compressing state at event"); + let state_ids_compressed = state_at_incoming_event + .iter() + .map(|(shortstatekey, id)| { + services() + .rooms + .state_compressor + .compress_state_event(*shortstatekey, id) + }) + .collect::>()?; - if incoming_pdu.state_key.is_some() { - debug!("Preparing for stateres to derive new room state"); + // 13. 
Check if the event passes auth based on the "current state" of the room, if not "soft fail" it + info!("Starting soft fail auth check"); - // We also add state after incoming event to the fork states - let mut state_after = state_at_incoming_event.clone(); - if let Some(state_key) = &incoming_pdu.state_key { - let shortstatekey = services().rooms.short.get_or_create_shortstatekey( - &incoming_pdu.kind.to_string().into(), - state_key, - )?; + let auth_events = services().rooms.state.get_auth_events( + room_id, + &incoming_pdu.kind, + &incoming_pdu.sender, + incoming_pdu.state_key.as_deref(), + &incoming_pdu.content, + )?; - state_after.insert(shortstatekey, Arc::from(&*incoming_pdu.event_id)); - } - - let new_room_state = self - .resolve_state(room_id, room_version_id, state_after) - .await?; - - // Set the new room state to the resolved state - debug!("Forcing new room state"); - - let (sstatehash, new, removed) = services() - .rooms - .state_compressor - .save_state(room_id, new_room_state)?; - - services() - .rooms - .state - .force_state(room_id, sstatehash, new, removed, &state_lock) - .await?; - } - - // 14. Check if the event passes auth based on the "current state" of the room, if not soft fail it - debug!("Starting soft fail auth check"); + let soft_fail = !state_res::event_auth::auth_check( + &room_version, + &incoming_pdu, + None::, + |k, s| auth_events.get(&(k.clone(), s.to_owned())), + ) + .map_err(|_e| Error::BadRequest(ErrorKind::InvalidParam, "Auth check failed."))?; if soft_fail { services().rooms.timeline.append_incoming_pdu( @@ -889,7 +811,182 @@ impl Service { )); } - debug!("Appending pdu to timeline"); + if incoming_pdu.state_key.is_some() { + info!("Loading current room state ids"); + let current_sstatehash = services() + .rooms + .state + .get_room_shortstatehash(room_id)? + .expect("every room has state"); + + let current_state_ids = services() + .rooms + .state_accessor + .state_full_ids(current_sstatehash) + .await?; + + info!("Preparing for stateres to derive new room state"); + let mut extremity_sstatehashes = HashMap::new(); + + info!("Loading extremities"); + for id in dbg!(&extremities) { + match services().rooms.timeline.get_pdu(id)? { + Some(leaf_pdu) => { + extremity_sstatehashes.insert( + services() + .rooms + .state_accessor + .pdu_shortstatehash(&leaf_pdu.event_id)? + .ok_or_else(|| { + error!( + "Found extremity pdu with no statehash in db: {:?}", + leaf_pdu + ); + Error::bad_database("Found pdu with no statehash in db.") + })?, + leaf_pdu, + ); + } + _ => { + error!("Missing state snapshot for {:?}", id); + return Err(Error::BadDatabase("Missing state snapshot.")); + } + } + } + + let mut fork_states = Vec::new(); + + // 12. Ensure that the state is derived from the previous current state (i.e. we calculated + // by doing state res where one of the inputs was a previously trusted set of state, + // don't just trust a set of state we got from a remote). 
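            // Each fork state here is one complete room state, keyed by shortstatekey. A small,
            // self-contained illustration (simplified stand-in types: u64 keys, Arc<EventId>
            // values) of the shortcut applied further down: if every fork is identical, state
            // resolution is trivial and the first fork can be used directly.
            fn example_forks_are_trivial(
                fork_states: &[std::collections::BTreeMap<u64, std::sync::Arc<ruma::EventId>>],
            ) -> bool {
                // Mirrors `fork_states.iter().skip(1).all(|f| &fork_states[0] == f)`.
                fork_states
                    .split_first()
                    .map_or(true, |(first, rest)| rest.iter().all(|f| f == first))
            }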
+ + // We do this by adding the current state to the list of fork states + extremity_sstatehashes.remove(¤t_sstatehash); + fork_states.push(current_state_ids); + + // We also add state after incoming event to the fork states + let mut state_after = state_at_incoming_event.clone(); + if let Some(state_key) = &incoming_pdu.state_key { + let shortstatekey = services().rooms.short.get_or_create_shortstatekey( + &incoming_pdu.kind.to_string().into(), + state_key, + )?; + + state_after.insert(shortstatekey, Arc::from(&*incoming_pdu.event_id)); + } + fork_states.push(state_after); + + let mut update_state = false; + // 14. Use state resolution to find new room state + let new_room_state = if fork_states.is_empty() { + panic!("State is empty"); + } else if fork_states.iter().skip(1).all(|f| &fork_states[0] == f) { + info!("State resolution trivial"); + // There was only one state, so it has to be the room's current state (because that is + // always included) + fork_states[0] + .iter() + .map(|(k, id)| { + services() + .rooms + .state_compressor + .compress_state_event(*k, id) + }) + .collect::>()? + } else { + info!("Loading auth chains"); + // We do need to force an update to this room's state + update_state = true; + + let mut auth_chain_sets = Vec::new(); + for state in &fork_states { + auth_chain_sets.push( + services() + .rooms + .auth_chain + .get_auth_chain( + room_id, + state.iter().map(|(_, id)| id.clone()).collect(), + ) + .await? + .collect(), + ); + } + + info!("Loading fork states"); + + let fork_states: Vec<_> = fork_states + .into_iter() + .map(|map| { + map.into_iter() + .filter_map(|(k, id)| { + services() + .rooms + .short + .get_statekey_from_short(k) + .map(|(ty, st_key)| ((ty.to_string().into(), st_key), id)) + .ok() + }) + .collect::>() + }) + .collect(); + + info!("Resolving state"); + + let lock = services().globals.stateres_mutex.lock(); + let state = match state_res::resolve( + room_version_id, + &fork_states, + auth_chain_sets, + |id| { + let res = services().rooms.timeline.get_pdu(id); + if let Err(e) = &res { + error!("LOOK AT ME Failed to fetch event: {}", e); + } + res.ok().flatten() + }, + ) { + Ok(new_state) => new_state, + Err(_) => { + return Err(Error::bad_database("State resolution failed, either an event could not be found or deserialization")); + } + }; + + drop(lock); + + info!("State resolution done. Compressing state"); + + state + .into_iter() + .map(|((event_type, state_key), event_id)| { + let shortstatekey = services().rooms.short.get_or_create_shortstatekey( + &event_type.to_string().into(), + &state_key, + )?; + services() + .rooms + .state_compressor + .compress_state_event(shortstatekey, &event_id) + }) + .collect::>()? + }; + + // Set the new room state to the resolved state + if update_state { + info!("Forcing new room state"); + let (sstatehash, new, removed) = services() + .rooms + .state_compressor + .save_state(room_id, new_room_state)?; + services() + .rooms + .state + .force_state(room_id, sstatehash, new, removed, &state_lock) + .await?; + } + } + + info!("Appending pdu to timeline"); extremities.insert(incoming_pdu.event_id.clone()); // Now that the event has passed all auth it is added into the timeline. 
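For reference, the resolved state above is persisted through the state compressor, which stores each (shortstatekey, shorteventid) pair in a compact fixed-size form. A minimal sketch of that idea follows; the type alias, helper name and exact byte layout are assumptions for illustration, the real definition lives in rooms::state_compressor.

const HALF: usize = std::mem::size_of::<u64>();
type ExampleCompressedStateEvent = [u8; 2 * HALF];

fn example_compress(shortstatekey: u64, shorteventid: u64) -> ExampleCompressedStateEvent {
    // Big-endian shortstatekey followed by big-endian shorteventid, so entries for the
    // same state key stay adjacent when sorted.
    let mut out = [0u8; 2 * HALF];
    out[..HALF].copy_from_slice(&shortstatekey.to_be_bytes());
    out[HALF..].copy_from_slice(&shorteventid.to_be_bytes());
    out
}

Working on compressed short ids keeps each state snapshot small enough to diff and cache cheaply, which is what the save_state and force_state calls above rely on.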
@@ -905,101 +1002,13 @@ impl Service { &state_lock, )?; - debug!("Appended incoming pdu"); + info!("Appended incoming pdu"); // Event has passed all auth/stateres checks drop(state_lock); Ok(pdu_id) } - async fn resolve_state( - &self, - room_id: &RoomId, - room_version_id: &RoomVersionId, - incoming_state: HashMap>, - ) -> Result>> { - debug!("Loading current room state ids"); - let current_sstatehash = services() - .rooms - .state - .get_room_shortstatehash(room_id)? - .expect("every room has state"); - - let current_state_ids = services() - .rooms - .state_accessor - .state_full_ids(current_sstatehash) - .await?; - - let fork_states = [current_state_ids, incoming_state]; - - let mut auth_chain_sets = Vec::new(); - for state in &fork_states { - auth_chain_sets.push( - services() - .rooms - .auth_chain - .get_auth_chain(room_id, state.iter().map(|(_, id)| id.clone()).collect()) - .await? - .collect(), - ); - } - - debug!("Loading fork states"); - - let fork_states: Vec<_> = fork_states - .into_iter() - .map(|map| { - map.into_iter() - .filter_map(|(k, id)| { - services() - .rooms - .short - .get_statekey_from_short(k) - .map(|(ty, st_key)| ((ty.to_string().into(), st_key), id)) - .ok() - }) - .collect::>() - }) - .collect(); - - debug!("Resolving state"); - - let lock = services().globals.stateres_mutex.lock(); - let state = match state_res::resolve(room_version_id, &fork_states, auth_chain_sets, |id| { - let res = services().rooms.timeline.get_pdu(id); - if let Err(e) = &res { - error!("LOOK AT ME Failed to fetch event: {}", e); - } - res.ok().flatten() - }) { - Ok(new_state) => new_state, - Err(_) => { - return Err(Error::bad_database("State resolution failed, either an event could not be found or deserialization")); - } - }; - - drop(lock); - - debug!("State resolution done. Compressing state"); - - let new_room_state = state - .into_iter() - .map(|((event_type, state_key), event_id)| { - let shortstatekey = services() - .rooms - .short - .get_or_create_shortstatekey(&event_type.to_string().into(), &state_key)?; - services() - .rooms - .state_compressor - .compress_state_event(shortstatekey, &event_id) - }) - .collect::>()?; - - Ok(Arc::new(new_room_state)) - } - /// Find the event and auth it. Once the event is validated (steps 1 - 8) /// it is appended to the outliers Tree. /// @@ -1016,7 +1025,6 @@ impl Service { events: &'a [Arc], create_event: &'a PduEvent, room_id: &'a RoomId, - room_version_id: &'a RoomVersionId, pub_key_map: &'a RwLock>>, ) -> AsyncRecursiveType<'a, Vec<(Arc, Option>)>> { @@ -1036,6 +1044,26 @@ impl Service { let mut pdus = vec![]; for id in events { + if let Some((time, tries)) = services() + .globals + .bad_event_ratelimiter + .read() + .unwrap() + .get(&**id) + { + // Exponential backoff + let mut min_elapsed_duration = + Duration::from_secs(5 * 60) * (*tries) * (*tries); + if min_elapsed_duration > Duration::from_secs(60 * 60 * 24) { + min_elapsed_duration = Duration::from_secs(60 * 60 * 24); + } + + if time.elapsed() < min_elapsed_duration { + info!("Backing off from {}", id); + continue; + } + } + // a. Look in the main timeline (pduid_pdu tree) // b. 
Look at outlier pdu tree // (get_pdu_json checks both) @@ -1053,26 +1081,6 @@ impl Service { let mut events_all = HashSet::new(); let mut i = 0; while let Some(next_id) = todo_auth_events.pop() { - if let Some((time, tries)) = services() - .globals - .bad_event_ratelimiter - .read() - .unwrap() - .get(&*next_id) - { - // Exponential backoff - let mut min_elapsed_duration = - Duration::from_secs(5 * 60) * (*tries) * (*tries); - if min_elapsed_duration > Duration::from_secs(60 * 60 * 24) { - min_elapsed_duration = Duration::from_secs(60 * 60 * 24); - } - - if time.elapsed() < min_elapsed_duration { - info!("Backing off from {}", next_id); - continue; - } - } - if events_all.contains(&next_id) { continue; } @@ -1083,7 +1091,7 @@ impl Service { } if let Ok(Some(_)) = services().rooms.timeline.get_pdu(&next_id) { - trace!("Found {} in db", next_id); + trace!("Found {} in db", id); continue; } @@ -1092,16 +1100,14 @@ impl Service { .sending .send_federation_request( origin, - get_event::v1::Request { - event_id: (*next_id).to_owned(), - }, + get_event::v1::Request { event_id: &next_id }, ) .await { Ok(res) => { info!("Got {} over federation", next_id); let (calculated_event_id, value) = - match pdu::gen_event_id_canonical_json(&res.pdu, room_version_id) { + match pdu::gen_event_id_canonical_json(&res.pdu) { Ok(t) => t, Err(_) => { back_off((*next_id).to_owned()); @@ -1142,26 +1148,6 @@ impl Service { } for (next_id, value) in events_in_reverse_order.iter().rev() { - if let Some((time, tries)) = services() - .globals - .bad_event_ratelimiter - .read() - .unwrap() - .get(&**next_id) - { - // Exponential backoff - let mut min_elapsed_duration = - Duration::from_secs(5 * 60) * (*tries) * (*tries); - if min_elapsed_duration > Duration::from_secs(60 * 60 * 24) { - min_elapsed_duration = Duration::from_secs(60 * 60 * 24); - } - - if time.elapsed() < min_elapsed_duration { - info!("Backing off from {}", next_id); - continue; - } - } - match self .handle_outlier_pdu( origin, @@ -1169,7 +1155,6 @@ impl Service { next_id, room_id, value.clone(), - true, pub_key_map, ) .await @@ -1195,7 +1180,6 @@ impl Service { origin: &ServerName, create_event: &PduEvent, room_id: &RoomId, - room_version_id: &RoomVersionId, pub_key_map: &RwLock>>, initial_set: Vec>, ) -> Result<( @@ -1221,15 +1205,12 @@ impl Service { &[prev_event_id.clone()], create_event, room_id, - room_version_id, pub_key_map, ) .await .pop() { - self.check_room_id(room_id, &pdu)?; - - if amount > services().globals.max_fetch_prev_events() { + if amount > 100 { // Max limit reached warn!("Max prev event limit reached!"); graph.insert(prev_event_id.clone(), HashSet::new()); @@ -1276,6 +1257,7 @@ impl Service { // This return value is the key used for sorting events, // events are then sorted by power level, time, // and lexically by event_id. 
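        // A tiny self-contained illustration of why a tuple works as that key: tuples order
        // lexicographically, i.e. by power level first, then timestamp, then event id
        // (all values below are made up).
        fn example_sort_key_ordering() {
            let earlier = (100_i64, 1_000_u64, "$a:example.org");
            let later = (100_i64, 2_000_u64, "$b:example.org");
            // Equal power level, so the earlier origin_server_ts sorts first.
            assert!(earlier < later);
        }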
+ println!("{}", event_id); Ok(( int!(0), MilliSecondsSinceUnixEpoch( @@ -1462,12 +1444,12 @@ impl Service { } if servers.is_empty() { - info!("We had all keys locally"); + // We had all keys locally return Ok(()); } for server in services().globals.trusted_servers() { - info!("Asking batch signing keys from trusted server {}", server); + trace!("Asking batch signing keys from trusted server {}", server); if let Ok(keys) = services() .sending .send_federation_request( @@ -1510,12 +1492,10 @@ impl Service { } if servers.is_empty() { - info!("Trusted server supplied all signing keys"); return Ok(()); } } - info!("Asking individual servers for signing keys: {servers:?}"); let mut futures: FuturesUnordered<_> = servers .into_keys() .map(|server| async move { @@ -1530,26 +1510,20 @@ impl Service { .collect(); while let Some(result) = futures.next().await { - info!("Received new result"); if let (Ok(get_keys_response), origin) = result { - info!("Result is from {origin}"); - if let Ok(key) = get_keys_response.server_key.deserialize() { - let result: BTreeMap<_, _> = services() - .globals - .add_signing_key(&origin, key)? - .into_iter() - .map(|(k, v)| (k.to_string(), v.key)) - .collect(); - pub_key_map - .write() - .map_err(|_| Error::bad_database("RwLock is poisoned."))? - .insert(origin.to_string(), result); - } - } - info!("Done handling result"); - } + let result: BTreeMap<_, _> = services() + .globals + .add_signing_key(&origin, get_keys_response.server_key.deserialize().unwrap())? + .into_iter() + .map(|(k, v)| (k.to_string(), v.key)) + .collect(); - info!("Search for signing keys done"); + pub_key_map + .write() + .map_err(|_| Error::bad_database("RwLock is poisoned."))? + .insert(origin.to_string(), result); + } + } Ok(()) } @@ -1574,21 +1548,12 @@ impl Service { } }; - if acl_event_content.allow.is_empty() { - // Ignore broken acl events - return Ok(()); - } - if acl_event_content.is_allowed(server_name) { Ok(()) } else { - info!( - "Server {} was denied by room ACL in {}", - server_name, room_id - ); Err(Error::BadRequest( ErrorKind::Forbidden, - "Server was denied by room ACL", + "Server was denied by ACL", )) } } @@ -1710,7 +1675,7 @@ impl Service { .send_federation_request( server, get_remote_server_keys::v2::Request::new( - origin.to_owned(), + origin, MilliSecondsSinceUnixEpoch::from_system_time( SystemTime::now() .checked_add(Duration::from_secs(3600)) @@ -1758,15 +1723,4 @@ impl Service { "Failed to find public key for server", )) } - - fn check_room_id(&self, room_id: &RoomId, pdu: &PduEvent) -> Result<()> { - if pdu.room_id != room_id { - warn!("Found event from room {} in room {}", pdu.room_id, room_id); - return Err(Error::BadRequest( - ErrorKind::InvalidParam, - "Event has wrong room id", - )); - } - Ok(()) - } } diff --git a/src/service/rooms/lazy_loading/mod.rs b/src/service/rooms/lazy_loading/mod.rs index e6e4f896..701a7340 100644 --- a/src/service/rooms/lazy_loading/mod.rs +++ b/src/service/rooms/lazy_loading/mod.rs @@ -9,13 +9,11 @@ use ruma::{DeviceId, OwnedDeviceId, OwnedRoomId, OwnedUserId, RoomId, UserId}; use crate::Result; -use super::timeline::PduCount; - pub struct Service { pub db: &'static dyn Data, pub lazy_load_waiting: - Mutex>>, + Mutex>>, } impl Service { @@ -38,7 +36,7 @@ impl Service { device_id: &DeviceId, room_id: &RoomId, lazy_load: HashSet, - count: PduCount, + count: u64, ) { self.lazy_load_waiting.lock().unwrap().insert( ( @@ -57,7 +55,7 @@ impl Service { user_id: &UserId, device_id: &DeviceId, room_id: &RoomId, - since: PduCount, + since: u64, ) 
-> Result<()> { if let Some(user_ids) = self.lazy_load_waiting.lock().unwrap().remove(&( user_id.to_owned(), diff --git a/src/service/rooms/mod.rs b/src/service/rooms/mod.rs index f0739841..8956e4d8 100644 --- a/src/service/rooms/mod.rs +++ b/src/service/rooms/mod.rs @@ -9,12 +9,10 @@ pub mod outlier; pub mod pdu_metadata; pub mod search; pub mod short; -pub mod spaces; pub mod state; pub mod state_accessor; pub mod state_cache; pub mod state_compressor; -pub mod threads; pub mod timeline; pub mod user; @@ -34,7 +32,6 @@ pub trait Data: + state_cache::Data + state_compressor::Data + timeline::Data - + threads::Data + user::Data { } @@ -56,7 +53,5 @@ pub struct Service { pub state_cache: state_cache::Service, pub state_compressor: state_compressor::Service, pub timeline: timeline::Service, - pub threads: threads::Service, - pub spaces: spaces::Service, pub user: user::Service, } diff --git a/src/service/rooms/pdu_metadata/data.rs b/src/service/rooms/pdu_metadata/data.rs index 6c4cb3ce..b157938f 100644 --- a/src/service/rooms/pdu_metadata/data.rs +++ b/src/service/rooms/pdu_metadata/data.rs @@ -1,17 +1,9 @@ use std::sync::Arc; -use crate::{service::rooms::timeline::PduCount, PduEvent, Result}; -use ruma::{EventId, RoomId, UserId}; +use crate::Result; +use ruma::{EventId, RoomId}; pub trait Data: Send + Sync { - fn add_relation(&self, from: u64, to: u64) -> Result<()>; - fn relations_until<'a>( - &'a self, - user_id: &'a UserId, - room_id: u64, - target: u64, - until: PduCount, - ) -> Result> + 'a>>; fn mark_as_referenced(&self, room_id: &RoomId, event_ids: &[Arc]) -> Result<()>; fn is_event_referenced(&self, room_id: &RoomId, event_id: &EventId) -> Result; fn mark_event_soft_failed(&self, event_id: &EventId) -> Result<()>; diff --git a/src/service/rooms/pdu_metadata/mod.rs b/src/service/rooms/pdu_metadata/mod.rs index 9ce74f4d..b816678c 100644 --- a/src/service/rooms/pdu_metadata/mod.rs +++ b/src/service/rooms/pdu_metadata/mod.rs @@ -2,171 +2,15 @@ mod data; use std::sync::Arc; pub use data::Data; -use ruma::{ - api::client::relations::get_relating_events, - events::{relation::RelationType, TimelineEventType}, - EventId, RoomId, UserId, -}; -use serde::Deserialize; +use ruma::{EventId, RoomId}; -use crate::{services, PduEvent, Result}; - -use super::timeline::PduCount; +use crate::Result; pub struct Service { pub db: &'static dyn Data, } -#[derive(Clone, Debug, Deserialize)] -struct ExtractRelType { - rel_type: RelationType, -} -#[derive(Clone, Debug, Deserialize)] -struct ExtractRelatesToEventId { - #[serde(rename = "m.relates_to")] - relates_to: ExtractRelType, -} - impl Service { - #[tracing::instrument(skip(self, from, to))] - pub fn add_relation(&self, from: PduCount, to: PduCount) -> Result<()> { - match (from, to) { - (PduCount::Normal(f), PduCount::Normal(t)) => self.db.add_relation(f, t), - _ => { - // TODO: Relations with backfilled pdus - - Ok(()) - } - } - } - - pub fn paginate_relations_with_filter( - &self, - sender_user: &UserId, - room_id: &RoomId, - target: &EventId, - filter_event_type: Option, - filter_rel_type: Option, - from: PduCount, - to: Option, - limit: usize, - ) -> Result { - let next_token; - - //TODO: Fix ruma: match body.dir { - match ruma::api::Direction::Backward { - ruma::api::Direction::Forward => { - let events_after: Vec<_> = services() - .rooms - .pdu_metadata - .relations_until(sender_user, room_id, target, from)? 
// TODO: should be relations_after - .filter(|r| { - r.as_ref().map_or(true, |(_, pdu)| { - filter_event_type.as_ref().map_or(true, |t| &pdu.kind == t) - && if let Ok(content) = - serde_json::from_str::( - pdu.content.get(), - ) - { - filter_rel_type - .as_ref() - .map_or(true, |r| &content.relates_to.rel_type == r) - } else { - false - } - }) - }) - .take(limit) - .filter_map(|r| r.ok()) // Filter out buggy events - .filter(|(_, pdu)| { - services() - .rooms - .state_accessor - .user_can_see_event(sender_user, &room_id, &pdu.event_id) - .unwrap_or(false) - }) - .take_while(|&(k, _)| Some(k) != to) // Stop at `to` - .collect(); - - next_token = events_after.last().map(|(count, _)| count).copied(); - - let events_after: Vec<_> = events_after - .into_iter() - .rev() // relations are always most recent first - .map(|(_, pdu)| pdu.to_message_like_event()) - .collect(); - - Ok(get_relating_events::v1::Response { - chunk: events_after, - next_batch: next_token.map(|t| t.stringify()), - prev_batch: Some(from.stringify()), - }) - } - ruma::api::Direction::Backward => { - let events_before: Vec<_> = services() - .rooms - .pdu_metadata - .relations_until(sender_user, &room_id, target, from)? - .filter(|r| { - r.as_ref().map_or(true, |(_, pdu)| { - filter_event_type.as_ref().map_or(true, |t| &pdu.kind == t) - && if let Ok(content) = - serde_json::from_str::( - pdu.content.get(), - ) - { - filter_rel_type - .as_ref() - .map_or(true, |r| &content.relates_to.rel_type == r) - } else { - false - } - }) - }) - .take(limit) - .filter_map(|r| r.ok()) // Filter out buggy events - .filter(|(_, pdu)| { - services() - .rooms - .state_accessor - .user_can_see_event(sender_user, &room_id, &pdu.event_id) - .unwrap_or(false) - }) - .take_while(|&(k, _)| Some(k) != to) // Stop at `to` - .collect(); - - next_token = events_before.last().map(|(count, _)| count).copied(); - - let events_before: Vec<_> = events_before - .into_iter() - .map(|(_, pdu)| pdu.to_message_like_event()) - .collect(); - - Ok(get_relating_events::v1::Response { - chunk: events_before, - next_batch: next_token.map(|t| t.stringify()), - prev_batch: Some(from.stringify()), - }) - } - } - } - - pub fn relations_until<'a>( - &'a self, - user_id: &'a UserId, - room_id: &'a RoomId, - target: &'a EventId, - until: PduCount, - ) -> Result> + 'a> { - let room_id = services().rooms.short.get_or_create_shortroomid(room_id)?; - let target = match services().rooms.timeline.get_pdu_count(target)? 
{ - Some(PduCount::Normal(c)) => c, - // TODO: Support backfilled relations - _ => 0, // This will result in an empty iterator - }; - self.db.relations_until(user_id, room_id, target, until) - } - #[tracing::instrument(skip(self, room_id, event_ids))] pub fn mark_as_referenced(&self, room_id: &RoomId, event_ids: &[Arc]) -> Result<()> { self.db.mark_as_referenced(room_id, event_ids) diff --git a/src/service/rooms/spaces/mod.rs b/src/service/rooms/spaces/mod.rs deleted file mode 100644 index 53232f46..00000000 --- a/src/service/rooms/spaces/mod.rs +++ /dev/null @@ -1,505 +0,0 @@ -use std::sync::{Arc, Mutex}; - -use lru_cache::LruCache; -use ruma::{ - api::{ - client::{ - error::ErrorKind, - space::{get_hierarchy, SpaceHierarchyRoomsChunk}, - }, - federation, - }, - events::{ - room::{ - avatar::RoomAvatarEventContent, - canonical_alias::RoomCanonicalAliasEventContent, - create::RoomCreateEventContent, - guest_access::{GuestAccess, RoomGuestAccessEventContent}, - history_visibility::{HistoryVisibility, RoomHistoryVisibilityEventContent}, - join_rules::{self, AllowRule, JoinRule, RoomJoinRulesEventContent}, - topic::RoomTopicEventContent, - }, - space::child::SpaceChildEventContent, - StateEventType, - }, - space::SpaceRoomJoinRule, - OwnedRoomId, RoomId, UserId, -}; - -use tracing::{debug, error, warn}; - -use crate::{services, Error, PduEvent, Result}; - -pub enum CachedJoinRule { - //Simplified(SpaceRoomJoinRule), - Full(JoinRule), -} - -pub struct CachedSpaceChunk { - chunk: SpaceHierarchyRoomsChunk, - children: Vec, - join_rule: CachedJoinRule, -} - -pub struct Service { - pub roomid_spacechunk_cache: Mutex>>, -} - -impl Service { - pub async fn get_hierarchy( - &self, - sender_user: &UserId, - room_id: &RoomId, - limit: usize, - skip: usize, - max_depth: usize, - suggested_only: bool, - ) -> Result { - let mut left_to_skip = skip; - - let mut rooms_in_path = Vec::new(); - let mut stack = vec![vec![room_id.to_owned()]]; - let mut results = Vec::new(); - - while let Some(current_room) = { - while stack.last().map_or(false, |s| s.is_empty()) { - stack.pop(); - } - if !stack.is_empty() { - stack.last_mut().and_then(|s| s.pop()) - } else { - None - } - } { - rooms_in_path.push(current_room.clone()); - if results.len() >= limit { - break; - } - - if let Some(cached) = self - .roomid_spacechunk_cache - .lock() - .unwrap() - .get_mut(¤t_room.to_owned()) - .as_ref() - { - if let Some(cached) = cached { - let allowed = match &cached.join_rule { - //CachedJoinRule::Simplified(s) => { - //self.handle_simplified_join_rule(s, sender_user, ¤t_room)? - //} - CachedJoinRule::Full(f) => { - self.handle_join_rule(f, sender_user, ¤t_room)? - } - }; - if allowed { - if left_to_skip > 0 { - left_to_skip -= 1; - } else { - results.push(cached.chunk.clone()); - } - if rooms_in_path.len() < max_depth { - stack.push(cached.children.clone()); - } - } - } - continue; - } - - if let Some(current_shortstatehash) = services() - .rooms - .state - .get_room_shortstatehash(¤t_room)? - { - let state = services() - .rooms - .state_accessor - .state_full_ids(current_shortstatehash) - .await?; - - let mut children_ids = Vec::new(); - let mut children_pdus = Vec::new(); - for (key, id) in state { - let (event_type, state_key) = - services().rooms.short.get_statekey_from_short(key)?; - if event_type != StateEventType::SpaceChild { - continue; - } - - let pdu = services() - .rooms - .timeline - .get_pdu(&id)? 
- .ok_or_else(|| Error::bad_database("Event in space state not found"))?; - - if serde_json::from_str::(pdu.content.get()) - .ok() - .and_then(|c| c.via) - .map_or(true, |v| v.is_empty()) - { - continue; - } - - if let Ok(room_id) = OwnedRoomId::try_from(state_key) { - children_ids.push(room_id); - children_pdus.push(pdu); - } - } - - // TODO: Sort children - children_ids.reverse(); - - let chunk = self.get_room_chunk(sender_user, ¤t_room, children_pdus); - if let Ok(chunk) = chunk { - if left_to_skip > 0 { - left_to_skip -= 1; - } else { - results.push(chunk.clone()); - } - let join_rule = services() - .rooms - .state_accessor - .room_state_get(¤t_room, &StateEventType::RoomJoinRules, "")? - .map(|s| { - serde_json::from_str(s.content.get()) - .map(|c: RoomJoinRulesEventContent| c.join_rule) - .map_err(|e| { - error!("Invalid room join rule event in database: {}", e); - Error::BadDatabase("Invalid room join rule event in database.") - }) - }) - .transpose()? - .unwrap_or(JoinRule::Invite); - - self.roomid_spacechunk_cache.lock().unwrap().insert( - current_room.clone(), - Some(CachedSpaceChunk { - chunk, - children: children_ids.clone(), - join_rule: CachedJoinRule::Full(join_rule), - }), - ); - } - - if rooms_in_path.len() < max_depth { - stack.push(children_ids); - } - } else { - let server = current_room.server_name(); - if server == services().globals.server_name() { - continue; - } - if !results.is_empty() { - // Early return so the client can see some data already - break; - } - warn!("Asking {server} for /hierarchy"); - if let Ok(response) = services() - .sending - .send_federation_request( - &server, - federation::space::get_hierarchy::v1::Request { - room_id: current_room.to_owned(), - suggested_only, - }, - ) - .await - { - warn!("Got response from {server} for /hierarchy\n{response:?}"); - let chunk = SpaceHierarchyRoomsChunk { - canonical_alias: response.room.canonical_alias, - name: response.room.name, - num_joined_members: response.room.num_joined_members, - room_id: response.room.room_id, - topic: response.room.topic, - world_readable: response.room.world_readable, - guest_can_join: response.room.guest_can_join, - avatar_url: response.room.avatar_url, - join_rule: response.room.join_rule.clone(), - room_type: response.room.room_type, - children_state: response.room.children_state, - }; - let children = response - .children - .iter() - .map(|c| c.room_id.clone()) - .collect::>(); - - let join_rule = match response.room.join_rule { - SpaceRoomJoinRule::Invite => JoinRule::Invite, - SpaceRoomJoinRule::Knock => JoinRule::Knock, - SpaceRoomJoinRule::Private => JoinRule::Private, - SpaceRoomJoinRule::Restricted => { - JoinRule::Restricted(join_rules::Restricted { - allow: response - .room - .allowed_room_ids - .into_iter() - .map(|room| AllowRule::room_membership(room)) - .collect(), - }) - } - SpaceRoomJoinRule::KnockRestricted => { - JoinRule::KnockRestricted(join_rules::Restricted { - allow: response - .room - .allowed_room_ids - .into_iter() - .map(|room| AllowRule::room_membership(room)) - .collect(), - }) - } - SpaceRoomJoinRule::Public => JoinRule::Public, - _ => return Err(Error::BadServerResponse("Unknown join rule")), - }; - if self.handle_join_rule(&join_rule, sender_user, ¤t_room)? 
{ - if left_to_skip > 0 { - left_to_skip -= 1; - } else { - results.push(chunk.clone()); - } - if rooms_in_path.len() < max_depth { - stack.push(children.clone()); - } - } - - self.roomid_spacechunk_cache.lock().unwrap().insert( - current_room.clone(), - Some(CachedSpaceChunk { - chunk, - children, - join_rule: CachedJoinRule::Full(join_rule), - }), - ); - - /* TODO: - for child in response.children { - roomid_spacechunk_cache.insert( - current_room.clone(), - CachedSpaceChunk { - chunk: child.chunk, - children, - join_rule, - }, - ); - } - */ - } else { - self.roomid_spacechunk_cache - .lock() - .unwrap() - .insert(current_room.clone(), None); - } - } - } - - Ok(get_hierarchy::v1::Response { - next_batch: if results.is_empty() { - None - } else { - Some((skip + results.len()).to_string()) - }, - rooms: results, - }) - } - - fn get_room_chunk( - &self, - sender_user: &UserId, - room_id: &RoomId, - children: Vec>, - ) -> Result { - Ok(SpaceHierarchyRoomsChunk { - canonical_alias: services() - .rooms - .state_accessor - .room_state_get(&room_id, &StateEventType::RoomCanonicalAlias, "")? - .map_or(Ok(None), |s| { - serde_json::from_str(s.content.get()) - .map(|c: RoomCanonicalAliasEventContent| c.alias) - .map_err(|_| { - Error::bad_database("Invalid canonical alias event in database.") - }) - })?, - name: services().rooms.state_accessor.get_name(&room_id)?, - num_joined_members: services() - .rooms - .state_cache - .room_joined_count(&room_id)? - .unwrap_or_else(|| { - warn!("Room {} has no member count", room_id); - 0 - }) - .try_into() - .expect("user count should not be that big"), - room_id: room_id.to_owned(), - topic: services() - .rooms - .state_accessor - .room_state_get(&room_id, &StateEventType::RoomTopic, "")? - .map_or(Ok(None), |s| { - serde_json::from_str(s.content.get()) - .map(|c: RoomTopicEventContent| Some(c.topic)) - .map_err(|_| { - error!("Invalid room topic event in database for room {}", room_id); - Error::bad_database("Invalid room topic event in database.") - }) - })?, - world_readable: services() - .rooms - .state_accessor - .room_state_get(&room_id, &StateEventType::RoomHistoryVisibility, "")? - .map_or(Ok(false), |s| { - serde_json::from_str(s.content.get()) - .map(|c: RoomHistoryVisibilityEventContent| { - c.history_visibility == HistoryVisibility::WorldReadable - }) - .map_err(|_| { - Error::bad_database( - "Invalid room history visibility event in database.", - ) - }) - })?, - guest_can_join: services() - .rooms - .state_accessor - .room_state_get(&room_id, &StateEventType::RoomGuestAccess, "")? - .map_or(Ok(false), |s| { - serde_json::from_str(s.content.get()) - .map(|c: RoomGuestAccessEventContent| { - c.guest_access == GuestAccess::CanJoin - }) - .map_err(|_| { - Error::bad_database("Invalid room guest access event in database.") - }) - })?, - avatar_url: services() - .rooms - .state_accessor - .room_state_get(&room_id, &StateEventType::RoomAvatar, "")? - .map(|s| { - serde_json::from_str(s.content.get()) - .map(|c: RoomAvatarEventContent| c.url) - .map_err(|_| Error::bad_database("Invalid room avatar event in database.")) - }) - .transpose()? - // url is now an Option so we must flatten - .flatten(), - join_rule: { - let join_rule = services() - .rooms - .state_accessor - .room_state_get(&room_id, &StateEventType::RoomJoinRules, "")? 
- .map(|s| { - serde_json::from_str(s.content.get()) - .map(|c: RoomJoinRulesEventContent| c.join_rule) - .map_err(|e| { - error!("Invalid room join rule event in database: {}", e); - Error::BadDatabase("Invalid room join rule event in database.") - }) - }) - .transpose()? - .unwrap_or(JoinRule::Invite); - - if !self.handle_join_rule(&join_rule, sender_user, room_id)? { - debug!("User is not allowed to see room {room_id}"); - // This error will be caught later - return Err(Error::BadRequest( - ErrorKind::Forbidden, - "User is not allowed to see the room", - )); - } - - self.translate_joinrule(&join_rule)? - }, - room_type: services() - .rooms - .state_accessor - .room_state_get(&room_id, &StateEventType::RoomCreate, "")? - .map(|s| { - serde_json::from_str::(s.content.get()).map_err(|e| { - error!("Invalid room create event in database: {}", e); - Error::BadDatabase("Invalid room create event in database.") - }) - }) - .transpose()? - .and_then(|e| e.room_type), - children_state: children - .into_iter() - .map(|pdu| pdu.to_stripped_spacechild_state_event()) - .collect(), - }) - } - - fn translate_joinrule(&self, join_rule: &JoinRule) -> Result { - match join_rule { - JoinRule::Invite => Ok(SpaceRoomJoinRule::Invite), - JoinRule::Knock => Ok(SpaceRoomJoinRule::Knock), - JoinRule::Private => Ok(SpaceRoomJoinRule::Private), - JoinRule::Restricted(_) => Ok(SpaceRoomJoinRule::Restricted), - JoinRule::KnockRestricted(_) => Ok(SpaceRoomJoinRule::KnockRestricted), - JoinRule::Public => Ok(SpaceRoomJoinRule::Public), - _ => Err(Error::BadServerResponse("Unknown join rule")), - } - } - - fn handle_simplified_join_rule( - &self, - join_rule: &SpaceRoomJoinRule, - sender_user: &UserId, - room_id: &RoomId, - ) -> Result { - let allowed = match join_rule { - SpaceRoomJoinRule::Public => true, - SpaceRoomJoinRule::Knock => true, - SpaceRoomJoinRule::Invite => services() - .rooms - .state_cache - .is_joined(sender_user, &room_id)?, - _ => false, - }; - - Ok(allowed) - } - - fn handle_join_rule( - &self, - join_rule: &JoinRule, - sender_user: &UserId, - room_id: &RoomId, - ) -> Result { - if self.handle_simplified_join_rule( - &self.translate_joinrule(join_rule)?, - sender_user, - room_id, - )? 
{ - return Ok(true); - } - - match join_rule { - JoinRule::Restricted(r) => { - for rule in &r.allow { - match rule { - join_rules::AllowRule::RoomMembership(rm) => { - if let Ok(true) = services() - .rooms - .state_cache - .is_joined(sender_user, &rm.room_id) - { - return Ok(true); - } - } - _ => {} - } - } - - Ok(false) - } - JoinRule::KnockRestricted(_) => { - // TODO: Check rules - Ok(false) - } - _ => Ok(false), - } - } -} diff --git a/src/service/rooms/state/mod.rs b/src/service/rooms/state/mod.rs index 48c60203..3072b80f 100644 --- a/src/service/rooms/state/mod.rs +++ b/src/service/rooms/state/mod.rs @@ -6,10 +6,9 @@ use std::{ pub use data::Data; use ruma::{ - api::client::error::ErrorKind, events::{ room::{create::RoomCreateEventContent, member::MembershipState}, - AnyStrippedStateEvent, StateEventType, TimelineEventType, + AnyStrippedStateEvent, RoomEventType, StateEventType, }, serde::Raw, state_res::{self, StateMap}, @@ -33,11 +32,11 @@ impl Service { &self, room_id: &RoomId, shortstatehash: u64, - statediffnew: Arc>, - _statediffremoved: Arc>, + statediffnew: HashSet, + _statediffremoved: HashSet, state_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex ) -> Result<()> { - for event_id in statediffnew.iter().filter_map(|new| { + for event_id in statediffnew.into_iter().filter_map(|new| { services() .rooms .state_compressor @@ -50,6 +49,10 @@ impl Service { None => continue, }; + if pdu.get("type").and_then(|val| val.as_str()) != Some("m.room.member") { + continue; + } + let pdu: PduEvent = match serde_json::from_str( &serde_json::to_string(&pdu).expect("CanonicalJsonObj can be serialized to JSON"), ) { @@ -57,49 +60,34 @@ impl Service { Err(_) => continue, }; - match pdu.kind { - TimelineEventType::RoomMember => { - #[derive(Deserialize)] - struct ExtractMembership { - membership: MembershipState, - } - - let membership = - match serde_json::from_str::(pdu.content.get()) { - Ok(e) => e.membership, - Err(_) => continue, - }; - - let state_key = match pdu.state_key { - Some(k) => k, - None => continue, - }; - - let user_id = match UserId::parse(state_key) { - Ok(id) => id, - Err(_) => continue, - }; - - services().rooms.state_cache.update_membership( - room_id, - &user_id, - membership, - &pdu.sender, - None, - false, - )?; - } - TimelineEventType::SpaceChild => { - services() - .rooms - .spaces - .roomid_spacechunk_cache - .lock() - .unwrap() - .remove(&pdu.room_id); - } - _ => continue, + #[derive(Deserialize)] + struct ExtractMembership { + membership: MembershipState, } + + let membership = match serde_json::from_str::(pdu.content.get()) { + Ok(e) => e.membership, + Err(_) => continue, + }; + + let state_key = match pdu.state_key { + Some(k) => k, + None => continue, + }; + + let user_id = match UserId::parse(state_key) { + Ok(id) => id, + Err(_) => continue, + }; + + services().rooms.state_cache.update_membership( + room_id, + &user_id, + membership, + &pdu.sender, + None, + false, + )?; } services().rooms.state_cache.update_joined_count(room_id)?; @@ -119,7 +107,7 @@ impl Service { &self, event_id: &EventId, room_id: &RoomId, - state_ids_compressed: Arc>, + state_ids_compressed: HashSet, ) -> Result { let shorteventid = services() .rooms @@ -164,9 +152,9 @@ impl Service { .copied() .collect(); - (Arc::new(statediffnew), Arc::new(statediffremoved)) + (statediffnew, statediffremoved) } else { - (state_ids_compressed, Arc::new(HashSet::new())) + (state_ids_compressed, HashSet::new()) }; 
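Editorial aside between these hunks: the `save_state_from_diff` call that follows (whose arguments this diff switches between `Arc<HashSet<CompressedStateEvent>>` and plain `HashSet`s) folds one state-diff layer into its parent layer; the actual folding loops appear later in the `state_compressor` hunk of this diff. Below is a minimal, self-contained sketch of that folding rule only, using plain `u64` values in place of `CompressedStateEvent`; it is an illustration, not code from this repository.

```rust
use std::collections::HashSet;

/// Fold a child layer's (added, removed) diff into its parent layer's diff.
/// Mirrors the folding loops shown in the state_compressor hunk of this diff.
fn fold_into_parent(
    mut parent_added: HashSet<u64>,
    mut parent_removed: HashSet<u64>,
    child_added: HashSet<u64>,
    child_removed: HashSet<u64>,
) -> (HashSet<u64>, HashSet<u64>) {
    for removed in child_removed {
        // If the parent added this entry, the add and the remove cancel out;
        // otherwise the parent layer now has to record the removal itself.
        if !parent_added.remove(&removed) {
            parent_removed.insert(removed);
        }
    }
    for added in child_added {
        // If the parent removed this entry, the remove and the add cancel out;
        // otherwise the parent layer now has to record the addition itself.
        if !parent_removed.remove(&added) {
            parent_added.insert(added);
        }
    }
    (parent_added, parent_removed)
}
```

The net effect is that opposite operations in adjacent layers cancel, so the merged parent diff never records an entry as both added and removed.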
services().rooms.state_compressor.save_state_from_diff( shortstatehash, @@ -246,8 +234,8 @@ impl Service { services().rooms.state_compressor.save_state_from_diff( shortstatehash, - Arc::new(statediffnew), - Arc::new(statediffremoved), + statediffnew, + statediffremoved, 2, states_parents, )?; @@ -332,7 +320,7 @@ impl Service { "", )?; - let create_event_content: RoomCreateEventContent = create_event + let create_event_content: Option = create_event .as_ref() .map(|create_event| { serde_json::from_str(create_event.content.get()).map_err(|e| { @@ -340,10 +328,11 @@ impl Service { Error::bad_database("Invalid create event in db.") }) }) - .transpose()? - .ok_or_else(|| Error::BadRequest(ErrorKind::InvalidParam, "No create event found"))?; - - Ok(create_event_content.room_version) + .transpose()?; + let room_version = create_event_content + .map(|create_event| create_event.room_version) + .ok_or(Error::BadDatabase("Invalid room version"))?; + Ok(room_version) } pub fn get_room_shortstatehash(&self, room_id: &RoomId) -> Result> { @@ -369,7 +358,7 @@ impl Service { pub fn get_auth_events( &self, room_id: &RoomId, - kind: &TimelineEventType, + kind: &RoomEventType, sender: &UserId, state_key: Option<&str>, content: &serde_json::value::RawValue, @@ -407,7 +396,7 @@ impl Service { .1; Ok(full_state - .iter() + .into_iter() .filter_map(|compressed| { services() .rooms diff --git a/src/service/rooms/state_accessor/data.rs b/src/service/rooms/state_accessor/data.rs index f3ae3c21..340b19c3 100644 --- a/src/service/rooms/state_accessor/data.rs +++ b/src/service/rooms/state_accessor/data.rs @@ -1,4 +1,7 @@ -use std::{collections::HashMap, sync::Arc}; +use std::{ + collections::{BTreeMap, HashMap}, + sync::Arc, +}; use async_trait::async_trait; use ruma::{events::StateEventType, EventId, RoomId}; @@ -9,7 +12,7 @@ use crate::{PduEvent, Result}; pub trait Data: Send + Sync { /// Builds a StateMap by iterating over all keys that start /// with state_hash, this gives the full state for the given state_hash. - async fn state_full_ids(&self, shortstatehash: u64) -> Result>>; + async fn state_full_ids(&self, shortstatehash: u64) -> Result>>; async fn state_full( &self, diff --git a/src/service/rooms/state_accessor/mod.rs b/src/service/rooms/state_accessor/mod.rs index a4a62fe4..1a9c4a9e 100644 --- a/src/service/rooms/state_accessor/mod.rs +++ b/src/service/rooms/state_accessor/mod.rs @@ -1,38 +1,22 @@ mod data; use std::{ - collections::HashMap, - sync::{Arc, Mutex}, + collections::{BTreeMap, HashMap}, + sync::Arc, }; pub use data::Data; -use lru_cache::LruCache; -use ruma::{ - events::{ - room::{ - avatar::RoomAvatarEventContent, - history_visibility::{HistoryVisibility, RoomHistoryVisibilityEventContent}, - member::{MembershipState, RoomMemberEventContent}, - name::RoomNameEventContent, - }, - StateEventType, - }, - EventId, OwnedServerName, OwnedUserId, RoomId, ServerName, UserId, -}; -use tracing::error; +use ruma::{events::StateEventType, EventId, RoomId}; -use crate::{services, Error, PduEvent, Result}; +use crate::{PduEvent, Result}; pub struct Service { pub db: &'static dyn Data, - pub server_visibility_cache: Mutex>, - pub user_visibility_cache: Mutex>, } impl Service { /// Builds a StateMap by iterating over all keys that start /// with state_hash, this gives the full state for the given state_hash. 
- #[tracing::instrument(skip(self))] - pub async fn state_full_ids(&self, shortstatehash: u64) -> Result>> { + pub async fn state_full_ids(&self, shortstatehash: u64) -> Result>> { self.db.state_full_ids(shortstatehash).await } @@ -55,6 +39,7 @@ impl Service { } /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). + #[tracing::instrument(skip(self))] pub fn state_get( &self, shortstatehash: u64, @@ -64,178 +49,6 @@ impl Service { self.db.state_get(shortstatehash, event_type, state_key) } - /// Get membership for given user in state - fn user_membership(&self, shortstatehash: u64, user_id: &UserId) -> Result { - self.state_get( - shortstatehash, - &StateEventType::RoomMember, - user_id.as_str(), - )? - .map_or(Ok(MembershipState::Leave), |s| { - serde_json::from_str(s.content.get()) - .map(|c: RoomMemberEventContent| c.membership) - .map_err(|_| Error::bad_database("Invalid room membership event in database.")) - }) - } - - /// The user was a joined member at this state (potentially in the past) - fn user_was_joined(&self, shortstatehash: u64, user_id: &UserId) -> bool { - self.user_membership(shortstatehash, user_id) - .map(|s| s == MembershipState::Join) - .unwrap_or_default() // Return sensible default, i.e. false - } - - /// The user was an invited or joined room member at this state (potentially - /// in the past) - fn user_was_invited(&self, shortstatehash: u64, user_id: &UserId) -> bool { - self.user_membership(shortstatehash, user_id) - .map(|s| s == MembershipState::Join || s == MembershipState::Invite) - .unwrap_or_default() // Return sensible default, i.e. false - } - - /// Whether a server is allowed to see an event through federation, based on - /// the room's history_visibility at that event's state. - #[tracing::instrument(skip(self, origin, room_id, event_id))] - pub fn server_can_see_event( - &self, - origin: &ServerName, - room_id: &RoomId, - event_id: &EventId, - ) -> Result { - let shortstatehash = match self.pdu_shortstatehash(event_id)? { - Some(shortstatehash) => shortstatehash, - None => return Ok(true), - }; - - if let Some(visibility) = self - .server_visibility_cache - .lock() - .unwrap() - .get_mut(&(origin.to_owned(), shortstatehash)) - { - return Ok(*visibility); - } - - let history_visibility = self - .state_get(shortstatehash, &StateEventType::RoomHistoryVisibility, "")? 
- .map_or(Ok(HistoryVisibility::Shared), |s| { - serde_json::from_str(s.content.get()) - .map(|c: RoomHistoryVisibilityEventContent| c.history_visibility) - .map_err(|_| { - Error::bad_database("Invalid history visibility event in database.") - }) - })?; - - let mut current_server_members = services() - .rooms - .state_cache - .room_members(room_id) - .filter_map(|r| r.ok()) - .filter(|member| member.server_name() == origin); - - let visibility = match history_visibility { - HistoryVisibility::WorldReadable | HistoryVisibility::Shared => true, - HistoryVisibility::Invited => { - // Allow if any member on requesting server was AT LEAST invited, else deny - current_server_members.any(|member| self.user_was_invited(shortstatehash, &member)) - } - HistoryVisibility::Joined => { - // Allow if any member on requested server was joined, else deny - current_server_members.any(|member| self.user_was_joined(shortstatehash, &member)) - } - _ => { - error!("Unknown history visibility {history_visibility}"); - false - } - }; - - self.server_visibility_cache - .lock() - .unwrap() - .insert((origin.to_owned(), shortstatehash), visibility); - - Ok(visibility) - } - - /// Whether a user is allowed to see an event, based on - /// the room's history_visibility at that event's state. - #[tracing::instrument(skip(self, user_id, room_id, event_id))] - pub fn user_can_see_event( - &self, - user_id: &UserId, - room_id: &RoomId, - event_id: &EventId, - ) -> Result { - let shortstatehash = match self.pdu_shortstatehash(event_id)? { - Some(shortstatehash) => shortstatehash, - None => return Ok(true), - }; - - if let Some(visibility) = self - .user_visibility_cache - .lock() - .unwrap() - .get_mut(&(user_id.to_owned(), shortstatehash)) - { - return Ok(*visibility); - } - - let currently_member = services().rooms.state_cache.is_joined(&user_id, &room_id)?; - - let history_visibility = self - .state_get(shortstatehash, &StateEventType::RoomHistoryVisibility, "")? - .map_or(Ok(HistoryVisibility::Shared), |s| { - serde_json::from_str(s.content.get()) - .map(|c: RoomHistoryVisibilityEventContent| c.history_visibility) - .map_err(|_| { - Error::bad_database("Invalid history visibility event in database.") - }) - })?; - - let visibility = match history_visibility { - HistoryVisibility::WorldReadable => true, - HistoryVisibility::Shared => currently_member, - HistoryVisibility::Invited => { - // Allow if any member on requesting server was AT LEAST invited, else deny - self.user_was_invited(shortstatehash, &user_id) - } - HistoryVisibility::Joined => { - // Allow if any member on requested server was joined, else deny - self.user_was_joined(shortstatehash, &user_id) - } - _ => { - error!("Unknown history visibility {history_visibility}"); - false - } - }; - - self.user_visibility_cache - .lock() - .unwrap() - .insert((user_id.to_owned(), shortstatehash), visibility); - - Ok(visibility) - } - - /// Whether a user is allowed to see an event, based on - /// the room's history_visibility at that event's state. - #[tracing::instrument(skip(self, user_id, room_id))] - pub fn user_can_see_state_events(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let currently_member = services().rooms.state_cache.is_joined(&user_id, &room_id)?; - - let history_visibility = self - .room_state_get(&room_id, &StateEventType::RoomHistoryVisibility, "")? 
- .map_or(Ok(HistoryVisibility::Shared), |s| { - serde_json::from_str(s.content.get()) - .map(|c: RoomHistoryVisibilityEventContent| c.history_visibility) - .map_err(|_| { - Error::bad_database("Invalid history visibility event in database.") - }) - })?; - - Ok(currently_member || history_visibility == HistoryVisibility::WorldReadable) - } - /// Returns the state hash for this pdu. pub fn pdu_shortstatehash(&self, event_id: &EventId) -> Result> { self.db.pdu_shortstatehash(event_id) @@ -271,42 +84,4 @@ impl Service { ) -> Result>> { self.db.room_state_get(room_id, event_type, state_key) } - - pub fn get_name(&self, room_id: &RoomId) -> Result> { - services() - .rooms - .state_accessor - .room_state_get(&room_id, &StateEventType::RoomName, "")? - .map_or(Ok(None), |s| { - serde_json::from_str(s.content.get()) - .map(|c: RoomNameEventContent| c.name) - .map_err(|_| Error::bad_database("Invalid room name event in database.")) - }) - } - - pub fn get_avatar(&self, room_id: &RoomId) -> Result> { - services() - .rooms - .state_accessor - .room_state_get(&room_id, &StateEventType::RoomAvatar, "")? - .map_or(Ok(None), |s| { - serde_json::from_str(s.content.get()) - .map_err(|_| Error::bad_database("Invalid room avatar event in database.")) - }) - } - - pub fn get_member( - &self, - room_id: &RoomId, - user_id: &UserId, - ) -> Result> { - services() - .rooms - .state_accessor - .room_state_get(&room_id, &StateEventType::RoomMember, user_id.as_str())? - .map_or(Ok(None), |s| { - serde_json::from_str(s.content.get()) - .map_err(|_| Error::bad_database("Invalid room member event in database.")) - }) - } } diff --git a/src/service/rooms/state_cache/data.rs b/src/service/rooms/state_cache/data.rs index d8bb4a44..42de56d2 100644 --- a/src/service/rooms/state_cache/data.rs +++ b/src/service/rooms/state_cache/data.rs @@ -37,7 +37,7 @@ pub trait Data: Send + Sync { room_id: &RoomId, ) -> Box> + 'a>; - fn server_in_room(&self, server: &ServerName, room_id: &RoomId) -> Result; + fn server_in_room<'a>(&'a self, server: &ServerName, room_id: &RoomId) -> Result; /// Returns an iterator of all rooms a server participates in (as far as we know). fn server_rooms<'a>( diff --git a/src/service/rooms/state_cache/mod.rs b/src/service/rooms/state_cache/mod.rs index ef1ad61e..6c9bed35 100644 --- a/src/service/rooms/state_cache/mod.rs +++ b/src/service/rooms/state_cache/mod.rs @@ -14,7 +14,6 @@ use ruma::{ serde::Raw, OwnedRoomId, OwnedServerName, OwnedUserId, RoomId, ServerName, UserId, }; -use tracing::warn; use crate::{services, Error, Result}; @@ -89,9 +88,8 @@ impl Service { RoomAccountDataEventType::Tag, )? .map(|event| { - serde_json::from_str(event.get()).map_err(|e| { - warn!("Invalid account data event in db: {e:?}"); - Error::BadDatabase("Invalid account data event in db.") + serde_json::from_str(event.get()).map_err(|_| { + Error::bad_database("Invalid account data event in db.") }) }) { @@ -115,9 +113,8 @@ impl Service { GlobalAccountDataEventType::Direct.to_string().into(), )? .map(|event| { - serde_json::from_str::(event.get()).map_err(|e| { - warn!("Invalid account data event in db: {e:?}"); - Error::BadDatabase("Invalid account data event in db.") + serde_json::from_str::(event.get()).map_err(|_| { + Error::bad_database("Invalid account data event in db.") }) }) { @@ -158,10 +155,8 @@ impl Service { .into(), )? 
.map(|event| { - serde_json::from_str::(event.get()).map_err(|e| { - warn!("Invalid account data event in db: {e:?}"); - Error::BadDatabase("Invalid account data event in db.") - }) + serde_json::from_str::(event.get()) + .map_err(|_| Error::bad_database("Invalid account data event in db.")) }) .transpose()? .map_or(false, |ignored| { @@ -169,7 +164,7 @@ impl Service { .content .ignored_users .iter() - .any(|(user, _details)| user == sender) + .any(|user| user == sender) }); if is_ignored { diff --git a/src/service/rooms/state_compressor/data.rs b/src/service/rooms/state_compressor/data.rs index d221d576..ce164c6d 100644 --- a/src/service/rooms/state_compressor/data.rs +++ b/src/service/rooms/state_compressor/data.rs @@ -1,12 +1,12 @@ -use std::{collections::HashSet, sync::Arc}; +use std::collections::HashSet; use super::CompressedStateEvent; use crate::Result; pub struct StateDiff { pub parent: Option, - pub added: Arc>, - pub removed: Arc>, + pub added: HashSet, + pub removed: HashSet, } pub trait Data: Send + Sync { diff --git a/src/service/rooms/state_compressor/mod.rs b/src/service/rooms/state_compressor/mod.rs index d29b020b..356f32c8 100644 --- a/src/service/rooms/state_compressor/mod.rs +++ b/src/service/rooms/state_compressor/mod.rs @@ -20,10 +20,10 @@ pub struct Service { LruCache< u64, Vec<( - u64, // sstatehash - Arc>, // full state - Arc>, // added - Arc>, // removed + u64, // sstatehash + HashSet, // full state + HashSet, // added + HashSet, // removed )>, >, >, @@ -39,10 +39,10 @@ impl Service { shortstatehash: u64, ) -> Result< Vec<( - u64, // sstatehash - Arc>, // full state - Arc>, // added - Arc>, // removed + u64, // sstatehash + HashSet, // full state + HashSet, // added + HashSet, // removed )>, > { if let Some(r) = self @@ -62,19 +62,13 @@ impl Service { if let Some(parent) = parent { let mut response = self.load_shortstatehash_info(parent)?; - let mut state = (*response.last().unwrap().1).clone(); + let mut state = response.last().unwrap().1.clone(); state.extend(added.iter().copied()); - let removed = (*removed).clone(); for r in &removed { state.remove(r); } - response.push((shortstatehash, Arc::new(state), added, Arc::new(removed))); - - self.stateinfo_cache - .lock() - .unwrap() - .insert(shortstatehash, response.clone()); + response.push((shortstatehash, state, added, removed)); Ok(response) } else { @@ -141,14 +135,14 @@ impl Service { pub fn save_state_from_diff( &self, shortstatehash: u64, - statediffnew: Arc>, - statediffremoved: Arc>, + statediffnew: HashSet, + statediffremoved: HashSet, diff_to_sibling: usize, mut parent_states: Vec<( - u64, // sstatehash - Arc>, // full state - Arc>, // added - Arc>, // removed + u64, // sstatehash + HashSet, // full state + HashSet, // added + HashSet, // removed )>, ) -> Result<()> { let diffsum = statediffnew.len() + statediffremoved.len(); @@ -158,29 +152,29 @@ impl Service { // To many layers, we have to go deeper let parent = parent_states.pop().unwrap(); - let mut parent_new = (*parent.2).clone(); - let mut parent_removed = (*parent.3).clone(); + let mut parent_new = parent.2; + let mut parent_removed = parent.3; - for removed in statediffremoved.iter() { - if !parent_new.remove(removed) { + for removed in statediffremoved { + if !parent_new.remove(&removed) { // It was not added in the parent and we removed it - parent_removed.insert(removed.clone()); + parent_removed.insert(removed); } // Else it was added in the parent and we removed it again. 
We can forget this change } - for new in statediffnew.iter() { - if !parent_removed.remove(new) { + for new in statediffnew { + if !parent_removed.remove(&new) { // It was not touched in the parent and we added it - parent_new.insert(new.clone()); + parent_new.insert(new); } // Else it was removed in the parent and we added it again. We can forget this change } self.save_state_from_diff( shortstatehash, - Arc::new(parent_new), - Arc::new(parent_removed), + parent_new, + parent_removed, diffsum, parent_states, )?; @@ -211,29 +205,29 @@ impl Service { if diffsum * diffsum >= 2 * diff_to_sibling * parent_diff { // Diff too big, we replace above layer(s) - let mut parent_new = (*parent.2).clone(); - let mut parent_removed = (*parent.3).clone(); + let mut parent_new = parent.2; + let mut parent_removed = parent.3; - for removed in statediffremoved.iter() { - if !parent_new.remove(removed) { + for removed in statediffremoved { + if !parent_new.remove(&removed) { // It was not added in the parent and we removed it - parent_removed.insert(removed.clone()); + parent_removed.insert(removed); } // Else it was added in the parent and we removed it again. We can forget this change } - for new in statediffnew.iter() { - if !parent_removed.remove(new) { + for new in statediffnew { + if !parent_removed.remove(&new) { // It was not touched in the parent and we added it - parent_new.insert(new.clone()); + parent_new.insert(new); } // Else it was removed in the parent and we added it again. We can forget this change } self.save_state_from_diff( shortstatehash, - Arc::new(parent_new), - Arc::new(parent_removed), + parent_new, + parent_removed, diffsum, parent_states, )?; @@ -256,11 +250,11 @@ impl Service { pub fn save_state( &self, room_id: &RoomId, - new_state_ids_compressed: Arc>, + new_state_ids_compressed: HashSet, ) -> Result<( u64, - Arc>, - Arc>, + HashSet, + HashSet, )> { let previous_shortstatehash = services().rooms.state.get_room_shortstatehash(room_id)?; @@ -277,11 +271,7 @@ impl Service { .get_or_create_shortstatehash(&state_hash)?; if Some(new_shortstatehash) == previous_shortstatehash { - return Ok(( - new_shortstatehash, - Arc::new(HashSet::new()), - Arc::new(HashSet::new()), - )); + return Ok((new_shortstatehash, HashSet::new(), HashSet::new())); } let states_parents = previous_shortstatehash @@ -300,9 +290,9 @@ impl Service { .copied() .collect(); - (Arc::new(statediffnew), Arc::new(statediffremoved)) + (statediffnew, statediffremoved) } else { - (new_state_ids_compressed, Arc::new(HashSet::new())) + (new_state_ids_compressed, HashSet::new()) }; if !already_existed { diff --git a/src/service/rooms/threads/data.rs b/src/service/rooms/threads/data.rs deleted file mode 100644 index 9221e8e8..00000000 --- a/src/service/rooms/threads/data.rs +++ /dev/null @@ -1,15 +0,0 @@ -use crate::{PduEvent, Result}; -use ruma::{api::client::threads::get_threads::v1::IncludeThreads, OwnedUserId, RoomId, UserId}; - -pub trait Data: Send + Sync { - fn threads_until<'a>( - &'a self, - user_id: &'a UserId, - room_id: &'a RoomId, - until: u64, - include: &'a IncludeThreads, - ) -> Result> + 'a>>; - - fn update_participants(&self, root_id: &[u8], participants: &[OwnedUserId]) -> Result<()>; - fn get_participants(&self, root_id: &[u8]) -> Result>>; -} diff --git a/src/service/rooms/threads/mod.rs b/src/service/rooms/threads/mod.rs deleted file mode 100644 index fb703839..00000000 --- a/src/service/rooms/threads/mod.rs +++ /dev/null @@ -1,116 +0,0 @@ -mod data; - -pub use data::Data; -use ruma::{ - 
api::client::{error::ErrorKind, threads::get_threads::v1::IncludeThreads}, - events::relation::BundledThread, - uint, CanonicalJsonValue, EventId, RoomId, UserId, -}; - -use serde_json::json; - -use crate::{services, Error, PduEvent, Result}; - -pub struct Service { - pub db: &'static dyn Data, -} - -impl Service { - pub fn threads_until<'a>( - &'a self, - user_id: &'a UserId, - room_id: &'a RoomId, - until: u64, - include: &'a IncludeThreads, - ) -> Result> + 'a> { - self.db.threads_until(user_id, room_id, until, include) - } - - pub fn add_to_thread<'a>(&'a self, root_event_id: &EventId, pdu: &PduEvent) -> Result<()> { - let root_id = &services() - .rooms - .timeline - .get_pdu_id(root_event_id)? - .ok_or_else(|| { - Error::BadRequest( - ErrorKind::InvalidParam, - "Invalid event id in thread message", - ) - })?; - - let root_pdu = services() - .rooms - .timeline - .get_pdu_from_id(root_id)? - .ok_or_else(|| { - Error::BadRequest(ErrorKind::InvalidParam, "Thread root pdu not found") - })?; - - let mut root_pdu_json = services() - .rooms - .timeline - .get_pdu_json_from_id(root_id)? - .ok_or_else(|| { - Error::BadRequest(ErrorKind::InvalidParam, "Thread root pdu not found") - })?; - - if let CanonicalJsonValue::Object(unsigned) = root_pdu_json - .entry("unsigned".to_owned()) - .or_insert_with(|| CanonicalJsonValue::Object(Default::default())) - { - if let Some(mut relations) = unsigned - .get("m.relations") - .and_then(|r| r.as_object()) - .and_then(|r| r.get("m.thread")) - .and_then(|relations| { - serde_json::from_value::(relations.clone().into()).ok() - }) - { - // Thread already existed - relations.count += uint!(1); - relations.latest_event = pdu.to_message_like_event(); - - let content = serde_json::to_value(relations).expect("to_value always works"); - - unsigned.insert( - "m.relations".to_owned(), - json!({ "m.thread": content }) - .try_into() - .expect("thread is valid json"), - ); - } else { - // New thread - let relations = BundledThread { - latest_event: pdu.to_message_like_event(), - count: uint!(1), - current_user_participated: true, - }; - - let content = serde_json::to_value(relations).expect("to_value always works"); - - unsigned.insert( - "m.relations".to_owned(), - json!({ "m.thread": content }) - .try_into() - .expect("thread is valid json"), - ); - } - - services() - .rooms - .timeline - .replace_pdu(root_id, &root_pdu_json, &root_pdu)?; - } - - let mut users = Vec::new(); - if let Some(userids) = self.db.get_participants(&root_id)? { - users.extend_from_slice(&userids); - users.push(pdu.sender.clone()); - } else { - users.push(root_pdu.sender); - users.push(pdu.sender.clone()); - } - - self.db.update_participants(root_id, &users) - } -} diff --git a/src/service/rooms/timeline/data.rs b/src/service/rooms/timeline/data.rs index afa2cfbf..9377af07 100644 --- a/src/service/rooms/timeline/data.rs +++ b/src/service/rooms/timeline/data.rs @@ -4,13 +4,12 @@ use ruma::{CanonicalJsonObject, EventId, OwnedUserId, RoomId, UserId}; use crate::{PduEvent, Result}; -use super::PduCount; - pub trait Data: Send + Sync { - fn last_timeline_count(&self, sender_user: &UserId, room_id: &RoomId) -> Result; + fn first_pdu_in_room(&self, room_id: &RoomId) -> Result>>; + fn last_timeline_count(&self, sender_user: &UserId, room_id: &RoomId) -> Result; /// Returns the `count` of this pdu's id. - fn get_pdu_count(&self, event_id: &EventId) -> Result>; + fn get_pdu_count(&self, event_id: &EventId) -> Result>; /// Returns the json of a pdu. 
fn get_pdu_json(&self, event_id: &EventId) -> Result>; @@ -39,6 +38,9 @@ pub trait Data: Send + Sync { /// Returns the pdu as a `BTreeMap`. fn get_pdu_json_from_id(&self, pdu_id: &[u8]) -> Result>; + /// Returns the `count` of this pdu's id. + fn pdu_count(&self, pdu_id: &[u8]) -> Result; + /// Adds a new pdu to the timeline fn append_pdu( &self, @@ -48,21 +50,17 @@ pub trait Data: Send + Sync { count: u64, ) -> Result<()>; - // Adds a new pdu to the backfilled timeline - fn prepend_backfill_pdu( - &self, - pdu_id: &[u8], - event_id: &EventId, - json: &CanonicalJsonObject, - ) -> Result<()>; - /// Removes a pdu and creates a new one with the same id. - fn replace_pdu( - &self, - pdu_id: &[u8], - pdu_json: &CanonicalJsonObject, - pdu: &PduEvent, - ) -> Result<()>; + fn replace_pdu(&self, pdu_id: &[u8], pdu: &PduEvent) -> Result<()>; + + /// Returns an iterator over all events in a room that happened after the event with id `since` + /// in chronological order. + fn pdus_since<'a>( + &'a self, + user_id: &UserId, + room_id: &RoomId, + since: u64, + ) -> Result, PduEvent)>> + 'a>>; /// Returns an iterator over all events and their tokens in a room that happened before the /// event with id `until` in reverse-chronological order. @@ -70,17 +68,15 @@ pub trait Data: Send + Sync { &'a self, user_id: &UserId, room_id: &RoomId, - until: PduCount, - ) -> Result> + 'a>>; + until: u64, + ) -> Result, PduEvent)>> + 'a>>; - /// Returns an iterator over all events in a room that happened after the event with id `from` - /// in chronological order. fn pdus_after<'a>( &'a self, user_id: &UserId, room_id: &RoomId, - from: PduCount, - ) -> Result> + 'a>>; + from: u64, + ) -> Result, PduEvent)>> + 'a>>; fn increment_notification_counts( &self, diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs index 25e1c54d..619dca28 100644 --- a/src/service/rooms/timeline/mod.rs +++ b/src/service/rooms/timeline/mod.rs @@ -1,133 +1,60 @@ mod data; -use std::{ - cmp::Ordering, - collections::{BTreeMap, HashMap}, -}; +use std::collections::HashMap; use std::{ collections::HashSet, - sync::{Arc, Mutex, RwLock}, + sync::{Arc, Mutex}, }; pub use data::Data; use regex::Regex; use ruma::{ - api::{client::error::ErrorKind, federation}, + api::client::error::ErrorKind, canonical_json::to_canonical_value, events::{ push_rules::PushRulesEvent, room::{ - create::RoomCreateEventContent, encrypted::Relation, member::MembershipState, + create::RoomCreateEventContent, member::MembershipState, power_levels::RoomPowerLevelsEventContent, }, - GlobalAccountDataEventType, StateEventType, TimelineEventType, + GlobalAccountDataEventType, RoomEventType, StateEventType, }, push::{Action, Ruleset, Tweak}, - serde::Base64, state_res, - state_res::{Event, RoomVersion}, - uint, user_id, CanonicalJsonObject, CanonicalJsonValue, EventId, OwnedEventId, OwnedRoomId, - OwnedServerName, RoomAliasId, RoomId, ServerName, UserId, + state_res::RoomVersion, + uint, CanonicalJsonObject, CanonicalJsonValue, EventId, OwnedEventId, OwnedRoomId, + OwnedServerName, RoomAliasId, RoomId, UserId, }; use serde::Deserialize; -use serde_json::value::{to_raw_value, RawValue as RawJsonValue}; +use serde_json::value::to_raw_value; use tokio::sync::MutexGuard; -use tracing::{error, info, warn}; +use tracing::{error, warn}; use crate::{ - api::server_server, service::pdu::{EventHash, PduBuilder}, services, utils, Error, PduEvent, Result, }; use super::state_compressor::CompressedStateEvent; -#[derive(Hash, PartialEq, Eq, Clone, Copy, Debug)] -pub 
enum PduCount { - Backfilled(u64), - Normal(u64), -} - -impl PduCount { - pub fn min() -> Self { - Self::Backfilled(u64::MAX) - } - pub fn max() -> Self { - Self::Normal(u64::MAX) - } - - pub fn try_from_string(token: &str) -> Result { - if token.starts_with('-') { - token[1..].parse().map(PduCount::Backfilled) - } else { - token.parse().map(PduCount::Normal) - } - .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid pagination token.")) - } - - pub fn stringify(&self) -> String { - match self { - PduCount::Backfilled(x) => format!("-{x}"), - PduCount::Normal(x) => x.to_string(), - } - } -} - -impl PartialOrd for PduCount { - fn partial_cmp(&self, other: &Self) -> Option { - Some(self.cmp(other)) - } -} - -impl Ord for PduCount { - fn cmp(&self, other: &Self) -> Ordering { - match (self, other) { - (PduCount::Normal(s), PduCount::Normal(o)) => s.cmp(o), - (PduCount::Backfilled(s), PduCount::Backfilled(o)) => o.cmp(s), - (PduCount::Normal(_), PduCount::Backfilled(_)) => Ordering::Greater, - (PduCount::Backfilled(_), PduCount::Normal(_)) => Ordering::Less, - } - } -} -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn comparisons() { - assert!(PduCount::Normal(1) < PduCount::Normal(2)); - assert!(PduCount::Backfilled(2) < PduCount::Backfilled(1)); - assert!(PduCount::Normal(1) > PduCount::Backfilled(1)); - assert!(PduCount::Backfilled(1) < PduCount::Normal(1)); - } -} - pub struct Service { pub db: &'static dyn Data, - pub lasttimelinecount_cache: Mutex>, + pub lasttimelinecount_cache: Mutex>, } impl Service { #[tracing::instrument(skip(self))] pub fn first_pdu_in_room(&self, room_id: &RoomId) -> Result>> { - self.all_pdus(&user_id!("@doesntmatter:conduit.rs"), &room_id)? - .next() - .map(|o| o.map(|(_, p)| Arc::new(p))) - .transpose() + self.db.first_pdu_in_room(room_id) } #[tracing::instrument(skip(self))] - pub fn last_timeline_count(&self, sender_user: &UserId, room_id: &RoomId) -> Result { + pub fn last_timeline_count(&self, sender_user: &UserId, room_id: &RoomId) -> Result { self.db.last_timeline_count(sender_user, room_id) } - /// Returns the `count` of this pdu's id. - pub fn get_pdu_count(&self, event_id: &EventId) -> Result> { - self.db.get_pdu_count(event_id) - } - // TODO Is this the same as the function above? /* #[tracing::instrument(skip(self))] @@ -151,6 +78,11 @@ impl Service { } */ + /// Returns the `count` of this pdu's id. + pub fn get_pdu_count(&self, event_id: &EventId) -> Result> { + self.db.get_pdu_count(event_id) + } + /// Returns the json of a pdu. pub fn get_pdu_json(&self, event_id: &EventId) -> Result> { self.db.get_pdu_json(event_id) @@ -195,15 +127,15 @@ impl Service { self.db.get_pdu_json_from_id(pdu_id) } + /// Returns the `count` of this pdu's id. + pub fn pdu_count(&self, pdu_id: &[u8]) -> Result { + self.db.pdu_count(pdu_id) + } + /// Removes a pdu and creates a new one with the same id. #[tracing::instrument(skip(self))] - pub fn replace_pdu( - &self, - pdu_id: &[u8], - pdu_json: &CanonicalJsonObject, - pdu: &PduEvent, - ) -> Result<()> { - self.db.replace_pdu(pdu_id, pdu_json, pdu) + fn replace_pdu(&self, pdu_id: &[u8], pdu: &PduEvent) -> Result<()> { + self.db.replace_pdu(pdu_id, pdu) } /// Creates a new persisted data unit and adds it to a room. @@ -357,7 +289,9 @@ impl Service { &pdu.room_id, )? 
{ match action { - Action::Notify => notify = true, + Action::DontNotify => notify = false, + // TODO: Implement proper support for coalesce + Action::Notify | Action::Coalesce => notify = true, Action::SetTweak(Tweak::Highlight(true)) => { highlight = true; } @@ -382,23 +316,12 @@ impl Service { .increment_notification_counts(&pdu.room_id, notifies, highlights)?; match pdu.kind { - TimelineEventType::RoomRedaction => { + RoomEventType::RoomRedaction => { if let Some(redact_id) = &pdu.redacts { self.redact_pdu(redact_id, pdu)?; } } - TimelineEventType::SpaceChild => { - if let Some(_state_key) = &pdu.state_key { - services() - .rooms - .spaces - .roomid_spacechunk_cache - .lock() - .unwrap() - .remove(&pdu.room_id); - } - } - TimelineEventType::RoomMember => { + RoomEventType::RoomMember => { if let Some(state_key) = &pdu.state_key { #[derive(Deserialize)] struct ExtractMembership { @@ -432,7 +355,7 @@ impl Service { )?; } } - TimelineEventType::RoomMessage => { + RoomEventType::RoomMessage => { #[derive(Deserialize)] struct ExtractBody { body: Option, @@ -455,10 +378,7 @@ impl Service { )?; let server_user = format!("@conduit:{}", services().globals.server_name()); - let to_conduit = body.starts_with(&format!("{server_user}: ")) - || body.starts_with(&format!("{server_user} ")) - || body == format!("{server_user}:") - || body == format!("{server_user}"); + let to_conduit = body.starts_with(&format!("{}: ", server_user)); // This will evaluate to false if the emergency password is set up so that // the administrator can execute commands as conduit @@ -473,62 +393,6 @@ impl Service { _ => {} } - // Update Relationships - #[derive(Deserialize)] - struct ExtractRelatesTo { - #[serde(rename = "m.relates_to")] - relates_to: Relation, - } - - #[derive(Clone, Debug, Deserialize)] - struct ExtractEventId { - event_id: OwnedEventId, - } - #[derive(Clone, Debug, Deserialize)] - struct ExtractRelatesToEventId { - #[serde(rename = "m.relates_to")] - relates_to: ExtractEventId, - } - - if let Ok(content) = serde_json::from_str::(pdu.content.get()) { - if let Some(related_pducount) = services() - .rooms - .timeline - .get_pdu_count(&content.relates_to.event_id)? - { - services() - .rooms - .pdu_metadata - .add_relation(PduCount::Normal(count2), related_pducount)?; - } - } - - if let Ok(content) = serde_json::from_str::(pdu.content.get()) { - match content.relates_to { - Relation::Reply { in_reply_to } => { - // We need to do it again here, because replies don't have - // event_id as a top level field - if let Some(related_pducount) = services() - .rooms - .timeline - .get_pdu_count(&in_reply_to.event_id)? - { - services() - .rooms - .pdu_metadata - .add_relation(PduCount::Normal(count2), related_pducount)?; - } - } - Relation::Thread(thread) => { - services() - .rooms - .threads - .add_to_thread(&thread.event_id, pdu)?; - } - _ => {} // TODO: Aggregate other types - } - } - for appservice in services().appservice.all()? { if services() .rooms @@ -543,7 +407,7 @@ impl Service { // If the RoomMember event has a non-empty state_key, it is targeted at someone. // If it is our appservice user, we send this PDU to it. 
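The hunk just above (in the `timeline` service) changes how the actions returned by push-rule evaluation are folded into the `notify`/`highlight` flags that feed the notification counters: the `+` side also handles `DontNotify` and treats `Coalesce` like `Notify` for now. A rough, self-contained illustration of that folding, using a simplified stand-in enum rather than ruma's real `Action`/`Tweak` types:

```rust
/// Simplified stand-in for ruma's push `Action`; illustration only.
enum SketchAction {
    Notify,
    DontNotify,
    Coalesce, // treated like Notify until coalescing is properly supported
    SetHighlight(bool),
    Other,
}

/// Fold a rule's actions into (notify, highlight), as the hunk above does.
fn fold_actions(actions: &[SketchAction]) -> (bool, bool) {
    let (mut notify, mut highlight) = (false, false);
    for action in actions {
        match action {
            SketchAction::DontNotify => notify = false,
            SketchAction::Notify | SketchAction::Coalesce => notify = true,
            SketchAction::SetHighlight(true) => highlight = true,
            _ => {}
        }
    }
    (notify, highlight)
}
```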
- if pdu.kind == TimelineEventType::RoomMember { + if pdu.kind == RoomEventType::RoomMember { if let Some(state_key_uid) = &pdu .state_key .as_ref() @@ -593,7 +457,7 @@ impl Service { let matching_users = |users: &Regex| { users.is_match(pdu.sender.as_str()) - || pdu.kind == TimelineEventType::RoomMember + || pdu.kind == RoomEventType::RoomMember && pdu .state_key .as_ref() @@ -819,92 +683,6 @@ impl Service { let (pdu, pdu_json) = self.create_hash_and_sign_event(pdu_builder, sender, room_id, state_lock)?; - let admin_room = services().rooms.alias.resolve_local_alias( - <&RoomAliasId>::try_from( - format!("#admins:{}", services().globals.server_name()).as_str(), - ) - .expect("#admins:server_name is a valid room alias"), - )?; - if admin_room.filter(|v| v == room_id).is_some() { - match pdu.event_type() { - TimelineEventType::RoomEncryption => { - warn!("Encryption is not allowed in the admins room"); - return Err(Error::BadRequest( - ErrorKind::Forbidden, - "Encryption is not allowed in the admins room.", - )); - } - TimelineEventType::RoomMember => { - #[derive(Deserialize)] - struct ExtractMembership { - membership: MembershipState, - } - - let target = pdu - .state_key() - .filter(|v| v.starts_with("@")) - .unwrap_or(sender.as_str()); - let server_name = services().globals.server_name(); - let server_user = format!("@conduit:{}", server_name); - let content = serde_json::from_str::(pdu.content.get()) - .map_err(|_| Error::bad_database("Invalid content in pdu."))?; - - if content.membership == MembershipState::Leave { - if target == &server_user { - warn!("Conduit user cannot leave from admins room"); - return Err(Error::BadRequest( - ErrorKind::Forbidden, - "Conduit user cannot leave from admins room.", - )); - } - - let count = services() - .rooms - .state_cache - .room_members(room_id) - .filter_map(|m| m.ok()) - .filter(|m| m.server_name() == server_name) - .filter(|m| m != target) - .count(); - if count < 2 { - warn!("Last admin cannot leave from admins room"); - return Err(Error::BadRequest( - ErrorKind::Forbidden, - "Last admin cannot leave from admins room.", - )); - } - } - - if content.membership == MembershipState::Ban && pdu.state_key().is_some() { - if target == &server_user { - warn!("Conduit user cannot be banned in admins room"); - return Err(Error::BadRequest( - ErrorKind::Forbidden, - "Conduit user cannot be banned in admins room.", - )); - } - - let count = services() - .rooms - .state_cache - .room_members(room_id) - .filter_map(|m| m.ok()) - .filter(|m| m.server_name() == server_name) - .filter(|m| m != target) - .count(); - if count < 2 { - warn!("Last admin cannot be banned in admins room"); - return Err(Error::BadRequest( - ErrorKind::Forbidden, - "Last admin cannot be banned in admins room.", - )); - } - } - } - _ => {} - } - } - // We append to state before appending the pdu, so we don't have a moment in time with the // pdu without it's state. This is okay because append_pdu can't fail. 
let statehashid = services().rooms.state.append_to_state(&pdu)?; @@ -933,7 +711,7 @@ impl Service { .collect(); // In case we are kicking or banning a user, we need to inform their server of the change - if pdu.kind == TimelineEventType::RoomMember { + if pdu.kind == RoomEventType::RoomMember { if let Some(state_key_uid) = &pdu .state_key .as_ref() @@ -959,7 +737,7 @@ impl Service { pdu: &PduEvent, pdu_json: CanonicalJsonObject, new_room_leaves: Vec, - state_ids_compressed: Arc>, + state_ids_compressed: HashSet, soft_fail: bool, state_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex ) -> Result>> { @@ -998,8 +776,19 @@ impl Service { &'a self, user_id: &UserId, room_id: &RoomId, - ) -> Result> + 'a> { - self.pdus_after(user_id, room_id, PduCount::min()) + ) -> Result, PduEvent)>> + 'a> { + self.pdus_since(user_id, room_id, 0) + } + + /// Returns an iterator over all events in a room that happened after the event with id `since` + /// in chronological order. + pub fn pdus_since<'a>( + &'a self, + user_id: &UserId, + room_id: &RoomId, + since: u64, + ) -> Result, PduEvent)>> + 'a> { + self.db.pdus_since(user_id, room_id, since) } /// Returns an iterator over all events and their tokens in a room that happened before the @@ -1009,8 +798,8 @@ impl Service { &'a self, user_id: &UserId, room_id: &RoomId, - until: PduCount, - ) -> Result> + 'a> { + until: u64, + ) -> Result, PduEvent)>> + 'a> { self.db.pdus_until(user_id, room_id, until) } @@ -1021,182 +810,22 @@ impl Service { &'a self, user_id: &UserId, room_id: &RoomId, - from: PduCount, - ) -> Result> + 'a> { + from: u64, + ) -> Result, PduEvent)>> + 'a> { self.db.pdus_after(user_id, room_id, from) } /// Replace a PDU with the redacted form. #[tracing::instrument(skip(self, reason))] pub fn redact_pdu(&self, event_id: &EventId, reason: &PduEvent) -> Result<()> { - // TODO: Don't reserialize, keep original json if let Some(pdu_id) = self.get_pdu_id(event_id)? { let mut pdu = self .get_pdu_from_id(&pdu_id)? .ok_or_else(|| Error::bad_database("PDU ID points to invalid PDU."))?; pdu.redact(reason)?; - self.replace_pdu( - &pdu_id, - &utils::to_canonical_object(&pdu).expect("PDU is an object"), - &pdu, - )?; + self.replace_pdu(&pdu_id, &pdu)?; } // If event does not exist, just noop Ok(()) } - - #[tracing::instrument(skip(self, room_id))] - pub async fn backfill_if_required(&self, room_id: &RoomId, from: PduCount) -> Result<()> { - let first_pdu = self - .all_pdus(&user_id!("@doesntmatter:conduit.rs"), &room_id)? - .next() - .expect("Room is not empty")?; - - if first_pdu.0 < from { - // No backfill required, there are still events between them - return Ok(()); - } - - let power_levels: RoomPowerLevelsEventContent = services() - .rooms - .state_accessor - .room_state_get(&room_id, &StateEventType::RoomPowerLevels, "")? - .map(|ev| { - serde_json::from_str(ev.content.get()) - .map_err(|_| Error::bad_database("invalid m.room.power_levels event")) - }) - .transpose()? 
- .unwrap_or_default(); - let mut admin_servers = power_levels - .users - .iter() - .filter(|(_, level)| **level > power_levels.users_default) - .map(|(user_id, _)| user_id.server_name()) - .collect::>(); - admin_servers.remove(services().globals.server_name()); - - // Request backfill - for backfill_server in admin_servers { - info!("Asking {backfill_server} for backfill"); - let response = services() - .sending - .send_federation_request( - backfill_server, - federation::backfill::get_backfill::v1::Request { - room_id: room_id.to_owned(), - v: vec![first_pdu.1.event_id.as_ref().to_owned()], - limit: uint!(100), - }, - ) - .await; - match response { - Ok(response) => { - let mut pub_key_map = RwLock::new(BTreeMap::new()); - for pdu in response.pdus { - if let Err(e) = self - .backfill_pdu(backfill_server, pdu, &mut pub_key_map) - .await - { - warn!("Failed to add backfilled pdu: {e}"); - } - } - return Ok(()); - } - Err(e) => { - warn!("{backfill_server} could not provide backfill: {e}"); - } - } - } - - info!("No servers could backfill"); - Ok(()) - } - - #[tracing::instrument(skip(self, pdu))] - pub async fn backfill_pdu( - &self, - origin: &ServerName, - pdu: Box, - pub_key_map: &RwLock>>, - ) -> Result<()> { - let (event_id, value, room_id) = server_server::parse_incoming_pdu(&pdu)?; - - // Lock so we cannot backfill the same pdu twice at the same time - let mutex = Arc::clone( - services() - .globals - .roomid_mutex_federation - .write() - .unwrap() - .entry(room_id.to_owned()) - .or_default(), - ); - let mutex_lock = mutex.lock().await; - - // Skip the PDU if we already have it as a timeline event - if let Some(pdu_id) = services().rooms.timeline.get_pdu_id(&event_id)? { - info!("We already know {event_id} at {pdu_id:?}"); - return Ok(()); - } - - services() - .rooms - .event_handler - .handle_incoming_pdu(origin, &event_id, &room_id, value, false, &pub_key_map) - .await?; - - let value = self.get_pdu_json(&event_id)?.expect("We just created it"); - let pdu = self.get_pdu(&event_id)?.expect("We just created it"); - - let shortroomid = services() - .rooms - .short - .get_shortroomid(&room_id)? 
- .expect("room exists"); - - let mutex_insert = Arc::clone( - services() - .globals - .roomid_mutex_insert - .write() - .unwrap() - .entry(room_id.clone()) - .or_default(), - ); - let insert_lock = mutex_insert.lock().unwrap(); - - let count = services().globals.next_count()?; - let mut pdu_id = shortroomid.to_be_bytes().to_vec(); - pdu_id.extend_from_slice(&0_u64.to_be_bytes()); - pdu_id.extend_from_slice(&(u64::MAX - count).to_be_bytes()); - - // Insert pdu - self.db.prepend_backfill_pdu(&pdu_id, &event_id, &value)?; - - drop(insert_lock); - - match pdu.kind { - TimelineEventType::RoomMessage => { - #[derive(Deserialize)] - struct ExtractBody { - body: Option, - } - - let content = serde_json::from_str::(pdu.content.get()) - .map_err(|_| Error::bad_database("Invalid content in pdu."))?; - - if let Some(body) = content.body { - services() - .rooms - .search - .index_pdu(shortroomid, &pdu_id, &body)?; - } - } - _ => {} - } - drop(mutex_lock); - - info!("Prepended backfill pdu"); - Ok(()) - } } diff --git a/src/service/sending/mod.rs b/src/service/sending/mod.rs index b4411444..b809fd50 100644 --- a/src/service/sending/mod.rs +++ b/src/service/sending/mod.rs @@ -18,15 +18,14 @@ use crate::{ use federation::transactions::send_transaction_message; use futures_util::{stream::FuturesUnordered, StreamExt}; -use base64::{engine::general_purpose, Engine as _}; - use ruma::{ api::{ appservice, federation::{ self, transactions::edu::{ - DeviceListUpdateContent, Edu, ReceiptContent, ReceiptData, ReceiptMap, + DeviceListUpdateContent, Edu, PresenceContent, PresenceUpdate, ReceiptContent, + ReceiptData, ReceiptMap, }, }, OutgoingRequest, @@ -42,7 +41,7 @@ use tokio::{ select, sync::{mpsc, Mutex, Semaphore}, }; -use tracing::{debug, error, warn}; +use tracing::{error, warn}; #[derive(Clone, Debug, PartialEq, Eq, Hash)] pub enum OutgoingKind { @@ -285,6 +284,34 @@ impl Service { .filter(|user_id| user_id.server_name() == services().globals.server_name()), ); + // Look for presence updates in this room + let presence_updates: Vec = services() + .rooms + .edus + .presence + .presence_since(&room_id, since)? 
+ .filter(|(user_id, _)| user_id.server_name() == services().globals.server_name()) + .map(|(user_id, presence_event)| PresenceUpdate { + user_id, + presence: presence_event.content.presence, + status_msg: presence_event.content.status_msg, + last_active_ago: presence_event + .content + .last_active_ago + .unwrap_or_else(|| uint!(0)), + currently_active: presence_event.content.currently_active.unwrap_or(false), + }) + .collect(); + + let presence_content = PresenceContent { + push: presence_updates, + }; + + events.push( + serde_json::to_vec(&Edu::Presence(presence_content)) + .expect("presence json can be serialized"), + ); + // Look for read receipts in this room for r in services() .rooms @@ -498,15 +525,18 @@ impl Service { ) })?, appservice::event::push_events::v1::Request { - events: pdu_jsons, - txn_id: (&*general_purpose::URL_SAFE_NO_PAD.encode(calculate_hash( - &events - .iter() - .map(|e| match e { - SendingEventType::Edu(b) | SendingEventType::Pdu(b) => &**b, - }) - .collect::>(), - ))) + events: &pdu_jsons, + txn_id: (&*base64::encode_config( + calculate_hash( + &events + .iter() + .map(|e| match e { + SendingEventType::Edu(b) | SendingEventType::Pdu(b) => &**b, + }) + .collect::>(), + ), + base64::URL_SAFE_NO_PAD, + )) .into(), }, ) @@ -637,11 +667,11 @@ impl Service { let response = server_server::send_request( server, send_transaction_message::v1::Request { - origin: services().globals.server_name().to_owned(), - pdus: pdu_jsons, - edus: edu_jsons, + origin: services().globals.server_name(), + pdus: &pdu_jsons, + edus: &edu_jsons, origin_server_ts: MilliSecondsSinceUnixEpoch::now(), - transaction_id: (&*general_purpose::URL_SAFE_NO_PAD.encode( + transaction_id: (&*base64::encode_config( calculate_hash( &events .iter() @@ -650,6 +680,7 @@ impl Service { }) .collect::>(), ), + base64::URL_SAFE_NO_PAD, )) .into(), }, @@ -681,18 +712,8 @@ impl Service { where T: Debug, { - debug!("Waiting for permit"); let permit = self.maximum_requests.acquire().await; - debug!("Got permit"); - let response = tokio::time::timeout( - Duration::from_secs(2 * 60), - server_server::send_request(destination, request), - ) - .await - .map_err(|_| { - warn!("Timeout waiting for server response of {destination}"); - Error::BadServerResponse("Timeout waiting for server response") - })?; + let response = server_server::send_request(destination, request).await; drop(permit); response diff --git a/src/service/uiaa/mod.rs b/src/service/uiaa/mod.rs index ed39af99..672290c3 100644 --- a/src/service/uiaa/mod.rs +++ b/src/service/uiaa/mod.rs @@ -5,7 +5,7 @@ pub use data::Data; use ruma::{ api::client::{ error::ErrorKind, - uiaa::{AuthData, AuthType, Password, UiaaInfo, UserIdentifier}, + uiaa::{AuthType, IncomingAuthData, IncomingPassword, IncomingUserIdentifier, UiaaInfo}, }, CanonicalJsonValue, DeviceId, UserId, }; @@ -44,7 +44,7 @@ impl Service { &self, user_id: &UserId, device_id: &DeviceId, - auth: &AuthData, + auth: &IncomingAuthData, uiaainfo: &UiaaInfo, ) -> Result<(bool, UiaaInfo)> { let mut uiaainfo = auth @@ -58,13 +58,13 @@ impl Service { match auth { // Find out what the user completed - AuthData::Password(Password { + IncomingAuthData::Password(IncomingPassword { identifier, password, .. 
}) => { let username = match identifier { - UserIdentifier::UserIdOrLocalpart(username) => username, + IncomingUserIdentifier::UserIdOrLocalpart(username) => username, _ => { return Err(Error::BadRequest( ErrorKind::Unrecognized, @@ -85,7 +85,7 @@ impl Service { argon2::verify_encoded(&hash, password.as_bytes()).unwrap_or(false); if !hash_matches { - uiaainfo.auth_error = Some(ruma::api::client::error::StandardErrorBody { + uiaainfo.auth_error = Some(ruma::api::client::error::ErrorBody { kind: ErrorKind::Forbidden, message: "Invalid username or password.".to_owned(), }); @@ -96,18 +96,7 @@ impl Service { // Password was correct! Let's add it to `completed` uiaainfo.completed.push(AuthType::Password); } - AuthData::RegistrationToken(t) => { - if Some(t.token.trim()) == services().globals.config.registration_token.as_deref() { - uiaainfo.completed.push(AuthType::RegistrationToken); - } else { - uiaainfo.auth_error = Some(ruma::api::client::error::StandardErrorBody { - kind: ErrorKind::Forbidden, - message: "Invalid registration token.".to_owned(), - }); - return Ok((false, uiaainfo)); - } - } - AuthData::Dummy(_) => { + IncomingAuthData::Dummy(_) => { uiaainfo.completed.push(AuthType::Dummy); } k => error!("type not supported: {:?}", k), diff --git a/src/service/users/data.rs b/src/service/users/data.rs index ddf941e3..bc1db33f 100644 --- a/src/service/users/data.rs +++ b/src/service/users/data.rs @@ -1,6 +1,6 @@ use crate::Result; use ruma::{ - api::client::{device::Device, filter::FilterDefinition}, + api::client::{device::Device, filter::IncomingFilterDefinition}, encryption::{CrossSigningKey, DeviceKeys, OneTimeKey}, events::AnyToDeviceEvent, serde::Raw, @@ -111,7 +111,6 @@ pub trait Data: Send + Sync { master_key: &Raw, self_signing_key: &Option>, user_signing_key: &Option>, - notify: bool, ) -> Result<()>; fn sign_key( @@ -137,30 +136,14 @@ pub trait Data: Send + Sync { device_id: &DeviceId, ) -> Result>>; - fn parse_master_key( - &self, - user_id: &UserId, - master_key: &Raw, - ) -> Result<(Vec, CrossSigningKey)>; - - fn get_key( - &self, - key: &[u8], - sender_user: Option<&UserId>, - user_id: &UserId, - allowed_signatures: &dyn Fn(&UserId) -> bool, - ) -> Result>>; - fn get_master_key( &self, - sender_user: Option<&UserId>, user_id: &UserId, allowed_signatures: &dyn Fn(&UserId) -> bool, ) -> Result>>; fn get_self_signing_key( &self, - sender_user: Option<&UserId>, user_id: &UserId, allowed_signatures: &dyn Fn(&UserId) -> bool, ) -> Result>>; @@ -208,7 +191,11 @@ pub trait Data: Send + Sync { ) -> Box> + 'a>; /// Creates a new sync filter. Returns the filter id. 
- fn create_filter(&self, user_id: &UserId, filter: &FilterDefinition) -> Result; + fn create_filter(&self, user_id: &UserId, filter: &IncomingFilterDefinition) -> Result; - fn get_filter(&self, user_id: &UserId, filter_id: &str) -> Result>; + fn get_filter( + &self, + user_id: &UserId, + filter_id: &str, + ) -> Result>; } diff --git a/src/service/users/mod.rs b/src/service/users/mod.rs index 6faa6d8e..9dcfa8be 100644 --- a/src/service/users/mod.rs +++ b/src/service/users/mod.rs @@ -1,41 +1,20 @@ mod data; -use std::{ - collections::{BTreeMap, BTreeSet}, - mem, - sync::{Arc, Mutex}, -}; +use std::{collections::BTreeMap, mem}; pub use data::Data; use ruma::{ - api::client::{ - device::Device, - error::ErrorKind, - filter::FilterDefinition, - sync::sync_events::{ - self, - v4::{ExtensionsConfig, SyncRequestList}, - }, - }, + api::client::{device::Device, error::ErrorKind, filter::IncomingFilterDefinition}, encryption::{CrossSigningKey, DeviceKeys, OneTimeKey}, events::AnyToDeviceEvent, serde::Raw, DeviceId, DeviceKeyAlgorithm, DeviceKeyId, OwnedDeviceId, OwnedDeviceKeyId, OwnedMxcUri, - OwnedRoomId, OwnedUserId, RoomAliasId, UInt, UserId, + OwnedUserId, RoomAliasId, UInt, UserId, }; use crate::{services, Error, Result}; -pub struct SlidingSyncCache { - lists: BTreeMap, - subscriptions: BTreeMap, - known_rooms: BTreeMap>, // For every room, the roomsince number - extensions: ExtensionsConfig, -} - pub struct Service { pub db: &'static dyn Data, - pub connections: - Mutex>>>, } impl Service { @@ -44,208 +23,6 @@ impl Service { self.db.exists(user_id) } - pub fn forget_sync_request_connection( - &self, - user_id: OwnedUserId, - device_id: OwnedDeviceId, - conn_id: String, - ) { - self.connections - .lock() - .unwrap() - .remove(&(user_id, device_id, conn_id)); - } - - pub fn update_sync_request_with_cache( - &self, - user_id: OwnedUserId, - device_id: OwnedDeviceId, - request: &mut sync_events::v4::Request, - ) -> BTreeMap> { - let Some(conn_id) = request.conn_id.clone() else { - return BTreeMap::new(); - }; - - let mut cache = self.connections.lock().unwrap(); - let cached = Arc::clone( - cache - .entry((user_id, device_id, conn_id)) - .or_insert_with(|| { - Arc::new(Mutex::new(SlidingSyncCache { - lists: BTreeMap::new(), - subscriptions: BTreeMap::new(), - known_rooms: BTreeMap::new(), - extensions: ExtensionsConfig::default(), - })) - }), - ); - let cached = &mut cached.lock().unwrap(); - drop(cache); - - for (list_id, list) in &mut request.lists { - if let Some(cached_list) = cached.lists.get(list_id) { - if list.sort.is_empty() { - list.sort = cached_list.sort.clone(); - }; - if list.room_details.required_state.is_empty() { - list.room_details.required_state = - cached_list.room_details.required_state.clone(); - }; - list.room_details.timeline_limit = list - .room_details - .timeline_limit - .or(cached_list.room_details.timeline_limit); - list.include_old_rooms = list - .include_old_rooms - .clone() - .or(cached_list.include_old_rooms.clone()); - match (&mut list.filters, cached_list.filters.clone()) { - (Some(list_filters), Some(cached_filters)) => { - list_filters.is_dm = list_filters.is_dm.or(cached_filters.is_dm); - if list_filters.spaces.is_empty() { - list_filters.spaces = cached_filters.spaces; - } - list_filters.is_encrypted = - list_filters.is_encrypted.or(cached_filters.is_encrypted); - list_filters.is_invite = - list_filters.is_invite.or(cached_filters.is_invite); - if list_filters.room_types.is_empty() { - list_filters.room_types = cached_filters.room_types; - } - if 
list_filters.not_room_types.is_empty() { - list_filters.not_room_types = cached_filters.not_room_types; - } - list_filters.room_name_like = list_filters - .room_name_like - .clone() - .or(cached_filters.room_name_like); - if list_filters.tags.is_empty() { - list_filters.tags = cached_filters.tags; - } - if list_filters.not_tags.is_empty() { - list_filters.not_tags = cached_filters.not_tags; - } - } - (_, Some(cached_filters)) => list.filters = Some(cached_filters), - (Some(list_filters), _) => list.filters = Some(list_filters.clone()), - (_, _) => {} - } - if list.bump_event_types.is_empty() { - list.bump_event_types = cached_list.bump_event_types.clone(); - }; - } - cached.lists.insert(list_id.clone(), list.clone()); - } - - cached - .subscriptions - .extend(request.room_subscriptions.clone().into_iter()); - request - .room_subscriptions - .extend(cached.subscriptions.clone().into_iter()); - - request.extensions.e2ee.enabled = request - .extensions - .e2ee - .enabled - .or(cached.extensions.e2ee.enabled); - - request.extensions.to_device.enabled = request - .extensions - .to_device - .enabled - .or(cached.extensions.to_device.enabled); - - request.extensions.account_data.enabled = request - .extensions - .account_data - .enabled - .or(cached.extensions.account_data.enabled); - request.extensions.account_data.lists = request - .extensions - .account_data - .lists - .clone() - .or(cached.extensions.account_data.lists.clone()); - request.extensions.account_data.rooms = request - .extensions - .account_data - .rooms - .clone() - .or(cached.extensions.account_data.rooms.clone()); - - cached.extensions = request.extensions.clone(); - - cached.known_rooms.clone() - } - - pub fn update_sync_subscriptions( - &self, - user_id: OwnedUserId, - device_id: OwnedDeviceId, - conn_id: String, - subscriptions: BTreeMap, - ) { - let mut cache = self.connections.lock().unwrap(); - let cached = Arc::clone( - cache - .entry((user_id, device_id, conn_id)) - .or_insert_with(|| { - Arc::new(Mutex::new(SlidingSyncCache { - lists: BTreeMap::new(), - subscriptions: BTreeMap::new(), - known_rooms: BTreeMap::new(), - extensions: ExtensionsConfig::default(), - })) - }), - ); - let cached = &mut cached.lock().unwrap(); - drop(cache); - - cached.subscriptions = subscriptions; - } - - pub fn update_sync_known_rooms( - &self, - user_id: OwnedUserId, - device_id: OwnedDeviceId, - conn_id: String, - list_id: String, - new_cached_rooms: BTreeSet, - globalsince: u64, - ) { - let mut cache = self.connections.lock().unwrap(); - let cached = Arc::clone( - cache - .entry((user_id, device_id, conn_id)) - .or_insert_with(|| { - Arc::new(Mutex::new(SlidingSyncCache { - lists: BTreeMap::new(), - subscriptions: BTreeMap::new(), - known_rooms: BTreeMap::new(), - extensions: ExtensionsConfig::default(), - })) - }), - ); - let cached = &mut cached.lock().unwrap(); - drop(cache); - - for (roomid, lastsince) in cached - .known_rooms - .entry(list_id.clone()) - .or_default() - .iter_mut() - { - if !new_cached_rooms.contains(roomid) { - *lastsince = 0; - } - } - let list = cached.known_rooms.entry(list_id).or_default(); - for roomid in new_cached_rooms { - list.insert(roomid, globalsince); - } - } - /// Check if account is deactivated pub fn is_deactivated(&self, user_id: &UserId) -> Result { self.db.is_deactivated(user_id) @@ -413,15 +190,9 @@ impl Service { master_key: &Raw, self_signing_key: &Option>, user_signing_key: &Option>, - notify: bool, ) -> Result<()> { - self.db.add_cross_signing_keys( - user_id, - master_key, - self_signing_key, 
- user_signing_key, - notify, - ) + self.db + .add_cross_signing_keys(user_id, master_key, self_signing_key, user_signing_key) } pub fn sign_key( @@ -455,43 +226,20 @@ impl Service { self.db.get_device_keys(user_id, device_id) } - pub fn parse_master_key( - &self, - user_id: &UserId, - master_key: &Raw, - ) -> Result<(Vec, CrossSigningKey)> { - self.db.parse_master_key(user_id, master_key) - } - - pub fn get_key( - &self, - key: &[u8], - sender_user: Option<&UserId>, - user_id: &UserId, - allowed_signatures: &dyn Fn(&UserId) -> bool, - ) -> Result>> { - self.db - .get_key(key, sender_user, user_id, allowed_signatures) - } - pub fn get_master_key( &self, - sender_user: Option<&UserId>, user_id: &UserId, allowed_signatures: &dyn Fn(&UserId) -> bool, ) -> Result>> { - self.db - .get_master_key(sender_user, user_id, allowed_signatures) + self.db.get_master_key(user_id, allowed_signatures) } pub fn get_self_signing_key( &self, - sender_user: Option<&UserId>, user_id: &UserId, allowed_signatures: &dyn Fn(&UserId) -> bool, ) -> Result>> { - self.db - .get_self_signing_key(sender_user, user_id, allowed_signatures) + self.db.get_self_signing_key(user_id, allowed_signatures) } pub fn get_user_signing_key(&self, user_id: &UserId) -> Result>> { @@ -578,7 +326,11 @@ impl Service { } /// Creates a new sync filter. Returns the filter id. - pub fn create_filter(&self, user_id: &UserId, filter: &FilterDefinition) -> Result { + pub fn create_filter( + &self, + user_id: &UserId, + filter: &IncomingFilterDefinition, + ) -> Result { self.db.create_filter(user_id, filter) } @@ -586,7 +338,7 @@ impl Service { &self, user_id: &UserId, filter_id: &str, - ) -> Result> { + ) -> Result> { self.db.get_filter(user_id, filter_id) } } @@ -594,7 +346,6 @@ impl Service { /// Ensure that a user only sees signatures from themselves and the target user pub fn clean_signatures bool>( cross_signing_key: &mut serde_json::Value, - sender_user: Option<&UserId>, user_id: &UserId, allowed_signatures: F, ) -> Result<(), Error> { @@ -608,9 +359,9 @@ pub fn clean_signatures bool>( for (user, signature) in mem::replace(signatures, serde_json::Map::with_capacity(new_capacity)) { - let sid = <&UserId>::try_from(user.as_str()) + let id = <&UserId>::try_from(user.as_str()) .map_err(|_| Error::bad_database("Invalid user ID in database."))?; - if sender_user == Some(user_id) || sid == user_id || allowed_signatures(sid) { + if id == user_id || allowed_signatures(id) { signatures.insert(user, signature); } } diff --git a/src/utils/error.rs b/src/utils/error.rs index 6e88cf59..9c8617f9 100644 --- a/src/utils/error.rs +++ b/src/utils/error.rs @@ -3,13 +3,13 @@ use std::convert::Infallible; use http::StatusCode; use ruma::{ api::client::{ - error::{Error as RumaError, ErrorBody, ErrorKind}, + error::{Error as RumaError, ErrorKind}, uiaa::{UiaaInfo, UiaaResponse}, }, OwnedServerName, }; use thiserror::Error; -use tracing::{error, info}; +use tracing::{error, warn}; #[cfg(feature = "persy")] use persy::PersyError; @@ -102,14 +102,11 @@ impl Error { if let Self::FederationError(origin, error) = self { let mut error = error.clone(); - error.body = ErrorBody::Standard { - kind: Unknown, - message: format!("Answer from {origin}: {error}"), - }; + error.message = format!("Answer from {}: {}", origin, error.message); return RumaResponse(UiaaResponse::MatrixError(error)); } - let message = format!("{self}"); + let message = format!("{}", self); use ErrorKind::*; let (kind, status_code) = match self { @@ -131,35 +128,14 @@ impl Error { _ => (Unknown, 
StatusCode::INTERNAL_SERVER_ERROR),
        };

-        info!("Returning an error: {}: {}", status_code, message);
+        warn!("{}: {}", status_code, message);

        RumaResponse(UiaaResponse::MatrixError(RumaError {
-            body: ErrorBody::Standard { kind, message },
+            kind,
+            message,
            status_code,
        }))
    }
-
-    /// Sanitizes public-facing errors that can leak sensitive information.
-    pub fn sanitized_error(&self) -> String {
-        let db_error = String::from("Database or I/O error occurred.");
-
-        match self {
-            #[cfg(feature = "sled")]
-            Self::SledError { .. } => db_error,
-            #[cfg(feature = "sqlite")]
-            Self::SqliteError { .. } => db_error,
-            #[cfg(feature = "persy")]
-            Self::PersyError { .. } => db_error,
-            #[cfg(feature = "heed")]
-            Self::HeedError => db_error,
-            #[cfg(feature = "rocksdb")]
-            Self::RocksDbError { .. } => db_error,
-            Self::IoError { .. } => db_error,
-            Self::BadConfig { .. } => db_error,
-            Self::BadDatabase { .. } => db_error,
-            _ => self.to_string(),
-        }
-    }
}

#[cfg(feature = "persy")]
diff --git a/tests/Complement.Dockerfile b/tests/Complement.Dockerfile
new file mode 100644
index 00000000..b9d0f8c9
--- /dev/null
+++ b/tests/Complement.Dockerfile
@@ -0,0 +1,48 @@
+# For use in our CI only. This requires a build artifact created by a previous pipeline stage to be placed in cached_target/release/conduit
+FROM valkum/docker-rust-ci:latest as builder
+WORKDIR /workdir
+
+ARG RUSTC_WRAPPER
+ARG AWS_ACCESS_KEY_ID
+ARG AWS_SECRET_ACCESS_KEY
+ARG SCCACHE_BUCKET
+ARG SCCACHE_ENDPOINT
+ARG SCCACHE_S3_USE_SSL
+
+COPY . .
+RUN mkdir -p target/release
+RUN test -e cached_target/release/conduit && cp cached_target/release/conduit target/release/conduit || cargo build --release
+
+
+FROM valkum/docker-rust-ci:latest
+WORKDIR /workdir
+
+RUN curl -OL "https://github.com/caddyserver/caddy/releases/download/v2.2.1/caddy_2.2.1_linux_amd64.tar.gz"
+RUN tar xzf caddy_2.2.1_linux_amd64.tar.gz
+
+COPY cached_target/release/conduit /workdir/conduit
+RUN chmod +x /workdir/conduit
+RUN chmod +x /workdir/caddy
+
+COPY conduit-example.toml conduit.toml
+
+ENV SERVER_NAME=localhost
+ENV CONDUIT_CONFIG=/workdir/conduit.toml
+
+RUN sed -i "s/port = 6167/port = 8008/g" conduit.toml
+RUN echo "allow_federation = true" >> conduit.toml
+RUN echo "allow_encryption = true" >> conduit.toml
+RUN echo "allow_registration = true" >> conduit.toml
+RUN echo "log = \"warn,_=off,sled=off\"" >> conduit.toml
+RUN sed -i "s/address = \"127.0.0.1\"/address = \"0.0.0.0\"/g" conduit.toml
+
+# Enable Caddy auto cert generation for the Complement-provided CA.
+RUN echo '{"logging":{"logs":{"default":{"level":"WARN"}}}, "apps":{"http":{"https_port":8448,"servers":{"srv0":{"listen":[":8448"],"routes":[{"match":[{"host":["your.server.name"]}],"handle":[{"handler":"subroute","routes":[{"handle":[{"handler":"reverse_proxy","upstreams":[{"dial":"127.0.0.1:8008"}]}]}]}],"terminal":true}],"tls_connection_policies": [{"match": {"sni": ["your.server.name"]}}]}}},"pki": {"certificate_authorities": {"local": {"name": "Complement CA","root": {"certificate": "/ca/ca.crt","private_key": "/ca/ca.key"},"intermediate": {"certificate": "/ca/ca.crt","private_key": "/ca/ca.key"}}}},"tls":{"automation":{"policies":[{"subjects":["your.server.name"],"issuer":{"module":"internal"},"on_demand":true},{"issuer":{"module":"internal", "ca": "local"}}]}}}}' > caddy.json + +EXPOSE 8008 8448 + +CMD ([ -z "${COMPLEMENT_CA}" ] && echo "Error: Need Complement PKI support" && true) || \ + sed -i "s/#server_name = \"your.server.name\"/server_name = \"${SERVER_NAME}\"/g" conduit.toml && \ + sed -i "s/your.server.name/${SERVER_NAME}/g" caddy.json && \ + /workdir/caddy start --config caddy.json > /dev/null && \ + /workdir/conduit
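
As a rough local-usage sketch for the Complement image added above (not part of the patch; it assumes the prebuilt binary already sits at cached_target/release/conduit, and the image tag, server name, and CA directory below are placeholder choices — the Dockerfile itself only requires a non-empty COMPLEMENT_CA, a CA key pair mounted at /ca, and SERVER_NAME set):

# Build from the repository root so cached_target/release/conduit is inside the build context.
docker build -f tests/Complement.Dockerfile -t complement-conduit:dev .

# Run with a Complement-style CA mounted at /ca; 8008/8448 match the EXPOSE line above.
docker run --rm -p 8008:8008 -p 8448:8448 \
    -e SERVER_NAME=my.test.server \
    -e COMPLEMENT_CA=1 \
    -v "$(pwd)/ca:/ca" \
    complement-conduit:dev

In normal CI use, Complement itself builds and drives the container, injecting COMPLEMENT_CA and the /ca mount.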