Compare commits
582 commits
Author | SHA1 | Date | |
---|---|---|---|
|
99ab234f40 | ||
|
e83416bb5a | ||
|
726b6f0fa6 | ||
|
d7fd89df49 | ||
|
f4e57fdb22 | ||
|
4f096adcfa | ||
21a5fa3ef0 | |||
b27e9ea95c | |||
8aa915acb9 | |||
ace9637bc2 | |||
|
be1e2e9307 | ||
|
1c6a4b1b24 | ||
976a73a0e5 | |||
4c06f329c4 | |||
d841b81c56 | |||
e707084345 | |||
|
6dcc8b6cf1 | ||
|
a2ac491c54 | ||
|
72a13d8353 | ||
|
3a63f9dfb6 | ||
|
f4f2d05b5b | ||
|
c3c7bcb2ed | ||
|
d6c57f9b2e | ||
|
7fb9e99649 | ||
|
1274b48ebb | ||
|
0a281e81a5 | ||
|
a43bde69fa | ||
|
986343877c | ||
|
2d47710b55 | ||
|
10542a1d70 | ||
|
c167f7a6ad | ||
|
5787a70bab | ||
|
cf8f1f2546 | ||
|
3c2fc4a4c6 | ||
|
dffd771e7c | ||
|
4da8c7e282 | ||
|
0df5d18fd6 | ||
|
825ceac1c3 | ||
|
3e389256f5 | ||
|
a7892a28ec | ||
|
9453dbc740 | ||
|
bf48c10d28 | ||
|
7c1a3e41d9 | ||
|
2a04a361e0 | ||
|
0e8e4f1083 | ||
|
81ae579b25 | ||
|
3a3cafe912 | ||
|
d29591d47d | ||
|
67d280dd2e | ||
|
3ac9be5a78 | ||
|
52954f7a11 | ||
|
692a31620d | ||
|
cf4015b830 | ||
|
9cef03127b | ||
|
249fc7769d | ||
|
5cc53c9e14 | ||
|
bdc46f6392 | ||
|
6ae776218c | ||
|
bd2b146d5d | ||
|
f7cc4fb3bb | ||
|
ca198c51fa | ||
|
fe86d28428 | ||
|
c86f9a5c5b | ||
|
e0358a9de5 | ||
|
69d0003222 | ||
|
5cf9f3df48 | ||
|
0b7ed5adc9 | ||
|
4de54db305 | ||
|
02781e4f9b | ||
|
f8bdfd82b0 | ||
|
7e66d2e2c0 | ||
|
ffd03a256b | ||
|
9d592d60d2 | ||
|
25ceb5ebd8 | ||
|
6f052fff98 | ||
|
e8ac881b2f | ||
|
0d17aedae5 | ||
|
ab1fff2642 | ||
|
92c5b6b86c | ||
|
dc2f53e773 | ||
|
2475995102 | ||
|
835f4ad8cf | ||
|
ca6219723b | ||
|
40c7c248fb | ||
|
8f3f5c01f9 | ||
|
9d7f7b871b | ||
|
30f0871e21 | ||
|
98e81c6217 | ||
|
f3b6b3e222 | ||
|
3bfdae795d | ||
|
75c80df271 | ||
|
094cb888d4 | ||
|
fa725a14e2 | ||
|
9b3664aeeb | ||
|
90fea00dc7 | ||
|
20924a44f1 | ||
|
38d6426b0e | ||
|
9b55ce933a | ||
|
f73a657a23 | ||
|
6dfb262ddf | ||
|
75cdc3a1f6 | ||
|
11103a92ed | ||
|
ce2017a10e | ||
|
0c2cfda3ae | ||
|
4bf8ee1f74 | ||
|
5d16948030 | ||
|
b7b2eb9d05 | ||
|
19bfee1835 | ||
|
9db87550fd | ||
|
606b25b9e7 | ||
|
fd9e52a559 | ||
|
0a0f227601 | ||
|
183558150d | ||
|
c028e0553c | ||
|
2581f7a10b | ||
|
3e518773e2 | ||
|
888f7e4403 | ||
|
d82c26f0a9 | ||
|
c1e2ffc0cd | ||
|
06fccbc340 | ||
|
fbd8090b0b | ||
|
06ab707c79 | ||
|
174a580319 | ||
|
fbb256dd91 | ||
|
5a7bade476 | ||
|
d2bfcb018e | ||
|
08f0f17ff7 | ||
|
57b86f1130 | ||
|
3a6eee7019 | ||
|
9ce1cad983 | ||
|
10da9485a5 | ||
|
acfe381dd3 | ||
|
83805c66e5 | ||
|
afd8112e25 | ||
|
b8c164dc60 | ||
|
0453a72890 | ||
|
e2c914cc11 | ||
|
da907451e7 | ||
|
2b4a6c96ee | ||
|
d7061e6984 | ||
|
3494d7759e | ||
|
cc5dcceacc | ||
|
863103450c | ||
|
a0148a9996 | ||
|
1f867a2c86 | ||
|
c0a2acb869 | ||
|
97835541ce | ||
|
081cc66eda | ||
|
7489e2c4f6 | ||
|
1e675dbb68 | ||
|
f4c1748ab1 | ||
|
7990822f72 | ||
|
2a100412fa | ||
|
3e7652909b | ||
|
9fb8498067 | ||
|
291290db92 | ||
|
54a115caf3 | ||
|
81866170f0 | ||
|
bf46829595 | ||
|
9f14ad7125 | ||
|
90a10c84ef | ||
|
d220641d64 | ||
|
caddc656fb | ||
|
b1a591a06c | ||
|
3cd3d0e0ff | ||
|
433dad6ac2 | ||
|
8cf408e966 | ||
|
1e560529d8 | ||
|
ff98444d03 | ||
|
82f31d6b72 | ||
|
6ae5143ff5 | ||
|
bd8fec3836 | ||
|
742331e054 | ||
|
abd8e1bf54 | ||
|
fa3b1fd9bd | ||
|
e9946f81a0 | ||
|
a9ba067e77 | ||
|
706148f941 | ||
|
24402312c5 | ||
|
17180a3e08 | ||
|
3c6ffd88bf | ||
|
c3966f501c | ||
|
56f0f3dfa4 | ||
|
ad06d475de | ||
|
0b4e3de9c0 | ||
|
edd4a3733f | ||
|
c17187777f | ||
|
78e7b711df | ||
|
4b7d3e24dd | ||
|
e4f769963f | ||
|
eab5dac6e8 | ||
|
c4824a6ebc | ||
|
f8a36e7554 | ||
|
a2c3256ced | ||
|
833c1505f1 | ||
|
bac13d08ae | ||
|
f0a27dcb00 | ||
|
9d49d599f3 | ||
|
2640f67e4b | ||
|
eb8bc1af8d | ||
|
0ded637b4a | ||
|
dc50197a13 | ||
|
06a1321e56 | ||
|
6a6f8e80f1 | ||
|
fd1ccbd3ad | ||
|
3a1a72df98 | ||
|
84784970b2 | ||
|
d64a56d88b | ||
|
be877ef719 | ||
|
7c6d25dcd1 | ||
|
b671238aa0 | ||
|
91180e011d | ||
|
26b8605fa0 | ||
|
dbd360ebb9 | ||
|
48e6e0659f | ||
|
72eb1972c1 | ||
|
63cbaedb79 | ||
|
db6def8800 | ||
|
caa841c434 | ||
|
49a0f3a60d | ||
|
bac82f43af | ||
|
15cc801840 | ||
|
5f9ca8e458 | ||
|
c7e0ea525a | ||
|
abd0a014e8 | ||
|
4a7d3c7301 | ||
|
15e60818c9 | ||
|
def079267d | ||
|
a3a9b60abc | ||
|
808b12f618 | ||
|
faa9208a3e | ||
|
1ea27c4f97 | ||
|
422ee40107 | ||
|
0280fa5793 | ||
|
664d6baace | ||
|
be9196430d | ||
|
533bccad8f | ||
|
a4261aac76 | ||
|
c38df57279 | ||
|
4e2bbf9d6a | ||
|
7a9ec851fc | ||
|
d62cd2ae51 | ||
|
49b5af6d45 | ||
|
1f1444da8c | ||
|
2a9a908343 | ||
|
921b266d86 | ||
|
dbbd164e39 | ||
|
f5e3b0e2dd | ||
|
1b9e63f426 | ||
|
eb4323cc0f | ||
|
a6712627e4 | ||
|
3be32c4dac | ||
|
55149e3336 | ||
|
2b63e46fc5 | ||
|
a0c449e570 | ||
|
c997311bea | ||
|
1929ca5d9d | ||
|
88c6bf7595 | ||
|
4635644e21 | ||
|
f53ecaa97d | ||
|
f704169aeb | ||
|
2a7c4693b8 | ||
|
da3871f39a | ||
|
664ee7d89a | ||
|
42b12934e3 | ||
|
63f787f635 | ||
|
a1bd348977 | ||
|
27f29ba699 | ||
|
cb0ce5b08f | ||
|
b7c99788e4 | ||
|
2316d89048 | ||
|
bde4880c1d | ||
|
8b648d0d3f | ||
|
4617ee2b6b | ||
|
10fa686c77 | ||
|
2a16a5e967 | ||
|
2aa0a2474b | ||
|
d39003ffc0 | ||
|
eae0989c40 | ||
|
17a6431f5f | ||
|
fcfb06ffa6 | ||
|
7bdd9660aa | ||
|
23b18d71ee | ||
|
84cfed5231 | ||
|
cdcf4a017d | ||
|
fc0aff20cf | ||
|
4223288cdf | ||
|
a4f18f99ad | ||
|
06df04f61c | ||
|
cfcc9086ff | ||
|
11b9cfad5e | ||
|
5d913f7010 | ||
|
d68dad580b | ||
|
e13dc7c14a | ||
|
b158896396 | ||
|
f95dd4521c | ||
|
1e77373332 | ||
|
f01b96588d | ||
|
4d589d9788 | ||
|
815db0d962 | ||
|
809c9b4481 | ||
|
c6e3438e76 | ||
|
844508bc48 | ||
|
b3aec63d67 | ||
|
2da4ae6b3b | ||
|
5e6b498c22 | ||
|
391beddaf4 | ||
|
112b76b1c1 | ||
|
315944968b | ||
|
9f74555c88 | ||
|
0a4e8e5909 | ||
|
19156c7bbf | ||
|
2a66ad4329 | ||
|
53f14a2c4c | ||
|
d20f21ae32 | ||
|
f7db3490f6 | ||
|
c7a7c913d4 | ||
|
76a82339a2 | ||
|
94df9cdbba | ||
|
b231d7f15c | ||
|
7cc346bc18 | ||
|
48bc0db723 | ||
|
7c196f4e00 | ||
|
c86313d4fa | ||
|
7b98741163 | ||
|
2a04c213f9 | ||
|
d7eaa9c5cc | ||
|
2a0515f528 | ||
|
3930fd08d7 | ||
|
683eefbd0b | ||
|
d963ad8cc1 | ||
|
6d5e54a66b | ||
|
2b2bfb91c2 | ||
|
f1d2574651 | ||
|
d39ce1401d | ||
|
7fd5b22e3b | ||
|
db7a7085f4 | ||
|
5894d35eb2 | ||
|
b9fd6127e2 | ||
|
bb9bc0a001 | ||
|
f4dd051a1d | ||
|
06d3efc4d0 | ||
|
66ad114e19 | ||
|
4b737b46ac | ||
|
bcd522e75f | ||
|
249960b111 | ||
|
583aea187b | ||
|
396dac6d82 | ||
|
9149be31af | ||
|
32a4ded4a1 | ||
|
e3dabdf525 | ||
|
b59304a4df | ||
|
66bc41125c | ||
|
6786c44f4d | ||
|
a3a1db124d | ||
|
3b3c451c83 | ||
|
cf99316082 | ||
|
c063700255 | ||
|
7540227388 | ||
|
09015f113c | ||
|
a2d8aec1e3 | ||
|
ccdaaceb33 | ||
|
b37876f3b2 | ||
|
e8e0a4dcc5 | ||
|
23cf39c525 | ||
|
00996dd834 | ||
|
2a52f666dc | ||
|
0cf6545116 | ||
|
5d691f405e | ||
|
c61914c8e1 | ||
|
9548c84d32 | ||
|
02dd3d32f2 | ||
|
7c98ba64aa | ||
|
52018c3967 | ||
|
e86fb11512 | ||
|
20e3c42456 | ||
|
1aff2a54ef | ||
|
238ebcfcac | ||
|
876fdf480d | ||
|
3bc0a1924b | ||
|
4af998963b | ||
|
10d2da3009 | ||
|
d47c1a8ba6 | ||
|
9c0c74f547 | ||
|
215d909e59 | ||
|
ada15ceacc | ||
|
716f82db6d | ||
|
fe7d8c4f12 | ||
|
cb2b5beea8 | ||
|
2231a69b4c | ||
|
13052388a7 | ||
|
6fd39ae174 | ||
|
2627ca5e3d | ||
|
ed5b8d6a46 | ||
|
2d0fdddd34 | ||
|
3054af41ba | ||
|
1e1a144dfa | ||
|
cc3e1f58cc | ||
|
b1991c8f4f | ||
|
6f7f2820ce | ||
|
e9697f13d6 | ||
|
3b0aa23fdf | ||
|
aca6218c0a | ||
|
3a45628e1d | ||
|
e923f63c49 | ||
|
842feabced | ||
|
286936db32 | ||
|
bf7c4b4001 | ||
|
d755a96c2c | ||
|
c948324cf2 | ||
|
76f81ac201 | ||
|
ce188daccb | ||
|
98702da4e6 | ||
|
92f7f0c849 | ||
|
7451abe3ea | ||
|
3e6c66b899 | ||
|
3a40bf8ae0 | ||
|
9c922db14b | ||
|
175fba5739 | ||
|
912491cb28 | ||
|
da2dbd2877 | ||
|
c67f95ebff | ||
|
3a8321f9ad | ||
|
f46d64e52f | ||
|
8c6e75a0cd | ||
|
c23b4946c5 | ||
|
ac52b234fa | ||
|
9a47069f45 | ||
|
7ef9fe3454 | ||
|
fc852f8be6 | ||
|
4710f739c0 | ||
|
3c20c1b72e | ||
|
37eb686b5b | ||
|
fdd64fc966 | ||
|
4d982d05af | ||
|
1e725bc548 | ||
|
dd8f4681a2 | ||
|
0290f1f355 | ||
|
cd835fc7a8 | ||
|
2b70d9604a | ||
|
d3968c2fd1 | ||
|
8105c5cc60 | ||
|
d1e5acd7b3 | ||
|
68227c06c3 | ||
|
31d1801912 | ||
|
fb6bfa9753 | ||
|
c30cc6120b | ||
|
2b7c19835b | ||
|
c2a5315e9f | ||
|
0ddc3c01ef | ||
|
c15205fb46 | ||
|
cb837d5a1c | ||
|
18ca2e4c29 | ||
|
a10dae38e2 | ||
|
7cf060ae5b | ||
|
de9b0cec50 | ||
|
773eded0af | ||
|
df8703cc13 | ||
|
71cffcd537 | ||
|
f430b87459 | ||
|
ca82b2940d | ||
|
229444c932 | ||
|
076e9810ba | ||
|
6b131202b9 | ||
|
275c6b447d | ||
|
1a7893dbbd | ||
|
5a04559cb4 | ||
|
25c3d89f28 | ||
|
8b5b7a1f63 | ||
|
50b0eb9929 | ||
|
7822a385bb | ||
|
d5b4754cf4 | ||
|
f47a5cd5d5 | ||
|
a4637e2ba1 | ||
|
33a2b2b772 | ||
|
44fe6d1554 | ||
|
cff52d7ebb | ||
|
face766e0f | ||
|
8708cd3b63 | ||
|
bd8b616ca0 | ||
|
785ddfc4aa | ||
|
232978087a | ||
|
7946c5f29e | ||
|
efad401751 | ||
|
e1e87b8d0c | ||
|
c6d1421e81 | ||
|
5a29511d34 | ||
|
d024d205c0 | ||
|
4649cd82b5 | ||
|
057f8364cc | ||
|
82e7f57b38 | ||
|
3e22bbeecd | ||
|
213579ee9d | ||
|
810a6baf34 | ||
|
61f6ac0d66 | ||
|
6d981f37a2 | ||
|
7e0b8ec0ac | ||
|
19743ae195 | ||
|
fd0ea4bf71 | ||
|
f56424bc8d | ||
|
01bf348811 | ||
|
bea5d1e0d8 | ||
|
e8b33e8c5a | ||
|
dc7670f3a8 | ||
|
94ce06bb76 | ||
|
70863260f6 | ||
|
cb9458122c | ||
|
e62b0904ea | ||
|
306ff5ee4e | ||
|
e045abe961 | ||
|
0daa3209db | ||
|
8d0ed3ec51 | ||
|
691e69847f | ||
|
c8f64844ab | ||
|
b0029c49b9 | ||
|
91ad250177 | ||
|
f6040ef2d7 | ||
|
877ee48480 | ||
|
03e6e43ecd | ||
|
6ace16abf6 | ||
|
158de9ca08 | ||
|
ea2dcf4ff0 | ||
|
332e7c9dba | ||
|
0213a32e6a | ||
|
cd3a163816 | ||
|
2950349adf | ||
|
56cacf6f1c | ||
|
0f77ae14e4 | ||
|
8fa990330f | ||
|
84630f90b7 | ||
|
1869a38b85 | ||
|
e39358d375 | ||
|
c7e601eb0b | ||
|
ac4724e82c | ||
|
73217f238c | ||
|
d410f08642 | ||
|
bfccd4f136 | ||
|
c21820083b | ||
|
865e35df17 | ||
|
604b1a5cf1 | ||
|
81ac01c2f5 | ||
|
1ccc226c6b | ||
|
0ce4446b1a | ||
|
daa969508f | ||
|
715b30a2b5 | ||
|
42fe118cbe | ||
|
06bfddf0da | ||
|
931c8ece4a | ||
|
85e571badd | ||
|
0071a9cbf4 | ||
|
a563b1ba9a | ||
|
9e1ab74bb4 | ||
|
adafb335ff | ||
|
05487c7c15 | ||
|
a2a327af7c | ||
|
33c0e0f430 | ||
|
1442c64420 | ||
|
28644f236e | ||
|
cc80152889 | ||
|
dcdbcc0851 | ||
|
1b0477d569 | ||
|
57c92f8044 | ||
|
e1d8c03e47 | ||
|
7d2b22f58d | ||
|
9efd9f06c6 | ||
|
d0cbe46ff0 | ||
|
025b64befc | ||
|
92e59f14e0 | ||
|
7989c7cdda | ||
|
e22f5fef1f | ||
|
64a022a4d2 | ||
|
751be39376 | ||
|
d05b84d0f5 | ||
|
54bf91b76e | ||
|
8ed79a00fd | ||
|
8dffdadfd3 | ||
|
2dbfbd45a2 | ||
|
249440115b | ||
|
baa8224cce | ||
|
bd7b49b098 | ||
|
27e2f0d545 | ||
|
4dc14e1580 |
197 changed files with 27113 additions and 18321 deletions
|
@ -25,4 +25,4 @@ docker-compose*
|
||||||
rustfmt.toml
|
rustfmt.toml
|
||||||
|
|
||||||
# Documentation
|
# Documentation
|
||||||
*.md
|
#*.md
|
||||||
|
|
5
.envrc
Normal file
5
.envrc
Normal file
|
@ -0,0 +1,5 @@
|
||||||
|
#!/usr/bin/env bash
|
||||||
|
|
||||||
|
use flake
|
||||||
|
|
||||||
|
PATH_add bin
|
11
.gitignore
vendored
11
.gitignore
vendored
|
@ -31,7 +31,6 @@ modules.xml
|
||||||
|
|
||||||
### vscode ###
|
### vscode ###
|
||||||
.vscode/*
|
.vscode/*
|
||||||
!.vscode/settings.json
|
|
||||||
!.vscode/tasks.json
|
!.vscode/tasks.json
|
||||||
!.vscode/launch.json
|
!.vscode/launch.json
|
||||||
!.vscode/extensions.json
|
!.vscode/extensions.json
|
||||||
|
@ -62,3 +61,13 @@ conduit.db
|
||||||
|
|
||||||
# Etc.
|
# Etc.
|
||||||
**/*.rs.bk
|
**/*.rs.bk
|
||||||
|
cached_target
|
||||||
|
|
||||||
|
# Nix artifacts
|
||||||
|
/result*
|
||||||
|
|
||||||
|
# Direnv cache
|
||||||
|
/.direnv
|
||||||
|
|
||||||
|
# Gitlab CI cache
|
||||||
|
/.gitlab-ci.d
|
||||||
|
|
537
.gitlab-ci.yml
537
.gitlab-ci.yml
|
@ -1,399 +1,180 @@
|
||||||
stages:
|
stages:
|
||||||
- build
|
- ci
|
||||||
- build docker image
|
- artifacts
|
||||||
- test
|
- publish
|
||||||
- upload artifacts
|
|
||||||
|
|
||||||
variables:
|
variables:
|
||||||
GIT_SUBMODULE_STRATEGY: recursive
|
# Makes some things print in color
|
||||||
FF_USE_FASTZIP: 1
|
TERM: ansi
|
||||||
CACHE_COMPRESSION_LEVEL: fastest
|
|
||||||
# Docker in Docker
|
|
||||||
DOCKER_HOST: tcp://docker:2375/
|
|
||||||
DOCKER_TLS_CERTDIR: ""
|
|
||||||
DOCKER_DRIVER: overlay2
|
|
||||||
|
|
||||||
# --------------------------------------------------------------------- #
|
|
||||||
# Cargo: Compiling for different architectures #
|
|
||||||
# --------------------------------------------------------------------- #
|
|
||||||
|
|
||||||
.build-cargo-shared-settings:
|
|
||||||
stage: "build"
|
|
||||||
needs: []
|
|
||||||
rules:
|
|
||||||
- if: '$CI_COMMIT_BRANCH == "master"'
|
|
||||||
- if: '$CI_COMMIT_BRANCH == "next"'
|
|
||||||
- if: "$CI_COMMIT_TAG"
|
|
||||||
- if: '($CI_MERGE_REQUEST_APPROVED == "true") || $BUILD_EVERYTHING' # Once MR is approved, test all builds. Or if BUILD_EVERYTHING is set.
|
|
||||||
interruptible: true
|
|
||||||
image: "registry.gitlab.com/jfowl/conduit-containers/rust-with-tools@sha256:69ab327974aef4cc0daf4273579253bf7ae5e379a6c52729b83137e4caa9d093"
|
|
||||||
tags: ["docker"]
|
|
||||||
services: ["docker:dind"]
|
|
||||||
variables:
|
|
||||||
SHARED_PATH: $CI_PROJECT_DIR/shared
|
|
||||||
CARGO_PROFILE_RELEASE_LTO: "true"
|
|
||||||
CARGO_PROFILE_RELEASE_CODEGEN_UNITS: "1"
|
|
||||||
CARGO_INCREMENTAL: "false" # https://matklad.github.io/2021/09/04/fast-rust-builds.html#ci-workflow
|
|
||||||
before_script:
|
before_script:
|
||||||
- 'echo "Building for target $TARGET"'
|
# Enable nix-command and flakes
|
||||||
- "rustup show && rustc --version && cargo --version" # Print version info for debugging
|
- if command -v nix > /dev/null; then echo "experimental-features = nix-command flakes" >> /etc/nix/nix.conf; fi
|
||||||
# fix cargo and rustup mounts from this container (https://gitlab.com/gitlab-org/gitlab-foss/-/issues/41227)
|
|
||||||
- "mkdir -p $SHARED_PATH/cargo"
|
# Add our own binary cache
|
||||||
- "cp -r $CARGO_HOME/bin $SHARED_PATH/cargo"
|
- if command -v nix > /dev/null; then echo "extra-substituters = https://nix.computer.surgery/conduit" >> /etc/nix/nix.conf; fi
|
||||||
- "cp -r $RUSTUP_HOME $SHARED_PATH"
|
- if command -v nix > /dev/null; then echo "extra-trusted-public-keys = conduit:ZGAf6P6LhNvnoJJ3Me3PRg7tlLSrPxcQ2RiE5LIppjo=" >> /etc/nix/nix.conf; fi
|
||||||
- "export CARGO_HOME=$SHARED_PATH/cargo RUSTUP_HOME=$SHARED_PATH/rustup"
|
|
||||||
# If provided, bring in caching through sccache, which uses an external S3 endpoint to store compilation results.
|
# Add crane binary cache
|
||||||
- if [ -n "${SCCACHE_ENDPOINT}" ]; then export RUSTC_WRAPPER=/sccache; fi
|
- if command -v nix > /dev/null; then echo "extra-substituters = https://crane.cachix.org" >> /etc/nix/nix.conf; fi
|
||||||
|
- if command -v nix > /dev/null; then echo "extra-trusted-public-keys = crane.cachix.org-1:8Scfpmn9w+hGdXH/Q9tTLiYAE/2dnJYRJP7kl80GuRk=" >> /etc/nix/nix.conf; fi
|
||||||
|
|
||||||
|
# Add nix-community binary cache
|
||||||
|
- if command -v nix > /dev/null; then echo "extra-substituters = https://nix-community.cachix.org" >> /etc/nix/nix.conf; fi
|
||||||
|
- if command -v nix > /dev/null; then echo "extra-trusted-public-keys = nix-community.cachix.org-1:mB9FSh9qf2dCimDSUo8Zy7bkq5CX+/rkCWyvRCYg3Fs=" >> /etc/nix/nix.conf; fi
|
||||||
|
|
||||||
|
# Install direnv and nix-direnv
|
||||||
|
- if command -v nix > /dev/null; then nix-env -iA nixpkgs.direnv nixpkgs.nix-direnv; fi
|
||||||
|
|
||||||
|
# Allow .envrc
|
||||||
|
- if command -v nix > /dev/null; then direnv allow; fi
|
||||||
|
|
||||||
|
# Set CARGO_HOME to a cacheable path
|
||||||
|
- export CARGO_HOME="$(git rev-parse --show-toplevel)/.gitlab-ci.d/cargo"
|
||||||
|
|
||||||
|
ci:
|
||||||
|
stage: ci
|
||||||
|
image: nixos/nix:2.19.2
|
||||||
script:
|
script:
|
||||||
# cross-compile conduit for target
|
- direnv exec . engage
|
||||||
- 'time cross build --target="$TARGET" --locked --release'
|
|
||||||
- 'mv "target/$TARGET/release/conduit" "conduit-$TARGET"'
|
|
||||||
# print information about linking for debugging
|
|
||||||
- "file conduit-$TARGET" # print file information
|
|
||||||
- 'readelf --dynamic conduit-$TARGET | sed -e "/NEEDED/q1"' # ensure statically linked
|
|
||||||
cache:
|
cache:
|
||||||
# https://doc.rust-lang.org/cargo/guide/cargo-home.html#caching-the-cargo-home-in-ci
|
key: nix
|
||||||
key: "cargo-cache-$TARGET"
|
|
||||||
paths:
|
paths:
|
||||||
- $SHARED_PATH/cargo/registry/index
|
- target
|
||||||
- $SHARED_PATH/cargo/registry/cache
|
- .gitlab-ci.d
|
||||||
- $SHARED_PATH/cargo/git/db
|
|
||||||
artifacts:
|
|
||||||
expire_in: never
|
|
||||||
|
|
||||||
build:release:cargo:x86_64-unknown-linux-musl-with-debug:
|
static:x86_64-unknown-linux-musl:
|
||||||
extends: .build-cargo-shared-settings
|
stage: artifacts
|
||||||
variables:
|
image: nixos/nix:2.19.2
|
||||||
CARGO_PROFILE_RELEASE_DEBUG: 2 # Enable debug info for flamegraph profiling
|
|
||||||
TARGET: "x86_64-unknown-linux-musl"
|
|
||||||
after_script:
|
|
||||||
- "mv ./conduit-x86_64-unknown-linux-musl ./conduit-x86_64-unknown-linux-musl-with-debug"
|
|
||||||
artifacts:
|
|
||||||
name: "conduit-x86_64-unknown-linux-musl-with-debug"
|
|
||||||
paths:
|
|
||||||
- "conduit-x86_64-unknown-linux-musl-with-debug"
|
|
||||||
expose_as: "Conduit for x86_64-unknown-linux-musl-with-debug"
|
|
||||||
|
|
||||||
build:release:cargo:x86_64-unknown-linux-musl:
|
|
||||||
extends: .build-cargo-shared-settings
|
|
||||||
variables:
|
|
||||||
TARGET: "x86_64-unknown-linux-musl"
|
|
||||||
artifacts:
|
|
||||||
name: "conduit-x86_64-unknown-linux-musl"
|
|
||||||
paths:
|
|
||||||
- "conduit-x86_64-unknown-linux-musl"
|
|
||||||
expose_as: "Conduit for x86_64-unknown-linux-musl"
|
|
||||||
|
|
||||||
build:release:cargo:arm-unknown-linux-musleabihf:
|
|
||||||
extends: .build-cargo-shared-settings
|
|
||||||
variables:
|
|
||||||
TARGET: "arm-unknown-linux-musleabihf"
|
|
||||||
artifacts:
|
|
||||||
name: "conduit-arm-unknown-linux-musleabihf"
|
|
||||||
paths:
|
|
||||||
- "conduit-arm-unknown-linux-musleabihf"
|
|
||||||
expose_as: "Conduit for arm-unknown-linux-musleabihf"
|
|
||||||
|
|
||||||
build:release:cargo:armv7-unknown-linux-musleabihf:
|
|
||||||
extends: .build-cargo-shared-settings
|
|
||||||
variables:
|
|
||||||
TARGET: "armv7-unknown-linux-musleabihf"
|
|
||||||
artifacts:
|
|
||||||
name: "conduit-armv7-unknown-linux-musleabihf"
|
|
||||||
paths:
|
|
||||||
- "conduit-armv7-unknown-linux-musleabihf"
|
|
||||||
expose_as: "Conduit for armv7-unknown-linux-musleabihf"
|
|
||||||
|
|
||||||
build:release:cargo:aarch64-unknown-linux-musl:
|
|
||||||
extends: .build-cargo-shared-settings
|
|
||||||
variables:
|
|
||||||
TARGET: "aarch64-unknown-linux-musl"
|
|
||||||
artifacts:
|
|
||||||
name: "conduit-aarch64-unknown-linux-musl"
|
|
||||||
paths:
|
|
||||||
- "conduit-aarch64-unknown-linux-musl"
|
|
||||||
expose_as: "Conduit for aarch64-unknown-linux-musl"
|
|
||||||
|
|
||||||
.cargo-debug-shared-settings:
|
|
||||||
extends: ".build-cargo-shared-settings"
|
|
||||||
rules:
|
|
||||||
- when: "always"
|
|
||||||
cache:
|
|
||||||
key: "build_cache--$TARGET--$CI_COMMIT_BRANCH--debug"
|
|
||||||
script:
|
script:
|
||||||
# cross-compile conduit for target
|
# Push artifacts and build requirements to binary cache
|
||||||
- 'time time cross build --target="$TARGET" --locked'
|
- ./bin/nix-build-and-cache .#static-x86_64-unknown-linux-musl
|
||||||
- 'mv "target/$TARGET/debug/conduit" "conduit-debug-$TARGET"'
|
|
||||||
# print information about linking for debugging
|
|
||||||
- "file conduit-debug-$TARGET" # print file information
|
|
||||||
- 'readelf --dynamic conduit-debug-$TARGET | sed -e "/NEEDED/q1"' # ensure statically linked
|
|
||||||
artifacts:
|
|
||||||
expire_in: 4 weeks
|
|
||||||
|
|
||||||
build:debug:cargo:x86_64-unknown-linux-musl:
|
# Make the output less difficult to find
|
||||||
extends: ".cargo-debug-shared-settings"
|
- cp result/bin/conduit conduit
|
||||||
variables:
|
|
||||||
TARGET: "x86_64-unknown-linux-musl"
|
|
||||||
artifacts:
|
artifacts:
|
||||||
name: "conduit-debug-x86_64-unknown-linux-musl"
|
|
||||||
paths:
|
paths:
|
||||||
- "conduit-debug-x86_64-unknown-linux-musl"
|
- conduit
|
||||||
expose_as: "Conduit DEBUG for x86_64-unknown-linux-musl"
|
|
||||||
|
|
||||||
# --------------------------------------------------------------------- #
|
static:aarch64-unknown-linux-musl:
|
||||||
# Create and publish docker image #
|
stage: artifacts
|
||||||
# --------------------------------------------------------------------- #
|
image: nixos/nix:2.19.2
|
||||||
|
script:
|
||||||
|
# Push artifacts and build requirements to binary cache
|
||||||
|
- ./bin/nix-build-and-cache .#static-aarch64-unknown-linux-musl
|
||||||
|
|
||||||
.docker-shared-settings:
|
# Make the output less difficult to find
|
||||||
stage: "build docker image"
|
- cp result/bin/conduit conduit
|
||||||
image: jdrouet/docker-with-buildx:stable
|
artifacts:
|
||||||
tags: ["docker"]
|
paths:
|
||||||
|
- conduit
|
||||||
|
|
||||||
|
# Note that although we have an `oci-image-x86_64-unknown-linux-musl` output,
|
||||||
|
# we don't build it because it would be largely redundant to this one since it's
|
||||||
|
# all containerized anyway.
|
||||||
|
oci-image:x86_64-unknown-linux-gnu:
|
||||||
|
stage: artifacts
|
||||||
|
image: nixos/nix:2.19.2
|
||||||
|
script:
|
||||||
|
# Push artifacts and build requirements to binary cache
|
||||||
|
#
|
||||||
|
# Since the OCI image package is based on the binary package, this has the
|
||||||
|
# fun side effect of uploading the normal binary too. Conduit users who are
|
||||||
|
# deploying with Nix can leverage this fact by adding our binary cache to
|
||||||
|
# their systems.
|
||||||
|
- ./bin/nix-build-and-cache .#oci-image
|
||||||
|
|
||||||
|
# Make the output less difficult to find
|
||||||
|
- cp result oci-image-amd64.tar.gz
|
||||||
|
artifacts:
|
||||||
|
paths:
|
||||||
|
- oci-image-amd64.tar.gz
|
||||||
|
|
||||||
|
oci-image:aarch64-unknown-linux-musl:
|
||||||
|
stage: artifacts
|
||||||
|
needs:
|
||||||
|
# Wait for the static binary job to finish before starting so we don't have
|
||||||
|
# to build that twice for no reason
|
||||||
|
- static:aarch64-unknown-linux-musl
|
||||||
|
image: nixos/nix:2.19.2
|
||||||
|
script:
|
||||||
|
# Push artifacts and build requirements to binary cache
|
||||||
|
- ./bin/nix-build-and-cache .#oci-image-aarch64-unknown-linux-musl
|
||||||
|
|
||||||
|
# Make the output less difficult to find
|
||||||
|
- cp result oci-image-arm64v8.tar.gz
|
||||||
|
artifacts:
|
||||||
|
paths:
|
||||||
|
- oci-image-arm64v8.tar.gz
|
||||||
|
|
||||||
|
debian:x86_64-unknown-linux-gnu:
|
||||||
|
stage: artifacts
|
||||||
|
# See also `rust-toolchain.toml`
|
||||||
|
image: rust:1.75.0
|
||||||
|
script:
|
||||||
|
- apt-get update && apt-get install -y --no-install-recommends libclang-dev
|
||||||
|
- cargo install cargo-deb
|
||||||
|
- cargo deb
|
||||||
|
|
||||||
|
# Make the output less difficult to find
|
||||||
|
- mv target/debian/*.deb conduit.deb
|
||||||
|
artifacts:
|
||||||
|
paths:
|
||||||
|
- conduit.deb
|
||||||
|
cache:
|
||||||
|
key: debian
|
||||||
|
paths:
|
||||||
|
- target
|
||||||
|
- .gitlab-ci.d
|
||||||
|
|
||||||
|
.push-oci-image:
|
||||||
|
stage: publish
|
||||||
|
image: docker:25.0.0
|
||||||
services:
|
services:
|
||||||
- docker:dind
|
- docker:25.0.0-dind
|
||||||
needs:
|
|
||||||
- "build:release:cargo:x86_64-unknown-linux-musl"
|
|
||||||
- "build:release:cargo:arm-unknown-linux-musleabihf"
|
|
||||||
- "build:release:cargo:armv7-unknown-linux-musleabihf"
|
|
||||||
- "build:release:cargo:aarch64-unknown-linux-musl"
|
|
||||||
variables:
|
variables:
|
||||||
PLATFORMS: "linux/arm/v6,linux/arm/v7,linux/arm64,linux/amd64"
|
IMAGE_SUFFIX_AMD64: amd64
|
||||||
DOCKER_FILE: "docker/ci-binaries-packaging.Dockerfile"
|
IMAGE_SUFFIX_ARM64V8: arm64v8
|
||||||
cache:
|
script:
|
||||||
paths:
|
- docker load -i oci-image-amd64.tar.gz
|
||||||
- docker_cache
|
- IMAGE_ID_AMD64=$(docker images -q conduit:next)
|
||||||
key: "$CI_JOB_NAME"
|
- docker load -i oci-image-arm64v8.tar.gz
|
||||||
|
- IMAGE_ID_ARM64V8=$(docker images -q conduit:next)
|
||||||
|
# Tag and push the architecture specific images
|
||||||
|
- docker tag $IMAGE_ID_AMD64 $IMAGE_NAME:$CI_COMMIT_SHA-$IMAGE_SUFFIX_AMD64
|
||||||
|
- docker tag $IMAGE_ID_ARM64V8 $IMAGE_NAME:$CI_COMMIT_SHA-$IMAGE_SUFFIX_ARM64V8
|
||||||
|
- docker push $IMAGE_NAME:$CI_COMMIT_SHA-$IMAGE_SUFFIX_AMD64
|
||||||
|
- docker push $IMAGE_NAME:$CI_COMMIT_SHA-$IMAGE_SUFFIX_ARM64V8
|
||||||
|
# Tag the multi-arch image
|
||||||
|
- docker manifest create $IMAGE_NAME:$CI_COMMIT_SHA --amend $IMAGE_NAME:$CI_COMMIT_SHA-$IMAGE_SUFFIX_AMD64 --amend $IMAGE_NAME:$CI_COMMIT_SHA-$IMAGE_SUFFIX_ARM64V8
|
||||||
|
- docker manifest push $IMAGE_NAME:$CI_COMMIT_SHA
|
||||||
|
# Tag and push the git ref
|
||||||
|
- docker manifest create $IMAGE_NAME:$CI_COMMIT_REF_NAME --amend $IMAGE_NAME:$CI_COMMIT_SHA-$IMAGE_SUFFIX_AMD64 --amend $IMAGE_NAME:$CI_COMMIT_SHA-$IMAGE_SUFFIX_ARM64V8
|
||||||
|
- docker manifest push $IMAGE_NAME:$CI_COMMIT_REF_NAME
|
||||||
|
# Tag git tags as 'latest'
|
||||||
|
- |
|
||||||
|
if [[ -n "$CI_COMMIT_TAG" ]]; then
|
||||||
|
docker manifest create $IMAGE_NAME:latest --amend $IMAGE_NAME:$CI_COMMIT_SHA-$IMAGE_SUFFIX_AMD64 --amend $IMAGE_NAME:$CI_COMMIT_SHA-$IMAGE_SUFFIX_ARM64V8
|
||||||
|
docker manifest push $IMAGE_NAME:latest
|
||||||
|
fi
|
||||||
|
dependencies:
|
||||||
|
- oci-image:x86_64-unknown-linux-gnu
|
||||||
|
- oci-image:aarch64-unknown-linux-musl
|
||||||
|
only:
|
||||||
|
- next
|
||||||
|
- master
|
||||||
|
- tags
|
||||||
|
|
||||||
|
oci-image:push-gitlab:
|
||||||
|
extends: .push-oci-image
|
||||||
|
variables:
|
||||||
|
IMAGE_NAME: $CI_REGISTRY_IMAGE/matrix-conduit
|
||||||
before_script:
|
before_script:
|
||||||
- docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY
|
- docker login -u $CI_REGISTRY_USER -p $CI_REGISTRY_PASSWORD $CI_REGISTRY
|
||||||
# Only log in to Dockerhub if the credentials are given:
|
|
||||||
- if [ -n "${DOCKER_HUB}" ]; then docker login -u "$DOCKER_HUB_USER" -p "$DOCKER_HUB_PASSWORD" "$DOCKER_HUB"; fi
|
|
||||||
script:
|
|
||||||
# Prepare buildx to build multiarch stuff:
|
|
||||||
- docker context create 'ci-context'
|
|
||||||
- docker buildx create --name 'multiarch-builder' --use 'ci-context'
|
|
||||||
# Copy binaries to their docker arch path
|
|
||||||
- mkdir -p linux/ && mv ./conduit-x86_64-unknown-linux-musl linux/amd64
|
|
||||||
- mkdir -p linux/arm/ && mv ./conduit-arm-unknown-linux-musleabihf linux/arm/v6
|
|
||||||
- mkdir -p linux/arm/ && mv ./conduit-armv7-unknown-linux-musleabihf linux/arm/v7
|
|
||||||
- mv ./conduit-aarch64-unknown-linux-musl linux/arm64
|
|
||||||
- 'export CREATED=$(date -u +''%Y-%m-%dT%H:%M:%SZ'') && echo "Docker image creation date: $CREATED"'
|
|
||||||
# Build and push image:
|
|
||||||
- >
|
|
||||||
docker buildx build
|
|
||||||
--pull
|
|
||||||
--push
|
|
||||||
--cache-from=type=local,src=$CI_PROJECT_DIR/docker_cache
|
|
||||||
--cache-to=type=local,dest=$CI_PROJECT_DIR/docker_cache
|
|
||||||
--build-arg CREATED=$CREATED
|
|
||||||
--build-arg VERSION=$(grep -m1 -o '[0-9].[0-9].[0-9]' Cargo.toml)
|
|
||||||
--build-arg "GIT_REF=$CI_COMMIT_SHORT_SHA"
|
|
||||||
--platform "$PLATFORMS"
|
|
||||||
--tag "$TAG"
|
|
||||||
--tag "$TAG-alpine"
|
|
||||||
--tag "$TAG-commit-$CI_COMMIT_SHORT_SHA"
|
|
||||||
--file "$DOCKER_FILE" .
|
|
||||||
|
|
||||||
docker:next:gitlab:
|
oci-image:push-dockerhub:
|
||||||
extends: .docker-shared-settings
|
extends: .push-oci-image
|
||||||
rules:
|
|
||||||
- if: '$CI_COMMIT_BRANCH == "next"'
|
|
||||||
variables:
|
variables:
|
||||||
TAG: "$CI_REGISTRY_IMAGE/matrix-conduit:next"
|
IMAGE_NAME: matrixconduit/matrix-conduit
|
||||||
|
|
||||||
docker:next:dockerhub:
|
|
||||||
extends: .docker-shared-settings
|
|
||||||
rules:
|
|
||||||
- if: '$CI_COMMIT_BRANCH == "next" && $DOCKER_HUB'
|
|
||||||
variables:
|
|
||||||
TAG: "$DOCKER_HUB_IMAGE/matrixconduit/matrix-conduit:next"
|
|
||||||
|
|
||||||
docker:master:gitlab:
|
|
||||||
extends: .docker-shared-settings
|
|
||||||
rules:
|
|
||||||
- if: '$CI_COMMIT_BRANCH == "master"'
|
|
||||||
variables:
|
|
||||||
TAG: "$CI_REGISTRY_IMAGE/matrix-conduit:latest"
|
|
||||||
|
|
||||||
docker:master:dockerhub:
|
|
||||||
extends: .docker-shared-settings
|
|
||||||
rules:
|
|
||||||
- if: '$CI_COMMIT_BRANCH == "master" && $DOCKER_HUB'
|
|
||||||
variables:
|
|
||||||
TAG: "$DOCKER_HUB_IMAGE/matrixconduit/matrix-conduit:latest"
|
|
||||||
|
|
||||||
docker:tags:gitlab:
|
|
||||||
extends: .docker-shared-settings
|
|
||||||
rules:
|
|
||||||
- if: "$CI_COMMIT_TAG"
|
|
||||||
variables:
|
|
||||||
TAG: "$CI_REGISTRY_IMAGE/matrix-conduit:$CI_COMMIT_TAG"
|
|
||||||
|
|
||||||
docker:tags:dockerhub:
|
|
||||||
extends: .docker-shared-settings
|
|
||||||
rules:
|
|
||||||
- if: "$CI_COMMIT_TAG && $DOCKER_HUB"
|
|
||||||
variables:
|
|
||||||
TAG: "$DOCKER_HUB_IMAGE/matrixconduit/matrix-conduit:$CI_COMMIT_TAG"
|
|
||||||
|
|
||||||
# --------------------------------------------------------------------- #
|
|
||||||
# Run tests #
|
|
||||||
# --------------------------------------------------------------------- #
|
|
||||||
|
|
||||||
.test-shared-settings:
|
|
||||||
stage: "test"
|
|
||||||
needs: []
|
|
||||||
image: "registry.gitlab.com/jfowl/conduit-containers/rust-with-tools:latest"
|
|
||||||
tags: ["docker"]
|
|
||||||
variables:
|
|
||||||
CARGO_INCREMENTAL: "false" # https://matklad.github.io/2021/09/04/fast-rust-builds.html#ci-workflow
|
|
||||||
interruptible: true
|
|
||||||
|
|
||||||
test:cargo:
|
|
||||||
extends: .test-shared-settings
|
|
||||||
before_script:
|
before_script:
|
||||||
# If provided, bring in caching through sccache, which uses an external S3 endpoint to store compilation results:
|
- docker login -u $DOCKER_HUB_USER -p $DOCKER_HUB_PASSWORD
|
||||||
- if [ -n "${SCCACHE_ENDPOINT}" ]; then export RUSTC_WRAPPER=/usr/local/cargo/bin/sccache; fi
|
|
||||||
script:
|
|
||||||
- rustc --version && cargo --version # Print version info for debugging
|
|
||||||
- "cargo test --color always --workspace --verbose --locked --no-fail-fast -- -Z unstable-options --format json | gitlab-report -p test > $CI_PROJECT_DIR/report.xml"
|
|
||||||
artifacts:
|
|
||||||
when: always
|
|
||||||
reports:
|
|
||||||
junit: report.xml
|
|
||||||
|
|
||||||
|
|
||||||
test:clippy:
|
|
||||||
extends: .test-shared-settings
|
|
||||||
allow_failure: true
|
|
||||||
before_script:
|
|
||||||
- rustup component add clippy
|
|
||||||
# If provided, bring in caching through sccache, which uses an external S3 endpoint to store compilation results:
|
|
||||||
- if [ -n "${SCCACHE_ENDPOINT}" ]; then export RUSTC_WRAPPER=/usr/local/cargo/bin/sccache; fi
|
|
||||||
script:
|
|
||||||
- rustc --version && cargo --version # Print version info for debugging
|
|
||||||
- "cargo clippy --color always --verbose --message-format=json | gitlab-report -p clippy > $CI_PROJECT_DIR/gl-code-quality-report.json"
|
|
||||||
artifacts:
|
|
||||||
when: always
|
|
||||||
reports:
|
|
||||||
codequality: gl-code-quality-report.json
|
|
||||||
|
|
||||||
test:format:
|
|
||||||
extends: .test-shared-settings
|
|
||||||
before_script:
|
|
||||||
- rustup component add rustfmt
|
|
||||||
script:
|
|
||||||
- cargo fmt --all -- --check
|
|
||||||
|
|
||||||
test:audit:
|
|
||||||
extends: .test-shared-settings
|
|
||||||
allow_failure: true
|
|
||||||
script:
|
|
||||||
- cargo audit --color always || true
|
|
||||||
- cargo audit --stale --json | gitlab-report -p audit > gl-sast-report.json
|
|
||||||
artifacts:
|
|
||||||
when: always
|
|
||||||
reports:
|
|
||||||
sast: gl-sast-report.json
|
|
||||||
|
|
||||||
test:sytest:
|
|
||||||
stage: "test"
|
|
||||||
allow_failure: true
|
|
||||||
needs:
|
|
||||||
- "build:debug:cargo:x86_64-unknown-linux-musl"
|
|
||||||
image:
|
|
||||||
name: "valkum/sytest-conduit:latest"
|
|
||||||
entrypoint: [""]
|
|
||||||
tags: ["docker"]
|
|
||||||
variables:
|
|
||||||
PLUGINS: "https://github.com/valkum/sytest_conduit/archive/master.tar.gz"
|
|
||||||
interruptible: true
|
|
||||||
before_script:
|
|
||||||
- "mkdir -p /app"
|
|
||||||
- "cp ./conduit-debug-x86_64-unknown-linux-musl /app/conduit"
|
|
||||||
- "chmod +x /app/conduit"
|
|
||||||
- "rm -rf /src && ln -s $CI_PROJECT_DIR/ /src"
|
|
||||||
- "mkdir -p /work/server-0/database/ && mkdir -p /work/server-1/database/ && mkdir -p /work/server-2/database/"
|
|
||||||
- "cd /"
|
|
||||||
script:
|
|
||||||
- "SYTEST_EXIT_CODE=0"
|
|
||||||
- "/bootstrap.sh conduit || SYTEST_EXIT_CODE=1"
|
|
||||||
- 'perl /sytest/tap-to-junit-xml.pl --puretap --input /logs/results.tap --output $CI_PROJECT_DIR/sytest.xml "Sytest" && cp /logs/results.tap $CI_PROJECT_DIR/results.tap'
|
|
||||||
- "exit $SYTEST_EXIT_CODE"
|
|
||||||
artifacts:
|
|
||||||
when: always
|
|
||||||
paths:
|
|
||||||
- "$CI_PROJECT_DIR/sytest.xml"
|
|
||||||
- "$CI_PROJECT_DIR/results.tap"
|
|
||||||
reports:
|
|
||||||
junit: "$CI_PROJECT_DIR/sytest.xml"
|
|
||||||
|
|
||||||
test:dockerlint:
|
|
||||||
stage: "test"
|
|
||||||
needs: []
|
|
||||||
image: "ghcr.io/hadolint/hadolint@sha256:6c4b7c23f96339489dd35f21a711996d7ce63047467a9a562287748a03ad5242" # 2.8.0-alpine
|
|
||||||
interruptible: true
|
|
||||||
script:
|
|
||||||
- hadolint --version
|
|
||||||
# First pass: Print for CI log:
|
|
||||||
- >
|
|
||||||
hadolint
|
|
||||||
--no-fail --verbose
|
|
||||||
./Dockerfile
|
|
||||||
./docker/ci-binaries-packaging.Dockerfile
|
|
||||||
# Then output the results into a json for GitLab to pretty-print this in the MR:
|
|
||||||
- >
|
|
||||||
hadolint
|
|
||||||
--format gitlab_codeclimate
|
|
||||||
--failure-threshold error
|
|
||||||
./Dockerfile
|
|
||||||
./docker/ci-binaries-packaging.Dockerfile > dockerlint.json
|
|
||||||
artifacts:
|
|
||||||
when: always
|
|
||||||
reports:
|
|
||||||
codequality: dockerlint.json
|
|
||||||
paths:
|
|
||||||
- dockerlint.json
|
|
||||||
rules:
|
|
||||||
- if: '$CI_COMMIT_REF_NAME != "master"'
|
|
||||||
changes:
|
|
||||||
- docker/*Dockerfile
|
|
||||||
- Dockerfile
|
|
||||||
- .gitlab-ci.yml
|
|
||||||
- if: '$CI_COMMIT_REF_NAME == "master"'
|
|
||||||
- if: '$CI_COMMIT_REF_NAME == "next"'
|
|
||||||
|
|
||||||
# --------------------------------------------------------------------- #
|
|
||||||
# Store binaries as package so they have download urls #
|
|
||||||
# --------------------------------------------------------------------- #
|
|
||||||
|
|
||||||
publish:package:
|
|
||||||
stage: "upload artifacts"
|
|
||||||
needs:
|
|
||||||
- "build:release:cargo:x86_64-unknown-linux-musl"
|
|
||||||
- "build:release:cargo:arm-unknown-linux-musleabihf"
|
|
||||||
- "build:release:cargo:armv7-unknown-linux-musleabihf"
|
|
||||||
- "build:release:cargo:aarch64-unknown-linux-musl"
|
|
||||||
# - "build:cargo-deb:x86_64-unknown-linux-gnu"
|
|
||||||
rules:
|
|
||||||
- if: '$CI_COMMIT_BRANCH == "master"'
|
|
||||||
- if: '$CI_COMMIT_BRANCH == "next"'
|
|
||||||
- if: "$CI_COMMIT_TAG"
|
|
||||||
image: curlimages/curl:latest
|
|
||||||
tags: ["docker"]
|
|
||||||
variables:
|
|
||||||
GIT_STRATEGY: "none" # Don't need a clean copy of the code, we just operate on artifacts
|
|
||||||
script:
|
|
||||||
- 'BASE_URL="${CI_API_V4_URL}/projects/${CI_PROJECT_ID}/packages/generic/conduit-${CI_COMMIT_REF_SLUG}/build-${CI_PIPELINE_ID}"'
|
|
||||||
- 'curl --header "JOB-TOKEN: $CI_JOB_TOKEN" --upload-file conduit-x86_64-unknown-linux-musl "${BASE_URL}/conduit-x86_64-unknown-linux-musl"'
|
|
||||||
- 'curl --header "JOB-TOKEN: $CI_JOB_TOKEN" --upload-file conduit-arm-unknown-linux-musleabihf "${BASE_URL}/conduit-arm-unknown-linux-musleabihf"'
|
|
||||||
- 'curl --header "JOB-TOKEN: $CI_JOB_TOKEN" --upload-file conduit-armv7-unknown-linux-musleabihf "${BASE_URL}/conduit-armv7-unknown-linux-musleabihf"'
|
|
||||||
- 'curl --header "JOB-TOKEN: $CI_JOB_TOKEN" --upload-file conduit-aarch64-unknown-linux-musl "${BASE_URL}/conduit-aarch64-unknown-linux-musl"'
|
|
||||||
|
|
||||||
# Avoid duplicate pipelines
|
|
||||||
# See: https://docs.gitlab.com/ee/ci/yaml/workflow.html#switch-between-branch-pipelines-and-merge-request-pipelines
|
|
||||||
workflow:
|
|
||||||
rules:
|
|
||||||
- if: '$CI_PIPELINE_SOURCE == "merge_request_event"'
|
|
||||||
- if: "$CI_COMMIT_BRANCH && $CI_OPEN_MERGE_REQUESTS"
|
|
||||||
when: never
|
|
||||||
- if: "$CI_COMMIT_BRANCH"
|
|
||||||
- if: "$CI_COMMIT_TAG"
|
|
5
.gitlab/CODEOWNERS
Normal file
5
.gitlab/CODEOWNERS
Normal file
|
@ -0,0 +1,5 @@
|
||||||
|
# Nix things
|
||||||
|
.envrc @CobaltCause
|
||||||
|
flake.lock @CobaltCause
|
||||||
|
flake.nix @CobaltCause
|
||||||
|
nix/ @CobaltCause
|
37
.gitlab/setup-buildx-remote-builders.sh
Normal file
37
.gitlab/setup-buildx-remote-builders.sh
Normal file
|
@ -0,0 +1,37 @@
|
||||||
|
#!/bin/sh
|
||||||
|
set -eux
|
||||||
|
|
||||||
|
# --------------------------------------------------------------------- #
|
||||||
|
# #
|
||||||
|
# Configures docker buildx to use a remote server for arm building. #
|
||||||
|
# Expects $SSH_PRIVATE_KEY to be a valid ssh ed25519 private key with #
|
||||||
|
# access to the server $ARM_SERVER_USER@$ARM_SERVER_IP #
|
||||||
|
# #
|
||||||
|
# This is expected to only be used in the official CI/CD pipeline! #
|
||||||
|
# #
|
||||||
|
# Requirements: openssh-client, docker buildx #
|
||||||
|
# Inspired by: https://depot.dev/blog/building-arm-containers #
|
||||||
|
# #
|
||||||
|
# --------------------------------------------------------------------- #
|
||||||
|
|
||||||
|
cat "$BUILD_SERVER_SSH_PRIVATE_KEY" | ssh-add -
|
||||||
|
|
||||||
|
# Test server connections:
|
||||||
|
ssh "$ARM_SERVER_USER@$ARM_SERVER_IP" "uname -a"
|
||||||
|
ssh "$AMD_SERVER_USER@$AMD_SERVER_IP" "uname -a"
|
||||||
|
|
||||||
|
# Connect remote arm64 server for all arm builds:
|
||||||
|
docker buildx create \
|
||||||
|
--name "multi" \
|
||||||
|
--driver "docker-container" \
|
||||||
|
--platform "linux/arm64,linux/arm/v7" \
|
||||||
|
"ssh://$ARM_SERVER_USER@$ARM_SERVER_IP"
|
||||||
|
|
||||||
|
# Connect remote amd64 server for adm64 builds:
|
||||||
|
docker buildx create --append \
|
||||||
|
--name "multi" \
|
||||||
|
--driver "docker-container" \
|
||||||
|
--platform "linux/amd64" \
|
||||||
|
"ssh://$AMD_SERVER_USER@$AMD_SERVER_IP"
|
||||||
|
|
||||||
|
docker buildx use multi
|
3
.vscode/settings.json
vendored
3
.vscode/settings.json
vendored
|
@ -1,3 +0,0 @@
|
||||||
{
|
|
||||||
"rust-analyzer.procMacro.enable": true,
|
|
||||||
}
|
|
134
CODE_OF_CONDUCT.md
Normal file
134
CODE_OF_CONDUCT.md
Normal file
|
@ -0,0 +1,134 @@
|
||||||
|
|
||||||
|
# Contributor Covenant Code of Conduct
|
||||||
|
|
||||||
|
## Our Pledge
|
||||||
|
|
||||||
|
We as members, contributors, and leaders pledge to make participation in our
|
||||||
|
community a harassment-free experience for everyone, regardless of age, body
|
||||||
|
size, visible or invisible disability, ethnicity, sex characteristics, gender
|
||||||
|
identity and expression, level of experience, education, socio-economic status,
|
||||||
|
nationality, personal appearance, race, caste, color, religion, or sexual
|
||||||
|
identity and orientation.
|
||||||
|
|
||||||
|
We pledge to act and interact in ways that contribute to an open, welcoming,
|
||||||
|
diverse, inclusive, and healthy community.
|
||||||
|
|
||||||
|
## Our Standards
|
||||||
|
|
||||||
|
Examples of behavior that contributes to a positive environment for our
|
||||||
|
community include:
|
||||||
|
|
||||||
|
* Demonstrating empathy and kindness toward other people
|
||||||
|
* Being respectful of differing opinions, viewpoints, and experiences
|
||||||
|
* Giving and gracefully accepting constructive feedback
|
||||||
|
* Accepting responsibility and apologizing to those affected by our mistakes,
|
||||||
|
and learning from the experience
|
||||||
|
* Focusing on what is best not just for us as individuals, but for the overall
|
||||||
|
community
|
||||||
|
|
||||||
|
Examples of unacceptable behavior include:
|
||||||
|
|
||||||
|
* The use of sexualized language or imagery, and sexual attention or advances of
|
||||||
|
any kind
|
||||||
|
* Trolling, insulting or derogatory comments, and personal or political attacks
|
||||||
|
* Public or private harassment
|
||||||
|
* Publishing others' private information, such as a physical or email address,
|
||||||
|
without their explicit permission
|
||||||
|
* Other conduct which could reasonably be considered inappropriate in a
|
||||||
|
professional setting
|
||||||
|
|
||||||
|
## Enforcement Responsibilities
|
||||||
|
|
||||||
|
Community leaders are responsible for clarifying and enforcing our standards of
|
||||||
|
acceptable behavior and will take appropriate and fair corrective action in
|
||||||
|
response to any behavior that they deem inappropriate, threatening, offensive,
|
||||||
|
or harmful.
|
||||||
|
|
||||||
|
Community leaders have the right and responsibility to remove, edit, or reject
|
||||||
|
comments, commits, code, wiki edits, issues, and other contributions that are
|
||||||
|
not aligned to this Code of Conduct, and will communicate reasons for moderation
|
||||||
|
decisions when appropriate.
|
||||||
|
|
||||||
|
## Scope
|
||||||
|
|
||||||
|
This Code of Conduct applies within all community spaces, and also applies when
|
||||||
|
an individual is officially representing the community in public spaces.
|
||||||
|
Examples of representing our community include using an official e-mail address,
|
||||||
|
posting via an official social media account, or acting as an appointed
|
||||||
|
representative at an online or offline event.
|
||||||
|
|
||||||
|
## Enforcement
|
||||||
|
|
||||||
|
Instances of abusive, harassing, or otherwise unacceptable behavior may be
|
||||||
|
reported to the community leaders responsible for enforcement over email at
|
||||||
|
coc@koesters.xyz or over Matrix at @timo:conduit.rs.
|
||||||
|
All complaints will be reviewed and investigated promptly and fairly.
|
||||||
|
|
||||||
|
All community leaders are obligated to respect the privacy and security of the
|
||||||
|
reporter of any incident.
|
||||||
|
|
||||||
|
## Enforcement Guidelines
|
||||||
|
|
||||||
|
Community leaders will follow these Community Impact Guidelines in determining
|
||||||
|
the consequences for any action they deem in violation of this Code of Conduct:
|
||||||
|
|
||||||
|
### 1. Correction
|
||||||
|
|
||||||
|
**Community Impact**: Use of inappropriate language or other behavior deemed
|
||||||
|
unprofessional or unwelcome in the community.
|
||||||
|
|
||||||
|
**Consequence**: A private, written warning from community leaders, providing
|
||||||
|
clarity around the nature of the violation and an explanation of why the
|
||||||
|
behavior was inappropriate. A public apology may be requested.
|
||||||
|
|
||||||
|
### 2. Warning
|
||||||
|
|
||||||
|
**Community Impact**: A violation through a single incident or series of
|
||||||
|
actions.
|
||||||
|
|
||||||
|
**Consequence**: A warning with consequences for continued behavior. No
|
||||||
|
interaction with the people involved, including unsolicited interaction with
|
||||||
|
those enforcing the Code of Conduct, for a specified period of time. This
|
||||||
|
includes avoiding interactions in community spaces as well as external channels
|
||||||
|
like social media. Violating these terms may lead to a temporary or permanent
|
||||||
|
ban.
|
||||||
|
|
||||||
|
### 3. Temporary Ban
|
||||||
|
|
||||||
|
**Community Impact**: A serious violation of community standards, including
|
||||||
|
sustained inappropriate behavior.
|
||||||
|
|
||||||
|
**Consequence**: A temporary ban from any sort of interaction or public
|
||||||
|
communication with the community for a specified period of time. No public or
|
||||||
|
private interaction with the people involved, including unsolicited interaction
|
||||||
|
with those enforcing the Code of Conduct, is allowed during this period.
|
||||||
|
Violating these terms may lead to a permanent ban.
|
||||||
|
|
||||||
|
### 4. Permanent Ban
|
||||||
|
|
||||||
|
**Community Impact**: Demonstrating a pattern of violation of community
|
||||||
|
standards, including sustained inappropriate behavior, harassment of an
|
||||||
|
individual, or aggression toward or disparagement of classes of individuals.
|
||||||
|
|
||||||
|
**Consequence**: A permanent ban from any sort of public interaction within the
|
||||||
|
community.
|
||||||
|
|
||||||
|
## Attribution
|
||||||
|
|
||||||
|
This Code of Conduct is adapted from the [Contributor Covenant][homepage],
|
||||||
|
version 2.1, available at
|
||||||
|
[https://www.contributor-covenant.org/version/2/1/code_of_conduct.html][v2.1].
|
||||||
|
|
||||||
|
Community Impact Guidelines were inspired by
|
||||||
|
[Mozilla's code of conduct enforcement ladder][Mozilla CoC].
|
||||||
|
|
||||||
|
For answers to common questions about this code of conduct, see the FAQ at
|
||||||
|
[https://www.contributor-covenant.org/faq][FAQ]. Translations are available at
|
||||||
|
[https://www.contributor-covenant.org/translations][translations].
|
||||||
|
|
||||||
|
[homepage]: https://www.contributor-covenant.org
|
||||||
|
[v2.1]: https://www.contributor-covenant.org/version/2/1/code_of_conduct.html
|
||||||
|
[Mozilla CoC]: https://github.com/mozilla/diversity
|
||||||
|
[FAQ]: https://www.contributor-covenant.org/faq
|
||||||
|
[translations]: https://www.contributor-covenant.org/translations
|
||||||
|
|
2546
Cargo.lock
generated
2546
Cargo.lock
generated
File diff suppressed because it is too large
Load diff
131
Cargo.toml
131
Cargo.toml
|
@ -1,3 +1,14 @@
|
||||||
|
# Keep alphabetically sorted
|
||||||
|
[workspace.lints.rust]
|
||||||
|
explicit_outlives_requirements = "warn"
|
||||||
|
unused_qualifications = "warn"
|
||||||
|
|
||||||
|
# Keep alphabetically sorted
|
||||||
|
[workspace.lints.clippy]
|
||||||
|
cloned_instead_of_copied = "warn"
|
||||||
|
dbg_macro = "warn"
|
||||||
|
str_to_string = "warn"
|
||||||
|
|
||||||
[package]
|
[package]
|
||||||
name = "conduit"
|
name = "conduit"
|
||||||
description = "A Matrix homeserver written in Rust"
|
description = "A Matrix homeserver written in Rust"
|
||||||
|
@ -6,101 +17,119 @@ authors = ["timokoesters <timo@koesters.xyz>"]
|
||||||
homepage = "https://conduit.rs"
|
homepage = "https://conduit.rs"
|
||||||
repository = "https://gitlab.com/famedly/conduit"
|
repository = "https://gitlab.com/famedly/conduit"
|
||||||
readme = "README.md"
|
readme = "README.md"
|
||||||
version = "0.4.0"
|
version = "0.7.0-alpha"
|
||||||
rust-version = "1.56"
|
|
||||||
edition = "2021"
|
edition = "2021"
|
||||||
|
|
||||||
|
# See also `rust-toolchain.toml`
|
||||||
|
rust-version = "1.75.0"
|
||||||
|
|
||||||
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
|
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
|
||||||
|
|
||||||
|
[lints]
|
||||||
|
workspace = true
|
||||||
|
|
||||||
[dependencies]
|
[dependencies]
|
||||||
# Web framework
|
# Web framework
|
||||||
axum = { version = "0.5.8", default-features = false, features = ["form", "headers", "http1", "http2", "json", "matched-path"], optional = true }
|
axum = { version = "0.6.18", default-features = false, features = ["form", "headers", "http1", "http2", "json", "matched-path"], optional = true }
|
||||||
axum-server = { version = "0.4.0", features = ["tls-rustls"] }
|
axum-server = { version = "0.5.1", features = ["tls-rustls"] }
|
||||||
tower = { version = "0.4.8", features = ["util"] }
|
tower = { version = "0.4.13", features = ["util"] }
|
||||||
tower-http = { version = "0.3.4", features = ["add-extension", "cors", "compression-full", "sensitive-headers", "trace", "util"] }
|
tower-http = { version = "0.4.1", features = ["add-extension", "cors", "sensitive-headers", "trace", "util"] }
|
||||||
|
|
||||||
# Used for matrix spec type definitions and helpers
|
# Used for matrix spec type definitions and helpers
|
||||||
#ruma = { version = "0.4.0", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] }
|
#ruma = { version = "0.4.0", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] }
|
||||||
ruma = { git = "https://github.com/ruma/ruma", rev = "d614ad1422d6c4b3437ebc318ca8514ae338fd6d", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-msc2448", "unstable-pre-spec", "unstable-exhaustive-types"] }
|
ruma = { git = "https://github.com/ruma/ruma", rev = "1a1c61ee1e8f0936e956a3b69c931ce12ee28475", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-msc2448", "unstable-msc3575", "unstable-exhaustive-types", "ring-compat", "unstable-unspecified" ] }
|
||||||
#ruma = { git = "https://github.com/timokoesters/ruma", rev = "50c1db7e0a3a21fc794b0cce3b64285a4c750c71", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] }
|
#ruma = { git = "https://github.com/timokoesters/ruma", rev = "4ec9c69bb7e09391add2382b3ebac97b6e8f4c64", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-msc2448", "unstable-msc3575", "unstable-exhaustive-types", "ring-compat", "unstable-unspecified" ] }
|
||||||
#ruma = { path = "../ruma/crates/ruma", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] }
|
#ruma = { path = "../ruma/crates/ruma", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-msc2448", "unstable-msc3575", "unstable-exhaustive-types", "ring-compat", "unstable-unspecified" ] }
|
||||||
|
|
||||||
# Async runtime and utilities
|
# Async runtime and utilities
|
||||||
tokio = { version = "1.11.0", features = ["fs", "macros", "signal", "sync"] }
|
tokio = { version = "1.28.1", features = ["fs", "macros", "signal", "sync"] }
|
||||||
# Used for storing data permanently
|
# Used for storing data permanently
|
||||||
sled = { version = "0.34.7", features = ["compression", "no_metrics"], optional = true }
|
#sled = { version = "0.34.7", features = ["compression", "no_metrics"], optional = true }
|
||||||
#sled = { git = "https://github.com/spacejam/sled.git", rev = "e4640e0773595229f398438886f19bca6f7326a2", features = ["compression"] }
|
#sled = { git = "https://github.com/spacejam/sled.git", rev = "e4640e0773595229f398438886f19bca6f7326a2", features = ["compression"] }
|
||||||
persy = { version = "1.0.0", optional = true, features = ["background_ops"] }
|
persy = { version = "1.4.4", optional = true, features = ["background_ops"] }
|
||||||
|
|
||||||
# Used for the http request / response body type for Ruma endpoints used with reqwest
|
# Used for the http request / response body type for Ruma endpoints used with reqwest
|
||||||
bytes = "1.1.0"
|
bytes = "1.4.0"
|
||||||
http = "0.2.4"
|
http = "0.2.9"
|
||||||
# Used to find data directory for default db path
|
# Used to find data directory for default db path
|
||||||
directories = "4.0.0"
|
directories = "4.0.1"
|
||||||
# Used for ruma wrapper
|
# Used for ruma wrapper
|
||||||
serde_json = { version = "1.0.68", features = ["raw_value"] }
|
serde_json = { version = "1.0.96", features = ["raw_value"] }
|
||||||
# Used for appservice registration files
|
# Used for appservice registration files
|
||||||
serde_yaml = "0.8.21"
|
serde_yaml = "0.9.21"
|
||||||
# Used for pdu definition
|
# Used for pdu definition
|
||||||
serde = { version = "1.0.130", features = ["rc"] }
|
serde = { version = "1.0.163", features = ["rc"] }
|
||||||
# Used for secure identifiers
|
# Used for secure identifiers
|
||||||
rand = "0.8.4"
|
rand = "0.8.5"
|
||||||
# Used to hash passwords
|
# Used to hash passwords
|
||||||
rust-argon2 = "0.8.3"
|
rust-argon2 = "1.0.0"
|
||||||
# Used to send requests
|
# Used to send requests
|
||||||
reqwest = { default-features = false, features = ["rustls-tls-native-roots", "socks"], git = "https://github.com/timokoesters/reqwest", rev = "57b7cf4feb921573dfafad7d34b9ac6e44ead0bd" }
|
hyper = "0.14.26"
|
||||||
|
reqwest = { version = "0.11.18", default-features = false, features = ["rustls-tls-native-roots", "socks"] }
|
||||||
# Used for conduit::Error type
|
# Used for conduit::Error type
|
||||||
thiserror = "1.0.29"
|
thiserror = "1.0.40"
|
||||||
# Used to generate thumbnails for images
|
# Used to generate thumbnails for images
|
||||||
image = { version = "0.23.14", default-features = false, features = ["jpeg", "png", "gif"] }
|
image = { version = "0.24.6", default-features = false, features = ["jpeg", "png", "gif"] }
|
||||||
# Used to encode server public key
|
# Used to encode server public key
|
||||||
base64 = "0.13.0"
|
base64 = "0.21.2"
|
||||||
# Used when hashing the state
|
# Used when hashing the state
|
||||||
ring = "0.16.20"
|
ring = "0.17.7"
|
||||||
# Used when querying the SRV record of other servers
|
# Used when querying the SRV record of other servers
|
||||||
trust-dns-resolver = "0.20.3"
|
trust-dns-resolver = "0.22.0"
|
||||||
# Used to find matching events for appservices
|
# Used to find matching events for appservices
|
||||||
regex = "1.5.4"
|
regex = "1.8.1"
|
||||||
# jwt jsonwebtokens
|
# jwt jsonwebtokens
|
||||||
jsonwebtoken = "7.2.0"
|
jsonwebtoken = "9.2.0"
|
||||||
# Performance measurements
|
# Performance measurements
|
||||||
tracing = { version = "0.1.27", features = [] }
|
tracing = { version = "0.1.37", features = [] }
|
||||||
tracing-subscriber = "0.2.22"
|
tracing-subscriber = { version = "0.3.17", features = ["env-filter"] }
|
||||||
tracing-flame = "0.1.0"
|
tracing-flame = "0.2.0"
|
||||||
opentelemetry = { version = "0.16.0", features = ["rt-tokio"] }
|
opentelemetry = { version = "0.18.0", features = ["rt-tokio"] }
|
||||||
opentelemetry-jaeger = { version = "0.15.0", features = ["rt-tokio"] }
|
opentelemetry-jaeger = { version = "0.17.0", features = ["rt-tokio"] }
|
||||||
|
tracing-opentelemetry = "0.18.0"
|
||||||
lru-cache = "0.1.2"
|
lru-cache = "0.1.2"
|
||||||
rusqlite = { version = "0.25.3", optional = true, features = ["bundled"] }
|
rusqlite = { version = "0.29.0", optional = true, features = ["bundled"] }
|
||||||
parking_lot = { version = "0.11.2", optional = true }
|
parking_lot = { version = "0.12.1", optional = true }
|
||||||
crossbeam = { version = "0.8.1", optional = true }
|
# crossbeam = { version = "0.8.2", optional = true }
|
||||||
num_cpus = "1.13.0"
|
num_cpus = "1.15.0"
|
||||||
threadpool = "1.8.1"
|
threadpool = "1.8.1"
|
||||||
heed = { git = "https://github.com/timokoesters/heed.git", rev = "f6f825da7fb2c758867e05ad973ef800a6fe1d5d", optional = true }
|
# heed = { git = "https://github.com/timokoesters/heed.git", rev = "f6f825da7fb2c758867e05ad973ef800a6fe1d5d", optional = true }
|
||||||
rocksdb = { version = "0.17.0", default-features = true, features = ["multi-threaded-cf", "zstd"], optional = true }
|
# Used for ruma wrapper
|
||||||
|
serde_html_form = "0.2.0"
|
||||||
|
|
||||||
thread_local = "1.1.3"
|
rocksdb = { version = "0.21.0", default-features = true, features = ["multi-threaded-cf", "zstd"], optional = true }
|
||||||
|
|
||||||
|
thread_local = "1.1.7"
|
||||||
# used for TURN server authentication
|
# used for TURN server authentication
|
||||||
hmac = "0.11.0"
|
hmac = "0.12.1"
|
||||||
sha-1 = "0.9.8"
|
sha-1 = "0.10.1"
|
||||||
# used for conduit's CLI and admin room command parsing
|
# used for conduit's CLI and admin room command parsing
|
||||||
clap = { version = "3.2.5", default-features = false, features = ["std", "derive"] }
|
clap = { version = "4.3.0", default-features = false, features = ["std", "derive", "help", "usage", "error-context"] }
|
||||||
futures-util = { version = "0.3.17", default-features = false }
|
futures-util = { version = "0.3.28", default-features = false }
|
||||||
# Used for reading the configuration from conduit.toml & environment variables
|
# Used for reading the configuration from conduit.toml & environment variables
|
||||||
figment = { version = "0.10.6", features = ["env", "toml"] }
|
figment = { version = "0.10.8", features = ["env", "toml"] }
|
||||||
|
|
||||||
tikv-jemalloc-ctl = { version = "0.4.2", features = ["use_std"], optional = true }
|
tikv-jemalloc-ctl = { version = "0.5.0", features = ["use_std"], optional = true }
|
||||||
tikv-jemallocator = { version = "0.4.1", features = ["unprefixed_malloc_on_supported_platforms"], optional = true }
|
tikv-jemallocator = { version = "0.5.0", features = ["unprefixed_malloc_on_supported_platforms"], optional = true }
|
||||||
|
lazy_static = "1.4.0"
|
||||||
|
async-trait = "0.1.68"
|
||||||
|
|
||||||
|
sd-notify = { version = "0.4.1", optional = true }
|
||||||
|
|
||||||
|
[target.'cfg(unix)'.dependencies]
|
||||||
|
nix = { version = "0.26.2", features = ["resource"] }
|
||||||
|
|
||||||
[features]
|
[features]
|
||||||
default = ["conduit_bin", "backend_sqlite", "backend_rocksdb", "jemalloc"]
|
default = ["conduit_bin", "backend_sqlite", "backend_rocksdb", "systemd"]
|
||||||
backend_sled = ["sled"]
|
#backend_sled = ["sled"]
|
||||||
backend_persy = ["persy", "parking_lot"]
|
backend_persy = ["persy", "parking_lot"]
|
||||||
backend_sqlite = ["sqlite"]
|
backend_sqlite = ["sqlite"]
|
||||||
backend_heed = ["heed", "crossbeam"]
|
#backend_heed = ["heed", "crossbeam"]
|
||||||
backend_rocksdb = ["rocksdb"]
|
backend_rocksdb = ["rocksdb"]
|
||||||
jemalloc = ["tikv-jemalloc-ctl", "tikv-jemallocator"]
|
jemalloc = ["tikv-jemalloc-ctl", "tikv-jemallocator"]
|
||||||
sqlite = ["rusqlite", "parking_lot", "tokio/signal"]
|
sqlite = ["rusqlite", "parking_lot", "tokio/signal"]
|
||||||
conduit_bin = ["axum"]
|
conduit_bin = ["axum"]
|
||||||
|
systemd = ["sd-notify"]
|
||||||
|
|
||||||
[[bin]]
|
[[bin]]
|
||||||
name = "conduit"
|
name = "conduit"
|
||||||
|
@ -123,7 +152,7 @@ instead of a server that has high scalability."""
|
||||||
section = "net"
|
section = "net"
|
||||||
priority = "optional"
|
priority = "optional"
|
||||||
assets = [
|
assets = [
|
||||||
["debian/README.Debian", "usr/share/doc/matrix-conduit/", "644"],
|
["debian/README.md", "usr/share/doc/matrix-conduit/README.Debian", "644"],
|
||||||
["README.md", "usr/share/doc/matrix-conduit/", "644"],
|
["README.md", "usr/share/doc/matrix-conduit/", "644"],
|
||||||
["target/release/conduit", "usr/sbin/matrix-conduit", "755"],
|
["target/release/conduit", "usr/sbin/matrix-conduit", "755"],
|
||||||
]
|
]
|
||||||
|
|
23
Cross.toml
23
Cross.toml
|
@ -1,23 +0,0 @@
|
||||||
[build.env]
|
|
||||||
# CI uses an S3 endpoint to store sccache artifacts, so their config needs to
|
|
||||||
# be available in the cross container as well
|
|
||||||
passthrough = [
|
|
||||||
"RUSTC_WRAPPER",
|
|
||||||
"AWS_ACCESS_KEY_ID",
|
|
||||||
"AWS_SECRET_ACCESS_KEY",
|
|
||||||
"SCCACHE_BUCKET",
|
|
||||||
"SCCACHE_ENDPOINT",
|
|
||||||
"SCCACHE_S3_USE_SSL",
|
|
||||||
]
|
|
||||||
|
|
||||||
[target.aarch64-unknown-linux-musl]
|
|
||||||
image = "registry.gitlab.com/jfowl/conduit-containers/rust-cross-aarch64-unknown-linux-musl:latest"
|
|
||||||
|
|
||||||
[target.arm-unknown-linux-musleabihf]
|
|
||||||
image = "registry.gitlab.com/jfowl/conduit-containers/rust-cross-arm-unknown-linux-musleabihf:latest"
|
|
||||||
|
|
||||||
[target.armv7-unknown-linux-musleabihf]
|
|
||||||
image = "registry.gitlab.com/jfowl/conduit-containers/rust-cross-armv7-unknown-linux-musleabihf:latest"
|
|
||||||
|
|
||||||
[target.x86_64-unknown-linux-musl]
|
|
||||||
image = "registry.gitlab.com/jfowl/conduit-containers/rust-cross-x86_64-unknown-linux-musl@sha256:b6d689e42f0236c8a38b961bca2a12086018b85ed20e0826310421daf182e2bb"
|
|
135
DEPLOY.md
135
DEPLOY.md
|
@ -2,59 +2,92 @@
|
||||||
|
|
||||||
> ## Getting help
|
> ## Getting help
|
||||||
>
|
>
|
||||||
> If you run into any problems while setting up Conduit, write an email to `timo@koesters.xyz`, ask us
|
> If you run into any problems while setting up Conduit, write an email to `conduit@koesters.xyz`, ask us
|
||||||
> in `#conduit:fachschaften.org` or [open an issue on GitLab](https://gitlab.com/famedly/conduit/-/issues/new).
|
> in `#conduit:fachschaften.org` or [open an issue on GitLab](https://gitlab.com/famedly/conduit/-/issues/new).
|
||||||
|
|
||||||
## Installing Conduit
|
## Installing Conduit
|
||||||
|
|
||||||
Although you might be able to compile Conduit for Windows, we do recommend running it on a linux server. We therefore
|
Although you might be able to compile Conduit for Windows, we do recommend running it on a Linux server. We therefore
|
||||||
only offer Linux binaries.
|
only offer Linux binaries.
|
||||||
|
|
||||||
You may simply download the binary that fits your machine. Run `uname -m` to see what you need. Now copy the right url:
|
You may simply download the binary that fits your machine. Run `uname -m` to see what you need. Now copy the appropriate url:
|
||||||
|
|
||||||
| CPU Architecture | Download stable version | Download development version |
|
**Stable versions:**
|
||||||
| ------------------------------------------- | ------------------------------ | ---------------------------- |
|
|
||||||
| x84_64 / amd64 (Most servers and computers) | [Download][x84_64-musl-master] | [Download][x84_64-musl-next] |
|
|
||||||
| armv6 | [Download][armv6-musl-master] | [Download][armv6-musl-next] |
|
|
||||||
| armv7 (e.g. Raspberry Pi by default) | [Download][armv7-musl-master] | [Download][armv7-musl-next] |
|
|
||||||
| armv8 / aarch64 | [Download][armv8-musl-master] | [Download][armv8-musl-next] |
|
|
||||||
|
|
||||||
[x84_64-musl-master]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/conduit-x86_64-unknown-linux-musl?job=build:release:cargo:x86_64-unknown-linux-musl
|
| CPU Architecture | Download stable version |
|
||||||
[armv6-musl-master]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/conduit-arm-unknown-linux-musleabihf?job=build:release:cargo:arm-unknown-linux-musleabihf
|
| ------------------------------------------- | --------------------------------------------------------------- |
|
||||||
[armv7-musl-master]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/conduit-armv7-unknown-linux-musleabihf?job=build:release:cargo:armv7-unknown-linux-musleabihf
|
| x84_64 / amd64 (Most servers and computers) | [Binary][x84_64-glibc-master] / [.deb][x84_64-glibc-master-deb] |
|
||||||
[armv8-musl-master]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/conduit-aarch64-unknown-linux-musl?job=build:release:cargo:aarch64-unknown-linux-musl
|
| armv7 (e.g. Raspberry Pi by default) | [Binary][armv7-glibc-master] / [.deb][armv7-glibc-master-deb] |
|
||||||
[x84_64-musl-next]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/next/raw/conduit-x86_64-unknown-linux-musl?job=build:release:cargo:x86_64-unknown-linux-musl
|
| armv8 / aarch64 | [Binary][armv8-glibc-master] / [.deb][armv8-glibc-master-deb] |
|
||||||
[armv6-musl-next]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/next/raw/conduit-arm-unknown-linux-musleabihf?job=build:release:cargo:arm-unknown-linux-musleabihf
|
|
||||||
[armv7-musl-next]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/next/raw/conduit-armv7-unknown-linux-musleabihf?job=build:release:cargo:armv7-unknown-linux-musleabihf
|
These builds were created on and linked against the glibc version shipped with Debian bullseye.
|
||||||
[armv8-musl-next]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/next/raw/conduit-aarch64-unknown-linux-musl?job=build:release:cargo:aarch64-unknown-linux-musl
|
If you use a system with an older glibc version (e.g. RHEL8), you might need to compile Conduit yourself.
|
||||||
|
|
||||||
|
[x84_64-glibc-master]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/build-output/linux_amd64/conduit?job=docker:master
|
||||||
|
[armv7-glibc-master]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/build-output/linux_arm_v7/conduit?job=docker:master
|
||||||
|
[armv8-glibc-master]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/build-output/linux_arm64/conduit?job=docker:master
|
||||||
|
[x84_64-glibc-master-deb]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/build-output/linux_amd64/conduit.deb?job=docker:master
|
||||||
|
[armv7-glibc-master-deb]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/build-output/linux_arm_v7/conduit.deb?job=docker:master
|
||||||
|
[armv8-glibc-master-deb]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/build-output/linux_arm64/conduit.deb?job=docker:master
|
||||||
|
|
||||||
|
**Latest versions:**
|
||||||
|
|
||||||
|
| Target | Type | Download |
|
||||||
|
|-|-|-|
|
||||||
|
| `x86_64-unknown-linux-gnu` | Dynamically linked Debian package | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/next/raw/conduit.deb?job=debian:x86_64-unknown-linux-gnu) |
|
||||||
|
| `x86_64-unknown-linux-musl` | Statically linked binary | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/next/raw/conduit?job=static:x86_64-unknown-linux-musl) |
|
||||||
|
| `aarch64-unknown-linux-musl` | Statically linked binary | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/next/raw/conduit?job=static:aarch64-unknown-linux-musl) |
|
||||||
|
| `x86_64-unknown-linux-musl` | OCI image | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/next/raw/oci-image-amd64.tar.gz?job=oci-image:x86_64-unknown-linux-musl) |
|
||||||
|
| `aarch64-unknown-linux-musl` | OCI image | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/next/raw/oci-image-arm64v8.tar.gz?job=oci-image:aarch64-unknown-linux-musl) |
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
$ sudo wget -O /usr/local/bin/matrix-conduit <url>
|
$ sudo wget -O /usr/local/bin/matrix-conduit <url>
|
||||||
$ sudo chmod +x /usr/local/bin/matrix-conduit
|
$ sudo chmod +x /usr/local/bin/matrix-conduit
|
||||||
```
|
```
|
||||||
|
|
||||||
Alternatively, you may compile the binary yourself
|
Alternatively, you may compile the binary yourself. First, install any dependencies:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
|
# Debian
|
||||||
$ sudo apt install libclang-dev build-essential
|
$ sudo apt install libclang-dev build-essential
|
||||||
```
|
|
||||||
|
|
||||||
|
# RHEL
|
||||||
|
$ sudo dnf install clang
|
||||||
|
```
|
||||||
|
Then, `cd` into the source tree of conduit-next and run:
|
||||||
```bash
|
```bash
|
||||||
$ cargo build --release
|
$ cargo build --release
|
||||||
```
|
```
|
||||||
|
|
||||||
|
If you want to cross compile Conduit to another architecture, read the guide below.
|
||||||
|
|
||||||
If you want to cross compile Conduit to another architecture, read the [Cross-Compile Guide](cross/README.md).
|
<details>
|
||||||
|
<summary>Cross compilation</summary>
|
||||||
|
|
||||||
|
As easiest way to compile conduit for another platform [cross-rs](https://github.com/cross-rs/cross) is recommended, so install it first.
|
||||||
|
|
||||||
|
In order to use RockDB as storage backend append `-latomic` to linker flags.
|
||||||
|
|
||||||
|
For example, to build a binary for Raspberry Pi Zero W (ARMv6) you need `arm-unknown-linux-gnueabihf` as compilation
|
||||||
|
target.
|
||||||
|
|
||||||
|
```bash
|
||||||
|
git clone https://gitlab.com/famedly/conduit.git
|
||||||
|
cd conduit
|
||||||
|
export RUSTFLAGS='-C link-arg=-lgcc -Clink-arg=-latomic -Clink-arg=-static-libgcc'
|
||||||
|
cross build --release --no-default-features --features conduit_bin,backend_rocksdb,jemalloc --target=arm-unknown-linux-gnueabihf
|
||||||
|
```
|
||||||
|
</details>
|
||||||
|
|
||||||
## Adding a Conduit user
|
## Adding a Conduit user
|
||||||
|
|
||||||
While Conduit can run as any user it is usually better to use dedicated users for different services. This also allows
|
While Conduit can run as any user it is usually better to use dedicated users for different services. This also allows
|
||||||
you to make sure that the file permissions are correctly set up.
|
you to make sure that the file permissions are correctly set up.
|
||||||
|
|
||||||
In Debian you can use this command to create a Conduit user:
|
In Debian or RHEL, you can use this command to create a Conduit user:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
sudo adduser --system conduit --no-create-home
|
sudo adduser --system conduit --group --disabled-login --no-create-home
|
||||||
```
|
```
|
||||||
|
|
||||||
## Forwarding ports in the firewall or the router
|
## Forwarding ports in the firewall or the router
|
||||||
|
@ -63,6 +96,19 @@ Conduit uses the ports 443 and 8448 both of which need to be open in the firewal
|
||||||
|
|
||||||
If Conduit runs behind a router or in a container and has a different public IP address than the host system these public ports need to be forwarded directly or indirectly to the port mentioned in the config.
|
If Conduit runs behind a router or in a container and has a different public IP address than the host system these public ports need to be forwarded directly or indirectly to the port mentioned in the config.
|
||||||
|
|
||||||
|
## Optional: Avoid port 8448
|
||||||
|
|
||||||
|
If Conduit runs behind Cloudflare reverse proxy, which doesn't support port 8448 on free plans, [delegation](https://matrix-org.github.io/synapse/latest/delegate.html) can be set up to have federation traffic routed to port 443:
|
||||||
|
```apache
|
||||||
|
# .well-known delegation on Apache
|
||||||
|
<Files "/.well-known/matrix/server">
|
||||||
|
ErrorDocument 200 '{"m.server": "your.server.name:443"}'
|
||||||
|
Header always set Content-Type application/json
|
||||||
|
Header always set Access-Control-Allow-Origin *
|
||||||
|
</Files>
|
||||||
|
```
|
||||||
|
[SRV DNS record](https://spec.matrix.org/latest/server-server-api/#resolving-server-names) delegation is also [possible](https://www.cloudflare.com/en-gb/learning/dns/dns-records/dns-srv-record/).
|
||||||
|
|
||||||
## Setting up a systemd service
|
## Setting up a systemd service
|
||||||
|
|
||||||
Now we'll set up a systemd service for Conduit, so it's easy to start/stop Conduit and set it to autostart when your
|
Now we'll set up a systemd service for Conduit, so it's easy to start/stop Conduit and set it to autostart when your
|
||||||
|
@ -77,7 +123,7 @@ After=network.target
|
||||||
[Service]
|
[Service]
|
||||||
Environment="CONDUIT_CONFIG=/etc/matrix-conduit/conduit.toml"
|
Environment="CONDUIT_CONFIG=/etc/matrix-conduit/conduit.toml"
|
||||||
User=conduit
|
User=conduit
|
||||||
Group=nogroup
|
Group=conduit
|
||||||
Restart=always
|
Restart=always
|
||||||
ExecStart=/usr/local/bin/matrix-conduit
|
ExecStart=/usr/local/bin/matrix-conduit
|
||||||
|
|
||||||
|
@ -132,11 +178,13 @@ max_request_size = 20_000_000 # in bytes
|
||||||
allow_registration = true
|
allow_registration = true
|
||||||
|
|
||||||
allow_federation = true
|
allow_federation = true
|
||||||
|
allow_check_for_updates = true
|
||||||
|
|
||||||
|
# Server to get public keys from. You probably shouldn't change this
|
||||||
trusted_servers = ["matrix.org"]
|
trusted_servers = ["matrix.org"]
|
||||||
|
|
||||||
#max_concurrent_requests = 100 # How many requests Conduit sends to other servers at the same time
|
#max_concurrent_requests = 100 # How many requests Conduit sends to other servers at the same time
|
||||||
#log = "info,state_res=warn,rocket=off,_=off,sled=off"
|
#log = "warn,state_res=warn,rocket=off,_=off,sled=off"
|
||||||
|
|
||||||
address = "127.0.0.1" # This makes sure Conduit can only be reached using the reverse proxy
|
address = "127.0.0.1" # This makes sure Conduit can only be reached using the reverse proxy
|
||||||
#address = "0.0.0.0" # If Conduit is running in a container, make sure the reverse proxy (ie. Traefik) can reach it.
|
#address = "0.0.0.0" # If Conduit is running in a container, make sure the reverse proxy (ie. Traefik) can reach it.
|
||||||
|
@ -145,7 +193,7 @@ address = "127.0.0.1" # This makes sure Conduit can only be reached using the re
|
||||||
## Setting the correct file permissions
|
## Setting the correct file permissions
|
||||||
|
|
||||||
As we are using a Conduit specific user we need to allow it to read the config. To do that you can run this command on
|
As we are using a Conduit specific user we need to allow it to read the config. To do that you can run this command on
|
||||||
Debian:
|
Debian or RHEL:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
sudo chown -R root:root /etc/matrix-conduit
|
sudo chown -R root:root /etc/matrix-conduit
|
||||||
|
@ -156,7 +204,7 @@ If you use the default database path you also need to run this:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
sudo mkdir -p /var/lib/matrix-conduit/
|
sudo mkdir -p /var/lib/matrix-conduit/
|
||||||
sudo chown -R conduit:nogroup /var/lib/matrix-conduit/
|
sudo chown -R conduit:conduit /var/lib/matrix-conduit/
|
||||||
sudo chmod 700 /var/lib/matrix-conduit/
|
sudo chmod 700 /var/lib/matrix-conduit/
|
||||||
```
|
```
|
||||||
|
|
||||||
|
@ -169,6 +217,11 @@ This depends on whether you use Apache, Caddy, Nginx or another web server.
|
||||||
Create `/etc/apache2/sites-enabled/050-conduit.conf` and copy-and-paste this:
|
Create `/etc/apache2/sites-enabled/050-conduit.conf` and copy-and-paste this:
|
||||||
|
|
||||||
```apache
|
```apache
|
||||||
|
# Requires mod_proxy and mod_proxy_http
|
||||||
|
#
|
||||||
|
# On Apache instance compiled from source,
|
||||||
|
# paste into httpd-ssl.conf or httpd.conf
|
||||||
|
|
||||||
Listen 8448
|
Listen 8448
|
||||||
|
|
||||||
<VirtualHost *:443 *:8448>
|
<VirtualHost *:443 *:8448>
|
||||||
|
@ -176,7 +229,7 @@ Listen 8448
|
||||||
ServerName your.server.name # EDIT THIS
|
ServerName your.server.name # EDIT THIS
|
||||||
|
|
||||||
AllowEncodedSlashes NoDecode
|
AllowEncodedSlashes NoDecode
|
||||||
ProxyPass /_matrix/ http://127.0.0.1:6167/_matrix/ nocanon
|
ProxyPass /_matrix/ http://127.0.0.1:6167/_matrix/ timeout=300 nocanon
|
||||||
ProxyPassReverse /_matrix/ http://127.0.0.1:6167/_matrix/
|
ProxyPassReverse /_matrix/ http://127.0.0.1:6167/_matrix/
|
||||||
|
|
||||||
</VirtualHost>
|
</VirtualHost>
|
||||||
|
@ -185,22 +238,29 @@ ProxyPassReverse /_matrix/ http://127.0.0.1:6167/_matrix/
|
||||||
**You need to make some edits again.** When you are done, run
|
**You need to make some edits again.** When you are done, run
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
|
# Debian
|
||||||
$ sudo systemctl reload apache2
|
$ sudo systemctl reload apache2
|
||||||
|
|
||||||
|
# Installed from source
|
||||||
|
$ sudo apachectl -k graceful
|
||||||
```
|
```
|
||||||
|
|
||||||
### Caddy
|
### Caddy
|
||||||
|
|
||||||
Create `/etc/caddy/conf.d/conduit_caddyfile` and enter this (substitute for your server name).
|
Create `/etc/caddy/conf.d/conduit_caddyfile` and enter this (substitute for your server name).
|
||||||
|
|
||||||
```caddy
|
```caddy
|
||||||
your.server.name, your.server.name:8448 {
|
your.server.name, your.server.name:8448 {
|
||||||
reverse_proxy /_matrix/* 127.0.0.1:6167
|
reverse_proxy /_matrix/* 127.0.0.1:6167
|
||||||
}
|
}
|
||||||
```
|
```
|
||||||
|
|
||||||
That's it! Just start or enable the service and you're set.
|
That's it! Just start or enable the service and you're set.
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
$ sudo systemctl enable caddy
|
$ sudo systemctl enable caddy
|
||||||
```
|
```
|
||||||
|
|
||||||
|
|
||||||
### Nginx
|
### Nginx
|
||||||
|
|
||||||
If you use Nginx and not Apache, add the following server section inside the http section of `/etc/nginx/nginx.conf`
|
If you use Nginx and not Apache, add the following server section inside the http section of `/etc/nginx/nginx.conf`
|
||||||
|
@ -214,10 +274,15 @@ server {
|
||||||
server_name your.server.name; # EDIT THIS
|
server_name your.server.name; # EDIT THIS
|
||||||
merge_slashes off;
|
merge_slashes off;
|
||||||
|
|
||||||
|
# Nginx defaults to only allow 1MB uploads
|
||||||
|
# Increase this to allow posting large files such as videos
|
||||||
|
client_max_body_size 20M;
|
||||||
|
|
||||||
location /_matrix/ {
|
location /_matrix/ {
|
||||||
proxy_pass http://127.0.0.1:6167$request_uri;
|
proxy_pass http://127.0.0.1:6167;
|
||||||
proxy_set_header Host $http_host;
|
proxy_set_header Host $http_host;
|
||||||
proxy_buffering off;
|
proxy_buffering off;
|
||||||
|
proxy_read_timeout 5m;
|
||||||
}
|
}
|
||||||
|
|
||||||
ssl_certificate /etc/letsencrypt/live/your.server.name/fullchain.pem; # EDIT THIS
|
ssl_certificate /etc/letsencrypt/live/your.server.name/fullchain.pem; # EDIT THIS
|
||||||
|
@ -237,11 +302,19 @@ $ sudo systemctl reload nginx
|
||||||
|
|
||||||
If you chose Caddy as your web proxy SSL certificates are handled automatically and you can skip this step.
|
If you chose Caddy as your web proxy SSL certificates are handled automatically and you can skip this step.
|
||||||
|
|
||||||
The easiest way to get an SSL certificate, if you don't have one already, is to install `certbot` and run this:
|
The easiest way to get an SSL certificate, if you don't have one already, is to [install](https://certbot.eff.org/instructions) `certbot` and run this:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
|
# To use ECC for the private key,
|
||||||
|
# paste into /etc/letsencrypt/cli.ini:
|
||||||
|
# key-type = ecdsa
|
||||||
|
# elliptic-curve = secp384r1
|
||||||
|
|
||||||
$ sudo certbot -d your.server.name
|
$ sudo certbot -d your.server.name
|
||||||
```
|
```
|
||||||
|
[Automated renewal](https://eff-certbot.readthedocs.io/en/stable/using.html#automated-renewals) is usually preconfigured.
|
||||||
|
|
||||||
|
If using Cloudflare, configure instead the edge and origin certificates in dashboard. In case you’re already running a website on the same Apache server, you can just copy-and-paste the SSL configuration from your main virtual host on port 443 into the above-mentioned vhost.
|
||||||
|
|
||||||
## You're done!
|
## You're done!
|
||||||
|
|
||||||
|
@ -265,6 +338,8 @@ You can also use these commands as a quick health check.
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
$ curl https://your.server.name/_matrix/client/versions
|
$ curl https://your.server.name/_matrix/client/versions
|
||||||
|
|
||||||
|
# If using port 8448
|
||||||
$ curl https://your.server.name:8448/_matrix/client/versions
|
$ curl https://your.server.name:8448/_matrix/client/versions
|
||||||
```
|
```
|
||||||
|
|
||||||
|
|
83
Dockerfile
83
Dockerfile
|
@ -1,83 +0,0 @@
|
||||||
# syntax=docker/dockerfile:1
|
|
||||||
FROM docker.io/rust:1.58-bullseye AS builder
|
|
||||||
WORKDIR /usr/src/conduit
|
|
||||||
|
|
||||||
# Install required packages to build Conduit and it's dependencies
|
|
||||||
RUN apt-get update && \
|
|
||||||
apt-get -y --no-install-recommends install libclang-dev=1:11.0-51+nmu5
|
|
||||||
|
|
||||||
# == Build dependencies without our own code separately for caching ==
|
|
||||||
#
|
|
||||||
# Need a fake main.rs since Cargo refuses to build anything otherwise.
|
|
||||||
#
|
|
||||||
# See https://github.com/rust-lang/cargo/issues/2644 for a Cargo feature
|
|
||||||
# request that would allow just dependencies to be compiled, presumably
|
|
||||||
# regardless of whether source files are available.
|
|
||||||
RUN mkdir src && touch src/lib.rs && echo 'fn main() {}' > src/main.rs
|
|
||||||
COPY Cargo.toml Cargo.lock ./
|
|
||||||
RUN cargo build --release && rm -r src
|
|
||||||
|
|
||||||
# Copy over actual Conduit sources
|
|
||||||
COPY src src
|
|
||||||
|
|
||||||
# main.rs and lib.rs need their timestamp updated for this to work correctly since
|
|
||||||
# otherwise the build with the fake main.rs from above is newer than the
|
|
||||||
# source files (COPY preserves timestamps).
|
|
||||||
#
|
|
||||||
# Builds conduit and places the binary at /usr/src/conduit/target/release/conduit
|
|
||||||
RUN touch src/main.rs && touch src/lib.rs && cargo build --release
|
|
||||||
|
|
||||||
# ---------------------------------------------------------------------------------------------------------------
|
|
||||||
# Stuff below this line actually ends up in the resulting docker image
|
|
||||||
# ---------------------------------------------------------------------------------------------------------------
|
|
||||||
FROM docker.io/debian:bullseye-slim AS runner
|
|
||||||
|
|
||||||
# Standard port on which Conduit launches.
|
|
||||||
# You still need to map the port when using the docker command or docker-compose.
|
|
||||||
EXPOSE 6167
|
|
||||||
|
|
||||||
ENV CONDUIT_PORT=6167 \
|
|
||||||
CONDUIT_ADDRESS="0.0.0.0" \
|
|
||||||
CONDUIT_DATABASE_PATH=/var/lib/matrix-conduit \
|
|
||||||
CONDUIT_CONFIG=''
|
|
||||||
# └─> Set no config file to do all configuration with env vars
|
|
||||||
|
|
||||||
# Conduit needs:
|
|
||||||
# ca-certificates: for https
|
|
||||||
# iproute2 & wget: for the healthcheck script
|
|
||||||
RUN apt-get update && apt-get -y --no-install-recommends install \
|
|
||||||
ca-certificates \
|
|
||||||
iproute2 \
|
|
||||||
wget \
|
|
||||||
&& rm -rf /var/lib/apt/lists/*
|
|
||||||
|
|
||||||
# Created directory for the database and media files
|
|
||||||
RUN mkdir -p /srv/conduit/.local/share/conduit
|
|
||||||
|
|
||||||
# Test if Conduit is still alive, uses the same endpoint as Element
|
|
||||||
COPY ./docker/healthcheck.sh /srv/conduit/healthcheck.sh
|
|
||||||
HEALTHCHECK --start-period=5s --interval=5s CMD ./healthcheck.sh
|
|
||||||
|
|
||||||
# Copy over the actual Conduit binary from the builder stage
|
|
||||||
COPY --from=builder /usr/src/conduit/target/release/conduit /srv/conduit/conduit
|
|
||||||
|
|
||||||
# Improve security: Don't run stuff as root, that does not need to run as root
|
|
||||||
# Most distros also use 1000:1000 for the first real user, so this should resolve volume mounting problems.
|
|
||||||
ARG USER_ID=1000
|
|
||||||
ARG GROUP_ID=1000
|
|
||||||
RUN set -x ; \
|
|
||||||
groupadd -r -g ${GROUP_ID} conduit ; \
|
|
||||||
useradd -l -r -M -d /srv/conduit -o -u ${USER_ID} -g conduit conduit && exit 0 ; exit 1
|
|
||||||
|
|
||||||
# Change ownership of Conduit files to conduit user and group and make the healthcheck executable:
|
|
||||||
RUN chown -cR conduit:conduit /srv/conduit && \
|
|
||||||
chmod +x /srv/conduit/healthcheck.sh
|
|
||||||
|
|
||||||
# Change user to conduit, no root permissions afterwards:
|
|
||||||
USER conduit
|
|
||||||
# Set container home directory
|
|
||||||
WORKDIR /srv/conduit
|
|
||||||
|
|
||||||
# Run Conduit and print backtraces on panics
|
|
||||||
ENV RUST_BACKTRACE=1
|
|
||||||
ENTRYPOINT [ "/srv/conduit/conduit" ]
|
|
30
README.md
30
README.md
|
@ -1,7 +1,12 @@
|
||||||
# Conduit
|
# Conduit
|
||||||
|
|
||||||
### A Matrix homeserver written in Rust
|
### A Matrix homeserver written in Rust
|
||||||
|
|
||||||
|
#### What is Matrix?
|
||||||
|
[Matrix](https://matrix.org) is an open network for secure and decentralized
|
||||||
|
communication. Users from every Matrix homeserver can chat with users from all
|
||||||
|
other Matrix servers. You can even use bridges (also called Matrix appservices)
|
||||||
|
to communicate with users outside of Matrix, like a community on Discord.
|
||||||
|
|
||||||
#### What is the goal?
|
#### What is the goal?
|
||||||
|
|
||||||
An efficient Matrix homeserver that's easy to set up and just works. You can install
|
An efficient Matrix homeserver that's easy to set up and just works. You can install
|
||||||
|
@ -11,11 +16,9 @@ friends or company.
|
||||||
#### Can I try it out?
|
#### Can I try it out?
|
||||||
|
|
||||||
Yes! You can test our Conduit instance by opening a Matrix client (<https://app.element.io> or Element Android for
|
Yes! You can test our Conduit instance by opening a Matrix client (<https://app.element.io> or Element Android for
|
||||||
example) and registering on the `conduit.rs` homeserver.
|
example) and registering on the `conduit.rs` homeserver. The registration token is "for_testing_only". Don't share personal information.
|
||||||
|
|
||||||
It is hosted on a ODROID HC 2 with 2GB RAM and a SAMSUNG Exynos 5422 CPU, which
|
Server hosting for conduit.rs is donated by the Matrix.org Foundation.
|
||||||
was used in the Samsung Galaxy S5. It joined many big rooms including Matrix
|
|
||||||
HQ.
|
|
||||||
|
|
||||||
#### What is the current status?
|
#### What is the current status?
|
||||||
|
|
||||||
|
@ -25,15 +28,16 @@ from time to time.
|
||||||
|
|
||||||
There are still a few important features missing:
|
There are still a few important features missing:
|
||||||
|
|
||||||
- E2EE verification over federation
|
- E2EE emoji comparison over federation (E2EE chat works)
|
||||||
- Outgoing read receipts, typing, presence over federation
|
- Outgoing read receipts, typing, presence over federation (incoming works)
|
||||||
|
|
||||||
Check out the [Conduit 1.0 Release Milestone](https://gitlab.com/famedly/conduit/-/milestones/3).
|
Check out the [Conduit 1.0 Release Milestone](https://gitlab.com/famedly/conduit/-/milestones/3).
|
||||||
|
|
||||||
#### How can I deploy my own?
|
#### How can I deploy my own?
|
||||||
|
|
||||||
- Simple install (this was tested the most): [DEPLOY.md](DEPLOY.md)
|
- Simple install (this was tested the most): [DEPLOY.md](DEPLOY.md)
|
||||||
- Debian package: [debian/README.Debian](debian/README.Debian)
|
- Debian package: [debian/README.md](debian/README.md)
|
||||||
|
- Nix/NixOS: [nix/README.md](nix/README.md)
|
||||||
- Docker: [docker/README.md](docker/README.md)
|
- Docker: [docker/README.md](docker/README.md)
|
||||||
|
|
||||||
If you want to connect an Appservice to Conduit, take a look at [APPSERVICES.md](APPSERVICES.md).
|
If you want to connect an Appservice to Conduit, take a look at [APPSERVICES.md](APPSERVICES.md).
|
||||||
|
@ -49,13 +53,21 @@ If you want to connect an Appservice to Conduit, take a look at [APPSERVICES.md]
|
||||||
|
|
||||||
#### Thanks to
|
#### Thanks to
|
||||||
|
|
||||||
Thanks to Famedly, Prototype Fund (DLR and German BMBF) and all other individuals for financially supporting this project.
|
Thanks to FUTO, Famedly, Prototype Fund (DLR and German BMBF) and all individuals for financially supporting this project.
|
||||||
|
|
||||||
Thanks to the contributors to Conduit and all libraries we use, for example:
|
Thanks to the contributors to Conduit and all libraries we use, for example:
|
||||||
|
|
||||||
- Ruma: A clean library for the Matrix Spec in Rust
|
- Ruma: A clean library for the Matrix Spec in Rust
|
||||||
- axum: A modular web framework
|
- axum: A modular web framework
|
||||||
|
|
||||||
|
#### Contact
|
||||||
|
|
||||||
|
If you run into any question, feel free to
|
||||||
|
- Ask us in `#conduit:fachschaften.org` on Matrix
|
||||||
|
- Write an E-Mail to `conduit@koesters.xyz`
|
||||||
|
- Send an direct message to `timokoesters@fachschaften.org` on Matrix
|
||||||
|
- [Open an issue on GitLab](https://gitlab.com/famedly/conduit/-/issues/new)
|
||||||
|
|
||||||
#### Donate
|
#### Donate
|
||||||
|
|
||||||
Liberapay: <https://liberapay.com/timokoesters/>\
|
Liberapay: <https://liberapay.com/timokoesters/>\
|
||||||
|
|
37
bin/complement
Executable file
37
bin/complement
Executable file
|
@ -0,0 +1,37 @@
|
||||||
|
#!/usr/bin/env bash
|
||||||
|
|
||||||
|
set -euo pipefail
|
||||||
|
|
||||||
|
# Path to Complement's source code
|
||||||
|
COMPLEMENT_SRC="$1"
|
||||||
|
|
||||||
|
# A `.jsonl` file to write test logs to
|
||||||
|
LOG_FILE="$2"
|
||||||
|
|
||||||
|
# A `.jsonl` file to write test results to
|
||||||
|
RESULTS_FILE="$3"
|
||||||
|
|
||||||
|
OCI_IMAGE="complement-conduit:dev"
|
||||||
|
|
||||||
|
env \
|
||||||
|
-C "$(git rev-parse --show-toplevel)" \
|
||||||
|
docker build \
|
||||||
|
--tag "$OCI_IMAGE" \
|
||||||
|
--file complement/Dockerfile \
|
||||||
|
.
|
||||||
|
|
||||||
|
# It's okay (likely, even) that `go test` exits nonzero
|
||||||
|
set +o pipefail
|
||||||
|
env \
|
||||||
|
-C "$COMPLEMENT_SRC" \
|
||||||
|
COMPLEMENT_BASE_IMAGE="$OCI_IMAGE" \
|
||||||
|
go test -json ./tests | tee "$LOG_FILE"
|
||||||
|
set -o pipefail
|
||||||
|
|
||||||
|
# Post-process the results into an easy-to-compare format
|
||||||
|
cat "$LOG_FILE" | jq -c '
|
||||||
|
select(
|
||||||
|
(.Action == "pass" or .Action == "fail" or .Action == "skip")
|
||||||
|
and .Test != null
|
||||||
|
) | {Action: .Action, Test: .Test}
|
||||||
|
' | sort > "$RESULTS_FILE"
|
31
bin/nix-build-and-cache
Executable file
31
bin/nix-build-and-cache
Executable file
|
@ -0,0 +1,31 @@
|
||||||
|
#!/usr/bin/env bash
|
||||||
|
|
||||||
|
set -euo pipefail
|
||||||
|
|
||||||
|
# The first argument must be the desired installable
|
||||||
|
INSTALLABLE="$1"
|
||||||
|
|
||||||
|
# Build the installable and forward any other arguments too
|
||||||
|
nix build "$@"
|
||||||
|
|
||||||
|
if [ ! -z ${ATTIC_TOKEN+x} ]; then
|
||||||
|
|
||||||
|
nix run --inputs-from . attic -- login \
|
||||||
|
conduit \
|
||||||
|
https://nix.computer.surgery/conduit \
|
||||||
|
"$ATTIC_TOKEN"
|
||||||
|
|
||||||
|
push_args=(
|
||||||
|
# Attic and its build dependencies
|
||||||
|
"$(nix path-info --inputs-from . attic)"
|
||||||
|
"$(nix path-info --inputs-from . attic --derivation)"
|
||||||
|
|
||||||
|
# The target installable and its build dependencies
|
||||||
|
"$(nix path-info "$INSTALLABLE" --derivation)"
|
||||||
|
"$(nix path-info "$INSTALLABLE")"
|
||||||
|
)
|
||||||
|
|
||||||
|
nix run --inputs-from . attic -- push conduit "${push_args[@]}"
|
||||||
|
else
|
||||||
|
echo "\$ATTIC_TOKEN is unset, skipping uploading to the binary cache"
|
||||||
|
fi
|
45
complement/Dockerfile
Normal file
45
complement/Dockerfile
Normal file
|
@ -0,0 +1,45 @@
|
||||||
|
FROM rust:1.75.0
|
||||||
|
|
||||||
|
WORKDIR /workdir
|
||||||
|
|
||||||
|
RUN apt-get update && apt-get install -y --no-install-recommends \
|
||||||
|
libclang-dev
|
||||||
|
|
||||||
|
COPY Cargo.toml Cargo.toml
|
||||||
|
COPY Cargo.lock Cargo.lock
|
||||||
|
COPY src src
|
||||||
|
RUN cargo build --release \
|
||||||
|
&& mv target/release/conduit conduit \
|
||||||
|
&& rm -rf target
|
||||||
|
|
||||||
|
# Install caddy
|
||||||
|
RUN apt-get update \
|
||||||
|
&& apt-get install -y \
|
||||||
|
debian-keyring \
|
||||||
|
debian-archive-keyring \
|
||||||
|
apt-transport-https \
|
||||||
|
curl \
|
||||||
|
&& curl -1sLf 'https://dl.cloudsmith.io/public/caddy/testing/gpg.key' \
|
||||||
|
| gpg --dearmor -o /usr/share/keyrings/caddy-testing-archive-keyring.gpg \
|
||||||
|
&& curl -1sLf 'https://dl.cloudsmith.io/public/caddy/testing/debian.deb.txt' \
|
||||||
|
| tee /etc/apt/sources.list.d/caddy-testing.list \
|
||||||
|
&& apt-get update \
|
||||||
|
&& apt-get install -y caddy
|
||||||
|
|
||||||
|
COPY conduit-example.toml conduit.toml
|
||||||
|
COPY complement/caddy.json caddy.json
|
||||||
|
|
||||||
|
ENV SERVER_NAME=localhost
|
||||||
|
ENV CONDUIT_CONFIG=/workdir/conduit.toml
|
||||||
|
|
||||||
|
RUN sed -i "s/port = 6167/port = 8008/g" conduit.toml
|
||||||
|
RUN echo "log = \"warn,_=off,sled=off\"" >> conduit.toml
|
||||||
|
RUN sed -i "s/address = \"127.0.0.1\"/address = \"0.0.0.0\"/g" conduit.toml
|
||||||
|
|
||||||
|
EXPOSE 8008 8448
|
||||||
|
|
||||||
|
CMD uname -a && \
|
||||||
|
sed -i "s/#server_name = \"your.server.name\"/server_name = \"${SERVER_NAME}\"/g" conduit.toml && \
|
||||||
|
sed -i "s/your.server.name/${SERVER_NAME}/g" caddy.json && \
|
||||||
|
caddy start --config caddy.json > /dev/null && \
|
||||||
|
/workdir/conduit
|
11
complement/README.md
Normal file
11
complement/README.md
Normal file
|
@ -0,0 +1,11 @@
|
||||||
|
# Complement
|
||||||
|
|
||||||
|
## What's that?
|
||||||
|
|
||||||
|
Have a look at [its repository](https://github.com/matrix-org/complement).
|
||||||
|
|
||||||
|
## How do I use it with Conduit?
|
||||||
|
|
||||||
|
The script at [`../bin/complement`](../bin/complement) has automation for this.
|
||||||
|
It takes a few command line arguments, you can read the script to find out what
|
||||||
|
those are.
|
72
complement/caddy.json
Normal file
72
complement/caddy.json
Normal file
|
@ -0,0 +1,72 @@
|
||||||
|
{
|
||||||
|
"logging": {
|
||||||
|
"logs": {
|
||||||
|
"default": {
|
||||||
|
"level": "WARN"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"apps": {
|
||||||
|
"http": {
|
||||||
|
"https_port": 8448,
|
||||||
|
"servers": {
|
||||||
|
"srv0": {
|
||||||
|
"listen": [":8448"],
|
||||||
|
"routes": [{
|
||||||
|
"match": [{
|
||||||
|
"host": ["your.server.name"]
|
||||||
|
}],
|
||||||
|
"handle": [{
|
||||||
|
"handler": "subroute",
|
||||||
|
"routes": [{
|
||||||
|
"handle": [{
|
||||||
|
"handler": "reverse_proxy",
|
||||||
|
"upstreams": [{
|
||||||
|
"dial": "127.0.0.1:8008"
|
||||||
|
}]
|
||||||
|
}]
|
||||||
|
}]
|
||||||
|
}],
|
||||||
|
"terminal": true
|
||||||
|
}],
|
||||||
|
"tls_connection_policies": [{
|
||||||
|
"match": {
|
||||||
|
"sni": ["your.server.name"]
|
||||||
|
}
|
||||||
|
}]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"pki": {
|
||||||
|
"certificate_authorities": {
|
||||||
|
"local": {
|
||||||
|
"name": "Complement CA",
|
||||||
|
"root": {
|
||||||
|
"certificate": "/complement/ca/ca.crt",
|
||||||
|
"private_key": "/complement/ca/ca.key"
|
||||||
|
},
|
||||||
|
"intermediate": {
|
||||||
|
"certificate": "/complement/ca/ca.crt",
|
||||||
|
"private_key": "/complement/ca/ca.key"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"tls": {
|
||||||
|
"automation": {
|
||||||
|
"policies": [{
|
||||||
|
"subjects": ["your.server.name"],
|
||||||
|
"issuers": [{
|
||||||
|
"module": "internal"
|
||||||
|
}],
|
||||||
|
"on_demand": true
|
||||||
|
}, {
|
||||||
|
"issuers": [{
|
||||||
|
"module": "internal",
|
||||||
|
"ca": "local"
|
||||||
|
}]
|
||||||
|
}]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
|
@ -39,14 +39,19 @@ max_request_size = 20_000_000 # in bytes
|
||||||
allow_registration = true
|
allow_registration = true
|
||||||
|
|
||||||
allow_federation = true
|
allow_federation = true
|
||||||
|
allow_check_for_updates = true
|
||||||
|
|
||||||
# Enable the display name lightning bolt on registration.
|
# Enable the display name lightning bolt on registration.
|
||||||
enable_lightning_bolt = true
|
enable_lightning_bolt = true
|
||||||
|
|
||||||
|
# Servers listed here will be used to gather public keys of other servers.
|
||||||
|
# Generally, copying this exactly should be enough. (Currently, Conduit doesn't
|
||||||
|
# support batched key requests, so this list should only contain Synapse
|
||||||
|
# servers.)
|
||||||
trusted_servers = ["matrix.org"]
|
trusted_servers = ["matrix.org"]
|
||||||
|
|
||||||
#max_concurrent_requests = 100 # How many requests Conduit sends to other servers at the same time
|
#max_concurrent_requests = 100 # How many requests Conduit sends to other servers at the same time
|
||||||
#log = "info,state_res=warn,rocket=off,_=off,sled=off"
|
#log = "warn,state_res=warn,rocket=off,_=off,sled=off"
|
||||||
|
|
||||||
address = "127.0.0.1" # This makes sure Conduit can only be reached using the reverse proxy
|
address = "127.0.0.1" # This makes sure Conduit can only be reached using the reverse proxy
|
||||||
#address = "0.0.0.0" # If Conduit is running in a container, make sure the reverse proxy (ie. Traefik) can reach it.
|
#address = "0.0.0.0" # If Conduit is running in a container, make sure the reverse proxy (ie. Traefik) can reach it.
|
||||||
|
|
18
debian/README.Debian → debian/README.md
vendored
18
debian/README.Debian → debian/README.md
vendored
|
@ -1,28 +1,36 @@
|
||||||
Conduit for Debian
|
Conduit for Debian
|
||||||
==================
|
==================
|
||||||
|
|
||||||
|
Installation
|
||||||
|
------------
|
||||||
|
|
||||||
|
Information about downloading, building and deploying the Debian package, see
|
||||||
|
the "Installing Conduit" section in [DEPLOY.md](../DEPLOY.md).
|
||||||
|
All following sections until "Setting up the Reverse Proxy" be ignored because
|
||||||
|
this is handled automatically by the packaging.
|
||||||
|
|
||||||
Configuration
|
Configuration
|
||||||
-------------
|
-------------
|
||||||
|
|
||||||
When installed, Debconf generates the configuration of the homeserver
|
When installed, Debconf generates the configuration of the homeserver
|
||||||
(host)name, the address and port it listens on. This configuration ends up in
|
(host)name, the address and port it listens on. This configuration ends up in
|
||||||
/etc/matrix-conduit/conduit.toml.
|
`/etc/matrix-conduit/conduit.toml`.
|
||||||
|
|
||||||
You can tweak more detailed settings by uncommenting and setting the variables
|
You can tweak more detailed settings by uncommenting and setting the variables
|
||||||
in /etc/matrix-conduit/conduit.toml. This involves settings such as the maximum
|
in `/etc/matrix-conduit/conduit.toml`. This involves settings such as the maximum
|
||||||
file size for download/upload, enabling federation, etc.
|
file size for download/upload, enabling federation, etc.
|
||||||
|
|
||||||
Running
|
Running
|
||||||
-------
|
-------
|
||||||
|
|
||||||
The package uses the matrix-conduit.service systemd unit file to start and
|
The package uses the `matrix-conduit.service` systemd unit file to start and
|
||||||
stop Conduit. It loads the configuration file mentioned above to set up the
|
stop Conduit. It loads the configuration file mentioned above to set up the
|
||||||
environment before running the server.
|
environment before running the server.
|
||||||
|
|
||||||
This package assumes by default that Conduit will be placed behind a reverse
|
This package assumes by default that Conduit will be placed behind a reverse
|
||||||
proxy such as Apache or nginx. This default deployment entails just listening
|
proxy such as Apache or nginx. This default deployment entails just listening
|
||||||
on 127.0.0.1 and the free port 6167 and is reachable via a client using the URL
|
on `127.0.0.1` and the free port `6167` and is reachable via a client using the URL
|
||||||
http://localhost:6167.
|
<http://localhost:6167>.
|
||||||
|
|
||||||
At a later stage this packaging may support also setting up TLS and running
|
At a later stage this packaging may support also setting up TLS and running
|
||||||
stand-alone. In this case, however, you need to set up some certificates and
|
stand-alone. In this case, however, you need to set up some certificates and
|
9
debian/postinst
vendored
9
debian/postinst
vendored
|
@ -19,11 +19,11 @@ case "$1" in
|
||||||
_matrix-conduit
|
_matrix-conduit
|
||||||
fi
|
fi
|
||||||
|
|
||||||
# Create the database path if it does not exist yet.
|
# Create the database path if it does not exist yet and fix up ownership
|
||||||
if [ ! -d "$CONDUIT_DATABASE_PATH" ]; then
|
# and permissions.
|
||||||
mkdir -p "$CONDUIT_DATABASE_PATH"
|
mkdir -p "$CONDUIT_DATABASE_PATH"
|
||||||
chown _matrix-conduit "$CONDUIT_DATABASE_PATH"
|
chown _matrix-conduit "$CONDUIT_DATABASE_PATH"
|
||||||
fi
|
chmod 700 "$CONDUIT_DATABASE_PATH"
|
||||||
|
|
||||||
if [ ! -e "$CONDUIT_CONFIG_FILE" ]; then
|
if [ ! -e "$CONDUIT_CONFIG_FILE" ]; then
|
||||||
# Write the debconf values in the config.
|
# Write the debconf values in the config.
|
||||||
|
@ -73,11 +73,12 @@ max_request_size = 20_000_000 # in bytes
|
||||||
allow_registration = true
|
allow_registration = true
|
||||||
|
|
||||||
allow_federation = true
|
allow_federation = true
|
||||||
|
allow_check_for_updates = true
|
||||||
|
|
||||||
trusted_servers = ["matrix.org"]
|
trusted_servers = ["matrix.org"]
|
||||||
|
|
||||||
#max_concurrent_requests = 100 # How many requests Conduit sends to other servers at the same time
|
#max_concurrent_requests = 100 # How many requests Conduit sends to other servers at the same time
|
||||||
#log = "info,state_res=warn,rocket=off,_=off,sled=off"
|
#log = "warn,state_res=warn,rocket=off,_=off,sled=off"
|
||||||
EOF
|
EOF
|
||||||
fi
|
fi
|
||||||
;;
|
;;
|
||||||
|
|
10
default.nix
Normal file
10
default.nix
Normal file
|
@ -0,0 +1,10 @@
|
||||||
|
(import
|
||||||
|
(
|
||||||
|
let lock = builtins.fromJSON (builtins.readFile ./flake.lock); in
|
||||||
|
fetchTarball {
|
||||||
|
url = lock.nodes.flake-compat.locked.url or "https://github.com/edolstra/flake-compat/archive/${lock.nodes.flake-compat.locked.rev}.tar.gz";
|
||||||
|
sha256 = lock.nodes.flake-compat.locked.narHash;
|
||||||
|
}
|
||||||
|
)
|
||||||
|
{ src = ./.; }
|
||||||
|
).defaultNix
|
116
docker/README.md
116
docker/README.md
|
@ -4,7 +4,36 @@
|
||||||
|
|
||||||
## Docker
|
## Docker
|
||||||
|
|
||||||
### Build & Dockerfile
|
To run Conduit with Docker you can either build the image yourself or pull it from a registry.
|
||||||
|
|
||||||
|
|
||||||
|
### Use a registry
|
||||||
|
|
||||||
|
OCI images for Conduit are available in the registries listed below. We recommend using the image tagged as `latest` from GitLab's own registry.
|
||||||
|
|
||||||
|
| Registry | Image | Size | Notes |
|
||||||
|
| --------------- | --------------------------------------------------------------- | ----------------------------- | ---------------------- |
|
||||||
|
| GitLab Registry | [registry.gitlab.com/famedly/conduit/matrix-conduit:latest][gl] | ![Image Size][shield-latest] | Stable image. |
|
||||||
|
| Docker Hub | [docker.io/matrixconduit/matrix-conduit:latest][dh] | ![Image Size][shield-latest] | Stable image. |
|
||||||
|
| GitLab Registry | [registry.gitlab.com/famedly/conduit/matrix-conduit:next][gl] | ![Image Size][shield-next] | Development version. |
|
||||||
|
| Docker Hub | [docker.io/matrixconduit/matrix-conduit:next][dh] | ![Image Size][shield-next] | Development version. |
|
||||||
|
|
||||||
|
|
||||||
|
[dh]: https://hub.docker.com/r/matrixconduit/matrix-conduit
|
||||||
|
[gl]: https://gitlab.com/famedly/conduit/container_registry/2497937
|
||||||
|
[shield-latest]: https://img.shields.io/docker/image-size/matrixconduit/matrix-conduit/latest
|
||||||
|
[shield-next]: https://img.shields.io/docker/image-size/matrixconduit/matrix-conduit/next
|
||||||
|
|
||||||
|
|
||||||
|
Use
|
||||||
|
```bash
|
||||||
|
docker image pull <link>
|
||||||
|
```
|
||||||
|
to pull it to your machine.
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
### Build using a dockerfile
|
||||||
|
|
||||||
The Dockerfile provided by Conduit has two stages, each of which creates an image.
|
The Dockerfile provided by Conduit has two stages, each of which creates an image.
|
||||||
|
|
||||||
|
@ -19,9 +48,11 @@ docker build --tag matrixconduit/matrix-conduit:latest .
|
||||||
|
|
||||||
which also will tag the resulting image as `matrixconduit/matrix-conduit:latest`.
|
which also will tag the resulting image as `matrixconduit/matrix-conduit:latest`.
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
### Run
|
### Run
|
||||||
|
|
||||||
After building the image you can simply run it with
|
When you have the image you can simply run it with
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
docker run -d -p 8448:6167 \
|
docker run -d -p 8448:6167 \
|
||||||
|
@ -33,20 +64,11 @@ docker run -d -p 8448:6167 \
|
||||||
-e CONDUIT_MAX_REQUEST_SIZE="20_000_000" \
|
-e CONDUIT_MAX_REQUEST_SIZE="20_000_000" \
|
||||||
-e CONDUIT_TRUSTED_SERVERS="[\"matrix.org\"]" \
|
-e CONDUIT_TRUSTED_SERVERS="[\"matrix.org\"]" \
|
||||||
-e CONDUIT_MAX_CONCURRENT_REQUESTS="100" \
|
-e CONDUIT_MAX_CONCURRENT_REQUESTS="100" \
|
||||||
-e CONDUIT_LOG="info,rocket=off,_=off,sled=off" \
|
-e CONDUIT_LOG="warn,rocket=off,_=off,sled=off" \
|
||||||
--name conduit matrixconduit/matrix-conduit:latest
|
--name conduit <link>
|
||||||
```
|
```
|
||||||
|
|
||||||
or you can skip the build step and pull the image from one of the following registries:
|
or you can use [docker-compose](#docker-compose).
|
||||||
|
|
||||||
| Registry | Image | Size |
|
|
||||||
| --------------- | --------------------------------------------------------------- | --------------------- |
|
|
||||||
| Docker Hub | [matrixconduit/matrix-conduit:latest][dh] | ![Image Size][shield] |
|
|
||||||
| GitLab Registry | [registry.gitlab.com/famedly/conduit/matrix-conduit:latest][gl] | ![Image Size][shield] |
|
|
||||||
|
|
||||||
[dh]: https://hub.docker.com/r/matrixconduit/matrix-conduit
|
|
||||||
[gl]: https://gitlab.com/famedly/conduit/container_registry/2497937
|
|
||||||
[shield]: https://img.shields.io/docker/image-size/matrixconduit/matrix-conduit/latest
|
|
||||||
|
|
||||||
The `-d` flag lets the container run in detached mode. You now need to supply a `conduit.toml` config file, an example can be found [here](../conduit-example.toml).
|
The `-d` flag lets the container run in detached mode. You now need to supply a `conduit.toml` config file, an example can be found [here](../conduit-example.toml).
|
||||||
You can pass in different env vars to change config values on the fly. You can even configure Conduit completely by using env vars, but for that you need
|
You can pass in different env vars to change config values on the fly. You can even configure Conduit completely by using env vars, but for that you need
|
||||||
|
@ -54,7 +76,7 @@ to pass `-e CONDUIT_CONFIG=""` into your container. For an overview of possible
|
||||||
|
|
||||||
If you just want to test Conduit for a short time, you can use the `--rm` flag, which will clean up everything related to your container after you stop it.
|
If you just want to test Conduit for a short time, you can use the `--rm` flag, which will clean up everything related to your container after you stop it.
|
||||||
|
|
||||||
## Docker-compose
|
### Docker-compose
|
||||||
|
|
||||||
If the `docker run` command is not for you or your setup, you can also use one of the provided `docker-compose` files.
|
If the `docker run` command is not for you or your setup, you can also use one of the provided `docker-compose` files.
|
||||||
|
|
||||||
|
@ -95,7 +117,7 @@ As a container user, you probably know about Traefik. It is a easy to use revers
|
||||||
containerized app and services available through the web. With the two provided files,
|
containerized app and services available through the web. With the two provided files,
|
||||||
[`docker-compose.for-traefik.yml`](docker-compose.for-traefik.yml) (or
|
[`docker-compose.for-traefik.yml`](docker-compose.for-traefik.yml) (or
|
||||||
[`docker-compose.with-traefik.yml`](docker-compose.with-traefik.yml)) and
|
[`docker-compose.with-traefik.yml`](docker-compose.with-traefik.yml)) and
|
||||||
[`docker-compose.override.yml`](docker-compose.override.traefik.yml), it is equally easy to deploy
|
[`docker-compose.override.yml`](docker-compose.override.yml), it is equally easy to deploy
|
||||||
and use Conduit, with a little caveat. If you already took a look at the files, then you should have
|
and use Conduit, with a little caveat. If you already took a look at the files, then you should have
|
||||||
seen the `well-known` service, and that is the little caveat. Traefik is simply a proxy and
|
seen the `well-known` service, and that is the little caveat. Traefik is simply a proxy and
|
||||||
loadbalancer and is not able to serve any kind of content, but for Conduit to federate, we need to
|
loadbalancer and is not able to serve any kind of content, but for Conduit to federate, we need to
|
||||||
|
@ -106,7 +128,8 @@ With the service `well-known` we use a single `nginx` container that will serve
|
||||||
|
|
||||||
So...step by step:
|
So...step by step:
|
||||||
|
|
||||||
1. Copy [`docker-compose.traefik.yml`](docker-compose.traefik.yml) and [`docker-compose.override.traefik.yml`](docker-compose.override.traefik.yml) from the repository and remove `.traefik` from the filenames.
|
1. Copy [`docker-compose.for-traefik.yml`](docker-compose.for-traefik.yml) (or
|
||||||
|
[`docker-compose.with-traefik.yml`](docker-compose.with-traefik.yml)) and [`docker-compose.override.yml`](docker-compose.override.yml) from the repository and remove `.for-traefik` (or `.with-traefik`) from the filename.
|
||||||
2. Open both files and modify/adjust them to your needs. Meaning, change the `CONDUIT_SERVER_NAME` and the volume host mappings according to your needs.
|
2. Open both files and modify/adjust them to your needs. Meaning, change the `CONDUIT_SERVER_NAME` and the volume host mappings according to your needs.
|
||||||
3. Create the `conduit.toml` config file, an example can be found [here](../conduit-example.toml), or set `CONDUIT_CONFIG=""` and configure Conduit per env vars.
|
3. Create the `conduit.toml` config file, an example can be found [here](../conduit-example.toml), or set `CONDUIT_CONFIG=""` and configure Conduit per env vars.
|
||||||
4. Uncomment the `element-web` service if you want to host your own Element Web Client and create a `element_config.json`.
|
4. Uncomment the `element-web` service if you want to host your own Element Web Client and create a `element_config.json`.
|
||||||
|
@ -121,12 +144,12 @@ So...step by step:
|
||||||
|
|
||||||
location /.well-known/matrix/server {
|
location /.well-known/matrix/server {
|
||||||
return 200 '{"m.server": "<SUBDOMAIN>.<DOMAIN>:443"}';
|
return 200 '{"m.server": "<SUBDOMAIN>.<DOMAIN>:443"}';
|
||||||
add_header Content-Type application/json;
|
types { } default_type "application/json; charset=utf-8";
|
||||||
}
|
}
|
||||||
|
|
||||||
location /.well-known/matrix/client {
|
location /.well-known/matrix/client {
|
||||||
return 200 '{"m.homeserver": {"base_url": "https://<SUBDOMAIN>.<DOMAIN>"}}';
|
return 200 '{"m.homeserver": {"base_url": "https://<SUBDOMAIN>.<DOMAIN>"}}';
|
||||||
add_header Content-Type application/json;
|
types { } default_type "application/json; charset=utf-8";
|
||||||
add_header "Access-Control-Allow-Origin" *;
|
add_header "Access-Control-Allow-Origin" *;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -138,3 +161,58 @@ So...step by step:
|
||||||
|
|
||||||
6. Run `docker-compose up -d`
|
6. Run `docker-compose up -d`
|
||||||
7. Connect to your homeserver with your preferred client and create a user. You should do this immediately after starting Conduit, because the first created user is the admin.
|
7. Connect to your homeserver with your preferred client and create a user. You should do this immediately after starting Conduit, because the first created user is the admin.
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
## Voice communication
|
||||||
|
|
||||||
|
In order to make or receive calls, a TURN server is required. Conduit suggests using [Coturn](https://github.com/coturn/coturn) for this purpose, which is also available as a Docker image. Before proceeding with the software installation, it is essential to have the necessary configurations in place.
|
||||||
|
|
||||||
|
### Configuration
|
||||||
|
|
||||||
|
Create a configuration file called `coturn.conf` containing:
|
||||||
|
|
||||||
|
```conf
|
||||||
|
use-auth-secret
|
||||||
|
static-auth-secret=<a secret key>
|
||||||
|
realm=<your server domain>
|
||||||
|
```
|
||||||
|
A common way to generate a suitable alphanumeric secret key is by using `pwgen -s 64 1`.
|
||||||
|
|
||||||
|
These same values need to be set in conduit. You can either modify conduit.toml to include these lines:
|
||||||
|
```
|
||||||
|
turn_uris = ["turn:<your server domain>?transport=udp", "turn:<your server domain>?transport=tcp"]
|
||||||
|
turn_secret = "<secret key from coturn configuration>"
|
||||||
|
```
|
||||||
|
or append the following to the docker environment variables dependig on which configuration method you used earlier:
|
||||||
|
```yml
|
||||||
|
CONDUIT_TURN_URIS: '["turn:<your server domain>?transport=udp", "turn:<your server domain>?transport=tcp"]'
|
||||||
|
CONDUIT_TURN_SECRET: "<secret key from coturn configuration>"
|
||||||
|
```
|
||||||
|
Restart Conduit to apply these changes.
|
||||||
|
|
||||||
|
### Run
|
||||||
|
Run the [Coturn](https://hub.docker.com/r/coturn/coturn) image using
|
||||||
|
```bash
|
||||||
|
docker run -d --network=host -v $(pwd)/coturn.conf:/etc/coturn/turnserver.conf coturn/coturn
|
||||||
|
```
|
||||||
|
|
||||||
|
or docker-compose. For the latter, paste the following section into a file called `docker-compose.yml`
|
||||||
|
and run `docker-compose up -d` in the same directory.
|
||||||
|
|
||||||
|
```yml
|
||||||
|
version: 3
|
||||||
|
services:
|
||||||
|
turn:
|
||||||
|
container_name: coturn-server
|
||||||
|
image: docker.io/coturn/coturn
|
||||||
|
restart: unless-stopped
|
||||||
|
network_mode: "host"
|
||||||
|
volumes:
|
||||||
|
- ./coturn.conf:/etc/coturn/turnserver.conf
|
||||||
|
```
|
||||||
|
|
||||||
|
To understand why the host networking mode is used and explore alternative configuration options, please visit the following link: https://github.com/coturn/coturn/blob/master/docker/coturn/README.md.
|
||||||
|
For security recommendations see Synapse's [Coturn documentation](https://github.com/matrix-org/synapse/blob/develop/docs/setup/turn/coturn.md#configuration).
|
||||||
|
|
||||||
|
|
|
@ -29,9 +29,10 @@ services:
|
||||||
CONDUIT_MAX_REQUEST_SIZE: 20_000_000 # in bytes, ~20 MB
|
CONDUIT_MAX_REQUEST_SIZE: 20_000_000 # in bytes, ~20 MB
|
||||||
CONDUIT_ALLOW_REGISTRATION: 'true'
|
CONDUIT_ALLOW_REGISTRATION: 'true'
|
||||||
CONDUIT_ALLOW_FEDERATION: 'true'
|
CONDUIT_ALLOW_FEDERATION: 'true'
|
||||||
|
CONDUIT_ALLOW_CHECK_FOR_UPDATES: 'true'
|
||||||
CONDUIT_TRUSTED_SERVERS: '["matrix.org"]'
|
CONDUIT_TRUSTED_SERVERS: '["matrix.org"]'
|
||||||
#CONDUIT_MAX_CONCURRENT_REQUESTS: 100
|
#CONDUIT_MAX_CONCURRENT_REQUESTS: 100
|
||||||
#CONDUIT_LOG: info,rocket=off,_=off,sled=off
|
#CONDUIT_LOG: warn,rocket=off,_=off,sled=off
|
||||||
CONDUIT_ADDRESS: 0.0.0.0
|
CONDUIT_ADDRESS: 0.0.0.0
|
||||||
CONDUIT_CONFIG: '' # Ignore this
|
CONDUIT_CONFIG: '' # Ignore this
|
||||||
|
|
||||||
|
|
|
@ -33,10 +33,11 @@ services:
|
||||||
# CONDUIT_PORT: 6167
|
# CONDUIT_PORT: 6167
|
||||||
# CONDUIT_CONFIG: '/srv/conduit/conduit.toml' # if you want to configure purely by env vars, set this to an empty string ''
|
# CONDUIT_CONFIG: '/srv/conduit/conduit.toml' # if you want to configure purely by env vars, set this to an empty string ''
|
||||||
# Available levels are: error, warn, info, debug, trace - more info at: https://docs.rs/env_logger/*/env_logger/#enabling-logging
|
# Available levels are: error, warn, info, debug, trace - more info at: https://docs.rs/env_logger/*/env_logger/#enabling-logging
|
||||||
# CONDUIT_LOG: info # default is: "info,_=off,sled=off"
|
# CONDUIT_LOG: info # default is: "warn,_=off,sled=off"
|
||||||
# CONDUIT_ALLOW_JAEGER: 'false'
|
# CONDUIT_ALLOW_JAEGER: 'false'
|
||||||
# CONDUIT_ALLOW_ENCRYPTION: 'false'
|
# CONDUIT_ALLOW_ENCRYPTION: 'true'
|
||||||
# CONDUIT_ALLOW_FEDERATION: 'false'
|
# CONDUIT_ALLOW_FEDERATION: 'true'
|
||||||
|
# CONDUIT_ALLOW_CHECK_FOR_UPDATES: 'true'
|
||||||
# CONDUIT_DATABASE_PATH: /srv/conduit/.local/share/conduit
|
# CONDUIT_DATABASE_PATH: /srv/conduit/.local/share/conduit
|
||||||
# CONDUIT_WORKERS: 10
|
# CONDUIT_WORKERS: 10
|
||||||
# CONDUIT_MAX_REQUEST_SIZE: 20_000_000 # in bytes, ~20 MB
|
# CONDUIT_MAX_REQUEST_SIZE: 20_000_000 # in bytes, ~20 MB
|
||||||
|
|
|
@ -29,9 +29,10 @@ services:
|
||||||
CONDUIT_MAX_REQUEST_SIZE: 20_000_000 # in bytes, ~20 MB
|
CONDUIT_MAX_REQUEST_SIZE: 20_000_000 # in bytes, ~20 MB
|
||||||
CONDUIT_ALLOW_REGISTRATION: 'true'
|
CONDUIT_ALLOW_REGISTRATION: 'true'
|
||||||
CONDUIT_ALLOW_FEDERATION: 'true'
|
CONDUIT_ALLOW_FEDERATION: 'true'
|
||||||
|
CONDUIT_ALLOW_CHECK_FOR_UPDATES: 'true'
|
||||||
CONDUIT_TRUSTED_SERVERS: '["matrix.org"]'
|
CONDUIT_TRUSTED_SERVERS: '["matrix.org"]'
|
||||||
#CONDUIT_MAX_CONCURRENT_REQUESTS: 100
|
#CONDUIT_MAX_CONCURRENT_REQUESTS: 100
|
||||||
#CONDUIT_LOG: info,rocket=off,_=off,sled=off
|
#CONDUIT_LOG: warn,rocket=off,_=off,sled=off
|
||||||
CONDUIT_ADDRESS: 0.0.0.0
|
CONDUIT_ADDRESS: 0.0.0.0
|
||||||
CONDUIT_CONFIG: '' # Ignore this
|
CONDUIT_CONFIG: '' # Ignore this
|
||||||
#
|
#
|
|
@ -6,9 +6,14 @@ if [ -z "${CONDUIT_PORT}" ]; then
|
||||||
CONDUIT_PORT=$(ss -tlpn | grep conduit | grep -m1 -o ':[0-9]*' | grep -m1 -o '[0-9]*')
|
CONDUIT_PORT=$(ss -tlpn | grep conduit | grep -m1 -o ':[0-9]*' | grep -m1 -o '[0-9]*')
|
||||||
fi
|
fi
|
||||||
|
|
||||||
|
# If CONDUIT_ADDRESS is not set try to get the address from the process list
|
||||||
|
if [ -z "${CONDUIT_ADDRESS}" ]; then
|
||||||
|
CONDUIT_ADDRESS=$(ss -tlpn | awk -F ' +|:' '/conduit/ { print $4 }')
|
||||||
|
fi
|
||||||
|
|
||||||
# The actual health check.
|
# The actual health check.
|
||||||
# We try to first get a response on HTTP and when that fails on HTTPS and when that fails, we exit with code 1.
|
# We try to first get a response on HTTP and when that fails on HTTPS and when that fails, we exit with code 1.
|
||||||
# TODO: Change this to a single wget call. Do we have a config value that we can check for that?
|
# TODO: Change this to a single wget call. Do we have a config value that we can check for that?
|
||||||
wget --no-verbose --tries=1 --spider "http://localhost:${CONDUIT_PORT}/_matrix/client/versions" || \
|
wget --no-verbose --tries=1 --spider "http://${CONDUIT_ADDRESS}:${CONDUIT_PORT}/_matrix/client/versions" || \
|
||||||
wget --no-verbose --tries=1 --spider "https://localhost:${CONDUIT_PORT}/_matrix/client/versions" || \
|
wget --no-verbose --tries=1 --spider "https://${CONDUIT_ADDRESS}:${CONDUIT_PORT}/_matrix/client/versions" || \
|
||||||
exit 1
|
exit 1
|
||||||
|
|
64
engage.toml
Normal file
64
engage.toml
Normal file
|
@ -0,0 +1,64 @@
|
||||||
|
interpreter = ["bash", "-euo", "pipefail", "-c"]
|
||||||
|
|
||||||
|
[[task]]
|
||||||
|
name = "engage"
|
||||||
|
group = "versions"
|
||||||
|
script = "engage --version"
|
||||||
|
|
||||||
|
[[task]]
|
||||||
|
name = "rustc"
|
||||||
|
group = "versions"
|
||||||
|
script = "rustc --version"
|
||||||
|
|
||||||
|
[[task]]
|
||||||
|
name = "cargo"
|
||||||
|
group = "versions"
|
||||||
|
script = "cargo --version"
|
||||||
|
|
||||||
|
[[task]]
|
||||||
|
name = "cargo-fmt"
|
||||||
|
group = "versions"
|
||||||
|
script = "cargo fmt --version"
|
||||||
|
|
||||||
|
[[task]]
|
||||||
|
name = "rustdoc"
|
||||||
|
group = "versions"
|
||||||
|
script = "rustdoc --version"
|
||||||
|
|
||||||
|
[[task]]
|
||||||
|
name = "cargo-clippy"
|
||||||
|
group = "versions"
|
||||||
|
script = "cargo clippy -- --version"
|
||||||
|
|
||||||
|
[[task]]
|
||||||
|
name = "cargo-fmt"
|
||||||
|
group = "lints"
|
||||||
|
script = "cargo fmt --check -- --color=always"
|
||||||
|
|
||||||
|
[[task]]
|
||||||
|
name = "cargo-doc"
|
||||||
|
group = "lints"
|
||||||
|
script = """
|
||||||
|
RUSTDOCFLAGS="-D warnings" cargo doc \
|
||||||
|
--workspace \
|
||||||
|
--no-deps \
|
||||||
|
--document-private-items \
|
||||||
|
--color always
|
||||||
|
"""
|
||||||
|
|
||||||
|
[[task]]
|
||||||
|
name = "cargo-clippy"
|
||||||
|
group = "lints"
|
||||||
|
script = "cargo clippy --workspace --all-targets --color=always -- -D warnings"
|
||||||
|
|
||||||
|
[[task]]
|
||||||
|
name = "cargo"
|
||||||
|
group = "tests"
|
||||||
|
script = """
|
||||||
|
cargo test \
|
||||||
|
--workspace \
|
||||||
|
--all-targets \
|
||||||
|
--color=always \
|
||||||
|
-- \
|
||||||
|
--color=always
|
||||||
|
"""
|
263
flake.lock
Normal file
263
flake.lock
Normal file
|
@ -0,0 +1,263 @@
|
||||||
|
{
|
||||||
|
"nodes": {
|
||||||
|
"attic": {
|
||||||
|
"inputs": {
|
||||||
|
"crane": "crane",
|
||||||
|
"flake-compat": "flake-compat",
|
||||||
|
"flake-utils": "flake-utils",
|
||||||
|
"nixpkgs": "nixpkgs",
|
||||||
|
"nixpkgs-stable": "nixpkgs-stable"
|
||||||
|
},
|
||||||
|
"locked": {
|
||||||
|
"lastModified": 1705617092,
|
||||||
|
"narHash": "sha256-n9PK4O4X4S1JkwpkMuYm1wHZYJzRqif8g3RuVIPD+rY=",
|
||||||
|
"owner": "zhaofengli",
|
||||||
|
"repo": "attic",
|
||||||
|
"rev": "fbe252a5c21febbe920c025560cbd63b20e24f3b",
|
||||||
|
"type": "github"
|
||||||
|
},
|
||||||
|
"original": {
|
||||||
|
"owner": "zhaofengli",
|
||||||
|
"ref": "main",
|
||||||
|
"repo": "attic",
|
||||||
|
"type": "github"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"crane": {
|
||||||
|
"inputs": {
|
||||||
|
"nixpkgs": [
|
||||||
|
"attic",
|
||||||
|
"nixpkgs"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"locked": {
|
||||||
|
"lastModified": 1702918879,
|
||||||
|
"narHash": "sha256-tWJqzajIvYcaRWxn+cLUB9L9Pv4dQ3Bfit/YjU5ze3g=",
|
||||||
|
"owner": "ipetkov",
|
||||||
|
"repo": "crane",
|
||||||
|
"rev": "7195c00c272fdd92fc74e7d5a0a2844b9fadb2fb",
|
||||||
|
"type": "github"
|
||||||
|
},
|
||||||
|
"original": {
|
||||||
|
"owner": "ipetkov",
|
||||||
|
"repo": "crane",
|
||||||
|
"type": "github"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"crane_2": {
|
||||||
|
"inputs": {
|
||||||
|
"nixpkgs": [
|
||||||
|
"nixpkgs"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"locked": {
|
||||||
|
"lastModified": 1706473964,
|
||||||
|
"narHash": "sha256-Fq6xleee/TsX6NbtoRuI96bBuDHMU57PrcK9z1QEKbk=",
|
||||||
|
"owner": "ipetkov",
|
||||||
|
"repo": "crane",
|
||||||
|
"rev": "c798790eabec3e3da48190ae3698ac227aab770c",
|
||||||
|
"type": "github"
|
||||||
|
},
|
||||||
|
"original": {
|
||||||
|
"owner": "ipetkov",
|
||||||
|
"ref": "master",
|
||||||
|
"repo": "crane",
|
||||||
|
"type": "github"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"fenix": {
|
||||||
|
"inputs": {
|
||||||
|
"nixpkgs": [
|
||||||
|
"nixpkgs"
|
||||||
|
],
|
||||||
|
"rust-analyzer-src": "rust-analyzer-src"
|
||||||
|
},
|
||||||
|
"locked": {
|
||||||
|
"lastModified": 1705559032,
|
||||||
|
"narHash": "sha256-Cb+Jd1+Gz4Wi+8elPnUIHnqQmE1qjDRZ+PsJaPaAffY=",
|
||||||
|
"owner": "nix-community",
|
||||||
|
"repo": "fenix",
|
||||||
|
"rev": "e132ea0eb0c799a2109a91688e499d7bf4962801",
|
||||||
|
"type": "github"
|
||||||
|
},
|
||||||
|
"original": {
|
||||||
|
"owner": "nix-community",
|
||||||
|
"repo": "fenix",
|
||||||
|
"type": "github"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"flake-compat": {
|
||||||
|
"flake": false,
|
||||||
|
"locked": {
|
||||||
|
"lastModified": 1673956053,
|
||||||
|
"narHash": "sha256-4gtG9iQuiKITOjNQQeQIpoIB6b16fm+504Ch3sNKLd8=",
|
||||||
|
"owner": "edolstra",
|
||||||
|
"repo": "flake-compat",
|
||||||
|
"rev": "35bb57c0c8d8b62bbfd284272c928ceb64ddbde9",
|
||||||
|
"type": "github"
|
||||||
|
},
|
||||||
|
"original": {
|
||||||
|
"owner": "edolstra",
|
||||||
|
"repo": "flake-compat",
|
||||||
|
"type": "github"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"flake-compat_2": {
|
||||||
|
"flake": false,
|
||||||
|
"locked": {
|
||||||
|
"lastModified": 1696426674,
|
||||||
|
"narHash": "sha256-kvjfFW7WAETZlt09AgDn1MrtKzP7t90Vf7vypd3OL1U=",
|
||||||
|
"owner": "edolstra",
|
||||||
|
"repo": "flake-compat",
|
||||||
|
"rev": "0f9255e01c2351cc7d116c072cb317785dd33b33",
|
||||||
|
"type": "github"
|
||||||
|
},
|
||||||
|
"original": {
|
||||||
|
"owner": "edolstra",
|
||||||
|
"repo": "flake-compat",
|
||||||
|
"type": "github"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"flake-utils": {
|
||||||
|
"locked": {
|
||||||
|
"lastModified": 1667395993,
|
||||||
|
"narHash": "sha256-nuEHfE/LcWyuSWnS8t12N1wc105Qtau+/OdUAjtQ0rA=",
|
||||||
|
"owner": "numtide",
|
||||||
|
"repo": "flake-utils",
|
||||||
|
"rev": "5aed5285a952e0b949eb3ba02c12fa4fcfef535f",
|
||||||
|
"type": "github"
|
||||||
|
},
|
||||||
|
"original": {
|
||||||
|
"owner": "numtide",
|
||||||
|
"repo": "flake-utils",
|
||||||
|
"type": "github"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"flake-utils_2": {
|
||||||
|
"inputs": {
|
||||||
|
"systems": "systems"
|
||||||
|
},
|
||||||
|
"locked": {
|
||||||
|
"lastModified": 1705309234,
|
||||||
|
"narHash": "sha256-uNRRNRKmJyCRC/8y1RqBkqWBLM034y4qN7EprSdmgyA=",
|
||||||
|
"owner": "numtide",
|
||||||
|
"repo": "flake-utils",
|
||||||
|
"rev": "1ef2e671c3b0c19053962c07dbda38332dcebf26",
|
||||||
|
"type": "github"
|
||||||
|
},
|
||||||
|
"original": {
|
||||||
|
"owner": "numtide",
|
||||||
|
"repo": "flake-utils",
|
||||||
|
"type": "github"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"nix-filter": {
|
||||||
|
"locked": {
|
||||||
|
"lastModified": 1705332318,
|
||||||
|
"narHash": "sha256-kcw1yFeJe9N4PjQji9ZeX47jg0p9A0DuU4djKvg1a7I=",
|
||||||
|
"owner": "numtide",
|
||||||
|
"repo": "nix-filter",
|
||||||
|
"rev": "3449dc925982ad46246cfc36469baf66e1b64f17",
|
||||||
|
"type": "github"
|
||||||
|
},
|
||||||
|
"original": {
|
||||||
|
"owner": "numtide",
|
||||||
|
"repo": "nix-filter",
|
||||||
|
"type": "github"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"nixpkgs": {
|
||||||
|
"locked": {
|
||||||
|
"lastModified": 1702539185,
|
||||||
|
"narHash": "sha256-KnIRG5NMdLIpEkZTnN5zovNYc0hhXjAgv6pfd5Z4c7U=",
|
||||||
|
"owner": "NixOS",
|
||||||
|
"repo": "nixpkgs",
|
||||||
|
"rev": "aa9d4729cbc99dabacb50e3994dcefb3ea0f7447",
|
||||||
|
"type": "github"
|
||||||
|
},
|
||||||
|
"original": {
|
||||||
|
"owner": "NixOS",
|
||||||
|
"ref": "nixpkgs-unstable",
|
||||||
|
"repo": "nixpkgs",
|
||||||
|
"type": "github"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"nixpkgs-stable": {
|
||||||
|
"locked": {
|
||||||
|
"lastModified": 1702780907,
|
||||||
|
"narHash": "sha256-blbrBBXjjZt6OKTcYX1jpe9SRof2P9ZYWPzq22tzXAA=",
|
||||||
|
"owner": "NixOS",
|
||||||
|
"repo": "nixpkgs",
|
||||||
|
"rev": "1e2e384c5b7c50dbf8e9c441a9e58d85f408b01f",
|
||||||
|
"type": "github"
|
||||||
|
},
|
||||||
|
"original": {
|
||||||
|
"owner": "NixOS",
|
||||||
|
"ref": "nixos-23.11",
|
||||||
|
"repo": "nixpkgs",
|
||||||
|
"type": "github"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"nixpkgs_2": {
|
||||||
|
"locked": {
|
||||||
|
"lastModified": 1705496572,
|
||||||
|
"narHash": "sha256-rPIe9G5EBLXdBdn9ilGc0nq082lzQd0xGGe092R/5QE=",
|
||||||
|
"owner": "NixOS",
|
||||||
|
"repo": "nixpkgs",
|
||||||
|
"rev": "842d9d80cfd4560648c785f8a4e6f3b096790e19",
|
||||||
|
"type": "github"
|
||||||
|
},
|
||||||
|
"original": {
|
||||||
|
"owner": "NixOS",
|
||||||
|
"ref": "nixos-unstable",
|
||||||
|
"repo": "nixpkgs",
|
||||||
|
"type": "github"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"root": {
|
||||||
|
"inputs": {
|
||||||
|
"attic": "attic",
|
||||||
|
"crane": "crane_2",
|
||||||
|
"fenix": "fenix",
|
||||||
|
"flake-compat": "flake-compat_2",
|
||||||
|
"flake-utils": "flake-utils_2",
|
||||||
|
"nix-filter": "nix-filter",
|
||||||
|
"nixpkgs": "nixpkgs_2"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"rust-analyzer-src": {
|
||||||
|
"flake": false,
|
||||||
|
"locked": {
|
||||||
|
"lastModified": 1705523001,
|
||||||
|
"narHash": "sha256-TWq5vJ6m+9HGSDMsQAmz1TMegMi79R3TTyKjnPWsQp8=",
|
||||||
|
"owner": "rust-lang",
|
||||||
|
"repo": "rust-analyzer",
|
||||||
|
"rev": "9d9b34354d2f13e33568c9c55b226dd014a146a0",
|
||||||
|
"type": "github"
|
||||||
|
},
|
||||||
|
"original": {
|
||||||
|
"owner": "rust-lang",
|
||||||
|
"ref": "nightly",
|
||||||
|
"repo": "rust-analyzer",
|
||||||
|
"type": "github"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"systems": {
|
||||||
|
"locked": {
|
||||||
|
"lastModified": 1681028828,
|
||||||
|
"narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
|
||||||
|
"owner": "nix-systems",
|
||||||
|
"repo": "default",
|
||||||
|
"rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
|
||||||
|
"type": "github"
|
||||||
|
},
|
||||||
|
"original": {
|
||||||
|
"owner": "nix-systems",
|
||||||
|
"repo": "default",
|
||||||
|
"type": "github"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"root": "root",
|
||||||
|
"version": 7
|
||||||
|
}
|
259
flake.nix
Normal file
259
flake.nix
Normal file
|
@ -0,0 +1,259 @@
|
||||||
|
{
|
||||||
|
inputs = {
|
||||||
|
nixpkgs.url = "github:NixOS/nixpkgs?ref=nixos-unstable";
|
||||||
|
flake-utils.url = "github:numtide/flake-utils";
|
||||||
|
nix-filter.url = "github:numtide/nix-filter";
|
||||||
|
flake-compat = {
|
||||||
|
url = "github:edolstra/flake-compat";
|
||||||
|
flake = false;
|
||||||
|
};
|
||||||
|
|
||||||
|
fenix = {
|
||||||
|
url = "github:nix-community/fenix";
|
||||||
|
inputs.nixpkgs.follows = "nixpkgs";
|
||||||
|
};
|
||||||
|
crane = {
|
||||||
|
url = "github:ipetkov/crane?ref=master";
|
||||||
|
inputs.nixpkgs.follows = "nixpkgs";
|
||||||
|
};
|
||||||
|
attic.url = "github:zhaofengli/attic?ref=main";
|
||||||
|
};
|
||||||
|
|
||||||
|
outputs =
|
||||||
|
{ self
|
||||||
|
, nixpkgs
|
||||||
|
, flake-utils
|
||||||
|
, nix-filter
|
||||||
|
|
||||||
|
, fenix
|
||||||
|
, crane
|
||||||
|
, ...
|
||||||
|
}: flake-utils.lib.eachDefaultSystem (system:
|
||||||
|
let
|
||||||
|
pkgsHost = nixpkgs.legacyPackages.${system};
|
||||||
|
|
||||||
|
# Nix-accessible `Cargo.toml`
|
||||||
|
cargoToml = builtins.fromTOML (builtins.readFile ./Cargo.toml);
|
||||||
|
|
||||||
|
# The Rust toolchain to use
|
||||||
|
toolchain = fenix.packages.${system}.fromToolchainFile {
|
||||||
|
file = ./rust-toolchain.toml;
|
||||||
|
|
||||||
|
# See also `rust-toolchain.toml`
|
||||||
|
sha256 = "sha256-SXRtAuO4IqNOQq+nLbrsDFbVk+3aVA8NNpSZsKlVH/8=";
|
||||||
|
};
|
||||||
|
|
||||||
|
builder = pkgs:
|
||||||
|
((crane.mkLib pkgs).overrideToolchain toolchain).buildPackage;
|
||||||
|
|
||||||
|
nativeBuildInputs = pkgs: [
|
||||||
|
# bindgen needs the build platform's libclang. Apparently due to
|
||||||
|
# "splicing weirdness", pkgs.rustPlatform.bindgenHook on its own doesn't
|
||||||
|
# quite do the right thing here.
|
||||||
|
pkgs.buildPackages.rustPlatform.bindgenHook
|
||||||
|
];
|
||||||
|
|
||||||
|
env = pkgs: {
|
||||||
|
ROCKSDB_INCLUDE_DIR = "${pkgs.rocksdb}/include";
|
||||||
|
ROCKSDB_LIB_DIR = "${pkgs.rocksdb}/lib";
|
||||||
|
}
|
||||||
|
// pkgs.lib.optionalAttrs pkgs.stdenv.hostPlatform.isStatic {
|
||||||
|
ROCKSDB_STATIC = "";
|
||||||
|
}
|
||||||
|
// {
|
||||||
|
CARGO_BUILD_RUSTFLAGS = let inherit (pkgs) lib stdenv; in
|
||||||
|
lib.concatStringsSep " " ([]
|
||||||
|
++ lib.optionals
|
||||||
|
# This disables PIE for static builds, which isn't great in terms
|
||||||
|
# of security. Unfortunately, my hand is forced because nixpkgs'
|
||||||
|
# `libstdc++.a` is built without `-fPIE`, which precludes us from
|
||||||
|
# leaving PIE enabled.
|
||||||
|
stdenv.hostPlatform.isStatic
|
||||||
|
["-C" "relocation-model=static"]
|
||||||
|
++ lib.optionals
|
||||||
|
(stdenv.buildPlatform.config != stdenv.hostPlatform.config)
|
||||||
|
["-l" "c"]
|
||||||
|
++ lib.optionals
|
||||||
|
# This check has to match the one [here][0]. We only need to set
|
||||||
|
# these flags when using a different linker. Don't ask me why,
|
||||||
|
# though, because I don't know. All I know is it breaks otherwise.
|
||||||
|
#
|
||||||
|
# [0]: https://github.com/NixOS/nixpkgs/blob/612f97239e2cc474c13c9dafa0df378058c5ad8d/pkgs/build-support/rust/lib/default.nix#L36-L39
|
||||||
|
(
|
||||||
|
# Nixpkgs doesn't check for x86_64 here but we do, because I
|
||||||
|
# observed a failure building statically for x86_64 without
|
||||||
|
# including it here. Linkers are weird.
|
||||||
|
(stdenv.hostPlatform.isAarch64 || stdenv.hostPlatform.isx86_64)
|
||||||
|
&& stdenv.hostPlatform.isStatic
|
||||||
|
&& !stdenv.isDarwin
|
||||||
|
&& !stdenv.cc.bintools.isLLVM
|
||||||
|
)
|
||||||
|
[
|
||||||
|
"-l"
|
||||||
|
"stdc++"
|
||||||
|
"-L"
|
||||||
|
"${stdenv.cc.cc.lib}/${stdenv.hostPlatform.config}/lib"
|
||||||
|
]
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
# What follows is stolen from [here][0]. Its purpose is to properly
|
||||||
|
# configure compilers and linkers for various stages of the build, and
|
||||||
|
# even covers the case of build scripts that need native code compiled and
|
||||||
|
# run on the build platform (I think).
|
||||||
|
#
|
||||||
|
# [0]: https://github.com/NixOS/nixpkgs/blob/612f97239e2cc474c13c9dafa0df378058c5ad8d/pkgs/build-support/rust/lib/default.nix#L64-L78
|
||||||
|
// (
|
||||||
|
let
|
||||||
|
inherit (pkgs.rust.lib) envVars;
|
||||||
|
in
|
||||||
|
pkgs.lib.optionalAttrs
|
||||||
|
(pkgs.stdenv.targetPlatform.rust.rustcTarget
|
||||||
|
!= pkgs.stdenv.hostPlatform.rust.rustcTarget)
|
||||||
|
(
|
||||||
|
let
|
||||||
|
inherit (pkgs.stdenv.targetPlatform.rust) cargoEnvVarTarget;
|
||||||
|
in
|
||||||
|
{
|
||||||
|
"CC_${cargoEnvVarTarget}" = envVars.ccForTarget;
|
||||||
|
"CXX_${cargoEnvVarTarget}" = envVars.cxxForTarget;
|
||||||
|
"CARGO_TARGET_${cargoEnvVarTarget}_LINKER" =
|
||||||
|
envVars.linkerForTarget;
|
||||||
|
}
|
||||||
|
)
|
||||||
|
// (
|
||||||
|
let
|
||||||
|
inherit (pkgs.stdenv.hostPlatform.rust) cargoEnvVarTarget rustcTarget;
|
||||||
|
in
|
||||||
|
{
|
||||||
|
"CC_${cargoEnvVarTarget}" = envVars.ccForHost;
|
||||||
|
"CXX_${cargoEnvVarTarget}" = envVars.cxxForHost;
|
||||||
|
"CARGO_TARGET_${cargoEnvVarTarget}_LINKER" = envVars.linkerForHost;
|
||||||
|
CARGO_BUILD_TARGET = rustcTarget;
|
||||||
|
}
|
||||||
|
)
|
||||||
|
// (
|
||||||
|
let
|
||||||
|
inherit (pkgs.stdenv.buildPlatform.rust) cargoEnvVarTarget;
|
||||||
|
in
|
||||||
|
{
|
||||||
|
"CC_${cargoEnvVarTarget}" = envVars.ccForBuild;
|
||||||
|
"CXX_${cargoEnvVarTarget}" = envVars.cxxForBuild;
|
||||||
|
"CARGO_TARGET_${cargoEnvVarTarget}_LINKER" = envVars.linkerForBuild;
|
||||||
|
HOST_CC = "${pkgs.buildPackages.stdenv.cc}/bin/cc";
|
||||||
|
HOST_CXX = "${pkgs.buildPackages.stdenv.cc}/bin/c++";
|
||||||
|
}
|
||||||
|
));
|
||||||
|
|
||||||
|
package = pkgs: builder pkgs {
|
||||||
|
src = nix-filter {
|
||||||
|
root = ./.;
|
||||||
|
include = [
|
||||||
|
"src"
|
||||||
|
"Cargo.toml"
|
||||||
|
"Cargo.lock"
|
||||||
|
];
|
||||||
|
};
|
||||||
|
|
||||||
|
# This is redundant with CI
|
||||||
|
doCheck = false;
|
||||||
|
|
||||||
|
env = env pkgs;
|
||||||
|
nativeBuildInputs = nativeBuildInputs pkgs;
|
||||||
|
|
||||||
|
meta.mainProgram = cargoToml.package.name;
|
||||||
|
};
|
||||||
|
|
||||||
|
mkOciImage = pkgs: package:
|
||||||
|
pkgs.dockerTools.buildImage {
|
||||||
|
name = package.pname;
|
||||||
|
tag = "next";
|
||||||
|
copyToRoot = [
|
||||||
|
pkgs.dockerTools.caCertificates
|
||||||
|
];
|
||||||
|
config = {
|
||||||
|
# Use the `tini` init system so that signals (e.g. ctrl+c/SIGINT)
|
||||||
|
# are handled as expected
|
||||||
|
Entrypoint = [
|
||||||
|
"${pkgs.lib.getExe' pkgs.tini "tini"}"
|
||||||
|
"--"
|
||||||
|
];
|
||||||
|
Cmd = [
|
||||||
|
"${pkgs.lib.getExe package}"
|
||||||
|
];
|
||||||
|
};
|
||||||
|
};
|
||||||
|
in
|
||||||
|
{
|
||||||
|
packages = {
|
||||||
|
default = package pkgsHost;
|
||||||
|
oci-image = mkOciImage pkgsHost self.packages.${system}.default;
|
||||||
|
}
|
||||||
|
//
|
||||||
|
builtins.listToAttrs
|
||||||
|
(builtins.concatLists
|
||||||
|
(builtins.map
|
||||||
|
(crossSystem:
|
||||||
|
let
|
||||||
|
binaryName = "static-${crossSystem}";
|
||||||
|
pkgsCrossStatic =
|
||||||
|
(import nixpkgs {
|
||||||
|
inherit system;
|
||||||
|
crossSystem = {
|
||||||
|
config = crossSystem;
|
||||||
|
};
|
||||||
|
}).pkgsStatic;
|
||||||
|
in
|
||||||
|
[
|
||||||
|
# An output for a statically-linked binary
|
||||||
|
{
|
||||||
|
name = binaryName;
|
||||||
|
value = package pkgsCrossStatic;
|
||||||
|
}
|
||||||
|
|
||||||
|
# An output for an OCI image based on that binary
|
||||||
|
{
|
||||||
|
name = "oci-image-${crossSystem}";
|
||||||
|
value = mkOciImage
|
||||||
|
pkgsCrossStatic
|
||||||
|
self.packages.${system}.${binaryName};
|
||||||
|
}
|
||||||
|
]
|
||||||
|
)
|
||||||
|
[
|
||||||
|
"x86_64-unknown-linux-musl"
|
||||||
|
"aarch64-unknown-linux-musl"
|
||||||
|
]
|
||||||
|
)
|
||||||
|
);
|
||||||
|
|
||||||
|
devShells.default = pkgsHost.mkShell {
|
||||||
|
env = env pkgsHost // {
|
||||||
|
# Rust Analyzer needs to be able to find the path to default crate
|
||||||
|
# sources, and it can read this environment variable to do so. The
|
||||||
|
# `rust-src` component is required in order for this to work.
|
||||||
|
RUST_SRC_PATH = "${toolchain}/lib/rustlib/src/rust/library";
|
||||||
|
};
|
||||||
|
|
||||||
|
# Development tools
|
||||||
|
nativeBuildInputs = nativeBuildInputs pkgsHost ++ [
|
||||||
|
# Always use nightly rustfmt because most of its options are unstable
|
||||||
|
#
|
||||||
|
# This needs to come before `toolchain` in this list, otherwise
|
||||||
|
# `$PATH` will have stable rustfmt instead.
|
||||||
|
fenix.packages.${system}.latest.rustfmt
|
||||||
|
|
||||||
|
toolchain
|
||||||
|
] ++ (with pkgsHost; [
|
||||||
|
engage
|
||||||
|
|
||||||
|
# Needed for Complement
|
||||||
|
go
|
||||||
|
olm
|
||||||
|
|
||||||
|
# Needed for our script for Complement
|
||||||
|
jq
|
||||||
|
]);
|
||||||
|
};
|
||||||
|
});
|
||||||
|
}
|
198
nix/README.md
Normal file
198
nix/README.md
Normal file
|
@ -0,0 +1,198 @@
|
||||||
|
# Conduit for Nix/NixOS
|
||||||
|
|
||||||
|
This guide assumes you have a recent version of Nix (^2.4) installed.
|
||||||
|
|
||||||
|
Since Conduit ships as a Nix flake, you'll first need to [enable
|
||||||
|
flakes][enable_flakes].
|
||||||
|
|
||||||
|
You can now use the usual Nix commands to interact with Conduit's flake. For
|
||||||
|
example, `nix run gitlab:famedly/conduit` will run Conduit (though you'll need
|
||||||
|
to provide configuration and such manually as usual).
|
||||||
|
|
||||||
|
If your NixOS configuration is defined as a flake, you can depend on this flake
|
||||||
|
to provide a more up-to-date version than provided by `nixpkgs`. In your flake,
|
||||||
|
add the following to your `inputs`:
|
||||||
|
|
||||||
|
```nix
|
||||||
|
conduit = {
|
||||||
|
url = "gitlab:famedly/conduit";
|
||||||
|
|
||||||
|
# Assuming you have an input for nixpkgs called `nixpkgs`. If you experience
|
||||||
|
# build failures while using this, try commenting/deleting this line. This
|
||||||
|
# will probably also require you to always build from source.
|
||||||
|
inputs.nixpkgs.follows = "nixpkgs";
|
||||||
|
};
|
||||||
|
```
|
||||||
|
|
||||||
|
Next, make sure you're passing your flake inputs to the `specialArgs` argument
|
||||||
|
of `nixpkgs.lib.nixosSystem` [as explained here][specialargs]. This guide will
|
||||||
|
assume you've named the group `flake-inputs`.
|
||||||
|
|
||||||
|
Now you can configure Conduit and a reverse proxy for it. Add the following to
|
||||||
|
a new Nix file and include it in your configuration:
|
||||||
|
|
||||||
|
```nix
|
||||||
|
{ config
|
||||||
|
, pkgs
|
||||||
|
, flake-inputs
|
||||||
|
, ...
|
||||||
|
}:
|
||||||
|
|
||||||
|
let
|
||||||
|
# You'll need to edit these values
|
||||||
|
|
||||||
|
# The hostname that will appear in your user and room IDs
|
||||||
|
server_name = "example.com";
|
||||||
|
|
||||||
|
# The hostname that Conduit actually runs on
|
||||||
|
#
|
||||||
|
# This can be the same as `server_name` if you want. This is only necessary
|
||||||
|
# when Conduit is running on a different machine than the one hosting your
|
||||||
|
# root domain. This configuration also assumes this is all running on a single
|
||||||
|
# machine, some tweaks will need to be made if this is not the case.
|
||||||
|
matrix_hostname = "matrix.${server_name}";
|
||||||
|
|
||||||
|
# An admin email for TLS certificate notifications
|
||||||
|
admin_email = "admin@${server_name}";
|
||||||
|
|
||||||
|
# These ones you can leave alone
|
||||||
|
|
||||||
|
# Build a dervation that stores the content of `${server_name}/.well-known/matrix/server`
|
||||||
|
well_known_server = pkgs.writeText "well-known-matrix-server" ''
|
||||||
|
{
|
||||||
|
"m.server": "${matrix_hostname}"
|
||||||
|
}
|
||||||
|
'';
|
||||||
|
|
||||||
|
# Build a dervation that stores the content of `${server_name}/.well-known/matrix/client`
|
||||||
|
well_known_client = pkgs.writeText "well-known-matrix-client" ''
|
||||||
|
{
|
||||||
|
"m.homeserver": {
|
||||||
|
"base_url": "https://${matrix_hostname}"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
'';
|
||||||
|
in
|
||||||
|
|
||||||
|
{
|
||||||
|
# Configure Conduit itself
|
||||||
|
services.matrix-conduit = {
|
||||||
|
enable = true;
|
||||||
|
|
||||||
|
# This causes NixOS to use the flake defined in this repository instead of
|
||||||
|
# the build of Conduit built into nixpkgs.
|
||||||
|
package = flake-inputs.conduit.packages.${pkgs.system}.default;
|
||||||
|
|
||||||
|
settings.global = {
|
||||||
|
inherit server_name;
|
||||||
|
};
|
||||||
|
};
|
||||||
|
|
||||||
|
# Configure automated TLS acquisition/renewal
|
||||||
|
security.acme = {
|
||||||
|
acceptTerms = true;
|
||||||
|
defaults = {
|
||||||
|
email = admin_email;
|
||||||
|
};
|
||||||
|
};
|
||||||
|
|
||||||
|
# ACME data must be readable by the NGINX user
|
||||||
|
users.users.nginx.extraGroups = [
|
||||||
|
"acme"
|
||||||
|
];
|
||||||
|
|
||||||
|
# Configure NGINX as a reverse proxy
|
||||||
|
services.nginx = {
|
||||||
|
enable = true;
|
||||||
|
recommendedProxySettings = true;
|
||||||
|
|
||||||
|
virtualHosts = {
|
||||||
|
"${matrix_hostname}" = {
|
||||||
|
forceSSL = true;
|
||||||
|
enableACME = true;
|
||||||
|
|
||||||
|
listen = [
|
||||||
|
{
|
||||||
|
addr = "0.0.0.0";
|
||||||
|
port = 443;
|
||||||
|
ssl = true;
|
||||||
|
}
|
||||||
|
{
|
||||||
|
addr = "[::]";
|
||||||
|
port = 443;
|
||||||
|
ssl = true;
|
||||||
|
} {
|
||||||
|
addr = "0.0.0.0";
|
||||||
|
port = 8448;
|
||||||
|
ssl = true;
|
||||||
|
}
|
||||||
|
{
|
||||||
|
addr = "[::]";
|
||||||
|
port = 8448;
|
||||||
|
ssl = true;
|
||||||
|
}
|
||||||
|
];
|
||||||
|
|
||||||
|
locations."/_matrix/" = {
|
||||||
|
proxyPass = "http://backend_conduit$request_uri";
|
||||||
|
proxyWebsockets = true;
|
||||||
|
extraConfig = ''
|
||||||
|
proxy_set_header Host $host;
|
||||||
|
proxy_buffering off;
|
||||||
|
'';
|
||||||
|
};
|
||||||
|
|
||||||
|
extraConfig = ''
|
||||||
|
merge_slashes off;
|
||||||
|
'';
|
||||||
|
};
|
||||||
|
|
||||||
|
"${server_name}" = {
|
||||||
|
forceSSL = true;
|
||||||
|
enableACME = true;
|
||||||
|
|
||||||
|
locations."=/.well-known/matrix/server" = {
|
||||||
|
# Use the contents of the derivation built previously
|
||||||
|
alias = "${well_known_server}";
|
||||||
|
|
||||||
|
extraConfig = ''
|
||||||
|
# Set the header since by default NGINX thinks it's just bytes
|
||||||
|
default_type application/json;
|
||||||
|
'';
|
||||||
|
};
|
||||||
|
|
||||||
|
locations."=/.well-known/matrix/client" = {
|
||||||
|
# Use the contents of the derivation built previously
|
||||||
|
alias = "${well_known_client}";
|
||||||
|
|
||||||
|
extraConfig = ''
|
||||||
|
# Set the header since by default NGINX thinks it's just bytes
|
||||||
|
default_type application/json;
|
||||||
|
|
||||||
|
# https://matrix.org/docs/spec/client_server/r0.4.0#web-browser-clients
|
||||||
|
add_header Access-Control-Allow-Origin "*";
|
||||||
|
'';
|
||||||
|
};
|
||||||
|
};
|
||||||
|
};
|
||||||
|
|
||||||
|
upstreams = {
|
||||||
|
"backend_conduit" = {
|
||||||
|
servers = {
|
||||||
|
"[::1]:${toString config.services.matrix-conduit.settings.global.port}" = { };
|
||||||
|
};
|
||||||
|
};
|
||||||
|
};
|
||||||
|
};
|
||||||
|
|
||||||
|
# Open firewall ports for HTTP, HTTPS, and Matrix federation
|
||||||
|
networking.firewall.allowedTCPPorts = [ 80 443 8448 ];
|
||||||
|
networking.firewall.allowedUDPPorts = [ 80 443 8448 ];
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
Now you can rebuild your system configuration and you should be good to go!
|
||||||
|
|
||||||
|
[enable_flakes]: https://nixos.wiki/wiki/Flakes#Enable_flakes
|
||||||
|
|
||||||
|
[specialargs]: https://nixos.wiki/wiki/Flakes#Using_nix_flakes_with_NixOS
|
22
rust-toolchain.toml
Normal file
22
rust-toolchain.toml
Normal file
|
@ -0,0 +1,22 @@
|
||||||
|
# This is the authoritiative configuration of this project's Rust toolchain.
|
||||||
|
#
|
||||||
|
# Other files that need upkeep when this changes:
|
||||||
|
#
|
||||||
|
# * `.gitlab-ci.yml`
|
||||||
|
# * `Cargo.toml`
|
||||||
|
# * `flake.nix`
|
||||||
|
#
|
||||||
|
# Search in those files for `rust-toolchain.toml` to find the relevant places.
|
||||||
|
# If you're having trouble making the relevant changes, bug a maintainer.
|
||||||
|
|
||||||
|
[toolchain]
|
||||||
|
channel = "1.75.0"
|
||||||
|
components = [
|
||||||
|
# For rust-analyzer
|
||||||
|
"rust-src",
|
||||||
|
]
|
||||||
|
targets = [
|
||||||
|
"x86_64-unknown-linux-gnu",
|
||||||
|
"x86_64-unknown-linux-musl",
|
||||||
|
"aarch64-unknown-linux-musl",
|
||||||
|
]
|
|
@ -1,12 +1,11 @@
|
||||||
use crate::{utils, Error, Result};
|
use crate::{services, utils, Error, Result};
|
||||||
use bytes::BytesMut;
|
use bytes::BytesMut;
|
||||||
use ruma::api::{IncomingResponse, MatrixVersion, OutgoingRequest, SendAccessToken};
|
use ruma::api::{IncomingResponse, MatrixVersion, OutgoingRequest, SendAccessToken};
|
||||||
use std::{fmt::Debug, mem, time::Duration};
|
use std::{fmt::Debug, mem, time::Duration};
|
||||||
use tracing::warn;
|
use tracing::warn;
|
||||||
|
|
||||||
#[tracing::instrument(skip(globals, request))]
|
#[tracing::instrument(skip(request))]
|
||||||
pub(crate) async fn send_request<T: OutgoingRequest>(
|
pub(crate) async fn send_request<T: OutgoingRequest>(
|
||||||
globals: &crate::database::globals::Globals,
|
|
||||||
registration: serde_yaml::Value,
|
registration: serde_yaml::Value,
|
||||||
request: T,
|
request: T,
|
||||||
) -> Result<T::IncomingResponse>
|
) -> Result<T::IncomingResponse>
|
||||||
|
@ -19,7 +18,7 @@ where
|
||||||
let mut http_request = request
|
let mut http_request = request
|
||||||
.try_into_http_request::<BytesMut>(
|
.try_into_http_request::<BytesMut>(
|
||||||
destination,
|
destination,
|
||||||
SendAccessToken::IfRequired(""),
|
SendAccessToken::IfRequired(hs_token),
|
||||||
&[MatrixVersion::V1_0],
|
&[MatrixVersion::V1_0],
|
||||||
)
|
)
|
||||||
.unwrap()
|
.unwrap()
|
||||||
|
@ -46,7 +45,23 @@ where
|
||||||
*reqwest_request.timeout_mut() = Some(Duration::from_secs(30));
|
*reqwest_request.timeout_mut() = Some(Duration::from_secs(30));
|
||||||
|
|
||||||
let url = reqwest_request.url().clone();
|
let url = reqwest_request.url().clone();
|
||||||
let mut response = globals.default_client().execute(reqwest_request).await?;
|
let mut response = match services()
|
||||||
|
.globals
|
||||||
|
.default_client()
|
||||||
|
.execute(reqwest_request)
|
||||||
|
.await
|
||||||
|
{
|
||||||
|
Ok(r) => r,
|
||||||
|
Err(e) => {
|
||||||
|
warn!(
|
||||||
|
"Could not send request to appservice {:?} at {}: {}",
|
||||||
|
registration.get("id"),
|
||||||
|
destination,
|
||||||
|
e
|
||||||
|
);
|
||||||
|
return Err(e.into());
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
// reqwest::Response -> http::Response conversion
|
// reqwest::Response -> http::Response conversion
|
||||||
let status = response.status();
|
let status = response.status();
|
|
@ -1,13 +1,11 @@
|
||||||
use super::{DEVICE_ID_LENGTH, SESSION_ID_LENGTH, TOKEN_LENGTH};
|
use super::{DEVICE_ID_LENGTH, SESSION_ID_LENGTH, TOKEN_LENGTH};
|
||||||
use crate::{
|
use crate::{api::client_server, services, utils, Error, Result, Ruma};
|
||||||
database::{admin::make_user_admin, DatabaseGuard},
|
|
||||||
utils, Error, Result, Ruma,
|
|
||||||
};
|
|
||||||
use ruma::{
|
use ruma::{
|
||||||
api::client::{
|
api::client::{
|
||||||
account::{
|
account::{
|
||||||
change_password, deactivate, get_3pids, get_username_availability, register, whoami,
|
change_password, deactivate, get_3pids, get_username_availability, register,
|
||||||
ThirdPartyIdRemovalStatus,
|
request_3pid_management_token_via_email, request_3pid_management_token_via_msisdn,
|
||||||
|
whoami, ThirdPartyIdRemovalStatus,
|
||||||
},
|
},
|
||||||
error::ErrorKind,
|
error::ErrorKind,
|
||||||
uiaa::{AuthFlow, AuthType, UiaaInfo},
|
uiaa::{AuthFlow, AuthType, UiaaInfo},
|
||||||
|
@ -32,15 +30,16 @@ const RANDOM_USER_ID_LENGTH: usize = 10;
|
||||||
///
|
///
|
||||||
/// Note: This will not reserve the username, so the username might become invalid when trying to register
|
/// Note: This will not reserve the username, so the username might become invalid when trying to register
|
||||||
pub async fn get_register_available_route(
|
pub async fn get_register_available_route(
|
||||||
db: DatabaseGuard,
|
body: Ruma<get_username_availability::v3::Request>,
|
||||||
body: Ruma<get_username_availability::v3::IncomingRequest>,
|
|
||||||
) -> Result<get_username_availability::v3::Response> {
|
) -> Result<get_username_availability::v3::Response> {
|
||||||
// Validate user id
|
// Validate user id
|
||||||
let user_id =
|
let user_id = UserId::parse_with_server_name(
|
||||||
UserId::parse_with_server_name(body.username.to_lowercase(), db.globals.server_name())
|
body.username.to_lowercase(),
|
||||||
|
services().globals.server_name(),
|
||||||
|
)
|
||||||
.ok()
|
.ok()
|
||||||
.filter(|user_id| {
|
.filter(|user_id| {
|
||||||
!user_id.is_historical() && user_id.server_name() == db.globals.server_name()
|
!user_id.is_historical() && user_id.server_name() == services().globals.server_name()
|
||||||
})
|
})
|
||||||
.ok_or(Error::BadRequest(
|
.ok_or(Error::BadRequest(
|
||||||
ErrorKind::InvalidUsername,
|
ErrorKind::InvalidUsername,
|
||||||
|
@ -48,7 +47,7 @@ pub async fn get_register_available_route(
|
||||||
))?;
|
))?;
|
||||||
|
|
||||||
// Check if username is creative enough
|
// Check if username is creative enough
|
||||||
if db.users.exists(&user_id)? {
|
if services().users.exists(&user_id)? {
|
||||||
return Err(Error::BadRequest(
|
return Err(Error::BadRequest(
|
||||||
ErrorKind::UserInUse,
|
ErrorKind::UserInUse,
|
||||||
"Desired user ID is already taken.",
|
"Desired user ID is already taken.",
|
||||||
|
@ -74,11 +73,11 @@ pub async fn get_register_available_route(
|
||||||
/// - If type is not guest and no username is given: Always fails after UIAA check
|
/// - If type is not guest and no username is given: Always fails after UIAA check
|
||||||
/// - Creates a new account and populates it with default account data
|
/// - Creates a new account and populates it with default account data
|
||||||
/// - If `inhibit_login` is false: Creates a device and returns device id and access_token
|
/// - If `inhibit_login` is false: Creates a device and returns device id and access_token
|
||||||
pub async fn register_route(
|
pub async fn register_route(body: Ruma<register::v3::Request>) -> Result<register::v3::Response> {
|
||||||
db: DatabaseGuard,
|
if !services().globals.allow_registration()
|
||||||
body: Ruma<register::v3::IncomingRequest>,
|
&& !body.from_appservice
|
||||||
) -> Result<register::v3::Response> {
|
&& services().globals.config.registration_token.is_none()
|
||||||
if !db.globals.allow_registration() && !body.from_appservice {
|
{
|
||||||
return Err(Error::BadRequest(
|
return Err(Error::BadRequest(
|
||||||
ErrorKind::Forbidden,
|
ErrorKind::Forbidden,
|
||||||
"Registration has been disabled.",
|
"Registration has been disabled.",
|
||||||
|
@ -89,18 +88,20 @@ pub async fn register_route(
|
||||||
|
|
||||||
let user_id = match (&body.username, is_guest) {
|
let user_id = match (&body.username, is_guest) {
|
||||||
(Some(username), false) => {
|
(Some(username), false) => {
|
||||||
let proposed_user_id =
|
let proposed_user_id = UserId::parse_with_server_name(
|
||||||
UserId::parse_with_server_name(username.to_lowercase(), db.globals.server_name())
|
username.to_lowercase(),
|
||||||
|
services().globals.server_name(),
|
||||||
|
)
|
||||||
.ok()
|
.ok()
|
||||||
.filter(|user_id| {
|
.filter(|user_id| {
|
||||||
!user_id.is_historical()
|
!user_id.is_historical()
|
||||||
&& user_id.server_name() == db.globals.server_name()
|
&& user_id.server_name() == services().globals.server_name()
|
||||||
})
|
})
|
||||||
.ok_or(Error::BadRequest(
|
.ok_or(Error::BadRequest(
|
||||||
ErrorKind::InvalidUsername,
|
ErrorKind::InvalidUsername,
|
||||||
"Username is invalid.",
|
"Username is invalid.",
|
||||||
))?;
|
))?;
|
||||||
if db.users.exists(&proposed_user_id)? {
|
if services().users.exists(&proposed_user_id)? {
|
||||||
return Err(Error::BadRequest(
|
return Err(Error::BadRequest(
|
||||||
ErrorKind::UserInUse,
|
ErrorKind::UserInUse,
|
||||||
"Desired user ID is already taken.",
|
"Desired user ID is already taken.",
|
||||||
|
@ -111,10 +112,10 @@ pub async fn register_route(
|
||||||
_ => loop {
|
_ => loop {
|
||||||
let proposed_user_id = UserId::parse_with_server_name(
|
let proposed_user_id = UserId::parse_with_server_name(
|
||||||
utils::random_string(RANDOM_USER_ID_LENGTH).to_lowercase(),
|
utils::random_string(RANDOM_USER_ID_LENGTH).to_lowercase(),
|
||||||
db.globals.server_name(),
|
services().globals.server_name(),
|
||||||
)
|
)
|
||||||
.unwrap();
|
.unwrap();
|
||||||
if !db.users.exists(&proposed_user_id)? {
|
if !services().users.exists(&proposed_user_id)? {
|
||||||
break proposed_user_id;
|
break proposed_user_id;
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
|
@ -123,7 +124,11 @@ pub async fn register_route(
|
||||||
// UIAA
|
// UIAA
|
||||||
let mut uiaainfo = UiaaInfo {
|
let mut uiaainfo = UiaaInfo {
|
||||||
flows: vec![AuthFlow {
|
flows: vec![AuthFlow {
|
||||||
stages: vec![AuthType::Dummy],
|
stages: if services().globals.config.registration_token.is_some() {
|
||||||
|
vec![AuthType::RegistrationToken]
|
||||||
|
} else {
|
||||||
|
vec![AuthType::Dummy]
|
||||||
|
},
|
||||||
}],
|
}],
|
||||||
completed: Vec::new(),
|
completed: Vec::new(),
|
||||||
params: Default::default(),
|
params: Default::default(),
|
||||||
|
@ -131,16 +136,14 @@ pub async fn register_route(
|
||||||
auth_error: None,
|
auth_error: None,
|
||||||
};
|
};
|
||||||
|
|
||||||
if !body.from_appservice {
|
if !body.from_appservice && !is_guest {
|
||||||
if let Some(auth) = &body.auth {
|
if let Some(auth) = &body.auth {
|
||||||
let (worked, uiaainfo) = db.uiaa.try_auth(
|
let (worked, uiaainfo) = services().uiaa.try_auth(
|
||||||
&UserId::parse_with_server_name("", db.globals.server_name())
|
&UserId::parse_with_server_name("", services().globals.server_name())
|
||||||
.expect("we know this is valid"),
|
.expect("we know this is valid"),
|
||||||
"".into(),
|
"".into(),
|
||||||
auth,
|
auth,
|
||||||
&uiaainfo,
|
&uiaainfo,
|
||||||
&db.users,
|
|
||||||
&db.globals,
|
|
||||||
)?;
|
)?;
|
||||||
if !worked {
|
if !worked {
|
||||||
return Err(Error::Uiaa(uiaainfo));
|
return Err(Error::Uiaa(uiaainfo));
|
||||||
|
@ -148,8 +151,8 @@ pub async fn register_route(
|
||||||
// Success!
|
// Success!
|
||||||
} else if let Some(json) = body.json_body {
|
} else if let Some(json) = body.json_body {
|
||||||
uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH));
|
uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH));
|
||||||
db.uiaa.create(
|
services().uiaa.create(
|
||||||
&UserId::parse_with_server_name("", db.globals.server_name())
|
&UserId::parse_with_server_name("", services().globals.server_name())
|
||||||
.expect("we know this is valid"),
|
.expect("we know this is valid"),
|
||||||
"".into(),
|
"".into(),
|
||||||
&uiaainfo,
|
&uiaainfo,
|
||||||
|
@ -168,30 +171,31 @@ pub async fn register_route(
|
||||||
};
|
};
|
||||||
|
|
||||||
// Create user
|
// Create user
|
||||||
db.users.create(&user_id, password)?;
|
services().users.create(&user_id, password)?;
|
||||||
|
|
||||||
// Default to pretty displayname
|
// Default to pretty displayname
|
||||||
let mut displayname = user_id.localpart().to_owned();
|
let mut displayname = user_id.localpart().to_owned();
|
||||||
|
|
||||||
// If enabled append lightning bolt to display name (default true)
|
// If enabled append lightning bolt to display name (default true)
|
||||||
if db.globals.enable_lightning_bolt() {
|
if services().globals.enable_lightning_bolt() {
|
||||||
displayname.push_str(" ⚡️");
|
displayname.push_str(" ⚡️");
|
||||||
}
|
}
|
||||||
|
|
||||||
db.users
|
services()
|
||||||
|
.users
|
||||||
.set_displayname(&user_id, Some(displayname.clone()))?;
|
.set_displayname(&user_id, Some(displayname.clone()))?;
|
||||||
|
|
||||||
// Initial account data
|
// Initial account data
|
||||||
db.account_data.update(
|
services().account_data.update(
|
||||||
None,
|
None,
|
||||||
&user_id,
|
&user_id,
|
||||||
GlobalAccountDataEventType::PushRules.to_string().into(),
|
GlobalAccountDataEventType::PushRules.to_string().into(),
|
||||||
&ruma::events::push_rules::PushRulesEvent {
|
&serde_json::to_value(ruma::events::push_rules::PushRulesEvent {
|
||||||
content: ruma::events::push_rules::PushRulesEventContent {
|
content: ruma::events::push_rules::PushRulesEventContent {
|
||||||
global: push::Ruleset::server_default(&user_id),
|
global: push::Ruleset::server_default(&user_id),
|
||||||
},
|
},
|
||||||
},
|
})
|
||||||
&db.globals,
|
.expect("to json always works"),
|
||||||
)?;
|
)?;
|
||||||
|
|
||||||
// Inhibit login does not work for guests
|
// Inhibit login does not work for guests
|
||||||
|
@ -200,6 +204,8 @@ pub async fn register_route(
|
||||||
access_token: None,
|
access_token: None,
|
||||||
user_id,
|
user_id,
|
||||||
device_id: None,
|
device_id: None,
|
||||||
|
refresh_token: None,
|
||||||
|
expires_in: None,
|
||||||
});
|
});
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -215,7 +221,7 @@ pub async fn register_route(
|
||||||
let token = utils::random_string(TOKEN_LENGTH);
|
let token = utils::random_string(TOKEN_LENGTH);
|
||||||
|
|
||||||
// Create device for this account
|
// Create device for this account
|
||||||
db.users.create_device(
|
services().users.create_device(
|
||||||
&user_id,
|
&user_id,
|
||||||
&device_id,
|
&device_id,
|
||||||
&token,
|
&token,
|
||||||
|
@ -223,26 +229,31 @@ pub async fn register_route(
|
||||||
)?;
|
)?;
|
||||||
|
|
||||||
info!("New user {} registered on this server.", user_id);
|
info!("New user {} registered on this server.", user_id);
|
||||||
db.admin
|
if !body.from_appservice && !is_guest {
|
||||||
|
services()
|
||||||
|
.admin
|
||||||
.send_message(RoomMessageEventContent::notice_plain(format!(
|
.send_message(RoomMessageEventContent::notice_plain(format!(
|
||||||
"New user {} registered on this server.",
|
"New user {user_id} registered on this server."
|
||||||
user_id
|
|
||||||
)));
|
)));
|
||||||
|
}
|
||||||
|
|
||||||
// If this is the first real user, grant them admin privileges
|
// If this is the first real user, grant them admin privileges
|
||||||
// Note: the server user, @conduit:servername, is generated first
|
// Note: the server user, @conduit:servername, is generated first
|
||||||
if db.users.count()? == 2 {
|
if services().users.count()? == 2 {
|
||||||
make_user_admin(&db, &user_id, displayname).await?;
|
services()
|
||||||
|
.admin
|
||||||
|
.make_user_admin(&user_id, displayname)
|
||||||
|
.await?;
|
||||||
|
|
||||||
warn!("Granting {} admin privileges as the first user", user_id);
|
warn!("Granting {} admin privileges as the first user", user_id);
|
||||||
}
|
}
|
||||||
|
|
||||||
db.flush()?;
|
|
||||||
|
|
||||||
Ok(register::v3::Response {
|
Ok(register::v3::Response {
|
||||||
access_token: Some(token),
|
access_token: Some(token),
|
||||||
user_id,
|
user_id,
|
||||||
device_id: Some(device_id),
|
device_id: Some(device_id),
|
||||||
|
refresh_token: None,
|
||||||
|
expires_in: None,
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -261,8 +272,7 @@ pub async fn register_route(
|
||||||
/// - Forgets to-device events
|
/// - Forgets to-device events
|
||||||
/// - Triggers device list updates
|
/// - Triggers device list updates
|
||||||
pub async fn change_password_route(
|
pub async fn change_password_route(
|
||||||
db: DatabaseGuard,
|
body: Ruma<change_password::v3::Request>,
|
||||||
body: Ruma<change_password::v3::IncomingRequest>,
|
|
||||||
) -> Result<change_password::v3::Response> {
|
) -> Result<change_password::v3::Response> {
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
let sender_device = body.sender_device.as_ref().expect("user is authenticated");
|
let sender_device = body.sender_device.as_ref().expect("user is authenticated");
|
||||||
|
@ -278,49 +288,45 @@ pub async fn change_password_route(
|
||||||
};
|
};
|
||||||
|
|
||||||
if let Some(auth) = &body.auth {
|
if let Some(auth) = &body.auth {
|
||||||
let (worked, uiaainfo) = db.uiaa.try_auth(
|
let (worked, uiaainfo) =
|
||||||
sender_user,
|
services()
|
||||||
sender_device,
|
.uiaa
|
||||||
auth,
|
.try_auth(sender_user, sender_device, auth, &uiaainfo)?;
|
||||||
&uiaainfo,
|
|
||||||
&db.users,
|
|
||||||
&db.globals,
|
|
||||||
)?;
|
|
||||||
if !worked {
|
if !worked {
|
||||||
return Err(Error::Uiaa(uiaainfo));
|
return Err(Error::Uiaa(uiaainfo));
|
||||||
}
|
}
|
||||||
// Success!
|
// Success!
|
||||||
} else if let Some(json) = body.json_body {
|
} else if let Some(json) = body.json_body {
|
||||||
uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH));
|
uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH));
|
||||||
db.uiaa
|
services()
|
||||||
|
.uiaa
|
||||||
.create(sender_user, sender_device, &uiaainfo, &json)?;
|
.create(sender_user, sender_device, &uiaainfo, &json)?;
|
||||||
return Err(Error::Uiaa(uiaainfo));
|
return Err(Error::Uiaa(uiaainfo));
|
||||||
} else {
|
} else {
|
||||||
return Err(Error::BadRequest(ErrorKind::NotJson, "Not json."));
|
return Err(Error::BadRequest(ErrorKind::NotJson, "Not json."));
|
||||||
}
|
}
|
||||||
|
|
||||||
db.users
|
services()
|
||||||
|
.users
|
||||||
.set_password(sender_user, Some(&body.new_password))?;
|
.set_password(sender_user, Some(&body.new_password))?;
|
||||||
|
|
||||||
if body.logout_devices {
|
if body.logout_devices {
|
||||||
// Logout all devices except the current one
|
// Logout all devices except the current one
|
||||||
for id in db
|
for id in services()
|
||||||
.users
|
.users
|
||||||
.all_device_ids(sender_user)
|
.all_device_ids(sender_user)
|
||||||
.filter_map(|id| id.ok())
|
.filter_map(|id| id.ok())
|
||||||
.filter(|id| id != sender_device)
|
.filter(|id| id != sender_device)
|
||||||
{
|
{
|
||||||
db.users.remove_device(sender_user, &id)?;
|
services().users.remove_device(sender_user, &id)?;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
db.flush()?;
|
|
||||||
|
|
||||||
info!("User {} changed their password.", sender_user);
|
info!("User {} changed their password.", sender_user);
|
||||||
db.admin
|
services()
|
||||||
|
.admin
|
||||||
.send_message(RoomMessageEventContent::notice_plain(format!(
|
.send_message(RoomMessageEventContent::notice_plain(format!(
|
||||||
"User {} changed their password.",
|
"User {sender_user} changed their password."
|
||||||
sender_user
|
|
||||||
)));
|
)));
|
||||||
|
|
||||||
Ok(change_password::v3::Response {})
|
Ok(change_password::v3::Response {})
|
||||||
|
@ -331,17 +337,14 @@ pub async fn change_password_route(
|
||||||
/// Get user_id of the sender user.
|
/// Get user_id of the sender user.
|
||||||
///
|
///
|
||||||
/// Note: Also works for Application Services
|
/// Note: Also works for Application Services
|
||||||
pub async fn whoami_route(
|
pub async fn whoami_route(body: Ruma<whoami::v3::Request>) -> Result<whoami::v3::Response> {
|
||||||
db: DatabaseGuard,
|
|
||||||
body: Ruma<whoami::v3::Request>,
|
|
||||||
) -> Result<whoami::v3::Response> {
|
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
let device_id = body.sender_device.as_ref().cloned();
|
let device_id = body.sender_device.as_ref().cloned();
|
||||||
|
|
||||||
Ok(whoami::v3::Response {
|
Ok(whoami::v3::Response {
|
||||||
user_id: sender_user.clone(),
|
user_id: sender_user.clone(),
|
||||||
device_id,
|
device_id,
|
||||||
is_guest: db.users.is_deactivated(&sender_user)?,
|
is_guest: services().users.is_deactivated(sender_user)? && !body.from_appservice,
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -356,8 +359,7 @@ pub async fn whoami_route(
|
||||||
/// - Triggers device list updates
|
/// - Triggers device list updates
|
||||||
/// - Removes ability to log in again
|
/// - Removes ability to log in again
|
||||||
pub async fn deactivate_route(
|
pub async fn deactivate_route(
|
||||||
db: DatabaseGuard,
|
body: Ruma<deactivate::v3::Request>,
|
||||||
body: Ruma<deactivate::v3::IncomingRequest>,
|
|
||||||
) -> Result<deactivate::v3::Response> {
|
) -> Result<deactivate::v3::Response> {
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
let sender_device = body.sender_device.as_ref().expect("user is authenticated");
|
let sender_device = body.sender_device.as_ref().expect("user is authenticated");
|
||||||
|
@ -373,21 +375,18 @@ pub async fn deactivate_route(
|
||||||
};
|
};
|
||||||
|
|
||||||
if let Some(auth) = &body.auth {
|
if let Some(auth) = &body.auth {
|
||||||
let (worked, uiaainfo) = db.uiaa.try_auth(
|
let (worked, uiaainfo) =
|
||||||
sender_user,
|
services()
|
||||||
sender_device,
|
.uiaa
|
||||||
auth,
|
.try_auth(sender_user, sender_device, auth, &uiaainfo)?;
|
||||||
&uiaainfo,
|
|
||||||
&db.users,
|
|
||||||
&db.globals,
|
|
||||||
)?;
|
|
||||||
if !worked {
|
if !worked {
|
||||||
return Err(Error::Uiaa(uiaainfo));
|
return Err(Error::Uiaa(uiaainfo));
|
||||||
}
|
}
|
||||||
// Success!
|
// Success!
|
||||||
} else if let Some(json) = body.json_body {
|
} else if let Some(json) = body.json_body {
|
||||||
uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH));
|
uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH));
|
||||||
db.uiaa
|
services()
|
||||||
|
.uiaa
|
||||||
.create(sender_user, sender_device, &uiaainfo, &json)?;
|
.create(sender_user, sender_device, &uiaainfo, &json)?;
|
||||||
return Err(Error::Uiaa(uiaainfo));
|
return Err(Error::Uiaa(uiaainfo));
|
||||||
} else {
|
} else {
|
||||||
|
@ -395,26 +394,24 @@ pub async fn deactivate_route(
|
||||||
}
|
}
|
||||||
|
|
||||||
// Make the user leave all rooms before deactivation
|
// Make the user leave all rooms before deactivation
|
||||||
db.rooms.leave_all_rooms(&sender_user, &db).await?;
|
client_server::leave_all_rooms(sender_user).await?;
|
||||||
|
|
||||||
// Remove devices and mark account as deactivated
|
// Remove devices and mark account as deactivated
|
||||||
db.users.deactivate_account(sender_user)?;
|
services().users.deactivate_account(sender_user)?;
|
||||||
|
|
||||||
info!("User {} deactivated their account.", sender_user);
|
info!("User {} deactivated their account.", sender_user);
|
||||||
db.admin
|
services()
|
||||||
|
.admin
|
||||||
.send_message(RoomMessageEventContent::notice_plain(format!(
|
.send_message(RoomMessageEventContent::notice_plain(format!(
|
||||||
"User {} deactivated their account.",
|
"User {sender_user} deactivated their account."
|
||||||
sender_user
|
|
||||||
)));
|
)));
|
||||||
|
|
||||||
db.flush()?;
|
|
||||||
|
|
||||||
Ok(deactivate::v3::Response {
|
Ok(deactivate::v3::Response {
|
||||||
id_server_unbind_result: ThirdPartyIdRemovalStatus::NoSupport,
|
id_server_unbind_result: ThirdPartyIdRemovalStatus::NoSupport,
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
/// # `GET _matrix/client/r0/account/3pid`
|
/// # `GET _matrix/client/v3/account/3pid`
|
||||||
///
|
///
|
||||||
/// Get a list of third party identifiers associated with this account.
|
/// Get a list of third party identifiers associated with this account.
|
||||||
///
|
///
|
||||||
|
@ -426,3 +423,31 @@ pub async fn third_party_route(
|
||||||
|
|
||||||
Ok(get_3pids::v3::Response::new(Vec::new()))
|
Ok(get_3pids::v3::Response::new(Vec::new()))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// # `POST /_matrix/client/v3/account/3pid/email/requestToken`
|
||||||
|
///
|
||||||
|
/// "This API should be used to request validation tokens when adding an email address to an account"
|
||||||
|
///
|
||||||
|
/// - 403 signals that The homeserver does not allow the third party identifier as a contact option.
|
||||||
|
pub async fn request_3pid_management_token_via_email_route(
|
||||||
|
_body: Ruma<request_3pid_management_token_via_email::v3::Request>,
|
||||||
|
) -> Result<request_3pid_management_token_via_email::v3::Response> {
|
||||||
|
Err(Error::BadRequest(
|
||||||
|
ErrorKind::ThreepidDenied,
|
||||||
|
"Third party identifier is not allowed",
|
||||||
|
))
|
||||||
|
}
|
||||||
|
|
||||||
|
/// # `POST /_matrix/client/v3/account/3pid/msisdn/requestToken`
|
||||||
|
///
|
||||||
|
/// "This API should be used to request validation tokens when adding an phone number to an account"
|
||||||
|
///
|
||||||
|
/// - 403 signals that The homeserver does not allow the third party identifier as a contact option.
|
||||||
|
pub async fn request_3pid_management_token_via_msisdn_route(
|
||||||
|
_body: Ruma<request_3pid_management_token_via_msisdn::v3::Request>,
|
||||||
|
) -> Result<request_3pid_management_token_via_msisdn::v3::Response> {
|
||||||
|
Err(Error::BadRequest(
|
||||||
|
ErrorKind::ThreepidDenied,
|
||||||
|
"Third party identifier is not allowed",
|
||||||
|
))
|
||||||
|
}
|
|
@ -1,4 +1,5 @@
|
||||||
use crate::{database::DatabaseGuard, Database, Error, Result, Ruma};
|
use crate::{services, Error, Result, Ruma};
|
||||||
|
use rand::seq::SliceRandom;
|
||||||
use regex::Regex;
|
use regex::Regex;
|
||||||
use ruma::{
|
use ruma::{
|
||||||
api::{
|
api::{
|
||||||
|
@ -9,31 +10,35 @@ use ruma::{
|
||||||
},
|
},
|
||||||
federation,
|
federation,
|
||||||
},
|
},
|
||||||
RoomAliasId,
|
OwnedRoomAliasId,
|
||||||
};
|
};
|
||||||
|
|
||||||
/// # `PUT /_matrix/client/r0/directory/room/{roomAlias}`
|
/// # `PUT /_matrix/client/r0/directory/room/{roomAlias}`
|
||||||
///
|
///
|
||||||
/// Creates a new room alias on this server.
|
/// Creates a new room alias on this server.
|
||||||
pub async fn create_alias_route(
|
pub async fn create_alias_route(
|
||||||
db: DatabaseGuard,
|
body: Ruma<create_alias::v3::Request>,
|
||||||
body: Ruma<create_alias::v3::IncomingRequest>,
|
|
||||||
) -> Result<create_alias::v3::Response> {
|
) -> Result<create_alias::v3::Response> {
|
||||||
if body.room_alias.server_name() != db.globals.server_name() {
|
if body.room_alias.server_name() != services().globals.server_name() {
|
||||||
return Err(Error::BadRequest(
|
return Err(Error::BadRequest(
|
||||||
ErrorKind::InvalidParam,
|
ErrorKind::InvalidParam,
|
||||||
"Alias is from another server.",
|
"Alias is from another server.",
|
||||||
));
|
));
|
||||||
}
|
}
|
||||||
|
|
||||||
if db.rooms.id_from_alias(&body.room_alias)?.is_some() {
|
if services()
|
||||||
|
.rooms
|
||||||
|
.alias
|
||||||
|
.resolve_local_alias(&body.room_alias)?
|
||||||
|
.is_some()
|
||||||
|
{
|
||||||
return Err(Error::Conflict("Alias already exists."));
|
return Err(Error::Conflict("Alias already exists."));
|
||||||
}
|
}
|
||||||
|
|
||||||
db.rooms
|
services()
|
||||||
.set_alias(&body.room_alias, Some(&body.room_id), &db.globals)?;
|
.rooms
|
||||||
|
.alias
|
||||||
db.flush()?;
|
.set_alias(&body.room_alias, &body.room_id)?;
|
||||||
|
|
||||||
Ok(create_alias::v3::Response::new())
|
Ok(create_alias::v3::Response::new())
|
||||||
}
|
}
|
||||||
|
@ -45,22 +50,19 @@ pub async fn create_alias_route(
|
||||||
/// - TODO: additional access control checks
|
/// - TODO: additional access control checks
|
||||||
/// - TODO: Update canonical alias event
|
/// - TODO: Update canonical alias event
|
||||||
pub async fn delete_alias_route(
|
pub async fn delete_alias_route(
|
||||||
db: DatabaseGuard,
|
body: Ruma<delete_alias::v3::Request>,
|
||||||
body: Ruma<delete_alias::v3::IncomingRequest>,
|
|
||||||
) -> Result<delete_alias::v3::Response> {
|
) -> Result<delete_alias::v3::Response> {
|
||||||
if body.room_alias.server_name() != db.globals.server_name() {
|
if body.room_alias.server_name() != services().globals.server_name() {
|
||||||
return Err(Error::BadRequest(
|
return Err(Error::BadRequest(
|
||||||
ErrorKind::InvalidParam,
|
ErrorKind::InvalidParam,
|
||||||
"Alias is from another server.",
|
"Alias is from another server.",
|
||||||
));
|
));
|
||||||
}
|
}
|
||||||
|
|
||||||
db.rooms.set_alias(&body.room_alias, None, &db.globals)?;
|
services().rooms.alias.remove_alias(&body.room_alias)?;
|
||||||
|
|
||||||
// TODO: update alt_aliases?
|
// TODO: update alt_aliases?
|
||||||
|
|
||||||
db.flush()?;
|
|
||||||
|
|
||||||
Ok(delete_alias::v3::Response::new())
|
Ok(delete_alias::v3::Response::new())
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -70,37 +72,36 @@ pub async fn delete_alias_route(
|
||||||
///
|
///
|
||||||
/// - TODO: Suggest more servers to join via
|
/// - TODO: Suggest more servers to join via
|
||||||
pub async fn get_alias_route(
|
pub async fn get_alias_route(
|
||||||
db: DatabaseGuard,
|
body: Ruma<get_alias::v3::Request>,
|
||||||
body: Ruma<get_alias::v3::IncomingRequest>,
|
|
||||||
) -> Result<get_alias::v3::Response> {
|
) -> Result<get_alias::v3::Response> {
|
||||||
get_alias_helper(&db, &body.room_alias).await
|
get_alias_helper(body.body.room_alias).await
|
||||||
}
|
}
|
||||||
|
|
||||||
pub(crate) async fn get_alias_helper(
|
pub(crate) async fn get_alias_helper(
|
||||||
db: &Database,
|
room_alias: OwnedRoomAliasId,
|
||||||
room_alias: &RoomAliasId,
|
|
||||||
) -> Result<get_alias::v3::Response> {
|
) -> Result<get_alias::v3::Response> {
|
||||||
if room_alias.server_name() != db.globals.server_name() {
|
if room_alias.server_name() != services().globals.server_name() {
|
||||||
let response = db
|
let response = services()
|
||||||
.sending
|
.sending
|
||||||
.send_federation_request(
|
.send_federation_request(
|
||||||
&db.globals,
|
|
||||||
room_alias.server_name(),
|
room_alias.server_name(),
|
||||||
federation::query::get_room_information::v1::Request { room_alias },
|
federation::query::get_room_information::v1::Request {
|
||||||
|
room_alias: room_alias.to_owned(),
|
||||||
|
},
|
||||||
)
|
)
|
||||||
.await?;
|
.await?;
|
||||||
|
|
||||||
return Ok(get_alias::v3::Response::new(
|
let mut servers = response.servers;
|
||||||
response.room_id,
|
servers.shuffle(&mut rand::thread_rng());
|
||||||
response.servers,
|
|
||||||
));
|
return Ok(get_alias::v3::Response::new(response.room_id, servers));
|
||||||
}
|
}
|
||||||
|
|
||||||
let mut room_id = None;
|
let mut room_id = None;
|
||||||
match db.rooms.id_from_alias(room_alias)? {
|
match services().rooms.alias.resolve_local_alias(&room_alias)? {
|
||||||
Some(r) => room_id = Some(r),
|
Some(r) => room_id = Some(r),
|
||||||
None => {
|
None => {
|
||||||
for (_id, registration) in db.appservice.all()? {
|
for (_id, registration) in services().appservice.all()? {
|
||||||
let aliases = registration
|
let aliases = registration
|
||||||
.get("namespaces")
|
.get("namespaces")
|
||||||
.and_then(|ns| ns.get("aliases"))
|
.and_then(|ns| ns.get("aliases"))
|
||||||
|
@ -115,19 +116,26 @@ pub(crate) async fn get_alias_helper(
|
||||||
if aliases
|
if aliases
|
||||||
.iter()
|
.iter()
|
||||||
.any(|aliases| aliases.is_match(room_alias.as_str()))
|
.any(|aliases| aliases.is_match(room_alias.as_str()))
|
||||||
&& db
|
&& services()
|
||||||
.sending
|
.sending
|
||||||
.send_appservice_request(
|
.send_appservice_request(
|
||||||
&db.globals,
|
|
||||||
registration,
|
registration,
|
||||||
appservice::query::query_room_alias::v1::Request { room_alias },
|
appservice::query::query_room_alias::v1::Request {
|
||||||
|
room_alias: room_alias.clone(),
|
||||||
|
},
|
||||||
)
|
)
|
||||||
.await
|
.await
|
||||||
.is_ok()
|
.is_ok()
|
||||||
{
|
{
|
||||||
room_id = Some(db.rooms.id_from_alias(room_alias)?.ok_or_else(|| {
|
room_id = Some(
|
||||||
|
services()
|
||||||
|
.rooms
|
||||||
|
.alias
|
||||||
|
.resolve_local_alias(&room_alias)?
|
||||||
|
.ok_or_else(|| {
|
||||||
Error::bad_config("Appservice lied to us. Room does not exist.")
|
Error::bad_config("Appservice lied to us. Room does not exist.")
|
||||||
})?);
|
})?,
|
||||||
|
);
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -146,6 +154,6 @@ pub(crate) async fn get_alias_helper(
|
||||||
|
|
||||||
Ok(get_alias::v3::Response::new(
|
Ok(get_alias::v3::Response::new(
|
||||||
room_id,
|
room_id,
|
||||||
vec![db.globals.server_name().to_owned()],
|
vec![services().globals.server_name().to_owned()],
|
||||||
))
|
))
|
||||||
}
|
}
|
|
@ -1,4 +1,4 @@
|
||||||
use crate::{database::DatabaseGuard, Error, Result, Ruma};
|
use crate::{services, Error, Result, Ruma};
|
||||||
use ruma::api::client::{
|
use ruma::api::client::{
|
||||||
backup::{
|
backup::{
|
||||||
add_backup_keys, add_backup_keys_for_room, add_backup_keys_for_session,
|
add_backup_keys, add_backup_keys_for_room, add_backup_keys_for_session,
|
||||||
|
@ -14,15 +14,12 @@ use ruma::api::client::{
|
||||||
///
|
///
|
||||||
/// Creates a new backup.
|
/// Creates a new backup.
|
||||||
pub async fn create_backup_version_route(
|
pub async fn create_backup_version_route(
|
||||||
db: DatabaseGuard,
|
|
||||||
body: Ruma<create_backup_version::v3::Request>,
|
body: Ruma<create_backup_version::v3::Request>,
|
||||||
) -> Result<create_backup_version::v3::Response> {
|
) -> Result<create_backup_version::v3::Response> {
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
let version = db
|
let version = services()
|
||||||
.key_backups
|
.key_backups
|
||||||
.create_backup(sender_user, &body.algorithm, &db.globals)?;
|
.create_backup(sender_user, &body.algorithm)?;
|
||||||
|
|
||||||
db.flush()?;
|
|
||||||
|
|
||||||
Ok(create_backup_version::v3::Response { version })
|
Ok(create_backup_version::v3::Response { version })
|
||||||
}
|
}
|
||||||
|
@ -31,14 +28,12 @@ pub async fn create_backup_version_route(
|
||||||
///
|
///
|
||||||
/// Update information about an existing backup. Only `auth_data` can be modified.
|
/// Update information about an existing backup. Only `auth_data` can be modified.
|
||||||
pub async fn update_backup_version_route(
|
pub async fn update_backup_version_route(
|
||||||
db: DatabaseGuard,
|
body: Ruma<update_backup_version::v3::Request>,
|
||||||
body: Ruma<update_backup_version::v3::IncomingRequest>,
|
|
||||||
) -> Result<update_backup_version::v3::Response> {
|
) -> Result<update_backup_version::v3::Response> {
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
db.key_backups
|
services()
|
||||||
.update_backup(sender_user, &body.version, &body.algorithm, &db.globals)?;
|
.key_backups
|
||||||
|
.update_backup(sender_user, &body.version, &body.algorithm)?;
|
||||||
db.flush()?;
|
|
||||||
|
|
||||||
Ok(update_backup_version::v3::Response {})
|
Ok(update_backup_version::v3::Response {})
|
||||||
}
|
}
|
||||||
|
@ -47,13 +42,12 @@ pub async fn update_backup_version_route(
|
||||||
///
|
///
|
||||||
/// Get information about the latest backup version.
|
/// Get information about the latest backup version.
|
||||||
pub async fn get_latest_backup_info_route(
|
pub async fn get_latest_backup_info_route(
|
||||||
db: DatabaseGuard,
|
|
||||||
body: Ruma<get_latest_backup_info::v3::Request>,
|
body: Ruma<get_latest_backup_info::v3::Request>,
|
||||||
) -> Result<get_latest_backup_info::v3::Response> {
|
) -> Result<get_latest_backup_info::v3::Response> {
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
let (version, algorithm) =
|
let (version, algorithm) = services()
|
||||||
db.key_backups
|
.key_backups
|
||||||
.get_latest_backup(sender_user)?
|
.get_latest_backup(sender_user)?
|
||||||
.ok_or(Error::BadRequest(
|
.ok_or(Error::BadRequest(
|
||||||
ErrorKind::NotFound,
|
ErrorKind::NotFound,
|
||||||
|
@ -62,8 +56,8 @@ pub async fn get_latest_backup_info_route(
|
||||||
|
|
||||||
Ok(get_latest_backup_info::v3::Response {
|
Ok(get_latest_backup_info::v3::Response {
|
||||||
algorithm,
|
algorithm,
|
||||||
count: (db.key_backups.count_keys(sender_user, &version)? as u32).into(),
|
count: (services().key_backups.count_keys(sender_user, &version)? as u32).into(),
|
||||||
etag: db.key_backups.get_etag(sender_user, &version)?,
|
etag: services().key_backups.get_etag(sender_user, &version)?,
|
||||||
version,
|
version,
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
@ -72,11 +66,10 @@ pub async fn get_latest_backup_info_route(
|
||||||
///
|
///
|
||||||
/// Get information about an existing backup.
|
/// Get information about an existing backup.
|
||||||
pub async fn get_backup_info_route(
|
pub async fn get_backup_info_route(
|
||||||
db: DatabaseGuard,
|
body: Ruma<get_backup_info::v3::Request>,
|
||||||
body: Ruma<get_backup_info::v3::IncomingRequest>,
|
|
||||||
) -> Result<get_backup_info::v3::Response> {
|
) -> Result<get_backup_info::v3::Response> {
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
let algorithm = db
|
let algorithm = services()
|
||||||
.key_backups
|
.key_backups
|
||||||
.get_backup(sender_user, &body.version)?
|
.get_backup(sender_user, &body.version)?
|
||||||
.ok_or(Error::BadRequest(
|
.ok_or(Error::BadRequest(
|
||||||
|
@ -86,8 +79,13 @@ pub async fn get_backup_info_route(
|
||||||
|
|
||||||
Ok(get_backup_info::v3::Response {
|
Ok(get_backup_info::v3::Response {
|
||||||
algorithm,
|
algorithm,
|
||||||
count: (db.key_backups.count_keys(sender_user, &body.version)? as u32).into(),
|
count: (services()
|
||||||
etag: db.key_backups.get_etag(sender_user, &body.version)?,
|
.key_backups
|
||||||
|
.count_keys(sender_user, &body.version)? as u32)
|
||||||
|
.into(),
|
||||||
|
etag: services()
|
||||||
|
.key_backups
|
||||||
|
.get_etag(sender_user, &body.version)?,
|
||||||
version: body.version.to_owned(),
|
version: body.version.to_owned(),
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
@ -98,14 +96,13 @@ pub async fn get_backup_info_route(
|
||||||
///
|
///
|
||||||
/// - Deletes both information about the backup, as well as all key data related to the backup
|
/// - Deletes both information about the backup, as well as all key data related to the backup
|
||||||
pub async fn delete_backup_version_route(
|
pub async fn delete_backup_version_route(
|
||||||
db: DatabaseGuard,
|
body: Ruma<delete_backup_version::v3::Request>,
|
||||||
body: Ruma<delete_backup_version::v3::IncomingRequest>,
|
|
||||||
) -> Result<delete_backup_version::v3::Response> {
|
) -> Result<delete_backup_version::v3::Response> {
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
db.key_backups.delete_backup(sender_user, &body.version)?;
|
services()
|
||||||
|
.key_backups
|
||||||
db.flush()?;
|
.delete_backup(sender_user, &body.version)?;
|
||||||
|
|
||||||
Ok(delete_backup_version::v3::Response {})
|
Ok(delete_backup_version::v3::Response {})
|
||||||
}
|
}
|
||||||
|
@ -118,13 +115,12 @@ pub async fn delete_backup_version_route(
|
||||||
/// - Adds the keys to the backup
|
/// - Adds the keys to the backup
|
||||||
/// - Returns the new number of keys in this backup and the etag
|
/// - Returns the new number of keys in this backup and the etag
|
||||||
pub async fn add_backup_keys_route(
|
pub async fn add_backup_keys_route(
|
||||||
db: DatabaseGuard,
|
body: Ruma<add_backup_keys::v3::Request>,
|
||||||
body: Ruma<add_backup_keys::v3::IncomingRequest>,
|
|
||||||
) -> Result<add_backup_keys::v3::Response> {
|
) -> Result<add_backup_keys::v3::Response> {
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
if Some(&body.version)
|
if Some(&body.version)
|
||||||
!= db
|
!= services()
|
||||||
.key_backups
|
.key_backups
|
||||||
.get_latest_backup_version(sender_user)?
|
.get_latest_backup_version(sender_user)?
|
||||||
.as_ref()
|
.as_ref()
|
||||||
|
@ -137,22 +133,24 @@ pub async fn add_backup_keys_route(
|
||||||
|
|
||||||
for (room_id, room) in &body.rooms {
|
for (room_id, room) in &body.rooms {
|
||||||
for (session_id, key_data) in &room.sessions {
|
for (session_id, key_data) in &room.sessions {
|
||||||
db.key_backups.add_key(
|
services().key_backups.add_key(
|
||||||
sender_user,
|
sender_user,
|
||||||
&body.version,
|
&body.version,
|
||||||
room_id,
|
room_id,
|
||||||
session_id,
|
session_id,
|
||||||
key_data,
|
key_data,
|
||||||
&db.globals,
|
|
||||||
)?
|
)?
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
db.flush()?;
|
|
||||||
|
|
||||||
Ok(add_backup_keys::v3::Response {
|
Ok(add_backup_keys::v3::Response {
|
||||||
count: (db.key_backups.count_keys(sender_user, &body.version)? as u32).into(),
|
count: (services()
|
||||||
etag: db.key_backups.get_etag(sender_user, &body.version)?,
|
.key_backups
|
||||||
|
.count_keys(sender_user, &body.version)? as u32)
|
||||||
|
.into(),
|
||||||
|
etag: services()
|
||||||
|
.key_backups
|
||||||
|
.get_etag(sender_user, &body.version)?,
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -164,13 +162,12 @@ pub async fn add_backup_keys_route(
|
||||||
/// - Adds the keys to the backup
|
/// - Adds the keys to the backup
|
||||||
/// - Returns the new number of keys in this backup and the etag
|
/// - Returns the new number of keys in this backup and the etag
|
||||||
pub async fn add_backup_keys_for_room_route(
|
pub async fn add_backup_keys_for_room_route(
|
||||||
db: DatabaseGuard,
|
body: Ruma<add_backup_keys_for_room::v3::Request>,
|
||||||
body: Ruma<add_backup_keys_for_room::v3::IncomingRequest>,
|
|
||||||
) -> Result<add_backup_keys_for_room::v3::Response> {
|
) -> Result<add_backup_keys_for_room::v3::Response> {
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
if Some(&body.version)
|
if Some(&body.version)
|
||||||
!= db
|
!= services()
|
||||||
.key_backups
|
.key_backups
|
||||||
.get_latest_backup_version(sender_user)?
|
.get_latest_backup_version(sender_user)?
|
||||||
.as_ref()
|
.as_ref()
|
||||||
|
@ -182,21 +179,23 @@ pub async fn add_backup_keys_for_room_route(
|
||||||
}
|
}
|
||||||
|
|
||||||
for (session_id, key_data) in &body.sessions {
|
for (session_id, key_data) in &body.sessions {
|
||||||
db.key_backups.add_key(
|
services().key_backups.add_key(
|
||||||
sender_user,
|
sender_user,
|
||||||
&body.version,
|
&body.version,
|
||||||
&body.room_id,
|
&body.room_id,
|
||||||
session_id,
|
session_id,
|
||||||
key_data,
|
key_data,
|
||||||
&db.globals,
|
|
||||||
)?
|
)?
|
||||||
}
|
}
|
||||||
|
|
||||||
db.flush()?;
|
|
||||||
|
|
||||||
Ok(add_backup_keys_for_room::v3::Response {
|
Ok(add_backup_keys_for_room::v3::Response {
|
||||||
count: (db.key_backups.count_keys(sender_user, &body.version)? as u32).into(),
|
count: (services()
|
||||||
etag: db.key_backups.get_etag(sender_user, &body.version)?,
|
.key_backups
|
||||||
|
.count_keys(sender_user, &body.version)? as u32)
|
||||||
|
.into(),
|
||||||
|
etag: services()
|
||||||
|
.key_backups
|
||||||
|
.get_etag(sender_user, &body.version)?,
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -208,13 +207,12 @@ pub async fn add_backup_keys_for_room_route(
|
||||||
/// - Adds the keys to the backup
|
/// - Adds the keys to the backup
|
||||||
/// - Returns the new number of keys in this backup and the etag
|
/// - Returns the new number of keys in this backup and the etag
|
||||||
pub async fn add_backup_keys_for_session_route(
|
pub async fn add_backup_keys_for_session_route(
|
||||||
db: DatabaseGuard,
|
body: Ruma<add_backup_keys_for_session::v3::Request>,
|
||||||
body: Ruma<add_backup_keys_for_session::v3::IncomingRequest>,
|
|
||||||
) -> Result<add_backup_keys_for_session::v3::Response> {
|
) -> Result<add_backup_keys_for_session::v3::Response> {
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
if Some(&body.version)
|
if Some(&body.version)
|
||||||
!= db
|
!= services()
|
||||||
.key_backups
|
.key_backups
|
||||||
.get_latest_backup_version(sender_user)?
|
.get_latest_backup_version(sender_user)?
|
||||||
.as_ref()
|
.as_ref()
|
||||||
|
@ -225,20 +223,22 @@ pub async fn add_backup_keys_for_session_route(
|
||||||
));
|
));
|
||||||
}
|
}
|
||||||
|
|
||||||
db.key_backups.add_key(
|
services().key_backups.add_key(
|
||||||
sender_user,
|
sender_user,
|
||||||
&body.version,
|
&body.version,
|
||||||
&body.room_id,
|
&body.room_id,
|
||||||
&body.session_id,
|
&body.session_id,
|
||||||
&body.session_data,
|
&body.session_data,
|
||||||
&db.globals,
|
|
||||||
)?;
|
)?;
|
||||||
|
|
||||||
db.flush()?;
|
|
||||||
|
|
||||||
Ok(add_backup_keys_for_session::v3::Response {
|
Ok(add_backup_keys_for_session::v3::Response {
|
||||||
count: (db.key_backups.count_keys(sender_user, &body.version)? as u32).into(),
|
count: (services()
|
||||||
etag: db.key_backups.get_etag(sender_user, &body.version)?,
|
.key_backups
|
||||||
|
.count_keys(sender_user, &body.version)? as u32)
|
||||||
|
.into(),
|
||||||
|
etag: services()
|
||||||
|
.key_backups
|
||||||
|
.get_etag(sender_user, &body.version)?,
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -246,12 +246,11 @@ pub async fn add_backup_keys_for_session_route(
|
||||||
///
|
///
|
||||||
/// Retrieves all keys from the backup.
|
/// Retrieves all keys from the backup.
|
||||||
pub async fn get_backup_keys_route(
|
pub async fn get_backup_keys_route(
|
||||||
db: DatabaseGuard,
|
body: Ruma<get_backup_keys::v3::Request>,
|
||||||
body: Ruma<get_backup_keys::v3::IncomingRequest>,
|
|
||||||
) -> Result<get_backup_keys::v3::Response> {
|
) -> Result<get_backup_keys::v3::Response> {
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
let rooms = db.key_backups.get_all(sender_user, &body.version)?;
|
let rooms = services().key_backups.get_all(sender_user, &body.version)?;
|
||||||
|
|
||||||
Ok(get_backup_keys::v3::Response { rooms })
|
Ok(get_backup_keys::v3::Response { rooms })
|
||||||
}
|
}
|
||||||
|
@ -260,12 +259,11 @@ pub async fn get_backup_keys_route(
|
||||||
///
|
///
|
||||||
/// Retrieves all keys from the backup for a given room.
|
/// Retrieves all keys from the backup for a given room.
|
||||||
pub async fn get_backup_keys_for_room_route(
|
pub async fn get_backup_keys_for_room_route(
|
||||||
db: DatabaseGuard,
|
body: Ruma<get_backup_keys_for_room::v3::Request>,
|
||||||
body: Ruma<get_backup_keys_for_room::v3::IncomingRequest>,
|
|
||||||
) -> Result<get_backup_keys_for_room::v3::Response> {
|
) -> Result<get_backup_keys_for_room::v3::Response> {
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
let sessions = db
|
let sessions = services()
|
||||||
.key_backups
|
.key_backups
|
||||||
.get_room(sender_user, &body.version, &body.room_id)?;
|
.get_room(sender_user, &body.version, &body.room_id)?;
|
||||||
|
|
||||||
|
@ -276,12 +274,11 @@ pub async fn get_backup_keys_for_room_route(
|
||||||
///
|
///
|
||||||
/// Retrieves a key from the backup.
|
/// Retrieves a key from the backup.
|
||||||
pub async fn get_backup_keys_for_session_route(
|
pub async fn get_backup_keys_for_session_route(
|
||||||
db: DatabaseGuard,
|
body: Ruma<get_backup_keys_for_session::v3::Request>,
|
||||||
body: Ruma<get_backup_keys_for_session::v3::IncomingRequest>,
|
|
||||||
) -> Result<get_backup_keys_for_session::v3::Response> {
|
) -> Result<get_backup_keys_for_session::v3::Response> {
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
let key_data = db
|
let key_data = services()
|
||||||
.key_backups
|
.key_backups
|
||||||
.get_session(sender_user, &body.version, &body.room_id, &body.session_id)?
|
.get_session(sender_user, &body.version, &body.room_id, &body.session_id)?
|
||||||
.ok_or(Error::BadRequest(
|
.ok_or(Error::BadRequest(
|
||||||
|
@ -296,18 +293,22 @@ pub async fn get_backup_keys_for_session_route(
|
||||||
///
|
///
|
||||||
/// Delete the keys from the backup.
|
/// Delete the keys from the backup.
|
||||||
pub async fn delete_backup_keys_route(
|
pub async fn delete_backup_keys_route(
|
||||||
db: DatabaseGuard,
|
body: Ruma<delete_backup_keys::v3::Request>,
|
||||||
body: Ruma<delete_backup_keys::v3::IncomingRequest>,
|
|
||||||
) -> Result<delete_backup_keys::v3::Response> {
|
) -> Result<delete_backup_keys::v3::Response> {
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
db.key_backups.delete_all_keys(sender_user, &body.version)?;
|
services()
|
||||||
|
.key_backups
|
||||||
db.flush()?;
|
.delete_all_keys(sender_user, &body.version)?;
|
||||||
|
|
||||||
Ok(delete_backup_keys::v3::Response {
|
Ok(delete_backup_keys::v3::Response {
|
||||||
count: (db.key_backups.count_keys(sender_user, &body.version)? as u32).into(),
|
count: (services()
|
||||||
etag: db.key_backups.get_etag(sender_user, &body.version)?,
|
.key_backups
|
||||||
|
.count_keys(sender_user, &body.version)? as u32)
|
||||||
|
.into(),
|
||||||
|
etag: services()
|
||||||
|
.key_backups
|
||||||
|
.get_etag(sender_user, &body.version)?,
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -315,19 +316,22 @@ pub async fn delete_backup_keys_route(
|
||||||
///
|
///
|
||||||
/// Delete the keys from the backup for a given room.
|
/// Delete the keys from the backup for a given room.
|
||||||
pub async fn delete_backup_keys_for_room_route(
|
pub async fn delete_backup_keys_for_room_route(
|
||||||
db: DatabaseGuard,
|
body: Ruma<delete_backup_keys_for_room::v3::Request>,
|
||||||
body: Ruma<delete_backup_keys_for_room::v3::IncomingRequest>,
|
|
||||||
) -> Result<delete_backup_keys_for_room::v3::Response> {
|
) -> Result<delete_backup_keys_for_room::v3::Response> {
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
db.key_backups
|
services()
|
||||||
|
.key_backups
|
||||||
.delete_room_keys(sender_user, &body.version, &body.room_id)?;
|
.delete_room_keys(sender_user, &body.version, &body.room_id)?;
|
||||||
|
|
||||||
db.flush()?;
|
|
||||||
|
|
||||||
Ok(delete_backup_keys_for_room::v3::Response {
|
Ok(delete_backup_keys_for_room::v3::Response {
|
||||||
count: (db.key_backups.count_keys(sender_user, &body.version)? as u32).into(),
|
count: (services()
|
||||||
etag: db.key_backups.get_etag(sender_user, &body.version)?,
|
.key_backups
|
||||||
|
.count_keys(sender_user, &body.version)? as u32)
|
||||||
|
.into(),
|
||||||
|
etag: services()
|
||||||
|
.key_backups
|
||||||
|
.get_etag(sender_user, &body.version)?,
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -335,18 +339,24 @@ pub async fn delete_backup_keys_for_room_route(
|
||||||
///
|
///
|
||||||
/// Delete a key from the backup.
|
/// Delete a key from the backup.
|
||||||
pub async fn delete_backup_keys_for_session_route(
|
pub async fn delete_backup_keys_for_session_route(
|
||||||
db: DatabaseGuard,
|
body: Ruma<delete_backup_keys_for_session::v3::Request>,
|
||||||
body: Ruma<delete_backup_keys_for_session::v3::IncomingRequest>,
|
|
||||||
) -> Result<delete_backup_keys_for_session::v3::Response> {
|
) -> Result<delete_backup_keys_for_session::v3::Response> {
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
db.key_backups
|
services().key_backups.delete_room_key(
|
||||||
.delete_room_key(sender_user, &body.version, &body.room_id, &body.session_id)?;
|
sender_user,
|
||||||
|
&body.version,
|
||||||
db.flush()?;
|
&body.room_id,
|
||||||
|
&body.session_id,
|
||||||
|
)?;
|
||||||
|
|
||||||
Ok(delete_backup_keys_for_session::v3::Response {
|
Ok(delete_backup_keys_for_session::v3::Response {
|
||||||
count: (db.key_backups.count_keys(sender_user, &body.version)? as u32).into(),
|
count: (services()
|
||||||
etag: db.key_backups.get_etag(sender_user, &body.version)?,
|
.key_backups
|
||||||
|
.count_keys(sender_user, &body.version)? as u32)
|
||||||
|
.into(),
|
||||||
|
etag: services()
|
||||||
|
.key_backups
|
||||||
|
.get_etag(sender_user, &body.version)?,
|
||||||
})
|
})
|
||||||
}
|
}
|
|
@ -1,4 +1,4 @@
|
||||||
use crate::{database::DatabaseGuard, Result, Ruma};
|
use crate::{services, Result, Ruma};
|
||||||
use ruma::api::client::discovery::get_capabilities::{
|
use ruma::api::client::discovery::get_capabilities::{
|
||||||
self, Capabilities, RoomVersionStability, RoomVersionsCapability,
|
self, Capabilities, RoomVersionStability, RoomVersionsCapability,
|
||||||
};
|
};
|
||||||
|
@ -8,26 +8,19 @@ use std::collections::BTreeMap;
|
||||||
///
|
///
|
||||||
/// Get information on the supported feature set and other relevent capabilities of this server.
|
/// Get information on the supported feature set and other relevent capabilities of this server.
|
||||||
pub async fn get_capabilities_route(
|
pub async fn get_capabilities_route(
|
||||||
db: DatabaseGuard,
|
_body: Ruma<get_capabilities::v3::Request>,
|
||||||
_body: Ruma<get_capabilities::v3::IncomingRequest>,
|
|
||||||
) -> Result<get_capabilities::v3::Response> {
|
) -> Result<get_capabilities::v3::Response> {
|
||||||
let mut available = BTreeMap::new();
|
let mut available = BTreeMap::new();
|
||||||
if db.globals.allow_unstable_room_versions() {
|
for room_version in &services().globals.unstable_room_versions {
|
||||||
for room_version in &db.globals.unstable_room_versions {
|
|
||||||
available.insert(room_version.clone(), RoomVersionStability::Stable);
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
for room_version in &db.globals.unstable_room_versions {
|
|
||||||
available.insert(room_version.clone(), RoomVersionStability::Unstable);
|
available.insert(room_version.clone(), RoomVersionStability::Unstable);
|
||||||
}
|
}
|
||||||
}
|
for room_version in &services().globals.stable_room_versions {
|
||||||
for room_version in &db.globals.stable_room_versions {
|
|
||||||
available.insert(room_version.clone(), RoomVersionStability::Stable);
|
available.insert(room_version.clone(), RoomVersionStability::Stable);
|
||||||
}
|
}
|
||||||
|
|
||||||
let mut capabilities = Capabilities::new();
|
let mut capabilities = Capabilities::new();
|
||||||
capabilities.room_versions = RoomVersionsCapability {
|
capabilities.room_versions = RoomVersionsCapability {
|
||||||
default: db.globals.default_room_version(),
|
default: services().globals.default_room_version(),
|
||||||
available,
|
available,
|
||||||
};
|
};
|
||||||
|
|
|
@ -1,4 +1,4 @@
|
||||||
use crate::{database::DatabaseGuard, Error, Result, Ruma};
|
use crate::{services, Error, Result, Ruma};
|
||||||
use ruma::{
|
use ruma::{
|
||||||
api::client::{
|
api::client::{
|
||||||
config::{
|
config::{
|
||||||
|
@ -17,8 +17,7 @@ use serde_json::{json, value::RawValue as RawJsonValue};
|
||||||
///
|
///
|
||||||
/// Sets some account data for the sender user.
|
/// Sets some account data for the sender user.
|
||||||
pub async fn set_global_account_data_route(
|
pub async fn set_global_account_data_route(
|
||||||
db: DatabaseGuard,
|
body: Ruma<set_global_account_data::v3::Request>,
|
||||||
body: Ruma<set_global_account_data::v3::IncomingRequest>,
|
|
||||||
) -> Result<set_global_account_data::v3::Response> {
|
) -> Result<set_global_account_data::v3::Response> {
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
|
@ -27,7 +26,7 @@ pub async fn set_global_account_data_route(
|
||||||
|
|
||||||
let event_type = body.event_type.to_string();
|
let event_type = body.event_type.to_string();
|
||||||
|
|
||||||
db.account_data.update(
|
services().account_data.update(
|
||||||
None,
|
None,
|
||||||
sender_user,
|
sender_user,
|
||||||
event_type.clone().into(),
|
event_type.clone().into(),
|
||||||
|
@ -35,11 +34,8 @@ pub async fn set_global_account_data_route(
|
||||||
"type": event_type,
|
"type": event_type,
|
||||||
"content": data,
|
"content": data,
|
||||||
}),
|
}),
|
||||||
&db.globals,
|
|
||||||
)?;
|
)?;
|
||||||
|
|
||||||
db.flush()?;
|
|
||||||
|
|
||||||
Ok(set_global_account_data::v3::Response {})
|
Ok(set_global_account_data::v3::Response {})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -47,8 +43,7 @@ pub async fn set_global_account_data_route(
|
||||||
///
|
///
|
||||||
/// Sets some room account data for the sender user.
|
/// Sets some room account data for the sender user.
|
||||||
pub async fn set_room_account_data_route(
|
pub async fn set_room_account_data_route(
|
||||||
db: DatabaseGuard,
|
body: Ruma<set_room_account_data::v3::Request>,
|
||||||
body: Ruma<set_room_account_data::v3::IncomingRequest>,
|
|
||||||
) -> Result<set_room_account_data::v3::Response> {
|
) -> Result<set_room_account_data::v3::Response> {
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
|
@ -57,7 +52,7 @@ pub async fn set_room_account_data_route(
|
||||||
|
|
||||||
let event_type = body.event_type.to_string();
|
let event_type = body.event_type.to_string();
|
||||||
|
|
||||||
db.account_data.update(
|
services().account_data.update(
|
||||||
Some(&body.room_id),
|
Some(&body.room_id),
|
||||||
sender_user,
|
sender_user,
|
||||||
event_type.clone().into(),
|
event_type.clone().into(),
|
||||||
|
@ -65,11 +60,8 @@ pub async fn set_room_account_data_route(
|
||||||
"type": event_type,
|
"type": event_type,
|
||||||
"content": data,
|
"content": data,
|
||||||
}),
|
}),
|
||||||
&db.globals,
|
|
||||||
)?;
|
)?;
|
||||||
|
|
||||||
db.flush()?;
|
|
||||||
|
|
||||||
Ok(set_room_account_data::v3::Response {})
|
Ok(set_room_account_data::v3::Response {})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -77,14 +69,13 @@ pub async fn set_room_account_data_route(
|
||||||
///
|
///
|
||||||
/// Gets some account data for the sender user.
|
/// Gets some account data for the sender user.
|
||||||
pub async fn get_global_account_data_route(
|
pub async fn get_global_account_data_route(
|
||||||
db: DatabaseGuard,
|
body: Ruma<get_global_account_data::v3::Request>,
|
||||||
body: Ruma<get_global_account_data::v3::IncomingRequest>,
|
|
||||||
) -> Result<get_global_account_data::v3::Response> {
|
) -> Result<get_global_account_data::v3::Response> {
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
let event: Box<RawJsonValue> = db
|
let event: Box<RawJsonValue> = services()
|
||||||
.account_data
|
.account_data
|
||||||
.get(None, sender_user, body.event_type.clone().into())?
|
.get(None, sender_user, body.event_type.to_string().into())?
|
||||||
.ok_or(Error::BadRequest(ErrorKind::NotFound, "Data not found."))?;
|
.ok_or(Error::BadRequest(ErrorKind::NotFound, "Data not found."))?;
|
||||||
|
|
||||||
let account_data = serde_json::from_str::<ExtractGlobalEventContent>(event.get())
|
let account_data = serde_json::from_str::<ExtractGlobalEventContent>(event.get())
|
||||||
|
@ -98,18 +89,13 @@ pub async fn get_global_account_data_route(
|
||||||
///
|
///
|
||||||
/// Gets some room account data for the sender user.
|
/// Gets some room account data for the sender user.
|
||||||
pub async fn get_room_account_data_route(
|
pub async fn get_room_account_data_route(
|
||||||
db: DatabaseGuard,
|
body: Ruma<get_room_account_data::v3::Request>,
|
||||||
body: Ruma<get_room_account_data::v3::IncomingRequest>,
|
|
||||||
) -> Result<get_room_account_data::v3::Response> {
|
) -> Result<get_room_account_data::v3::Response> {
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
let event: Box<RawJsonValue> = db
|
let event: Box<RawJsonValue> = services()
|
||||||
.account_data
|
.account_data
|
||||||
.get(
|
.get(Some(&body.room_id), sender_user, body.event_type.clone())?
|
||||||
Some(&body.room_id),
|
|
||||||
sender_user,
|
|
||||||
body.event_type.clone().into(),
|
|
||||||
)?
|
|
||||||
.ok_or(Error::BadRequest(ErrorKind::NotFound, "Data not found."))?;
|
.ok_or(Error::BadRequest(ErrorKind::NotFound, "Data not found."))?;
|
||||||
|
|
||||||
let account_data = serde_json::from_str::<ExtractRoomEventContent>(event.get())
|
let account_data = serde_json::from_str::<ExtractRoomEventContent>(event.get())
|
|
@ -1,9 +1,9 @@
|
||||||
use crate::{database::DatabaseGuard, Error, Result, Ruma};
|
use crate::{services, Error, Result, Ruma};
|
||||||
use ruma::{
|
use ruma::{
|
||||||
api::client::{context::get_context, error::ErrorKind, filter::LazyLoadOptions},
|
api::client::{context::get_context, error::ErrorKind, filter::LazyLoadOptions},
|
||||||
events::StateEventType,
|
events::StateEventType,
|
||||||
};
|
};
|
||||||
use std::{collections::HashSet, convert::TryFrom};
|
use std::collections::HashSet;
|
||||||
use tracing::error;
|
use tracing::error;
|
||||||
|
|
||||||
/// # `GET /_matrix/client/r0/rooms/{roomId}/context`
|
/// # `GET /_matrix/client/r0/rooms/{roomId}/context`
|
||||||
|
@ -13,8 +13,7 @@ use tracing::error;
|
||||||
/// - Only works if the user is joined (TODO: always allow, but only show events if the user was
|
/// - Only works if the user is joined (TODO: always allow, but only show events if the user was
|
||||||
/// joined, depending on history_visibility)
|
/// joined, depending on history_visibility)
|
||||||
pub async fn get_context_route(
|
pub async fn get_context_route(
|
||||||
db: DatabaseGuard,
|
body: Ruma<get_context::v3::Request>,
|
||||||
body: Ruma<get_context::v3::IncomingRequest>,
|
|
||||||
) -> Result<get_context::v3::Response> {
|
) -> Result<get_context::v3::Response> {
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
let sender_device = body.sender_device.as_ref().expect("user is authenticated");
|
let sender_device = body.sender_device.as_ref().expect("user is authenticated");
|
||||||
|
@ -28,19 +27,20 @@ pub async fn get_context_route(
|
||||||
|
|
||||||
let mut lazy_loaded = HashSet::new();
|
let mut lazy_loaded = HashSet::new();
|
||||||
|
|
||||||
let base_pdu_id = db
|
let base_token = services()
|
||||||
.rooms
|
.rooms
|
||||||
.get_pdu_id(&body.event_id)?
|
.timeline
|
||||||
|
.get_pdu_count(&body.event_id)?
|
||||||
.ok_or(Error::BadRequest(
|
.ok_or(Error::BadRequest(
|
||||||
ErrorKind::NotFound,
|
ErrorKind::NotFound,
|
||||||
"Base event id not found.",
|
"Base event id not found.",
|
||||||
))?;
|
))?;
|
||||||
|
|
||||||
let base_token = db.rooms.pdu_count(&base_pdu_id)?;
|
let base_event =
|
||||||
|
services()
|
||||||
let base_event = db
|
|
||||||
.rooms
|
.rooms
|
||||||
.get_pdu_from_id(&base_pdu_id)?
|
.timeline
|
||||||
|
.get_pdu(&body.event_id)?
|
||||||
.ok_or(Error::BadRequest(
|
.ok_or(Error::BadRequest(
|
||||||
ErrorKind::NotFound,
|
ErrorKind::NotFound,
|
||||||
"Base event not found.",
|
"Base event not found.",
|
||||||
|
@ -48,14 +48,18 @@ pub async fn get_context_route(
|
||||||
|
|
||||||
let room_id = base_event.room_id.clone();
|
let room_id = base_event.room_id.clone();
|
||||||
|
|
||||||
if !db.rooms.is_joined(sender_user, &room_id)? {
|
if !services()
|
||||||
|
.rooms
|
||||||
|
.state_accessor
|
||||||
|
.user_can_see_event(sender_user, &room_id, &body.event_id)?
|
||||||
|
{
|
||||||
return Err(Error::BadRequest(
|
return Err(Error::BadRequest(
|
||||||
ErrorKind::Forbidden,
|
ErrorKind::Forbidden,
|
||||||
"You don't have permission to view this room.",
|
"You don't have permission to view this event.",
|
||||||
));
|
));
|
||||||
}
|
}
|
||||||
|
|
||||||
if !db.rooms.lazy_load_was_sent_before(
|
if !services().rooms.lazy_loading.lazy_load_was_sent_before(
|
||||||
sender_user,
|
sender_user,
|
||||||
sender_device,
|
sender_device,
|
||||||
&room_id,
|
&room_id,
|
||||||
|
@ -65,22 +69,28 @@ pub async fn get_context_route(
|
||||||
lazy_loaded.insert(base_event.sender.as_str().to_owned());
|
lazy_loaded.insert(base_event.sender.as_str().to_owned());
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Use limit with maximum 100
|
||||||
|
let limit = u64::from(body.limit).min(100) as usize;
|
||||||
|
|
||||||
let base_event = base_event.to_room_event();
|
let base_event = base_event.to_room_event();
|
||||||
|
|
||||||
let events_before: Vec<_> = db
|
let events_before: Vec<_> = services()
|
||||||
.rooms
|
.rooms
|
||||||
|
.timeline
|
||||||
.pdus_until(sender_user, &room_id, base_token)?
|
.pdus_until(sender_user, &room_id, base_token)?
|
||||||
.take(
|
.take(limit / 2)
|
||||||
u32::try_from(body.limit).map_err(|_| {
|
|
||||||
Error::BadRequest(ErrorKind::InvalidParam, "Limit value is invalid.")
|
|
||||||
})? as usize
|
|
||||||
/ 2,
|
|
||||||
)
|
|
||||||
.filter_map(|r| r.ok()) // Remove buggy events
|
.filter_map(|r| r.ok()) // Remove buggy events
|
||||||
|
.filter(|(_, pdu)| {
|
||||||
|
services()
|
||||||
|
.rooms
|
||||||
|
.state_accessor
|
||||||
|
.user_can_see_event(sender_user, &room_id, &pdu.event_id)
|
||||||
|
.unwrap_or(false)
|
||||||
|
})
|
||||||
.collect();
|
.collect();
|
||||||
|
|
||||||
for (_, event) in &events_before {
|
for (_, event) in &events_before {
|
||||||
if !db.rooms.lazy_load_was_sent_before(
|
if !services().rooms.lazy_loading.lazy_load_was_sent_before(
|
||||||
sender_user,
|
sender_user,
|
||||||
sender_device,
|
sender_device,
|
||||||
&room_id,
|
&room_id,
|
||||||
|
@ -93,28 +103,31 @@ pub async fn get_context_route(
|
||||||
|
|
||||||
let start_token = events_before
|
let start_token = events_before
|
||||||
.last()
|
.last()
|
||||||
.and_then(|(pdu_id, _)| db.rooms.pdu_count(pdu_id).ok())
|
.map(|(count, _)| count.stringify())
|
||||||
.map(|count| count.to_string());
|
.unwrap_or_else(|| base_token.stringify());
|
||||||
|
|
||||||
let events_before: Vec<_> = events_before
|
let events_before: Vec<_> = events_before
|
||||||
.into_iter()
|
.into_iter()
|
||||||
.map(|(_, pdu)| pdu.to_room_event())
|
.map(|(_, pdu)| pdu.to_room_event())
|
||||||
.collect();
|
.collect();
|
||||||
|
|
||||||
let events_after: Vec<_> = db
|
let events_after: Vec<_> = services()
|
||||||
.rooms
|
.rooms
|
||||||
|
.timeline
|
||||||
.pdus_after(sender_user, &room_id, base_token)?
|
.pdus_after(sender_user, &room_id, base_token)?
|
||||||
.take(
|
.take(limit / 2)
|
||||||
u32::try_from(body.limit).map_err(|_| {
|
|
||||||
Error::BadRequest(ErrorKind::InvalidParam, "Limit value is invalid.")
|
|
||||||
})? as usize
|
|
||||||
/ 2,
|
|
||||||
)
|
|
||||||
.filter_map(|r| r.ok()) // Remove buggy events
|
.filter_map(|r| r.ok()) // Remove buggy events
|
||||||
|
.filter(|(_, pdu)| {
|
||||||
|
services()
|
||||||
|
.rooms
|
||||||
|
.state_accessor
|
||||||
|
.user_can_see_event(sender_user, &room_id, &pdu.event_id)
|
||||||
|
.unwrap_or(false)
|
||||||
|
})
|
||||||
.collect();
|
.collect();
|
||||||
|
|
||||||
for (_, event) in &events_after {
|
for (_, event) in &events_after {
|
||||||
if !db.rooms.lazy_load_was_sent_before(
|
if !services().rooms.lazy_loading.lazy_load_was_sent_before(
|
||||||
sender_user,
|
sender_user,
|
||||||
sender_device,
|
sender_device,
|
||||||
&room_id,
|
&room_id,
|
||||||
|
@ -125,24 +138,29 @@ pub async fn get_context_route(
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
let shortstatehash = match db.rooms.pdu_shortstatehash(
|
let shortstatehash = match services().rooms.state_accessor.pdu_shortstatehash(
|
||||||
events_after
|
events_after
|
||||||
.last()
|
.last()
|
||||||
.map_or(&*body.event_id, |(_, e)| &*e.event_id),
|
.map_or(&*body.event_id, |(_, e)| &*e.event_id),
|
||||||
)? {
|
)? {
|
||||||
Some(s) => s,
|
Some(s) => s,
|
||||||
None => db
|
None => services()
|
||||||
.rooms
|
.rooms
|
||||||
.current_shortstatehash(&room_id)?
|
.state
|
||||||
|
.get_room_shortstatehash(&room_id)?
|
||||||
.expect("All rooms have state"),
|
.expect("All rooms have state"),
|
||||||
};
|
};
|
||||||
|
|
||||||
let state_ids = db.rooms.state_full_ids(shortstatehash).await?;
|
let state_ids = services()
|
||||||
|
.rooms
|
||||||
|
.state_accessor
|
||||||
|
.state_full_ids(shortstatehash)
|
||||||
|
.await?;
|
||||||
|
|
||||||
let end_token = events_after
|
let end_token = events_after
|
||||||
.last()
|
.last()
|
||||||
.and_then(|(pdu_id, _)| db.rooms.pdu_count(pdu_id).ok())
|
.map(|(count, _)| count.stringify())
|
||||||
.map(|count| count.to_string());
|
.unwrap_or_else(|| base_token.stringify());
|
||||||
|
|
||||||
let events_after: Vec<_> = events_after
|
let events_after: Vec<_> = events_after
|
||||||
.into_iter()
|
.into_iter()
|
||||||
|
@ -152,10 +170,13 @@ pub async fn get_context_route(
|
||||||
let mut state = Vec::new();
|
let mut state = Vec::new();
|
||||||
|
|
||||||
for (shortstatekey, id) in state_ids {
|
for (shortstatekey, id) in state_ids {
|
||||||
let (event_type, state_key) = db.rooms.get_statekey_from_short(shortstatekey)?;
|
let (event_type, state_key) = services()
|
||||||
|
.rooms
|
||||||
|
.short
|
||||||
|
.get_statekey_from_short(shortstatekey)?;
|
||||||
|
|
||||||
if event_type != StateEventType::RoomMember {
|
if event_type != StateEventType::RoomMember {
|
||||||
let pdu = match db.rooms.get_pdu(&id)? {
|
let pdu = match services().rooms.timeline.get_pdu(&id)? {
|
||||||
Some(pdu) => pdu,
|
Some(pdu) => pdu,
|
||||||
None => {
|
None => {
|
||||||
error!("Pdu in state not found: {}", id);
|
error!("Pdu in state not found: {}", id);
|
||||||
|
@ -164,7 +185,7 @@ pub async fn get_context_route(
|
||||||
};
|
};
|
||||||
state.push(pdu.to_state_event());
|
state.push(pdu.to_state_event());
|
||||||
} else if !lazy_load_enabled || lazy_loaded.contains(&state_key) {
|
} else if !lazy_load_enabled || lazy_loaded.contains(&state_key) {
|
||||||
let pdu = match db.rooms.get_pdu(&id)? {
|
let pdu = match services().rooms.timeline.get_pdu(&id)? {
|
||||||
Some(pdu) => pdu,
|
Some(pdu) => pdu,
|
||||||
None => {
|
None => {
|
||||||
error!("Pdu in state not found: {}", id);
|
error!("Pdu in state not found: {}", id);
|
||||||
|
@ -176,8 +197,8 @@ pub async fn get_context_route(
|
||||||
}
|
}
|
||||||
|
|
||||||
let resp = get_context::v3::Response {
|
let resp = get_context::v3::Response {
|
||||||
start: start_token,
|
start: Some(start_token),
|
||||||
end: end_token,
|
end: Some(end_token),
|
||||||
events_before,
|
events_before,
|
||||||
event: Some(base_event),
|
event: Some(base_event),
|
||||||
events_after,
|
events_after,
|
|
@ -1,4 +1,4 @@
|
||||||
use crate::{database::DatabaseGuard, utils, Error, Result, Ruma};
|
use crate::{services, utils, Error, Result, Ruma};
|
||||||
use ruma::api::client::{
|
use ruma::api::client::{
|
||||||
device::{self, delete_device, delete_devices, get_device, get_devices, update_device},
|
device::{self, delete_device, delete_devices, get_device, get_devices, update_device},
|
||||||
error::ErrorKind,
|
error::ErrorKind,
|
||||||
|
@ -11,12 +11,11 @@ use super::SESSION_ID_LENGTH;
|
||||||
///
|
///
|
||||||
/// Get metadata on all devices of the sender user.
|
/// Get metadata on all devices of the sender user.
|
||||||
pub async fn get_devices_route(
|
pub async fn get_devices_route(
|
||||||
db: DatabaseGuard,
|
|
||||||
body: Ruma<get_devices::v3::Request>,
|
body: Ruma<get_devices::v3::Request>,
|
||||||
) -> Result<get_devices::v3::Response> {
|
) -> Result<get_devices::v3::Response> {
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
let devices: Vec<device::Device> = db
|
let devices: Vec<device::Device> = services()
|
||||||
.users
|
.users
|
||||||
.all_devices_metadata(sender_user)
|
.all_devices_metadata(sender_user)
|
||||||
.filter_map(|r| r.ok()) // Filter out buggy devices
|
.filter_map(|r| r.ok()) // Filter out buggy devices
|
||||||
|
@ -29,12 +28,11 @@ pub async fn get_devices_route(
|
||||||
///
|
///
|
||||||
/// Get metadata on a single device of the sender user.
|
/// Get metadata on a single device of the sender user.
|
||||||
pub async fn get_device_route(
|
pub async fn get_device_route(
|
||||||
db: DatabaseGuard,
|
body: Ruma<get_device::v3::Request>,
|
||||||
body: Ruma<get_device::v3::IncomingRequest>,
|
|
||||||
) -> Result<get_device::v3::Response> {
|
) -> Result<get_device::v3::Response> {
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
let device = db
|
let device = services()
|
||||||
.users
|
.users
|
||||||
.get_device_metadata(sender_user, &body.body.device_id)?
|
.get_device_metadata(sender_user, &body.body.device_id)?
|
||||||
.ok_or(Error::BadRequest(ErrorKind::NotFound, "Device not found."))?;
|
.ok_or(Error::BadRequest(ErrorKind::NotFound, "Device not found."))?;
|
||||||
|
@ -46,23 +44,21 @@ pub async fn get_device_route(
|
||||||
///
|
///
|
||||||
/// Updates the metadata on a given device of the sender user.
|
/// Updates the metadata on a given device of the sender user.
|
||||||
pub async fn update_device_route(
|
pub async fn update_device_route(
|
||||||
db: DatabaseGuard,
|
body: Ruma<update_device::v3::Request>,
|
||||||
body: Ruma<update_device::v3::IncomingRequest>,
|
|
||||||
) -> Result<update_device::v3::Response> {
|
) -> Result<update_device::v3::Response> {
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
let mut device = db
|
let mut device = services()
|
||||||
.users
|
.users
|
||||||
.get_device_metadata(sender_user, &body.device_id)?
|
.get_device_metadata(sender_user, &body.device_id)?
|
||||||
.ok_or(Error::BadRequest(ErrorKind::NotFound, "Device not found."))?;
|
.ok_or(Error::BadRequest(ErrorKind::NotFound, "Device not found."))?;
|
||||||
|
|
||||||
device.display_name = body.display_name.clone();
|
device.display_name = body.display_name.clone();
|
||||||
|
|
||||||
db.users
|
services()
|
||||||
|
.users
|
||||||
.update_device_metadata(sender_user, &body.device_id, &device)?;
|
.update_device_metadata(sender_user, &body.device_id, &device)?;
|
||||||
|
|
||||||
db.flush()?;
|
|
||||||
|
|
||||||
Ok(update_device::v3::Response {})
|
Ok(update_device::v3::Response {})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -76,8 +72,7 @@ pub async fn update_device_route(
|
||||||
/// - Forgets to-device events
|
/// - Forgets to-device events
|
||||||
/// - Triggers device list updates
|
/// - Triggers device list updates
|
||||||
pub async fn delete_device_route(
|
pub async fn delete_device_route(
|
||||||
db: DatabaseGuard,
|
body: Ruma<delete_device::v3::Request>,
|
||||||
body: Ruma<delete_device::v3::IncomingRequest>,
|
|
||||||
) -> Result<delete_device::v3::Response> {
|
) -> Result<delete_device::v3::Response> {
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
let sender_device = body.sender_device.as_ref().expect("user is authenticated");
|
let sender_device = body.sender_device.as_ref().expect("user is authenticated");
|
||||||
|
@ -94,30 +89,27 @@ pub async fn delete_device_route(
|
||||||
};
|
};
|
||||||
|
|
||||||
if let Some(auth) = &body.auth {
|
if let Some(auth) = &body.auth {
|
||||||
let (worked, uiaainfo) = db.uiaa.try_auth(
|
let (worked, uiaainfo) =
|
||||||
sender_user,
|
services()
|
||||||
sender_device,
|
.uiaa
|
||||||
auth,
|
.try_auth(sender_user, sender_device, auth, &uiaainfo)?;
|
||||||
&uiaainfo,
|
|
||||||
&db.users,
|
|
||||||
&db.globals,
|
|
||||||
)?;
|
|
||||||
if !worked {
|
if !worked {
|
||||||
return Err(Error::Uiaa(uiaainfo));
|
return Err(Error::Uiaa(uiaainfo));
|
||||||
}
|
}
|
||||||
// Success!
|
// Success!
|
||||||
} else if let Some(json) = body.json_body {
|
} else if let Some(json) = body.json_body {
|
||||||
uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH));
|
uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH));
|
||||||
db.uiaa
|
services()
|
||||||
|
.uiaa
|
||||||
.create(sender_user, sender_device, &uiaainfo, &json)?;
|
.create(sender_user, sender_device, &uiaainfo, &json)?;
|
||||||
return Err(Error::Uiaa(uiaainfo));
|
return Err(Error::Uiaa(uiaainfo));
|
||||||
} else {
|
} else {
|
||||||
return Err(Error::BadRequest(ErrorKind::NotJson, "Not json."));
|
return Err(Error::BadRequest(ErrorKind::NotJson, "Not json."));
|
||||||
}
|
}
|
||||||
|
|
||||||
db.users.remove_device(sender_user, &body.device_id)?;
|
services()
|
||||||
|
.users
|
||||||
db.flush()?;
|
.remove_device(sender_user, &body.device_id)?;
|
||||||
|
|
||||||
Ok(delete_device::v3::Response {})
|
Ok(delete_device::v3::Response {})
|
||||||
}
|
}
|
||||||
|
@ -134,8 +126,7 @@ pub async fn delete_device_route(
|
||||||
/// - Forgets to-device events
|
/// - Forgets to-device events
|
||||||
/// - Triggers device list updates
|
/// - Triggers device list updates
|
||||||
pub async fn delete_devices_route(
|
pub async fn delete_devices_route(
|
||||||
db: DatabaseGuard,
|
body: Ruma<delete_devices::v3::Request>,
|
||||||
body: Ruma<delete_devices::v3::IncomingRequest>,
|
|
||||||
) -> Result<delete_devices::v3::Response> {
|
) -> Result<delete_devices::v3::Response> {
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
let sender_device = body.sender_device.as_ref().expect("user is authenticated");
|
let sender_device = body.sender_device.as_ref().expect("user is authenticated");
|
||||||
|
@ -152,21 +143,18 @@ pub async fn delete_devices_route(
|
||||||
};
|
};
|
||||||
|
|
||||||
if let Some(auth) = &body.auth {
|
if let Some(auth) = &body.auth {
|
||||||
let (worked, uiaainfo) = db.uiaa.try_auth(
|
let (worked, uiaainfo) =
|
||||||
sender_user,
|
services()
|
||||||
sender_device,
|
.uiaa
|
||||||
auth,
|
.try_auth(sender_user, sender_device, auth, &uiaainfo)?;
|
||||||
&uiaainfo,
|
|
||||||
&db.users,
|
|
||||||
&db.globals,
|
|
||||||
)?;
|
|
||||||
if !worked {
|
if !worked {
|
||||||
return Err(Error::Uiaa(uiaainfo));
|
return Err(Error::Uiaa(uiaainfo));
|
||||||
}
|
}
|
||||||
// Success!
|
// Success!
|
||||||
} else if let Some(json) = body.json_body {
|
} else if let Some(json) = body.json_body {
|
||||||
uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH));
|
uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH));
|
||||||
db.uiaa
|
services()
|
||||||
|
.uiaa
|
||||||
.create(sender_user, sender_device, &uiaainfo, &json)?;
|
.create(sender_user, sender_device, &uiaainfo, &json)?;
|
||||||
return Err(Error::Uiaa(uiaainfo));
|
return Err(Error::Uiaa(uiaainfo));
|
||||||
} else {
|
} else {
|
||||||
|
@ -174,10 +162,8 @@ pub async fn delete_devices_route(
|
||||||
}
|
}
|
||||||
|
|
||||||
for device_id in &body.devices {
|
for device_id in &body.devices {
|
||||||
db.users.remove_device(sender_user, device_id)?
|
services().users.remove_device(sender_user, device_id)?
|
||||||
}
|
}
|
||||||
|
|
||||||
db.flush()?;
|
|
||||||
|
|
||||||
Ok(delete_devices::v3::Response {})
|
Ok(delete_devices::v3::Response {})
|
||||||
}
|
}
|
|
@ -1,4 +1,4 @@
|
||||||
use crate::{database::DatabaseGuard, Database, Error, Result, Ruma};
|
use crate::{services, Error, Result, Ruma};
|
||||||
use ruma::{
|
use ruma::{
|
||||||
api::{
|
api::{
|
||||||
client::{
|
client::{
|
||||||
|
@ -11,25 +11,22 @@ use ruma::{
|
||||||
},
|
},
|
||||||
federation,
|
federation,
|
||||||
},
|
},
|
||||||
directory::{
|
directory::{Filter, PublicRoomJoinRule, PublicRoomsChunk, RoomNetwork},
|
||||||
Filter, IncomingFilter, IncomingRoomNetwork, PublicRoomJoinRule, PublicRoomsChunk,
|
|
||||||
RoomNetwork,
|
|
||||||
},
|
|
||||||
events::{
|
events::{
|
||||||
room::{
|
room::{
|
||||||
avatar::RoomAvatarEventContent,
|
avatar::RoomAvatarEventContent,
|
||||||
canonical_alias::RoomCanonicalAliasEventContent,
|
canonical_alias::RoomCanonicalAliasEventContent,
|
||||||
|
create::RoomCreateEventContent,
|
||||||
guest_access::{GuestAccess, RoomGuestAccessEventContent},
|
guest_access::{GuestAccess, RoomGuestAccessEventContent},
|
||||||
history_visibility::{HistoryVisibility, RoomHistoryVisibilityEventContent},
|
history_visibility::{HistoryVisibility, RoomHistoryVisibilityEventContent},
|
||||||
join_rules::{JoinRule, RoomJoinRulesEventContent},
|
join_rules::{JoinRule, RoomJoinRulesEventContent},
|
||||||
name::RoomNameEventContent,
|
|
||||||
topic::RoomTopicEventContent,
|
topic::RoomTopicEventContent,
|
||||||
},
|
},
|
||||||
StateEventType,
|
StateEventType,
|
||||||
},
|
},
|
||||||
ServerName, UInt,
|
ServerName, UInt,
|
||||||
};
|
};
|
||||||
use tracing::{info, warn};
|
use tracing::{error, info, warn};
|
||||||
|
|
||||||
/// # `POST /_matrix/client/r0/publicRooms`
|
/// # `POST /_matrix/client/r0/publicRooms`
|
||||||
///
|
///
|
||||||
|
@ -37,11 +34,9 @@ use tracing::{info, warn};
|
||||||
///
|
///
|
||||||
/// - Rooms are ordered by the number of joined members
|
/// - Rooms are ordered by the number of joined members
|
||||||
pub async fn get_public_rooms_filtered_route(
|
pub async fn get_public_rooms_filtered_route(
|
||||||
db: DatabaseGuard,
|
body: Ruma<get_public_rooms_filtered::v3::Request>,
|
||||||
body: Ruma<get_public_rooms_filtered::v3::IncomingRequest>,
|
|
||||||
) -> Result<get_public_rooms_filtered::v3::Response> {
|
) -> Result<get_public_rooms_filtered::v3::Response> {
|
||||||
get_public_rooms_filtered_helper(
|
get_public_rooms_filtered_helper(
|
||||||
&db,
|
|
||||||
body.server.as_deref(),
|
body.server.as_deref(),
|
||||||
body.limit,
|
body.limit,
|
||||||
body.since.as_deref(),
|
body.since.as_deref(),
|
||||||
|
@ -57,16 +52,14 @@ pub async fn get_public_rooms_filtered_route(
|
||||||
///
|
///
|
||||||
/// - Rooms are ordered by the number of joined members
|
/// - Rooms are ordered by the number of joined members
|
||||||
pub async fn get_public_rooms_route(
|
pub async fn get_public_rooms_route(
|
||||||
db: DatabaseGuard,
|
body: Ruma<get_public_rooms::v3::Request>,
|
||||||
body: Ruma<get_public_rooms::v3::IncomingRequest>,
|
|
||||||
) -> Result<get_public_rooms::v3::Response> {
|
) -> Result<get_public_rooms::v3::Response> {
|
||||||
let response = get_public_rooms_filtered_helper(
|
let response = get_public_rooms_filtered_helper(
|
||||||
&db,
|
|
||||||
body.server.as_deref(),
|
body.server.as_deref(),
|
||||||
body.limit,
|
body.limit,
|
||||||
body.since.as_deref(),
|
body.since.as_deref(),
|
||||||
&IncomingFilter::default(),
|
&Filter::default(),
|
||||||
&IncomingRoomNetwork::Matrix,
|
&RoomNetwork::Matrix,
|
||||||
)
|
)
|
||||||
.await?;
|
.await?;
|
||||||
|
|
||||||
|
@ -84,17 +77,21 @@ pub async fn get_public_rooms_route(
|
||||||
///
|
///
|
||||||
/// - TODO: Access control checks
|
/// - TODO: Access control checks
|
||||||
pub async fn set_room_visibility_route(
|
pub async fn set_room_visibility_route(
|
||||||
db: DatabaseGuard,
|
body: Ruma<set_room_visibility::v3::Request>,
|
||||||
body: Ruma<set_room_visibility::v3::IncomingRequest>,
|
|
||||||
) -> Result<set_room_visibility::v3::Response> {
|
) -> Result<set_room_visibility::v3::Response> {
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
|
if !services().rooms.metadata.exists(&body.room_id)? {
|
||||||
|
// Return 404 if the room doesn't exist
|
||||||
|
return Err(Error::BadRequest(ErrorKind::NotFound, "Room not found"));
|
||||||
|
}
|
||||||
|
|
||||||
match &body.visibility {
|
match &body.visibility {
|
||||||
room::Visibility::Public => {
|
room::Visibility::Public => {
|
||||||
db.rooms.set_public(&body.room_id, true)?;
|
services().rooms.directory.set_public(&body.room_id)?;
|
||||||
info!("{} made {} public", sender_user, body.room_id);
|
info!("{} made {} public", sender_user, body.room_id);
|
||||||
}
|
}
|
||||||
room::Visibility::Private => db.rooms.set_public(&body.room_id, false)?,
|
room::Visibility::Private => services().rooms.directory.set_not_public(&body.room_id)?,
|
||||||
_ => {
|
_ => {
|
||||||
return Err(Error::BadRequest(
|
return Err(Error::BadRequest(
|
||||||
ErrorKind::InvalidParam,
|
ErrorKind::InvalidParam,
|
||||||
|
@ -103,8 +100,6 @@ pub async fn set_room_visibility_route(
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
db.flush()?;
|
|
||||||
|
|
||||||
Ok(set_room_visibility::v3::Response {})
|
Ok(set_room_visibility::v3::Response {})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -112,11 +107,15 @@ pub async fn set_room_visibility_route(
|
||||||
///
|
///
|
||||||
/// Gets the visibility of a given room in the room directory.
|
/// Gets the visibility of a given room in the room directory.
|
||||||
pub async fn get_room_visibility_route(
|
pub async fn get_room_visibility_route(
|
||||||
db: DatabaseGuard,
|
body: Ruma<get_room_visibility::v3::Request>,
|
||||||
body: Ruma<get_room_visibility::v3::IncomingRequest>,
|
|
||||||
) -> Result<get_room_visibility::v3::Response> {
|
) -> Result<get_room_visibility::v3::Response> {
|
||||||
|
if !services().rooms.metadata.exists(&body.room_id)? {
|
||||||
|
// Return 404 if the room doesn't exist
|
||||||
|
return Err(Error::BadRequest(ErrorKind::NotFound, "Room not found"));
|
||||||
|
}
|
||||||
|
|
||||||
Ok(get_room_visibility::v3::Response {
|
Ok(get_room_visibility::v3::Response {
|
||||||
visibility: if db.rooms.is_public_room(&body.room_id)? {
|
visibility: if services().rooms.directory.is_public_room(&body.room_id)? {
|
||||||
room::Visibility::Public
|
room::Visibility::Public
|
||||||
} else {
|
} else {
|
||||||
room::Visibility::Private
|
room::Visibility::Private
|
||||||
|
@ -125,25 +124,25 @@ pub async fn get_room_visibility_route(
|
||||||
}
|
}
|
||||||
|
|
||||||
pub(crate) async fn get_public_rooms_filtered_helper(
|
pub(crate) async fn get_public_rooms_filtered_helper(
|
||||||
db: &Database,
|
|
||||||
server: Option<&ServerName>,
|
server: Option<&ServerName>,
|
||||||
limit: Option<UInt>,
|
limit: Option<UInt>,
|
||||||
since: Option<&str>,
|
since: Option<&str>,
|
||||||
filter: &IncomingFilter,
|
filter: &Filter,
|
||||||
_network: &IncomingRoomNetwork,
|
_network: &RoomNetwork,
|
||||||
) -> Result<get_public_rooms_filtered::v3::Response> {
|
) -> Result<get_public_rooms_filtered::v3::Response> {
|
||||||
if let Some(other_server) = server.filter(|server| *server != db.globals.server_name().as_str())
|
if let Some(other_server) =
|
||||||
|
server.filter(|server| *server != services().globals.server_name().as_str())
|
||||||
{
|
{
|
||||||
let response = db
|
let response = services()
|
||||||
.sending
|
.sending
|
||||||
.send_federation_request(
|
.send_federation_request(
|
||||||
&db.globals,
|
|
||||||
other_server,
|
other_server,
|
||||||
federation::directory::get_public_rooms_filtered::v1::Request {
|
federation::directory::get_public_rooms_filtered::v1::Request {
|
||||||
limit,
|
limit,
|
||||||
since,
|
since: since.map(ToOwned::to_owned),
|
||||||
filter: Filter {
|
filter: Filter {
|
||||||
generic_search_term: filter.generic_search_term.as_deref(),
|
generic_search_term: filter.generic_search_term.clone(),
|
||||||
|
room_types: filter.room_types.clone(),
|
||||||
},
|
},
|
||||||
room_network: RoomNetwork::Matrix,
|
room_network: RoomNetwork::Matrix,
|
||||||
},
|
},
|
||||||
|
@ -184,15 +183,17 @@ pub(crate) async fn get_public_rooms_filtered_helper(
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
let mut all_rooms: Vec<_> = db
|
let mut all_rooms: Vec<_> = services()
|
||||||
.rooms
|
.rooms
|
||||||
|
.directory
|
||||||
.public_rooms()
|
.public_rooms()
|
||||||
.map(|room_id| {
|
.map(|room_id| {
|
||||||
let room_id = room_id?;
|
let room_id = room_id?;
|
||||||
|
|
||||||
let chunk = PublicRoomsChunk {
|
let chunk = PublicRoomsChunk {
|
||||||
canonical_alias: db
|
canonical_alias: services()
|
||||||
.rooms
|
.rooms
|
||||||
|
.state_accessor
|
||||||
.room_state_get(&room_id, &StateEventType::RoomCanonicalAlias, "")?
|
.room_state_get(&room_id, &StateEventType::RoomCanonicalAlias, "")?
|
||||||
.map_or(Ok(None), |s| {
|
.map_or(Ok(None), |s| {
|
||||||
serde_json::from_str(s.content.get())
|
serde_json::from_str(s.content.get())
|
||||||
|
@ -201,18 +202,10 @@ pub(crate) async fn get_public_rooms_filtered_helper(
|
||||||
Error::bad_database("Invalid canonical alias event in database.")
|
Error::bad_database("Invalid canonical alias event in database.")
|
||||||
})
|
})
|
||||||
})?,
|
})?,
|
||||||
name: db
|
name: services().rooms.state_accessor.get_name(&room_id)?,
|
||||||
.rooms
|
num_joined_members: services()
|
||||||
.room_state_get(&room_id, &StateEventType::RoomName, "")?
|
|
||||||
.map_or(Ok(None), |s| {
|
|
||||||
serde_json::from_str(s.content.get())
|
|
||||||
.map(|c: RoomNameEventContent| c.name)
|
|
||||||
.map_err(|_| {
|
|
||||||
Error::bad_database("Invalid room name event in database.")
|
|
||||||
})
|
|
||||||
})?,
|
|
||||||
num_joined_members: db
|
|
||||||
.rooms
|
.rooms
|
||||||
|
.state_cache
|
||||||
.room_joined_count(&room_id)?
|
.room_joined_count(&room_id)?
|
||||||
.unwrap_or_else(|| {
|
.unwrap_or_else(|| {
|
||||||
warn!("Room {} has no member count", room_id);
|
warn!("Room {} has no member count", room_id);
|
||||||
|
@ -220,18 +213,21 @@ pub(crate) async fn get_public_rooms_filtered_helper(
|
||||||
})
|
})
|
||||||
.try_into()
|
.try_into()
|
||||||
.expect("user count should not be that big"),
|
.expect("user count should not be that big"),
|
||||||
topic: db
|
topic: services()
|
||||||
.rooms
|
.rooms
|
||||||
|
.state_accessor
|
||||||
.room_state_get(&room_id, &StateEventType::RoomTopic, "")?
|
.room_state_get(&room_id, &StateEventType::RoomTopic, "")?
|
||||||
.map_or(Ok(None), |s| {
|
.map_or(Ok(None), |s| {
|
||||||
serde_json::from_str(s.content.get())
|
serde_json::from_str(s.content.get())
|
||||||
.map(|c: RoomTopicEventContent| Some(c.topic))
|
.map(|c: RoomTopicEventContent| Some(c.topic))
|
||||||
.map_err(|_| {
|
.map_err(|_| {
|
||||||
|
error!("Invalid room topic event in database for room {}", room_id);
|
||||||
Error::bad_database("Invalid room topic event in database.")
|
Error::bad_database("Invalid room topic event in database.")
|
||||||
})
|
})
|
||||||
})?,
|
})?,
|
||||||
world_readable: db
|
world_readable: services()
|
||||||
.rooms
|
.rooms
|
||||||
|
.state_accessor
|
||||||
.room_state_get(&room_id, &StateEventType::RoomHistoryVisibility, "")?
|
.room_state_get(&room_id, &StateEventType::RoomHistoryVisibility, "")?
|
||||||
.map_or(Ok(false), |s| {
|
.map_or(Ok(false), |s| {
|
||||||
serde_json::from_str(s.content.get())
|
serde_json::from_str(s.content.get())
|
||||||
|
@ -244,8 +240,9 @@ pub(crate) async fn get_public_rooms_filtered_helper(
|
||||||
)
|
)
|
||||||
})
|
})
|
||||||
})?,
|
})?,
|
||||||
guest_can_join: db
|
guest_can_join: services()
|
||||||
.rooms
|
.rooms
|
||||||
|
.state_accessor
|
||||||
.room_state_get(&room_id, &StateEventType::RoomGuestAccess, "")?
|
.room_state_get(&room_id, &StateEventType::RoomGuestAccess, "")?
|
||||||
.map_or(Ok(false), |s| {
|
.map_or(Ok(false), |s| {
|
||||||
serde_json::from_str(s.content.get())
|
serde_json::from_str(s.content.get())
|
||||||
|
@ -256,8 +253,9 @@ pub(crate) async fn get_public_rooms_filtered_helper(
|
||||||
Error::bad_database("Invalid room guest access event in database.")
|
Error::bad_database("Invalid room guest access event in database.")
|
||||||
})
|
})
|
||||||
})?,
|
})?,
|
||||||
avatar_url: db
|
avatar_url: services()
|
||||||
.rooms
|
.rooms
|
||||||
|
.state_accessor
|
||||||
.room_state_get(&room_id, &StateEventType::RoomAvatar, "")?
|
.room_state_get(&room_id, &StateEventType::RoomAvatar, "")?
|
||||||
.map(|s| {
|
.map(|s| {
|
||||||
serde_json::from_str(s.content.get())
|
serde_json::from_str(s.content.get())
|
||||||
|
@ -269,8 +267,9 @@ pub(crate) async fn get_public_rooms_filtered_helper(
|
||||||
.transpose()?
|
.transpose()?
|
||||||
// url is now an Option<String> so we must flatten
|
// url is now an Option<String> so we must flatten
|
||||||
.flatten(),
|
.flatten(),
|
||||||
join_rule: db
|
join_rule: services()
|
||||||
.rooms
|
.rooms
|
||||||
|
.state_accessor
|
||||||
.room_state_get(&room_id, &StateEventType::RoomJoinRules, "")?
|
.room_state_get(&room_id, &StateEventType::RoomJoinRules, "")?
|
||||||
.map(|s| {
|
.map(|s| {
|
||||||
serde_json::from_str(s.content.get())
|
serde_json::from_str(s.content.get())
|
||||||
|
@ -279,15 +278,28 @@ pub(crate) async fn get_public_rooms_filtered_helper(
|
||||||
JoinRule::Knock => Some(PublicRoomJoinRule::Knock),
|
JoinRule::Knock => Some(PublicRoomJoinRule::Knock),
|
||||||
_ => None,
|
_ => None,
|
||||||
})
|
})
|
||||||
.map_err(|_| {
|
.map_err(|e| {
|
||||||
Error::bad_database("Invalid room join rule event in database.")
|
error!("Invalid room join rule event in database: {}", e);
|
||||||
|
Error::BadDatabase("Invalid room join rule event in database.")
|
||||||
})
|
})
|
||||||
})
|
})
|
||||||
.transpose()?
|
.transpose()?
|
||||||
.flatten()
|
.flatten()
|
||||||
.ok_or(Error::bad_database(
|
.ok_or_else(|| Error::bad_database("Missing room join rule event for room."))?,
|
||||||
"Invalid room join rule event in database.",
|
room_type: services()
|
||||||
))?,
|
.rooms
|
||||||
|
.state_accessor
|
||||||
|
.room_state_get(&room_id, &StateEventType::RoomCreate, "")?
|
||||||
|
.map(|s| {
|
||||||
|
serde_json::from_str::<RoomCreateEventContent>(s.content.get()).map_err(
|
||||||
|
|e| {
|
||||||
|
error!("Invalid room create event in database: {}", e);
|
||||||
|
Error::BadDatabase("Invalid room create event in database.")
|
||||||
|
},
|
||||||
|
)
|
||||||
|
})
|
||||||
|
.transpose()?
|
||||||
|
.and_then(|e| e.room_type),
|
||||||
room_id,
|
room_id,
|
||||||
};
|
};
|
||||||
Ok(chunk)
|
Ok(chunk)
|
||||||
|
@ -339,7 +351,7 @@ pub(crate) async fn get_public_rooms_filtered_helper(
|
||||||
let prev_batch = if num_since == 0 {
|
let prev_batch = if num_since == 0 {
|
||||||
None
|
None
|
||||||
} else {
|
} else {
|
||||||
Some(format!("p{}", num_since))
|
Some(format!("p{num_since}"))
|
||||||
};
|
};
|
||||||
|
|
||||||
let next_batch = if chunk.len() < limit as usize {
|
let next_batch = if chunk.len() < limit as usize {
|
|
@ -1,4 +1,4 @@
|
||||||
use crate::{database::DatabaseGuard, Error, Result, Ruma};
|
use crate::{services, Error, Result, Ruma};
|
||||||
use ruma::api::client::{
|
use ruma::api::client::{
|
||||||
error::ErrorKind,
|
error::ErrorKind,
|
||||||
filter::{create_filter, get_filter},
|
filter::{create_filter, get_filter},
|
||||||
|
@ -10,11 +10,10 @@ use ruma::api::client::{
|
||||||
///
|
///
|
||||||
/// - A user can only access their own filters
|
/// - A user can only access their own filters
|
||||||
pub async fn get_filter_route(
|
pub async fn get_filter_route(
|
||||||
db: DatabaseGuard,
|
body: Ruma<get_filter::v3::Request>,
|
||||||
body: Ruma<get_filter::v3::IncomingRequest>,
|
|
||||||
) -> Result<get_filter::v3::Response> {
|
) -> Result<get_filter::v3::Response> {
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
let filter = match db.users.get_filter(sender_user, &body.filter_id)? {
|
let filter = match services().users.get_filter(sender_user, &body.filter_id)? {
|
||||||
Some(filter) => filter,
|
Some(filter) => filter,
|
||||||
None => return Err(Error::BadRequest(ErrorKind::NotFound, "Filter not found.")),
|
None => return Err(Error::BadRequest(ErrorKind::NotFound, "Filter not found.")),
|
||||||
};
|
};
|
||||||
|
@ -26,11 +25,10 @@ pub async fn get_filter_route(
|
||||||
///
|
///
|
||||||
/// Creates a new filter to be used by other endpoints.
|
/// Creates a new filter to be used by other endpoints.
|
||||||
pub async fn create_filter_route(
|
pub async fn create_filter_route(
|
||||||
db: DatabaseGuard,
|
body: Ruma<create_filter::v3::Request>,
|
||||||
body: Ruma<create_filter::v3::IncomingRequest>,
|
|
||||||
) -> Result<create_filter::v3::Response> {
|
) -> Result<create_filter::v3::Response> {
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
Ok(create_filter::v3::Response::new(
|
Ok(create_filter::v3::Response::new(
|
||||||
db.users.create_filter(sender_user, &body.filter)?,
|
services().users.create_filter(sender_user, &body.filter)?,
|
||||||
))
|
))
|
||||||
}
|
}
|
|
@ -1,5 +1,5 @@
|
||||||
use super::SESSION_ID_LENGTH;
|
use super::SESSION_ID_LENGTH;
|
||||||
use crate::{database::DatabaseGuard, utils, Database, Error, Result, Ruma};
|
use crate::{services, utils, Error, Result, Ruma};
|
||||||
use futures_util::{stream::FuturesUnordered, StreamExt};
|
use futures_util::{stream::FuturesUnordered, StreamExt};
|
||||||
use ruma::{
|
use ruma::{
|
||||||
api::{
|
api::{
|
||||||
|
@ -14,10 +14,14 @@ use ruma::{
|
||||||
federation,
|
federation,
|
||||||
},
|
},
|
||||||
serde::Raw,
|
serde::Raw,
|
||||||
DeviceId, DeviceKeyAlgorithm, UserId,
|
DeviceKeyAlgorithm, OwnedDeviceId, OwnedUserId, UserId,
|
||||||
};
|
};
|
||||||
use serde_json::json;
|
use serde_json::json;
|
||||||
use std::collections::{BTreeMap, HashMap, HashSet};
|
use std::{
|
||||||
|
collections::{hash_map, BTreeMap, HashMap, HashSet},
|
||||||
|
time::{Duration, Instant},
|
||||||
|
};
|
||||||
|
use tracing::debug;
|
||||||
|
|
||||||
/// # `POST /_matrix/client/r0/keys/upload`
|
/// # `POST /_matrix/client/r0/keys/upload`
|
||||||
///
|
///
|
||||||
|
@ -26,39 +30,35 @@ use std::collections::{BTreeMap, HashMap, HashSet};
|
||||||
/// - Adds one time keys
|
/// - Adds one time keys
|
||||||
/// - If there are no device keys yet: Adds device keys (TODO: merge with existing keys?)
|
/// - If there are no device keys yet: Adds device keys (TODO: merge with existing keys?)
|
||||||
pub async fn upload_keys_route(
|
pub async fn upload_keys_route(
|
||||||
db: DatabaseGuard,
|
|
||||||
body: Ruma<upload_keys::v3::Request>,
|
body: Ruma<upload_keys::v3::Request>,
|
||||||
) -> Result<upload_keys::v3::Response> {
|
) -> Result<upload_keys::v3::Response> {
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
let sender_device = body.sender_device.as_ref().expect("user is authenticated");
|
let sender_device = body.sender_device.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
for (key_key, key_value) in &body.one_time_keys {
|
for (key_key, key_value) in &body.one_time_keys {
|
||||||
db.users
|
services()
|
||||||
.add_one_time_key(sender_user, sender_device, key_key, key_value, &db.globals)?;
|
.users
|
||||||
|
.add_one_time_key(sender_user, sender_device, key_key, key_value)?;
|
||||||
}
|
}
|
||||||
|
|
||||||
if let Some(device_keys) = &body.device_keys {
|
if let Some(device_keys) = &body.device_keys {
|
||||||
// TODO: merge this and the existing event?
|
// TODO: merge this and the existing event?
|
||||||
// This check is needed to assure that signatures are kept
|
// This check is needed to assure that signatures are kept
|
||||||
if db
|
if services()
|
||||||
.users
|
.users
|
||||||
.get_device_keys(sender_user, sender_device)?
|
.get_device_keys(sender_user, sender_device)?
|
||||||
.is_none()
|
.is_none()
|
||||||
{
|
{
|
||||||
db.users.add_device_keys(
|
services()
|
||||||
sender_user,
|
.users
|
||||||
sender_device,
|
.add_device_keys(sender_user, sender_device, device_keys)?;
|
||||||
device_keys,
|
|
||||||
&db.rooms,
|
|
||||||
&db.globals,
|
|
||||||
)?;
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
db.flush()?;
|
|
||||||
|
|
||||||
Ok(upload_keys::v3::Response {
|
Ok(upload_keys::v3::Response {
|
||||||
one_time_key_counts: db.users.count_one_time_keys(sender_user, sender_device)?,
|
one_time_key_counts: services()
|
||||||
|
.users
|
||||||
|
.count_one_time_keys(sender_user, sender_device)?,
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -69,19 +69,11 @@ pub async fn upload_keys_route(
|
||||||
/// - Always fetches users from other servers over federation
|
/// - Always fetches users from other servers over federation
|
||||||
/// - Gets master keys, self-signing keys, user signing keys and device keys.
|
/// - Gets master keys, self-signing keys, user signing keys and device keys.
|
||||||
/// - The master and self-signing keys contain signatures that the user is allowed to see
|
/// - The master and self-signing keys contain signatures that the user is allowed to see
|
||||||
pub async fn get_keys_route(
|
pub async fn get_keys_route(body: Ruma<get_keys::v3::Request>) -> Result<get_keys::v3::Response> {
|
||||||
db: DatabaseGuard,
|
|
||||||
body: Ruma<get_keys::v3::IncomingRequest>,
|
|
||||||
) -> Result<get_keys::v3::Response> {
|
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
let response = get_keys_helper(
|
let response =
|
||||||
Some(sender_user),
|
get_keys_helper(Some(sender_user), &body.device_keys, |u| u == sender_user).await?;
|
||||||
&body.device_keys,
|
|
||||||
|u| u == sender_user,
|
|
||||||
&db,
|
|
||||||
)
|
|
||||||
.await?;
|
|
||||||
|
|
||||||
Ok(response)
|
Ok(response)
|
||||||
}
|
}
|
||||||
|
@ -90,12 +82,9 @@ pub async fn get_keys_route(
|
||||||
///
|
///
|
||||||
/// Claims one-time keys
|
/// Claims one-time keys
|
||||||
pub async fn claim_keys_route(
|
pub async fn claim_keys_route(
|
||||||
db: DatabaseGuard,
|
|
||||||
body: Ruma<claim_keys::v3::Request>,
|
body: Ruma<claim_keys::v3::Request>,
|
||||||
) -> Result<claim_keys::v3::Response> {
|
) -> Result<claim_keys::v3::Response> {
|
||||||
let response = claim_keys_helper(&body.one_time_keys, &db).await?;
|
let response = claim_keys_helper(&body.one_time_keys).await?;
|
||||||
|
|
||||||
db.flush()?;
|
|
||||||
|
|
||||||
Ok(response)
|
Ok(response)
|
||||||
}
|
}
|
||||||
|
@ -106,8 +95,7 @@ pub async fn claim_keys_route(
|
||||||
///
|
///
|
||||||
/// - Requires UIAA to verify password
|
/// - Requires UIAA to verify password
|
||||||
pub async fn upload_signing_keys_route(
|
pub async fn upload_signing_keys_route(
|
||||||
db: DatabaseGuard,
|
body: Ruma<upload_signing_keys::v3::Request>,
|
||||||
body: Ruma<upload_signing_keys::v3::IncomingRequest>,
|
|
||||||
) -> Result<upload_signing_keys::v3::Response> {
|
) -> Result<upload_signing_keys::v3::Response> {
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
let sender_device = body.sender_device.as_ref().expect("user is authenticated");
|
let sender_device = body.sender_device.as_ref().expect("user is authenticated");
|
||||||
|
@ -124,21 +112,18 @@ pub async fn upload_signing_keys_route(
|
||||||
};
|
};
|
||||||
|
|
||||||
if let Some(auth) = &body.auth {
|
if let Some(auth) = &body.auth {
|
||||||
let (worked, uiaainfo) = db.uiaa.try_auth(
|
let (worked, uiaainfo) =
|
||||||
sender_user,
|
services()
|
||||||
sender_device,
|
.uiaa
|
||||||
auth,
|
.try_auth(sender_user, sender_device, auth, &uiaainfo)?;
|
||||||
&uiaainfo,
|
|
||||||
&db.users,
|
|
||||||
&db.globals,
|
|
||||||
)?;
|
|
||||||
if !worked {
|
if !worked {
|
||||||
return Err(Error::Uiaa(uiaainfo));
|
return Err(Error::Uiaa(uiaainfo));
|
||||||
}
|
}
|
||||||
// Success!
|
// Success!
|
||||||
} else if let Some(json) = body.json_body {
|
} else if let Some(json) = body.json_body {
|
||||||
uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH));
|
uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH));
|
||||||
db.uiaa
|
services()
|
||||||
|
.uiaa
|
||||||
.create(sender_user, sender_device, &uiaainfo, &json)?;
|
.create(sender_user, sender_device, &uiaainfo, &json)?;
|
||||||
return Err(Error::Uiaa(uiaainfo));
|
return Err(Error::Uiaa(uiaainfo));
|
||||||
} else {
|
} else {
|
||||||
|
@ -146,18 +131,15 @@ pub async fn upload_signing_keys_route(
|
||||||
}
|
}
|
||||||
|
|
||||||
if let Some(master_key) = &body.master_key {
|
if let Some(master_key) = &body.master_key {
|
||||||
db.users.add_cross_signing_keys(
|
services().users.add_cross_signing_keys(
|
||||||
sender_user,
|
sender_user,
|
||||||
master_key,
|
master_key,
|
||||||
&body.self_signing_key,
|
&body.self_signing_key,
|
||||||
&body.user_signing_key,
|
&body.user_signing_key,
|
||||||
&db.rooms,
|
true, // notify so that other users see the new keys
|
||||||
&db.globals,
|
|
||||||
)?;
|
)?;
|
||||||
}
|
}
|
||||||
|
|
||||||
db.flush()?;
|
|
||||||
|
|
||||||
Ok(upload_signing_keys::v3::Response {})
|
Ok(upload_signing_keys::v3::Response {})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -165,16 +147,16 @@ pub async fn upload_signing_keys_route(
|
||||||
///
|
///
|
||||||
/// Uploads end-to-end key signatures from the sender user.
|
/// Uploads end-to-end key signatures from the sender user.
|
||||||
pub async fn upload_signatures_route(
|
pub async fn upload_signatures_route(
|
||||||
db: DatabaseGuard,
|
|
||||||
body: Ruma<upload_signatures::v3::Request>,
|
body: Ruma<upload_signatures::v3::Request>,
|
||||||
) -> Result<upload_signatures::v3::Response> {
|
) -> Result<upload_signatures::v3::Response> {
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
for (user_id, signed_keys) in &body.signed_keys {
|
for (user_id, keys) in &body.signed_keys {
|
||||||
for (key_id, signed_key) in signed_keys {
|
for (key_id, key) in keys {
|
||||||
let signed_key = serde_json::to_value(signed_key).unwrap();
|
let key = serde_json::to_value(key)
|
||||||
|
.map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid key JSON"))?;
|
||||||
|
|
||||||
for signature in signed_key
|
for signature in key
|
||||||
.get("signatures")
|
.get("signatures")
|
||||||
.ok_or(Error::BadRequest(
|
.ok_or(Error::BadRequest(
|
||||||
ErrorKind::InvalidParam,
|
ErrorKind::InvalidParam,
|
||||||
|
@ -205,20 +187,13 @@ pub async fn upload_signatures_route(
|
||||||
))?
|
))?
|
||||||
.to_owned(),
|
.to_owned(),
|
||||||
);
|
);
|
||||||
db.users.sign_key(
|
services()
|
||||||
user_id,
|
.users
|
||||||
key_id,
|
.sign_key(user_id, key_id, signature, sender_user)?;
|
||||||
signature,
|
|
||||||
sender_user,
|
|
||||||
&db.rooms,
|
|
||||||
&db.globals,
|
|
||||||
)?;
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
db.flush()?;
|
|
||||||
|
|
||||||
Ok(upload_signatures::v3::Response {
|
Ok(upload_signatures::v3::Response {
|
||||||
failures: BTreeMap::new(), // TODO: integrate
|
failures: BTreeMap::new(), // TODO: integrate
|
||||||
})
|
})
|
||||||
|
@ -230,15 +205,15 @@ pub async fn upload_signatures_route(
|
||||||
///
|
///
|
||||||
/// - TODO: left users
|
/// - TODO: left users
|
||||||
pub async fn get_key_changes_route(
|
pub async fn get_key_changes_route(
|
||||||
db: DatabaseGuard,
|
body: Ruma<get_key_changes::v3::Request>,
|
||||||
body: Ruma<get_key_changes::v3::IncomingRequest>,
|
|
||||||
) -> Result<get_key_changes::v3::Response> {
|
) -> Result<get_key_changes::v3::Response> {
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
let mut device_list_updates = HashSet::new();
|
let mut device_list_updates = HashSet::new();
|
||||||
|
|
||||||
device_list_updates.extend(
|
device_list_updates.extend(
|
||||||
db.users
|
services()
|
||||||
|
.users
|
||||||
.keys_changed(
|
.keys_changed(
|
||||||
sender_user.as_str(),
|
sender_user.as_str(),
|
||||||
body.from
|
body.from
|
||||||
|
@ -253,11 +228,17 @@ pub async fn get_key_changes_route(
|
||||||
.filter_map(|r| r.ok()),
|
.filter_map(|r| r.ok()),
|
||||||
);
|
);
|
||||||
|
|
||||||
for room_id in db.rooms.rooms_joined(sender_user).filter_map(|r| r.ok()) {
|
for room_id in services()
|
||||||
|
.rooms
|
||||||
|
.state_cache
|
||||||
|
.rooms_joined(sender_user)
|
||||||
|
.filter_map(|r| r.ok())
|
||||||
|
{
|
||||||
device_list_updates.extend(
|
device_list_updates.extend(
|
||||||
db.users
|
services()
|
||||||
|
.users
|
||||||
.keys_changed(
|
.keys_changed(
|
||||||
&room_id.to_string(),
|
room_id.as_ref(),
|
||||||
body.from.parse().map_err(|_| {
|
body.from.parse().map_err(|_| {
|
||||||
Error::BadRequest(ErrorKind::InvalidParam, "Invalid `from`.")
|
Error::BadRequest(ErrorKind::InvalidParam, "Invalid `from`.")
|
||||||
})?,
|
})?,
|
||||||
|
@ -276,9 +257,8 @@ pub async fn get_key_changes_route(
|
||||||
|
|
||||||
pub(crate) async fn get_keys_helper<F: Fn(&UserId) -> bool>(
|
pub(crate) async fn get_keys_helper<F: Fn(&UserId) -> bool>(
|
||||||
sender_user: Option<&UserId>,
|
sender_user: Option<&UserId>,
|
||||||
device_keys_input: &BTreeMap<Box<UserId>, Vec<Box<DeviceId>>>,
|
device_keys_input: &BTreeMap<OwnedUserId, Vec<OwnedDeviceId>>,
|
||||||
allowed_signatures: F,
|
allowed_signatures: F,
|
||||||
db: &Database,
|
|
||||||
) -> Result<get_keys::v3::Response> {
|
) -> Result<get_keys::v3::Response> {
|
||||||
let mut master_keys = BTreeMap::new();
|
let mut master_keys = BTreeMap::new();
|
||||||
let mut self_signing_keys = BTreeMap::new();
|
let mut self_signing_keys = BTreeMap::new();
|
||||||
|
@ -288,9 +268,9 @@ pub(crate) async fn get_keys_helper<F: Fn(&UserId) -> bool>(
|
||||||
let mut get_over_federation = HashMap::new();
|
let mut get_over_federation = HashMap::new();
|
||||||
|
|
||||||
for (user_id, device_ids) in device_keys_input {
|
for (user_id, device_ids) in device_keys_input {
|
||||||
let user_id: &UserId = &**user_id;
|
let user_id: &UserId = user_id;
|
||||||
|
|
||||||
if user_id.server_name() != db.globals.server_name() {
|
if user_id.server_name() != services().globals.server_name() {
|
||||||
get_over_federation
|
get_over_federation
|
||||||
.entry(user_id.server_name())
|
.entry(user_id.server_name())
|
||||||
.or_insert_with(Vec::new)
|
.or_insert_with(Vec::new)
|
||||||
|
@ -300,10 +280,10 @@ pub(crate) async fn get_keys_helper<F: Fn(&UserId) -> bool>(
|
||||||
|
|
||||||
if device_ids.is_empty() {
|
if device_ids.is_empty() {
|
||||||
let mut container = BTreeMap::new();
|
let mut container = BTreeMap::new();
|
||||||
for device_id in db.users.all_device_ids(user_id) {
|
for device_id in services().users.all_device_ids(user_id) {
|
||||||
let device_id = device_id?;
|
let device_id = device_id?;
|
||||||
if let Some(mut keys) = db.users.get_device_keys(user_id, &device_id)? {
|
if let Some(mut keys) = services().users.get_device_keys(user_id, &device_id)? {
|
||||||
let metadata = db
|
let metadata = services()
|
||||||
.users
|
.users
|
||||||
.get_device_metadata(user_id, &device_id)?
|
.get_device_metadata(user_id, &device_id)?
|
||||||
.ok_or_else(|| {
|
.ok_or_else(|| {
|
||||||
|
@ -319,13 +299,14 @@ pub(crate) async fn get_keys_helper<F: Fn(&UserId) -> bool>(
|
||||||
} else {
|
} else {
|
||||||
for device_id in device_ids {
|
for device_id in device_ids {
|
||||||
let mut container = BTreeMap::new();
|
let mut container = BTreeMap::new();
|
||||||
if let Some(mut keys) = db.users.get_device_keys(user_id, device_id)? {
|
if let Some(mut keys) = services().users.get_device_keys(user_id, device_id)? {
|
||||||
let metadata = db.users.get_device_metadata(user_id, device_id)?.ok_or(
|
let metadata = services()
|
||||||
Error::BadRequest(
|
.users
|
||||||
|
.get_device_metadata(user_id, device_id)?
|
||||||
|
.ok_or(Error::BadRequest(
|
||||||
ErrorKind::InvalidParam,
|
ErrorKind::InvalidParam,
|
||||||
"Tried to get keys for nonexistent device.",
|
"Tried to get keys for nonexistent device.",
|
||||||
),
|
))?;
|
||||||
)?;
|
|
||||||
|
|
||||||
add_unsigned_device_display_name(&mut keys, metadata)
|
add_unsigned_device_display_name(&mut keys, metadata)
|
||||||
.map_err(|_| Error::bad_database("invalid device keys in database"))?;
|
.map_err(|_| Error::bad_database("invalid device keys in database"))?;
|
||||||
|
@ -335,17 +316,22 @@ pub(crate) async fn get_keys_helper<F: Fn(&UserId) -> bool>(
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if let Some(master_key) = db.users.get_master_key(user_id, &allowed_signatures)? {
|
if let Some(master_key) =
|
||||||
|
services()
|
||||||
|
.users
|
||||||
|
.get_master_key(sender_user, user_id, &allowed_signatures)?
|
||||||
|
{
|
||||||
master_keys.insert(user_id.to_owned(), master_key);
|
master_keys.insert(user_id.to_owned(), master_key);
|
||||||
}
|
}
|
||||||
if let Some(self_signing_key) = db
|
if let Some(self_signing_key) =
|
||||||
|
services()
|
||||||
.users
|
.users
|
||||||
.get_self_signing_key(user_id, &allowed_signatures)?
|
.get_self_signing_key(sender_user, user_id, &allowed_signatures)?
|
||||||
{
|
{
|
||||||
self_signing_keys.insert(user_id.to_owned(), self_signing_key);
|
self_signing_keys.insert(user_id.to_owned(), self_signing_key);
|
||||||
}
|
}
|
||||||
if Some(user_id) == sender_user {
|
if Some(user_id) == sender_user {
|
||||||
if let Some(user_signing_key) = db.users.get_user_signing_key(user_id)? {
|
if let Some(user_signing_key) = services().users.get_user_signing_key(user_id)? {
|
||||||
user_signing_keys.insert(user_id.to_owned(), user_signing_key);
|
user_signing_keys.insert(user_id.to_owned(), user_signing_key);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -353,36 +339,96 @@ pub(crate) async fn get_keys_helper<F: Fn(&UserId) -> bool>(
|
||||||
|
|
||||||
let mut failures = BTreeMap::new();
|
let mut failures = BTreeMap::new();
|
||||||
|
|
||||||
|
let back_off = |id| match services()
|
||||||
|
.globals
|
||||||
|
.bad_query_ratelimiter
|
||||||
|
.write()
|
||||||
|
.unwrap()
|
||||||
|
.entry(id)
|
||||||
|
{
|
||||||
|
hash_map::Entry::Vacant(e) => {
|
||||||
|
e.insert((Instant::now(), 1));
|
||||||
|
}
|
||||||
|
hash_map::Entry::Occupied(mut e) => *e.get_mut() = (Instant::now(), e.get().1 + 1),
|
||||||
|
};
|
||||||
|
|
||||||
let mut futures: FuturesUnordered<_> = get_over_federation
|
let mut futures: FuturesUnordered<_> = get_over_federation
|
||||||
.into_iter()
|
.into_iter()
|
||||||
.map(|(server, vec)| async move {
|
.map(|(server, vec)| async move {
|
||||||
|
if let Some((time, tries)) = services()
|
||||||
|
.globals
|
||||||
|
.bad_query_ratelimiter
|
||||||
|
.read()
|
||||||
|
.unwrap()
|
||||||
|
.get(server)
|
||||||
|
{
|
||||||
|
// Exponential backoff
|
||||||
|
let mut min_elapsed_duration = Duration::from_secs(30) * (*tries) * (*tries);
|
||||||
|
if min_elapsed_duration > Duration::from_secs(60 * 60 * 24) {
|
||||||
|
min_elapsed_duration = Duration::from_secs(60 * 60 * 24);
|
||||||
|
}
|
||||||
|
|
||||||
|
if time.elapsed() < min_elapsed_duration {
|
||||||
|
debug!("Backing off query from {:?}", server);
|
||||||
|
return (
|
||||||
|
server,
|
||||||
|
Err(Error::BadServerResponse("bad query, still backing off")),
|
||||||
|
);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
let mut device_keys_input_fed = BTreeMap::new();
|
let mut device_keys_input_fed = BTreeMap::new();
|
||||||
for (user_id, keys) in vec {
|
for (user_id, keys) in vec {
|
||||||
device_keys_input_fed.insert(user_id.to_owned(), keys.clone());
|
device_keys_input_fed.insert(user_id.to_owned(), keys.clone());
|
||||||
}
|
}
|
||||||
(
|
(
|
||||||
server,
|
server,
|
||||||
db.sending
|
tokio::time::timeout(
|
||||||
.send_federation_request(
|
Duration::from_secs(25),
|
||||||
&db.globals,
|
services().sending.send_federation_request(
|
||||||
server,
|
server,
|
||||||
federation::keys::get_keys::v1::Request {
|
federation::keys::get_keys::v1::Request {
|
||||||
device_keys: device_keys_input_fed,
|
device_keys: device_keys_input_fed,
|
||||||
},
|
},
|
||||||
|
),
|
||||||
)
|
)
|
||||||
.await,
|
.await
|
||||||
|
.map_err(|_e| Error::BadServerResponse("Query took too long")),
|
||||||
)
|
)
|
||||||
})
|
})
|
||||||
.collect();
|
.collect();
|
||||||
|
|
||||||
while let Some((server, response)) = futures.next().await {
|
while let Some((server, response)) = futures.next().await {
|
||||||
match response {
|
match response {
|
||||||
Ok(response) => {
|
Ok(Ok(response)) => {
|
||||||
master_keys.extend(response.master_keys);
|
for (user, masterkey) in response.master_keys {
|
||||||
|
let (master_key_id, mut master_key) =
|
||||||
|
services().users.parse_master_key(&user, &masterkey)?;
|
||||||
|
|
||||||
|
if let Some(our_master_key) = services().users.get_key(
|
||||||
|
&master_key_id,
|
||||||
|
sender_user,
|
||||||
|
&user,
|
||||||
|
&allowed_signatures,
|
||||||
|
)? {
|
||||||
|
let (_, our_master_key) =
|
||||||
|
services().users.parse_master_key(&user, &our_master_key)?;
|
||||||
|
master_key.signatures.extend(our_master_key.signatures);
|
||||||
|
}
|
||||||
|
let json = serde_json::to_value(master_key).expect("to_value always works");
|
||||||
|
let raw = serde_json::from_value(json).expect("Raw::from_value always works");
|
||||||
|
services().users.add_cross_signing_keys(
|
||||||
|
&user, &raw, &None, &None,
|
||||||
|
false, // Dont notify. A notification would trigger another key request resulting in an endless loop
|
||||||
|
)?;
|
||||||
|
master_keys.insert(user, raw);
|
||||||
|
}
|
||||||
|
|
||||||
self_signing_keys.extend(response.self_signing_keys);
|
self_signing_keys.extend(response.self_signing_keys);
|
||||||
device_keys.extend(response.device_keys);
|
device_keys.extend(response.device_keys);
|
||||||
}
|
}
|
||||||
Err(_e) => {
|
_ => {
|
||||||
|
back_off(server.to_owned());
|
||||||
failures.insert(server.to_string(), json!({}));
|
failures.insert(server.to_string(), json!({}));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -416,15 +462,14 @@ fn add_unsigned_device_display_name(
|
||||||
}
|
}
|
||||||
|
|
||||||
pub(crate) async fn claim_keys_helper(
|
pub(crate) async fn claim_keys_helper(
|
||||||
one_time_keys_input: &BTreeMap<Box<UserId>, BTreeMap<Box<DeviceId>, DeviceKeyAlgorithm>>,
|
one_time_keys_input: &BTreeMap<OwnedUserId, BTreeMap<OwnedDeviceId, DeviceKeyAlgorithm>>,
|
||||||
db: &Database,
|
|
||||||
) -> Result<claim_keys::v3::Response> {
|
) -> Result<claim_keys::v3::Response> {
|
||||||
let mut one_time_keys = BTreeMap::new();
|
let mut one_time_keys = BTreeMap::new();
|
||||||
|
|
||||||
let mut get_over_federation = BTreeMap::new();
|
let mut get_over_federation = BTreeMap::new();
|
||||||
|
|
||||||
for (user_id, map) in one_time_keys_input {
|
for (user_id, map) in one_time_keys_input {
|
||||||
if user_id.server_name() != db.globals.server_name() {
|
if user_id.server_name() != services().globals.server_name() {
|
||||||
get_over_federation
|
get_over_federation
|
||||||
.entry(user_id.server_name())
|
.entry(user_id.server_name())
|
||||||
.or_insert_with(Vec::new)
|
.or_insert_with(Vec::new)
|
||||||
|
@ -434,8 +479,9 @@ pub(crate) async fn claim_keys_helper(
|
||||||
let mut container = BTreeMap::new();
|
let mut container = BTreeMap::new();
|
||||||
for (device_id, key_algorithm) in map {
|
for (device_id, key_algorithm) in map {
|
||||||
if let Some(one_time_keys) =
|
if let Some(one_time_keys) =
|
||||||
db.users
|
services()
|
||||||
.take_one_time_key(user_id, device_id, key_algorithm, &db.globals)?
|
.users
|
||||||
|
.take_one_time_key(user_id, device_id, key_algorithm)?
|
||||||
{
|
{
|
||||||
let mut c = BTreeMap::new();
|
let mut c = BTreeMap::new();
|
||||||
c.insert(one_time_keys.0, one_time_keys.1);
|
c.insert(one_time_keys.0, one_time_keys.1);
|
||||||
|
@ -447,28 +493,38 @@ pub(crate) async fn claim_keys_helper(
|
||||||
|
|
||||||
let mut failures = BTreeMap::new();
|
let mut failures = BTreeMap::new();
|
||||||
|
|
||||||
for (server, vec) in get_over_federation {
|
let mut futures: FuturesUnordered<_> = get_over_federation
|
||||||
|
.into_iter()
|
||||||
|
.map(|(server, vec)| async move {
|
||||||
let mut one_time_keys_input_fed = BTreeMap::new();
|
let mut one_time_keys_input_fed = BTreeMap::new();
|
||||||
for (user_id, keys) in vec {
|
for (user_id, keys) in vec {
|
||||||
one_time_keys_input_fed.insert(user_id.clone(), keys.clone());
|
one_time_keys_input_fed.insert(user_id.clone(), keys.clone());
|
||||||
}
|
}
|
||||||
// Ignore failures
|
(
|
||||||
if let Ok(keys) = db
|
server,
|
||||||
|
services()
|
||||||
.sending
|
.sending
|
||||||
.send_federation_request(
|
.send_federation_request(
|
||||||
&db.globals,
|
|
||||||
server,
|
server,
|
||||||
federation::keys::claim_keys::v1::Request {
|
federation::keys::claim_keys::v1::Request {
|
||||||
one_time_keys: one_time_keys_input_fed,
|
one_time_keys: one_time_keys_input_fed,
|
||||||
},
|
},
|
||||||
)
|
)
|
||||||
.await
|
.await,
|
||||||
{
|
)
|
||||||
|
})
|
||||||
|
.collect();
|
||||||
|
|
||||||
|
while let Some((server, response)) = futures.next().await {
|
||||||
|
match response {
|
||||||
|
Ok(keys) => {
|
||||||
one_time_keys.extend(keys.one_time_keys);
|
one_time_keys.extend(keys.one_time_keys);
|
||||||
} else {
|
}
|
||||||
|
Err(_e) => {
|
||||||
failures.insert(server.to_string(), json!({}));
|
failures.insert(server.to_string(), json!({}));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
}
|
||||||
|
|
||||||
Ok(claim_keys::v3::Response {
|
Ok(claim_keys::v3::Response {
|
||||||
failures,
|
failures,
|
|
@ -1,7 +1,6 @@
|
||||||
use crate::{
|
use std::time::Duration;
|
||||||
database::{media::FileMeta, DatabaseGuard},
|
|
||||||
utils, Error, Result, Ruma,
|
use crate::{service::media::FileMeta, services, utils, Error, Result, Ruma};
|
||||||
};
|
|
||||||
use ruma::api::client::{
|
use ruma::api::client::{
|
||||||
error::ErrorKind,
|
error::ErrorKind,
|
||||||
media::{
|
media::{
|
||||||
|
@ -16,11 +15,10 @@ const MXC_LENGTH: usize = 32;
|
||||||
///
|
///
|
||||||
/// Returns max upload size.
|
/// Returns max upload size.
|
||||||
pub async fn get_media_config_route(
|
pub async fn get_media_config_route(
|
||||||
db: DatabaseGuard,
|
|
||||||
_body: Ruma<get_media_config::v3::Request>,
|
_body: Ruma<get_media_config::v3::Request>,
|
||||||
) -> Result<get_media_config::v3::Response> {
|
) -> Result<get_media_config::v3::Response> {
|
||||||
Ok(get_media_config::v3::Response {
|
Ok(get_media_config::v3::Response {
|
||||||
upload_size: db.globals.max_request_size().into(),
|
upload_size: services().globals.max_request_size().into(),
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -31,62 +29,58 @@ pub async fn get_media_config_route(
|
||||||
/// - Some metadata will be saved in the database
|
/// - Some metadata will be saved in the database
|
||||||
/// - Media will be saved in the media/ directory
|
/// - Media will be saved in the media/ directory
|
||||||
pub async fn create_content_route(
|
pub async fn create_content_route(
|
||||||
db: DatabaseGuard,
|
body: Ruma<create_content::v3::Request>,
|
||||||
body: Ruma<create_content::v3::IncomingRequest>,
|
|
||||||
) -> Result<create_content::v3::Response> {
|
) -> Result<create_content::v3::Response> {
|
||||||
let mxc = format!(
|
let mxc = format!(
|
||||||
"mxc://{}/{}",
|
"mxc://{}/{}",
|
||||||
db.globals.server_name(),
|
services().globals.server_name(),
|
||||||
utils::random_string(MXC_LENGTH)
|
utils::random_string(MXC_LENGTH)
|
||||||
);
|
);
|
||||||
|
|
||||||
db.media
|
services()
|
||||||
|
.media
|
||||||
.create(
|
.create(
|
||||||
mxc.clone(),
|
mxc.clone(),
|
||||||
&db.globals,
|
body.filename
|
||||||
&body
|
|
||||||
.filename
|
|
||||||
.as_ref()
|
.as_ref()
|
||||||
.map(|filename| "inline; filename=".to_owned() + filename)
|
.map(|filename| "inline; filename=".to_owned() + filename)
|
||||||
.as_deref(),
|
.as_deref(),
|
||||||
&body.content_type.as_deref(),
|
body.content_type.as_deref(),
|
||||||
&body.file,
|
&body.file,
|
||||||
)
|
)
|
||||||
.await?;
|
.await?;
|
||||||
|
|
||||||
db.flush()?;
|
|
||||||
|
|
||||||
Ok(create_content::v3::Response {
|
Ok(create_content::v3::Response {
|
||||||
content_uri: mxc.try_into().expect("Invalid mxc:// URI"),
|
content_uri: mxc.into(),
|
||||||
blurhash: None,
|
blurhash: None,
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
pub async fn get_remote_content(
|
pub async fn get_remote_content(
|
||||||
db: &DatabaseGuard,
|
|
||||||
mxc: &str,
|
mxc: &str,
|
||||||
server_name: &ruma::ServerName,
|
server_name: &ruma::ServerName,
|
||||||
media_id: &str,
|
media_id: String,
|
||||||
) -> Result<get_content::v3::Response, Error> {
|
) -> Result<get_content::v3::Response, Error> {
|
||||||
let content_response = db
|
let content_response = services()
|
||||||
.sending
|
.sending
|
||||||
.send_federation_request(
|
.send_federation_request(
|
||||||
&db.globals,
|
|
||||||
server_name,
|
server_name,
|
||||||
get_content::v3::Request {
|
get_content::v3::Request {
|
||||||
allow_remote: false,
|
allow_remote: false,
|
||||||
server_name,
|
server_name: server_name.to_owned(),
|
||||||
media_id,
|
media_id,
|
||||||
|
timeout_ms: Duration::from_secs(20),
|
||||||
|
allow_redirect: false,
|
||||||
},
|
},
|
||||||
)
|
)
|
||||||
.await?;
|
.await?;
|
||||||
|
|
||||||
db.media
|
services()
|
||||||
|
.media
|
||||||
.create(
|
.create(
|
||||||
mxc.to_string(),
|
mxc.to_owned(),
|
||||||
&db.globals,
|
content_response.content_disposition.as_deref(),
|
||||||
&content_response.content_disposition.as_deref(),
|
content_response.content_type.as_deref(),
|
||||||
&content_response.content_type.as_deref(),
|
|
||||||
&content_response.file,
|
&content_response.file,
|
||||||
)
|
)
|
||||||
.await?;
|
.await?;
|
||||||
|
@ -100,8 +94,7 @@ pub async fn get_remote_content(
|
||||||
///
|
///
|
||||||
/// - Only allows federation if `allow_remote` is true
|
/// - Only allows federation if `allow_remote` is true
|
||||||
pub async fn get_content_route(
|
pub async fn get_content_route(
|
||||||
db: DatabaseGuard,
|
body: Ruma<get_content::v3::Request>,
|
||||||
body: Ruma<get_content::v3::IncomingRequest>,
|
|
||||||
) -> Result<get_content::v3::Response> {
|
) -> Result<get_content::v3::Response> {
|
||||||
let mxc = format!("mxc://{}/{}", body.server_name, body.media_id);
|
let mxc = format!("mxc://{}/{}", body.server_name, body.media_id);
|
||||||
|
|
||||||
|
@ -109,16 +102,17 @@ pub async fn get_content_route(
|
||||||
content_disposition,
|
content_disposition,
|
||||||
content_type,
|
content_type,
|
||||||
file,
|
file,
|
||||||
}) = db.media.get(&db.globals, &mxc).await?
|
}) = services().media.get(mxc.clone()).await?
|
||||||
{
|
{
|
||||||
Ok(get_content::v3::Response {
|
Ok(get_content::v3::Response {
|
||||||
file,
|
file,
|
||||||
content_type,
|
content_type,
|
||||||
content_disposition,
|
content_disposition,
|
||||||
|
cross_origin_resource_policy: Some("cross-origin".to_owned()),
|
||||||
})
|
})
|
||||||
} else if &*body.server_name != db.globals.server_name() && body.allow_remote {
|
} else if &*body.server_name != services().globals.server_name() && body.allow_remote {
|
||||||
let remote_content_response =
|
let remote_content_response =
|
||||||
get_remote_content(&db, &mxc, &body.server_name, &body.media_id).await?;
|
get_remote_content(&mxc, &body.server_name, body.media_id.clone()).await?;
|
||||||
Ok(remote_content_response)
|
Ok(remote_content_response)
|
||||||
} else {
|
} else {
|
||||||
Err(Error::BadRequest(ErrorKind::NotFound, "Media not found."))
|
Err(Error::BadRequest(ErrorKind::NotFound, "Media not found."))
|
||||||
|
@ -131,8 +125,7 @@ pub async fn get_content_route(
|
||||||
///
|
///
|
||||||
/// - Only allows federation if `allow_remote` is true
|
/// - Only allows federation if `allow_remote` is true
|
||||||
pub async fn get_content_as_filename_route(
|
pub async fn get_content_as_filename_route(
|
||||||
db: DatabaseGuard,
|
body: Ruma<get_content_as_filename::v3::Request>,
|
||||||
body: Ruma<get_content_as_filename::v3::IncomingRequest>,
|
|
||||||
) -> Result<get_content_as_filename::v3::Response> {
|
) -> Result<get_content_as_filename::v3::Response> {
|
||||||
let mxc = format!("mxc://{}/{}", body.server_name, body.media_id);
|
let mxc = format!("mxc://{}/{}", body.server_name, body.media_id);
|
||||||
|
|
||||||
|
@ -140,21 +133,23 @@ pub async fn get_content_as_filename_route(
|
||||||
content_disposition: _,
|
content_disposition: _,
|
||||||
content_type,
|
content_type,
|
||||||
file,
|
file,
|
||||||
}) = db.media.get(&db.globals, &mxc).await?
|
}) = services().media.get(mxc.clone()).await?
|
||||||
{
|
{
|
||||||
Ok(get_content_as_filename::v3::Response {
|
Ok(get_content_as_filename::v3::Response {
|
||||||
file,
|
file,
|
||||||
content_type,
|
content_type,
|
||||||
content_disposition: Some(format!("inline; filename={}", body.filename)),
|
content_disposition: Some(format!("inline; filename={}", body.filename)),
|
||||||
|
cross_origin_resource_policy: Some("cross-origin".to_owned()),
|
||||||
})
|
})
|
||||||
} else if &*body.server_name != db.globals.server_name() && body.allow_remote {
|
} else if &*body.server_name != services().globals.server_name() && body.allow_remote {
|
||||||
let remote_content_response =
|
let remote_content_response =
|
||||||
get_remote_content(&db, &mxc, &body.server_name, &body.media_id).await?;
|
get_remote_content(&mxc, &body.server_name, body.media_id.clone()).await?;
|
||||||
|
|
||||||
Ok(get_content_as_filename::v3::Response {
|
Ok(get_content_as_filename::v3::Response {
|
||||||
content_disposition: Some(format!("inline: filename={}", body.filename)),
|
content_disposition: Some(format!("inline: filename={}", body.filename)),
|
||||||
content_type: remote_content_response.content_type,
|
content_type: remote_content_response.content_type,
|
||||||
file: remote_content_response.file,
|
file: remote_content_response.file,
|
||||||
|
cross_origin_resource_policy: Some("cross-origin".to_owned()),
|
||||||
})
|
})
|
||||||
} else {
|
} else {
|
||||||
Err(Error::BadRequest(ErrorKind::NotFound, "Media not found."))
|
Err(Error::BadRequest(ErrorKind::NotFound, "Media not found."))
|
||||||
|
@ -167,18 +162,16 @@ pub async fn get_content_as_filename_route(
|
||||||
///
|
///
|
||||||
/// - Only allows federation if `allow_remote` is true
|
/// - Only allows federation if `allow_remote` is true
|
||||||
pub async fn get_content_thumbnail_route(
|
pub async fn get_content_thumbnail_route(
|
||||||
db: DatabaseGuard,
|
body: Ruma<get_content_thumbnail::v3::Request>,
|
||||||
body: Ruma<get_content_thumbnail::v3::IncomingRequest>,
|
|
||||||
) -> Result<get_content_thumbnail::v3::Response> {
|
) -> Result<get_content_thumbnail::v3::Response> {
|
||||||
let mxc = format!("mxc://{}/{}", body.server_name, body.media_id);
|
let mxc = format!("mxc://{}/{}", body.server_name, body.media_id);
|
||||||
|
|
||||||
if let Some(FileMeta {
|
if let Some(FileMeta {
|
||||||
content_type, file, ..
|
content_type, file, ..
|
||||||
}) = db
|
}) = services()
|
||||||
.media
|
.media
|
||||||
.get_thumbnail(
|
.get_thumbnail(
|
||||||
&mxc,
|
mxc.clone(),
|
||||||
&db.globals,
|
|
||||||
body.width
|
body.width
|
||||||
.try_into()
|
.try_into()
|
||||||
.map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Width is invalid."))?,
|
.map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Width is invalid."))?,
|
||||||
|
@ -188,30 +181,35 @@ pub async fn get_content_thumbnail_route(
|
||||||
)
|
)
|
||||||
.await?
|
.await?
|
||||||
{
|
{
|
||||||
Ok(get_content_thumbnail::v3::Response { file, content_type })
|
Ok(get_content_thumbnail::v3::Response {
|
||||||
} else if &*body.server_name != db.globals.server_name() && body.allow_remote {
|
file,
|
||||||
let get_thumbnail_response = db
|
content_type,
|
||||||
|
cross_origin_resource_policy: Some("cross-origin".to_owned()),
|
||||||
|
})
|
||||||
|
} else if &*body.server_name != services().globals.server_name() && body.allow_remote {
|
||||||
|
let get_thumbnail_response = services()
|
||||||
.sending
|
.sending
|
||||||
.send_federation_request(
|
.send_federation_request(
|
||||||
&db.globals,
|
|
||||||
&body.server_name,
|
&body.server_name,
|
||||||
get_content_thumbnail::v3::Request {
|
get_content_thumbnail::v3::Request {
|
||||||
allow_remote: false,
|
allow_remote: false,
|
||||||
height: body.height,
|
height: body.height,
|
||||||
width: body.width,
|
width: body.width,
|
||||||
method: body.method.clone(),
|
method: body.method.clone(),
|
||||||
server_name: &body.server_name,
|
server_name: body.server_name.clone(),
|
||||||
media_id: &body.media_id,
|
media_id: body.media_id.clone(),
|
||||||
|
timeout_ms: Duration::from_secs(20),
|
||||||
|
allow_redirect: false,
|
||||||
},
|
},
|
||||||
)
|
)
|
||||||
.await?;
|
.await?;
|
||||||
|
|
||||||
db.media
|
services()
|
||||||
|
.media
|
||||||
.upload_thumbnail(
|
.upload_thumbnail(
|
||||||
mxc,
|
mxc,
|
||||||
&db.globals,
|
None,
|
||||||
&None,
|
get_thumbnail_response.content_type.as_deref(),
|
||||||
&get_thumbnail_response.content_type,
|
|
||||||
body.width.try_into().expect("all UInts are valid u32s"),
|
body.width.try_into().expect("all UInts are valid u32s"),
|
||||||
body.height.try_into().expect("all UInts are valid u32s"),
|
body.height.try_into().expect("all UInts are valid u32s"),
|
||||||
&get_thumbnail_response.file,
|
&get_thumbnail_response.file,
|
1579
src/api/client_server/membership.rs
Normal file
1579
src/api/client_server/membership.rs
Normal file
File diff suppressed because it is too large
Load diff
|
@ -1,10 +1,13 @@
|
||||||
use crate::{database::DatabaseGuard, pdu::PduBuilder, utils, Error, Result, Ruma};
|
use crate::{
|
||||||
|
service::{pdu::PduBuilder, rooms::timeline::PduCount},
|
||||||
|
services, utils, Error, Result, Ruma,
|
||||||
|
};
|
||||||
use ruma::{
|
use ruma::{
|
||||||
api::client::{
|
api::client::{
|
||||||
error::ErrorKind,
|
error::ErrorKind,
|
||||||
message::{get_message_events, send_message_event},
|
message::{get_message_events, send_message_event},
|
||||||
},
|
},
|
||||||
events::{RoomEventType, StateEventType},
|
events::{StateEventType, TimelineEventType},
|
||||||
};
|
};
|
||||||
use std::{
|
use std::{
|
||||||
collections::{BTreeMap, HashSet},
|
collections::{BTreeMap, HashSet},
|
||||||
|
@ -19,14 +22,14 @@ use std::{
|
||||||
/// - The only requirement for the content is that it has to be valid json
|
/// - The only requirement for the content is that it has to be valid json
|
||||||
/// - Tries to send the event into the room, auth rules will determine if it is allowed
|
/// - Tries to send the event into the room, auth rules will determine if it is allowed
|
||||||
pub async fn send_message_event_route(
|
pub async fn send_message_event_route(
|
||||||
db: DatabaseGuard,
|
body: Ruma<send_message_event::v3::Request>,
|
||||||
body: Ruma<send_message_event::v3::IncomingRequest>,
|
|
||||||
) -> Result<send_message_event::v3::Response> {
|
) -> Result<send_message_event::v3::Response> {
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
let sender_device = body.sender_device.as_deref();
|
let sender_device = body.sender_device.as_deref();
|
||||||
|
|
||||||
let mutex_state = Arc::clone(
|
let mutex_state = Arc::clone(
|
||||||
db.globals
|
services()
|
||||||
|
.globals
|
||||||
.roomid_mutex_state
|
.roomid_mutex_state
|
||||||
.write()
|
.write()
|
||||||
.unwrap()
|
.unwrap()
|
||||||
|
@ -36,8 +39,8 @@ pub async fn send_message_event_route(
|
||||||
let state_lock = mutex_state.lock().await;
|
let state_lock = mutex_state.lock().await;
|
||||||
|
|
||||||
// Forbid m.room.encrypted if encryption is disabled
|
// Forbid m.room.encrypted if encryption is disabled
|
||||||
if RoomEventType::RoomEncrypted == body.event_type.to_string().into()
|
if TimelineEventType::RoomEncrypted == body.event_type.to_string().into()
|
||||||
&& !db.globals.allow_encryption()
|
&& !services().globals.allow_encryption()
|
||||||
{
|
{
|
||||||
return Err(Error::BadRequest(
|
return Err(Error::BadRequest(
|
||||||
ErrorKind::Forbidden,
|
ErrorKind::Forbidden,
|
||||||
|
@ -47,7 +50,8 @@ pub async fn send_message_event_route(
|
||||||
|
|
||||||
// Check if this is a new transaction id
|
// Check if this is a new transaction id
|
||||||
if let Some(response) =
|
if let Some(response) =
|
||||||
db.transaction_ids
|
services()
|
||||||
|
.transaction_ids
|
||||||
.existing_txnid(sender_user, sender_device, &body.txn_id)?
|
.existing_txnid(sender_user, sender_device, &body.txn_id)?
|
||||||
{
|
{
|
||||||
// The client might have sent a txnid of the /sendToDevice endpoint
|
// The client might have sent a txnid of the /sendToDevice endpoint
|
||||||
|
@ -69,7 +73,7 @@ pub async fn send_message_event_route(
|
||||||
let mut unsigned = BTreeMap::new();
|
let mut unsigned = BTreeMap::new();
|
||||||
unsigned.insert("transaction_id".to_owned(), body.txn_id.to_string().into());
|
unsigned.insert("transaction_id".to_owned(), body.txn_id.to_string().into());
|
||||||
|
|
||||||
let event_id = db.rooms.build_and_append_pdu(
|
let event_id = services().rooms.timeline.build_and_append_pdu(
|
||||||
PduBuilder {
|
PduBuilder {
|
||||||
event_type: body.event_type.to_string().into(),
|
event_type: body.event_type.to_string().into(),
|
||||||
content: serde_json::from_str(body.body.body.json().get())
|
content: serde_json::from_str(body.body.body.json().get())
|
||||||
|
@ -80,11 +84,10 @@ pub async fn send_message_event_route(
|
||||||
},
|
},
|
||||||
sender_user,
|
sender_user,
|
||||||
&body.room_id,
|
&body.room_id,
|
||||||
&db,
|
|
||||||
&state_lock,
|
&state_lock,
|
||||||
)?;
|
)?;
|
||||||
|
|
||||||
db.transaction_ids.add_txnid(
|
services().transaction_ids.add_txnid(
|
||||||
sender_user,
|
sender_user,
|
||||||
sender_device,
|
sender_device,
|
||||||
&body.txn_id,
|
&body.txn_id,
|
||||||
|
@ -93,8 +96,6 @@ pub async fn send_message_event_route(
|
||||||
|
|
||||||
drop(state_lock);
|
drop(state_lock);
|
||||||
|
|
||||||
db.flush()?;
|
|
||||||
|
|
||||||
Ok(send_message_event::v3::Response::new(
|
Ok(send_message_event::v3::Response::new(
|
||||||
(*event_id).to_owned(),
|
(*event_id).to_owned(),
|
||||||
))
|
))
|
||||||
|
@ -107,37 +108,32 @@ pub async fn send_message_event_route(
|
||||||
/// - Only works if the user is joined (TODO: always allow, but only show events where the user was
|
/// - Only works if the user is joined (TODO: always allow, but only show events where the user was
|
||||||
/// joined, depending on history_visibility)
|
/// joined, depending on history_visibility)
|
||||||
pub async fn get_message_events_route(
|
pub async fn get_message_events_route(
|
||||||
db: DatabaseGuard,
|
body: Ruma<get_message_events::v3::Request>,
|
||||||
body: Ruma<get_message_events::v3::IncomingRequest>,
|
|
||||||
) -> Result<get_message_events::v3::Response> {
|
) -> Result<get_message_events::v3::Response> {
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
let sender_device = body.sender_device.as_ref().expect("user is authenticated");
|
let sender_device = body.sender_device.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
if !db.rooms.is_joined(sender_user, &body.room_id)? {
|
|
||||||
return Err(Error::BadRequest(
|
|
||||||
ErrorKind::Forbidden,
|
|
||||||
"You don't have permission to view this room.",
|
|
||||||
));
|
|
||||||
}
|
|
||||||
|
|
||||||
let from = match body.from.clone() {
|
let from = match body.from.clone() {
|
||||||
Some(from) => from
|
Some(from) => PduCount::try_from_string(&from)?,
|
||||||
.parse()
|
|
||||||
.map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid `from` value."))?,
|
|
||||||
|
|
||||||
None => match body.dir {
|
None => match body.dir {
|
||||||
get_message_events::v3::Direction::Forward => 0,
|
ruma::api::Direction::Forward => PduCount::min(),
|
||||||
get_message_events::v3::Direction::Backward => u64::MAX,
|
ruma::api::Direction::Backward => PduCount::max(),
|
||||||
},
|
},
|
||||||
};
|
};
|
||||||
|
|
||||||
let to = body.to.as_ref().map(|t| t.parse());
|
let to = body
|
||||||
|
.to
|
||||||
|
.as_ref()
|
||||||
|
.and_then(|t| PduCount::try_from_string(t).ok());
|
||||||
|
|
||||||
db.rooms
|
services().rooms.lazy_loading.lazy_load_confirm_delivery(
|
||||||
.lazy_load_confirm_delivery(sender_user, sender_device, &body.room_id, from)?;
|
sender_user,
|
||||||
|
sender_device,
|
||||||
|
&body.room_id,
|
||||||
|
from,
|
||||||
|
)?;
|
||||||
|
|
||||||
// Use limit or else 10
|
let limit = u64::from(body.limit).min(100) as usize;
|
||||||
let limit = body.limit.try_into().map_or(10_usize, |l: u32| l as usize);
|
|
||||||
|
|
||||||
let next_token;
|
let next_token;
|
||||||
|
|
||||||
|
@ -146,23 +142,28 @@ pub async fn get_message_events_route(
|
||||||
let mut lazy_loaded = HashSet::new();
|
let mut lazy_loaded = HashSet::new();
|
||||||
|
|
||||||
match body.dir {
|
match body.dir {
|
||||||
get_message_events::v3::Direction::Forward => {
|
ruma::api::Direction::Forward => {
|
||||||
let events_after: Vec<_> = db
|
let events_after: Vec<_> = services()
|
||||||
.rooms
|
.rooms
|
||||||
|
.timeline
|
||||||
.pdus_after(sender_user, &body.room_id, from)?
|
.pdus_after(sender_user, &body.room_id, from)?
|
||||||
.take(limit)
|
.take(limit)
|
||||||
.filter_map(|r| r.ok()) // Filter out buggy events
|
.filter_map(|r| r.ok()) // Filter out buggy events
|
||||||
.filter_map(|(pdu_id, pdu)| {
|
.filter(|(_, pdu)| {
|
||||||
db.rooms
|
services()
|
||||||
.pdu_count(&pdu_id)
|
.rooms
|
||||||
.map(|pdu_count| (pdu_count, pdu))
|
.state_accessor
|
||||||
.ok()
|
.user_can_see_event(sender_user, &body.room_id, &pdu.event_id)
|
||||||
|
.unwrap_or(false)
|
||||||
})
|
})
|
||||||
.take_while(|&(k, _)| Some(Ok(k)) != to) // Stop at `to`
|
.take_while(|&(k, _)| Some(k) != to) // Stop at `to`
|
||||||
.collect();
|
.collect();
|
||||||
|
|
||||||
for (_, event) in &events_after {
|
for (_, event) in &events_after {
|
||||||
if !db.rooms.lazy_load_was_sent_before(
|
/* TODO: Remove this when these are resolved:
|
||||||
|
* https://github.com/vector-im/element-android/issues/3417
|
||||||
|
* https://github.com/vector-im/element-web/issues/21034
|
||||||
|
if !services().rooms.lazy_loading.lazy_load_was_sent_before(
|
||||||
sender_user,
|
sender_user,
|
||||||
sender_device,
|
sender_device,
|
||||||
&body.room_id,
|
&body.room_id,
|
||||||
|
@ -170,6 +171,8 @@ pub async fn get_message_events_route(
|
||||||
)? {
|
)? {
|
||||||
lazy_loaded.insert(event.sender.clone());
|
lazy_loaded.insert(event.sender.clone());
|
||||||
}
|
}
|
||||||
|
*/
|
||||||
|
lazy_loaded.insert(event.sender.clone());
|
||||||
}
|
}
|
||||||
|
|
||||||
next_token = events_after.last().map(|(count, _)| count).copied();
|
next_token = events_after.last().map(|(count, _)| count).copied();
|
||||||
|
@ -179,27 +182,37 @@ pub async fn get_message_events_route(
|
||||||
.map(|(_, pdu)| pdu.to_room_event())
|
.map(|(_, pdu)| pdu.to_room_event())
|
||||||
.collect();
|
.collect();
|
||||||
|
|
||||||
resp.start = from.to_string();
|
resp.start = from.stringify();
|
||||||
resp.end = next_token.map(|count| count.to_string());
|
resp.end = next_token.map(|count| count.stringify());
|
||||||
resp.chunk = events_after;
|
resp.chunk = events_after;
|
||||||
}
|
}
|
||||||
get_message_events::v3::Direction::Backward => {
|
ruma::api::Direction::Backward => {
|
||||||
let events_before: Vec<_> = db
|
services()
|
||||||
.rooms
|
.rooms
|
||||||
|
.timeline
|
||||||
|
.backfill_if_required(&body.room_id, from)
|
||||||
|
.await?;
|
||||||
|
let events_before: Vec<_> = services()
|
||||||
|
.rooms
|
||||||
|
.timeline
|
||||||
.pdus_until(sender_user, &body.room_id, from)?
|
.pdus_until(sender_user, &body.room_id, from)?
|
||||||
.take(limit)
|
.take(limit)
|
||||||
.filter_map(|r| r.ok()) // Filter out buggy events
|
.filter_map(|r| r.ok()) // Filter out buggy events
|
||||||
.filter_map(|(pdu_id, pdu)| {
|
.filter(|(_, pdu)| {
|
||||||
db.rooms
|
services()
|
||||||
.pdu_count(&pdu_id)
|
.rooms
|
||||||
.map(|pdu_count| (pdu_count, pdu))
|
.state_accessor
|
||||||
.ok()
|
.user_can_see_event(sender_user, &body.room_id, &pdu.event_id)
|
||||||
|
.unwrap_or(false)
|
||||||
})
|
})
|
||||||
.take_while(|&(k, _)| Some(Ok(k)) != to) // Stop at `to`
|
.take_while(|&(k, _)| Some(k) != to) // Stop at `to`
|
||||||
.collect();
|
.collect();
|
||||||
|
|
||||||
for (_, event) in &events_before {
|
for (_, event) in &events_before {
|
||||||
if !db.rooms.lazy_load_was_sent_before(
|
/* TODO: Remove this when these are resolved:
|
||||||
|
* https://github.com/vector-im/element-android/issues/3417
|
||||||
|
* https://github.com/vector-im/element-web/issues/21034
|
||||||
|
if !services().rooms.lazy_loading.lazy_load_was_sent_before(
|
||||||
sender_user,
|
sender_user,
|
||||||
sender_device,
|
sender_device,
|
||||||
&body.room_id,
|
&body.room_id,
|
||||||
|
@ -207,6 +220,8 @@ pub async fn get_message_events_route(
|
||||||
)? {
|
)? {
|
||||||
lazy_loaded.insert(event.sender.clone());
|
lazy_loaded.insert(event.sender.clone());
|
||||||
}
|
}
|
||||||
|
*/
|
||||||
|
lazy_loaded.insert(event.sender.clone());
|
||||||
}
|
}
|
||||||
|
|
||||||
next_token = events_before.last().map(|(count, _)| count).copied();
|
next_token = events_before.last().map(|(count, _)| count).copied();
|
||||||
|
@ -216,24 +231,27 @@ pub async fn get_message_events_route(
|
||||||
.map(|(_, pdu)| pdu.to_room_event())
|
.map(|(_, pdu)| pdu.to_room_event())
|
||||||
.collect();
|
.collect();
|
||||||
|
|
||||||
resp.start = from.to_string();
|
resp.start = from.stringify();
|
||||||
resp.end = next_token.map(|count| count.to_string());
|
resp.end = next_token.map(|count| count.stringify());
|
||||||
resp.chunk = events_before;
|
resp.chunk = events_before;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
resp.state = Vec::new();
|
resp.state = Vec::new();
|
||||||
for ll_id in &lazy_loaded {
|
for ll_id in &lazy_loaded {
|
||||||
if let Some(member_event) =
|
if let Some(member_event) = services().rooms.state_accessor.room_state_get(
|
||||||
db.rooms
|
&body.room_id,
|
||||||
.room_state_get(&body.room_id, &StateEventType::RoomMember, ll_id.as_str())?
|
&StateEventType::RoomMember,
|
||||||
{
|
ll_id.as_str(),
|
||||||
|
)? {
|
||||||
resp.state.push(member_event.to_state_event());
|
resp.state.push(member_event.to_state_event());
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// TODO: enable again when we are sure clients can handle it
|
||||||
|
/*
|
||||||
if let Some(next_token) = next_token {
|
if let Some(next_token) = next_token {
|
||||||
db.rooms.lazy_load_mark_sent(
|
services().rooms.lazy_loading.lazy_load_mark_sent(
|
||||||
sender_user,
|
sender_user,
|
||||||
sender_device,
|
sender_device,
|
||||||
&body.room_id,
|
&body.room_id,
|
||||||
|
@ -241,6 +259,7 @@ pub async fn get_message_events_route(
|
||||||
next_token,
|
next_token,
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
|
*/
|
||||||
|
|
||||||
Ok(resp)
|
Ok(resp)
|
||||||
}
|
}
|
|
@ -16,14 +16,17 @@ mod profile;
|
||||||
mod push;
|
mod push;
|
||||||
mod read_marker;
|
mod read_marker;
|
||||||
mod redact;
|
mod redact;
|
||||||
|
mod relations;
|
||||||
mod report;
|
mod report;
|
||||||
mod room;
|
mod room;
|
||||||
mod search;
|
mod search;
|
||||||
mod session;
|
mod session;
|
||||||
|
mod space;
|
||||||
mod state;
|
mod state;
|
||||||
mod sync;
|
mod sync;
|
||||||
mod tag;
|
mod tag;
|
||||||
mod thirdparty;
|
mod thirdparty;
|
||||||
|
mod threads;
|
||||||
mod to_device;
|
mod to_device;
|
||||||
mod typing;
|
mod typing;
|
||||||
mod unversioned;
|
mod unversioned;
|
||||||
|
@ -48,14 +51,17 @@ pub use profile::*;
|
||||||
pub use push::*;
|
pub use push::*;
|
||||||
pub use read_marker::*;
|
pub use read_marker::*;
|
||||||
pub use redact::*;
|
pub use redact::*;
|
||||||
|
pub use relations::*;
|
||||||
pub use report::*;
|
pub use report::*;
|
||||||
pub use room::*;
|
pub use room::*;
|
||||||
pub use search::*;
|
pub use search::*;
|
||||||
pub use session::*;
|
pub use session::*;
|
||||||
|
pub use space::*;
|
||||||
pub use state::*;
|
pub use state::*;
|
||||||
pub use sync::*;
|
pub use sync::*;
|
||||||
pub use tag::*;
|
pub use tag::*;
|
||||||
pub use thirdparty::*;
|
pub use thirdparty::*;
|
||||||
|
pub use threads::*;
|
||||||
pub use to_device::*;
|
pub use to_device::*;
|
||||||
pub use typing::*;
|
pub use typing::*;
|
||||||
pub use unversioned::*;
|
pub use unversioned::*;
|
||||||
|
@ -63,6 +69,6 @@ pub use user_directory::*;
|
||||||
pub use voip::*;
|
pub use voip::*;
|
||||||
|
|
||||||
pub const DEVICE_ID_LENGTH: usize = 10;
|
pub const DEVICE_ID_LENGTH: usize = 10;
|
||||||
pub const TOKEN_LENGTH: usize = 256;
|
pub const TOKEN_LENGTH: usize = 32;
|
||||||
pub const SESSION_ID_LENGTH: usize = 256;
|
pub const SESSION_ID_LENGTH: usize = 32;
|
||||||
pub const AUTO_GEN_PASSWORD_LENGTH: usize = 15;
|
pub const AUTO_GEN_PASSWORD_LENGTH: usize = 15;
|
|
@ -1,27 +1,29 @@
|
||||||
use crate::{database::DatabaseGuard, utils, Result, Ruma};
|
use crate::{services, utils, Error, Result, Ruma};
|
||||||
use ruma::api::client::presence::{get_presence, set_presence};
|
use ruma::api::client::{
|
||||||
|
error::ErrorKind,
|
||||||
|
presence::{get_presence, set_presence},
|
||||||
|
};
|
||||||
use std::time::Duration;
|
use std::time::Duration;
|
||||||
|
|
||||||
/// # `PUT /_matrix/client/r0/presence/{userId}/status`
|
/// # `PUT /_matrix/client/r0/presence/{userId}/status`
|
||||||
///
|
///
|
||||||
/// Sets the presence state of the sender user.
|
/// Sets the presence state of the sender user.
|
||||||
pub async fn set_presence_route(
|
pub async fn set_presence_route(
|
||||||
db: DatabaseGuard,
|
body: Ruma<set_presence::v3::Request>,
|
||||||
body: Ruma<set_presence::v3::IncomingRequest>,
|
|
||||||
) -> Result<set_presence::v3::Response> {
|
) -> Result<set_presence::v3::Response> {
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
for room_id in db.rooms.rooms_joined(sender_user) {
|
for room_id in services().rooms.state_cache.rooms_joined(sender_user) {
|
||||||
let room_id = room_id?;
|
let room_id = room_id?;
|
||||||
|
|
||||||
db.rooms.edus.update_presence(
|
services().rooms.edus.presence.update_presence(
|
||||||
sender_user,
|
sender_user,
|
||||||
&room_id,
|
&room_id,
|
||||||
ruma::events::presence::PresenceEvent {
|
ruma::events::presence::PresenceEvent {
|
||||||
content: ruma::events::presence::PresenceEventContent {
|
content: ruma::events::presence::PresenceEventContent {
|
||||||
avatar_url: db.users.avatar_url(sender_user)?,
|
avatar_url: services().users.avatar_url(sender_user)?,
|
||||||
currently_active: None,
|
currently_active: None,
|
||||||
displayname: db.users.displayname(sender_user)?,
|
displayname: services().users.displayname(sender_user)?,
|
||||||
last_active_ago: Some(
|
last_active_ago: Some(
|
||||||
utils::millis_since_unix_epoch()
|
utils::millis_since_unix_epoch()
|
||||||
.try_into()
|
.try_into()
|
||||||
|
@ -32,12 +34,9 @@ pub async fn set_presence_route(
|
||||||
},
|
},
|
||||||
sender: sender_user.clone(),
|
sender: sender_user.clone(),
|
||||||
},
|
},
|
||||||
&db.globals,
|
|
||||||
)?;
|
)?;
|
||||||
}
|
}
|
||||||
|
|
||||||
db.flush()?;
|
|
||||||
|
|
||||||
Ok(set_presence::v3::Response {})
|
Ok(set_presence::v3::Response {})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -47,22 +46,23 @@ pub async fn set_presence_route(
|
||||||
///
|
///
|
||||||
/// - Only works if you share a room with the user
|
/// - Only works if you share a room with the user
|
||||||
pub async fn get_presence_route(
|
pub async fn get_presence_route(
|
||||||
db: DatabaseGuard,
|
body: Ruma<get_presence::v3::Request>,
|
||||||
body: Ruma<get_presence::v3::IncomingRequest>,
|
|
||||||
) -> Result<get_presence::v3::Response> {
|
) -> Result<get_presence::v3::Response> {
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
let mut presence_event = None;
|
let mut presence_event = None;
|
||||||
|
|
||||||
for room_id in db
|
for room_id in services()
|
||||||
.rooms
|
.rooms
|
||||||
|
.user
|
||||||
.get_shared_rooms(vec![sender_user.clone(), body.user_id.clone()])?
|
.get_shared_rooms(vec![sender_user.clone(), body.user_id.clone()])?
|
||||||
{
|
{
|
||||||
let room_id = room_id?;
|
let room_id = room_id?;
|
||||||
|
|
||||||
if let Some(presence) = db
|
if let Some(presence) = services()
|
||||||
.rooms
|
.rooms
|
||||||
.edus
|
.edus
|
||||||
|
.presence
|
||||||
.get_last_presence_event(sender_user, &room_id)?
|
.get_last_presence_event(sender_user, &room_id)?
|
||||||
{
|
{
|
||||||
presence_event = Some(presence);
|
presence_event = Some(presence);
|
||||||
|
@ -82,6 +82,9 @@ pub async fn get_presence_route(
|
||||||
presence: presence.content.presence,
|
presence: presence.content.presence,
|
||||||
})
|
})
|
||||||
} else {
|
} else {
|
||||||
todo!();
|
Err(Error::BadRequest(
|
||||||
|
ErrorKind::NotFound,
|
||||||
|
"Presence state for this user was not found",
|
||||||
|
))
|
||||||
}
|
}
|
||||||
}
|
}
|
|
@ -1,4 +1,4 @@
|
||||||
use crate::{database::DatabaseGuard, pdu::PduBuilder, utils, Error, Result, Ruma};
|
use crate::{service::pdu::PduBuilder, services, utils, Error, Result, Ruma};
|
||||||
use ruma::{
|
use ruma::{
|
||||||
api::{
|
api::{
|
||||||
client::{
|
client::{
|
||||||
|
@ -9,7 +9,7 @@ use ruma::{
|
||||||
},
|
},
|
||||||
federation::{self, query::get_profile_information::v1::ProfileField},
|
federation::{self, query::get_profile_information::v1::ProfileField},
|
||||||
},
|
},
|
||||||
events::{room::member::RoomMemberEventContent, RoomEventType, StateEventType},
|
events::{room::member::RoomMemberEventContent, StateEventType, TimelineEventType},
|
||||||
};
|
};
|
||||||
use serde_json::value::to_raw_value;
|
use serde_json::value::to_raw_value;
|
||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
|
@ -20,27 +20,30 @@ use std::sync::Arc;
|
||||||
///
|
///
|
||||||
/// - Also makes sure other users receive the update using presence EDUs
|
/// - Also makes sure other users receive the update using presence EDUs
|
||||||
pub async fn set_displayname_route(
|
pub async fn set_displayname_route(
|
||||||
db: DatabaseGuard,
|
body: Ruma<set_display_name::v3::Request>,
|
||||||
body: Ruma<set_display_name::v3::IncomingRequest>,
|
|
||||||
) -> Result<set_display_name::v3::Response> {
|
) -> Result<set_display_name::v3::Response> {
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
db.users
|
services()
|
||||||
|
.users
|
||||||
.set_displayname(sender_user, body.displayname.clone())?;
|
.set_displayname(sender_user, body.displayname.clone())?;
|
||||||
|
|
||||||
// Send a new membership event and presence update into all joined rooms
|
// Send a new membership event and presence update into all joined rooms
|
||||||
let all_rooms_joined: Vec<_> = db
|
let all_rooms_joined: Vec<_> = services()
|
||||||
.rooms
|
.rooms
|
||||||
|
.state_cache
|
||||||
.rooms_joined(sender_user)
|
.rooms_joined(sender_user)
|
||||||
.filter_map(|r| r.ok())
|
.filter_map(|r| r.ok())
|
||||||
.map(|room_id| {
|
.map(|room_id| {
|
||||||
Ok::<_, Error>((
|
Ok::<_, Error>((
|
||||||
PduBuilder {
|
PduBuilder {
|
||||||
event_type: RoomEventType::RoomMember,
|
event_type: TimelineEventType::RoomMember,
|
||||||
content: to_raw_value(&RoomMemberEventContent {
|
content: to_raw_value(&RoomMemberEventContent {
|
||||||
displayname: body.displayname.clone(),
|
displayname: body.displayname.clone(),
|
||||||
..serde_json::from_str(
|
..serde_json::from_str(
|
||||||
db.rooms
|
services()
|
||||||
|
.rooms
|
||||||
|
.state_accessor
|
||||||
.room_state_get(
|
.room_state_get(
|
||||||
&room_id,
|
&room_id,
|
||||||
&StateEventType::RoomMember,
|
&StateEventType::RoomMember,
|
||||||
|
@ -70,7 +73,8 @@ pub async fn set_displayname_route(
|
||||||
|
|
||||||
for (pdu_builder, room_id) in all_rooms_joined {
|
for (pdu_builder, room_id) in all_rooms_joined {
|
||||||
let mutex_state = Arc::clone(
|
let mutex_state = Arc::clone(
|
||||||
db.globals
|
services()
|
||||||
|
.globals
|
||||||
.roomid_mutex_state
|
.roomid_mutex_state
|
||||||
.write()
|
.write()
|
||||||
.unwrap()
|
.unwrap()
|
||||||
|
@ -79,19 +83,22 @@ pub async fn set_displayname_route(
|
||||||
);
|
);
|
||||||
let state_lock = mutex_state.lock().await;
|
let state_lock = mutex_state.lock().await;
|
||||||
|
|
||||||
let _ = db
|
let _ = services().rooms.timeline.build_and_append_pdu(
|
||||||
.rooms
|
pdu_builder,
|
||||||
.build_and_append_pdu(pdu_builder, sender_user, &room_id, &db, &state_lock);
|
sender_user,
|
||||||
|
&room_id,
|
||||||
|
&state_lock,
|
||||||
|
);
|
||||||
|
|
||||||
// Presence update
|
// Presence update
|
||||||
db.rooms.edus.update_presence(
|
services().rooms.edus.presence.update_presence(
|
||||||
sender_user,
|
sender_user,
|
||||||
&room_id,
|
&room_id,
|
||||||
ruma::events::presence::PresenceEvent {
|
ruma::events::presence::PresenceEvent {
|
||||||
content: ruma::events::presence::PresenceEventContent {
|
content: ruma::events::presence::PresenceEventContent {
|
||||||
avatar_url: db.users.avatar_url(sender_user)?,
|
avatar_url: services().users.avatar_url(sender_user)?,
|
||||||
currently_active: None,
|
currently_active: None,
|
||||||
displayname: db.users.displayname(sender_user)?,
|
displayname: services().users.displayname(sender_user)?,
|
||||||
last_active_ago: Some(
|
last_active_ago: Some(
|
||||||
utils::millis_since_unix_epoch()
|
utils::millis_since_unix_epoch()
|
||||||
.try_into()
|
.try_into()
|
||||||
|
@ -102,12 +109,9 @@ pub async fn set_displayname_route(
|
||||||
},
|
},
|
||||||
sender: sender_user.clone(),
|
sender: sender_user.clone(),
|
||||||
},
|
},
|
||||||
&db.globals,
|
|
||||||
)?;
|
)?;
|
||||||
}
|
}
|
||||||
|
|
||||||
db.flush()?;
|
|
||||||
|
|
||||||
Ok(set_display_name::v3::Response {})
|
Ok(set_display_name::v3::Response {})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -117,18 +121,16 @@ pub async fn set_displayname_route(
|
||||||
///
|
///
|
||||||
/// - If user is on another server: Fetches displayname over federation
|
/// - If user is on another server: Fetches displayname over federation
|
||||||
pub async fn get_displayname_route(
|
pub async fn get_displayname_route(
|
||||||
db: DatabaseGuard,
|
body: Ruma<get_display_name::v3::Request>,
|
||||||
body: Ruma<get_display_name::v3::IncomingRequest>,
|
|
||||||
) -> Result<get_display_name::v3::Response> {
|
) -> Result<get_display_name::v3::Response> {
|
||||||
if body.user_id.server_name() != db.globals.server_name() {
|
if body.user_id.server_name() != services().globals.server_name() {
|
||||||
let response = db
|
let response = services()
|
||||||
.sending
|
.sending
|
||||||
.send_federation_request(
|
.send_federation_request(
|
||||||
&db.globals,
|
|
||||||
body.user_id.server_name(),
|
body.user_id.server_name(),
|
||||||
federation::query::get_profile_information::v1::Request {
|
federation::query::get_profile_information::v1::Request {
|
||||||
user_id: &body.user_id,
|
user_id: body.user_id.clone(),
|
||||||
field: Some(&ProfileField::DisplayName),
|
field: Some(ProfileField::DisplayName),
|
||||||
},
|
},
|
||||||
)
|
)
|
||||||
.await?;
|
.await?;
|
||||||
|
@ -139,7 +141,7 @@ pub async fn get_displayname_route(
|
||||||
}
|
}
|
||||||
|
|
||||||
Ok(get_display_name::v3::Response {
|
Ok(get_display_name::v3::Response {
|
||||||
displayname: db.users.displayname(&body.user_id)?,
|
displayname: services().users.displayname(&body.user_id)?,
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -149,29 +151,34 @@ pub async fn get_displayname_route(
|
||||||
///
|
///
|
||||||
/// - Also makes sure other users receive the update using presence EDUs
|
/// - Also makes sure other users receive the update using presence EDUs
|
||||||
pub async fn set_avatar_url_route(
|
pub async fn set_avatar_url_route(
|
||||||
db: DatabaseGuard,
|
body: Ruma<set_avatar_url::v3::Request>,
|
||||||
body: Ruma<set_avatar_url::v3::IncomingRequest>,
|
|
||||||
) -> Result<set_avatar_url::v3::Response> {
|
) -> Result<set_avatar_url::v3::Response> {
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
db.users
|
services()
|
||||||
|
.users
|
||||||
.set_avatar_url(sender_user, body.avatar_url.clone())?;
|
.set_avatar_url(sender_user, body.avatar_url.clone())?;
|
||||||
|
|
||||||
db.users.set_blurhash(sender_user, body.blurhash.clone())?;
|
services()
|
||||||
|
.users
|
||||||
|
.set_blurhash(sender_user, body.blurhash.clone())?;
|
||||||
|
|
||||||
// Send a new membership event and presence update into all joined rooms
|
// Send a new membership event and presence update into all joined rooms
|
||||||
let all_joined_rooms: Vec<_> = db
|
let all_joined_rooms: Vec<_> = services()
|
||||||
.rooms
|
.rooms
|
||||||
|
.state_cache
|
||||||
.rooms_joined(sender_user)
|
.rooms_joined(sender_user)
|
||||||
.filter_map(|r| r.ok())
|
.filter_map(|r| r.ok())
|
||||||
.map(|room_id| {
|
.map(|room_id| {
|
||||||
Ok::<_, Error>((
|
Ok::<_, Error>((
|
||||||
PduBuilder {
|
PduBuilder {
|
||||||
event_type: RoomEventType::RoomMember,
|
event_type: TimelineEventType::RoomMember,
|
||||||
content: to_raw_value(&RoomMemberEventContent {
|
content: to_raw_value(&RoomMemberEventContent {
|
||||||
avatar_url: body.avatar_url.clone(),
|
avatar_url: body.avatar_url.clone(),
|
||||||
..serde_json::from_str(
|
..serde_json::from_str(
|
||||||
db.rooms
|
services()
|
||||||
|
.rooms
|
||||||
|
.state_accessor
|
||||||
.room_state_get(
|
.room_state_get(
|
||||||
&room_id,
|
&room_id,
|
||||||
&StateEventType::RoomMember,
|
&StateEventType::RoomMember,
|
||||||
|
@ -201,7 +208,8 @@ pub async fn set_avatar_url_route(
|
||||||
|
|
||||||
for (pdu_builder, room_id) in all_joined_rooms {
|
for (pdu_builder, room_id) in all_joined_rooms {
|
||||||
let mutex_state = Arc::clone(
|
let mutex_state = Arc::clone(
|
||||||
db.globals
|
services()
|
||||||
|
.globals
|
||||||
.roomid_mutex_state
|
.roomid_mutex_state
|
||||||
.write()
|
.write()
|
||||||
.unwrap()
|
.unwrap()
|
||||||
|
@ -210,19 +218,22 @@ pub async fn set_avatar_url_route(
|
||||||
);
|
);
|
||||||
let state_lock = mutex_state.lock().await;
|
let state_lock = mutex_state.lock().await;
|
||||||
|
|
||||||
let _ = db
|
let _ = services().rooms.timeline.build_and_append_pdu(
|
||||||
.rooms
|
pdu_builder,
|
||||||
.build_and_append_pdu(pdu_builder, sender_user, &room_id, &db, &state_lock);
|
sender_user,
|
||||||
|
&room_id,
|
||||||
|
&state_lock,
|
||||||
|
);
|
||||||
|
|
||||||
// Presence update
|
// Presence update
|
||||||
db.rooms.edus.update_presence(
|
services().rooms.edus.presence.update_presence(
|
||||||
sender_user,
|
sender_user,
|
||||||
&room_id,
|
&room_id,
|
||||||
ruma::events::presence::PresenceEvent {
|
ruma::events::presence::PresenceEvent {
|
||||||
content: ruma::events::presence::PresenceEventContent {
|
content: ruma::events::presence::PresenceEventContent {
|
||||||
avatar_url: db.users.avatar_url(sender_user)?,
|
avatar_url: services().users.avatar_url(sender_user)?,
|
||||||
currently_active: None,
|
currently_active: None,
|
||||||
displayname: db.users.displayname(sender_user)?,
|
displayname: services().users.displayname(sender_user)?,
|
||||||
last_active_ago: Some(
|
last_active_ago: Some(
|
||||||
utils::millis_since_unix_epoch()
|
utils::millis_since_unix_epoch()
|
||||||
.try_into()
|
.try_into()
|
||||||
|
@ -233,12 +244,9 @@ pub async fn set_avatar_url_route(
|
||||||
},
|
},
|
||||||
sender: sender_user.clone(),
|
sender: sender_user.clone(),
|
||||||
},
|
},
|
||||||
&db.globals,
|
|
||||||
)?;
|
)?;
|
||||||
}
|
}
|
||||||
|
|
||||||
db.flush()?;
|
|
||||||
|
|
||||||
Ok(set_avatar_url::v3::Response {})
|
Ok(set_avatar_url::v3::Response {})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -248,18 +256,16 @@ pub async fn set_avatar_url_route(
|
||||||
///
|
///
|
||||||
/// - If user is on another server: Fetches avatar_url and blurhash over federation
|
/// - If user is on another server: Fetches avatar_url and blurhash over federation
|
||||||
pub async fn get_avatar_url_route(
|
pub async fn get_avatar_url_route(
|
||||||
db: DatabaseGuard,
|
body: Ruma<get_avatar_url::v3::Request>,
|
||||||
body: Ruma<get_avatar_url::v3::IncomingRequest>,
|
|
||||||
) -> Result<get_avatar_url::v3::Response> {
|
) -> Result<get_avatar_url::v3::Response> {
|
||||||
if body.user_id.server_name() != db.globals.server_name() {
|
if body.user_id.server_name() != services().globals.server_name() {
|
||||||
let response = db
|
let response = services()
|
||||||
.sending
|
.sending
|
||||||
.send_federation_request(
|
.send_federation_request(
|
||||||
&db.globals,
|
|
||||||
body.user_id.server_name(),
|
body.user_id.server_name(),
|
||||||
federation::query::get_profile_information::v1::Request {
|
federation::query::get_profile_information::v1::Request {
|
||||||
user_id: &body.user_id,
|
user_id: body.user_id.clone(),
|
||||||
field: Some(&ProfileField::AvatarUrl),
|
field: Some(ProfileField::AvatarUrl),
|
||||||
},
|
},
|
||||||
)
|
)
|
||||||
.await?;
|
.await?;
|
||||||
|
@ -271,8 +277,8 @@ pub async fn get_avatar_url_route(
|
||||||
}
|
}
|
||||||
|
|
||||||
Ok(get_avatar_url::v3::Response {
|
Ok(get_avatar_url::v3::Response {
|
||||||
avatar_url: db.users.avatar_url(&body.user_id)?,
|
avatar_url: services().users.avatar_url(&body.user_id)?,
|
||||||
blurhash: db.users.blurhash(&body.user_id)?,
|
blurhash: services().users.blurhash(&body.user_id)?,
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -282,17 +288,15 @@ pub async fn get_avatar_url_route(
|
||||||
///
|
///
|
||||||
/// - If user is on another server: Fetches profile over federation
|
/// - If user is on another server: Fetches profile over federation
|
||||||
pub async fn get_profile_route(
|
pub async fn get_profile_route(
|
||||||
db: DatabaseGuard,
|
body: Ruma<get_profile::v3::Request>,
|
||||||
body: Ruma<get_profile::v3::IncomingRequest>,
|
|
||||||
) -> Result<get_profile::v3::Response> {
|
) -> Result<get_profile::v3::Response> {
|
||||||
if body.user_id.server_name() != db.globals.server_name() {
|
if body.user_id.server_name() != services().globals.server_name() {
|
||||||
let response = db
|
let response = services()
|
||||||
.sending
|
.sending
|
||||||
.send_federation_request(
|
.send_federation_request(
|
||||||
&db.globals,
|
|
||||||
body.user_id.server_name(),
|
body.user_id.server_name(),
|
||||||
federation::query::get_profile_information::v1::Request {
|
federation::query::get_profile_information::v1::Request {
|
||||||
user_id: &body.user_id,
|
user_id: body.user_id.clone(),
|
||||||
field: None,
|
field: None,
|
||||||
},
|
},
|
||||||
)
|
)
|
||||||
|
@ -305,7 +309,7 @@ pub async fn get_profile_route(
|
||||||
});
|
});
|
||||||
}
|
}
|
||||||
|
|
||||||
if !db.users.exists(&body.user_id)? {
|
if !services().users.exists(&body.user_id)? {
|
||||||
// Return 404 if this user doesn't exist
|
// Return 404 if this user doesn't exist
|
||||||
return Err(Error::BadRequest(
|
return Err(Error::BadRequest(
|
||||||
ErrorKind::NotFound,
|
ErrorKind::NotFound,
|
||||||
|
@ -314,8 +318,8 @@ pub async fn get_profile_route(
|
||||||
}
|
}
|
||||||
|
|
||||||
Ok(get_profile::v3::Response {
|
Ok(get_profile::v3::Response {
|
||||||
avatar_url: db.users.avatar_url(&body.user_id)?,
|
avatar_url: services().users.avatar_url(&body.user_id)?,
|
||||||
blurhash: db.users.blurhash(&body.user_id)?,
|
blurhash: services().users.blurhash(&body.user_id)?,
|
||||||
displayname: db.users.displayname(&body.user_id)?,
|
displayname: services().users.displayname(&body.user_id)?,
|
||||||
})
|
})
|
||||||
}
|
}
|
432
src/api/client_server/push.rs
Normal file
432
src/api/client_server/push.rs
Normal file
|
@ -0,0 +1,432 @@
|
||||||
|
use crate::{services, Error, Result, Ruma};
|
||||||
|
use ruma::{
|
||||||
|
api::client::{
|
||||||
|
error::ErrorKind,
|
||||||
|
push::{
|
||||||
|
delete_pushrule, get_pushers, get_pushrule, get_pushrule_actions, get_pushrule_enabled,
|
||||||
|
get_pushrules_all, set_pusher, set_pushrule, set_pushrule_actions,
|
||||||
|
set_pushrule_enabled, RuleScope,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
events::{push_rules::PushRulesEvent, GlobalAccountDataEventType},
|
||||||
|
push::{InsertPushRuleError, RemovePushRuleError},
|
||||||
|
};
|
||||||
|
|
||||||
|
/// # `GET /_matrix/client/r0/pushrules`
|
||||||
|
///
|
||||||
|
/// Retrieves the push rules event for this user.
|
||||||
|
pub async fn get_pushrules_all_route(
|
||||||
|
body: Ruma<get_pushrules_all::v3::Request>,
|
||||||
|
) -> Result<get_pushrules_all::v3::Response> {
|
||||||
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
|
let event = services()
|
||||||
|
.account_data
|
||||||
|
.get(
|
||||||
|
None,
|
||||||
|
sender_user,
|
||||||
|
GlobalAccountDataEventType::PushRules.to_string().into(),
|
||||||
|
)?
|
||||||
|
.ok_or(Error::BadRequest(
|
||||||
|
ErrorKind::NotFound,
|
||||||
|
"PushRules event not found.",
|
||||||
|
))?;
|
||||||
|
|
||||||
|
let account_data = serde_json::from_str::<PushRulesEvent>(event.get())
|
||||||
|
.map_err(|_| Error::bad_database("Invalid account data event in db."))?
|
||||||
|
.content;
|
||||||
|
|
||||||
|
Ok(get_pushrules_all::v3::Response {
|
||||||
|
global: account_data.global,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
/// # `GET /_matrix/client/r0/pushrules/{scope}/{kind}/{ruleId}`
|
||||||
|
///
|
||||||
|
/// Retrieves a single specified push rule for this user.
|
||||||
|
pub async fn get_pushrule_route(
|
||||||
|
body: Ruma<get_pushrule::v3::Request>,
|
||||||
|
) -> Result<get_pushrule::v3::Response> {
|
||||||
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
|
let event = services()
|
||||||
|
.account_data
|
||||||
|
.get(
|
||||||
|
None,
|
||||||
|
sender_user,
|
||||||
|
GlobalAccountDataEventType::PushRules.to_string().into(),
|
||||||
|
)?
|
||||||
|
.ok_or(Error::BadRequest(
|
||||||
|
ErrorKind::NotFound,
|
||||||
|
"PushRules event not found.",
|
||||||
|
))?;
|
||||||
|
|
||||||
|
let account_data = serde_json::from_str::<PushRulesEvent>(event.get())
|
||||||
|
.map_err(|_| Error::bad_database("Invalid account data event in db."))?
|
||||||
|
.content;
|
||||||
|
|
||||||
|
let rule = account_data
|
||||||
|
.global
|
||||||
|
.get(body.kind.clone(), &body.rule_id)
|
||||||
|
.map(Into::into);
|
||||||
|
|
||||||
|
if let Some(rule) = rule {
|
||||||
|
Ok(get_pushrule::v3::Response { rule })
|
||||||
|
} else {
|
||||||
|
Err(Error::BadRequest(
|
||||||
|
ErrorKind::NotFound,
|
||||||
|
"Push rule not found.",
|
||||||
|
))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// # `PUT /_matrix/client/r0/pushrules/{scope}/{kind}/{ruleId}`
|
||||||
|
///
|
||||||
|
/// Creates a single specified push rule for this user.
|
||||||
|
pub async fn set_pushrule_route(
|
||||||
|
body: Ruma<set_pushrule::v3::Request>,
|
||||||
|
) -> Result<set_pushrule::v3::Response> {
|
||||||
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
let body = body.body;
|
||||||
|
|
||||||
|
if body.scope != RuleScope::Global {
|
||||||
|
return Err(Error::BadRequest(
|
||||||
|
ErrorKind::InvalidParam,
|
||||||
|
"Scopes other than 'global' are not supported.",
|
||||||
|
));
|
||||||
|
}
|
||||||
|
|
||||||
|
let event = services()
|
||||||
|
.account_data
|
||||||
|
.get(
|
||||||
|
None,
|
||||||
|
sender_user,
|
||||||
|
GlobalAccountDataEventType::PushRules.to_string().into(),
|
||||||
|
)?
|
||||||
|
.ok_or(Error::BadRequest(
|
||||||
|
ErrorKind::NotFound,
|
||||||
|
"PushRules event not found.",
|
||||||
|
))?;
|
||||||
|
|
||||||
|
let mut account_data = serde_json::from_str::<PushRulesEvent>(event.get())
|
||||||
|
.map_err(|_| Error::bad_database("Invalid account data event in db."))?;
|
||||||
|
|
||||||
|
if let Err(error) = account_data.content.global.insert(
|
||||||
|
body.rule.clone(),
|
||||||
|
body.after.as_deref(),
|
||||||
|
body.before.as_deref(),
|
||||||
|
) {
|
||||||
|
let err = match error {
|
||||||
|
InsertPushRuleError::ServerDefaultRuleId => Error::BadRequest(
|
||||||
|
ErrorKind::InvalidParam,
|
||||||
|
"Rule IDs starting with a dot are reserved for server-default rules.",
|
||||||
|
),
|
||||||
|
InsertPushRuleError::InvalidRuleId => Error::BadRequest(
|
||||||
|
ErrorKind::InvalidParam,
|
||||||
|
"Rule ID containing invalid characters.",
|
||||||
|
),
|
||||||
|
InsertPushRuleError::RelativeToServerDefaultRule => Error::BadRequest(
|
||||||
|
ErrorKind::InvalidParam,
|
||||||
|
"Can't place a push rule relatively to a server-default rule.",
|
||||||
|
),
|
||||||
|
InsertPushRuleError::UnknownRuleId => Error::BadRequest(
|
||||||
|
ErrorKind::NotFound,
|
||||||
|
"The before or after rule could not be found.",
|
||||||
|
),
|
||||||
|
InsertPushRuleError::BeforeHigherThanAfter => Error::BadRequest(
|
||||||
|
ErrorKind::InvalidParam,
|
||||||
|
"The before rule has a higher priority than the after rule.",
|
||||||
|
),
|
||||||
|
_ => Error::BadRequest(ErrorKind::InvalidParam, "Invalid data."),
|
||||||
|
};
|
||||||
|
|
||||||
|
return Err(err);
|
||||||
|
}
|
||||||
|
|
||||||
|
services().account_data.update(
|
||||||
|
None,
|
||||||
|
sender_user,
|
||||||
|
GlobalAccountDataEventType::PushRules.to_string().into(),
|
||||||
|
&serde_json::to_value(account_data).expect("to json value always works"),
|
||||||
|
)?;
|
||||||
|
|
||||||
|
Ok(set_pushrule::v3::Response {})
|
||||||
|
}
|
||||||
|
|
||||||
|
/// # `GET /_matrix/client/r0/pushrules/{scope}/{kind}/{ruleId}/actions`
|
||||||
|
///
|
||||||
|
/// Gets the actions of a single specified push rule for this user.
|
||||||
|
pub async fn get_pushrule_actions_route(
|
||||||
|
body: Ruma<get_pushrule_actions::v3::Request>,
|
||||||
|
) -> Result<get_pushrule_actions::v3::Response> {
|
||||||
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
|
if body.scope != RuleScope::Global {
|
||||||
|
return Err(Error::BadRequest(
|
||||||
|
ErrorKind::InvalidParam,
|
||||||
|
"Scopes other than 'global' are not supported.",
|
||||||
|
));
|
||||||
|
}
|
||||||
|
|
||||||
|
let event = services()
|
||||||
|
.account_data
|
||||||
|
.get(
|
||||||
|
None,
|
||||||
|
sender_user,
|
||||||
|
GlobalAccountDataEventType::PushRules.to_string().into(),
|
||||||
|
)?
|
||||||
|
.ok_or(Error::BadRequest(
|
||||||
|
ErrorKind::NotFound,
|
||||||
|
"PushRules event not found.",
|
||||||
|
))?;
|
||||||
|
|
||||||
|
let account_data = serde_json::from_str::<PushRulesEvent>(event.get())
|
||||||
|
.map_err(|_| Error::bad_database("Invalid account data event in db."))?
|
||||||
|
.content;
|
||||||
|
|
||||||
|
let global = account_data.global;
|
||||||
|
let actions = global
|
||||||
|
.get(body.kind.clone(), &body.rule_id)
|
||||||
|
.map(|rule| rule.actions().to_owned())
|
||||||
|
.ok_or(Error::BadRequest(
|
||||||
|
ErrorKind::NotFound,
|
||||||
|
"Push rule not found.",
|
||||||
|
))?;
|
||||||
|
|
||||||
|
Ok(get_pushrule_actions::v3::Response { actions })
|
||||||
|
}
|
||||||
|
|
||||||
|
/// # `PUT /_matrix/client/r0/pushrules/{scope}/{kind}/{ruleId}/actions`
|
||||||
|
///
|
||||||
|
/// Sets the actions of a single specified push rule for this user.
|
||||||
|
pub async fn set_pushrule_actions_route(
|
||||||
|
body: Ruma<set_pushrule_actions::v3::Request>,
|
||||||
|
) -> Result<set_pushrule_actions::v3::Response> {
|
||||||
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
|
if body.scope != RuleScope::Global {
|
||||||
|
return Err(Error::BadRequest(
|
||||||
|
ErrorKind::InvalidParam,
|
||||||
|
"Scopes other than 'global' are not supported.",
|
||||||
|
));
|
||||||
|
}
|
||||||
|
|
||||||
|
let event = services()
|
||||||
|
.account_data
|
||||||
|
.get(
|
||||||
|
None,
|
||||||
|
sender_user,
|
||||||
|
GlobalAccountDataEventType::PushRules.to_string().into(),
|
||||||
|
)?
|
||||||
|
.ok_or(Error::BadRequest(
|
||||||
|
ErrorKind::NotFound,
|
||||||
|
"PushRules event not found.",
|
||||||
|
))?;
|
||||||
|
|
||||||
|
let mut account_data = serde_json::from_str::<PushRulesEvent>(event.get())
|
||||||
|
.map_err(|_| Error::bad_database("Invalid account data event in db."))?;
|
||||||
|
|
||||||
|
if account_data
|
||||||
|
.content
|
||||||
|
.global
|
||||||
|
.set_actions(body.kind.clone(), &body.rule_id, body.actions.clone())
|
||||||
|
.is_err()
|
||||||
|
{
|
||||||
|
return Err(Error::BadRequest(
|
||||||
|
ErrorKind::NotFound,
|
||||||
|
"Push rule not found.",
|
||||||
|
));
|
||||||
|
}
|
||||||
|
|
||||||
|
services().account_data.update(
|
||||||
|
None,
|
||||||
|
sender_user,
|
||||||
|
GlobalAccountDataEventType::PushRules.to_string().into(),
|
||||||
|
&serde_json::to_value(account_data).expect("to json value always works"),
|
||||||
|
)?;
|
||||||
|
|
||||||
|
Ok(set_pushrule_actions::v3::Response {})
|
||||||
|
}
|
||||||
|
|
||||||
|
/// # `GET /_matrix/client/r0/pushrules/{scope}/{kind}/{ruleId}/enabled`
|
||||||
|
///
|
||||||
|
/// Gets the enabled status of a single specified push rule for this user.
|
||||||
|
pub async fn get_pushrule_enabled_route(
|
||||||
|
body: Ruma<get_pushrule_enabled::v3::Request>,
|
||||||
|
) -> Result<get_pushrule_enabled::v3::Response> {
|
||||||
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
|
if body.scope != RuleScope::Global {
|
||||||
|
return Err(Error::BadRequest(
|
||||||
|
ErrorKind::InvalidParam,
|
||||||
|
"Scopes other than 'global' are not supported.",
|
||||||
|
));
|
||||||
|
}
|
||||||
|
|
||||||
|
let event = services()
|
||||||
|
.account_data
|
||||||
|
.get(
|
||||||
|
None,
|
||||||
|
sender_user,
|
||||||
|
GlobalAccountDataEventType::PushRules.to_string().into(),
|
||||||
|
)?
|
||||||
|
.ok_or(Error::BadRequest(
|
||||||
|
ErrorKind::NotFound,
|
||||||
|
"PushRules event not found.",
|
||||||
|
))?;
|
||||||
|
|
||||||
|
let account_data = serde_json::from_str::<PushRulesEvent>(event.get())
|
||||||
|
.map_err(|_| Error::bad_database("Invalid account data event in db."))?;
|
||||||
|
|
||||||
|
let global = account_data.content.global;
|
||||||
|
let enabled = global
|
||||||
|
.get(body.kind.clone(), &body.rule_id)
|
||||||
|
.map(|r| r.enabled())
|
||||||
|
.ok_or(Error::BadRequest(
|
||||||
|
ErrorKind::NotFound,
|
||||||
|
"Push rule not found.",
|
||||||
|
))?;
|
||||||
|
|
||||||
|
Ok(get_pushrule_enabled::v3::Response { enabled })
|
||||||
|
}
|
||||||
|
|
||||||
|
/// # `PUT /_matrix/client/r0/pushrules/{scope}/{kind}/{ruleId}/enabled`
|
||||||
|
///
|
||||||
|
/// Sets the enabled status of a single specified push rule for this user.
|
||||||
|
pub async fn set_pushrule_enabled_route(
|
||||||
|
body: Ruma<set_pushrule_enabled::v3::Request>,
|
||||||
|
) -> Result<set_pushrule_enabled::v3::Response> {
|
||||||
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
|
if body.scope != RuleScope::Global {
|
||||||
|
return Err(Error::BadRequest(
|
||||||
|
ErrorKind::InvalidParam,
|
||||||
|
"Scopes other than 'global' are not supported.",
|
||||||
|
));
|
||||||
|
}
|
||||||
|
|
||||||
|
let event = services()
|
||||||
|
.account_data
|
||||||
|
.get(
|
||||||
|
None,
|
||||||
|
sender_user,
|
||||||
|
GlobalAccountDataEventType::PushRules.to_string().into(),
|
||||||
|
)?
|
||||||
|
.ok_or(Error::BadRequest(
|
||||||
|
ErrorKind::NotFound,
|
||||||
|
"PushRules event not found.",
|
||||||
|
))?;
|
||||||
|
|
||||||
|
let mut account_data = serde_json::from_str::<PushRulesEvent>(event.get())
|
||||||
|
.map_err(|_| Error::bad_database("Invalid account data event in db."))?;
|
||||||
|
|
||||||
|
if account_data
|
||||||
|
.content
|
||||||
|
.global
|
||||||
|
.set_enabled(body.kind.clone(), &body.rule_id, body.enabled)
|
||||||
|
.is_err()
|
||||||
|
{
|
||||||
|
return Err(Error::BadRequest(
|
||||||
|
ErrorKind::NotFound,
|
||||||
|
"Push rule not found.",
|
||||||
|
));
|
||||||
|
}
|
||||||
|
|
||||||
|
services().account_data.update(
|
||||||
|
None,
|
||||||
|
sender_user,
|
||||||
|
GlobalAccountDataEventType::PushRules.to_string().into(),
|
||||||
|
&serde_json::to_value(account_data).expect("to json value always works"),
|
||||||
|
)?;
|
||||||
|
|
||||||
|
Ok(set_pushrule_enabled::v3::Response {})
|
||||||
|
}
|
||||||
|
|
||||||
|
/// # `DELETE /_matrix/client/r0/pushrules/{scope}/{kind}/{ruleId}`
|
||||||
|
///
|
||||||
|
/// Deletes a single specified push rule for this user.
|
||||||
|
pub async fn delete_pushrule_route(
|
||||||
|
body: Ruma<delete_pushrule::v3::Request>,
|
||||||
|
) -> Result<delete_pushrule::v3::Response> {
|
||||||
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
|
if body.scope != RuleScope::Global {
|
||||||
|
return Err(Error::BadRequest(
|
||||||
|
ErrorKind::InvalidParam,
|
||||||
|
"Scopes other than 'global' are not supported.",
|
||||||
|
));
|
||||||
|
}
|
||||||
|
|
||||||
|
let event = services()
|
||||||
|
.account_data
|
||||||
|
.get(
|
||||||
|
None,
|
||||||
|
sender_user,
|
||||||
|
GlobalAccountDataEventType::PushRules.to_string().into(),
|
||||||
|
)?
|
||||||
|
.ok_or(Error::BadRequest(
|
||||||
|
ErrorKind::NotFound,
|
||||||
|
"PushRules event not found.",
|
||||||
|
))?;
|
||||||
|
|
||||||
|
let mut account_data = serde_json::from_str::<PushRulesEvent>(event.get())
|
||||||
|
.map_err(|_| Error::bad_database("Invalid account data event in db."))?;
|
||||||
|
|
||||||
|
if let Err(error) = account_data
|
||||||
|
.content
|
||||||
|
.global
|
||||||
|
.remove(body.kind.clone(), &body.rule_id)
|
||||||
|
{
|
||||||
|
let err = match error {
|
||||||
|
RemovePushRuleError::ServerDefault => Error::BadRequest(
|
||||||
|
ErrorKind::InvalidParam,
|
||||||
|
"Cannot delete a server-default pushrule.",
|
||||||
|
),
|
||||||
|
RemovePushRuleError::NotFound => {
|
||||||
|
Error::BadRequest(ErrorKind::NotFound, "Push rule not found.")
|
||||||
|
}
|
||||||
|
_ => Error::BadRequest(ErrorKind::InvalidParam, "Invalid data."),
|
||||||
|
};
|
||||||
|
|
||||||
|
return Err(err);
|
||||||
|
}
|
||||||
|
|
||||||
|
services().account_data.update(
|
||||||
|
None,
|
||||||
|
sender_user,
|
||||||
|
GlobalAccountDataEventType::PushRules.to_string().into(),
|
||||||
|
&serde_json::to_value(account_data).expect("to json value always works"),
|
||||||
|
)?;
|
||||||
|
|
||||||
|
Ok(delete_pushrule::v3::Response {})
|
||||||
|
}
|
||||||
|
|
||||||
|
/// # `GET /_matrix/client/r0/pushers`
|
||||||
|
///
|
||||||
|
/// Gets all currently active pushers for the sender user.
|
||||||
|
pub async fn get_pushers_route(
|
||||||
|
body: Ruma<get_pushers::v3::Request>,
|
||||||
|
) -> Result<get_pushers::v3::Response> {
|
||||||
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
|
Ok(get_pushers::v3::Response {
|
||||||
|
pushers: services().pusher.get_pushers(sender_user)?,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
/// # `POST /_matrix/client/r0/pushers/set`
|
||||||
|
///
|
||||||
|
/// Adds a pusher for the sender user.
|
||||||
|
///
|
||||||
|
/// - TODO: Handle `append`
|
||||||
|
pub async fn set_pushers_route(
|
||||||
|
body: Ruma<set_pusher::v3::Request>,
|
||||||
|
) -> Result<set_pusher::v3::Response> {
|
||||||
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
|
services()
|
||||||
|
.pusher
|
||||||
|
.set_pusher(sender_user, body.action.clone())?;
|
||||||
|
|
||||||
|
Ok(set_pusher::v3::Response::default())
|
||||||
|
}
|
182
src/api/client_server/read_marker.rs
Normal file
182
src/api/client_server/read_marker.rs
Normal file
|
@ -0,0 +1,182 @@
|
||||||
|
use crate::{service::rooms::timeline::PduCount, services, Error, Result, Ruma};
|
||||||
|
use ruma::{
|
||||||
|
api::client::{error::ErrorKind, read_marker::set_read_marker, receipt::create_receipt},
|
||||||
|
events::{
|
||||||
|
receipt::{ReceiptThread, ReceiptType},
|
||||||
|
RoomAccountDataEventType,
|
||||||
|
},
|
||||||
|
MilliSecondsSinceUnixEpoch,
|
||||||
|
};
|
||||||
|
use std::collections::BTreeMap;
|
||||||
|
|
||||||
|
/// # `POST /_matrix/client/r0/rooms/{roomId}/read_markers`
|
||||||
|
///
|
||||||
|
/// Sets different types of read markers.
|
||||||
|
///
|
||||||
|
/// - Updates fully-read account data event to `fully_read`
|
||||||
|
/// - If `read_receipt` is set: Update private marker and public read receipt EDU
|
||||||
|
pub async fn set_read_marker_route(
|
||||||
|
body: Ruma<set_read_marker::v3::Request>,
|
||||||
|
) -> Result<set_read_marker::v3::Response> {
|
||||||
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
|
if let Some(fully_read) = &body.fully_read {
|
||||||
|
let fully_read_event = ruma::events::fully_read::FullyReadEvent {
|
||||||
|
content: ruma::events::fully_read::FullyReadEventContent {
|
||||||
|
event_id: fully_read.clone(),
|
||||||
|
},
|
||||||
|
};
|
||||||
|
services().account_data.update(
|
||||||
|
Some(&body.room_id),
|
||||||
|
sender_user,
|
||||||
|
RoomAccountDataEventType::FullyRead,
|
||||||
|
&serde_json::to_value(fully_read_event).expect("to json value always works"),
|
||||||
|
)?;
|
||||||
|
}
|
||||||
|
|
||||||
|
if body.private_read_receipt.is_some() || body.read_receipt.is_some() {
|
||||||
|
services()
|
||||||
|
.rooms
|
||||||
|
.user
|
||||||
|
.reset_notification_counts(sender_user, &body.room_id)?;
|
||||||
|
}
|
||||||
|
|
||||||
|
if let Some(event) = &body.private_read_receipt {
|
||||||
|
let count = services()
|
||||||
|
.rooms
|
||||||
|
.timeline
|
||||||
|
.get_pdu_count(event)?
|
||||||
|
.ok_or(Error::BadRequest(
|
||||||
|
ErrorKind::InvalidParam,
|
||||||
|
"Event does not exist.",
|
||||||
|
))?;
|
||||||
|
let count = match count {
|
||||||
|
PduCount::Backfilled(_) => {
|
||||||
|
return Err(Error::BadRequest(
|
||||||
|
ErrorKind::InvalidParam,
|
||||||
|
"Read receipt is in backfilled timeline",
|
||||||
|
))
|
||||||
|
}
|
||||||
|
PduCount::Normal(c) => c,
|
||||||
|
};
|
||||||
|
services()
|
||||||
|
.rooms
|
||||||
|
.edus
|
||||||
|
.read_receipt
|
||||||
|
.private_read_set(&body.room_id, sender_user, count)?;
|
||||||
|
}
|
||||||
|
|
||||||
|
if let Some(event) = &body.read_receipt {
|
||||||
|
let mut user_receipts = BTreeMap::new();
|
||||||
|
user_receipts.insert(
|
||||||
|
sender_user.clone(),
|
||||||
|
ruma::events::receipt::Receipt {
|
||||||
|
ts: Some(MilliSecondsSinceUnixEpoch::now()),
|
||||||
|
thread: ReceiptThread::Unthreaded,
|
||||||
|
},
|
||||||
|
);
|
||||||
|
|
||||||
|
let mut receipts = BTreeMap::new();
|
||||||
|
receipts.insert(ReceiptType::Read, user_receipts);
|
||||||
|
|
||||||
|
let mut receipt_content = BTreeMap::new();
|
||||||
|
receipt_content.insert(event.to_owned(), receipts);
|
||||||
|
|
||||||
|
services().rooms.edus.read_receipt.readreceipt_update(
|
||||||
|
sender_user,
|
||||||
|
&body.room_id,
|
||||||
|
ruma::events::receipt::ReceiptEvent {
|
||||||
|
content: ruma::events::receipt::ReceiptEventContent(receipt_content),
|
||||||
|
room_id: body.room_id.clone(),
|
||||||
|
},
|
||||||
|
)?;
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(set_read_marker::v3::Response {})
|
||||||
|
}
|
||||||
|
|
||||||
|
/// # `POST /_matrix/client/r0/rooms/{roomId}/receipt/{receiptType}/{eventId}`
|
||||||
|
///
|
||||||
|
/// Sets private read marker and public read receipt EDU.
|
||||||
|
pub async fn create_receipt_route(
|
||||||
|
body: Ruma<create_receipt::v3::Request>,
|
||||||
|
) -> Result<create_receipt::v3::Response> {
|
||||||
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
|
if matches!(
|
||||||
|
&body.receipt_type,
|
||||||
|
create_receipt::v3::ReceiptType::Read | create_receipt::v3::ReceiptType::ReadPrivate
|
||||||
|
) {
|
||||||
|
services()
|
||||||
|
.rooms
|
||||||
|
.user
|
||||||
|
.reset_notification_counts(sender_user, &body.room_id)?;
|
||||||
|
}
|
||||||
|
|
||||||
|
match body.receipt_type {
|
||||||
|
create_receipt::v3::ReceiptType::FullyRead => {
|
||||||
|
let fully_read_event = ruma::events::fully_read::FullyReadEvent {
|
||||||
|
content: ruma::events::fully_read::FullyReadEventContent {
|
||||||
|
event_id: body.event_id.clone(),
|
||||||
|
},
|
||||||
|
};
|
||||||
|
services().account_data.update(
|
||||||
|
Some(&body.room_id),
|
||||||
|
sender_user,
|
||||||
|
RoomAccountDataEventType::FullyRead,
|
||||||
|
&serde_json::to_value(fully_read_event).expect("to json value always works"),
|
||||||
|
)?;
|
||||||
|
}
|
||||||
|
create_receipt::v3::ReceiptType::Read => {
|
||||||
|
let mut user_receipts = BTreeMap::new();
|
||||||
|
user_receipts.insert(
|
||||||
|
sender_user.clone(),
|
||||||
|
ruma::events::receipt::Receipt {
|
||||||
|
ts: Some(MilliSecondsSinceUnixEpoch::now()),
|
||||||
|
thread: ReceiptThread::Unthreaded,
|
||||||
|
},
|
||||||
|
);
|
||||||
|
let mut receipts = BTreeMap::new();
|
||||||
|
receipts.insert(ReceiptType::Read, user_receipts);
|
||||||
|
|
||||||
|
let mut receipt_content = BTreeMap::new();
|
||||||
|
receipt_content.insert(body.event_id.to_owned(), receipts);
|
||||||
|
|
||||||
|
services().rooms.edus.read_receipt.readreceipt_update(
|
||||||
|
sender_user,
|
||||||
|
&body.room_id,
|
||||||
|
ruma::events::receipt::ReceiptEvent {
|
||||||
|
content: ruma::events::receipt::ReceiptEventContent(receipt_content),
|
||||||
|
room_id: body.room_id.clone(),
|
||||||
|
},
|
||||||
|
)?;
|
||||||
|
}
|
||||||
|
create_receipt::v3::ReceiptType::ReadPrivate => {
|
||||||
|
let count = services()
|
||||||
|
.rooms
|
||||||
|
.timeline
|
||||||
|
.get_pdu_count(&body.event_id)?
|
||||||
|
.ok_or(Error::BadRequest(
|
||||||
|
ErrorKind::InvalidParam,
|
||||||
|
"Event does not exist.",
|
||||||
|
))?;
|
||||||
|
let count = match count {
|
||||||
|
PduCount::Backfilled(_) => {
|
||||||
|
return Err(Error::BadRequest(
|
||||||
|
ErrorKind::InvalidParam,
|
||||||
|
"Read receipt is in backfilled timeline",
|
||||||
|
))
|
||||||
|
}
|
||||||
|
PduCount::Normal(c) => c,
|
||||||
|
};
|
||||||
|
services().rooms.edus.read_receipt.private_read_set(
|
||||||
|
&body.room_id,
|
||||||
|
sender_user,
|
||||||
|
count,
|
||||||
|
)?;
|
||||||
|
}
|
||||||
|
_ => return Err(Error::bad_database("Unsupported receipt type")),
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(create_receipt::v3::Response {})
|
||||||
|
}
|
|
@ -1,9 +1,9 @@
|
||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
|
|
||||||
use crate::{database::DatabaseGuard, pdu::PduBuilder, Result, Ruma};
|
use crate::{service::pdu::PduBuilder, services, Result, Ruma};
|
||||||
use ruma::{
|
use ruma::{
|
||||||
api::client::redact::redact_event,
|
api::client::redact::redact_event,
|
||||||
events::{room::redaction::RoomRedactionEventContent, RoomEventType},
|
events::{room::redaction::RoomRedactionEventContent, TimelineEventType},
|
||||||
};
|
};
|
||||||
|
|
||||||
use serde_json::value::to_raw_value;
|
use serde_json::value::to_raw_value;
|
||||||
|
@ -14,14 +14,14 @@ use serde_json::value::to_raw_value;
|
||||||
///
|
///
|
||||||
/// - TODO: Handle txn id
|
/// - TODO: Handle txn id
|
||||||
pub async fn redact_event_route(
|
pub async fn redact_event_route(
|
||||||
db: DatabaseGuard,
|
body: Ruma<redact_event::v3::Request>,
|
||||||
body: Ruma<redact_event::v3::IncomingRequest>,
|
|
||||||
) -> Result<redact_event::v3::Response> {
|
) -> Result<redact_event::v3::Response> {
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
let body = body.body;
|
let body = body.body;
|
||||||
|
|
||||||
let mutex_state = Arc::clone(
|
let mutex_state = Arc::clone(
|
||||||
db.globals
|
services()
|
||||||
|
.globals
|
||||||
.roomid_mutex_state
|
.roomid_mutex_state
|
||||||
.write()
|
.write()
|
||||||
.unwrap()
|
.unwrap()
|
||||||
|
@ -30,10 +30,11 @@ pub async fn redact_event_route(
|
||||||
);
|
);
|
||||||
let state_lock = mutex_state.lock().await;
|
let state_lock = mutex_state.lock().await;
|
||||||
|
|
||||||
let event_id = db.rooms.build_and_append_pdu(
|
let event_id = services().rooms.timeline.build_and_append_pdu(
|
||||||
PduBuilder {
|
PduBuilder {
|
||||||
event_type: RoomEventType::RoomRedaction,
|
event_type: TimelineEventType::RoomRedaction,
|
||||||
content: to_raw_value(&RoomRedactionEventContent {
|
content: to_raw_value(&RoomRedactionEventContent {
|
||||||
|
redacts: Some(body.event_id.clone()),
|
||||||
reason: body.reason.clone(),
|
reason: body.reason.clone(),
|
||||||
})
|
})
|
||||||
.expect("event is valid, we just created it"),
|
.expect("event is valid, we just created it"),
|
||||||
|
@ -43,14 +44,11 @@ pub async fn redact_event_route(
|
||||||
},
|
},
|
||||||
sender_user,
|
sender_user,
|
||||||
&body.room_id,
|
&body.room_id,
|
||||||
&db,
|
|
||||||
&state_lock,
|
&state_lock,
|
||||||
)?;
|
)?;
|
||||||
|
|
||||||
drop(state_lock);
|
drop(state_lock);
|
||||||
|
|
||||||
db.flush()?;
|
|
||||||
|
|
||||||
let event_id = (*event_id).to_owned();
|
let event_id = (*event_id).to_owned();
|
||||||
Ok(redact_event::v3::Response { event_id })
|
Ok(redact_event::v3::Response { event_id })
|
||||||
}
|
}
|
146
src/api/client_server/relations.rs
Normal file
146
src/api/client_server/relations.rs
Normal file
|
@ -0,0 +1,146 @@
|
||||||
|
use ruma::api::client::relations::{
|
||||||
|
get_relating_events, get_relating_events_with_rel_type,
|
||||||
|
get_relating_events_with_rel_type_and_event_type,
|
||||||
|
};
|
||||||
|
|
||||||
|
use crate::{service::rooms::timeline::PduCount, services, Result, Ruma};
|
||||||
|
|
||||||
|
/// # `GET /_matrix/client/r0/rooms/{roomId}/relations/{eventId}/{relType}/{eventType}`
|
||||||
|
pub async fn get_relating_events_with_rel_type_and_event_type_route(
|
||||||
|
body: Ruma<get_relating_events_with_rel_type_and_event_type::v1::Request>,
|
||||||
|
) -> Result<get_relating_events_with_rel_type_and_event_type::v1::Response> {
|
||||||
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
|
let from = match body.from.clone() {
|
||||||
|
Some(from) => PduCount::try_from_string(&from)?,
|
||||||
|
None => match ruma::api::Direction::Backward {
|
||||||
|
// TODO: fix ruma so `body.dir` exists
|
||||||
|
ruma::api::Direction::Forward => PduCount::min(),
|
||||||
|
ruma::api::Direction::Backward => PduCount::max(),
|
||||||
|
},
|
||||||
|
};
|
||||||
|
|
||||||
|
let to = body
|
||||||
|
.to
|
||||||
|
.as_ref()
|
||||||
|
.and_then(|t| PduCount::try_from_string(t).ok());
|
||||||
|
|
||||||
|
// Use limit or else 10, with maximum 100
|
||||||
|
let limit = body
|
||||||
|
.limit
|
||||||
|
.and_then(|u| u32::try_from(u).ok())
|
||||||
|
.map_or(10_usize, |u| u as usize)
|
||||||
|
.min(100);
|
||||||
|
|
||||||
|
let res = services()
|
||||||
|
.rooms
|
||||||
|
.pdu_metadata
|
||||||
|
.paginate_relations_with_filter(
|
||||||
|
sender_user,
|
||||||
|
&body.room_id,
|
||||||
|
&body.event_id,
|
||||||
|
Some(body.event_type.clone()),
|
||||||
|
Some(body.rel_type.clone()),
|
||||||
|
from,
|
||||||
|
to,
|
||||||
|
limit,
|
||||||
|
)?;
|
||||||
|
|
||||||
|
Ok(
|
||||||
|
get_relating_events_with_rel_type_and_event_type::v1::Response {
|
||||||
|
chunk: res.chunk,
|
||||||
|
next_batch: res.next_batch,
|
||||||
|
prev_batch: res.prev_batch,
|
||||||
|
},
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// # `GET /_matrix/client/r0/rooms/{roomId}/relations/{eventId}/{relType}`
|
||||||
|
pub async fn get_relating_events_with_rel_type_route(
|
||||||
|
body: Ruma<get_relating_events_with_rel_type::v1::Request>,
|
||||||
|
) -> Result<get_relating_events_with_rel_type::v1::Response> {
|
||||||
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
|
let from = match body.from.clone() {
|
||||||
|
Some(from) => PduCount::try_from_string(&from)?,
|
||||||
|
None => match ruma::api::Direction::Backward {
|
||||||
|
// TODO: fix ruma so `body.dir` exists
|
||||||
|
ruma::api::Direction::Forward => PduCount::min(),
|
||||||
|
ruma::api::Direction::Backward => PduCount::max(),
|
||||||
|
},
|
||||||
|
};
|
||||||
|
|
||||||
|
let to = body
|
||||||
|
.to
|
||||||
|
.as_ref()
|
||||||
|
.and_then(|t| PduCount::try_from_string(t).ok());
|
||||||
|
|
||||||
|
// Use limit or else 10, with maximum 100
|
||||||
|
let limit = body
|
||||||
|
.limit
|
||||||
|
.and_then(|u| u32::try_from(u).ok())
|
||||||
|
.map_or(10_usize, |u| u as usize)
|
||||||
|
.min(100);
|
||||||
|
|
||||||
|
let res = services()
|
||||||
|
.rooms
|
||||||
|
.pdu_metadata
|
||||||
|
.paginate_relations_with_filter(
|
||||||
|
sender_user,
|
||||||
|
&body.room_id,
|
||||||
|
&body.event_id,
|
||||||
|
None,
|
||||||
|
Some(body.rel_type.clone()),
|
||||||
|
from,
|
||||||
|
to,
|
||||||
|
limit,
|
||||||
|
)?;
|
||||||
|
|
||||||
|
Ok(get_relating_events_with_rel_type::v1::Response {
|
||||||
|
chunk: res.chunk,
|
||||||
|
next_batch: res.next_batch,
|
||||||
|
prev_batch: res.prev_batch,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
/// # `GET /_matrix/client/r0/rooms/{roomId}/relations/{eventId}`
|
||||||
|
pub async fn get_relating_events_route(
|
||||||
|
body: Ruma<get_relating_events::v1::Request>,
|
||||||
|
) -> Result<get_relating_events::v1::Response> {
|
||||||
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
|
let from = match body.from.clone() {
|
||||||
|
Some(from) => PduCount::try_from_string(&from)?,
|
||||||
|
None => match ruma::api::Direction::Backward {
|
||||||
|
// TODO: fix ruma so `body.dir` exists
|
||||||
|
ruma::api::Direction::Forward => PduCount::min(),
|
||||||
|
ruma::api::Direction::Backward => PduCount::max(),
|
||||||
|
},
|
||||||
|
};
|
||||||
|
|
||||||
|
let to = body
|
||||||
|
.to
|
||||||
|
.as_ref()
|
||||||
|
.and_then(|t| PduCount::try_from_string(t).ok());
|
||||||
|
|
||||||
|
// Use limit or else 10, with maximum 100
|
||||||
|
let limit = body
|
||||||
|
.limit
|
||||||
|
.and_then(|u| u32::try_from(u).ok())
|
||||||
|
.map_or(10_usize, |u| u as usize)
|
||||||
|
.min(100);
|
||||||
|
|
||||||
|
services()
|
||||||
|
.rooms
|
||||||
|
.pdu_metadata
|
||||||
|
.paginate_relations_with_filter(
|
||||||
|
sender_user,
|
||||||
|
&body.room_id,
|
||||||
|
&body.event_id,
|
||||||
|
None,
|
||||||
|
None,
|
||||||
|
from,
|
||||||
|
to,
|
||||||
|
limit,
|
||||||
|
)
|
||||||
|
}
|
|
@ -1,4 +1,4 @@
|
||||||
use crate::{database::DatabaseGuard, utils::HtmlEscape, Error, Result, Ruma};
|
use crate::{services, utils::HtmlEscape, Error, Result, Ruma};
|
||||||
use ruma::{
|
use ruma::{
|
||||||
api::client::{error::ErrorKind, room::report_content},
|
api::client::{error::ErrorKind, room::report_content},
|
||||||
events::room::message,
|
events::room::message,
|
||||||
|
@ -10,12 +10,11 @@ use ruma::{
|
||||||
/// Reports an inappropriate event to homeserver admins
|
/// Reports an inappropriate event to homeserver admins
|
||||||
///
|
///
|
||||||
pub async fn report_event_route(
|
pub async fn report_event_route(
|
||||||
db: DatabaseGuard,
|
body: Ruma<report_content::v3::Request>,
|
||||||
body: Ruma<report_content::v3::IncomingRequest>,
|
|
||||||
) -> Result<report_content::v3::Response> {
|
) -> Result<report_content::v3::Response> {
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
let pdu = match db.rooms.get_pdu(&body.event_id)? {
|
let pdu = match services().rooms.timeline.get_pdu(&body.event_id)? {
|
||||||
Some(pdu) => pdu,
|
Some(pdu) => pdu,
|
||||||
_ => {
|
_ => {
|
||||||
return Err(Error::BadRequest(
|
return Err(Error::BadRequest(
|
||||||
|
@ -39,7 +38,7 @@ pub async fn report_event_route(
|
||||||
));
|
));
|
||||||
};
|
};
|
||||||
|
|
||||||
db.admin
|
services().admin
|
||||||
.send_message(message::RoomMessageEventContent::text_html(
|
.send_message(message::RoomMessageEventContent::text_html(
|
||||||
format!(
|
format!(
|
||||||
"Report received from: {}\n\n\
|
"Report received from: {}\n\n\
|
||||||
|
@ -66,7 +65,5 @@ pub async fn report_event_route(
|
||||||
),
|
),
|
||||||
));
|
));
|
||||||
|
|
||||||
db.flush()?;
|
|
||||||
|
|
||||||
Ok(report_content::v3::Response {})
|
Ok(report_content::v3::Response {})
|
||||||
}
|
}
|
|
@ -1,5 +1,5 @@
|
||||||
use crate::{
|
use crate::{
|
||||||
client_server::invite_helper, database::DatabaseGuard, pdu::PduBuilder, Error, Result, Ruma,
|
api::client_server::invite_helper, service::pdu::PduBuilder, services, Error, Result, Ruma,
|
||||||
};
|
};
|
||||||
use ruma::{
|
use ruma::{
|
||||||
api::client::{
|
api::client::{
|
||||||
|
@ -19,11 +19,11 @@ use ruma::{
|
||||||
tombstone::RoomTombstoneEventContent,
|
tombstone::RoomTombstoneEventContent,
|
||||||
topic::RoomTopicEventContent,
|
topic::RoomTopicEventContent,
|
||||||
},
|
},
|
||||||
RoomEventType, StateEventType,
|
StateEventType, TimelineEventType,
|
||||||
},
|
},
|
||||||
int,
|
int,
|
||||||
serde::{CanonicalJsonObject, JsonObject},
|
serde::JsonObject,
|
||||||
RoomAliasId, RoomId,
|
CanonicalJsonObject, OwnedRoomAliasId, RoomAliasId, RoomId,
|
||||||
};
|
};
|
||||||
use serde_json::{json, value::to_raw_value};
|
use serde_json::{json, value::to_raw_value};
|
||||||
use std::{cmp::max, collections::BTreeMap, sync::Arc};
|
use std::{cmp::max, collections::BTreeMap, sync::Arc};
|
||||||
|
@ -46,19 +46,19 @@ use tracing::{info, warn};
|
||||||
/// - Send events implied by `name` and `topic`
|
/// - Send events implied by `name` and `topic`
|
||||||
/// - Send invite events
|
/// - Send invite events
|
||||||
pub async fn create_room_route(
|
pub async fn create_room_route(
|
||||||
db: DatabaseGuard,
|
body: Ruma<create_room::v3::Request>,
|
||||||
body: Ruma<create_room::v3::IncomingRequest>,
|
|
||||||
) -> Result<create_room::v3::Response> {
|
) -> Result<create_room::v3::Response> {
|
||||||
use create_room::v3::RoomPreset;
|
use create_room::v3::RoomPreset;
|
||||||
|
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
let room_id = RoomId::new(db.globals.server_name());
|
let room_id = RoomId::new(services().globals.server_name());
|
||||||
|
|
||||||
db.rooms.get_or_create_shortroomid(&room_id, &db.globals)?;
|
services().rooms.short.get_or_create_shortroomid(&room_id)?;
|
||||||
|
|
||||||
let mutex_state = Arc::clone(
|
let mutex_state = Arc::clone(
|
||||||
db.globals
|
services()
|
||||||
|
.globals
|
||||||
.roomid_mutex_state
|
.roomid_mutex_state
|
||||||
.write()
|
.write()
|
||||||
.unwrap()
|
.unwrap()
|
||||||
|
@ -67,9 +67,9 @@ pub async fn create_room_route(
|
||||||
);
|
);
|
||||||
let state_lock = mutex_state.lock().await;
|
let state_lock = mutex_state.lock().await;
|
||||||
|
|
||||||
if !db.globals.allow_room_creation()
|
if !services().globals.allow_room_creation()
|
||||||
&& !body.from_appservice
|
&& !body.from_appservice
|
||||||
&& !db.users.is_admin(sender_user, &db.rooms, &db.globals)?
|
&& !services().users.is_admin(sender_user)?
|
||||||
{
|
{
|
||||||
return Err(Error::BadRequest(
|
return Err(Error::BadRequest(
|
||||||
ErrorKind::Forbidden,
|
ErrorKind::Forbidden,
|
||||||
|
@ -77,18 +77,24 @@ pub async fn create_room_route(
|
||||||
));
|
));
|
||||||
}
|
}
|
||||||
|
|
||||||
let alias: Option<Box<RoomAliasId>> =
|
let alias: Option<OwnedRoomAliasId> =
|
||||||
body.room_alias_name
|
body.room_alias_name
|
||||||
.as_ref()
|
.as_ref()
|
||||||
.map_or(Ok(None), |localpart| {
|
.map_or(Ok(None), |localpart| {
|
||||||
// TODO: Check for invalid characters and maximum length
|
// TODO: Check for invalid characters and maximum length
|
||||||
let alias =
|
let alias = RoomAliasId::parse(format!(
|
||||||
RoomAliasId::parse(format!("#{}:{}", localpart, db.globals.server_name()))
|
"#{}:{}",
|
||||||
.map_err(|_| {
|
localpart,
|
||||||
Error::BadRequest(ErrorKind::InvalidParam, "Invalid alias.")
|
services().globals.server_name()
|
||||||
})?;
|
))
|
||||||
|
.map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid alias."))?;
|
||||||
|
|
||||||
if db.rooms.id_from_alias(&alias)?.is_some() {
|
if services()
|
||||||
|
.rooms
|
||||||
|
.alias
|
||||||
|
.resolve_local_alias(&alias)?
|
||||||
|
.is_some()
|
||||||
|
{
|
||||||
Err(Error::BadRequest(
|
Err(Error::BadRequest(
|
||||||
ErrorKind::RoomInUse,
|
ErrorKind::RoomInUse,
|
||||||
"Room alias already exists.",
|
"Room alias already exists.",
|
||||||
|
@ -100,7 +106,11 @@ pub async fn create_room_route(
|
||||||
|
|
||||||
let room_version = match body.room_version.clone() {
|
let room_version = match body.room_version.clone() {
|
||||||
Some(room_version) => {
|
Some(room_version) => {
|
||||||
if db.rooms.is_supported_version(&db, &room_version) {
|
if services()
|
||||||
|
.globals
|
||||||
|
.supported_room_versions()
|
||||||
|
.contains(&room_version)
|
||||||
|
{
|
||||||
room_version
|
room_version
|
||||||
} else {
|
} else {
|
||||||
return Err(Error::BadRequest(
|
return Err(Error::BadRequest(
|
||||||
|
@ -109,7 +119,7 @@ pub async fn create_room_route(
|
||||||
));
|
));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
None => db.globals.default_room_version(),
|
None => services().globals.default_room_version(),
|
||||||
};
|
};
|
||||||
|
|
||||||
let content = match &body.creation_content {
|
let content = match &body.creation_content {
|
||||||
|
@ -132,8 +142,9 @@ pub async fn create_room_route(
|
||||||
content
|
content
|
||||||
}
|
}
|
||||||
None => {
|
None => {
|
||||||
|
// TODO: Add correct value for v11
|
||||||
let mut content = serde_json::from_str::<CanonicalJsonObject>(
|
let mut content = serde_json::from_str::<CanonicalJsonObject>(
|
||||||
to_raw_value(&RoomCreateEventContent::new(sender_user.clone()))
|
to_raw_value(&RoomCreateEventContent::new_v1(sender_user.clone()))
|
||||||
.map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Invalid creation content"))?
|
.map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Invalid creation content"))?
|
||||||
.get(),
|
.get(),
|
||||||
)
|
)
|
||||||
|
@ -163,9 +174,9 @@ pub async fn create_room_route(
|
||||||
}
|
}
|
||||||
|
|
||||||
// 1. The room create event
|
// 1. The room create event
|
||||||
db.rooms.build_and_append_pdu(
|
services().rooms.timeline.build_and_append_pdu(
|
||||||
PduBuilder {
|
PduBuilder {
|
||||||
event_type: RoomEventType::RoomCreate,
|
event_type: TimelineEventType::RoomCreate,
|
||||||
content: to_raw_value(&content).expect("event is valid, we just created it"),
|
content: to_raw_value(&content).expect("event is valid, we just created it"),
|
||||||
unsigned: None,
|
unsigned: None,
|
||||||
state_key: Some("".to_owned()),
|
state_key: Some("".to_owned()),
|
||||||
|
@ -173,21 +184,20 @@ pub async fn create_room_route(
|
||||||
},
|
},
|
||||||
sender_user,
|
sender_user,
|
||||||
&room_id,
|
&room_id,
|
||||||
&db,
|
|
||||||
&state_lock,
|
&state_lock,
|
||||||
)?;
|
)?;
|
||||||
|
|
||||||
// 2. Let the room creator join
|
// 2. Let the room creator join
|
||||||
db.rooms.build_and_append_pdu(
|
services().rooms.timeline.build_and_append_pdu(
|
||||||
PduBuilder {
|
PduBuilder {
|
||||||
event_type: RoomEventType::RoomMember,
|
event_type: TimelineEventType::RoomMember,
|
||||||
content: to_raw_value(&RoomMemberEventContent {
|
content: to_raw_value(&RoomMemberEventContent {
|
||||||
membership: MembershipState::Join,
|
membership: MembershipState::Join,
|
||||||
displayname: db.users.displayname(sender_user)?,
|
displayname: services().users.displayname(sender_user)?,
|
||||||
avatar_url: db.users.avatar_url(sender_user)?,
|
avatar_url: services().users.avatar_url(sender_user)?,
|
||||||
is_direct: Some(body.is_direct),
|
is_direct: Some(body.is_direct),
|
||||||
third_party_invite: None,
|
third_party_invite: None,
|
||||||
blurhash: db.users.blurhash(sender_user)?,
|
blurhash: services().users.blurhash(sender_user)?,
|
||||||
reason: None,
|
reason: None,
|
||||||
join_authorized_via_users_server: None,
|
join_authorized_via_users_server: None,
|
||||||
})
|
})
|
||||||
|
@ -198,17 +208,13 @@ pub async fn create_room_route(
|
||||||
},
|
},
|
||||||
sender_user,
|
sender_user,
|
||||||
&room_id,
|
&room_id,
|
||||||
&db,
|
|
||||||
&state_lock,
|
&state_lock,
|
||||||
)?;
|
)?;
|
||||||
|
|
||||||
// 3. Power levels
|
// 3. Power levels
|
||||||
|
|
||||||
// Figure out preset. We need it for preset specific events
|
// Figure out preset. We need it for preset specific events
|
||||||
let preset = body
|
let preset = body.preset.clone().unwrap_or(match &body.visibility {
|
||||||
.preset
|
|
||||||
.clone()
|
|
||||||
.unwrap_or_else(|| match &body.visibility {
|
|
||||||
room::Visibility::Private => RoomPreset::PrivateChat,
|
room::Visibility::Private => RoomPreset::PrivateChat,
|
||||||
room::Visibility::Public => RoomPreset::PublicChat,
|
room::Visibility::Public => RoomPreset::PublicChat,
|
||||||
_ => RoomPreset::PrivateChat, // Room visibility should not be custom
|
_ => RoomPreset::PrivateChat, // Room visibility should not be custom
|
||||||
|
@ -240,9 +246,9 @@ pub async fn create_room_route(
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
db.rooms.build_and_append_pdu(
|
services().rooms.timeline.build_and_append_pdu(
|
||||||
PduBuilder {
|
PduBuilder {
|
||||||
event_type: RoomEventType::RoomPowerLevels,
|
event_type: TimelineEventType::RoomPowerLevels,
|
||||||
content: to_raw_value(&power_levels_content)
|
content: to_raw_value(&power_levels_content)
|
||||||
.expect("to_raw_value always works on serde_json::Value"),
|
.expect("to_raw_value always works on serde_json::Value"),
|
||||||
unsigned: None,
|
unsigned: None,
|
||||||
|
@ -251,15 +257,14 @@ pub async fn create_room_route(
|
||||||
},
|
},
|
||||||
sender_user,
|
sender_user,
|
||||||
&room_id,
|
&room_id,
|
||||||
&db,
|
|
||||||
&state_lock,
|
&state_lock,
|
||||||
)?;
|
)?;
|
||||||
|
|
||||||
// 4. Canonical room alias
|
// 4. Canonical room alias
|
||||||
if let Some(room_alias_id) = &alias {
|
if let Some(room_alias_id) = &alias {
|
||||||
db.rooms.build_and_append_pdu(
|
services().rooms.timeline.build_and_append_pdu(
|
||||||
PduBuilder {
|
PduBuilder {
|
||||||
event_type: RoomEventType::RoomCanonicalAlias,
|
event_type: TimelineEventType::RoomCanonicalAlias,
|
||||||
content: to_raw_value(&RoomCanonicalAliasEventContent {
|
content: to_raw_value(&RoomCanonicalAliasEventContent {
|
||||||
alias: Some(room_alias_id.to_owned()),
|
alias: Some(room_alias_id.to_owned()),
|
||||||
alt_aliases: vec![],
|
alt_aliases: vec![],
|
||||||
|
@ -271,7 +276,6 @@ pub async fn create_room_route(
|
||||||
},
|
},
|
||||||
sender_user,
|
sender_user,
|
||||||
&room_id,
|
&room_id,
|
||||||
&db,
|
|
||||||
&state_lock,
|
&state_lock,
|
||||||
)?;
|
)?;
|
||||||
}
|
}
|
||||||
|
@ -279,9 +283,9 @@ pub async fn create_room_route(
|
||||||
// 5. Events set by preset
|
// 5. Events set by preset
|
||||||
|
|
||||||
// 5.1 Join Rules
|
// 5.1 Join Rules
|
||||||
db.rooms.build_and_append_pdu(
|
services().rooms.timeline.build_and_append_pdu(
|
||||||
PduBuilder {
|
PduBuilder {
|
||||||
event_type: RoomEventType::RoomJoinRules,
|
event_type: TimelineEventType::RoomJoinRules,
|
||||||
content: to_raw_value(&RoomJoinRulesEventContent::new(match preset {
|
content: to_raw_value(&RoomJoinRulesEventContent::new(match preset {
|
||||||
RoomPreset::PublicChat => JoinRule::Public,
|
RoomPreset::PublicChat => JoinRule::Public,
|
||||||
// according to spec "invite" is the default
|
// according to spec "invite" is the default
|
||||||
|
@ -294,14 +298,13 @@ pub async fn create_room_route(
|
||||||
},
|
},
|
||||||
sender_user,
|
sender_user,
|
||||||
&room_id,
|
&room_id,
|
||||||
&db,
|
|
||||||
&state_lock,
|
&state_lock,
|
||||||
)?;
|
)?;
|
||||||
|
|
||||||
// 5.2 History Visibility
|
// 5.2 History Visibility
|
||||||
db.rooms.build_and_append_pdu(
|
services().rooms.timeline.build_and_append_pdu(
|
||||||
PduBuilder {
|
PduBuilder {
|
||||||
event_type: RoomEventType::RoomHistoryVisibility,
|
event_type: TimelineEventType::RoomHistoryVisibility,
|
||||||
content: to_raw_value(&RoomHistoryVisibilityEventContent::new(
|
content: to_raw_value(&RoomHistoryVisibilityEventContent::new(
|
||||||
HistoryVisibility::Shared,
|
HistoryVisibility::Shared,
|
||||||
))
|
))
|
||||||
|
@ -312,14 +315,13 @@ pub async fn create_room_route(
|
||||||
},
|
},
|
||||||
sender_user,
|
sender_user,
|
||||||
&room_id,
|
&room_id,
|
||||||
&db,
|
|
||||||
&state_lock,
|
&state_lock,
|
||||||
)?;
|
)?;
|
||||||
|
|
||||||
// 5.3 Guest Access
|
// 5.3 Guest Access
|
||||||
db.rooms.build_and_append_pdu(
|
services().rooms.timeline.build_and_append_pdu(
|
||||||
PduBuilder {
|
PduBuilder {
|
||||||
event_type: RoomEventType::RoomGuestAccess,
|
event_type: TimelineEventType::RoomGuestAccess,
|
||||||
content: to_raw_value(&RoomGuestAccessEventContent::new(match preset {
|
content: to_raw_value(&RoomGuestAccessEventContent::new(match preset {
|
||||||
RoomPreset::PublicChat => GuestAccess::Forbidden,
|
RoomPreset::PublicChat => GuestAccess::Forbidden,
|
||||||
_ => GuestAccess::CanJoin,
|
_ => GuestAccess::CanJoin,
|
||||||
|
@ -331,7 +333,6 @@ pub async fn create_room_route(
|
||||||
},
|
},
|
||||||
sender_user,
|
sender_user,
|
||||||
&room_id,
|
&room_id,
|
||||||
&db,
|
|
||||||
&state_lock,
|
&state_lock,
|
||||||
)?;
|
)?;
|
||||||
|
|
||||||
|
@ -346,21 +347,26 @@ pub async fn create_room_route(
|
||||||
pdu_builder.state_key.get_or_insert_with(|| "".to_owned());
|
pdu_builder.state_key.get_or_insert_with(|| "".to_owned());
|
||||||
|
|
||||||
// Silently skip encryption events if they are not allowed
|
// Silently skip encryption events if they are not allowed
|
||||||
if pdu_builder.event_type == RoomEventType::RoomEncryption && !db.globals.allow_encryption()
|
if pdu_builder.event_type == TimelineEventType::RoomEncryption
|
||||||
|
&& !services().globals.allow_encryption()
|
||||||
{
|
{
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
|
|
||||||
db.rooms
|
services().rooms.timeline.build_and_append_pdu(
|
||||||
.build_and_append_pdu(pdu_builder, sender_user, &room_id, &db, &state_lock)?;
|
pdu_builder,
|
||||||
|
sender_user,
|
||||||
|
&room_id,
|
||||||
|
&state_lock,
|
||||||
|
)?;
|
||||||
}
|
}
|
||||||
|
|
||||||
// 7. Events implied by name and topic
|
// 7. Events implied by name and topic
|
||||||
if let Some(name) = &body.name {
|
if let Some(name) = &body.name {
|
||||||
db.rooms.build_and_append_pdu(
|
services().rooms.timeline.build_and_append_pdu(
|
||||||
PduBuilder {
|
PduBuilder {
|
||||||
event_type: RoomEventType::RoomName,
|
event_type: TimelineEventType::RoomName,
|
||||||
content: to_raw_value(&RoomNameEventContent::new(Some(name.clone())))
|
content: to_raw_value(&RoomNameEventContent::new(name.clone()))
|
||||||
.expect("event is valid, we just created it"),
|
.expect("event is valid, we just created it"),
|
||||||
unsigned: None,
|
unsigned: None,
|
||||||
state_key: Some("".to_owned()),
|
state_key: Some("".to_owned()),
|
||||||
|
@ -368,15 +374,14 @@ pub async fn create_room_route(
|
||||||
},
|
},
|
||||||
sender_user,
|
sender_user,
|
||||||
&room_id,
|
&room_id,
|
||||||
&db,
|
|
||||||
&state_lock,
|
&state_lock,
|
||||||
)?;
|
)?;
|
||||||
}
|
}
|
||||||
|
|
||||||
if let Some(topic) = &body.topic {
|
if let Some(topic) = &body.topic {
|
||||||
db.rooms.build_and_append_pdu(
|
services().rooms.timeline.build_and_append_pdu(
|
||||||
PduBuilder {
|
PduBuilder {
|
||||||
event_type: RoomEventType::RoomTopic,
|
event_type: TimelineEventType::RoomTopic,
|
||||||
content: to_raw_value(&RoomTopicEventContent {
|
content: to_raw_value(&RoomTopicEventContent {
|
||||||
topic: topic.clone(),
|
topic: topic.clone(),
|
||||||
})
|
})
|
||||||
|
@ -387,7 +392,6 @@ pub async fn create_room_route(
|
||||||
},
|
},
|
||||||
sender_user,
|
sender_user,
|
||||||
&room_id,
|
&room_id,
|
||||||
&db,
|
|
||||||
&state_lock,
|
&state_lock,
|
||||||
)?;
|
)?;
|
||||||
}
|
}
|
||||||
|
@ -395,22 +399,20 @@ pub async fn create_room_route(
|
||||||
// 8. Events implied by invite (and TODO: invite_3pid)
|
// 8. Events implied by invite (and TODO: invite_3pid)
|
||||||
drop(state_lock);
|
drop(state_lock);
|
||||||
for user_id in &body.invite {
|
for user_id in &body.invite {
|
||||||
let _ = invite_helper(sender_user, user_id, &room_id, &db, body.is_direct).await;
|
let _ = invite_helper(sender_user, user_id, &room_id, None, body.is_direct).await;
|
||||||
}
|
}
|
||||||
|
|
||||||
// Homeserver specific stuff
|
// Homeserver specific stuff
|
||||||
if let Some(alias) = alias {
|
if let Some(alias) = alias {
|
||||||
db.rooms.set_alias(&alias, Some(&room_id), &db.globals)?;
|
services().rooms.alias.set_alias(&alias, &room_id)?;
|
||||||
}
|
}
|
||||||
|
|
||||||
if body.visibility == room::Visibility::Public {
|
if body.visibility == room::Visibility::Public {
|
||||||
db.rooms.set_public(&room_id, true)?;
|
services().rooms.directory.set_public(&room_id)?;
|
||||||
}
|
}
|
||||||
|
|
||||||
info!("{} created a room", sender_user);
|
info!("{} created a room", sender_user);
|
||||||
|
|
||||||
db.flush()?;
|
|
||||||
|
|
||||||
Ok(create_room::v3::Response::new(room_id))
|
Ok(create_room::v3::Response::new(room_id))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -420,24 +422,35 @@ pub async fn create_room_route(
|
||||||
///
|
///
|
||||||
/// - You have to currently be joined to the room (TODO: Respect history visibility)
|
/// - You have to currently be joined to the room (TODO: Respect history visibility)
|
||||||
pub async fn get_room_event_route(
|
pub async fn get_room_event_route(
|
||||||
db: DatabaseGuard,
|
body: Ruma<get_room_event::v3::Request>,
|
||||||
body: Ruma<get_room_event::v3::IncomingRequest>,
|
|
||||||
) -> Result<get_room_event::v3::Response> {
|
) -> Result<get_room_event::v3::Response> {
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
if !db.rooms.is_joined(sender_user, &body.room_id)? {
|
let event = services()
|
||||||
|
.rooms
|
||||||
|
.timeline
|
||||||
|
.get_pdu(&body.event_id)?
|
||||||
|
.ok_or_else(|| {
|
||||||
|
warn!("Event not found, event ID: {:?}", &body.event_id);
|
||||||
|
Error::BadRequest(ErrorKind::NotFound, "Event not found.")
|
||||||
|
})?;
|
||||||
|
|
||||||
|
if !services().rooms.state_accessor.user_can_see_event(
|
||||||
|
sender_user,
|
||||||
|
&event.room_id,
|
||||||
|
&body.event_id,
|
||||||
|
)? {
|
||||||
return Err(Error::BadRequest(
|
return Err(Error::BadRequest(
|
||||||
ErrorKind::Forbidden,
|
ErrorKind::Forbidden,
|
||||||
"You don't have permission to view this room.",
|
"You don't have permission to view this event.",
|
||||||
));
|
));
|
||||||
}
|
}
|
||||||
|
|
||||||
|
let mut event = (*event).clone();
|
||||||
|
event.add_age()?;
|
||||||
|
|
||||||
Ok(get_room_event::v3::Response {
|
Ok(get_room_event::v3::Response {
|
||||||
event: db
|
event: event.to_room_event(),
|
||||||
.rooms
|
|
||||||
.get_pdu(&body.event_id)?
|
|
||||||
.ok_or(Error::BadRequest(ErrorKind::NotFound, "Event not found."))?
|
|
||||||
.to_room_event(),
|
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -447,12 +460,15 @@ pub async fn get_room_event_route(
|
||||||
///
|
///
|
||||||
/// - Only users joined to the room are allowed to call this TODO: Allow any user to call it if history_visibility is world readable
|
/// - Only users joined to the room are allowed to call this TODO: Allow any user to call it if history_visibility is world readable
|
||||||
pub async fn get_room_aliases_route(
|
pub async fn get_room_aliases_route(
|
||||||
db: DatabaseGuard,
|
body: Ruma<aliases::v3::Request>,
|
||||||
body: Ruma<aliases::v3::IncomingRequest>,
|
|
||||||
) -> Result<aliases::v3::Response> {
|
) -> Result<aliases::v3::Response> {
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
if !db.rooms.is_joined(sender_user, &body.room_id)? {
|
if !services()
|
||||||
|
.rooms
|
||||||
|
.state_cache
|
||||||
|
.is_joined(sender_user, &body.room_id)?
|
||||||
|
{
|
||||||
return Err(Error::BadRequest(
|
return Err(Error::BadRequest(
|
||||||
ErrorKind::Forbidden,
|
ErrorKind::Forbidden,
|
||||||
"You don't have permission to view this room.",
|
"You don't have permission to view this room.",
|
||||||
|
@ -460,9 +476,10 @@ pub async fn get_room_aliases_route(
|
||||||
}
|
}
|
||||||
|
|
||||||
Ok(aliases::v3::Response {
|
Ok(aliases::v3::Response {
|
||||||
aliases: db
|
aliases: services()
|
||||||
.rooms
|
.rooms
|
||||||
.room_aliases(&body.room_id)
|
.alias
|
||||||
|
.local_aliases_for_room(&body.room_id)
|
||||||
.filter_map(|a| a.ok())
|
.filter_map(|a| a.ok())
|
||||||
.collect(),
|
.collect(),
|
||||||
})
|
})
|
||||||
|
@ -479,12 +496,15 @@ pub async fn get_room_aliases_route(
|
||||||
/// - Moves local aliases
|
/// - Moves local aliases
|
||||||
/// - Modifies old room power levels to prevent users from speaking
|
/// - Modifies old room power levels to prevent users from speaking
|
||||||
pub async fn upgrade_room_route(
|
pub async fn upgrade_room_route(
|
||||||
db: DatabaseGuard,
|
body: Ruma<upgrade_room::v3::Request>,
|
||||||
body: Ruma<upgrade_room::v3::IncomingRequest>,
|
|
||||||
) -> Result<upgrade_room::v3::Response> {
|
) -> Result<upgrade_room::v3::Response> {
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
if !db.rooms.is_supported_version(&db, &body.new_version) {
|
if !services()
|
||||||
|
.globals
|
||||||
|
.supported_room_versions()
|
||||||
|
.contains(&body.new_version)
|
||||||
|
{
|
||||||
return Err(Error::BadRequest(
|
return Err(Error::BadRequest(
|
||||||
ErrorKind::UnsupportedRoomVersion,
|
ErrorKind::UnsupportedRoomVersion,
|
||||||
"This server does not support that room version.",
|
"This server does not support that room version.",
|
||||||
|
@ -492,12 +512,15 @@ pub async fn upgrade_room_route(
|
||||||
}
|
}
|
||||||
|
|
||||||
// Create a replacement room
|
// Create a replacement room
|
||||||
let replacement_room = RoomId::new(db.globals.server_name());
|
let replacement_room = RoomId::new(services().globals.server_name());
|
||||||
db.rooms
|
services()
|
||||||
.get_or_create_shortroomid(&replacement_room, &db.globals)?;
|
.rooms
|
||||||
|
.short
|
||||||
|
.get_or_create_shortroomid(&replacement_room)?;
|
||||||
|
|
||||||
let mutex_state = Arc::clone(
|
let mutex_state = Arc::clone(
|
||||||
db.globals
|
services()
|
||||||
|
.globals
|
||||||
.roomid_mutex_state
|
.roomid_mutex_state
|
||||||
.write()
|
.write()
|
||||||
.unwrap()
|
.unwrap()
|
||||||
|
@ -508,9 +531,9 @@ pub async fn upgrade_room_route(
|
||||||
|
|
||||||
// Send a m.room.tombstone event to the old room to indicate that it is not intended to be used any further
|
// Send a m.room.tombstone event to the old room to indicate that it is not intended to be used any further
|
||||||
// Fail if the sender does not have the required permissions
|
// Fail if the sender does not have the required permissions
|
||||||
let tombstone_event_id = db.rooms.build_and_append_pdu(
|
let tombstone_event_id = services().rooms.timeline.build_and_append_pdu(
|
||||||
PduBuilder {
|
PduBuilder {
|
||||||
event_type: RoomEventType::RoomTombstone,
|
event_type: TimelineEventType::RoomTombstone,
|
||||||
content: to_raw_value(&RoomTombstoneEventContent {
|
content: to_raw_value(&RoomTombstoneEventContent {
|
||||||
body: "This room has been replaced".to_owned(),
|
body: "This room has been replaced".to_owned(),
|
||||||
replacement_room: replacement_room.clone(),
|
replacement_room: replacement_room.clone(),
|
||||||
|
@ -522,14 +545,14 @@ pub async fn upgrade_room_route(
|
||||||
},
|
},
|
||||||
sender_user,
|
sender_user,
|
||||||
&body.room_id,
|
&body.room_id,
|
||||||
&db,
|
|
||||||
&state_lock,
|
&state_lock,
|
||||||
)?;
|
)?;
|
||||||
|
|
||||||
// Change lock to replacement room
|
// Change lock to replacement room
|
||||||
drop(state_lock);
|
drop(state_lock);
|
||||||
let mutex_state = Arc::clone(
|
let mutex_state = Arc::clone(
|
||||||
db.globals
|
services()
|
||||||
|
.globals
|
||||||
.roomid_mutex_state
|
.roomid_mutex_state
|
||||||
.write()
|
.write()
|
||||||
.unwrap()
|
.unwrap()
|
||||||
|
@ -540,7 +563,9 @@ pub async fn upgrade_room_route(
|
||||||
|
|
||||||
// Get the old room creation event
|
// Get the old room creation event
|
||||||
let mut create_event_content = serde_json::from_str::<CanonicalJsonObject>(
|
let mut create_event_content = serde_json::from_str::<CanonicalJsonObject>(
|
||||||
db.rooms
|
services()
|
||||||
|
.rooms
|
||||||
|
.state_accessor
|
||||||
.room_state_get(&body.room_id, &StateEventType::RoomCreate, "")?
|
.room_state_get(&body.room_id, &StateEventType::RoomCreate, "")?
|
||||||
.ok_or_else(|| Error::bad_database("Found room without m.room.create event."))?
|
.ok_or_else(|| Error::bad_database("Found room without m.room.create event."))?
|
||||||
.content
|
.content
|
||||||
|
@ -588,9 +613,9 @@ pub async fn upgrade_room_route(
|
||||||
));
|
));
|
||||||
}
|
}
|
||||||
|
|
||||||
db.rooms.build_and_append_pdu(
|
services().rooms.timeline.build_and_append_pdu(
|
||||||
PduBuilder {
|
PduBuilder {
|
||||||
event_type: RoomEventType::RoomCreate,
|
event_type: TimelineEventType::RoomCreate,
|
||||||
content: to_raw_value(&create_event_content)
|
content: to_raw_value(&create_event_content)
|
||||||
.expect("event is valid, we just created it"),
|
.expect("event is valid, we just created it"),
|
||||||
unsigned: None,
|
unsigned: None,
|
||||||
|
@ -599,21 +624,20 @@ pub async fn upgrade_room_route(
|
||||||
},
|
},
|
||||||
sender_user,
|
sender_user,
|
||||||
&replacement_room,
|
&replacement_room,
|
||||||
&db,
|
|
||||||
&state_lock,
|
&state_lock,
|
||||||
)?;
|
)?;
|
||||||
|
|
||||||
// Join the new room
|
// Join the new room
|
||||||
db.rooms.build_and_append_pdu(
|
services().rooms.timeline.build_and_append_pdu(
|
||||||
PduBuilder {
|
PduBuilder {
|
||||||
event_type: RoomEventType::RoomMember,
|
event_type: TimelineEventType::RoomMember,
|
||||||
content: to_raw_value(&RoomMemberEventContent {
|
content: to_raw_value(&RoomMemberEventContent {
|
||||||
membership: MembershipState::Join,
|
membership: MembershipState::Join,
|
||||||
displayname: db.users.displayname(sender_user)?,
|
displayname: services().users.displayname(sender_user)?,
|
||||||
avatar_url: db.users.avatar_url(sender_user)?,
|
avatar_url: services().users.avatar_url(sender_user)?,
|
||||||
is_direct: None,
|
is_direct: None,
|
||||||
third_party_invite: None,
|
third_party_invite: None,
|
||||||
blurhash: db.users.blurhash(sender_user)?,
|
blurhash: services().users.blurhash(sender_user)?,
|
||||||
reason: None,
|
reason: None,
|
||||||
join_authorized_via_users_server: None,
|
join_authorized_via_users_server: None,
|
||||||
})
|
})
|
||||||
|
@ -624,7 +648,6 @@ pub async fn upgrade_room_route(
|
||||||
},
|
},
|
||||||
sender_user,
|
sender_user,
|
||||||
&replacement_room,
|
&replacement_room,
|
||||||
&db,
|
|
||||||
&state_lock,
|
&state_lock,
|
||||||
)?;
|
)?;
|
||||||
|
|
||||||
|
@ -643,12 +666,17 @@ pub async fn upgrade_room_route(
|
||||||
|
|
||||||
// Replicate transferable state events to the new room
|
// Replicate transferable state events to the new room
|
||||||
for event_type in transferable_state_events {
|
for event_type in transferable_state_events {
|
||||||
let event_content = match db.rooms.room_state_get(&body.room_id, &event_type, "")? {
|
let event_content =
|
||||||
|
match services()
|
||||||
|
.rooms
|
||||||
|
.state_accessor
|
||||||
|
.room_state_get(&body.room_id, &event_type, "")?
|
||||||
|
{
|
||||||
Some(v) => v.content.clone(),
|
Some(v) => v.content.clone(),
|
||||||
None => continue, // Skipping missing events.
|
None => continue, // Skipping missing events.
|
||||||
};
|
};
|
||||||
|
|
||||||
db.rooms.build_and_append_pdu(
|
services().rooms.timeline.build_and_append_pdu(
|
||||||
PduBuilder {
|
PduBuilder {
|
||||||
event_type: event_type.to_string().into(),
|
event_type: event_type.to_string().into(),
|
||||||
content: event_content,
|
content: event_content,
|
||||||
|
@ -658,20 +686,28 @@ pub async fn upgrade_room_route(
|
||||||
},
|
},
|
||||||
sender_user,
|
sender_user,
|
||||||
&replacement_room,
|
&replacement_room,
|
||||||
&db,
|
|
||||||
&state_lock,
|
&state_lock,
|
||||||
)?;
|
)?;
|
||||||
}
|
}
|
||||||
|
|
||||||
// Moves any local aliases to the new room
|
// Moves any local aliases to the new room
|
||||||
for alias in db.rooms.room_aliases(&body.room_id).filter_map(|r| r.ok()) {
|
for alias in services()
|
||||||
db.rooms
|
.rooms
|
||||||
.set_alias(&alias, Some(&replacement_room), &db.globals)?;
|
.alias
|
||||||
|
.local_aliases_for_room(&body.room_id)
|
||||||
|
.filter_map(|r| r.ok())
|
||||||
|
{
|
||||||
|
services()
|
||||||
|
.rooms
|
||||||
|
.alias
|
||||||
|
.set_alias(&alias, &replacement_room)?;
|
||||||
}
|
}
|
||||||
|
|
||||||
// Get the old room power levels
|
// Get the old room power levels
|
||||||
let mut power_levels_event_content: RoomPowerLevelsEventContent = serde_json::from_str(
|
let mut power_levels_event_content: RoomPowerLevelsEventContent = serde_json::from_str(
|
||||||
db.rooms
|
services()
|
||||||
|
.rooms
|
||||||
|
.state_accessor
|
||||||
.room_state_get(&body.room_id, &StateEventType::RoomPowerLevels, "")?
|
.room_state_get(&body.room_id, &StateEventType::RoomPowerLevels, "")?
|
||||||
.ok_or_else(|| Error::bad_database("Found room without m.room.create event."))?
|
.ok_or_else(|| Error::bad_database("Found room without m.room.create event."))?
|
||||||
.content
|
.content
|
||||||
|
@ -685,9 +721,9 @@ pub async fn upgrade_room_route(
|
||||||
power_levels_event_content.invite = new_level;
|
power_levels_event_content.invite = new_level;
|
||||||
|
|
||||||
// Modify the power levels in the old room to prevent sending of events and inviting new users
|
// Modify the power levels in the old room to prevent sending of events and inviting new users
|
||||||
let _ = db.rooms.build_and_append_pdu(
|
let _ = services().rooms.timeline.build_and_append_pdu(
|
||||||
PduBuilder {
|
PduBuilder {
|
||||||
event_type: RoomEventType::RoomPowerLevels,
|
event_type: TimelineEventType::RoomPowerLevels,
|
||||||
content: to_raw_value(&power_levels_event_content)
|
content: to_raw_value(&power_levels_event_content)
|
||||||
.expect("event is valid, we just created it"),
|
.expect("event is valid, we just created it"),
|
||||||
unsigned: None,
|
unsigned: None,
|
||||||
|
@ -696,14 +732,11 @@ pub async fn upgrade_room_route(
|
||||||
},
|
},
|
||||||
sender_user,
|
sender_user,
|
||||||
&body.room_id,
|
&body.room_id,
|
||||||
&db,
|
|
||||||
&state_lock,
|
&state_lock,
|
||||||
)?;
|
)?;
|
||||||
|
|
||||||
drop(state_lock);
|
drop(state_lock);
|
||||||
|
|
||||||
db.flush()?;
|
|
||||||
|
|
||||||
// Return the replacement room id
|
// Return the replacement room id
|
||||||
Ok(upgrade_room::v3::Response { replacement_room })
|
Ok(upgrade_room::v3::Response { replacement_room })
|
||||||
}
|
}
|
|
@ -1,4 +1,4 @@
|
||||||
use crate::{database::DatabaseGuard, Error, Result, Ruma};
|
use crate::{services, Error, Result, Ruma};
|
||||||
use ruma::api::client::{
|
use ruma::api::client::{
|
||||||
error::ErrorKind,
|
error::ErrorKind,
|
||||||
search::search_events::{
|
search::search_events::{
|
||||||
|
@ -15,8 +15,7 @@ use std::collections::BTreeMap;
|
||||||
///
|
///
|
||||||
/// - Only works if the user is currently joined to the room (TODO: Respect history visibility)
|
/// - Only works if the user is currently joined to the room (TODO: Respect history visibility)
|
||||||
pub async fn search_events_route(
|
pub async fn search_events_route(
|
||||||
db: DatabaseGuard,
|
body: Ruma<search_events::v3::Request>,
|
||||||
body: Ruma<search_events::v3::IncomingRequest>,
|
|
||||||
) -> Result<search_events::v3::Response> {
|
) -> Result<search_events::v3::Response> {
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
|
@ -24,26 +23,34 @@ pub async fn search_events_route(
|
||||||
let filter = &search_criteria.filter;
|
let filter = &search_criteria.filter;
|
||||||
|
|
||||||
let room_ids = filter.rooms.clone().unwrap_or_else(|| {
|
let room_ids = filter.rooms.clone().unwrap_or_else(|| {
|
||||||
db.rooms
|
services()
|
||||||
|
.rooms
|
||||||
|
.state_cache
|
||||||
.rooms_joined(sender_user)
|
.rooms_joined(sender_user)
|
||||||
.filter_map(|r| r.ok())
|
.filter_map(|r| r.ok())
|
||||||
.collect()
|
.collect()
|
||||||
});
|
});
|
||||||
|
|
||||||
let limit = filter.limit.map_or(10, |l| u64::from(l) as usize);
|
// Use limit or else 10, with maximum 100
|
||||||
|
let limit = filter.limit.map_or(10, u64::from).min(100) as usize;
|
||||||
|
|
||||||
let mut searches = Vec::new();
|
let mut searches = Vec::new();
|
||||||
|
|
||||||
for room_id in room_ids {
|
for room_id in room_ids {
|
||||||
if !db.rooms.is_joined(sender_user, &room_id)? {
|
if !services()
|
||||||
|
.rooms
|
||||||
|
.state_cache
|
||||||
|
.is_joined(sender_user, &room_id)?
|
||||||
|
{
|
||||||
return Err(Error::BadRequest(
|
return Err(Error::BadRequest(
|
||||||
ErrorKind::Forbidden,
|
ErrorKind::Forbidden,
|
||||||
"You don't have permission to view this room.",
|
"You don't have permission to view this room.",
|
||||||
));
|
));
|
||||||
}
|
}
|
||||||
|
|
||||||
if let Some(search) = db
|
if let Some(search) = services()
|
||||||
.rooms
|
.rooms
|
||||||
|
.search
|
||||||
.search_pdus(&room_id, &search_criteria.search_term)?
|
.search_pdus(&room_id, &search_criteria.search_term)?
|
||||||
{
|
{
|
||||||
searches.push(search.0.peekable());
|
searches.push(search.0.peekable());
|
||||||
|
@ -75,6 +82,21 @@ pub async fn search_events_route(
|
||||||
|
|
||||||
let results: Vec<_> = results
|
let results: Vec<_> = results
|
||||||
.iter()
|
.iter()
|
||||||
|
.filter_map(|result| {
|
||||||
|
services()
|
||||||
|
.rooms
|
||||||
|
.timeline
|
||||||
|
.get_pdu_from_id(result)
|
||||||
|
.ok()?
|
||||||
|
.filter(|pdu| {
|
||||||
|
services()
|
||||||
|
.rooms
|
||||||
|
.state_accessor
|
||||||
|
.user_can_see_event(sender_user, &pdu.room_id, &pdu.event_id)
|
||||||
|
.unwrap_or(false)
|
||||||
|
})
|
||||||
|
.map(|pdu| pdu.to_room_event())
|
||||||
|
})
|
||||||
.map(|result| {
|
.map(|result| {
|
||||||
Ok::<_, Error>(SearchResult {
|
Ok::<_, Error>(SearchResult {
|
||||||
context: EventContextResult {
|
context: EventContextResult {
|
||||||
|
@ -85,10 +107,7 @@ pub async fn search_events_route(
|
||||||
start: None,
|
start: None,
|
||||||
},
|
},
|
||||||
rank: None,
|
rank: None,
|
||||||
result: db
|
result: Some(result),
|
||||||
.rooms
|
|
||||||
.get_pdu_from_id(result)?
|
|
||||||
.map(|pdu| pdu.to_room_event()),
|
|
||||||
})
|
})
|
||||||
})
|
})
|
||||||
.filter_map(|r| r.ok())
|
.filter_map(|r| r.ok())
|
||||||
|
@ -96,7 +115,7 @@ pub async fn search_events_route(
|
||||||
.take(limit)
|
.take(limit)
|
||||||
.collect();
|
.collect();
|
||||||
|
|
||||||
let next_batch = if results.len() < limit as usize {
|
let next_batch = if results.len() < limit {
|
||||||
None
|
None
|
||||||
} else {
|
} else {
|
||||||
Some((skip + limit).to_string())
|
Some((skip + limit).to_string())
|
|
@ -1,15 +1,15 @@
|
||||||
use super::{DEVICE_ID_LENGTH, TOKEN_LENGTH};
|
use super::{DEVICE_ID_LENGTH, TOKEN_LENGTH};
|
||||||
use crate::{database::DatabaseGuard, utils, Error, Result, Ruma};
|
use crate::{services, utils, Error, Result, Ruma};
|
||||||
use ruma::{
|
use ruma::{
|
||||||
api::client::{
|
api::client::{
|
||||||
error::ErrorKind,
|
error::ErrorKind,
|
||||||
session::{get_login_types, login, logout, logout_all},
|
session::{get_login_types, login, logout, logout_all},
|
||||||
uiaa::IncomingUserIdentifier,
|
uiaa::UserIdentifier,
|
||||||
},
|
},
|
||||||
UserId,
|
UserId,
|
||||||
};
|
};
|
||||||
use serde::Deserialize;
|
use serde::Deserialize;
|
||||||
use tracing::info;
|
use tracing::{info, warn};
|
||||||
|
|
||||||
#[derive(Debug, Deserialize)]
|
#[derive(Debug, Deserialize)]
|
||||||
struct Claims {
|
struct Claims {
|
||||||
|
@ -22,10 +22,11 @@ struct Claims {
|
||||||
/// Get the supported login types of this server. One of these should be used as the `type` field
|
/// Get the supported login types of this server. One of these should be used as the `type` field
|
||||||
/// when logging in.
|
/// when logging in.
|
||||||
pub async fn get_login_types_route(
|
pub async fn get_login_types_route(
|
||||||
_body: Ruma<get_login_types::v3::IncomingRequest>,
|
_body: Ruma<get_login_types::v3::Request>,
|
||||||
) -> Result<get_login_types::v3::Response> {
|
) -> Result<get_login_types::v3::Response> {
|
||||||
Ok(get_login_types::v3::Response::new(vec![
|
Ok(get_login_types::v3::Response::new(vec![
|
||||||
get_login_types::v3::LoginType::Password(Default::default()),
|
get_login_types::v3::LoginType::Password(Default::default()),
|
||||||
|
get_login_types::v3::LoginType::ApplicationService(Default::default()),
|
||||||
]))
|
]))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -40,28 +41,36 @@ pub async fn get_login_types_route(
|
||||||
///
|
///
|
||||||
/// Note: You can use [`GET /_matrix/client/r0/login`](fn.get_supported_versions_route.html) to see
|
/// Note: You can use [`GET /_matrix/client/r0/login`](fn.get_supported_versions_route.html) to see
|
||||||
/// supported login types.
|
/// supported login types.
|
||||||
pub async fn login_route(
|
pub async fn login_route(body: Ruma<login::v3::Request>) -> Result<login::v3::Response> {
|
||||||
db: DatabaseGuard,
|
// To allow deprecated login methods
|
||||||
body: Ruma<login::v3::IncomingRequest>,
|
#![allow(deprecated)]
|
||||||
) -> Result<login::v3::Response> {
|
|
||||||
// Validate login method
|
// Validate login method
|
||||||
// TODO: Other login methods
|
// TODO: Other login methods
|
||||||
let user_id = match &body.login_info {
|
let user_id = match &body.login_info {
|
||||||
login::v3::IncomingLoginInfo::Password(login::v3::IncomingPassword {
|
login::v3::LoginInfo::Password(login::v3::Password {
|
||||||
identifier,
|
identifier,
|
||||||
password,
|
password,
|
||||||
|
user,
|
||||||
|
address: _,
|
||||||
|
medium: _,
|
||||||
}) => {
|
}) => {
|
||||||
let username = if let IncomingUserIdentifier::UserIdOrLocalpart(user_id) = identifier {
|
let user_id = if let Some(UserIdentifier::UserIdOrLocalpart(user_id)) = identifier {
|
||||||
user_id.to_lowercase()
|
UserId::parse_with_server_name(
|
||||||
|
user_id.to_lowercase(),
|
||||||
|
services().globals.server_name(),
|
||||||
|
)
|
||||||
|
} else if let Some(user) = user {
|
||||||
|
UserId::parse(user)
|
||||||
} else {
|
} else {
|
||||||
|
warn!("Bad login type: {:?}", &body.login_info);
|
||||||
return Err(Error::BadRequest(ErrorKind::Forbidden, "Bad login type."));
|
return Err(Error::BadRequest(ErrorKind::Forbidden, "Bad login type."));
|
||||||
};
|
}
|
||||||
let user_id =
|
.map_err(|_| Error::BadRequest(ErrorKind::InvalidUsername, "Username is invalid."))?;
|
||||||
UserId::parse_with_server_name(username.to_owned(), db.globals.server_name())
|
|
||||||
.map_err(|_| {
|
let hash = services()
|
||||||
Error::BadRequest(ErrorKind::InvalidUsername, "Username is invalid.")
|
.users
|
||||||
})?;
|
.password_hash(&user_id)?
|
||||||
let hash = db.users.password_hash(&user_id)?.ok_or(Error::BadRequest(
|
.ok_or(Error::BadRequest(
|
||||||
ErrorKind::Forbidden,
|
ErrorKind::Forbidden,
|
||||||
"Wrong username or password.",
|
"Wrong username or password.",
|
||||||
))?;
|
))?;
|
||||||
|
@ -84,16 +93,16 @@ pub async fn login_route(
|
||||||
|
|
||||||
user_id
|
user_id
|
||||||
}
|
}
|
||||||
login::v3::IncomingLoginInfo::Token(login::v3::IncomingToken { token }) => {
|
login::v3::LoginInfo::Token(login::v3::Token { token }) => {
|
||||||
if let Some(jwt_decoding_key) = db.globals.jwt_decoding_key() {
|
if let Some(jwt_decoding_key) = services().globals.jwt_decoding_key() {
|
||||||
let token = jsonwebtoken::decode::<Claims>(
|
let token = jsonwebtoken::decode::<Claims>(
|
||||||
token,
|
token,
|
||||||
jwt_decoding_key,
|
jwt_decoding_key,
|
||||||
&jsonwebtoken::Validation::default(),
|
&jsonwebtoken::Validation::default(),
|
||||||
)
|
)
|
||||||
.map_err(|_| Error::BadRequest(ErrorKind::InvalidUsername, "Token is invalid."))?;
|
.map_err(|_| Error::BadRequest(ErrorKind::InvalidUsername, "Token is invalid."))?;
|
||||||
let username = token.claims.sub;
|
let username = token.claims.sub.to_lowercase();
|
||||||
UserId::parse_with_server_name(username, db.globals.server_name()).map_err(
|
UserId::parse_with_server_name(username, services().globals.server_name()).map_err(
|
||||||
|_| Error::BadRequest(ErrorKind::InvalidUsername, "Username is invalid."),
|
|_| Error::BadRequest(ErrorKind::InvalidUsername, "Username is invalid."),
|
||||||
)?
|
)?
|
||||||
} else {
|
} else {
|
||||||
|
@ -103,7 +112,31 @@ pub async fn login_route(
|
||||||
));
|
));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
login::v3::LoginInfo::ApplicationService(login::v3::ApplicationService {
|
||||||
|
identifier,
|
||||||
|
user,
|
||||||
|
}) => {
|
||||||
|
if !body.from_appservice {
|
||||||
|
return Err(Error::BadRequest(
|
||||||
|
ErrorKind::Forbidden,
|
||||||
|
"Forbidden login type.",
|
||||||
|
));
|
||||||
|
};
|
||||||
|
if let Some(UserIdentifier::UserIdOrLocalpart(user_id)) = identifier {
|
||||||
|
UserId::parse_with_server_name(
|
||||||
|
user_id.to_lowercase(),
|
||||||
|
services().globals.server_name(),
|
||||||
|
)
|
||||||
|
} else if let Some(user) = user {
|
||||||
|
UserId::parse(user)
|
||||||
|
} else {
|
||||||
|
warn!("Bad login type: {:?}", &body.login_info);
|
||||||
|
return Err(Error::BadRequest(ErrorKind::Forbidden, "Bad login type."));
|
||||||
|
}
|
||||||
|
.map_err(|_| Error::BadRequest(ErrorKind::InvalidUsername, "Username is invalid."))?
|
||||||
|
}
|
||||||
_ => {
|
_ => {
|
||||||
|
warn!("Unsupported or unknown login type: {:?}", &body.login_info);
|
||||||
return Err(Error::BadRequest(
|
return Err(Error::BadRequest(
|
||||||
ErrorKind::Unknown,
|
ErrorKind::Unknown,
|
||||||
"Unsupported login type.",
|
"Unsupported login type.",
|
||||||
|
@ -122,15 +155,16 @@ pub async fn login_route(
|
||||||
|
|
||||||
// Determine if device_id was provided and exists in the db for this user
|
// Determine if device_id was provided and exists in the db for this user
|
||||||
let device_exists = body.device_id.as_ref().map_or(false, |device_id| {
|
let device_exists = body.device_id.as_ref().map_or(false, |device_id| {
|
||||||
db.users
|
services()
|
||||||
|
.users
|
||||||
.all_device_ids(&user_id)
|
.all_device_ids(&user_id)
|
||||||
.any(|x| x.as_ref().map_or(false, |v| v == device_id))
|
.any(|x| x.as_ref().map_or(false, |v| v == device_id))
|
||||||
});
|
});
|
||||||
|
|
||||||
if device_exists {
|
if device_exists {
|
||||||
db.users.set_token(&user_id, &device_id, &token)?;
|
services().users.set_token(&user_id, &device_id, &token)?;
|
||||||
} else {
|
} else {
|
||||||
db.users.create_device(
|
services().users.create_device(
|
||||||
&user_id,
|
&user_id,
|
||||||
&device_id,
|
&device_id,
|
||||||
&token,
|
&token,
|
||||||
|
@ -140,14 +174,16 @@ pub async fn login_route(
|
||||||
|
|
||||||
info!("{} logged in", user_id);
|
info!("{} logged in", user_id);
|
||||||
|
|
||||||
db.flush()?;
|
// Homeservers are still required to send the `home_server` field
|
||||||
|
#[allow(deprecated)]
|
||||||
Ok(login::v3::Response {
|
Ok(login::v3::Response {
|
||||||
user_id,
|
user_id,
|
||||||
access_token: token,
|
access_token: token,
|
||||||
home_server: Some(db.globals.server_name().to_owned()),
|
home_server: Some(services().globals.server_name().to_owned()),
|
||||||
device_id,
|
device_id,
|
||||||
well_known: None,
|
well_known: None,
|
||||||
|
refresh_token: None,
|
||||||
|
expires_in: None,
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -159,16 +195,11 @@ pub async fn login_route(
|
||||||
/// - Deletes device metadata (device id, device display name, last seen ip, last seen ts)
|
/// - Deletes device metadata (device id, device display name, last seen ip, last seen ts)
|
||||||
/// - Forgets to-device events
|
/// - Forgets to-device events
|
||||||
/// - Triggers device list updates
|
/// - Triggers device list updates
|
||||||
pub async fn logout_route(
|
pub async fn logout_route(body: Ruma<logout::v3::Request>) -> Result<logout::v3::Response> {
|
||||||
db: DatabaseGuard,
|
|
||||||
body: Ruma<logout::v3::Request>,
|
|
||||||
) -> Result<logout::v3::Response> {
|
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
let sender_device = body.sender_device.as_ref().expect("user is authenticated");
|
let sender_device = body.sender_device.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
db.users.remove_device(sender_user, sender_device)?;
|
services().users.remove_device(sender_user, sender_device)?;
|
||||||
|
|
||||||
db.flush()?;
|
|
||||||
|
|
||||||
Ok(logout::v3::Response::new())
|
Ok(logout::v3::Response::new())
|
||||||
}
|
}
|
||||||
|
@ -185,16 +216,13 @@ pub async fn logout_route(
|
||||||
/// Note: This is equivalent to calling [`GET /_matrix/client/r0/logout`](fn.logout_route.html)
|
/// Note: This is equivalent to calling [`GET /_matrix/client/r0/logout`](fn.logout_route.html)
|
||||||
/// from each device of this user.
|
/// from each device of this user.
|
||||||
pub async fn logout_all_route(
|
pub async fn logout_all_route(
|
||||||
db: DatabaseGuard,
|
|
||||||
body: Ruma<logout_all::v3::Request>,
|
body: Ruma<logout_all::v3::Request>,
|
||||||
) -> Result<logout_all::v3::Response> {
|
) -> Result<logout_all::v3::Response> {
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
for device_id in db.users.all_device_ids(sender_user).flatten() {
|
for device_id in services().users.all_device_ids(sender_user).flatten() {
|
||||||
db.users.remove_device(sender_user, &device_id)?;
|
services().users.remove_device(sender_user, &device_id)?;
|
||||||
}
|
}
|
||||||
|
|
||||||
db.flush()?;
|
|
||||||
|
|
||||||
Ok(logout_all::v3::Response::new())
|
Ok(logout_all::v3::Response::new())
|
||||||
}
|
}
|
34
src/api/client_server/space.rs
Normal file
34
src/api/client_server/space.rs
Normal file
|
@ -0,0 +1,34 @@
|
||||||
|
use crate::{services, Result, Ruma};
|
||||||
|
use ruma::api::client::space::get_hierarchy;
|
||||||
|
|
||||||
|
/// # `GET /_matrix/client/v1/rooms/{room_id}/hierarchy``
|
||||||
|
///
|
||||||
|
/// Paginates over the space tree in a depth-first manner to locate child rooms of a given space.
|
||||||
|
pub async fn get_hierarchy_route(
|
||||||
|
body: Ruma<get_hierarchy::v1::Request>,
|
||||||
|
) -> Result<get_hierarchy::v1::Response> {
|
||||||
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
|
let skip = body
|
||||||
|
.from
|
||||||
|
.as_ref()
|
||||||
|
.and_then(|s| s.parse::<usize>().ok())
|
||||||
|
.unwrap_or(0);
|
||||||
|
|
||||||
|
let limit = body.limit.map_or(10, u64::from).min(100) as usize;
|
||||||
|
|
||||||
|
let max_depth = body.max_depth.map_or(3, u64::from).min(10) as usize + 1; // +1 to skip the space room itself
|
||||||
|
|
||||||
|
services()
|
||||||
|
.rooms
|
||||||
|
.spaces
|
||||||
|
.get_hierarchy(
|
||||||
|
sender_user,
|
||||||
|
&body.room_id,
|
||||||
|
limit,
|
||||||
|
skip,
|
||||||
|
max_depth,
|
||||||
|
body.suggested_only,
|
||||||
|
)
|
||||||
|
.await
|
||||||
|
}
|
|
@ -1,23 +1,18 @@
|
||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
|
|
||||||
use crate::{
|
use crate::{service::pdu::PduBuilder, services, Error, Result, Ruma, RumaResponse};
|
||||||
database::DatabaseGuard, pdu::PduBuilder, Database, Error, Result, Ruma, RumaResponse,
|
|
||||||
};
|
|
||||||
use ruma::{
|
use ruma::{
|
||||||
api::client::{
|
api::client::{
|
||||||
error::ErrorKind,
|
error::ErrorKind,
|
||||||
state::{get_state_events, get_state_events_for_key, send_state_event},
|
state::{get_state_events, get_state_events_for_key, send_state_event},
|
||||||
},
|
},
|
||||||
events::{
|
events::{
|
||||||
room::{
|
room::canonical_alias::RoomCanonicalAliasEventContent, AnyStateEventContent, StateEventType,
|
||||||
canonical_alias::RoomCanonicalAliasEventContent,
|
|
||||||
history_visibility::{HistoryVisibility, RoomHistoryVisibilityEventContent},
|
|
||||||
},
|
|
||||||
AnyStateEventContent, StateEventType,
|
|
||||||
},
|
},
|
||||||
serde::Raw,
|
serde::Raw,
|
||||||
EventId, RoomId, UserId,
|
EventId, RoomId, UserId,
|
||||||
};
|
};
|
||||||
|
use tracing::log::warn;
|
||||||
|
|
||||||
/// # `PUT /_matrix/client/r0/rooms/{roomId}/state/{eventType}/{stateKey}`
|
/// # `PUT /_matrix/client/r0/rooms/{roomId}/state/{eventType}/{stateKey}`
|
||||||
///
|
///
|
||||||
|
@ -27,13 +22,11 @@ use ruma::{
|
||||||
/// - Tries to send the event into the room, auth rules will determine if it is allowed
|
/// - Tries to send the event into the room, auth rules will determine if it is allowed
|
||||||
/// - If event is new canonical_alias: Rejects if alias is incorrect
|
/// - If event is new canonical_alias: Rejects if alias is incorrect
|
||||||
pub async fn send_state_event_for_key_route(
|
pub async fn send_state_event_for_key_route(
|
||||||
db: DatabaseGuard,
|
body: Ruma<send_state_event::v3::Request>,
|
||||||
body: Ruma<send_state_event::v3::IncomingRequest>,
|
|
||||||
) -> Result<send_state_event::v3::Response> {
|
) -> Result<send_state_event::v3::Response> {
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
let event_id = send_state_event_for_key_helper(
|
let event_id = send_state_event_for_key_helper(
|
||||||
&db,
|
|
||||||
sender_user,
|
sender_user,
|
||||||
&body.room_id,
|
&body.room_id,
|
||||||
&body.event_type,
|
&body.event_type,
|
||||||
|
@ -42,8 +35,6 @@ pub async fn send_state_event_for_key_route(
|
||||||
)
|
)
|
||||||
.await?;
|
.await?;
|
||||||
|
|
||||||
db.flush()?;
|
|
||||||
|
|
||||||
let event_id = (*event_id).to_owned();
|
let event_id = (*event_id).to_owned();
|
||||||
Ok(send_state_event::v3::Response { event_id })
|
Ok(send_state_event::v3::Response { event_id })
|
||||||
}
|
}
|
||||||
|
@ -56,13 +47,12 @@ pub async fn send_state_event_for_key_route(
|
||||||
/// - Tries to send the event into the room, auth rules will determine if it is allowed
|
/// - Tries to send the event into the room, auth rules will determine if it is allowed
|
||||||
/// - If event is new canonical_alias: Rejects if alias is incorrect
|
/// - If event is new canonical_alias: Rejects if alias is incorrect
|
||||||
pub async fn send_state_event_for_empty_key_route(
|
pub async fn send_state_event_for_empty_key_route(
|
||||||
db: DatabaseGuard,
|
body: Ruma<send_state_event::v3::Request>,
|
||||||
body: Ruma<send_state_event::v3::IncomingRequest>,
|
|
||||||
) -> Result<RumaResponse<send_state_event::v3::Response>> {
|
) -> Result<RumaResponse<send_state_event::v3::Response>> {
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
// Forbid m.room.encryption if encryption is disabled
|
// Forbid m.room.encryption if encryption is disabled
|
||||||
if body.event_type == StateEventType::RoomEncryption && !db.globals.allow_encryption() {
|
if body.event_type == StateEventType::RoomEncryption && !services().globals.allow_encryption() {
|
||||||
return Err(Error::BadRequest(
|
return Err(Error::BadRequest(
|
||||||
ErrorKind::Forbidden,
|
ErrorKind::Forbidden,
|
||||||
"Encryption has been disabled",
|
"Encryption has been disabled",
|
||||||
|
@ -70,7 +60,6 @@ pub async fn send_state_event_for_empty_key_route(
|
||||||
}
|
}
|
||||||
|
|
||||||
let event_id = send_state_event_for_key_helper(
|
let event_id = send_state_event_for_key_helper(
|
||||||
&db,
|
|
||||||
sender_user,
|
sender_user,
|
||||||
&body.room_id,
|
&body.room_id,
|
||||||
&body.event_type.to_string().into(),
|
&body.event_type.to_string().into(),
|
||||||
|
@ -79,8 +68,6 @@ pub async fn send_state_event_for_empty_key_route(
|
||||||
)
|
)
|
||||||
.await?;
|
.await?;
|
||||||
|
|
||||||
db.flush()?;
|
|
||||||
|
|
||||||
let event_id = (*event_id).to_owned();
|
let event_id = (*event_id).to_owned();
|
||||||
Ok(send_state_event::v3::Response { event_id }.into())
|
Ok(send_state_event::v3::Response { event_id }.into())
|
||||||
}
|
}
|
||||||
|
@ -91,29 +78,14 @@ pub async fn send_state_event_for_empty_key_route(
|
||||||
///
|
///
|
||||||
/// - If not joined: Only works if current room history visibility is world readable
|
/// - If not joined: Only works if current room history visibility is world readable
|
||||||
pub async fn get_state_events_route(
|
pub async fn get_state_events_route(
|
||||||
db: DatabaseGuard,
|
body: Ruma<get_state_events::v3::Request>,
|
||||||
body: Ruma<get_state_events::v3::IncomingRequest>,
|
|
||||||
) -> Result<get_state_events::v3::Response> {
|
) -> Result<get_state_events::v3::Response> {
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
#[allow(clippy::blocks_in_if_conditions)]
|
if !services()
|
||||||
// Users not in the room should not be able to access the state unless history_visibility is
|
.rooms
|
||||||
// WorldReadable
|
.state_accessor
|
||||||
if !db.rooms.is_joined(sender_user, &body.room_id)?
|
.user_can_see_state_events(sender_user, &body.room_id)?
|
||||||
&& !matches!(
|
|
||||||
db.rooms
|
|
||||||
.room_state_get(&body.room_id, &StateEventType::RoomHistoryVisibility, "")?
|
|
||||||
.map(|event| {
|
|
||||||
serde_json::from_str(event.content.get())
|
|
||||||
.map(|e: RoomHistoryVisibilityEventContent| e.history_visibility)
|
|
||||||
.map_err(|_| {
|
|
||||||
Error::bad_database(
|
|
||||||
"Invalid room history visibility event in database.",
|
|
||||||
)
|
|
||||||
})
|
|
||||||
}),
|
|
||||||
Some(Ok(HistoryVisibility::WorldReadable))
|
|
||||||
)
|
|
||||||
{
|
{
|
||||||
return Err(Error::BadRequest(
|
return Err(Error::BadRequest(
|
||||||
ErrorKind::Forbidden,
|
ErrorKind::Forbidden,
|
||||||
|
@ -122,8 +94,9 @@ pub async fn get_state_events_route(
|
||||||
}
|
}
|
||||||
|
|
||||||
Ok(get_state_events::v3::Response {
|
Ok(get_state_events::v3::Response {
|
||||||
room_state: db
|
room_state: services()
|
||||||
.rooms
|
.rooms
|
||||||
|
.state_accessor
|
||||||
.room_state_full(&body.room_id)
|
.room_state_full(&body.room_id)
|
||||||
.await?
|
.await?
|
||||||
.values()
|
.values()
|
||||||
|
@ -138,29 +111,14 @@ pub async fn get_state_events_route(
|
||||||
///
|
///
|
||||||
/// - If not joined: Only works if current room history visibility is world readable
|
/// - If not joined: Only works if current room history visibility is world readable
|
||||||
pub async fn get_state_events_for_key_route(
|
pub async fn get_state_events_for_key_route(
|
||||||
db: DatabaseGuard,
|
body: Ruma<get_state_events_for_key::v3::Request>,
|
||||||
body: Ruma<get_state_events_for_key::v3::IncomingRequest>,
|
|
||||||
) -> Result<get_state_events_for_key::v3::Response> {
|
) -> Result<get_state_events_for_key::v3::Response> {
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
#[allow(clippy::blocks_in_if_conditions)]
|
if !services()
|
||||||
// Users not in the room should not be able to access the state unless history_visibility is
|
.rooms
|
||||||
// WorldReadable
|
.state_accessor
|
||||||
if !db.rooms.is_joined(sender_user, &body.room_id)?
|
.user_can_see_state_events(sender_user, &body.room_id)?
|
||||||
&& !matches!(
|
|
||||||
db.rooms
|
|
||||||
.room_state_get(&body.room_id, &StateEventType::RoomHistoryVisibility, "")?
|
|
||||||
.map(|event| {
|
|
||||||
serde_json::from_str(event.content.get())
|
|
||||||
.map(|e: RoomHistoryVisibilityEventContent| e.history_visibility)
|
|
||||||
.map_err(|_| {
|
|
||||||
Error::bad_database(
|
|
||||||
"Invalid room history visibility event in database.",
|
|
||||||
)
|
|
||||||
})
|
|
||||||
}),
|
|
||||||
Some(Ok(HistoryVisibility::WorldReadable))
|
|
||||||
)
|
|
||||||
{
|
{
|
||||||
return Err(Error::BadRequest(
|
return Err(Error::BadRequest(
|
||||||
ErrorKind::Forbidden,
|
ErrorKind::Forbidden,
|
||||||
|
@ -168,13 +126,17 @@ pub async fn get_state_events_for_key_route(
|
||||||
));
|
));
|
||||||
}
|
}
|
||||||
|
|
||||||
let event = db
|
let event = services()
|
||||||
.rooms
|
.rooms
|
||||||
|
.state_accessor
|
||||||
.room_state_get(&body.room_id, &body.event_type, &body.state_key)?
|
.room_state_get(&body.room_id, &body.event_type, &body.state_key)?
|
||||||
.ok_or(Error::BadRequest(
|
.ok_or_else(|| {
|
||||||
ErrorKind::NotFound,
|
warn!(
|
||||||
"State event not found.",
|
"State event {:?} not found in room {:?}",
|
||||||
))?;
|
&body.event_type, &body.room_id
|
||||||
|
);
|
||||||
|
Error::BadRequest(ErrorKind::NotFound, "State event not found.")
|
||||||
|
})?;
|
||||||
|
|
||||||
Ok(get_state_events_for_key::v3::Response {
|
Ok(get_state_events_for_key::v3::Response {
|
||||||
content: serde_json::from_str(event.content.get())
|
content: serde_json::from_str(event.content.get())
|
||||||
|
@ -188,29 +150,14 @@ pub async fn get_state_events_for_key_route(
|
||||||
///
|
///
|
||||||
/// - If not joined: Only works if current room history visibility is world readable
|
/// - If not joined: Only works if current room history visibility is world readable
|
||||||
pub async fn get_state_events_for_empty_key_route(
|
pub async fn get_state_events_for_empty_key_route(
|
||||||
db: DatabaseGuard,
|
body: Ruma<get_state_events_for_key::v3::Request>,
|
||||||
body: Ruma<get_state_events_for_key::v3::IncomingRequest>,
|
|
||||||
) -> Result<RumaResponse<get_state_events_for_key::v3::Response>> {
|
) -> Result<RumaResponse<get_state_events_for_key::v3::Response>> {
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
#[allow(clippy::blocks_in_if_conditions)]
|
if !services()
|
||||||
// Users not in the room should not be able to access the state unless history_visibility is
|
.rooms
|
||||||
// WorldReadable
|
.state_accessor
|
||||||
if !db.rooms.is_joined(sender_user, &body.room_id)?
|
.user_can_see_state_events(sender_user, &body.room_id)?
|
||||||
&& !matches!(
|
|
||||||
db.rooms
|
|
||||||
.room_state_get(&body.room_id, &StateEventType::RoomHistoryVisibility, "")?
|
|
||||||
.map(|event| {
|
|
||||||
serde_json::from_str(event.content.get())
|
|
||||||
.map(|e: RoomHistoryVisibilityEventContent| e.history_visibility)
|
|
||||||
.map_err(|_| {
|
|
||||||
Error::bad_database(
|
|
||||||
"Invalid room history visibility event in database.",
|
|
||||||
)
|
|
||||||
})
|
|
||||||
}),
|
|
||||||
Some(Ok(HistoryVisibility::WorldReadable))
|
|
||||||
)
|
|
||||||
{
|
{
|
||||||
return Err(Error::BadRequest(
|
return Err(Error::BadRequest(
|
||||||
ErrorKind::Forbidden,
|
ErrorKind::Forbidden,
|
||||||
|
@ -218,13 +165,17 @@ pub async fn get_state_events_for_empty_key_route(
|
||||||
));
|
));
|
||||||
}
|
}
|
||||||
|
|
||||||
let event = db
|
let event = services()
|
||||||
.rooms
|
.rooms
|
||||||
|
.state_accessor
|
||||||
.room_state_get(&body.room_id, &body.event_type, "")?
|
.room_state_get(&body.room_id, &body.event_type, "")?
|
||||||
.ok_or(Error::BadRequest(
|
.ok_or_else(|| {
|
||||||
ErrorKind::NotFound,
|
warn!(
|
||||||
"State event not found.",
|
"State event {:?} not found in room {:?}",
|
||||||
))?;
|
&body.event_type, &body.room_id
|
||||||
|
);
|
||||||
|
Error::BadRequest(ErrorKind::NotFound, "State event not found.")
|
||||||
|
})?;
|
||||||
|
|
||||||
Ok(get_state_events_for_key::v3::Response {
|
Ok(get_state_events_for_key::v3::Response {
|
||||||
content: serde_json::from_str(event.content.get())
|
content: serde_json::from_str(event.content.get())
|
||||||
|
@ -234,7 +185,6 @@ pub async fn get_state_events_for_empty_key_route(
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn send_state_event_for_key_helper(
|
async fn send_state_event_for_key_helper(
|
||||||
db: &Database,
|
|
||||||
sender: &UserId,
|
sender: &UserId,
|
||||||
room_id: &RoomId,
|
room_id: &RoomId,
|
||||||
event_type: &StateEventType,
|
event_type: &StateEventType,
|
||||||
|
@ -255,10 +205,11 @@ async fn send_state_event_for_key_helper(
|
||||||
}
|
}
|
||||||
|
|
||||||
for alias in aliases {
|
for alias in aliases {
|
||||||
if alias.server_name() != db.globals.server_name()
|
if alias.server_name() != services().globals.server_name()
|
||||||
|| db
|
|| services()
|
||||||
.rooms
|
.rooms
|
||||||
.id_from_alias(&alias)?
|
.alias
|
||||||
|
.resolve_local_alias(&alias)?
|
||||||
.filter(|room| room == room_id) // Make sure it's the right room
|
.filter(|room| room == room_id) // Make sure it's the right room
|
||||||
.is_none()
|
.is_none()
|
||||||
{
|
{
|
||||||
|
@ -272,7 +223,8 @@ async fn send_state_event_for_key_helper(
|
||||||
}
|
}
|
||||||
|
|
||||||
let mutex_state = Arc::clone(
|
let mutex_state = Arc::clone(
|
||||||
db.globals
|
services()
|
||||||
|
.globals
|
||||||
.roomid_mutex_state
|
.roomid_mutex_state
|
||||||
.write()
|
.write()
|
||||||
.unwrap()
|
.unwrap()
|
||||||
|
@ -281,7 +233,7 @@ async fn send_state_event_for_key_helper(
|
||||||
);
|
);
|
||||||
let state_lock = mutex_state.lock().await;
|
let state_lock = mutex_state.lock().await;
|
||||||
|
|
||||||
let event_id = db.rooms.build_and_append_pdu(
|
let event_id = services().rooms.timeline.build_and_append_pdu(
|
||||||
PduBuilder {
|
PduBuilder {
|
||||||
event_type: event_type.to_string().into(),
|
event_type: event_type.to_string().into(),
|
||||||
content: serde_json::from_str(json.json().get()).expect("content is valid json"),
|
content: serde_json::from_str(json.json().get()).expect("content is valid json"),
|
||||||
|
@ -291,7 +243,6 @@ async fn send_state_event_for_key_helper(
|
||||||
},
|
},
|
||||||
sender_user,
|
sender_user,
|
||||||
room_id,
|
room_id,
|
||||||
db,
|
|
||||||
&state_lock,
|
&state_lock,
|
||||||
)?;
|
)?;
|
||||||
|
|
1746
src/api/client_server/sync.rs
Normal file
1746
src/api/client_server/sync.rs
Normal file
File diff suppressed because it is too large
Load diff
126
src/api/client_server/tag.rs
Normal file
126
src/api/client_server/tag.rs
Normal file
|
@ -0,0 +1,126 @@
|
||||||
|
use crate::{services, Error, Result, Ruma};
|
||||||
|
use ruma::{
|
||||||
|
api::client::tag::{create_tag, delete_tag, get_tags},
|
||||||
|
events::{
|
||||||
|
tag::{TagEvent, TagEventContent},
|
||||||
|
RoomAccountDataEventType,
|
||||||
|
},
|
||||||
|
};
|
||||||
|
use std::collections::BTreeMap;
|
||||||
|
|
||||||
|
/// # `PUT /_matrix/client/r0/user/{userId}/rooms/{roomId}/tags/{tag}`
|
||||||
|
///
|
||||||
|
/// Adds a tag to the room.
|
||||||
|
///
|
||||||
|
/// - Inserts the tag into the tag event of the room account data.
|
||||||
|
pub async fn update_tag_route(
|
||||||
|
body: Ruma<create_tag::v3::Request>,
|
||||||
|
) -> Result<create_tag::v3::Response> {
|
||||||
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
|
let event = services().account_data.get(
|
||||||
|
Some(&body.room_id),
|
||||||
|
sender_user,
|
||||||
|
RoomAccountDataEventType::Tag,
|
||||||
|
)?;
|
||||||
|
|
||||||
|
let mut tags_event = event
|
||||||
|
.map(|e| {
|
||||||
|
serde_json::from_str(e.get())
|
||||||
|
.map_err(|_| Error::bad_database("Invalid account data event in db."))
|
||||||
|
})
|
||||||
|
.unwrap_or_else(|| {
|
||||||
|
Ok(TagEvent {
|
||||||
|
content: TagEventContent {
|
||||||
|
tags: BTreeMap::new(),
|
||||||
|
},
|
||||||
|
})
|
||||||
|
})?;
|
||||||
|
|
||||||
|
tags_event
|
||||||
|
.content
|
||||||
|
.tags
|
||||||
|
.insert(body.tag.clone().into(), body.tag_info.clone());
|
||||||
|
|
||||||
|
services().account_data.update(
|
||||||
|
Some(&body.room_id),
|
||||||
|
sender_user,
|
||||||
|
RoomAccountDataEventType::Tag,
|
||||||
|
&serde_json::to_value(tags_event).expect("to json value always works"),
|
||||||
|
)?;
|
||||||
|
|
||||||
|
Ok(create_tag::v3::Response {})
|
||||||
|
}
|
||||||
|
|
||||||
|
/// # `DELETE /_matrix/client/r0/user/{userId}/rooms/{roomId}/tags/{tag}`
|
||||||
|
///
|
||||||
|
/// Deletes a tag from the room.
|
||||||
|
///
|
||||||
|
/// - Removes the tag from the tag event of the room account data.
|
||||||
|
pub async fn delete_tag_route(
|
||||||
|
body: Ruma<delete_tag::v3::Request>,
|
||||||
|
) -> Result<delete_tag::v3::Response> {
|
||||||
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
|
let event = services().account_data.get(
|
||||||
|
Some(&body.room_id),
|
||||||
|
sender_user,
|
||||||
|
RoomAccountDataEventType::Tag,
|
||||||
|
)?;
|
||||||
|
|
||||||
|
let mut tags_event = event
|
||||||
|
.map(|e| {
|
||||||
|
serde_json::from_str(e.get())
|
||||||
|
.map_err(|_| Error::bad_database("Invalid account data event in db."))
|
||||||
|
})
|
||||||
|
.unwrap_or_else(|| {
|
||||||
|
Ok(TagEvent {
|
||||||
|
content: TagEventContent {
|
||||||
|
tags: BTreeMap::new(),
|
||||||
|
},
|
||||||
|
})
|
||||||
|
})?;
|
||||||
|
|
||||||
|
tags_event.content.tags.remove(&body.tag.clone().into());
|
||||||
|
|
||||||
|
services().account_data.update(
|
||||||
|
Some(&body.room_id),
|
||||||
|
sender_user,
|
||||||
|
RoomAccountDataEventType::Tag,
|
||||||
|
&serde_json::to_value(tags_event).expect("to json value always works"),
|
||||||
|
)?;
|
||||||
|
|
||||||
|
Ok(delete_tag::v3::Response {})
|
||||||
|
}
|
||||||
|
|
||||||
|
/// # `GET /_matrix/client/r0/user/{userId}/rooms/{roomId}/tags`
|
||||||
|
///
|
||||||
|
/// Returns tags on the room.
|
||||||
|
///
|
||||||
|
/// - Gets the tag event of the room account data.
|
||||||
|
pub async fn get_tags_route(body: Ruma<get_tags::v3::Request>) -> Result<get_tags::v3::Response> {
|
||||||
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
|
let event = services().account_data.get(
|
||||||
|
Some(&body.room_id),
|
||||||
|
sender_user,
|
||||||
|
RoomAccountDataEventType::Tag,
|
||||||
|
)?;
|
||||||
|
|
||||||
|
let tags_event = event
|
||||||
|
.map(|e| {
|
||||||
|
serde_json::from_str(e.get())
|
||||||
|
.map_err(|_| Error::bad_database("Invalid account data event in db."))
|
||||||
|
})
|
||||||
|
.unwrap_or_else(|| {
|
||||||
|
Ok(TagEvent {
|
||||||
|
content: TagEventContent {
|
||||||
|
tags: BTreeMap::new(),
|
||||||
|
},
|
||||||
|
})
|
||||||
|
})?;
|
||||||
|
|
||||||
|
Ok(get_tags::v3::Response {
|
||||||
|
tags: tags_event.content.tags,
|
||||||
|
})
|
||||||
|
}
|
|
@ -7,7 +7,7 @@ use std::collections::BTreeMap;
|
||||||
///
|
///
|
||||||
/// TODO: Fetches all metadata about protocols supported by the homeserver.
|
/// TODO: Fetches all metadata about protocols supported by the homeserver.
|
||||||
pub async fn get_protocols_route(
|
pub async fn get_protocols_route(
|
||||||
_body: Ruma<get_protocols::v3::IncomingRequest>,
|
_body: Ruma<get_protocols::v3::Request>,
|
||||||
) -> Result<get_protocols::v3::Response> {
|
) -> Result<get_protocols::v3::Response> {
|
||||||
// TODO
|
// TODO
|
||||||
Ok(get_protocols::v3::Response {
|
Ok(get_protocols::v3::Response {
|
49
src/api/client_server/threads.rs
Normal file
49
src/api/client_server/threads.rs
Normal file
|
@ -0,0 +1,49 @@
|
||||||
|
use ruma::api::client::{error::ErrorKind, threads::get_threads};
|
||||||
|
|
||||||
|
use crate::{services, Error, Result, Ruma};
|
||||||
|
|
||||||
|
/// # `GET /_matrix/client/r0/rooms/{roomId}/threads`
|
||||||
|
pub async fn get_threads_route(
|
||||||
|
body: Ruma<get_threads::v1::Request>,
|
||||||
|
) -> Result<get_threads::v1::Response> {
|
||||||
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
|
// Use limit or else 10, with maximum 100
|
||||||
|
let limit = body
|
||||||
|
.limit
|
||||||
|
.and_then(|l| l.try_into().ok())
|
||||||
|
.unwrap_or(10)
|
||||||
|
.min(100);
|
||||||
|
|
||||||
|
let from = if let Some(from) = &body.from {
|
||||||
|
from.parse()
|
||||||
|
.map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, ""))?
|
||||||
|
} else {
|
||||||
|
u64::MAX
|
||||||
|
};
|
||||||
|
|
||||||
|
let threads = services()
|
||||||
|
.rooms
|
||||||
|
.threads
|
||||||
|
.threads_until(sender_user, &body.room_id, from, &body.include)?
|
||||||
|
.take(limit)
|
||||||
|
.filter_map(|r| r.ok())
|
||||||
|
.filter(|(_, pdu)| {
|
||||||
|
services()
|
||||||
|
.rooms
|
||||||
|
.state_accessor
|
||||||
|
.user_can_see_event(sender_user, &body.room_id, &pdu.event_id)
|
||||||
|
.unwrap_or(false)
|
||||||
|
})
|
||||||
|
.collect::<Vec<_>>();
|
||||||
|
|
||||||
|
let next_batch = threads.last().map(|(count, _)| count.to_string());
|
||||||
|
|
||||||
|
Ok(get_threads::v1::Response {
|
||||||
|
chunk: threads
|
||||||
|
.into_iter()
|
||||||
|
.map(|(_, pdu)| pdu.to_room_event())
|
||||||
|
.collect(),
|
||||||
|
next_batch,
|
||||||
|
})
|
||||||
|
}
|
|
@ -1,7 +1,6 @@
|
||||||
use ruma::events::ToDeviceEventType;
|
|
||||||
use std::collections::BTreeMap;
|
use std::collections::BTreeMap;
|
||||||
|
|
||||||
use crate::{database::DatabaseGuard, Error, Result, Ruma};
|
use crate::{services, Error, Result, Ruma};
|
||||||
use ruma::{
|
use ruma::{
|
||||||
api::{
|
api::{
|
||||||
client::{error::ErrorKind, to_device::send_event_to_device},
|
client::{error::ErrorKind, to_device::send_event_to_device},
|
||||||
|
@ -14,14 +13,13 @@ use ruma::{
|
||||||
///
|
///
|
||||||
/// Send a to-device event to a set of client devices.
|
/// Send a to-device event to a set of client devices.
|
||||||
pub async fn send_event_to_device_route(
|
pub async fn send_event_to_device_route(
|
||||||
db: DatabaseGuard,
|
body: Ruma<send_event_to_device::v3::Request>,
|
||||||
body: Ruma<send_event_to_device::v3::IncomingRequest>,
|
|
||||||
) -> Result<send_event_to_device::v3::Response> {
|
) -> Result<send_event_to_device::v3::Response> {
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
let sender_device = body.sender_device.as_deref();
|
let sender_device = body.sender_device.as_deref();
|
||||||
|
|
||||||
// Check if this is a new transaction id
|
// Check if this is a new transaction id
|
||||||
if db
|
if services()
|
||||||
.transaction_ids
|
.transaction_ids
|
||||||
.existing_txnid(sender_user, sender_device, &body.txn_id)?
|
.existing_txnid(sender_user, sender_device, &body.txn_id)?
|
||||||
.is_some()
|
.is_some()
|
||||||
|
@ -31,52 +29,53 @@ pub async fn send_event_to_device_route(
|
||||||
|
|
||||||
for (target_user_id, map) in &body.messages {
|
for (target_user_id, map) in &body.messages {
|
||||||
for (target_device_id_maybe, event) in map {
|
for (target_device_id_maybe, event) in map {
|
||||||
if target_user_id.server_name() != db.globals.server_name() {
|
if target_user_id.server_name() != services().globals.server_name() {
|
||||||
let mut map = BTreeMap::new();
|
let mut map = BTreeMap::new();
|
||||||
map.insert(target_device_id_maybe.clone(), event.clone());
|
map.insert(target_device_id_maybe.clone(), event.clone());
|
||||||
let mut messages = BTreeMap::new();
|
let mut messages = BTreeMap::new();
|
||||||
messages.insert(target_user_id.clone(), map);
|
messages.insert(target_user_id.clone(), map);
|
||||||
|
let count = services().globals.next_count()?;
|
||||||
|
|
||||||
db.sending.send_reliable_edu(
|
services().sending.send_reliable_edu(
|
||||||
target_user_id.server_name(),
|
target_user_id.server_name(),
|
||||||
serde_json::to_vec(&federation::transactions::edu::Edu::DirectToDevice(
|
serde_json::to_vec(&federation::transactions::edu::Edu::DirectToDevice(
|
||||||
DirectDeviceContent {
|
DirectDeviceContent {
|
||||||
sender: sender_user.clone(),
|
sender: sender_user.clone(),
|
||||||
ev_type: ToDeviceEventType::from(&*body.event_type),
|
ev_type: body.event_type.clone(),
|
||||||
message_id: body.txn_id.to_owned(),
|
message_id: count.to_string().into(),
|
||||||
messages,
|
messages,
|
||||||
},
|
},
|
||||||
))
|
))
|
||||||
.expect("DirectToDevice EDU can be serialized"),
|
.expect("DirectToDevice EDU can be serialized"),
|
||||||
db.globals.next_count()?,
|
count,
|
||||||
)?;
|
)?;
|
||||||
|
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
|
|
||||||
match target_device_id_maybe {
|
match target_device_id_maybe {
|
||||||
DeviceIdOrAllDevices::DeviceId(target_device_id) => db.users.add_to_device_event(
|
DeviceIdOrAllDevices::DeviceId(target_device_id) => {
|
||||||
|
services().users.add_to_device_event(
|
||||||
sender_user,
|
sender_user,
|
||||||
target_user_id,
|
target_user_id,
|
||||||
&target_device_id,
|
target_device_id,
|
||||||
&body.event_type,
|
&body.event_type.to_string(),
|
||||||
event.deserialize_as().map_err(|_| {
|
event.deserialize_as().map_err(|_| {
|
||||||
Error::BadRequest(ErrorKind::InvalidParam, "Event is invalid")
|
Error::BadRequest(ErrorKind::InvalidParam, "Event is invalid")
|
||||||
})?,
|
})?,
|
||||||
&db.globals,
|
)?
|
||||||
)?,
|
}
|
||||||
|
|
||||||
DeviceIdOrAllDevices::AllDevices => {
|
DeviceIdOrAllDevices::AllDevices => {
|
||||||
for target_device_id in db.users.all_device_ids(target_user_id) {
|
for target_device_id in services().users.all_device_ids(target_user_id) {
|
||||||
db.users.add_to_device_event(
|
services().users.add_to_device_event(
|
||||||
sender_user,
|
sender_user,
|
||||||
target_user_id,
|
target_user_id,
|
||||||
&target_device_id?,
|
&target_device_id?,
|
||||||
&body.event_type,
|
&body.event_type.to_string(),
|
||||||
event.deserialize_as().map_err(|_| {
|
event.deserialize_as().map_err(|_| {
|
||||||
Error::BadRequest(ErrorKind::InvalidParam, "Event is invalid")
|
Error::BadRequest(ErrorKind::InvalidParam, "Event is invalid")
|
||||||
})?,
|
})?,
|
||||||
&db.globals,
|
|
||||||
)?;
|
)?;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -85,10 +84,9 @@ pub async fn send_event_to_device_route(
|
||||||
}
|
}
|
||||||
|
|
||||||
// Save transaction id with empty data
|
// Save transaction id with empty data
|
||||||
db.transaction_ids
|
services()
|
||||||
|
.transaction_ids
|
||||||
.add_txnid(sender_user, sender_device, &body.txn_id, &[])?;
|
.add_txnid(sender_user, sender_device, &body.txn_id, &[])?;
|
||||||
|
|
||||||
db.flush()?;
|
|
||||||
|
|
||||||
Ok(send_event_to_device::v3::Response {})
|
Ok(send_event_to_device::v3::Response {})
|
||||||
}
|
}
|
|
@ -1,18 +1,21 @@
|
||||||
use crate::{database::DatabaseGuard, utils, Error, Result, Ruma};
|
use crate::{services, utils, Error, Result, Ruma};
|
||||||
use ruma::api::client::{error::ErrorKind, typing::create_typing_event};
|
use ruma::api::client::{error::ErrorKind, typing::create_typing_event};
|
||||||
|
|
||||||
/// # `PUT /_matrix/client/r0/rooms/{roomId}/typing/{userId}`
|
/// # `PUT /_matrix/client/r0/rooms/{roomId}/typing/{userId}`
|
||||||
///
|
///
|
||||||
/// Sets the typing state of the sender user.
|
/// Sets the typing state of the sender user.
|
||||||
pub async fn create_typing_event_route(
|
pub async fn create_typing_event_route(
|
||||||
db: DatabaseGuard,
|
body: Ruma<create_typing_event::v3::Request>,
|
||||||
body: Ruma<create_typing_event::v3::IncomingRequest>,
|
|
||||||
) -> Result<create_typing_event::v3::Response> {
|
) -> Result<create_typing_event::v3::Response> {
|
||||||
use create_typing_event::v3::Typing;
|
use create_typing_event::v3::Typing;
|
||||||
|
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
if !db.rooms.is_joined(sender_user, &body.room_id)? {
|
if !services()
|
||||||
|
.rooms
|
||||||
|
.state_cache
|
||||||
|
.is_joined(sender_user, &body.room_id)?
|
||||||
|
{
|
||||||
return Err(Error::BadRequest(
|
return Err(Error::BadRequest(
|
||||||
ErrorKind::Forbidden,
|
ErrorKind::Forbidden,
|
||||||
"You are not in this room.",
|
"You are not in this room.",
|
||||||
|
@ -20,16 +23,17 @@ pub async fn create_typing_event_route(
|
||||||
}
|
}
|
||||||
|
|
||||||
if let Typing::Yes(duration) = body.state {
|
if let Typing::Yes(duration) = body.state {
|
||||||
db.rooms.edus.typing_add(
|
services().rooms.edus.typing.typing_add(
|
||||||
sender_user,
|
sender_user,
|
||||||
&body.room_id,
|
&body.room_id,
|
||||||
duration.as_millis() as u64 + utils::millis_since_unix_epoch(),
|
duration.as_millis() as u64 + utils::millis_since_unix_epoch(),
|
||||||
&db.globals,
|
|
||||||
)?;
|
)?;
|
||||||
} else {
|
} else {
|
||||||
db.rooms
|
services()
|
||||||
|
.rooms
|
||||||
.edus
|
.edus
|
||||||
.typing_remove(sender_user, &body.room_id, &db.globals)?;
|
.typing
|
||||||
|
.typing_remove(sender_user, &body.room_id)?;
|
||||||
}
|
}
|
||||||
|
|
||||||
Ok(create_typing_event::v3::Response {})
|
Ok(create_typing_event::v3::Response {})
|
|
@ -1,8 +1,9 @@
|
||||||
use std::{collections::BTreeMap, iter::FromIterator};
|
use std::{collections::BTreeMap, iter::FromIterator};
|
||||||
|
|
||||||
use ruma::api::client::discovery::get_supported_versions;
|
use axum::{response::IntoResponse, Json};
|
||||||
|
use ruma::api::client::{discovery::get_supported_versions, error::ErrorKind};
|
||||||
|
|
||||||
use crate::{Result, Ruma};
|
use crate::{services, Error, Result, Ruma};
|
||||||
|
|
||||||
/// # `GET /_matrix/client/versions`
|
/// # `GET /_matrix/client/versions`
|
||||||
///
|
///
|
||||||
|
@ -15,7 +16,7 @@ use crate::{Result, Ruma};
|
||||||
/// Note: Unstable features are used while developing new features. Clients should avoid using
|
/// Note: Unstable features are used while developing new features. Clients should avoid using
|
||||||
/// unstable features in their stable releases
|
/// unstable features in their stable releases
|
||||||
pub async fn get_supported_versions_route(
|
pub async fn get_supported_versions_route(
|
||||||
_body: Ruma<get_supported_versions::IncomingRequest>,
|
_body: Ruma<get_supported_versions::Request>,
|
||||||
) -> Result<get_supported_versions::Response> {
|
) -> Result<get_supported_versions::Response> {
|
||||||
let resp = get_supported_versions::Response {
|
let resp = get_supported_versions::Response {
|
||||||
versions: vec![
|
versions: vec![
|
||||||
|
@ -23,9 +24,27 @@ pub async fn get_supported_versions_route(
|
||||||
"r0.6.0".to_owned(),
|
"r0.6.0".to_owned(),
|
||||||
"v1.1".to_owned(),
|
"v1.1".to_owned(),
|
||||||
"v1.2".to_owned(),
|
"v1.2".to_owned(),
|
||||||
|
"v1.3".to_owned(),
|
||||||
|
"v1.4".to_owned(),
|
||||||
|
"v1.5".to_owned(),
|
||||||
],
|
],
|
||||||
unstable_features: BTreeMap::from_iter([("org.matrix.e2e_cross_signing".to_owned(), true)]),
|
unstable_features: BTreeMap::from_iter([("org.matrix.e2e_cross_signing".to_owned(), true)]),
|
||||||
};
|
};
|
||||||
|
|
||||||
Ok(resp)
|
Ok(resp)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// # `GET /.well-known/matrix/client`
|
||||||
|
pub async fn well_known_client_route(
|
||||||
|
_body: Ruma<get_supported_versions::Request>,
|
||||||
|
) -> Result<impl IntoResponse> {
|
||||||
|
let client_url = match services().globals.well_known_client() {
|
||||||
|
Some(url) => url.clone(),
|
||||||
|
None => return Err(Error::BadRequest(ErrorKind::NotFound, "Not found.")),
|
||||||
|
};
|
||||||
|
|
||||||
|
Ok(Json(serde_json::json!({
|
||||||
|
"m.homeserver": {"base_url": client_url},
|
||||||
|
"org.matrix.msc3575.proxy": {"url": client_url}
|
||||||
|
})))
|
||||||
|
}
|
|
@ -1,4 +1,4 @@
|
||||||
use crate::{database::DatabaseGuard, Result, Ruma};
|
use crate::{services, Result, Ruma};
|
||||||
use ruma::{
|
use ruma::{
|
||||||
api::client::user_directory::search_users,
|
api::client::user_directory::search_users,
|
||||||
events::{
|
events::{
|
||||||
|
@ -14,20 +14,19 @@ use ruma::{
|
||||||
/// - Hides any local users that aren't in any public rooms (i.e. those that have the join rule set to public)
|
/// - Hides any local users that aren't in any public rooms (i.e. those that have the join rule set to public)
|
||||||
/// and don't share a room with the sender
|
/// and don't share a room with the sender
|
||||||
pub async fn search_users_route(
|
pub async fn search_users_route(
|
||||||
db: DatabaseGuard,
|
body: Ruma<search_users::v3::Request>,
|
||||||
body: Ruma<search_users::v3::IncomingRequest>,
|
|
||||||
) -> Result<search_users::v3::Response> {
|
) -> Result<search_users::v3::Response> {
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
let limit = u64::from(body.limit) as usize;
|
let limit = u64::from(body.limit) as usize;
|
||||||
|
|
||||||
let mut users = db.users.iter().filter_map(|user_id| {
|
let mut users = services().users.iter().filter_map(|user_id| {
|
||||||
// Filter out buggy users (they should not exist, but you never know...)
|
// Filter out buggy users (they should not exist, but you never know...)
|
||||||
let user_id = user_id.ok()?;
|
let user_id = user_id.ok()?;
|
||||||
|
|
||||||
let user = search_users::v3::User {
|
let user = search_users::v3::User {
|
||||||
user_id: user_id.clone(),
|
user_id: user_id.clone(),
|
||||||
display_name: db.users.displayname(&user_id).ok()?,
|
display_name: services().users.displayname(&user_id).ok()?,
|
||||||
avatar_url: db.users.avatar_url(&user_id).ok()?,
|
avatar_url: services().users.avatar_url(&user_id).ok()?,
|
||||||
};
|
};
|
||||||
|
|
||||||
let user_id_matches = user
|
let user_id_matches = user
|
||||||
|
@ -49,12 +48,15 @@ pub async fn search_users_route(
|
||||||
return None;
|
return None;
|
||||||
}
|
}
|
||||||
|
|
||||||
let user_is_in_public_rooms =
|
let user_is_in_public_rooms = services()
|
||||||
db.rooms
|
.rooms
|
||||||
|
.state_cache
|
||||||
.rooms_joined(&user_id)
|
.rooms_joined(&user_id)
|
||||||
.filter_map(|r| r.ok())
|
.filter_map(|r| r.ok())
|
||||||
.any(|room| {
|
.any(|room| {
|
||||||
db.rooms
|
services()
|
||||||
|
.rooms
|
||||||
|
.state_accessor
|
||||||
.room_state_get(&room, &StateEventType::RoomJoinRules, "")
|
.room_state_get(&room, &StateEventType::RoomJoinRules, "")
|
||||||
.map_or(false, |event| {
|
.map_or(false, |event| {
|
||||||
event.map_or(false, |event| {
|
event.map_or(false, |event| {
|
||||||
|
@ -70,9 +72,10 @@ pub async fn search_users_route(
|
||||||
return Some(user);
|
return Some(user);
|
||||||
}
|
}
|
||||||
|
|
||||||
let user_is_in_shared_rooms = db
|
let user_is_in_shared_rooms = services()
|
||||||
.rooms
|
.rooms
|
||||||
.get_shared_rooms(vec![sender_user.clone(), user_id.clone()])
|
.user
|
||||||
|
.get_shared_rooms(vec![sender_user.clone(), user_id])
|
||||||
.ok()?
|
.ok()?
|
||||||
.next()
|
.next()
|
||||||
.is_some();
|
.is_some();
|
|
@ -1,5 +1,6 @@
|
||||||
use crate::{database::DatabaseGuard, Result, Ruma};
|
use crate::{services, Result, Ruma};
|
||||||
use hmac::{Hmac, Mac, NewMac};
|
use base64::{engine::general_purpose, Engine as _};
|
||||||
|
use hmac::{Hmac, Mac};
|
||||||
use ruma::{api::client::voip::get_turn_server_info, SecondsSinceUnixEpoch};
|
use ruma::{api::client::voip::get_turn_server_info, SecondsSinceUnixEpoch};
|
||||||
use sha1::Sha1;
|
use sha1::Sha1;
|
||||||
use std::time::{Duration, SystemTime};
|
use std::time::{Duration, SystemTime};
|
||||||
|
@ -10,16 +11,15 @@ type HmacSha1 = Hmac<Sha1>;
|
||||||
///
|
///
|
||||||
/// TODO: Returns information about the recommended turn server.
|
/// TODO: Returns information about the recommended turn server.
|
||||||
pub async fn turn_server_route(
|
pub async fn turn_server_route(
|
||||||
db: DatabaseGuard,
|
body: Ruma<get_turn_server_info::v3::Request>,
|
||||||
body: Ruma<get_turn_server_info::v3::IncomingRequest>,
|
|
||||||
) -> Result<get_turn_server_info::v3::Response> {
|
) -> Result<get_turn_server_info::v3::Response> {
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
let turn_secret = db.globals.turn_secret();
|
let turn_secret = services().globals.turn_secret().clone();
|
||||||
|
|
||||||
let (username, password) = if !turn_secret.is_empty() {
|
let (username, password) = if !turn_secret.is_empty() {
|
||||||
let expiry = SecondsSinceUnixEpoch::from_system_time(
|
let expiry = SecondsSinceUnixEpoch::from_system_time(
|
||||||
SystemTime::now() + Duration::from_secs(db.globals.turn_ttl()),
|
SystemTime::now() + Duration::from_secs(services().globals.turn_ttl()),
|
||||||
)
|
)
|
||||||
.expect("time is valid");
|
.expect("time is valid");
|
||||||
|
|
||||||
|
@ -29,20 +29,20 @@ pub async fn turn_server_route(
|
||||||
.expect("HMAC can take key of any size");
|
.expect("HMAC can take key of any size");
|
||||||
mac.update(username.as_bytes());
|
mac.update(username.as_bytes());
|
||||||
|
|
||||||
let password: String = base64::encode_config(mac.finalize().into_bytes(), base64::STANDARD);
|
let password: String = general_purpose::STANDARD.encode(mac.finalize().into_bytes());
|
||||||
|
|
||||||
(username, password)
|
(username, password)
|
||||||
} else {
|
} else {
|
||||||
(
|
(
|
||||||
db.globals.turn_username().clone(),
|
services().globals.turn_username().clone(),
|
||||||
db.globals.turn_password().clone(),
|
services().globals.turn_password().clone(),
|
||||||
)
|
)
|
||||||
};
|
};
|
||||||
|
|
||||||
Ok(get_turn_server_info::v3::Response {
|
Ok(get_turn_server_info::v3::Response {
|
||||||
username,
|
username,
|
||||||
password,
|
password,
|
||||||
uris: db.globals.turn_uris().to_vec(),
|
uris: services().globals.turn_uris().to_vec(),
|
||||||
ttl: Duration::from_secs(db.globals.turn_ttl()),
|
ttl: Duration::from_secs(services().globals.turn_ttl()),
|
||||||
})
|
})
|
||||||
}
|
}
|
4
src/api/mod.rs
Normal file
4
src/api/mod.rs
Normal file
|
@ -0,0 +1,4 @@
|
||||||
|
pub mod appservice_server;
|
||||||
|
pub mod client_server;
|
||||||
|
pub mod ruma_wrapper;
|
||||||
|
pub mod server_server;
|
|
@ -3,53 +3,66 @@ use std::{collections::BTreeMap, iter::FromIterator, str};
|
||||||
use axum::{
|
use axum::{
|
||||||
async_trait,
|
async_trait,
|
||||||
body::{Full, HttpBody},
|
body::{Full, HttpBody},
|
||||||
extract::{
|
extract::{rejection::TypedHeaderRejectionReason, FromRequest, Path, TypedHeader},
|
||||||
rejection::TypedHeaderRejectionReason, FromRequest, Path, RequestParts, TypedHeader,
|
|
||||||
},
|
|
||||||
headers::{
|
headers::{
|
||||||
authorization::{Bearer, Credentials},
|
authorization::{Bearer, Credentials},
|
||||||
Authorization,
|
Authorization,
|
||||||
},
|
},
|
||||||
response::{IntoResponse, Response},
|
response::{IntoResponse, Response},
|
||||||
BoxError,
|
BoxError, RequestExt, RequestPartsExt,
|
||||||
};
|
};
|
||||||
use bytes::{BufMut, Bytes, BytesMut};
|
use bytes::{Buf, BufMut, Bytes, BytesMut};
|
||||||
use http::StatusCode;
|
use http::{Request, StatusCode};
|
||||||
use ruma::{
|
use ruma::{
|
||||||
api::{client::error::ErrorKind, AuthScheme, IncomingRequest, OutgoingResponse},
|
api::{client::error::ErrorKind, AuthScheme, IncomingRequest, OutgoingResponse},
|
||||||
signatures::CanonicalJsonValue,
|
CanonicalJsonValue, OwnedDeviceId, OwnedServerName, UserId,
|
||||||
DeviceId, ServerName, UserId,
|
|
||||||
};
|
};
|
||||||
use serde::Deserialize;
|
use serde::Deserialize;
|
||||||
use tracing::{debug, error, warn};
|
use tracing::{debug, error, warn};
|
||||||
|
|
||||||
use super::{Ruma, RumaResponse};
|
use super::{Ruma, RumaResponse};
|
||||||
use crate::{database::DatabaseGuard, server_server, Error, Result};
|
use crate::{services, Error, Result};
|
||||||
|
|
||||||
#[async_trait]
|
#[async_trait]
|
||||||
impl<T, B> FromRequest<B> for Ruma<T>
|
impl<T, S, B> FromRequest<S, B> for Ruma<T>
|
||||||
where
|
where
|
||||||
T: IncomingRequest,
|
T: IncomingRequest,
|
||||||
B: HttpBody + Send,
|
B: HttpBody + Send + 'static,
|
||||||
B::Data: Send,
|
B::Data: Send,
|
||||||
B::Error: Into<BoxError>,
|
B::Error: Into<BoxError>,
|
||||||
{
|
{
|
||||||
type Rejection = Error;
|
type Rejection = Error;
|
||||||
|
|
||||||
async fn from_request(req: &mut RequestParts<B>) -> Result<Self, Self::Rejection> {
|
async fn from_request(req: Request<B>, _state: &S) -> Result<Self, Self::Rejection> {
|
||||||
#[derive(Deserialize)]
|
#[derive(Deserialize)]
|
||||||
struct QueryParams {
|
struct QueryParams {
|
||||||
access_token: Option<String>,
|
access_token: Option<String>,
|
||||||
user_id: Option<String>,
|
user_id: Option<String>,
|
||||||
}
|
}
|
||||||
|
|
||||||
let metadata = T::METADATA;
|
let (mut parts, mut body) = match req.with_limited_body() {
|
||||||
let db = DatabaseGuard::from_request(req).await?;
|
Ok(limited_req) => {
|
||||||
let auth_header = Option::<TypedHeader<Authorization<Bearer>>>::from_request(req).await?;
|
let (parts, body) = limited_req.into_parts();
|
||||||
let path_params = Path::<Vec<String>>::from_request(req).await?;
|
let body = to_bytes(body)
|
||||||
|
.await
|
||||||
|
.map_err(|_| Error::BadRequest(ErrorKind::MissingToken, "Missing token."))?;
|
||||||
|
(parts, body)
|
||||||
|
}
|
||||||
|
Err(original_req) => {
|
||||||
|
let (parts, body) = original_req.into_parts();
|
||||||
|
let body = to_bytes(body)
|
||||||
|
.await
|
||||||
|
.map_err(|_| Error::BadRequest(ErrorKind::MissingToken, "Missing token."))?;
|
||||||
|
(parts, body)
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
let query = req.uri().query().unwrap_or_default();
|
let metadata = T::METADATA;
|
||||||
let query_params: QueryParams = match ruma::serde::urlencoded::from_str(query) {
|
let auth_header: Option<TypedHeader<Authorization<Bearer>>> = parts.extract().await?;
|
||||||
|
let path_params: Path<Vec<String>> = parts.extract().await?;
|
||||||
|
|
||||||
|
let query = parts.uri.query().unwrap_or_default();
|
||||||
|
let query_params: QueryParams = match serde_html_form::from_str(query) {
|
||||||
Ok(params) => params,
|
Ok(params) => params,
|
||||||
Err(e) => {
|
Err(e) => {
|
||||||
error!(%query, "Failed to deserialize query parameters: {}", e);
|
error!(%query, "Failed to deserialize query parameters: {}", e);
|
||||||
|
@ -65,13 +78,9 @@ where
|
||||||
None => query_params.access_token.as_deref(),
|
None => query_params.access_token.as_deref(),
|
||||||
};
|
};
|
||||||
|
|
||||||
let mut body = Bytes::from_request(req)
|
|
||||||
.await
|
|
||||||
.map_err(|_| Error::BadRequest(ErrorKind::MissingToken, "Missing token."))?;
|
|
||||||
|
|
||||||
let mut json_body = serde_json::from_slice::<CanonicalJsonValue>(&body).ok();
|
let mut json_body = serde_json::from_slice::<CanonicalJsonValue>(&body).ok();
|
||||||
|
|
||||||
let appservices = db.appservice.all().unwrap();
|
let appservices = services().appservice.all().unwrap();
|
||||||
let appservice_registration = appservices.iter().find(|(_id, registration)| {
|
let appservice_registration = appservices.iter().find(|(_id, registration)| {
|
||||||
registration
|
registration
|
||||||
.get("as_token")
|
.get("as_token")
|
||||||
|
@ -82,7 +91,7 @@ where
|
||||||
let (sender_user, sender_device, sender_servername, from_appservice) =
|
let (sender_user, sender_device, sender_servername, from_appservice) =
|
||||||
if let Some((_id, registration)) = appservice_registration {
|
if let Some((_id, registration)) = appservice_registration {
|
||||||
match metadata.authentication {
|
match metadata.authentication {
|
||||||
AuthScheme::AccessToken | AuthScheme::QueryOnlyAccessToken => {
|
AuthScheme::AccessToken => {
|
||||||
let user_id = query_params.user_id.map_or_else(
|
let user_id = query_params.user_id.map_or_else(
|
||||||
|| {
|
|| {
|
||||||
UserId::parse_with_server_name(
|
UserId::parse_with_server_name(
|
||||||
|
@ -91,14 +100,14 @@ where
|
||||||
.unwrap()
|
.unwrap()
|
||||||
.as_str()
|
.as_str()
|
||||||
.unwrap(),
|
.unwrap(),
|
||||||
db.globals.server_name(),
|
services().globals.server_name(),
|
||||||
)
|
)
|
||||||
.unwrap()
|
.unwrap()
|
||||||
},
|
},
|
||||||
|s| UserId::parse(s).unwrap(),
|
|s| UserId::parse(s).unwrap(),
|
||||||
);
|
);
|
||||||
|
|
||||||
if !db.users.exists(&user_id).unwrap() {
|
if !services().users.exists(&user_id).unwrap() {
|
||||||
return Err(Error::BadRequest(
|
return Err(Error::BadRequest(
|
||||||
ErrorKind::Forbidden,
|
ErrorKind::Forbidden,
|
||||||
"User does not exist.",
|
"User does not exist.",
|
||||||
|
@ -113,7 +122,7 @@ where
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
match metadata.authentication {
|
match metadata.authentication {
|
||||||
AuthScheme::AccessToken | AuthScheme::QueryOnlyAccessToken => {
|
AuthScheme::AccessToken => {
|
||||||
let token = match token {
|
let token = match token {
|
||||||
Some(token) => token,
|
Some(token) => token,
|
||||||
_ => {
|
_ => {
|
||||||
|
@ -124,7 +133,7 @@ where
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
match db.users.find_from_token(token).unwrap() {
|
match services().users.find_from_token(token).unwrap() {
|
||||||
None => {
|
None => {
|
||||||
return Err(Error::BadRequest(
|
return Err(Error::BadRequest(
|
||||||
ErrorKind::UnknownToken { soft_logout: false },
|
ErrorKind::UnknownToken { soft_logout: false },
|
||||||
|
@ -133,15 +142,15 @@ where
|
||||||
}
|
}
|
||||||
Some((user_id, device_id)) => (
|
Some((user_id, device_id)) => (
|
||||||
Some(user_id),
|
Some(user_id),
|
||||||
Some(Box::<DeviceId>::from(device_id)),
|
Some(OwnedDeviceId::from(device_id)),
|
||||||
None,
|
None,
|
||||||
false,
|
false,
|
||||||
),
|
),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
AuthScheme::ServerSignatures => {
|
AuthScheme::ServerSignatures => {
|
||||||
let TypedHeader(Authorization(x_matrix)) =
|
let TypedHeader(Authorization(x_matrix)) = parts
|
||||||
TypedHeader::<Authorization<XMatrix>>::from_request(req)
|
.extract::<TypedHeader<Authorization<XMatrix>>>()
|
||||||
.await
|
.await
|
||||||
.map_err(|e| {
|
.map_err(|e| {
|
||||||
warn!("Missing or invalid Authorization header: {}", e);
|
warn!("Missing or invalid Authorization header: {}", e);
|
||||||
|
@ -172,11 +181,11 @@ where
|
||||||
let mut request_map = BTreeMap::from_iter([
|
let mut request_map = BTreeMap::from_iter([
|
||||||
(
|
(
|
||||||
"method".to_owned(),
|
"method".to_owned(),
|
||||||
CanonicalJsonValue::String(req.method().to_string()),
|
CanonicalJsonValue::String(parts.method.to_string()),
|
||||||
),
|
),
|
||||||
(
|
(
|
||||||
"uri".to_owned(),
|
"uri".to_owned(),
|
||||||
CanonicalJsonValue::String(req.uri().to_string()),
|
CanonicalJsonValue::String(parts.uri.to_string()),
|
||||||
),
|
),
|
||||||
(
|
(
|
||||||
"origin".to_owned(),
|
"origin".to_owned(),
|
||||||
|
@ -185,7 +194,7 @@ where
|
||||||
(
|
(
|
||||||
"destination".to_owned(),
|
"destination".to_owned(),
|
||||||
CanonicalJsonValue::String(
|
CanonicalJsonValue::String(
|
||||||
db.globals.server_name().as_str().to_owned(),
|
services().globals.server_name().as_str().to_owned(),
|
||||||
),
|
),
|
||||||
),
|
),
|
||||||
(
|
(
|
||||||
|
@ -198,11 +207,10 @@ where
|
||||||
request_map.insert("content".to_owned(), json_body.clone());
|
request_map.insert("content".to_owned(), json_body.clone());
|
||||||
};
|
};
|
||||||
|
|
||||||
let keys_result = server_server::fetch_signing_keys(
|
let keys_result = services()
|
||||||
&db,
|
.rooms
|
||||||
&x_matrix.origin,
|
.event_handler
|
||||||
vec![x_matrix.key.to_owned()],
|
.fetch_signing_keys(&x_matrix.origin, vec![x_matrix.key.to_owned()])
|
||||||
)
|
|
||||||
.await;
|
.await;
|
||||||
|
|
||||||
let keys = match keys_result {
|
let keys = match keys_result {
|
||||||
|
@ -227,7 +235,7 @@ where
|
||||||
x_matrix.origin, e, request_map
|
x_matrix.origin, e, request_map
|
||||||
);
|
);
|
||||||
|
|
||||||
if req.uri().to_string().contains('@') {
|
if parts.uri.to_string().contains('@') {
|
||||||
warn!(
|
warn!(
|
||||||
"Request uri contained '@' character. Make sure your \
|
"Request uri contained '@' character. Make sure your \
|
||||||
reverse proxy gives Conduit the raw uri (apache: use \
|
reverse proxy gives Conduit the raw uri (apache: use \
|
||||||
|
@ -246,12 +254,12 @@ where
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
let mut http_request = http::Request::builder().uri(req.uri()).method(req.method());
|
let mut http_request = http::Request::builder().uri(parts.uri).method(parts.method);
|
||||||
*http_request.headers_mut().unwrap() = req.headers().clone();
|
*http_request.headers_mut().unwrap() = parts.headers;
|
||||||
|
|
||||||
if let Some(CanonicalJsonValue::Object(json_body)) = &mut json_body {
|
if let Some(CanonicalJsonValue::Object(json_body)) = &mut json_body {
|
||||||
let user_id = sender_user.clone().unwrap_or_else(|| {
|
let user_id = sender_user.clone().unwrap_or_else(|| {
|
||||||
UserId::parse_with_server_name("", db.globals.server_name())
|
UserId::parse_with_server_name("", services().globals.server_name())
|
||||||
.expect("we know this is valid")
|
.expect("we know this is valid")
|
||||||
});
|
});
|
||||||
|
|
||||||
|
@ -261,7 +269,7 @@ where
|
||||||
.and_then(|auth| auth.get("session"))
|
.and_then(|auth| auth.get("session"))
|
||||||
.and_then(|session| session.as_str())
|
.and_then(|session| session.as_str())
|
||||||
.and_then(|session| {
|
.and_then(|session| {
|
||||||
db.uiaa.get_uiaa_request(
|
services().uiaa.get_uiaa_request(
|
||||||
&user_id,
|
&user_id,
|
||||||
&sender_device.clone().unwrap_or_else(|| "".into()),
|
&sender_device.clone().unwrap_or_else(|| "".into()),
|
||||||
session,
|
session,
|
||||||
|
@ -284,7 +292,8 @@ where
|
||||||
debug!("{:?}", http_request);
|
debug!("{:?}", http_request);
|
||||||
|
|
||||||
let body = T::try_from_http_request(http_request, &path_params).map_err(|e| {
|
let body = T::try_from_http_request(http_request, &path_params).map_err(|e| {
|
||||||
warn!("{:?}", e);
|
warn!("try_from_http_request failed: {:?}", e);
|
||||||
|
debug!("JSON body: {:?}", json_body);
|
||||||
Error::BadRequest(ErrorKind::BadJson, "Failed to deserialize request.")
|
Error::BadRequest(ErrorKind::BadJson, "Failed to deserialize request.")
|
||||||
})?;
|
})?;
|
||||||
|
|
||||||
|
@ -300,7 +309,7 @@ where
|
||||||
}
|
}
|
||||||
|
|
||||||
struct XMatrix {
|
struct XMatrix {
|
||||||
origin: Box<ServerName>,
|
origin: OwnedServerName,
|
||||||
key: String, // KeyName?
|
key: String, // KeyName?
|
||||||
sig: String,
|
sig: String,
|
||||||
}
|
}
|
||||||
|
@ -311,8 +320,7 @@ impl Credentials for XMatrix {
|
||||||
fn decode(value: &http::HeaderValue) -> Option<Self> {
|
fn decode(value: &http::HeaderValue) -> Option<Self> {
|
||||||
debug_assert!(
|
debug_assert!(
|
||||||
value.as_bytes().starts_with(b"X-Matrix "),
|
value.as_bytes().starts_with(b"X-Matrix "),
|
||||||
"HeaderValue to decode should start with \"X-Matrix ..\", received = {:?}",
|
"HeaderValue to decode should start with \"X-Matrix ..\", received = {value:?}",
|
||||||
value,
|
|
||||||
);
|
);
|
||||||
|
|
||||||
let parameters = str::from_utf8(&value.as_bytes()["X-Matrix ".len()..])
|
let parameters = str::from_utf8(&value.as_bytes()["X-Matrix ".len()..])
|
||||||
|
@ -365,3 +373,55 @@ impl<T: OutgoingResponse> IntoResponse for RumaResponse<T> {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// copied from hyper under the following license:
|
||||||
|
// Copyright (c) 2014-2021 Sean McArthur
|
||||||
|
|
||||||
|
// Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||||
|
// of this software and associated documentation files (the "Software"), to deal
|
||||||
|
// in the Software without restriction, including without limitation the rights
|
||||||
|
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||||
|
// copies of the Software, and to permit persons to whom the Software is
|
||||||
|
// furnished to do so, subject to the following conditions:
|
||||||
|
|
||||||
|
// The above copyright notice and this permission notice shall be included in
|
||||||
|
// all copies or substantial portions of the Software.
|
||||||
|
|
||||||
|
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||||
|
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||||
|
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||||
|
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||||
|
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||||
|
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||||
|
// THE SOFTWARE.
|
||||||
|
pub(crate) async fn to_bytes<T>(body: T) -> Result<Bytes, T::Error>
|
||||||
|
where
|
||||||
|
T: HttpBody,
|
||||||
|
{
|
||||||
|
futures_util::pin_mut!(body);
|
||||||
|
|
||||||
|
// If there's only 1 chunk, we can just return Buf::to_bytes()
|
||||||
|
let mut first = if let Some(buf) = body.data().await {
|
||||||
|
buf?
|
||||||
|
} else {
|
||||||
|
return Ok(Bytes::new());
|
||||||
|
};
|
||||||
|
|
||||||
|
let second = if let Some(buf) = body.data().await {
|
||||||
|
buf?
|
||||||
|
} else {
|
||||||
|
return Ok(first.copy_to_bytes(first.remaining()));
|
||||||
|
};
|
||||||
|
|
||||||
|
// With more than 1 buf, we gotta flatten into a Vec first.
|
||||||
|
let cap = first.remaining() + second.remaining() + body.size_hint().lower() as usize;
|
||||||
|
let mut vec = Vec::with_capacity(cap);
|
||||||
|
vec.put(first);
|
||||||
|
vec.put(second);
|
||||||
|
|
||||||
|
while let Some(buf) = body.data().await {
|
||||||
|
vec.put(buf?);
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(vec.into())
|
||||||
|
}
|
|
@ -1,6 +1,7 @@
|
||||||
use crate::Error;
|
use crate::Error;
|
||||||
use ruma::{
|
use ruma::{
|
||||||
api::client::uiaa::UiaaResponse, signatures::CanonicalJsonValue, DeviceId, ServerName, UserId,
|
api::client::uiaa::UiaaResponse, CanonicalJsonValue, OwnedDeviceId, OwnedServerName,
|
||||||
|
OwnedUserId,
|
||||||
};
|
};
|
||||||
use std::ops::Deref;
|
use std::ops::Deref;
|
||||||
|
|
||||||
|
@ -10,9 +11,9 @@ mod axum;
|
||||||
/// Extractor for Ruma request structs
|
/// Extractor for Ruma request structs
|
||||||
pub struct Ruma<T> {
|
pub struct Ruma<T> {
|
||||||
pub body: T,
|
pub body: T,
|
||||||
pub sender_user: Option<Box<UserId>>,
|
pub sender_user: Option<OwnedUserId>,
|
||||||
pub sender_device: Option<Box<DeviceId>>,
|
pub sender_device: Option<OwnedDeviceId>,
|
||||||
pub sender_servername: Option<Box<ServerName>>,
|
pub sender_servername: Option<OwnedServerName>,
|
||||||
// This is None when body is not a valid string
|
// This is None when body is not a valid string
|
||||||
pub json_body: Option<CanonicalJsonValue>,
|
pub json_body: Option<CanonicalJsonValue>,
|
||||||
pub from_appservice: bool,
|
pub from_appservice: bool,
|
2020
src/api/server_server.rs
Normal file
2020
src/api/server_server.rs
Normal file
File diff suppressed because it is too large
Load diff
File diff suppressed because it is too large
Load diff
|
@ -1,584 +0,0 @@
|
||||||
use crate::{database::DatabaseGuard, Error, Result, Ruma};
|
|
||||||
use ruma::{
|
|
||||||
api::client::{
|
|
||||||
error::ErrorKind,
|
|
||||||
push::{
|
|
||||||
delete_pushrule, get_pushers, get_pushrule, get_pushrule_actions, get_pushrule_enabled,
|
|
||||||
get_pushrules_all, set_pusher, set_pushrule, set_pushrule_actions,
|
|
||||||
set_pushrule_enabled, RuleKind,
|
|
||||||
},
|
|
||||||
},
|
|
||||||
events::{push_rules::PushRulesEvent, GlobalAccountDataEventType},
|
|
||||||
push::{ConditionalPushRuleInit, PatternedPushRuleInit, SimplePushRuleInit},
|
|
||||||
};
|
|
||||||
|
|
||||||
/// # `GET /_matrix/client/r0/pushrules`
|
|
||||||
///
|
|
||||||
/// Retrieves the push rules event for this user.
|
|
||||||
pub async fn get_pushrules_all_route(
|
|
||||||
db: DatabaseGuard,
|
|
||||||
body: Ruma<get_pushrules_all::v3::Request>,
|
|
||||||
) -> Result<get_pushrules_all::v3::Response> {
|
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
|
||||||
|
|
||||||
let event: PushRulesEvent = db
|
|
||||||
.account_data
|
|
||||||
.get(
|
|
||||||
None,
|
|
||||||
sender_user,
|
|
||||||
GlobalAccountDataEventType::PushRules.to_string().into(),
|
|
||||||
)?
|
|
||||||
.ok_or(Error::BadRequest(
|
|
||||||
ErrorKind::NotFound,
|
|
||||||
"PushRules event not found.",
|
|
||||||
))?;
|
|
||||||
|
|
||||||
Ok(get_pushrules_all::v3::Response {
|
|
||||||
global: event.content.global,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
/// # `GET /_matrix/client/r0/pushrules/{scope}/{kind}/{ruleId}`
|
|
||||||
///
|
|
||||||
/// Retrieves a single specified push rule for this user.
|
|
||||||
pub async fn get_pushrule_route(
|
|
||||||
db: DatabaseGuard,
|
|
||||||
body: Ruma<get_pushrule::v3::IncomingRequest>,
|
|
||||||
) -> Result<get_pushrule::v3::Response> {
|
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
|
||||||
|
|
||||||
let event: PushRulesEvent = db
|
|
||||||
.account_data
|
|
||||||
.get(
|
|
||||||
None,
|
|
||||||
sender_user,
|
|
||||||
GlobalAccountDataEventType::PushRules.to_string().into(),
|
|
||||||
)?
|
|
||||||
.ok_or(Error::BadRequest(
|
|
||||||
ErrorKind::NotFound,
|
|
||||||
"PushRules event not found.",
|
|
||||||
))?;
|
|
||||||
|
|
||||||
let global = event.content.global;
|
|
||||||
let rule = match body.kind {
|
|
||||||
RuleKind::Override => global
|
|
||||||
.override_
|
|
||||||
.get(body.rule_id.as_str())
|
|
||||||
.map(|rule| rule.clone().into()),
|
|
||||||
RuleKind::Underride => global
|
|
||||||
.underride
|
|
||||||
.get(body.rule_id.as_str())
|
|
||||||
.map(|rule| rule.clone().into()),
|
|
||||||
RuleKind::Sender => global
|
|
||||||
.sender
|
|
||||||
.get(body.rule_id.as_str())
|
|
||||||
.map(|rule| rule.clone().into()),
|
|
||||||
RuleKind::Room => global
|
|
||||||
.room
|
|
||||||
.get(body.rule_id.as_str())
|
|
||||||
.map(|rule| rule.clone().into()),
|
|
||||||
RuleKind::Content => global
|
|
||||||
.content
|
|
||||||
.get(body.rule_id.as_str())
|
|
||||||
.map(|rule| rule.clone().into()),
|
|
||||||
_ => None,
|
|
||||||
};
|
|
||||||
|
|
||||||
if let Some(rule) = rule {
|
|
||||||
Ok(get_pushrule::v3::Response { rule })
|
|
||||||
} else {
|
|
||||||
Err(Error::BadRequest(
|
|
||||||
ErrorKind::NotFound,
|
|
||||||
"Push rule not found.",
|
|
||||||
))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// # `PUT /_matrix/client/r0/pushrules/{scope}/{kind}/{ruleId}`
|
|
||||||
///
|
|
||||||
/// Creates a single specified push rule for this user.
|
|
||||||
pub async fn set_pushrule_route(
|
|
||||||
db: DatabaseGuard,
|
|
||||||
body: Ruma<set_pushrule::v3::IncomingRequest>,
|
|
||||||
) -> Result<set_pushrule::v3::Response> {
|
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
|
||||||
let body = body.body;
|
|
||||||
|
|
||||||
if body.scope != "global" {
|
|
||||||
return Err(Error::BadRequest(
|
|
||||||
ErrorKind::InvalidParam,
|
|
||||||
"Scopes other than 'global' are not supported.",
|
|
||||||
));
|
|
||||||
}
|
|
||||||
|
|
||||||
let mut event: PushRulesEvent = db
|
|
||||||
.account_data
|
|
||||||
.get(
|
|
||||||
None,
|
|
||||||
sender_user,
|
|
||||||
GlobalAccountDataEventType::PushRules.to_string().into(),
|
|
||||||
)?
|
|
||||||
.ok_or(Error::BadRequest(
|
|
||||||
ErrorKind::NotFound,
|
|
||||||
"PushRules event not found.",
|
|
||||||
))?;
|
|
||||||
|
|
||||||
let global = &mut event.content.global;
|
|
||||||
match body.kind {
|
|
||||||
RuleKind::Override => {
|
|
||||||
global.override_.replace(
|
|
||||||
ConditionalPushRuleInit {
|
|
||||||
actions: body.actions,
|
|
||||||
default: false,
|
|
||||||
enabled: true,
|
|
||||||
rule_id: body.rule_id,
|
|
||||||
conditions: body.conditions,
|
|
||||||
}
|
|
||||||
.into(),
|
|
||||||
);
|
|
||||||
}
|
|
||||||
RuleKind::Underride => {
|
|
||||||
global.underride.replace(
|
|
||||||
ConditionalPushRuleInit {
|
|
||||||
actions: body.actions,
|
|
||||||
default: false,
|
|
||||||
enabled: true,
|
|
||||||
rule_id: body.rule_id,
|
|
||||||
conditions: body.conditions,
|
|
||||||
}
|
|
||||||
.into(),
|
|
||||||
);
|
|
||||||
}
|
|
||||||
RuleKind::Sender => {
|
|
||||||
global.sender.replace(
|
|
||||||
SimplePushRuleInit {
|
|
||||||
actions: body.actions,
|
|
||||||
default: false,
|
|
||||||
enabled: true,
|
|
||||||
rule_id: body.rule_id,
|
|
||||||
}
|
|
||||||
.into(),
|
|
||||||
);
|
|
||||||
}
|
|
||||||
RuleKind::Room => {
|
|
||||||
global.room.replace(
|
|
||||||
SimplePushRuleInit {
|
|
||||||
actions: body.actions,
|
|
||||||
default: false,
|
|
||||||
enabled: true,
|
|
||||||
rule_id: body.rule_id,
|
|
||||||
}
|
|
||||||
.into(),
|
|
||||||
);
|
|
||||||
}
|
|
||||||
RuleKind::Content => {
|
|
||||||
global.content.replace(
|
|
||||||
PatternedPushRuleInit {
|
|
||||||
actions: body.actions,
|
|
||||||
default: false,
|
|
||||||
enabled: true,
|
|
||||||
rule_id: body.rule_id,
|
|
||||||
pattern: body.pattern.unwrap_or_default(),
|
|
||||||
}
|
|
||||||
.into(),
|
|
||||||
);
|
|
||||||
}
|
|
||||||
_ => {}
|
|
||||||
}
|
|
||||||
|
|
||||||
db.account_data.update(
|
|
||||||
None,
|
|
||||||
sender_user,
|
|
||||||
GlobalAccountDataEventType::PushRules.to_string().into(),
|
|
||||||
&event,
|
|
||||||
&db.globals,
|
|
||||||
)?;
|
|
||||||
|
|
||||||
db.flush()?;
|
|
||||||
|
|
||||||
Ok(set_pushrule::v3::Response {})
|
|
||||||
}
|
|
||||||
|
|
||||||
/// # `GET /_matrix/client/r0/pushrules/{scope}/{kind}/{ruleId}/actions`
|
|
||||||
///
|
|
||||||
/// Gets the actions of a single specified push rule for this user.
|
|
||||||
pub async fn get_pushrule_actions_route(
|
|
||||||
db: DatabaseGuard,
|
|
||||||
body: Ruma<get_pushrule_actions::v3::IncomingRequest>,
|
|
||||||
) -> Result<get_pushrule_actions::v3::Response> {
|
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
|
||||||
|
|
||||||
if body.scope != "global" {
|
|
||||||
return Err(Error::BadRequest(
|
|
||||||
ErrorKind::InvalidParam,
|
|
||||||
"Scopes other than 'global' are not supported.",
|
|
||||||
));
|
|
||||||
}
|
|
||||||
|
|
||||||
let mut event: PushRulesEvent = db
|
|
||||||
.account_data
|
|
||||||
.get(
|
|
||||||
None,
|
|
||||||
sender_user,
|
|
||||||
GlobalAccountDataEventType::PushRules.to_string().into(),
|
|
||||||
)?
|
|
||||||
.ok_or(Error::BadRequest(
|
|
||||||
ErrorKind::NotFound,
|
|
||||||
"PushRules event not found.",
|
|
||||||
))?;
|
|
||||||
|
|
||||||
let global = &mut event.content.global;
|
|
||||||
let actions = match body.kind {
|
|
||||||
RuleKind::Override => global
|
|
||||||
.override_
|
|
||||||
.get(body.rule_id.as_str())
|
|
||||||
.map(|rule| rule.actions.clone()),
|
|
||||||
RuleKind::Underride => global
|
|
||||||
.underride
|
|
||||||
.get(body.rule_id.as_str())
|
|
||||||
.map(|rule| rule.actions.clone()),
|
|
||||||
RuleKind::Sender => global
|
|
||||||
.sender
|
|
||||||
.get(body.rule_id.as_str())
|
|
||||||
.map(|rule| rule.actions.clone()),
|
|
||||||
RuleKind::Room => global
|
|
||||||
.room
|
|
||||||
.get(body.rule_id.as_str())
|
|
||||||
.map(|rule| rule.actions.clone()),
|
|
||||||
RuleKind::Content => global
|
|
||||||
.content
|
|
||||||
.get(body.rule_id.as_str())
|
|
||||||
.map(|rule| rule.actions.clone()),
|
|
||||||
_ => None,
|
|
||||||
};
|
|
||||||
|
|
||||||
db.flush()?;
|
|
||||||
|
|
||||||
Ok(get_pushrule_actions::v3::Response {
|
|
||||||
actions: actions.unwrap_or_default(),
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
/// # `PUT /_matrix/client/r0/pushrules/{scope}/{kind}/{ruleId}/actions`
|
|
||||||
///
|
|
||||||
/// Sets the actions of a single specified push rule for this user.
|
|
||||||
pub async fn set_pushrule_actions_route(
|
|
||||||
db: DatabaseGuard,
|
|
||||||
body: Ruma<set_pushrule_actions::v3::IncomingRequest>,
|
|
||||||
) -> Result<set_pushrule_actions::v3::Response> {
|
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
|
||||||
|
|
||||||
if body.scope != "global" {
|
|
||||||
return Err(Error::BadRequest(
|
|
||||||
ErrorKind::InvalidParam,
|
|
||||||
"Scopes other than 'global' are not supported.",
|
|
||||||
));
|
|
||||||
}
|
|
||||||
|
|
||||||
let mut event: PushRulesEvent = db
|
|
||||||
.account_data
|
|
||||||
.get(
|
|
||||||
None,
|
|
||||||
sender_user,
|
|
||||||
GlobalAccountDataEventType::PushRules.to_string().into(),
|
|
||||||
)?
|
|
||||||
.ok_or(Error::BadRequest(
|
|
||||||
ErrorKind::NotFound,
|
|
||||||
"PushRules event not found.",
|
|
||||||
))?;
|
|
||||||
|
|
||||||
let global = &mut event.content.global;
|
|
||||||
match body.kind {
|
|
||||||
RuleKind::Override => {
|
|
||||||
if let Some(mut rule) = global.override_.get(body.rule_id.as_str()).cloned() {
|
|
||||||
rule.actions = body.actions.clone();
|
|
||||||
global.override_.replace(rule);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
RuleKind::Underride => {
|
|
||||||
if let Some(mut rule) = global.underride.get(body.rule_id.as_str()).cloned() {
|
|
||||||
rule.actions = body.actions.clone();
|
|
||||||
global.underride.replace(rule);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
RuleKind::Sender => {
|
|
||||||
if let Some(mut rule) = global.sender.get(body.rule_id.as_str()).cloned() {
|
|
||||||
rule.actions = body.actions.clone();
|
|
||||||
global.sender.replace(rule);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
RuleKind::Room => {
|
|
||||||
if let Some(mut rule) = global.room.get(body.rule_id.as_str()).cloned() {
|
|
||||||
rule.actions = body.actions.clone();
|
|
||||||
global.room.replace(rule);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
RuleKind::Content => {
|
|
||||||
if let Some(mut rule) = global.content.get(body.rule_id.as_str()).cloned() {
|
|
||||||
rule.actions = body.actions.clone();
|
|
||||||
global.content.replace(rule);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
_ => {}
|
|
||||||
};
|
|
||||||
|
|
||||||
db.account_data.update(
|
|
||||||
None,
|
|
||||||
sender_user,
|
|
||||||
GlobalAccountDataEventType::PushRules.to_string().into(),
|
|
||||||
&event,
|
|
||||||
&db.globals,
|
|
||||||
)?;
|
|
||||||
|
|
||||||
db.flush()?;
|
|
||||||
|
|
||||||
Ok(set_pushrule_actions::v3::Response {})
|
|
||||||
}
|
|
||||||
|
|
||||||
/// # `GET /_matrix/client/r0/pushrules/{scope}/{kind}/{ruleId}/enabled`
|
|
||||||
///
|
|
||||||
/// Gets the enabled status of a single specified push rule for this user.
|
|
||||||
pub async fn get_pushrule_enabled_route(
|
|
||||||
db: DatabaseGuard,
|
|
||||||
body: Ruma<get_pushrule_enabled::v3::IncomingRequest>,
|
|
||||||
) -> Result<get_pushrule_enabled::v3::Response> {
|
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
|
||||||
|
|
||||||
if body.scope != "global" {
|
|
||||||
return Err(Error::BadRequest(
|
|
||||||
ErrorKind::InvalidParam,
|
|
||||||
"Scopes other than 'global' are not supported.",
|
|
||||||
));
|
|
||||||
}
|
|
||||||
|
|
||||||
let mut event: PushRulesEvent = db
|
|
||||||
.account_data
|
|
||||||
.get(
|
|
||||||
None,
|
|
||||||
sender_user,
|
|
||||||
GlobalAccountDataEventType::PushRules.to_string().into(),
|
|
||||||
)?
|
|
||||||
.ok_or(Error::BadRequest(
|
|
||||||
ErrorKind::NotFound,
|
|
||||||
"PushRules event not found.",
|
|
||||||
))?;
|
|
||||||
|
|
||||||
let global = &mut event.content.global;
|
|
||||||
let enabled = match body.kind {
|
|
||||||
RuleKind::Override => global
|
|
||||||
.override_
|
|
||||||
.iter()
|
|
||||||
.find(|rule| rule.rule_id == body.rule_id)
|
|
||||||
.map_or(false, |rule| rule.enabled),
|
|
||||||
RuleKind::Underride => global
|
|
||||||
.underride
|
|
||||||
.iter()
|
|
||||||
.find(|rule| rule.rule_id == body.rule_id)
|
|
||||||
.map_or(false, |rule| rule.enabled),
|
|
||||||
RuleKind::Sender => global
|
|
||||||
.sender
|
|
||||||
.iter()
|
|
||||||
.find(|rule| rule.rule_id == body.rule_id)
|
|
||||||
.map_or(false, |rule| rule.enabled),
|
|
||||||
RuleKind::Room => global
|
|
||||||
.room
|
|
||||||
.iter()
|
|
||||||
.find(|rule| rule.rule_id == body.rule_id)
|
|
||||||
.map_or(false, |rule| rule.enabled),
|
|
||||||
RuleKind::Content => global
|
|
||||||
.content
|
|
||||||
.iter()
|
|
||||||
.find(|rule| rule.rule_id == body.rule_id)
|
|
||||||
.map_or(false, |rule| rule.enabled),
|
|
||||||
_ => false,
|
|
||||||
};
|
|
||||||
|
|
||||||
db.flush()?;
|
|
||||||
|
|
||||||
Ok(get_pushrule_enabled::v3::Response { enabled })
|
|
||||||
}
|
|
||||||
|
|
||||||
/// # `PUT /_matrix/client/r0/pushrules/{scope}/{kind}/{ruleId}/enabled`
|
|
||||||
///
|
|
||||||
/// Sets the enabled status of a single specified push rule for this user.
|
|
||||||
pub async fn set_pushrule_enabled_route(
|
|
||||||
db: DatabaseGuard,
|
|
||||||
body: Ruma<set_pushrule_enabled::v3::IncomingRequest>,
|
|
||||||
) -> Result<set_pushrule_enabled::v3::Response> {
|
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
|
||||||
|
|
||||||
if body.scope != "global" {
|
|
||||||
return Err(Error::BadRequest(
|
|
||||||
ErrorKind::InvalidParam,
|
|
||||||
"Scopes other than 'global' are not supported.",
|
|
||||||
));
|
|
||||||
}
|
|
||||||
|
|
||||||
let mut event: PushRulesEvent = db
|
|
||||||
.account_data
|
|
||||||
.get(
|
|
||||||
None,
|
|
||||||
sender_user,
|
|
||||||
GlobalAccountDataEventType::PushRules.to_string().into(),
|
|
||||||
)?
|
|
||||||
.ok_or(Error::BadRequest(
|
|
||||||
ErrorKind::NotFound,
|
|
||||||
"PushRules event not found.",
|
|
||||||
))?;
|
|
||||||
|
|
||||||
let global = &mut event.content.global;
|
|
||||||
match body.kind {
|
|
||||||
RuleKind::Override => {
|
|
||||||
if let Some(mut rule) = global.override_.get(body.rule_id.as_str()).cloned() {
|
|
||||||
global.override_.remove(&rule);
|
|
||||||
rule.enabled = body.enabled;
|
|
||||||
global.override_.insert(rule);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
RuleKind::Underride => {
|
|
||||||
if let Some(mut rule) = global.underride.get(body.rule_id.as_str()).cloned() {
|
|
||||||
global.underride.remove(&rule);
|
|
||||||
rule.enabled = body.enabled;
|
|
||||||
global.underride.insert(rule);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
RuleKind::Sender => {
|
|
||||||
if let Some(mut rule) = global.sender.get(body.rule_id.as_str()).cloned() {
|
|
||||||
global.sender.remove(&rule);
|
|
||||||
rule.enabled = body.enabled;
|
|
||||||
global.sender.insert(rule);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
RuleKind::Room => {
|
|
||||||
if let Some(mut rule) = global.room.get(body.rule_id.as_str()).cloned() {
|
|
||||||
global.room.remove(&rule);
|
|
||||||
rule.enabled = body.enabled;
|
|
||||||
global.room.insert(rule);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
RuleKind::Content => {
|
|
||||||
if let Some(mut rule) = global.content.get(body.rule_id.as_str()).cloned() {
|
|
||||||
global.content.remove(&rule);
|
|
||||||
rule.enabled = body.enabled;
|
|
||||||
global.content.insert(rule);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
_ => {}
|
|
||||||
}
|
|
||||||
|
|
||||||
db.account_data.update(
|
|
||||||
None,
|
|
||||||
sender_user,
|
|
||||||
GlobalAccountDataEventType::PushRules.to_string().into(),
|
|
||||||
&event,
|
|
||||||
&db.globals,
|
|
||||||
)?;
|
|
||||||
|
|
||||||
db.flush()?;
|
|
||||||
|
|
||||||
Ok(set_pushrule_enabled::v3::Response {})
|
|
||||||
}
|
|
||||||
|
|
||||||
/// # `DELETE /_matrix/client/r0/pushrules/{scope}/{kind}/{ruleId}`
|
|
||||||
///
|
|
||||||
/// Deletes a single specified push rule for this user.
|
|
||||||
pub async fn delete_pushrule_route(
|
|
||||||
db: DatabaseGuard,
|
|
||||||
body: Ruma<delete_pushrule::v3::IncomingRequest>,
|
|
||||||
) -> Result<delete_pushrule::v3::Response> {
|
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
|
||||||
|
|
||||||
if body.scope != "global" {
|
|
||||||
return Err(Error::BadRequest(
|
|
||||||
ErrorKind::InvalidParam,
|
|
||||||
"Scopes other than 'global' are not supported.",
|
|
||||||
));
|
|
||||||
}
|
|
||||||
|
|
||||||
let mut event: PushRulesEvent = db
|
|
||||||
.account_data
|
|
||||||
.get(
|
|
||||||
None,
|
|
||||||
sender_user,
|
|
||||||
GlobalAccountDataEventType::PushRules.to_string().into(),
|
|
||||||
)?
|
|
||||||
.ok_or(Error::BadRequest(
|
|
||||||
ErrorKind::NotFound,
|
|
||||||
"PushRules event not found.",
|
|
||||||
))?;
|
|
||||||
|
|
||||||
let global = &mut event.content.global;
|
|
||||||
match body.kind {
|
|
||||||
RuleKind::Override => {
|
|
||||||
if let Some(rule) = global.override_.get(body.rule_id.as_str()).cloned() {
|
|
||||||
global.override_.remove(&rule);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
RuleKind::Underride => {
|
|
||||||
if let Some(rule) = global.underride.get(body.rule_id.as_str()).cloned() {
|
|
||||||
global.underride.remove(&rule);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
RuleKind::Sender => {
|
|
||||||
if let Some(rule) = global.sender.get(body.rule_id.as_str()).cloned() {
|
|
||||||
global.sender.remove(&rule);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
RuleKind::Room => {
|
|
||||||
if let Some(rule) = global.room.get(body.rule_id.as_str()).cloned() {
|
|
||||||
global.room.remove(&rule);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
RuleKind::Content => {
|
|
||||||
if let Some(rule) = global.content.get(body.rule_id.as_str()).cloned() {
|
|
||||||
global.content.remove(&rule);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
_ => {}
|
|
||||||
}
|
|
||||||
|
|
||||||
db.account_data.update(
|
|
||||||
None,
|
|
||||||
sender_user,
|
|
||||||
GlobalAccountDataEventType::PushRules.to_string().into(),
|
|
||||||
&event,
|
|
||||||
&db.globals,
|
|
||||||
)?;
|
|
||||||
|
|
||||||
db.flush()?;
|
|
||||||
|
|
||||||
Ok(delete_pushrule::v3::Response {})
|
|
||||||
}
|
|
||||||
|
|
||||||
/// # `GET /_matrix/client/r0/pushers`
|
|
||||||
///
|
|
||||||
/// Gets all currently active pushers for the sender user.
|
|
||||||
pub async fn get_pushers_route(
|
|
||||||
db: DatabaseGuard,
|
|
||||||
body: Ruma<get_pushers::v3::Request>,
|
|
||||||
) -> Result<get_pushers::v3::Response> {
|
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
|
||||||
|
|
||||||
Ok(get_pushers::v3::Response {
|
|
||||||
pushers: db.pusher.get_pushers(sender_user)?,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
/// # `POST /_matrix/client/r0/pushers/set`
|
|
||||||
///
|
|
||||||
/// Adds a pusher for the sender user.
|
|
||||||
///
|
|
||||||
/// - TODO: Handle `append`
|
|
||||||
pub async fn set_pushers_route(
|
|
||||||
db: DatabaseGuard,
|
|
||||||
body: Ruma<set_pusher::v3::Request>,
|
|
||||||
) -> Result<set_pusher::v3::Response> {
|
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
|
||||||
let pusher = body.pusher.clone();
|
|
||||||
|
|
||||||
db.pusher.set_pusher(sender_user, pusher)?;
|
|
||||||
|
|
||||||
db.flush()?;
|
|
||||||
|
|
||||||
Ok(set_pusher::v3::Response::default())
|
|
||||||
}
|
|
|
@ -1,127 +0,0 @@
|
||||||
use crate::{database::DatabaseGuard, Error, Result, Ruma};
|
|
||||||
use ruma::{
|
|
||||||
api::client::{error::ErrorKind, read_marker::set_read_marker, receipt::create_receipt},
|
|
||||||
events::RoomAccountDataEventType,
|
|
||||||
receipt::ReceiptType,
|
|
||||||
MilliSecondsSinceUnixEpoch,
|
|
||||||
};
|
|
||||||
use std::collections::BTreeMap;
|
|
||||||
|
|
||||||
/// # `POST /_matrix/client/r0/rooms/{roomId}/read_markers`
|
|
||||||
///
|
|
||||||
/// Sets different types of read markers.
|
|
||||||
///
|
|
||||||
/// - Updates fully-read account data event to `fully_read`
|
|
||||||
/// - If `read_receipt` is set: Update private marker and public read receipt EDU
|
|
||||||
pub async fn set_read_marker_route(
|
|
||||||
db: DatabaseGuard,
|
|
||||||
body: Ruma<set_read_marker::v3::IncomingRequest>,
|
|
||||||
) -> Result<set_read_marker::v3::Response> {
|
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
|
||||||
|
|
||||||
let fully_read_event = ruma::events::fully_read::FullyReadEvent {
|
|
||||||
content: ruma::events::fully_read::FullyReadEventContent {
|
|
||||||
event_id: body.fully_read.clone(),
|
|
||||||
},
|
|
||||||
};
|
|
||||||
db.account_data.update(
|
|
||||||
Some(&body.room_id),
|
|
||||||
sender_user,
|
|
||||||
RoomAccountDataEventType::FullyRead,
|
|
||||||
&fully_read_event,
|
|
||||||
&db.globals,
|
|
||||||
)?;
|
|
||||||
|
|
||||||
if let Some(event) = &body.read_receipt {
|
|
||||||
db.rooms.edus.private_read_set(
|
|
||||||
&body.room_id,
|
|
||||||
sender_user,
|
|
||||||
db.rooms.get_pdu_count(event)?.ok_or(Error::BadRequest(
|
|
||||||
ErrorKind::InvalidParam,
|
|
||||||
"Event does not exist.",
|
|
||||||
))?,
|
|
||||||
&db.globals,
|
|
||||||
)?;
|
|
||||||
db.rooms
|
|
||||||
.reset_notification_counts(sender_user, &body.room_id)?;
|
|
||||||
|
|
||||||
let mut user_receipts = BTreeMap::new();
|
|
||||||
user_receipts.insert(
|
|
||||||
sender_user.clone(),
|
|
||||||
ruma::events::receipt::Receipt {
|
|
||||||
ts: Some(MilliSecondsSinceUnixEpoch::now()),
|
|
||||||
},
|
|
||||||
);
|
|
||||||
|
|
||||||
let mut receipts = BTreeMap::new();
|
|
||||||
receipts.insert(ReceiptType::Read, user_receipts);
|
|
||||||
|
|
||||||
let mut receipt_content = BTreeMap::new();
|
|
||||||
receipt_content.insert(event.to_owned(), receipts);
|
|
||||||
|
|
||||||
db.rooms.edus.readreceipt_update(
|
|
||||||
sender_user,
|
|
||||||
&body.room_id,
|
|
||||||
ruma::events::receipt::ReceiptEvent {
|
|
||||||
content: ruma::events::receipt::ReceiptEventContent(receipt_content),
|
|
||||||
room_id: body.room_id.clone(),
|
|
||||||
},
|
|
||||||
&db.globals,
|
|
||||||
)?;
|
|
||||||
}
|
|
||||||
|
|
||||||
db.flush()?;
|
|
||||||
|
|
||||||
Ok(set_read_marker::v3::Response {})
|
|
||||||
}
|
|
||||||
|
|
||||||
/// # `POST /_matrix/client/r0/rooms/{roomId}/receipt/{receiptType}/{eventId}`
|
|
||||||
///
|
|
||||||
/// Sets private read marker and public read receipt EDU.
|
|
||||||
pub async fn create_receipt_route(
|
|
||||||
db: DatabaseGuard,
|
|
||||||
body: Ruma<create_receipt::v3::IncomingRequest>,
|
|
||||||
) -> Result<create_receipt::v3::Response> {
|
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
|
||||||
|
|
||||||
db.rooms.edus.private_read_set(
|
|
||||||
&body.room_id,
|
|
||||||
sender_user,
|
|
||||||
db.rooms
|
|
||||||
.get_pdu_count(&body.event_id)?
|
|
||||||
.ok_or(Error::BadRequest(
|
|
||||||
ErrorKind::InvalidParam,
|
|
||||||
"Event does not exist.",
|
|
||||||
))?,
|
|
||||||
&db.globals,
|
|
||||||
)?;
|
|
||||||
db.rooms
|
|
||||||
.reset_notification_counts(sender_user, &body.room_id)?;
|
|
||||||
|
|
||||||
let mut user_receipts = BTreeMap::new();
|
|
||||||
user_receipts.insert(
|
|
||||||
sender_user.clone(),
|
|
||||||
ruma::events::receipt::Receipt {
|
|
||||||
ts: Some(MilliSecondsSinceUnixEpoch::now()),
|
|
||||||
},
|
|
||||||
);
|
|
||||||
let mut receipts = BTreeMap::new();
|
|
||||||
receipts.insert(ReceiptType::Read, user_receipts);
|
|
||||||
|
|
||||||
let mut receipt_content = BTreeMap::new();
|
|
||||||
receipt_content.insert(body.event_id.to_owned(), receipts);
|
|
||||||
|
|
||||||
db.rooms.edus.readreceipt_update(
|
|
||||||
sender_user,
|
|
||||||
&body.room_id,
|
|
||||||
ruma::events::receipt::ReceiptEvent {
|
|
||||||
content: ruma::events::receipt::ReceiptEventContent(receipt_content),
|
|
||||||
room_id: body.room_id.clone(),
|
|
||||||
},
|
|
||||||
&db.globals,
|
|
||||||
)?;
|
|
||||||
|
|
||||||
db.flush()?;
|
|
||||||
|
|
||||||
Ok(create_receipt::v3::Response {})
|
|
||||||
}
|
|
|
@ -1,952 +0,0 @@
|
||||||
use crate::{database::DatabaseGuard, Database, Error, Result, Ruma, RumaResponse};
|
|
||||||
use ruma::{
|
|
||||||
api::client::{
|
|
||||||
filter::{IncomingFilterDefinition, LazyLoadOptions},
|
|
||||||
sync::sync_events,
|
|
||||||
uiaa::UiaaResponse,
|
|
||||||
},
|
|
||||||
events::{
|
|
||||||
room::member::{MembershipState, RoomMemberEventContent},
|
|
||||||
RoomEventType, StateEventType,
|
|
||||||
},
|
|
||||||
serde::Raw,
|
|
||||||
DeviceId, RoomId, UserId,
|
|
||||||
};
|
|
||||||
use std::{
|
|
||||||
collections::{hash_map::Entry, BTreeMap, HashMap, HashSet},
|
|
||||||
sync::Arc,
|
|
||||||
time::Duration,
|
|
||||||
};
|
|
||||||
use tokio::sync::watch::Sender;
|
|
||||||
use tracing::error;
|
|
||||||
|
|
||||||
/// # `GET /_matrix/client/r0/sync`
|
|
||||||
///
|
|
||||||
/// Synchronize the client's state with the latest state on the server.
|
|
||||||
///
|
|
||||||
/// - This endpoint takes a `since` parameter which should be the `next_batch` value from a
|
|
||||||
/// previous request for incremental syncs.
|
|
||||||
///
|
|
||||||
/// Calling this endpoint without a `since` parameter returns:
|
|
||||||
/// - Some of the most recent events of each timeline
|
|
||||||
/// - Notification counts for each room
|
|
||||||
/// - Joined and invited member counts, heroes
|
|
||||||
/// - All state events
|
|
||||||
///
|
|
||||||
/// Calling this endpoint with a `since` parameter from a previous `next_batch` returns:
|
|
||||||
/// For joined rooms:
|
|
||||||
/// - Some of the most recent events of each timeline that happened after since
|
|
||||||
/// - If user joined the room after since: All state events (unless lazy loading is activated) and
|
|
||||||
/// all device list updates in that room
|
|
||||||
/// - If the user was already in the room: A list of all events that are in the state now, but were
|
|
||||||
/// not in the state at `since`
|
|
||||||
/// - If the state we send contains a member event: Joined and invited member counts, heroes
|
|
||||||
/// - Device list updates that happened after `since`
|
|
||||||
/// - If there are events in the timeline we send or the user send updated his read mark: Notification counts
|
|
||||||
/// - EDUs that are active now (read receipts, typing updates, presence)
|
|
||||||
/// - TODO: Allow multiple sync streams to support Pantalaimon
|
|
||||||
///
|
|
||||||
/// For invited rooms:
|
|
||||||
/// - If the user was invited after `since`: A subset of the state of the room at the point of the invite
|
|
||||||
///
|
|
||||||
/// For left rooms:
|
|
||||||
/// - If the user left after `since`: prev_batch token, empty state (TODO: subset of the state at the point of the leave)
|
|
||||||
///
|
|
||||||
/// - Sync is handled in an async task, multiple requests from the same device with the same
|
|
||||||
/// `since` will be cached
|
|
||||||
pub async fn sync_events_route(
|
|
||||||
db: DatabaseGuard,
|
|
||||||
body: Ruma<sync_events::v3::IncomingRequest>,
|
|
||||||
) -> Result<sync_events::v3::Response, RumaResponse<UiaaResponse>> {
|
|
||||||
let sender_user = body.sender_user.expect("user is authenticated");
|
|
||||||
let sender_device = body.sender_device.expect("user is authenticated");
|
|
||||||
let body = body.body;
|
|
||||||
|
|
||||||
let arc_db = Arc::new(db);
|
|
||||||
|
|
||||||
let mut rx = match arc_db
|
|
||||||
.globals
|
|
||||||
.sync_receivers
|
|
||||||
.write()
|
|
||||||
.unwrap()
|
|
||||||
.entry((sender_user.clone(), sender_device.clone()))
|
|
||||||
{
|
|
||||||
Entry::Vacant(v) => {
|
|
||||||
let (tx, rx) = tokio::sync::watch::channel(None);
|
|
||||||
|
|
||||||
v.insert((body.since.to_owned(), rx.clone()));
|
|
||||||
|
|
||||||
tokio::spawn(sync_helper_wrapper(
|
|
||||||
Arc::clone(&arc_db),
|
|
||||||
sender_user.clone(),
|
|
||||||
sender_device.clone(),
|
|
||||||
body,
|
|
||||||
tx,
|
|
||||||
));
|
|
||||||
|
|
||||||
rx
|
|
||||||
}
|
|
||||||
Entry::Occupied(mut o) => {
|
|
||||||
if o.get().0 != body.since {
|
|
||||||
let (tx, rx) = tokio::sync::watch::channel(None);
|
|
||||||
|
|
||||||
o.insert((body.since.clone(), rx.clone()));
|
|
||||||
|
|
||||||
tokio::spawn(sync_helper_wrapper(
|
|
||||||
Arc::clone(&arc_db),
|
|
||||||
sender_user.clone(),
|
|
||||||
sender_device.clone(),
|
|
||||||
body,
|
|
||||||
tx,
|
|
||||||
));
|
|
||||||
|
|
||||||
rx
|
|
||||||
} else {
|
|
||||||
o.get().1.clone()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
let we_have_to_wait = rx.borrow().is_none();
|
|
||||||
if we_have_to_wait {
|
|
||||||
if let Err(e) = rx.changed().await {
|
|
||||||
error!("Error waiting for sync: {}", e);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
let result = match rx
|
|
||||||
.borrow()
|
|
||||||
.as_ref()
|
|
||||||
.expect("When sync channel changes it's always set to some")
|
|
||||||
{
|
|
||||||
Ok(response) => Ok(response.clone()),
|
|
||||||
Err(error) => Err(error.to_response()),
|
|
||||||
};
|
|
||||||
|
|
||||||
result
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn sync_helper_wrapper(
|
|
||||||
db: Arc<DatabaseGuard>,
|
|
||||||
sender_user: Box<UserId>,
|
|
||||||
sender_device: Box<DeviceId>,
|
|
||||||
body: sync_events::v3::IncomingRequest,
|
|
||||||
tx: Sender<Option<Result<sync_events::v3::Response>>>,
|
|
||||||
) {
|
|
||||||
let since = body.since.clone();
|
|
||||||
|
|
||||||
let r = sync_helper(
|
|
||||||
Arc::clone(&db),
|
|
||||||
sender_user.clone(),
|
|
||||||
sender_device.clone(),
|
|
||||||
body,
|
|
||||||
)
|
|
||||||
.await;
|
|
||||||
|
|
||||||
if let Ok((_, caching_allowed)) = r {
|
|
||||||
if !caching_allowed {
|
|
||||||
match db
|
|
||||||
.globals
|
|
||||||
.sync_receivers
|
|
||||||
.write()
|
|
||||||
.unwrap()
|
|
||||||
.entry((sender_user, sender_device))
|
|
||||||
{
|
|
||||||
Entry::Occupied(o) => {
|
|
||||||
// Only remove if the device didn't start a different /sync already
|
|
||||||
if o.get().0 == since {
|
|
||||||
o.remove();
|
|
||||||
}
|
|
||||||
}
|
|
||||||
Entry::Vacant(_) => {}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
drop(db);
|
|
||||||
|
|
||||||
let _ = tx.send(Some(r.map(|(r, _)| r)));
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Builds the /sync response for one request of `sender_user`/`sender_device`.
///
/// Returns the response together with a flag telling the caller whether the
/// result may be cached (`true` = caching allowed). Caching is disallowed when
/// the response was empty and we only waited on the watcher, or when no
/// progress was made (`since == next_batch`).
async fn sync_helper(
    db: Arc<DatabaseGuard>,
    sender_user: Box<UserId>,
    sender_device: Box<DeviceId>,
    body: sync_events::v3::IncomingRequest,
    // bool = caching allowed
) -> Result<(sync_events::v3::Response, bool), Error> {
    use sync_events::v3::{
        DeviceLists, Ephemeral, GlobalAccountData, IncomingFilter, InviteState, InvitedRoom,
        JoinedRoom, LeftRoom, Presence, RoomAccountData, RoomSummary, Rooms, State, Timeline,
        ToDevice, UnreadNotificationsCount,
    };

    // TODO: match body.set_presence {
    db.rooms.edus.ping_presence(&sender_user)?;

    // Setup watchers, so if there's no response, we can wait for them
    let watcher = db.watch(&sender_user, &sender_device);

    let next_batch = db.globals.current_count()?;
    let next_batch_string = next_batch.to_string();

    // Load filter
    let filter = match body.filter {
        None => IncomingFilterDefinition::default(),
        Some(IncomingFilter::FilterDefinition(filter)) => filter,
        Some(IncomingFilter::FilterId(filter_id)) => db
            .users
            .get_filter(&sender_user, &filter_id)?
            .unwrap_or_default(),
    };

    let (lazy_load_enabled, lazy_load_send_redundant) = match filter.room.state.lazy_load_options {
        LazyLoadOptions::Enabled {
            include_redundant_members: redundant,
        } => (true, redundant),
        _ => (false, false),
    };

    let mut joined_rooms = BTreeMap::new();
    // `since` falls back to 0 (initial sync) when absent or unparsable.
    let since = body
        .since
        .clone()
        .and_then(|string| string.parse().ok())
        .unwrap_or(0);

    let mut presence_updates = HashMap::new();
    let mut left_encrypted_users = HashSet::new(); // Users that have left any encrypted rooms the sender was in
    let mut device_list_updates = HashSet::new();
    let mut device_list_left = HashSet::new();

    // Look for device list updates of this account
    device_list_updates.extend(
        db.users
            .keys_changed(&sender_user.to_string(), since, None)
            .filter_map(|r| r.ok()),
    );

    let all_joined_rooms = db.rooms.rooms_joined(&sender_user).collect::<Vec<_>>();
    for room_id in all_joined_rooms {
        let room_id = room_id?;

        {
            // Get and drop the lock to wait for remaining operations to finish
            // This will make sure that we have all events until next_batch
            let mutex_insert = Arc::clone(
                db.globals
                    .roomid_mutex_insert
                    .write()
                    .unwrap()
                    .entry(room_id.clone())
                    .or_default(),
            );
            let insert_lock = mutex_insert.lock().unwrap();
            drop(insert_lock);
        }

        let timeline_pdus;
        let limited;
        if db.rooms.last_timeline_count(&sender_user, &room_id)? > since {
            let mut non_timeline_pdus = db
                .rooms
                .pdus_until(&sender_user, &room_id, u64::MAX)?
                .filter_map(|r| {
                    // Filter out buggy events
                    if r.is_err() {
                        error!("Bad pdu in pdus_since: {:?}", r);
                    }
                    r.ok()
                })
                .take_while(|(pduid, _)| {
                    db.rooms
                        .pdu_count(pduid)
                        .map_or(false, |count| count > since)
                });

            // Take the last 10 events for the timeline
            timeline_pdus = non_timeline_pdus
                .by_ref()
                .take(10)
                .collect::<Vec<_>>()
                .into_iter()
                .rev()
                .collect::<Vec<_>>();

            // The /sync response doesn't always return all messages, so we mark the
            // timeline as limited when more events remain in non_timeline_pdus
            limited = non_timeline_pdus.next().is_some();
        } else {
            timeline_pdus = Vec::new();
            limited = false;
        }

        let send_notification_counts = !timeline_pdus.is_empty()
            || db
                .rooms
                .edus
                .last_privateread_update(&sender_user, &room_id)?
                > since;

        // Senders of timeline events; used below to decide which member events
        // must be sent even with lazy loading enabled.
        let mut timeline_users = HashSet::new();
        for (_, event) in &timeline_pdus {
            timeline_users.insert(event.sender.as_str().to_owned());
        }

        db.rooms
            .lazy_load_confirm_delivery(&sender_user, &sender_device, &room_id, since)?;

        // Database queries:

        let current_shortstatehash = if let Some(s) = db.rooms.current_shortstatehash(&room_id)? {
            s
        } else {
            error!("Room {} has no state", room_id);
            continue;
        };

        let since_shortstatehash = db.rooms.get_token_shortstatehash(&room_id, since)?;

        // Calculates joined_member_count, invited_member_count and heroes
        let calculate_counts = || {
            let joined_member_count = db.rooms.room_joined_count(&room_id)?.unwrap_or(0);
            let invited_member_count = db.rooms.room_invited_count(&room_id)?.unwrap_or(0);

            // Recalculate heroes (first 5 members)
            let mut heroes = Vec::new();

            if joined_member_count + invited_member_count <= 5 {
                // Go through all PDUs and for each member event, check if the user is still joined or
                // invited until we have 5 or we reach the end

                for hero in db
                    .rooms
                    .all_pdus(&sender_user, &room_id)?
                    .filter_map(|pdu| pdu.ok()) // Ignore all broken pdus
                    .filter(|(_, pdu)| pdu.kind == RoomEventType::RoomMember)
                    .map(|(_, pdu)| {
                        let content: RoomMemberEventContent =
                            serde_json::from_str(pdu.content.get()).map_err(|_| {
                                Error::bad_database("Invalid member event in database.")
                            })?;

                        if let Some(state_key) = &pdu.state_key {
                            let user_id = UserId::parse(state_key.clone()).map_err(|_| {
                                Error::bad_database("Invalid UserId in member PDU.")
                            })?;

                            // The membership was and still is invite or join
                            if matches!(
                                content.membership,
                                MembershipState::Join | MembershipState::Invite
                            ) && (db.rooms.is_joined(&user_id, &room_id)?
                                || db.rooms.is_invited(&user_id, &room_id)?)
                            {
                                Ok::<_, Error>(Some(state_key.clone()))
                            } else {
                                Ok(None)
                            }
                        } else {
                            Ok(None)
                        }
                    })
                    // Filter out buggy users
                    .filter_map(|u| u.ok())
                    // Filter for possible heroes
                    .flatten()
                {
                    if heroes.contains(&hero) || hero == sender_user.as_str() {
                        continue;
                    }

                    heroes.push(hero);
                }
            }

            Ok::<_, Error>((
                Some(joined_member_count),
                Some(invited_member_count),
                heroes,
            ))
        };

        let (
            heroes,
            joined_member_count,
            invited_member_count,
            joined_since_last_sync,
            state_events,
        ) = if since_shortstatehash.is_none() {
            // Probably since = 0, we will do an initial sync

            let (joined_member_count, invited_member_count, heroes) = calculate_counts()?;

            let current_state_ids = db.rooms.state_full_ids(current_shortstatehash).await?;

            let mut state_events = Vec::new();
            let mut lazy_loaded = HashSet::new();

            let mut i = 0;
            for (shortstatekey, id) in current_state_ids {
                let (event_type, state_key) = db.rooms.get_statekey_from_short(shortstatekey)?;

                if event_type != StateEventType::RoomMember {
                    let pdu = match db.rooms.get_pdu(&id)? {
                        Some(pdu) => pdu,
                        None => {
                            error!("Pdu in state not found: {}", id);
                            continue;
                        }
                    };
                    state_events.push(pdu);

                    i += 1;
                    // Yield regularly so this long loop doesn't starve the executor.
                    if i % 100 == 0 {
                        tokio::task::yield_now().await;
                    }
                } else if !lazy_load_enabled
                    || body.full_state
                    || timeline_users.contains(&state_key)
                {
                    let pdu = match db.rooms.get_pdu(&id)? {
                        Some(pdu) => pdu,
                        None => {
                            error!("Pdu in state not found: {}", id);
                            continue;
                        }
                    };

                    // This check is in case a bad user ID made it into the database
                    if let Ok(uid) = UserId::parse(state_key.as_ref()) {
                        lazy_loaded.insert(uid);
                    }
                    state_events.push(pdu);

                    i += 1;
                    if i % 100 == 0 {
                        tokio::task::yield_now().await;
                    }
                }
            }

            // Reset lazy loading because this is an initial sync
            db.rooms
                .lazy_load_reset(&sender_user, &sender_device, &room_id)?;

            // The state_events above should contain all timeline_users, let's mark them as lazy
            // loaded.
            db.rooms.lazy_load_mark_sent(
                &sender_user,
                &sender_device,
                &room_id,
                lazy_loaded,
                next_batch,
            );

            (
                heroes,
                joined_member_count,
                invited_member_count,
                true,
                state_events,
            )
        } else if timeline_pdus.is_empty() && since_shortstatehash == Some(current_shortstatehash) {
            // No state changes
            (Vec::new(), None, None, false, Vec::new())
        } else {
            // Incremental /sync
            let since_shortstatehash = since_shortstatehash.unwrap();

            let since_sender_member: Option<RoomMemberEventContent> = db
                .rooms
                .state_get(
                    since_shortstatehash,
                    &StateEventType::RoomMember,
                    sender_user.as_str(),
                )?
                .and_then(|pdu| {
                    serde_json::from_str(pdu.content.get())
                        .map_err(|_| Error::bad_database("Invalid PDU in database."))
                        .ok()
                });

            // If the sender wasn't joined at `since`, they joined during this window.
            let joined_since_last_sync = since_sender_member
                .map_or(true, |member| member.membership != MembershipState::Join);

            let mut state_events = Vec::new();
            let mut lazy_loaded = HashSet::new();

            if since_shortstatehash != current_shortstatehash {
                let current_state_ids = db.rooms.state_full_ids(current_shortstatehash).await?;
                let since_state_ids = db.rooms.state_full_ids(since_shortstatehash).await?;

                for (key, id) in current_state_ids {
                    // Send only state that changed since `since` (or everything on full_state).
                    if body.full_state || since_state_ids.get(&key) != Some(&id) {
                        let pdu = match db.rooms.get_pdu(&id)? {
                            Some(pdu) => pdu,
                            None => {
                                error!("Pdu in state not found: {}", id);
                                continue;
                            }
                        };

                        if pdu.kind == RoomEventType::RoomMember {
                            match UserId::parse(
                                pdu.state_key
                                    .as_ref()
                                    .expect("State event has state key")
                                    .clone(),
                            ) {
                                Ok(state_key_userid) => {
                                    lazy_loaded.insert(state_key_userid);
                                }
                                Err(e) => error!("Invalid state key for member event: {}", e),
                            }
                        }

                        state_events.push(pdu);
                        tokio::task::yield_now().await;
                    }
                }
            }

            // Lazy loading: also send member events for senders of timeline events
            // that weren't delivered to this device before (or always, if redundant
            // members were requested).
            for (_, event) in &timeline_pdus {
                if lazy_loaded.contains(&event.sender) {
                    continue;
                }

                if !db.rooms.lazy_load_was_sent_before(
                    &sender_user,
                    &sender_device,
                    &room_id,
                    &event.sender,
                )? || lazy_load_send_redundant
                {
                    if let Some(member_event) = db.rooms.room_state_get(
                        &room_id,
                        &StateEventType::RoomMember,
                        event.sender.as_str(),
                    )? {
                        lazy_loaded.insert(event.sender.clone());
                        state_events.push(member_event);
                    }
                }
            }

            db.rooms.lazy_load_mark_sent(
                &sender_user,
                &sender_device,
                &room_id,
                lazy_loaded,
                next_batch,
            );

            let encrypted_room = db
                .rooms
                .state_get(current_shortstatehash, &StateEventType::RoomEncryption, "")?
                .is_some();

            let since_encryption =
                db.rooms
                    .state_get(since_shortstatehash, &StateEventType::RoomEncryption, "")?;

            // Calculations:
            let new_encrypted_room = encrypted_room && since_encryption.is_none();

            let send_member_count = state_events
                .iter()
                .any(|event| event.kind == RoomEventType::RoomMember);

            if encrypted_room {
                // Track membership changes in encrypted rooms for E2EE device lists.
                for state_event in &state_events {
                    if state_event.kind != RoomEventType::RoomMember {
                        continue;
                    }

                    if let Some(state_key) = &state_event.state_key {
                        let user_id = UserId::parse(state_key.clone())
                            .map_err(|_| Error::bad_database("Invalid UserId in member PDU."))?;

                        if user_id == sender_user {
                            continue;
                        }

                        let new_membership = serde_json::from_str::<RoomMemberEventContent>(
                            state_event.content.get(),
                        )
                        .map_err(|_| Error::bad_database("Invalid PDU in database."))?
                        .membership;

                        match new_membership {
                            MembershipState::Join => {
                                // A new user joined an encrypted room
                                if !share_encrypted_room(&db, &sender_user, &user_id, &room_id)? {
                                    device_list_updates.insert(user_id);
                                }
                            }
                            MembershipState::Leave => {
                                // Write down users that have left encrypted rooms we are in
                                left_encrypted_users.insert(user_id);
                            }
                            _ => {}
                        }
                    }
                }
            }

            if joined_since_last_sync && encrypted_room || new_encrypted_room {
                // If the user is in a new encrypted room, give them all joined users
                device_list_updates.extend(
                    db.rooms
                        .room_members(&room_id)
                        .flatten()
                        .filter(|user_id| {
                            // Don't send key updates from the sender to the sender
                            &sender_user != user_id
                        })
                        .filter(|user_id| {
                            // Only send keys if the sender doesn't share an encrypted room with the target already
                            !share_encrypted_room(&db, &sender_user, user_id, &room_id)
                                .unwrap_or(false)
                        }),
                );
            }

            let (joined_member_count, invited_member_count, heroes) = if send_member_count {
                calculate_counts()?
            } else {
                (None, None, Vec::new())
            };

            (
                heroes,
                joined_member_count,
                invited_member_count,
                joined_since_last_sync,
                state_events,
            )
        };

        // Look for device list updates in this room
        device_list_updates.extend(
            db.users
                .keys_changed(&room_id.to_string(), since, None)
                .filter_map(|r| r.ok()),
        );

        let notification_count = if send_notification_counts {
            Some(
                db.rooms
                    .notification_count(&sender_user, &room_id)?
                    .try_into()
                    .expect("notification count can't go that high"),
            )
        } else {
            None
        };

        let highlight_count = if send_notification_counts {
            Some(
                db.rooms
                    .highlight_count(&sender_user, &room_id)?
                    .try_into()
                    .expect("highlight count can't go that high"),
            )
        } else {
            None
        };

        let prev_batch = timeline_pdus
            .first()
            .map_or(Ok::<_, Error>(None), |(pdu_id, _)| {
                Ok(Some(db.rooms.pdu_count(pdu_id)?.to_string()))
            })?;

        let room_events: Vec<_> = timeline_pdus
            .iter()
            .map(|(_, pdu)| pdu.to_sync_room_event())
            .collect();

        let mut edus: Vec<_> = db
            .rooms
            .edus
            .readreceipts_since(&room_id, since)
            .filter_map(|r| r.ok()) // Filter out buggy events
            .map(|(_, _, v)| v)
            .collect();

        if db.rooms.edus.last_typing_update(&room_id, &db.globals)? > since {
            edus.push(
                serde_json::from_str(
                    &serde_json::to_string(&db.rooms.edus.typings_all(&room_id)?)
                        .expect("event is valid, we just created it"),
                )
                .expect("event is valid, we just created it"),
            );
        }

        // Save the state after this sync so we can send the correct state diff next sync
        db.rooms
            .associate_token_shortstatehash(&room_id, next_batch, current_shortstatehash)?;

        let joined_room = JoinedRoom {
            account_data: RoomAccountData {
                events: db
                    .account_data
                    .changes_since(Some(&room_id), &sender_user, since)?
                    .into_iter()
                    .filter_map(|(_, v)| {
                        serde_json::from_str(v.json().get())
                            .map_err(|_| Error::bad_database("Invalid account event in database."))
                            .ok()
                    })
                    .collect(),
            },
            summary: RoomSummary {
                heroes,
                joined_member_count: joined_member_count.map(|n| (n as u32).into()),
                invited_member_count: invited_member_count.map(|n| (n as u32).into()),
            },
            unread_notifications: UnreadNotificationsCount {
                highlight_count,
                notification_count,
            },
            timeline: Timeline {
                limited: limited || joined_since_last_sync,
                prev_batch,
                events: room_events,
            },
            state: State {
                events: state_events
                    .iter()
                    .map(|pdu| pdu.to_sync_state_event())
                    .collect(),
            },
            ephemeral: Ephemeral { events: edus },
        };

        if !joined_room.is_empty() {
            joined_rooms.insert(room_id.clone(), joined_room);
        }

        // Take presence updates from this room
        for (user_id, presence) in
            db.rooms
                .edus
                .presence_since(&room_id, since, &db.rooms, &db.globals)?
        {
            match presence_updates.entry(user_id) {
                Entry::Vacant(v) => {
                    v.insert(presence);
                }
                Entry::Occupied(mut o) => {
                    let p = o.get_mut();

                    // Update existing presence event with more info
                    p.content.presence = presence.content.presence;
                    if let Some(status_msg) = presence.content.status_msg {
                        p.content.status_msg = Some(status_msg);
                    }
                    if let Some(last_active_ago) = presence.content.last_active_ago {
                        p.content.last_active_ago = Some(last_active_ago);
                    }
                    if let Some(displayname) = presence.content.displayname {
                        p.content.displayname = Some(displayname);
                    }
                    if let Some(avatar_url) = presence.content.avatar_url {
                        p.content.avatar_url = Some(avatar_url);
                    }
                    if let Some(currently_active) = presence.content.currently_active {
                        p.content.currently_active = Some(currently_active);
                    }
                }
            }
        }
    }

    let mut left_rooms = BTreeMap::new();
    let all_left_rooms: Vec<_> = db.rooms.rooms_left(&sender_user).collect();
    for result in all_left_rooms {
        let (room_id, left_state_events) = result?;

        {
            // Get and drop the lock to wait for remaining operations to finish
            let mutex_insert = Arc::clone(
                db.globals
                    .roomid_mutex_insert
                    .write()
                    .unwrap()
                    .entry(room_id.clone())
                    .or_default(),
            );
            let insert_lock = mutex_insert.lock().unwrap();
            drop(insert_lock);
        }

        let left_count = db.rooms.get_left_count(&room_id, &sender_user)?;

        // Left before last sync
        if Some(since) >= left_count {
            continue;
        }

        left_rooms.insert(
            room_id.clone(),
            LeftRoom {
                account_data: RoomAccountData { events: Vec::new() },
                timeline: Timeline {
                    limited: false,
                    prev_batch: Some(next_batch_string.clone()),
                    events: Vec::new(),
                },
                state: State {
                    events: left_state_events,
                },
            },
        );
    }

    let mut invited_rooms = BTreeMap::new();
    let all_invited_rooms: Vec<_> = db.rooms.rooms_invited(&sender_user).collect();
    for result in all_invited_rooms {
        let (room_id, invite_state_events) = result?;

        {
            // Get and drop the lock to wait for remaining operations to finish
            let mutex_insert = Arc::clone(
                db.globals
                    .roomid_mutex_insert
                    .write()
                    .unwrap()
                    .entry(room_id.clone())
                    .or_default(),
            );
            let insert_lock = mutex_insert.lock().unwrap();
            drop(insert_lock);
        }

        let invite_count = db.rooms.get_invite_count(&room_id, &sender_user)?;

        // Invited before last sync
        if Some(since) >= invite_count {
            continue;
        }

        invited_rooms.insert(
            room_id.clone(),
            InvitedRoom {
                invite_state: InviteState {
                    events: invite_state_events,
                },
            },
        );
    }

    for user_id in left_encrypted_users {
        // NOTE(review): despite its name, this is `true` when the sender NO LONGER
        // shares any encrypted room with `user_id` (`.all(|e| !e)` inverts it);
        // the insert below is therefore correct, but the name is misleading.
        let still_share_encrypted_room = db
            .rooms
            .get_shared_rooms(vec![sender_user.clone(), user_id.clone()])?
            .filter_map(|r| r.ok())
            .filter_map(|other_room_id| {
                Some(
                    db.rooms
                        .room_state_get(&other_room_id, &StateEventType::RoomEncryption, "")
                        .ok()?
                        .is_some(),
                )
            })
            .all(|encrypted| !encrypted);
        // If the user doesn't share an encrypted room with the target anymore, we need to tell
        // them
        if still_share_encrypted_room {
            device_list_left.insert(user_id);
        }
    }

    // Remove all to-device events the device received *last time*
    db.users
        .remove_to_device_events(&sender_user, &sender_device, since)?;

    let response = sync_events::v3::Response {
        next_batch: next_batch_string,
        rooms: Rooms {
            leave: left_rooms,
            join: joined_rooms,
            invite: invited_rooms,
            knock: BTreeMap::new(), // TODO
        },
        presence: Presence {
            events: presence_updates
                .into_iter()
                .map(|(_, v)| Raw::new(&v).expect("PresenceEvent always serializes successfully"))
                .collect(),
        },
        account_data: GlobalAccountData {
            events: db
                .account_data
                .changes_since(None, &sender_user, since)?
                .into_iter()
                .filter_map(|(_, v)| {
                    serde_json::from_str(v.json().get())
                        .map_err(|_| Error::bad_database("Invalid account event in database."))
                        .ok()
                })
                .collect(),
        },
        device_lists: DeviceLists {
            changed: device_list_updates.into_iter().collect(),
            left: device_list_left.into_iter().collect(),
        },
        device_one_time_keys_count: db.users.count_one_time_keys(&sender_user, &sender_device)?,
        to_device: ToDevice {
            events: db
                .users
                .get_to_device_events(&sender_user, &sender_device)?,
        },
        // Fallback keys are not yet supported
        device_unused_fallback_key_types: None,
    };

    // TODO: Retry the endpoint instead of returning (waiting for #118)
    if !body.full_state
        && response.rooms.is_empty()
        && response.presence.is_empty()
        && response.account_data.is_empty()
        && response.device_lists.is_empty()
        && response.to_device.is_empty()
    {
        // Hang a few seconds so requests are not spammed
        // Stop hanging if new info arrives
        let mut duration = body.timeout.unwrap_or_default();
        if duration.as_secs() > 30 {
            duration = Duration::from_secs(30);
        }
        let _ = tokio::time::timeout(duration, watcher).await;
        Ok((response, false))
    } else {
        Ok((response, since != next_batch)) // Only cache if we made progress
    }
}
|
|
||||||
|
|
||||||
#[tracing::instrument(skip(db))]
|
|
||||||
fn share_encrypted_room(
|
|
||||||
db: &Database,
|
|
||||||
sender_user: &UserId,
|
|
||||||
user_id: &UserId,
|
|
||||||
ignore_room: &RoomId,
|
|
||||||
) -> Result<bool> {
|
|
||||||
Ok(db
|
|
||||||
.rooms
|
|
||||||
.get_shared_rooms(vec![sender_user.to_owned(), user_id.to_owned()])?
|
|
||||||
.filter_map(|r| r.ok())
|
|
||||||
.filter(|room_id| room_id != ignore_room)
|
|
||||||
.filter_map(|other_room_id| {
|
|
||||||
Some(
|
|
||||||
db.rooms
|
|
||||||
.room_state_get(&other_room_id, &StateEventType::RoomEncryption, "")
|
|
||||||
.ok()?
|
|
||||||
.is_some(),
|
|
||||||
)
|
|
||||||
})
|
|
||||||
.any(|encrypted| encrypted))
|
|
||||||
}
|
|
|
@ -1,117 +0,0 @@
|
||||||
use crate::{database::DatabaseGuard, Result, Ruma};
|
|
||||||
use ruma::{
|
|
||||||
api::client::tag::{create_tag, delete_tag, get_tags},
|
|
||||||
events::{
|
|
||||||
tag::{TagEvent, TagEventContent},
|
|
||||||
RoomAccountDataEventType,
|
|
||||||
},
|
|
||||||
};
|
|
||||||
use std::collections::BTreeMap;
|
|
||||||
|
|
||||||
/// # `PUT /_matrix/client/r0/user/{userId}/rooms/{roomId}/tags/{tag}`
|
|
||||||
///
|
|
||||||
/// Adds a tag to the room.
|
|
||||||
///
|
|
||||||
/// - Inserts the tag into the tag event of the room account data.
|
|
||||||
pub async fn update_tag_route(
|
|
||||||
db: DatabaseGuard,
|
|
||||||
body: Ruma<create_tag::v3::IncomingRequest>,
|
|
||||||
) -> Result<create_tag::v3::Response> {
|
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
|
||||||
|
|
||||||
let mut tags_event = db
|
|
||||||
.account_data
|
|
||||||
.get(
|
|
||||||
Some(&body.room_id),
|
|
||||||
sender_user,
|
|
||||||
RoomAccountDataEventType::Tag,
|
|
||||||
)?
|
|
||||||
.unwrap_or_else(|| TagEvent {
|
|
||||||
content: TagEventContent {
|
|
||||||
tags: BTreeMap::new(),
|
|
||||||
},
|
|
||||||
});
|
|
||||||
tags_event
|
|
||||||
.content
|
|
||||||
.tags
|
|
||||||
.insert(body.tag.clone().into(), body.tag_info.clone());
|
|
||||||
|
|
||||||
db.account_data.update(
|
|
||||||
Some(&body.room_id),
|
|
||||||
sender_user,
|
|
||||||
RoomAccountDataEventType::Tag,
|
|
||||||
&tags_event,
|
|
||||||
&db.globals,
|
|
||||||
)?;
|
|
||||||
|
|
||||||
db.flush()?;
|
|
||||||
|
|
||||||
Ok(create_tag::v3::Response {})
|
|
||||||
}
|
|
||||||
|
|
||||||
/// # `DELETE /_matrix/client/r0/user/{userId}/rooms/{roomId}/tags/{tag}`
|
|
||||||
///
|
|
||||||
/// Deletes a tag from the room.
|
|
||||||
///
|
|
||||||
/// - Removes the tag from the tag event of the room account data.
|
|
||||||
pub async fn delete_tag_route(
|
|
||||||
db: DatabaseGuard,
|
|
||||||
body: Ruma<delete_tag::v3::IncomingRequest>,
|
|
||||||
) -> Result<delete_tag::v3::Response> {
|
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
|
||||||
|
|
||||||
let mut tags_event = db
|
|
||||||
.account_data
|
|
||||||
.get(
|
|
||||||
Some(&body.room_id),
|
|
||||||
sender_user,
|
|
||||||
RoomAccountDataEventType::Tag,
|
|
||||||
)?
|
|
||||||
.unwrap_or_else(|| TagEvent {
|
|
||||||
content: TagEventContent {
|
|
||||||
tags: BTreeMap::new(),
|
|
||||||
},
|
|
||||||
});
|
|
||||||
tags_event.content.tags.remove(&body.tag.clone().into());
|
|
||||||
|
|
||||||
db.account_data.update(
|
|
||||||
Some(&body.room_id),
|
|
||||||
sender_user,
|
|
||||||
RoomAccountDataEventType::Tag,
|
|
||||||
&tags_event,
|
|
||||||
&db.globals,
|
|
||||||
)?;
|
|
||||||
|
|
||||||
db.flush()?;
|
|
||||||
|
|
||||||
Ok(delete_tag::v3::Response {})
|
|
||||||
}
|
|
||||||
|
|
||||||
/// # `GET /_matrix/client/r0/user/{userId}/rooms/{roomId}/tags`
|
|
||||||
///
|
|
||||||
/// Returns tags on the room.
|
|
||||||
///
|
|
||||||
/// - Gets the tag event of the room account data.
|
|
||||||
pub async fn get_tags_route(
|
|
||||||
db: DatabaseGuard,
|
|
||||||
body: Ruma<get_tags::v3::IncomingRequest>,
|
|
||||||
) -> Result<get_tags::v3::Response> {
|
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
|
||||||
|
|
||||||
Ok(get_tags::v3::Response {
|
|
||||||
tags: db
|
|
||||||
.account_data
|
|
||||||
.get(
|
|
||||||
Some(&body.room_id),
|
|
||||||
sender_user,
|
|
||||||
RoomAccountDataEventType::Tag,
|
|
||||||
)?
|
|
||||||
.unwrap_or_else(|| TagEvent {
|
|
||||||
content: TagEventContent {
|
|
||||||
tags: BTreeMap::new(),
|
|
||||||
},
|
|
||||||
})
|
|
||||||
.content
|
|
||||||
.tags,
|
|
||||||
})
|
|
||||||
}
|
|
|
@ -4,7 +4,7 @@ use std::{
|
||||||
net::{IpAddr, Ipv4Addr},
|
net::{IpAddr, Ipv4Addr},
|
||||||
};
|
};
|
||||||
|
|
||||||
use ruma::{RoomVersionId, ServerName};
|
use ruma::{OwnedServerName, RoomVersionId};
|
||||||
use serde::{de::IgnoredAny, Deserialize};
|
use serde::{de::IgnoredAny, Deserialize};
|
||||||
use tracing::warn;
|
use tracing::warn;
|
||||||
|
|
||||||
|
@ -20,7 +20,7 @@ pub struct Config {
|
||||||
pub port: u16,
|
pub port: u16,
|
||||||
pub tls: Option<TlsConfig>,
|
pub tls: Option<TlsConfig>,
|
||||||
|
|
||||||
pub server_name: Box<ServerName>,
|
pub server_name: OwnedServerName,
|
||||||
#[serde(default = "default_database_backend")]
|
#[serde(default = "default_database_backend")]
|
||||||
pub database_backend: String,
|
pub database_backend: String,
|
||||||
pub database_path: String,
|
pub database_path: String,
|
||||||
|
@ -28,6 +28,8 @@ pub struct Config {
|
||||||
pub db_cache_capacity_mb: f64,
|
pub db_cache_capacity_mb: f64,
|
||||||
#[serde(default = "true_fn")]
|
#[serde(default = "true_fn")]
|
||||||
pub enable_lightning_bolt: bool,
|
pub enable_lightning_bolt: bool,
|
||||||
|
#[serde(default = "true_fn")]
|
||||||
|
pub allow_check_for_updates: bool,
|
||||||
#[serde(default = "default_conduit_cache_capacity_modifier")]
|
#[serde(default = "default_conduit_cache_capacity_modifier")]
|
||||||
pub conduit_cache_capacity_modifier: f64,
|
pub conduit_cache_capacity_modifier: f64,
|
||||||
#[serde(default = "default_rocksdb_max_open_files")]
|
#[serde(default = "default_rocksdb_max_open_files")]
|
||||||
|
@ -40,8 +42,11 @@ pub struct Config {
|
||||||
pub max_request_size: u32,
|
pub max_request_size: u32,
|
||||||
#[serde(default = "default_max_concurrent_requests")]
|
#[serde(default = "default_max_concurrent_requests")]
|
||||||
pub max_concurrent_requests: u16,
|
pub max_concurrent_requests: u16,
|
||||||
|
#[serde(default = "default_max_fetch_prev_events")]
|
||||||
|
pub max_fetch_prev_events: u16,
|
||||||
#[serde(default = "false_fn")]
|
#[serde(default = "false_fn")]
|
||||||
pub allow_registration: bool,
|
pub allow_registration: bool,
|
||||||
|
pub registration_token: Option<String>,
|
||||||
#[serde(default = "true_fn")]
|
#[serde(default = "true_fn")]
|
||||||
pub allow_encryption: bool,
|
pub allow_encryption: bool,
|
||||||
#[serde(default = "false_fn")]
|
#[serde(default = "false_fn")]
|
||||||
|
@ -52,6 +57,7 @@ pub struct Config {
|
||||||
pub allow_unstable_room_versions: bool,
|
pub allow_unstable_room_versions: bool,
|
||||||
#[serde(default = "default_default_room_version")]
|
#[serde(default = "default_default_room_version")]
|
||||||
pub default_room_version: RoomVersionId,
|
pub default_room_version: RoomVersionId,
|
||||||
|
pub well_known_client: Option<String>,
|
||||||
#[serde(default = "false_fn")]
|
#[serde(default = "false_fn")]
|
||||||
pub allow_jaeger: bool,
|
pub allow_jaeger: bool,
|
||||||
#[serde(default = "false_fn")]
|
#[serde(default = "false_fn")]
|
||||||
|
@ -59,8 +65,8 @@ pub struct Config {
|
||||||
#[serde(default)]
|
#[serde(default)]
|
||||||
pub proxy: ProxyConfig,
|
pub proxy: ProxyConfig,
|
||||||
pub jwt_secret: Option<String>,
|
pub jwt_secret: Option<String>,
|
||||||
#[serde(default = "Vec::new")]
|
#[serde(default = "default_trusted_servers")]
|
||||||
pub trusted_servers: Vec<Box<ServerName>>,
|
pub trusted_servers: Vec<OwnedServerName>,
|
||||||
#[serde(default = "default_log")]
|
#[serde(default = "default_log")]
|
||||||
pub log: String,
|
pub log: String,
|
||||||
#[serde(default)]
|
#[serde(default)]
|
||||||
|
@ -183,7 +189,7 @@ impl fmt::Display for Config {
|
||||||
("Turn TTL", &self.turn_ttl.to_string()),
|
("Turn TTL", &self.turn_ttl.to_string()),
|
||||||
("Turn URIs", {
|
("Turn URIs", {
|
||||||
let mut lst = vec![];
|
let mut lst = vec![];
|
||||||
for item in self.turn_uris.to_vec().into_iter().enumerate() {
|
for item in self.turn_uris.iter().cloned().enumerate() {
|
||||||
let (_, uri): (usize, String) = item;
|
let (_, uri): (usize, String) = item;
|
||||||
lst.push(uri);
|
lst.push(uri);
|
||||||
}
|
}
|
||||||
|
@ -191,13 +197,13 @@ impl fmt::Display for Config {
|
||||||
}),
|
}),
|
||||||
];
|
];
|
||||||
|
|
||||||
let mut msg: String = "Active config values:\n\n".to_string();
|
let mut msg: String = "Active config values:\n\n".to_owned();
|
||||||
|
|
||||||
for line in lines.into_iter().enumerate() {
|
for line in lines.into_iter().enumerate() {
|
||||||
msg += &format!("{}: {}\n", line.1 .0, line.1 .1);
|
msg += &format!("{}: {}\n", line.1 .0, line.1 .1);
|
||||||
}
|
}
|
||||||
|
|
||||||
write!(f, "{}", msg)
|
write!(f, "{msg}")
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -222,7 +228,7 @@ fn default_database_backend() -> String {
|
||||||
}
|
}
|
||||||
|
|
||||||
fn default_db_cache_capacity_mb() -> f64 {
|
fn default_db_cache_capacity_mb() -> f64 {
|
||||||
10.0
|
300.0
|
||||||
}
|
}
|
||||||
|
|
||||||
fn default_conduit_cache_capacity_modifier() -> f64 {
|
fn default_conduit_cache_capacity_modifier() -> f64 {
|
||||||
|
@ -230,7 +236,7 @@ fn default_conduit_cache_capacity_modifier() -> f64 {
|
||||||
}
|
}
|
||||||
|
|
||||||
fn default_rocksdb_max_open_files() -> i32 {
|
fn default_rocksdb_max_open_files() -> i32 {
|
||||||
20
|
1000
|
||||||
}
|
}
|
||||||
|
|
||||||
fn default_pdu_cache_capacity() -> u32 {
|
fn default_pdu_cache_capacity() -> u32 {
|
||||||
|
@ -238,7 +244,7 @@ fn default_pdu_cache_capacity() -> u32 {
|
||||||
}
|
}
|
||||||
|
|
||||||
fn default_cleanup_second_interval() -> u32 {
|
fn default_cleanup_second_interval() -> u32 {
|
||||||
1 * 60 // every minute
|
60 // every minute
|
||||||
}
|
}
|
||||||
|
|
||||||
fn default_max_request_size() -> u32 {
|
fn default_max_request_size() -> u32 {
|
||||||
|
@ -249,8 +255,16 @@ fn default_max_concurrent_requests() -> u16 {
|
||||||
100
|
100
|
||||||
}
|
}
|
||||||
|
|
||||||
|
fn default_max_fetch_prev_events() -> u16 {
|
||||||
|
100_u16
|
||||||
|
}
|
||||||
|
|
||||||
|
fn default_trusted_servers() -> Vec<OwnedServerName> {
|
||||||
|
vec![OwnedServerName::try_from("matrix.org").unwrap()]
|
||||||
|
}
|
||||||
|
|
||||||
fn default_log() -> String {
|
fn default_log() -> String {
|
||||||
"info,state_res=warn,_=off,sled=off".to_owned()
|
"warn,state_res=warn,_=off,sled=off".to_owned()
|
||||||
}
|
}
|
||||||
|
|
||||||
fn default_turn_ttl() -> u64 {
|
fn default_turn_ttl() -> u64 {
|
||||||
|
@ -258,6 +272,6 @@ fn default_turn_ttl() -> u64 {
|
||||||
}
|
}
|
||||||
|
|
||||||
// I know, it's a great name
|
// I know, it's a great name
|
||||||
fn default_default_room_version() -> RoomVersionId {
|
pub fn default_default_room_version() -> RoomVersionId {
|
||||||
RoomVersionId::V6
|
RoomVersionId::V9
|
||||||
}
|
}
|
|
@ -29,7 +29,9 @@ use crate::Result;
|
||||||
/// would be used for `ordinary.onion`, `matrix.myspecial.onion`, but not `hello.myspecial.onion`.
|
/// would be used for `ordinary.onion`, `matrix.myspecial.onion`, but not `hello.myspecial.onion`.
|
||||||
#[derive(Clone, Debug, Deserialize)]
|
#[derive(Clone, Debug, Deserialize)]
|
||||||
#[serde(rename_all = "snake_case")]
|
#[serde(rename_all = "snake_case")]
|
||||||
|
#[derive(Default)]
|
||||||
pub enum ProxyConfig {
|
pub enum ProxyConfig {
|
||||||
|
#[default]
|
||||||
None,
|
None,
|
||||||
Global {
|
Global {
|
||||||
#[serde(deserialize_with = "crate::utils::deserialize_from_str")]
|
#[serde(deserialize_with = "crate::utils::deserialize_from_str")]
|
||||||
|
@ -48,11 +50,6 @@ impl ProxyConfig {
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
impl Default for ProxyConfig {
|
|
||||||
fn default() -> Self {
|
|
||||||
ProxyConfig::None
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Clone, Debug, Deserialize)]
|
#[derive(Clone, Debug, Deserialize)]
|
||||||
pub struct PartialProxyConfig {
|
pub struct PartialProxyConfig {
|
||||||
|
|
1017
src/database.rs
1017
src/database.rs
File diff suppressed because it is too large
Load diff
|
@ -26,11 +26,11 @@ pub mod persy;
|
||||||
))]
|
))]
|
||||||
pub mod watchers;
|
pub mod watchers;
|
||||||
|
|
||||||
pub trait DatabaseEngine: Send + Sync {
|
pub trait KeyValueDatabaseEngine: Send + Sync {
|
||||||
fn open(config: &Config) -> Result<Self>
|
fn open(config: &Config) -> Result<Self>
|
||||||
where
|
where
|
||||||
Self: Sized;
|
Self: Sized;
|
||||||
fn open_tree(&self, name: &'static str) -> Result<Arc<dyn Tree>>;
|
fn open_tree(&self, name: &'static str) -> Result<Arc<dyn KvTree>>;
|
||||||
fn flush(&self) -> Result<()>;
|
fn flush(&self) -> Result<()>;
|
||||||
fn cleanup(&self) -> Result<()> {
|
fn cleanup(&self) -> Result<()> {
|
||||||
Ok(())
|
Ok(())
|
||||||
|
@ -38,9 +38,10 @@ pub trait DatabaseEngine: Send + Sync {
|
||||||
fn memory_usage(&self) -> Result<String> {
|
fn memory_usage(&self) -> Result<String> {
|
||||||
Ok("Current database engine does not support memory usage reporting.".to_owned())
|
Ok("Current database engine does not support memory usage reporting.".to_owned())
|
||||||
}
|
}
|
||||||
|
fn clear_caches(&self) {}
|
||||||
}
|
}
|
||||||
|
|
||||||
pub trait Tree: Send + Sync {
|
pub trait KvTree: Send + Sync {
|
||||||
fn get(&self, key: &[u8]) -> Result<Option<Vec<u8>>>;
|
fn get(&self, key: &[u8]) -> Result<Option<Vec<u8>>>;
|
||||||
|
|
||||||
fn insert(&self, key: &[u8], value: &[u8]) -> Result<()>;
|
fn insert(&self, key: &[u8], value: &[u8]) -> Result<()>;
|
||||||
|
|
|
@ -1,6 +1,6 @@
|
||||||
use crate::{
|
use crate::{
|
||||||
database::{
|
database::{
|
||||||
abstraction::{watchers::Watchers, DatabaseEngine, Tree},
|
abstraction::{watchers::Watchers, KeyValueDatabaseEngine, KvTree},
|
||||||
Config,
|
Config,
|
||||||
},
|
},
|
||||||
Result,
|
Result,
|
||||||
|
@ -15,7 +15,7 @@ pub struct Engine {
|
||||||
persy: Persy,
|
persy: Persy,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl DatabaseEngine for Arc<Engine> {
|
impl KeyValueDatabaseEngine for Arc<Engine> {
|
||||||
fn open(config: &Config) -> Result<Self> {
|
fn open(config: &Config) -> Result<Self> {
|
||||||
let mut cfg = persy::Config::new();
|
let mut cfg = persy::Config::new();
|
||||||
cfg.change_cache_size((config.db_cache_capacity_mb * 1024.0 * 1024.0) as u64);
|
cfg.change_cache_size((config.db_cache_capacity_mb * 1024.0 * 1024.0) as u64);
|
||||||
|
@ -27,7 +27,7 @@ impl DatabaseEngine for Arc<Engine> {
|
||||||
Ok(Arc::new(Engine { persy }))
|
Ok(Arc::new(Engine { persy }))
|
||||||
}
|
}
|
||||||
|
|
||||||
fn open_tree(&self, name: &'static str) -> Result<Arc<dyn Tree>> {
|
fn open_tree(&self, name: &'static str) -> Result<Arc<dyn KvTree>> {
|
||||||
// Create if it doesn't exist
|
// Create if it doesn't exist
|
||||||
if !self.persy.exists_index(name)? {
|
if !self.persy.exists_index(name)? {
|
||||||
let mut tx = self.persy.begin()?;
|
let mut tx = self.persy.begin()?;
|
||||||
|
@ -61,7 +61,7 @@ impl PersyTree {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl Tree for PersyTree {
|
impl KvTree for PersyTree {
|
||||||
fn get(&self, key: &[u8]) -> Result<Option<Vec<u8>>> {
|
fn get(&self, key: &[u8]) -> Result<Option<Vec<u8>>> {
|
||||||
let result = self
|
let result = self
|
||||||
.persy
|
.persy
|
||||||
|
@ -116,7 +116,7 @@ impl Tree for PersyTree {
|
||||||
match iter {
|
match iter {
|
||||||
Ok(iter) => Box::new(iter.filter_map(|(k, v)| {
|
Ok(iter) => Box::new(iter.filter_map(|(k, v)| {
|
||||||
v.into_iter()
|
v.into_iter()
|
||||||
.map(|val| ((*k).to_owned().into(), (*val).to_owned().into()))
|
.map(|val| ((*k).to_owned(), (*val).to_owned()))
|
||||||
.next()
|
.next()
|
||||||
})),
|
})),
|
||||||
Err(e) => {
|
Err(e) => {
|
||||||
|
@ -142,7 +142,7 @@ impl Tree for PersyTree {
|
||||||
Ok(iter) => {
|
Ok(iter) => {
|
||||||
let map = iter.filter_map(|(k, v)| {
|
let map = iter.filter_map(|(k, v)| {
|
||||||
v.into_iter()
|
v.into_iter()
|
||||||
.map(|val| ((*k).to_owned().into(), (*val).to_owned().into()))
|
.map(|val| ((*k).to_owned(), (*val).to_owned()))
|
||||||
.next()
|
.next()
|
||||||
});
|
});
|
||||||
if backwards {
|
if backwards {
|
||||||
|
@ -179,7 +179,7 @@ impl Tree for PersyTree {
|
||||||
iter.take_while(move |(k, _)| (*k).starts_with(&owned_prefix))
|
iter.take_while(move |(k, _)| (*k).starts_with(&owned_prefix))
|
||||||
.filter_map(|(k, v)| {
|
.filter_map(|(k, v)| {
|
||||||
v.into_iter()
|
v.into_iter()
|
||||||
.map(|val| ((*k).to_owned().into(), (*val).to_owned().into()))
|
.map(|val| ((*k).to_owned(), (*val).to_owned()))
|
||||||
.next()
|
.next()
|
||||||
}),
|
}),
|
||||||
)
|
)
|
||||||
|
|
|
@ -1,4 +1,4 @@
|
||||||
use super::{super::Config, watchers::Watchers, DatabaseEngine, Tree};
|
use super::{super::Config, watchers::Watchers, KeyValueDatabaseEngine, KvTree};
|
||||||
use crate::{utils, Result};
|
use crate::{utils, Result};
|
||||||
use std::{
|
use std::{
|
||||||
future::Future,
|
future::Future,
|
||||||
|
@ -45,16 +45,27 @@ fn db_options(max_open_files: i32, rocksdb_cache: &rocksdb::Cache) -> rocksdb::O
|
||||||
db_opts.set_compaction_style(rocksdb::DBCompactionStyle::Level);
|
db_opts.set_compaction_style(rocksdb::DBCompactionStyle::Level);
|
||||||
db_opts.optimize_level_style_compaction(10 * 1024 * 1024);
|
db_opts.optimize_level_style_compaction(10 * 1024 * 1024);
|
||||||
|
|
||||||
|
// https://github.com/facebook/rocksdb/wiki/Setup-Options-and-Basic-Tuning
|
||||||
|
db_opts.set_max_background_jobs(6);
|
||||||
|
db_opts.set_bytes_per_sync(1048576);
|
||||||
|
|
||||||
|
// https://github.com/facebook/rocksdb/wiki/WAL-Recovery-Modes#ktoleratecorruptedtailrecords
|
||||||
|
//
|
||||||
|
// Unclean shutdowns of a Matrix homeserver are likely to be fine when
|
||||||
|
// recovered in this manner as it's likely any lost information will be
|
||||||
|
// restored via federation.
|
||||||
|
db_opts.set_wal_recovery_mode(rocksdb::DBRecoveryMode::TolerateCorruptedTailRecords);
|
||||||
|
|
||||||
let prefix_extractor = rocksdb::SliceTransform::create_fixed_prefix(1);
|
let prefix_extractor = rocksdb::SliceTransform::create_fixed_prefix(1);
|
||||||
db_opts.set_prefix_extractor(prefix_extractor);
|
db_opts.set_prefix_extractor(prefix_extractor);
|
||||||
|
|
||||||
db_opts
|
db_opts
|
||||||
}
|
}
|
||||||
|
|
||||||
impl DatabaseEngine for Arc<Engine> {
|
impl KeyValueDatabaseEngine for Arc<Engine> {
|
||||||
fn open(config: &Config) -> Result<Self> {
|
fn open(config: &Config) -> Result<Self> {
|
||||||
let cache_capacity_bytes = (config.db_cache_capacity_mb * 1024.0 * 1024.0) as usize;
|
let cache_capacity_bytes = (config.db_cache_capacity_mb * 1024.0 * 1024.0) as usize;
|
||||||
let rocksdb_cache = rocksdb::Cache::new_lru_cache(cache_capacity_bytes).unwrap();
|
let rocksdb_cache = rocksdb::Cache::new_lru_cache(cache_capacity_bytes);
|
||||||
|
|
||||||
let db_opts = db_options(config.rocksdb_max_open_files, &rocksdb_cache);
|
let db_opts = db_options(config.rocksdb_max_open_files, &rocksdb_cache);
|
||||||
|
|
||||||
|
@ -83,7 +94,7 @@ impl DatabaseEngine for Arc<Engine> {
|
||||||
}))
|
}))
|
||||||
}
|
}
|
||||||
|
|
||||||
fn open_tree(&self, name: &'static str) -> Result<Arc<dyn Tree>> {
|
fn open_tree(&self, name: &'static str) -> Result<Arc<dyn KvTree>> {
|
||||||
if !self.old_cfs.contains(&name.to_owned()) {
|
if !self.old_cfs.contains(&name.to_owned()) {
|
||||||
// Create if it didn't exist
|
// Create if it didn't exist
|
||||||
let _ = self
|
let _ = self
|
||||||
|
@ -121,6 +132,8 @@ impl DatabaseEngine for Arc<Engine> {
|
||||||
self.cache.get_pinned_usage() as f64 / 1024.0 / 1024.0,
|
self.cache.get_pinned_usage() as f64 / 1024.0 / 1024.0,
|
||||||
))
|
))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
fn clear_caches(&self) {}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl RocksDbEngineTree<'_> {
|
impl RocksDbEngineTree<'_> {
|
||||||
|
@ -129,7 +142,7 @@ impl RocksDbEngineTree<'_> {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl Tree for RocksDbEngineTree<'_> {
|
impl KvTree for RocksDbEngineTree<'_> {
|
||||||
fn get(&self, key: &[u8]) -> Result<Option<Vec<u8>>> {
|
fn get(&self, key: &[u8]) -> Result<Option<Vec<u8>>> {
|
||||||
Ok(self.db.rocks.get_cf(&self.cf(), key)?)
|
Ok(self.db.rocks.get_cf(&self.cf(), key)?)
|
||||||
}
|
}
|
||||||
|
@ -161,6 +174,7 @@ impl Tree for RocksDbEngineTree<'_> {
|
||||||
self.db
|
self.db
|
||||||
.rocks
|
.rocks
|
||||||
.iterator_cf(&self.cf(), rocksdb::IteratorMode::Start)
|
.iterator_cf(&self.cf(), rocksdb::IteratorMode::Start)
|
||||||
|
.map(|r| r.unwrap())
|
||||||
.map(|(k, v)| (Vec::from(k), Vec::from(v))),
|
.map(|(k, v)| (Vec::from(k), Vec::from(v))),
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
@ -184,6 +198,7 @@ impl Tree for RocksDbEngineTree<'_> {
|
||||||
},
|
},
|
||||||
),
|
),
|
||||||
)
|
)
|
||||||
|
.map(|r| r.unwrap())
|
||||||
.map(|(k, v)| (Vec::from(k), Vec::from(v))),
|
.map(|(k, v)| (Vec::from(k), Vec::from(v))),
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
@ -191,7 +206,7 @@ impl Tree for RocksDbEngineTree<'_> {
|
||||||
fn increment(&self, key: &[u8]) -> Result<Vec<u8>> {
|
fn increment(&self, key: &[u8]) -> Result<Vec<u8>> {
|
||||||
let lock = self.write_lock.write().unwrap();
|
let lock = self.write_lock.write().unwrap();
|
||||||
|
|
||||||
let old = self.db.rocks.get_cf(&self.cf(), &key)?;
|
let old = self.db.rocks.get_cf(&self.cf(), key)?;
|
||||||
let new = utils::increment(old.as_deref()).unwrap();
|
let new = utils::increment(old.as_deref()).unwrap();
|
||||||
self.db.rocks.put_cf(&self.cf(), key, &new)?;
|
self.db.rocks.put_cf(&self.cf(), key, &new)?;
|
||||||
|
|
||||||
|
@ -224,6 +239,7 @@ impl Tree for RocksDbEngineTree<'_> {
|
||||||
&self.cf(),
|
&self.cf(),
|
||||||
rocksdb::IteratorMode::From(&prefix, rocksdb::Direction::Forward),
|
rocksdb::IteratorMode::From(&prefix, rocksdb::Direction::Forward),
|
||||||
)
|
)
|
||||||
|
.map(|r| r.unwrap())
|
||||||
.map(|(k, v)| (Vec::from(k), Vec::from(v)))
|
.map(|(k, v)| (Vec::from(k), Vec::from(v)))
|
||||||
.take_while(move |(k, _)| k.starts_with(&prefix)),
|
.take_while(move |(k, _)| k.starts_with(&prefix)),
|
||||||
)
|
)
|
||||||
|
|
|
@ -1,4 +1,4 @@
|
||||||
use super::{watchers::Watchers, DatabaseEngine, Tree};
|
use super::{watchers::Watchers, KeyValueDatabaseEngine, KvTree};
|
||||||
use crate::{database::Config, Result};
|
use crate::{database::Config, Result};
|
||||||
use parking_lot::{Mutex, MutexGuard};
|
use parking_lot::{Mutex, MutexGuard};
|
||||||
use rusqlite::{Connection, DatabaseName::Main, OptionalExtension};
|
use rusqlite::{Connection, DatabaseName::Main, OptionalExtension};
|
||||||
|
@ -33,7 +33,7 @@ impl Iterator for PreparedStatementIterator<'_> {
|
||||||
struct NonAliasingBox<T>(*mut T);
|
struct NonAliasingBox<T>(*mut T);
|
||||||
impl<T> Drop for NonAliasingBox<T> {
|
impl<T> Drop for NonAliasingBox<T> {
|
||||||
fn drop(&mut self) {
|
fn drop(&mut self) {
|
||||||
unsafe { Box::from_raw(self.0) };
|
drop(unsafe { Box::from_raw(self.0) });
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -48,13 +48,13 @@ pub struct Engine {
|
||||||
|
|
||||||
impl Engine {
|
impl Engine {
|
||||||
fn prepare_conn(path: &Path, cache_size_kb: u32) -> Result<Connection> {
|
fn prepare_conn(path: &Path, cache_size_kb: u32) -> Result<Connection> {
|
||||||
let conn = Connection::open(&path)?;
|
let conn = Connection::open(path)?;
|
||||||
|
|
||||||
conn.pragma_update(Some(Main), "page_size", &2048)?;
|
conn.pragma_update(Some(Main), "page_size", 2048)?;
|
||||||
conn.pragma_update(Some(Main), "journal_mode", &"WAL")?;
|
conn.pragma_update(Some(Main), "journal_mode", "WAL")?;
|
||||||
conn.pragma_update(Some(Main), "synchronous", &"NORMAL")?;
|
conn.pragma_update(Some(Main), "synchronous", "NORMAL")?;
|
||||||
conn.pragma_update(Some(Main), "cache_size", &(-i64::from(cache_size_kb)))?;
|
conn.pragma_update(Some(Main), "cache_size", -i64::from(cache_size_kb))?;
|
||||||
conn.pragma_update(Some(Main), "wal_autocheckpoint", &0)?;
|
conn.pragma_update(Some(Main), "wal_autocheckpoint", 0)?;
|
||||||
|
|
||||||
Ok(conn)
|
Ok(conn)
|
||||||
}
|
}
|
||||||
|
@ -75,12 +75,12 @@ impl Engine {
|
||||||
|
|
||||||
pub fn flush_wal(self: &Arc<Self>) -> Result<()> {
|
pub fn flush_wal(self: &Arc<Self>) -> Result<()> {
|
||||||
self.write_lock()
|
self.write_lock()
|
||||||
.pragma_update(Some(Main), "wal_checkpoint", &"RESTART")?;
|
.pragma_update(Some(Main), "wal_checkpoint", "RESTART")?;
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl DatabaseEngine for Arc<Engine> {
|
impl KeyValueDatabaseEngine for Arc<Engine> {
|
||||||
fn open(config: &Config) -> Result<Self> {
|
fn open(config: &Config) -> Result<Self> {
|
||||||
let path = Path::new(&config.database_path).join("conduit.db");
|
let path = Path::new(&config.database_path).join("conduit.db");
|
||||||
|
|
||||||
|
@ -105,8 +105,8 @@ impl DatabaseEngine for Arc<Engine> {
|
||||||
Ok(arc)
|
Ok(arc)
|
||||||
}
|
}
|
||||||
|
|
||||||
fn open_tree(&self, name: &str) -> Result<Arc<dyn Tree>> {
|
fn open_tree(&self, name: &str) -> Result<Arc<dyn KvTree>> {
|
||||||
self.write_lock().execute(&format!("CREATE TABLE IF NOT EXISTS {} ( \"key\" BLOB PRIMARY KEY, \"value\" BLOB NOT NULL )", name), [])?;
|
self.write_lock().execute(&format!("CREATE TABLE IF NOT EXISTS {name} ( \"key\" BLOB PRIMARY KEY, \"value\" BLOB NOT NULL )"), [])?;
|
||||||
|
|
||||||
Ok(Arc::new(SqliteTable {
|
Ok(Arc::new(SqliteTable {
|
||||||
engine: Arc::clone(self),
|
engine: Arc::clone(self),
|
||||||
|
@ -135,7 +135,6 @@ type TupleOfBytes = (Vec<u8>, Vec<u8>);
|
||||||
|
|
||||||
impl SqliteTable {
|
impl SqliteTable {
|
||||||
fn get_with_guard(&self, guard: &Connection, key: &[u8]) -> Result<Option<Vec<u8>>> {
|
fn get_with_guard(&self, guard: &Connection, key: &[u8]) -> Result<Option<Vec<u8>>> {
|
||||||
//dbg!(&self.name);
|
|
||||||
Ok(guard
|
Ok(guard
|
||||||
.prepare(format!("SELECT value FROM {} WHERE key = ?", self.name).as_str())?
|
.prepare(format!("SELECT value FROM {} WHERE key = ?", self.name).as_str())?
|
||||||
.query_row([key], |row| row.get(0))
|
.query_row([key], |row| row.get(0))
|
||||||
|
@ -143,7 +142,6 @@ impl SqliteTable {
|
||||||
}
|
}
|
||||||
|
|
||||||
fn insert_with_guard(&self, guard: &Connection, key: &[u8], value: &[u8]) -> Result<()> {
|
fn insert_with_guard(&self, guard: &Connection, key: &[u8], value: &[u8]) -> Result<()> {
|
||||||
//dbg!(&self.name);
|
|
||||||
guard.execute(
|
guard.execute(
|
||||||
format!(
|
format!(
|
||||||
"INSERT OR REPLACE INTO {} (key, value) VALUES (?, ?)",
|
"INSERT OR REPLACE INTO {} (key, value) VALUES (?, ?)",
|
||||||
|
@ -176,10 +174,7 @@ impl SqliteTable {
|
||||||
statement
|
statement
|
||||||
.query_map([], |row| Ok((row.get_unwrap(0), row.get_unwrap(1))))
|
.query_map([], |row| Ok((row.get_unwrap(0), row.get_unwrap(1))))
|
||||||
.unwrap()
|
.unwrap()
|
||||||
.map(move |r| {
|
.map(move |r| r.unwrap()),
|
||||||
//dbg!(&name);
|
|
||||||
r.unwrap()
|
|
||||||
}),
|
|
||||||
);
|
);
|
||||||
|
|
||||||
Box::new(PreparedStatementIterator {
|
Box::new(PreparedStatementIterator {
|
||||||
|
@ -189,7 +184,7 @@ impl SqliteTable {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl Tree for SqliteTable {
|
impl KvTree for SqliteTable {
|
||||||
fn get(&self, key: &[u8]) -> Result<Option<Vec<u8>>> {
|
fn get(&self, key: &[u8]) -> Result<Option<Vec<u8>>> {
|
||||||
self.get_with_guard(self.engine.read_lock(), key)
|
self.get_with_guard(self.engine.read_lock(), key)
|
||||||
}
|
}
|
||||||
|
@ -276,10 +271,7 @@ impl Tree for SqliteTable {
|
||||||
statement
|
statement
|
||||||
.query_map([from], |row| Ok((row.get_unwrap(0), row.get_unwrap(1))))
|
.query_map([from], |row| Ok((row.get_unwrap(0), row.get_unwrap(1))))
|
||||||
.unwrap()
|
.unwrap()
|
||||||
.map(move |r| {
|
.map(move |r| r.unwrap()),
|
||||||
//dbg!(&name);
|
|
||||||
r.unwrap()
|
|
||||||
}),
|
|
||||||
);
|
);
|
||||||
Box::new(PreparedStatementIterator {
|
Box::new(PreparedStatementIterator {
|
||||||
iterator,
|
iterator,
|
||||||
|
@ -301,10 +293,7 @@ impl Tree for SqliteTable {
|
||||||
statement
|
statement
|
||||||
.query_map([from], |row| Ok((row.get_unwrap(0), row.get_unwrap(1))))
|
.query_map([from], |row| Ok((row.get_unwrap(0), row.get_unwrap(1))))
|
||||||
.unwrap()
|
.unwrap()
|
||||||
.map(move |r| {
|
.map(move |r| r.unwrap()),
|
||||||
//dbg!(&name);
|
|
||||||
r.unwrap()
|
|
||||||
}),
|
|
||||||
);
|
);
|
||||||
|
|
||||||
Box::new(PreparedStatementIterator {
|
Box::new(PreparedStatementIterator {
|
||||||
|
|
|
@ -8,6 +8,7 @@ use tokio::sync::watch;
|
||||||
|
|
||||||
#[derive(Default)]
|
#[derive(Default)]
|
||||||
pub(super) struct Watchers {
|
pub(super) struct Watchers {
|
||||||
|
#[allow(clippy::type_complexity)]
|
||||||
watchers: RwLock<HashMap<Vec<u8>, (watch::Sender<()>, watch::Receiver<()>)>>,
|
watchers: RwLock<HashMap<Vec<u8>, (watch::Sender<()>, watch::Receiver<()>)>>,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
File diff suppressed because it is too large
Load diff
|
@ -1,30 +1,23 @@
|
||||||
use crate::{utils, Error, Result};
|
use std::collections::HashMap;
|
||||||
|
|
||||||
use ruma::{
|
use ruma::{
|
||||||
api::client::error::ErrorKind,
|
api::client::error::ErrorKind,
|
||||||
events::{AnyEphemeralRoomEvent, RoomAccountDataEventType},
|
events::{AnyEphemeralRoomEvent, RoomAccountDataEventType},
|
||||||
serde::Raw,
|
serde::Raw,
|
||||||
RoomId, UserId,
|
RoomId, UserId,
|
||||||
};
|
};
|
||||||
use serde::{de::DeserializeOwned, Serialize};
|
|
||||||
use std::{collections::HashMap, sync::Arc};
|
|
||||||
|
|
||||||
use super::abstraction::Tree;
|
use crate::{database::KeyValueDatabase, service, services, utils, Error, Result};
|
||||||
|
|
||||||
pub struct AccountData {
|
impl service::account_data::Data for KeyValueDatabase {
|
||||||
pub(super) roomuserdataid_accountdata: Arc<dyn Tree>, // RoomUserDataId = Room + User + Count + Type
|
|
||||||
pub(super) roomusertype_roomuserdataid: Arc<dyn Tree>, // RoomUserType = Room + User + Type
|
|
||||||
}
|
|
||||||
|
|
||||||
impl AccountData {
|
|
||||||
/// Places one event in the account data of the user and removes the previous entry.
|
/// Places one event in the account data of the user and removes the previous entry.
|
||||||
#[tracing::instrument(skip(self, room_id, user_id, event_type, data, globals))]
|
#[tracing::instrument(skip(self, room_id, user_id, event_type, data))]
|
||||||
pub fn update<T: Serialize>(
|
fn update(
|
||||||
&self,
|
&self,
|
||||||
room_id: Option<&RoomId>,
|
room_id: Option<&RoomId>,
|
||||||
user_id: &UserId,
|
user_id: &UserId,
|
||||||
event_type: RoomAccountDataEventType,
|
event_type: RoomAccountDataEventType,
|
||||||
data: &T,
|
data: &serde_json::Value,
|
||||||
globals: &super::globals::Globals,
|
|
||||||
) -> Result<()> {
|
) -> Result<()> {
|
||||||
let mut prefix = room_id
|
let mut prefix = room_id
|
||||||
.map(|r| r.to_string())
|
.map(|r| r.to_string())
|
||||||
|
@ -36,15 +29,14 @@ impl AccountData {
|
||||||
prefix.push(0xff);
|
prefix.push(0xff);
|
||||||
|
|
||||||
let mut roomuserdataid = prefix.clone();
|
let mut roomuserdataid = prefix.clone();
|
||||||
roomuserdataid.extend_from_slice(&globals.next_count()?.to_be_bytes());
|
roomuserdataid.extend_from_slice(&services().globals.next_count()?.to_be_bytes());
|
||||||
roomuserdataid.push(0xff);
|
roomuserdataid.push(0xff);
|
||||||
roomuserdataid.extend_from_slice(event_type.to_string().as_bytes());
|
roomuserdataid.extend_from_slice(event_type.to_string().as_bytes());
|
||||||
|
|
||||||
let mut key = prefix;
|
let mut key = prefix;
|
||||||
key.extend_from_slice(event_type.to_string().as_bytes());
|
key.extend_from_slice(event_type.to_string().as_bytes());
|
||||||
|
|
||||||
let json = serde_json::to_value(data).expect("all types here can be serialized"); // TODO: maybe add error handling
|
if data.get("type").is_none() || data.get("content").is_none() {
|
||||||
if json.get("type").is_none() || json.get("content").is_none() {
|
|
||||||
return Err(Error::BadRequest(
|
return Err(Error::BadRequest(
|
||||||
ErrorKind::InvalidParam,
|
ErrorKind::InvalidParam,
|
||||||
"Account data doesn't have all required fields.",
|
"Account data doesn't have all required fields.",
|
||||||
|
@ -53,7 +45,7 @@ impl AccountData {
|
||||||
|
|
||||||
self.roomuserdataid_accountdata.insert(
|
self.roomuserdataid_accountdata.insert(
|
||||||
&roomuserdataid,
|
&roomuserdataid,
|
||||||
&serde_json::to_vec(&json).expect("to_vec always works on json values"),
|
&serde_json::to_vec(&data).expect("to_vec always works on json values"),
|
||||||
)?;
|
)?;
|
||||||
|
|
||||||
let prev = self.roomusertype_roomuserdataid.get(&key)?;
|
let prev = self.roomusertype_roomuserdataid.get(&key)?;
|
||||||
|
@ -71,12 +63,12 @@ impl AccountData {
|
||||||
|
|
||||||
/// Searches the account data for a specific kind.
|
/// Searches the account data for a specific kind.
|
||||||
#[tracing::instrument(skip(self, room_id, user_id, kind))]
|
#[tracing::instrument(skip(self, room_id, user_id, kind))]
|
||||||
pub fn get<T: DeserializeOwned>(
|
fn get(
|
||||||
&self,
|
&self,
|
||||||
room_id: Option<&RoomId>,
|
room_id: Option<&RoomId>,
|
||||||
user_id: &UserId,
|
user_id: &UserId,
|
||||||
kind: RoomAccountDataEventType,
|
kind: RoomAccountDataEventType,
|
||||||
) -> Result<Option<T>> {
|
) -> Result<Option<Box<serde_json::value::RawValue>>> {
|
||||||
let mut key = room_id
|
let mut key = room_id
|
||||||
.map(|r| r.to_string())
|
.map(|r| r.to_string())
|
||||||
.unwrap_or_default()
|
.unwrap_or_default()
|
||||||
|
@ -104,7 +96,7 @@ impl AccountData {
|
||||||
|
|
||||||
/// Returns all changes to the account data that happened after `since`.
|
/// Returns all changes to the account data that happened after `since`.
|
||||||
#[tracing::instrument(skip(self, room_id, user_id, since))]
|
#[tracing::instrument(skip(self, room_id, user_id, since))]
|
||||||
pub fn changes_since(
|
fn changes_since(
|
||||||
&self,
|
&self,
|
||||||
room_id: Option<&RoomId>,
|
room_id: Option<&RoomId>,
|
||||||
user_id: &UserId,
|
user_id: &UserId,
|
||||||
|
@ -131,13 +123,12 @@ impl AccountData {
|
||||||
.take_while(move |(k, _)| k.starts_with(&prefix))
|
.take_while(move |(k, _)| k.starts_with(&prefix))
|
||||||
.map(|(k, v)| {
|
.map(|(k, v)| {
|
||||||
Ok::<_, Error>((
|
Ok::<_, Error>((
|
||||||
RoomAccountDataEventType::try_from(
|
RoomAccountDataEventType::from(
|
||||||
utils::string_from_bytes(k.rsplit(|&b| b == 0xff).next().ok_or_else(
|
utils::string_from_bytes(k.rsplit(|&b| b == 0xff).next().ok_or_else(
|
||||||
|| Error::bad_database("RoomUserData ID in db is invalid."),
|
|| Error::bad_database("RoomUserData ID in db is invalid."),
|
||||||
)?)
|
)?)
|
||||||
.map_err(|_| Error::bad_database("RoomUserData ID in db is invalid."))?,
|
.map_err(|_| Error::bad_database("RoomUserData ID in db is invalid."))?,
|
||||||
)
|
),
|
||||||
.map_err(|_| Error::bad_database("RoomUserData ID in db is invalid."))?,
|
|
||||||
serde_json::from_slice::<Raw<AnyEphemeralRoomEvent>>(&v).map_err(|_| {
|
serde_json::from_slice::<Raw<AnyEphemeralRoomEvent>>(&v).map_err(|_| {
|
||||||
Error::bad_database("Database contains invalid account data.")
|
Error::bad_database("Database contains invalid account data.")
|
||||||
})?,
|
})?,
|
|
@ -1,20 +1,8 @@
|
||||||
use crate::{utils, Error, Result};
|
use crate::{database::KeyValueDatabase, service, utils, Error, Result};
|
||||||
use std::{
|
|
||||||
collections::HashMap,
|
|
||||||
sync::{Arc, RwLock},
|
|
||||||
};
|
|
||||||
|
|
||||||
use super::abstraction::Tree;
|
impl service::appservice::Data for KeyValueDatabase {
|
||||||
|
|
||||||
pub struct Appservice {
|
|
||||||
pub(super) cached_registrations: Arc<RwLock<HashMap<String, serde_yaml::Value>>>,
|
|
||||||
pub(super) id_appserviceregistrations: Arc<dyn Tree>,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Appservice {
|
|
||||||
/// Registers an appservice and returns the ID to the caller
|
/// Registers an appservice and returns the ID to the caller
|
||||||
///
|
fn register_appservice(&self, yaml: serde_yaml::Value) -> Result<String> {
|
||||||
pub fn register_appservice(&self, yaml: serde_yaml::Value) -> Result<String> {
|
|
||||||
// TODO: Rumaify
|
// TODO: Rumaify
|
||||||
let id = yaml.get("id").unwrap().as_str().unwrap();
|
let id = yaml.get("id").unwrap().as_str().unwrap();
|
||||||
self.id_appserviceregistrations.insert(
|
self.id_appserviceregistrations.insert(
|
||||||
|
@ -34,7 +22,7 @@ impl Appservice {
|
||||||
/// # Arguments
|
/// # Arguments
|
||||||
///
|
///
|
||||||
/// * `service_name` - the name you send to register the service previously
|
/// * `service_name` - the name you send to register the service previously
|
||||||
pub fn unregister_appservice(&self, service_name: &str) -> Result<()> {
|
fn unregister_appservice(&self, service_name: &str) -> Result<()> {
|
||||||
self.id_appserviceregistrations
|
self.id_appserviceregistrations
|
||||||
.remove(service_name.as_bytes())?;
|
.remove(service_name.as_bytes())?;
|
||||||
self.cached_registrations
|
self.cached_registrations
|
||||||
|
@ -44,7 +32,7 @@ impl Appservice {
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn get_registration(&self, id: &str) -> Result<Option<serde_yaml::Value>> {
|
fn get_registration(&self, id: &str) -> Result<Option<serde_yaml::Value>> {
|
||||||
self.cached_registrations
|
self.cached_registrations
|
||||||
.read()
|
.read()
|
||||||
.unwrap()
|
.unwrap()
|
||||||
|
@ -66,14 +54,17 @@ impl Appservice {
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn iter_ids(&self) -> Result<impl Iterator<Item = Result<String>> + '_> {
|
fn iter_ids<'a>(&'a self) -> Result<Box<dyn Iterator<Item = Result<String>> + 'a>> {
|
||||||
Ok(self.id_appserviceregistrations.iter().map(|(id, _)| {
|
Ok(Box::new(self.id_appserviceregistrations.iter().map(
|
||||||
utils::string_from_bytes(&id)
|
|(id, _)| {
|
||||||
.map_err(|_| Error::bad_database("Invalid id bytes in id_appserviceregistrations."))
|
utils::string_from_bytes(&id).map_err(|_| {
|
||||||
}))
|
Error::bad_database("Invalid id bytes in id_appserviceregistrations.")
|
||||||
|
})
|
||||||
|
},
|
||||||
|
)))
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn all(&self) -> Result<Vec<(String, serde_yaml::Value)>> {
|
fn all(&self) -> Result<Vec<(String, serde_yaml::Value)>> {
|
||||||
self.iter_ids()?
|
self.iter_ids()?
|
||||||
.filter_map(|id| id.ok())
|
.filter_map(|id| id.ok())
|
||||||
.map(move |id| {
|
.map(move |id| {
|
311
src/database/key_value/globals.rs
Normal file
311
src/database/key_value/globals.rs
Normal file
|
@ -0,0 +1,311 @@
|
||||||
|
use std::collections::{BTreeMap, HashMap};
|
||||||
|
|
||||||
|
use async_trait::async_trait;
|
||||||
|
use futures_util::{stream::FuturesUnordered, StreamExt};
|
||||||
|
use lru_cache::LruCache;
|
||||||
|
use ruma::{
|
||||||
|
api::federation::discovery::{ServerSigningKeys, VerifyKey},
|
||||||
|
signatures::Ed25519KeyPair,
|
||||||
|
DeviceId, MilliSecondsSinceUnixEpoch, OwnedServerSigningKeyId, ServerName, UserId,
|
||||||
|
};
|
||||||
|
|
||||||
|
use crate::{database::KeyValueDatabase, service, services, utils, Error, Result};
|
||||||
|
|
||||||
|
pub const COUNTER: &[u8] = b"c";
|
||||||
|
pub const LAST_CHECK_FOR_UPDATES_COUNT: &[u8] = b"u";
|
||||||
|
|
||||||
|
#[async_trait]
|
||||||
|
impl service::globals::Data for KeyValueDatabase {
|
||||||
|
fn next_count(&self) -> Result<u64> {
|
||||||
|
utils::u64_from_bytes(&self.global.increment(COUNTER)?)
|
||||||
|
.map_err(|_| Error::bad_database("Count has invalid bytes."))
|
||||||
|
}
|
||||||
|
|
||||||
|
fn current_count(&self) -> Result<u64> {
|
||||||
|
self.global.get(COUNTER)?.map_or(Ok(0_u64), |bytes| {
|
||||||
|
utils::u64_from_bytes(&bytes)
|
||||||
|
.map_err(|_| Error::bad_database("Count has invalid bytes."))
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
fn last_check_for_updates_id(&self) -> Result<u64> {
|
||||||
|
self.global
|
||||||
|
.get(LAST_CHECK_FOR_UPDATES_COUNT)?
|
||||||
|
.map_or(Ok(0_u64), |bytes| {
|
||||||
|
utils::u64_from_bytes(&bytes).map_err(|_| {
|
||||||
|
Error::bad_database("last check for updates count has invalid bytes.")
|
||||||
|
})
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
fn update_check_for_updates_id(&self, id: u64) -> Result<()> {
|
||||||
|
self.global
|
||||||
|
.insert(LAST_CHECK_FOR_UPDATES_COUNT, &id.to_be_bytes())?;
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn watch(&self, user_id: &UserId, device_id: &DeviceId) -> Result<()> {
|
||||||
|
let userid_bytes = user_id.as_bytes().to_vec();
|
||||||
|
let mut userid_prefix = userid_bytes.clone();
|
||||||
|
userid_prefix.push(0xff);
|
||||||
|
|
||||||
|
let mut userdeviceid_prefix = userid_prefix.clone();
|
||||||
|
userdeviceid_prefix.extend_from_slice(device_id.as_bytes());
|
||||||
|
userdeviceid_prefix.push(0xff);
|
||||||
|
|
||||||
|
let mut futures = FuturesUnordered::new();
|
||||||
|
|
||||||
|
// Return when *any* user changed his key
|
||||||
|
// TODO: only send for user they share a room with
|
||||||
|
futures.push(self.todeviceid_events.watch_prefix(&userdeviceid_prefix));
|
||||||
|
|
||||||
|
futures.push(self.userroomid_joined.watch_prefix(&userid_prefix));
|
||||||
|
futures.push(self.userroomid_invitestate.watch_prefix(&userid_prefix));
|
||||||
|
futures.push(self.userroomid_leftstate.watch_prefix(&userid_prefix));
|
||||||
|
futures.push(
|
||||||
|
self.userroomid_notificationcount
|
||||||
|
.watch_prefix(&userid_prefix),
|
||||||
|
);
|
||||||
|
futures.push(self.userroomid_highlightcount.watch_prefix(&userid_prefix));
|
||||||
|
|
||||||
|
// Events for rooms we are in
|
||||||
|
for room_id in services()
|
||||||
|
.rooms
|
||||||
|
.state_cache
|
||||||
|
.rooms_joined(user_id)
|
||||||
|
.filter_map(|r| r.ok())
|
||||||
|
{
|
||||||
|
let short_roomid = services()
|
||||||
|
.rooms
|
||||||
|
.short
|
||||||
|
.get_shortroomid(&room_id)
|
||||||
|
.ok()
|
||||||
|
.flatten()
|
||||||
|
.expect("room exists")
|
||||||
|
.to_be_bytes()
|
||||||
|
.to_vec();
|
||||||
|
|
||||||
|
let roomid_bytes = room_id.as_bytes().to_vec();
|
||||||
|
let mut roomid_prefix = roomid_bytes.clone();
|
||||||
|
roomid_prefix.push(0xff);
|
||||||
|
|
||||||
|
// PDUs
|
||||||
|
futures.push(self.pduid_pdu.watch_prefix(&short_roomid));
|
||||||
|
|
||||||
|
// EDUs
|
||||||
|
futures.push(self.roomid_lasttypingupdate.watch_prefix(&roomid_bytes));
|
||||||
|
|
||||||
|
futures.push(self.readreceiptid_readreceipt.watch_prefix(&roomid_prefix));
|
||||||
|
|
||||||
|
// Key changes
|
||||||
|
futures.push(self.keychangeid_userid.watch_prefix(&roomid_prefix));
|
||||||
|
|
||||||
|
// Room account data
|
||||||
|
let mut roomuser_prefix = roomid_prefix.clone();
|
||||||
|
roomuser_prefix.extend_from_slice(&userid_prefix);
|
||||||
|
|
||||||
|
futures.push(
|
||||||
|
self.roomusertype_roomuserdataid
|
||||||
|
.watch_prefix(&roomuser_prefix),
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
let mut globaluserdata_prefix = vec![0xff];
|
||||||
|
globaluserdata_prefix.extend_from_slice(&userid_prefix);
|
||||||
|
|
||||||
|
futures.push(
|
||||||
|
self.roomusertype_roomuserdataid
|
||||||
|
.watch_prefix(&globaluserdata_prefix),
|
||||||
|
);
|
||||||
|
|
||||||
|
// More key changes (used when user is not joined to any rooms)
|
||||||
|
futures.push(self.keychangeid_userid.watch_prefix(&userid_prefix));
|
||||||
|
|
||||||
|
// One time keys
|
||||||
|
futures.push(self.userid_lastonetimekeyupdate.watch_prefix(&userid_bytes));
|
||||||
|
|
||||||
|
futures.push(Box::pin(services().globals.rotate.watch()));
|
||||||
|
|
||||||
|
// Wait until one of them finds something
|
||||||
|
futures.next().await;
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
fn cleanup(&self) -> Result<()> {
|
||||||
|
self._db.cleanup()
|
||||||
|
}
|
||||||
|
|
||||||
|
fn memory_usage(&self) -> String {
|
||||||
|
let pdu_cache = self.pdu_cache.lock().unwrap().len();
|
||||||
|
let shorteventid_cache = self.shorteventid_cache.lock().unwrap().len();
|
||||||
|
let auth_chain_cache = self.auth_chain_cache.lock().unwrap().len();
|
||||||
|
let eventidshort_cache = self.eventidshort_cache.lock().unwrap().len();
|
||||||
|
let statekeyshort_cache = self.statekeyshort_cache.lock().unwrap().len();
|
||||||
|
let our_real_users_cache = self.our_real_users_cache.read().unwrap().len();
|
||||||
|
let appservice_in_room_cache = self.appservice_in_room_cache.read().unwrap().len();
|
||||||
|
let lasttimelinecount_cache = self.lasttimelinecount_cache.lock().unwrap().len();
|
||||||
|
|
||||||
|
let mut response = format!(
|
||||||
|
"\
|
||||||
|
pdu_cache: {pdu_cache}
|
||||||
|
shorteventid_cache: {shorteventid_cache}
|
||||||
|
auth_chain_cache: {auth_chain_cache}
|
||||||
|
eventidshort_cache: {eventidshort_cache}
|
||||||
|
statekeyshort_cache: {statekeyshort_cache}
|
||||||
|
our_real_users_cache: {our_real_users_cache}
|
||||||
|
appservice_in_room_cache: {appservice_in_room_cache}
|
||||||
|
lasttimelinecount_cache: {lasttimelinecount_cache}\n"
|
||||||
|
);
|
||||||
|
if let Ok(db_stats) = self._db.memory_usage() {
|
||||||
|
response += &db_stats;
|
||||||
|
}
|
||||||
|
|
||||||
|
response
|
||||||
|
}
|
||||||
|
|
||||||
|
fn clear_caches(&self, amount: u32) {
|
||||||
|
if amount > 0 {
|
||||||
|
let c = &mut *self.pdu_cache.lock().unwrap();
|
||||||
|
*c = LruCache::new(c.capacity());
|
||||||
|
}
|
||||||
|
if amount > 1 {
|
||||||
|
let c = &mut *self.shorteventid_cache.lock().unwrap();
|
||||||
|
*c = LruCache::new(c.capacity());
|
||||||
|
}
|
||||||
|
if amount > 2 {
|
||||||
|
let c = &mut *self.auth_chain_cache.lock().unwrap();
|
||||||
|
*c = LruCache::new(c.capacity());
|
||||||
|
}
|
||||||
|
if amount > 3 {
|
||||||
|
let c = &mut *self.eventidshort_cache.lock().unwrap();
|
||||||
|
*c = LruCache::new(c.capacity());
|
||||||
|
}
|
||||||
|
if amount > 4 {
|
||||||
|
let c = &mut *self.statekeyshort_cache.lock().unwrap();
|
||||||
|
*c = LruCache::new(c.capacity());
|
||||||
|
}
|
||||||
|
if amount > 5 {
|
||||||
|
let c = &mut *self.our_real_users_cache.write().unwrap();
|
||||||
|
*c = HashMap::new();
|
||||||
|
}
|
||||||
|
if amount > 6 {
|
||||||
|
let c = &mut *self.appservice_in_room_cache.write().unwrap();
|
||||||
|
*c = HashMap::new();
|
||||||
|
}
|
||||||
|
if amount > 7 {
|
||||||
|
let c = &mut *self.lasttimelinecount_cache.lock().unwrap();
|
||||||
|
*c = HashMap::new();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn load_keypair(&self) -> Result<Ed25519KeyPair> {
|
||||||
|
let keypair_bytes = self.global.get(b"keypair")?.map_or_else(
|
||||||
|
|| {
|
||||||
|
let keypair = utils::generate_keypair();
|
||||||
|
self.global.insert(b"keypair", &keypair)?;
|
||||||
|
Ok::<_, Error>(keypair)
|
||||||
|
},
|
||||||
|
|s| Ok(s.to_vec()),
|
||||||
|
)?;
|
||||||
|
|
||||||
|
let mut parts = keypair_bytes.splitn(2, |&b| b == 0xff);
|
||||||
|
|
||||||
|
utils::string_from_bytes(
|
||||||
|
// 1. version
|
||||||
|
parts
|
||||||
|
.next()
|
||||||
|
.expect("splitn always returns at least one element"),
|
||||||
|
)
|
||||||
|
.map_err(|_| Error::bad_database("Invalid version bytes in keypair."))
|
||||||
|
.and_then(|version| {
|
||||||
|
// 2. key
|
||||||
|
parts
|
||||||
|
.next()
|
||||||
|
.ok_or_else(|| Error::bad_database("Invalid keypair format in database."))
|
||||||
|
.map(|key| (version, key))
|
||||||
|
})
|
||||||
|
.and_then(|(version, key)| {
|
||||||
|
Ed25519KeyPair::from_der(key, version)
|
||||||
|
.map_err(|_| Error::bad_database("Private or public keys are invalid."))
|
||||||
|
})
|
||||||
|
}
|
||||||
|
fn remove_keypair(&self) -> Result<()> {
|
||||||
|
self.global.remove(b"keypair")
|
||||||
|
}
|
||||||
|
|
||||||
|
fn add_signing_key(
|
||||||
|
&self,
|
||||||
|
origin: &ServerName,
|
||||||
|
new_keys: ServerSigningKeys,
|
||||||
|
) -> Result<BTreeMap<OwnedServerSigningKeyId, VerifyKey>> {
|
||||||
|
// Not atomic, but this is not critical
|
||||||
|
let signingkeys = self.server_signingkeys.get(origin.as_bytes())?;
|
||||||
|
|
||||||
|
let mut keys = signingkeys
|
||||||
|
.and_then(|keys| serde_json::from_slice(&keys).ok())
|
||||||
|
.unwrap_or_else(|| {
|
||||||
|
// Just insert "now", it doesn't matter
|
||||||
|
ServerSigningKeys::new(origin.to_owned(), MilliSecondsSinceUnixEpoch::now())
|
||||||
|
});
|
||||||
|
|
||||||
|
let ServerSigningKeys {
|
||||||
|
verify_keys,
|
||||||
|
old_verify_keys,
|
||||||
|
..
|
||||||
|
} = new_keys;
|
||||||
|
|
||||||
|
keys.verify_keys.extend(verify_keys);
|
||||||
|
keys.old_verify_keys.extend(old_verify_keys);
|
||||||
|
|
||||||
|
self.server_signingkeys.insert(
|
||||||
|
origin.as_bytes(),
|
||||||
|
&serde_json::to_vec(&keys).expect("serversigningkeys can be serialized"),
|
||||||
|
)?;
|
||||||
|
|
||||||
|
let mut tree = keys.verify_keys;
|
||||||
|
tree.extend(
|
||||||
|
keys.old_verify_keys
|
||||||
|
.into_iter()
|
||||||
|
.map(|old| (old.0, VerifyKey::new(old.1.key))),
|
||||||
|
);
|
||||||
|
|
||||||
|
Ok(tree)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// This returns an empty `Ok(BTreeMap<..>)` when there are no keys found for the server.
|
||||||
|
fn signing_keys_for(
|
||||||
|
&self,
|
||||||
|
origin: &ServerName,
|
||||||
|
) -> Result<BTreeMap<OwnedServerSigningKeyId, VerifyKey>> {
|
||||||
|
let signingkeys = self
|
||||||
|
.server_signingkeys
|
||||||
|
.get(origin.as_bytes())?
|
||||||
|
.and_then(|bytes| serde_json::from_slice(&bytes).ok())
|
||||||
|
.map(|keys: ServerSigningKeys| {
|
||||||
|
let mut tree = keys.verify_keys;
|
||||||
|
tree.extend(
|
||||||
|
keys.old_verify_keys
|
||||||
|
.into_iter()
|
||||||
|
.map(|old| (old.0, VerifyKey::new(old.1.key))),
|
||||||
|
);
|
||||||
|
tree
|
||||||
|
})
|
||||||
|
.unwrap_or_else(BTreeMap::new);
|
||||||
|
|
||||||
|
Ok(signingkeys)
|
||||||
|
}
|
||||||
|
|
||||||
|
fn database_version(&self) -> Result<u64> {
|
||||||
|
self.global.get(b"version")?.map_or(Ok(0), |version| {
|
||||||
|
utils::u64_from_bytes(&version)
|
||||||
|
.map_err(|_| Error::bad_database("Database version id is invalid."))
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
fn bump_database_version(&self, new_version: u64) -> Result<()> {
|
||||||
|
self.global.insert(b"version", &new_version.to_be_bytes())?;
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
}
|
|
@ -1,30 +1,23 @@
|
||||||
use crate::{utils, Error, Result};
|
use std::collections::BTreeMap;
|
||||||
|
|
||||||
use ruma::{
|
use ruma::{
|
||||||
api::client::{
|
api::client::{
|
||||||
backup::{BackupAlgorithm, KeyBackupData, RoomKeyBackup},
|
backup::{BackupAlgorithm, KeyBackupData, RoomKeyBackup},
|
||||||
error::ErrorKind,
|
error::ErrorKind,
|
||||||
},
|
},
|
||||||
serde::Raw,
|
serde::Raw,
|
||||||
RoomId, UserId,
|
OwnedRoomId, RoomId, UserId,
|
||||||
};
|
};
|
||||||
use std::{collections::BTreeMap, sync::Arc};
|
|
||||||
|
|
||||||
use super::abstraction::Tree;
|
use crate::{database::KeyValueDatabase, service, services, utils, Error, Result};
|
||||||
|
|
||||||
pub struct KeyBackups {
|
impl service::key_backups::Data for KeyValueDatabase {
|
||||||
pub(super) backupid_algorithm: Arc<dyn Tree>, // BackupId = UserId + Version(Count)
|
fn create_backup(
|
||||||
pub(super) backupid_etag: Arc<dyn Tree>, // BackupId = UserId + Version(Count)
|
|
||||||
pub(super) backupkeyid_backup: Arc<dyn Tree>, // BackupKeyId = UserId + Version + RoomId + SessionId
|
|
||||||
}
|
|
||||||
|
|
||||||
impl KeyBackups {
|
|
||||||
pub fn create_backup(
|
|
||||||
&self,
|
&self,
|
||||||
user_id: &UserId,
|
user_id: &UserId,
|
||||||
backup_metadata: &Raw<BackupAlgorithm>,
|
backup_metadata: &Raw<BackupAlgorithm>,
|
||||||
globals: &super::globals::Globals,
|
|
||||||
) -> Result<String> {
|
) -> Result<String> {
|
||||||
let version = globals.next_count()?.to_string();
|
let version = services().globals.next_count()?.to_string();
|
||||||
|
|
||||||
let mut key = user_id.as_bytes().to_vec();
|
let mut key = user_id.as_bytes().to_vec();
|
||||||
key.push(0xff);
|
key.push(0xff);
|
||||||
|
@ -35,11 +28,11 @@ impl KeyBackups {
|
||||||
&serde_json::to_vec(backup_metadata).expect("BackupAlgorithm::to_vec always works"),
|
&serde_json::to_vec(backup_metadata).expect("BackupAlgorithm::to_vec always works"),
|
||||||
)?;
|
)?;
|
||||||
self.backupid_etag
|
self.backupid_etag
|
||||||
.insert(&key, &globals.next_count()?.to_be_bytes())?;
|
.insert(&key, &services().globals.next_count()?.to_be_bytes())?;
|
||||||
Ok(version)
|
Ok(version)
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn delete_backup(&self, user_id: &UserId, version: &str) -> Result<()> {
|
fn delete_backup(&self, user_id: &UserId, version: &str) -> Result<()> {
|
||||||
let mut key = user_id.as_bytes().to_vec();
|
let mut key = user_id.as_bytes().to_vec();
|
||||||
key.push(0xff);
|
key.push(0xff);
|
||||||
key.extend_from_slice(version.as_bytes());
|
key.extend_from_slice(version.as_bytes());
|
||||||
|
@ -56,12 +49,11 @@ impl KeyBackups {
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn update_backup(
|
fn update_backup(
|
||||||
&self,
|
&self,
|
||||||
user_id: &UserId,
|
user_id: &UserId,
|
||||||
version: &str,
|
version: &str,
|
||||||
backup_metadata: &Raw<BackupAlgorithm>,
|
backup_metadata: &Raw<BackupAlgorithm>,
|
||||||
globals: &super::globals::Globals,
|
|
||||||
) -> Result<String> {
|
) -> Result<String> {
|
||||||
let mut key = user_id.as_bytes().to_vec();
|
let mut key = user_id.as_bytes().to_vec();
|
||||||
key.push(0xff);
|
key.push(0xff);
|
||||||
|
@ -77,11 +69,11 @@ impl KeyBackups {
|
||||||
self.backupid_algorithm
|
self.backupid_algorithm
|
||||||
.insert(&key, backup_metadata.json().get().as_bytes())?;
|
.insert(&key, backup_metadata.json().get().as_bytes())?;
|
||||||
self.backupid_etag
|
self.backupid_etag
|
||||||
.insert(&key, &globals.next_count()?.to_be_bytes())?;
|
.insert(&key, &services().globals.next_count()?.to_be_bytes())?;
|
||||||
Ok(version.to_owned())
|
Ok(version.to_owned())
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn get_latest_backup_version(&self, user_id: &UserId) -> Result<Option<String>> {
|
fn get_latest_backup_version(&self, user_id: &UserId) -> Result<Option<String>> {
|
||||||
let mut prefix = user_id.as_bytes().to_vec();
|
let mut prefix = user_id.as_bytes().to_vec();
|
||||||
prefix.push(0xff);
|
prefix.push(0xff);
|
||||||
let mut last_possible_key = prefix.clone();
|
let mut last_possible_key = prefix.clone();
|
||||||
|
@ -102,7 +94,7 @@ impl KeyBackups {
|
||||||
.transpose()
|
.transpose()
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn get_latest_backup(
|
fn get_latest_backup(
|
||||||
&self,
|
&self,
|
||||||
user_id: &UserId,
|
user_id: &UserId,
|
||||||
) -> Result<Option<(String, Raw<BackupAlgorithm>)>> {
|
) -> Result<Option<(String, Raw<BackupAlgorithm>)>> {
|
||||||
|
@ -133,11 +125,7 @@ impl KeyBackups {
|
||||||
.transpose()
|
.transpose()
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn get_backup(
|
fn get_backup(&self, user_id: &UserId, version: &str) -> Result<Option<Raw<BackupAlgorithm>>> {
|
||||||
&self,
|
|
||||||
user_id: &UserId,
|
|
||||||
version: &str,
|
|
||||||
) -> Result<Option<Raw<BackupAlgorithm>>> {
|
|
||||||
let mut key = user_id.as_bytes().to_vec();
|
let mut key = user_id.as_bytes().to_vec();
|
||||||
key.push(0xff);
|
key.push(0xff);
|
||||||
key.extend_from_slice(version.as_bytes());
|
key.extend_from_slice(version.as_bytes());
|
||||||
|
@ -150,14 +138,13 @@ impl KeyBackups {
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn add_key(
|
fn add_key(
|
||||||
&self,
|
&self,
|
||||||
user_id: &UserId,
|
user_id: &UserId,
|
||||||
version: &str,
|
version: &str,
|
||||||
room_id: &RoomId,
|
room_id: &RoomId,
|
||||||
session_id: &str,
|
session_id: &str,
|
||||||
key_data: &Raw<KeyBackupData>,
|
key_data: &Raw<KeyBackupData>,
|
||||||
globals: &super::globals::Globals,
|
|
||||||
) -> Result<()> {
|
) -> Result<()> {
|
||||||
let mut key = user_id.as_bytes().to_vec();
|
let mut key = user_id.as_bytes().to_vec();
|
||||||
key.push(0xff);
|
key.push(0xff);
|
||||||
|
@ -171,7 +158,7 @@ impl KeyBackups {
|
||||||
}
|
}
|
||||||
|
|
||||||
self.backupid_etag
|
self.backupid_etag
|
||||||
.insert(&key, &globals.next_count()?.to_be_bytes())?;
|
.insert(&key, &services().globals.next_count()?.to_be_bytes())?;
|
||||||
|
|
||||||
key.push(0xff);
|
key.push(0xff);
|
||||||
key.extend_from_slice(room_id.as_bytes());
|
key.extend_from_slice(room_id.as_bytes());
|
||||||
|
@ -184,7 +171,7 @@ impl KeyBackups {
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn count_keys(&self, user_id: &UserId, version: &str) -> Result<usize> {
|
fn count_keys(&self, user_id: &UserId, version: &str) -> Result<usize> {
|
||||||
let mut prefix = user_id.as_bytes().to_vec();
|
let mut prefix = user_id.as_bytes().to_vec();
|
||||||
prefix.push(0xff);
|
prefix.push(0xff);
|
||||||
prefix.extend_from_slice(version.as_bytes());
|
prefix.extend_from_slice(version.as_bytes());
|
||||||
|
@ -192,7 +179,7 @@ impl KeyBackups {
|
||||||
Ok(self.backupkeyid_backup.scan_prefix(prefix).count())
|
Ok(self.backupkeyid_backup.scan_prefix(prefix).count())
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn get_etag(&self, user_id: &UserId, version: &str) -> Result<String> {
|
fn get_etag(&self, user_id: &UserId, version: &str) -> Result<String> {
|
||||||
let mut key = user_id.as_bytes().to_vec();
|
let mut key = user_id.as_bytes().to_vec();
|
||||||
key.push(0xff);
|
key.push(0xff);
|
||||||
key.extend_from_slice(version.as_bytes());
|
key.extend_from_slice(version.as_bytes());
|
||||||
|
@ -207,17 +194,17 @@ impl KeyBackups {
|
||||||
.to_string())
|
.to_string())
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn get_all(
|
fn get_all(
|
||||||
&self,
|
&self,
|
||||||
user_id: &UserId,
|
user_id: &UserId,
|
||||||
version: &str,
|
version: &str,
|
||||||
) -> Result<BTreeMap<Box<RoomId>, RoomKeyBackup>> {
|
) -> Result<BTreeMap<OwnedRoomId, RoomKeyBackup>> {
|
||||||
let mut prefix = user_id.as_bytes().to_vec();
|
let mut prefix = user_id.as_bytes().to_vec();
|
||||||
prefix.push(0xff);
|
prefix.push(0xff);
|
||||||
prefix.extend_from_slice(version.as_bytes());
|
prefix.extend_from_slice(version.as_bytes());
|
||||||
prefix.push(0xff);
|
prefix.push(0xff);
|
||||||
|
|
||||||
let mut rooms = BTreeMap::<Box<RoomId>, RoomKeyBackup>::new();
|
let mut rooms = BTreeMap::<OwnedRoomId, RoomKeyBackup>::new();
|
||||||
|
|
||||||
for result in self
|
for result in self
|
||||||
.backupkeyid_backup
|
.backupkeyid_backup
|
||||||
|
@ -263,7 +250,7 @@ impl KeyBackups {
|
||||||
Ok(rooms)
|
Ok(rooms)
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn get_room(
|
fn get_room(
|
||||||
&self,
|
&self,
|
||||||
user_id: &UserId,
|
user_id: &UserId,
|
||||||
version: &str,
|
version: &str,
|
||||||
|
@ -300,7 +287,7 @@ impl KeyBackups {
|
||||||
.collect())
|
.collect())
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn get_session(
|
fn get_session(
|
||||||
&self,
|
&self,
|
||||||
user_id: &UserId,
|
user_id: &UserId,
|
||||||
version: &str,
|
version: &str,
|
||||||
|
@ -325,7 +312,7 @@ impl KeyBackups {
|
||||||
.transpose()
|
.transpose()
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn delete_all_keys(&self, user_id: &UserId, version: &str) -> Result<()> {
|
fn delete_all_keys(&self, user_id: &UserId, version: &str) -> Result<()> {
|
||||||
let mut key = user_id.as_bytes().to_vec();
|
let mut key = user_id.as_bytes().to_vec();
|
||||||
key.push(0xff);
|
key.push(0xff);
|
||||||
key.extend_from_slice(version.as_bytes());
|
key.extend_from_slice(version.as_bytes());
|
||||||
|
@ -338,12 +325,7 @@ impl KeyBackups {
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn delete_room_keys(
|
fn delete_room_keys(&self, user_id: &UserId, version: &str, room_id: &RoomId) -> Result<()> {
|
||||||
&self,
|
|
||||||
user_id: &UserId,
|
|
||||||
version: &str,
|
|
||||||
room_id: &RoomId,
|
|
||||||
) -> Result<()> {
|
|
||||||
let mut key = user_id.as_bytes().to_vec();
|
let mut key = user_id.as_bytes().to_vec();
|
||||||
key.push(0xff);
|
key.push(0xff);
|
||||||
key.extend_from_slice(version.as_bytes());
|
key.extend_from_slice(version.as_bytes());
|
||||||
|
@ -358,7 +340,7 @@ impl KeyBackups {
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn delete_room_key(
|
fn delete_room_key(
|
||||||
&self,
|
&self,
|
||||||
user_id: &UserId,
|
user_id: &UserId,
|
||||||
version: &str,
|
version: &str,
|
82
src/database/key_value/media.rs
Normal file
82
src/database/key_value/media.rs
Normal file
|
@ -0,0 +1,82 @@
|
||||||
|
use ruma::api::client::error::ErrorKind;
|
||||||
|
|
||||||
|
use crate::{database::KeyValueDatabase, service, utils, Error, Result};
|
||||||
|
|
||||||
|
impl service::media::Data for KeyValueDatabase {
|
||||||
|
fn create_file_metadata(
|
||||||
|
&self,
|
||||||
|
mxc: String,
|
||||||
|
width: u32,
|
||||||
|
height: u32,
|
||||||
|
content_disposition: Option<&str>,
|
||||||
|
content_type: Option<&str>,
|
||||||
|
) -> Result<Vec<u8>> {
|
||||||
|
let mut key = mxc.as_bytes().to_vec();
|
||||||
|
key.push(0xff);
|
||||||
|
key.extend_from_slice(&width.to_be_bytes());
|
||||||
|
key.extend_from_slice(&height.to_be_bytes());
|
||||||
|
key.push(0xff);
|
||||||
|
key.extend_from_slice(
|
||||||
|
content_disposition
|
||||||
|
.as_ref()
|
||||||
|
.map(|f| f.as_bytes())
|
||||||
|
.unwrap_or_default(),
|
||||||
|
);
|
||||||
|
key.push(0xff);
|
||||||
|
key.extend_from_slice(
|
||||||
|
content_type
|
||||||
|
.as_ref()
|
||||||
|
.map(|c| c.as_bytes())
|
||||||
|
.unwrap_or_default(),
|
||||||
|
);
|
||||||
|
|
||||||
|
self.mediaid_file.insert(&key, &[])?;
|
||||||
|
|
||||||
|
Ok(key)
|
||||||
|
}
|
||||||
|
|
||||||
|
fn search_file_metadata(
|
||||||
|
&self,
|
||||||
|
mxc: String,
|
||||||
|
width: u32,
|
||||||
|
height: u32,
|
||||||
|
) -> Result<(Option<String>, Option<String>, Vec<u8>)> {
|
||||||
|
let mut prefix = mxc.as_bytes().to_vec();
|
||||||
|
prefix.push(0xff);
|
||||||
|
prefix.extend_from_slice(&width.to_be_bytes());
|
||||||
|
prefix.extend_from_slice(&height.to_be_bytes());
|
||||||
|
prefix.push(0xff);
|
||||||
|
|
||||||
|
let (key, _) = self
|
||||||
|
.mediaid_file
|
||||||
|
.scan_prefix(prefix)
|
||||||
|
.next()
|
||||||
|
.ok_or(Error::BadRequest(ErrorKind::NotFound, "Media not found"))?;
|
||||||
|
|
||||||
|
let mut parts = key.rsplit(|&b| b == 0xff);
|
||||||
|
|
||||||
|
let content_type = parts
|
||||||
|
.next()
|
||||||
|
.map(|bytes| {
|
||||||
|
utils::string_from_bytes(bytes).map_err(|_| {
|
||||||
|
Error::bad_database("Content type in mediaid_file is invalid unicode.")
|
||||||
|
})
|
||||||
|
})
|
||||||
|
.transpose()?;
|
||||||
|
|
||||||
|
let content_disposition_bytes = parts
|
||||||
|
.next()
|
||||||
|
.ok_or_else(|| Error::bad_database("Media ID in db is invalid."))?;
|
||||||
|
|
||||||
|
let content_disposition = if content_disposition_bytes.is_empty() {
|
||||||
|
None
|
||||||
|
} else {
|
||||||
|
Some(
|
||||||
|
utils::string_from_bytes(content_disposition_bytes).map_err(|_| {
|
||||||
|
Error::bad_database("Content Disposition in mediaid_file is invalid unicode.")
|
||||||
|
})?,
|
||||||
|
)
|
||||||
|
};
|
||||||
|
Ok((content_disposition, content_type, key))
|
||||||
|
}
|
||||||
|
}
|
13
src/database/key_value/mod.rs
Normal file
13
src/database/key_value/mod.rs
Normal file
|
@ -0,0 +1,13 @@
|
||||||
|
mod account_data;
|
||||||
|
//mod admin;
|
||||||
|
mod appservice;
|
||||||
|
mod globals;
|
||||||
|
mod key_backups;
|
||||||
|
mod media;
|
||||||
|
//mod pdu;
|
||||||
|
mod pusher;
|
||||||
|
mod rooms;
|
||||||
|
mod sending;
|
||||||
|
mod transaction_ids;
|
||||||
|
mod uiaa;
|
||||||
|
mod users;
|
79
src/database/key_value/pusher.rs
Normal file
79
src/database/key_value/pusher.rs
Normal file
|
@ -0,0 +1,79 @@
|
||||||
|
use ruma::{
|
||||||
|
api::client::push::{set_pusher, Pusher},
|
||||||
|
UserId,
|
||||||
|
};
|
||||||
|
|
||||||
|
use crate::{database::KeyValueDatabase, service, utils, Error, Result};
|
||||||
|
|
||||||
|
impl service::pusher::Data for KeyValueDatabase {
|
||||||
|
fn set_pusher(&self, sender: &UserId, pusher: set_pusher::v3::PusherAction) -> Result<()> {
|
||||||
|
match &pusher {
|
||||||
|
set_pusher::v3::PusherAction::Post(data) => {
|
||||||
|
let mut key = sender.as_bytes().to_vec();
|
||||||
|
key.push(0xff);
|
||||||
|
key.extend_from_slice(data.pusher.ids.pushkey.as_bytes());
|
||||||
|
self.senderkey_pusher.insert(
|
||||||
|
&key,
|
||||||
|
&serde_json::to_vec(&pusher).expect("Pusher is valid JSON value"),
|
||||||
|
)?;
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
set_pusher::v3::PusherAction::Delete(ids) => {
|
||||||
|
let mut key = sender.as_bytes().to_vec();
|
||||||
|
key.push(0xff);
|
||||||
|
key.extend_from_slice(ids.pushkey.as_bytes());
|
||||||
|
self.senderkey_pusher
|
||||||
|
.remove(&key)
|
||||||
|
.map(|_| ())
|
||||||
|
.map_err(Into::into)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn get_pusher(&self, sender: &UserId, pushkey: &str) -> Result<Option<Pusher>> {
|
||||||
|
let mut senderkey = sender.as_bytes().to_vec();
|
||||||
|
senderkey.push(0xff);
|
||||||
|
senderkey.extend_from_slice(pushkey.as_bytes());
|
||||||
|
|
||||||
|
self.senderkey_pusher
|
||||||
|
.get(&senderkey)?
|
||||||
|
.map(|push| {
|
||||||
|
serde_json::from_slice(&push)
|
||||||
|
.map_err(|_| Error::bad_database("Invalid Pusher in db."))
|
||||||
|
})
|
||||||
|
.transpose()
|
||||||
|
}
|
||||||
|
|
||||||
|
fn get_pushers(&self, sender: &UserId) -> Result<Vec<Pusher>> {
|
||||||
|
let mut prefix = sender.as_bytes().to_vec();
|
||||||
|
prefix.push(0xff);
|
||||||
|
|
||||||
|
self.senderkey_pusher
|
||||||
|
.scan_prefix(prefix)
|
||||||
|
.map(|(_, push)| {
|
||||||
|
serde_json::from_slice(&push)
|
||||||
|
.map_err(|_| Error::bad_database("Invalid Pusher in db."))
|
||||||
|
})
|
||||||
|
.collect()
|
||||||
|
}
|
||||||
|
|
||||||
|
fn get_pushkeys<'a>(
|
||||||
|
&'a self,
|
||||||
|
sender: &UserId,
|
||||||
|
) -> Box<dyn Iterator<Item = Result<String>> + 'a> {
|
||||||
|
let mut prefix = sender.as_bytes().to_vec();
|
||||||
|
prefix.push(0xff);
|
||||||
|
|
||||||
|
Box::new(self.senderkey_pusher.scan_prefix(prefix).map(|(k, _)| {
|
||||||
|
let mut parts = k.splitn(2, |&b| b == 0xff);
|
||||||
|
let _senderkey = parts.next();
|
||||||
|
let push_key = parts
|
||||||
|
.next()
|
||||||
|
.ok_or_else(|| Error::bad_database("Invalid senderkey_pusher in db"))?;
|
||||||
|
let push_key_string = utils::string_from_bytes(push_key)
|
||||||
|
.map_err(|_| Error::bad_database("Invalid pusher bytes in senderkey_pusher"))?;
|
||||||
|
|
||||||
|
Ok(push_key_string)
|
||||||
|
}))
|
||||||
|
}
|
||||||
|
}
|
60
src/database/key_value/rooms/alias.rs
Normal file
60
src/database/key_value/rooms/alias.rs
Normal file
|
@ -0,0 +1,60 @@
|
||||||
|
use ruma::{api::client::error::ErrorKind, OwnedRoomAliasId, OwnedRoomId, RoomAliasId, RoomId};
|
||||||
|
|
||||||
|
use crate::{database::KeyValueDatabase, service, services, utils, Error, Result};
|
||||||
|
|
||||||
|
impl service::rooms::alias::Data for KeyValueDatabase {
|
||||||
|
fn set_alias(&self, alias: &RoomAliasId, room_id: &RoomId) -> Result<()> {
|
||||||
|
self.alias_roomid
|
||||||
|
.insert(alias.alias().as_bytes(), room_id.as_bytes())?;
|
||||||
|
let mut aliasid = room_id.as_bytes().to_vec();
|
||||||
|
aliasid.push(0xff);
|
||||||
|
aliasid.extend_from_slice(&services().globals.next_count()?.to_be_bytes());
|
||||||
|
self.aliasid_alias.insert(&aliasid, alias.as_bytes())?;
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
fn remove_alias(&self, alias: &RoomAliasId) -> Result<()> {
|
||||||
|
if let Some(room_id) = self.alias_roomid.get(alias.alias().as_bytes())? {
|
||||||
|
let mut prefix = room_id.to_vec();
|
||||||
|
prefix.push(0xff);
|
||||||
|
|
||||||
|
for (key, _) in self.aliasid_alias.scan_prefix(prefix) {
|
||||||
|
self.aliasid_alias.remove(&key)?;
|
||||||
|
}
|
||||||
|
self.alias_roomid.remove(alias.alias().as_bytes())?;
|
||||||
|
} else {
|
||||||
|
return Err(Error::BadRequest(
|
||||||
|
ErrorKind::NotFound,
|
||||||
|
"Alias does not exist.",
|
||||||
|
));
|
||||||
|
}
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
fn resolve_local_alias(&self, alias: &RoomAliasId) -> Result<Option<OwnedRoomId>> {
|
||||||
|
self.alias_roomid
|
||||||
|
.get(alias.alias().as_bytes())?
|
||||||
|
.map(|bytes| {
|
||||||
|
RoomId::parse(utils::string_from_bytes(&bytes).map_err(|_| {
|
||||||
|
Error::bad_database("Room ID in alias_roomid is invalid unicode.")
|
||||||
|
})?)
|
||||||
|
.map_err(|_| Error::bad_database("Room ID in alias_roomid is invalid."))
|
||||||
|
})
|
||||||
|
.transpose()
|
||||||
|
}
|
||||||
|
|
||||||
|
fn local_aliases_for_room<'a>(
|
||||||
|
&'a self,
|
||||||
|
room_id: &RoomId,
|
||||||
|
) -> Box<dyn Iterator<Item = Result<OwnedRoomAliasId>> + 'a> {
|
||||||
|
let mut prefix = room_id.as_bytes().to_vec();
|
||||||
|
prefix.push(0xff);
|
||||||
|
|
||||||
|
Box::new(self.aliasid_alias.scan_prefix(prefix).map(|(_, bytes)| {
|
||||||
|
utils::string_from_bytes(&bytes)
|
||||||
|
.map_err(|_| Error::bad_database("Invalid alias bytes in aliasid_alias."))?
|
||||||
|
.try_into()
|
||||||
|
.map_err(|_| Error::bad_database("Invalid alias in aliasid_alias."))
|
||||||
|
}))
|
||||||
|
}
|
||||||
|
}
|
61
src/database/key_value/rooms/auth_chain.rs
Normal file
61
src/database/key_value/rooms/auth_chain.rs
Normal file
|
@ -0,0 +1,61 @@
|
||||||
|
use std::{collections::HashSet, mem::size_of, sync::Arc};
|
||||||
|
|
||||||
|
use crate::{database::KeyValueDatabase, service, utils, Result};
|
||||||
|
|
||||||
|
impl service::rooms::auth_chain::Data for KeyValueDatabase {
|
||||||
|
fn get_cached_eventid_authchain(&self, key: &[u64]) -> Result<Option<Arc<HashSet<u64>>>> {
|
||||||
|
// Check RAM cache
|
||||||
|
if let Some(result) = self.auth_chain_cache.lock().unwrap().get_mut(key) {
|
||||||
|
return Ok(Some(Arc::clone(result)));
|
||||||
|
}
|
||||||
|
|
||||||
|
// We only save auth chains for single events in the db
|
||||||
|
if key.len() == 1 {
|
||||||
|
// Check DB cache
|
||||||
|
let chain = self
|
||||||
|
.shorteventid_authchain
|
||||||
|
.get(&key[0].to_be_bytes())?
|
||||||
|
.map(|chain| {
|
||||||
|
chain
|
||||||
|
.chunks_exact(size_of::<u64>())
|
||||||
|
.map(|chunk| utils::u64_from_bytes(chunk).expect("byte length is correct"))
|
||||||
|
.collect()
|
||||||
|
});
|
||||||
|
|
||||||
|
if let Some(chain) = chain {
|
||||||
|
let chain = Arc::new(chain);
|
||||||
|
|
||||||
|
// Cache in RAM
|
||||||
|
self.auth_chain_cache
|
||||||
|
.lock()
|
||||||
|
.unwrap()
|
||||||
|
.insert(vec![key[0]], Arc::clone(&chain));
|
||||||
|
|
||||||
|
return Ok(Some(chain));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(None)
|
||||||
|
}
|
||||||
|
|
||||||
|
fn cache_auth_chain(&self, key: Vec<u64>, auth_chain: Arc<HashSet<u64>>) -> Result<()> {
|
||||||
|
// Only persist single events in db
|
||||||
|
if key.len() == 1 {
|
||||||
|
self.shorteventid_authchain.insert(
|
||||||
|
&key[0].to_be_bytes(),
|
||||||
|
&auth_chain
|
||||||
|
.iter()
|
||||||
|
.flat_map(|s| s.to_be_bytes().to_vec())
|
||||||
|
.collect::<Vec<u8>>(),
|
||||||
|
)?;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Cache in RAM
|
||||||
|
self.auth_chain_cache
|
||||||
|
.lock()
|
||||||
|
.unwrap()
|
||||||
|
.insert(key, auth_chain);
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
}
|
28
src/database/key_value/rooms/directory.rs
Normal file
28
src/database/key_value/rooms/directory.rs
Normal file
|
@ -0,0 +1,28 @@
|
||||||
|
use ruma::{OwnedRoomId, RoomId};
|
||||||
|
|
||||||
|
use crate::{database::KeyValueDatabase, service, utils, Error, Result};
|
||||||
|
|
||||||
|
impl service::rooms::directory::Data for KeyValueDatabase {
|
||||||
|
fn set_public(&self, room_id: &RoomId) -> Result<()> {
|
||||||
|
self.publicroomids.insert(room_id.as_bytes(), &[])
|
||||||
|
}
|
||||||
|
|
||||||
|
fn set_not_public(&self, room_id: &RoomId) -> Result<()> {
|
||||||
|
self.publicroomids.remove(room_id.as_bytes())
|
||||||
|
}
|
||||||
|
|
||||||
|
fn is_public_room(&self, room_id: &RoomId) -> Result<bool> {
|
||||||
|
Ok(self.publicroomids.get(room_id.as_bytes())?.is_some())
|
||||||
|
}
|
||||||
|
|
||||||
|
fn public_rooms<'a>(&'a self) -> Box<dyn Iterator<Item = Result<OwnedRoomId>> + 'a> {
|
||||||
|
Box::new(self.publicroomids.iter().map(|(bytes, _)| {
|
||||||
|
RoomId::parse(
|
||||||
|
utils::string_from_bytes(&bytes).map_err(|_| {
|
||||||
|
Error::bad_database("Room ID in publicroomids is invalid unicode.")
|
||||||
|
})?,
|
||||||
|
)
|
||||||
|
.map_err(|_| Error::bad_database("Room ID in publicroomids is invalid."))
|
||||||
|
}))
|
||||||
|
}
|
||||||
|
}
|
7
src/database/key_value/rooms/edus/mod.rs
Normal file
7
src/database/key_value/rooms/edus/mod.rs
Normal file
|
@ -0,0 +1,7 @@
|
||||||
|
mod presence;
mod read_receipt;
mod typing;

use crate::{database::KeyValueDatabase, service};

// Empty marker impl: `service::rooms::edus::Data` presumably just bundles
// the presence/read-receipt/typing data traits implemented in the
// submodules above — confirm against the trait definition.
impl service::rooms::edus::Data for KeyValueDatabase {}
|
152
src/database/key_value/rooms/edus/presence.rs
Normal file
152
src/database/key_value/rooms/edus/presence.rs
Normal file
|
@ -0,0 +1,152 @@
|
||||||
|
use std::collections::HashMap;
|
||||||
|
|
||||||
|
use ruma::{
|
||||||
|
events::presence::PresenceEvent, presence::PresenceState, OwnedUserId, RoomId, UInt, UserId,
|
||||||
|
};
|
||||||
|
|
||||||
|
use crate::{database::KeyValueDatabase, service, services, utils, Error, Result};
|
||||||
|
|
||||||
|
impl service::rooms::edus::presence::Data for KeyValueDatabase {
    /// Stores `presence` for a room under a fresh global count and refreshes
    /// the user's last-presence-update timestamp.
    fn update_presence(
        &self,
        user_id: &UserId,
        room_id: &RoomId,
        presence: PresenceEvent,
    ) -> Result<()> {
        // TODO: Remove old entry? Or maybe just wipe completely from time to time?

        let count = services().globals.next_count()?.to_be_bytes();

        // Key layout: roomid 0xff count 0xff sender.
        let mut presence_id = room_id.as_bytes().to_vec();
        presence_id.push(0xff);
        presence_id.extend_from_slice(&count);
        presence_id.push(0xff);
        // NOTE(review): the key uses `presence.sender` while the timestamp
        // below uses `user_id` — presumably always the same user; confirm
        // at the callers.
        presence_id.extend_from_slice(presence.sender.as_bytes());

        self.presenceid_presence.insert(
            &presence_id,
            &serde_json::to_vec(&presence).expect("PresenceEvent can be serialized"),
        )?;

        self.userid_lastpresenceupdate.insert(
            user_id.as_bytes(),
            &utils::millis_since_unix_epoch().to_be_bytes(),
        )?;

        Ok(())
    }

    /// Refreshes the user's last-presence-update timestamp to "now"
    /// (milliseconds since the Unix epoch).
    fn ping_presence(&self, user_id: &UserId) -> Result<()> {
        self.userid_lastpresenceupdate.insert(
            user_id.as_bytes(),
            &utils::millis_since_unix_epoch().to_be_bytes(),
        )?;

        Ok(())
    }

    /// Returns the millisecond timestamp of the user's most recent presence
    /// update, or `None` if none was ever recorded.
    fn last_presence_update(&self, user_id: &UserId) -> Result<Option<u64>> {
        self.userid_lastpresenceupdate
            .get(user_id.as_bytes())?
            .map(|bytes| {
                utils::u64_from_bytes(&bytes).map_err(|_| {
                    Error::bad_database("Invalid timestamp in userid_lastpresenceupdate.")
                })
            })
            .transpose()
    }

    /// Loads the presence event stored under (`room_id`, `count`,
    /// `user_id`), if any, running it through `parse_presence_event`.
    fn get_presence_event(
        &self,
        room_id: &RoomId,
        user_id: &UserId,
        count: u64,
    ) -> Result<Option<PresenceEvent>> {
        // Rebuild the exact key: roomid 0xff count 0xff userid.
        let mut presence_id = room_id.as_bytes().to_vec();
        presence_id.push(0xff);
        presence_id.extend_from_slice(&count.to_be_bytes());
        presence_id.push(0xff);
        presence_id.extend_from_slice(user_id.as_bytes());

        self.presenceid_presence
            .get(&presence_id)?
            .map(|value| parse_presence_event(&value))
            .transpose()
    }

    /// Collects the presence events in `room_id` with a count strictly
    /// greater than `since`, keyed by user id.
    fn presence_since(
        &self,
        room_id: &RoomId,
        since: u64,
    ) -> Result<HashMap<OwnedUserId, PresenceEvent>> {
        let mut prefix = room_id.as_bytes().to_vec();
        prefix.push(0xff);

        let mut first_possible_edu = prefix.clone();
        first_possible_edu.extend_from_slice(&(since + 1).to_be_bytes()); // +1 so we don't send the event at since
        let mut hashmap = HashMap::new();

        // Iterate from the first key after `since`, stopping as soon as we
        // leave this room's key range.
        for (key, value) in self
            .presenceid_presence
            .iter_from(&first_possible_edu, false)
            .take_while(|(key, _)| key.starts_with(&prefix))
        {
            // The user id is the last 0xff-separated segment of the key.
            let user_id = UserId::parse(
                utils::string_from_bytes(
                    key.rsplit(|&b| b == 0xff)
                        .next()
                        .expect("rsplit always returns an element"),
                )
                .map_err(|_| Error::bad_database("Invalid UserId bytes in presenceid_presence."))?,
            )
            .map_err(|_| Error::bad_database("Invalid UserId in presenceid_presence."))?;

            let presence = parse_presence_event(&value)?;

            // Later (higher-count) entries overwrite earlier ones, leaving
            // one event per user.
            hashmap.insert(user_id, presence);
        }

        Ok(hashmap)
    }

    /*
    fn presence_maintain(&self, db: Arc<TokioRwLock<Database>>) {
        // TODO @M0dEx: move this to a timed tasks module
        tokio::spawn(async move {
            loop {
                select! {
                    Some(user_id) = self.presence_timers.next() {
                        // TODO @M0dEx: would it be better to acquire the lock outside the loop?
                        let guard = db.read().await;

                        // TODO @M0dEx: add self.presence_timers
                        // TODO @M0dEx: maintain presence
                    }
                }
            }
        });
    }
    */
}
|
||||||
|
|
||||||
|
fn parse_presence_event(bytes: &[u8]) -> Result<PresenceEvent> {
|
||||||
|
let mut presence: PresenceEvent = serde_json::from_slice(bytes)
|
||||||
|
.map_err(|_| Error::bad_database("Invalid presence event in db."))?;
|
||||||
|
|
||||||
|
let current_timestamp: UInt = utils::millis_since_unix_epoch()
|
||||||
|
.try_into()
|
||||||
|
.expect("time is valid");
|
||||||
|
|
||||||
|
if presence.content.presence == PresenceState::Online {
|
||||||
|
// Don't set last_active_ago when the user is online
|
||||||
|
presence.content.last_active_ago = None;
|
||||||
|
} else {
|
||||||
|
// Convert from timestamp to duration
|
||||||
|
presence.content.last_active_ago = presence
|
||||||
|
.content
|
||||||
|
.last_active_ago
|
||||||
|
.map(|timestamp| current_timestamp - timestamp);
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(presence)
|
||||||
|
}
|
150
src/database/key_value/rooms/edus/read_receipt.rs
Normal file
150
src/database/key_value/rooms/edus/read_receipt.rs
Normal file
|
@ -0,0 +1,150 @@
|
||||||
|
use std::mem;
|
||||||
|
|
||||||
|
use ruma::{
|
||||||
|
events::receipt::ReceiptEvent, serde::Raw, CanonicalJsonObject, OwnedUserId, RoomId, UserId,
|
||||||
|
};
|
||||||
|
|
||||||
|
use crate::{database::KeyValueDatabase, service, services, utils, Error, Result};
|
||||||
|
|
||||||
|
impl service::rooms::edus::read_receipt::Data for KeyValueDatabase {
    /// Replaces the user's read receipt in `room_id` with `event`, stored
    /// under a fresh global count.
    fn readreceipt_update(
        &self,
        user_id: &UserId,
        room_id: &RoomId,
        event: ReceiptEvent,
    ) -> Result<()> {
        let mut prefix = room_id.as_bytes().to_vec();
        prefix.push(0xff);

        // Upper bound of the room's key range, used to scan backwards.
        let mut last_possible_key = prefix.clone();
        last_possible_key.extend_from_slice(&u64::MAX.to_be_bytes());

        // Remove old entry
        // Scan the room's receipts newest-first and drop the user's
        // previous receipt, if any. The user id is the last 0xff-separated
        // segment of the key.
        if let Some((old, _)) = self
            .readreceiptid_readreceipt
            .iter_from(&last_possible_key, true)
            .take_while(|(key, _)| key.starts_with(&prefix))
            .find(|(key, _)| {
                key.rsplit(|&b| b == 0xff)
                    .next()
                    .expect("rsplit always returns an element")
                    == user_id.as_bytes()
            })
        {
            // This is the old room_latest
            self.readreceiptid_readreceipt.remove(&old)?;
        }

        // New key layout: roomid 0xff count 0xff userid.
        let mut room_latest_id = prefix;
        room_latest_id.extend_from_slice(&services().globals.next_count()?.to_be_bytes());
        room_latest_id.push(0xff);
        room_latest_id.extend_from_slice(user_id.as_bytes());

        self.readreceiptid_readreceipt.insert(
            &room_latest_id,
            &serde_json::to_vec(&event).expect("EduEvent::to_string always works"),
        )?;

        Ok(())
    }

    /// Iterates over the read receipts in `room_id` with a count strictly
    /// greater than `since`, yielding `(user, count, event)` triples.
    fn readreceipts_since<'a>(
        &'a self,
        room_id: &RoomId,
        since: u64,
    ) -> Box<
        dyn Iterator<
                Item = Result<(
                    OwnedUserId,
                    u64,
                    Raw<ruma::events::AnySyncEphemeralRoomEvent>,
                )>,
            > + 'a,
    > {
        let mut prefix = room_id.as_bytes().to_vec();
        prefix.push(0xff);
        // Second copy of the prefix: one is moved into `take_while`, the
        // other into `map` for offset arithmetic.
        let prefix2 = prefix.clone();

        let mut first_possible_edu = prefix.clone();
        first_possible_edu.extend_from_slice(&(since + 1).to_be_bytes()); // +1 so we don't send the event at since

        Box::new(
            self.readreceiptid_readreceipt
                .iter_from(&first_possible_edu, false)
                .take_while(move |(k, _)| k.starts_with(&prefix2))
                .map(move |(k, v)| {
                    // Key layout is roomid 0xff count 0xff userid; slice the
                    // count and the user id out by byte offset.
                    let count = utils::u64_from_bytes(
                        &k[prefix.len()..prefix.len() + mem::size_of::<u64>()],
                    )
                    .map_err(|_| Error::bad_database("Invalid readreceiptid count in db."))?;
                    let user_id = UserId::parse(
                        utils::string_from_bytes(&k[prefix.len() + mem::size_of::<u64>() + 1..])
                            .map_err(|_| {
                                Error::bad_database("Invalid readreceiptid userid bytes in db.")
                            })?,
                    )
                    .map_err(|_| Error::bad_database("Invalid readreceiptid userid in db."))?;

                    let mut json =
                        serde_json::from_slice::<CanonicalJsonObject>(&v).map_err(|_| {
                            Error::bad_database(
                                "Read receipt in roomlatestid_roomlatest is invalid json.",
                            )
                        })?;
                    // Strip the room_id field from the stored object before
                    // handing the event out.
                    json.remove("room_id");

                    Ok((
                        user_id,
                        count,
                        Raw::from_json(
                            serde_json::value::to_raw_value(&json)
                                .expect("json is valid raw value"),
                        ),
                    ))
                }),
        )
    }

    /// Records the user's private read marker (`count`) for `room_id` and
    /// bumps the corresponding last-update counter.
    fn private_read_set(&self, room_id: &RoomId, user_id: &UserId, count: u64) -> Result<()> {
        // Key layout: roomid 0xff userid.
        let mut key = room_id.as_bytes().to_vec();
        key.push(0xff);
        key.extend_from_slice(user_id.as_bytes());

        self.roomuserid_privateread
            .insert(&key, &count.to_be_bytes())?;

        self.roomuserid_lastprivatereadupdate
            .insert(&key, &services().globals.next_count()?.to_be_bytes())
    }

    /// Returns the user's private read marker in `room_id`, if set.
    fn private_read_get(&self, room_id: &RoomId, user_id: &UserId) -> Result<Option<u64>> {
        let mut key = room_id.as_bytes().to_vec();
        key.push(0xff);
        key.extend_from_slice(user_id.as_bytes());

        self.roomuserid_privateread
            .get(&key)?
            .map_or(Ok(None), |v| {
                Ok(Some(utils::u64_from_bytes(&v).map_err(|_| {
                    Error::bad_database("Invalid private read marker bytes")
                })?))
            })
    }

    /// Returns the global count at which the user's private read marker in
    /// `room_id` was last updated, or 0 if it never was.
    fn last_privateread_update(&self, user_id: &UserId, room_id: &RoomId) -> Result<u64> {
        let mut key = room_id.as_bytes().to_vec();
        key.push(0xff);
        key.extend_from_slice(user_id.as_bytes());

        Ok(self
            .roomuserid_lastprivatereadupdate
            .get(&key)?
            .map(|bytes| {
                utils::u64_from_bytes(&bytes).map_err(|_| {
                    Error::bad_database("Count in roomuserid_lastprivatereadupdate is invalid.")
                })
            })
            .transpose()?
            .unwrap_or(0))
    }
}
|
Some files were not shown because too many files have changed in this diff Show more
Loading…
Reference in a new issue